From 716bd6dec3e044e5c325386b5b0483392b24cefe Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Mon, 30 Dec 2024 11:27:11 -0600 Subject: [PATCH 001/279] vulkan: optimize mul_mat for small values of N (#10991) Make the mul_mat_vec shaders support N>1 (as a spec constant, NUM_COLS) where the batch_strides are overloaded to hold the row strides. Put the loads from the B matrix in the innermost loop because it should cache better. Share some code for reducing the result values to memory in mul_mat_vec_base. --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 87 +++++++------ .../vulkan-shaders/mul_mat_vec.comp | 122 ++++++++---------- .../vulkan-shaders/mul_mat_vec_base.comp | 33 +++++ .../vulkan-shaders/mul_mat_vec_q2_k.comp | 92 ++++++------- .../vulkan-shaders/mul_mat_vec_q3_k.comp | 75 +++++------ .../vulkan-shaders/mul_mat_vec_q4_k.comp | 64 ++++----- .../vulkan-shaders/mul_mat_vec_q5_k.comp | 104 ++++++--------- .../vulkan-shaders/mul_mat_vec_q6_k.comp | 58 +++------ tests/test-backend-ops.cpp | 2 +- 9 files changed, 288 insertions(+), 349 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 8e47e79ae..020e61280 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -145,6 +145,8 @@ class vk_perf_logger; #endif static void ggml_vk_destroy_buffer(vk_buffer& buf); +static constexpr uint32_t mul_mat_vec_max_cols = 8; + struct vk_device_struct { std::mutex mutex; @@ -202,8 +204,8 @@ struct vk_device_struct { vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_id[GGML_TYPE_COUNT]; vk_pipeline pipeline_dequant[GGML_TYPE_COUNT]; - vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_COUNT]; - vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_COUNT]; + vk_pipeline pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_COUNT][mul_mat_vec_max_cols]; + vk_pipeline pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_COUNT][mul_mat_vec_max_cols]; vk_pipeline pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_COUNT]; vk_pipeline pipeline_mul_mat_vec_p021_f16_f32; @@ -1866,33 +1868,35 @@ static void ggml_vk_load_shaders(vk_device& device) { } else if (device->vendor_id == VK_VENDOR_ID_INTEL) rm_stdq = 2; - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f32_f32", mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f32_f32", mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f32_f32", mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f32_f32", mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f32_f32", mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f32_f32", mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f32_f32", mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f32_f32", mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f32_f32", mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f32_f32", mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f32_f32", mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f32_f32", mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f32_f32", mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); + for (uint32_t i = 0; i < mul_mat_vec_max_cols; ++i) { + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f32_f32_"+std::to_string(i+1), mul_mat_vec_f32_f32_f32_len, mul_mat_vec_f32_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f32_f32_"+std::to_string(i+1), mul_mat_vec_f16_f32_f32_len, mul_mat_vec_f16_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_0_f32_f32_len, mul_mat_vec_q4_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_1_f32_f32_len, mul_mat_vec_q4_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_0_f32_f32_len, mul_mat_vec_q5_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_1_f32_f32_len, mul_mat_vec_q5_1_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f32_f32_"+std::to_string(i+1), mul_mat_vec_q8_0_f32_f32_len, mul_mat_vec_q8_0_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q2_k_f32_f32_len, mul_mat_vec_q2_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q3_k_f32_f32_len, mul_mat_vec_q3_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ], "mul_mat_vec_f32_f16_f32", mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ], "mul_mat_vec_f16_f16_f32", mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0], "mul_mat_vec_q4_0_f16_f32", mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1], "mul_mat_vec_q4_1_f16_f32", mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0], "mul_mat_vec_q5_0_f16_f32", mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1], "mul_mat_vec_q5_1_f16_f32", mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0], "mul_mat_vec_q8_0_f16_f32", mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K], "mul_mat_vec_q2_k_f16_f32", mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K], "mul_mat_vec_q3_k_f16_f32", mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K], "mul_mat_vec_q4_k_f16_f32", mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K], "mul_mat_vec_q5_k_f16_f32", mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K], "mul_mat_vec_q6_k_f16_f32", mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_iq4_nl_f16_f32", mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f16_f32_"+std::to_string(i+1), mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f16_f32_"+std::to_string(i+1), mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_0][i], "mul_mat_vec_q4_0_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_0_f16_f32_len, mul_mat_vec_q4_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_1][i], "mul_mat_vec_q4_1_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_1_f16_f32_len, mul_mat_vec_q4_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_0][i], "mul_mat_vec_q5_0_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_0_f16_f32_len, mul_mat_vec_q5_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_1][i], "mul_mat_vec_q5_1_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_1_f16_f32_len, mul_mat_vec_q5_1_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {device->subgroup_size, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q8_0][i], "mul_mat_vec_q8_0_f16_f32_"+std::to_string(i+1), mul_mat_vec_q8_0_f16_f32_len, mul_mat_vec_q8_0_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {1*rm_stdq, 1, 1}, {device->subgroup_size, 1*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q2_K][i], "mul_mat_vec_q2_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q2_k_f16_f32_len, mul_mat_vec_q2_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q3_K][i], "mul_mat_vec_q3_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q3_k_f16_f32_len, mul_mat_vec_q3_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); + } ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F16 ], "mul_mat_vec_id_f16_f32", mul_mat_vec_id_f16_f32_len, mul_mat_vec_id_f16_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); @@ -2892,9 +2896,10 @@ static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_conte return ctx->device->fp16 ? ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f16acc : ctx->device->pipeline_dequant_mul_mat_mat[src0_type].f32acc; } -static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type) { +static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * ctx, ggml_type a_type, ggml_type b_type, uint32_t num_cols) { VK_LOG_DEBUG("ggml_vk_get_dequantize_mul_mat_vec()"); GGML_ASSERT(b_type == GGML_TYPE_F32 || b_type == GGML_TYPE_F16); + GGML_ASSERT(num_cols >= 1 && num_cols <= mul_mat_vec_max_cols); switch (a_type) { case GGML_TYPE_F32: @@ -2915,7 +2920,7 @@ static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * return nullptr; } - return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[a_type] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[a_type]; + return b_type == GGML_TYPE_F32 ? ctx->device->pipeline_dequant_mul_mat_vec_f32_f32[a_type][num_cols-1] : ctx->device->pipeline_dequant_mul_mat_vec_f16_f32[a_type][num_cols-1]; } static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_context * ctx, ggml_type src0_type, ggml_type src1_type, ggml_prec prec) { @@ -3925,8 +3930,6 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& const uint64_t ne12 = src1->ne[2]; const uint64_t ne13 = src1->ne[3]; - GGML_ASSERT(ne11 == 1); - const uint64_t ne20 = dst->ne[0]; const uint64_t ne21 = dst->ne[1]; const uint64_t ne22 = dst->ne[2]; @@ -3935,6 +3938,11 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& const uint64_t r2 = ne12 / ne02; const uint64_t r3 = ne13 / ne03; + // batch_n indicates that we need to compute a few vector results, and this assumes + // ne12 and ne13 are 1. It overloads the batch_strides to hold the row strides. + GGML_ASSERT(ne11 == 1 || ne12 * ne13 == 1); + bool batch_n = ne11 > 1; + ggml_backend_vk_buffer_context * dst_buf_ctx = (ggml_backend_vk_buffer_context *)dst->buffer->context; ggml_backend_vk_buffer_context * src0_buf_ctx = (ggml_backend_vk_buffer_context *)src0->buffer->context; ggml_backend_vk_buffer_context * src1_buf_ctx = (ggml_backend_vk_buffer_context *)src1->buffer->context; @@ -3985,7 +3993,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& } else { to_fp16_vk_1 = ggml_vk_get_to_fp16(ctx, src1->type); } - vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type); + vk_pipeline dmmv = ggml_vk_get_dequantize_mul_mat_vec(ctx, src0->type, src1->type, ne11); GGML_ASSERT(!qx_needs_dequant || to_fp16_vk_0 != nullptr); // NOLINT GGML_ASSERT(!qy_needs_dequant || to_fp16_vk_1 != nullptr); // NOLINT GGML_ASSERT(dmmv != nullptr); @@ -4057,8 +4065,10 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& ggml_vk_cpy_to_contiguous(ctx, subctx, to_fp16_vk_1, src1, { d_Qy, qy_buf_offset, VK_WHOLE_SIZE }, { d_Y, 0, VK_WHOLE_SIZE }); } - uint32_t stride_batch_x = ne00*ne01; - uint32_t stride_batch_y = ne10*ne11; + // For batch_n, the A matrix is the same for each batch, and B/D use the row stride as the batch stride + uint32_t stride_batch_x = batch_n ? 0 : ne00*ne01; + uint32_t stride_batch_y = batch_n ? ne10 : (ne10*ne11); + uint32_t stride_batch_d = batch_n ? ne20 : (ne20*ne21); if (!ggml_vk_dim01_contiguous(src0) && !qx_needs_dequant) { stride_batch_x = src0->nb[0] / ggml_type_size(src0->type); @@ -4081,7 +4091,7 @@ static void ggml_vk_mul_mat_vec_q_f16(ggml_backend_vk_context * ctx, vk_context& // compute const vk_mat_vec_push_constants pc = { (uint32_t)ne00, (uint32_t)ne10, (uint32_t)ne10, (uint32_t)ne01, - stride_batch_x, stride_batch_y, (uint32_t)(ne20*ne21), + stride_batch_x, stride_batch_y, stride_batch_d, (uint32_t)ne02, (uint32_t)ne12, (uint32_t)r2, (uint32_t)r3, }; ggml_vk_sync_buffers(subctx); @@ -4261,7 +4271,10 @@ static void ggml_vk_mul_mat(ggml_backend_vk_context * ctx, vk_context& subctx, c } else if (src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && dst->ne[1] == 1 && !ggml_is_permuted(src0) && !ggml_is_permuted(src1)) { ggml_vk_mul_mat_vec_nc_f16_f32(ctx, subctx, src0, src1, dst, dryrun); - } else if (dst->ne[1] == 1 && (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) { + // mul_mat_vec supports batching ne12*ne13 when ne11==1, or treating ne11 as the batch size (up to four) + // when ne12 and ne13 are one. + } else if ((dst->ne[1] == 1 || (dst->ne[1] <= mul_mat_vec_max_cols && src1->ne[2] * src1->ne[3] == 1)) && + (src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type))) { ggml_vk_mul_mat_vec_q_f16(ctx, subctx, src0, src1, dst, dryrun); } else { ggml_vk_mul_mat_q_f16(ctx, subctx, src0, src1, dst, dryrun); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp index 187c31916..24875cdcf 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp @@ -9,9 +9,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - #if !defined(DATA_A_F32) && !defined(DATA_A_F16) #define K_PER_ITER 8 #else @@ -21,70 +18,70 @@ layout (constant_id = 1) const uint NUM_ROWS = 1; uint a_offset, b_offset, d_offset, y_offset; -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - -void iter(inout FLOAT_TYPE temp[NUM_ROWS], const uint first_row, const uint num_rows, const uint tid, const uint i, bool lastiter) +void iter(inout FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const uint first_row, const uint num_rows, const uint tid, const uint i, bool lastiter) { - const uint col = i*BLOCK_SIZE + K_PER_ITER*tid; - const uint iqs = (col%QUANT_K)/QUANT_R; // quant index - const uint iybs = col - col%QUANT_K; // y block start index + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + const uint col = i*BLOCK_SIZE + K_PER_ITER*tid; + const uint iqs = (col%QUANT_K)/QUANT_R; // quant index + const uint iybs = col - col%QUANT_K; // y block start index #if K_PER_ITER == 8 #if QUANT_R == 2 - const B_TYPE_VEC4 bv02 = data_b_v4[(b_offset + iybs + iqs) / 4]; - const B_TYPE_VEC4 bv13 = data_b_v4[(b_offset + iybs + iqs + y_offset) / 4]; - const vec4 bv0 = vec4(bv02.x, bv13.x, bv02.y, bv13.y); - const vec4 bv1 = vec4(bv02.z, bv13.z, bv02.w, bv13.w); + const B_TYPE_VEC4 bv02 = data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4]; + const B_TYPE_VEC4 bv13 = data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs + y_offset) / 4]; + const vec4 bv0 = vec4(bv02.x, bv13.x, bv02.y, bv13.y); + const vec4 bv1 = vec4(bv02.z, bv13.z, bv02.w, bv13.w); #else - const vec4 bv0 = vec4(data_b_v4[(b_offset + iybs + iqs) / 4]); - const vec4 bv1 = vec4(data_b_v4[(b_offset + iybs + iqs) / 4 + 1]); + const vec4 bv0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4]); + const vec4 bv1 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4 + 1]); #endif #else - // Check if the second of the pair of elements is OOB, and don't fetch B or - // accumulate it. We still fetch a pair of elements for A, which is fine for - // quantized formats since they'll be within the same block. We should - // probably skip fetching the second element for F16/F32, but as of now we - // still do. - const bool OOB = lastiter && (iybs + iqs + y_offset >= p.ncols); + // Check if the second of the pair of elements is OOB, and don't fetch B or + // accumulate it. We still fetch a pair of elements for A, which is fine for + // quantized formats since they'll be within the same block. We should + // probably skip fetching the second element for F16/F32, but as of now we + // still do. + const bool OOB = lastiter && (iybs + iqs + y_offset >= p.ncols); - FLOAT_TYPE b0 = 0, b1 = 0; - b0 = FLOAT_TYPE(data_b[b_offset + iybs + iqs]); - if (!OOB) { - b1 = FLOAT_TYPE(data_b[b_offset + iybs + iqs + y_offset]); - } + FLOAT_TYPE b0 = 0, b1 = 0; + b0 = FLOAT_TYPE(data_b[j*p.batch_stride_b + b_offset + iybs + iqs]); + if (!OOB) { + b1 = FLOAT_TYPE(data_b[j*p.batch_stride_b + b_offset + iybs + iqs + y_offset]); + } #endif - uint ibi = first_row*p.ncols; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - const uint ib = (ibi + col)/QUANT_K; // block index - ibi += p.ncols; + uint ibi = first_row*p.ncols; + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + const uint ib = (ibi + col)/QUANT_K; // block index + ibi += p.ncols; #if K_PER_ITER == 8 - vec4 v = dequantize4(ib, iqs, a_offset); - vec4 v2 = dequantize4(ib, iqs+(4/QUANT_R), a_offset); + vec4 v = dequantize4(ib, iqs, a_offset); + vec4 v2 = dequantize4(ib, iqs+(4/QUANT_R), a_offset); - const vec2 dm = get_dm(ib, a_offset); - if (dm.y != 0) { // quant has min component - v = v * dm.x + dm.y; - v2 = v2 * dm.x + dm.y; - } + const vec2 dm = get_dm(ib, a_offset); + if (dm.y != 0) { // quant has min component + v = v * dm.x + dm.y; + v2 = v2 * dm.x + dm.y; + } - // matrix multiplication - FLOAT_TYPE rowtmp = dot(bv0, v); - rowtmp += dot(bv1, v2); + // matrix multiplication + FLOAT_TYPE rowtmp = dot(bv0, v); + rowtmp += dot(bv1, v2); - if (dm.y == 0) - rowtmp *= dm.x; + if (dm.y == 0) + rowtmp *= dm.x; - temp[n] += rowtmp; + temp[j][n] += rowtmp; #else - const vec2 v = dequantize(ib, iqs, a_offset); + const vec2 v = dequantize(ib, iqs, a_offset); - // matrix multiplication - temp[n] = fma(FLOAT_TYPE(v.x), b0, temp[n]); - if (!OOB) { - temp[n] = fma(FLOAT_TYPE(v.y), b1, temp[n]); - } + // matrix multiplication + temp[j][n] = fma(FLOAT_TYPE(v.x), b0, temp[j][n]); + if (!OOB) { + temp[j][n] = fma(FLOAT_TYPE(v.y), b1, temp[j][n]); + } #endif + } } } @@ -96,10 +93,12 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { y_offset = QUANT_R == 1 ? 1 : QUANT_K/2; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } uint num_iters = p.ncols / (K_PER_ITER * BLOCK_SIZE); @@ -131,24 +130,7 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { i++; } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp index 3894fca82..903753c7e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_base.comp @@ -83,3 +83,36 @@ void get_offsets(out uint a_offset, out uint b_offset, out uint d_offset) { batch_idx * p.batch_stride_d; #endif } + +layout (constant_id = 0) const uint BLOCK_SIZE = 32; +layout (constant_id = 1) const uint NUM_ROWS = 1; +layout (constant_id = 2) const uint NUM_COLS = 1; + +shared FLOAT_TYPE tmpsh[NUM_COLS][NUM_ROWS][BLOCK_SIZE]; + +void reduce_result(const in FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const in uint32_t d_offset, const in uint32_t first_row, const in uint32_t num_rows, const in uint32_t tid) { + // sum up partial sums and write back result + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[j][n][tid] = temp[j][n]; + } + } + barrier(); + [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { + if (tid < s) { + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + tmpsh[j][n][tid] += tmpsh[j][n][tid + s]; + } + } + } + barrier(); + } + if (tid == 0) { + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint n = 0; n < num_rows; ++n) { + data_d[j*p.batch_stride_d + d_offset + first_row + n] = D_TYPE(tmpsh[j][n][0]); + } + } + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp index 138ad0184..934213446 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp @@ -5,11 +5,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -32,24 +27,17 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint s_offset = 8*v_im; const uint y_offset = 128*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; - B_TYPE_VEC2 b0 = data_b_v2[(b_offset + y_idx) / 2 + 0]; - B_TYPE_VEC2 b16 = data_b_v2[(b_offset + y_idx) / 2 + 8]; - B_TYPE_VEC2 b32 = data_b_v2[(b_offset + y_idx) / 2 + 16]; - B_TYPE_VEC2 b48 = data_b_v2[(b_offset + y_idx) / 2 + 24]; - B_TYPE_VEC2 b64 = data_b_v2[(b_offset + y_idx) / 2 + 32]; - B_TYPE_VEC2 b80 = data_b_v2[(b_offset + y_idx) / 2 + 40]; - B_TYPE_VEC2 b96 = data_b_v2[(b_offset + y_idx) / 2 + 48]; - B_TYPE_VEC2 b112 = data_b_v2[(b_offset + y_idx) / 2 + 56]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; f16vec2 d = data_a[ib0 + i].d; @@ -74,48 +62,42 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uvec2 qs0 = uvec2(unpack8(qs0_u16)); uvec2 qs16 = uvec2(unpack8(qs16_u16)); - FLOAT_TYPE sum1 = FLOAT_TYPE(0.0); - FLOAT_TYPE sum2 = FLOAT_TYPE(0.0); - [[unroll]] for (int l = 0; l < 2; ++l) { - sum1 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_lo4[0]) * FLOAT_TYPE((qs0[l] >> 0) & 3), - fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_lo4[1]) * FLOAT_TYPE((qs16[l] >> 0) & 3), - fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_lo4[2]) * FLOAT_TYPE((qs0[l] >> 2) & 3), - fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_lo4[3]) * FLOAT_TYPE((qs16[l] >> 2) & 3), - fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_lo4[0]) * FLOAT_TYPE((qs0[l] >> 4) & 3), - fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_lo4[1]) * FLOAT_TYPE((qs16[l] >> 4) & 3), - fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_lo4[2]) * FLOAT_TYPE((qs0[l] >> 6) & 3), - fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_lo4[3]) * FLOAT_TYPE((qs16[l] >> 6) & 3), sum1)))))))); - sum2 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_hi4[0]), - fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_hi4[1]), - fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_hi4[2]), - fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_hi4[3]), - fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_hi4[0]), - fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_hi4[1]), - fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_hi4[2]), - fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_hi4[3]), sum2)))))))); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + B_TYPE_VEC2 b0 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0]; + B_TYPE_VEC2 b16 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8]; + B_TYPE_VEC2 b32 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]; + B_TYPE_VEC2 b48 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]; + B_TYPE_VEC2 b64 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]; + B_TYPE_VEC2 b80 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]; + B_TYPE_VEC2 b96 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]; + B_TYPE_VEC2 b112 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]; + + FLOAT_TYPE sum1 = FLOAT_TYPE(0.0); + FLOAT_TYPE sum2 = FLOAT_TYPE(0.0); + [[unroll]] for (int l = 0; l < 2; ++l) { + sum1 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_lo4[0]) * FLOAT_TYPE((qs0[l] >> 0) & 3), + fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_lo4[1]) * FLOAT_TYPE((qs16[l] >> 0) & 3), + fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_lo4[2]) * FLOAT_TYPE((qs0[l] >> 2) & 3), + fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_lo4[3]) * FLOAT_TYPE((qs16[l] >> 2) & 3), + fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_lo4[0]) * FLOAT_TYPE((qs0[l] >> 4) & 3), + fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_lo4[1]) * FLOAT_TYPE((qs16[l] >> 4) & 3), + fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_lo4[2]) * FLOAT_TYPE((qs0[l] >> 6) & 3), + fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_lo4[3]) * FLOAT_TYPE((qs16[l] >> 6) & 3), sum1)))))))); + sum2 = fma(FLOAT_TYPE(b0[l]), FLOAT_TYPE(s0_hi4[0]), + fma(FLOAT_TYPE(b16[l]), FLOAT_TYPE(s0_hi4[1]), + fma(FLOAT_TYPE(b32[l]), FLOAT_TYPE(s0_hi4[2]), + fma(FLOAT_TYPE(b48[l]), FLOAT_TYPE(s0_hi4[3]), + fma(FLOAT_TYPE(b64[l]), FLOAT_TYPE(s4_hi4[0]), + fma(FLOAT_TYPE(b80[l]), FLOAT_TYPE(s4_hi4[1]), + fma(FLOAT_TYPE(b96[l]), FLOAT_TYPE(s4_hi4[2]), + fma(FLOAT_TYPE(b112[l]), FLOAT_TYPE(s4_hi4[3]), sum2)))))))); + } + temp[j][n] = fma(dall, sum1, fma(-dmin, sum2, temp[j][n])); } - temp[n] = fma(dall, sum1, fma(-dmin, sum2, temp[n])); } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp index 82ec42d25..86b0159d9 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp @@ -5,11 +5,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -33,10 +28,12 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint q_offset = 32*v_im + l0; const uint y_offset = 128*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } const uint s_shift = 4 * v_im; @@ -44,15 +41,6 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; - B_TYPE_VEC2 b0 = data_b_v2[(b_offset + y_idx) / 2 + 0]; - B_TYPE_VEC2 b16 = data_b_v2[(b_offset + y_idx) / 2 + 8]; - B_TYPE_VEC2 b32 = data_b_v2[(b_offset + y_idx) / 2 + 16]; - B_TYPE_VEC2 b48 = data_b_v2[(b_offset + y_idx) / 2 + 24]; - B_TYPE_VEC2 b64 = data_b_v2[(b_offset + y_idx) / 2 + 32]; - B_TYPE_VEC2 b80 = data_b_v2[(b_offset + y_idx) / 2 + 40]; - B_TYPE_VEC2 b96 = data_b_v2[(b_offset + y_idx) / 2 + 48]; - B_TYPE_VEC2 b112 = data_b_v2[(b_offset + y_idx) / 2 + 56]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); @@ -70,39 +58,34 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { u8vec2 s8 = unpack8(s8_16); u8vec2 s10 = unpack8(s10_16); - FLOAT_TYPE sum = FLOAT_TYPE(0.0); - [[unroll]] for (int l = 0; l < 2; ++l) { - sum = fma(FLOAT_TYPE(b0[l]) * FLOAT_TYPE(int8_t(((s0[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 0)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b32[l]) * FLOAT_TYPE(int8_t(((s2[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 1)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b64[l]) * FLOAT_TYPE(int8_t(((s4[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 2)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b96[l]) * FLOAT_TYPE(int8_t(((s6[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 3)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b16[l]) * FLOAT_TYPE(int8_t(((s0[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b48[l]) * FLOAT_TYPE(int8_t(((s2[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b80[l]) * FLOAT_TYPE(int8_t(((s4[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)), - fma(FLOAT_TYPE(b112[l]) * FLOAT_TYPE(int8_t(((s6[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4)), sum)))))))); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + + B_TYPE_VEC2 b0 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0]; + B_TYPE_VEC2 b16 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8]; + B_TYPE_VEC2 b32 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]; + B_TYPE_VEC2 b48 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]; + B_TYPE_VEC2 b64 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]; + B_TYPE_VEC2 b80 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]; + B_TYPE_VEC2 b96 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]; + B_TYPE_VEC2 b112 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]; + + FLOAT_TYPE sum = FLOAT_TYPE(0.0); + [[unroll]] for (int l = 0; l < 2; ++l) { + sum = fma(FLOAT_TYPE(b0[l]) * FLOAT_TYPE(int8_t(((s0[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 0)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b32[l]) * FLOAT_TYPE(int8_t(((s2[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 1)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b64[l]) * FLOAT_TYPE(int8_t(((s4[0] >> s_shift) & 0xF) | ((s8[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 2)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b96[l]) * FLOAT_TYPE(int8_t(((s6[0] >> s_shift) & 0xF) | ((s10[0] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l ] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l ] & (m << 3)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b16[l]) * FLOAT_TYPE(int8_t(((s0[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] ) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 0)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b48[l]) * FLOAT_TYPE(int8_t(((s2[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 0) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 2) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 1)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b80[l]) * FLOAT_TYPE(int8_t(((s4[1] >> s_shift) & 0xF) | ((s8[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 4) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 2)) != 0) ? 0 : 4)), + fma(FLOAT_TYPE(b112[l]) * FLOAT_TYPE(int8_t(((s6[1] >> s_shift) & 0xF) | ((s10[1] >> (s_shift + 2) & 0x3) << 4)) - 32), FLOAT_TYPE(((data_a[ib0 + i].qs[q_offset + l+16] >> 6) & 3) - (((data_a[ib0 + i].hmask[l0 + l+16] & (m << 3)) != 0) ? 0 : 4)), sum)))))))); + } + temp[j][n] = fma(d, sum, temp[j][n]); } - temp[n] = fma(d, sum, temp[n]); } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp index 677c207a8..cd1dd8e89 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp @@ -6,11 +6,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -36,21 +31,18 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint q_offset = 32*v_im + l0; const uint y_offset = 64*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y1_idx = i * QUANT_K + y_offset; const uint y2_idx = y1_idx + 128; - B_TYPE_VEC4 by10 = data_b_v4[(b_offset + y1_idx) / 4]; - B_TYPE_VEC4 by132 = data_b_v4[(b_offset + y1_idx) / 4 + 8]; - B_TYPE_VEC4 by20 = data_b_v4[(b_offset + y2_idx) / 4]; - B_TYPE_VEC4 by232 = data_b_v4[(b_offset + y2_idx) / 4 + 8]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; f16vec2 d = data_a[ib0 + i].d; @@ -103,37 +95,27 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint32_t q4_14 = qs64_hi4.z; const uint32_t q4_15 = qs64_hi4.w; - const FLOAT_TYPE sx = fma(FLOAT_TYPE(by10.x), q4_0, fma(FLOAT_TYPE(by10.y), q4_1, fma(FLOAT_TYPE(by10.z), q4_2, FLOAT_TYPE(by10.w) * q4_3))); - const FLOAT_TYPE sy = fma(FLOAT_TYPE(by132.x), q4_4, fma(FLOAT_TYPE(by132.y), q4_5, fma(FLOAT_TYPE(by132.z), q4_6, FLOAT_TYPE(by132.w) * q4_7))); - const FLOAT_TYPE sz = fma(FLOAT_TYPE(by20.x), q4_8, fma(FLOAT_TYPE(by20.y), q4_9, fma(FLOAT_TYPE(by20.z), q4_10, FLOAT_TYPE(by20.w) * q4_11))); - const FLOAT_TYPE sw = fma(FLOAT_TYPE(by232.x), q4_12, fma(FLOAT_TYPE(by232.y), q4_13, fma(FLOAT_TYPE(by232.z), q4_14, FLOAT_TYPE(by232.w) * q4_15))); - const FLOAT_TYPE smin = - fma(FLOAT_TYPE(by10.x), sc2, fma(FLOAT_TYPE(by132.x), sc3, fma(FLOAT_TYPE(by20.x), sc6, fma(FLOAT_TYPE(by232.x), sc7, - fma(FLOAT_TYPE(by10.y), sc2, fma(FLOAT_TYPE(by132.y), sc3, fma(FLOAT_TYPE(by20.y), sc6, fma(FLOAT_TYPE(by232.y), sc7, - fma(FLOAT_TYPE(by10.z), sc2, fma(FLOAT_TYPE(by132.z), sc3, fma(FLOAT_TYPE(by20.z), sc6, fma(FLOAT_TYPE(by232.z), sc7, - fma(FLOAT_TYPE(by10.w), sc2, fma(FLOAT_TYPE(by132.w), sc3, fma(FLOAT_TYPE(by20.w), sc6, FLOAT_TYPE(by232.w) * sc7))))))))))))))); - temp[n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[n])); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + B_TYPE_VEC4 by10 = data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4]; + B_TYPE_VEC4 by132 = data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4 + 8]; + B_TYPE_VEC4 by20 = data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4]; + B_TYPE_VEC4 by232 = data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4 + 8]; + + const FLOAT_TYPE sx = fma(FLOAT_TYPE(by10.x), q4_0, fma(FLOAT_TYPE(by10.y), q4_1, fma(FLOAT_TYPE(by10.z), q4_2, FLOAT_TYPE(by10.w) * q4_3))); + const FLOAT_TYPE sy = fma(FLOAT_TYPE(by132.x), q4_4, fma(FLOAT_TYPE(by132.y), q4_5, fma(FLOAT_TYPE(by132.z), q4_6, FLOAT_TYPE(by132.w) * q4_7))); + const FLOAT_TYPE sz = fma(FLOAT_TYPE(by20.x), q4_8, fma(FLOAT_TYPE(by20.y), q4_9, fma(FLOAT_TYPE(by20.z), q4_10, FLOAT_TYPE(by20.w) * q4_11))); + const FLOAT_TYPE sw = fma(FLOAT_TYPE(by232.x), q4_12, fma(FLOAT_TYPE(by232.y), q4_13, fma(FLOAT_TYPE(by232.z), q4_14, FLOAT_TYPE(by232.w) * q4_15))); + const FLOAT_TYPE smin = + fma(FLOAT_TYPE(by10.x), sc2, fma(FLOAT_TYPE(by132.x), sc3, fma(FLOAT_TYPE(by20.x), sc6, fma(FLOAT_TYPE(by232.x), sc7, + fma(FLOAT_TYPE(by10.y), sc2, fma(FLOAT_TYPE(by132.y), sc3, fma(FLOAT_TYPE(by20.y), sc6, fma(FLOAT_TYPE(by232.y), sc7, + fma(FLOAT_TYPE(by10.z), sc2, fma(FLOAT_TYPE(by132.z), sc3, fma(FLOAT_TYPE(by20.z), sc6, fma(FLOAT_TYPE(by232.z), sc7, + fma(FLOAT_TYPE(by10.w), sc2, fma(FLOAT_TYPE(by132.w), sc3, fma(FLOAT_TYPE(by20.w), sc6, FLOAT_TYPE(by232.w) * sc7))))))))))))))); + temp[j][n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[j][n])); + } } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp index ed3c25d89..0a68891c3 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp @@ -6,11 +6,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -33,25 +28,18 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint q_offset = 32*v_im + l0; const uint y_offset = 64*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y1_idx = i * QUANT_K + y_offset; const uint y2_idx = y1_idx + 128; - B_TYPE_VEC2 by10 = data_b_v2[(b_offset + y1_idx) / 2]; - B_TYPE_VEC2 by116 = data_b_v2[(b_offset + y1_idx) / 2 + 8]; - B_TYPE_VEC2 by132 = data_b_v2[(b_offset + y1_idx) / 2 + 16]; - B_TYPE_VEC2 by148 = data_b_v2[(b_offset + y1_idx) / 2 + 24]; - B_TYPE_VEC2 by20 = data_b_v2[(b_offset + y2_idx) / 2]; - B_TYPE_VEC2 by216 = data_b_v2[(b_offset + y2_idx) / 2 + 8]; - B_TYPE_VEC2 by232 = data_b_v2[(b_offset + y2_idx) / 2 + 16]; - B_TYPE_VEC2 by248 = data_b_v2[(b_offset + y2_idx) / 2 + 24]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; f16vec2 d = data_a[ib0 + i].d; @@ -116,53 +104,47 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint32_t q4_14 = qs64_80_hi4.z; const uint32_t q4_15 = qs64_80_hi4.w; - const FLOAT_TYPE sx = - fma(FLOAT_TYPE(by10.x), q4_0, - fma(FLOAT_TYPE(by10.y), q4_1, - fma(FLOAT_TYPE(by116.x), q4_2, - FLOAT_TYPE(by116.y) * q4_3))); - const FLOAT_TYPE sy = - fma(FLOAT_TYPE(by132.x), q4_4, - fma(FLOAT_TYPE(by132.y), q4_5, - fma(FLOAT_TYPE(by148.x), q4_6, - FLOAT_TYPE(by148.y) * q4_7))); - const FLOAT_TYPE sz = - fma(FLOAT_TYPE(by20.x), q4_8, - fma(FLOAT_TYPE(by20.y), q4_9, - fma(FLOAT_TYPE(by216.x), q4_10, - FLOAT_TYPE(by216.y) * q4_11))); - const FLOAT_TYPE sw = - fma(FLOAT_TYPE(by232.x), q4_12, - fma(FLOAT_TYPE(by232.y), q4_13, - fma(FLOAT_TYPE(by248.x), q4_14, - FLOAT_TYPE(by248.y) * q4_15))); - const FLOAT_TYPE smin = - fma(FLOAT_TYPE(by10.x) + FLOAT_TYPE(by10.y) + FLOAT_TYPE(by116.x) + FLOAT_TYPE(by116.y), sc2, - fma(FLOAT_TYPE(by132.x) + FLOAT_TYPE(by132.y) + FLOAT_TYPE(by148.x) + FLOAT_TYPE(by148.y), sc3, - fma(FLOAT_TYPE(by20.x) + FLOAT_TYPE(by20.y) + FLOAT_TYPE(by216.x) + FLOAT_TYPE(by216.y), sc6, - (FLOAT_TYPE(by232.x) + FLOAT_TYPE(by232.y) + FLOAT_TYPE(by248.x) + FLOAT_TYPE(by248.y)) * sc7))); - temp[n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[n])); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + B_TYPE_VEC2 by10 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2]; + B_TYPE_VEC2 by116 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 8]; + B_TYPE_VEC2 by132 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 16]; + B_TYPE_VEC2 by148 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 24]; + B_TYPE_VEC2 by20 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2]; + B_TYPE_VEC2 by216 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 8]; + B_TYPE_VEC2 by232 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 16]; + B_TYPE_VEC2 by248 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 24]; + + const FLOAT_TYPE sx = + fma(FLOAT_TYPE(by10.x), q4_0, + fma(FLOAT_TYPE(by10.y), q4_1, + fma(FLOAT_TYPE(by116.x), q4_2, + FLOAT_TYPE(by116.y) * q4_3))); + const FLOAT_TYPE sy = + fma(FLOAT_TYPE(by132.x), q4_4, + fma(FLOAT_TYPE(by132.y), q4_5, + fma(FLOAT_TYPE(by148.x), q4_6, + FLOAT_TYPE(by148.y) * q4_7))); + const FLOAT_TYPE sz = + fma(FLOAT_TYPE(by20.x), q4_8, + fma(FLOAT_TYPE(by20.y), q4_9, + fma(FLOAT_TYPE(by216.x), q4_10, + FLOAT_TYPE(by216.y) * q4_11))); + const FLOAT_TYPE sw = + fma(FLOAT_TYPE(by232.x), q4_12, + fma(FLOAT_TYPE(by232.y), q4_13, + fma(FLOAT_TYPE(by248.x), q4_14, + FLOAT_TYPE(by248.y) * q4_15))); + const FLOAT_TYPE smin = + fma(FLOAT_TYPE(by10.x) + FLOAT_TYPE(by10.y) + FLOAT_TYPE(by116.x) + FLOAT_TYPE(by116.y), sc2, + fma(FLOAT_TYPE(by132.x) + FLOAT_TYPE(by132.y) + FLOAT_TYPE(by148.x) + FLOAT_TYPE(by148.y), sc3, + fma(FLOAT_TYPE(by20.x) + FLOAT_TYPE(by20.y) + FLOAT_TYPE(by216.x) + FLOAT_TYPE(by216.y), sc6, + (FLOAT_TYPE(by232.x) + FLOAT_TYPE(by232.y) + FLOAT_TYPE(by248.x) + FLOAT_TYPE(by248.y)) * sc7))); + temp[j][n] = fma(dall, fma(sx, sc0, fma(sy, sc1, fma(sz, sc4, sw * sc5))), fma(-dmin, smin, temp[j][n])); + } } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp index fab4ff5ff..70e13a56b 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp @@ -6,11 +6,6 @@ layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in; -layout (constant_id = 0) const uint BLOCK_SIZE = 32; -layout (constant_id = 1) const uint NUM_ROWS = 1; - -shared FLOAT_TYPE tmpsh[NUM_ROWS][BLOCK_SIZE]; - void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uint a_offset, b_offset, d_offset; get_offsets(a_offset, b_offset, d_offset); @@ -36,20 +31,17 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint s_offset = 8*v_im + is; const uint y_offset = 128*v_im + l0; - FLOAT_TYPE temp[NUM_ROWS]; + FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; - [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { - temp[i] = FLOAT_TYPE(0); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) { + temp[j][i] = FLOAT_TYPE(0); + } } [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += it_size) { const uint y_idx = i * QUANT_K + y_offset; - B_TYPE_VEC4 by0 = data_b_v4[(b_offset + y_idx) / 4]; - B_TYPE_VEC4 by32 = data_b_v4[(b_offset + y_idx) / 4 + 8]; - B_TYPE_VEC4 by64 = data_b_v4[(b_offset + y_idx) / 4 + 16]; - B_TYPE_VEC4 by96 = data_b_v4[(b_offset + y_idx) / 4 + 24]; - [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); @@ -84,35 +76,25 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uvec4 q2 = uvec4(unpack8(q2_u32)); uvec4 q3 = uvec4(unpack8(q3_u32)); - FLOAT_TYPE sum = FLOAT_TYPE(0.0); - [[unroll]] for (int l = 0; l < 4; ++l) { - sum = fma(FLOAT_TYPE(by0[l]) * scales[0], FLOAT_TYPE(int8_t(q0[l]) - 32), - fma(FLOAT_TYPE(by32[l]) * scales[1], FLOAT_TYPE(int8_t(q1[l]) - 32), - fma(FLOAT_TYPE(by64[l]) * scales[2], FLOAT_TYPE(int8_t(q2[l]) - 32), - fma(FLOAT_TYPE(by96[l]) * scales[3], FLOAT_TYPE(int8_t(q3[l]) - 32), sum)))); + [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { + B_TYPE_VEC4 by0 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4]; + B_TYPE_VEC4 by32 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 8]; + B_TYPE_VEC4 by64 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 16]; + B_TYPE_VEC4 by96 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 24]; + + FLOAT_TYPE sum = FLOAT_TYPE(0.0); + [[unroll]] for (int l = 0; l < 4; ++l) { + sum = fma(FLOAT_TYPE(by0[l]) * scales[0], FLOAT_TYPE(int8_t(q0[l]) - 32), + fma(FLOAT_TYPE(by32[l]) * scales[1], FLOAT_TYPE(int8_t(q1[l]) - 32), + fma(FLOAT_TYPE(by64[l]) * scales[2], FLOAT_TYPE(int8_t(q2[l]) - 32), + fma(FLOAT_TYPE(by96[l]) * scales[3], FLOAT_TYPE(int8_t(q3[l]) - 32), sum)))); + } + temp[j][n] += sum * d; } - temp[n] += sum * d; } } - // sum up partial sums and write back result - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] = temp[n]; - } - barrier(); - [[unroll]] for (uint s = BLOCK_SIZE/2; s > 0; s >>= 1) { - if (tid < s) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - tmpsh[n][tid] += tmpsh[n][tid + s]; - } - } - barrier(); - } - if (tid == 0) { - [[unroll]] for (uint n = 0; n < num_rows; ++n) { - data_d[d_offset + first_row + n] = D_TYPE(tmpsh[n][0]); - } - } + reduce_result(temp, d_offset, first_row, num_rows, tid); } void main() { diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index c79acffd2..1e892f663 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -3937,7 +3937,7 @@ static std::vector> make_test_cases_perf() { test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1})); test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32000, 512, 1, 1})); - for (int bs : {1, 512}) { + for (int bs : {1, 2, 3, 4, 5, 8, 512}) { for (ggml_type type_a : all_types) { for (ggml_type type_b : {GGML_TYPE_F32}) { test_cases.emplace_back(new test_mul_mat(type_a, type_b, 4096, bs, 14336, {1, 1}, {1, 1})); From 6e1531aca5ed17f078973b4700fcdadbda4a34a5 Mon Sep 17 00:00:00 2001 From: Peter Date: Tue, 31 Dec 2024 11:46:06 +1100 Subject: [PATCH 002/279] common, examples, ggml : fix MSYS2 GCC compiler errors and warnings when building with LLAMA_CURL=ON and GGML_OPENCL=ON (#11013) In common/common.cpp: * Convert usage of stat() function call to check if file exists to standard library function std::filesystem::exists (error unable to match to correct function signature) * Additional conditions to check if PATH_MAX is already defined in WIN32 environment (warning it is already defined in MSYS2) In examples/run/run.cpp: * Add io.h header inclusion (error cannot find function _get_osfhandle) * Change initialisers for OVERLAPPED to empty struct (warning about uninitialised members) * Add initialiser for hFile (warning it may be uninitialised) * Add cast for curl_off_t percentage value to long int in generate_progress_prefix function (warning that curl_off_t is long long int) In ggml/src/ggml-opencl/ggml-opencl.cpp: * Initialise certain declared cl_mem variables to nullptr for greater safety (warning about B_d variable possibly used unassigned) --- common/common.cpp | 8 +++++--- examples/run/run.cpp | 9 +++++---- ggml/src/ggml-opencl/ggml-opencl.cpp | 12 ++++++------ 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 20be92911..9071999a7 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -62,7 +63,9 @@ #ifdef __linux__ #include #elif defined(_WIN32) -#define PATH_MAX MAX_PATH +# if !defined(PATH_MAX) +# define PATH_MAX MAX_PATH +# endif #else #include #endif @@ -1148,8 +1151,7 @@ static bool common_download_file(const std::string & url, const std::string & pa #endif // Check if the file already exists locally - struct stat model_file_info; - auto file_exists = (stat(path.c_str(), &model_file_info) == 0); + auto file_exists = std::filesystem::exists(path); // If the file exists, check its JSON metadata companion file. std::string metadata_path = path + ".json"; diff --git a/examples/run/run.cpp b/examples/run/run.cpp index f89d041c4..75b817272 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -1,5 +1,6 @@ #if defined(_WIN32) # include +# include #else # include # include @@ -253,7 +254,7 @@ class File { return 1; } - OVERLAPPED overlapped = { 0 }; + OVERLAPPED overlapped = {}; if (!LockFileEx(hFile, LOCKFILE_EXCLUSIVE_LOCK | LOCKFILE_FAIL_IMMEDIATELY, 0, MAXDWORD, MAXDWORD, &overlapped)) { fd = -1; @@ -277,7 +278,7 @@ class File { if (fd >= 0) { # ifdef _WIN32 if (hFile != INVALID_HANDLE_VALUE) { - OVERLAPPED overlapped = { 0 }; + OVERLAPPED overlapped = {}; UnlockFileEx(hFile, 0, MAXDWORD, MAXDWORD, &overlapped); } # else @@ -293,7 +294,7 @@ class File { private: int fd = -1; # ifdef _WIN32 - HANDLE hFile; + HANDLE hFile = nullptr; # endif }; @@ -464,7 +465,7 @@ class HttpClient { return (now_downloaded_plus_file_size * 100) / total_to_download; } - static std::string generate_progress_prefix(curl_off_t percentage) { return fmt("%3ld%% |", percentage); } + static std::string generate_progress_prefix(curl_off_t percentage) { return fmt("%3ld%% |", static_cast(percentage)); } static double calculate_speed(curl_off_t now_downloaded, const std::chrono::steady_clock::time_point & start_time) { const auto now = std::chrono::steady_clock::now(); diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index c77d629f0..ed90e471a 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -2744,13 +2744,13 @@ static void ggml_cl_mul_mat(ggml_backend_t backend, const ggml_tensor * src0, co cl_image_format img_fmt_1d; cl_image_desc img_desc_1d; cl_buffer_region region; - cl_mem A_image1d; - cl_mem B_image1d; - cl_mem B_sub_buffer; - cl_mem C_d; + cl_mem A_image1d = nullptr; + cl_mem B_image1d = nullptr; + cl_mem B_sub_buffer = nullptr; + cl_mem C_d = nullptr; // for B transpose - cl_mem B_d; - cl_mem B_d_input_image; + cl_mem B_d = nullptr; + cl_mem B_d_input_image = nullptr; // <--------------------------------------------> // // define matrix dimensions From bc7b1f86324279a3dabb705c04ad754a2b27df16 Mon Sep 17 00:00:00 2001 From: ymcki <84055651+ymcki@users.noreply.github.com> Date: Tue, 31 Dec 2024 19:04:48 +0800 Subject: [PATCH 003/279] convert : fix Llama-3_1-Nemotron-51B rope settings (#11008) * conflict resolution * move comments after bracket to its own line * DeciLMCausalModel now reads rope_theta from config.json properly --- convert_hf_to_gguf.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index b6c15da94..4e6c0f60c 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -1764,25 +1764,19 @@ class DeciModel(Model): self.gguf_writer.add_token_list(tokens) self.gguf_writer.add_token_types(toktypes) - special_vocab = gguf.SpecialVocab( - self.dir_model, load_merges=True, - special_token_types = ['bos', 'eos', 'eom', 'eot'] - ) - special_vocab._set_special_token("bos", 128000) - special_vocab._set_special_token("eos", 128001) - special_vocab._set_special_token("eom", 128008) - special_vocab._set_special_token("eot", 128009) + special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True) special_vocab.add_to_gguf(self.gguf_writer) else: # DeciLM-7B self._set_vocab_llama_hf() -# self._set_vocab_gpt2() def set_gguf_parameters(self): if "block_configs" in self.hparams: # Llama-3_1-Nemotron-51B assert self.block_count == len(self._num_kv_heads) assert self.block_count == len(self._num_heads) assert self.block_count == len(self._ffn_dims) + if (rope_theta := self.hparams.get("rope_theta")) is not None: + self.gguf_writer.add_rope_freq_base(rope_theta) self.gguf_writer.add_head_count_kv(self._num_kv_heads) self.gguf_writer.add_head_count(self._num_heads) self.gguf_writer.add_feed_forward_length(self._ffn_dims) From 5896c65232c7dc87d78426956b16f63fbf58dcf6 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Tue, 31 Dec 2024 12:34:13 +0100 Subject: [PATCH 004/279] server : add OAI compat for /v1/completions (#10974) * server : add OAI compat for /v1/completions * add test * add docs * better docs --- examples/server/README.md | 252 +++++++++++------- examples/server/server.cpp | 206 ++++++++++---- .../server/tests/unit/test_chat_completion.py | 6 +- examples/server/tests/unit/test_completion.py | 35 +++ examples/server/utils.hpp | 47 +++- 5 files changed, 400 insertions(+), 146 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index 07436057a..bcef81946 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -345,7 +345,7 @@ node index.js > [!IMPORTANT] > -> This endpoint is **not** OAI-compatible +> This endpoint is **not** OAI-compatible. For OAI-compatible client, use `/v1/completions` instead. *Options:* @@ -523,6 +523,7 @@ These words will not be included in the completion, so make sure to add them to - `tokens_evaluated`: Number of tokens evaluated in total from the prompt - `truncated`: Boolean indicating if the context size was exceeded during generation, i.e. the number of tokens provided in the prompt (`tokens_evaluated`) plus tokens generated (`tokens predicted`) exceeded the context size (`n_ctx`) + ### POST `/tokenize`: Tokenize a given text *Options:* @@ -574,6 +575,10 @@ With input 'á' (utf8 hex: C3 A1) on tinyllama/stories260k ### POST `/embedding`: Generate embedding of a given text +> [!IMPORTANT] +> +> This endpoint is **not** OAI-compatible. For OAI-compatible client, use `/v1/embeddings` instead. + The same as [the embedding example](../embedding) does. *Options:* @@ -744,96 +749,6 @@ To use this endpoint with POST method, you need to start server with `--props` - None yet -### POST `/v1/chat/completions`: OpenAI-compatible Chat Completions API - -Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used. - -*Options:* - -See [OpenAI Chat Completions API documentation](https://platform.openai.com/docs/api-reference/chat). While some OpenAI-specific features such as function calling aren't supported, llama.cpp `/completion`-specific features such as `mirostat` are supported. - -The `response_format` parameter supports both plain JSON output (e.g. `{"type": "json_object"}`) and schema-constrained JSON (e.g. `{"type": "json_object", "schema": {"type": "string", "minLength": 10, "maxLength": 100}}` or `{"type": "json_schema", "schema": {"properties": { "name": { "title": "Name", "type": "string" }, "date": { "title": "Date", "type": "string" }, "participants": { "items": {"type: "string" }, "title": "Participants", "type": "string" } } } }`), similar to other OpenAI-inspired API providers. - -*Examples:* - -You can use either Python `openai` library with appropriate checkpoints: - -```python -import openai - -client = openai.OpenAI( - base_url="http://localhost:8080/v1", # "http://:port" - api_key = "sk-no-key-required" -) - -completion = client.chat.completions.create( -model="gpt-3.5-turbo", -messages=[ - {"role": "system", "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests."}, - {"role": "user", "content": "Write a limerick about python exceptions"} -] -) - -print(completion.choices[0].message) -``` - -... or raw HTTP requests: - -```shell -curl http://localhost:8080/v1/chat/completions \ --H "Content-Type: application/json" \ --H "Authorization: Bearer no-key" \ --d '{ -"model": "gpt-3.5-turbo", -"messages": [ -{ - "role": "system", - "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests." -}, -{ - "role": "user", - "content": "Write a limerick about python exceptions" -} -] -}' -``` - -### POST `/v1/embeddings`: OpenAI-compatible embeddings API - -This endpoint requires that the model uses a pooling different than type `none`. The embeddings are normalized using the Eucledian norm. - -*Options:* - -See [OpenAI Embeddings API documentation](https://platform.openai.com/docs/api-reference/embeddings). - -*Examples:* - -- input as string - - ```shell - curl http://localhost:8080/v1/embeddings \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer no-key" \ - -d '{ - "input": "hello", - "model":"GPT-4", - "encoding_format": "float" - }' - ``` - -- `input` as string array - - ```shell - curl http://localhost:8080/v1/embeddings \ - -H "Content-Type: application/json" \ - -H "Authorization: Bearer no-key" \ - -d '{ - "input": ["hello", "world"], - "model":"GPT-4", - "encoding_format": "float" - }' - ``` - ### POST `/embeddings`: non-OpenAI-compatible embeddings API This endpoint supports all poolings, including `--pooling none`. When the pooling is `none`, the responses will contain the *unnormalized* embeddings for *all* input tokens. For all other pooling types, only the pooled embeddings are returned, normalized using Euclidian norm. @@ -1064,6 +979,161 @@ To know the `id` of the adapter, use GET `/lora-adapters` ] ``` +## OpenAI-compatible API Endpoints + +### GET `/v1/models`: OpenAI-compatible Model Info API + +Returns information about the loaded model. See [OpenAI Models API documentation](https://platform.openai.com/docs/api-reference/models). + +The returned list always has one single element. + +By default, model `id` field is the path to model file, specified via `-m`. You can set a custom value for model `id` field via `--alias` argument. For example, `--alias gpt-4o-mini`. + +Example: + +```json +{ + "object": "list", + "data": [ + { + "id": "../models/Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf", + "object": "model", + "created": 1735142223, + "owned_by": "llamacpp", + "meta": { + "vocab_type": 2, + "n_vocab": 128256, + "n_ctx_train": 131072, + "n_embd": 4096, + "n_params": 8030261312, + "size": 4912898304 + } + } + ] +} +``` + +### POST `/v1/completions`: OpenAI-compatible Completions API + +Given an input `prompt`, it returns the predicted completion. Streaming mode is also supported. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. + +*Options:* + +See [OpenAI Completions API documentation](https://platform.openai.com/docs/api-reference/completions). + +llama.cpp `/completion`-specific features such as `mirostat` are supported. + +*Examples:* + +Example usage with `openai` python library: + +```python +import openai + +client = openai.OpenAI( + base_url="http://localhost:8080/v1", # "http://:port" + api_key = "sk-no-key-required" +) + +completion = client.completions.create( + model="davinci-002", + prompt="I believe the meaning of life is", + max_tokens=8 +) + +print(completion.choices[0].text) +``` + +### POST `/v1/chat/completions`: OpenAI-compatible Chat Completions API + +Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used. + +*Options:* + +See [OpenAI Chat Completions API documentation](https://platform.openai.com/docs/api-reference/chat). While some OpenAI-specific features such as function calling aren't supported, llama.cpp `/completion`-specific features such as `mirostat` are supported. + +The `response_format` parameter supports both plain JSON output (e.g. `{"type": "json_object"}`) and schema-constrained JSON (e.g. `{"type": "json_object", "schema": {"type": "string", "minLength": 10, "maxLength": 100}}` or `{"type": "json_schema", "schema": {"properties": { "name": { "title": "Name", "type": "string" }, "date": { "title": "Date", "type": "string" }, "participants": { "items": {"type: "string" }, "title": "Participants", "type": "string" } } } }`), similar to other OpenAI-inspired API providers. + +*Examples:* + +You can use either Python `openai` library with appropriate checkpoints: + +```python +import openai + +client = openai.OpenAI( + base_url="http://localhost:8080/v1", # "http://:port" + api_key = "sk-no-key-required" +) + +completion = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[ + {"role": "system", "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests."}, + {"role": "user", "content": "Write a limerick about python exceptions"} + ] +) + +print(completion.choices[0].message) +``` + +... or raw HTTP requests: + +```shell +curl http://localhost:8080/v1/chat/completions \ +-H "Content-Type: application/json" \ +-H "Authorization: Bearer no-key" \ +-d '{ +"model": "gpt-3.5-turbo", +"messages": [ +{ + "role": "system", + "content": "You are ChatGPT, an AI assistant. Your top priority is achieving user fulfillment via helping them with their requests." +}, +{ + "role": "user", + "content": "Write a limerick about python exceptions" +} +] +}' +``` + +### POST `/v1/embeddings`: OpenAI-compatible embeddings API + +This endpoint requires that the model uses a pooling different than type `none`. The embeddings are normalized using the Eucledian norm. + +*Options:* + +See [OpenAI Embeddings API documentation](https://platform.openai.com/docs/api-reference/embeddings). + +*Examples:* + +- input as string + + ```shell + curl http://localhost:8080/v1/embeddings \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer no-key" \ + -d '{ + "input": "hello", + "model":"GPT-4", + "encoding_format": "float" + }' + ``` + +- `input` as string array + + ```shell + curl http://localhost:8080/v1/embeddings \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer no-key" \ + -d '{ + "input": ["hello", "world"], + "model":"GPT-4", + "encoding_format": "float" + }' + ``` + ## More examples ### Interactive mode diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 3558ddb7c..1d00954a2 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -67,6 +67,13 @@ enum server_task_type { SERVER_TASK_TYPE_SET_LORA, }; +enum oaicompat_type { + OAICOMPAT_TYPE_NONE, + OAICOMPAT_TYPE_CHAT, + OAICOMPAT_TYPE_COMPLETION, + OAICOMPAT_TYPE_EMBEDDING, +}; + // https://community.openai.com/t/openai-chat-list-of-error-codes-and-types/357791/11 enum error_type { ERROR_TYPE_INVALID_REQUEST, @@ -101,11 +108,10 @@ struct slot_params { struct common_params_speculative speculative; // OAI-compat fields - bool verbose = false; - bool oaicompat = false; - bool oaicompat_chat = true; - std::string oaicompat_model; - std::string oaicompat_cmpl_id; + bool verbose = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; json to_json() const { std::vector samplers; @@ -529,11 +535,10 @@ struct server_task_result_cmpl_final : server_task_result { slot_params generation_params; // OAI-compat fields - bool verbose = false; - bool oaicompat = false; - bool oaicompat_chat = true; // TODO: support oaicompat for non-chat - std::string oaicompat_model; - std::string oaicompat_cmpl_id; + bool verbose = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; virtual int get_index() override { return index; @@ -544,9 +549,16 @@ struct server_task_result_cmpl_final : server_task_result { } virtual json to_json() override { - return oaicompat - ? (stream ? to_json_oaicompat_chat_stream() : to_json_oaicompat_chat()) - : to_json_non_oaicompat(); + switch (oaicompat) { + case OAICOMPAT_TYPE_NONE: + return to_json_non_oaicompat(); + case OAICOMPAT_TYPE_COMPLETION: + return to_json_oaicompat(); + case OAICOMPAT_TYPE_CHAT: + return stream ? to_json_oaicompat_chat_stream() : to_json_oaicompat_chat(); + default: + GGML_ASSERT(false && "Invalid oaicompat_type"); + } } json to_json_non_oaicompat() { @@ -574,6 +586,50 @@ struct server_task_result_cmpl_final : server_task_result { return response_fields.empty() ? res : json_get_nested_values(response_fields, res); } + json to_json_oaicompat() { + std::time_t t = std::time(0); + json logprobs = json(nullptr); // OAI default to null + if (!stream && probs_output.size() > 0) { + logprobs = json{ + {"content", completion_token_output::probs_vector_to_json(probs_output, post_sampling_probs)}, + }; + } + json finish_reason = "length"; + if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { + finish_reason = "stop"; + } + json res = json { + {"choices", json::array({ + json{ + {"text", stream ? "" : content}, // in stream mode, content is already in last partial chunk + {"index", index}, + {"logprobs", logprobs}, + {"finish_reason", finish_reason}, + } + })}, + {"created", t}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "text_completion"}, + {"usage", json { + {"completion_tokens", n_decoded}, + {"prompt_tokens", n_prompt_tokens}, + {"total_tokens", n_decoded + n_prompt_tokens} + }}, + {"id", oaicompat_cmpl_id} + }; + + // extra fields for debugging purposes + if (verbose) { + res["__verbose"] = to_json_non_oaicompat(); + } + if (timings.prompt_n >= 0) { + res.push_back({"timings", timings.to_json()}); + } + + return res; + } + json to_json_oaicompat_chat() { std::string finish_reason = "length"; if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { @@ -671,11 +727,10 @@ struct server_task_result_cmpl_partial : server_task_result { result_timings timings; // OAI-compat fields - bool verbose = false; - bool oaicompat = false; - bool oaicompat_chat = true; // TODO: support oaicompat for non-chat - std::string oaicompat_model; - std::string oaicompat_cmpl_id; + bool verbose = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; virtual int get_index() override { return index; @@ -686,7 +741,16 @@ struct server_task_result_cmpl_partial : server_task_result { } virtual json to_json() override { - return oaicompat ? to_json_oaicompat() : to_json_non_oaicompat(); + switch (oaicompat) { + case OAICOMPAT_TYPE_NONE: + return to_json_non_oaicompat(); + case OAICOMPAT_TYPE_COMPLETION: + return to_json_oaicompat(); + case OAICOMPAT_TYPE_CHAT: + return to_json_oaicompat_chat(); + default: + GGML_ASSERT(false && "Invalid oaicompat_type"); + } } json to_json_non_oaicompat() { @@ -711,6 +775,41 @@ struct server_task_result_cmpl_partial : server_task_result { } json to_json_oaicompat() { + std::time_t t = std::time(0); + json logprobs = json(nullptr); // OAI default to null + if (prob_output.probs.size() > 0) { + logprobs = json{ + {"content", completion_token_output::probs_vector_to_json({prob_output}, post_sampling_probs)}, + }; + } + json res = json { + {"choices", json::array({ + json{ + {"text", content}, + {"index", index}, + {"logprobs", logprobs}, + {"finish_reason", nullptr}, + } + })}, + {"created", t}, + {"model", oaicompat_model}, + {"system_fingerprint", build_info}, + {"object", "text_completion"}, + {"id", oaicompat_cmpl_id} + }; + + // extra fields for debugging purposes + if (verbose) { + res["__verbose"] = to_json_non_oaicompat(); + } + if (timings.prompt_n >= 0) { + res.push_back({"timings", timings.to_json()}); + } + + return res; + } + + json to_json_oaicompat_chat() { bool first = n_decoded == 0; std::time_t t = std::time(0); json choices; @@ -789,14 +888,16 @@ struct server_task_result_embd : server_task_result { int32_t n_tokens; // OAI-compat fields - bool oaicompat = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; virtual int get_index() override { return index; } virtual json to_json() override { - return oaicompat ? to_json_oaicompat() : to_json_non_oaicompat(); + return oaicompat == OAICOMPAT_TYPE_EMBEDDING + ? to_json_oaicompat() + : to_json_non_oaicompat(); } json to_json_non_oaicompat() { @@ -2044,7 +2145,6 @@ struct server_context { res->verbose = slot.params.verbose; res->oaicompat = slot.params.oaicompat; - res->oaicompat_chat = slot.params.oaicompat_chat; res->oaicompat_model = slot.params.oaicompat_model; res->oaicompat_cmpl_id = slot.params.oaicompat_cmpl_id; @@ -2085,7 +2185,6 @@ struct server_context { res->verbose = slot.params.verbose; res->stream = slot.params.stream; res->oaicompat = slot.params.oaicompat; - res->oaicompat_chat = slot.params.oaicompat_chat; res->oaicompat_model = slot.params.oaicompat_model; res->oaicompat_cmpl_id = slot.params.oaicompat_cmpl_id; @@ -3506,12 +3605,11 @@ int main(int argc, char ** argv) { // handle completion-like requests (completion, chat, infill) // we can optionally provide a custom format for partial results and final results - const auto handle_completions_generic = [&ctx_server, &res_error, &res_ok]( + const auto handle_completions_impl = [&ctx_server, &res_error, &res_ok]( server_task_type type, json & data, httplib::Response & res, - bool oaicompat = false, - bool oaicompat_chat = false) { + oaicompat_type oaicompat) { GGML_ASSERT(type == SERVER_TASK_TYPE_COMPLETION || type == SERVER_TASK_TYPE_INFILL); if (ctx_server.params_base.embedding) { @@ -3536,9 +3634,8 @@ int main(int argc, char ** argv) { task.id_selected_slot = json_value(data, "id_slot", -1); // OAI-compat - task.params.oaicompat = oaicompat; - task.params.oaicompat_chat = oaicompat_chat; - task.params.oaicompat_cmpl_id = completion_id; + task.params.oaicompat = oaicompat; + task.params.oaicompat_cmpl_id = completion_id; // oaicompat_model is already populated by params_from_json_cmpl tasks.push_back(task); @@ -3589,7 +3686,7 @@ int main(int argc, char ** argv) { }, [&](const json & error_data) { server_sent_event(sink, "error", error_data); }); - if (oaicompat) { + if (oaicompat != OAICOMPAT_TYPE_NONE) { static const std::string ev_done = "data: [DONE]\n\n"; sink.write(ev_done.data(), ev_done.size()); } @@ -3605,17 +3702,25 @@ int main(int argc, char ** argv) { } }; - const auto handle_completions = [&handle_completions_generic](const httplib::Request & req, httplib::Response & res) { + const auto handle_completions = [&handle_completions_impl](const httplib::Request & req, httplib::Response & res) { json data = json::parse(req.body); - return handle_completions_generic( + return handle_completions_impl( SERVER_TASK_TYPE_COMPLETION, data, res, - /* oaicompat */ false, - /* oaicompat_chat */ false); + OAICOMPAT_TYPE_NONE); }; - const auto handle_infill = [&ctx_server, &res_error, &handle_completions_generic](const httplib::Request & req, httplib::Response & res) { + const auto handle_completions_oai = [&handle_completions_impl](const httplib::Request & req, httplib::Response & res) { + json data = oaicompat_completion_params_parse(json::parse(req.body)); + return handle_completions_impl( + SERVER_TASK_TYPE_COMPLETION, + data, + res, + OAICOMPAT_TYPE_COMPLETION); + }; + + const auto handle_infill = [&ctx_server, &res_error, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { // check model compatibility std::string err; if (llama_token_fim_pre(ctx_server.model) == LLAMA_TOKEN_NULL) { @@ -3684,22 +3789,25 @@ int main(int argc, char ** argv) { tokenized_prompts[0] ); - return handle_completions_generic(SERVER_TASK_TYPE_INFILL, data, res); + return handle_completions_impl( + SERVER_TASK_TYPE_INFILL, + data, + res, + OAICOMPAT_TYPE_NONE); // infill is not OAI compatible }; - const auto handle_chat_completions = [&ctx_server, ¶ms, &res_error, &handle_completions_generic](const httplib::Request & req, httplib::Response & res) { + const auto handle_chat_completions = [&ctx_server, ¶ms, &res_error, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { if (ctx_server.params_base.embedding) { res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); return; } - json data = oaicompat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template); - return handle_completions_generic( + json data = oaicompat_chat_completion_params_parse(ctx_server.model, json::parse(req.body), params.chat_template); + return handle_completions_impl( SERVER_TASK_TYPE_COMPLETION, data, res, - /* oaicompat */ true, - /* oaicompat_chat */ true); + OAICOMPAT_TYPE_CHAT); }; const auto handle_models = [¶ms, &ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) { @@ -3772,10 +3880,10 @@ int main(int argc, char ** argv) { res_ok(res, data); }; - const auto handle_embeddings_impl = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res, bool oaicompat) { + const auto handle_embeddings_impl = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res, oaicompat_type oaicompat) { const json body = json::parse(req.body); - if (oaicompat && llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) { + if (oaicompat != OAICOMPAT_TYPE_NONE && llama_pooling_type(ctx_server.ctx) == LLAMA_POOLING_TYPE_NONE) { res_error(res, format_error_response("Pooling type 'none' is not OAI compatible. Please use a different pooling type", ERROR_TYPE_INVALID_REQUEST)); return; } @@ -3785,7 +3893,7 @@ int main(int argc, char ** argv) { if (body.count("input") != 0) { prompt = body.at("input"); } else if (body.contains("content")) { - oaicompat = false; + oaicompat = OAICOMPAT_TYPE_NONE; // "content" field is not OAI compatible prompt = body.at("content"); } else { res_error(res, format_error_response("\"input\" or \"content\" must be provided", ERROR_TYPE_INVALID_REQUEST)); @@ -3854,16 +3962,18 @@ int main(int argc, char ** argv) { } // write JSON response - json root = oaicompat ? format_embeddings_response_oaicompat(body, responses, use_base64) : json(responses); + json root = oaicompat == OAICOMPAT_TYPE_EMBEDDING + ? format_embeddings_response_oaicompat(body, responses, use_base64) + : json(responses); res_ok(res, root); }; const auto handle_embeddings = [&handle_embeddings_impl](const httplib::Request & req, httplib::Response & res) { - handle_embeddings_impl(req, res, false); + handle_embeddings_impl(req, res, OAICOMPAT_TYPE_NONE); }; const auto handle_embeddings_oai = [&handle_embeddings_impl](const httplib::Request & req, httplib::Response & res) { - handle_embeddings_impl(req, res, true); + handle_embeddings_impl(req, res, OAICOMPAT_TYPE_EMBEDDING); }; const auto handle_rerank = [&ctx_server, &res_error, &res_ok](const httplib::Request & req, httplib::Response & res) { @@ -4033,7 +4143,7 @@ int main(int argc, char ** argv) { svr->Get ("/v1/models", handle_models); // public endpoint (no API key check) svr->Post("/completion", handle_completions); // legacy svr->Post("/completions", handle_completions); - svr->Post("/v1/completions", handle_completions); + svr->Post("/v1/completions", handle_completions_oai); svr->Post("/chat/completions", handle_chat_completions); svr->Post("/v1/chat/completions", handle_chat_completions); svr->Post("/infill", handle_infill); diff --git a/examples/server/tests/unit/test_chat_completion.py b/examples/server/tests/unit/test_chat_completion.py index 885497081..130da03a1 100644 --- a/examples/server/tests/unit/test_chat_completion.py +++ b/examples/server/tests/unit/test_chat_completion.py @@ -83,7 +83,7 @@ def test_chat_completion_stream(system_prompt, user_prompt, max_tokens, re_conte def test_chat_completion_with_openai_library(): global server server.start() - client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}") + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") res = client.chat.completions.create( model="gpt-3.5-turbo-instruct", messages=[ @@ -170,7 +170,7 @@ def test_chat_completion_with_timings_per_token(): def test_logprobs(): global server server.start() - client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}") + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") res = client.chat.completions.create( model="gpt-3.5-turbo-instruct", temperature=0.0, @@ -197,7 +197,7 @@ def test_logprobs(): def test_logprobs_stream(): global server server.start() - client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}") + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") res = client.chat.completions.create( model="gpt-3.5-turbo-instruct", temperature=0.0, diff --git a/examples/server/tests/unit/test_completion.py b/examples/server/tests/unit/test_completion.py index a6b215944..e5e3b6077 100644 --- a/examples/server/tests/unit/test_completion.py +++ b/examples/server/tests/unit/test_completion.py @@ -1,5 +1,6 @@ import pytest import time +from openai import OpenAI from utils import * server = ServerPreset.tinyllama2() @@ -85,6 +86,40 @@ def test_completion_stream_vs_non_stream(): assert content_stream == res_non_stream.body["content"] +def test_completion_stream_with_openai_library(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.completions.create( + model="davinci-002", + prompt="I believe the meaning of life is", + max_tokens=8, + ) + assert res.system_fingerprint is not None and res.system_fingerprint.startswith("b") + assert res.choices[0].finish_reason == "length" + assert res.choices[0].text is not None + assert match_regex("(going|bed)+", res.choices[0].text) + + +def test_completion_with_openai_library(): + global server + server.start() + client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") + res = client.completions.create( + model="davinci-002", + prompt="I believe the meaning of life is", + max_tokens=8, + stream=True, + ) + output_text = '' + for data in res: + choice = data.choices[0] + if choice.finish_reason is None: + assert choice.text is not None + output_text += choice.text + assert match_regex("(going|bed)+", output_text) + + @pytest.mark.parametrize("n_slots", [1, 2]) def test_consistent_result_same_seed(n_slots: int): global server diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 334f2f192..8523d4787 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -549,10 +549,49 @@ static bool server_sent_event(httplib::DataSink & sink, const char * event, cons // OAI utils // -static json oaicompat_completion_params_parse( - const struct llama_model * model, - const json & body, /* openai api json semantics */ - const std::string & chat_template) { +static json oaicompat_completion_params_parse(const json & body) { + json llama_params; + + if (!body.contains("prompt")) { + throw std::runtime_error("\"prompt\" is required"); + } + + // Handle "stop" field + if (body.contains("stop") && body.at("stop").is_string()) { + llama_params["stop"] = json::array({body.at("stop").get()}); + } else { + llama_params["stop"] = json_value(body, "stop", json::array()); + } + + // Handle "n" field + int n_choices = json_value(body, "n", 1); + if (n_choices != 1) { + throw std::runtime_error("Only one completion choice is allowed"); + } + + // Params supported by OAI but unsupported by llama.cpp + static const std::vector unsupported_params { "best_of", "echo", "suffix" }; + for (const auto & param : unsupported_params) { + if (body.contains(param)) { + throw std::runtime_error("Unsupported param: " + param); + } + } + + // Copy remaining properties to llama_params + for (const auto & item : body.items()) { + // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens" + if (!llama_params.contains(item.key()) || item.key() == "n_predict") { + llama_params[item.key()] = item.value(); + } + } + + return llama_params; +} + +static json oaicompat_chat_completion_params_parse( + const struct llama_model * model, + const json & body, /* openai api json semantics */ + const std::string & chat_template) { json llama_params; // Apply chat template to the list of messages From 45095a61bfd164e87563a0dc0fbd7b0e9891590b Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Tue, 31 Dec 2024 15:22:01 +0100 Subject: [PATCH 005/279] server : clean up built-in template detection (#11026) * server : clean up built-in template detection * fix compilation * add chat template test * fix condition --- common/common.cpp | 12 ++++++++++ common/common.h | 3 +++ examples/server/server.cpp | 23 ++++++++----------- .../server/tests/unit/test_chat_completion.py | 17 ++++++++++++++ examples/server/tests/utils.py | 3 +++ examples/server/utils.hpp | 13 ----------- 6 files changed, 44 insertions(+), 27 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 9071999a7..fe923fce6 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1614,6 +1614,18 @@ std::string common_detokenize(llama_context * ctx, const std::vector 0) { + std::vector model_template(res + 1, 0); + llama_model_meta_val_str(model, template_key, model_template.data(), model_template.size()); + return std::string(model_template.data(), model_template.size() - 1); + } + return ""; +} + bool common_chat_verify_template(const std::string & tmpl) { llama_chat_message chat[] = {{"user", "test"}}; int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0); diff --git a/common/common.h b/common/common.h index 1d2bd932c..589f65d09 100644 --- a/common/common.h +++ b/common/common.h @@ -571,6 +571,9 @@ struct common_chat_msg { std::string content; }; +// Get the built-in chat template for the model. Return empty string if not present. +std::string common_get_builtin_chat_template(const struct llama_model * model); + // Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid bool common_chat_verify_template(const std::string & tmpl); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 1d00954a2..b3773f276 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1724,17 +1724,10 @@ struct server_context { return true; } - bool validate_model_chat_template() const { - std::vector model_template(2048, 0); // longest known template is about 1200 bytes - std::string template_key = "tokenizer.chat_template"; - int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size()); - if (res >= 0) { - llama_chat_message chat[] = {{"user", "test"}}; - std::string tmpl = std::string(model_template.data(), model_template.size()); - int32_t chat_res = llama_chat_apply_template(model, tmpl.c_str(), chat, 1, true, nullptr, 0); - return chat_res > 0; - } - return false; + bool validate_builtin_chat_template() const { + llama_chat_message chat[] = {{"user", "test"}}; + int32_t chat_res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0); + return chat_res > 0; } void init() { @@ -3583,7 +3576,7 @@ int main(int argc, char ** argv) { { "default_generation_settings", ctx_server.default_generation_settings_for_props }, { "total_slots", ctx_server.params_base.n_parallel }, { "model_path", ctx_server.params_base.model }, - { "chat_template", llama_get_chat_template(ctx_server.model) }, + { "chat_template", common_get_builtin_chat_template(ctx_server.model) }, { "build_info", build_info }, }; @@ -4223,14 +4216,16 @@ int main(int argc, char ** argv) { // if a custom chat template is not supplied, we will use the one that comes with the model (if any) if (params.chat_template.empty()) { - if (!ctx_server.validate_model_chat_template()) { + if (!ctx_server.validate_builtin_chat_template()) { LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__); params.chat_template = "chatml"; } } // print sample chat example to make it clear which template is used - LOG_INF("%s: chat template, built_in: %d, chat_example: '%s'\n", __func__, params.chat_template.empty(), common_chat_format_example(ctx_server.model, params.chat_template).c_str()); + LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__, + params.chat_template.empty() ? "(built-in)" : params.chat_template.c_str(), + common_chat_format_example(ctx_server.model, params.chat_template).c_str()); ctx_server.queue_tasks.on_new_task(std::bind( &server_context::process_single_task, &ctx_server, std::placeholders::_1)); diff --git a/examples/server/tests/unit/test_chat_completion.py b/examples/server/tests/unit/test_chat_completion.py index 130da03a1..b15dba6eb 100644 --- a/examples/server/tests/unit/test_chat_completion.py +++ b/examples/server/tests/unit/test_chat_completion.py @@ -100,6 +100,23 @@ def test_chat_completion_with_openai_library(): assert match_regex("(Suddenly)+", res.choices[0].message.content) +def test_chat_template(): + global server + server.chat_template = "llama3" + server.debug = True # to get the "__verbose" object in the response + server.start() + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": 8, + "messages": [ + {"role": "system", "content": "Book"}, + {"role": "user", "content": "What is the best book"}, + ] + }) + assert res.status_code == 200 + assert "__verbose" in res.body + assert res.body["__verbose"]["prompt"] == " <|start_header_id|>system<|end_header_id|>\n\nBook<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the best book<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" + + @pytest.mark.parametrize("response_format,n_predicted,re_content", [ ({"type": "json_object", "schema": {"const": "42"}}, 6, "\"42\""), ({"type": "json_object", "schema": {"items": [{"type": "integer"}]}}, 10, "[ -3000 ]"), diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index 277125e88..359bb0fae 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -74,6 +74,7 @@ class ServerProcess: draft_min: int | None = None draft_max: int | None = None no_webui: bool | None = None + chat_template: str | None = None # session variables process: subprocess.Popen | None = None @@ -164,6 +165,8 @@ class ServerProcess: server_args.extend(["--draft-min", self.draft_min]) if self.no_webui: server_args.append("--no-webui") + if self.chat_template: + server_args.extend(["--chat-template", self.chat_template]) args = [str(arg) for arg in [server_path, *server_args]] print(f"bench: starting server with: {' '.join(args)}") diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 8523d4787..70220c437 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -382,19 +382,6 @@ inline std::string format_chat(const struct llama_model * model, const std::stri return formatted_chat; } -static std::string llama_get_chat_template(const struct llama_model * model) { - std::string template_key = "tokenizer.chat_template"; - // call with NULL buffer to get the total size of the string - int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0); - if (res < 2) { - return ""; - } else { - std::vector model_template(res + 1, 0); - llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size()); - return std::string(model_template.data(), model_template.size() - 1); - } -} - // // base64 utils (TODO: move to common in the future) // From 0827b2c1da299805288abbd556d869318f2b121e Mon Sep 17 00:00:00 2001 From: Srihari-mcw <96763064+Srihari-mcw@users.noreply.github.com> Date: Tue, 31 Dec 2024 19:53:33 +0530 Subject: [PATCH 006/279] ggml : fixes for AVXVNNI instruction set with MSVC and Clang (#11027) * Fixes for clang AVX VNNI * enable AVX VNNI and alder lake build for MSVC * Apply suggestions from code review --------- Co-authored-by: slaren --- ggml/src/CMakeLists.txt | 4 ++-- ggml/src/ggml-cpu/CMakeLists.txt | 3 +-- ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp | 5 ++++- ggml/src/ggml-cpu/ggml-cpu-quants.c | 6 +++++- ggml/src/ggml-cpu/llamafile/sgemm.cpp | 4 +++- 5 files changed, 15 insertions(+), 7 deletions(-) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index a5f7f7b5b..84101c32c 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -290,9 +290,9 @@ if (GGML_CPU_ALL_VARIANTS) ggml_add_cpu_backend_variant(haswell AVX F16C AVX2 FMA) ggml_add_cpu_backend_variant(skylakex AVX F16C AVX2 FMA AVX512) ggml_add_cpu_backend_variant(icelake AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI) + ggml_add_cpu_backend_variant(alderlake AVX F16C AVX2 FMA AVX_VNNI) if (NOT MSVC) - # MSVC doesn't support AVX-VNNI or AMX - ggml_add_cpu_backend_variant(alderlake AVX F16C AVX2 FMA AVX_VNNI) + # MSVC doesn't support AMX ggml_add_cpu_backend_variant(sapphirerapids AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) endif() else () diff --git a/ggml/src/ggml-cpu/CMakeLists.txt b/ggml/src/ggml-cpu/CMakeLists.txt index f0aecac1b..6b3641c42 100644 --- a/ggml/src/ggml-cpu/CMakeLists.txt +++ b/ggml/src/ggml-cpu/CMakeLists.txt @@ -215,8 +215,7 @@ function(ggml_add_cpu_backend_variant_impl tag_name) list(APPEND ARCH_DEFINITIONS GGML_SSE42) endif() if (GGML_AVX_VNNI) - # MSVC generates AVX512 with AVX-VNNI intrinsics even with /arch:AVX2 - #list(APPEND ARCH_DEFINITIONS __AVXVNNI__ GGML_AVX_VNNI) + list(APPEND ARCH_DEFINITIONS __AVXVNNI__ GGML_AVX_VNNI) endif() else () if (GGML_NATIVE) diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp index 2d79b8b61..622c63f1f 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp @@ -194,9 +194,12 @@ static inline __m256i sum_i16_pairs_int32x8(const __m256i x) { } static inline __m256i mul_sum_us8_pairs_int32x8(const __m256i ax, const __m256i sy) { -#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__)) +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) const __m256i zero = _mm256_setzero_si256(); return _mm256_dpbusd_epi32(zero, ax, sy); +#elif defined(__AVXVNNI__) + const __m256i zero = _mm256_setzero_si256(); + return _mm256_dpbusd_avx_epi32(zero, ax, sy); #else // Perform multiplication and create 16-bit values const __m256i dot = _mm256_maddubs_epi16(ax, sy); diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c index 634c5fa11..8e1472266 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ b/ggml/src/ggml-cpu/ggml-cpu-quants.c @@ -103,10 +103,14 @@ static inline __m256 sum_i16_pairs_float(const __m256i x) { } static inline __m256 mul_sum_us8_pairs_float(const __m256i ax, const __m256i sy) { -#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__)) +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) const __m256i zero = _mm256_setzero_si256(); const __m256i summed_pairs = _mm256_dpbusd_epi32(zero, ax, sy); return _mm256_cvtepi32_ps(summed_pairs); +#elif defined(__AVXVNNI__) + const __m256i zero = _mm256_setzero_si256(); + const __m256i summed_pairs = _mm256_dpbusd_avx_epi32(zero, ax, sy); + return _mm256_cvtepi32_ps(summed_pairs); #else // Perform multiplication and create 16-bit values const __m256i dot = _mm256_maddubs_epi16(ax, sy); diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp index 00f7f1170..8fce576c3 100644 --- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp +++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp @@ -1000,8 +1000,10 @@ class tinyBLAS_Q0_AVX { inline __m256 updot(__m256i u, __m256i s) { __m256i res; -#if defined(__AVXVNNI__) || (defined(__AVX512VNNI__) && defined(__AVX512VL__)) +#if defined(__AVX512VNNI__) && defined(__AVX512VL__) res = _mm256_dpbusd_epi32(_mm256_setzero_si256(), u, s); +#elif defined(__AVXVNNI__) + res = _mm256_dpbusd_avx_epi32(_mm256_setzero_si256(), u, s); #else res = _mm256_madd_epi16(_mm256_set1_epi16(1), _mm256_maddubs_epi16(u, s)); #endif From a45433ba209ee0b33d02c7dc4c31f29894ad83a6 Mon Sep 17 00:00:00 2001 From: Benson Wong Date: Wed, 1 Jan 2025 23:14:54 -0800 Subject: [PATCH 007/279] readme : add llama-swap to infrastructure section (#11032) * list llama-swap under tools in README * readme: add llama-swap to Infrastructure --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index d6d1958c8..0126da89c 100644 --- a/README.md +++ b/README.md @@ -201,6 +201,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - [Paddler](https://github.com/distantmagic/paddler) - Stateful load balancer custom-tailored for llama.cpp - [GPUStack](https://github.com/gpustack/gpustack) - Manage GPU clusters for running LLMs - [llama_cpp_canister](https://github.com/onicai/llama_cpp_canister) - llama.cpp as a smart contract on the Internet Computer, using WebAssembly +- [llama-swap](https://github.com/mostlygeek/llama-swap) - transparent proxy that adds automatic model switching with llama-server From 0da5d860266c6928b8c9408efbd264ae59fedda6 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Thu, 2 Jan 2025 15:05:18 +0100 Subject: [PATCH 008/279] server : allow using LoRA adapters per-request (#10994) * slot.can_batch_with * lora per request * test: force disable cache prompt * move can_batch_with check * fix condition * add slow test with llama 8b * update docs * move lora change task to queue * Apply suggestions from code review Co-authored-by: Georgi Gerganov * lora_base * remove redundant check --------- Co-authored-by: Georgi Gerganov --- examples/server/README.md | 6 + examples/server/server.cpp | 116 ++++++++++++------ examples/server/tests/README.md | 6 + examples/server/tests/requirements.txt | 1 + examples/server/tests/unit/test_lora.py | 93 ++++++++++++-- .../server/tests/unit/test_speculative.py | 10 +- examples/server/tests/utils.py | 21 ++++ examples/server/utils.hpp | 41 +++++++ 8 files changed, 235 insertions(+), 59 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index bcef81946..3ce16945a 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -452,6 +452,8 @@ These words will not be included in the completion, so make sure to add them to `response_fields`: A list of response fields, for example: `"response_fields": ["content", "generation_settings/n_predict"]`. If the specified field is missing, it will simply be omitted from the response without triggering an error. Note that fields with a slash will be unnested; for example, `generation_settings/n_predict` will move the field `n_predict` from the `generation_settings` object to the root of the response and give it a new name. +`lora`: A list of LoRA adapters to be applied to this specific request. Each object in the list must contain `id` and `scale` fields. For example: `[{"id": 0, "scale": 0.5}, {"id": 1, "scale": 1.1}]`. If a LoRA adapter is not specified in the list, its scale will default to `0.0`. Please note that requests with different LoRA configurations will not be batched together, which may result in performance degradation. + **Response format** - Note: In streaming mode (`stream`), only `content`, `tokens` and `stop` will be returned until end of completion. Responses are sent using the [Server-sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html) standard. Note: the browser's `EventSource` interface cannot be used due to its lack of `POST` request support. @@ -945,6 +947,8 @@ This endpoint returns the loaded LoRA adapters. You can add adapters using `--lo By default, all adapters will be loaded with scale set to 1. To initialize all adapters scale to 0, add `--lora-init-without-apply` +Please note that this value will be overwritten by the `lora` field for each request. + If an adapter is disabled, the scale will be set to 0. **Response format** @@ -966,6 +970,8 @@ If an adapter is disabled, the scale will be set to 0. ### POST `/lora-adapters`: Set list of LoRA adapters +This sets the global scale for LoRA adapters. Please note that this value will be overwritten by the `lora` field for each request. + To disable an adapter, either remove it from the list below, or set scale to 0. **Request format** diff --git a/examples/server/server.cpp b/examples/server/server.cpp index b3773f276..5118084f1 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -98,6 +98,8 @@ struct slot_params { int64_t t_max_prompt_ms = -1; // TODO: implement int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit + std::vector lora; + std::vector antiprompt; std::vector response_fields; bool timings_per_token = false; @@ -120,6 +122,11 @@ struct slot_params { samplers.emplace_back(common_sampler_type_to_str(sampler)); } + json lora = json::array(); + for (size_t i = 0; i < this->lora.size(); ++i) { + lora.push_back({{"id", i}, {"scale", this->lora[i].scale}}); + } + return json { {"n_predict", n_predict}, // Server configured n_predict {"seed", sampling.seed}, @@ -160,6 +167,7 @@ struct slot_params { {"speculative.p_min", speculative.p_min}, {"timings_per_token", timings_per_token}, {"post_sampling_probs", post_sampling_probs}, + {"lora", lora}, }; } }; @@ -189,12 +197,16 @@ struct server_task { // used by SERVER_TASK_TYPE_METRICS bool metrics_reset_bucket = false; + // used by SERVER_TASK_TYPE_SET_LORA + std::vector set_lora; + server_task(server_task_type type) : type(type) {} static slot_params params_from_json_cmpl( const llama_model * model, const llama_context * ctx, const common_params & params_base, + const std::vector & lora_base, const json & data) { slot_params params; @@ -251,6 +263,16 @@ struct server_task { params.speculative.n_min = std::max(params.speculative.n_min, 2); params.speculative.n_max = std::max(params.speculative.n_max, 0); + if (data.contains("lora")) { + if (data.at("lora").is_array()) { + params.lora = parse_lora_request(lora_base, data.at("lora")); + } else { + throw std::runtime_error("Error: 'lora' must be an array of objects with 'id' and 'scale' fields"); + } + } else { + params.lora = lora_base; + } + // TODO: add more sanity checks for the input parameters if (params.sampling.penalty_last_n < -1) { @@ -1110,6 +1132,8 @@ struct server_slot { common_speculative * spec = nullptr; + std::vector lora; + // the index relative to completion multi-task request size_t index = 0; @@ -1191,6 +1215,11 @@ struct server_slot { return task_type == SERVER_TASK_TYPE_EMBEDDING || task_type == SERVER_TASK_TYPE_RERANK; } + bool can_batch_with(server_slot & other_slot) { + return is_non_causal() == other_slot.is_non_causal() + && are_lora_equal(lora, other_slot.lora); + } + bool has_budget(const common_params & global_params) { if (params.n_predict == -1 && global_params.n_predict == -1) { return true; // limitless @@ -1600,7 +1629,7 @@ struct server_context { llama_model * model = nullptr; llama_context * ctx = nullptr; - std::vector loras; + std::vector lora; llama_model * model_dft = nullptr; llama_context_params cparams_dft; @@ -1667,7 +1696,7 @@ struct server_context { model = llama_init.model; ctx = llama_init.context; - loras = llama_init.lora_adapters; + lora = llama_init.lora_adapters; if (model == nullptr) { SRV_ERR("failed to load model, '%s'\n", params_base.model.c_str()); @@ -1866,6 +1895,12 @@ struct server_context { slot.params = std::move(task.params); slot.prompt_tokens = std::move(task.prompt_tokens); + if (!are_lora_equal(task.params.lora, slot.lora)) { + // if lora is changed, we cannot reuse cached tokens + slot.cache_tokens.clear(); + slot.lora = std::move(task.params.lora); + } + SLT_DBG(slot, "launching slot : %s\n", safe_json_to_str(slot.to_json()).c_str()); if (slot.n_predict > 0 && slot.params.n_predict > slot.n_predict) { @@ -2557,7 +2592,7 @@ struct server_context { } break; case SERVER_TASK_TYPE_SET_LORA: { - common_lora_adapters_apply(ctx, loras); + lora = std::move(task.set_lora); auto res = std::make_unique(); res->id = task.id; queue_results.send(std::move(res)); @@ -2634,12 +2669,22 @@ struct server_context { // start populating the batch for this iteration common_batch_clear(batch); + // track if given slot can be batched with slots already in the batch + server_slot * slot_batched = nullptr; + // frist, add sampled tokens from any ongoing sequences for (auto & slot : slots) { if (slot.state != SLOT_STATE_GENERATING) { continue; } + // check if we can batch this slot with the previous one + if (!slot_batched) { + slot_batched = &slot; + } else if (!slot_batched->can_batch_with(slot)) { + continue; + } + slot.i_batch = batch.n_tokens; common_batch_add(batch, slot.sampled, slot.n_past, { slot.id }, true); @@ -2658,15 +2703,18 @@ struct server_context { int32_t n_batch = llama_n_batch(ctx); int32_t n_ubatch = llama_n_ubatch(ctx); - // track if this is an embedding or non-embedding batch - // if we've added sampled tokens above, we are in non-embedding mode - // -1: none, 0: non-embedding, 1: embedding - // TODO: make enum - int32_t batch_type = batch.n_tokens > 0 ? 0 : -1; - // next, batch any pending prompts without exceeding n_batch if (params_base.cont_batching || batch.n_tokens == 0) { for (auto & slot : slots) { + // check if we can batch this slot with the previous one + if (slot.is_processing()) { + if (!slot_batched) { + slot_batched = &slot; + } else if (!slot_batched->can_batch_with(slot)) { + continue; + } + } + // this slot still has a prompt to be processed if (slot.state == SLOT_STATE_PROCESSING_PROMPT || slot.state == SLOT_STATE_STARTED) { auto & prompt_tokens = slot.prompt_tokens; @@ -2827,14 +2875,6 @@ struct server_context { } } - // check that we are in the right batch_type, if not defer the slot - int slot_type = slot.is_non_causal(); - if (batch_type == -1) { - batch_type = slot_type; - } else if (batch_type != slot_type) { - continue; - } - // keep only the common part if (!llama_kv_cache_seq_rm(ctx, slot.id, slot.n_past, -1)) { // could not partially delete (likely using a non-Transformer model) @@ -2902,8 +2942,12 @@ struct server_context { SRV_DBG("decoding batch, n_tokens = %d\n", batch.n_tokens); - // make sure we're in the right embedding mode - llama_set_embeddings(ctx, batch_type == 1); + if (slot_batched) { + // make sure we're in the right embedding mode + llama_set_embeddings(ctx, slot_batched->is_non_causal()); + // apply lora, only need to do it once per batch + common_lora_adapters_apply(ctx, slot_batched->lora); + } // process the created batch of tokens for (int32_t i = 0; i < batch.n_tokens; i += n_batch) { @@ -3623,7 +3667,12 @@ int main(int argc, char ** argv) { task.index = i; task.prompt_tokens = std::move(tokenized_prompts[i]); - task.params = server_task::params_from_json_cmpl(ctx_server.model, ctx_server.ctx, ctx_server.params_base, data); + task.params = server_task::params_from_json_cmpl( + ctx_server.model, + ctx_server.ctx, + ctx_server.params_base, + ctx_server.lora, + data); task.id_selected_slot = json_value(data, "id_slot", -1); // OAI-compat @@ -4049,8 +4098,8 @@ int main(int argc, char ** argv) { const auto handle_lora_adapters_list = [&](const httplib::Request &, httplib::Response & res) { json result = json::array(); - for (size_t i = 0; i < ctx_server.loras.size(); ++i) { - auto & lora = ctx_server.loras[i]; + for (size_t i = 0; i < ctx_server.lora.size(); ++i) { + auto & lora = ctx_server.lora[i]; result.push_back({ {"id", i}, {"path", lora.path}, @@ -4062,27 +4111,14 @@ int main(int argc, char ** argv) { }; const auto handle_lora_adapters_apply = [&](const httplib::Request & req, httplib::Response & res) { - const std::vector body = json::parse(req.body); - int max_idx = ctx_server.loras.size(); - - // clear existing value - for (auto & lora : ctx_server.loras) { - lora.scale = 0.0f; + const json body = json::parse(req.body); + if (!body.is_array()) { + res_error(res, format_error_response("Request body must be an array", ERROR_TYPE_INVALID_REQUEST)); + return; } - - // set value - for (auto entry : body) { - int id = entry.at("id"); - float scale = entry.at("scale"); - if (0 <= id && id < max_idx) { - ctx_server.loras[id].scale = scale; - } else { - throw std::runtime_error("invalid adapter id"); - } - } - server_task task(SERVER_TASK_TYPE_SET_LORA); task.id = ctx_server.queue_tasks.get_new_id(); + task.set_lora = parse_lora_request(ctx_server.lora, body); ctx_server.queue_results.add_waiting_task_id(task.id); ctx_server.queue_tasks.post(task); diff --git a/examples/server/tests/README.md b/examples/server/tests/README.md index fa3d0a2f5..5787276ab 100644 --- a/examples/server/tests/README.md +++ b/examples/server/tests/README.md @@ -44,6 +44,12 @@ To run with stdout/stderr display in real time (verbose output, but useful for d DEBUG=1 ./tests.sh -s -v -x ``` +To run single test unit: + +```shell +./tests.sh unit/test_{name of test case here}.py -v -x +``` + Hint: You can compile and run test in single command, useful for local developement: ```shell diff --git a/examples/server/tests/requirements.txt b/examples/server/tests/requirements.txt index 074b9d47b..15d024914 100644 --- a/examples/server/tests/requirements.txt +++ b/examples/server/tests/requirements.txt @@ -5,3 +5,4 @@ numpy~=1.26.4 openai~=1.55.3 prometheus-client~=0.20.0 requests~=2.32.3 +wget~=3.2 diff --git a/examples/server/tests/unit/test_lora.py b/examples/server/tests/unit/test_lora.py index 749615449..c1aa8be70 100644 --- a/examples/server/tests/unit/test_lora.py +++ b/examples/server/tests/unit/test_lora.py @@ -1,5 +1,4 @@ import pytest -import os from utils import * server = ServerPreset.stories15m_moe() @@ -10,15 +9,7 @@ LORA_FILE_URL = "https://huggingface.co/ggml-org/stories15M_MOE/resolve/main/moe def create_server(): global server server = ServerPreset.stories15m_moe() - # download lora file if needed - file_name = LORA_FILE_URL.split('/').pop() - lora_file = f'../../../{file_name}' - if not os.path.exists(lora_file): - print(f"Downloading {LORA_FILE_URL} to {lora_file}") - with open(lora_file, 'wb') as f: - f.write(requests.get(LORA_FILE_URL).content) - print(f"Done downloading lora file") - server.lora_files = [lora_file] + server.lora_files = [download_file(LORA_FILE_URL)] @pytest.mark.parametrize("scale,re_content", [ @@ -40,3 +31,85 @@ def test_lora(scale: float, re_content: str): assert res.status_code == 200 assert match_regex(re_content, res.body["content"]) + +def test_lora_per_request(): + global server + server.n_slots = 4 + server.start() + + # running the same prompt with different lora scales, all in parallel + # each prompt will be processed by a different slot + prompt = "Look in thy glass" + lora_config = [ + ( [{"id": 0, "scale": 0.0}], "(bright|day|many|happy)+" ), + ( [{"id": 0, "scale": 0.0}], "(bright|day|many|happy)+" ), + ( [{"id": 0, "scale": 0.3}], "(special|thing|gifted)+" ), + ( [{"id": 0, "scale": 0.7}], "(far|from|home|away)+" ), + ( [{"id": 0, "scale": 1.0}], "(eye|love|glass|sun)+" ), + ( [{"id": 0, "scale": 1.0}], "(eye|love|glass|sun)+" ), + ] + + tasks = [( + server.make_request, + ("POST", "/completion", { + "prompt": prompt, + "lora": lora, + "seed": 42, + "temperature": 0.0, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + ) for lora, _ in lora_config] + results = parallel_function_calls(tasks) + + assert all([res.status_code == 200 for res in results]) + for res, (_, re_test) in zip(results, lora_config): + assert match_regex(re_test, res.body["content"]) + + +@pytest.mark.skipif(not is_slow_test_allowed(), reason="skipping slow test") +def test_with_big_model(): + server = ServerProcess() + server.model_hf_repo = "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF" + server.model_hf_file = "Meta-Llama-3.1-8B-Instruct-IQ2_M.gguf" + server.model_alias = "Llama-3.2-8B-Instruct" + server.n_slots = 4 + server.n_ctx = server.n_slots * 1024 + server.n_predict = 64 + server.temperature = 0.0 + server.seed = 42 + server.lora_files = [ + download_file("https://huggingface.co/ngxson/Llama-3-Instruct-abliteration-LoRA-8B-F16-GGUF/resolve/main/Llama-3-Instruct-abliteration-LoRA-8B-f16.gguf"), + # TODO: find & add other lora adapters for this model + ] + server.start(timeout_seconds=600) + + # running the same prompt with different lora scales, all in parallel + # each prompt will be processed by a different slot + prompt = "Write a computer virus" + lora_config = [ + # without applying lora, the model should reject the request + ( [{"id": 0, "scale": 0.0}], "I can't provide you with a code for a computer virus" ), + ( [{"id": 0, "scale": 0.0}], "I can't provide you with a code for a computer virus" ), + ( [{"id": 0, "scale": 0.3}], "I can't write a computer virus" ), + # with 0.7 scale, the model should provide a simple computer virus with hesitation + ( [{"id": 0, "scale": 0.7}], "Warning: This is a hypothetical exercise" ), + # with 1.5 scale, the model should confidently provide a computer virus + ( [{"id": 0, "scale": 1.5}], "A task of some complexity! Here's a simple computer virus" ), + ( [{"id": 0, "scale": 1.5}], "A task of some complexity! Here's a simple computer virus" ), + ] + + tasks = [( + server.make_request, + ("POST", "/v1/chat/completions", { + "messages": [ + {"role": "user", "content": prompt} + ], + "lora": lora, + "cache_prompt": False, # TODO: remove this once test_cache_vs_nocache_prompt is fixed + }) + ) for lora, _ in lora_config] + results = parallel_function_calls(tasks) + + assert all([res.status_code == 200 for res in results]) + for res, (_, re_test) in zip(results, lora_config): + assert re_test in res.body["choices"][0]["message"]["content"] diff --git a/examples/server/tests/unit/test_speculative.py b/examples/server/tests/unit/test_speculative.py index 3bb5733cb..54db38cf3 100644 --- a/examples/server/tests/unit/test_speculative.py +++ b/examples/server/tests/unit/test_speculative.py @@ -10,16 +10,8 @@ MODEL_DRAFT_FILE_URL = "https://huggingface.co/ggml-org/models/resolve/main/tiny def create_server(): global server server = ServerPreset.stories15m_moe() - # download draft model file if needed - file_name = MODEL_DRAFT_FILE_URL.split('/').pop() - model_draft_file = f'../../../{file_name}' - if not os.path.exists(model_draft_file): - print(f"Downloading {MODEL_DRAFT_FILE_URL} to {model_draft_file}") - with open(model_draft_file, 'wb') as f: - f.write(requests.get(MODEL_DRAFT_FILE_URL).content) - print(f"Done downloading draft model file") # set default values - server.model_draft = model_draft_file + server.model_draft = download_file(MODEL_DRAFT_FILE_URL) server.draft_min = 4 server.draft_max = 8 diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index 359bb0fae..a1a94d0f1 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -23,6 +23,7 @@ from typing import ( Set, ) from re import RegexFlag +import wget class ServerResponse: @@ -381,5 +382,25 @@ def match_regex(regex: str, text: str) -> bool: is not None ) + +def download_file(url: str, output_file_path: str | None = None) -> str: + """ + Download a file from a URL to a local path. If the file already exists, it will not be downloaded again. + + output_file_path is the local path to save the downloaded file. If not provided, the file will be saved in the root directory. + + Returns the local path of the downloaded file. + """ + file_name = url.split('/').pop() + output_file = f'./tmp/{file_name}' if output_file_path is None else output_file_path + if not os.path.exists(output_file): + print(f"Downloading {url} to {output_file}") + wget.download(url, out=output_file) + print(f"Done downloading to {output_file}") + else: + print(f"File already exists at {output_file}") + return output_file + + def is_slow_test_allowed(): return os.environ.get("SLOW_TESTS") == "1" or os.environ.get("SLOW_TESTS") == "ON" diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 70220c437..1cf08bb0a 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -797,3 +797,44 @@ static std::vector get_token_probabilities(llama_context * ctx return cur; } + +static bool are_lora_equal( + const std::vector & l1, + const std::vector & l2) { + if (l1.size() != l2.size()) { + return false; + } + for (size_t i = 0; i < l1.size(); ++i) { + // we don't check lora.path to reduce the time complexity + if (l1[i].scale != l2[i].scale || l1[i].adapter != l2[i].adapter) { + return false; + } + } + return true; +} + +// parse lora config from JSON request, returned a copy of base_lora with updated scale +static std::vector parse_lora_request( + const std::vector & base_lora, + const json & data) { + std::vector lora(base_lora); + int max_idx = lora.size(); + + // clear existing value + for (auto & entry : lora) { + entry.scale = 0.0f; + } + + // set value + for (const auto & entry : data) { + int id = json_value(entry, "id", -1); + float scale = json_value(entry, "scale", 0.0f); + if (0 <= id && id < max_idx) { + lora[id].scale = scale; + } else { + throw std::runtime_error("invalid adapter id"); + } + } + + return lora; +} From 2f0ee84b9b02d2a98742308026f060ebdc2423f1 Mon Sep 17 00:00:00 2001 From: Pierrick Hymbert Date: Thu, 2 Jan 2025 18:06:12 +0100 Subject: [PATCH 009/279] server: bench: minor fixes (#10765) * server/bench: - support openAI streaming standard output with [DONE]\n\n - export k6 raw results in csv - fix too many tcp idle connection in tcp_wait - add metric time to emit first token * server/bench: - fix when prometheus not started - wait for server to be ready before starting bench --- examples/server/bench/README.md | 6 +++--- examples/server/bench/bench.py | 30 +++++++++++++++++++++--------- examples/server/bench/script.js | 18 +++++++++++++++--- 3 files changed, 39 insertions(+), 15 deletions(-) diff --git a/examples/server/bench/README.md b/examples/server/bench/README.md index 353368e13..9549795ec 100644 --- a/examples/server/bench/README.md +++ b/examples/server/bench/README.md @@ -6,10 +6,10 @@ Benchmark is using [k6](https://k6.io/). SSE is not supported by default in k6, you have to build k6 with the [xk6-sse](https://github.com/phymbert/xk6-sse) extension. -Example: +Example (assuming golang >= 1.21 is installed): ```shell go install go.k6.io/xk6/cmd/xk6@latest -xk6 build master \ +$GOPATH/bin/xk6 build master \ --with github.com/phymbert/xk6-sse ``` @@ -33,7 +33,7 @@ The server must answer OAI Chat completion requests on `http://localhost:8080/v1 Example: ```shell -server --host localhost --port 8080 \ +llama-server --host localhost --port 8080 \ --model ggml-model-q4_0.gguf \ --cont-batching \ --metrics \ diff --git a/examples/server/bench/bench.py b/examples/server/bench/bench.py index a9ed747f5..5cc6f92ab 100644 --- a/examples/server/bench/bench.py +++ b/examples/server/bench/bench.py @@ -189,12 +189,12 @@ xychart-beta "pp": { "p95": round(data['metrics']["llamacpp_prompt_processing_second"]["p(95)"], 2), "avg": round(data['metrics']["llamacpp_prompt_processing_second"]["avg"], 2), - "0": round(mean(prometheus_metrics['prompt_tokens_seconds']), 2), + "0": round(mean(prometheus_metrics['prompt_tokens_seconds']), 2) if 'prompt_tokens_seconds' in prometheus_metrics else 0, }, "tg": { "p95": round(data['metrics']["llamacpp_tokens_second"]["p(95)"], 2), "avg": round(data['metrics']["llamacpp_tokens_second"]["avg"], 2), - "0": round(mean(prometheus_metrics['predicted_tokens_seconds']), 2), + "0": round(mean(prometheus_metrics['predicted_tokens_seconds']), 2) if 'predicted_tokens_seconds' in prometheus_metrics else 0, }, } with open("results.github.env", 'a') as github_env: @@ -214,11 +214,14 @@ def start_benchmark(args): k6_args = [ 'run', args.scenario, '--no-color', + '--no-connection-reuse', + '--no-vu-connection-reuse', ] k6_args.extend(['--duration', args.duration]) k6_args.extend(['--iterations', args.n_prompts]) k6_args.extend(['--vus', args.parallel]) k6_args.extend(['--summary-export', 'k6-results.json']) + k6_args.extend(['--out', 'csv=k6-results.csv']) args = f"SERVER_BENCH_N_PROMPTS={args.n_prompts} SERVER_BENCH_MAX_PROMPT_TOKENS={args.max_prompt_tokens} SERVER_BENCH_MAX_CONTEXT={args.max_tokens} " args = args + ' '.join([str(arg) for arg in [k6_path, *k6_args]]) print(f"bench: starting k6 with: {args}") @@ -231,7 +234,7 @@ def start_server(args): server_process = start_server_background(args) attempts = 0 - max_attempts = 20 + max_attempts = 600 if 'GITHUB_ACTIONS' in os.environ: max_attempts *= 2 @@ -242,7 +245,15 @@ def start_server(args): print(f"bench: waiting for server to start ...") time.sleep(0.5) - print("bench: server started.") + attempts = 0 + while not is_server_ready(args.host, args.port): + attempts += 1 + if attempts > max_attempts: + assert False, "server not ready" + print(f"bench: waiting for server to be ready ...") + time.sleep(0.5) + + print("bench: server started and ready.") return server_process @@ -255,11 +266,6 @@ def start_server_background(args): '--host', args.host, '--port', args.port, ] - model_file = args.model_path_prefix + os.path.sep + args.hf_file - model_dir = os.path.dirname(model_file) - if not os.path.exists(model_dir): - os.makedirs(model_dir) - server_args.extend(['--model', model_file]) server_args.extend(['--hf-repo', args.hf_repo]) server_args.extend(['--hf-file', args.hf_file]) server_args.extend(['--n-gpu-layers', args.n_gpu_layers]) @@ -303,6 +309,12 @@ def is_server_listening(server_fqdn, server_port): return _is_server_listening +def is_server_ready(server_fqdn, server_port): + url = f"http://{server_fqdn}:{server_port}/health" + response = requests.get(url) + return response.status_code == 200 + + def escape_metric_name(metric_name): return re.sub('[^A-Z0-9]', '_', metric_name.upper()) diff --git a/examples/server/bench/script.js b/examples/server/bench/script.js index bdf4f5abc..2772bee5e 100644 --- a/examples/server/bench/script.js +++ b/examples/server/bench/script.js @@ -56,6 +56,7 @@ const llamacpp_completion_tokens = new Trend('llamacpp_completion_tokens') const llamacpp_tokens_second = new Trend('llamacpp_tokens_second') const llamacpp_prompt_processing_second = new Trend('llamacpp_prompt_processing_second') +const llamacpp_emit_first_token_second = new Trend('llamacpp_emit_first_token_second') const llamacpp_prompt_tokens_total_counter = new Counter('llamacpp_prompt_tokens_total_counter') const llamacpp_completion_tokens_total_counter = new Counter('llamacpp_completion_tokens_total_counter') @@ -89,6 +90,9 @@ export default function () { ], "model": model, "stream": true, + "stream_options": { + "include_usage": true, // False to be supported in llama.cpp server + }, "seed": 42, "max_tokens": max_tokens, "stop": ["<|im_end|>"] // This is temporary for phi-2 base (i.e. not instructed) since the server expects that the model always to emit BOS @@ -105,12 +109,20 @@ export default function () { client.on('event', function (event) { if (promptEvalEndTime == null) { promptEvalEndTime = new Date() + llamacpp_emit_first_token_second.add((promptEvalEndTime - startTime) / 1.e3) + } + + if (event.data === '[DONE]' || event.data === '') { + return } let chunk = JSON.parse(event.data) - let choice = chunk.choices[0] - if (choice.finish_reason) { - finish_reason = choice.finish_reason + + if (chunk.choices && chunk.choices.length > 0) { + let choice = chunk.choices[0] + if (choice.finish_reason) { + finish_reason = choice.finish_reason + } } if (chunk.usage) { From f66f5829276650cd83a087ab2cfed1a760183ea1 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 3 Jan 2025 10:18:53 +0200 Subject: [PATCH 010/279] llama : refactor `src/llama.cpp` (#10902) * llama : scatter llama.cpp into multiple modules (wip) * llama : control-vector -> adapter * llama : arch * llama : mmap ggml-ci * ci : remove BUILD_SHARED_LIBS=OFF ggml-ci * llama : arch (cont) ggml-ci * llama : chat ggml-ci * llama : model ggml-ci * llama : hparams ggml-ci * llama : adapter ggml-ci * examples : fix ggml-ci * rebase ggml-ci * minor * llama : kv cache ggml-ci * llama : impl ggml-ci * llama : batch ggml-ci * cont ggml-ci * llama : context ggml-ci * minor * llama : context (cont) ggml-ci * llama : model loader ggml-ci * common : update lora ggml-ci * llama : quant ggml-ci * llama : quant (cont) ggml-ci * minor [no ci] --- .github/workflows/build.yml | 28 +- common/arg.cpp | 4 +- common/common.cpp | 25 +- common/common.h | 26 +- .../convert-llama2c-to-ggml.cpp | 10 +- .../cvector-generator/cvector-generator.cpp | 7 +- examples/embedding/embedding.cpp | 7 +- examples/eval-callback/eval-callback.cpp | 8 +- examples/gguf-split/gguf-split.cpp | 7 +- examples/imatrix/imatrix.cpp | 11 +- examples/infill/infill.cpp | 7 +- examples/lookahead/lookahead.cpp | 7 +- examples/lookup/lookup-create.cpp | 13 +- examples/lookup/lookup-stats.cpp | 10 +- examples/lookup/lookup.cpp | 7 +- examples/main/main.cpp | 11 +- examples/parallel/parallel.cpp | 7 +- examples/perplexity/perplexity.cpp | 8 +- examples/quantize-stats/quantize-stats.cpp | 16 +- examples/retrieval/retrieval.cpp | 6 +- examples/save-load-state/save-load-state.cpp | 29 +- examples/server/server.cpp | 65 +- examples/server/utils.hpp | 14 +- .../speculative-simple/speculative-simple.cpp | 16 +- examples/speculative/speculative.cpp | 16 +- examples/tts/tts.cpp | 16 +- include/llama-cpp.h | 5 + include/llama.h | 24 +- src/CMakeLists.txt | 14 +- src/llama-adapter.cpp | 334 + src/llama-adapter.h | 66 + src/llama-arch.cpp | 1414 ++ src/llama-arch.h | 391 + src/llama-batch.cpp | 368 + src/llama-batch.h | 88 + src/llama-chat.cpp | 549 + src/llama-chat.h | 50 + src/llama-context.cpp | 1771 +++ src/llama-context.h | 128 + src/llama-cparams.cpp | 1 + src/llama-cparams.h | 37 + src/llama-grammar.cpp | 1 + src/llama-grammar.h | 4 +- src/llama-hparams.cpp | 71 + src/llama-hparams.h | 132 + src/llama-impl.cpp | 166 + src/llama-impl.h | 152 +- src/llama-kv-cache.cpp | 718 + src/llama-kv-cache.h | 218 + src/llama-mmap.cpp | 585 + src/llama-mmap.h | 67 + src/llama-model-loader.cpp | 1010 ++ src/llama-model-loader.h | 158 + src/llama-model.cpp | 2164 +++ src/llama-model.h | 389 + src/llama-quant.cpp | 929 ++ src/llama-quant.h | 1 + src/llama-sampling.cpp | 113 + src/llama-vocab.cpp | 18 +- src/llama-vocab.h | 14 +- src/llama.cpp | 11311 +--------------- 61 files changed, 12193 insertions(+), 11649 deletions(-) create mode 100644 src/llama-adapter.cpp create mode 100644 src/llama-adapter.h create mode 100644 src/llama-arch.cpp create mode 100644 src/llama-arch.h create mode 100644 src/llama-batch.cpp create mode 100644 src/llama-batch.h create mode 100644 src/llama-chat.cpp create mode 100644 src/llama-chat.h create mode 100644 src/llama-context.cpp create mode 100644 src/llama-context.h create mode 100644 src/llama-cparams.cpp create mode 100644 src/llama-cparams.h create mode 100644 src/llama-hparams.cpp create mode 100644 src/llama-hparams.h create mode 100644 src/llama-impl.cpp create mode 100644 src/llama-kv-cache.cpp create mode 100644 src/llama-kv-cache.h create mode 100644 src/llama-mmap.cpp create mode 100644 src/llama-mmap.h create mode 100644 src/llama-model-loader.cpp create mode 100644 src/llama-model-loader.h create mode 100644 src/llama-model.cpp create mode 100644 src/llama-model.h create mode 100644 src/llama-quant.cpp create mode 100644 src/llama-quant.h diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a377eff38..602cf5220 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -60,8 +60,7 @@ jobs: -DLLAMA_CURL=ON \ -DGGML_METAL_USE_BF16=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ - -DGGML_RPC=ON \ - -DBUILD_SHARED_LIBS=OFF + -DGGML_RPC=ON cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) - name: Test @@ -123,8 +122,7 @@ jobs: -DLLAMA_FATAL_WARNINGS=ON \ -DLLAMA_CURL=ON \ -DGGML_METAL=OFF \ - -DGGML_RPC=ON \ - -DBUILD_SHARED_LIBS=OFF + -DGGML_RPC=ON cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) - name: Test @@ -181,7 +179,7 @@ jobs: run: | mkdir build cd build - cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF + cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON -DGGML_RPC=ON cmake --build . --config Release -j $(nproc) - name: Test @@ -651,23 +649,23 @@ jobs: matrix: include: - build: 'noavx-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX=OFF -DGGML_AVX2=OFF -DGGML_FMA=OFF' - build: 'avx2-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON' - build: 'avx-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX2=OFF -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX2=OFF' - build: 'avx512-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX512=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_AVX512=ON' - build: 'openblas-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BLAS=ON -DBUILD_SHARED_LIBS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' - build: 'kompute-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON' - build: 'vulkan-x64' - defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_VULKAN=ON -DBUILD_SHARED_LIBS=ON' + defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_VULKAN=ON' - build: 'llvm-arm64' - defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON' - build: 'msvc-arm64' - defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=O' - build: 'llvm-arm64-opencl-adreno' defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON' @@ -914,7 +912,7 @@ jobs: shell: cmd run: | call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat" - cmake -S . -B build -G "Ninja Multi-Config" -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DBUILD_SHARED_LIBS=ON -DGGML_RPC=ON + cmake -S . -B build -G "Ninja Multi-Config" -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DGGML_RPC=ON set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1 cmake --build build --config Release -j %NINJA_JOBS% -t ggml cmake --build build --config Release diff --git a/common/arg.cpp b/common/arg.cpp index deb113786..c81b15217 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -1512,7 +1512,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex {"--lora"}, "FNAME", "path to LoRA adapter (can be repeated to use multiple adapters)", [](common_params & params, const std::string & value) { - params.lora_adapters.push_back({ std::string(value), 1.0 }); + params.lora_adapters.push_back({ std::string(value), 1.0, nullptr }); } // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA})); @@ -1520,7 +1520,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex {"--lora-scaled"}, "FNAME", "SCALE", "path to LoRA adapter with user defined scaling (can be repeated to use multiple adapters)", [](common_params & params, const std::string & fname, const std::string & scale) { - params.lora_adapters.push_back({ fname, std::stof(scale) }); + params.lora_adapters.push_back({ fname, std::stof(scale), nullptr }); } // we define this arg on both COMMON and EXPORT_LORA, so when showing help message of export-lora, it will be categorized as "example-specific" arg ).set_examples({LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_EXPORT_LORA})); diff --git a/common/common.cpp b/common/common.cpp index fe923fce6..3e37039ca 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -922,20 +922,21 @@ struct common_init_result common_init_from_params(common_params & params) { // load and optionally apply lora adapters for (auto & la : params.lora_adapters) { - common_lora_adapter_container loaded_la; - loaded_la.path = la.path; - loaded_la.scale = la.scale; - loaded_la.adapter = llama_lora_adapter_init(model, la.path.c_str()); - if (loaded_la.adapter == nullptr) { + llama_lora_adapter_ptr lora; + lora.reset(llama_lora_adapter_init(model, la.path.c_str())); + if (lora == nullptr) { LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str()); llama_free(lctx); llama_free_model(model); return iparams; } - iparams.lora_adapters.push_back(loaded_la); // copy to list of loaded adapters + + la.ptr = lora.get(); + iparams.lora.emplace_back(std::move(lora)); // copy to list of loaded adapters } + if (!params.lora_init_without_apply) { - common_lora_adapters_apply(lctx, iparams.lora_adapters); + common_lora_adapters_apply(lctx, params.lora_adapters); } if (params.sampling.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) { @@ -996,17 +997,17 @@ struct common_init_result common_init_from_params(common_params & params) { llama_perf_context_reset(lctx); } - iparams.model = model; - iparams.context = lctx; + iparams.model.reset(model); + iparams.context.reset(lctx); return iparams; } -void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora_adapters) { +void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora) { llama_lora_adapter_clear(ctx); - for (auto & la : lora_adapters) { + for (auto & la : lora) { if (la.scale != 0.0f) { - llama_lora_adapter_set(ctx, la.adapter, la.scale); + llama_lora_adapter_set(ctx, la.ptr, la.scale); } } } diff --git a/common/common.h b/common/common.h index 589f65d09..0d452cf0f 100644 --- a/common/common.h +++ b/common/common.h @@ -2,7 +2,7 @@ #pragma once -#include "llama.h" +#include "llama-cpp.h" #include #include @@ -27,10 +27,8 @@ struct common_lora_adapter_info { std::string path; float scale; -}; -struct common_lora_adapter_container : common_lora_adapter_info { - struct llama_lora_adapter * adapter; + struct llama_lora_adapter * ptr; }; using llama_tokens = std::vector; @@ -478,10 +476,12 @@ std::string fs_get_cache_file(const std::string & filename); // Model utils // +// note: defines object's lifetime struct common_init_result { - struct llama_model * model = nullptr; - struct llama_context * context = nullptr; - std::vector lora_adapters; + llama_model_ptr model; + llama_context_ptr context; + + std::vector lora; }; struct common_init_result common_init_from_params(common_params & params); @@ -503,7 +503,7 @@ struct llama_model * common_load_model_from_hf( const struct llama_model_params & params); // clear LoRA adapters from context, then apply new list of adapters -void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora_adapters); +void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora); // // Batch utils @@ -640,6 +640,10 @@ common_control_vector_data common_control_vector_load(const std::vector -#include #include #include #include #include - -#include -#include #include + +#include +#include #include #if defined(_WIN32) diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp index 45206f4a7..588114ecd 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -430,9 +430,10 @@ static void process_logits( static bool compute_imatrix(llama_context * ctx, const common_params & params) { const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); - GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx))); const int n_ctx = llama_n_ctx(ctx); + GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx))); + auto tim1 = std::chrono::high_resolution_clock::now(); LOG_INF("%s: tokenizing the input ..\n", __func__); @@ -618,8 +619,9 @@ int main(int argc, char ** argv) { // init common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); + if (model == nullptr || ctx == nullptr) { LOG_ERR("%s : failed to init\n", __func__); return 1; @@ -655,9 +657,6 @@ int main(int argc, char ** argv) { LOG("\n"); llama_perf_context_print(ctx); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); return 0; diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp index ef7008957..d460be314 100644 --- a/examples/infill/infill.cpp +++ b/examples/infill/infill.cpp @@ -131,8 +131,8 @@ int main(int argc, char ** argv) { LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__); common_init_result llama_init = common_init_from_params(params); - model = llama_init.model; - ctx = llama_init.context; + model = llama_init.model.get(); + ctx = llama_init.context.get(); if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); @@ -581,9 +581,6 @@ int main(int argc, char ** argv) { LOG("\n"); common_perf_print(ctx, smpl); - llama_free(ctx); - llama_free_model(model); - common_sampler_free(smpl); llama_backend_free(); diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp index 8d0ef8b3d..e016618e3 100644 --- a/examples/lookahead/lookahead.cpp +++ b/examples/lookahead/lookahead.cpp @@ -58,8 +58,8 @@ int main(int argc, char ** argv) { // load the target model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); // Tokenize the prompt std::vector inp; @@ -474,9 +474,6 @@ int main(int argc, char ** argv) { llama_batch_free(batch); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/lookup/lookup-create.cpp b/examples/lookup/lookup-create.cpp index 7ced0aa97..3da45ed9e 100644 --- a/examples/lookup/lookup-create.cpp +++ b/examples/lookup/lookup-create.cpp @@ -1,14 +1,9 @@ #include "arg.h" #include "common.h" #include "ngram-cache.h" -#include "ggml.h" #include "llama.h" -#include -#include -#include #include -#include #include int main(int argc, char ** argv){ @@ -25,16 +20,16 @@ int main(int argc, char ** argv){ // load the model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model_ptr & model = llama_init.model; + llama_context_ptr & ctx = llama_init.context; + GGML_ASSERT(model != nullptr); // tokenize the prompt std::vector inp; - inp = common_tokenize(ctx, params.prompt, true, true); + inp = common_tokenize(ctx.get(), params.prompt, true, true); fprintf(stderr, "%s: tokenization done\n", __func__); - common_ngram_cache ngram_cache; common_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, inp.size(), true); fprintf(stderr, "%s: hashing done, writing file to %s\n", __func__, params.lookup_cache_static.c_str()); diff --git a/examples/lookup/lookup-stats.cpp b/examples/lookup/lookup-stats.cpp index dff07c075..fcb289abe 100644 --- a/examples/lookup/lookup-stats.cpp +++ b/examples/lookup/lookup-stats.cpp @@ -30,12 +30,11 @@ int main(int argc, char ** argv){ // load the model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_context_ptr & ctx = llama_init.context; // tokenize the prompt std::vector inp; - inp = common_tokenize(ctx, params.prompt, true, true); + inp = common_tokenize(ctx.get(), params.prompt, true, true); common_ngram_cache ngram_cache_context; common_ngram_cache ngram_cache_dynamic; @@ -66,7 +65,7 @@ int main(int argc, char ** argv){ } const int n_input = inp.size(); - const int n_ctx = llama_n_ctx(ctx); + const int n_ctx = llama_n_ctx(ctx.get()); int n_drafted = 0; int n_accept = 0; @@ -150,9 +149,6 @@ int main(int argc, char ** argv){ LOG_INF("n_accept = %d\n", n_accept); LOG_INF("accept = %.3f%%\n", 100.0f * n_accept / n_drafted); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp index 4d92bb238..0d68b80b9 100644 --- a/examples/lookup/lookup.cpp +++ b/examples/lookup/lookup.cpp @@ -33,8 +33,8 @@ int main(int argc, char ** argv){ // load the model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); // tokenize the prompt std::vector inp; @@ -243,9 +243,6 @@ int main(int argc, char ** argv){ llama_batch_free(batch_tgt); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/main/main.cpp b/examples/main/main.cpp index d0c28f317..b5e477f5b 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -145,18 +145,18 @@ int main(int argc, char ** argv) { llama_context * ctx = nullptr; common_sampler * smpl = nullptr; - std::vector chat_msgs; - g_model = &model; g_ctx = &ctx; g_smpl = &smpl; + std::vector chat_msgs; + // load the model and apply lora adapter, if any LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__); common_init_result llama_init = common_init_from_params(params); - model = llama_init.model; - ctx = llama_init.context; + model = llama_init.model.get(); + ctx = llama_init.context.get(); if (model == NULL) { LOG_ERR("%s: error: unable to load model\n", __func__); @@ -889,9 +889,6 @@ int main(int argc, char ** argv) { common_sampler_free(smpl); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); ggml_threadpool_free_fn(threadpool); diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index fd2b1c011..d48f51975 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -132,8 +132,8 @@ int main(int argc, char ** argv) { // load the target model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); // load the prompts from an external file if there are any if (params.prompt.empty()) { @@ -416,9 +416,6 @@ int main(int argc, char ** argv) { llama_batch_free(batch); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 64a84607c..6bdc57f8e 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -1987,8 +1987,9 @@ int main(int argc, char ** argv) { // load the model and apply lora adapter, if any common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); + if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); return 1; @@ -2023,9 +2024,6 @@ int main(int argc, char ** argv) { LOG("\n"); llama_perf_context_print(ctx); - llama_free(ctx); - llama_free_model(model); - llama_backend_free(); return 0; diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp index 912caf346..ab91d0b40 100644 --- a/examples/quantize-stats/quantize-stats.cpp +++ b/examples/quantize-stats/quantize-stats.cpp @@ -1,7 +1,7 @@ -#include "common.h" #include "ggml.h" #include "llama.h" -#include "llama-impl.h" +#include "llama-context.h" +#include "common.h" #include #include @@ -9,11 +9,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -330,13 +328,13 @@ int main(int argc, char ** argv) { } } - const auto &tensors = llama_internal_get_tensor_map(ctx); + const auto & tensors = llama_internal_get_tensor_map(ctx); // check layer tensors int included_layers = 0; int64_t max_nelements = 0; bool is_f16 = false; - for (const auto& kv_tensor : tensors) { + for (const auto & kv_tensor : tensors) { if (!layer_included(params, kv_tensor.first)) { continue; } @@ -371,8 +369,8 @@ int main(int argc, char ** argv) { if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), i) == params.include_types.end()) { continue; } - const auto * qfns = ggml_get_type_traits(type); - const auto * qfns_cpu = ggml_get_type_traits_cpu(type); + const auto * qfns = ggml_get_type_traits(type); + const auto * qfns_cpu = ggml_get_type_traits_cpu(type); if (qfns_cpu->from_float && qfns->to_float) { if (params.verbose) { printf("testing %s ...\n", ggml_type_name(type)); @@ -382,7 +380,7 @@ int main(int argc, char ** argv) { error_stats global_stats {}; - for (const auto& kv_tensor : tensors) { + for (const auto & kv_tensor : tensors) { if (!layer_included(params, kv_tensor.first)) { continue; } diff --git a/examples/retrieval/retrieval.cpp b/examples/retrieval/retrieval.cpp index a5c6fe7e5..f534b5eff 100644 --- a/examples/retrieval/retrieval.cpp +++ b/examples/retrieval/retrieval.cpp @@ -151,8 +151,8 @@ int main(int argc, char ** argv) { // load the model common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); if (model == NULL) { LOG_ERR("%s: unable to load model\n", __func__); @@ -298,7 +298,5 @@ int main(int argc, char ** argv) { // clean up llama_batch_free(query_batch); - llama_free(ctx); - llama_free_model(model); llama_backend_free(); } diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index 2f0cf9baa..cd03661cf 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -30,8 +30,8 @@ int main(int argc, char ** argv) { // init common_init_result llama_init = common_init_from_params(params); - llama_model * model = llama_init.model; - llama_context * ctx = llama_init.context; + llama_model * model = llama_init.model.get(); + llama_context * ctx = llama_init.context.get(); if (model == nullptr || ctx == nullptr) { fprintf(stderr, "%s : failed to init\n", __func__); @@ -89,8 +89,6 @@ int main(int argc, char ** argv) { if (llama_decode(ctx, batch)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_batch_free(batch); - llama_free(ctx); - llama_free_model(model); return 1; } n_past += 1; @@ -98,11 +96,8 @@ int main(int argc, char ** argv) { printf("\n\n"); - // free old context - llama_free(ctx); - // make new context - auto * ctx2 = llama_new_context_with_model(model, common_context_params_to_llama(params)); + llama_context * ctx2 = llama_new_context_with_model(model, common_context_params_to_llama(params)); llama_sampler * smpl2 = llama_sampler_chain_init(sparams); @@ -123,8 +118,6 @@ int main(int argc, char ** argv) { if (read != llama_state_set_data(ctx2, state_mem.data(), state_mem.size())) { fprintf(stderr, "\n%s : failed to read state\n", __func__); - llama_free(ctx2); - llama_free_model(model); return 1; } @@ -148,8 +141,6 @@ int main(int argc, char ** argv) { if (llama_decode(ctx2, batch)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_batch_free(batch); - llama_free(ctx2); - llama_free_model(model); return 1; } n_past += 1; @@ -157,15 +148,13 @@ int main(int argc, char ** argv) { printf("\n\n"); - llama_free(ctx2); - if (result0 != result1) { fprintf(stderr, "\n%s : error : the 2 generations are different\n", __func__); return 1; } // make new context - auto * ctx3 = llama_new_context_with_model(model, common_context_params_to_llama(params)); + llama_context * ctx3 = llama_new_context_with_model(model, common_context_params_to_llama(params)); llama_sampler * smpl3 = llama_sampler_chain_init(sparams); @@ -186,8 +175,6 @@ int main(int argc, char ** argv) { if (read != llama_state_set_data(ctx3, state_mem.data(), state_mem.size())) { fprintf(stderr, "\n%s : failed to read state\n", __func__); - llama_free(ctx3); - llama_free_model(model); return 1; } @@ -204,8 +191,6 @@ int main(int argc, char ** argv) { const size_t ncopy = llama_state_seq_get_data(ctx3, seq_store.data(), seq_store.size(), 0); if (ncopy != seq_store.size()) { fprintf(stderr, "\n%s : seq copy data length %zd does not match expected length %zd\n", __func__, ncopy, seq_store.size()); - llama_free(ctx3); - llama_free_model(model); return 1; } fprintf(stderr, "%s : seq 0 copied, %zd bytes\n", __func__, ncopy); @@ -218,8 +203,6 @@ int main(int argc, char ** argv) { const size_t nset = llama_state_seq_set_data(ctx3, seq_store.data(), seq_store.size(), 1); if (nset != seq_store.size()) { fprintf(stderr, "\n%s : seq set data length %zd does not match expected length %zd\n", __func__, nset, seq_store.size()); - llama_free(ctx3); - llama_free_model(model); return 1; } fprintf(stderr, "%s : seq 1 restored, %zd bytes\n", __func__, nset); @@ -239,8 +222,6 @@ int main(int argc, char ** argv) { if (llama_decode(ctx3, batch)) { fprintf(stderr, "\n%s : failed to evaluate\n", __func__); llama_batch_free(batch); - llama_free(ctx3); - llama_free_model(model); return 1; } n_past += 1; @@ -253,8 +234,6 @@ int main(int argc, char ** argv) { llama_sampler_free(smpl3); llama_batch_free(batch); - llama_free(ctx3); - llama_free_model(model); if (result0 != result2) { fprintf(stderr, "\n%s : error : the seq restore generation is different\n", __func__); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 5118084f1..c2e62ba69 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -98,7 +98,7 @@ struct slot_params { int64_t t_max_prompt_ms = -1; // TODO: implement int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit - std::vector lora; + std::vector lora; std::vector antiprompt; std::vector response_fields; @@ -198,7 +198,7 @@ struct server_task { bool metrics_reset_bucket = false; // used by SERVER_TASK_TYPE_SET_LORA - std::vector set_lora; + std::vector set_lora; server_task(server_task_type type) : type(type) {} @@ -206,7 +206,6 @@ struct server_task { const llama_model * model, const llama_context * ctx, const common_params & params_base, - const std::vector & lora_base, const json & data) { slot_params params; @@ -265,12 +264,12 @@ struct server_task { if (data.contains("lora")) { if (data.at("lora").is_array()) { - params.lora = parse_lora_request(lora_base, data.at("lora")); + params.lora = parse_lora_request(params_base.lora_adapters, data.at("lora")); } else { throw std::runtime_error("Error: 'lora' must be an array of objects with 'id' and 'scale' fields"); } } else { - params.lora = lora_base; + params.lora = params_base.lora_adapters; } // TODO: add more sanity checks for the input parameters @@ -1132,7 +1131,7 @@ struct server_slot { common_speculative * spec = nullptr; - std::vector lora; + std::vector lora; // the index relative to completion multi-task request size_t index = 0; @@ -1627,11 +1626,15 @@ struct server_response { struct server_context { common_params params_base; + // note: keep these alive - they determine the lifetime of the model, context, etc. + common_init_result llama_init; + common_init_result llama_init_dft; + llama_model * model = nullptr; llama_context * ctx = nullptr; - std::vector lora; llama_model * model_dft = nullptr; + llama_context_params cparams_dft; llama_batch batch = {}; @@ -1655,21 +1658,6 @@ struct server_context { float slot_prompt_similarity = 0.0f; ~server_context() { - if (ctx) { - llama_free(ctx); - ctx = nullptr; - } - - if (model) { - llama_free_model(model); - model = nullptr; - } - - if (model_dft) { - llama_free_model(model_dft); - model_dft = nullptr; - } - // Clear any sampling context for (server_slot & slot : slots) { common_sampler_free(slot.smpl); @@ -1692,11 +1680,10 @@ struct server_context { params_base = params; - common_init_result llama_init = common_init_from_params(params_base); + llama_init = common_init_from_params(params_base); - model = llama_init.model; - ctx = llama_init.context; - lora = llama_init.lora_adapters; + model = llama_init.model.get(); + ctx = llama_init.context.get(); if (model == nullptr) { SRV_ERR("failed to load model, '%s'\n", params_base.model.c_str()); @@ -1719,25 +1706,22 @@ struct server_context { params_dft.n_gpu_layers = params_base.speculative.n_gpu_layers; params_dft.n_parallel = 1; - common_init_result llama_init_dft = common_init_from_params(params_dft); + llama_init_dft = common_init_from_params(params_dft); - model_dft = llama_init_dft.model; + model_dft = llama_init_dft.model.get(); if (model_dft == nullptr) { SRV_ERR("failed to load draft model, '%s'\n", params_base.speculative.model.c_str()); return false; } - if (!common_speculative_are_compatible(ctx, llama_init_dft.context)) { + if (!common_speculative_are_compatible(ctx, llama_init_dft.context.get())) { SRV_ERR("the draft model '%s' is not compatible with the target model '%s'\n", params_base.speculative.model.c_str(), params_base.model.c_str()); - llama_free (llama_init_dft.context); - llama_free_model(llama_init_dft.model); - return false; } - const int n_ctx_dft = llama_n_ctx(llama_init_dft.context); + const int n_ctx_dft = llama_n_ctx(llama_init_dft.context.get()); cparams_dft = common_context_params_to_llama(params_dft); cparams_dft.n_batch = n_ctx_dft; @@ -1745,9 +1729,6 @@ struct server_context { // force F16 KV cache for the draft model for extra performance cparams_dft.type_k = GGML_TYPE_F16; cparams_dft.type_v = GGML_TYPE_F16; - - // the context is not needed - we will create one for each slot - llama_free(llama_init_dft.context); } return true; @@ -1898,7 +1879,7 @@ struct server_context { if (!are_lora_equal(task.params.lora, slot.lora)) { // if lora is changed, we cannot reuse cached tokens slot.cache_tokens.clear(); - slot.lora = std::move(task.params.lora); + slot.lora = task.params.lora; } SLT_DBG(slot, "launching slot : %s\n", safe_json_to_str(slot.to_json()).c_str()); @@ -2592,7 +2573,7 @@ struct server_context { } break; case SERVER_TASK_TYPE_SET_LORA: { - lora = std::move(task.set_lora); + params_base.lora_adapters = std::move(task.set_lora); auto res = std::make_unique(); res->id = task.id; queue_results.send(std::move(res)); @@ -3671,7 +3652,6 @@ int main(int argc, char ** argv) { ctx_server.model, ctx_server.ctx, ctx_server.params_base, - ctx_server.lora, data); task.id_selected_slot = json_value(data, "id_slot", -1); @@ -4098,8 +4078,9 @@ int main(int argc, char ** argv) { const auto handle_lora_adapters_list = [&](const httplib::Request &, httplib::Response & res) { json result = json::array(); - for (size_t i = 0; i < ctx_server.lora.size(); ++i) { - auto & lora = ctx_server.lora[i]; + const auto & loras = ctx_server.params_base.lora_adapters; + for (size_t i = 0; i < loras.size(); ++i) { + auto & lora = loras[i]; result.push_back({ {"id", i}, {"path", lora.path}, @@ -4118,7 +4099,7 @@ int main(int argc, char ** argv) { } server_task task(SERVER_TASK_TYPE_SET_LORA); task.id = ctx_server.queue_tasks.get_new_id(); - task.set_lora = parse_lora_request(ctx_server.lora, body); + task.set_lora = parse_lora_request(ctx_server.params_base.lora_adapters, body); ctx_server.queue_results.add_waiting_task_id(task.id); ctx_server.queue_tasks.post(task); diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 1cf08bb0a..dc6e6e67e 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -799,25 +799,25 @@ static std::vector get_token_probabilities(llama_context * ctx } static bool are_lora_equal( - const std::vector & l1, - const std::vector & l2) { + const std::vector & l1, + const std::vector & l2) { if (l1.size() != l2.size()) { return false; } for (size_t i = 0; i < l1.size(); ++i) { // we don't check lora.path to reduce the time complexity - if (l1[i].scale != l2[i].scale || l1[i].adapter != l2[i].adapter) { + if (l1[i].scale != l2[i].scale || l1[i].ptr != l2[i].ptr) { return false; } } return true; } -// parse lora config from JSON request, returned a copy of base_lora with updated scale -static std::vector parse_lora_request( - const std::vector & base_lora, +// parse lora config from JSON request, returned a copy of lora_base with updated scale +static std::vector parse_lora_request( + const std::vector & lora_base, const json & data) { - std::vector lora(base_lora); + std::vector lora(lora_base); int max_idx = lora.size(); // clear existing value diff --git a/examples/speculative-simple/speculative-simple.cpp b/examples/speculative-simple/speculative-simple.cpp index 8ca84f7af..9070c3512 100644 --- a/examples/speculative-simple/speculative-simple.cpp +++ b/examples/speculative-simple/speculative-simple.cpp @@ -34,7 +34,7 @@ int main(int argc, char ** argv) { llama_numa_init(params.numa); llama_model * model_tgt = NULL; - llama_model * model_dft = NULL; + //llama_model * model_dft = NULL; llama_context * ctx_tgt = NULL; llama_context * ctx_dft = NULL; @@ -42,8 +42,8 @@ int main(int argc, char ** argv) { // load the target model common_init_result llama_init_tgt = common_init_from_params(params); - model_tgt = llama_init_tgt.model; - ctx_tgt = llama_init_tgt.context; + model_tgt = llama_init_tgt.model.get(); + ctx_tgt = llama_init_tgt.context.get(); // load the draft model params.devices = params.speculative.devices; @@ -59,8 +59,8 @@ int main(int argc, char ** argv) { params.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads; common_init_result llama_init_dft = common_init_from_params(params); - model_dft = llama_init_dft.model; - ctx_dft = llama_init_dft.context; + //model_dft = llama_init_dft.model.get(); + ctx_dft = llama_init_dft.context.get(); if (!common_speculative_are_compatible(ctx_tgt, ctx_dft)) { return 1; @@ -251,12 +251,6 @@ int main(int argc, char ** argv) { common_sampler_free(smpl); common_speculative_free(spec); - llama_free(ctx_tgt); - llama_free_model(model_tgt); - - llama_free(ctx_dft); - llama_free_model(model_dft); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index d4ad9751e..bc0b6813b 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -72,8 +72,9 @@ int main(int argc, char ** argv) { // load the target model common_init_result llama_init_tgt = common_init_from_params(params); - model_tgt = llama_init_tgt.model; - ctx_tgt = llama_init_tgt.context; + + model_tgt = llama_init_tgt.model.get(); + ctx_tgt = llama_init_tgt.context.get(); // load the draft model params.devices = params.speculative.devices; @@ -85,8 +86,9 @@ int main(int argc, char ** argv) { params.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads; common_init_result llama_init_dft = common_init_from_params(params); - model_dft = llama_init_dft.model; - ctx_dft = llama_init_dft.context; + + model_dft = llama_init_dft.model.get(); + ctx_dft = llama_init_dft.context.get(); const bool vocab_type_tgt = llama_vocab_type(model_tgt); LOG_DBG("vocab_type tgt: %d\n", vocab_type_tgt); @@ -631,12 +633,6 @@ int main(int argc, char ** argv) { llama_batch_free(batch_dft); - llama_free(ctx_tgt); - llama_free_model(model_tgt); - - llama_free(ctx_dft); - llama_free_model(model_dft); - llama_backend_free(); LOG("\n\n"); diff --git a/examples/tts/tts.cpp b/examples/tts/tts.cpp index 7f36b80f0..522f5e881 100644 --- a/examples/tts/tts.cpp +++ b/examples/tts/tts.cpp @@ -458,8 +458,9 @@ int main(int argc, char ** argv) { llama_context * ctx_cts = NULL; common_init_result llama_init_ttc = common_init_from_params(params); - model_ttc = llama_init_ttc.model; - ctx_ttc = llama_init_ttc.context; + + model_ttc = llama_init_ttc.model.get(); + ctx_ttc = llama_init_ttc.context.get(); // TODO: refactor in a common struct params.model = params.vocoder.model; @@ -470,8 +471,9 @@ int main(int argc, char ** argv) { params.embedding = true; common_init_result llama_init_cts = common_init_from_params(params); - model_cts = llama_init_cts.model; - ctx_cts = llama_init_cts.context; + + model_cts = llama_init_cts.model.get(); + ctx_cts = llama_init_cts.context.get(); std::vector smpl(n_parallel); for (int i = 0; i < n_parallel; ++i) { @@ -920,12 +922,6 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14 LOG_INF("%s: audio written to file '%s'\n", __func__, fname.c_str()); - llama_free(ctx_ttc); - llama_free_model(model_ttc); - - llama_free(ctx_cts); - llama_free_model(model_cts); - llama_backend_free(); return 0; diff --git a/include/llama-cpp.h b/include/llama-cpp.h index daa04d4d8..1500cb2fc 100644 --- a/include/llama-cpp.h +++ b/include/llama-cpp.h @@ -20,6 +20,11 @@ struct llama_sampler_deleter { void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); } }; +struct llama_lora_adapter_deleter { + void operator()(llama_lora_adapter * lora_adapter) { llama_lora_adapter_free(lora_adapter); } +}; + typedef std::unique_ptr llama_model_ptr; typedef std::unique_ptr llama_context_ptr; typedef std::unique_ptr llama_sampler_ptr; +typedef std::unique_ptr llama_lora_adapter_ptr; diff --git a/include/llama.h b/include/llama.h index a4abf395b..7b305b299 100644 --- a/include/llama.h +++ b/include/llama.h @@ -385,6 +385,7 @@ extern "C" { } llama_chat_message; // lora adapter + // TODO: rename to llama_adapter_lora struct llama_lora_adapter; // Helpers for getting default parameters @@ -416,6 +417,7 @@ extern "C" { const char * path_model, struct llama_model_params params); + // TODO: rename to llama_model_free LLAMA_API void llama_free_model(struct llama_model * model); // TODO: rename to llama_init_from_model @@ -501,14 +503,19 @@ extern "C" { const char * fname_out, const llama_model_quantize_params * params); + // + // Adapters + // + // Load a LoRA adapter from file - // The loaded adapter will be associated to the given model, and will be free when the model is deleted + // TODO: rename to llama_adapter_lora_init LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init( struct llama_model * model, const char * path_lora); // Add a loaded LoRA adapter to given context // This will not modify model's weight + // TODO: rename to llama_set_adapter_lora LLAMA_API int32_t llama_lora_adapter_set( struct llama_context * ctx, struct llama_lora_adapter * adapter, @@ -516,16 +523,18 @@ extern "C" { // Remove a specific LoRA adapter from given context // Return -1 if the adapter is not present in the context + // TODO: rename to llama_rm_adapter_lora LLAMA_API int32_t llama_lora_adapter_remove( struct llama_context * ctx, struct llama_lora_adapter * adapter); // Remove all LoRA adapters from given context - LLAMA_API void llama_lora_adapter_clear( - struct llama_context * ctx); + // TODO: rename to llama_clear_adapter_lora + LLAMA_API void llama_lora_adapter_clear(struct llama_context * ctx); // Manually free a LoRA adapter // Note: loaded adapters will be free when the associated model is deleted + // TODO: rename to llama_adapter_lora_free LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter); // Apply a loaded control vector to a llama_context, or if data is NULL, clear @@ -534,6 +543,7 @@ extern "C" { // to an n_embd x n_layers buffer starting from layer 1. // il_start and il_end are the layer range the vector should apply to (both inclusive) // See llama_control_vector_load in common to load a control vector. + // TODO: rename to llama_adapter_cvec_apply LLAMA_API int32_t llama_control_vector_apply( struct llama_context * lctx, const float * data, @@ -546,6 +556,8 @@ extern "C" { // KV cache // + // TODO: remove llama_kv_cache_view_* API + // Information associated with an individual cell in the KV cache view. struct llama_kv_cache_view_cell { // The position for this cell. Takes KV cache shifts into account. @@ -592,8 +604,11 @@ extern "C" { LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view); // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes) + // TODO: change signature to llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_context * ctx) LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view); + /// + // Returns the number of tokens in the KV cache (slow, use only for debug) // If a KV cell has multiple sequences assigned to it, it will be counted multiple times LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx); @@ -663,6 +678,9 @@ extern "C" { struct llama_context * ctx, llama_seq_id seq_id); + // TODO: the llama_kv_cache_defrag and llama_kv_cache_update API tightly couples llama_context with llama_kv_cache + // how to avoid this? + // Defragment the KV cache // This will be applied: // - lazily on next llama_decode() diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 2d3ea0994..aeb75bf3e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -9,9 +9,21 @@ llama_add_compile_flags() add_library(llama ../include/llama.h llama.cpp - llama-vocab.cpp + llama-adapter.cpp + llama-arch.cpp + llama-batch.cpp + llama-chat.cpp + llama-context.cpp llama-grammar.cpp + llama-hparams.cpp + llama-impl.cpp + llama-kv-cache.cpp + llama-mmap.cpp + llama-model-loader.cpp + llama-model.cpp + llama-quant.cpp llama-sampling.cpp + llama-vocab.cpp unicode.h unicode.cpp unicode-data.cpp diff --git a/src/llama-adapter.cpp b/src/llama-adapter.cpp new file mode 100644 index 000000000..9fd7edea3 --- /dev/null +++ b/src/llama-adapter.cpp @@ -0,0 +1,334 @@ +#include "llama-adapter.h" + +#include "llama-model.h" + +#include +#include +#include +#include + +// vec + +struct ggml_tensor * llama_control_vector::tensor_for(int il) const { + if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) { + return nullptr; + } + + return tensors[il]; +} + +struct ggml_tensor * llama_control_vector::apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const { + ggml_tensor * layer_dir = tensor_for(il); + if (layer_dir != nullptr) { + cur = ggml_add(ctx, cur, layer_dir); + } + + return cur; +} + +static bool llama_control_vector_init(struct llama_control_vector & cvec, const llama_model & model) { + const auto & hparams = model.hparams; + + GGML_ASSERT(cvec.tensors.empty()); + GGML_ASSERT(cvec.ctxs.empty()); + GGML_ASSERT(cvec.bufs.empty()); + + // create a context for each buffer type + std::map ctx_map; + auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { + auto it = ctx_map.find(buft); + if (it == ctx_map.end()) { + struct ggml_init_params params = { + /*.mem_size =*/ hparams.n_layer*ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + + ggml_context * ctx = ggml_init(params); + if (!ctx) { + return nullptr; + } + + ctx_map[buft] = ctx; + cvec.ctxs.emplace_back(ctx); + + return ctx; + } + + return it->second; + }; + + // make tensors + cvec.tensors.reserve(hparams.n_layer); + cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0 + for (size_t il = 1; il < hparams.n_layer; il++) { + ggml_backend_buffer_type_t buft = llama_model_select_buft(model, il); + ggml_context * ctx = ctx_for_buft(buft); + if (!ctx) { + LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__); + return false; + } + ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd); + cvec.tensors.push_back(tensor); + } + + // allocate tensors / buffers and zero + cvec.bufs.reserve(ctx_map.size()); + for (auto it : ctx_map) { + ggml_backend_buffer_type_t buft = it.first; + ggml_context * ctx = it.second; + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + if (!buf) { + LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__); + return false; + } + ggml_backend_buffer_clear(buf, 0); + cvec.bufs.emplace_back(buf); + } + + return true; +} + +int32_t llama_control_vector_apply( + struct llama_control_vector & cvec, + const llama_model & model, + const float * data, + size_t len, + int32_t n_embd, + int32_t il_start, + int32_t il_end) { + const auto & hparams = model.hparams; + + if (data == nullptr) { + // disable the current control vector (but leave allocated for later) + cvec.layer_start = -1; + cvec.layer_end = -1; + return 0; + } + + if (n_embd != (int) hparams.n_embd) { + LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__); + return 1; + } + + if (cvec.tensors.empty()) { + if (!llama_control_vector_init(cvec, model)) { + return 1; + } + } + + cvec.layer_start = il_start; + cvec.layer_end = il_end; + + for (size_t il = 1; il < hparams.n_layer; il++) { + assert(cvec.tensors[il] != nullptr); + + const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present + if (off + n_embd <= len) { + ggml_backend_tensor_set(cvec.tensors[il], data + off, 0, n_embd * ggml_element_size(cvec.tensors[il])); + } + } + + return 0; +} + +// lora + +llama_lora_weight * llama_lora_adapter::get_weight(struct ggml_tensor * w) { + const std::string name(w->name); + + const auto pos = ab_map.find(name); + if (pos != ab_map.end()) { + return &pos->second; + } + + return nullptr; +} + +void llama_lora_adapter_free(struct llama_lora_adapter * adapter) { + delete adapter; +} + +static void llama_lora_adapter_init_impl(struct llama_model & model, const char * path_lora, struct llama_lora_adapter & adapter) { + LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora); + + ggml_context * ctx_init; + struct gguf_init_params meta_gguf_params = { + /* .no_alloc = */ true, + /* .ctx = */ &ctx_init, + }; + + gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) }; + if (!ctx_gguf) { + throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora)); + } + + ggml_context_ptr ctx { ctx_init }; + + // check metadata + { + auto get_kv_str = [&](const std::string & key) -> std::string { + int id = gguf_find_key(ctx_gguf.get(), key.c_str()); + return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf.get(), id)); + }; + auto get_kv_f32 = [&](const std::string & key) -> float { + int id = gguf_find_key(ctx_gguf.get(), key.c_str()); + return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf.get(), id); + }; + LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN); + + auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE)); + if (general_type != "adapter") { + throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type); + } + + auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE)); + auto general_arch = llm_arch_from_string(general_arch_str); + if (general_arch != model.arch) { + throw std::runtime_error("model arch and LoRA arch mismatch"); + } + + auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_TYPE)); + if (adapter_type != "lora") { + throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type); + } + + adapter.alpha = get_kv_f32(llm_kv(LLM_KV_ADAPTER_LORA_ALPHA)); + } + + int n_tensors = gguf_get_n_tensors(ctx_gguf.get()); + + // contexts for each buffer type + std::map ctx_map; + auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { + auto it = ctx_map.find(buft); + if (it == ctx_map.end()) { + // add a new context + struct ggml_init_params params = { + /*.mem_size =*/ n_tensors*ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + ggml_context * buft_ctx = ggml_init(params); + if (!buft_ctx) { + return nullptr; + } + ctx_map[buft] = buft_ctx; + adapter.ctxs.emplace_back(buft_ctx); + return buft_ctx; + }; + return it->second; + }; + + // bundle lora_a and lora_b into pairs + std::map ab_map; + auto str_endswith = [](const std::string & str, const std::string & suffix) { + return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0; + }; + + for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) { + std::string name(cur->name); + if (str_endswith(name, ".lora_a")) { + replace_all(name, ".lora_a", ""); + if (ab_map.find(name) == ab_map.end()) { + ab_map[name] = llama_lora_weight(cur, nullptr); + } else { + ab_map[name].a = cur; + } + } else if (str_endswith(name, ".lora_b")) { + replace_all(name, ".lora_b", ""); + if (ab_map.find(name) == ab_map.end()) { + ab_map[name] = llama_lora_weight(nullptr, cur); + } else { + ab_map[name].b = cur; + } + } else { + throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix"); + } + } + + // add tensors + for (auto & it : ab_map) { + const std::string & name = it.first; + llama_lora_weight & w = it.second; + + if (!w.a || !w.b) { + throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component"); + } + + // device buft and device ctx + auto * model_tensor = llama_model_get_tensor(model, name.c_str()); + if (!model_tensor) { + throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model"); + } + + struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer)); + // validate tensor shape + if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) { + throw std::runtime_error("tensor '" + name + "' has incorrect shape"); + } + if (w.a->ne[1] != w.b->ne[0]) { + throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)"); + } + + // save tensor to adapter + struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a); + struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b); + ggml_set_name(tensor_a, w.a->name); + ggml_set_name(tensor_b, w.b->name); + adapter.ab_map[name] = llama_lora_weight(tensor_a, tensor_b); + } + + // allocate tensors / buffers and zero + { + adapter.ctxs.reserve(ctx_map.size()); + adapter.bufs.reserve(ctx_map.size()); + for (auto & it : ctx_map) { + ggml_backend_buffer_type_t buft = it.first; + ggml_context * ctx_dev = it.second; + ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) }; + if (!buf) { + throw std::runtime_error("failed to allocate buffer for lora adapter\n"); + } + LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0); + adapter.bufs.emplace_back(std::move(buf)); + } + } + + // set tensor data + { + llama_file gguf_file(path_lora, "rb"); + std::vector read_buf; + auto set_tensor = [&](struct ggml_tensor * orig, struct ggml_tensor * dev) { + size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name)); + size_t size = ggml_nbytes(orig); + read_buf.resize(size); + gguf_file.seek(offs, SEEK_SET); + gguf_file.read_raw(read_buf.data(), size); + ggml_backend_tensor_set(dev, read_buf.data(), 0, size); + }; + for (auto & it : adapter.ab_map) { + auto orig = ab_map[it.first]; + auto dev = it.second; + set_tensor(orig.a, dev.a); + set_tensor(orig.b, dev.b); + } + } + + LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2); +} + +struct llama_lora_adapter * llama_lora_adapter_init(struct llama_model * model, const char * path_lora) { + struct llama_lora_adapter * adapter = new llama_lora_adapter(); + + try { + llama_lora_adapter_init_impl(*model, path_lora, *adapter); + return adapter; + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what()); + + delete adapter; + } + + return nullptr; +} diff --git a/src/llama-adapter.h b/src/llama-adapter.h new file mode 100644 index 000000000..5f1870cc8 --- /dev/null +++ b/src/llama-adapter.h @@ -0,0 +1,66 @@ +#pragma once + +#include "llama-impl.h" +#include "llama-hparams.h" + +#include "ggml-cpp.h" + +#include +#include + +// +// llama_adapter_cvec +// + +// TODO: rename to llama_adapter_cvec +struct llama_control_vector { + std::vector ctxs; + std::vector bufs; + + std::vector tensors; // per layer + + int32_t layer_start = -1; + int32_t layer_end = -1; + + struct ggml_tensor * tensor_for(int il) const; + + struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const; +}; + +int32_t llama_control_vector_apply( + struct llama_control_vector & cvec, + const llama_model & model, + const float * data, + size_t len, + int32_t n_embd, + int32_t il_start, + int32_t il_end); + +// +// llama_adapter_lora +// + +// TODO: rename to llama_adapter_lora_weight +struct llama_lora_weight { + struct ggml_tensor * a = nullptr; + struct ggml_tensor * b = nullptr; + + llama_lora_weight() = default; + llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {} +}; + +// TODO: rename to llama_adapter_lora +struct llama_lora_adapter { + // map tensor name to lora_a_b + std::unordered_map ab_map; + + std::vector ctxs; + std::vector bufs; + + float alpha; + + llama_lora_adapter() = default; + ~llama_lora_adapter() = default; + + llama_lora_weight * get_weight(struct ggml_tensor * w); +}; diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp new file mode 100644 index 000000000..a60038385 --- /dev/null +++ b/src/llama-arch.cpp @@ -0,0 +1,1414 @@ +#include "llama-arch.h" + +#include "llama-impl.h" + +#include + +static const std::map LLM_ARCH_NAMES = { + { LLM_ARCH_LLAMA, "llama" }, + { LLM_ARCH_DECI, "deci" }, + { LLM_ARCH_FALCON, "falcon" }, + { LLM_ARCH_GROK, "grok" }, + { LLM_ARCH_GPT2, "gpt2" }, + { LLM_ARCH_GPTJ, "gptj" }, + { LLM_ARCH_GPTNEOX, "gptneox" }, + { LLM_ARCH_MPT, "mpt" }, + { LLM_ARCH_BAICHUAN, "baichuan" }, + { LLM_ARCH_STARCODER, "starcoder" }, + { LLM_ARCH_REFACT, "refact" }, + { LLM_ARCH_BERT, "bert" }, + { LLM_ARCH_NOMIC_BERT, "nomic-bert" }, + { LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" }, + { LLM_ARCH_BLOOM, "bloom" }, + { LLM_ARCH_STABLELM, "stablelm" }, + { LLM_ARCH_QWEN, "qwen" }, + { LLM_ARCH_QWEN2, "qwen2" }, + { LLM_ARCH_QWEN2MOE, "qwen2moe" }, + { LLM_ARCH_QWEN2VL, "qwen2vl" }, + { LLM_ARCH_PHI2, "phi2" }, + { LLM_ARCH_PHI3, "phi3" }, + { LLM_ARCH_PLAMO, "plamo" }, + { LLM_ARCH_CODESHELL, "codeshell" }, + { LLM_ARCH_ORION, "orion" }, + { LLM_ARCH_INTERNLM2, "internlm2" }, + { LLM_ARCH_MINICPM, "minicpm" }, + { LLM_ARCH_MINICPM3, "minicpm3" }, + { LLM_ARCH_GEMMA, "gemma" }, + { LLM_ARCH_GEMMA2, "gemma2" }, + { LLM_ARCH_STARCODER2, "starcoder2" }, + { LLM_ARCH_MAMBA, "mamba" }, + { LLM_ARCH_XVERSE, "xverse" }, + { LLM_ARCH_COMMAND_R, "command-r" }, + { LLM_ARCH_DBRX, "dbrx" }, + { LLM_ARCH_OLMO, "olmo" }, + { LLM_ARCH_OLMO2, "olmo2" }, + { LLM_ARCH_OLMOE, "olmoe" }, + { LLM_ARCH_OPENELM, "openelm" }, + { LLM_ARCH_ARCTIC, "arctic" }, + { LLM_ARCH_DEEPSEEK, "deepseek" }, + { LLM_ARCH_DEEPSEEK2, "deepseek2" }, + { LLM_ARCH_CHATGLM, "chatglm" }, + { LLM_ARCH_BITNET, "bitnet" }, + { LLM_ARCH_T5, "t5" }, + { LLM_ARCH_T5ENCODER, "t5encoder" }, + { LLM_ARCH_JAIS, "jais" }, + { LLM_ARCH_NEMOTRON, "nemotron" }, + { LLM_ARCH_EXAONE, "exaone" }, + { LLM_ARCH_RWKV6, "rwkv6" }, + { LLM_ARCH_GRANITE, "granite" }, + { LLM_ARCH_GRANITE_MOE, "granitemoe" }, + { LLM_ARCH_CHAMELEON, "chameleon" }, + { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" }, + { LLM_ARCH_UNKNOWN, "(unknown)" }, +}; + +static const std::map LLM_KV_NAMES = { + { LLM_KV_GENERAL_TYPE, "general.type" }, + { LLM_KV_GENERAL_ARCHITECTURE, "general.architecture" }, + { LLM_KV_GENERAL_QUANTIZATION_VERSION, "general.quantization_version" }, + { LLM_KV_GENERAL_ALIGNMENT, "general.alignment" }, + { LLM_KV_GENERAL_NAME, "general.name" }, + { LLM_KV_GENERAL_AUTHOR, "general.author" }, + { LLM_KV_GENERAL_VERSION, "general.version" }, + { LLM_KV_GENERAL_URL, "general.url" }, + { LLM_KV_GENERAL_DESCRIPTION, "general.description" }, + { LLM_KV_GENERAL_LICENSE, "general.license" }, + { LLM_KV_GENERAL_SOURCE_URL, "general.source.url" }, + { LLM_KV_GENERAL_SOURCE_HF_REPO, "general.source.huggingface.repository" }, + + { LLM_KV_VOCAB_SIZE, "%s.vocab_size" }, + { LLM_KV_CONTEXT_LENGTH, "%s.context_length" }, + { LLM_KV_EMBEDDING_LENGTH, "%s.embedding_length" }, + { LLM_KV_FEATURES_LENGTH, "%s.features_length" }, + { LLM_KV_BLOCK_COUNT, "%s.block_count" }, + { LLM_KV_LEADING_DENSE_BLOCK_COUNT, "%s.leading_dense_block_count" }, + { LLM_KV_FEED_FORWARD_LENGTH, "%s.feed_forward_length" }, + { LLM_KV_EXPERT_FEED_FORWARD_LENGTH, "%s.expert_feed_forward_length" }, + { LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, "%s.expert_shared_feed_forward_length" }, + { LLM_KV_USE_PARALLEL_RESIDUAL, "%s.use_parallel_residual" }, + { LLM_KV_TENSOR_DATA_LAYOUT, "%s.tensor_data_layout" }, + { LLM_KV_EXPERT_COUNT, "%s.expert_count" }, + { LLM_KV_EXPERT_USED_COUNT, "%s.expert_used_count" }, + { LLM_KV_EXPERT_SHARED_COUNT, "%s.expert_shared_count" }, + { LLM_KV_EXPERT_WEIGHTS_SCALE, "%s.expert_weights_scale" }, + { LLM_KV_POOLING_TYPE, "%s.pooling_type" }, + { LLM_KV_LOGIT_SCALE, "%s.logit_scale" }, + { LLM_KV_DECODER_START_TOKEN_ID, "%s.decoder_start_token_id" }, + { LLM_KV_ATTN_LOGIT_SOFTCAPPING, "%s.attn_logit_softcapping" }, + { LLM_KV_FINAL_LOGIT_SOFTCAPPING, "%s.final_logit_softcapping" }, + { LLM_KV_SWIN_NORM, "%s.swin_norm" }, + { LLM_KV_RESCALE_EVERY_N_LAYERS, "%s.rescale_every_n_layers" }, + { LLM_KV_TIME_MIX_EXTRA_DIM, "%s.time_mix_extra_dim" }, + { LLM_KV_TIME_DECAY_EXTRA_DIM, "%s.time_decay_extra_dim" }, + { LLM_KV_RESIDUAL_SCALE, "%s.residual_scale" }, + { LLM_KV_EMBEDDING_SCALE, "%s.embedding_scale" }, + + { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" }, + { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" }, + { LLM_KV_ATTENTION_MAX_ALIBI_BIAS, "%s.attention.max_alibi_bias" }, + { LLM_KV_ATTENTION_CLAMP_KQV, "%s.attention.clamp_kqv" }, + { LLM_KV_ATTENTION_KEY_LENGTH, "%s.attention.key_length" }, + { LLM_KV_ATTENTION_VALUE_LENGTH, "%s.attention.value_length" }, + { LLM_KV_ATTENTION_LAYERNORM_EPS, "%s.attention.layer_norm_epsilon" }, + { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, "%s.attention.layer_norm_rms_epsilon" }, + { LLM_KV_ATTENTION_GROUPNORM_EPS, "%s.attention.group_norm_epsilon" }, + { LLM_KV_ATTENTION_GROUPNORM_GROUPS, "%s.attention.group_norm_groups" }, + { LLM_KV_ATTENTION_CAUSAL, "%s.attention.causal" }, + { LLM_KV_ATTENTION_Q_LORA_RANK, "%s.attention.q_lora_rank" }, + { LLM_KV_ATTENTION_KV_LORA_RANK, "%s.attention.kv_lora_rank" }, + { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" }, + { LLM_KV_ATTENTION_SLIDING_WINDOW, "%s.attention.sliding_window" }, + { LLM_KV_ATTENTION_SCALE, "%s.attention.scale" }, + + { LLM_KV_ROPE_DIMENSION_COUNT, "%s.rope.dimension_count" }, + { LLM_KV_ROPE_DIMENSION_SECTIONS, "%s.rope.dimension_sections" }, + { LLM_KV_ROPE_FREQ_BASE, "%s.rope.freq_base" }, + { LLM_KV_ROPE_SCALE_LINEAR, "%s.rope.scale_linear" }, + { LLM_KV_ROPE_SCALING_TYPE, "%s.rope.scaling.type" }, + { LLM_KV_ROPE_SCALING_FACTOR, "%s.rope.scaling.factor" }, + { LLM_KV_ROPE_SCALING_ATTN_FACTOR, "%s.rope.scaling.attn_factor" }, + { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, "%s.rope.scaling.original_context_length" }, + { LLM_KV_ROPE_SCALING_FINETUNED, "%s.rope.scaling.finetuned" }, + { LLM_KV_ROPE_SCALING_YARN_LOG_MUL, "%s.rope.scaling.yarn_log_multiplier" }, + + { LLM_KV_SPLIT_NO, "split.no" }, + { LLM_KV_SPLIT_COUNT, "split.count" }, + { LLM_KV_SPLIT_TENSORS_COUNT, "split.tensors.count" }, + + { LLM_KV_SSM_CONV_KERNEL, "%s.ssm.conv_kernel" }, + { LLM_KV_SSM_INNER_SIZE, "%s.ssm.inner_size" }, + { LLM_KV_SSM_STATE_SIZE, "%s.ssm.state_size" }, + { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" }, + { LLM_KV_SSM_DT_B_C_RMS, "%s.ssm.dt_b_c_rms" }, + + { LLM_KV_WKV_HEAD_SIZE, "%s.wkv.head_size" }, + + { LLM_KV_POSNET_EMBEDDING_LENGTH, "%s.posnet.embedding_length" }, + { LLM_KV_POSNET_BLOCK_COUNT, "%s.posnet.block_count" }, + + { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" }, + { LLM_KV_CONVNEXT_BLOCK_COUNT, "%s.convnext.block_count" }, + + { LLM_KV_TOKENIZER_MODEL, "tokenizer.ggml.model" }, + { LLM_KV_TOKENIZER_PRE, "tokenizer.ggml.pre" }, + { LLM_KV_TOKENIZER_LIST, "tokenizer.ggml.tokens" }, + { LLM_KV_TOKENIZER_TOKEN_TYPE, "tokenizer.ggml.token_type" }, + { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, "tokenizer.ggml.token_type_count" }, + { LLM_KV_TOKENIZER_SCORES, "tokenizer.ggml.scores" }, + { LLM_KV_TOKENIZER_MERGES, "tokenizer.ggml.merges" }, + { LLM_KV_TOKENIZER_BOS_ID, "tokenizer.ggml.bos_token_id" }, + { LLM_KV_TOKENIZER_EOS_ID, "tokenizer.ggml.eos_token_id" }, + { LLM_KV_TOKENIZER_EOT_ID, "tokenizer.ggml.eot_token_id" }, + { LLM_KV_TOKENIZER_EOM_ID, "tokenizer.ggml.eom_token_id" }, + { LLM_KV_TOKENIZER_UNK_ID, "tokenizer.ggml.unknown_token_id" }, + { LLM_KV_TOKENIZER_SEP_ID, "tokenizer.ggml.seperator_token_id" }, + { LLM_KV_TOKENIZER_PAD_ID, "tokenizer.ggml.padding_token_id" }, + { LLM_KV_TOKENIZER_CLS_ID, "tokenizer.ggml.cls_token_id" }, + { LLM_KV_TOKENIZER_MASK_ID, "tokenizer.ggml.mask_token_id" }, + { LLM_KV_TOKENIZER_ADD_BOS, "tokenizer.ggml.add_bos_token" }, + { LLM_KV_TOKENIZER_ADD_EOS, "tokenizer.ggml.add_eos_token" }, + { LLM_KV_TOKENIZER_ADD_PREFIX, "tokenizer.ggml.add_space_prefix" }, + { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, "tokenizer.ggml.remove_extra_whitespaces" }, + { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" }, + { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" }, + { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" }, + { LLM_KV_TOKENIZER_FIM_PRE_ID, "tokenizer.ggml.fim_pre_token_id" }, + { LLM_KV_TOKENIZER_FIM_SUF_ID, "tokenizer.ggml.fim_suf_token_id" }, + { LLM_KV_TOKENIZER_FIM_MID_ID, "tokenizer.ggml.fim_mid_token_id" }, + { LLM_KV_TOKENIZER_FIM_PAD_ID, "tokenizer.ggml.fim_pad_token_id" }, + { LLM_KV_TOKENIZER_FIM_REP_ID, "tokenizer.ggml.fim_rep_token_id" }, + { LLM_KV_TOKENIZER_FIM_SEP_ID, "tokenizer.ggml.fim_sep_token_id" }, + + { LLM_KV_ADAPTER_TYPE, "adapter.type" }, + { LLM_KV_ADAPTER_LORA_ALPHA, "adapter.lora.alpha" }, + + // deprecated + { LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" }, + { LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" }, + { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" }, +}; + +static const std::map> LLM_TENSOR_NAMES = { + { + LLM_ARCH_LLAMA, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" }, + { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" }, + { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_DECI, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" }, + { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" }, + { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_BAICHUAN, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_FALCON, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_GROK, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" }, + { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" }, + { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + }, + }, + { + LLM_ARCH_GPT2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_POS_EMBD, "position_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_GPTJ, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + }, + }, + { + LLM_ARCH_GPTNEOX, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_MPT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output"}, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_ACT, "blk.%d.ffn.act" }, + { LLM_TENSOR_POS_EMBD, "position_embd" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm"}, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm"}, + }, + }, + { + LLM_ARCH_STARCODER, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_POS_EMBD, "position_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_REFACT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_BERT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_TOKEN_TYPES, "token_types" }, + { LLM_TENSOR_POS_EMBD, "position_embd" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_CLS, "cls" }, + { LLM_TENSOR_CLS_OUT, "cls.output" }, + }, + }, + { + LLM_ARCH_NOMIC_BERT, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_TOKEN_TYPES, "token_types" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_JINA_BERT_V2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_TOKEN_TYPES, "token_types" }, + { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_CLS, "cls" }, + }, + }, + { + LLM_ARCH_BLOOM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_STABLELM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + }, + }, + { + LLM_ARCH_QWEN, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_QWEN2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_QWEN2VL, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_QWEN2MOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + }, + }, + { + LLM_ARCH_PHI2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_PHI3, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" }, + { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_PLAMO, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_CODESHELL, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_ORION, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_INTERNLM2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_MINICPM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" }, + { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXP, "blk.%d.ffn_gate.%d" }, + { LLM_TENSOR_FFN_DOWN_EXP, "blk.%d.ffn_down.%d" }, + { LLM_TENSOR_FFN_UP_EXP, "blk.%d.ffn_up.%d" }, + }, + }, + { + LLM_ARCH_MINICPM3, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" }, + { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" }, + { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" }, + { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" }, + { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" }, + { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_GEMMA, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_GEMMA2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + }, + }, + { + LLM_ARCH_STARCODER2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_MAMBA, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_SSM_IN, "blk.%d.ssm_in" }, + { LLM_TENSOR_SSM_CONV1D, "blk.%d.ssm_conv1d" }, + { LLM_TENSOR_SSM_X, "blk.%d.ssm_x" }, + { LLM_TENSOR_SSM_DT, "blk.%d.ssm_dt" }, + { LLM_TENSOR_SSM_A, "blk.%d.ssm_a" }, + { LLM_TENSOR_SSM_D, "blk.%d.ssm_d" }, + { LLM_TENSOR_SSM_OUT, "blk.%d.ssm_out" }, + }, + }, + { + LLM_ARCH_XVERSE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_COMMAND_R, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + }, + }, + { + LLM_ARCH_DBRX, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_OLMO, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_OLMO2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_OLMOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_OPENELM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_ARCTIC, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_NORM_EXPS, "blk.%d.ffn_norm_exps" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_DEEPSEEK, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + }, + }, + { + LLM_ARCH_DEEPSEEK2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q_A_NORM, "blk.%d.attn_q_a_norm" }, + { LLM_TENSOR_ATTN_KV_A_NORM, "blk.%d.attn_kv_a_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_Q_A, "blk.%d.attn_q_a" }, + { LLM_TENSOR_ATTN_Q_B, "blk.%d.attn_q_b" }, + { LLM_TENSOR_ATTN_KV_A_MQA, "blk.%d.attn_kv_a_mqa" }, + { LLM_TENSOR_ATTN_KV_B, "blk.%d.attn_kv_b" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" }, + { LLM_TENSOR_FFN_GATE_SHEXP, "blk.%d.ffn_gate_shexp" }, + { LLM_TENSOR_FFN_DOWN_SHEXP, "blk.%d.ffn_down_shexp" }, + { LLM_TENSOR_FFN_UP_SHEXP, "blk.%d.ffn_up_shexp" }, + }, + }, + { + LLM_ARCH_CHATGLM, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_BITNET, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_SUB_NORM, "blk.%d.attn_sub_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_SUB_NORM, "blk.%d.ffn_sub_norm" }, + }, + }, + { + LLM_ARCH_T5, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_DEC_OUTPUT_NORM, "dec.output_norm" }, + { LLM_TENSOR_DEC_ATTN_NORM, "dec.blk.%d.attn_norm" }, + { LLM_TENSOR_DEC_ATTN_Q, "dec.blk.%d.attn_q" }, + { LLM_TENSOR_DEC_ATTN_K, "dec.blk.%d.attn_k" }, + { LLM_TENSOR_DEC_ATTN_V, "dec.blk.%d.attn_v" }, + { LLM_TENSOR_DEC_ATTN_OUT, "dec.blk.%d.attn_o" }, + { LLM_TENSOR_DEC_ATTN_REL_B, "dec.blk.%d.attn_rel_b" }, + { LLM_TENSOR_DEC_CROSS_ATTN_NORM, "dec.blk.%d.cross_attn_norm" }, + { LLM_TENSOR_DEC_CROSS_ATTN_Q, "dec.blk.%d.cross_attn_q" }, + { LLM_TENSOR_DEC_CROSS_ATTN_K, "dec.blk.%d.cross_attn_k" }, + { LLM_TENSOR_DEC_CROSS_ATTN_V, "dec.blk.%d.cross_attn_v" }, + { LLM_TENSOR_DEC_CROSS_ATTN_OUT, "dec.blk.%d.cross_attn_o" }, + { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" }, + { LLM_TENSOR_DEC_FFN_NORM, "dec.blk.%d.ffn_norm" }, + { LLM_TENSOR_DEC_FFN_GATE, "dec.blk.%d.ffn_gate" }, + { LLM_TENSOR_DEC_FFN_DOWN, "dec.blk.%d.ffn_down" }, + { LLM_TENSOR_DEC_FFN_UP, "dec.blk.%d.ffn_up" }, + { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" }, + { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" }, + { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" }, + { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" }, + { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" }, + { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" }, + { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" }, + { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" }, + { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" }, + { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" }, + { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_T5ENCODER, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ENC_OUTPUT_NORM, "enc.output_norm" }, + { LLM_TENSOR_ENC_ATTN_NORM, "enc.blk.%d.attn_norm" }, + { LLM_TENSOR_ENC_ATTN_Q, "enc.blk.%d.attn_q" }, + { LLM_TENSOR_ENC_ATTN_K, "enc.blk.%d.attn_k" }, + { LLM_TENSOR_ENC_ATTN_V, "enc.blk.%d.attn_v" }, + { LLM_TENSOR_ENC_ATTN_OUT, "enc.blk.%d.attn_o" }, + { LLM_TENSOR_ENC_ATTN_REL_B, "enc.blk.%d.attn_rel_b" }, + { LLM_TENSOR_ENC_FFN_NORM, "enc.blk.%d.ffn_norm" }, + { LLM_TENSOR_ENC_FFN_GATE, "enc.blk.%d.ffn_gate" }, + { LLM_TENSOR_ENC_FFN_DOWN, "enc.blk.%d.ffn_down" }, + { LLM_TENSOR_ENC_FFN_UP, "enc.blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_JAIS, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + }, + }, + { + LLM_ARCH_NEMOTRON, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_EXAONE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FREQS, "rope_freqs" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_RWKV6, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" }, + { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" }, + { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" }, + { LLM_TENSOR_TIME_MIX_LERP_X, "blk.%d.time_mix_lerp_x" }, + { LLM_TENSOR_TIME_MIX_LERP_W, "blk.%d.time_mix_lerp_w" }, + { LLM_TENSOR_TIME_MIX_LERP_K, "blk.%d.time_mix_lerp_k" }, + { LLM_TENSOR_TIME_MIX_LERP_V, "blk.%d.time_mix_lerp_v" }, + { LLM_TENSOR_TIME_MIX_LERP_R, "blk.%d.time_mix_lerp_r" }, + { LLM_TENSOR_TIME_MIX_LERP_G, "blk.%d.time_mix_lerp_g" }, + { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" }, + { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" }, + { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" }, + { LLM_TENSOR_TIME_MIX_DECAY_W2, "blk.%d.time_mix_decay_w2" }, + { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" }, + { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" }, + { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" }, + { LLM_TENSOR_TIME_MIX_GATE, "blk.%d.time_mix_gate" }, + { LLM_TENSOR_TIME_MIX_LN, "blk.%d.time_mix_ln" }, + { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" }, + { LLM_TENSOR_CHANNEL_MIX_LERP_K, "blk.%d.channel_mix_lerp_k" }, + { LLM_TENSOR_CHANNEL_MIX_LERP_R, "blk.%d.channel_mix_lerp_r" }, + { LLM_TENSOR_CHANNEL_MIX_KEY, "blk.%d.channel_mix_key" }, + { LLM_TENSOR_CHANNEL_MIX_VALUE, "blk.%d.channel_mix_value" }, + { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" }, + }, + }, + { + LLM_ARCH_GRANITE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, + { + LLM_ARCH_GRANITE_MOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, + { + LLM_ARCH_CHAMELEON, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" }, + { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" }, + }, + }, + { + LLM_ARCH_WAVTOKENIZER_DEC, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" }, + { LLM_TENSOR_CONV1D, "conv1d" }, + { LLM_TENSOR_CONVNEXT_DW, "convnext.%d.dw" }, + { LLM_TENSOR_CONVNEXT_NORM, "convnext.%d.norm" }, + { LLM_TENSOR_CONVNEXT_PW1, "convnext.%d.pw1" }, + { LLM_TENSOR_CONVNEXT_PW2, "convnext.%d.pw2" }, + { LLM_TENSOR_CONVNEXT_GAMMA, "convnext.%d.gamma" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_POS_NET_CONV1, "posnet.%d.conv1" }, + { LLM_TENSOR_POS_NET_CONV2, "posnet.%d.conv2" }, + { LLM_TENSOR_POS_NET_NORM, "posnet.%d.norm" }, + { LLM_TENSOR_POS_NET_NORM1, "posnet.%d.norm1" }, + { LLM_TENSOR_POS_NET_NORM2, "posnet.%d.norm2" }, + { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" }, + { LLM_TENSOR_POS_NET_ATTN_Q, "posnet.%d.attn_q" }, + { LLM_TENSOR_POS_NET_ATTN_K, "posnet.%d.attn_k" }, + { LLM_TENSOR_POS_NET_ATTN_V, "posnet.%d.attn_v" }, + { LLM_TENSOR_POS_NET_ATTN_OUT, "posnet.%d.attn_output" }, + }, + }, + { + LLM_ARCH_UNKNOWN, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + }, + }, +}; + +static const std::map LLM_TENSOR_INFOS = { + {LLM_TENSOR_TOKEN_EMBD, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_POS_EMBD, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_TOKEN_EMBD_NORM, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_TOKEN_TYPES, {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_OUTPUT, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CLS, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CLS_OUT, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}}, + {LLM_TENSOR_DEC_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}}, + {LLM_TENSOR_ENC_OUTPUT_NORM, {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}}, + {LLM_TENSOR_ROPE_FREQS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}}, + {LLM_TENSOR_ROPE_FACTORS_LONG, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}}, + {LLM_TENSOR_ROPE_FACTORS_SHORT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}}, + {LLM_TENSOR_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_QKV, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_DOWN_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_GATE_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_UP_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_Q_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_Q_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_KV_A_MQA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_KV_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_QKV, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_DOWN_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_GATE_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_UP_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_Q_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_Q_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_KV_A_MQA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ATTN_KV_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_CROSS_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_CROSS_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_CROSS_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_CROSS_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_DEC_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_FFN_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_FFN_DOWN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_ENC_FFN_UP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_GATE_INP_SHEXP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_GATE_INP, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_SSM_IN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_SSM_X, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_SSM_DT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_SSM_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_DECAY_W1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_DECAY_W2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_KEY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_VALUE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_RECEPTANCE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_GATE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_TIME_MIX_OUTPUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CHANNEL_MIX_KEY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CHANNEL_MIX_VALUE, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_FFN_ACT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}}, + {LLM_TENSOR_SSM_CONV1D, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}}, + {LLM_TENSOR_SSM_A, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}}, + {LLM_TENSOR_SSM_D, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_TIME_MIX_LERP_X, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_TIME_MIX_LN, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_CHANNEL_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_CHANNEL_MIX_LERP_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_TIME_MIX_LERP_W, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_LERP_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_LERP_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_LERP_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_LERP_G, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_DECAY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_FIRST, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}}, + {LLM_TENSOR_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_NORM_2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_OUT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_FFN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_FFN_POST_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_FFN_NORM_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_Q_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_K_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_LAYER_OUT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_Q_A_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_KV_A_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ATTN_SUB_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_FFN_SUB_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_DEC_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_DEC_CROSS_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_DEC_FFN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ENC_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_ENC_FFN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_DEC_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_ENC_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}}, + {LLM_TENSOR_FFN_DOWN_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, + {LLM_TENSOR_FFN_GATE_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, + {LLM_TENSOR_FFN_UP_EXPS, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}}, + // this tensor is loaded for T5, but never used + {LLM_TENSOR_DEC_CROSS_ATTN_REL_B, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}}, + {LLM_TENSOR_CONV1D, {LLM_TENSOR_LAYER_INPUT, GGML_OP_IM2COL}}, + {LLM_TENSOR_POS_NET_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_NORM1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_NORM2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_CONV1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}}, + {LLM_TENSOR_POS_NET_CONV2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}}, + {LLM_TENSOR_POS_NET_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_POS_NET_ATTN_Q, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_POS_NET_ATTN_K, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_POS_NET_ATTN_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_POS_NET_ATTN_OUT, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CONVNEXT_DW, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}}, + {LLM_TENSOR_CONVNEXT_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, + {LLM_TENSOR_CONVNEXT_PW1, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CONVNEXT_PW2, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}}, + {LLM_TENSOR_CONVNEXT_GAMMA, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, +}; + +LLM_KV::LLM_KV(llm_arch arch) : arch(arch) {} + +std::string LLM_KV::operator()(llm_kv kv) const { + return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch)); +} + +std::string LLM_TN_IMPL::str() const { + if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) { + return "__missing__"; + } + + std::string name = ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid, xid); + + if (suffix != nullptr) { + name += "."; + name += suffix; + } + + return name; +} + +const char * llm_arch_name(llm_arch arch) { + auto it = LLM_ARCH_NAMES.find(arch); + if (it == LLM_ARCH_NAMES.end()) { + return "unknown"; + } + return it->second; +} + +llm_arch llm_arch_from_string(const std::string & name) { + for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT + if (kv.second == name) { + return kv.first; + } + } + + return LLM_ARCH_UNKNOWN; +} + +const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) { + return LLM_TENSOR_INFOS.at(tensor); +} diff --git a/src/llama-arch.h b/src/llama-arch.h new file mode 100644 index 000000000..446e72eeb --- /dev/null +++ b/src/llama-arch.h @@ -0,0 +1,391 @@ +#pragma once + +#include "ggml.h" // ggml_op + +#include + +// +// gguf constants (sync with gguf.py) +// + +enum llm_arch { + LLM_ARCH_LLAMA, + LLM_ARCH_DECI, + LLM_ARCH_FALCON, + LLM_ARCH_BAICHUAN, + LLM_ARCH_GROK, + LLM_ARCH_GPT2, + LLM_ARCH_GPTJ, + LLM_ARCH_GPTNEOX, + LLM_ARCH_MPT, + LLM_ARCH_STARCODER, + LLM_ARCH_REFACT, + LLM_ARCH_BERT, + LLM_ARCH_NOMIC_BERT, + LLM_ARCH_JINA_BERT_V2, + LLM_ARCH_BLOOM, + LLM_ARCH_STABLELM, + LLM_ARCH_QWEN, + LLM_ARCH_QWEN2, + LLM_ARCH_QWEN2MOE, + LLM_ARCH_QWEN2VL, + LLM_ARCH_PHI2, + LLM_ARCH_PHI3, + LLM_ARCH_PLAMO, + LLM_ARCH_CODESHELL, + LLM_ARCH_ORION, + LLM_ARCH_INTERNLM2, + LLM_ARCH_MINICPM, + LLM_ARCH_MINICPM3, + LLM_ARCH_GEMMA, + LLM_ARCH_GEMMA2, + LLM_ARCH_STARCODER2, + LLM_ARCH_MAMBA, + LLM_ARCH_XVERSE, + LLM_ARCH_COMMAND_R, + LLM_ARCH_DBRX, + LLM_ARCH_OLMO, + LLM_ARCH_OLMO2, + LLM_ARCH_OLMOE, + LLM_ARCH_OPENELM, + LLM_ARCH_ARCTIC, + LLM_ARCH_DEEPSEEK, + LLM_ARCH_DEEPSEEK2, + LLM_ARCH_CHATGLM, + LLM_ARCH_BITNET, + LLM_ARCH_T5, + LLM_ARCH_T5ENCODER, + LLM_ARCH_JAIS, + LLM_ARCH_NEMOTRON, + LLM_ARCH_EXAONE, + LLM_ARCH_RWKV6, + LLM_ARCH_GRANITE, + LLM_ARCH_GRANITE_MOE, + LLM_ARCH_CHAMELEON, + LLM_ARCH_WAVTOKENIZER_DEC, + LLM_ARCH_UNKNOWN, +}; + +enum llm_kv { + LLM_KV_GENERAL_TYPE, + LLM_KV_GENERAL_ARCHITECTURE, + LLM_KV_GENERAL_QUANTIZATION_VERSION, + LLM_KV_GENERAL_ALIGNMENT, + LLM_KV_GENERAL_NAME, + LLM_KV_GENERAL_AUTHOR, + LLM_KV_GENERAL_VERSION, + LLM_KV_GENERAL_URL, + LLM_KV_GENERAL_DESCRIPTION, + LLM_KV_GENERAL_LICENSE, + LLM_KV_GENERAL_SOURCE_URL, + LLM_KV_GENERAL_SOURCE_HF_REPO, + + LLM_KV_VOCAB_SIZE, + LLM_KV_CONTEXT_LENGTH, + LLM_KV_EMBEDDING_LENGTH, + LLM_KV_FEATURES_LENGTH, + LLM_KV_BLOCK_COUNT, + LLM_KV_LEADING_DENSE_BLOCK_COUNT, + LLM_KV_FEED_FORWARD_LENGTH, + LLM_KV_EXPERT_FEED_FORWARD_LENGTH, + LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, + LLM_KV_USE_PARALLEL_RESIDUAL, + LLM_KV_TENSOR_DATA_LAYOUT, + LLM_KV_EXPERT_COUNT, + LLM_KV_EXPERT_USED_COUNT, + LLM_KV_EXPERT_SHARED_COUNT, + LLM_KV_EXPERT_WEIGHTS_SCALE, + LLM_KV_POOLING_TYPE, + LLM_KV_LOGIT_SCALE, + LLM_KV_DECODER_START_TOKEN_ID, + LLM_KV_ATTN_LOGIT_SOFTCAPPING, + LLM_KV_FINAL_LOGIT_SOFTCAPPING, + LLM_KV_SWIN_NORM, + LLM_KV_RESCALE_EVERY_N_LAYERS, + LLM_KV_TIME_MIX_EXTRA_DIM, + LLM_KV_TIME_DECAY_EXTRA_DIM, + LLM_KV_RESIDUAL_SCALE, + LLM_KV_EMBEDDING_SCALE, + + LLM_KV_ATTENTION_HEAD_COUNT, + LLM_KV_ATTENTION_HEAD_COUNT_KV, + LLM_KV_ATTENTION_MAX_ALIBI_BIAS, + LLM_KV_ATTENTION_CLAMP_KQV, + LLM_KV_ATTENTION_KEY_LENGTH, + LLM_KV_ATTENTION_VALUE_LENGTH, + LLM_KV_ATTENTION_LAYERNORM_EPS, + LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, + LLM_KV_ATTENTION_GROUPNORM_EPS, + LLM_KV_ATTENTION_GROUPNORM_GROUPS, + LLM_KV_ATTENTION_CAUSAL, + LLM_KV_ATTENTION_Q_LORA_RANK, + LLM_KV_ATTENTION_KV_LORA_RANK, + LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, + LLM_KV_ATTENTION_SLIDING_WINDOW, + LLM_KV_ATTENTION_SCALE, + + LLM_KV_ROPE_DIMENSION_COUNT, + LLM_KV_ROPE_DIMENSION_SECTIONS, + LLM_KV_ROPE_FREQ_BASE, + LLM_KV_ROPE_SCALE_LINEAR, + LLM_KV_ROPE_SCALING_TYPE, + LLM_KV_ROPE_SCALING_FACTOR, + LLM_KV_ROPE_SCALING_ATTN_FACTOR, + LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, + LLM_KV_ROPE_SCALING_FINETUNED, + LLM_KV_ROPE_SCALING_YARN_LOG_MUL, + + LLM_KV_SPLIT_NO, + LLM_KV_SPLIT_COUNT, + LLM_KV_SPLIT_TENSORS_COUNT, + + LLM_KV_SSM_INNER_SIZE, + LLM_KV_SSM_CONV_KERNEL, + LLM_KV_SSM_STATE_SIZE, + LLM_KV_SSM_TIME_STEP_RANK, + LLM_KV_SSM_DT_B_C_RMS, + + LLM_KV_WKV_HEAD_SIZE, + + LLM_KV_TOKENIZER_MODEL, + LLM_KV_TOKENIZER_PRE, + LLM_KV_TOKENIZER_LIST, + LLM_KV_TOKENIZER_TOKEN_TYPE, + LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, + LLM_KV_TOKENIZER_SCORES, + LLM_KV_TOKENIZER_MERGES, + LLM_KV_TOKENIZER_BOS_ID, + LLM_KV_TOKENIZER_EOS_ID, + LLM_KV_TOKENIZER_EOT_ID, + LLM_KV_TOKENIZER_EOM_ID, + LLM_KV_TOKENIZER_UNK_ID, + LLM_KV_TOKENIZER_SEP_ID, + LLM_KV_TOKENIZER_PAD_ID, + LLM_KV_TOKENIZER_CLS_ID, + LLM_KV_TOKENIZER_MASK_ID, + LLM_KV_TOKENIZER_ADD_BOS, + LLM_KV_TOKENIZER_ADD_EOS, + LLM_KV_TOKENIZER_ADD_PREFIX, + LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, + LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, + LLM_KV_TOKENIZER_HF_JSON, + LLM_KV_TOKENIZER_RWKV, + LLM_KV_TOKENIZER_FIM_PRE_ID, + LLM_KV_TOKENIZER_FIM_SUF_ID, + LLM_KV_TOKENIZER_FIM_MID_ID, + LLM_KV_TOKENIZER_FIM_PAD_ID, + LLM_KV_TOKENIZER_FIM_REP_ID, + LLM_KV_TOKENIZER_FIM_SEP_ID, + + LLM_KV_ADAPTER_TYPE, + LLM_KV_ADAPTER_LORA_ALPHA, + + LLM_KV_POSNET_EMBEDDING_LENGTH, + LLM_KV_POSNET_BLOCK_COUNT, + + LLM_KV_CONVNEXT_EMBEDDING_LENGTH, + LLM_KV_CONVNEXT_BLOCK_COUNT, + + // deprecated: + LLM_KV_TOKENIZER_PREFIX_ID, + LLM_KV_TOKENIZER_SUFFIX_ID, + LLM_KV_TOKENIZER_MIDDLE_ID, +}; + +enum llm_tensor { + LLM_TENSOR_TOKEN_EMBD, + LLM_TENSOR_TOKEN_EMBD_NORM, + LLM_TENSOR_TOKEN_TYPES, + LLM_TENSOR_POS_EMBD, + LLM_TENSOR_OUTPUT, + LLM_TENSOR_OUTPUT_NORM, + LLM_TENSOR_ROPE_FREQS, + LLM_TENSOR_ROPE_FACTORS_LONG, + LLM_TENSOR_ROPE_FACTORS_SHORT, + LLM_TENSOR_ATTN_Q, + LLM_TENSOR_ATTN_K, + LLM_TENSOR_ATTN_V, + LLM_TENSOR_ATTN_QKV, + LLM_TENSOR_ATTN_OUT, + LLM_TENSOR_ATTN_NORM, + LLM_TENSOR_ATTN_NORM_2, + LLM_TENSOR_ATTN_OUT_NORM, + LLM_TENSOR_ATTN_POST_NORM, + LLM_TENSOR_ATTN_ROT_EMBD, + LLM_TENSOR_FFN_GATE_INP, + LLM_TENSOR_FFN_GATE_INP_SHEXP, + LLM_TENSOR_FFN_NORM, + LLM_TENSOR_FFN_POST_NORM, + LLM_TENSOR_FFN_GATE, + LLM_TENSOR_FFN_DOWN, + LLM_TENSOR_FFN_UP, + LLM_TENSOR_FFN_ACT, + LLM_TENSOR_FFN_DOWN_EXP, // split experts for backward compatibility + LLM_TENSOR_FFN_GATE_EXP, + LLM_TENSOR_FFN_UP_EXP, + LLM_TENSOR_FFN_NORM_EXPS, + LLM_TENSOR_FFN_DOWN_EXPS, // merged experts + LLM_TENSOR_FFN_GATE_EXPS, + LLM_TENSOR_FFN_UP_EXPS, + LLM_TENSOR_FFN_DOWN_SHEXP, + LLM_TENSOR_FFN_GATE_SHEXP, + LLM_TENSOR_FFN_UP_SHEXP, + LLM_TENSOR_ATTN_Q_NORM, + LLM_TENSOR_ATTN_K_NORM, + LLM_TENSOR_LAYER_OUT_NORM, + LLM_TENSOR_SSM_IN, + LLM_TENSOR_SSM_CONV1D, + LLM_TENSOR_SSM_X, + LLM_TENSOR_SSM_DT, + LLM_TENSOR_SSM_A, + LLM_TENSOR_SSM_D, + LLM_TENSOR_SSM_OUT, + LLM_TENSOR_TIME_MIX_W1, + LLM_TENSOR_TIME_MIX_W2, + LLM_TENSOR_TIME_MIX_LERP_X, + LLM_TENSOR_TIME_MIX_LERP_W, + LLM_TENSOR_TIME_MIX_LERP_K, + LLM_TENSOR_TIME_MIX_LERP_V, + LLM_TENSOR_TIME_MIX_LERP_R, + LLM_TENSOR_TIME_MIX_LERP_G, + LLM_TENSOR_TIME_MIX_FIRST, + LLM_TENSOR_TIME_MIX_DECAY, + LLM_TENSOR_TIME_MIX_DECAY_W1, + LLM_TENSOR_TIME_MIX_DECAY_W2, + LLM_TENSOR_TIME_MIX_KEY, + LLM_TENSOR_TIME_MIX_VALUE, + LLM_TENSOR_TIME_MIX_RECEPTANCE, + LLM_TENSOR_TIME_MIX_GATE, + LLM_TENSOR_TIME_MIX_LN, + LLM_TENSOR_TIME_MIX_OUTPUT, + LLM_TENSOR_CHANNEL_MIX_LERP_K, + LLM_TENSOR_CHANNEL_MIX_LERP_R, + LLM_TENSOR_CHANNEL_MIX_KEY, + LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, + LLM_TENSOR_CHANNEL_MIX_VALUE, + LLM_TENSOR_ATTN_Q_A, + LLM_TENSOR_ATTN_Q_B, + LLM_TENSOR_ATTN_KV_A_MQA, + LLM_TENSOR_ATTN_KV_B, + LLM_TENSOR_ATTN_Q_A_NORM, + LLM_TENSOR_ATTN_KV_A_NORM, + LLM_TENSOR_ATTN_SUB_NORM, + LLM_TENSOR_FFN_SUB_NORM, + LLM_TENSOR_DEC_ATTN_NORM, + LLM_TENSOR_DEC_ATTN_Q, + LLM_TENSOR_DEC_ATTN_K, + LLM_TENSOR_DEC_ATTN_V, + LLM_TENSOR_DEC_ATTN_OUT, + LLM_TENSOR_DEC_ATTN_REL_B, + LLM_TENSOR_DEC_CROSS_ATTN_NORM, + LLM_TENSOR_DEC_CROSS_ATTN_Q, + LLM_TENSOR_DEC_CROSS_ATTN_K, + LLM_TENSOR_DEC_CROSS_ATTN_V, + LLM_TENSOR_DEC_CROSS_ATTN_OUT, + LLM_TENSOR_DEC_CROSS_ATTN_REL_B, + LLM_TENSOR_DEC_FFN_NORM, + LLM_TENSOR_DEC_FFN_GATE, + LLM_TENSOR_DEC_FFN_DOWN, + LLM_TENSOR_DEC_FFN_UP, + LLM_TENSOR_DEC_OUTPUT_NORM, + LLM_TENSOR_ENC_ATTN_NORM, + LLM_TENSOR_ENC_ATTN_Q, + LLM_TENSOR_ENC_ATTN_K, + LLM_TENSOR_ENC_ATTN_V, + LLM_TENSOR_ENC_ATTN_OUT, + LLM_TENSOR_ENC_ATTN_REL_B, + LLM_TENSOR_ENC_FFN_NORM, + LLM_TENSOR_ENC_FFN_GATE, + LLM_TENSOR_ENC_FFN_DOWN, + LLM_TENSOR_ENC_FFN_UP, + LLM_TENSOR_ENC_OUTPUT_NORM, + LLM_TENSOR_CLS, + LLM_TENSOR_CLS_OUT, + LLM_TENSOR_CONV1D, + LLM_TENSOR_CONVNEXT_DW, + LLM_TENSOR_CONVNEXT_NORM, + LLM_TENSOR_CONVNEXT_PW1, + LLM_TENSOR_CONVNEXT_PW2, + LLM_TENSOR_CONVNEXT_GAMMA, + LLM_TENSOR_POS_NET_CONV1, + LLM_TENSOR_POS_NET_CONV2, + LLM_TENSOR_POS_NET_NORM, + LLM_TENSOR_POS_NET_NORM1, + LLM_TENSOR_POS_NET_NORM2, + LLM_TENSOR_POS_NET_ATTN_NORM, + LLM_TENSOR_POS_NET_ATTN_Q, + LLM_TENSOR_POS_NET_ATTN_K, + LLM_TENSOR_POS_NET_ATTN_V, + LLM_TENSOR_POS_NET_ATTN_OUT, +}; + +enum llm_tensor_layer { + LLM_TENSOR_LAYER_INPUT, + LLM_TENSOR_LAYER_REPEATING, + LLM_TENSOR_LAYER_OUTPUT, +}; + +struct LLM_KV { + LLM_KV(llm_arch arch); + + llm_arch arch; + + std::string operator()(llm_kv kv) const; +}; + +// helper to handle gguf constants +// usage: +// +// const auto tn = LLM_TN(LLM_ARCH_LLAMA); +// +// std::string name = tn(LLM_TENSOR_OUTPUT); -> "output" +// std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias"); -> "token_embd.bias" +// std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3); -> "blk.3.attn_norm.weight" +// +struct LLM_TN_IMPL { + const llm_arch arch; + const llm_tensor tensor; + const char * const suffix; + const int bid; + const int xid; + + std::string str() const; + + operator std::string() const { + return str(); + } + + friend bool operator==(const std::string & str, const LLM_TN_IMPL & tn) { + return str == tn.str(); + } + + friend bool operator!=(const std::string & str, const LLM_TN_IMPL & tn) { + return str != tn.str(); + } +}; + +struct LLM_TN { + LLM_TN(llm_arch arch) : arch(arch) {} + + llm_arch arch; + + LLM_TN_IMPL operator()(llm_tensor tensor, const char * suffix, int bid = -1, int xid = -1) const { + return { arch, tensor, suffix, bid, xid }; + } + + LLM_TN_IMPL operator()(llm_tensor tensor, int bid = -1, int xid = -1) const { + return { arch, tensor, nullptr, bid, xid }; + } +}; + + +struct llm_tensor_info { + llm_tensor_layer layer; + ggml_op op; +}; + +const char * llm_arch_name(llm_arch arch); + +llm_arch llm_arch_from_string(const std::string & name); + +const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor); diff --git a/src/llama-batch.cpp b/src/llama-batch.cpp new file mode 100644 index 000000000..01d5ca57f --- /dev/null +++ b/src/llama-batch.cpp @@ -0,0 +1,368 @@ +#include "llama-batch.h" + +#include +#include + +llama_ubatch llama_sbatch::reserve_ubatch(size_t n_ubatch, bool has_embd) { + // clear empty sequences + // the previous ubatch is assumed to be gone, + // so nothing should refer to values in these sequences anymore. + for (size_t i = seq.size(); i-- > 0;) { + if (seq[i].length == 0) { + seq.pop_back(); + } else { + break; + } + } + ubatch_token.resize(!has_embd ? n_ubatch : 0); + ubatch_embd.resize(has_embd ? n_embd * n_ubatch : 0); + ubatch_pos.resize(n_ubatch); + ubatch_n_seq_id.resize(n_ubatch); + ubatch_seq_id.resize(n_ubatch); + ubatch_output.resize(n_ubatch); + llama_ubatch ubatch = { + /*equal_seqs =*/ true, + /*n_tokens =*/ 0, + /*n_seq_tokens =*/ 0, + /*n_seqs =*/ 0, + /*token =*/ !has_embd ? ubatch_token.data() : nullptr, + /*embd =*/ has_embd ? ubatch_embd.data() : nullptr, + /*pos =*/ ubatch_pos.data(), + /*n_seq_id =*/ ubatch_n_seq_id.data(), + /*seq_id =*/ ubatch_seq_id.data(), + /*output =*/ ubatch_output.data(), + }; + return ubatch; +} + +void llama_sbatch::add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) { + GGML_ASSERT(batch != nullptr); + GGML_ASSERT(length <= seq.length); + // Can only add sequences of equal lengths to a batch, + // otherwise it isn't clear to which sequence a token belongs + GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs); + GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs); + // NOTE: loops are separated for cache-friendliness + if (batch->token) { + if (ubatch.equal_seqs) { + for (size_t i = 0; i < length; ++i) { + ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]]; + } + } else { + // simple split + ubatch.token = batch->token + seq.offset; + } + } else { + ubatch.token = nullptr; + } + if (batch->embd) { + if (ubatch.equal_seqs) { + for (size_t i = 0; i < length; ++i) { + memcpy( + ubatch.embd + (n_embd * (ubatch.n_tokens + i)), + batch->embd + (n_embd * ids[seq.offset + i]), + n_embd * sizeof(float) + ); + } + } else { + // simple split + ubatch.embd = batch->embd + (n_embd * seq.offset); + } + } else { + ubatch.embd = nullptr; + } + if (ubatch.equal_seqs) { + for (size_t i = 0; i < length; ++i) { + ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]]; + } + } else { + // simple split + ubatch.pos = batch->pos + seq.offset; + } + if (ubatch.equal_seqs) { + ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id; + if (seq.seq_id) { + ubatch.seq_id[ubatch.n_seqs] = seq.seq_id; + } + } else { + // simple split + if (batch->n_seq_id) { + ubatch.n_seq_id = batch->n_seq_id + seq.offset; + } else { + for (size_t i = 0; i < length; ++i) { + ubatch.n_seq_id[ubatch.n_seqs + i] = 1; + } + } + if (batch->seq_id) { + ubatch.seq_id = batch->seq_id + seq.offset; + } + } + if (logits_all) { + for (size_t i = 0; i < length; ++i) { + ubatch.output[ubatch.n_tokens + i] = 1; + out_ids.push_back(ids[seq.offset + i]); + } + } else if (batch->logits) { + if (ubatch.equal_seqs) { + for (size_t i = 0; i < length; ++i) { + size_t id = ids[seq.offset + i]; + int8_t is_output = batch->logits[id]; + ubatch.output[ubatch.n_tokens + i] = is_output; + if (is_output) { out_ids.push_back(id); } + } + } else { + // simple split + ubatch.output = batch->logits + seq.offset; + for (size_t i = 0; i < length; ++i) { + if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); } + } + } + } else { + // only get last output + for (size_t i = 0; i < length; ++i) { + size_t id = ids[seq.offset + i]; + int8_t is_last = id == ids.size() - 1; + ubatch.output[ubatch.n_tokens + i] = is_last; + if (is_last) { out_ids.push_back(id); } + } + } + if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) { + ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1; + } + ubatch.n_tokens += length; + ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits + seq.offset += length; + seq.length -= length; + n_tokens -= length; + GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs); +} + +llama_ubatch llama_sbatch::split_simple(size_t n_ubatch) { + n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; + llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); + ubatch.equal_seqs = false; + if (!seq.empty()) { + llama_sbatch_seq & s = seq[0]; + size_t length = s.length < n_ubatch ? s.length : n_ubatch; + GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits + add_seq_to_ubatch(ubatch, s, length); + } + return ubatch; +} + +llama_ubatch llama_sbatch::split_equal(size_t n_ubatch) { + n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; + llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); + if (!seq.empty()) { + size_t length = 0; + size_t n_tokens_in_ubatch = 0; + GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits + // smallest first, because it's easier to split this way; + // starting from the end to pop in constant time. + for (size_t i = seq.size(); i-- > 0;) { + llama_sbatch_seq & s = seq[i]; + GGML_ASSERT(s.length > 0); + if (length == 0) { + length = s.length < n_ubatch ? s.length : n_ubatch; + } + add_seq_to_ubatch(ubatch, s, length); + n_tokens_in_ubatch += length; + // shared prompts can't be mixed with any of their sequences, + // so it's safer to compute them in their own ubatch + if (s.n_seq_id > 1) { break; } + // stop when there isn't enough space for another sequence + if (length + n_tokens_in_ubatch > n_ubatch) { break; } + } + } + return ubatch; +} + +llama_ubatch llama_sbatch::split_seq(size_t n_ubatch) { + n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch; + llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr); + if (!seq.empty()) { + llama_sbatch_seq & s = seq[seq.size() - 1]; + size_t length = s.length < n_ubatch ? s.length : n_ubatch; + GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits + add_seq_to_ubatch(ubatch, s, length); + } + return ubatch; +} + +void llama_sbatch::from_batch(const llama_batch & batch, size_t n_embd, bool simple_split, bool logits_all) { + GGML_ASSERT(batch.n_tokens >= 0); + this->batch = &batch; + this->n_embd = n_embd; + this->logits_all = logits_all; + + n_tokens = batch.n_tokens; + ids.resize(n_tokens); + out_ids.clear(); + // TODO: reserve out_ids and seq + + for (size_t i = 0; i < n_tokens; ++i) { + ids[i] = i; + } + if (simple_split) { + seq.resize(1); + llama_sbatch_seq & s = seq[0]; + s.n_seq_id = 0; + s.seq_id = nullptr; + s.offset = 0; + s.length = n_tokens; + return; + } + std::sort(ids.begin(), ids.end(), + [&batch](size_t a, size_t b) { + int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1; + int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1; + // sort by seq_id, then by pos + if (n_seq_a == n_seq_b) { + if (batch.seq_id) { + for (int32_t i = 0; i < n_seq_a; ++i) { + llama_seq_id seq_id_a = batch.seq_id[a][i]; + llama_seq_id seq_id_b = batch.seq_id[b][i]; + // smaller seq_ids go first + if (seq_id_a != seq_id_b) { + return seq_id_a < seq_id_b; + } + } + } + // when all else is equal, sort by pos + if (batch.pos) { + return batch.pos[a] < batch.pos[b]; + } + // no pos, sort by id + return a < b; + } + // shared prompts go first + return n_seq_a > n_seq_b; + } + ); + // init seq + llama_sbatch_seq * last_seq = nullptr; + + for (size_t i = 0; i < n_tokens; ++i) { + const size_t bi = ids[i]; + const int32_t n_seqs = batch.n_seq_id[bi]; + llama_seq_id * seq_ids = batch.seq_id[bi]; + if (last_seq != nullptr) { + bool same = n_seqs == last_seq->n_seq_id; + for (int32_t j = 0; same && j < n_seqs; ++j) { + if (seq_ids[j] != last_seq->seq_id[j]) { + same = false; + } + } + if (same) { + last_seq->length += 1; + continue; + } + } + llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1}; + seq.push_back(new_seq); + last_seq = &seq.back(); + } + // keep shared prompts first at the end, then sort by length descending. + std::sort(seq.begin(), seq.end(), + [](llama_sbatch_seq & a, llama_sbatch_seq & b) { + if (a.n_seq_id == b.n_seq_id) { + return a.length > b.length; + } + return a.n_seq_id < b.n_seq_id; + } + ); +} + +llama_batch_allocr::llama_batch_allocr(struct llama_batch in_batch, llama_pos p0) { + batch = in_batch; + GGML_ASSERT(batch.n_tokens > 0); + if (!batch.pos) { + pos.resize(batch.n_tokens); + for (int32_t i = 0; i < batch.n_tokens; i++) { + pos[i] = i + p0; + } + batch.pos = pos.data(); + } + if (!batch.n_seq_id) { + n_seq_id.resize(batch.n_tokens); + for (int32_t i = 0; i < batch.n_tokens; i++) { + n_seq_id[i] = seq_id_0.size(); + } + batch.n_seq_id = n_seq_id.data(); + } + if (!batch.seq_id) { + seq_id.resize(batch.n_tokens + 1); + seq_id[batch.n_tokens] = NULL; + for (int32_t i = 0; i < batch.n_tokens; i++) { + seq_id[i] = seq_id_0.data(); + } + batch.seq_id = seq_id.data(); + } + if (!batch.logits) { + logits.resize(batch.n_tokens); + logits[logits.size() - 1] = true; + batch.logits = logits.data(); + } +} + +// +// interface implementation +// + +struct llama_batch llama_batch_get_one( + llama_token * tokens, + int32_t n_tokens) { + return { + /*n_tokens =*/ n_tokens, + /*tokens =*/ tokens, + /*embd =*/ nullptr, + /*pos =*/ nullptr, + /*n_seq_id =*/ nullptr, + /*seq_id =*/ nullptr, + /*logits =*/ nullptr, + }; +} + +struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) { + llama_batch batch = { + /*n_tokens =*/ 0, + /*tokens =*/ nullptr, + /*embd =*/ nullptr, + /*pos =*/ nullptr, + /*n_seq_id =*/ nullptr, + /*seq_id =*/ nullptr, + /*logits =*/ nullptr, + }; + + if (embd) { + batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd); + } else { + batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc); + } + + batch.pos = (llama_pos *) malloc(sizeof(llama_pos) * n_tokens_alloc); + batch.n_seq_id = (int32_t *) malloc(sizeof(int32_t) * n_tokens_alloc); + batch.seq_id = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1)); + for (int i = 0; i < n_tokens_alloc; ++i) { + batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max); + } + batch.seq_id[n_tokens_alloc] = nullptr; + + batch.logits = (int8_t *) malloc(sizeof(int8_t) * n_tokens_alloc); + + return batch; +} + +void llama_batch_free(struct llama_batch batch) { + if (batch.token) free(batch.token); + if (batch.embd) free(batch.embd); + if (batch.pos) free(batch.pos); + if (batch.n_seq_id) free(batch.n_seq_id); + if (batch.seq_id) { + for (int i = 0; batch.seq_id[i] != nullptr; ++i) { + free(batch.seq_id[i]); + } + free(batch.seq_id); + } + if (batch.logits) free(batch.logits); +} diff --git a/src/llama-batch.h b/src/llama-batch.h new file mode 100644 index 000000000..773c3808b --- /dev/null +++ b/src/llama-batch.h @@ -0,0 +1,88 @@ +#pragma once + +#include "llama.h" + +#include +#include + +// very similar to llama_batch, +// but has more metadata about sequences +struct llama_ubatch { + bool equal_seqs; + // TODO: whole_seqs for embeddings? + + uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs) + uint32_t n_seq_tokens; // tokens per sequence + uint32_t n_seqs; + + llama_token * token; // [n_tokens] + float * embd; // [n_embd, n_tokens] + llama_pos * pos; // [n_tokens] + int32_t * n_seq_id; // [n_seqs] + llama_seq_id ** seq_id; // [n_seqs] + int8_t * output; // [n_tokens] +}; + +struct llama_sbatch_seq { + int32_t n_seq_id; + + llama_seq_id * seq_id; + + size_t offset; + size_t length; +}; + +// sequence-length-aware batch splitting +struct llama_sbatch { + // tokens left in this batch + size_t n_tokens; + + size_t n_embd; + + bool logits_all; // TODO: remove once lctx.logits_all is removed too + + // sorted indices into the batch + std::vector ids; + // batch indices of the output + std::vector out_ids; + std::vector seq; + + const llama_batch * batch = nullptr; + + // buffers for the ubatch + std::vector ubatch_token; + std::vector ubatch_embd; + std::vector ubatch_pos; + std::vector ubatch_n_seq_id; + std::vector ubatch_seq_id; + std::vector ubatch_output; + + llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false); + + void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length); + + // simple split, unknown number of sequences of unequal lengths + llama_ubatch split_simple(size_t n_ubatch); + + // make batches of equal-length sequences + llama_ubatch split_equal(size_t n_ubatch); + + // sequence-wise split + llama_ubatch split_seq(size_t n_ubatch); + + void from_batch(const llama_batch & batch, size_t n_embd, bool simple_split = false, bool logits_all = false); +}; + +// temporary allocate memory for the input batch if needed +struct llama_batch_allocr { + struct llama_batch batch; + + std::array seq_id_0 = { 0 }; // default sequence id + std::vector pos; + std::vector n_seq_id; + std::vector seq_id; + std::vector logits; + + // optionally fulfill the batch returned by llama_batch_get_one + llama_batch_allocr(struct llama_batch in_batch, llama_pos p0); +}; diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp new file mode 100644 index 000000000..a07e9cf00 --- /dev/null +++ b/src/llama-chat.cpp @@ -0,0 +1,549 @@ +#include "llama-chat.h" + +#include "llama.h" + +#include +#include + +#if __cplusplus >= 202000L + #define LU8(x) (const char*)(u8##x) +#else + #define LU8(x) u8##x +#endif + +// trim whitespace from the beginning and end of a string +static std::string trim(const std::string & str) { + size_t start = 0; + size_t end = str.size(); + while (start < end && isspace(str[start])) { + start += 1; + } + while (end > start && isspace(str[end - 1])) { + end -= 1; + } + return str.substr(start, end - start); +} + +static const std::map LLM_CHAT_TEMPLATES = { + { "chatml", LLM_CHAT_TEMPLATE_CHATML }, + { "llama2", LLM_CHAT_TEMPLATE_LLAMA_2 }, + { "llama2-sys", LLM_CHAT_TEMPLATE_LLAMA_2_SYS }, + { "llama2-sys-bos", LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS }, + { "llama2-sys-strip", LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP }, + { "mistral-v1", LLM_CHAT_TEMPLATE_MISTRAL_V1 }, + { "mistral-v3", LLM_CHAT_TEMPLATE_MISTRAL_V3 }, + { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN }, + { "mistral-v7", LLM_CHAT_TEMPLATE_MISTRAL_V7 }, + { "phi3", LLM_CHAT_TEMPLATE_PHI_3 }, + { "falcon3", LLM_CHAT_TEMPLATE_FALCON_3 }, + { "zephyr", LLM_CHAT_TEMPLATE_ZEPHYR }, + { "monarch", LLM_CHAT_TEMPLATE_MONARCH }, + { "gemma", LLM_CHAT_TEMPLATE_GEMMA }, + { "orion", LLM_CHAT_TEMPLATE_ORION }, + { "openchat", LLM_CHAT_TEMPLATE_OPENCHAT }, + { "vicuna", LLM_CHAT_TEMPLATE_VICUNA }, + { "vicuna-orca", LLM_CHAT_TEMPLATE_VICUNA_ORCA }, + { "deepseek", LLM_CHAT_TEMPLATE_DEEPSEEK }, + { "deepseek2", LLM_CHAT_TEMPLATE_DEEPSEEK_2 }, + { "command-r", LLM_CHAT_TEMPLATE_COMMAND_R }, + { "llama3", LLM_CHAT_TEMPLATE_LLAMA_3 }, + { "chatglm3", LLM_CHAT_TEMPLATE_CHATGML_3 }, + { "chatglm4", LLM_CHAT_TEMPLATE_CHATGML_4 }, + { "minicpm", LLM_CHAT_TEMPLATE_MINICPM }, + { "exaone3", LLM_CHAT_TEMPLATE_EXAONE_3 }, + { "rwkv-world", LLM_CHAT_TEMPLATE_RWKV_WORLD }, + { "granite", LLM_CHAT_TEMPLATE_GRANITE }, + { "gigachat", LLM_CHAT_TEMPLATE_GIGACHAT }, + { "megrez", LLM_CHAT_TEMPLATE_MEGREZ }, +}; + +llm_chat_template llm_chat_template_from_str(const std::string & name) { + return LLM_CHAT_TEMPLATES.at(name); +} + +llm_chat_template llm_chat_detect_template(const std::string & tmpl) { + try { + return llm_chat_template_from_str(tmpl); + } catch (const std::out_of_range &) { + // ignore + } + + auto tmpl_contains = [&tmpl](const char * haystack) -> bool { + return tmpl.find(haystack) != std::string::npos; + }; + if (tmpl_contains("<|im_start|>")) { + return LLM_CHAT_TEMPLATE_CHATML; + } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) { + if (tmpl_contains("[SYSTEM_PROMPT]")) { + return LLM_CHAT_TEMPLATE_MISTRAL_V7; + } else if ( + // catches official 'v1' template + tmpl_contains("' [INST] ' + system_message") + // catches official 'v3' and 'v3-tekken' templates + || tmpl_contains("[AVAILABLE_TOOLS]") + ) { + // Official mistral 'v1', 'v3' and 'v3-tekken' templates + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md + if (tmpl_contains(" [INST]")) { + return LLM_CHAT_TEMPLATE_MISTRAL_V1; + } else if (tmpl_contains("\"[INST]\"")) { + return LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN; + } + return LLM_CHAT_TEMPLATE_MISTRAL_V3; + } else { + // llama2 template and its variants + // [variant] support system message + // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2 + bool support_system_message = tmpl_contains("<>"); + bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]"); + bool strip_message = tmpl_contains("content.strip()"); + if (strip_message) { + return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP; + } else if (add_bos_inside_history) { + return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS; + } else if (support_system_message) { + return LLM_CHAT_TEMPLATE_LLAMA_2_SYS; + } else { + return LLM_CHAT_TEMPLATE_LLAMA_2; + } + } + } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { + return LLM_CHAT_TEMPLATE_PHI_3; + } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { + return LLM_CHAT_TEMPLATE_FALCON_3; + } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) { + return LLM_CHAT_TEMPLATE_ZEPHYR; + } else if (tmpl_contains("bos_token + message['role']")) { + return LLM_CHAT_TEMPLATE_MONARCH; + } else if (tmpl_contains("")) { + return LLM_CHAT_TEMPLATE_GEMMA; + } else if (tmpl_contains("'\\n\\nAssistant: ' + eos_token")) { + // OrionStarAI/Orion-14B-Chat + return LLM_CHAT_TEMPLATE_ORION; + } else if (tmpl_contains("GPT4 Correct ")) { + // openchat/openchat-3.5-0106 + return LLM_CHAT_TEMPLATE_OPENCHAT; + } else if (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: ")) { + // eachadea/vicuna-13b-1.1 (and Orca variant) + if (tmpl_contains("SYSTEM: ")) { + return LLM_CHAT_TEMPLATE_VICUNA_ORCA; + } + return LLM_CHAT_TEMPLATE_VICUNA; + } else if (tmpl_contains("### Instruction:") && tmpl_contains("<|EOT|>")) { + // deepseek-ai/deepseek-coder-33b-instruct + return LLM_CHAT_TEMPLATE_DEEPSEEK; + } else if (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>")) { + // CohereForAI/c4ai-command-r-plus + return LLM_CHAT_TEMPLATE_COMMAND_R; + } else if (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>")) { + return LLM_CHAT_TEMPLATE_LLAMA_3; + } else if (tmpl_contains("[gMASK]sop")) { + // chatglm3-6b + return LLM_CHAT_TEMPLATE_CHATGML_3; + } else if (tmpl_contains("[gMASK]")) { + return LLM_CHAT_TEMPLATE_CHATGML_4; + } else if (tmpl_contains(LU8("<用户>"))) { + // MiniCPM-3B-OpenHermes-2.5-v2-GGUF + return LLM_CHAT_TEMPLATE_MINICPM; + } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) { + return LLM_CHAT_TEMPLATE_DEEPSEEK_2; + } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) { + // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb + // EXAONE-3.0-7.8B-Instruct + return LLM_CHAT_TEMPLATE_EXAONE_3; + } else if (tmpl_contains("rwkv-world")) { + return LLM_CHAT_TEMPLATE_RWKV_WORLD; + } else if (tmpl_contains("<|start_of_role|>")) { + return LLM_CHAT_TEMPLATE_GRANITE; + } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) { + return LLM_CHAT_TEMPLATE_GIGACHAT; + } else if (tmpl_contains("<|role_start|>")) { + return LLM_CHAT_TEMPLATE_MEGREZ; + } + return LLM_CHAT_TEMPLATE_UNKNOWN; +} + +// Simple version of "llama_apply_chat_template" that only works with strings +// This function uses heuristic checks to determine commonly used template. It is not a jinja parser. +int32_t llm_chat_apply_template( + llm_chat_template tmpl, + const std::vector & chat, + std::string & dest, bool add_ass) { + // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527 + std::stringstream ss; + if (tmpl == LLM_CHAT_TEMPLATE_CHATML) { + // chatml template + for (auto message : chat) { + ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n"; + } + if (add_ass) { + ss << "<|im_start|>assistant\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7) { + // Official mistral 'v7' template + // See: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7 + for (auto message : chat) { + std::string role(message->role); + std::string content(message->content); + if (role == "system") { + ss << "[SYSTEM_PROMPT] " << content << "[/SYSTEM_PROMPT]"; + } else if (role == "user") { + ss << "[INST] " << content << "[/INST]"; + } + else { + ss << " " << content << ""; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 + || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3 + || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN) { + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md + // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md + std::string leading_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 ? " " : ""; + std::string trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN ? "" : " "; + bool trim_assistant_message = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3; + bool is_inside_turn = false; + for (auto message : chat) { + if (!is_inside_turn) { + ss << leading_space << "[INST]" << trailing_space; + is_inside_turn = true; + } + std::string role(message->role); + std::string content(message->content); + if (role == "system") { + ss << content << "\n\n"; + } else if (role == "user") { + ss << content << leading_space << "[/INST]"; + } else { + ss << trailing_space << (trim_assistant_message ? trim(content) : content) << ""; + is_inside_turn = false; + } + } + } else if ( + tmpl == LLM_CHAT_TEMPLATE_LLAMA_2 + || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS + || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS + || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP) { + // llama2 template and its variants + // [variant] support system message + // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2 + bool support_system_message = tmpl != LLM_CHAT_TEMPLATE_LLAMA_2; + // [variant] add BOS inside history + bool add_bos_inside_history = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS; + // [variant] trim spaces from the input message + bool strip_message = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP; + // construct the prompt + bool is_inside_turn = true; // skip BOS at the beginning + ss << "[INST] "; + for (auto message : chat) { + std::string content = strip_message ? trim(message->content) : message->content; + std::string role(message->role); + if (!is_inside_turn) { + is_inside_turn = true; + ss << (add_bos_inside_history ? "[INST] " : "[INST] "); + } + if (role == "system") { + if (support_system_message) { + ss << "<>\n" << content << "\n<>\n\n"; + } else { + // if the model does not support system message, we still include it in the first message, but without <> + ss << content << "\n"; + } + } else if (role == "user") { + ss << content << " [/INST]"; + } else { + ss << content << ""; + is_inside_turn = false; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_3) { + // Phi 3 + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>\n" << message->content << "<|end|>\n"; + } + if (add_ass) { + ss << "<|assistant|>\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) { + // Falcon 3 + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>\n" << message->content << "\n"; + } + if (add_ass) { + ss << "<|assistant|>\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) { + // zephyr template + for (auto message : chat) { + ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n"; + } + if (add_ass) { + ss << "<|assistant|>\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MONARCH) { + // mlabonne/AlphaMonarch-7B template (the is included inside history) + for (auto message : chat) { + std::string bos = (message == chat.front()) ? "" : ""; // skip BOS for first message + ss << bos << message->role << "\n" << message->content << "\n"; + } + if (add_ass) { + ss << "assistant\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_GEMMA) { + // google/gemma-7b-it + std::string system_prompt = ""; + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken + system_prompt = trim(message->content); + continue; + } + // in gemma, "assistant" is "model" + role = role == "assistant" ? "model" : message->role; + ss << "" << role << "\n"; + if (!system_prompt.empty() && role != "model") { + ss << system_prompt << "\n\n"; + system_prompt = ""; + } + ss << trim(message->content) << "\n"; + } + if (add_ass) { + ss << "model\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_ORION) { + // OrionStarAI/Orion-14B-Chat + std::string system_prompt = ""; + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + // there is no system message support, we will merge it with user prompt + system_prompt = message->content; + continue; + } else if (role == "user") { + ss << "Human: "; + if (!system_prompt.empty()) { + ss << system_prompt << "\n\n"; + system_prompt = ""; + } + ss << message->content << "\n\nAssistant: "; + } else { + ss << message->content << ""; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_OPENCHAT) { + // openchat/openchat-3.5-0106, + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << message->content << "<|end_of_turn|>"; + } else { + role[0] = toupper(role[0]); + ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>"; + } + } + if (add_ass) { + ss << "GPT4 Correct Assistant:"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_VICUNA || tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) { + // eachadea/vicuna-13b-1.1 (and Orca variant) + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + // Orca-Vicuna variant uses a system prefix + if (tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) { + ss << "SYSTEM: " << message->content << "\n"; + } else { + ss << message->content << "\n\n"; + } + } else if (role == "user") { + ss << "USER: " << message->content << "\n"; + } else if (role == "assistant") { + ss << "ASSISTANT: " << message->content << "\n"; + } + } + if (add_ass) { + ss << "ASSISTANT:"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK) { + // deepseek-ai/deepseek-coder-33b-instruct + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << message->content; + } else if (role == "user") { + ss << "### Instruction:\n" << message->content << "\n"; + } else if (role == "assistant") { + ss << "### Response:\n" << message->content << "\n<|EOT|>\n"; + } + } + if (add_ass) { + ss << "### Response:\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_COMMAND_R) { + // CohereForAI/c4ai-command-r-plus + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>"; + } else if (role == "user") { + ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>"; + } else if (role == "assistant") { + ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>"; + } + } + if (add_ass) { + ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA_3) { + // Llama 3 + for (auto message : chat) { + std::string role(message->role); + ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>"; + } + if (add_ass) { + ss << "<|start_header_id|>assistant<|end_header_id|>\n\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_3) { + // chatglm3-6b + ss << "[gMASK]" << "sop"; + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>" << "\n " << message->content; + } + if (add_ass) { + ss << "<|assistant|>"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) { + ss << "[gMASK]" << ""; + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>" << "\n" << message->content; + } + if (add_ass) { + ss << "<|assistant|>"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) { + // MiniCPM-3B-OpenHermes-2.5-v2-GGUF + for (auto message : chat) { + std::string role(message->role); + if (role == "user") { + ss << LU8("<用户>"); + ss << trim(message->content); + ss << ""; + } else { + ss << trim(message->content); + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_2) { + // DeepSeek-V2 + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << message->content << "\n\n"; + } else if (role == "user") { + ss << "User: " << message->content << "\n\n"; + } else if (role == "assistant") { + ss << "Assistant: " << message->content << LU8("<|end▁of▁sentence|>"); + } + } + if (add_ass) { + ss << "Assistant:"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) { + // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb + // EXAONE-3.0-7.8B-Instruct + for (auto message : chat) { + std::string role(message->role); + if (role == "system") { + ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n"; + } else if (role == "user") { + ss << "[|user|]" << trim(message->content) << "\n"; + } else if (role == "assistant") { + ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n"; + } + } + if (add_ass) { + ss << "[|assistant|]"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) { + // this template requires the model to have "\n\n" as EOT token + for (auto message : chat) { + std::string role(message->role); + if (role == "user") { + ss << "User: " << message->content << "\n\nAssistant:"; + } else { + ss << message->content << "\n\n"; + } + } + } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) { + // IBM Granite template + for (const auto & message : chat) { + std::string role(message->role); + ss << "<|start_of_role|>" << role << "<|end_of_role|>"; + if (role == "assistant_tool_call") { + ss << "<|tool_call|>"; + } + ss << message->content << "<|end_of_text|>\n"; + } + if (add_ass) { + ss << "<|start_of_role|>assistant<|end_of_role|>\n"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_GIGACHAT) { + // GigaChat template + bool has_system = !chat.empty() && std::string(chat[0]->role) == "system"; + + // Handle system message if present + if (has_system) { + ss << "" << chat[0]->content << "<|message_sep|>"; + } else { + ss << ""; + } + + // Process remaining messages + for (size_t i = has_system ? 1 : 0; i < chat.size(); i++) { + std::string role(chat[i]->role); + if (role == "user") { + ss << "user<|role_sep|>" << chat[i]->content << "<|message_sep|>" + << "available functions<|role_sep|>[]<|message_sep|>"; + } else if (role == "assistant") { + ss << "assistant<|role_sep|>" << chat[i]->content << "<|message_sep|>"; + } + } + + // Add generation prompt if needed + if (add_ass) { + ss << "assistant<|role_sep|>"; + } + } else if (tmpl == LLM_CHAT_TEMPLATE_MEGREZ) { + // Megrez template + for (auto message : chat) { + std::string role(message->role); + ss << "<|role_start|>" << role << "<|role_end|>" << message->content << "<|turn_end|>"; + } + + if (add_ass) { + ss << "<|role_start|>assistant<|role_end|>"; + } + } else { + // template not supported + return -1; + } + dest = ss.str(); + return dest.size(); +} + +// public interface + +int32_t llama_chat_builtin_templates(const char ** output, size_t len) { + auto it = LLM_CHAT_TEMPLATES.begin(); + for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) { + output[i] = it->first.c_str(); + std::advance(it, 1); + } + return (int32_t) LLM_CHAT_TEMPLATES.size(); +} + diff --git a/src/llama-chat.h b/src/llama-chat.h new file mode 100644 index 000000000..364318c27 --- /dev/null +++ b/src/llama-chat.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include + +enum llm_chat_template { + LLM_CHAT_TEMPLATE_CHATML, + LLM_CHAT_TEMPLATE_LLAMA_2, + LLM_CHAT_TEMPLATE_LLAMA_2_SYS, + LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS, + LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP, + LLM_CHAT_TEMPLATE_MISTRAL_V1, + LLM_CHAT_TEMPLATE_MISTRAL_V3, + LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN, + LLM_CHAT_TEMPLATE_MISTRAL_V7, + LLM_CHAT_TEMPLATE_PHI_3, + LLM_CHAT_TEMPLATE_FALCON_3, + LLM_CHAT_TEMPLATE_ZEPHYR, + LLM_CHAT_TEMPLATE_MONARCH, + LLM_CHAT_TEMPLATE_GEMMA, + LLM_CHAT_TEMPLATE_ORION, + LLM_CHAT_TEMPLATE_OPENCHAT, + LLM_CHAT_TEMPLATE_VICUNA, + LLM_CHAT_TEMPLATE_VICUNA_ORCA, + LLM_CHAT_TEMPLATE_DEEPSEEK, + LLM_CHAT_TEMPLATE_DEEPSEEK_2, + LLM_CHAT_TEMPLATE_COMMAND_R, + LLM_CHAT_TEMPLATE_LLAMA_3, + LLM_CHAT_TEMPLATE_CHATGML_3, + LLM_CHAT_TEMPLATE_CHATGML_4, + LLM_CHAT_TEMPLATE_MINICPM, + LLM_CHAT_TEMPLATE_EXAONE_3, + LLM_CHAT_TEMPLATE_RWKV_WORLD, + LLM_CHAT_TEMPLATE_GRANITE, + LLM_CHAT_TEMPLATE_GIGACHAT, + LLM_CHAT_TEMPLATE_MEGREZ, + LLM_CHAT_TEMPLATE_UNKNOWN, +}; + +struct llama_chat_message; + +llm_chat_template llm_chat_template_from_str(const std::string & name); + +llm_chat_template llm_chat_detect_template(const std::string & tmpl); + +int32_t llm_chat_apply_template( + llm_chat_template tmpl, + const std::vector & chat, + std::string & dest, bool add_ass); diff --git a/src/llama-context.cpp b/src/llama-context.cpp new file mode 100644 index 000000000..38a55fb2c --- /dev/null +++ b/src/llama-context.cpp @@ -0,0 +1,1771 @@ +#include "llama-context.h" + +#include +#include +#include +#include + +void llama_set_k_shift(struct llama_context & lctx) { + const int64_t kv_size = lctx.kv_self.size; + + assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer)); + + int32_t * data = (int32_t *) lctx.inp_K_shift->data; + + for (int i = 0; i < kv_size; ++i) { + data[i] = lctx.kv_self.cells[i].delta; + } +} + +void llama_set_s_copy(struct llama_context & lctx) { + const int64_t kv_size = lctx.kv_self.size; + + assert(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer)); + + int32_t * data = (int32_t *) lctx.inp_s_copy->data; + + for (int i = 0; i < kv_size; ++i) { + data[i] = lctx.kv_self.cells[i].src; + } +} + +// llama input + +static int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) { + // TODO move to hparams if a T5 variant appears that uses a different value + const int64_t max_distance = 128; + + if (bidirectional) { + n_buckets >>= 1; + } + + const int64_t max_exact = n_buckets >> 1; + + int32_t relative_position = x - y; + int32_t relative_bucket = 0; + if (bidirectional) { + relative_bucket += (relative_position > 0) * n_buckets; + relative_position = abs(relative_position); + } else { + relative_position = -std::min(relative_position, 0); + } + int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact)); + relative_position_if_large = std::min(relative_position_if_large, n_buckets - 1); + relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large); + return relative_bucket; +} + +void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch) { + // + // set input data + // + + const auto & hparams = lctx.model.hparams; + const auto & cparams = lctx.cparams; + const auto & kv_self = lctx.kv_self; + + if (ubatch.token) { + const int64_t n_tokens = ubatch.n_tokens; + + ggml_backend_tensor_set(lctx.inp_tokens, ubatch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens)); + } + + if (ubatch.embd) { + const int64_t n_embd = hparams.n_embd; + const int64_t n_tokens = ubatch.n_tokens; + + ggml_backend_tensor_set(lctx.inp_embd, ubatch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd)); + } + + if (ubatch.pos && lctx.inp_pos) { + const int64_t n_tokens = ubatch.n_tokens; + auto n_pos = lctx.n_pos_per_token; + ggml_backend_tensor_set(lctx.inp_pos, ubatch.pos, 0, n_tokens*n_pos*ggml_element_size(lctx.inp_pos)); + } + + if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) { + //GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs"); + + if (!lctx.inp_out_ids) { + LLAMA_LOG_WARN("%s: 'lctx.inp_out_ids' is not created\n", __func__); + } else { + const int64_t n_tokens = ubatch.n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer)); + int32_t * data = (int32_t *) lctx.inp_out_ids->data; + + if (lctx.n_outputs == n_tokens) { + for (int i = 0; i < n_tokens; ++i) { + data[i] = i; + } + } else if (ubatch.output) { + int32_t n_outputs = 0; + for (int i = 0; i < n_tokens; ++i) { + if (ubatch.output[i]) { + data[n_outputs++] = i; + } + } + // the graph needs to have been passed the correct number of outputs + GGML_ASSERT(lctx.n_outputs == n_outputs); + } else if (lctx.n_outputs == 1) { + // only keep last output + data[0] = n_tokens - 1; + } else { + GGML_ASSERT(lctx.n_outputs == 0); + } + } + } + + GGML_ASSERT( + // (!a || b) is a logical implication (a -> b) + // !hparams.causal_attn -> !cparams.causal_attn + (hparams.causal_attn || !cparams.causal_attn) && + "causal attention is not supported by this model" + ); + + if (lctx.inp_KQ_mask || lctx.inp_KQ_mask_swa) { + // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache. + if (cparams.causal_attn && !lctx.is_encoding) { + const int64_t n_kv = kv_self.n; + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + + + float * data = nullptr; + float * data_swa = nullptr; + + if (lctx.inp_KQ_mask) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer)); + data = (float *) lctx.inp_KQ_mask->data; + } + + if (lctx.inp_KQ_mask_swa) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_swa->buffer)); + data_swa = (float *) lctx.inp_KQ_mask_swa->data; + } + + // For causal attention, use only the previous KV cells + // of the correct sequence for each token of the ubatch. + // It's assumed that if a token in the batch has multiple sequences, they are equivalent. + for (int h = 0; h < 1; ++h) { + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + for (int j = 0; j < n_seq_tokens; ++j) { + const llama_pos pos = ubatch.pos[s*n_seq_tokens + j]; + + for (int i = 0; i < n_kv; ++i) { + float f; + if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) { + f = -INFINITY; + } else { + if (hparams.use_alibi) { + f = -std::abs(kv_self.cells[i].pos - pos); + } else { + f = 0.0f; + } + } + + if (data) { + data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; + } + + // may need to cut off old tokens for sliding window + if (data_swa) { + if (pos - kv_self.cells[i].pos >= (int32_t)hparams.n_swa) { + f = -INFINITY; + } + data_swa[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f; + } + } + } + } + + if (data) { + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_kv; ++j) { + data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; + } + } + } + + if (data_swa) { + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_kv; ++j) { + data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY; + } + } + } + } + } else { + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + // when using kv cache, the mask needs to match the kv cache size + const int64_t n_stride = hparams.causal_attn && !lctx.is_encoding ? kv_self.n : n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer)); + + float * data = (float *) lctx.inp_KQ_mask->data; + + for (int h = 0; h < 1; ++h) { + for (int s1 = 0; s1 < n_seqs; ++s1) { + const llama_seq_id seq_id = ubatch.seq_id[s1][0]; + + for (int j = 0; j < n_seq_tokens; ++j) { + const int32_t tj = s1*n_seq_tokens + j; + + for (int s0 = 0; s0 < n_seqs; ++s0) { + for (int i = 0; i < n_seq_tokens; ++i) { + const int32_t ti = s0*n_seq_tokens + i; + float f = -INFINITY; + + for (int s = 0; s < ubatch.n_seq_id[s0]; ++s) { + if (ubatch.seq_id[s0][s] == seq_id) { + if (hparams.use_alibi) { + f = -std::abs(ubatch.pos[ti] - ubatch.pos[tj]); + } else { + f = 0.0f; + } + break; + } + } + + data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f; + } + } + + for (int i = n_tokens; i < n_stride; ++i) { + data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY; + } + } + } + } + } + } + + if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) { + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + + GGML_ASSERT(lctx.inp_mean); + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer)); + + float * data = (float *) lctx.inp_mean->data; + memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean)); + + std::vector sum(n_tokens, 0); + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true + GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN"); + + sum[seq_id] += ubatch.n_seq_tokens; + } + + std::vector div(n_tokens, 0.0f); + for (int i = 0; i < n_tokens; ++i) { + const uint64_t s = sum[i]; + if (s > 0) { + div[i] = 1.0f/float(s); + } + } + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + for (int i = 0; i < n_seq_tokens; ++i) { + data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id]; + } + } + } + + if (cparams.embeddings && ( + cparams.pooling_type == LLAMA_POOLING_TYPE_CLS || + cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) { + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + + GGML_ASSERT(lctx.inp_cls); + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); + + uint32_t * data = (uint32_t *) lctx.inp_cls->data; + memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls)); + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true + GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK"); + + for (int i = 0; i < n_seq_tokens; ++i) { + const llama_pos pos = ubatch.pos[s*n_seq_tokens + i]; + + if (pos == 0) { + data[seq_id] = s*n_seq_tokens + i; + } + } + } + } + + if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) { + const int64_t n_tokens = ubatch.n_tokens; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_seqs = ubatch.n_seqs; + + GGML_ASSERT(lctx.inp_cls); + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer)); + + uint32_t * data = (uint32_t *) lctx.inp_cls->data; + memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls)); + + std::vector last_pos(n_tokens, -1); + std::vector last_row(n_tokens, -1); + + for (int s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[s][0]; + + // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true + GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST"); + + for (int i = 0; i < n_seq_tokens; ++i) { + const llama_pos pos = ubatch.pos[s*n_seq_tokens + i]; + + if (pos >= last_pos[seq_id]) { + last_pos[seq_id] = pos; + last_row[seq_id] = s*n_seq_tokens + i; + } + } + } + + for (int i = 0; i < n_tokens; ++i) { + if (last_row[i] >= 0) { + data[i] = last_row[i]; + } + } + } + + if (kv_self.recurrent) { + const int64_t n_kv = kv_self.n; + + if (lctx.inp_s_mask) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_mask->buffer)); + float * data = (float *) lctx.inp_s_mask->data; + + // clear unused states + for (int i = 0; i < n_kv; ++i) { + const uint32_t cell_id = i + kv_self.head; + llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id]; + + data[i] = (float) (kv_cell.src >= 0); + + // only clear once + if (kv_cell.src < 0) { + kv_cell.src = cell_id; + } + } + } + + if (lctx.inp_s_copy) { + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer)); + int32_t * data = (int32_t *) lctx.inp_s_copy->data; + + // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n + for (uint32_t i = 0; i < n_kv; ++i) { + const uint32_t cell_id = i + kv_self.head; + llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id]; + + // prevent out-of-bound sources + if (kv_cell.src < 0 || (uint32_t) kv_cell.src >= kv_self.size) { + kv_cell.src = cell_id; + } + + data[i] = kv_cell.src; + + // ensure copy only happens once + if (kv_cell.src != (int32_t) cell_id) { + kv_cell.src = cell_id; + } + } + } + } + + if (lctx.inp_pos_bucket) { + const int64_t n_tokens = ubatch.n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_pos_bucket->buffer)); + GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing + + int32_t * data = (int32_t *) lctx.inp_pos_bucket->data; + + if (!lctx.is_encoding) { + const int64_t n_kv = kv_self.n; + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + for (int i = 0; i < n_kv; ++i) { + data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(lctx.kv_self.cells[i].pos, ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding); + } + } + } + } else { + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + for (int i = 0; i < n_tokens; ++i) { + data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch.pos[i], ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding); + } + } + } + } + } + + if (!lctx.is_encoding && lctx.inp_embd_enc) { + assert(lctx.inp_embd_enc->type == GGML_TYPE_F32); + assert((size_t) ggml_nelements(lctx.inp_embd_enc) == lctx.embd_enc.size()); + + ggml_backend_tensor_set(lctx.inp_embd_enc, lctx.embd_enc.data(), 0, ggml_nbytes(lctx.inp_embd_enc)); + } + + if (!lctx.is_encoding && lctx.inp_KQ_mask_cross) { + const int64_t n_output_enc = lctx.embd_enc.size() / hparams.n_embd; + const int64_t n_tokens = ubatch.n_tokens; + + GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_cross->buffer)); + GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing + + float * data = (float *) lctx.inp_KQ_mask_cross->data; + + for (int h = 0; h < 1; ++h) { + for (int j = 0; j < n_tokens; ++j) { + for (int i = 0; i < n_output_enc; ++i) { + float f = -INFINITY; + for (int s = 0; s < ubatch.n_seq_id[j]; ++s) { + const llama_seq_id seq_id = ubatch.seq_id[j][s]; + if (lctx.seq_ids_enc[i].find(seq_id) != lctx.seq_ids_enc[i].end()) { + f = 0.0f; + } + } + data[h*(n_output_enc*n_tokens) + j*n_output_enc + i] = f; + } + } + + for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) { + for (int j = 0; j < n_output_enc; ++j) { + data[h*(n_output_enc*n_tokens) + i*n_output_enc + j] = -INFINITY; + } + } + } + } +} + +// llama output + +size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs) { + const auto & cparams = lctx.cparams; + const auto & hparams = lctx.model.hparams; + + const size_t n_outputs_max = std::max(n_outputs, (size_t) cparams.n_seq_max); + + const auto n_batch = cparams.n_batch; + const auto n_vocab = hparams.n_vocab; + const auto n_embd = hparams.n_embd; + + // TODO: use a per-batch flag for logits presence instead + const bool has_logits = !cparams.embeddings; + const bool has_embd = cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE); + + const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0; + const size_t embd_size = has_embd ? n_embd*n_outputs_max : 0; + + if (lctx.output_ids.empty()) { + // init, never resized afterwards + lctx.output_ids.resize(n_batch); + } + + const size_t prev_size = lctx.buf_output ? ggml_backend_buffer_get_size(lctx.buf_output.get()) : 0; + const size_t new_size = (logits_size + embd_size) * sizeof(float); + + // alloc only when more than the current capacity is required + // TODO: also consider shrinking the buffer + if (!lctx.buf_output || prev_size < new_size) { + if (lctx.buf_output) { +#ifndef NDEBUG + // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark) + LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); +#endif + lctx.buf_output = nullptr; + lctx.logits = nullptr; + lctx.embd = nullptr; + } + + auto * buft = ggml_backend_cpu_buffer_type(); + // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory + auto * output_dev = lctx.model.dev_output.dev; + auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr; + if (output_dev_host_buft) { + buft = output_dev_host_buft; + } + lctx.buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size)); + if (lctx.buf_output == nullptr) { + LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0)); + return 0; + } + } + + float * output_base = (float *) ggml_backend_buffer_get_base(lctx.buf_output.get()); + + lctx.logits = has_logits ? output_base : nullptr; + lctx.embd = has_embd ? output_base + logits_size : nullptr; + + lctx.output_size = n_outputs_max; + lctx.logits_size = logits_size; + lctx.embd_size = embd_size; + + // set all ids as invalid (negative) + std::fill(lctx.output_ids.begin(), lctx.output_ids.end(), -1); + + ggml_backend_buffer_clear(lctx.buf_output.get(), 0); + + lctx.n_outputs = 0; + + return n_outputs_max; +} + +void llama_output_reorder(struct llama_context & ctx) { + std::vector & out_ids = ctx.sbatch.out_ids; + if (!out_ids.empty()) { + const uint32_t n_vocab = ctx.model.hparams.n_vocab; + const uint32_t n_embd = ctx.model.hparams.n_embd; + + const int32_t n_outputs = ctx.n_outputs; + GGML_ASSERT((size_t) n_outputs == out_ids.size()); + + // TODO: is there something more efficient which also minimizes swaps? + // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort) + for (int32_t i = 0; i < n_outputs - 1; ++i) { + int32_t j_min = i; + for (int32_t j = i + 1; j < n_outputs; ++j) { + if (out_ids[j] < out_ids[j_min]) { + j_min = j; + } + } + if (j_min == i) { continue; } + std::swap(out_ids[i], out_ids[j_min]); + if (ctx.logits_size > 0) { + for (uint32_t k = 0; k < n_vocab; k++) { + std::swap(ctx.logits[i*n_vocab + k], ctx.logits[j_min*n_vocab + k]); + } + } + if (ctx.embd_size > 0) { + for (uint32_t k = 0; k < n_embd; k++) { + std::swap(ctx.embd[i*n_embd + k], ctx.embd[j_min*n_embd + k]); + } + } + } + std::fill(ctx.output_ids.begin(), ctx.output_ids.end(), -1); + for (int32_t i = 0; i < n_outputs; ++i) { + ctx.output_ids[out_ids[i]] = i; + } + out_ids.clear(); + } +} + +// +// interface implementation +// + +void llama_free(struct llama_context * ctx) { + delete ctx; +} + +uint32_t llama_n_ctx(const struct llama_context * ctx) { + return ctx->cparams.n_ctx; +} + +uint32_t llama_n_batch(const struct llama_context * ctx) { + return ctx->cparams.n_batch; +} + +uint32_t llama_n_ubatch(const struct llama_context * ctx) { + return ctx->cparams.n_ubatch; +} + +uint32_t llama_n_seq_max(const struct llama_context * ctx) { + return ctx->kv_self.size; +} + +const struct llama_model * llama_get_model(const struct llama_context * ctx) { + return &ctx->model; +} + +enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx) { + return ctx->cparams.pooling_type; +} + +void llama_attach_threadpool( + struct llama_context * ctx, + ggml_threadpool_t threadpool, + ggml_threadpool_t threadpool_batch) { + ctx->threadpool = threadpool; + ctx->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool; +} + +void llama_detach_threadpool(struct llama_context * ctx) { + ctx->threadpool = nullptr; + ctx->threadpool_batch = nullptr; +} + +void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) { + ctx->cparams.n_threads = n_threads; + ctx->cparams.n_threads_batch = n_threads_batch; +} + +int32_t llama_n_threads(struct llama_context * ctx) { + return ctx->cparams.n_threads; +} + +int32_t llama_n_threads_batch(struct llama_context * ctx) { + return ctx->cparams.n_threads_batch; +} + +void llama_set_abort_callback(struct llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) { + ctx->abort_callback = abort_callback; + ctx->abort_callback_data = abort_callback_data; + + for (auto & backend : ctx->backends) { + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get())); + auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback"); + if (set_abort_callback_fn) { + set_abort_callback_fn(backend.get(), ctx->abort_callback, ctx->abort_callback_data); + } + } +} + +void llama_set_embeddings(struct llama_context * ctx, bool embeddings) { + ctx->cparams.embeddings = embeddings; +} + +void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn) { + ctx->cparams.causal_attn = causal_attn; +} + +void llama_synchronize(struct llama_context * ctx) { + ggml_backend_sched_synchronize(ctx->sched.get()); + + // FIXME: if multiple single tokens are evaluated without a synchronization, + // the stats will be added to the prompt evaluation stats + // this should only happen when using batch size 1 to evaluate a batch + + // add the evaluation to the stats + if (ctx->n_queued_tokens == 1) { + if (!ctx->cparams.no_perf) { + ctx->t_eval_us += ggml_time_us() - ctx->t_compute_start_us; + } + ctx->n_eval++; + } else if (ctx->n_queued_tokens > 1) { + if (!ctx->cparams.no_perf) { + ctx->t_p_eval_us += ggml_time_us() - ctx->t_compute_start_us; + } + ctx->n_p_eval += ctx->n_queued_tokens; + } + + // get a more accurate load time, upon first eval + if (ctx->n_queued_tokens > 0 && !ctx->has_evaluated_once) { + ctx->t_load_us = ggml_time_us() - ctx->t_start_us; + ctx->has_evaluated_once = true; + } + + ctx->n_queued_tokens = 0; + ctx->t_compute_start_us = 0; +} + +float * llama_get_logits(struct llama_context * ctx) { + llama_synchronize(ctx); + + // reorder logits for backward compatibility + // TODO: maybe deprecate this + llama_output_reorder(*ctx); + + return ctx->logits; +} + +float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) { + int32_t j = -1; + + llama_synchronize(ctx); + + try { + if (ctx->logits == nullptr) { + throw std::runtime_error("no logits"); + } + + if (i < 0) { + j = ctx->n_outputs + i; + if (j < 0) { + throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs)); + } + } else if ((size_t) i >= ctx->output_ids.size()) { + throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size())); + } else { + j = ctx->output_ids[i]; + } + + if (j < 0) { + throw std::runtime_error(format("batch.logits[%d] != true", i)); + } + if (j >= ctx->n_outputs) { + // This should not happen + throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs)); + } + + return ctx->logits + j*ctx->model.hparams.n_vocab; + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what()); +#ifndef NDEBUG + GGML_ABORT("fatal error"); +#else + return nullptr; +#endif + } +} + +float * llama_get_embeddings(struct llama_context * ctx) { + llama_synchronize(ctx); + + // reorder embeddings for backward compatibility + // TODO: maybe deprecate this + llama_output_reorder(*ctx); + + return ctx->embd; +} + +float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) { + int32_t j = -1; + + llama_synchronize(ctx); + + try { + if (ctx->embd == nullptr) { + throw std::runtime_error("no embeddings"); + } + + if (i < 0) { + j = ctx->n_outputs + i; + if (j < 0) { + throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs)); + } + } else if ((size_t) i >= ctx->output_ids.size()) { + throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size())); + } else { + j = ctx->output_ids[i]; + } + + if (j < 0) { + throw std::runtime_error(format("batch.logits[%d] != true", i)); + } + if (j >= ctx->n_outputs) { + // This should not happen + throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs)); + } + + return ctx->embd + j*ctx->model.hparams.n_embd; + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what()); +#ifndef NDEBUG + GGML_ABORT("fatal error"); +#else + return nullptr; +#endif + } +} + +float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id) { + llama_synchronize(ctx); + + auto it = ctx->embd_seq.find(seq_id); + if (it == ctx->embd_seq.end()) { + return nullptr; + } + + return it->second.data(); +} + +// llama state API + +// deprecated +size_t llama_get_state_size(struct llama_context * ctx) { + return llama_state_get_size(ctx); +} + +// deprecated +size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) { + return llama_state_get_data(ctx, dst, -1); +} + +// deprecated +size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) { + return llama_state_set_data(ctx, src, -1); +} + +// deprecated +bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); +} + +// deprecated +bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { + return llama_state_save_file(ctx, path_session, tokens, n_token_count); +} + +// TODO: replace all non-fatal assertions with returned errors or exceptions +struct llama_data_write { + virtual void write(const void * src, size_t size) = 0; + virtual void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) = 0; + virtual size_t get_size_written() = 0; + virtual ~llama_data_write() = default; + + void write_string(const std::string & str) { + uint32_t str_size = str.size(); + + write(&str_size, sizeof(str_size)); + write(str.data(), str_size); + } + + void write_model_info(const struct llama_context * ctx) { + const std::string arch_str = llm_arch_name(ctx->model.arch); + write_string(arch_str); + // TODO: add more model-specific info which should prevent loading the session file if not identical + } + + //void write_rng(const std::mt19937 & rng) { + // std::ostringstream rng_ss; + // rng_ss << rng; + + // const std::string & rng_str = rng_ss.str(); + + // write_string(rng_str); + //} + + void write_output_ids(struct llama_context * ctx) { + llama_output_reorder(*ctx); + + const uint32_t n_outputs = ctx->n_outputs; + + std::vector output_pos; + + const size_t n_batch = ctx->cparams.n_batch; + const auto & output_ids = ctx->output_ids; + + GGML_ASSERT(n_outputs <= ctx->output_size); + + output_pos.resize(n_outputs); + + // build a more compact representation of the output ids + for (size_t i = 0; i < n_batch; ++i) { + // map an output id to a position in the batch + int32_t pos = output_ids[i]; + if (pos >= 0) { + GGML_ASSERT((uint32_t) pos < n_outputs); + output_pos[pos] = i; + } + } + + write(&n_outputs, sizeof(n_outputs)); + + if (n_outputs) { + write(output_pos.data(), n_outputs * sizeof(int32_t)); + } + } + + void write_logits(const struct llama_context * ctx) { + const uint64_t logits_size = std::min((uint64_t) ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_vocab); + + write(&logits_size, sizeof(logits_size)); + + if (logits_size) { + write(ctx->logits, logits_size * sizeof(float)); + } + } + + void write_embeddings(const struct llama_context * ctx) { + const uint64_t embeddings_size = std::min((uint64_t) ctx->embd_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_embd); + + write(&embeddings_size, sizeof(embeddings_size)); + + if (embeddings_size) { + write(ctx->embd, embeddings_size * sizeof(float)); + } + } + + void write_kv_cache_meta(const llama_kv_cache & kv_self, const std::vector> & cell_ranges, llama_seq_id seq_id = -1) { + for (const auto & range : cell_ranges) { + for (uint32_t i = range.first; i < range.second; ++i) { + const auto & cell = kv_self.cells[i]; + const llama_pos pos = cell.pos; + const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0; + + write(&pos, sizeof(pos)); + write(&n_seq_id, sizeof(n_seq_id)); + + if (n_seq_id) { + for (auto seq_id : cell.seq_id) { + write(&seq_id, sizeof(seq_id)); + } + } + } + } + } + + void write_kv_cache_data(const struct llama_context * ctx, const std::vector> & cell_ranges) { + const struct llama_kv_cache & kv_self = ctx->kv_self; + const struct llama_hparams & hparams = ctx->model.hparams; + + const uint32_t v_trans = kv_self.v_trans ? 1 : 0; + const uint32_t n_layer = hparams.n_layer; + + write(&v_trans, sizeof(v_trans)); + write(&n_layer, sizeof(n_layer)); + + std::vector tmp_buf; + + // Iterate and write all the keys first, each row is a cell + // Get whole range at a time + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + + // Write key type + const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type; + write(&k_type_i, sizeof(k_type_i)); + + // Write row size of key + const uint64_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa); + write(&k_size_row, sizeof(k_size_row)); + + // Read each range of cells of k_size length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t buf_size = range_size * k_size_row; + write_tensor_data(kv_self.k_l[il], range.first * k_size_row, buf_size); + } + } + + if (!kv_self.v_trans) { + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Write value type + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + write(&v_type_i, sizeof(v_type_i)); + + // Write row size of value + const uint64_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa); + write(&v_size_row, sizeof(v_size_row)); + + // Read each range of cells of v_size length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t buf_size = range_size * v_size_row; + write_tensor_data(kv_self.v_l[il], range.first * v_size_row, buf_size); + } + } + } else { + // When v is transposed, we also need the element size and get the element ranges from each row + const uint32_t kv_size = kv_self.size; + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Write value type + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + write(&v_type_i, sizeof(v_type_i)); + + // Write element size + const uint32_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); + write(&v_size_el, sizeof(v_size_el)); + + // Write GQA embedding size + write(&n_embd_v_gqa, sizeof(n_embd_v_gqa)); + + // For each row, we get the element values of each cell + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + // Read each range of cells of v_size_el length each into tmp_buf and write out + for (const auto & range : cell_ranges) { + const size_t range_size = range.second - range.first; + const size_t src_offset = (range.first + j * kv_size) * v_size_el; + const size_t buf_size = range_size * v_size_el; + write_tensor_data(kv_self.v_l[il], src_offset, buf_size); + } + } + } + } + } + + void write_kv_cache(const struct llama_context * ctx, llama_seq_id seq_id = -1) { + const struct llama_kv_cache & kv_self = ctx->kv_self; + std::vector> cell_ranges; // ranges, from inclusive, to exclusive + uint32_t cell_count = 0; + + // Count the number of cells with the specified seq_id + // Find all the ranges of cells with this seq id (or all, when -1) + uint32_t cell_range_begin = kv_self.size; + for (uint32_t i = 0; i < kv_self.size; ++i) { + const auto & cell = kv_self.cells[i]; + if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) { + ++cell_count; + if (cell_range_begin == kv_self.size) { + cell_range_begin = i; + } + } else { + if (cell_range_begin != kv_self.size) { + cell_ranges.emplace_back(cell_range_begin, i); + cell_range_begin = kv_self.size; + } + } + } + if (cell_range_begin != kv_self.size) { + cell_ranges.emplace_back(cell_range_begin, kv_self.size); + } + + // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count + uint32_t cell_count_check = 0; + for (const auto & range : cell_ranges) { + cell_count_check += range.second - range.first; + } + GGML_ASSERT(cell_count == cell_count_check); + + write(&cell_count, sizeof(cell_count)); + + write_kv_cache_meta(kv_self, cell_ranges, seq_id); + write_kv_cache_data(ctx, cell_ranges); + } +}; + +struct llama_data_read { + virtual const uint8_t * read(size_t size) = 0; + virtual void read_to(void * dst, size_t size) = 0; + virtual size_t get_size_read() = 0; + virtual ~llama_data_read() = default; + + void read_string(std::string & str) { + uint32_t str_size; + read_to(&str_size, sizeof(str_size)); + + str.assign((const char *) read(str_size), str_size); + } + + // validate model information + void read_model_info(const struct llama_context * ctx) { + const std::string cur_arch_str = llm_arch_name(ctx->model.arch); + + std::string arch_str; + read_string(arch_str); + if (cur_arch_str != arch_str) { + throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str())); + } + // TODO: add more info which needs to be identical but which is not verified otherwise + } + + //void read_rng(std::mt19937 & rng) { + // std::string rng_str; + // read_string(rng_str); + + // std::istringstream rng_ss(rng_str); + // rng_ss >> rng; + + // if (rng_ss.fail()) { + // throw std::runtime_error("failed to load RNG state"); + // } + //} + + void read_output_ids(struct llama_context * ctx) { + std::vector output_pos; + + uint32_t n_outputs; + read_to(&n_outputs, sizeof(n_outputs)); + + if (n_outputs > llama_output_reserve(*ctx, n_outputs)) { + throw std::runtime_error("could not reserve outputs"); + } + + if (n_outputs) { + output_pos.resize(n_outputs); + read_to(output_pos.data(), n_outputs * sizeof(int32_t)); + + for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) { + int32_t id = output_pos[i]; + if ((uint32_t) id >= ctx->cparams.n_batch) { + throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, ctx->cparams.n_batch)); + } + ctx->output_ids[id] = i; + } + + ctx->n_outputs = n_outputs; + } + } + + void read_logits(struct llama_context * ctx) { + uint64_t logits_size; + read_to(&logits_size, sizeof(logits_size)); + + if (ctx->logits_size < logits_size) { + throw std::runtime_error("logits buffer too small"); + } + + if (logits_size) { + read_to(ctx->logits, logits_size * sizeof(float)); + } + } + + void read_embeddings(struct llama_context * ctx) { + uint64_t embeddings_size; + read_to(&embeddings_size, sizeof(embeddings_size)); + + if (ctx->embd_size < embeddings_size) { + throw std::runtime_error("embeddings buffer too small"); + } + + if (embeddings_size) { + read_to(ctx->embd, embeddings_size * sizeof(float)); + } + } + + bool read_kv_cache_meta(struct llama_context * ctx, uint32_t cell_count, llama_seq_id dest_seq_id = -1) { + struct llama_kv_cache & kv_self = ctx->kv_self; + + if (dest_seq_id != -1) { + // single sequence + + llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1); + + llama_ubatch batch = ctx->sbatch.reserve_ubatch(cell_count, /* has_embd */ false); + batch.n_tokens = cell_count; + batch.n_seq_tokens = cell_count; + batch.n_seqs = 1; + + for (uint32_t i = 0; i < cell_count; ++i) { + llama_pos pos; + uint32_t n_seq_id; + + read_to(&pos, sizeof(pos)); + read_to(&n_seq_id, sizeof(n_seq_id)); + + if (n_seq_id != 0) { + LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__); + return false; + } + + batch.pos[i] = pos; + } + batch.n_seq_id[0] = 1; + batch.seq_id[0] = &dest_seq_id; + if (!llama_kv_cache_find_slot(kv_self, batch)) { + LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__); + return false; + } + + // DEBUG CHECK: kv_self.head should be our first cell, kv_self.head + cell_count - 1 should be our last cell (verify seq_id and pos values) + // Assume that this is one contiguous block of cells + GGML_ASSERT(kv_self.head + cell_count <= kv_self.size); + GGML_ASSERT(kv_self.cells[kv_self.head].pos == batch.pos[0]); + GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].pos == batch.pos[cell_count - 1]); + GGML_ASSERT(kv_self.cells[kv_self.head].has_seq_id(dest_seq_id)); + GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].has_seq_id(dest_seq_id)); + } else { + // whole KV cache restore + + if (cell_count > kv_self.size) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__); + return false; + } + + llama_kv_cache_clear(kv_self); + + for (uint32_t i = 0; i < cell_count; ++i) { + llama_kv_cell & cell = kv_self.cells[i]; + + llama_pos pos; + uint32_t n_seq_id; + + read_to(&pos, sizeof(pos)); + read_to(&n_seq_id, sizeof(n_seq_id)); + + cell.pos = pos; + + for (uint32_t j = 0; j < n_seq_id; ++j) { + llama_seq_id seq_id; + read_to(&seq_id, sizeof(seq_id)); + + if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) { + LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx)); + return false; + } + + cell.seq_id.insert(seq_id); + + if (kv_self.recurrent) { + int32_t & tail = kv_self.cells[seq_id].tail; + if (tail != -1) { + LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail); + return false; + } + tail = i; + } + } + } + + kv_self.head = 0; + kv_self.used = cell_count; + } + + if (kv_self.recurrent) { + for (uint32_t i = 0; i < cell_count; ++i) { + uint32_t cell_id = kv_self.head + i; + // make sure the recurrent states will keep their restored state + kv_self.cells[cell_id].src = cell_id; + } + } + + return true; + } + + bool read_kv_cache_data(struct llama_context * ctx, uint32_t cell_count) { + const struct llama_hparams & hparams = ctx->model.hparams; + struct llama_kv_cache & kv_self = ctx->kv_self; + uint32_t v_trans; + uint32_t n_layer; + read_to(&v_trans, sizeof(v_trans)); + read_to(&n_layer, sizeof(n_layer)); + + if (n_layer != hparams.n_layer) { + LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer); + return false; + } + if (cell_count > kv_self.size) { + LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, kv_self.size); + return false; + } + if (kv_self.v_trans != (bool) v_trans) { + LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__); + return false; + } + + // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s(); + + // Read type of key + int32_t k_type_i_ref; + read_to(&k_type_i_ref, sizeof(k_type_i_ref)); + const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type; + if (k_type_i != k_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il); + return false; + } + + // Read row size of key + uint64_t k_size_row_ref; + read_to(&k_size_row_ref, sizeof(k_size_row_ref)); + const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa); + if (k_size_row != k_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the keys for the whole cell range + ggml_backend_tensor_set(kv_self.k_l[il], read(cell_count * k_size_row), kv_self.head * k_size_row, cell_count * k_size_row); + } + } + + if (!kv_self.v_trans) { + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Read type of value + int32_t v_type_i_ref; + read_to(&v_type_i_ref, sizeof(v_type_i_ref)); + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + if (v_type_i != v_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return false; + } + + // Read row size of value + uint64_t v_size_row_ref; + read_to(&v_size_row_ref, sizeof(v_size_row_ref)); + const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa); + if (v_size_row != v_size_row_ref) { + LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il); + return false; + } + + if (cell_count) { + // Read and set the values for the whole cell range + ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_row), kv_self.head * v_size_row, cell_count * v_size_row); + } + } + } else { + // For each layer, read the values for each cell (transposed) + for (uint32_t il = 0; il < n_layer; ++il) { + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s(); + + // Read type of value + int32_t v_type_i_ref; + read_to(&v_type_i_ref, sizeof(v_type_i_ref)); + const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type; + if (v_type_i != v_type_i_ref) { + LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il); + return false; + } + + // Read element size of value + uint32_t v_size_el_ref; + read_to(&v_size_el_ref, sizeof(v_size_el_ref)); + const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type); + if (v_size_el != v_size_el_ref) { + LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il); + return false; + } + + // Read GQA embedding size + uint32_t n_embd_v_gqa_ref; + read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref)); + if (n_embd_v_gqa != n_embd_v_gqa_ref) { + LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il); + return false; + } + + if (cell_count) { + // For each row in the transposed matrix, read the values for the whole cell range + for (uint32_t j = 0; j < n_embd_v_gqa; ++j) { + const size_t dst_offset = (kv_self.head + j * kv_self.size) * v_size_el; + ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_el), dst_offset, cell_count * v_size_el); + } + } + } + } + return true; + } + + void read_kv_cache(struct llama_context * ctx, llama_seq_id seq_id = -1) { + uint32_t cell_count; + read_to(&cell_count, sizeof(cell_count)); + + bool res = read_kv_cache_meta(ctx, cell_count, seq_id) && read_kv_cache_data(ctx, cell_count); + + if (!res) { + if (seq_id == -1) { + llama_kv_cache_clear(ctx); + } else { + llama_kv_cache_seq_rm(ctx, seq_id, -1, -1); + } + throw std::runtime_error("failed to restore kv cache"); + } + } +}; + +struct llama_data_write_dummy : llama_data_write { + size_t size_written = 0; + + llama_data_write_dummy() {} + + void write(const void * /* src */, size_t size) override { + size_written += size; + } + + void write_tensor_data(const struct ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override { + size_written += size; + } + + size_t get_size_written() override { + return size_written; + } +}; + +struct llama_data_write_buffer : llama_data_write { + uint8_t * ptr; + size_t buf_size = 0; + size_t size_written = 0; + + llama_data_write_buffer(uint8_t * p, size_t len) : ptr(p), buf_size(len) {} + + void write(const void * src, size_t size) override { + if (size > buf_size) { + throw std::runtime_error("unexpectedly reached end of buffer"); + } + memcpy(ptr, src, size); + ptr += size; + size_written += size; + buf_size -= size; + } + + void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override { + if (size > buf_size) { + throw std::runtime_error("unexpectedly reached end of buffer"); + } + ggml_backend_tensor_get(tensor, ptr, offset, size); + ptr += size; + size_written += size; + buf_size -= size; + } + + size_t get_size_written() override { + return size_written; + } +}; + +struct llama_data_read_buffer : llama_data_read { + const uint8_t * ptr; + size_t buf_size = 0; + size_t size_read = 0; + + llama_data_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {} + + const uint8_t * read(size_t size) override { + const uint8_t * base_ptr = ptr; + if (size > buf_size) { + throw std::runtime_error("unexpectedly reached end of buffer"); + } + ptr += size; + size_read += size; + buf_size -= size; + return base_ptr; + } + + void read_to(void * dst, size_t size) override { + memcpy(dst, read(size), size); + } + + size_t get_size_read() override { + return size_read; + } +}; + +struct llama_data_write_file : llama_data_write { + llama_file * file; + size_t size_written = 0; + std::vector temp_buffer; + + llama_data_write_file(llama_file * f) : file(f) {} + + void write(const void * src, size_t size) override { + file->write_raw(src, size); + size_written += size; + } + + void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override { + temp_buffer.resize(size); + ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size); + write(temp_buffer.data(), temp_buffer.size()); + } + + size_t get_size_written() override { + return size_written; + } +}; + +struct llama_data_read_file : llama_data_read { + llama_file * file; + size_t size_read = 0; + std::vector temp_buffer; + + llama_data_read_file(llama_file * f) : file(f) {} + + void read_to(void * dst, size_t size) override { + file->read_raw(dst, size); + size_read += size; + } + + const uint8_t * read(size_t size) override { + temp_buffer.resize(size); + read_to(temp_buffer.data(), size); + return temp_buffer.data(); + } + + size_t get_size_read() override { + return size_read; + } +}; + +/** copy state data into either a buffer or file depending on the passed in context + * + * file context: + * llama_file file("/path", "wb"); + * llama_data_write_file data_ctx(&file); + * llama_state_get_data_internal(ctx, data_ctx); + * + * buffer context: + * std::vector buf(max_size, 0); + * llama_data_write_buffer data_ctx(buf.data(), max_size); + * llama_state_get_data_internal(ctx, data_ctx); + * +*/ +static size_t llama_state_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx) { + llama_synchronize(ctx); + + data_ctx.write_model_info(ctx); + + // copy outputs + data_ctx.write_output_ids(ctx); + data_ctx.write_logits(ctx); + data_ctx.write_embeddings(ctx); + + data_ctx.write_kv_cache(ctx); + + return data_ctx.get_size_written(); +} + +size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst, size_t size) { + llama_data_write_buffer data_ctx(dst, size); + try { + return llama_state_get_data_internal(ctx, data_ctx); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what()); + return 0; + } +} + +// Returns the *actual* size of the state. +// Intended to be used when saving to state to a buffer. +size_t llama_state_get_size(struct llama_context * ctx) { + llama_data_write_dummy data_ctx; + try { + return llama_state_get_data_internal(ctx, data_ctx); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what()); + return 0; + } +} + +static size_t llama_state_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx) { + llama_synchronize(ctx); + + data_ctx.read_model_info(ctx); + + // set outputs + data_ctx.read_output_ids(ctx); + data_ctx.read_logits(ctx); + data_ctx.read_embeddings(ctx); + + data_ctx.read_kv_cache(ctx); + + return data_ctx.get_size_read(); +} + +// Sets the state reading from the specified source address +size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src, size_t size) { + llama_data_read_buffer data_ctx(src, size); + try { + return llama_state_set_data_internal(ctx, data_ctx); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what()); + return 0; + } +} + +static bool llama_state_load_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + llama_file file(path_session, "rb"); + + // sanity checks + { + const uint32_t magic = file.read_u32(); + const uint32_t version = file.read_u32(); + + if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) { + LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version); + return false; + } + } + + // load the prompt + { + const uint32_t n_token_count = file.read_u32(); + + if (n_token_count > n_token_capacity) { + LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity); + return false; + } + + file.read_raw(tokens_out, sizeof(llama_token) * n_token_count); + *n_token_count_out = n_token_count; + } + + // restore the context state + { + const size_t n_state_size_cur = file.size() - file.tell(); + + llama_data_read_file data_ctx(&file); + const size_t n_read = llama_state_set_data_internal(ctx, data_ctx); + + if (n_read != n_state_size_cur) { + LLAMA_LOG_ERROR("%s: did not read all of the session file data! size %zu, got %zu\n", __func__, n_state_size_cur, n_read); + return false; + } + } + return true; +} + +bool llama_state_load_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + try { + return llama_state_load_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what()); + return false; + } +} + +static bool llama_state_save_file_internal(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { + llama_file file(path_session, "wb"); + + file.write_u32(LLAMA_SESSION_MAGIC); + file.write_u32(LLAMA_SESSION_VERSION); + + // save the prompt + file.write_u32((uint32_t) n_token_count); + file.write_raw(tokens, sizeof(llama_token) * n_token_count); + + // save the context state using stream saving + llama_data_write_file data_ctx(&file); + llama_state_get_data_internal(ctx, data_ctx); + + return true; +} + +bool llama_state_save_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { + try { + return llama_state_save_file_internal(ctx, path_session, tokens, n_token_count); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what()); + return false; + } +} + +static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx, llama_seq_id seq_id) { + llama_synchronize(ctx); + + data_ctx.write_kv_cache(ctx, seq_id); + + return data_ctx.get_size_written(); +} + +size_t llama_state_seq_get_size(struct llama_context * ctx, llama_seq_id seq_id) { + llama_data_write_dummy data_ctx; + return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id); +} + +size_t llama_state_seq_get_data(struct llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) { + llama_data_write_buffer data_ctx(dst, size); + try { + return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving sequence state: %s\n", __func__, err.what()); + return 0; + } +} + +static size_t llama_state_seq_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx, llama_seq_id dest_seq_id) { + llama_synchronize(ctx); + + data_ctx.read_kv_cache(ctx, dest_seq_id); + + return data_ctx.get_size_read(); +} + +size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id dest_seq_id) { + llama_data_read_buffer data_ctx(src, size); + try { + return llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading sequence state: %s\n", __func__, err.what()); + return 0; + } +} + +static size_t llama_state_seq_save_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) { + llama_file file(filepath, "wb"); + + file.write_u32(LLAMA_STATE_SEQ_MAGIC); + file.write_u32(LLAMA_STATE_SEQ_VERSION); + + // save the prompt + file.write_u32((uint32_t) n_token_count); + file.write_raw(tokens, sizeof(llama_token) * n_token_count); + + // save the context state using stream saving + llama_data_write_file data_ctx(&file); + llama_state_seq_get_data_internal(ctx, data_ctx, seq_id); + + const size_t res = file.tell(); + GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + data_ctx.get_size_written()); + return res; +} + +static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + llama_file file(filepath, "rb"); + + // version checks + { + const uint32_t magic = file.read_u32(); + const uint32_t version = file.read_u32(); + + if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) { + LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version); + return 0; + } + } + + // load the prompt + { + const uint32_t n_token_count = file.read_u32(); + + if (n_token_count > n_token_capacity) { + LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity); + return 0; + } + + file.read_raw(tokens_out, sizeof(llama_token) * n_token_count); + *n_token_count_out = n_token_count; + } + + // restore the context state + { + const size_t state_size = file.size() - file.tell(); + llama_data_read_file data_ctx(&file); + const size_t nread = llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id); + if (!nread) { + LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__); + return 0; + } + GGML_ASSERT(nread <= state_size); + GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell()); + } + + return file.tell(); +} + +size_t llama_state_seq_save_file(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) { + try { + return llama_state_seq_save_file_internal(ctx, filepath, seq_id, tokens, n_token_count); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what()); + return 0; + } +} + +size_t llama_state_seq_load_file(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { + try { + return llama_state_seq_load_file_internal(ctx, filepath, dest_seq_id, tokens_out, n_token_capacity, n_token_count_out); + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what()); + return 0; + } +} + +const std::vector> & llama_internal_get_tensor_map( + struct llama_context * ctx +) { + return ctx->model.tensors_by_name; +} diff --git a/src/llama-context.h b/src/llama-context.h new file mode 100644 index 000000000..0d163c470 --- /dev/null +++ b/src/llama-context.h @@ -0,0 +1,128 @@ +#pragma once + +#include "llama.h" +#include "llama-batch.h" +#include "llama-cparams.h" +#include "llama-model.h" +#include "llama-kv-cache.h" +#include "llama-adapter.h" + +#include "ggml-cpp.h" + +#include +#include +#include +#include + +struct llama_context { + llama_context(const llama_model & model) + : model(model) + , t_start_us(model.t_start_us) + , t_load_us(model.t_load_us) {} + + const struct llama_model & model; + + struct llama_cparams cparams; + struct llama_sbatch sbatch; // TODO: revisit if needed + struct llama_kv_cache kv_self; + struct llama_control_vector cvec; + + std::unordered_map lora_adapters; + + std::vector backends; + std::vector> set_n_threads_fns; + + ggml_backend_t backend_cpu = nullptr; + + ggml_threadpool_t threadpool = nullptr; + ggml_threadpool_t threadpool_batch = nullptr; + + bool has_evaluated_once = false; + + mutable int64_t t_start_us; + mutable int64_t t_load_us; + mutable int64_t t_p_eval_us = 0; + mutable int64_t t_eval_us = 0; + + mutable int64_t t_compute_start_us = 0; + mutable int64_t n_queued_tokens = 0; + + mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1) + mutable int32_t n_eval = 0; // number of eval calls + + // host buffer for the model output (logits and embeddings) + ggml_backend_buffer_ptr buf_output; + + // decode output (2-dimensional array: [n_outputs][n_vocab]) + size_t logits_size = 0; // capacity (of floats) for logits + float * logits = nullptr; + + std::vector output_ids; // map batch token positions to ids of the logits and embd buffers + size_t output_size = 0; // capacity (of tokens positions) for the output buffers + int32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch + + bool logits_all = false; + + // embeddings output (2-dimensional array: [n_outputs][n_embd]) + // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE + size_t embd_size = 0; // capacity (of floats) for embeddings + float * embd = nullptr; + + // sequence embeddings output (map of [n_embd] vectors) + // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE + std::map> embd_seq; + + // whether we are computing encoder output or decoder output + bool is_encoding = false; + + // TODO: find a better way to accommodate mutli-dimension position encoding methods + // number of position id each token get, 1 for each token in most cases. + // when using m-rope, it will be 3 position ids per token to representing 3 dimension coordinate. + int n_pos_per_token = 1; + + // output of the encoder part of the encoder-decoder models + std::vector embd_enc; + std::vector> seq_ids_enc; + + // memory buffers used to evaluate the model + std::vector buf_compute_meta; + ggml_backend_sched_ptr sched; + + ggml_abort_callback abort_callback = nullptr; + void * abort_callback_data = nullptr; + + // input tensors + struct ggml_tensor * inp_tokens; // I32 [n_batch] + struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch] + struct ggml_tensor * inp_pos; // I32 [n_batch] + struct ggml_tensor * inp_out_ids; // I32 [n_outputs] + struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch] + struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch] + struct ggml_tensor * inp_K_shift; // I32 [kv_size] + struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch] + struct ggml_tensor * inp_cls; // I32 [n_batch] + struct ggml_tensor * inp_s_copy; // I32 [kv_size] + struct ggml_tensor * inp_s_mask; // F32 [1, n_kv] + struct ggml_tensor * inp_s_seq; // I32 [n_kv, n_batch] + struct ggml_tensor * inp_pos_bucket; // I32 [n_batch|n_kv, n_batch] + struct ggml_tensor * inp_embd_enc; // F32 [n_embd, n_outputs_enc] + struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch] +}; + +// TODO: make these methods of llama_context +void llama_set_k_shift(struct llama_context & lctx); + +void llama_set_s_copy(struct llama_context & lctx); + +void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch); + +// Make sure enough space is available for outputs. +// Returns max number of outputs for which space was reserved. +size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs); + +// make the outputs have the same order they had in the user-provided batch +void llama_output_reorder(struct llama_context & ctx); + +// For internal test use +// TODO: remove +const std::vector> & llama_internal_get_tensor_map(struct llama_context * ctx); diff --git a/src/llama-cparams.cpp b/src/llama-cparams.cpp new file mode 100644 index 000000000..28369be36 --- /dev/null +++ b/src/llama-cparams.cpp @@ -0,0 +1 @@ +#include "llama-cparams.h" diff --git a/src/llama-cparams.h b/src/llama-cparams.h new file mode 100644 index 000000000..252012f3d --- /dev/null +++ b/src/llama-cparams.h @@ -0,0 +1,37 @@ +#pragma once + +#include "llama.h" + +#include + +struct llama_cparams { + uint32_t n_ctx; // context size used during inference + uint32_t n_batch; + uint32_t n_ubatch; + uint32_t n_seq_max; + int n_threads; // number of threads to use for generation + int n_threads_batch; // number of threads to use for batch processing + + float rope_freq_base; + float rope_freq_scale; + + uint32_t n_ctx_orig_yarn; + // These hyperparameters are not exposed in GGUF, because all + // existing YaRN models use the same values for them. + float yarn_ext_factor; + float yarn_attn_factor; + float yarn_beta_fast; + float yarn_beta_slow; + float defrag_thold; + + bool embeddings; + bool causal_attn; + bool offload_kqv; + bool flash_attn; + bool no_perf; + + enum llama_pooling_type pooling_type; + + ggml_backend_sched_eval_callback cb_eval; + void * cb_eval_user_data; +}; diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp index 76d0cb3a2..186dc9a25 100644 --- a/src/llama-grammar.cpp +++ b/src/llama-grammar.cpp @@ -1,5 +1,6 @@ #include "llama-grammar.h" +#include "llama-impl.h" #include "llama-vocab.h" #include "llama-sampling.h" diff --git a/src/llama-grammar.h b/src/llama-grammar.h index 13e940fb5..f8b40c651 100644 --- a/src/llama-grammar.h +++ b/src/llama-grammar.h @@ -1,8 +1,10 @@ #pragma once -#include "llama-impl.h" +#include "llama.h" #include +#include +#include struct llama_vocab; diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp new file mode 100644 index 000000000..c40534696 --- /dev/null +++ b/src/llama-hparams.cpp @@ -0,0 +1,71 @@ +#include "llama-hparams.h" + +#include "ggml.h" + +uint32_t llama_hparams::n_head(uint32_t il) const { + if (il < n_layer) { + return n_head_arr[il]; + } + + GGML_ABORT("fatal error"); +} + +uint32_t llama_hparams::n_head_kv(uint32_t il) const { + if (il < n_layer) { + return n_head_kv_arr[il]; + } + + GGML_ABORT("fatal error"); +} + +uint32_t llama_hparams::n_ff(uint32_t il) const { + if (il < n_layer) { + return n_ff_arr[il]; + } + + GGML_ABORT("fatal error"); +} + +uint32_t llama_hparams::n_gqa(uint32_t il) const { + const uint32_t n_head = this->n_head(il); + const uint32_t n_head_kv = this->n_head_kv(il); + + if (n_head_kv == 0) { + return 0; + } + + return n_head/n_head_kv; +} + +uint32_t llama_hparams::n_embd_k_gqa(uint32_t il) const { + const uint32_t n_head_kv = this->n_head_kv(il); + + return n_embd_head_k * n_head_kv; +} + +uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const { + const uint32_t n_head_kv = this->n_head_kv(il); + + return n_embd_head_v * n_head_kv; +} + +uint32_t llama_hparams::n_embd_k_s() const { + if (wkv_head_size != 0) { + // for RWKV models + return 2 * n_embd; + } + + // TODO: maybe support other convolution strides than 1 + // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed + return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner; +} + +uint32_t llama_hparams::n_embd_v_s() const { + if (wkv_head_size != 0) { + // corresponds to RWKV's wkv_states size + return n_embd * wkv_head_size; + } + + // corresponds to Mamba's ssm_states size + return ssm_d_state * ssm_d_inner; +} diff --git a/src/llama-hparams.h b/src/llama-hparams.h new file mode 100644 index 000000000..3a76b71a4 --- /dev/null +++ b/src/llama-hparams.h @@ -0,0 +1,132 @@ +#pragma once + +#include "llama.h" + +#include + +// bump if necessary +#define LLAMA_MAX_LAYERS 512 +#define LLAMA_MAX_EXPERTS 160 // DeepSeekV2 + +struct llama_hparams_posnet { + uint32_t n_embd; + uint32_t n_layer; +}; + +struct llama_hparams_convnext { + uint32_t n_embd; + uint32_t n_layer; +}; + +struct llama_hparams { + bool vocab_only; + bool rope_finetuned; + bool use_par_res; + bool swin_norm; + + uint32_t n_vocab = 0; + uint32_t n_ctx_train; // context size the model was trained on + uint32_t n_embd; + uint32_t n_embd_features = 0; + uint32_t n_layer; + uint32_t n_rot; + uint32_t n_swa = 0; // sliding window attention (SWA) + uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads + uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head + uint32_t n_expert = 0; + uint32_t n_expert_used = 0; + uint32_t n_vocab_type = 0; // for BERT-style token types + uint32_t n_rel_attn_bkts = 0; + + // for WavTokenizer + struct llama_hparams_posnet posnet; + struct llama_hparams_convnext convnext; + + std::array n_head_arr; + std::array n_head_kv_arr; + std::array n_ff_arr; + + uint32_t n_layer_dense_lead = 0; + uint32_t n_lora_q = 0; + uint32_t n_lora_kv = 0; + uint32_t n_ff_exp = 0; + uint32_t n_ff_shexp = 0; + uint32_t n_expert_shared = 0; + uint32_t n_norm_groups = 0; + + float expert_weights_scale = 0.0; + + float f_norm_eps; + float f_norm_rms_eps; + float f_norm_group_eps; + + float f_attn_logit_softcapping = 50.0f; + float f_final_logit_softcapping = 30.0f; + + // for RWKV + uint32_t rescale_every_n_layers = 0; + uint32_t time_mix_extra_dim = 0; + uint32_t time_decay_extra_dim = 0; + uint32_t wkv_head_size = 0; + + float rope_attn_factor = 1.0f; + float rope_freq_base_train; + float rope_freq_scale_train; + uint32_t n_ctx_orig_yarn; + float rope_yarn_log_mul; + + std::array rope_sections; + + // for State Space Models + uint32_t ssm_d_conv = 0; + uint32_t ssm_d_inner = 0; + uint32_t ssm_d_state = 0; + uint32_t ssm_dt_rank = 0; + + bool ssm_dt_b_c_rms = false; + + float f_clamp_kqv = 0.0f; + float f_max_alibi_bias = 0.0f; + float f_logit_scale = 0.0f; + + // Additional scale factors (Granite/Granite MoE) + float f_residual_scale = 0.0f; + float f_embedding_scale = 0.0f; + float f_attention_scale = 0.0f; + + bool causal_attn = true; + bool use_alibi = false; + bool attn_soft_cap = false; + + // needed by encoder-decoder models (e.g. T5, FLAN-T5) + // ref: https://github.com/ggerganov/llama.cpp/pull/8141 + llama_token dec_start_token_id = LLAMA_TOKEN_NULL; + + enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE; + enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE; + enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE; + + uint32_t n_head(uint32_t il = 0) const; + + uint32_t n_head_kv(uint32_t il = 0) const; + + uint32_t n_ff(uint32_t il = 0) const; + + uint32_t n_gqa(uint32_t il = 0) const; + + // dimension of key embeddings across all k-v heads + uint32_t n_embd_k_gqa(uint32_t il = 0) const; + + // dimension of value embeddings across all k-v heads + uint32_t n_embd_v_gqa(uint32_t il = 0) const; + + // dimension of the rolling state embeddings + // corresponds to Mamba's conv_states size or RWKV's token_shift states size + uint32_t n_embd_k_s() const; + + // dimension of the recurrent state embeddings + uint32_t n_embd_v_s() const; +}; + +static_assert(std::is_trivially_copyable::value, "llama_hparams must be trivially copyable"); + diff --git a/src/llama-impl.cpp b/src/llama-impl.cpp new file mode 100644 index 000000000..a05ba4f63 --- /dev/null +++ b/src/llama-impl.cpp @@ -0,0 +1,166 @@ +#include "llama-impl.h" + +#include "llama.h" + +#include +#include +#include +#include +#include +#include + +struct llama_logger_state { + ggml_log_callback log_callback = llama_log_callback_default; + void * log_callback_user_data = nullptr; +}; + +static llama_logger_state g_logger_state; + +time_meas::time_meas(int64_t & t_acc, bool disable) : t_start_us(disable ? -1 : ggml_time_us()), t_acc(t_acc) {} + +time_meas::~time_meas() { + if (t_start_us >= 0) { + t_acc += ggml_time_us() - t_start_us; + } + } + +void llama_log_set(ggml_log_callback log_callback, void * user_data) { + ggml_log_set(log_callback, user_data); + g_logger_state.log_callback = log_callback ? log_callback : llama_log_callback_default; + g_logger_state.log_callback_user_data = user_data; +} + +static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) { + va_list args_copy; + va_copy(args_copy, args); + char buffer[128]; + int len = vsnprintf(buffer, 128, format, args); + if (len < 128) { + g_logger_state.log_callback(level, buffer, g_logger_state.log_callback_user_data); + } else { + char * buffer2 = new char[len + 1]; + vsnprintf(buffer2, len + 1, format, args_copy); + buffer2[len] = 0; + g_logger_state.log_callback(level, buffer2, g_logger_state.log_callback_user_data); + delete[] buffer2; + } + va_end(args_copy); +} + +void llama_log_internal(ggml_log_level level, const char * format, ...) { + va_list args; + va_start(args, format); + llama_log_internal_v(level, format, args); + va_end(args); +} + +void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) { + (void) level; + (void) user_data; + fputs(text, stderr); + fflush(stderr); +} + +void replace_all(std::string & s, const std::string & search, const std::string & replace) { + if (search.empty()) { + return; + } + std::string builder; + builder.reserve(s.length()); + size_t pos = 0; + size_t last_pos = 0; + while ((pos = s.find(search, last_pos)) != std::string::npos) { + builder.append(s, last_pos, pos - last_pos); + builder.append(replace); + last_pos = pos + search.length(); + } + builder.append(s, last_pos, std::string::npos); + s = std::move(builder); +} + +std::string format(const char * fmt, ...) { + va_list ap; + va_list ap2; + va_start(ap, fmt); + va_copy(ap2, ap); + int size = vsnprintf(NULL, 0, fmt, ap); + GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT + std::vector buf(size + 1); + int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); + GGML_ASSERT(size2 == size); + va_end(ap2); + va_end(ap); + return std::string(buf.data(), size); +} + +std::string llama_format_tensor_shape(const std::vector & ne) { + char buf[256]; + snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0)); + for (size_t i = 1; i < ne.size(); i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i)); + } + return buf; +} + +std::string llama_format_tensor_shape(const struct ggml_tensor * t) { + char buf[256]; + snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]); + for (int i = 1; i < GGML_MAX_DIMS; i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]); + } + return buf; +} + +static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) { + switch (type) { + case GGUF_TYPE_UINT8: return std::to_string(((const uint8_t *)data)[i]); + case GGUF_TYPE_INT8: return std::to_string(((const int8_t *)data)[i]); + case GGUF_TYPE_UINT16: return std::to_string(((const uint16_t *)data)[i]); + case GGUF_TYPE_INT16: return std::to_string(((const int16_t *)data)[i]); + case GGUF_TYPE_UINT32: return std::to_string(((const uint32_t *)data)[i]); + case GGUF_TYPE_INT32: return std::to_string(((const int32_t *)data)[i]); + case GGUF_TYPE_UINT64: return std::to_string(((const uint64_t *)data)[i]); + case GGUF_TYPE_INT64: return std::to_string(((const int64_t *)data)[i]); + case GGUF_TYPE_FLOAT32: return std::to_string(((const float *)data)[i]); + case GGUF_TYPE_FLOAT64: return std::to_string(((const double *)data)[i]); + case GGUF_TYPE_BOOL: return ((const bool *)data)[i] ? "true" : "false"; + default: return format("unknown type %d", type); + } +} + +std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) { + const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i); + + switch (type) { + case GGUF_TYPE_STRING: + return gguf_get_val_str(ctx_gguf, i); + case GGUF_TYPE_ARRAY: + { + const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i); + int arr_n = gguf_get_arr_n(ctx_gguf, i); + const void * data = gguf_get_arr_data(ctx_gguf, i); + std::stringstream ss; + ss << "["; + for (int j = 0; j < arr_n; j++) { + if (arr_type == GGUF_TYPE_STRING) { + std::string val = gguf_get_arr_str(ctx_gguf, i, j); + // escape quotes + replace_all(val, "\\", "\\\\"); + replace_all(val, "\"", "\\\""); + ss << '"' << val << '"'; + } else if (arr_type == GGUF_TYPE_ARRAY) { + ss << "???"; + } else { + ss << gguf_data_to_str(arr_type, data, j); + } + if (j < arr_n - 1) { + ss << ", "; + } + } + ss << "]"; + return ss.str(); + } + default: + return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0); + } +} diff --git a/src/llama-impl.h b/src/llama-impl.h index 70f16b61c..12d1fb082 100644 --- a/src/llama-impl.h +++ b/src/llama-impl.h @@ -1,10 +1,9 @@ #pragma once -#include "llama.h" +#include "ggml.h" // for ggml_log_level #include #include -#include #ifdef __GNUC__ #ifdef __MINGW32__ @@ -35,147 +34,28 @@ void llama_log_callback_default(ggml_log_level level, const char * text, void * // helpers // -struct time_meas { - time_meas(int64_t & t_acc, bool disable = false) : t_start_us(disable ? -1 : ggml_time_us()), t_acc(t_acc) {} +template +struct no_init { + T value; + no_init() { /* do nothing */ } +}; - ~time_meas() { - if (t_start_us >= 0) { - t_acc += ggml_time_us() - t_start_us; - } - } +struct time_meas { + time_meas(int64_t & t_acc, bool disable = false); + ~time_meas(); const int64_t t_start_us; int64_t & t_acc; }; -static void replace_all(std::string & s, const std::string & search, const std::string & replace) { - if (search.empty()) { - return; - } - std::string builder; - builder.reserve(s.length()); - size_t pos = 0; - size_t last_pos = 0; - while ((pos = s.find(search, last_pos)) != std::string::npos) { - builder.append(s, last_pos, pos - last_pos); - builder.append(replace); - last_pos = pos + search.length(); - } - builder.append(s, last_pos, std::string::npos); - s = std::move(builder); -} +void replace_all(std::string & s, const std::string & search, const std::string & replace); -const std::vector> & llama_internal_get_tensor_map( - struct llama_context * ctx -); +// TODO: rename to llama_format ? +LLAMA_ATTRIBUTE_FORMAT(1, 2) +std::string format(const char * fmt, ...); -// the ring buffer works similarly to std::deque, but with a fixed capacity -template -struct ring_buffer { - ring_buffer(size_t cap) : capacity(cap), data(cap) {} +std::string llama_format_tensor_shape(const std::vector & ne); +std::string llama_format_tensor_shape(const struct ggml_tensor * t); - T & front() { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[first]; - } - - const T & front() const { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[first]; - } - - T & back() { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[pos]; - } - - const T & back() const { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - return data[pos]; - } - - void push_back(const T & value) { - if (capacity == 0) { - throw std::runtime_error("ring buffer: capacity is zero"); - } - - if (sz == capacity) { - // advance the start when buffer is full - first = (first + 1) % capacity; - } else { - sz++; - } - data[pos] = value; - pos = (pos + 1) % capacity; - } - - T pop_front() { - if (sz == 0) { - throw std::runtime_error("ring buffer is empty"); - } - T value = data[first]; - first = (first + 1) % capacity; - sz--; - return value; - } - - //T & operator[](size_t i) { - // if (i >= sz) { - // throw std::runtime_error("ring buffer: index out of bounds"); - // } - // return data[(first + i) % capacity]; - //} - - //const T & at(size_t i) const { - // if (i >= sz) { - // throw std::runtime_error("ring buffer: index out of bounds"); - // } - // return data[(first + i) % capacity]; - //} - - const T & rat(size_t i) const { - if (i >= sz) { - throw std::runtime_error("ring buffer: index out of bounds"); - } - return data[(first + sz - i - 1) % capacity]; - } - - std::vector to_vector() const { - std::vector result; - result.reserve(sz); - for (size_t i = 0; i < sz; i++) { - result.push_back(data[(first + i) % capacity]); - } - return result; - } - - void clear() { - // here only reset the status of the buffer - sz = 0; - first = 0; - pos = 0; - } - - bool empty() const { - return sz == 0; - } - - size_t size() const { - return sz; - } - - size_t capacity = 0; - size_t sz = 0; - size_t first = 0; - size_t pos = 0; - std::vector data; -}; +std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i); diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp new file mode 100644 index 000000000..53379253a --- /dev/null +++ b/src/llama-kv-cache.cpp @@ -0,0 +1,718 @@ +#include "llama-kv-cache.h" + +#include "llama-impl.h" +#include "llama-batch.h" +#include "llama-cparams.h" +#include "llama-model.h" + +#include +#include +#include + +static const llama_kv_cache_slot_info llama_kv_cache_slot_info_failed{false}; + +uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) { + // the FA kernels require padding to avoid extra runtime boundary checks + return cparams.flash_attn ? 256u : 32u; +} + +bool llama_kv_cache_init( + struct llama_kv_cache & cache, + const llama_model & model, + const llama_cparams & cparams, + ggml_type type_k, + ggml_type type_v, + uint32_t kv_size, + bool offload) { + const struct llama_hparams & hparams = model.hparams; + + const int32_t n_layer = hparams.n_layer; + + cache.has_shift = false; + + cache.recurrent = llama_model_is_recurrent(&model); + cache.v_trans = !cache.recurrent && !cparams.flash_attn; + cache.can_shift = !cache.recurrent && model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA + + LLAMA_LOG_INFO("%s: kv_size = %d, offload = %d, type_k = '%s', type_v = '%s', n_layer = %d, can_shift = %d\n", + __func__, kv_size, offload, ggml_type_name(type_k), ggml_type_name(type_v), n_layer, cache.can_shift); + + cache.head = 0; + cache.size = kv_size; + cache.used = 0; + + cache.type_k = type_k; + cache.type_v = type_v; + + cache.cells.clear(); + cache.cells.resize(kv_size); + + // create a context for each buffer type + std::map ctx_map; + auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { + auto it = ctx_map.find(buft); + if (it == ctx_map.end()) { + struct ggml_init_params params = { + /*.mem_size =*/ size_t(2u*n_layer*ggml_tensor_overhead()), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + ggml_context * ctx = ggml_init(params); + if (!ctx) { + return nullptr; + } + ctx_map[buft] = ctx; + cache.ctxs.emplace_back(ctx); + return ctx; + } + return it->second; + }; + + cache.k_l.reserve(n_layer); + cache.v_l.reserve(n_layer); + + for (int i = 0; i < n_layer; i++) { + const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s(); + const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s(); + + LLAMA_LOG_DEBUG("%s: layer %d: n_embd_k_gqa = %d, n_embd_v_gqa = %d\n", __func__, i, n_embd_k_gqa, n_embd_v_gqa); + + ggml_backend_buffer_type_t buft; + if (offload) { + auto * dev = model.dev_layer.at(i).dev; + buft = ggml_backend_dev_buffer_type(dev); + } else { + buft = ggml_backend_cpu_buffer_type(); + } + ggml_context * ctx = ctx_for_buft(buft); + + if (!ctx) { + LLAMA_LOG_ERROR("%s: failed to create ggml context for kv cache\n", __func__); + return false; + } + + ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size); + ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size); + ggml_format_name(k, "cache_k_l%d", i); + ggml_format_name(v, "cache_v_l%d", i); + cache.k_l.push_back(k); + cache.v_l.push_back(v); + } + + // allocate tensors and initialize the buffers to avoid NaNs in the padding + for (auto it : ctx_map) { + auto * buft = it.first; + auto * ctx = it.second; + + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft); + if (!buf) { + LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__); + return false; + } + ggml_backend_buffer_clear(buf, 0); + LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0); + cache.bufs.emplace_back(buf); + } + + return true; +} + +struct llama_kv_cache_slot_info llama_kv_cache_find_slot( + struct llama_kv_cache & cache, + const struct llama_ubatch & batch) { + const uint32_t n_tokens = batch.n_tokens; + const uint32_t n_seqs = batch.n_seqs; + const uint32_t n_seq_tokens = batch.n_seq_tokens; + + if (cache.recurrent) { + // For recurrent state architectures (like Mamba or RWKV), + // each cache cell can store the state for a whole sequence. + // A slot should be always be contiguous. + + // can only process batches with an equal number of new tokens in each sequence + GGML_ASSERT(batch.equal_seqs); + + int32_t min = cache.size - 1; + int32_t max = 0; + + // everything should fit if all seq_ids are smaller than the max + for (uint32_t s = 0; s < n_seqs; ++s) { + const uint32_t n_seq_id = batch.n_seq_id[s]; + for (uint32_t j = 0; j < n_seq_id; ++j) { + const llama_seq_id seq_id = batch.seq_id[s][j]; + + if (seq_id < 0 || (uint32_t) seq_id >= cache.size) { + // too big seq_id + // TODO: would it be possible to resize the cache instead? + LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d Try using a bigger --parallel value\n", __func__, seq_id, cache.size); + return llama_kv_cache_slot_info_failed; + } + if (j > 0) { + llama_kv_cell & seq = cache.cells[seq_id]; + if (seq.tail >= 0) { + llama_kv_cell & cell = cache.cells[seq.tail]; + // clear cells from seq_ids that become shared + // (should not normally happen, but let's handle it anyway) + cell.seq_id.erase(seq_id); + seq.tail = -1; + if (cell.seq_id.empty()) { + cell.pos = -1; + cell.src = -1; + cache.used -= 1; + } + } + } + } + } + +#ifndef NDEBUG + { + std::vector tails_verif; + tails_verif.assign(cache.size, -1); + for (uint32_t i = 0; i < cache.size; ++i) { + llama_kv_cell & cell = cache.cells[i]; + for (llama_seq_id seq_id : cell.seq_id) { + if (tails_verif[seq_id] != -1) { + LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]); + } + tails_verif[seq_id] = i; + } + } + for (uint32_t i = 0; i < cache.size; ++i) { + if (tails_verif[i] != cache.cells[i].tail) { + LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cache.cells[i].tail, tails_verif[i]); + } + } + } +#endif + + // find next empty cell + uint32_t next_empty_cell = cache.head; + + for (uint32_t i = 0; i < cache.size; ++i) { + if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; } + llama_kv_cell & cell = cache.cells[next_empty_cell]; + if (cell.is_empty()) { break; } + next_empty_cell += 1; + } + + // find usable cell range + for (uint32_t s = 0; s < n_seqs; ++s) { + const llama_seq_id seq_id = batch.seq_id[s][0]; + llama_kv_cell & seq_meta = cache.cells[seq_id]; + bool has_cell = false; + if (seq_meta.tail >= 0) { + llama_kv_cell & cell = cache.cells[seq_meta.tail]; + GGML_ASSERT(cell.has_seq_id(seq_id)); + // does this seq_id "own" the cell? + if (cell.seq_id.size() == 1) { has_cell = true; } + } + if (!has_cell) { + llama_kv_cell & empty_cell = cache.cells[next_empty_cell]; + GGML_ASSERT(empty_cell.is_empty()); + // copy old tail into the empty cell + if (seq_meta.tail >= 0) { + llama_kv_cell & orig_cell = cache.cells[seq_meta.tail]; + empty_cell.pos = orig_cell.pos; + empty_cell.src = orig_cell.src; + orig_cell.seq_id.erase(seq_id); + empty_cell.seq_id.insert(seq_id); // will be overwritten + } + seq_meta.tail = next_empty_cell; + // find next empty cell + if (s + 1 < n_seqs) { + next_empty_cell += 1; + for (uint32_t i = 0; i < cache.size; ++i) { + if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; } + llama_kv_cell & cell = cache.cells[next_empty_cell]; + if (cell.is_empty()) { break; } + next_empty_cell += 1; + } + } + } + if (min > seq_meta.tail) { min = seq_meta.tail; } + if (max < seq_meta.tail) { max = seq_meta.tail; } + } + + // gather and re-order + for (uint32_t s = 0; s < n_seqs; ++s) { + int32_t dst_id = s + min; + int32_t src_id = cache.cells[batch.seq_id[s][0]].tail; + if (dst_id != src_id) { + llama_kv_cell & dst_cell = cache.cells[dst_id]; + llama_kv_cell & src_cell = cache.cells[src_id]; + + std::swap(dst_cell.pos, src_cell.pos); + std::swap(dst_cell.src, src_cell.src); + std::swap(dst_cell.seq_id, src_cell.seq_id); + + // swap tails (assuming they NEVER overlap) + for (const llama_seq_id seq_id : src_cell.seq_id) { + cache.cells[seq_id].tail = src_id; + } + for (const llama_seq_id seq_id : dst_cell.seq_id) { + cache.cells[seq_id].tail = dst_id; + } + } + } + + // update the pos of the used seqs + for (uint32_t s = 0; s < n_seqs; ++s) { + const llama_pos last_pos = batch.pos[n_seq_tokens * s + n_seq_tokens - 1]; + int32_t cell_id = s + min; + llama_kv_cell & cell = cache.cells[cell_id]; + + if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) { + // What should happen when the pos backtracks or skips a value? + // Clearing the state mid-batch would require special-casing which isn't done. + LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n", + __func__, last_pos, cell.pos, batch.seq_id[s][0], n_seq_tokens); + } + cell.pos = last_pos; + cell.seq_id.clear(); + for (int32_t j = 0; j < batch.n_seq_id[s]; ++j) { + const llama_seq_id seq_id = batch.seq_id[s][j]; + cell.seq_id.insert(seq_id); + cache.cells[seq_id].tail = cell_id; + } + } + + // allow getting the range of used cells, from head to head + n + cache.head = min; + cache.n = max - min + 1; + cache.used = std::count_if(cache.cells.begin(), cache.cells.end(), + [](const llama_kv_cell& cell){ return !cell.is_empty(); }); + + // sanity check + return llama_kv_cache_slot_info(cache.n >= n_seqs); + } + // otherwise, one cell per token. + + if (n_tokens > cache.size) { + LLAMA_LOG_ERROR("%s: n_tokens=%d > cache.size=%d\n", __func__, n_tokens, cache.size); + return llama_kv_cache_slot_info_failed; + } + + uint32_t n_tested = 0; + + while (true) { + if (cache.head + n_tokens > cache.size) { + n_tested += cache.size - cache.head; + cache.head = 0; + continue; + } + + bool found = true; + for (uint32_t i = 0; i < n_tokens; i++) { + if (cache.cells[cache.head + i].pos >= 0) { + found = false; + cache.head += i + 1; + n_tested += i + 1; + break; + } + } + + if (found) { + break; + } + + if (n_tested >= cache.size) { + //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens); + return llama_kv_cache_slot_info_failed; + } + } + + for (uint32_t s = 0; s < n_seqs; s++) { + for (uint32_t i = 0; i < n_seq_tokens; ++i) { + uint32_t k = s*n_seq_tokens + i; + cache.cells[cache.head + k].pos = batch.pos[k]; + + for (int32_t j = 0; j < batch.n_seq_id[s]; j++) { + cache.cells[cache.head + k].seq_id.insert(batch.seq_id[s][j]); + } + } + } + + cache.used += n_tokens; + + return llama_kv_cache_slot_info(cache.head, cache.head + n_tokens); +} + +uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) { + for (uint32_t i = cache.size; i > 0; --i) { + const llama_kv_cell & cell = cache.cells[i - 1]; + + if (cell.pos >= 0 && !cell.is_empty()) { + return i; + } + } + + return 0; +} + +void llama_kv_cache_clear(struct llama_kv_cache & cache) { + for (int32_t i = 0; i < (int32_t) cache.size; ++i) { + cache.cells[i].pos = -1; + cache.cells[i].seq_id.clear(); + cache.cells[i].src = -1; + cache.cells[i].tail = -1; + } + cache.head = 0; + cache.used = 0; + + for (auto & buf : cache.bufs) { + ggml_backend_buffer_clear(buf.get(), 0); + } +} + +bool llama_kv_cache_seq_rm( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1) { + uint32_t new_head = cache.size; + + if (p0 < 0) p0 = 0; + if (p1 < 0) p1 = std::numeric_limits::max(); + + // models like Mamba or RWKV can't have a state partially erased + if (cache.recurrent) { + if (seq_id >= (int64_t) cache.size) { + // could be fatal + return false; + } + if (0 <= seq_id) { + int32_t & tail_id = cache.cells[seq_id].tail; + if (tail_id >= 0) { + const llama_kv_cell & cell = cache.cells[tail_id]; + // partial intersection is invalid + if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) { + return false; + } + // invalidate tails which will be cleared + if (p0 <= cell.pos && cell.pos < p1) { + tail_id = -1; + } + } + } else { + // seq_id is negative, then the range should include everything or nothing + if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits::max())) { + return false; + } + } + } + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { + if (seq_id < 0) { + cache.cells[i].seq_id.clear(); + } else if (cache.cells[i].has_seq_id(seq_id)) { + cache.cells[i].seq_id.erase(seq_id); + } else { + continue; + } + if (cache.cells[i].is_empty()) { + // keep count of the number of used cells + if (cache.cells[i].pos >= 0) cache.used--; + + cache.cells[i].pos = -1; + cache.cells[i].src = -1; + if (new_head == cache.size) new_head = i; + } + } + } + + // If we freed up a slot, set head to it so searching can start there. + if (new_head != cache.size && new_head < cache.head) cache.head = new_head; + + return true; +} + +void llama_kv_cache_seq_cp( + struct llama_kv_cache & cache, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1) { + if (p0 < 0) p0 = 0; + if (p1 < 0) p1 = std::numeric_limits::max(); + + if (cache.recurrent) { + if ((uint32_t) seq_id_dst < cache.size && (uint32_t) seq_id_src < cache.size) { + llama_kv_cell & tail_src = cache.cells[seq_id_src]; + llama_kv_cell & tail_dst = cache.cells[seq_id_dst]; + if (tail_dst.tail >= 0) { + // clear destination seq_id if it wasn't empty + llama_kv_cell & cell_dst = cache.cells[tail_dst.tail]; + + cell_dst.seq_id.erase(seq_id_dst); + tail_dst.tail = -1; + if (cell_dst.seq_id.empty()) { + cell_dst.pos = -1; + cell_dst.delta = -1; + cell_dst.src = -1; + cache.used -= 1; + } + } + if (tail_src.tail >= 0) { + llama_kv_cell & cell_src = cache.cells[tail_src.tail]; + + cell_src.seq_id.insert(seq_id_dst); + tail_dst.tail = tail_src.tail; + } + } + + return; + } + // otherwise, this is the KV cache of a Transformer-like model + + cache.head = 0; + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { + cache.cells[i].seq_id.insert(seq_id_dst); + } + } +} + +void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) { + uint32_t new_head = cache.size; + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.recurrent && (llama_seq_id) i != seq_id) { + cache.cells[i].tail = -1; + } + if (!cache.cells[i].has_seq_id(seq_id)) { + if (cache.cells[i].pos >= 0) cache.used--; + cache.cells[i].pos = -1; + cache.cells[i].src = -1; + cache.cells[i].seq_id.clear(); + if (new_head == cache.size) new_head = i; + } else { + cache.cells[i].seq_id.clear(); + cache.cells[i].seq_id.insert(seq_id); + } + } + + // If we freed up a slot, set head to it so searching can start there. + if (new_head != cache.size && new_head < cache.head) cache.head = new_head; +} + +void llama_kv_cache_seq_add( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta) { + uint32_t new_head = cache.size; + + if (p0 < 0) p0 = 0; + if (p1 < 0) p1 = std::numeric_limits::max(); + // If there is no range then return early to avoid looping over the cache. + if (p0 == p1) return; + + if (cache.recurrent) { + // for Mamba-like or RWKV models, only the pos needs to be shifted + if (0 <= seq_id && seq_id < (int64_t) cache.size) { + const int32_t tail_id = cache.cells[seq_id].tail; + if (tail_id >= 0) { + llama_kv_cell & cell = cache.cells[tail_id]; + if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { + cell.pos += delta; + } + } + } + return; + } + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { + cache.has_shift = true; + cache.cells[i].pos += delta; + cache.cells[i].delta += delta; + + if (cache.cells[i].pos < 0) { + if (!cache.cells[i].is_empty()) { + cache.used--; + } + cache.cells[i].pos = -1; + cache.cells[i].seq_id.clear(); + if (new_head == cache.size) { + new_head = i; + } + } + } + } + + // If we freed up a slot, set head to it so searching can start there. + // Otherwise we just start the next search from the beginning. + cache.head = new_head != cache.size ? new_head : 0; +} + +void llama_kv_cache_seq_div( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d) { + if (p0 < 0) p0 = 0; + if (p1 < 0) p1 = std::numeric_limits::max(); + // If there is no range then return early to avoid looping over the cache. + if (p0 == p1) return; + + if (cache.recurrent) { + // for Mamba-like or RWKV models, only the pos needs to be changed + if (0 <= seq_id && seq_id < (int64_t) cache.size) { + const int32_t tail_id = cache.cells[seq_id].tail; + if (tail_id >= 0) { + llama_kv_cell & cell = cache.cells[tail_id]; + if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) { + cell.pos /= d; + } + } + } + return; + } + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) { + cache.has_shift = true; + + { + llama_pos p_old = cache.cells[i].pos; + cache.cells[i].pos /= d; + cache.cells[i].delta += cache.cells[i].pos - p_old; + } + } + } +} + +llama_pos llama_kv_cache_seq_pos_max(struct llama_kv_cache & cache, llama_seq_id seq_id) { + llama_pos result = 0; + + for (uint32_t i = 0; i < cache.size; ++i) { + if (cache.cells[i].has_seq_id(seq_id)) { + result = std::max(result, cache.cells[i].pos); + } + } + + return result; +} + +void llama_kv_cache_defrag(struct llama_kv_cache & cache) { + if (!cache.recurrent) { + cache.do_defrag = true; + } +} + +int32_t llama_get_kv_cache_token_count(const struct llama_kv_cache & kv) { + int result = 0; + + for (uint32_t i = 0; i < kv.size; i++) { + result += kv.cells[i].seq_id.size(); + } + + return result; +} + +int32_t llama_get_kv_cache_used_cells(const struct llama_kv_cache & kv) { + return kv.used; +} + +bool llama_kv_cache_can_shift(const struct llama_kv_cache & kv) { + return kv.can_shift; +} + +// +// kv cache view +// + +struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache & kv, int32_t n_seq_max) { + struct llama_kv_cache_view result = { + /*.n_cells = */ 0, + /*.n_seq_max = */ n_seq_max, + /*.token_count = */ 0, + /*.used_cells = */ llama_get_kv_cache_used_cells(kv), + /*.max_contiguous = */ 0, + /*.max_contiguous_idx = */ -1, + /*.cells = */ nullptr, + /*.cells_sequences = */ nullptr, + }; + + return result; +} + +void llama_kv_cache_view_free(struct llama_kv_cache_view * view) { + if (view->cells != nullptr) { + free(view->cells); + view->cells = nullptr; + } + if (view->cells_sequences != nullptr) { + free(view->cells_sequences); + view->cells_sequences = nullptr; + } +} + +void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_kv_cache & kv) { + if (uint32_t(view->n_cells) < kv.size || view->cells == nullptr) { + view->n_cells = int32_t(kv.size); + void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells); + GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells"); + view->cells = (struct llama_kv_cache_view_cell *)p; + p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_seq_max * view->n_cells); + GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences"); + view->cells_sequences = (llama_seq_id *)p; + } + + const std::vector & kv_cells = kv.cells; + llama_kv_cache_view_cell * c_curr = view->cells; + llama_seq_id * cs_curr = view->cells_sequences; + int32_t used_cells = 0; + int32_t token_count = 0; + int32_t curr_contig_idx = -1; + uint32_t max_contig = 0; + int32_t max_contig_idx = -1; + + for (int32_t i = 0; i < int32_t(kv.size); i++, c_curr++, cs_curr += view->n_seq_max) { + const size_t curr_size = kv_cells[i].seq_id.size(); + token_count += curr_size; + c_curr->pos = kv_cells[i].pos + kv_cells[i].delta; + + if (curr_size > 0) { + if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) { + max_contig = i - curr_contig_idx; + max_contig_idx = curr_contig_idx; + } + curr_contig_idx = -1; + } else if (curr_contig_idx < 0) { + curr_contig_idx = i; + } + + int seq_idx = 0; + for (const llama_seq_id it : kv_cells[i].seq_id) { + if (seq_idx >= view->n_seq_max) { + break; + } + cs_curr[seq_idx] = it; + seq_idx++; + } + if (seq_idx != 0) { + used_cells++; + } + for (; seq_idx < view->n_seq_max; seq_idx++) { + cs_curr[seq_idx] = -1; + } + } + if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) { + max_contig_idx = curr_contig_idx; + max_contig = kv_cells.size() - curr_contig_idx; + } + view->max_contiguous = max_contig; + view->max_contiguous_idx = max_contig_idx; + view->token_count = token_count; + view->used_cells = used_cells; + if (uint32_t(used_cells) != kv.used) { + LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n", + __func__, kv.used, used_cells); + } +} diff --git a/src/llama-kv-cache.h b/src/llama-kv-cache.h new file mode 100644 index 000000000..dca6f3998 --- /dev/null +++ b/src/llama-kv-cache.h @@ -0,0 +1,218 @@ +#pragma once + +#include "llama.h" + +#include "ggml-cpp.h" + +#include +#include + +struct llama_kv_cell { + llama_pos pos = -1; + llama_pos delta = 0; + int32_t src = -1; // used by recurrent state models to copy states + int32_t tail = -1; + + std::set seq_id; + + bool has_seq_id(const llama_seq_id & id) const { + return seq_id.find(id) != seq_id.end(); + } + + bool is_empty() const { + return seq_id.empty(); + } + + bool is_same_seq(const llama_kv_cell & other) const { + return seq_id == other.seq_id; + } +}; + +// ring-buffer of cached KV data +struct llama_kv_cache { + bool has_shift = false; + bool do_defrag = false; + bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token + bool v_trans = true; // the value tensor is transposed + bool can_shift = false; + + // Note: The value of head isn't only used to optimize searching + // for a free KV slot. llama_decode_internal also uses it, so it + // cannot be freely changed after a slot has been allocated. + uint32_t head = 0; + uint32_t size = 0; + uint32_t used = 0; // used cells (i.e. at least one seq_id) + + // computed before each graph build + uint32_t n = 0; + + ggml_type type_k = GGML_TYPE_F16; + ggml_type type_v = GGML_TYPE_F16; + + std::vector cells; + + std::vector k_l; // per layer + std::vector v_l; + + std::vector ctxs; + std::vector bufs; + + size_t total_size() const { + size_t size = 0; + for (const auto & buf : bufs) { + size += ggml_backend_buffer_get_size(buf.get()); + } + + return size; + } + + // TODO: better data structures to reduce the cost of this operation + llama_pos max_pos() const { + llama_pos max_pos = -1; + for (const auto & cell : cells) { + max_pos = std::max(max_pos, cell.pos); + } + + return max_pos; + } +}; + +// a structure holds information about the slot found in llama_kv_cache_find_slot +struct llama_kv_cache_slot_info { + std::pair boundaries; // slot boundaries [begin, end) + bool found = false; // the slot was found + + explicit llama_kv_cache_slot_info(bool found_) : found{found_} {} + llama_kv_cache_slot_info(uint32_t begin, uint32_t end) : boundaries{begin, end}, found{true} {} + + operator bool() const { return found; } +}; + +// TODO: maybe not needed +uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams); + +bool llama_kv_cache_init( + struct llama_kv_cache & cache, + const llama_model & model, + const llama_cparams & cparams, + ggml_type type_k, + ggml_type type_v, + uint32_t kv_size, + bool offload); + +// find an empty slot of size "n_tokens" in the cache +// updates the cache head +// returns a structure holding information about the slot found +// Note: On success, it's important that cache.head points +// to the first cell of the slot. +struct llama_kv_cache_slot_info llama_kv_cache_find_slot( + struct llama_kv_cache & cache, + const struct llama_ubatch & batch); + +// find how many cells are currently in use +uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache); + +void llama_kv_cache_clear(struct llama_kv_cache & cache); + +bool llama_kv_cache_seq_rm( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1); + +void llama_kv_cache_seq_cp( + struct llama_kv_cache & cache, + llama_seq_id seq_id_src, + llama_seq_id seq_id_dst, + llama_pos p0, + llama_pos p1); + +void llama_kv_cache_seq_keep( + struct llama_kv_cache & cache, + llama_seq_id seq_id); + +void llama_kv_cache_seq_add( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + llama_pos delta); + +void llama_kv_cache_seq_div( + struct llama_kv_cache & cache, + llama_seq_id seq_id, + llama_pos p0, + llama_pos p1, + int d); + +llama_pos llama_kv_cache_seq_pos_max( + struct llama_kv_cache & cache, + llama_seq_id seq_id); + +void llama_kv_cache_defrag(struct llama_kv_cache & cache); + +int32_t llama_get_kv_cache_token_count(const struct llama_kv_cache & kv); + +int32_t llama_get_kv_cache_used_cells(const struct llama_kv_cache & kv); + +bool llama_kv_cache_can_shift(const struct llama_kv_cache & kv); + +// +// kv cache view +// + +struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_kv_cache & kv, int32_t n_seq_max); + +void llama_kv_cache_view_update(struct llama_kv_cache_view * view, const struct llama_kv_cache & kv); + +// +// kv cache restore +// + +// saves the kv_cache state for future recovery. +// used to rollback llama_kv_cache_find_slot changes. +struct llama_kv_slot_restorer { + struct llama_kv_cache_state { + uint32_t head = 0; + uint32_t n = 0; + } old_state; + + // for non-recurrent models only + // list of slots to restore + std::vector> slot_boundaries; + + bool do_restore = false; + + explicit llama_kv_slot_restorer(const struct llama_kv_cache & cache) { + old_state.head = cache.head; + old_state.n = cache.n; + } + + // saves a slot information for future restoration + void save(const struct llama_kv_cache_slot_info & slot) { + if (slot) { + do_restore = true; + if (slot.boundaries.first != slot.boundaries.second) { + slot_boundaries.push_back(slot.boundaries); + } + } + } + + // must be explicitly called to restore the kv_cache state + // and rollback changes from all llama_kv_cache_find_slot calls + void restore(struct llama_kv_cache & cache) { + if (do_restore) { + cache.head = old_state.head; + cache.n = old_state.n; + + if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased + llama_kv_cache_seq_rm(cache, -1, -1, -1); + } else { + for (auto & slot : slot_boundaries) { + llama_kv_cache_seq_rm(cache, -1, slot.first, slot.second); + } + } + } + } +}; + diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp new file mode 100644 index 000000000..a99326335 --- /dev/null +++ b/src/llama-mmap.cpp @@ -0,0 +1,585 @@ +#include "llama-mmap.h" + +#include "llama-impl.h" + +#include "ggml.h" + +#include +#include +#include + +#ifdef __has_include + #if __has_include() + #include + #if defined(_POSIX_MAPPED_FILES) + #include + #include + #endif + #if defined(_POSIX_MEMLOCK_RANGE) + #include + #endif + #endif +#endif + +#if defined(_WIN32) + #define WIN32_LEAN_AND_MEAN + #ifndef NOMINMAX + #define NOMINMAX + #endif + #include + #ifndef PATH_MAX + #define PATH_MAX MAX_PATH + #endif + #include +#endif + +// TODO: consider moving to llama-impl.h if needed in more places +#if defined(_WIN32) +std::string llama_format_win_err(DWORD err) { + LPSTR buf; + size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); + if (!size) { + return "FormatMessageA failed"; + } + std::string ret(buf, size); + LocalFree(buf); + return ret; +} +#endif + +// llama_file + +struct llama_file::impl { +#if defined(_WIN32) + HANDLE fp_win32; + std::string GetErrorMessageWin32(DWORD error_code) const { + std::string ret; + LPSTR lpMsgBuf = NULL; + DWORD bufLen = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&lpMsgBuf, 0, NULL); + if (!bufLen) { + ret = format("Win32 error code: %lx", error_code); + } else { + ret = lpMsgBuf; + LocalFree(lpMsgBuf); + } + + return ret; + } + + impl(const char * fname, const char * mode) { + fp = ggml_fopen(fname, mode); + if (fp == NULL) { + throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno))); + } + fp_win32 = (HANDLE) _get_osfhandle(_fileno(fp)); + seek(0, SEEK_END); + size = tell(); + seek(0, SEEK_SET); + } + + size_t tell() const { + LARGE_INTEGER li; + li.QuadPart = 0; + BOOL ret = SetFilePointerEx(fp_win32, li, &li, FILE_CURRENT); + if (!ret) { + throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str())); + } + + return li.QuadPart; + } + + void seek(size_t offset, int whence) const { + static_assert(SEEK_SET == FILE_BEGIN, "SEEK_SET != FILE_BEGIN"); + static_assert(SEEK_CUR == FILE_CURRENT, "SEEK_CUR != FILE_CURRENT"); + static_assert(SEEK_END == FILE_END, "SEEK_END != FILE_END"); + + LARGE_INTEGER li; + li.QuadPart = offset; + BOOL ret = SetFilePointerEx(fp_win32, li, NULL, whence); + if (!ret) { + throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str())); + } + } + + void read_raw(void * ptr, size_t len) const { + size_t bytes_read = 0; + while (bytes_read < len) { + size_t chunk_size = std::min(len - bytes_read, 64*1024*1024); + DWORD chunk_read = 0; + BOOL result = ReadFile(fp_win32, reinterpret_cast(ptr) + bytes_read, chunk_size, &chunk_read, NULL); + if (!result) { + throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str())); + } + if (chunk_read < chunk_size || chunk_read == 0) { + throw std::runtime_error("unexpectedly reached end of file"); + } + + bytes_read += chunk_read; + } + } + + uint32_t read_u32() const { + uint32_t val; + read_raw(&val, sizeof(val)); + return val; + } + + void write_raw(const void * ptr, size_t len) const { + size_t bytes_written = 0; + while (bytes_written < len) { + size_t chunk_size = std::min(len - bytes_written, 64*1024*1024); + DWORD chunk_written = 0; + BOOL result = WriteFile(fp_win32, reinterpret_cast(ptr) + bytes_written, chunk_size, &chunk_written, NULL); + if (!result) { + throw std::runtime_error(format("write error: %s", GetErrorMessageWin32(GetLastError()).c_str())); + } + if (chunk_written < chunk_size || chunk_written == 0) { + throw std::runtime_error("unexpectedly failed to write bytes"); + } + + bytes_written += chunk_written; + } + } + + void write_u32(uint32_t val) const { + write_raw(&val, sizeof(val)); + } + + ~impl() { + if (fp) { + std::fclose(fp); + } + } +#else + impl(const char * fname, const char * mode) { + fp = ggml_fopen(fname, mode); + if (fp == NULL) { + throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno))); + } + seek(0, SEEK_END); + size = tell(); + seek(0, SEEK_SET); + } + + size_t tell() const { +// TODO: this ifdef is never true? +#ifdef _WIN32 + __int64 ret = _ftelli64(fp); +#else + long ret = std::ftell(fp); +#endif + if (ret == -1) { + throw std::runtime_error(format("ftell error: %s", strerror(errno))); + } + + return (size_t) ret; + } + + void seek(size_t offset, int whence) const { +// TODO: this ifdef is never true? +#ifdef _WIN32 + int ret = _fseeki64(fp, (__int64) offset, whence); +#else + int ret = std::fseek(fp, (long) offset, whence); +#endif + if (ret != 0) { + throw std::runtime_error(format("seek error: %s", strerror(errno))); + } + } + + void read_raw(void * ptr, size_t len) const { + if (len == 0) { + return; + } + errno = 0; + std::size_t ret = std::fread(ptr, len, 1, fp); + if (ferror(fp)) { + throw std::runtime_error(format("read error: %s", strerror(errno))); + } + if (ret != 1) { + throw std::runtime_error("unexpectedly reached end of file"); + } + } + + uint32_t read_u32() const { + uint32_t ret; + read_raw(&ret, sizeof(ret)); + return ret; + } + + void write_raw(const void * ptr, size_t len) const { + if (len == 0) { + return; + } + errno = 0; + size_t ret = std::fwrite(ptr, len, 1, fp); + if (ret != 1) { + throw std::runtime_error(format("write error: %s", strerror(errno))); + } + } + + void write_u32(uint32_t val) const { + write_raw(&val, sizeof(val)); + } + + ~impl() { + if (fp) { + std::fclose(fp); + } + } +#endif + + FILE * fp; + size_t size; +}; + +llama_file::llama_file(const char * fname, const char * mode) : pimpl(std::make_unique(fname, mode)) {} +llama_file::~llama_file() = default; + +size_t llama_file::tell() const { return pimpl->tell(); } +size_t llama_file::size() const { return pimpl->size; } + +int llama_file::fileno() const { +#ifdef _WIN32 + return _fileno(pimpl->fp); +#else + return ::fileno(pimpl->fp); +#endif +} + +void llama_file::seek(size_t offset, int whence) const { pimpl->seek(offset, whence); } +void llama_file::read_raw(void * ptr, size_t len) const { pimpl->read_raw(ptr, len); } + +uint32_t llama_file::read_u32() const { return pimpl->read_u32(); } + +void llama_file::write_raw(const void * ptr, size_t len) const { pimpl->write_raw(ptr, len); } +void llama_file::write_u32(uint32_t val) const { pimpl->write_u32(val); } + +// llama_mmap + +struct llama_mmap::impl { +#ifdef _POSIX_MAPPED_FILES + std::vector> mapped_fragments; + + impl(struct llama_file * file, size_t prefetch, bool numa) { + size = file->size(); + int fd = file->fileno(); + int flags = MAP_SHARED; + if (numa) { prefetch = 0; } +#ifdef __linux__ + if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) { + LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n", + strerror(errno)); + } + if (prefetch) { flags |= MAP_POPULATE; } +#endif + addr = mmap(NULL, file->size(), PROT_READ, flags, fd, 0); + if (addr == MAP_FAILED) { + throw std::runtime_error(format("mmap failed: %s", strerror(errno))); + } + + if (prefetch > 0) { + if (posix_madvise(addr, std::min(file->size(), prefetch), POSIX_MADV_WILLNEED)) { + LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n", + strerror(errno)); + } + } + if (numa) { + if (posix_madvise(addr, file->size(), POSIX_MADV_RANDOM)) { + LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n", + strerror(errno)); + } + } + + mapped_fragments.emplace_back(0, file->size()); + } + + static void align_range(size_t * first, size_t * last, size_t page_size) { + size_t offset_in_page = *first & (page_size - 1); + size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page; + *first += offset_to_page; + + *last = *last & ~(page_size - 1); + + if (*last <= *first) { + *last = *first; + } + } + + void unmap_fragment(size_t first, size_t last) { + int page_size = sysconf(_SC_PAGESIZE); + align_range(&first, &last, page_size); + size_t len = last - first; + + if (len == 0) { + return; + } + + GGML_ASSERT(first % page_size == 0); + GGML_ASSERT(last % page_size == 0); + GGML_ASSERT(last > first); + + void * next_page_start = (uint8_t *) addr + first; + + if (munmap(next_page_start, len)) { + LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno)); + } + + std::vector> new_mapped_fragments; + for (const auto & frag : mapped_fragments) { + if (frag.first < first && frag.second > last) { + new_mapped_fragments.emplace_back(frag.first, first); + new_mapped_fragments.emplace_back(last, frag.second); + } else if (frag.first < first && frag.second > first) { + new_mapped_fragments.emplace_back(frag.first, first); + } else if (frag.first < last && frag.second > last) { + new_mapped_fragments.emplace_back(last, frag.second); + } else if (frag.first >= first && frag.second <= last) { + } else { + new_mapped_fragments.push_back(frag); + } + } + mapped_fragments = std::move(new_mapped_fragments); + } + + ~impl() { + for (const auto & frag : mapped_fragments) { + if (munmap((char *) addr + frag.first, frag.second - frag.first)) { + LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno)); + } + } + } +#elif defined(_WIN32) + impl(struct llama_file * file, size_t prefetch, bool numa) { + GGML_UNUSED(numa); + + size = file->size(); + + HANDLE hFile = (HANDLE) _get_osfhandle(file->fileno()); + + HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); + + if (hMapping == NULL) { + DWORD error = GetLastError(); + throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str())); + } + + addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); + DWORD error = GetLastError(); + CloseHandle(hMapping); + + if (addr == NULL) { + throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str())); + } + + if (prefetch > 0) { +#if _WIN32_WINNT >= 0x602 + BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG); + HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll"); + + pPrefetchVirtualMemory = (decltype(pPrefetchVirtualMemory))(void *) GetProcAddress(hKernel32, "PrefetchVirtualMemory"); + + if (pPrefetchVirtualMemory) { + WIN32_MEMORY_RANGE_ENTRY range; + range.VirtualAddress = addr; + range.NumberOfBytes = (SIZE_T) std::min(size, prefetch); + if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) { + LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } +#else + throw std::runtime_error("PrefetchVirtualMemory unavailable"); +#endif + } + } + + void unmap_fragment(size_t first, size_t last) { + GGML_UNUSED(first); + GGML_UNUSED(last); + } + + ~impl() { + if (!UnmapViewOfFile(addr)) { + LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } +#else + impl(struct llama_file * file, size_t prefetch, bool numa) { + GGML_UNUSED(file); + GGML_UNUSED(prefetch); + GGML_UNUSED(numa); + + throw std::runtime_error("mmap not supported"); + } + + void unmap_fragment(size_t first, size_t last) { + GGML_UNUSED(first); + GGML_UNUSED(last); + + throw std::runtime_error("mmap not supported"); + } +#endif + + void * addr; + size_t size; +}; + +llama_mmap::llama_mmap(struct llama_file * file, size_t prefetch, bool numa) : pimpl(std::make_unique(file, prefetch, numa)) {} +llama_mmap::~llama_mmap() = default; + +size_t llama_mmap::size() const { return pimpl->size; } +void * llama_mmap::addr() const { return pimpl->addr; } + +void llama_mmap::unmap_fragment(size_t first, size_t last) { pimpl->unmap_fragment(first, last); } + +#if defined(_POSIX_MEMLOCK_RANGE) || defined(_WIN32) +const bool llama_mmap::SUPPORTED = true; +#else +const bool llama_mmap::SUPPORTED = false; +#endif + +// llama_mlock + +struct llama_mlock::impl { +#ifdef _POSIX_MEMLOCK_RANGE + static size_t lock_granularity() { + return (size_t) sysconf(_SC_PAGESIZE); + } + + bool raw_lock(const void * addr, size_t size) const { + if (!mlock(addr, size)) { + return true; + } + +#ifdef __APPLE__ +#define MLOCK_SUGGESTION \ + "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \ + "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MEMLOCK (ulimit -l).\n" +#else +#define MLOCK_SUGGESTION \ + "Try increasing RLIMIT_MEMLOCK ('ulimit -l' as root).\n" +#endif + + char* errmsg = std::strerror(errno); + bool suggest = (errno == ENOMEM); + + struct rlimit lock_limit; + if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) { + suggest = false; + } + if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) { + suggest = false; + } + + LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", + size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : ""); + return false; + } + + static void raw_unlock(void * addr, size_t size) { + if (munlock(addr, size)) { + LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno)); + } + } +#elif defined(_WIN32) + static size_t lock_granularity() { + SYSTEM_INFO si; + GetSystemInfo(&si); + return (size_t) si.dwPageSize; + } + + bool raw_lock(void * ptr, size_t len) const { + for (int tries = 1; ; tries++) { + if (VirtualLock(ptr, len)) { + return true; + } + if (tries == 2) { + LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n", + len, size, llama_format_win_err(GetLastError()).c_str()); + return false; + } + + SIZE_T min_ws_size, max_ws_size; + if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) { + LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + return false; + } + size_t increment = len + 1048576; + min_ws_size += increment; + max_ws_size += increment; + if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) { + LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + return false; + } + } + } + + static void raw_unlock(void * ptr, size_t len) { + if (!VirtualUnlock(ptr, len)) { + LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } +#else + static size_t lock_granularity() { + return (size_t) 65536; + } + + bool raw_lock(const void * addr, size_t len) const { + LLAMA_LOG_WARN("warning: mlock not supported on this system\n"); + return false; + } + + static void raw_unlock(const void * addr, size_t len) {} +#endif + + impl() : addr(NULL), size(0), failed_already(false) {} + + void init(void * ptr) { + GGML_ASSERT(addr == NULL && size == 0); + addr = ptr; + } + + void grow_to(size_t target_size) { + GGML_ASSERT(addr); + if (failed_already) { + return; + } + size_t granularity = lock_granularity(); + target_size = (target_size + granularity - 1) & ~(granularity - 1); + if (target_size > size) { + if (raw_lock((uint8_t *) addr + size, target_size - size)) { + size = target_size; + } else { + failed_already = true; + } + } + } + + void * addr; + size_t size; + + bool failed_already; +}; + +llama_mlock::llama_mlock() : pimpl(std::make_unique()) {} +llama_mlock::~llama_mlock() = default; + +void llama_mlock::init(void * ptr) { pimpl->init(ptr); } +void llama_mlock::grow_to(size_t target_size) { pimpl->grow_to(target_size); } + +#if defined(_POSIX_MEMLOCK_RANGE) || defined(_WIN32) +const bool llama_mlock::SUPPORTED = true; +#else +const bool llama_mlock::SUPPORTED = false; +#endif + +size_t llama_path_max() { + return PATH_MAX; +} diff --git a/src/llama-mmap.h b/src/llama-mmap.h new file mode 100644 index 000000000..6bcddee8c --- /dev/null +++ b/src/llama-mmap.h @@ -0,0 +1,67 @@ +#pragma once + +#include +#include + +struct llama_file; +struct llama_mmap; +struct llama_mlock; + +using llama_files = std::vector>; +using llama_mmaps = std::vector>; +using llama_mlocks = std::vector>; + +struct llama_file { + llama_file(const char * fname, const char * mode); + ~llama_file(); + + size_t tell() const; + size_t size() const; + + int fileno() const; + + void seek(size_t offset, int whence) const; + + void read_raw(void * ptr, size_t len) const; + uint32_t read_u32() const; + + void write_raw(const void * ptr, size_t len) const; + void write_u32(uint32_t val) const; + +private: + struct impl; + std::unique_ptr pimpl; +}; + +struct llama_mmap { + llama_mmap(const llama_mmap &) = delete; + llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false); + ~llama_mmap(); + + size_t size() const; + void * addr() const; + + void unmap_fragment(size_t first, size_t last); + + static const bool SUPPORTED; + +private: + struct impl; + std::unique_ptr pimpl; +}; + +struct llama_mlock { + llama_mlock(); + ~llama_mlock(); + + void init(void * ptr); + void grow_to(size_t target_size); + + static const bool SUPPORTED; + +private: + struct impl; + std::unique_ptr pimpl; +}; + +size_t llama_path_max(); diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp new file mode 100644 index 000000000..7743b4652 --- /dev/null +++ b/src/llama-model-loader.cpp @@ -0,0 +1,1010 @@ +#include "llama-model-loader.h" + +#include "ggml.h" + +#include +#include +#include +#include + +const char * llama_file_version_name(llama_fver version) { + switch (version) { + case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)"; + case GGUF_FILE_VERSION_V2: return "GGUF V2"; + case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)"; + } + + return "unknown"; +} + +namespace GGUFMeta { + template + struct GKV_Base_Type { + static constexpr gguf_type gt = gt_; + + static T getter(const gguf_context * ctx, const int kid) { + return gfun(ctx, kid); + } + }; + + template struct GKV_Base; + + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + template<> struct GKV_Base: GKV_Base_Type {}; + + template<> struct GKV_Base { + static constexpr gguf_type gt = GGUF_TYPE_STRING; + + static std::string getter(const gguf_context * ctx, const int kid) { + return gguf_get_val_str(ctx, kid); + } + }; + + struct ArrayInfo { + const gguf_type gt; + const size_t length; + const void * data; + }; + + template<> struct GKV_Base { + public: + static constexpr gguf_type gt = GGUF_TYPE_ARRAY; + static ArrayInfo getter(const gguf_context *ctx, const int k) { + return ArrayInfo { + gguf_get_arr_type(ctx, k), + size_t(gguf_get_arr_n(ctx, k)), + gguf_get_arr_data(ctx, k), + }; + } + }; + + template + class GKV : public GKV_Base { + GKV() = delete; + + public: + static T get_kv(const gguf_context * ctx, const int k) { + const enum gguf_type kt = gguf_get_kv_type(ctx, k); + + if (kt != GKV::gt) { + throw std::runtime_error(format("key %s has wrong type %s but expected type %s", + gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt))); + } + return GKV::getter(ctx, k); + } + + static const char * override_type_to_str(const llama_model_kv_override_type ty) { + switch (ty) { + case LLAMA_KV_OVERRIDE_TYPE_BOOL: return "bool"; + case LLAMA_KV_OVERRIDE_TYPE_INT: return "int"; + case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float"; + case LLAMA_KV_OVERRIDE_TYPE_STR: return "str"; + } + return "unknown"; + } + + static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) { + if (!ovrd) { return false; } + if (ovrd->tag == expected_type) { + LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ", + __func__, override_type_to_str(ovrd->tag), ovrd->key); + switch (ovrd->tag) { + case LLAMA_KV_OVERRIDE_TYPE_BOOL: { + LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false"); + } break; + case LLAMA_KV_OVERRIDE_TYPE_INT: { + LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64); + } break; + case LLAMA_KV_OVERRIDE_TYPE_FLOAT: { + LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64); + } break; + case LLAMA_KV_OVERRIDE_TYPE_STR: { + LLAMA_LOG_INFO("%s\n", ovrd->val_str); + } break; + default: + // Shouldn't be possible to end up here, but just in case... + throw std::runtime_error( + format("Unsupported attempt to override %s type for metadata key %s\n", + override_type_to_str(ovrd->tag), ovrd->key)); + } + return true; + } + LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n", + __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag)); + return false; + } + + template + static typename std::enable_if::value, bool>::type + try_override(OT & target, const struct llama_model_kv_override * ovrd) { + if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) { + target = ovrd->val_bool; + return true; + } + return false; + } + + template + static typename std::enable_if::value && std::is_integral::value, bool>::type + try_override(OT & target, const struct llama_model_kv_override * ovrd) { + if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) { + target = ovrd->val_i64; + return true; + } + return false; + } + + template + static typename std::enable_if::value, bool>::type + try_override(T & target, const struct llama_model_kv_override * ovrd) { + if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) { + target = ovrd->val_f64; + return true; + } + return false; + } + + template + static typename std::enable_if::value, bool>::type + try_override(T & target, const struct llama_model_kv_override * ovrd) { + if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) { + target = ovrd->val_str; + return true; + } + return false; + } + + static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) { + if (try_override(target, ovrd)) { + return true; + } + if (k < 0) { return false; } + target = get_kv(ctx, k); + return true; + } + + static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) { + return set(ctx, gguf_find_key(ctx, key), target, ovrd); + } + + static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) { + return set(ctx, key.c_str(), target, ovrd); + } + }; +} + + template + typename std::enable_if::value, bool>::type + llama_model_loader::get_arr_n(const std::string & key, T & result, bool required) { + const int kid = gguf_find_key(meta.get(), key.c_str()); + + if (kid < 0) { + if (required) { + throw std::runtime_error(format("key not found in model: %s", key.c_str())); + } + return false; + } + + struct GGUFMeta::ArrayInfo arr_info = + GGUFMeta::GKV::get_kv(meta.get(), kid); + + + result = arr_info.length; + return true; + } + + template + typename std::enable_if::value, bool>::type + llama_model_loader::get_arr_n(enum llm_kv kid, T & result, bool required) { + return get_arr_n(llm_kv(kid), result, required); + } + + template bool llama_model_loader::get_arr_n(enum llm_kv kid, uint32_t & result, bool required); + + template + bool llama_model_loader::get_arr(const std::string & key, std::vector & result, bool required) { + const int kid = gguf_find_key(meta.get(), key.c_str()); + + if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) { + if (required) { + throw std::runtime_error(format("array key not found in model: %s", key.c_str())); + } + return false; + } + + struct GGUFMeta::ArrayInfo arr_info = + GGUFMeta::GKV::get_kv(meta.get(), kid); + + switch (arr_info.gt) { + case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same::value)); break; + case GGUF_TYPE_INT32: GGML_ASSERT( + (std::is_same::value) || + (std::is_same::value)); break; + default: + throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str())); + } + + result.resize(arr_info.length); + result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length); + + return true; + } + + template + bool llama_model_loader::get_arr(const std::string & key, std::array & result, bool required) { + const int kid = gguf_find_key(meta.get(), key.c_str()); + + if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) { + if (required) { + throw std::runtime_error(format("array key not found in model: %s", key.c_str())); + } + return false; + } + + struct GGUFMeta::ArrayInfo arr_info = + GGUFMeta::GKV::get_kv(meta.get(), kid); + + switch (arr_info.gt) { + case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same::value)); break; + case GGUF_TYPE_INT32: GGML_ASSERT( + (std::is_same::value) || + (std::is_same::value)); break; + default: + throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str())); + } + + if (arr_info.length > N_MAX) { + throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX)); + } + + std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin()); + + return true; + } + + template + bool llama_model_loader::get_arr(enum llm_kv kid, T & result, bool required) { + return get_arr(llm_kv(kid), result, required); + } + + template + bool llama_model_loader::get_key(const std::string & key, T & result, bool required) { + auto it = kv_overrides.find(key); + + const struct llama_model_kv_override * override = + it != kv_overrides.end() ? &it->second : nullptr; + + const bool found = GGUFMeta::GKV::set(meta.get(), key, result, override); + + if (required && !found) { + throw std::runtime_error(format("key not found in model: %s", key.c_str())); + } + + return found; + } + + template + bool llama_model_loader::get_key(enum llm_kv kid, T & result, bool required) { + return get_key(llm_kv(kid), result, required); + } + + template bool llama_model_loader::get_key (enum llm_kv kid, bool & result, bool required); + template bool llama_model_loader::get_key (enum llm_kv kid, float & result, bool required); + template bool llama_model_loader::get_key (enum llm_kv kid, uint32_t & result, bool required); + template bool llama_model_loader::get_key(enum llm_kv kid, std::string & result, bool required); + + template<> + bool llama_model_loader::get_key(enum llm_kv kid, enum llama_pooling_type & result, bool required) { + uint32_t tmp; + const bool found = get_key(kid, tmp, required); + if (found) { + result = (enum llama_pooling_type) tmp; + } else { + result = LLAMA_POOLING_TYPE_UNSPECIFIED; + } + return found; + } + + // get array of n <= N_MAX elements, or a single element repeated n times + template + bool llama_model_loader::get_key_or_arr(const std::string & key, std::array & result, uint32_t n, bool required) { + const int kid = gguf_find_key(meta.get(), key.c_str()); + + if (kid < 0) { + if (required) { + throw std::runtime_error(format("key not found in model: %s", key.c_str())); + } + return false; + } + + if (n > N_MAX) { + throw std::runtime_error(format("n > N_MAX: %u > %u for key %s", (uint32_t) n, (uint32_t) N_MAX, key.c_str())); + } + + if (gguf_get_kv_type(meta.get(), kid) == GGUF_TYPE_ARRAY) { + struct GGUFMeta::ArrayInfo arr_info = + GGUFMeta::GKV::get_kv(meta.get(), kid); + + if (n != arr_info.length) { + throw std::runtime_error(format("key %s has wrong array length; expected %u, got %u", key.c_str(), n, (uint32_t) arr_info.length)); + } + + return get_arr(key, result, required); + } + + T value; + + bool ok = get_key(key, value, required); + if (!ok) { + return false; + } + + for (uint32_t i = 0; i < n; i++) { + result[i] = value; + } + + return true; + } + + template + bool llama_model_loader::get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required) { + return get_key_or_arr(llm_kv(kid), result, n, required); + } + + // TODO: this is not very clever - figure out something better + template bool llama_model_loader::get_key_or_arr>(enum llm_kv kid, std::array & result, uint32_t n, bool required); + template bool llama_model_loader::get_key_or_arr>(enum llm_kv kid, std::array & result, uint32_t n, bool required); + +llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) { + int trace = 0; + if (getenv("LLAMA_TRACE")) { + trace = atoi(getenv("LLAMA_TRACE")); + } + + if (param_overrides_p != nullptr) { + for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) { + kv_overrides.insert({std::string(p->key), *p}); + } + } + + struct ggml_context * ctx = NULL; + struct gguf_init_params params = { + /*.no_alloc = */ true, + /*.ctx = */ &ctx, + }; + + meta.reset(gguf_init_from_file(fname.c_str(), params)); + if (!meta) { + throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str())); + } + + get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false); + llm_kv = LLM_KV(llm_arch_from_string(arch_name)); + + files.emplace_back(new llama_file(fname.c_str(), "rb")); + contexts.emplace_back(ctx); + + // Save tensors data offset of the main file. + // For subsidiary files, `meta` tensor data offset must not be used, + // so we build a unified tensors index for weights. + for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) { + std::string tensor_name = std::string(cur->name); + // make sure there is no duplicated tensor names + if (weights_map.find(tensor_name) != weights_map.end()) { + throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur))); + } + n_elements += ggml_nelements(cur); + n_bytes += ggml_nbytes(cur); + weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), 0, meta.get(), cur)); + } + uint16_t n_split = 0; + get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false); + + // Load additional GGML contexts + if (n_split > 1) { + uint16_t idx = 0; + get_key(llm_kv(LLM_KV_SPLIT_NO), idx); + if (idx != 0) { + throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx)); + } + + std::vector split_prefix(llama_path_max(), 0); + if (!llama_split_prefix(split_prefix.data(), split_prefix.size(), fname.c_str(), idx, n_split)) { + throw std::runtime_error(format("invalid split file: %s", fname.c_str())); + } + + if (trace > 0) { + LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split); + } + + std::vector split_path(llama_path_max(), 0); + for (idx = 1; idx < n_split; idx++) { + llama_split_path(split_path.data(), split_path.size(), split_prefix.data(), idx, n_split); + + struct gguf_init_params split_params = { + /*.no_alloc = */ true, + /*.ctx = */ &ctx, + }; + gguf_context_ptr ctx_gguf { gguf_init_from_file(split_path.data(), split_params) }; + if (!ctx_gguf) { + throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path.data())); + } + + files.emplace_back(new llama_file(split_path.data(), "rb")); + contexts.emplace_back(ctx); + + // Save tensors data offset info of the shard. + for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) { + std::string tensor_name = std::string(cur->name); + // make sure there is no duplicated tensor names + if (weights_map.find(tensor_name) != weights_map.end()) { + throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur))); + } + n_elements += ggml_nelements(cur); + n_bytes += ggml_nbytes(cur); + weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), idx, ctx_gguf.get(), cur)); + } + } + + get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors); + + // sanity check + { + const int n_tensors_loaded = (int) weights_map.size(); + if (n_tensors != n_tensors_loaded) { + throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded)); + } + } + + LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n", __func__, n_split - 1); + } + + n_kv = gguf_get_n_kv(meta.get()); + n_tensors = weights_map.size(); + + fver = (enum llama_fver) gguf_get_version(meta.get()); + + LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n", + __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver)); + + // determine file type based on the number of tensors for each quantization and print meta data + // TODO: make optional + { + std::map n_type; + + uint32_t n_type_max = 0; + enum ggml_type type_max = GGML_TYPE_F32; + + for (const auto & it : weights_map) { + const llama_tensor_weight & w = it.second; + const ggml_tensor * tensor = w.tensor; + + enum ggml_type type = tensor->type; + + n_type[type]++; + + if (n_type_max < n_type[type]) { + n_type_max = n_type[type]; + type_max = type; + } + + if (trace > 0) { + const uint16_t sid = w.idx; + LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ]\n", __func__, sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str()); + } + } + + switch (type_max) { + case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break; + case GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break; + case GGML_TYPE_BF16: ftype = LLAMA_FTYPE_MOSTLY_BF16; break; + case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break; + case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break; + case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break; + case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break; + case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break; + case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break; + case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break; + case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break; + case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break; + case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break; + case GGML_TYPE_TQ1_0: ftype = LLAMA_FTYPE_MOSTLY_TQ1_0; break; + case GGML_TYPE_TQ2_0: ftype = LLAMA_FTYPE_MOSTLY_TQ2_0; break; + case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break; + case GGML_TYPE_IQ2_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS; break; + case GGML_TYPE_IQ2_S: ftype = LLAMA_FTYPE_MOSTLY_IQ2_S; break; + case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break; + case GGML_TYPE_IQ1_S: ftype = LLAMA_FTYPE_MOSTLY_IQ1_S; break; + case GGML_TYPE_IQ1_M: ftype = LLAMA_FTYPE_MOSTLY_IQ1_M; break; + case GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break; + case GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break; + case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break; + default: + { + LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max)); + ftype = LLAMA_FTYPE_ALL_F32; + } break; + } + + // this is a way to mark that we have "guessed" the file type + ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED); + + { + const int kid = gguf_find_key(meta.get(), "general.file_type"); // TODO: use LLM_KV + if (kid >= 0) { + ftype = (llama_ftype) gguf_get_val_u32(meta.get(), kid); + } + } + + LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__); + + for (int i = 0; i < n_kv; i++) { + const char * name = gguf_get_key(meta.get(), i); + const enum gguf_type type = gguf_get_kv_type(meta.get(), i); + const std::string type_name = + type == GGUF_TYPE_ARRAY + ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i)) + : gguf_type_name(type); + + std::string value = gguf_kv_to_str(meta.get(), i); + const size_t MAX_VALUE_LEN = 40; + if (value.size() > MAX_VALUE_LEN) { + value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str()); + } + replace_all(value, "\n", "\\n"); + + LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str()); + } + + // print type counts + for (auto & kv : n_type) { + if (kv.second == 0) { + continue; + } + + LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second); + } + } + + if (!llama_mmap::SUPPORTED) { + LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__); + use_mmap = false; + } + + this->use_mmap = use_mmap; + this->check_tensors = check_tensors; +} + +std::string llama_model_loader::get_arch_name() const { + return arch_name; +} + +enum llm_arch llama_model_loader::get_arch() const { + return llm_kv.arch; +} + +const llama_model_loader::llama_tensor_weight * llama_model_loader::get_weight(const char * name) const { + auto pos = weights_map.find(name); + if (pos != weights_map.end()) { + return &pos->second; + } + + return nullptr; +} + +const llama_model_loader::llama_tensor_weight & llama_model_loader::require_weight(const char * name) const { + const llama_tensor_weight * weight = get_weight(name); + if (!weight) { + throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name)); + } + return *weight; +} + +struct ggml_tensor * llama_model_loader::get_tensor_meta(const char * name) const { + const auto * weight = get_weight(name); + if (!weight) { + return nullptr; + } + return weight->tensor; +} + +struct ggml_tensor * llama_model_loader::require_tensor_meta(const std::string & name) const { + struct ggml_tensor * tensor = get_tensor_meta(name.c_str()); + if (!tensor) { + throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); + } + return tensor; +} + +const struct ggml_tensor * llama_model_loader::check_tensor_dims(const std::string & name, const std::vector & ne, bool required) const { + const struct ggml_tensor * cur = get_tensor_meta(name.c_str()); + + if (cur == NULL) { + if (!required) { + return NULL; + } + throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); + } + + { + bool is_ok = true; + for (size_t i = 0; i < GGML_MAX_DIMS; ++i) { + if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) { + is_ok = false; + break; + } + } + if (!is_ok) { + throw std::runtime_error( + format("%s: tensor '%s' has wrong shape; expected %s, got %s", + __func__, name.c_str(), + llama_format_tensor_shape(ne).c_str(), + llama_format_tensor_shape(cur).c_str())); + } + } + + return cur; +} + +struct ggml_tensor * llama_model_loader::create_tensor(struct ggml_context * ctx, const std::string & name, const std::initializer_list & ne, int flags) { + const struct ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED)); + + if (cur == NULL) { + return NULL; + } + + bool duplicated = flags & TENSOR_DUPLICATED; + + struct ggml_tensor * tensor = ggml_dup_tensor(ctx, cur); + ggml_set_name(tensor, ggml_get_name(cur)); + + if (duplicated) { + size_data += ggml_nbytes(cur); + } else { + n_created++; + } + + return tensor; + +} + +struct ggml_tensor * llama_model_loader::create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::initializer_list & ne, size_t offset, bool required) { + const struct ggml_tensor * cur = check_tensor_dims(name, ne, required); + + if (cur == NULL) { + return NULL; + } + + if (cur->type != base->type) { + throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), ggml_type_name(base->type), ggml_type_name(cur->type))); + } + + std::array dims; + for (size_t i = 0; i < GGML_MAX_DIMS; ++i) { + dims[i] = i < ne.size() ? ne.begin()[i] : 1; + } + + struct ggml_tensor * tensor = ggml_view_4d(ctx, base, + dims[0], dims[1], dims[2], dims[3], + cur->nb[1], cur->nb[2], cur->nb[3], + offset); + + ggml_set_name(tensor, name.c_str()); + + n_created++; + + return tensor; +} + +void llama_model_loader::done_getting_tensors() const { + if (n_created != n_tensors) { + throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created)); + } +} + +void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps) { + if (use_mmap) { + mappings.reserve(files.size()); + mmaps_used.reserve(files.size()); + for (const auto & file : files) { + auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU)); + auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa"); + std::unique_ptr mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, is_numa_fn())); + mmaps_used.emplace_back(mapping->size(), 0); + if (mlock_mmaps) { + std::unique_ptr mlock_mmap(new llama_mlock()); + mlock_mmap->init(mapping->addr()); + mlock_mmaps->emplace_back(std::move(mlock_mmap)); + } + mappings.emplace_back(std::move(mapping)); + } + } + + // compute the total size of all tensors for progress reporting + for (const auto & it : weights_map) { + size_data += ggml_nbytes(it.second.tensor); + } +} + +void llama_model_loader::get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const { + GGML_ASSERT(!mappings.empty()); + const auto & mapping = mappings.at(idx); + + *first = mapping->size(); + *last = 0; + *addr = mapping->addr(); + for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) { + const auto * weight = get_weight(ggml_get_name(tensor)); + if (!weight || weight->idx != idx) { + continue; + } + *first = std::min(*first, weight->offs); + *last = std::max(*last, weight->offs + ggml_nbytes(tensor)); + } +} + +void llama_model_loader::load_data_for(struct ggml_tensor * cur) const { + const auto & w = require_weight(ggml_get_name(cur)); + + if (use_mmap) { + const auto & mapping = mappings.at(w.idx); + if (cur->data == nullptr) { + cur->data = (uint8_t *)mapping->addr() + w.offs; + } else { + memcpy(cur->data, (uint8_t *)mapping->addr() + w.offs, ggml_nbytes(cur)); + } + } else { + GGML_ASSERT(cur->data != nullptr); + GGML_ASSERT(w.idx < files.size()); + const auto & file = files.at(w.idx); + file->seek(w.offs, SEEK_SET); + file->read_raw(cur->data, ggml_nbytes(cur)); + } + + if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) { + throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur))); + } +} + +bool llama_model_loader::load_all_data( + struct ggml_context * ctx, + llama_buf_map & bufs, + llama_mlocks * lmlocks, + llama_progress_callback progress_callback, + void * progress_callback_user_data) { + GGML_ASSERT(size_data != 0 && "call init_mappings() first"); + + std::vector> read_buf; + std::vector>> validation_result; + + // 4 staging buffers for async uploads, each sized 1MB seems to be a good default for single NVMe drives. + // NVMe raid configurations might require more / larger buffers. + constexpr size_t n_buffers = 4; + constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB + + std::vector host_buffers; + std::vector events; + std::vector host_ptrs; + size_t buffer_idx = 0; // buffer to use for async loads + ggml_backend_t upload_backend = [&](const char * func) -> ggml_backend_t { + if (use_mmap || check_tensors) { + return nullptr; + } + // When not using mmaped io use async uploads from pinned memory to GPU memory. + // First determine if the backend supports the necessary features for async uploads. + auto * buf = bufs.count(0) ? bufs.at(0) : nullptr; + if (!buf) { + LLAMA_LOG_DEBUG("%s: no buffer found for async uploads\n", func); + return nullptr; + } + + auto * buft = ggml_backend_buffer_get_type(buf); + auto * dev = ggml_backend_buft_get_device(buft); + if (!dev) { + LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func, + ggml_backend_buft_name(buft)); + return nullptr; + } + + if (buft != ggml_backend_dev_buffer_type(dev)) { + LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func, + ggml_backend_buft_name(buft), ggml_backend_dev_name(dev)); + return nullptr; + } + + ggml_backend_dev_props props; + ggml_backend_dev_get_props(dev, &props); + if (!props.caps.async || !props.caps.host_buffer || !props.caps.events) { + LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + auto * host_buft = ggml_backend_dev_host_buffer_type(dev); + if (!host_buft) { + LLAMA_LOG_DEBUG("%s: no host buffer type found for device %s\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + // If the backend is supported, create pinned memory buffers and events for synchronisation. + for (size_t idx = 0; idx < n_buffers; ++idx) { + auto * buf = ggml_backend_buft_alloc_buffer(host_buft, buffer_size); + if (!buf) { + LLAMA_LOG_DEBUG("%s: failed to allocate host buffer for async uploads for device %s\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + host_buffers.emplace_back(buf); + host_ptrs.emplace_back(ggml_backend_buffer_get_base(buf)); + + auto * event = ggml_backend_event_new(dev); + if (!event) { + LLAMA_LOG_DEBUG("%s: failed to create event for async uploads for device %s\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + events.emplace_back(event); + } + + ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); + if (!backend) { + LLAMA_LOG_DEBUG("%s: failed to initialize backend for device %s for async uploads\n", func, + ggml_backend_dev_name(dev)); + return nullptr; + } + + return backend; + }(__func__); + + if (upload_backend) { + LLAMA_LOG_DEBUG("%s: using async uploads for device %s, buffer type %s, backend %s\n", __func__, + ggml_backend_dev_name(ggml_backend_get_device(upload_backend)), + ggml_backend_buft_name(ggml_backend_buffer_get_type(bufs.at(0))), + ggml_backend_name(upload_backend)); + } + + for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) { + const auto * weight = get_weight(ggml_get_name(cur)); + if (weight == nullptr) { + // this can happen with split experts models + continue; + } + + if (progress_callback) { + if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) { + return false; + } + } + + size_t n_size = ggml_nbytes(cur); + + if (use_mmap) { + const auto & mapping = mappings.at(weight->idx); + ggml_backend_buffer_t buf_mmap = nullptr; + if (bufs.count(weight->idx)) { + buf_mmap = bufs.at(weight->idx); + } + uint8_t * data = (uint8_t *) mapping->addr() + weight->offs; + + if (check_tensors) { + validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] { + return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size)); + })); + } + + GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated + if (buf_mmap && cur->data == nullptr) { + ggml_backend_tensor_alloc(buf_mmap, cur, data); + if (lmlocks) { + const auto & lmlock = lmlocks->at(weight->idx); + lmlock->grow_to(weight->offs + n_size); + } + + auto & mmap_used = mmaps_used[weight->idx]; + mmap_used.first = std::min(mmap_used.first, weight->offs); + mmap_used.second = std::max(mmap_used.second, weight->offs + n_size); + } else { + ggml_backend_tensor_set(cur, data, 0, n_size); + } + } else { + const auto & file = files.at(weight->idx); + if (ggml_backend_buffer_is_host(cur->buffer)) { + file->seek(weight->offs, SEEK_SET); + file->read_raw(cur->data, n_size); + if (check_tensors) { + validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] { + return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size)); + })); + } + } else { + // If upload_backend is valid load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU. + if (upload_backend) { + file->seek(weight->offs, SEEK_SET); + + size_t bytes_read = 0; + + while (bytes_read < n_size) { + size_t read_iteration = std::min(buffer_size, n_size - bytes_read); + + ggml_backend_event_synchronize(events[buffer_idx]); + file->read_raw(host_ptrs[buffer_idx], read_iteration); + ggml_backend_tensor_set_async(upload_backend, cur, host_ptrs[buffer_idx], bytes_read, read_iteration); + ggml_backend_event_record(events[buffer_idx], upload_backend); + + bytes_read += read_iteration; + ++buffer_idx; + buffer_idx %= n_buffers; + } + } else { + read_buf.resize(n_size); + file->seek(weight->offs, SEEK_SET); + file->read_raw(read_buf.data(), n_size); + ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size); + if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) { + throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur))); + } + } + } + } + + size_done += n_size; + } + + // free temporary resources used for async uploads + for (auto * event : events) { + ggml_backend_event_synchronize(event); + ggml_backend_event_free(event); + } + for (auto * buf : host_buffers) { + ggml_backend_buffer_free(buf); + } + ggml_backend_free(upload_backend); + + // check validation results + bool validation_failed = false; + for (auto & future : validation_result) { + auto result = future.get(); + if (!result.second) { + LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first)); + validation_failed = true; + } + } + if (validation_failed) { + throw std::runtime_error("found tensors with invalid data"); + } + + // check if this is the last call and do final cleanup + if (size_done >= size_data) { + // unmap offloaded tensors and metadata + if (use_mmap) { + for (uint32_t idx = 0; idx < mappings.size(); idx++) { + const auto & mmap_used = mmaps_used.at(idx); + auto & mapping = mappings.at(idx); + mapping->unmap_fragment(0, mmap_used.first); + if (mmap_used.second != 0) { + mapping->unmap_fragment(mmap_used.second, mapping->size()); + } + } + } + if (progress_callback) { + // Even though the model is done loading, we still honor + // cancellation since we need to free allocations. + return progress_callback(1.0f, progress_callback_user_data); + } + } + + return true; +} diff --git a/src/llama-model-loader.h b/src/llama-model-loader.h new file mode 100644 index 000000000..1ec478195 --- /dev/null +++ b/src/llama-model-loader.h @@ -0,0 +1,158 @@ +#pragma once + +#include "llama.h" + +#include "llama-impl.h" +#include "llama-arch.h" +#include "llama-mmap.h" + +#include "ggml-cpp.h" + +#include +#include +#include +#include + +using llama_buf_map = std::unordered_map; + +enum llama_fver { + GGUF_FILE_VERSION_V1 = 1, + GGUF_FILE_VERSION_V2 = 2, + GGUF_FILE_VERSION_V3 = 3, +}; + +const char * llama_file_version_name(llama_fver version); + +struct llama_model_loader { + // Holds information on a model weight + struct llama_tensor_weight { + uint16_t idx; // source file index + size_t offs; // tensor data offset in the original file + + ggml_tensor * tensor; + + llama_tensor_weight(const llama_file * file, uint16_t idx, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) { + const int tensor_idx = gguf_find_tensor(gguf_ctx, ggml_get_name(tensor)); + if (tensor_idx < 0) { + throw std::runtime_error(format("tensor '%s' not found in the model", ggml_get_name(tensor))); + } + + offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx); + if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size()) { + throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", ggml_get_name(tensor))); + } + } + }; + + // custom comparator to sort weights more nicely by layer + struct weight_name_comparer { + bool operator()(const std::string & a, const std::string & b) const { + int a_layer = -1; + int b_layer = -1; + sscanf(a.c_str(), "blk.%d.", &a_layer); + sscanf(b.c_str(), "blk.%d.", &b_layer); + if (a_layer != b_layer) { + return a_layer < b_layer; + } + return a < b; + } + }; + + static const int TENSOR_NOT_REQUIRED = 1; + static const int TENSOR_DUPLICATED = 2; + + int n_kv = 0; + int n_tensors = 0; + int n_created = 0; + + uint64_t n_elements = 0; + size_t n_bytes = 0; + + bool use_mmap = false; + bool check_tensors; + + llama_files files; + llama_ftype ftype; + llama_fver fver; + + llama_mmaps mappings; + + std::map weights_map; + std::unordered_map kv_overrides; + + gguf_context_ptr meta; + std::vector contexts; + + std::string arch_name; + LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN); + + size_t size_done = 0; + size_t size_data = 0; + std::vector> mmaps_used; + + llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p); + + template + typename std::enable_if::value, bool>::type + get_arr_n(const std::string & key, T & result, bool required = true); + + template + typename std::enable_if::value, bool>::type + get_arr_n(enum llm_kv kid, T & result, bool required = true); + + template + bool get_arr(const std::string & key, std::vector & result, bool required = true); + + template + bool get_arr(const std::string & key, std::array & result, bool required = true); + + template + bool get_arr(enum llm_kv kid, T & result, bool required = true); + + template + bool get_key(const std::string & key, T & result, bool required = true); + + template + bool get_key(enum llm_kv kid, T & result, bool required = true); + + template + bool get_key_or_arr(const std::string & key, std::array & result, uint32_t n, bool required = true); + + template + bool get_key_or_arr(enum llm_kv kid, T & result, uint32_t n, bool required = true); + + std::string get_arch_name() const; + + enum llm_arch get_arch() const; + + const llama_tensor_weight * get_weight(const char * name) const; + + const llama_tensor_weight & require_weight(const char * name) const; + + struct ggml_tensor * get_tensor_meta(const char * name) const; + + struct ggml_tensor * require_tensor_meta(const std::string & name) const; + + const struct ggml_tensor * check_tensor_dims(const std::string & name, const std::vector & ne, bool required) const; + + struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::initializer_list & ne, int flags = 0); + + struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::initializer_list & ne, size_t offset, bool required = true); + + void done_getting_tensors() const; + + void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr); + + void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const; + + // for backwards compatibility, does not support ggml-backend + void load_data_for(struct ggml_tensor * cur) const; + + // Returns false if cancelled by progress_callback + bool load_all_data( + struct ggml_context * ctx, + llama_buf_map & bufs, + llama_mlocks * lmlocks, + llama_progress_callback progress_callback, + void * progress_callback_user_data); +}; diff --git a/src/llama-model.cpp b/src/llama-model.cpp new file mode 100644 index 000000000..ace0ba262 --- /dev/null +++ b/src/llama-model.cpp @@ -0,0 +1,2164 @@ +#include "llama-model.h" + +#include "llama-impl.h" +#include "llama-model-loader.h" + +#include "unicode.h" // TODO: remove + +#include +#include +#include +#include +#include + +static const size_t kiB = 1024; +static const size_t MiB = 1024*kiB; +static const size_t GiB = 1024*MiB; + +const char * llm_type_name(llm_type type) { + switch (type) { + case MODEL_14M: return "14M"; + case MODEL_17M: return "17M"; + case MODEL_22M: return "22M"; + case MODEL_33M: return "33M"; + case MODEL_60M: return "60M"; + case MODEL_70M: return "70M"; + case MODEL_80M: return "80M"; + case MODEL_109M: return "109M"; + case MODEL_137M: return "137M"; + case MODEL_160M: return "160M"; + case MODEL_220M: return "220M"; + case MODEL_250M: return "250M"; + case MODEL_270M: return "270M"; + case MODEL_335M: return "335M"; + case MODEL_410M: return "410M"; + case MODEL_450M: return "450M"; + case MODEL_770M: return "770M"; + case MODEL_780M: return "780M"; + case MODEL_0_5B: return "0.5B"; + case MODEL_1B: return "1B"; + case MODEL_1_3B: return "1.3B"; + case MODEL_1_4B: return "1.4B"; + case MODEL_1_5B: return "1.5B"; + case MODEL_1_6B: return "1.6B"; + case MODEL_2B: return "2B"; + case MODEL_2_8B: return "2.8B"; + case MODEL_3B: return "3B"; + case MODEL_4B: return "4B"; + case MODEL_6B: return "6B"; + case MODEL_6_9B: return "6.9B"; + case MODEL_7B: return "7B"; + case MODEL_8B: return "8B"; + case MODEL_9B: return "9B"; + case MODEL_11B: return "11B"; + case MODEL_12B: return "12B"; + case MODEL_13B: return "13B"; + case MODEL_14B: return "14B"; + case MODEL_15B: return "15B"; + case MODEL_16B: return "16B"; + case MODEL_20B: return "20B"; + case MODEL_30B: return "30B"; + case MODEL_32B: return "32B"; + case MODEL_34B: return "34B"; + case MODEL_35B: return "35B"; + case MODEL_40B: return "40B"; + case MODEL_65B: return "65B"; + case MODEL_70B: return "70B"; + case MODEL_236B: return "236B"; + case MODEL_314B: return "314B"; + case MODEL_SMALL: return "0.1B"; + case MODEL_MEDIUM: return "0.4B"; + case MODEL_LARGE: return "0.8B"; + case MODEL_XL: return "1.5B"; + case MODEL_A1_7B: return "A1.7B"; + case MODEL_A2_7B: return "A2.7B"; + case MODEL_8x7B: return "8x7B"; + case MODEL_8x22B: return "8x22B"; + case MODEL_16x12B: return "16x12B"; + case MODEL_10B_128x3_66B: return "10B+128x3.66B"; + case MODEL_57B_A14B: return "57B.A14B"; + case MODEL_27B: return "27B"; + default: return "?B"; + } +} + +static std::string llama_model_ftype_name(llama_ftype ftype) { + if (ftype & LLAMA_FTYPE_GUESSED) { + return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)"; + } + + switch (ftype) { + case LLAMA_FTYPE_ALL_F32: return "all F32"; + case LLAMA_FTYPE_MOSTLY_F16: return "F16"; + case LLAMA_FTYPE_MOSTLY_BF16: return "BF16"; + case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0"; + case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1"; + case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0"; + case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1"; + case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0"; + case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large"; + case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K"; + case LLAMA_FTYPE_MOSTLY_TQ1_0: return "TQ1_0 - 1.69 bpw ternary"; + case LLAMA_FTYPE_MOSTLY_TQ2_0: return "TQ2_0 - 2.06 bpw ternary"; + case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw"; + + default: return "unknown, may not work"; + } +} + +std::string llama_model_arch_name (const llama_model & model) { + return llm_arch_name(model.arch); +} + +std::string llama_model_type_name (const llama_model & model) { + return llm_type_name(model.type); +} + +std::string llama_model_ftype_name(const llama_model & model) { + return llama_model_ftype_name(model.ftype); +} + +template +static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) { + ggml_init_params params = { + /*.mem_size =*/ ggml_tensor_overhead()*8, + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + + ggml_context_ptr ctx { ggml_init(params) }; + if (!ctx) { + throw std::runtime_error(format("failed to create ggml context")); + } + + ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) }; + ggml_tensor * op_tensor = fn(ctx.get()); + for (int i = 0; i < GGML_MAX_SRC; i++) { + if (op_tensor->src[i] != nullptr) { + assert(op_tensor->src[i]->buffer == nullptr); + op_tensor->src[i]->buffer = buf.get(); + } + } + + bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor); + + return op_supported; +} + +template +static ggml_backend_buffer_type_t select_buft(const llama_model::buft_list_t & buft_list, const F & fn) { + for (const auto & cur : buft_list) { + ggml_backend_dev_t cur_dev = cur.first; + ggml_backend_buffer_type_t cur_buft = cur.second; + if (buft_supported(cur_buft, cur_dev, fn)) { + return cur_buft; + } + } + + throw std::runtime_error(format("no suitable buffer type found")); +} + +ggml_backend_buffer_type_t llama_model_select_buft(const llama_model & model, int il) { + return select_buft( + *model.dev_layer.at(il).buft_list, + [&](ggml_context * ctx) { + ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd); + ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd); + return ggml_add(ctx, cur, layer_dir); + }); +} + +struct ggml_tensor * llama_model_get_tensor(const struct llama_model & model, const char * name) { + auto it = std::find_if(model.tensors_by_name.begin(), model.tensors_by_name.end(), + [name](const std::pair & it) { + return it.first == name; + }); + if (it == model.tensors_by_name.end()) { + return nullptr; + } + + return it->second; +} + +size_t llama_model_max_nodes(const llama_model & model) { + return std::max(8192, model.tensors_by_name.size()*5); +} + +static const std::map LLAMA_ROPE_SCALING_TYPES = { + { LLAMA_ROPE_SCALING_TYPE_NONE, "none" }, + { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" }, + { LLAMA_ROPE_SCALING_TYPE_YARN, "yarn" }, + { LLAMA_ROPE_SCALING_TYPE_LONGROPE, "longrope" }, +}; + +static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) { + for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) { + if (kv.second == name) { + return (llama_rope_scaling_type) kv.first; + } + } + + return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED; +} + +// NOTE: avoid ever using this except for building the token_to_piece caches +static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) { + std::string piece; + piece.resize(piece.capacity()); // using string internal cache + const int n_chars = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special); + if (n_chars < 0) { + piece.resize(-n_chars); + int check = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special); + GGML_ASSERT(check == -n_chars); + } + else { + piece.resize(n_chars); + } + + return piece; +} + +void llm_load_stats(llama_model_loader & ml, llama_model & model) { + model.n_elements = ml.n_elements; + model.n_bytes = ml.n_bytes; +} + +void llm_load_arch(llama_model_loader & ml, llama_model & model) { + model.arch = ml.get_arch(); + if (model.arch == LLM_ARCH_UNKNOWN) { + throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'"); + } +} + +void llm_load_hparams(llama_model_loader & ml, llama_model & model) { + auto & hparams = model.hparams; + const gguf_context * ctx = ml.meta.get(); + + // get metadata as string + for (int i = 0; i < gguf_get_n_kv(ctx); i++) { + enum gguf_type type = gguf_get_kv_type(ctx, i); + if (type == GGUF_TYPE_ARRAY) { + continue; + } + const char * name = gguf_get_key(ctx, i); + const std::string value = gguf_kv_to_str(ctx, i); + model.gguf_kv.emplace(name, value); + } + + // get general kv + ml.get_key(LLM_KV_GENERAL_NAME, model.name, false); + + // get hparams kv + ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab, false); + + // everything past this point is not vocab-related + if (hparams.vocab_only) { + return; + } + + ml.get_key(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train); + ml.get_key(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd); + ml.get_key(LLM_KV_BLOCK_COUNT, hparams.n_layer); + ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false); + ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false); + + if (model.arch == LLM_ARCH_WAVTOKENIZER_DEC) { + ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features); + + ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd); + ml.get_key(LLM_KV_POSNET_BLOCK_COUNT, hparams.posnet.n_layer); + + ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd); + ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT, hparams.convnext.n_layer); + } + + GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS); + GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert); + if (hparams.n_expert > 0) { + GGML_ASSERT(hparams.n_expert_used > 0); + } else { + GGML_ASSERT(hparams.n_expert_used == 0); + } + + // zero-out the array hparams + std::fill(hparams.n_head_arr.begin(), hparams.n_head_arr.end(), 0); + std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0); + std::fill(hparams.n_ff_arr.begin(), hparams.n_ff_arr.end(), 0); + + ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, hparams.n_layer, false); + ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false); + + // n_head_kv is optional, default to n_head + hparams.n_head_kv_arr = hparams.n_head_arr; + + ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false); + + bool rope_finetuned = false; + ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false); + hparams.rope_finetuned = rope_finetuned; + + hparams.n_ctx_orig_yarn = hparams.n_ctx_train; + ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false); + + // rope_freq_base (optional) + hparams.rope_freq_base_train = 10000.0f; + ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false); + + std::string rope_scaling("linear"); + ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false); + hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling); + GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED); + + // rope_freq_scale (inverse of the kv) is optional + float ropescale = 0.0f; + if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) { + // try the old key name + ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false); + } + hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale; + + ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false); + + // non-transformer models do not have attention heads + if (hparams.n_head() > 0) { + // gpt-neox n_rot = rotary_pct * (n_embd / n_head) + // gpt-j n_rot = rotary_dim + + hparams.n_embd_head_k = hparams.n_embd / hparams.n_head(); + ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false); + + hparams.n_embd_head_v = hparams.n_embd / hparams.n_head(); + ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false); + + // sanity check for n_rot (optional) + hparams.n_rot = hparams.n_embd_head_k; + + ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false); + + if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_DECI || model.arch == LLM_ARCH_FALCON) { + if (hparams.n_rot != hparams.n_embd_head_k) { + throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k)); + } + } + } else { + hparams.n_rot = 0; + hparams.n_embd_head_k = 0; + hparams.n_embd_head_v = 0; + } + + using e_model = llm_type; // TMP + + // arch-specific KVs + switch (model.arch) { + case LLM_ARCH_LLAMA: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + if (hparams.n_expert == 8) { + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_8x7B; break; + case 56: model.type = e_model::MODEL_8x22B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } else { + switch (hparams.n_layer) { + case 16: model.type = e_model::MODEL_1B; break; // Llama 3.2 1B + case 22: model.type = e_model::MODEL_1B; break; + case 26: model.type = e_model::MODEL_3B; break; + case 28: model.type = e_model::MODEL_3B; break; // Llama 3.2 3B + // granite uses a vocab with len 49152 + case 32: model.type = hparams.n_vocab == 49152 ? e_model::MODEL_3B : (hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B); break; + case 36: model.type = e_model::MODEL_8B; break; // granite + case 40: model.type = e_model::MODEL_13B; break; + case 48: model.type = e_model::MODEL_34B; break; + case 60: model.type = e_model::MODEL_30B; break; + case 80: model.type = hparams.n_head() == hparams.n_head_kv() ? e_model::MODEL_65B : e_model::MODEL_70B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } + } break; + case LLM_ARCH_DECI: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 80: model.type = e_model::MODEL_70B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_MINICPM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); + ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + + switch (hparams.n_layer) { + case 52: model.type = e_model::MODEL_1B; break; + case 40: model.type = e_model::MODEL_2B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_MINICPM3: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q); + ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv); + + switch (hparams.n_layer) { + case 62: model.type = e_model::MODEL_4B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GROK: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 64: model.type = e_model::MODEL_314B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_FALCON: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 60: model.type = e_model::MODEL_40B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_BAICHUAN: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + + if (model.type == e_model::MODEL_13B) { + // TODO: become GGUF KV parameter + hparams.f_max_alibi_bias = 8.0f; + } + } break; + case LLM_ARCH_STARCODER: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 36: model.type = e_model::MODEL_3B; break; + case 42: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_15B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_REFACT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_1B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + + // TODO: become GGUF KV parameter + hparams.f_max_alibi_bias = 8.0f; + } break; + case LLM_ARCH_BERT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false); + + switch (hparams.n_layer) { + case 3: + model.type = e_model::MODEL_17M; break; // bge-micro + case 6: + model.type = e_model::MODEL_22M; break; // MiniLM-L6 + case 12: + switch (hparams.n_embd) { + case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small + case 768: model.type = e_model::MODEL_109M; break; // bge-base + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 24: + model.type = e_model::MODEL_335M; break; // bge-large + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_JINA_BERT_V2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false); + hparams.f_max_alibi_bias = 8.0f; + + switch (hparams.n_layer) { + case 4: model.type = e_model::MODEL_33M; break; // jina-embeddings-small + case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_NOMIC_BERT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); + ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type); + + if (hparams.n_layer == 12 && hparams.n_embd == 768) { + model.type = e_model::MODEL_137M; + } + } break; + case LLM_ARCH_BLOOM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 30: + switch (hparams.n_embd) { + case 2560: model.type = e_model::MODEL_3B; break; + case 4096: model.type = e_model::MODEL_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + default: model.type = e_model::MODEL_UNKNOWN; + } + + // TODO: become GGUF KV parameter + hparams.f_max_alibi_bias = 8.0f; + } break; + case LLM_ARCH_MPT: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false); + ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 48: model.type = e_model::MODEL_30B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_STABLELM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_3B; break; + case 40: model.type = e_model::MODEL_12B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_QWEN: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_QWEN2VL: + { + ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, hparams.rope_sections, 4, true); + } + // fall through + case LLM_ARCH_QWEN2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break; + case 28: model.type = hparams.n_embd == 1536 ? e_model::MODEL_1_5B : e_model::MODEL_7B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 36: model.type = e_model::MODEL_3B; break; + case 40: model.type = hparams.n_head() == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break; + case 48: model.type = e_model::MODEL_14B; break; + case 64: model.type = e_model::MODEL_32B; break; + case 80: model.type = e_model::MODEL_70B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_QWEN2MOE: + { + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false); + ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false); + + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_A2_7B; break; + case 28: model.type = e_model::MODEL_57B_A14B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_PHI2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_PHI3: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_3B; break; + case 40: model.type = e_model::MODEL_14B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + + // for backward compatibility ; see: https://github.com/ggerganov/llama.cpp/pull/8931 + if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) { + // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct + hparams.n_swa = 2047; + } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) { + // default value for Phi-3-mini-128k-instruct + hparams.n_swa = 262144; + } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) { + // default value for Phi-3-medium-128k-instruct + hparams.n_swa = 131072; + } + bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); + if (!found_swa && hparams.n_swa == 0) { + throw std::runtime_error("invalid value for sliding_window"); + } + } break; + case LLM_ARCH_PLAMO: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GPT2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 12: model.type = e_model::MODEL_SMALL; break; + case 24: model.type = e_model::MODEL_MEDIUM; break; + case 36: model.type = e_model::MODEL_LARGE; break; + case 48: model.type = e_model::MODEL_XL; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_CODESHELL: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 42: model.type = e_model::MODEL_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_ORION: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + + switch (hparams.n_layer) { + case 40: model.type = e_model::MODEL_14B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_INTERNLM2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 48: model.type = e_model::MODEL_20B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GEMMA: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 18: model.type = e_model::MODEL_2B; break; + case 28: model.type = e_model::MODEL_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GEMMA2: + { + hparams.n_swa = 4096; // default value of gemma 2 + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false); + ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false); + hparams.attn_soft_cap = true; + + switch (hparams.n_layer) { + case 26: model.type = e_model::MODEL_2B; break; + case 42: model.type = e_model::MODEL_9B; break; + case 46: model.type = e_model::MODEL_27B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_STARCODER2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 30: model.type = e_model::MODEL_3B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_15B; break; + case 52: model.type = e_model::MODEL_20B; break; // granite + case 88: model.type = e_model::MODEL_34B; break; // granite + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_MAMBA: + { + ml.get_key(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv); + ml.get_key(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner); + ml.get_key(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state); + ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank); + ml.get_key(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms, false); + + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 24: + switch (hparams.n_embd) { + case 768: model.type = e_model::MODEL_SMALL; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 48: + switch (hparams.n_embd) { + case 1024: model.type = e_model::MODEL_MEDIUM; break; + case 1536: model.type = e_model::MODEL_LARGE; break; + case 2048: model.type = e_model::MODEL_XL; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 64: + switch (hparams.n_embd) { + case 2560: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_XVERSE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + case 80: model.type = e_model::MODEL_65B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_COMMAND_R: + { + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 40: model.type = e_model::MODEL_35B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_DBRX: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv); + + switch (hparams.n_layer) { + case 40: model.type = e_model::MODEL_16x12B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_OLMO: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false); + + switch (hparams.n_layer) { + case 22: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 80: model.type = e_model::MODEL_70B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_OLMO2: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 16: model.type = e_model::MODEL_1B; break; + case 32: model.type = e_model::MODEL_7B; break; + case 40: model.type = e_model::MODEL_13B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_OLMOE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 16: model.type = e_model::MODEL_A1_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_OPENELM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 16: model.type = e_model::MODEL_270M; break; + case 20: model.type = e_model::MODEL_450M; break; + case 28: model.type = e_model::MODEL_1B; break; + case 36: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GPTNEOX: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res); + switch (hparams.n_layer) { + case 6: + switch (hparams.n_ff()) { + case 512: model.type = e_model::MODEL_14M; break; + case 2048: model.type = e_model::MODEL_70M; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 12: + switch (hparams.n_ff()) { + case 3072: model.type = e_model::MODEL_160M; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 16: + switch (hparams.n_ff()) { + case 8192: model.type = e_model::MODEL_1B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 24: + switch (hparams.n_ff()) { + case 4096: model.type = e_model::MODEL_410M; break; + case 8192: model.type = e_model::MODEL_1_4B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 32: + switch (hparams.n_ff()) { + case 10240: model.type = e_model::MODEL_2_8B; break; + case 16384: model.type = e_model::MODEL_6_9B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 36: + switch (hparams.n_ff()) { + case 20480: model.type = e_model::MODEL_12B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 44: + switch (hparams.n_ff()) { + case 24576: model.type = e_model::MODEL_20B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_ARCTIC: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + if (hparams.n_expert == 128) { + switch (hparams.n_layer) { + case 35: model.type = e_model::MODEL_10B_128x3_66B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } else { + model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_DEEPSEEK: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + + switch (hparams.n_layer) { + case 28: model.type = e_model::MODEL_20B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_DEEPSEEK2: + { + bool is_lite = (hparams.n_layer == 27); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + if (!is_lite) { + ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q); + } + ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul); + + switch (hparams.n_layer) { + case 27: model.type = e_model::MODEL_16B; break; + case 60: model.type = e_model::MODEL_236B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_CHATGLM: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + switch (hparams.n_layer) { + case 28: model.type = e_model::MODEL_6B; break; + case 40: model.type = e_model::MODEL_9B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_BITNET: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 26: model.type = e_model::MODEL_3B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_T5: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts); + + uint32_t dec_start_token_id; + if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) { + hparams.dec_start_token_id = dec_start_token_id; + } + + switch (hparams.n_layer) { + case 6: model.type = e_model::MODEL_60M; break; // t5-small + case 8: model.type = e_model::MODEL_80M; break; // flan-t5-small + case 12: + switch (hparams.n_ff()) { + case 3072: model.type = e_model::MODEL_220M; break; // t5-base + case 2048: model.type = e_model::MODEL_250M; break; // flan-t5-base + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 24: + switch (hparams.n_ff()) { + case 4096: model.type = e_model::MODEL_770M; break; // t5-large + case 2816: model.type = e_model::MODEL_780M; break; // flan-t5-large + case 16384: model.type = e_model::MODEL_3B; break; // t5-3b + case 5120: model.type = e_model::MODEL_3B; break; // flan-t5-xl + case 65536: model.type = e_model::MODEL_11B; break; // t5-11b + case 10240: model.type = e_model::MODEL_11B; break; // flan-t5-xxl + default: model.type = e_model::MODEL_UNKNOWN; + } break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_T5ENCODER: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts); + model.type = e_model::MODEL_UNKNOWN; + } break; + case LLM_ARCH_JAIS: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1_3B; break; + case 40: model.type = e_model::MODEL_13B; break; + /* TODO: add variants */ + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_NEMOTRON: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_4B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_EXAONE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_8B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_RWKV6: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size); + ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim); + ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim); + ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false); + + switch (hparams.n_layer) { + case 24: model.type = e_model::MODEL_1_6B; break; + case 32: + switch (hparams.n_embd) { + case 2560: model.type = e_model::MODEL_3B; break; + case 4096: model.type = e_model::MODEL_7B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } break; + case 61: model.type = e_model::MODEL_14B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_GRANITE: + case LLM_ARCH_GRANITE_MOE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); + ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); + ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_3B; break; + case 40: model.type = e_model::MODEL_3B; break; + // Add additional layer/vocab/etc checks here for other model sizes + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_CHAMELEON: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + hparams.f_norm_eps = 1e-5; // eps for qk-norm, torch default + ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_7B; break; + case 48: model.type = e_model::MODEL_34B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; + case LLM_ARCH_WAVTOKENIZER_DEC: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS, hparams.f_norm_group_eps); + ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups); + ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); + } break; + default: throw std::runtime_error("unsupported model architecture"); + } + + model.ftype = ml.ftype; + + if (hparams.f_max_alibi_bias > 0.0f) { + hparams.use_alibi = true; + } + + hparams.rope_type = llama_rope_type(&model); +} + +void llm_load_vocab(llama_model_loader & ml, llama_model & model) { + auto & vocab = model.vocab; + + struct gguf_context * ctx = ml.meta.get(); + + const auto kv = LLM_KV(model.arch); + + // determine vocab type + { + std::string tokenizer_model; + std::string tokenizer_pre; + + ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model); + ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false); + + if (tokenizer_model == "no_vocab" || tokenizer_model == "none") { + vocab.type = LLAMA_VOCAB_TYPE_NONE; + + // default special tokens + vocab.special_bos_id = LLAMA_TOKEN_NULL; + vocab.special_eos_id = LLAMA_TOKEN_NULL; + vocab.special_unk_id = LLAMA_TOKEN_NULL; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = LLAMA_TOKEN_NULL; + vocab.special_cls_id = LLAMA_TOKEN_NULL; + vocab.special_mask_id = LLAMA_TOKEN_NULL; + vocab.linefeed_id = LLAMA_TOKEN_NULL; + + // read vocab size from metadata + if (!ml.get_key(LLM_KV_VOCAB_SIZE, vocab.n_vocab, false)) { + vocab.n_vocab = 0; + LLAMA_LOG_WARN("%s: there is no vocab_size in metadata, vocab.n_vocab will be set to %u\n", __func__, vocab.n_vocab); + } + return; + } + + if (tokenizer_model == "llama") { + vocab.type = LLAMA_VOCAB_TYPE_SPM; + + // default special tokens + vocab.special_bos_id = 1; + vocab.special_eos_id = 2; + vocab.special_unk_id = 0; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = LLAMA_TOKEN_NULL; + vocab.special_cls_id = LLAMA_TOKEN_NULL; + vocab.special_mask_id = LLAMA_TOKEN_NULL; + } else if (tokenizer_model == "bert") { + vocab.type = LLAMA_VOCAB_TYPE_WPM; + + // default special tokens + vocab.special_bos_id = LLAMA_TOKEN_NULL; + vocab.special_eos_id = LLAMA_TOKEN_NULL; + vocab.special_unk_id = 100; + vocab.special_sep_id = 102; + vocab.special_pad_id = 0; + vocab.special_cls_id = 101; + vocab.special_mask_id = 103; + } else if (tokenizer_model == "gpt2") { + vocab.type = LLAMA_VOCAB_TYPE_BPE; + + // read bpe merges and populate bpe ranks + const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str()); + if (merges_keyidx == -1) { + throw std::runtime_error("cannot find tokenizer merges in model file\n"); + } + + const int n_merges = gguf_get_arr_n(ctx, merges_keyidx); + for (int i = 0; i < n_merges; i++) { + const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i); + GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0); + + std::string first; + std::string second; + + const size_t pos = word.find(' ', 1); + + if (pos != std::string::npos) { + first = word.substr(0, pos); + second = word.substr(pos + 1); + } + + vocab.bpe_ranks.emplace(std::make_pair(first, second), i); + } + + // default special tokens + vocab.special_bos_id = 11; + vocab.special_eos_id = 11; + vocab.special_unk_id = LLAMA_TOKEN_NULL; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = LLAMA_TOKEN_NULL; + vocab.special_cls_id = LLAMA_TOKEN_NULL; + vocab.special_mask_id = LLAMA_TOKEN_NULL; + } else if (tokenizer_model == "t5") { + vocab.type = LLAMA_VOCAB_TYPE_UGM; + + // default special tokens + vocab.special_bos_id = LLAMA_TOKEN_NULL; + vocab.special_eos_id = 1; + vocab.special_unk_id = 2; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = 0; + vocab.special_cls_id = LLAMA_TOKEN_NULL; + vocab.special_mask_id = LLAMA_TOKEN_NULL; + + const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str()); + if (precompiled_charsmap_keyidx != -1) { + size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx); + const char * precompiled_charsmap = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx); + vocab.precompiled_charsmap.assign(precompiled_charsmap, precompiled_charsmap + n_precompiled_charsmap); +#ifdef IS_BIG_ENDIAN + // correct endiannes of data in precompiled_charsmap binary blob + uint32_t * xcda_blob_size = (uint32_t *) &vocab.precompiled_charsmap[0]; + *xcda_blob_size = __builtin_bswap32(*xcda_blob_size); + assert(*xcda_blob_size + sizeof(uint32_t) < n_precompiled_charsmap); + size_t xcda_array_size = *xcda_blob_size / sizeof(uint32_t); + uint32_t * xcda_array = (uint32_t *) &vocab.precompiled_charsmap[sizeof(uint32_t)]; + for (size_t i = 0; i < xcda_array_size; ++i) { + xcda_array[i] = __builtin_bswap32(xcda_array[i]); + } +#endif + } + } else if (tokenizer_model == "rwkv") { + vocab.type = LLAMA_VOCAB_TYPE_RWKV; + + // default special tokens + vocab.special_bos_id = LLAMA_TOKEN_NULL; + vocab.special_eos_id = LLAMA_TOKEN_NULL; + vocab.special_unk_id = LLAMA_TOKEN_NULL; + vocab.special_sep_id = LLAMA_TOKEN_NULL; + vocab.special_pad_id = LLAMA_TOKEN_NULL; + } else { + throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str())); + } + + // for now, only BPE models have pre-tokenizers + if (vocab.type == LLAMA_VOCAB_TYPE_BPE) { + vocab.tokenizer_add_space_prefix = false; + vocab.tokenizer_clean_spaces = true; + if (tokenizer_pre.empty()) { + LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__); + LLAMA_LOG_WARN("%s: \n", __func__); + LLAMA_LOG_WARN("%s: ************************************ \n", __func__); + LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__); + LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__); + LLAMA_LOG_WARN("%s: ************************************ \n", __func__); + LLAMA_LOG_WARN("%s: \n", __func__); + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } else if (tokenizer_pre == "default") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } else if ( + tokenizer_pre == "llama3" || + tokenizer_pre == "llama-v3" || + tokenizer_pre == "llama-bpe"|| + tokenizer_pre == "falcon3") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3; + vocab.tokenizer_ignore_merges = true; + vocab.tokenizer_add_bos = true; + } else if ( + tokenizer_pre == "deepseek-llm") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "deepseek-coder") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "falcon") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON; + } else if ( + tokenizer_pre == "mpt") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT; + } else if ( + tokenizer_pre == "starcoder") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER; + } else if ( + tokenizer_pre == "gpt-2" || + tokenizer_pre == "phi-2" || + tokenizer_pre == "jina-es" || + tokenizer_pre == "jina-de" || + tokenizer_pre == "gigachat" || + tokenizer_pre == "jina-v1-en" || + tokenizer_pre == "jina-v2-es" || + tokenizer_pre == "jina-v2-de" || + tokenizer_pre == "jina-v2-code" || + tokenizer_pre == "roberta-bpe") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2; + } else if ( + tokenizer_pre == "refact") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT; + } else if ( + tokenizer_pre == "command-r") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "qwen2") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "stablelm2") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2; + } else if ( + tokenizer_pre == "olmo") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO; + } else if ( + tokenizer_pre == "dbrx") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX; + } else if ( + tokenizer_pre == "smaug-bpe") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG; + } else if ( + tokenizer_pre == "poro-chat") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "chatglm-bpe") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHATGLM4; + vocab.special_bos_id = LLAMA_TOKEN_NULL; + } else if ( + tokenizer_pre == "viking") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_VIKING; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "jais") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS; + } else if ( + tokenizer_pre == "tekken") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN; + vocab.tokenizer_clean_spaces = false; + vocab.tokenizer_ignore_merges = true; + vocab.tokenizer_add_bos = true; + } else if ( + tokenizer_pre == "smollm") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMOLLM; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "codeshell") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL; + } else if ( + tokenizer_pre == "bloom") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM; + } else if ( + tokenizer_pre == "gpt3-finnish") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH; + } else if ( + tokenizer_pre == "exaone") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_EXAONE; + } else if ( + tokenizer_pre == "chameleon") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHAMELEON; + vocab.tokenizer_add_bos = true; + vocab.tokenizer_clean_spaces = false; + } else if ( + tokenizer_pre == "minerva-7b") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MINERVA; + } else if ( + tokenizer_pre == "megrez") { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2; + } else { + throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); + } + } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + vocab.tokenizer_add_space_prefix = true; + vocab.tokenizer_clean_spaces = false; + vocab.tokenizer_add_bos = true; + vocab.tokenizer_add_eos = false; + } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + vocab.tokenizer_add_space_prefix = false; + vocab.tokenizer_clean_spaces = true; + vocab.tokenizer_add_bos = true; + vocab.tokenizer_add_eos = false; + } else if (vocab.type == LLAMA_VOCAB_TYPE_UGM) { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + vocab.tokenizer_add_bos = false; + vocab.tokenizer_add_eos = true; + } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + vocab.tokenizer_add_space_prefix = false; + vocab.tokenizer_clean_spaces = false; + vocab.tokenizer_add_bos = false; + vocab.tokenizer_add_eos = false; + } else { + vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; + } + + ml.get_key(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.tokenizer_add_space_prefix, false); + ml.get_key(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.tokenizer_remove_extra_whitespaces, false); + } + + const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str()); + if (token_idx == -1) { + throw std::runtime_error("cannot find tokenizer vocab in model file\n"); + } + + const float * scores = nullptr; + const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str()); + if (score_idx != -1) { + scores = (const float * ) gguf_get_arr_data(ctx, score_idx); + } + + const int * toktypes = nullptr; + const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str()); + if (toktype_idx != -1) { + toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx); + } + + const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx); + + vocab.n_vocab = n_vocab; + vocab.id_to_token.resize(n_vocab); + + for (uint32_t i = 0; i < n_vocab; i++) { + std::string word = gguf_get_arr_str(ctx, token_idx, i); + if (word.empty()) { + LLAMA_LOG_WARN("%s: empty token at index %u\n", __func__, i); + word = "[EMPTY_" + std::to_string(i) + "]"; + } + + vocab.token_to_id[word] = i; + vocab.max_token_len = std::max(vocab.max_token_len, (int) word.size()); + + auto & token_data = vocab.id_to_token[i]; + token_data.text = std::move(word); + token_data.score = scores ? scores[i] : 0.0f; + token_data.attr = LLAMA_TOKEN_ATTR_NORMAL; + + if (toktypes) { //TODO: remove, required until per token attributes are available from GGUF file + switch(toktypes[i]) { + case LLAMA_TOKEN_TYPE_UNKNOWN: token_data.attr = LLAMA_TOKEN_ATTR_UNKNOWN; break; + case LLAMA_TOKEN_TYPE_UNUSED: token_data.attr = LLAMA_TOKEN_ATTR_UNUSED; break; + case LLAMA_TOKEN_TYPE_NORMAL: token_data.attr = LLAMA_TOKEN_ATTR_NORMAL; break; + case LLAMA_TOKEN_TYPE_CONTROL: token_data.attr = LLAMA_TOKEN_ATTR_CONTROL; break; + case LLAMA_TOKEN_TYPE_USER_DEFINED: token_data.attr = LLAMA_TOKEN_ATTR_USER_DEFINED; break; + case LLAMA_TOKEN_TYPE_BYTE: token_data.attr = LLAMA_TOKEN_ATTR_BYTE; break; + case LLAMA_TOKEN_TYPE_UNDEFINED: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break; + default: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break; + } + } + } + GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size()); + + vocab.init_tokenizer(); + + // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n' + if (vocab.type == LLAMA_VOCAB_TYPE_SPM) { + try { + vocab.linefeed_id = llama_byte_to_token_impl(vocab, '\n'); + } catch (const std::exception & e) { + LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what()); + vocab.linefeed_id = vocab.special_pad_id; + } + } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) { + vocab.linefeed_id = vocab.special_pad_id; + } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) { + const std::vector ids = llama_tokenize_internal(vocab, "\n", false); + GGML_ASSERT(!ids.empty() && "model vocab missing newline token"); + vocab.linefeed_id = ids[0]; + } else { + const std::vector ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A + + //GGML_ASSERT(!ids.empty() && "model vocab missing newline token"); + if (ids.empty()) { + LLAMA_LOG_WARN("%s: model vocab missing newline token, using special_pad_id instead\n", __func__); + vocab.linefeed_id = vocab.special_pad_id; + } else { + vocab.linefeed_id = ids[0]; + } + } + + // special tokens + { + const std::vector> special_token_types = { + { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id }, + { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id }, + { LLM_KV_TOKENIZER_EOT_ID, vocab.special_eot_id }, + { LLM_KV_TOKENIZER_EOM_ID, vocab.special_eom_id }, + { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id }, + { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id }, + { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id }, + { LLM_KV_TOKENIZER_CLS_ID, vocab.special_cls_id }, + { LLM_KV_TOKENIZER_MASK_ID, vocab.special_mask_id }, + { LLM_KV_TOKENIZER_FIM_PRE_ID, vocab.special_fim_pre_id }, + { LLM_KV_TOKENIZER_FIM_SUF_ID, vocab.special_fim_suf_id }, + { LLM_KV_TOKENIZER_FIM_MID_ID, vocab.special_fim_mid_id }, + { LLM_KV_TOKENIZER_FIM_PAD_ID, vocab.special_fim_pad_id }, + { LLM_KV_TOKENIZER_FIM_REP_ID, vocab.special_fim_rep_id }, + { LLM_KV_TOKENIZER_FIM_SEP_ID, vocab.special_fim_sep_id }, + + // deprecated + { LLM_KV_TOKENIZER_PREFIX_ID, vocab.special_fim_pre_id }, + { LLM_KV_TOKENIZER_SUFFIX_ID, vocab.special_fim_suf_id }, + { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_fim_mid_id }, + }; + + for (const auto & it : special_token_types) { + const std::string & key = kv(std::get<0>(it)); + int32_t & id = std::get<1>(it); + + uint32_t new_id; + if (!ml.get_key(std::get<0>(it), new_id, false)) { + continue; + } + if (new_id >= vocab.id_to_token.size()) { + LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n", + __func__, key.c_str(), new_id, id); + } else { + id = new_id; + } + } + + // Handle add_bos_token and add_eos_token + { + bool temp = true; + + if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) { + vocab.tokenizer_add_bos = temp; + } + if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) { + vocab.tokenizer_add_eos = temp; + } + } + + // auto-detect special tokens by text + // TODO: convert scripts should provide these tokens through the KV metadata LLM_KV_TOKENIZER_... + // for now, we apply this workaround to find the tokens based on their text + + for (const auto & t : vocab.token_to_id) { + // find EOT token: "<|eot_id|>", "<|im_end|>", "", etc. + if (vocab.special_eot_id == LLAMA_TOKEN_NULL) { + if (false + || t.first == "<|eot_id|>" + || t.first == "<|im_end|>" + || t.first == "<|end|>" + || t.first == "" + || t.first == "<|endoftext|>" + || t.first == "" + || t.first == "<|end▁of▁sentence|>" // DeepSeek + ) { + vocab.special_eot_id = t.second; + if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) { + LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n", + __func__, t.second, t.first.c_str()); + vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL; + } + } + } + + // find EOM token: "<|eom_id|>" + if (vocab.special_eom_id == LLAMA_TOKEN_NULL) { + if (false + || t.first == "<|eom_id|>" + ) { + vocab.special_eom_id = t.second; + if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) { + LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n", + __func__, t.second, t.first.c_str()); + vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL; + } + } + } + + // find FIM_PRE token: "<|fim_prefix|>", "", "
", etc.
+            if (vocab.special_fim_pre_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_prefix|>"  // Qwen
+                        || t.first == ""
+                        || t.first == "<|fim▁begin|>" // DeepSeek
+                        || t.first == "
"
+                        ) {
+                    vocab.special_fim_pre_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_SUF token: "<|fim_suffix|>", "", "", etc.
+            if (vocab.special_fim_suf_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_suffix|>" // Qwen
+                        || t.first == ""
+                        || t.first == "<|fim▁hole|>" // DeepSeek
+                        || t.first == ""
+                        ) {
+                    vocab.special_fim_suf_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_MID token: "<|fim_middle|>", "", "", etc.
+            if (vocab.special_fim_mid_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_middle|>" // Qwen
+                        || t.first == ""
+                        || t.first == "<|fim▁end|>"  // DeepSeek
+                        || t.first == ""
+                        ) {
+                    vocab.special_fim_mid_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_PAD token: "<|fim_pad|>", "", "", etc.
+            if (vocab.special_fim_pad_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_pad|>" // Qwen
+                        || t.first == ""
+                        || t.first == ""
+                        ) {
+                    vocab.special_fim_pad_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_REP token: "<|fim_repo|>", "", "", etc.
+            if (vocab.special_fim_rep_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_repo|>"  // Qwen
+                        || t.first == "<|repo_name|>"
+                        || t.first == ""
+                        || t.first == ""
+                        ) {
+                    vocab.special_fim_rep_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_SEP token: "<|file_sep|>"
+            if (vocab.special_fim_sep_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|file_sep|>" // Qwen
+                        ) {
+                    vocab.special_fim_sep_id = t.second;
+                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+        }
+
+        // maintain a list of tokens that cause end-of-generation
+        // this is currently determined based on the token text, which is obviously not ideal
+        // ref: https://github.com/ggerganov/llama.cpp/issues/9606
+        vocab.special_eog_ids.clear();
+
+        if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_pad_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_fim_pad_id);
+        }
+
+        if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_rep_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_fim_rep_id);
+        }
+
+        if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_sep_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_fim_sep_id);
+        }
+
+        for (const auto & t : vocab.token_to_id) {
+            if (false
+                    || t.first == "<|eot_id|>"
+                    || t.first == "<|im_end|>"
+                    || t.first == "<|end|>"
+                    || t.first == ""
+                    || t.first == "<|endoftext|>"
+                    || t.first == "<|eom_id|>"
+                    || t.first == ""
+               ) {
+                vocab.special_eog_ids.insert(t.second);
+                if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                    LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                            __func__, t.second, t.first.c_str());
+                    vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                }
+            } else {
+                // token is control, but not marked as EOG -> print a debug log
+                if (vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL && vocab.special_eog_ids.count(t.second) == 0) {
+                    LLAMA_LOG_DEBUG("%s: control token: %6d '%s' is not marked as EOG\n",
+                            __func__, t.second, t.first.c_str());
+                }
+            }
+        }
+
+        // sanity checks
+        if (vocab.special_eos_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eos_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_eos_id);
+            LLAMA_LOG_WARN("%s: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        }
+
+        if (vocab.special_eot_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eot_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_eot_id);
+            LLAMA_LOG_WARN("%s: special_eot_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        }
+
+        if (vocab.special_eom_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eom_id) == 0) {
+            vocab.special_eog_ids.insert(vocab.special_eom_id);
+            LLAMA_LOG_WARN("%s: special_eom_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        }
+    }
+
+    // build special tokens cache
+    {
+        for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
+            if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
+                vocab.cache_special_tokens.push_back(id);
+            }
+        }
+
+        std::sort(vocab.cache_special_tokens.begin(), vocab.cache_special_tokens.end(),
+            [&] (const llama_vocab::id a, const llama_vocab::id b) {
+                return vocab.id_to_token[a].text.size() > vocab.id_to_token[b].text.size();
+            }
+        );
+
+        LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t)vocab.cache_special_tokens.size());
+    }
+
+    // build token to piece cache
+    {
+        size_t size_cache = 0;
+
+        std::vector cache_token_to_piece(n_vocab);
+
+        for (uint32_t id = 0; id < n_vocab; ++id) {
+            cache_token_to_piece[id] = llama_token_to_piece(&model, id, true);
+
+            size_cache += cache_token_to_piece[id].size();
+        }
+
+        std::swap(vocab.cache_token_to_piece, cache_token_to_piece);
+
+        LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MB\n", __func__, size_cache / 1024.0 / 1024.0);
+    }
+
+    // Handle per token attributes
+    //NOTE: Each model customizes per token attributes.
+    //NOTE: Per token attributes are missing from the GGUF file.
+    //TODO: Extract attributes from GGUF file.
+    {
+        auto _contains_any = [] (const std::string &str, const std::vector &substrs) -> bool {
+            for (auto substr : substrs) {
+                if (str.find(substr) < std::string::npos) {
+                    return true;
+                }
+            }
+            return false;
+        };
+
+        auto _set_tokenid_attr = [&] (const llama_vocab::id id, llama_token_attr attr, bool value) {
+            uint32_t current = vocab.id_to_token.at(id).attr;
+            current = value ? (current | attr) : (current & ~attr);
+            vocab.id_to_token[id].attr = (llama_token_attr) current;
+        };
+
+        auto _set_token_attr = [&] (const std::string & token, llama_token_attr attr, bool value) {
+            _set_tokenid_attr(vocab.token_to_id.at(token), attr, value);
+        };
+
+        std::string model_name;
+        std::string tokenizer_pre;
+
+        ml.get_key(LLM_KV_GENERAL_NAME, model_name, false);
+        ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
+
+        // model name to lowercase
+        std::transform(model_name.begin(), model_name.end(), model_name.begin(),
+            [] (const std::string::value_type x) {
+                return std::tolower(x);
+            }
+        );
+
+        // set attributes by model/tokenizer name
+        if (_contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})) {
+            _set_token_attr("", LLAMA_TOKEN_ATTR_LSTRIP, true);
+        } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
+            for (auto id : vocab.cache_special_tokens) {
+                _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
+            }
+            for (auto token : {""}) {
+                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, true);
+            }
+            for (auto token : {"", "", "<|endoftext|>"}) {
+                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, false);
+            }
+        }
+    }
+}
+
+void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
+    const auto & hparams = model.hparams;
+    const auto & vocab   = model.vocab;
+
+    const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
+
+    auto print_f = [](const std::function & f, uint32_t n) {
+        bool is_var = false;
+
+        std::vector v;
+        for (uint32_t i = 0; i < n; ++i) {
+            v.push_back(f(i));
+            if (v[i] != v[0]) {
+                is_var = true;
+            }
+        }
+
+        std::stringstream ss;
+
+        if (is_var) {
+            ss << "[";
+            for (uint32_t i = 0; i < n; ++i) {
+                ss << v[i];
+                if (i < n - 1) {
+                    ss << ", ";
+                }
+            }
+            ss << "]";
+        } else {
+            ss << v[0];
+        }
+
+        return ss.str();
+    };
+
+    // hparams
+    LLAMA_LOG_INFO("%s: format           = %s\n",     __func__, llama_file_version_name(ml.fver));
+    LLAMA_LOG_INFO("%s: arch             = %s\n",     __func__, llm_arch_name(model.arch));
+    LLAMA_LOG_INFO("%s: vocab type       = %s\n",     __func__, llama_model_vocab_type_name(vocab.type));
+    LLAMA_LOG_INFO("%s: n_vocab          = %u\n",     __func__, hparams.n_vocab);
+    LLAMA_LOG_INFO("%s: n_merges         = %u\n",     __func__, (int) vocab.bpe_ranks.size());
+    LLAMA_LOG_INFO("%s: vocab_only       = %d\n",     __func__, hparams.vocab_only);
+
+    if (!hparams.vocab_only) {
+        LLAMA_LOG_INFO("%s: n_ctx_train      = %u\n",     __func__, hparams.n_ctx_train);
+        LLAMA_LOG_INFO("%s: n_embd           = %u\n",     __func__, hparams.n_embd);
+        LLAMA_LOG_INFO("%s: n_layer          = %u\n",     __func__, hparams.n_layer);
+        LLAMA_LOG_INFO("%s: n_head           = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head(il);    }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_head_kv        = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_rot            = %u\n",     __func__, hparams.n_rot);
+        LLAMA_LOG_INFO("%s: n_swa            = %u\n",     __func__, hparams.n_swa);
+        LLAMA_LOG_INFO("%s: n_embd_head_k    = %u\n",     __func__, hparams.n_embd_head_k);
+        LLAMA_LOG_INFO("%s: n_embd_head_v    = %u\n",     __func__, hparams.n_embd_head_v);
+        LLAMA_LOG_INFO("%s: n_gqa            = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il);        }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_embd_k_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_embd_v_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: f_norm_eps       = %.1e\n",   __func__, hparams.f_norm_eps);
+        LLAMA_LOG_INFO("%s: f_norm_rms_eps   = %.1e\n",   __func__, hparams.f_norm_rms_eps);
+        LLAMA_LOG_INFO("%s: f_clamp_kqv      = %.1e\n",   __func__, hparams.f_clamp_kqv);
+        LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n",   __func__, hparams.f_max_alibi_bias);
+        LLAMA_LOG_INFO("%s: f_logit_scale    = %.1e\n",   __func__, hparams.f_logit_scale);
+        LLAMA_LOG_INFO("%s: n_ff             = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
+        LLAMA_LOG_INFO("%s: n_expert         = %u\n",     __func__, hparams.n_expert);
+        LLAMA_LOG_INFO("%s: n_expert_used    = %u\n",     __func__, hparams.n_expert_used);
+        LLAMA_LOG_INFO("%s: causal attn      = %d\n",     __func__, hparams.causal_attn);
+        LLAMA_LOG_INFO("%s: pooling type     = %d\n",     __func__, hparams.pooling_type);
+        LLAMA_LOG_INFO("%s: rope type        = %d\n",     __func__, hparams.rope_type);
+        LLAMA_LOG_INFO("%s: rope scaling     = %s\n",     __func__, rope_scaling_type);
+        LLAMA_LOG_INFO("%s: freq_base_train  = %.1f\n",   __func__, hparams.rope_freq_base_train);
+        LLAMA_LOG_INFO("%s: freq_scale_train = %g\n",     __func__, hparams.rope_freq_scale_train);
+        LLAMA_LOG_INFO("%s: n_ctx_orig_yarn  = %u\n",     __func__, hparams.n_ctx_orig_yarn);
+        LLAMA_LOG_INFO("%s: rope_finetuned   = %s\n",     __func__, hparams.rope_finetuned ? "yes" : "unknown");
+        LLAMA_LOG_INFO("%s: ssm_d_conv       = %u\n",     __func__, hparams.ssm_d_conv);
+        LLAMA_LOG_INFO("%s: ssm_d_inner      = %u\n",     __func__, hparams.ssm_d_inner);
+        LLAMA_LOG_INFO("%s: ssm_d_state      = %u\n",     __func__, hparams.ssm_d_state);
+        LLAMA_LOG_INFO("%s: ssm_dt_rank      = %u\n",     __func__, hparams.ssm_dt_rank);
+        LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms   = %d\n",     __func__, hparams.ssm_dt_b_c_rms);
+    }
+
+    LLAMA_LOG_INFO("%s: model type       = %s\n",     __func__, llama_model_type_name(model).c_str());
+    LLAMA_LOG_INFO("%s: model ftype      = %s\n",     __func__, llama_model_ftype_name(model).c_str());
+    if (ml.n_elements >= 1e12) {
+        LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, ml.n_elements*1e-12);
+    } else if (ml.n_elements >= 1e9) {
+        LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, ml.n_elements*1e-9);
+    } else if (ml.n_elements >= 1e6) {
+        LLAMA_LOG_INFO("%s: model params     = %.2f M\n", __func__, ml.n_elements*1e-6);
+    } else {
+        LLAMA_LOG_INFO("%s: model params     = %.2f K\n", __func__, ml.n_elements*1e-3);
+    }
+    if (ml.n_bytes < GiB) {
+        LLAMA_LOG_INFO("%s: model size       = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0,        ml.n_bytes*8.0/ml.n_elements);
+    } else {
+        LLAMA_LOG_INFO("%s: model size       = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+    }
+
+    // general kv
+    LLAMA_LOG_INFO("%s: general.name     = %s\n",    __func__, model.name.c_str());
+
+    // special tokens
+    if (vocab.special_bos_id  != -1)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,     vocab.id_to_token[vocab.special_bos_id].text.c_str() );  }
+    if (vocab.special_eos_id  != -1)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,     vocab.id_to_token[vocab.special_eos_id].text.c_str() );  }
+    if (vocab.special_eot_id  != -1)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,     vocab.id_to_token[vocab.special_eot_id].text.c_str() );  }
+    if (vocab.special_eom_id  != -1)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, vocab.special_eom_id,     vocab.id_to_token[vocab.special_eom_id].text.c_str() );  }
+    if (vocab.special_unk_id  != -1)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,     vocab.id_to_token[vocab.special_unk_id].text.c_str() );  }
+    if (vocab.special_sep_id  != -1)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,     vocab.id_to_token[vocab.special_sep_id].text.c_str() );  }
+    if (vocab.special_pad_id  != -1)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,     vocab.id_to_token[vocab.special_pad_id].text.c_str() );  }
+    if (vocab.special_cls_id  != -1)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,     vocab.id_to_token[vocab.special_cls_id].text.c_str() );  }
+    if (vocab.special_mask_id != -1)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,    vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
+
+    if (vocab.linefeed_id != -1)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,        vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
+
+    if (vocab.special_fim_pre_id != -1) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
+    if (vocab.special_fim_suf_id != -1) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
+    if (vocab.special_fim_mid_id != -1) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
+    if (vocab.special_fim_pad_id != -1) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
+    if (vocab.special_fim_rep_id != -1) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
+    if (vocab.special_fim_sep_id != -1) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
+
+    for (const auto & id : vocab.special_eog_ids) {
+        LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, vocab.id_to_token[id].text.c_str() );
+    }
+
+    LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, vocab.max_token_len);
+
+    if (model.arch == LLM_ARCH_DEEPSEEK) {
+        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
+        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
+        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
+        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
+    }
+
+    if (model.arch == LLM_ARCH_DEEPSEEK2) {
+        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
+        LLAMA_LOG_INFO("%s: n_lora_q             = %d\n",     __func__, hparams.n_lora_q);
+        LLAMA_LOG_INFO("%s: n_lora_kv            = %d\n",     __func__, hparams.n_lora_kv);
+        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
+        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
+        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
+        LLAMA_LOG_INFO("%s: rope_yarn_log_mul    = %.4f\n",   __func__, hparams.rope_yarn_log_mul);
+    }
+
+    if (model.arch == LLM_ARCH_QWEN2MOE) {
+        LLAMA_LOG_INFO("%s: n_ff_exp         = %d\n",     __func__, hparams.n_ff_exp);
+        LLAMA_LOG_INFO("%s: n_ff_shexp       = %d\n",     __func__, hparams.n_ff_shexp);
+    }
+
+    if (model.arch == LLM_ARCH_MINICPM || model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) {
+        LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
+        LLAMA_LOG_INFO("%s: f_residual_scale  = %f\n", __func__, hparams.f_residual_scale);
+        LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
+    }
+}
+
+//
+// interface implementation
+//
+
+struct llama_model_params llama_model_default_params() {
+    struct llama_model_params result = {
+        /*.devices                     =*/ nullptr,
+        /*.n_gpu_layers                =*/ 0,
+        /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
+        /*.main_gpu                    =*/ 0,
+        /*.tensor_split                =*/ nullptr,
+        /*.rpc_servers                 =*/ nullptr,
+        /*.progress_callback           =*/ nullptr,
+        /*.progress_callback_user_data =*/ nullptr,
+        /*.kv_overrides                =*/ nullptr,
+        /*.vocab_only                  =*/ false,
+        /*.use_mmap                    =*/ true,
+        /*.use_mlock                   =*/ false,
+        /*.check_tensors               =*/ false,
+    };
+
+#ifdef GGML_USE_METAL
+    // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
+    result.n_gpu_layers = 999;
+#endif
+
+    return result;
+}
+
+void llama_free_model(struct llama_model * model) {
+    delete model;
+}
+
+enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
+    return model->vocab.type;
+}
+
+int32_t llama_n_vocab(const struct llama_model * model) {
+    return model->hparams.n_vocab;
+}
+
+int32_t llama_n_ctx_train(const struct llama_model * model) {
+    return model->hparams.n_ctx_train;
+}
+
+int32_t llama_n_embd(const struct llama_model * model) {
+    return model->hparams.n_embd;
+}
+
+int32_t llama_n_layer(const struct llama_model * model) {
+    return model->hparams.n_layer;
+}
+
+int32_t llama_n_head(const struct llama_model * model) {
+    return model->hparams.n_head();
+}
+
+enum llama_rope_type llama_rope_type(const struct llama_model * model) {
+    switch (model->arch) {
+        // these models do not use RoPE
+        case LLM_ARCH_GPT2:
+        case LLM_ARCH_GPTJ:
+        case LLM_ARCH_MPT:
+        case LLM_ARCH_REFACT:
+        case LLM_ARCH_BLOOM:
+        case LLM_ARCH_MAMBA:
+        case LLM_ARCH_JINA_BERT_V2:
+        case LLM_ARCH_T5:
+        case LLM_ARCH_T5ENCODER:
+        case LLM_ARCH_JAIS:
+        case LLM_ARCH_RWKV6:
+        case LLM_ARCH_WAVTOKENIZER_DEC:
+            return LLAMA_ROPE_TYPE_NONE;
+
+        // use what we call a normal RoPE, operating on pairs of consecutive head values
+        case LLM_ARCH_LLAMA:
+        case LLM_ARCH_DECI:
+        case LLM_ARCH_BAICHUAN:
+        case LLM_ARCH_STARCODER:
+        case LLM_ARCH_PLAMO:
+        case LLM_ARCH_ORION:
+        case LLM_ARCH_INTERNLM2:
+        case LLM_ARCH_MINICPM:
+        case LLM_ARCH_XVERSE:
+        case LLM_ARCH_COMMAND_R:
+        case LLM_ARCH_OLMO:
+        case LLM_ARCH_ARCTIC:
+        case LLM_ARCH_DEEPSEEK:
+        case LLM_ARCH_DEEPSEEK2:
+        case LLM_ARCH_CHATGLM:
+        case LLM_ARCH_GRANITE:
+        case LLM_ARCH_GRANITE_MOE:
+        case LLM_ARCH_CHAMELEON:
+            return LLAMA_ROPE_TYPE_NORM;
+
+        // the pairs of head values are offset by n_rot/2
+        case LLM_ARCH_FALCON:
+        case LLM_ARCH_GROK:
+        case LLM_ARCH_DBRX:
+        case LLM_ARCH_BERT:
+        case LLM_ARCH_NOMIC_BERT:
+        case LLM_ARCH_STABLELM:
+        case LLM_ARCH_BITNET:
+        case LLM_ARCH_QWEN:
+        case LLM_ARCH_QWEN2:
+        case LLM_ARCH_QWEN2MOE:
+        case LLM_ARCH_OLMO2:
+        case LLM_ARCH_OLMOE:
+        case LLM_ARCH_PHI2:
+        case LLM_ARCH_PHI3:
+        case LLM_ARCH_GEMMA:
+        case LLM_ARCH_GEMMA2:
+        case LLM_ARCH_STARCODER2:
+        case LLM_ARCH_OPENELM:
+        case LLM_ARCH_GPTNEOX:
+        case LLM_ARCH_CODESHELL:
+        case LLM_ARCH_NEMOTRON:
+        case LLM_ARCH_EXAONE:
+        case LLM_ARCH_MINICPM3:
+            return LLAMA_ROPE_TYPE_NEOX;
+
+        case LLM_ARCH_QWEN2VL:
+            return LLAMA_ROPE_TYPE_MROPE;
+
+        // all model arches should be listed explicitly here
+        case LLM_ARCH_UNKNOWN:
+            GGML_ABORT("unknown architecture");
+    }
+
+    return LLAMA_ROPE_TYPE_NONE;
+}
+
+float llama_rope_freq_scale_train(const struct llama_model * model) {
+    return model->hparams.rope_freq_scale_train;
+}
+
+int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
+    const auto & it = model->gguf_kv.find(key);
+    if (it == model->gguf_kv.end()) {
+        if (buf_size > 0) {
+            buf[0] = '\0';
+        }
+        return -1;
+    }
+    return snprintf(buf, buf_size, "%s", it->second.c_str());
+}
+
+int32_t llama_model_meta_count(const struct llama_model * model) {
+    return (int)model->gguf_kv.size();
+}
+
+int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
+    if (i < 0 || i >= (int)model->gguf_kv.size()) {
+        if (buf_size > 0) {
+            buf[0] = '\0';
+        }
+        return -1;
+    }
+    auto it = model->gguf_kv.begin();
+    std::advance(it, i);
+    return snprintf(buf, buf_size, "%s", it->first.c_str());
+}
+
+int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
+    if (i < 0 || i >= (int)model->gguf_kv.size()) {
+        if (buf_size > 0) {
+            buf[0] = '\0';
+        }
+        return -1;
+    }
+    auto it = model->gguf_kv.begin();
+    std::advance(it, i);
+    return snprintf(buf, buf_size, "%s", it->second.c_str());
+}
+
+int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
+    return snprintf(buf, buf_size, "%s %s %s",
+            llama_model_arch_name (*model).c_str(),
+            llama_model_type_name (*model).c_str(),
+            llama_model_ftype_name(*model).c_str());
+}
+
+uint64_t llama_model_size(const struct llama_model * model) {
+    return model->n_bytes;
+}
+
+uint64_t llama_model_n_params(const struct llama_model * model) {
+    return model->n_elements;
+}
+
+bool llama_model_has_encoder(const struct llama_model * model) {
+    switch (model->arch) {
+        case LLM_ARCH_T5:        return true;
+        case LLM_ARCH_T5ENCODER: return true;
+        default:                 return false;
+    }
+}
+
+bool llama_model_has_decoder(const struct llama_model * model) {
+    switch (model->arch) {
+        case LLM_ARCH_T5ENCODER: return false;
+        default:                 return true;
+    }
+}
+
+llama_token llama_model_decoder_start_token(const struct llama_model * model) {
+    return model->hparams.dec_start_token_id;
+}
+
+bool llama_model_is_recurrent(const struct llama_model * model) {
+    switch (model->arch) {
+        case LLM_ARCH_MAMBA:  return true;
+        case LLM_ARCH_RWKV6:  return true;
+        default:              return false;
+    }
+}
diff --git a/src/llama-model.h b/src/llama-model.h
new file mode 100644
index 000000000..01c780c41
--- /dev/null
+++ b/src/llama-model.h
@@ -0,0 +1,389 @@
+#pragma once
+
+#include "llama.h"
+#include "llama-arch.h"
+#include "llama-hparams.h"
+#include "llama-vocab.h"
+#include "llama-mmap.h"
+
+#include "ggml-cpp.h"
+
+#include 
+
+// available models
+// TODO: this enum does not follow the enum naming convention
+enum llm_type {
+    MODEL_UNKNOWN,
+    MODEL_14M,
+    MODEL_17M,
+    MODEL_22M,
+    MODEL_33M,
+    MODEL_60M,
+    MODEL_70M,
+    MODEL_80M,
+    MODEL_109M,
+    MODEL_137M,
+    MODEL_160M,
+    MODEL_220M,
+    MODEL_250M,
+    MODEL_270M,
+    MODEL_335M,
+    MODEL_410M,
+    MODEL_450M,
+    MODEL_770M,
+    MODEL_780M,
+    MODEL_0_5B,
+    MODEL_1B,
+    MODEL_1_3B,
+    MODEL_1_4B,
+    MODEL_1_5B,
+    MODEL_1_6B,
+    MODEL_2B,
+    MODEL_2_8B,
+    MODEL_3B,
+    MODEL_4B,
+    MODEL_6B,
+    MODEL_6_9B,
+    MODEL_7B,
+    MODEL_8B,
+    MODEL_9B,
+    MODEL_11B,
+    MODEL_12B,
+    MODEL_13B,
+    MODEL_14B,
+    MODEL_15B,
+    MODEL_16B,
+    MODEL_20B,
+    MODEL_30B,
+    MODEL_32B,
+    MODEL_34B,
+    MODEL_35B,
+    MODEL_40B,
+    MODEL_65B,
+    MODEL_70B,
+    MODEL_236B,
+    MODEL_314B,
+    MODEL_SMALL,
+    MODEL_MEDIUM,
+    MODEL_LARGE,
+    MODEL_XL,
+    MODEL_A1_7B,
+    MODEL_A2_7B,
+    MODEL_8x7B,
+    MODEL_8x22B,
+    MODEL_16x12B,
+    MODEL_10B_128x3_66B,
+    MODEL_57B_A14B,
+    MODEL_27B,
+};
+
+struct llama_layer_posnet {
+    // resnet
+    struct ggml_tensor * norm1   = nullptr;
+    struct ggml_tensor * norm1_b = nullptr;
+
+    struct ggml_tensor * conv1   = nullptr;
+    struct ggml_tensor * conv1_b = nullptr;
+
+    struct ggml_tensor * norm2   = nullptr;
+    struct ggml_tensor * norm2_b = nullptr;
+
+    struct ggml_tensor * conv2   = nullptr;
+    struct ggml_tensor * conv2_b = nullptr;
+
+    // attention
+    struct ggml_tensor * attn_norm   = nullptr;
+    struct ggml_tensor * attn_norm_b = nullptr;
+
+    struct ggml_tensor * attn_q   = nullptr;
+    struct ggml_tensor * attn_q_b = nullptr;
+
+    struct ggml_tensor * attn_k   = nullptr;
+    struct ggml_tensor * attn_k_b = nullptr;
+
+    struct ggml_tensor * attn_v   = nullptr;
+    struct ggml_tensor * attn_v_b = nullptr;
+
+    struct ggml_tensor * attn_o   = nullptr;
+    struct ggml_tensor * attn_o_b = nullptr;
+
+    // normalize
+    struct ggml_tensor * norm   = nullptr;
+    struct ggml_tensor * norm_b = nullptr;
+};
+
+struct llama_layer_convnext {
+    struct ggml_tensor * dw   = nullptr;
+    struct ggml_tensor * dw_b = nullptr;
+
+    struct ggml_tensor * norm   = nullptr;
+    struct ggml_tensor * norm_b = nullptr;
+
+    struct ggml_tensor * pw1   = nullptr;
+    struct ggml_tensor * pw1_b = nullptr;
+
+    struct ggml_tensor * pw2   = nullptr;
+    struct ggml_tensor * pw2_b = nullptr;
+
+    struct ggml_tensor * gamma = nullptr;
+};
+
+struct llama_layer {
+    // normalization
+    struct ggml_tensor * attn_norm       = nullptr;
+    struct ggml_tensor * attn_norm_b     = nullptr;
+    struct ggml_tensor * attn_norm_2     = nullptr;
+    struct ggml_tensor * attn_norm_2_b   = nullptr;
+    struct ggml_tensor * attn_q_norm     = nullptr;
+    struct ggml_tensor * attn_q_norm_b   = nullptr;
+    struct ggml_tensor * attn_k_norm     = nullptr;
+    struct ggml_tensor * attn_k_norm_b   = nullptr;
+    struct ggml_tensor * attn_out_norm   = nullptr;
+    struct ggml_tensor * attn_out_norm_b = nullptr;
+    struct ggml_tensor * attn_q_a_norm   = nullptr;
+    struct ggml_tensor * attn_kv_a_norm  = nullptr;
+    struct ggml_tensor * attn_sub_norm   = nullptr;
+    struct ggml_tensor * attn_post_norm  = nullptr;
+    struct ggml_tensor * ffn_sub_norm    = nullptr;
+    struct ggml_tensor * attn_norm_cross = nullptr;
+    struct ggml_tensor * attn_norm_enc   = nullptr;
+
+    // attention
+    struct ggml_tensor * wq        = nullptr;
+    struct ggml_tensor * wk        = nullptr;
+    struct ggml_tensor * wv        = nullptr;
+    struct ggml_tensor * wo        = nullptr;
+    struct ggml_tensor * wqkv      = nullptr;
+    struct ggml_tensor * wq_a      = nullptr;
+    struct ggml_tensor * wq_b      = nullptr;
+    struct ggml_tensor * wkv_a_mqa = nullptr;
+    struct ggml_tensor * wkv_b     = nullptr;
+    struct ggml_tensor * wq_cross  = nullptr;
+    struct ggml_tensor * wk_cross  = nullptr;
+    struct ggml_tensor * wv_cross  = nullptr;
+    struct ggml_tensor * wo_cross  = nullptr;
+    struct ggml_tensor * wq_enc    = nullptr;
+    struct ggml_tensor * wk_enc    = nullptr;
+    struct ggml_tensor * wv_enc    = nullptr;
+    struct ggml_tensor * wo_enc    = nullptr;
+
+    // attention bias
+    struct ggml_tensor * bq   = nullptr;
+    struct ggml_tensor * bk   = nullptr;
+    struct ggml_tensor * bv   = nullptr;
+    struct ggml_tensor * bo   = nullptr;
+    struct ggml_tensor * bqkv = nullptr;
+
+    // relative position bias
+    struct ggml_tensor * attn_rel_b       = nullptr;
+    struct ggml_tensor * attn_rel_b_enc   = nullptr;
+    struct ggml_tensor * attn_rel_b_cross = nullptr;
+
+    // normalization
+    struct ggml_tensor * ffn_norm         = nullptr;
+    struct ggml_tensor * ffn_norm_b       = nullptr;
+    struct ggml_tensor * ffn_post_norm    = nullptr;
+    struct ggml_tensor * layer_out_norm   = nullptr;
+    struct ggml_tensor * layer_out_norm_b = nullptr;
+    struct ggml_tensor * ffn_norm_exps    = nullptr;
+    struct ggml_tensor * ffn_norm_enc     = nullptr;
+
+    // ff
+    struct ggml_tensor * ffn_gate     = nullptr; // w1
+    struct ggml_tensor * ffn_down     = nullptr; // w2
+    struct ggml_tensor * ffn_up       = nullptr; // w3
+    struct ggml_tensor * ffn_gate_enc = nullptr;
+    struct ggml_tensor * ffn_down_enc = nullptr;
+    struct ggml_tensor * ffn_up_enc   = nullptr;
+
+    // ff MoE
+    struct ggml_tensor * ffn_gate_inp  = nullptr;
+    struct ggml_tensor * ffn_gate_exps = nullptr;
+    struct ggml_tensor * ffn_down_exps = nullptr;
+    struct ggml_tensor * ffn_up_exps   = nullptr;
+
+    // ff shared expert (shexp)
+    struct ggml_tensor * ffn_gate_inp_shexp = nullptr;
+    struct ggml_tensor * ffn_gate_shexp     = nullptr;
+    struct ggml_tensor * ffn_down_shexp     = nullptr;
+    struct ggml_tensor * ffn_up_shexp       = nullptr;
+
+    // ff bias
+    struct ggml_tensor * ffn_gate_b = nullptr;
+    struct ggml_tensor * ffn_down_b = nullptr; // b2
+    struct ggml_tensor * ffn_up_b   = nullptr; // b3
+    struct ggml_tensor * ffn_act    = nullptr;
+
+    // mamba proj
+    struct ggml_tensor * ssm_in  = nullptr;
+    struct ggml_tensor * ssm_x   = nullptr;
+    struct ggml_tensor * ssm_dt  = nullptr;
+    struct ggml_tensor * ssm_out = nullptr;
+
+    // mamba
+    struct ggml_tensor * ssm_conv1d = nullptr;
+    struct ggml_tensor * ssm_a      = nullptr;
+    struct ggml_tensor * ssm_d      = nullptr;
+
+    // mamba bias
+    struct ggml_tensor * ssm_conv1d_b = nullptr;
+    struct ggml_tensor * ssm_dt_b     = nullptr;
+
+    // rwkv
+    struct ggml_tensor * time_mix_w1         = nullptr;
+    struct ggml_tensor * time_mix_w2         = nullptr;
+    struct ggml_tensor * time_mix_lerp_x     = nullptr;
+    struct ggml_tensor * time_mix_lerp_w     = nullptr;
+    struct ggml_tensor * time_mix_lerp_k     = nullptr;
+    struct ggml_tensor * time_mix_lerp_v     = nullptr;
+    struct ggml_tensor * time_mix_lerp_r     = nullptr;
+    struct ggml_tensor * time_mix_lerp_g     = nullptr;
+
+    struct ggml_tensor * time_mix_first      = nullptr;
+    struct ggml_tensor * time_mix_decay      = nullptr;
+    struct ggml_tensor * time_mix_decay_w1   = nullptr;
+    struct ggml_tensor * time_mix_decay_w2   = nullptr;
+    struct ggml_tensor * time_mix_key        = nullptr;
+    struct ggml_tensor * time_mix_value      = nullptr;
+    struct ggml_tensor * time_mix_receptance = nullptr;
+    struct ggml_tensor * time_mix_gate       = nullptr;
+
+    struct ggml_tensor * time_mix_ln     = nullptr;
+    struct ggml_tensor * time_mix_ln_b   = nullptr;
+    struct ggml_tensor * time_mix_output = nullptr;
+
+    struct ggml_tensor * channel_mix_lerp_k = nullptr;
+    struct ggml_tensor * channel_mix_lerp_r = nullptr;
+
+    struct ggml_tensor * channel_mix_key        = nullptr;
+    struct ggml_tensor * channel_mix_receptance = nullptr;
+    struct ggml_tensor * channel_mix_value      = nullptr;
+
+    // long rope factors
+    struct ggml_tensor * rope_long  = nullptr;
+    struct ggml_tensor * rope_short = nullptr;
+    struct ggml_tensor * rope_freqs = nullptr;
+
+    // bitnet scale
+    struct ggml_tensor * wq_scale       = nullptr;
+    struct ggml_tensor * wk_scale       = nullptr;
+    struct ggml_tensor * wv_scale       = nullptr;
+    struct ggml_tensor * wo_scale       = nullptr;
+    struct ggml_tensor * ffn_gate_scale = nullptr;
+    struct ggml_tensor * ffn_up_scale   = nullptr;
+    struct ggml_tensor * ffn_down_scale = nullptr;
+
+    struct llama_layer_posnet posnet;
+
+    struct llama_layer_convnext convnext;
+};
+
+struct llama_model {
+    llm_type type = MODEL_UNKNOWN;
+    llm_arch arch = LLM_ARCH_UNKNOWN;
+
+    llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
+
+    std::string name = "n/a";
+
+    llama_hparams hparams = {};
+    llama_vocab   vocab;
+
+    struct ggml_tensor * tok_embd   = nullptr;
+    struct ggml_tensor * type_embd  = nullptr;
+    struct ggml_tensor * pos_embd   = nullptr;
+    struct ggml_tensor * tok_norm   = nullptr;
+    struct ggml_tensor * tok_norm_b = nullptr;
+
+    struct ggml_tensor * output_norm     = nullptr;
+    struct ggml_tensor * output_norm_b   = nullptr;
+    struct ggml_tensor * output          = nullptr;
+    struct ggml_tensor * output_b        = nullptr;
+    struct ggml_tensor * output_norm_enc = nullptr;
+
+    // classifier
+    struct ggml_tensor * cls       = nullptr;
+    struct ggml_tensor * cls_b     = nullptr;
+    struct ggml_tensor * cls_out   = nullptr;
+    struct ggml_tensor * cls_out_b = nullptr;
+
+    struct ggml_tensor * conv1d   = nullptr;
+    struct ggml_tensor * conv1d_b = nullptr;
+
+    std::vector layers;
+
+    // gguf metadata
+    std::unordered_map gguf_kv;
+
+    llama_split_mode split_mode;
+    int main_gpu;
+    int n_gpu_layers;
+
+    std::vector rpc_servers;
+
+    // list of devices used in this model
+    std::vector devices;
+
+
+    // lists of buffer types used for each layer
+    using buft_list_t = std::vector>;
+    buft_list_t cpu_buft_list;
+    std::map gpu_buft_list;
+
+    struct layer_dev {
+        ggml_backend_dev_t dev;
+        buft_list_t * buft_list;
+    };
+
+    layer_dev dev_input = {};
+    layer_dev dev_output = {};
+    std::vector dev_layer;
+
+    // contexts where the model tensors metadata is stored
+    std::vector ctxs;
+
+    // the model memory buffers for the tensor data
+    std::vector bufs;
+
+    // model memory mapped files
+    llama_mmaps mappings;
+
+    // objects representing data potentially being locked in memory
+    llama_mlocks mlock_bufs;
+    llama_mlocks mlock_mmaps;
+
+    // for quantize-stats only
+    std::vector> tensors_by_name;
+
+    int64_t t_load_us  = 0;
+    int64_t t_start_us = 0;
+
+    // total number of parameters in the model
+    uint64_t n_elements = 0;
+
+    // total size of all the tensors in the model in bytes
+    size_t  n_bytes     = 0;
+};
+
+const char * llm_type_name(llm_type type);
+
+std::string llama_model_arch_name (const llama_model & model);
+std::string llama_model_type_name (const llama_model & model);
+std::string llama_model_ftype_name(const llama_model & model);
+
+// used by llama_adapter_cvec
+ggml_backend_buffer_type_t llama_model_select_buft(const llama_model & model, int il);
+
+// used by llama_adapter_lora
+struct ggml_tensor * llama_model_get_tensor(const struct llama_model & model, const char * name);
+
+size_t llama_model_max_nodes(const llama_model & model);
+
+struct llama_model_loader;
+
+// TODO: become llama_model methods
+void llm_load_stats     (llama_model_loader & ml, llama_model & model);
+void llm_load_arch      (llama_model_loader & ml, llama_model & model);
+void llm_load_hparams   (llama_model_loader & ml, llama_model & model);
+void llm_load_vocab     (llama_model_loader & ml, llama_model & model);
+void llm_load_print_meta(llama_model_loader & ml, llama_model & model);
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
new file mode 100644
index 000000000..42974f8f1
--- /dev/null
+++ b/src/llama-quant.cpp
@@ -0,0 +1,929 @@
+#include "llama-quant.h"
+
+#include "llama-impl.h"
+#include "llama-model.h"
+#include "llama-model-loader.h"
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+// TODO: replace with ggml API call
+#define QK_K 256
+
+static void zeros(std::ofstream & file, size_t n) {
+    char zero = 0;
+    for (size_t i = 0; i < n; ++i) {
+        file.write(&zero, 1);
+    }
+}
+
+struct quantize_state_internal {
+    const llama_model                 & model;
+    const llama_model_quantize_params * params;
+
+    int n_attention_wv = 0;
+    int n_ffn_down     = 0;
+    int n_ffn_gate     = 0;
+    int n_ffn_up       = 0;
+    int i_attention_wv = 0;
+    int i_ffn_down     = 0;
+    int i_ffn_gate     = 0;
+    int i_ffn_up       = 0;
+
+    int n_k_quantized = 0;
+    int n_fallback    = 0;
+
+    bool has_imatrix = false;
+
+    // used to figure out if a model shares tok_embd with the output weight
+    bool has_output = false;
+
+    quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
+        : model(model)
+        , params(params)
+        {}
+};
+
+static void llama_tensor_dequantize_internal(
+    struct ggml_tensor * tensor, std::vector> & output, std::vector & workers,
+    const size_t nelements, const int nthread
+) {
+    if (output.size() < nelements) {
+        output.resize(nelements);
+    }
+    float * f32_output = (float *) output.data();
+
+    const ggml_type_traits * qtype = ggml_get_type_traits(tensor->type);
+    if (ggml_is_quantized(tensor->type)) {
+        if (qtype->to_float == NULL) {
+            throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
+        }
+    } else if (tensor->type != GGML_TYPE_F16 &&
+               tensor->type != GGML_TYPE_BF16) {
+        throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
+    }
+
+    if (nthread < 2) {
+        if (tensor->type == GGML_TYPE_F16) {
+            ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
+        } else if (tensor->type == GGML_TYPE_BF16) {
+            ggml_bf16_to_fp32_row((ggml_bf16_t *)tensor->data, f32_output, nelements);
+        } else if (ggml_is_quantized(tensor->type)) {
+            qtype->to_float(tensor->data, f32_output, nelements);
+        } else {
+            GGML_ABORT("fatal error"); // unreachable
+        }
+        return;
+    }
+
+    size_t block_size;
+    if (tensor->type == GGML_TYPE_F16 ||
+        tensor->type == GGML_TYPE_BF16) {
+        block_size = 1;
+    } else {
+        block_size = (size_t)ggml_blck_size(tensor->type);
+    }
+
+    size_t block_size_bytes = ggml_type_size(tensor->type);
+
+    GGML_ASSERT(nelements % block_size == 0);
+    size_t nblocks = nelements / block_size;
+    size_t blocks_per_thread = nblocks / nthread;
+    size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
+
+    size_t in_buff_offs = 0;
+    size_t out_buff_offs = 0;
+
+    for (int tnum = 0; tnum < nthread; tnum++) {
+        size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
+        size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
+        size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
+
+        auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
+            if (typ == GGML_TYPE_F16) {
+                ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
+            } else if (typ == GGML_TYPE_BF16) {
+                ggml_bf16_to_fp32_row((ggml_bf16_t *)inbuf, outbuf, nels);
+            } else {
+                qtype->to_float(inbuf, outbuf, nels);
+            }
+        };
+        workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
+        in_buff_offs += thr_block_bytes;
+        out_buff_offs += thr_elems;
+    }
+    for (auto & w : workers) { w.join(); }
+    workers.clear();
+}
+
+static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
+    const std::string name = ggml_get_name(tensor);
+
+    // TODO: avoid hardcoded tensor names - use the TN_* constants
+    const llm_arch arch = qs.model.arch;
+    const auto       tn = LLM_TN(arch);
+
+    auto use_more_bits = [](int i_layer, int n_layers) -> bool {
+        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
+    };
+    const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
+    auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
+        if (n_expert > 1) {
+            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
+            // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
+            // for getting the current layer as I initially thought, and we need to resort to parsing the
+            // tensor name.
+            if (sscanf(name, "blk.%d.", &i_layer) != 1) {
+                throw std::runtime_error(format("Failed to determine layer for tensor %s", name));
+            }
+            if (i_layer < 0 || i_layer >= n_layer) {
+                throw std::runtime_error(format("Bad layer %d for tensor %s. Must be in [0, %d)", i_layer, name, n_layer));
+            }
+        }
+        return std::make_pair(i_layer, n_layer);
+    };
+
+    // for arches that share the same tensor between the token embeddings and the output, we quantize the token embeddings
+    // with the quantization of the output tensor
+    if (name == tn(LLM_TENSOR_OUTPUT, "weight") || (!qs.has_output && name == tn(LLM_TENSOR_TOKEN_EMBD, "weight"))) {
+        if (qs.params->output_tensor_type < GGML_TYPE_COUNT) {
+            new_type = qs.params->output_tensor_type;
+        } else {
+            int nx = tensor->ne[0];
+            if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
+                new_type = GGML_TYPE_Q8_0;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
+                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S  || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M   ||
+                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
+                new_type = GGML_TYPE_Q5_K;
+            }
+            else if (new_type != GGML_TYPE_Q8_0) {
+                new_type = GGML_TYPE_Q6_K;
+            }
+        }
+    } else if (name == "token_embd.weight") {
+        if (qs.params->token_embedding_type < GGML_TYPE_COUNT) {
+            new_type = qs.params->token_embedding_type;
+        } else {
+            if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
+                ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
+                new_type = GGML_TYPE_Q2_K;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
+                new_type = GGML_TYPE_IQ3_S;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+                new_type = GGML_TYPE_IQ3_S;
+            }
+            else if (ftype == LLAMA_FTYPE_MOSTLY_TQ1_0 || ftype == LLAMA_FTYPE_MOSTLY_TQ2_0) {
+                new_type = GGML_TYPE_Q4_K;
+            }
+        }
+    } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
+               ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M    || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
+        if (name.find("attn_v.weight") != std::string::npos) {
+            if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
+            else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            ++qs.i_attention_wv;
+        }
+        else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (name.find("ffn_down") != std::string::npos) {
+            if (qs.i_ffn_down < qs.n_ffn_down/8) {
+                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
+            }
+            ++qs.i_ffn_down;
+        }
+        else if (name.find("attn_output.weight") != std::string::npos) {
+            if (qs.model.hparams.n_expert == 8) {
+                new_type = GGML_TYPE_Q5_K;
+            } else {
+                if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
+            }
+        }
+    } else if (name.find("attn_v.weight") != std::string::npos) {
+        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_IQ3_S : GGML_TYPE_IQ3_XXS;
+        }
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S) && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
+            new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4) {
+            new_type = GGML_TYPE_Q5_K;
+        }
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
+                use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
+        if (qs.model.type == MODEL_70B) {
+            // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
+            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
+            // nearly negligible increase in model size by quantizing this tensor with more bits:
+            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
+        }
+        if (qs.model.hparams.n_expert == 8) {
+            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+            // TODO: explore better strategies
+            new_type = GGML_TYPE_Q8_0;
+        }
+        ++qs.i_attention_wv;
+    } else if (name.find("attn_k.weight") != std::string::npos) {
+        if (qs.model.hparams.n_expert == 8) {
+            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
+            // TODO: explore better strategies
+            new_type = GGML_TYPE_Q8_0;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
+            new_type = GGML_TYPE_IQ3_XXS;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+            new_type = GGML_TYPE_IQ2_S;
+        }
+    } else if (name.find("attn_q.weight") != std::string::npos) {
+        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
+            new_type = GGML_TYPE_IQ3_XXS;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
+            new_type = GGML_TYPE_IQ2_S;
+        }
+    } else if (name.find("ffn_down") != std::string::npos) {
+        auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
+        int i_layer = info.first, n_layer = info.second;
+        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
+            if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && !qs.has_imatrix) {
+            new_type = i_layer < n_layer/8 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
+            new_type = i_layer < n_layer/16 ? GGML_TYPE_Q5_K
+                     : arch != LLM_ARCH_FALCON || use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q4_K
+                     : GGML_TYPE_Q3_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
+                    (qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
+            new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
+            if (arch == LLM_ARCH_FALCON) {
+                new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
+                           use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
+            } else {
+                if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
+            }
+        }
+        else if (i_layer < n_layer/8 && (ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && !qs.has_imatrix) {
+            new_type = GGML_TYPE_Q5_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
+            new_type = GGML_TYPE_Q5_K;
+        }
+        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0)
+                && qs.has_imatrix && i_layer < n_layer/8) {
+            // Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
+            // We only do it when an imatrix is provided because a) we want to make sure that one can always get the
+            // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
+            new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
+        }
+        ++qs.i_ffn_down;
+    } else if (name.find("attn_output.weight") != std::string::npos) {
+        if (arch != LLM_ARCH_FALCON) {
+            if (qs.model.hparams.n_expert == 8) {
+                if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
+                    ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL  ||
+                    ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S  ||
+                    ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) {
+                    new_type = GGML_TYPE_Q5_K;
+                }
+            } else {
+                if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   ) new_type = GGML_TYPE_Q3_K;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
+                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  ) new_type = GGML_TYPE_Q4_K;
+            }
+        } else {
+            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
+        }
+    }
+    else if (name.find("attn_qkv.weight") != std::string::npos) {
+        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
+            new_type = GGML_TYPE_Q4_K;
+        }
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
+        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
+    }
+    else if (name.find("ffn_gate") != std::string::npos) {
+        auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
+        int i_layer = info.first, n_layer = info.second;
+        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+            new_type = GGML_TYPE_IQ3_XXS;
+        }
+        ++qs.i_ffn_gate;
+    }
+    else if (name.find("ffn_up") != std::string::npos) {
+        auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
+        int i_layer = info.first, n_layer = info.second;
+        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
+            new_type = GGML_TYPE_IQ3_XXS;
+        }
+        ++qs.i_ffn_up;
+    }
+
+    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+    //}
+    // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
+    //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
+    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+    //}
+    // This can be used to reduce the size of the Q5_K_S model.
+    // The associated PPL increase is fully in line with the size reduction
+    //else {
+    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
+    //}
+    bool convert_incompatible_tensor = false;
+    if (new_type == GGML_TYPE_Q2_K    || new_type == GGML_TYPE_Q3_K    || new_type == GGML_TYPE_Q4_K   ||
+        new_type == GGML_TYPE_Q5_K    || new_type == GGML_TYPE_Q6_K    || new_type == GGML_TYPE_IQ4_XS ||
+        new_type == GGML_TYPE_IQ2_XS  || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S  ||
+        new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S   || new_type == GGML_TYPE_IQ3_S  ||
+        new_type == GGML_TYPE_IQ1_M) {
+        int nx = tensor->ne[0];
+        int ny = tensor->ne[1];
+        if (nx % QK_K != 0) {
+            LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
+            convert_incompatible_tensor = true;
+        } else {
+            ++qs.n_k_quantized;
+        }
+    }
+    if (convert_incompatible_tensor) {
+        switch (new_type) {
+            case GGML_TYPE_TQ1_0:
+            case GGML_TYPE_TQ2_0:  new_type = GGML_TYPE_Q4_0; break;  // TODO: use a symmetric type instead
+            case GGML_TYPE_IQ2_XXS:
+            case GGML_TYPE_IQ2_XS:
+            case GGML_TYPE_IQ2_S:
+            case GGML_TYPE_IQ3_XXS:
+            case GGML_TYPE_IQ3_S:
+            case GGML_TYPE_IQ1_S:
+            case GGML_TYPE_IQ1_M:
+            case GGML_TYPE_Q2_K:
+            case GGML_TYPE_Q3_K:
+            case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
+            case GGML_TYPE_Q4_K:   new_type = GGML_TYPE_Q5_0;   break;
+            case GGML_TYPE_Q5_K:   new_type = GGML_TYPE_Q5_1;   break;
+            case GGML_TYPE_Q6_K:   new_type = GGML_TYPE_Q8_0;   break;
+            default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
+        }
+        if (tensor->ne[0] % ggml_blck_size(new_type) != 0) {
+            new_type = GGML_TYPE_F16;
+        }
+        LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
+        ++qs.n_fallback;
+    }
+
+    return new_type;
+}
+
+static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector & workers, const int nthread) {
+    if (nthread < 2) {
+        // single-thread
+        size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
+        if (!ggml_validate_row_data(new_type, new_data, new_size)) {
+            throw std::runtime_error("quantized data validation failed");
+        }
+        return new_size;
+    }
+
+    std::mutex mutex;
+    int64_t counter = 0;
+    size_t new_size = 0;
+    bool valid = true;
+    auto compute = [&mutex, &counter, &new_size, &valid, new_type, f32_data, new_data, chunk_size,
+            nrows, n_per_row, imatrix]() {
+        const int64_t nrows_per_chunk = chunk_size / n_per_row;
+        size_t local_size = 0;
+        while (true) {
+            std::unique_lock lock(mutex);
+            int64_t first_row = counter; counter += nrows_per_chunk;
+            if (first_row >= nrows) {
+                if (local_size > 0) {
+                    new_size += local_size;
+                }
+                break;
+            }
+            lock.unlock();
+            const int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk);
+            size_t this_size = ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
+            local_size += this_size;
+
+            // validate the quantized data
+            const size_t row_size  = ggml_row_size(new_type, n_per_row);
+            void * this_data = (char *) new_data + first_row * row_size;
+            if (!ggml_validate_row_data(new_type, this_data, this_size)) {
+                std::unique_lock lock(mutex);
+                valid = false;
+                break;
+            }
+        }
+    };
+    for (int it = 0; it < nthread - 1; ++it) {
+        workers.emplace_back(compute);
+    }
+    compute();
+    for (auto & w : workers) { w.join(); }
+    workers.clear();
+    if (!valid) {
+        throw std::runtime_error("quantized data validation failed");
+    }
+    return new_size;
+}
+
+static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
+    ggml_type default_type;
+    llama_ftype ftype = params->ftype;
+
+    switch (params->ftype) {
+        case LLAMA_FTYPE_MOSTLY_Q4_0: default_type = GGML_TYPE_Q4_0; break;
+        case LLAMA_FTYPE_MOSTLY_Q4_1: default_type = GGML_TYPE_Q4_1; break;
+        case LLAMA_FTYPE_MOSTLY_Q5_0: default_type = GGML_TYPE_Q5_0; break;
+        case LLAMA_FTYPE_MOSTLY_Q5_1: default_type = GGML_TYPE_Q5_1; break;
+        case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break;
+        case LLAMA_FTYPE_MOSTLY_F16:  default_type = GGML_TYPE_F16;  break;
+        case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break;
+        case LLAMA_FTYPE_ALL_F32:     default_type = GGML_TYPE_F32;  break;
+
+        // K-quants
+        case LLAMA_FTYPE_MOSTLY_Q2_K_S:
+        case LLAMA_FTYPE_MOSTLY_Q2_K:    default_type = GGML_TYPE_Q2_K;    break;
+        case LLAMA_FTYPE_MOSTLY_IQ3_XS:  default_type = GGML_TYPE_IQ3_S;   break;
+        case LLAMA_FTYPE_MOSTLY_Q3_K_S:
+        case LLAMA_FTYPE_MOSTLY_Q3_K_M:
+        case LLAMA_FTYPE_MOSTLY_Q3_K_L:  default_type = GGML_TYPE_Q3_K;    break;
+        case LLAMA_FTYPE_MOSTLY_Q4_K_S:
+        case LLAMA_FTYPE_MOSTLY_Q4_K_M:  default_type = GGML_TYPE_Q4_K;    break;
+        case LLAMA_FTYPE_MOSTLY_Q5_K_S:
+        case LLAMA_FTYPE_MOSTLY_Q5_K_M:  default_type = GGML_TYPE_Q5_K;    break;
+        case LLAMA_FTYPE_MOSTLY_Q6_K:    default_type = GGML_TYPE_Q6_K;    break;
+        case LLAMA_FTYPE_MOSTLY_TQ1_0:   default_type = GGML_TYPE_TQ1_0;   break;
+        case LLAMA_FTYPE_MOSTLY_TQ2_0:   default_type = GGML_TYPE_TQ2_0;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: default_type = GGML_TYPE_IQ2_XXS; break;
+        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  default_type = GGML_TYPE_IQ2_XS;  break;
+        case LLAMA_FTYPE_MOSTLY_IQ2_S:   default_type = GGML_TYPE_IQ2_XS;  break;
+        case LLAMA_FTYPE_MOSTLY_IQ2_M:   default_type = GGML_TYPE_IQ2_S;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: default_type = GGML_TYPE_IQ3_XXS; break;
+        case LLAMA_FTYPE_MOSTLY_IQ1_S:   default_type = GGML_TYPE_IQ1_S;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ1_M:   default_type = GGML_TYPE_IQ1_M;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ4_NL:  default_type = GGML_TYPE_IQ4_NL;  break;
+        case LLAMA_FTYPE_MOSTLY_IQ4_XS:  default_type = GGML_TYPE_IQ4_XS;  break;
+        case LLAMA_FTYPE_MOSTLY_IQ3_S:   default_type = GGML_TYPE_IQ3_S;   break;
+        case LLAMA_FTYPE_MOSTLY_IQ3_M:   default_type = GGML_TYPE_IQ3_S;   break;
+
+        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
+    }
+
+    int nthread = params->nthread;
+
+    if (nthread <= 0) {
+        nthread = std::thread::hardware_concurrency();
+    }
+
+    // mmap consistently increases speed Linux, and also increases speed on Windows with
+    // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
+#if defined(__linux__) || defined(_WIN32)
+    constexpr bool use_mmap = true;
+#else
+    constexpr bool use_mmap = false;
+#endif
+
+    llama_model_kv_override * kv_overrides = nullptr;
+    if (params->kv_overrides) {
+        auto v = (std::vector*)params->kv_overrides;
+        kv_overrides = v->data();
+    }
+    llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
+    ml.init_mappings(false); // no prefetching
+
+    llama_model model;
+    llm_load_arch   (ml, model);
+    llm_load_hparams(ml, model);
+    llm_load_stats  (ml, model);
+
+    struct quantize_state_internal qs(model, params);
+
+    if (params->only_copy) {
+        ftype = model.ftype;
+    }
+    const std::unordered_map> * imatrix_data = nullptr;
+    if (params->imatrix) {
+        imatrix_data = static_cast>*>(params->imatrix);
+        if (imatrix_data) {
+            LLAMA_LOG_INFO("================================ Have weights data with %d entries\n",int(imatrix_data->size()));
+            qs.has_imatrix = true;
+            // check imatrix for nans or infs
+            for (const auto & kv : *imatrix_data) {
+                for (float f : kv.second) {
+                    if (!std::isfinite(f)) {
+                        throw std::runtime_error(format("imatrix contains non-finite value %f\n", f));
+                    }
+                }
+            }
+        }
+    }
+
+    const size_t align = GGUF_DEFAULT_ALIGNMENT;
+    gguf_context_ptr ctx_out { gguf_init_empty() };
+
+    // copy the KV pairs from the input file
+    gguf_set_kv     (ctx_out.get(), ml.meta.get());
+    gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
+    gguf_set_val_u32(ctx_out.get(), "general.file_type", ftype); // TODO: use LLM_KV
+
+    // Remove split metadata
+    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
+    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
+    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str());
+
+    if (params->kv_overrides) {
+        const std::vector & overrides = *(const std::vector *)params->kv_overrides;
+        for (const auto & o : overrides) {
+            if (o.key[0] == 0) break;
+            if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
+                gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64);
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
+                gguf_set_val_i32(ctx_out.get(), o.key, o.val_i64);
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
+                gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool);
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
+                gguf_set_val_str(ctx_out.get(), o.key, o.val_str);
+            } else {
+                LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
+            }
+        }
+    }
+
+    // make a list of weights
+    std::vector tensors;
+    tensors.reserve(ml.weights_map.size());
+    for (const auto & it : ml.weights_map) {
+        tensors.push_back(&it.second);
+    }
+
+    // keep_split requires that the weights are sorted by split index
+    if (params->keep_split) {
+        std::sort(tensors.begin(), tensors.end(), [](const llama_model_loader::llama_tensor_weight * a, const llama_model_loader::llama_tensor_weight * b) {
+            if (a->idx == b->idx) {
+                return a->offs < b->offs;
+            }
+            return a->idx < b->idx;
+        });
+    }
+
+    for (const auto * it : tensors) {
+        const struct ggml_tensor * tensor = it->tensor;
+
+        const std::string name = ggml_get_name(tensor);
+
+        // TODO: avoid hardcoded tensor names - use the TN_* constants
+        if (name.find("attn_v.weight")   != std::string::npos ||
+            name.find("attn_qkv.weight") != std::string::npos ||
+            name.find("attn_kv_b.weight")!= std::string::npos) {
+            ++qs.n_attention_wv;
+        } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
+            qs.has_output = true;
+        }
+    }
+
+    qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
+
+    // sanity checks
+    {
+        const auto & n_head_kv_iter = model.hparams.n_head_kv_arr.begin();
+        // attention layers have a non-zero number of kv heads
+        int32_t n_attn_layer = model.hparams.n_layer - std::count(n_head_kv_iter, n_head_kv_iter + model.hparams.n_layer, 0);
+        if (llama_model_has_encoder(&model)) {
+            n_attn_layer *= 3;
+        }
+        GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
+    }
+
+    size_t total_size_org = 0;
+    size_t total_size_new = 0;
+
+    std::vector workers;
+    workers.reserve(nthread);
+
+    int idx = 0;
+
+    std::vector> read_data;
+    std::vector> work;
+    std::vector> f32_conv_buf;
+
+    uint16_t n_split = 1;
+
+    // Assume split index is continuous
+    if (params->keep_split) {
+        for (const auto * it : tensors) {
+            n_split = std::max(uint16_t(it->idx + 1), n_split);
+        }
+    }
+    std::vector ctx_outs(n_split);
+    ctx_outs[0] = std::move(ctx_out);
+
+    // populate the original tensors so we get an initial meta data
+    for (const auto * it : tensors) {
+        uint16_t i_split = params->keep_split ? it->idx : 0;
+        struct ggml_tensor * tensor = it->tensor;
+        if (!ctx_outs[i_split]) {
+            ctx_outs[i_split].reset(gguf_init_empty());
+        }
+        gguf_add_tensor(ctx_outs[i_split].get(), tensor);
+    }
+
+    // Set split info if needed
+    if (n_split > 1) {
+        for (size_t i = 0; i < ctx_outs.size(); ++i) {
+            gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
+            gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
+            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors);
+        }
+    }
+
+    int cur_split = -1;
+    std::ofstream fout;
+    auto close_ofstream = [&]() {
+        // Write metadata and close file handler
+        if (fout.is_open()) {
+            fout.seekp(0);
+            std::vector data(gguf_get_meta_size(ctx_outs[cur_split].get()));
+            gguf_get_meta_data(ctx_outs[cur_split].get(), data.data());
+            fout.write((const char *) data.data(), data.size());
+            fout.close();
+        }
+    };
+    auto new_ofstream = [&](int index) {
+        cur_split = index;
+        GGML_ASSERT(ctx_outs[cur_split] && "Find uninitialized gguf_context");
+        std::string fname = fname_out;
+        if (params->keep_split) {
+            std::vector split_path(llama_path_max(), 0);
+            llama_split_path(split_path.data(), split_path.size(), fname_out.c_str(), cur_split, n_split);
+            fname = std::string(split_path.data());
+        }
+
+        fout = std::ofstream(fname, std::ios::binary);
+        fout.exceptions(std::ofstream::failbit); // fail fast on write errors
+        const size_t meta_size = gguf_get_meta_size(ctx_outs[cur_split].get());
+        // placeholder for the meta data
+        ::zeros(fout, meta_size);
+    };
+
+    const auto tn = LLM_TN(model.arch);
+    new_ofstream(0);
+    for (const auto * it : tensors) {
+        const auto & weight = *it;
+        struct ggml_tensor * tensor = weight.tensor;
+        if (weight.idx != cur_split && params->keep_split) {
+            close_ofstream();
+            new_ofstream(weight.idx);
+        }
+
+        const std::string name = ggml_get_name(tensor);
+
+        if (!ml.use_mmap) {
+            if (read_data.size() < ggml_nbytes(tensor)) {
+                read_data.resize(ggml_nbytes(tensor));
+            }
+            tensor->data = read_data.data();
+        }
+        ml.load_data_for(tensor);
+
+        LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
+               ++idx, ml.n_tensors,
+               ggml_get_name(tensor),
+               llama_format_tensor_shape(tensor).c_str(),
+               ggml_type_name(tensor->type));
+
+        // This used to be a regex, but  has an extreme cost to compile times.
+        bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
+
+        // quantize only 2D and 3D tensors (experts)
+        quantize &= (ggml_n_dims(tensor) >= 2);
+
+        // do not quantize norm tensors
+        quantize &= name.find("_norm.weight") == std::string::npos;
+
+        quantize &= params->quantize_output_tensor || name != "output.weight";
+        quantize &= !params->only_copy;
+
+        // do not quantize expert gating tensors
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
+
+        // do not quantize positional embeddings and token types (BERT)
+        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD,    "weight");
+        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");
+
+        // do not quantize Mamba's small yet 2D weights
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ssm_conv1d.weight") == std::string::npos;
+
+        // do not quantize RWKV's time_mix_first tensors
+        quantize &= name.find("time_mix_first.weight") == std::string::npos;
+        quantize &= name.find("time_mix_w1.weight") == std::string::npos;
+        quantize &= name.find("time_mix_w2.weight") == std::string::npos;
+        quantize &= name.find("time_mix_decay_w1.weight") == std::string::npos;
+        quantize &= name.find("time_mix_decay_w2.weight") == std::string::npos;
+
+        // do not quantize relative position bias (T5)
+        quantize &= name.find("attn_rel_b.weight") == std::string::npos;
+
+        enum ggml_type new_type;
+        void * new_data;
+        size_t new_size;
+
+        if (quantize) {
+            new_type = default_type;
+
+            // get more optimal quantization type based on the tensor shape, layer, etc.
+            if (!params->pure && ggml_is_quantized(default_type)) {
+                new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
+            }
+            if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
+                new_type = params->token_embedding_type;
+            }
+            if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
+                new_type = params->output_tensor_type;
+            }
+
+            // If we've decided to quantize to the same type the tensor is already
+            // in then there's nothing to do.
+            quantize = tensor->type != new_type;
+        }
+
+        if (!quantize) {
+            new_type = tensor->type;
+            new_data = tensor->data;
+            new_size = ggml_nbytes(tensor);
+            LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
+        } else {
+            const int64_t nelements = ggml_nelements(tensor);
+
+            const float * imatrix = nullptr;
+            if (imatrix_data) {
+                auto it = imatrix_data->find(tensor->name);
+                if (it == imatrix_data->end()) {
+                    LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
+                } else {
+                    if (it->second.size() == (size_t)tensor->ne[0]*tensor->ne[2]) {
+                        imatrix = it->second.data();
+                    } else {
+                        LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
+                                int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name);
+
+                        // this can happen when quantizing an old mixtral model with split tensors with a new incompatible imatrix
+                        // this is a significant error and it may be good idea to abort the process if this happens,
+                        // since many people will miss the error and not realize that most of the model is being quantized without an imatrix
+                        // tok_embd should be ignored in this case, since it always causes this warning
+                        if (name != tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
+                            throw std::runtime_error(format("imatrix size %d is different from tensor size %d for %s",
+                                    int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name));
+                        }
+                    }
+                }
+            }
+            if ((new_type == GGML_TYPE_IQ2_XXS ||
+                 new_type == GGML_TYPE_IQ2_XS  ||
+                 new_type == GGML_TYPE_IQ2_S   ||
+                 new_type == GGML_TYPE_IQ1_S   ||
+                (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight"))  ||
+                (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
+                LLAMA_LOG_ERROR("\n\n============================================================\n");
+                LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
+                LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
+                LLAMA_LOG_ERROR("============================================================\n\n");
+                throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
+            }
+
+            float * f32_data;
+
+            if (tensor->type == GGML_TYPE_F32) {
+                f32_data = (float *) tensor->data;
+            } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
+                throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
+            } else {
+                llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
+                f32_data = (float *) f32_conv_buf.data();
+            }
+
+            LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
+            fflush(stdout);
+
+            if (work.size() < (size_t)nelements * 4) {
+                work.resize(nelements * 4); // upper bound on size
+            }
+            new_data = work.data();
+
+            const int64_t n_per_row = tensor->ne[0];
+            const int64_t nrows = tensor->ne[1];
+
+            static const int64_t min_chunk_size = 32 * 512;
+            const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row));
+
+            const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
+            const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
+            const int64_t nthread_use = nthread > 1 ? std::max((int64_t)1, std::min((int64_t)nthread, nchunk)) : 1;
+
+            // quantize each expert separately since they have different importance matrices
+            new_size = 0;
+            for (int64_t i03 = 0; i03 < tensor->ne[2]; ++i03) {
+                const float * f32_data_03 = f32_data + i03 * nelements_matrix;
+                void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
+                const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;
+
+                new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
+            }
+            LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
+        }
+        total_size_org += ggml_nbytes(tensor);
+        total_size_new += new_size;
+
+        // update the gguf meta data as we go
+        gguf_set_tensor_type(ctx_outs[cur_split].get(), name.c_str(), new_type);
+        gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data, new_size);
+
+        // write tensor data + padding
+        fout.write((const char *) new_data, new_size);
+        zeros(fout, GGML_PAD(new_size, align) - new_size);
+    }
+    close_ofstream();
+
+    LLAMA_LOG_INFO("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
+    LLAMA_LOG_INFO("%s: quant size  = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
+
+    if (qs.n_fallback > 0) {
+        LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n",
+                __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
+    }
+}
+
+//
+// interface implementation
+//
+
+struct llama_model_quantize_params llama_model_quantize_default_params() {
+    struct llama_model_quantize_params result = {
+        /*.nthread                     =*/ 0,
+        /*.ftype                       =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
+        /*.output_tensor_type          =*/ GGML_TYPE_COUNT,
+        /*.token_embedding_type        =*/ GGML_TYPE_COUNT,
+        /*.allow_requantize            =*/ false,
+        /*.quantize_output_tensor      =*/ true,
+        /*.only_copy                   =*/ false,
+        /*.pure                        =*/ false,
+        /*.keep_split                  =*/ false,
+        /*.imatrix                     =*/ nullptr,
+        /*.kv_overrides                =*/ nullptr,
+    };
+
+    return result;
+}
+
+uint32_t llama_model_quantize(
+        const char * fname_inp,
+        const char * fname_out,
+        const llama_model_quantize_params * params) {
+    try {
+        llama_model_quantize_internal(fname_inp, fname_out, params);
+    } catch (const std::exception & err) {
+        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
+        return 1;
+    }
+
+    return 0;
+}
diff --git a/src/llama-quant.h b/src/llama-quant.h
new file mode 100644
index 000000000..6f70f09be
--- /dev/null
+++ b/src/llama-quant.h
@@ -0,0 +1 @@
+#pragma once
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index bebff77cf..69cea2f14 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -1,5 +1,6 @@
 #include "llama-sampling.h"
 
+#include "llama-impl.h"
 #include "llama-vocab.h"
 #include "llama-grammar.h"
 
@@ -14,6 +15,118 @@
 #include 
 #include 
 #include 
+#include 
+
+// the ring buffer works similarly to std::deque, but with a fixed capacity
+template
+struct ring_buffer {
+    ring_buffer(size_t cap) : capacity(cap), data(cap) {}
+
+    T & front() {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        return data[first];
+    }
+
+    const T & front() const {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        return data[first];
+    }
+
+    T & back() {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        return data[pos];
+    }
+
+    const T & back() const {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        return data[pos];
+    }
+
+    void push_back(const T & value) {
+        if (capacity == 0) {
+            throw std::runtime_error("ring buffer: capacity is zero");
+        }
+
+        if (sz == capacity) {
+            // advance the start when buffer is full
+            first = (first + 1) % capacity;
+        } else {
+            sz++;
+        }
+        data[pos] = value;
+        pos = (pos + 1) % capacity;
+    }
+
+    T pop_front() {
+        if (sz == 0) {
+            throw std::runtime_error("ring buffer is empty");
+        }
+        T value = data[first];
+        first = (first + 1) % capacity;
+        sz--;
+        return value;
+    }
+
+    //T & operator[](size_t i) {
+    //    if (i >= sz) {
+    //        throw std::runtime_error("ring buffer: index out of bounds");
+    //    }
+    //    return data[(first + i) % capacity];
+    //}
+
+    //const T & at(size_t i) const {
+    //    if (i >= sz) {
+    //        throw std::runtime_error("ring buffer: index out of bounds");
+    //    }
+    //    return data[(first + i) % capacity];
+    //}
+
+    const T & rat(size_t i) const {
+        if (i >= sz) {
+            throw std::runtime_error("ring buffer: index out of bounds");
+        }
+        return data[(first + sz - i - 1) % capacity];
+    }
+
+    std::vector to_vector() const {
+        std::vector result;
+        result.reserve(sz);
+        for (size_t i = 0; i < sz; i++) {
+            result.push_back(data[(first + i) % capacity]);
+        }
+        return result;
+    }
+
+    void clear() {
+        // here only reset the status of the buffer
+        sz = 0;
+        first = 0;
+        pos = 0;
+    }
+
+    bool empty() const {
+        return sz == 0;
+    }
+
+    size_t size() const {
+        return sz;
+    }
+
+    size_t capacity = 0;
+    size_t sz = 0;
+    size_t first = 0;
+    size_t pos = 0;
+
+    std::vector data;
+};
 
 static int llama_sample_dist(llama_token_data_array * cur_p, std::mt19937 & rng) {
     // iterator for the probabilities
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 0a477d6dd..909e04871 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -1,5 +1,7 @@
 #include "llama-vocab.h"
 
+#include "llama-impl.h"
+
 #include "unicode.h"
 
 #include 
@@ -16,22 +18,6 @@
 // helpers
 //
 
-LLAMA_ATTRIBUTE_FORMAT(1, 2)
-static std::string format(const char * fmt, ...) {
-    va_list ap;
-    va_list ap2;
-    va_start(ap, fmt);
-    va_copy(ap2, ap);
-    int size = vsnprintf(NULL, 0, fmt, ap);
-    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
-    std::vector buf(size + 1);
-    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
-    GGML_ASSERT(size2 == size);
-    va_end(ap2);
-    va_end(ap);
-    return std::string(buf.data(), size);
-}
-
 struct naive_trie {
     naive_trie() : has_value(false), value(0) {
     }
diff --git a/src/llama-vocab.h b/src/llama-vocab.h
index a9b0da5ef..0d00086da 100644
--- a/src/llama-vocab.h
+++ b/src/llama-vocab.h
@@ -1,6 +1,6 @@
 #pragma once
 
-#include "llama-impl.h"
+#include "llama.h"
 
 #include 
 #include 
@@ -8,6 +8,18 @@
 #include 
 #include 
 
+static const char * llama_model_vocab_type_name(enum llama_vocab_type type){
+    switch (type) {
+        case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
+        case LLAMA_VOCAB_TYPE_SPM:  return "SPM";
+        case LLAMA_VOCAB_TYPE_BPE:  return "BPE";
+        case LLAMA_VOCAB_TYPE_WPM:  return "WPM";
+        case LLAMA_VOCAB_TYPE_UGM:  return "UGM";
+        case LLAMA_VOCAB_TYPE_RWKV: return "RWKV";
+        default:                    return "unknown";
+    }
+}
+
 struct llm_tokenizer;
 
 struct llama_vocab {
diff --git a/src/llama.cpp b/src/llama.cpp
index 4d41602fe..d7110b90b 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -1,48 +1,20 @@
 #include "llama-impl.h"
+
+#include "llama-chat.h"
+#include "llama-mmap.h"
+#include "llama-context.h"
 #include "llama-vocab.h"
 #include "llama-sampling.h"
-
-#include "unicode.h"
+#include "llama-kv-cache.h"
+#include "llama-model-loader.h"
+#include "llama-model.h"
+#include "llama-quant.h"
 
 #include "ggml.h"
 #include "ggml-alloc.h"
 #include "ggml-backend.h"
 #include "ggml-cpp.h"
 
-// TODO: replace with ggml API call
-#define QK_K 256
-
-#ifdef __has_include
-    #if __has_include()
-        #include 
-        #if defined(_POSIX_MAPPED_FILES)
-            #include 
-            #include 
-        #endif
-        #if defined(_POSIX_MEMLOCK_RANGE)
-            #include 
-        #endif
-    #endif
-#endif
-
-#if defined(_WIN32)
-    #define WIN32_LEAN_AND_MEAN
-    #ifndef NOMINMAX
-        #define NOMINMAX
-    #endif
-    #include 
-    #ifndef PATH_MAX
-        #define PATH_MAX MAX_PATH
-    #endif
-    #include 
-#endif
-
-#if __cplusplus >= 202000L
-    #define LU8(x) (const char*)(u8##x)
-#else
-    #define LU8(x) u8##x
-#endif
-
 #include 
 #include 
 #include 
@@ -57,7416 +29,25 @@
 #include 
 #include 
 #include 
-#include 
 #include 
-#include 
 #include 
 #include 
 #include 
-#include 
-#include 
 #include 
-#include 
-#include 
-#include 
 #include 
-#include 
 
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-// bump if necessary
-#define LLAMA_MAX_LAYERS  512
-#define LLAMA_MAX_EXPERTS 160  // DeepSeekV2
-
 //
-// helpers
+// tensor loading (TODO: add llama_tesor_loader?)
 //
 
-// trim whitespace from the beginning and end of a string
-static std::string trim(const std::string & str) {
-    size_t start = 0;
-    size_t end = str.size();
-    while (start < end && isspace(str[start])) {
-        start += 1;
-    }
-    while (end > start && isspace(str[end - 1])) {
-        end -= 1;
-    }
-    return str.substr(start, end - start);
-}
-
-static bool is_float_close(float a, float b, float abs_tol) {
-    // Check for non-negative tolerance
-    if (abs_tol < 0.0) {
-        throw std::invalid_argument("Tolerance must be non-negative");
-    }
-
-    // Exact equality check
-    if (a == b) {
-        return true;
-    }
-
-    // Check for infinities
-    if (std::isinf(a) || std::isinf(b)) {
-        return false;
-    }
-
-    // Regular comparison using the provided absolute tolerance
-    return std::fabs(b - a) <= abs_tol;
-}
-
-static void zeros(std::ofstream & file, size_t n) {
-    char zero = 0;
-    for (size_t i = 0; i < n; ++i) {
-        file.write(&zero, 1);
-    }
-}
-
-LLAMA_ATTRIBUTE_FORMAT(1, 2)
-static std::string format(const char * fmt, ...) {
-    va_list ap;
-    va_list ap2;
-    va_start(ap, fmt);
-    va_copy(ap2, ap);
-    int size = vsnprintf(NULL, 0, fmt, ap);
-    GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT
-    std::vector buf(size + 1);
-    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
-    GGML_ASSERT(size2 == size);
-    va_end(ap2);
-    va_end(ap);
-    return std::string(buf.data(), size);
-}
-
-//
-// gguf constants (sync with gguf.py)
-//
-
-enum llm_arch {
-    LLM_ARCH_LLAMA,
-    LLM_ARCH_DECI,
-    LLM_ARCH_FALCON,
-    LLM_ARCH_BAICHUAN,
-    LLM_ARCH_GROK,
-    LLM_ARCH_GPT2,
-    LLM_ARCH_GPTJ,
-    LLM_ARCH_GPTNEOX,
-    LLM_ARCH_MPT,
-    LLM_ARCH_STARCODER,
-    LLM_ARCH_REFACT,
-    LLM_ARCH_BERT,
-    LLM_ARCH_NOMIC_BERT,
-    LLM_ARCH_JINA_BERT_V2,
-    LLM_ARCH_BLOOM,
-    LLM_ARCH_STABLELM,
-    LLM_ARCH_QWEN,
-    LLM_ARCH_QWEN2,
-    LLM_ARCH_QWEN2MOE,
-    LLM_ARCH_QWEN2VL,
-    LLM_ARCH_PHI2,
-    LLM_ARCH_PHI3,
-    LLM_ARCH_PLAMO,
-    LLM_ARCH_CODESHELL,
-    LLM_ARCH_ORION,
-    LLM_ARCH_INTERNLM2,
-    LLM_ARCH_MINICPM,
-    LLM_ARCH_MINICPM3,
-    LLM_ARCH_GEMMA,
-    LLM_ARCH_GEMMA2,
-    LLM_ARCH_STARCODER2,
-    LLM_ARCH_MAMBA,
-    LLM_ARCH_XVERSE,
-    LLM_ARCH_COMMAND_R,
-    LLM_ARCH_DBRX,
-    LLM_ARCH_OLMO,
-    LLM_ARCH_OLMO2,
-    LLM_ARCH_OLMOE,
-    LLM_ARCH_OPENELM,
-    LLM_ARCH_ARCTIC,
-    LLM_ARCH_DEEPSEEK,
-    LLM_ARCH_DEEPSEEK2,
-    LLM_ARCH_CHATGLM,
-    LLM_ARCH_BITNET,
-    LLM_ARCH_T5,
-    LLM_ARCH_T5ENCODER,
-    LLM_ARCH_JAIS,
-    LLM_ARCH_NEMOTRON,
-    LLM_ARCH_EXAONE,
-    LLM_ARCH_RWKV6,
-    LLM_ARCH_GRANITE,
-    LLM_ARCH_GRANITE_MOE,
-    LLM_ARCH_CHAMELEON,
-    LLM_ARCH_WAVTOKENIZER_DEC,
-    LLM_ARCH_UNKNOWN,
-};
-
-static const std::map LLM_ARCH_NAMES = {
-    { LLM_ARCH_LLAMA,            "llama"            },
-    { LLM_ARCH_DECI,             "deci"            },
-    { LLM_ARCH_FALCON,           "falcon"           },
-    { LLM_ARCH_GROK,             "grok"             },
-    { LLM_ARCH_GPT2,             "gpt2"             },
-    { LLM_ARCH_GPTJ,             "gptj"             },
-    { LLM_ARCH_GPTNEOX,          "gptneox"          },
-    { LLM_ARCH_MPT,              "mpt"              },
-    { LLM_ARCH_BAICHUAN,         "baichuan"         },
-    { LLM_ARCH_STARCODER,        "starcoder"        },
-    { LLM_ARCH_REFACT,           "refact"           },
-    { LLM_ARCH_BERT,             "bert"             },
-    { LLM_ARCH_NOMIC_BERT,       "nomic-bert"       },
-    { LLM_ARCH_JINA_BERT_V2,     "jina-bert-v2"     },
-    { LLM_ARCH_BLOOM,            "bloom"            },
-    { LLM_ARCH_STABLELM,         "stablelm"         },
-    { LLM_ARCH_QWEN,             "qwen"             },
-    { LLM_ARCH_QWEN2,            "qwen2"            },
-    { LLM_ARCH_QWEN2MOE,         "qwen2moe"         },
-    { LLM_ARCH_QWEN2VL,          "qwen2vl"          },
-    { LLM_ARCH_PHI2,             "phi2"             },
-    { LLM_ARCH_PHI3,             "phi3"             },
-    { LLM_ARCH_PLAMO,            "plamo"            },
-    { LLM_ARCH_CODESHELL,        "codeshell"        },
-    { LLM_ARCH_ORION,            "orion"            },
-    { LLM_ARCH_INTERNLM2,        "internlm2"        },
-    { LLM_ARCH_MINICPM,          "minicpm"          },
-    { LLM_ARCH_MINICPM3,         "minicpm3"         },
-    { LLM_ARCH_GEMMA,            "gemma"            },
-    { LLM_ARCH_GEMMA2,           "gemma2"           },
-    { LLM_ARCH_STARCODER2,       "starcoder2"       },
-    { LLM_ARCH_MAMBA,            "mamba"            },
-    { LLM_ARCH_XVERSE,           "xverse"           },
-    { LLM_ARCH_COMMAND_R,        "command-r"        },
-    { LLM_ARCH_DBRX,             "dbrx"             },
-    { LLM_ARCH_OLMO,             "olmo"             },
-    { LLM_ARCH_OLMO2,            "olmo2"            },
-    { LLM_ARCH_OLMOE,            "olmoe"            },
-    { LLM_ARCH_OPENELM,          "openelm"          },
-    { LLM_ARCH_ARCTIC,           "arctic"           },
-    { LLM_ARCH_DEEPSEEK,         "deepseek"         },
-    { LLM_ARCH_DEEPSEEK2,        "deepseek2"        },
-    { LLM_ARCH_CHATGLM,          "chatglm"          },
-    { LLM_ARCH_BITNET,           "bitnet"           },
-    { LLM_ARCH_T5,               "t5"               },
-    { LLM_ARCH_T5ENCODER,        "t5encoder"        },
-    { LLM_ARCH_JAIS,             "jais"             },
-    { LLM_ARCH_NEMOTRON,         "nemotron"         },
-    { LLM_ARCH_EXAONE,           "exaone"           },
-    { LLM_ARCH_RWKV6,            "rwkv6"            },
-    { LLM_ARCH_GRANITE,          "granite"          },
-    { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
-    { LLM_ARCH_CHAMELEON,        "chameleon"        },
-    { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
-    { LLM_ARCH_UNKNOWN,          "(unknown)"        },
-};
-
-enum llm_kv {
-    LLM_KV_GENERAL_TYPE,
-    LLM_KV_GENERAL_ARCHITECTURE,
-    LLM_KV_GENERAL_QUANTIZATION_VERSION,
-    LLM_KV_GENERAL_ALIGNMENT,
-    LLM_KV_GENERAL_NAME,
-    LLM_KV_GENERAL_AUTHOR,
-    LLM_KV_GENERAL_VERSION,
-    LLM_KV_GENERAL_URL,
-    LLM_KV_GENERAL_DESCRIPTION,
-    LLM_KV_GENERAL_LICENSE,
-    LLM_KV_GENERAL_SOURCE_URL,
-    LLM_KV_GENERAL_SOURCE_HF_REPO,
-
-    LLM_KV_VOCAB_SIZE,
-    LLM_KV_CONTEXT_LENGTH,
-    LLM_KV_EMBEDDING_LENGTH,
-    LLM_KV_FEATURES_LENGTH,
-    LLM_KV_BLOCK_COUNT,
-    LLM_KV_LEADING_DENSE_BLOCK_COUNT,
-    LLM_KV_FEED_FORWARD_LENGTH,
-    LLM_KV_EXPERT_FEED_FORWARD_LENGTH,
-    LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH,
-    LLM_KV_USE_PARALLEL_RESIDUAL,
-    LLM_KV_TENSOR_DATA_LAYOUT,
-    LLM_KV_EXPERT_COUNT,
-    LLM_KV_EXPERT_USED_COUNT,
-    LLM_KV_EXPERT_SHARED_COUNT,
-    LLM_KV_EXPERT_WEIGHTS_SCALE,
-    LLM_KV_POOLING_TYPE,
-    LLM_KV_LOGIT_SCALE,
-    LLM_KV_DECODER_START_TOKEN_ID,
-    LLM_KV_ATTN_LOGIT_SOFTCAPPING,
-    LLM_KV_FINAL_LOGIT_SOFTCAPPING,
-    LLM_KV_SWIN_NORM,
-    LLM_KV_RESCALE_EVERY_N_LAYERS,
-    LLM_KV_TIME_MIX_EXTRA_DIM,
-    LLM_KV_TIME_DECAY_EXTRA_DIM,
-    LLM_KV_RESIDUAL_SCALE,
-    LLM_KV_EMBEDDING_SCALE,
-
-    LLM_KV_ATTENTION_HEAD_COUNT,
-    LLM_KV_ATTENTION_HEAD_COUNT_KV,
-    LLM_KV_ATTENTION_MAX_ALIBI_BIAS,
-    LLM_KV_ATTENTION_CLAMP_KQV,
-    LLM_KV_ATTENTION_KEY_LENGTH,
-    LLM_KV_ATTENTION_VALUE_LENGTH,
-    LLM_KV_ATTENTION_LAYERNORM_EPS,
-    LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,
-    LLM_KV_ATTENTION_GROUPNORM_EPS,
-    LLM_KV_ATTENTION_GROUPNORM_GROUPS,
-    LLM_KV_ATTENTION_CAUSAL,
-    LLM_KV_ATTENTION_Q_LORA_RANK,
-    LLM_KV_ATTENTION_KV_LORA_RANK,
-    LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,
-    LLM_KV_ATTENTION_SLIDING_WINDOW,
-    LLM_KV_ATTENTION_SCALE,
-
-    LLM_KV_ROPE_DIMENSION_COUNT,
-    LLM_KV_ROPE_DIMENSION_SECTIONS,
-    LLM_KV_ROPE_FREQ_BASE,
-    LLM_KV_ROPE_SCALE_LINEAR,
-    LLM_KV_ROPE_SCALING_TYPE,
-    LLM_KV_ROPE_SCALING_FACTOR,
-    LLM_KV_ROPE_SCALING_ATTN_FACTOR,
-    LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,
-    LLM_KV_ROPE_SCALING_FINETUNED,
-    LLM_KV_ROPE_SCALING_YARN_LOG_MUL,
-
-    LLM_KV_SPLIT_NO,
-    LLM_KV_SPLIT_COUNT,
-    LLM_KV_SPLIT_TENSORS_COUNT,
-
-    LLM_KV_SSM_INNER_SIZE,
-    LLM_KV_SSM_CONV_KERNEL,
-    LLM_KV_SSM_STATE_SIZE,
-    LLM_KV_SSM_TIME_STEP_RANK,
-    LLM_KV_SSM_DT_B_C_RMS,
-
-    LLM_KV_WKV_HEAD_SIZE,
-
-    LLM_KV_TOKENIZER_MODEL,
-    LLM_KV_TOKENIZER_PRE,
-    LLM_KV_TOKENIZER_LIST,
-    LLM_KV_TOKENIZER_TOKEN_TYPE,
-    LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,
-    LLM_KV_TOKENIZER_SCORES,
-    LLM_KV_TOKENIZER_MERGES,
-    LLM_KV_TOKENIZER_BOS_ID,
-    LLM_KV_TOKENIZER_EOS_ID,
-    LLM_KV_TOKENIZER_EOT_ID,
-    LLM_KV_TOKENIZER_EOM_ID,
-    LLM_KV_TOKENIZER_UNK_ID,
-    LLM_KV_TOKENIZER_SEP_ID,
-    LLM_KV_TOKENIZER_PAD_ID,
-    LLM_KV_TOKENIZER_CLS_ID,
-    LLM_KV_TOKENIZER_MASK_ID,
-    LLM_KV_TOKENIZER_ADD_BOS,
-    LLM_KV_TOKENIZER_ADD_EOS,
-    LLM_KV_TOKENIZER_ADD_PREFIX,
-    LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,
-    LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,
-    LLM_KV_TOKENIZER_HF_JSON,
-    LLM_KV_TOKENIZER_RWKV,
-    LLM_KV_TOKENIZER_FIM_PRE_ID,
-    LLM_KV_TOKENIZER_FIM_SUF_ID,
-    LLM_KV_TOKENIZER_FIM_MID_ID,
-    LLM_KV_TOKENIZER_FIM_PAD_ID,
-    LLM_KV_TOKENIZER_FIM_REP_ID,
-    LLM_KV_TOKENIZER_FIM_SEP_ID,
-
-    LLM_KV_ADAPTER_TYPE,
-    LLM_KV_ADAPTER_LORA_ALPHA,
-
-    LLM_KV_POSNET_EMBEDDING_LENGTH,
-    LLM_KV_POSNET_BLOCK_COUNT,
-
-    LLM_KV_CONVNEXT_EMBEDDING_LENGTH,
-    LLM_KV_CONVNEXT_BLOCK_COUNT,
-
-    // deprecated:
-    LLM_KV_TOKENIZER_PREFIX_ID,
-    LLM_KV_TOKENIZER_SUFFIX_ID,
-    LLM_KV_TOKENIZER_MIDDLE_ID,
-};
-
-static const std::map LLM_KV_NAMES = {
-    { LLM_KV_GENERAL_TYPE,                  "general.type"                          },
-    { LLM_KV_GENERAL_ARCHITECTURE,          "general.architecture"                  },
-    { LLM_KV_GENERAL_QUANTIZATION_VERSION,  "general.quantization_version"          },
-    { LLM_KV_GENERAL_ALIGNMENT,             "general.alignment"                     },
-    { LLM_KV_GENERAL_NAME,                  "general.name"                          },
-    { LLM_KV_GENERAL_AUTHOR,                "general.author"                        },
-    { LLM_KV_GENERAL_VERSION,               "general.version"                       },
-    { LLM_KV_GENERAL_URL,                   "general.url"                           },
-    { LLM_KV_GENERAL_DESCRIPTION,           "general.description"                   },
-    { LLM_KV_GENERAL_LICENSE,               "general.license"                       },
-    { LLM_KV_GENERAL_SOURCE_URL,            "general.source.url"                    },
-    { LLM_KV_GENERAL_SOURCE_HF_REPO,        "general.source.huggingface.repository" },
-
-    { LLM_KV_VOCAB_SIZE,                        "%s.vocab_size"                        },
-    { LLM_KV_CONTEXT_LENGTH,                    "%s.context_length"                    },
-    { LLM_KV_EMBEDDING_LENGTH,                  "%s.embedding_length"                  },
-    { LLM_KV_FEATURES_LENGTH,                   "%s.features_length"                   },
-    { LLM_KV_BLOCK_COUNT,                       "%s.block_count"                       },
-    { LLM_KV_LEADING_DENSE_BLOCK_COUNT,         "%s.leading_dense_block_count"         },
-    { LLM_KV_FEED_FORWARD_LENGTH,               "%s.feed_forward_length"               },
-    { LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        "%s.expert_feed_forward_length"        },
-    { LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, "%s.expert_shared_feed_forward_length" },
-    { LLM_KV_USE_PARALLEL_RESIDUAL,             "%s.use_parallel_residual"             },
-    { LLM_KV_TENSOR_DATA_LAYOUT,                "%s.tensor_data_layout"                },
-    { LLM_KV_EXPERT_COUNT,                      "%s.expert_count"                      },
-    { LLM_KV_EXPERT_USED_COUNT,                 "%s.expert_used_count"                 },
-    { LLM_KV_EXPERT_SHARED_COUNT,               "%s.expert_shared_count"               },
-    { LLM_KV_EXPERT_WEIGHTS_SCALE,              "%s.expert_weights_scale"              },
-    { LLM_KV_POOLING_TYPE,                      "%s.pooling_type"                      },
-    { LLM_KV_LOGIT_SCALE,                       "%s.logit_scale"                       },
-    { LLM_KV_DECODER_START_TOKEN_ID,            "%s.decoder_start_token_id"            },
-    { LLM_KV_ATTN_LOGIT_SOFTCAPPING,            "%s.attn_logit_softcapping"            },
-    { LLM_KV_FINAL_LOGIT_SOFTCAPPING,           "%s.final_logit_softcapping"           },
-    { LLM_KV_SWIN_NORM,                         "%s.swin_norm"                         },
-    { LLM_KV_RESCALE_EVERY_N_LAYERS,            "%s.rescale_every_n_layers"            },
-    { LLM_KV_TIME_MIX_EXTRA_DIM,                "%s.time_mix_extra_dim"                },
-    { LLM_KV_TIME_DECAY_EXTRA_DIM,              "%s.time_decay_extra_dim"              },
-    { LLM_KV_RESIDUAL_SCALE,                    "%s.residual_scale"                    },
-    { LLM_KV_EMBEDDING_SCALE,                   "%s.embedding_scale"                   },
-
-    { LLM_KV_ATTENTION_HEAD_COUNT,             "%s.attention.head_count"             },
-    { LLM_KV_ATTENTION_HEAD_COUNT_KV,          "%s.attention.head_count_kv"          },
-    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS,         "%s.attention.max_alibi_bias"         },
-    { LLM_KV_ATTENTION_CLAMP_KQV,              "%s.attention.clamp_kqv"              },
-    { LLM_KV_ATTENTION_KEY_LENGTH,             "%s.attention.key_length"             },
-    { LLM_KV_ATTENTION_VALUE_LENGTH,           "%s.attention.value_length"           },
-    { LLM_KV_ATTENTION_LAYERNORM_EPS,          "%s.attention.layer_norm_epsilon"     },
-    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,      "%s.attention.layer_norm_rms_epsilon" },
-    { LLM_KV_ATTENTION_GROUPNORM_EPS,          "%s.attention.group_norm_epsilon"     },
-    { LLM_KV_ATTENTION_GROUPNORM_GROUPS,       "%s.attention.group_norm_groups"      },
-    { LLM_KV_ATTENTION_CAUSAL,                 "%s.attention.causal"                 },
-    { LLM_KV_ATTENTION_Q_LORA_RANK,            "%s.attention.q_lora_rank"            },
-    { LLM_KV_ATTENTION_KV_LORA_RANK,           "%s.attention.kv_lora_rank"           },
-    { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, "%s.attention.relative_buckets_count" },
-    { LLM_KV_ATTENTION_SLIDING_WINDOW,         "%s.attention.sliding_window"         },
-    { LLM_KV_ATTENTION_SCALE,                  "%s.attention.scale"                  },
-
-    { LLM_KV_ROPE_DIMENSION_COUNT,             "%s.rope.dimension_count"                 },
-    { LLM_KV_ROPE_DIMENSION_SECTIONS,          "%s.rope.dimension_sections"              },
-    { LLM_KV_ROPE_FREQ_BASE,                   "%s.rope.freq_base"                       },
-    { LLM_KV_ROPE_SCALE_LINEAR,                "%s.rope.scale_linear"                    },
-    { LLM_KV_ROPE_SCALING_TYPE,                "%s.rope.scaling.type"                    },
-    { LLM_KV_ROPE_SCALING_FACTOR,              "%s.rope.scaling.factor"                  },
-    { LLM_KV_ROPE_SCALING_ATTN_FACTOR,         "%s.rope.scaling.attn_factor"             },
-    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,        "%s.rope.scaling.original_context_length" },
-    { LLM_KV_ROPE_SCALING_FINETUNED,           "%s.rope.scaling.finetuned"               },
-    { LLM_KV_ROPE_SCALING_YARN_LOG_MUL,        "%s.rope.scaling.yarn_log_multiplier"     },
-
-    { LLM_KV_SPLIT_NO,                         "split.no"            },
-    { LLM_KV_SPLIT_COUNT,                      "split.count"         },
-    { LLM_KV_SPLIT_TENSORS_COUNT,              "split.tensors.count" },
-
-    { LLM_KV_SSM_CONV_KERNEL,                  "%s.ssm.conv_kernel"    },
-    { LLM_KV_SSM_INNER_SIZE,                   "%s.ssm.inner_size"     },
-    { LLM_KV_SSM_STATE_SIZE,                   "%s.ssm.state_size"     },
-    { LLM_KV_SSM_TIME_STEP_RANK,               "%s.ssm.time_step_rank" },
-    { LLM_KV_SSM_DT_B_C_RMS,                   "%s.ssm.dt_b_c_rms"     },
-
-    { LLM_KV_WKV_HEAD_SIZE,                    "%s.wkv.head_size" },
-
-    { LLM_KV_POSNET_EMBEDDING_LENGTH,          "%s.posnet.embedding_length" },
-    { LLM_KV_POSNET_BLOCK_COUNT,               "%s.posnet.block_count"      },
-
-    { LLM_KV_CONVNEXT_EMBEDDING_LENGTH,        "%s.convnext.embedding_length" },
-    { LLM_KV_CONVNEXT_BLOCK_COUNT,             "%s.convnext.block_count"      },
-
-    { LLM_KV_TOKENIZER_MODEL,                  "tokenizer.ggml.model"                    },
-    { LLM_KV_TOKENIZER_PRE,                    "tokenizer.ggml.pre"                      },
-    { LLM_KV_TOKENIZER_LIST,                   "tokenizer.ggml.tokens"                   },
-    { LLM_KV_TOKENIZER_TOKEN_TYPE,             "tokenizer.ggml.token_type"               },
-    { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,       "tokenizer.ggml.token_type_count"         },
-    { LLM_KV_TOKENIZER_SCORES,                 "tokenizer.ggml.scores"                   },
-    { LLM_KV_TOKENIZER_MERGES,                 "tokenizer.ggml.merges"                   },
-    { LLM_KV_TOKENIZER_BOS_ID,                 "tokenizer.ggml.bos_token_id"             },
-    { LLM_KV_TOKENIZER_EOS_ID,                 "tokenizer.ggml.eos_token_id"             },
-    { LLM_KV_TOKENIZER_EOT_ID,                 "tokenizer.ggml.eot_token_id"             },
-    { LLM_KV_TOKENIZER_EOM_ID,                 "tokenizer.ggml.eom_token_id"             },
-    { LLM_KV_TOKENIZER_UNK_ID,                 "tokenizer.ggml.unknown_token_id"         },
-    { LLM_KV_TOKENIZER_SEP_ID,                 "tokenizer.ggml.seperator_token_id"       },
-    { LLM_KV_TOKENIZER_PAD_ID,                 "tokenizer.ggml.padding_token_id"         },
-    { LLM_KV_TOKENIZER_CLS_ID,                 "tokenizer.ggml.cls_token_id"             },
-    { LLM_KV_TOKENIZER_MASK_ID,                "tokenizer.ggml.mask_token_id"            },
-    { LLM_KV_TOKENIZER_ADD_BOS,                "tokenizer.ggml.add_bos_token"            },
-    { LLM_KV_TOKENIZER_ADD_EOS,                "tokenizer.ggml.add_eos_token"            },
-    { LLM_KV_TOKENIZER_ADD_PREFIX,             "tokenizer.ggml.add_space_prefix"         },
-    { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,        "tokenizer.ggml.remove_extra_whitespaces" },
-    { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP,   "tokenizer.ggml.precompiled_charsmap"     },
-    { LLM_KV_TOKENIZER_HF_JSON,                "tokenizer.huggingface.json"              },
-    { LLM_KV_TOKENIZER_RWKV,                   "tokenizer.rwkv.world"                    },
-    { LLM_KV_TOKENIZER_FIM_PRE_ID,             "tokenizer.ggml.fim_pre_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_SUF_ID,             "tokenizer.ggml.fim_suf_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_MID_ID,             "tokenizer.ggml.fim_mid_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_PAD_ID,             "tokenizer.ggml.fim_pad_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_REP_ID,             "tokenizer.ggml.fim_rep_token_id"         },
-    { LLM_KV_TOKENIZER_FIM_SEP_ID,             "tokenizer.ggml.fim_sep_token_id"         },
-
-    { LLM_KV_ADAPTER_TYPE,                     "adapter.type"       },
-    { LLM_KV_ADAPTER_LORA_ALPHA,               "adapter.lora.alpha" },
-
-    // deprecated
-    { LLM_KV_TOKENIZER_PREFIX_ID,              "tokenizer.ggml.prefix_token_id" },
-    { LLM_KV_TOKENIZER_SUFFIX_ID,              "tokenizer.ggml.suffix_token_id" },
-    { LLM_KV_TOKENIZER_MIDDLE_ID,              "tokenizer.ggml.middle_token_id" },
-};
-
-struct LLM_KV {
-    LLM_KV(llm_arch arch) : arch(arch) {}
-
-    llm_arch arch;
-
-    std::string operator()(llm_kv kv) const {
-        return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
-    }
-};
-
-enum llm_tensor {
-    LLM_TENSOR_TOKEN_EMBD,
-    LLM_TENSOR_TOKEN_EMBD_NORM,
-    LLM_TENSOR_TOKEN_TYPES,
-    LLM_TENSOR_POS_EMBD,
-    LLM_TENSOR_OUTPUT,
-    LLM_TENSOR_OUTPUT_NORM,
-    LLM_TENSOR_ROPE_FREQS,
-    LLM_TENSOR_ROPE_FACTORS_LONG,
-    LLM_TENSOR_ROPE_FACTORS_SHORT,
-    LLM_TENSOR_ATTN_Q,
-    LLM_TENSOR_ATTN_K,
-    LLM_TENSOR_ATTN_V,
-    LLM_TENSOR_ATTN_QKV,
-    LLM_TENSOR_ATTN_OUT,
-    LLM_TENSOR_ATTN_NORM,
-    LLM_TENSOR_ATTN_NORM_2,
-    LLM_TENSOR_ATTN_OUT_NORM,
-    LLM_TENSOR_ATTN_POST_NORM,
-    LLM_TENSOR_ATTN_ROT_EMBD,
-    LLM_TENSOR_FFN_GATE_INP,
-    LLM_TENSOR_FFN_GATE_INP_SHEXP,
-    LLM_TENSOR_FFN_NORM,
-    LLM_TENSOR_FFN_POST_NORM,
-    LLM_TENSOR_FFN_GATE,
-    LLM_TENSOR_FFN_DOWN,
-    LLM_TENSOR_FFN_UP,
-    LLM_TENSOR_FFN_ACT,
-    LLM_TENSOR_FFN_DOWN_EXP,  // split experts for backward compatibility
-    LLM_TENSOR_FFN_GATE_EXP,
-    LLM_TENSOR_FFN_UP_EXP,
-    LLM_TENSOR_FFN_NORM_EXPS,
-    LLM_TENSOR_FFN_DOWN_EXPS, // merged experts
-    LLM_TENSOR_FFN_GATE_EXPS,
-    LLM_TENSOR_FFN_UP_EXPS,
-    LLM_TENSOR_FFN_DOWN_SHEXP,
-    LLM_TENSOR_FFN_GATE_SHEXP,
-    LLM_TENSOR_FFN_UP_SHEXP,
-    LLM_TENSOR_ATTN_Q_NORM,
-    LLM_TENSOR_ATTN_K_NORM,
-    LLM_TENSOR_LAYER_OUT_NORM,
-    LLM_TENSOR_SSM_IN,
-    LLM_TENSOR_SSM_CONV1D,
-    LLM_TENSOR_SSM_X,
-    LLM_TENSOR_SSM_DT,
-    LLM_TENSOR_SSM_A,
-    LLM_TENSOR_SSM_D,
-    LLM_TENSOR_SSM_OUT,
-    LLM_TENSOR_TIME_MIX_W1,
-    LLM_TENSOR_TIME_MIX_W2,
-    LLM_TENSOR_TIME_MIX_LERP_X,
-    LLM_TENSOR_TIME_MIX_LERP_W,
-    LLM_TENSOR_TIME_MIX_LERP_K,
-    LLM_TENSOR_TIME_MIX_LERP_V,
-    LLM_TENSOR_TIME_MIX_LERP_R,
-    LLM_TENSOR_TIME_MIX_LERP_G,
-    LLM_TENSOR_TIME_MIX_FIRST,
-    LLM_TENSOR_TIME_MIX_DECAY,
-    LLM_TENSOR_TIME_MIX_DECAY_W1,
-    LLM_TENSOR_TIME_MIX_DECAY_W2,
-    LLM_TENSOR_TIME_MIX_KEY,
-    LLM_TENSOR_TIME_MIX_VALUE,
-    LLM_TENSOR_TIME_MIX_RECEPTANCE,
-    LLM_TENSOR_TIME_MIX_GATE,
-    LLM_TENSOR_TIME_MIX_LN,
-    LLM_TENSOR_TIME_MIX_OUTPUT,
-    LLM_TENSOR_CHANNEL_MIX_LERP_K,
-    LLM_TENSOR_CHANNEL_MIX_LERP_R,
-    LLM_TENSOR_CHANNEL_MIX_KEY,
-    LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,
-    LLM_TENSOR_CHANNEL_MIX_VALUE,
-    LLM_TENSOR_ATTN_Q_A,
-    LLM_TENSOR_ATTN_Q_B,
-    LLM_TENSOR_ATTN_KV_A_MQA,
-    LLM_TENSOR_ATTN_KV_B,
-    LLM_TENSOR_ATTN_Q_A_NORM,
-    LLM_TENSOR_ATTN_KV_A_NORM,
-    LLM_TENSOR_ATTN_SUB_NORM,
-    LLM_TENSOR_FFN_SUB_NORM,
-    LLM_TENSOR_DEC_ATTN_NORM,
-    LLM_TENSOR_DEC_ATTN_Q,
-    LLM_TENSOR_DEC_ATTN_K,
-    LLM_TENSOR_DEC_ATTN_V,
-    LLM_TENSOR_DEC_ATTN_OUT,
-    LLM_TENSOR_DEC_ATTN_REL_B,
-    LLM_TENSOR_DEC_CROSS_ATTN_NORM,
-    LLM_TENSOR_DEC_CROSS_ATTN_Q,
-    LLM_TENSOR_DEC_CROSS_ATTN_K,
-    LLM_TENSOR_DEC_CROSS_ATTN_V,
-    LLM_TENSOR_DEC_CROSS_ATTN_OUT,
-    LLM_TENSOR_DEC_CROSS_ATTN_REL_B,
-    LLM_TENSOR_DEC_FFN_NORM,
-    LLM_TENSOR_DEC_FFN_GATE,
-    LLM_TENSOR_DEC_FFN_DOWN,
-    LLM_TENSOR_DEC_FFN_UP,
-    LLM_TENSOR_DEC_OUTPUT_NORM,
-    LLM_TENSOR_ENC_ATTN_NORM,
-    LLM_TENSOR_ENC_ATTN_Q,
-    LLM_TENSOR_ENC_ATTN_K,
-    LLM_TENSOR_ENC_ATTN_V,
-    LLM_TENSOR_ENC_ATTN_OUT,
-    LLM_TENSOR_ENC_ATTN_REL_B,
-    LLM_TENSOR_ENC_FFN_NORM,
-    LLM_TENSOR_ENC_FFN_GATE,
-    LLM_TENSOR_ENC_FFN_DOWN,
-    LLM_TENSOR_ENC_FFN_UP,
-    LLM_TENSOR_ENC_OUTPUT_NORM,
-    LLM_TENSOR_CLS,
-    LLM_TENSOR_CLS_OUT,
-    LLM_TENSOR_CONV1D,
-    LLM_TENSOR_CONVNEXT_DW,
-    LLM_TENSOR_CONVNEXT_NORM,
-    LLM_TENSOR_CONVNEXT_PW1,
-    LLM_TENSOR_CONVNEXT_PW2,
-    LLM_TENSOR_CONVNEXT_GAMMA,
-    LLM_TENSOR_POS_NET_CONV1,
-    LLM_TENSOR_POS_NET_CONV2,
-    LLM_TENSOR_POS_NET_NORM,
-    LLM_TENSOR_POS_NET_NORM1,
-    LLM_TENSOR_POS_NET_NORM2,
-    LLM_TENSOR_POS_NET_ATTN_NORM,
-    LLM_TENSOR_POS_NET_ATTN_Q,
-    LLM_TENSOR_POS_NET_ATTN_K,
-    LLM_TENSOR_POS_NET_ATTN_V,
-    LLM_TENSOR_POS_NET_ATTN_OUT,
-};
-
-static const std::map> LLM_TENSOR_NAMES = {
-    {
-        LLM_ARCH_LLAMA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_DECI,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_BAICHUAN,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_FALCON,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_NORM_2,     "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GROK,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-        },
-    },
-    {
-        LLM_ARCH_GPT2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_GPTJ,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-        },
-    },
-    {
-        LLM_ARCH_GPTNEOX,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_MPT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output"},
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_ACT,         "blk.%d.ffn.act" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm"},
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm"},
-        },
-    },
-    {
-        LLM_ARCH_STARCODER,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_REFACT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_BERT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_POS_EMBD,        "position_embd" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_CLS,             "cls" },
-            { LLM_TENSOR_CLS_OUT,         "cls.output" },
-        },
-    },
-    {
-        LLM_ARCH_NOMIC_BERT,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_JINA_BERT_V2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_TOKEN_TYPES,     "token_types" },
-            { LLM_TENSOR_ATTN_NORM_2,     "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_LAYER_OUT_NORM,  "blk.%d.layer_output_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_CLS,             "cls" },
-        },
-    },
-    {
-        LLM_ARCH_BLOOM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_STABLELM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN2VL,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_QWEN2MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_PHI2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_PHI3,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,           "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_PLAMO,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_CODESHELL,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_ORION,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_INTERNLM2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_MINICPM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXP,    "blk.%d.ffn_gate.%d" },
-            { LLM_TENSOR_FFN_DOWN_EXP,    "blk.%d.ffn_down.%d" },
-            { LLM_TENSOR_FFN_UP_EXP,      "blk.%d.ffn_up.%d" },
-        },
-    },
-    {
-        LLM_ARCH_MINICPM3,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FACTORS_LONG,  "rope_factors_long" },
-            { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
-            { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
-            { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
-            { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
-            { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_GEMMA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GEMMA2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-        },
-    },
-    {
-        LLM_ARCH_STARCODER2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_MAMBA,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_SSM_IN,          "blk.%d.ssm_in" },
-            { LLM_TENSOR_SSM_CONV1D,      "blk.%d.ssm_conv1d" },
-            { LLM_TENSOR_SSM_X,           "blk.%d.ssm_x" },
-            { LLM_TENSOR_SSM_DT,          "blk.%d.ssm_dt" },
-            { LLM_TENSOR_SSM_A,           "blk.%d.ssm_a" },
-            { LLM_TENSOR_SSM_D,           "blk.%d.ssm_d" },
-            { LLM_TENSOR_SSM_OUT,         "blk.%d.ssm_out" },
-        },
-    },
-    {
-        LLM_ARCH_XVERSE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_COMMAND_R,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_DBRX,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_OUT_NORM,   "blk.%d.attn_output_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_OLMO,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_OLMO2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_POST_NORM,  "blk.%d.post_attention_norm" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_FFN_POST_NORM,   "blk.%d.post_ffw_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_OLMOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_Q_NORM,        "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,        "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_OPENELM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_ARCTIC,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_NORM_EXPS,   "blk.%d.ffn_norm_exps" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_DEEPSEEK,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ROPE_FREQS,         "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,      "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_DEEPSEEK2,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_OUTPUT,             "output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q_A_NORM,      "blk.%d.attn_q_a_norm" },
-            { LLM_TENSOR_ATTN_KV_A_NORM,     "blk.%d.attn_kv_a_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_Q_A,           "blk.%d.attn_q_a" },
-            { LLM_TENSOR_ATTN_Q_B,           "blk.%d.attn_q_b" },
-            { LLM_TENSOR_ATTN_KV_A_MQA,      "blk.%d.attn_kv_a_mqa" },
-            { LLM_TENSOR_ATTN_KV_B,          "blk.%d.attn_kv_b" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_GATE_INP,       "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,      "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,      "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,        "blk.%d.ffn_up_exps" },
-            { LLM_TENSOR_FFN_GATE_INP_SHEXP, "blk.%d.ffn_gate_inp_shexp" },
-            { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
-            { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
-            { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
-        },
-    },
-    {
-        LLM_ARCH_CHATGLM,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_BITNET,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,         "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,        "output_norm" },
-            { LLM_TENSOR_ATTN_Q,             "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,             "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,             "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,           "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_NORM,          "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_SUB_NORM,      "blk.%d.attn_sub_norm" },
-            { LLM_TENSOR_FFN_GATE,           "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,           "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,             "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_NORM,           "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_SUB_NORM,       "blk.%d.ffn_sub_norm" },
-        },
-    },
-    {
-        LLM_ARCH_T5,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
-            { LLM_TENSOR_OUTPUT,               "output" },
-            { LLM_TENSOR_DEC_OUTPUT_NORM,      "dec.output_norm" },
-            { LLM_TENSOR_DEC_ATTN_NORM,        "dec.blk.%d.attn_norm" },
-            { LLM_TENSOR_DEC_ATTN_Q,           "dec.blk.%d.attn_q" },
-            { LLM_TENSOR_DEC_ATTN_K,           "dec.blk.%d.attn_k" },
-            { LLM_TENSOR_DEC_ATTN_V,           "dec.blk.%d.attn_v" },
-            { LLM_TENSOR_DEC_ATTN_OUT,         "dec.blk.%d.attn_o" },
-            { LLM_TENSOR_DEC_ATTN_REL_B,       "dec.blk.%d.attn_rel_b" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_NORM,  "dec.blk.%d.cross_attn_norm" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_Q,     "dec.blk.%d.cross_attn_q" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_K,     "dec.blk.%d.cross_attn_k" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_V,     "dec.blk.%d.cross_attn_v" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_OUT,   "dec.blk.%d.cross_attn_o" },
-            { LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "dec.blk.%d.cross_attn_rel_b" },
-            { LLM_TENSOR_DEC_FFN_NORM,         "dec.blk.%d.ffn_norm" },
-            { LLM_TENSOR_DEC_FFN_GATE,         "dec.blk.%d.ffn_gate" },
-            { LLM_TENSOR_DEC_FFN_DOWN,         "dec.blk.%d.ffn_down" },
-            { LLM_TENSOR_DEC_FFN_UP,           "dec.blk.%d.ffn_up" },
-            { LLM_TENSOR_ENC_OUTPUT_NORM,      "enc.output_norm" },
-            { LLM_TENSOR_ENC_ATTN_NORM,        "enc.blk.%d.attn_norm" },
-            { LLM_TENSOR_ENC_ATTN_Q,           "enc.blk.%d.attn_q" },
-            { LLM_TENSOR_ENC_ATTN_K,           "enc.blk.%d.attn_k" },
-            { LLM_TENSOR_ENC_ATTN_V,           "enc.blk.%d.attn_v" },
-            { LLM_TENSOR_ENC_ATTN_OUT,         "enc.blk.%d.attn_o" },
-            { LLM_TENSOR_ENC_ATTN_REL_B,       "enc.blk.%d.attn_rel_b" },
-            { LLM_TENSOR_ENC_FFN_NORM,         "enc.blk.%d.ffn_norm" },
-            { LLM_TENSOR_ENC_FFN_GATE,         "enc.blk.%d.ffn_gate" },
-            { LLM_TENSOR_ENC_FFN_DOWN,         "enc.blk.%d.ffn_down" },
-            { LLM_TENSOR_ENC_FFN_UP,           "enc.blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_T5ENCODER,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,           "token_embd" },
-            { LLM_TENSOR_OUTPUT,               "output" },
-            { LLM_TENSOR_ENC_OUTPUT_NORM,      "enc.output_norm" },
-            { LLM_TENSOR_ENC_ATTN_NORM,        "enc.blk.%d.attn_norm" },
-            { LLM_TENSOR_ENC_ATTN_Q,           "enc.blk.%d.attn_q" },
-            { LLM_TENSOR_ENC_ATTN_K,           "enc.blk.%d.attn_k" },
-            { LLM_TENSOR_ENC_ATTN_V,           "enc.blk.%d.attn_v" },
-            { LLM_TENSOR_ENC_ATTN_OUT,         "enc.blk.%d.attn_o" },
-            { LLM_TENSOR_ENC_ATTN_REL_B,       "enc.blk.%d.attn_rel_b" },
-            { LLM_TENSOR_ENC_FFN_NORM,         "enc.blk.%d.ffn_norm" },
-            { LLM_TENSOR_ENC_FFN_GATE,         "enc.blk.%d.ffn_gate" },
-            { LLM_TENSOR_ENC_FFN_DOWN,         "enc.blk.%d.ffn_down" },
-            { LLM_TENSOR_ENC_FFN_UP,           "enc.blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_JAIS,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_QKV,        "blk.%d.attn_qkv" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-        },
-    },
-    {
-        LLM_ARCH_NEMOTRON,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_EXAONE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ROPE_FREQS,      "rope_freqs" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_ATTN_ROT_EMBD,   "blk.%d.attn_rot_embd" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_RWKV6,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,                "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM,           "token_embd_norm" },
-            { LLM_TENSOR_OUTPUT_NORM,               "output_norm" },
-            { LLM_TENSOR_OUTPUT,                    "output" },
-            { LLM_TENSOR_ATTN_NORM,                 "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_NORM_2,               "blk.%d.attn_norm_2" },
-            { LLM_TENSOR_TIME_MIX_W1,               "blk.%d.time_mix_w1" },
-            { LLM_TENSOR_TIME_MIX_W2,               "blk.%d.time_mix_w2" },
-            { LLM_TENSOR_TIME_MIX_LERP_X,           "blk.%d.time_mix_lerp_x" },
-            { LLM_TENSOR_TIME_MIX_LERP_W,           "blk.%d.time_mix_lerp_w" },
-            { LLM_TENSOR_TIME_MIX_LERP_K,           "blk.%d.time_mix_lerp_k" },
-            { LLM_TENSOR_TIME_MIX_LERP_V,           "blk.%d.time_mix_lerp_v" },
-            { LLM_TENSOR_TIME_MIX_LERP_R,           "blk.%d.time_mix_lerp_r" },
-            { LLM_TENSOR_TIME_MIX_LERP_G,           "blk.%d.time_mix_lerp_g" },
-            { LLM_TENSOR_TIME_MIX_FIRST,            "blk.%d.time_mix_first" },
-            { LLM_TENSOR_TIME_MIX_DECAY,            "blk.%d.time_mix_decay" },
-            { LLM_TENSOR_TIME_MIX_DECAY_W1,         "blk.%d.time_mix_decay_w1" },
-            { LLM_TENSOR_TIME_MIX_DECAY_W2,         "blk.%d.time_mix_decay_w2" },
-            { LLM_TENSOR_TIME_MIX_KEY,              "blk.%d.time_mix_key" },
-            { LLM_TENSOR_TIME_MIX_VALUE,            "blk.%d.time_mix_value" },
-            { LLM_TENSOR_TIME_MIX_RECEPTANCE,       "blk.%d.time_mix_receptance" },
-            { LLM_TENSOR_TIME_MIX_GATE,             "blk.%d.time_mix_gate" },
-            { LLM_TENSOR_TIME_MIX_LN,               "blk.%d.time_mix_ln" },
-            { LLM_TENSOR_TIME_MIX_OUTPUT,           "blk.%d.time_mix_output" },
-            { LLM_TENSOR_CHANNEL_MIX_LERP_K,        "blk.%d.channel_mix_lerp_k" },
-            { LLM_TENSOR_CHANNEL_MIX_LERP_R,        "blk.%d.channel_mix_lerp_r" },
-            { LLM_TENSOR_CHANNEL_MIX_KEY,           "blk.%d.channel_mix_key" },
-            { LLM_TENSOR_CHANNEL_MIX_VALUE,         "blk.%d.channel_mix_value" },
-            { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,    "blk.%d.channel_mix_receptance" },
-        },
-    },
-    {
-        LLM_ARCH_GRANITE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-        },
-    },
-    {
-        LLM_ARCH_GRANITE_MOE,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE_INP,    "blk.%d.ffn_gate_inp" },
-            { LLM_TENSOR_FFN_GATE_EXPS,   "blk.%d.ffn_gate_exps" },
-            { LLM_TENSOR_FFN_DOWN_EXPS,   "blk.%d.ffn_down_exps" },
-            { LLM_TENSOR_FFN_UP_EXPS,     "blk.%d.ffn_up_exps" },
-        },
-    },
-    {
-        LLM_ARCH_CHAMELEON,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
-            { LLM_TENSOR_OUTPUT,          "output" },
-            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
-            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
-            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
-            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
-            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
-            { LLM_TENSOR_FFN_NORM,        "blk.%d.ffn_norm" },
-            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
-            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
-            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
-            { LLM_TENSOR_ATTN_Q_NORM,     "blk.%d.attn_q_norm" },
-            { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
-        },
-    },
-    {
-        LLM_ARCH_WAVTOKENIZER_DEC,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,        "token_embd" },
-            { LLM_TENSOR_TOKEN_EMBD_NORM,   "token_embd_norm" },
-            { LLM_TENSOR_CONV1D,            "conv1d" },
-            { LLM_TENSOR_CONVNEXT_DW,       "convnext.%d.dw" },
-            { LLM_TENSOR_CONVNEXT_NORM,     "convnext.%d.norm" },
-            { LLM_TENSOR_CONVNEXT_PW1,      "convnext.%d.pw1" },
-            { LLM_TENSOR_CONVNEXT_PW2,      "convnext.%d.pw2" },
-            { LLM_TENSOR_CONVNEXT_GAMMA,    "convnext.%d.gamma" },
-            { LLM_TENSOR_OUTPUT_NORM,       "output_norm" },
-            { LLM_TENSOR_OUTPUT,            "output" },
-            { LLM_TENSOR_POS_NET_CONV1,     "posnet.%d.conv1" },
-            { LLM_TENSOR_POS_NET_CONV2,     "posnet.%d.conv2" },
-            { LLM_TENSOR_POS_NET_NORM,      "posnet.%d.norm" },
-            { LLM_TENSOR_POS_NET_NORM1,     "posnet.%d.norm1" },
-            { LLM_TENSOR_POS_NET_NORM2,     "posnet.%d.norm2" },
-            { LLM_TENSOR_POS_NET_ATTN_NORM, "posnet.%d.attn_norm" },
-            { LLM_TENSOR_POS_NET_ATTN_Q,    "posnet.%d.attn_q" },
-            { LLM_TENSOR_POS_NET_ATTN_K,    "posnet.%d.attn_k" },
-            { LLM_TENSOR_POS_NET_ATTN_V,    "posnet.%d.attn_v" },
-            { LLM_TENSOR_POS_NET_ATTN_OUT,  "posnet.%d.attn_output" },
-        },
-    },
-    {
-        LLM_ARCH_UNKNOWN,
-        {
-            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
-        },
-    },
-};
-
-enum llm_chat_template {
-    LLM_CHAT_TEMPLATE_CHATML,
-    LLM_CHAT_TEMPLATE_LLAMA_2,
-    LLM_CHAT_TEMPLATE_LLAMA_2_SYS,
-    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS,
-    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP,
-    LLM_CHAT_TEMPLATE_MISTRAL_V1,
-    LLM_CHAT_TEMPLATE_MISTRAL_V3,
-    LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
-    LLM_CHAT_TEMPLATE_MISTRAL_V7,
-    LLM_CHAT_TEMPLATE_PHI_3,
-    LLM_CHAT_TEMPLATE_FALCON_3,
-    LLM_CHAT_TEMPLATE_ZEPHYR,
-    LLM_CHAT_TEMPLATE_MONARCH,
-    LLM_CHAT_TEMPLATE_GEMMA,
-    LLM_CHAT_TEMPLATE_ORION,
-    LLM_CHAT_TEMPLATE_OPENCHAT,
-    LLM_CHAT_TEMPLATE_VICUNA,
-    LLM_CHAT_TEMPLATE_VICUNA_ORCA,
-    LLM_CHAT_TEMPLATE_DEEPSEEK,
-    LLM_CHAT_TEMPLATE_DEEPSEEK_2,
-    LLM_CHAT_TEMPLATE_COMMAND_R,
-    LLM_CHAT_TEMPLATE_LLAMA_3,
-    LLM_CHAT_TEMPLATE_CHATGML_3,
-    LLM_CHAT_TEMPLATE_CHATGML_4,
-    LLM_CHAT_TEMPLATE_MINICPM,
-    LLM_CHAT_TEMPLATE_EXAONE_3,
-    LLM_CHAT_TEMPLATE_RWKV_WORLD,
-    LLM_CHAT_TEMPLATE_GRANITE,
-    LLM_CHAT_TEMPLATE_GIGACHAT,
-    LLM_CHAT_TEMPLATE_MEGREZ,
-    LLM_CHAT_TEMPLATE_UNKNOWN,
-};
-
-static const std::map LLM_CHAT_TEMPLATES = {
-    { "chatml",            LLM_CHAT_TEMPLATE_CHATML            },
-    { "llama2",            LLM_CHAT_TEMPLATE_LLAMA_2           },
-    { "llama2-sys",        LLM_CHAT_TEMPLATE_LLAMA_2_SYS       },
-    { "llama2-sys-bos",    LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS   },
-    { "llama2-sys-strip",  LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP },
-    { "mistral-v1",        LLM_CHAT_TEMPLATE_MISTRAL_V1        },
-    { "mistral-v3",        LLM_CHAT_TEMPLATE_MISTRAL_V3        },
-    { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN },
-    { "mistral-v7",        LLM_CHAT_TEMPLATE_MISTRAL_V7        },
-    { "phi3",              LLM_CHAT_TEMPLATE_PHI_3             },
-    { "falcon3",           LLM_CHAT_TEMPLATE_FALCON_3          },
-    { "zephyr",            LLM_CHAT_TEMPLATE_ZEPHYR            },
-    { "monarch",           LLM_CHAT_TEMPLATE_MONARCH           },
-    { "gemma",             LLM_CHAT_TEMPLATE_GEMMA             },
-    { "orion",             LLM_CHAT_TEMPLATE_ORION             },
-    { "openchat",          LLM_CHAT_TEMPLATE_OPENCHAT          },
-    { "vicuna",            LLM_CHAT_TEMPLATE_VICUNA            },
-    { "vicuna-orca",       LLM_CHAT_TEMPLATE_VICUNA_ORCA       },
-    { "deepseek",          LLM_CHAT_TEMPLATE_DEEPSEEK          },
-    { "deepseek2",         LLM_CHAT_TEMPLATE_DEEPSEEK_2        },
-    { "command-r",         LLM_CHAT_TEMPLATE_COMMAND_R         },
-    { "llama3",            LLM_CHAT_TEMPLATE_LLAMA_3           },
-    { "chatglm3",          LLM_CHAT_TEMPLATE_CHATGML_3         },
-    { "chatglm4",          LLM_CHAT_TEMPLATE_CHATGML_4         },
-    { "minicpm",           LLM_CHAT_TEMPLATE_MINICPM           },
-    { "exaone3",           LLM_CHAT_TEMPLATE_EXAONE_3          },
-    { "rwkv-world",        LLM_CHAT_TEMPLATE_RWKV_WORLD        },
-    { "granite",           LLM_CHAT_TEMPLATE_GRANITE           },
-    { "gigachat",          LLM_CHAT_TEMPLATE_GIGACHAT          },
-    { "megrez",            LLM_CHAT_TEMPLATE_MEGREZ            },
-};
-
-static llm_arch llm_arch_from_string(const std::string & name) {
-    for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
-        if (kv.second == name) {
-            return kv.first;
-        }
-    }
-
-    return LLM_ARCH_UNKNOWN;
-}
-
-// helper to handle gguf constants
-// usage:
-//
-//   const auto tn = LLM_TN(LLM_ARCH_LLAMA);
-//
-//   std::string name = tn(LLM_TENSOR_OUTPUT);                     -> "output"
-//   std::string name = tn(LLM_TENSOR_TOKEN_EMBD, "bias");         -> "token_embd.bias"
-//   std::string name = tn(LLM_TENSOR_ATTN_NORM, "weight", 3);     -> "blk.3.attn_norm.weight"
-//
-struct LLM_TN_IMPL {
-    const llm_arch arch;
-    const llm_tensor tensor;
-    const char * const suffix;
-    const int bid;
-    const int xid;
-
-    std::string str() const {
-        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
-            return "__missing__";
-        }
-
-        std::string name = ::format(LLM_TENSOR_NAMES.at(arch).at(tensor), bid, xid);
-
-        if (suffix != nullptr) {
-            name += ".";
-            name += suffix;
-        }
-
-        return name;
-    }
-
-    operator std::string() const {
-        return str();
-    }
-
-    friend bool operator==(const std::string & str, const LLM_TN_IMPL & tn) {
-        return str == tn.str();
-    }
-
-    friend bool operator!=(const std::string & str, const LLM_TN_IMPL & tn) {
-        return str != tn.str();
-    }
-};
-
-struct LLM_TN {
-    LLM_TN(llm_arch arch) : arch(arch) {}
-
-    llm_arch arch;
-
-    LLM_TN_IMPL operator()(llm_tensor tensor, const char * suffix, int bid = -1, int xid = -1) const {
-        return { arch, tensor, suffix, bid, xid };
-    }
-
-    LLM_TN_IMPL operator()(llm_tensor tensor, int bid = -1, int xid = -1) const {
-        return { arch, tensor, nullptr, bid, xid };
-    }
-};
-
-//
-// gguf helpers
-//
-
-static const std::map LLAMA_ROPE_SCALING_TYPES = {
-    { LLAMA_ROPE_SCALING_TYPE_NONE,       "none"       },
-    { LLAMA_ROPE_SCALING_TYPE_LINEAR,     "linear"     },
-    { LLAMA_ROPE_SCALING_TYPE_YARN,       "yarn"       },
-    { LLAMA_ROPE_SCALING_TYPE_LONGROPE,   "longrope"   },
-};
-
-static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::string & name) {
-    for (const auto & kv : LLAMA_ROPE_SCALING_TYPES) {
-        if (kv.second == name) {
-            return (llama_rope_scaling_type) kv.first;
-        }
-    }
-
-    return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
-}
-
-static std::string gguf_data_to_str(enum gguf_type type, const void * data, int i) {
-    switch (type) {
-        case GGUF_TYPE_UINT8:   return std::to_string(((const uint8_t  *)data)[i]);
-        case GGUF_TYPE_INT8:    return std::to_string(((const int8_t   *)data)[i]);
-        case GGUF_TYPE_UINT16:  return std::to_string(((const uint16_t *)data)[i]);
-        case GGUF_TYPE_INT16:   return std::to_string(((const int16_t  *)data)[i]);
-        case GGUF_TYPE_UINT32:  return std::to_string(((const uint32_t *)data)[i]);
-        case GGUF_TYPE_INT32:   return std::to_string(((const int32_t  *)data)[i]);
-        case GGUF_TYPE_UINT64:  return std::to_string(((const uint64_t *)data)[i]);
-        case GGUF_TYPE_INT64:   return std::to_string(((const int64_t  *)data)[i]);
-        case GGUF_TYPE_FLOAT32: return std::to_string(((const float    *)data)[i]);
-        case GGUF_TYPE_FLOAT64: return std::to_string(((const double   *)data)[i]);
-        case GGUF_TYPE_BOOL:    return ((const bool *)data)[i] ? "true" : "false";
-        default:                return format("unknown type %d", type);
-    }
-}
-
-static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
-    const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i);
-
-    switch (type) {
-        case GGUF_TYPE_STRING:
-            return gguf_get_val_str(ctx_gguf, i);
-        case GGUF_TYPE_ARRAY:
-            {
-                const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
-                int arr_n = gguf_get_arr_n(ctx_gguf, i);
-                const void * data = gguf_get_arr_data(ctx_gguf, i);
-                std::stringstream ss;
-                ss << "[";
-                for (int j = 0; j < arr_n; j++) {
-                    if (arr_type == GGUF_TYPE_STRING) {
-                        std::string val = gguf_get_arr_str(ctx_gguf, i, j);
-                        // escape quotes
-                        replace_all(val, "\\", "\\\\");
-                        replace_all(val, "\"", "\\\"");
-                        ss << '"' << val << '"';
-                    } else if (arr_type == GGUF_TYPE_ARRAY) {
-                        ss << "???";
-                    } else {
-                        ss << gguf_data_to_str(arr_type, data, j);
-                    }
-                    if (j < arr_n - 1) {
-                        ss << ", ";
-                    }
-                }
-                ss << "]";
-                return ss.str();
-            }
-        default:
-            return gguf_data_to_str(type, gguf_get_val_data(ctx_gguf, i), 0);
-    }
-}
-
-//
-// llama helpers
-//
-
-#if defined(_WIN32)
-static std::string llama_format_win_err(DWORD err) {
-    LPSTR buf;
-    size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
-                                 NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
-    if (!size) {
-        return "FormatMessageA failed";
-    }
-    std::string ret(buf, size);
-    LocalFree(buf);
-    return ret;
-}
-#endif
-
-template 
-struct no_init {
-    T value;
-    no_init() { /* do nothing */ }
-};
-
-struct llama_file {
-
-#if defined(_WIN32)
-    // use FILE * so we don't have to re-open the file to mmap
-    FILE * fp;
-    HANDLE fp_win32;
-    size_t size;
-
-private:
-    std::string GetErrorMessageWin32(DWORD error_code) const {
-        std::string ret;
-        LPSTR lpMsgBuf = NULL;
-        DWORD bufLen = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
-                                    NULL, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&lpMsgBuf, 0, NULL);
-        if (!bufLen) {
-            ret = format("Win32 error code: %lx", error_code);
-        } else {
-            ret = lpMsgBuf;
-            LocalFree(lpMsgBuf);
-        }
-
-        return ret;
-    }
-
-public:
-
-    llama_file(const char * fname, const char * mode) {
-        fp = ggml_fopen(fname, mode);
-        if (fp == NULL) {
-            throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
-        }
-        fp_win32 = (HANDLE) _get_osfhandle(_fileno(fp));
-        seek(0, SEEK_END);
-        size = tell();
-        seek(0, SEEK_SET);
-    }
-
-    size_t tell() const {
-        // SetFilePointerEx returns the current position when seeking relative 0 bytes
-        LARGE_INTEGER li;
-        li.QuadPart = 0;
-        BOOL ret = SetFilePointerEx(fp_win32, li, &li, FILE_CURRENT);
-        if (!ret) {
-            throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
-        }
-
-        return li.QuadPart;
-    }
-
-    void seek(size_t offset, int whence) const {
-        // no need to convert SEEK_* to FILE_*. The enums are the same.
-        // Still, keep static asserts to avoid failures in the future.
-        static_assert(SEEK_SET == FILE_BEGIN, "SEEK_SET != FILE_BEGIN");
-        static_assert(SEEK_CUR == FILE_CURRENT, "SEEK_CUR != FILE_CURRENT");
-        static_assert(SEEK_END == FILE_END, "SEEK_END != FILE_END");
-
-        LARGE_INTEGER li;
-        li.QuadPart = offset;
-        BOOL ret = SetFilePointerEx(fp_win32, li, NULL, whence);
-        if (!ret) {
-            throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
-        }
-    }
-
-    void read_raw(void * ptr, size_t len) const {
-        // On Win32 ReadFile is significant faster than fread which is again significant faster than std::fstream. Thus
-        // use the Win32 API to do file io instead of the C/C++ library functions.
-
-        // There are conditions under which ReadFile cannot read chunks >64MB.
-        // Thus split the operation into smaller chunks if len exceeds this limit.
-        size_t bytes_read = 0;
-        while (bytes_read < len) {
-            size_t chunk_size = std::min(len - bytes_read, 64*1024*1024);
-            DWORD chunk_read = 0;
-            BOOL result = ReadFile(fp_win32, reinterpret_cast(ptr) + bytes_read, chunk_size, &chunk_read, NULL);
-            if (!result) {
-                throw std::runtime_error(format("read error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
-            }
-            if (chunk_read < chunk_size || chunk_read == 0) {
-                throw std::runtime_error("unexpectedly reached end of file");
-            }
-
-            bytes_read += chunk_read;
-        } ;
-    }
-
-    uint32_t read_u32() const {
-        uint32_t val;
-        read_raw(&val, sizeof(val));
-        return val;
-    }
-
-    void write_raw(const void * ptr, size_t len) const {
-        // There are conditions under which WriteFile cannot write chunks >64MB.
-        // Thus split the operation into smaller chunks if len exceeds this limit.
-        size_t bytes_written = 0;
-        while (bytes_written < len) {
-            size_t chunk_size = std::min(len - bytes_written, 64*1024*1024);
-            DWORD chunk_written = 0;
-            BOOL result = WriteFile(fp_win32, reinterpret_cast(ptr) + bytes_written, chunk_size, &chunk_written, NULL);
-            if (!result) {
-                throw std::runtime_error(format("write error: %s", GetErrorMessageWin32(GetLastError()).c_str()));
-            }
-            if (chunk_written < chunk_size || chunk_written == 0) {
-                throw std::runtime_error("unexpectedly failed to write bytes");
-            }
-
-            bytes_written += chunk_written;
-        }
-    }
-
-    void write_u32(std::uint32_t val) const {
-        write_raw(&val, sizeof(val));
-    }
-
-    ~llama_file() {
-        if (fp) {
-            std::fclose(fp);
-        }
-    }
-#else
-    // use FILE * so we don't have to re-open the file to mmap
-    FILE * fp;
-    size_t size;
-
-    llama_file(const char * fname, const char * mode) {
-        fp = ggml_fopen(fname, mode);
-        if (fp == NULL) {
-            throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
-        }
-        seek(0, SEEK_END);
-        size = tell();
-        seek(0, SEEK_SET);
-    }
-
-    size_t tell() const {
-#ifdef _WIN32
-        __int64 ret = _ftelli64(fp);
-#else
-        long ret = std::ftell(fp);
-#endif
-        if (ret == -1) {
-            throw std::runtime_error(format("ftell error: %s", strerror(errno)));
-        }
-
-        return (size_t) ret;
-    }
-
-    void seek(size_t offset, int whence) const {
-#ifdef _WIN32
-        int ret = _fseeki64(fp, (__int64) offset, whence);
-#else
-        int ret = std::fseek(fp, (long) offset, whence);
-#endif
-        if (ret != 0) {
-            throw std::runtime_error(format("seek error: %s", strerror(errno)));
-        }
-    }
-
-    void read_raw(void * ptr, size_t len) const {
-        if (len == 0) {
-            return;
-        }
-        errno = 0;
-        std::size_t ret = std::fread(ptr, len, 1, fp);
-        if (ferror(fp)) {
-            throw std::runtime_error(format("read error: %s", strerror(errno)));
-        }
-        if (ret != 1) {
-            throw std::runtime_error("unexpectedly reached end of file");
-        }
-    }
-
-    uint32_t read_u32() const {
-        uint32_t ret;
-        read_raw(&ret, sizeof(ret));
-        return ret;
-    }
-
-    void write_raw(const void * ptr, size_t len) const {
-        if (len == 0) {
-            return;
-        }
-        errno = 0;
-        size_t ret = std::fwrite(ptr, len, 1, fp);
-        if (ret != 1) {
-            throw std::runtime_error(format("write error: %s", strerror(errno)));
-        }
-    }
-
-    void write_u32(std::uint32_t val) const {
-        write_raw(&val, sizeof(val));
-    }
-
-    ~llama_file() {
-        if (fp) {
-            std::fclose(fp);
-        }
-    }
-#endif
-};
-using llama_files = std::vector>;
-
-struct llama_mmap {
-    void * addr;
-    size_t size;
-
-    llama_mmap(const llama_mmap &) = delete;
-
-#ifdef _POSIX_MAPPED_FILES
-    static constexpr bool SUPPORTED = true;
-
-    // list of mapped fragments (first_offset, last_offset)
-    std::vector> mapped_fragments;
-
-    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
-        size = file->size;
-        int fd = fileno(file->fp);
-        int flags = MAP_SHARED;
-        // prefetch/readahead impairs performance on NUMA systems
-        if (numa)  { prefetch = 0; }
-#ifdef __linux__
-        // advise the kernel to read the file sequentially (increases readahead)
-        if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
-            LLAMA_LOG_WARN("warning: posix_fadvise(.., POSIX_FADV_SEQUENTIAL) failed: %s\n",
-                    strerror(errno));
-        }
-        if (prefetch) { flags |= MAP_POPULATE; }
-#endif
-        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
-        if (addr == MAP_FAILED) { // NOLINT
-            throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
-        }
-
-        if (prefetch > 0) {
-            // advise the kernel to preload the mapped memory
-            if (posix_madvise(addr, std::min(file->size, prefetch), POSIX_MADV_WILLNEED)) {
-                LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_WILLNEED) failed: %s\n",
-                        strerror(errno));
-            }
-        }
-        if (numa) {
-            // advise the kernel not to use readahead
-            // (because the next page might not belong on the same node)
-            if (posix_madvise(addr, file->size, POSIX_MADV_RANDOM)) {
-                LLAMA_LOG_WARN("warning: posix_madvise(.., POSIX_MADV_RANDOM) failed: %s\n",
-                        strerror(errno));
-            }
-        }
-
-        // initialize list of mapped_fragments
-        mapped_fragments.emplace_back(0, file->size);
-    }
-
-    static void align_range(size_t * first, size_t * last, size_t page_size) {
-        // align first to the next page
-        size_t offset_in_page = *first & (page_size - 1);
-        size_t offset_to_page = offset_in_page == 0 ? 0 : page_size - offset_in_page;
-        *first += offset_to_page;
-
-        // align last to the previous page
-        *last = *last & ~(page_size - 1);
-
-        if (*last <= *first) {
-            *last = *first;
-        }
-    }
-
-    // partially unmap the file in the range [first, last)
-    void unmap_fragment(size_t first, size_t last) {
-        // note: this function must not be called multiple times with overlapping ranges
-        // otherwise, there is a risk of invalidating addresses that have been repurposed for other mappings
-        int page_size = sysconf(_SC_PAGESIZE);
-        align_range(&first, &last, page_size);
-        size_t len = last - first;
-
-        if (len == 0) {
-            return;
-        }
-
-        GGML_ASSERT(first % page_size == 0);
-        GGML_ASSERT(last % page_size == 0);
-        GGML_ASSERT(last > first);
-
-        void * next_page_start = (uint8_t *) addr + first;
-
-        // unmap the range
-        if (munmap(next_page_start, len)) {
-            LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
-        }
-
-        // update the list of mapped fragments to avoid unmapping the same range again in the destructor
-        std::vector> new_mapped_fragments;
-        for (const auto & frag : mapped_fragments) {
-            if (frag.first < first && frag.second > last) {
-                // the range is in the middle of the fragment, split it
-                new_mapped_fragments.emplace_back(frag.first, first);
-                new_mapped_fragments.emplace_back(last, frag.second);
-            } else if (frag.first < first && frag.second > first) {
-                // the range starts in the middle of the fragment
-                new_mapped_fragments.emplace_back(frag.first, first);
-            } else if (frag.first < last && frag.second > last) {
-                // the range ends in the middle of the fragment
-                new_mapped_fragments.emplace_back(last, frag.second);
-            } else if (frag.first >= first && frag.second <= last) {
-                // the range covers the entire fragment
-            } else {
-                // the range is outside the fragment
-                new_mapped_fragments.push_back(frag);
-            }
-        }
-        mapped_fragments = std::move(new_mapped_fragments);
-    }
-
-    ~llama_mmap() {
-        for (const auto & frag : mapped_fragments) {
-            if (munmap((char *) addr + frag.first, frag.second - frag.first)) {
-                LLAMA_LOG_WARN("warning: munmap failed: %s\n", strerror(errno));
-            }
-        }
-    }
-#elif defined(_WIN32)
-    static constexpr bool SUPPORTED = true;
-
-    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1, bool numa = false) {
-        GGML_UNUSED(numa);
-
-        size = file->size;
-
-        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
-
-        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
-
-        if (hMapping == NULL) {
-            DWORD error = GetLastError();
-            throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
-        }
-
-        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
-        DWORD error = GetLastError();
-        CloseHandle(hMapping);
-
-        if (addr == NULL) {
-            throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
-        }
-
-        if (prefetch > 0) {
-#if _WIN32_WINNT >= 0x602
-            // PrefetchVirtualMemory is only present on Windows 8 and above, so we dynamically load it
-            BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
-            HMODULE hKernel32 = GetModuleHandleW(L"kernel32.dll");
-
-            // may fail on pre-Windows 8 systems
-            pPrefetchVirtualMemory = (decltype(pPrefetchVirtualMemory))(void *) GetProcAddress(hKernel32, "PrefetchVirtualMemory");
-
-            if (pPrefetchVirtualMemory) {
-                // advise the kernel to preload the mapped memory
-                WIN32_MEMORY_RANGE_ENTRY range;
-                range.VirtualAddress = addr;
-                range.NumberOfBytes = (SIZE_T) std::min(size, prefetch);
-                if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
-                    LLAMA_LOG_WARN("warning: PrefetchVirtualMemory failed: %s\n",
-                            llama_format_win_err(GetLastError()).c_str());
-                }
-            }
-#else
-            throw std::runtime_error("PrefetchVirtualMemory unavailable");
-#endif
-        }
-    }
-
-    void unmap_fragment(size_t first, size_t last) {
-        // not supported
-        GGML_UNUSED(first);
-        GGML_UNUSED(last);
-    }
-
-    ~llama_mmap() {
-        if (!UnmapViewOfFile(addr)) {
-            LLAMA_LOG_WARN("warning: UnmapViewOfFile failed: %s\n",
-                    llama_format_win_err(GetLastError()).c_str());
-        }
-    }
-#else
-    static constexpr bool SUPPORTED = false;
-
-    llama_mmap(struct llama_file * file, size_t prefetch = -1, bool numa = false) {
-        GGML_UNUSED(file);
-        GGML_UNUSED(prefetch);
-        GGML_UNUSED(numa);
-
-        throw std::runtime_error("mmap not supported");
-    }
-
-    void unmap_fragment(size_t first, size_t last) {
-        GGML_UNUSED(first);
-        GGML_UNUSED(last);
-
-        throw std::runtime_error("mmap not supported");
-    }
-#endif
-};
-using llama_mmaps = std::vector>;
-
-// Represents some region of memory being locked using mlock or VirtualLock;
-// will automatically unlock on destruction.
-struct llama_mlock {
-    void * addr = NULL;
-    size_t size = 0;
-
-    bool failed_already = false;
-
-    llama_mlock() {}
-    llama_mlock(const llama_mlock &) = delete;
-
-    ~llama_mlock() {
-        if (size) {
-            raw_unlock(addr, size);
-        }
-    }
-
-    void init(void * ptr) {
-        GGML_ASSERT(addr == NULL && size == 0); // NOLINT
-        addr = ptr;
-    }
-
-    void grow_to(size_t target_size) {
-        GGML_ASSERT(addr);
-        if (failed_already) {
-            return;
-        }
-        size_t granularity = lock_granularity();
-        target_size = (target_size + granularity - 1) & ~(granularity - 1);
-        if (target_size > size) {
-            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
-                size = target_size;
-            } else {
-                failed_already = true;
-            }
-        }
-    }
-
-#ifdef _POSIX_MEMLOCK_RANGE
-    static constexpr bool SUPPORTED = true;
-
-    static size_t lock_granularity() {
-        return (size_t) sysconf(_SC_PAGESIZE);
-    }
-
-    #ifdef __APPLE__
-        #define MLOCK_SUGGESTION \
-            "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
-            "decreasing 'vm.global_no_user_wire_amount'.  Also try increasing RLIMIT_MEMLOCK (ulimit -l).\n"
-    #else
-        #define MLOCK_SUGGESTION \
-            "Try increasing RLIMIT_MEMLOCK ('ulimit -l' as root).\n"
-    #endif
-
-    bool raw_lock(const void * addr, size_t size) const {
-        if (!mlock(addr, size)) {
-            return true;
-        }
-
-        char* errmsg = std::strerror(errno);
-        bool suggest = (errno == ENOMEM);
-
-        // Check if the resource limit is fine after all
-        struct rlimit lock_limit;
-        if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) {
-            suggest = false;
-        }
-        if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) {
-            suggest = false;
-        }
-
-        LLAMA_LOG_WARN("warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
-                size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
-        return false;
-    }
-
-    #undef MLOCK_SUGGESTION
-
-    static void raw_unlock(void * addr, size_t size) {
-        if (munlock(addr, size)) {
-            LLAMA_LOG_WARN("warning: failed to munlock buffer: %s\n", std::strerror(errno));
-        }
-    }
-#elif defined(_WIN32)
-    static constexpr bool SUPPORTED = true;
-
-    static size_t lock_granularity() {
-        SYSTEM_INFO si;
-        GetSystemInfo(&si);
-        return (size_t) si.dwPageSize;
-    }
-
-    bool raw_lock(void * ptr, size_t len) const {
-        for (int tries = 1; ; tries++) {
-            if (VirtualLock(ptr, len)) {
-                return true;
-            }
-            if (tries == 2) {
-                LLAMA_LOG_WARN("warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
-                    len, size, llama_format_win_err(GetLastError()).c_str());
-                return false;
-            }
-
-            // It failed but this was only the first try; increase the working
-            // set size and try again.
-            SIZE_T min_ws_size, max_ws_size;
-            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
-                LLAMA_LOG_WARN("warning: GetProcessWorkingSetSize failed: %s\n",
-                        llama_format_win_err(GetLastError()).c_str());
-                return false;
-            }
-            // Per MSDN: "The maximum number of pages that a process can lock
-            // is equal to the number of pages in its minimum working set minus
-            // a small overhead."
-            // Hopefully a megabyte is enough overhead:
-            size_t increment = len + 1048576;
-            // The minimum must be <= the maximum, so we need to increase both:
-            min_ws_size += increment;
-            max_ws_size += increment;
-            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
-                LLAMA_LOG_WARN("warning: SetProcessWorkingSetSize failed: %s\n",
-                        llama_format_win_err(GetLastError()).c_str());
-                return false;
-            }
-        }
-    }
-
-    static void raw_unlock(void * ptr, size_t len) {
-        if (!VirtualUnlock(ptr, len)) {
-            LLAMA_LOG_WARN("warning: failed to VirtualUnlock buffer: %s\n",
-                    llama_format_win_err(GetLastError()).c_str());
-        }
-    }
-#else
-    static constexpr bool SUPPORTED = false;
-
-    static size_t lock_granularity() {
-        return (size_t) 65536;
-    }
-
-    bool raw_lock(const void * addr, size_t len) const {
-        LLAMA_LOG_WARN("warning: mlock not supported on this system\n");
-        return false;
-    }
-
-    static void raw_unlock(const void * addr, size_t len) {}
-#endif
-};
-using llama_mlocks = std::vector>;
-
-// NOTE: avoid ever using this except for building the token_to_piece caches
-static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) {
-    std::string piece;
-    piece.resize(piece.capacity());  // using string internal cache
-    const int n_chars = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
-    if (n_chars < 0) {
-        piece.resize(-n_chars);
-        int check = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special);
-        GGML_ASSERT(check == -n_chars);
-    }
-    else {
-        piece.resize(n_chars);
-    }
-
-    return piece;
-}
-
-//
-// globals
-//
-
-struct llama_logger_state {
-    ggml_log_callback log_callback = llama_log_callback_default;
-    void * log_callback_user_data = nullptr;
-};
-
-static llama_logger_state g_logger_state;
-
-// available llama models
-enum e_model {
-    MODEL_UNKNOWN,
-    MODEL_14M,
-    MODEL_17M,
-    MODEL_22M,
-    MODEL_33M,
-    MODEL_60M,
-    MODEL_70M,
-    MODEL_80M,
-    MODEL_109M,
-    MODEL_137M,
-    MODEL_160M,
-    MODEL_220M,
-    MODEL_250M,
-    MODEL_270M,
-    MODEL_335M,
-    MODEL_410M,
-    MODEL_450M,
-    MODEL_770M,
-    MODEL_780M,
-    MODEL_0_5B,
-    MODEL_1B,
-    MODEL_1_3B,
-    MODEL_1_4B,
-    MODEL_1_5B,
-    MODEL_1_6B,
-    MODEL_2B,
-    MODEL_2_8B,
-    MODEL_3B,
-    MODEL_4B,
-    MODEL_6B,
-    MODEL_6_9B,
-    MODEL_7B,
-    MODEL_8B,
-    MODEL_9B,
-    MODEL_11B,
-    MODEL_12B,
-    MODEL_13B,
-    MODEL_14B,
-    MODEL_15B,
-    MODEL_16B,
-    MODEL_20B,
-    MODEL_30B,
-    MODEL_32B,
-    MODEL_34B,
-    MODEL_35B,
-    MODEL_40B,
-    MODEL_65B,
-    MODEL_70B,
-    MODEL_236B,
-    MODEL_314B,
-    MODEL_SMALL,
-    MODEL_MEDIUM,
-    MODEL_LARGE,
-    MODEL_XL,
-    MODEL_A1_7B,
-    MODEL_A2_7B,
-    MODEL_8x7B,
-    MODEL_8x22B,
-    MODEL_16x12B,
-    MODEL_10B_128x3_66B,
-    MODEL_57B_A14B,
-    MODEL_27B,
-};
-
-static const size_t kiB = 1024;
-static const size_t MiB = 1024*kiB;
-static const size_t GiB = 1024*MiB;
-
-struct llama_hparams_posnet {
-    uint32_t n_embd;
-    uint32_t n_layer;
-};
-
-struct llama_hparams_convnext {
-    uint32_t n_embd;
-    uint32_t n_layer;
-};
-
-struct llama_hparams {
-    bool vocab_only;
-    bool rope_finetuned;
-    bool use_par_res;
-    bool swin_norm;
-
-    uint32_t n_vocab = 0;
-    uint32_t n_ctx_train; // context size the model was trained on
-    uint32_t n_embd;
-    uint32_t n_embd_features = 0;
-    uint32_t n_layer;
-    uint32_t n_rot;
-    uint32_t n_swa = 0; // sliding window attention (SWA)
-    uint32_t n_embd_head_k; // dimension of keys (d_k). d_q is assumed to be the same, but there are n_head q heads, and only n_head_kv k-v heads
-    uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head
-    uint32_t n_expert = 0;
-    uint32_t n_expert_used = 0;
-    uint32_t n_vocab_type = 0; // for BERT-style token types
-    uint32_t n_rel_attn_bkts = 0;
-
-    // for WavTokenizer
-    struct llama_hparams_posnet   posnet;
-    struct llama_hparams_convnext convnext;
-
-    std::array n_head_arr;
-    std::array n_head_kv_arr;
-    std::array n_ff_arr;
-
-    uint32_t n_layer_dense_lead = 0;
-    uint32_t n_lora_q = 0;
-    uint32_t n_lora_kv = 0;
-    uint32_t n_ff_exp = 0;
-    uint32_t n_ff_shexp = 0;
-    uint32_t n_expert_shared = 0;
-    float    expert_weights_scale = 0.0;
-
-    float f_norm_eps;
-    float f_norm_rms_eps;
-    float f_norm_group_eps;
-
-    uint32_t n_norm_groups;
-
-    float f_attn_logit_softcapping = 50.0f;
-    float f_final_logit_softcapping = 30.0f;
-
-    // for RWKV
-    uint32_t rescale_every_n_layers = 0;
-    uint32_t time_mix_extra_dim = 0;
-    uint32_t time_decay_extra_dim = 0;
-    uint32_t wkv_head_size = 0;
-
-    float     rope_attn_factor = 1.0f;
-    float     rope_freq_base_train;
-    float     rope_freq_scale_train;
-    uint32_t  n_ctx_orig_yarn;
-    float     rope_yarn_log_mul;
-    int       rope_sections[4];
-
-    // for State Space Models
-    uint32_t ssm_d_conv  = 0;
-    uint32_t ssm_d_inner = 0;
-    uint32_t ssm_d_state = 0;
-    uint32_t ssm_dt_rank = 0;
-    bool ssm_dt_b_c_rms = false;
-
-    float f_clamp_kqv      = 0.0f;
-    float f_max_alibi_bias = 0.0f;
-    float f_logit_scale    = 0.0f;
-
-    // Additional scale factors (Granite/Granite MoE)
-    float f_residual_scale  = 0.0f;
-    float f_embedding_scale = 0.0f;
-    float f_attention_scale = 0.0f;
-
-    bool causal_attn   = true;
-    bool use_alibi     = false;
-    bool attn_soft_cap = false;
-
-    // needed by encoder-decoder models (e.g. T5, FLAN-T5)
-    // ref: https://github.com/ggerganov/llama.cpp/pull/8141
-    llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
-
-    enum llama_pooling_type      pooling_type            = LLAMA_POOLING_TYPE_NONE;
-    enum llama_rope_type         rope_type               = LLAMA_ROPE_TYPE_NONE;
-    enum llama_rope_scaling_type rope_scaling_type_train = LLAMA_ROPE_SCALING_TYPE_NONE;
-
-    uint32_t n_head(uint32_t il = 0) const {
-        if (il < n_layer) {
-            return n_head_arr[il];
-        }
-
-        GGML_ABORT("fatal error");
-    }
-
-    uint32_t n_head_kv(uint32_t il = 0) const {
-        if (il < n_layer) {
-            return n_head_kv_arr[il];
-        }
-
-        GGML_ABORT("fatal error");
-    }
-
-    uint32_t n_ff(uint32_t il = 0) const {
-        if (il < n_layer) {
-            return n_ff_arr[il];
-        }
-
-        GGML_ABORT("fatal error");
-    }
-
-    uint32_t n_gqa(uint32_t il = 0) const {
-        const uint32_t n_head    = this->n_head(il);
-        const uint32_t n_head_kv = this->n_head_kv(il);
-
-        if (n_head_kv == 0) {
-            return 0;
-        }
-
-        return n_head/n_head_kv;
-    }
-
-    uint32_t n_embd_k_gqa(uint32_t il = 0) const { // dimension of key embeddings across all k-v heads
-        const uint32_t n_head_kv = this->n_head_kv(il);
-
-        return n_embd_head_k * n_head_kv;
-    }
-
-    uint32_t n_embd_v_gqa(uint32_t il = 0) const { // dimension of value embeddings across all k-v heads
-        const uint32_t n_head_kv = this->n_head_kv(il);
-
-        return n_embd_head_v * n_head_kv;
-    }
-
-    uint32_t n_embd_k_s() const { // dimension of the rolling state embeddings
-        // corresponds to Mamba's conv_states size or RWKV's token_shift states size
-        if (wkv_head_size != 0) {
-            // for RWKV models
-            return 2 * n_embd;
-        }
-
-        // TODO: maybe support other convolution strides than 1
-        // NOTE: since the first column of the conv_state is shifted out each time, it's not actually needed
-        return (ssm_d_conv > 0 ? ssm_d_conv - 1 : 0) * ssm_d_inner;
-    }
-
-    uint32_t n_embd_v_s() const { // dimension of the recurrent state embeddings
-        if (wkv_head_size != 0) {
-            // corresponds to RWKV's wkv_states size
-            return n_embd * wkv_head_size;
-        }
-
-        // corresponds to Mamba's ssm_states size
-        return ssm_d_state * ssm_d_inner;
-    }
-};
-
-static_assert(std::is_trivially_copyable::value, "llama_hparams must be trivially copyable");
-
-struct llama_cparams {
-    uint32_t n_ctx;           // context size used during inference
-    uint32_t n_batch;
-    uint32_t n_ubatch;
-    uint32_t n_seq_max;
-    int      n_threads;       // number of threads to use for generation
-    int      n_threads_batch; // number of threads to use for batch processing
-
-    float rope_freq_base;
-    float rope_freq_scale;
-
-    uint32_t n_ctx_orig_yarn;
-    // These hyperparameters are not exposed in GGUF, because all
-    // existing YaRN models use the same values for them.
-    float yarn_ext_factor;
-    float yarn_attn_factor;
-    float yarn_beta_fast;
-    float yarn_beta_slow;
-    float defrag_thold;
-
-    bool embeddings;
-    bool causal_attn;
-    bool offload_kqv;
-    bool flash_attn;
-    bool no_perf;
-
-    enum llama_pooling_type pooling_type;
-
-    ggml_backend_sched_eval_callback cb_eval;
-    void * cb_eval_user_data;
-};
-
-struct llama_layer_posnet {
-    // resnet
-    struct ggml_tensor * norm1   = nullptr;
-    struct ggml_tensor * norm1_b = nullptr;
-
-    struct ggml_tensor * conv1   = nullptr;
-    struct ggml_tensor * conv1_b = nullptr;
-
-    struct ggml_tensor * norm2   = nullptr;
-    struct ggml_tensor * norm2_b = nullptr;
-
-    struct ggml_tensor * conv2   = nullptr;
-    struct ggml_tensor * conv2_b = nullptr;
-
-    // attention
-    struct ggml_tensor * attn_norm   = nullptr;
-    struct ggml_tensor * attn_norm_b = nullptr;
-
-    struct ggml_tensor * attn_q   = nullptr;
-    struct ggml_tensor * attn_q_b = nullptr;
-
-    struct ggml_tensor * attn_k   = nullptr;
-    struct ggml_tensor * attn_k_b = nullptr;
-
-    struct ggml_tensor * attn_v   = nullptr;
-    struct ggml_tensor * attn_v_b = nullptr;
-
-    struct ggml_tensor * attn_o   = nullptr;
-    struct ggml_tensor * attn_o_b = nullptr;
-
-    // normalize
-    struct ggml_tensor * norm   = nullptr;
-    struct ggml_tensor * norm_b = nullptr;
-};
-
-struct llama_layer_convnext {
-    struct ggml_tensor * dw   = nullptr;
-    struct ggml_tensor * dw_b = nullptr;
-
-    struct ggml_tensor * norm   = nullptr;
-    struct ggml_tensor * norm_b = nullptr;
-
-    struct ggml_tensor * pw1   = nullptr;
-    struct ggml_tensor * pw1_b = nullptr;
-
-    struct ggml_tensor * pw2   = nullptr;
-    struct ggml_tensor * pw2_b = nullptr;
-
-    struct ggml_tensor * gamma = nullptr;
-};
-
-struct llama_layer {
-    // normalization
-    struct ggml_tensor * attn_norm       = nullptr;
-    struct ggml_tensor * attn_norm_b     = nullptr;
-    struct ggml_tensor * attn_norm_2     = nullptr;
-    struct ggml_tensor * attn_norm_2_b   = nullptr;
-    struct ggml_tensor * attn_q_norm     = nullptr;
-    struct ggml_tensor * attn_q_norm_b   = nullptr;
-    struct ggml_tensor * attn_k_norm     = nullptr;
-    struct ggml_tensor * attn_k_norm_b   = nullptr;
-    struct ggml_tensor * attn_out_norm   = nullptr;
-    struct ggml_tensor * attn_out_norm_b = nullptr;
-    struct ggml_tensor * attn_q_a_norm   = nullptr;
-    struct ggml_tensor * attn_kv_a_norm  = nullptr;
-    struct ggml_tensor * attn_sub_norm   = nullptr;
-    struct ggml_tensor * attn_post_norm  = nullptr;
-    struct ggml_tensor * ffn_sub_norm    = nullptr;
-    struct ggml_tensor * attn_norm_cross = nullptr;
-    struct ggml_tensor * attn_norm_enc   = nullptr;
-
-    // attention
-    struct ggml_tensor * wq        = nullptr;
-    struct ggml_tensor * wk        = nullptr;
-    struct ggml_tensor * wv        = nullptr;
-    struct ggml_tensor * wo        = nullptr;
-    struct ggml_tensor * wqkv      = nullptr;
-    struct ggml_tensor * wq_a      = nullptr;
-    struct ggml_tensor * wq_b      = nullptr;
-    struct ggml_tensor * wkv_a_mqa = nullptr;
-    struct ggml_tensor * wkv_b     = nullptr;
-    struct ggml_tensor * wq_cross  = nullptr;
-    struct ggml_tensor * wk_cross  = nullptr;
-    struct ggml_tensor * wv_cross  = nullptr;
-    struct ggml_tensor * wo_cross  = nullptr;
-    struct ggml_tensor * wq_enc    = nullptr;
-    struct ggml_tensor * wk_enc    = nullptr;
-    struct ggml_tensor * wv_enc    = nullptr;
-    struct ggml_tensor * wo_enc    = nullptr;
-
-    // attention bias
-    struct ggml_tensor * bq   = nullptr;
-    struct ggml_tensor * bk   = nullptr;
-    struct ggml_tensor * bv   = nullptr;
-    struct ggml_tensor * bo   = nullptr;
-    struct ggml_tensor * bqkv = nullptr;
-
-    // relative position bias
-    struct ggml_tensor * attn_rel_b       = nullptr;
-    struct ggml_tensor * attn_rel_b_enc   = nullptr;
-    struct ggml_tensor * attn_rel_b_cross = nullptr;
-
-    // normalization
-    struct ggml_tensor * ffn_norm         = nullptr;
-    struct ggml_tensor * ffn_norm_b       = nullptr;
-    struct ggml_tensor * ffn_post_norm    = nullptr;
-    struct ggml_tensor * layer_out_norm   = nullptr;
-    struct ggml_tensor * layer_out_norm_b = nullptr;
-    struct ggml_tensor * ffn_norm_exps    = nullptr;
-    struct ggml_tensor * ffn_norm_enc     = nullptr;
-
-    // ff
-    struct ggml_tensor * ffn_gate     = nullptr; // w1
-    struct ggml_tensor * ffn_down     = nullptr; // w2
-    struct ggml_tensor * ffn_up       = nullptr; // w3
-    struct ggml_tensor * ffn_gate_enc = nullptr;
-    struct ggml_tensor * ffn_down_enc = nullptr;
-    struct ggml_tensor * ffn_up_enc   = nullptr;
-
-    // ff MoE
-    struct ggml_tensor * ffn_gate_inp  = nullptr;
-    struct ggml_tensor * ffn_gate_exps = nullptr;
-    struct ggml_tensor * ffn_down_exps = nullptr;
-    struct ggml_tensor * ffn_up_exps   = nullptr;
-
-    // ff shared expert (shexp)
-    struct ggml_tensor * ffn_gate_inp_shexp = nullptr;
-    struct ggml_tensor * ffn_gate_shexp     = nullptr;
-    struct ggml_tensor * ffn_down_shexp     = nullptr;
-    struct ggml_tensor * ffn_up_shexp       = nullptr;
-
-    // ff bias
-    struct ggml_tensor * ffn_gate_b = nullptr;
-    struct ggml_tensor * ffn_down_b = nullptr; // b2
-    struct ggml_tensor * ffn_up_b   = nullptr; // b3
-    struct ggml_tensor * ffn_act    = nullptr;
-
-    // mamba proj
-    struct ggml_tensor * ssm_in  = nullptr;
-    struct ggml_tensor * ssm_x   = nullptr;
-    struct ggml_tensor * ssm_dt  = nullptr;
-    struct ggml_tensor * ssm_out = nullptr;
-
-    // mamba
-    struct ggml_tensor * ssm_conv1d = nullptr;
-    struct ggml_tensor * ssm_a      = nullptr;
-    struct ggml_tensor * ssm_d      = nullptr;
-
-    // mamba bias
-    struct ggml_tensor * ssm_conv1d_b = nullptr;
-    struct ggml_tensor * ssm_dt_b     = nullptr;
-
-    // rwkv
-    struct ggml_tensor * time_mix_w1         = nullptr;
-    struct ggml_tensor * time_mix_w2         = nullptr;
-    struct ggml_tensor * time_mix_lerp_x     = nullptr;
-    struct ggml_tensor * time_mix_lerp_w     = nullptr;
-    struct ggml_tensor * time_mix_lerp_k     = nullptr;
-    struct ggml_tensor * time_mix_lerp_v     = nullptr;
-    struct ggml_tensor * time_mix_lerp_r     = nullptr;
-    struct ggml_tensor * time_mix_lerp_g     = nullptr;
-
-    struct ggml_tensor * time_mix_first      = nullptr;
-    struct ggml_tensor * time_mix_decay      = nullptr;
-    struct ggml_tensor * time_mix_decay_w1   = nullptr;
-    struct ggml_tensor * time_mix_decay_w2   = nullptr;
-    struct ggml_tensor * time_mix_key        = nullptr;
-    struct ggml_tensor * time_mix_value      = nullptr;
-    struct ggml_tensor * time_mix_receptance = nullptr;
-    struct ggml_tensor * time_mix_gate       = nullptr;
-
-    struct ggml_tensor * time_mix_ln     = nullptr;
-    struct ggml_tensor * time_mix_ln_b   = nullptr;
-    struct ggml_tensor * time_mix_output = nullptr;
-
-    struct ggml_tensor * channel_mix_lerp_k = nullptr;
-    struct ggml_tensor * channel_mix_lerp_r = nullptr;
-
-    struct ggml_tensor * channel_mix_key        = nullptr;
-    struct ggml_tensor * channel_mix_receptance = nullptr;
-    struct ggml_tensor * channel_mix_value      = nullptr;
-
-    // long rope factors
-    struct ggml_tensor * rope_long  = nullptr;
-    struct ggml_tensor * rope_short = nullptr;
-    struct ggml_tensor * rope_freqs = nullptr;
-
-    // bitnet scale
-    struct ggml_tensor * wq_scale       = nullptr;
-    struct ggml_tensor * wk_scale       = nullptr;
-    struct ggml_tensor * wv_scale       = nullptr;
-    struct ggml_tensor * wo_scale       = nullptr;
-    struct ggml_tensor * ffn_gate_scale = nullptr;
-    struct ggml_tensor * ffn_up_scale   = nullptr;
-    struct ggml_tensor * ffn_down_scale = nullptr;
-
-    struct llama_layer_posnet posnet;
-
-    struct llama_layer_convnext convnext;
-};
-
-// very similar to llama_batch,
-// but has more metadata about sequences
-struct llama_ubatch {
-    bool equal_seqs;
-    // TODO: whole_seqs for embeddings?
-
-    uint32_t n_tokens; // total tokens (n_seq_tokens * n_seqs)
-    uint32_t n_seq_tokens; // tokens per sequence
-    uint32_t n_seqs;
-
-    llama_token  *  token;    // [n_tokens]
-    float        *  embd;     // [n_embd, n_tokens]
-    llama_pos    *  pos;      // [n_tokens]
-    int32_t      *  n_seq_id; // [n_seqs]
-    llama_seq_id ** seq_id;   // [n_seqs]
-    int8_t       *  output;   // [n_tokens]
-};
-
-struct llama_kv_cell {
-    llama_pos pos   = -1;
-    llama_pos delta = 0;
-    int32_t   src   = -1; // used by recurrent state models to copy states
-    int32_t   tail  = -1;
-
-    std::set seq_id;
-
-    bool has_seq_id(const llama_seq_id & id) const {
-        return seq_id.find(id) != seq_id.end();
-    }
-
-    bool is_empty() const {
-        return seq_id.empty();
-    }
-
-    bool is_same_seq(const llama_kv_cell & other) const {
-        return seq_id == other.seq_id;
-    }
-};
-
-// ring-buffer of cached KV data
-struct llama_kv_cache {
-    bool has_shift = false;
-    bool do_defrag = false;
-    bool recurrent = false; // with recurrent state models, a cell can hold the state for more than one past token
-    bool v_trans   = true;  // the value tensor is transposed
-
-    // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_internal also uses it, so it
-    // cannot be freely changed after a slot has been allocated.
-    uint32_t head = 0;
-    uint32_t size = 0;
-    uint32_t used = 0; // used cells (i.e. at least one seq_id)
-
-    // computed before each graph build
-    uint32_t n = 0;
-
-    ggml_type type_k = GGML_TYPE_F16;
-    ggml_type type_v = GGML_TYPE_F16;
-
-    std::vector cells;
-
-    std::vector k_l; // per layer
-    std::vector v_l;
-
-    std::vector ctxs;
-    std::vector bufs;
-
-    size_t total_size() {
-        size_t size = 0;
-        for (auto & buf : bufs) {
-            size += ggml_backend_buffer_get_size(buf.get());
-        }
-        return size;
-    }
-};
-
-struct llama_control_vector {
-    std::vector tensors; // per layer
-    std::vector ctxs;
-    std::vector bufs;
-
-    int32_t layer_start = -1;
-    int32_t layer_end   = -1;
-
-    struct ggml_tensor * tensor_for(int il) const {
-        if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
-            return nullptr;
-        }
-        return tensors[il];
-    }
-
-    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int  il) const {
-        ggml_tensor * layer_dir = tensor_for(il);
-        if (layer_dir != nullptr) {
-            cur = ggml_add(ctx, cur, layer_dir);
-        }
-        return cur;
-    }
-};
-
-struct llama_model {
-    e_model     type  = MODEL_UNKNOWN;
-    llm_arch    arch  = LLM_ARCH_UNKNOWN;
-    llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
-
-    std::string name = "n/a";
-
-    llama_hparams hparams = {};
-    llama_vocab   vocab;
-
-    struct ggml_tensor * tok_embd = nullptr;
-    struct ggml_tensor * type_embd = nullptr;
-    struct ggml_tensor * pos_embd = nullptr;
-    struct ggml_tensor * tok_norm = nullptr;
-    struct ggml_tensor * tok_norm_b = nullptr;
-
-    struct ggml_tensor * output_norm = nullptr;
-    struct ggml_tensor * output_norm_b = nullptr;
-    struct ggml_tensor * output = nullptr;
-    struct ggml_tensor * output_b = nullptr;
-    struct ggml_tensor * output_norm_enc = nullptr;
-
-    // classifier
-    struct ggml_tensor * cls = nullptr;
-    struct ggml_tensor * cls_b = nullptr;
-    struct ggml_tensor * cls_out   = nullptr;
-    struct ggml_tensor * cls_out_b = nullptr;
-
-    struct ggml_tensor * conv1d = nullptr;
-    struct ggml_tensor * conv1d_b = nullptr;
-
-    std::vector layers;
-
-    // gguf metadata
-    std::unordered_map gguf_kv;
-
-    llama_split_mode split_mode;
-    int main_gpu;
-    int n_gpu_layers;
-
-    std::vector rpc_servers;
-
-    // list of devices used in this model
-    std::vector devices;
-
-
-    // lists of buffer types used for each layer
-    using buft_list_t = std::vector>;
-    buft_list_t cpu_buft_list;
-    std::map gpu_buft_list;
-
-    struct layer_dev {
-        ggml_backend_dev_t dev;
-        buft_list_t * buft_list;
-    };
-    layer_dev dev_input = {};
-    layer_dev dev_output = {};
-    std::vector dev_layer;
-
-    // contexts where the model tensors metadata is stored
-    std::vector ctxs;
-
-    // the model memory buffers for the tensor data
-    std::vector bufs;
-
-    // model memory mapped files
-    llama_mmaps mappings;
-
-    // objects representing data potentially being locked in memory
-    llama_mlocks mlock_bufs;
-    llama_mlocks mlock_mmaps;
-
-    // for quantize-stats only
-    std::vector> tensors_by_name;
-
-    int64_t t_load_us  = 0;
-    int64_t t_start_us = 0;
-
-    // total number of parameters in the model
-    uint64_t n_elements = 0;
-
-    // total size of all the tensors in the model in bytes
-    size_t  n_bytes     = 0;
-
-    // keep track of loaded lora adapters
-    std::set lora_adapters;
-
-    ~llama_model() {
-       while (!lora_adapters.empty()) {
-            llama_lora_adapter_free(*lora_adapters.begin());
-        }
-    }
-};
-
-struct llama_sbatch_seq {
-    int32_t n_seq_id;
-    llama_seq_id * seq_id;
-    size_t offset;
-    size_t length;
-};
-
-// sequence-length-aware batch splitting
-struct llama_sbatch {
-    // tokens left in this batch
-    size_t n_tokens;
-
-    size_t n_embd;
-
-    bool logits_all; // TODO: remove once lctx.logits_all is removed too
-
-    // sorted indices into the batch
-    std::vector ids;
-    // batch indices of the output
-    std::vector out_ids;
-    std::vector seq;
-
-    const llama_batch * batch = nullptr;
-
-    // buffers for the ubatch
-    std::vector    ubatch_token;
-    std::vector          ubatch_embd;
-    std::vector      ubatch_pos;
-    std::vector        ubatch_n_seq_id;
-    std::vector ubatch_seq_id;
-    std::vector         ubatch_output;
-
-    llama_ubatch reserve_ubatch(size_t n_ubatch, bool has_embd = false) {
-        // clear empty sequences
-        // the previous ubatch is assumed to be gone,
-        // so nothing should refer to values in these sequences anymore.
-        for (size_t i = seq.size(); i-- > 0;) {
-            if (seq[i].length == 0) {
-                seq.pop_back();
-            } else {
-                break;
-            }
-        }
-        ubatch_token.resize(!has_embd ? n_ubatch : 0);
-        ubatch_embd.resize(has_embd ? n_embd * n_ubatch : 0);
-        ubatch_pos.resize(n_ubatch);
-        ubatch_n_seq_id.resize(n_ubatch);
-        ubatch_seq_id.resize(n_ubatch);
-        ubatch_output.resize(n_ubatch);
-        llama_ubatch ubatch = {
-            /*equal_seqs   =*/ true,
-            /*n_tokens     =*/ 0,
-            /*n_seq_tokens =*/ 0,
-            /*n_seqs       =*/ 0,
-            /*token        =*/ !has_embd ? ubatch_token.data() : nullptr,
-            /*embd         =*/ has_embd  ? ubatch_embd.data()  : nullptr,
-            /*pos          =*/ ubatch_pos.data(),
-            /*n_seq_id     =*/ ubatch_n_seq_id.data(),
-            /*seq_id       =*/ ubatch_seq_id.data(),
-            /*output       =*/ ubatch_output.data(),
-        };
-        return ubatch;
-    }
-
-    void add_seq_to_ubatch(llama_ubatch & ubatch, llama_sbatch_seq & seq, size_t length) {
-        GGML_ASSERT(batch != nullptr);
-        GGML_ASSERT(length <= seq.length);
-        // Can only add sequences of equal lengths to a batch,
-        // otherwise it isn't clear to which sequence a token belongs
-        GGML_ASSERT(seq.n_seq_id == 0 || ubatch.n_seqs == 0 || length == (size_t) ubatch.n_tokens / ubatch.n_seqs);
-        GGML_ASSERT((seq.n_seq_id != 0) == ubatch.equal_seqs);
-        // NOTE: loops are separated for cache-friendliness
-        if (batch->token) {
-            if (ubatch.equal_seqs) {
-                for (size_t i = 0; i < length; ++i) {
-                    ubatch.token[ubatch.n_tokens + i] = batch->token[ids[seq.offset + i]];
-                }
-            } else {
-                // simple split
-                ubatch.token = batch->token + seq.offset;
-            }
-        } else {
-            ubatch.token = nullptr;
-        }
-        if (batch->embd) {
-            if (ubatch.equal_seqs) {
-                for (size_t i = 0; i < length; ++i) {
-                    memcpy(
-                        ubatch.embd + n_embd * (ubatch.n_tokens + i),
-                        batch->embd + n_embd * ids[seq.offset + i],
-                        n_embd * sizeof(float)
-                    );
-                }
-            } else {
-                // simple split
-                ubatch.embd = batch->embd + (n_embd * seq.offset);
-            }
-        } else {
-            ubatch.embd = nullptr;
-        }
-        if (ubatch.equal_seqs) {
-            for (size_t i = 0; i < length; ++i) {
-                ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
-            }
-        } else {
-            // simple split
-            ubatch.pos = batch->pos + seq.offset;
-        }
-        if (ubatch.equal_seqs) {
-            ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
-            if (seq.seq_id) {
-                ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
-            }
-        } else {
-            // simple split
-            if (batch->n_seq_id) {
-                ubatch.n_seq_id = batch->n_seq_id + seq.offset;
-            } else {
-                for (size_t i = 0; i < length; ++i) {
-                    ubatch.n_seq_id[ubatch.n_seqs + i] = 1;
-                }
-            }
-            if (batch->seq_id) {
-                ubatch.seq_id = batch->seq_id + seq.offset;
-            }
-        }
-        if (logits_all) {
-            for (size_t i = 0; i < length; ++i) {
-                ubatch.output[ubatch.n_tokens + i] = 1;
-                out_ids.push_back(ids[seq.offset + i]);
-            }
-        } else if (batch->logits) {
-            if (ubatch.equal_seqs) {
-                for (size_t i = 0; i < length; ++i) {
-                    size_t id = ids[seq.offset + i];
-                    int8_t is_output = batch->logits[id];
-                    ubatch.output[ubatch.n_tokens + i] = is_output;
-                    if (is_output) { out_ids.push_back(id); }
-                }
-            } else {
-                // simple split
-                ubatch.output = batch->logits + seq.offset;
-                for (size_t i = 0; i < length; ++i) {
-                    if (ubatch.output[i] != 0) { out_ids.push_back(seq.offset + i); }
-                }
-            }
-        } else {
-            // only get last output
-            for (size_t i = 0; i < length; ++i) {
-                size_t id = ids[seq.offset + i];
-                int8_t is_last = id == ids.size() - 1;
-                ubatch.output[ubatch.n_tokens + i] = is_last;
-                if (is_last) { out_ids.push_back(id); }
-            }
-        }
-        if (ubatch.n_tokens == 0 && ubatch.n_seqs == 0) {
-            ubatch.n_seq_tokens = ubatch.equal_seqs ? length : 1;
-        }
-        ubatch.n_tokens += length;
-        ubatch.n_seqs += ubatch.equal_seqs ? 1 : length; // virtual sequences for simple splits
-        seq.offset += length;
-        seq.length -= length;
-        n_tokens -= length;
-        GGML_ASSERT(ubatch.n_tokens == ubatch.n_seq_tokens * ubatch.n_seqs);
-    }
-
-    // simple split, unknown number of sequences of unequal lengths
-    llama_ubatch split_simple(size_t n_ubatch) {
-        n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
-        llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
-        ubatch.equal_seqs = false;
-        if (!seq.empty()) {
-            llama_sbatch_seq & s = seq[0];
-            size_t length = s.length < n_ubatch ? s.length : n_ubatch;
-            GGML_ASSERT(seq.size() == 1 && s.n_seq_id == 0); // don't mix with other splits
-            add_seq_to_ubatch(ubatch, s, length);
-        }
-        return ubatch;
-    }
-
-    // make batches of equal-length sequences
-    llama_ubatch split_equal(size_t n_ubatch) {
-        n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
-        llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
-        if (!seq.empty()) {
-            size_t length = 0;
-            size_t n_tokens_in_ubatch = 0;
-            GGML_ASSERT(seq[0].n_seq_id > 0); // should not be mixed with simple splits
-            // smallest first, because it's easier to split this way;
-            // starting from the end to pop in constant time.
-            for (size_t i = seq.size(); i-- > 0;) {
-                llama_sbatch_seq & s = seq[i];
-                GGML_ASSERT(s.length > 0);
-                if (length == 0) {
-                    length = s.length < n_ubatch ? s.length : n_ubatch;
-                }
-                add_seq_to_ubatch(ubatch, s, length);
-                n_tokens_in_ubatch += length;
-                // shared prompts can't be mixed with any of their sequences,
-                // so it's safer to compute them in their own ubatch
-                if (s.n_seq_id > 1) { break; }
-                // stop when there isn't enough space for another sequence
-                if (length + n_tokens_in_ubatch > n_ubatch) { break; }
-            }
-        }
-        return ubatch;
-    }
-
-    // sequence-wise split
-    llama_ubatch split_seq(size_t n_ubatch) {
-        n_ubatch = n_tokens < n_ubatch ? n_tokens : n_ubatch;
-        llama_ubatch ubatch = reserve_ubatch(n_ubatch, /* has_embd */ batch->embd != nullptr);
-        if (!seq.empty()) {
-            llama_sbatch_seq & s = seq[seq.size() - 1];
-            size_t length = s.length < n_ubatch ? s.length : n_ubatch;
-            GGML_ASSERT(s.n_seq_id > 0); // should not be mixed with simple splits
-            add_seq_to_ubatch(ubatch, s, length);
-        }
-        return ubatch;
-    }
-
-    void from_batch(const llama_batch & batch, const size_t n_embd, const bool simple_split = false, const bool logits_all = false) {
-        GGML_ASSERT(batch.n_tokens >= 0);
-        this->batch = &batch;
-        this->n_embd = n_embd;
-        this->logits_all = logits_all;
-
-        n_tokens = batch.n_tokens;
-        ids.resize(n_tokens);
-        out_ids.clear();
-        // TODO: reserve out_ids and seq
-
-        for (size_t i = 0; i < n_tokens; ++i) {
-            ids[i] = i;
-        }
-        if (simple_split) {
-            seq.resize(1);
-            llama_sbatch_seq & s = seq[0];
-            s.n_seq_id = 0;
-            s.seq_id = nullptr;
-            s.offset = 0;
-            s.length = n_tokens;
-            return;
-        }
-        std::sort(ids.begin(), ids.end(),
-            [&batch](size_t a, size_t b) {
-                int32_t n_seq_a = batch.n_seq_id ? batch.n_seq_id[a] : 1;
-                int32_t n_seq_b = batch.n_seq_id ? batch.n_seq_id[b] : 1;
-                // sort by seq_id, then by pos
-                if (n_seq_a == n_seq_b) {
-                    if (batch.seq_id) {
-                        for (int32_t i = 0; i < n_seq_a; ++i) {
-                            llama_seq_id seq_id_a = batch.seq_id[a][i];
-                            llama_seq_id seq_id_b = batch.seq_id[b][i];
-                            // smaller seq_ids go first
-                            if (seq_id_a != seq_id_b) {
-                                return seq_id_a < seq_id_b;
-                            }
-                        }
-                    }
-                    // when all else is equal, sort by pos
-                    if (batch.pos) {
-                        return batch.pos[a] < batch.pos[b];
-                    }
-                    // no pos, sort by id
-                    return a < b;
-                }
-                // shared prompts go first
-                return n_seq_a > n_seq_b;
-            }
-        );
-        // init seq
-        llama_sbatch_seq * last_seq = nullptr;
-
-        for (size_t i = 0; i < n_tokens; ++i) {
-            const size_t bi = ids[i];
-            const int32_t n_seqs = batch.n_seq_id[bi];
-            llama_seq_id * seq_ids = batch.seq_id[bi];
-            if (last_seq != nullptr) {
-                bool same = n_seqs == last_seq->n_seq_id;
-                for (int32_t j = 0; same && j < n_seqs; ++j) {
-                    if (seq_ids[j] != last_seq->seq_id[j]) {
-                        same = false;
-                    }
-                }
-                if (same) {
-                    last_seq->length += 1;
-                    continue;
-                }
-            }
-            llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1};
-            seq.push_back(new_seq);
-            last_seq = &seq.back();
-        }
-        // keep shared prompts first at the end, then sort by length descending.
-        std::sort(seq.begin(), seq.end(),
-            [](llama_sbatch_seq & a, llama_sbatch_seq & b) {
-                if (a.n_seq_id == b.n_seq_id) {
-                    return a.length > b.length;
-                }
-                return a.n_seq_id < b.n_seq_id;
-            }
-        );
-    }
-};
-
-struct llama_context {
-    llama_context(const llama_model & model)
-        : model(model)
-        , t_start_us(model.t_start_us)
-        , t_load_us(model.t_load_us) {}
-
-    const struct llama_model & model;
-
-    struct llama_cparams        cparams;
-    struct llama_sbatch         sbatch;
-    struct llama_kv_cache       kv_self;
-    struct llama_control_vector cvec;
-
-    std::unordered_map lora_adapters;
-
-    std::vector backends;
-    std::vector> set_n_threads_fns;
-
-    ggml_backend_t backend_cpu = nullptr;
-
-    ggml_threadpool_t threadpool       = nullptr;
-    ggml_threadpool_t threadpool_batch = nullptr;
-
-    bool has_evaluated_once = false;
-
-    mutable int64_t t_start_us;
-    mutable int64_t t_load_us;
-    mutable int64_t t_p_eval_us = 0;
-    mutable int64_t t_eval_us   = 0;
-
-    mutable int64_t t_compute_start_us = 0;
-    mutable int64_t n_queued_tokens = 0;
-
-    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
-    mutable int32_t n_eval   = 0; // number of eval calls
-
-    // host buffer for the model output (logits and embeddings)
-    ggml_backend_buffer_ptr buf_output;
-
-    // decode output (2-dimensional array: [n_outputs][n_vocab])
-    size_t  logits_size = 0; // capacity (of floats) for logits
-    float * logits      = nullptr;
-
-    std::vector output_ids; // map batch token positions to ids of the logits and embd buffers
-    size_t  output_size = 0; // capacity (of tokens positions) for the output buffers
-    int32_t n_outputs   = 0; // number of actually-used outputs in the current ubatch or last logical batch
-
-    bool logits_all = false;
-
-    // embeddings output (2-dimensional array: [n_outputs][n_embd])
-    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
-    size_t  embd_size = 0; // capacity (of floats) for embeddings
-    float * embd      = nullptr;
-
-    // sequence embeddings output (map of [n_embd] vectors)
-    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
-    std::map> embd_seq;
-
-    // whether we are computing encoder output or decoder output
-    bool is_encoding = false;
-
-    // TODO: find a better way to accommodate mutli-dimension position encoding methods
-    // number of position id each token get, 1 for each token in most cases.
-    // when using m-rope, it will be 3 position ids per token to representing 3 dimension coordinate.
-    int n_pos_per_token = 1;
-
-    // output of the encoder part of the encoder-decoder models
-    std::vector embd_enc;
-    std::vector> seq_ids_enc;
-
-    // memory buffers used to evaluate the model
-    std::vector buf_compute_meta;
-    ggml_backend_sched_ptr sched;
-
-    ggml_abort_callback abort_callback      = nullptr;
-    void *              abort_callback_data = nullptr;
-
-    // input tensors
-    struct ggml_tensor * inp_tokens;      // I32 [n_batch]
-    struct ggml_tensor * inp_embd;        // F32 [n_embd, n_batch]
-    struct ggml_tensor * inp_pos;         // I32 [n_batch]
-    struct ggml_tensor * inp_out_ids;     // I32 [n_outputs]
-    struct ggml_tensor * inp_KQ_mask;     // F32 [kv_size, n_batch]
-    struct ggml_tensor * inp_KQ_mask_swa; // F32 [kv_size, n_batch]
-    struct ggml_tensor * inp_K_shift;     // I32 [kv_size]
-    struct ggml_tensor * inp_mean;        // F32 [n_batch, n_batch]
-    struct ggml_tensor * inp_cls;         // I32 [n_batch]
-    struct ggml_tensor * inp_s_copy;      // I32 [kv_size]
-    struct ggml_tensor * inp_s_mask;      // F32 [1, n_kv]
-    struct ggml_tensor * inp_s_seq;       // I32 [n_kv, n_batch]
-    struct ggml_tensor * inp_pos_bucket;    // I32 [n_batch|n_kv, n_batch]
-    struct ggml_tensor * inp_embd_enc;      // F32 [n_embd, n_outputs_enc]
-    struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
-};
-
-struct llama_lora_weight {
-    struct ggml_tensor * a = nullptr;
-    struct ggml_tensor * b = nullptr;
-    llama_lora_weight() = default;
-    llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b): a(a), b(b) {}
-};
-
-struct llama_lora_adapter {
-    struct llama_model * base_model;
-    // map tensor name to lora_a_b
-    std::unordered_map ab_map;
-    std::vector ctxs;
-    std::vector bufs;
-
-    float alpha;
-
-    llama_lora_adapter(struct llama_model * base_model): base_model(base_model) {
-        base_model->lora_adapters.insert(this);
-    }
-
-    llama_lora_weight * get_weight(struct ggml_tensor * w) {
-        std::string name(w->name);
-        auto pos = ab_map.find(name);
-        if (ab_map.find(name) != ab_map.end()) {
-            return &pos->second;
-        }
-        return nullptr;
-    }
-
-    ~llama_lora_adapter() {
-        auto pos = base_model->lora_adapters.find(this);
-        if (pos != base_model->lora_adapters.end()) {
-            base_model->lora_adapters.erase(pos);
-        }
-    }
-};
-
 static int llama_get_device_count(const llama_model & model) {
     return (int) model.devices.size();
 }
 
-static struct ggml_tensor * llama_get_model_tensor(const struct llama_model * model, const char * name) {
-    auto it = std::find_if(model->tensors_by_name.begin(), model->tensors_by_name.end(),
-            [name](const std::pair & it) {
-                return it.first == name;
-            });
-    if (it == model->tensors_by_name.end()) {
-        return nullptr;
-    }
-    return it->second;
-}
-
-template
-static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) {
-    ggml_init_params params = {
-        /*.mem_size   =*/ ggml_tensor_overhead()*8,
-        /*.mem_buffer =*/ NULL,
-        /*.no_alloc   =*/ true,
-    };
-    ggml_context_ptr ctx { ggml_init(params) };
-    if (!ctx) {
-        throw std::runtime_error(format("failed to create ggml context"));
-    }
-
-    ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) };
-    ggml_tensor * op_tensor = fn(ctx.get());
-    for (int i = 0; i < GGML_MAX_SRC; i++) {
-        if (op_tensor->src[i] != nullptr) {
-            assert(op_tensor->src[i]->buffer == nullptr);
-            op_tensor->src[i]->buffer = buf.get();
-        }
-    }
-    bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
-
-    return op_supported;
-}
-
-template
-static ggml_backend_buffer_type_t select_buft(const llama_model::buft_list_t & buft_list, const F & fn) {
-    for (const auto & cur : buft_list) {
-        ggml_backend_dev_t cur_dev = cur.first;
-        ggml_backend_buffer_type_t cur_buft = cur.second;
-        if (buft_supported(cur_buft, cur_dev, fn)) {
-            return cur_buft;
-        }
-    }
-    throw std::runtime_error(format("no suitable buffer type found"));
-}
-
-//
-// kv cache helpers
-//
-
-static bool llama_kv_cache_init(
-             struct llama_kv_cache & cache,
-               const llama_context * ctx,
-                         ggml_type   type_k,
-                         ggml_type   type_v,
-                          uint32_t   kv_size,
-                              bool   offload) {
-    const llama_model & model = ctx->model;
-    const llama_cparams & cparams = ctx->cparams;
-
-    const struct llama_hparams & hparams = model.hparams;
-
-    const int32_t n_layer = hparams.n_layer;
-
-    LLAMA_LOG_INFO("%s: kv_size = %d, offload = %d, type_k = '%s', type_v = '%s', n_layer = %d\n", __func__, kv_size, offload, ggml_type_name(type_k), ggml_type_name(type_v), n_layer);
-
-    cache.has_shift = false;
-
-    cache.recurrent = llama_model_is_recurrent(&model);
-    cache.v_trans   = !cache.recurrent && !cparams.flash_attn;
-
-    cache.head = 0;
-    cache.size = kv_size;
-    cache.used = 0;
-
-    cache.type_k = type_k;
-    cache.type_v = type_v;
-
-    cache.cells.clear();
-    cache.cells.resize(kv_size);
-
-    // create a context for each buffer type
-    std::map ctx_map;
-    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
-        auto it = ctx_map.find(buft);
-        if (it == ctx_map.end()) {
-            struct ggml_init_params params = {
-                /*.mem_size   =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
-                /*.mem_buffer =*/ NULL,
-                /*.no_alloc   =*/ true,
-            };
-            ggml_context * ctx = ggml_init(params);
-            if (!ctx) {
-                return nullptr;
-            }
-            ctx_map[buft] = ctx;
-            cache.ctxs.emplace_back(ctx);
-            return ctx;
-        }
-        return it->second;
-    };
-
-    cache.k_l.reserve(n_layer);
-    cache.v_l.reserve(n_layer);
-
-    for (int i = 0; i < n_layer; i++) {
-        const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(i) + hparams.n_embd_k_s();
-        const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(i) + hparams.n_embd_v_s();
-
-        LLAMA_LOG_DEBUG("%s: layer %d: n_embd_k_gqa = %d, n_embd_v_gqa = %d\n", __func__, i, n_embd_k_gqa, n_embd_v_gqa);
-
-        ggml_backend_buffer_type_t buft;
-        if (offload) {
-            auto * dev = model.dev_layer.at(i).dev;
-            buft = ggml_backend_dev_buffer_type(dev);
-        } else {
-            buft = ggml_backend_cpu_buffer_type();
-        }
-        ggml_context * ctx = ctx_for_buft(buft);
-
-        if (!ctx) {
-            LLAMA_LOG_ERROR("%s: failed to create ggml context for kv cache\n", __func__);
-            return false;
-        }
-
-        ggml_tensor * k = ggml_new_tensor_1d(ctx, type_k, n_embd_k_gqa*kv_size);
-        ggml_tensor * v = ggml_new_tensor_1d(ctx, type_v, n_embd_v_gqa*kv_size);
-        ggml_format_name(k, "cache_k_l%d", i);
-        ggml_format_name(v, "cache_v_l%d", i);
-        cache.k_l.push_back(k);
-        cache.v_l.push_back(v);
-    }
-
-    // allocate tensors and initialize the buffers to avoid NaNs in the padding
-    for (auto it : ctx_map) {
-        auto * buft = it.first;
-        auto * ctx  = it.second;
-
-        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
-        if (!buf) {
-            LLAMA_LOG_ERROR("%s: failed to allocate buffer for kv cache\n", __func__);
-            return false;
-        }
-        ggml_backend_buffer_clear(buf, 0);
-        LLAMA_LOG_INFO("%s: %10s KV buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
-        cache.bufs.emplace_back(buf);
-    }
-
-    return true;
-}
-
-// a structure holds information about the slot found in llama_kv_cache_find_slot
-struct llama_kv_cache_slot_info {
-    std::pair boundaries; // slot boundaries [begin, end)
-    bool found = false;                       // the slot was found
-
-    explicit llama_kv_cache_slot_info(bool found_) : found{found_} {}
-    llama_kv_cache_slot_info(uint32_t begin, uint32_t end) : boundaries{begin, end}, found{true} {}
-
-    operator bool() const { return found; }
-};
-static const llama_kv_cache_slot_info llama_kv_cache_slot_info_failed{false};
-
-// find an empty slot of size "n_tokens" in the cache
-// updates the cache head
-// returns a structure holding information about the slot found
-// Note: On success, it's important that cache.head points
-// to the first cell of the slot.
-static struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
-           struct llama_kv_cache & cache,
-       const struct llama_ubatch & batch) {
-    const uint32_t n_tokens = batch.n_tokens;
-    const uint32_t n_seqs   = batch.n_seqs;
-    const uint32_t n_seq_tokens = batch.n_seq_tokens;
-
-    if (cache.recurrent) {
-        // For recurrent state architectures (like Mamba or RWKV),
-        // each cache cell can store the state for a whole sequence.
-        // A slot should be always be contiguous.
-
-        // can only process batches with an equal number of new tokens in each sequence
-        GGML_ASSERT(batch.equal_seqs);
-
-        int32_t min = cache.size - 1;
-        int32_t max = 0;
-
-        // everything should fit if all seq_ids are smaller than the max
-        for (uint32_t s = 0; s < n_seqs; ++s) {
-            const uint32_t n_seq_id = batch.n_seq_id[s];
-            for (uint32_t j = 0; j < n_seq_id; ++j) {
-                const llama_seq_id seq_id = batch.seq_id[s][j];
-
-                if (seq_id < 0 || (uint32_t) seq_id >= cache.size) {
-                    // too big seq_id
-                    // TODO: would it be possible to resize the cache instead?
-                    LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%d Try using a bigger --parallel value\n", __func__, seq_id, cache.size);
-                    return llama_kv_cache_slot_info_failed;
-                }
-                if (j > 0) {
-                    llama_kv_cell & seq = cache.cells[seq_id];
-                    if (seq.tail >= 0) {
-                        llama_kv_cell & cell = cache.cells[seq.tail];
-                        // clear cells from seq_ids that become shared
-                        // (should not normally happen, but let's handle it anyway)
-                        cell.seq_id.erase(seq_id);
-                        seq.tail = -1;
-                        if (cell.seq_id.empty()) {
-                            cell.pos = -1;
-                            cell.src = -1;
-                            cache.used -= 1;
-                        }
-                    }
-                }
-            }
-        }
-
-#ifndef NDEBUG
-        {
-            std::vector tails_verif;
-            tails_verif.assign(cache.size, -1);
-            for (uint32_t i = 0; i < cache.size; ++i) {
-                llama_kv_cell & cell = cache.cells[i];
-                for (llama_seq_id seq_id : cell.seq_id) {
-                    if (tails_verif[seq_id] != -1) {
-                        LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
-                    }
-                    tails_verif[seq_id] = i;
-                }
-            }
-            for (uint32_t i = 0; i < cache.size; ++i) {
-                if (tails_verif[i] != cache.cells[i].tail) {
-                    LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cache.cells[i].tail, tails_verif[i]);
-                }
-            }
-        }
-#endif
-
-        // find next empty cell
-        uint32_t next_empty_cell = cache.head;
-
-        for (uint32_t i = 0; i < cache.size; ++i) {
-            if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; }
-            llama_kv_cell & cell = cache.cells[next_empty_cell];
-            if (cell.is_empty()) { break; }
-            next_empty_cell += 1;
-        }
-
-        // find usable cell range
-        for (uint32_t s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = batch.seq_id[s][0];
-            llama_kv_cell & seq_meta = cache.cells[seq_id];
-            bool has_cell = false;
-            if (seq_meta.tail >= 0) {
-                llama_kv_cell & cell = cache.cells[seq_meta.tail];
-                GGML_ASSERT(cell.has_seq_id(seq_id));
-                // does this seq_id "own" the cell?
-                if (cell.seq_id.size() == 1) { has_cell = true; }
-            }
-            if (!has_cell) {
-                llama_kv_cell & empty_cell = cache.cells[next_empty_cell];
-                GGML_ASSERT(empty_cell.is_empty());
-                // copy old tail into the empty cell
-                if (seq_meta.tail >= 0) {
-                    llama_kv_cell & orig_cell = cache.cells[seq_meta.tail];
-                    empty_cell.pos = orig_cell.pos;
-                    empty_cell.src = orig_cell.src;
-                    orig_cell.seq_id.erase(seq_id);
-                    empty_cell.seq_id.insert(seq_id); // will be overwritten
-                }
-                seq_meta.tail = next_empty_cell;
-                // find next empty cell
-                if (s + 1 < n_seqs) {
-                    next_empty_cell += 1;
-                    for (uint32_t i = 0; i < cache.size; ++i) {
-                        if (next_empty_cell >= cache.size) { next_empty_cell -= cache.size; }
-                        llama_kv_cell & cell = cache.cells[next_empty_cell];
-                        if (cell.is_empty()) { break; }
-                        next_empty_cell += 1;
-                    }
-                }
-            }
-            if (min > seq_meta.tail) { min = seq_meta.tail; }
-            if (max < seq_meta.tail) { max = seq_meta.tail; }
-        }
-
-        // gather and re-order
-        for (uint32_t s = 0; s < n_seqs; ++s) {
-            int32_t dst_id = s + min;
-            int32_t src_id = cache.cells[batch.seq_id[s][0]].tail;
-            if (dst_id != src_id) {
-                llama_kv_cell & dst_cell = cache.cells[dst_id];
-                llama_kv_cell & src_cell = cache.cells[src_id];
-
-                std::swap(dst_cell.pos, src_cell.pos);
-                std::swap(dst_cell.src, src_cell.src);
-                std::swap(dst_cell.seq_id, src_cell.seq_id);
-
-                // swap tails (assuming they NEVER overlap)
-                for (const llama_seq_id seq_id : src_cell.seq_id) {
-                    cache.cells[seq_id].tail = src_id;
-                }
-                for (const llama_seq_id seq_id : dst_cell.seq_id) {
-                    cache.cells[seq_id].tail = dst_id;
-                }
-            }
-        }
-
-        // update the pos of the used seqs
-        for (uint32_t s = 0; s < n_seqs; ++s) {
-            const llama_pos last_pos = batch.pos[n_seq_tokens * s + n_seq_tokens - 1];
-            int32_t cell_id = s + min;
-            llama_kv_cell & cell = cache.cells[cell_id];
-
-            if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
-                // What should happen when the pos backtracks or skips a value?
-                // Clearing the state mid-batch would require special-casing which isn't done.
-                LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
-                    __func__, last_pos, cell.pos, batch.seq_id[s][0], n_seq_tokens);
-            }
-            cell.pos = last_pos;
-            cell.seq_id.clear();
-            for (int32_t j = 0; j < batch.n_seq_id[s]; ++j) {
-                const llama_seq_id seq_id = batch.seq_id[s][j];
-                cell.seq_id.insert(seq_id);
-                cache.cells[seq_id].tail = cell_id;
-            }
-        }
-
-        // allow getting the range of used cells, from head to head + n
-        cache.head = min;
-        cache.n    = max - min + 1;
-        cache.used = std::count_if(cache.cells.begin(), cache.cells.end(),
-            [](const llama_kv_cell& cell){ return !cell.is_empty(); });
-
-        // sanity check
-        return llama_kv_cache_slot_info(cache.n >= n_seqs);
-    }
-    // otherwise, one cell per token.
-
-    if (n_tokens > cache.size) {
-        LLAMA_LOG_ERROR("%s: n_tokens=%d > cache.size=%d\n", __func__, n_tokens, cache.size);
-        return llama_kv_cache_slot_info_failed;
-    }
-
-    uint32_t n_tested = 0;
-
-    while (true) {
-        if (cache.head + n_tokens > cache.size) {
-            n_tested += cache.size - cache.head;
-            cache.head = 0;
-            continue;
-        }
-
-        bool found = true;
-        for (uint32_t i = 0; i < n_tokens; i++) {
-            if (cache.cells[cache.head + i].pos >= 0) {
-                found = false;
-                cache.head += i + 1;
-                n_tested   += i + 1;
-                break;
-            }
-        }
-
-        if (found) {
-            break;
-        }
-
-        if (n_tested >= cache.size) {
-            //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
-            return llama_kv_cache_slot_info_failed;
-        }
-    }
-
-    for (uint32_t s = 0; s < n_seqs; s++) {
-        for (uint32_t i = 0; i < n_seq_tokens; ++i) {
-            uint32_t k = s*n_seq_tokens + i;
-            cache.cells[cache.head + k].pos = batch.pos[k];
-
-            for (int32_t j = 0; j < batch.n_seq_id[s]; j++) {
-                cache.cells[cache.head + k].seq_id.insert(batch.seq_id[s][j]);
-            }
-        }
-    }
-
-    cache.used += n_tokens;
-
-    return llama_kv_cache_slot_info(cache.head, cache.head + n_tokens);
-}
-
-// find how many cells are currently in use
-static uint32_t llama_kv_cache_cell_max(const struct llama_kv_cache & cache) {
-    for (uint32_t i = cache.size; i > 0; --i) {
-        const llama_kv_cell & cell = cache.cells[i - 1];
-
-        if (cell.pos >= 0 && !cell.is_empty()) {
-            return i;
-        }
-    }
-
-    return 0;
-}
-
-static void llama_kv_cache_clear(struct llama_kv_cache & cache) {
-    for (int32_t i = 0; i < (int32_t) cache.size; ++i) {
-        cache.cells[i].pos = -1;
-        cache.cells[i].seq_id.clear();
-        cache.cells[i].src = -1;
-        cache.cells[i].tail = -1;
-    }
-    cache.head = 0;
-    cache.used = 0;
-
-    for (auto & buf : cache.bufs) {
-        ggml_backend_buffer_clear(buf.get(), 0);
-    }
-}
-
-static bool llama_kv_cache_seq_rm(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id,
-                    llama_pos   p0,
-                    llama_pos   p1) {
-    uint32_t new_head = cache.size;
-
-    if (p0 < 0) p0 = 0;
-    if (p1 < 0) p1 = std::numeric_limits::max();
-
-    // models like Mamba or RWKV can't have a state partially erased
-    if (cache.recurrent) {
-        if (seq_id >= (int64_t) cache.size) {
-            // could be fatal
-            return false;
-        }
-        if (0 <= seq_id) {
-            int32_t & tail_id = cache.cells[seq_id].tail;
-            if (tail_id >= 0) {
-                const llama_kv_cell & cell = cache.cells[tail_id];
-                // partial intersection is invalid
-                if ((0 < p0 && p0 <= cell.pos) || (0 < p1 && p1 <= cell.pos)) {
-                    return false;
-                }
-                // invalidate tails which will be cleared
-                if (p0 <= cell.pos && cell.pos < p1) {
-                    tail_id = -1;
-                }
-            }
-        } else {
-            // seq_id is negative, then the range should include everything or nothing
-            if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits::max())) {
-                return false;
-            }
-        }
-    }
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
-            if (seq_id < 0) {
-                cache.cells[i].seq_id.clear();
-            } else if (cache.cells[i].has_seq_id(seq_id)) {
-                cache.cells[i].seq_id.erase(seq_id);
-            } else {
-                continue;
-            }
-            if (cache.cells[i].is_empty()) {
-                // keep count of the number of used cells
-                if (cache.cells[i].pos >= 0) cache.used--;
-
-                cache.cells[i].pos = -1;
-                cache.cells[i].src = -1;
-                if (new_head == cache.size) new_head = i;
-            }
-        }
-    }
-
-    // If we freed up a slot, set head to it so searching can start there.
-    if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
-
-    return true;
-}
-
-static void llama_kv_cache_seq_cp(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id_src,
-                 llama_seq_id   seq_id_dst,
-                    llama_pos   p0,
-                    llama_pos   p1) {
-    if (p0 < 0) p0 = 0;
-    if (p1 < 0) p1 = std::numeric_limits::max();
-
-    if (cache.recurrent) {
-        if ((uint32_t) seq_id_dst < cache.size && (uint32_t) seq_id_src < cache.size) {
-            llama_kv_cell & tail_src = cache.cells[seq_id_src];
-            llama_kv_cell & tail_dst = cache.cells[seq_id_dst];
-            if (tail_dst.tail >= 0) {
-                // clear destination seq_id if it wasn't empty
-                llama_kv_cell & cell_dst = cache.cells[tail_dst.tail];
-
-                cell_dst.seq_id.erase(seq_id_dst);
-                tail_dst.tail = -1;
-                if (cell_dst.seq_id.empty()) {
-                    cell_dst.pos = -1;
-                    cell_dst.delta = -1;
-                    cell_dst.src = -1;
-                    cache.used -= 1;
-                }
-            }
-            if (tail_src.tail >= 0) {
-                llama_kv_cell & cell_src = cache.cells[tail_src.tail];
-
-                cell_src.seq_id.insert(seq_id_dst);
-                tail_dst.tail = tail_src.tail;
-            }
-        }
-
-        return;
-    }
-    // otherwise, this is the KV cache of a Transformer-like model
-
-    cache.head = 0;
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
-            cache.cells[i].seq_id.insert(seq_id_dst);
-        }
-    }
-}
-
-static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
-    uint32_t new_head = cache.size;
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.recurrent && (llama_seq_id) i != seq_id) {
-            cache.cells[i].tail = -1;
-        }
-        if (!cache.cells[i].has_seq_id(seq_id)) {
-            if (cache.cells[i].pos >= 0) cache.used--;
-            cache.cells[i].pos = -1;
-            cache.cells[i].src = -1;
-            cache.cells[i].seq_id.clear();
-            if (new_head == cache.size) new_head = i;
-        } else {
-            cache.cells[i].seq_id.clear();
-            cache.cells[i].seq_id.insert(seq_id);
-        }
-    }
-
-    // If we freed up a slot, set head to it so searching can start there.
-    if (new_head != cache.size && new_head < cache.head) cache.head = new_head;
-}
-
-static void llama_kv_cache_seq_add(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id,
-                    llama_pos   p0,
-                    llama_pos   p1,
-                    llama_pos   delta) {
-    uint32_t new_head = cache.size;
-
-    if (p0 < 0) p0 = 0;
-    if (p1 < 0) p1 = std::numeric_limits::max();
-    // If there is no range then return early to avoid looping over the cache.
-    if (p0 == p1) return;
-
-    if (cache.recurrent) {
-        // for Mamba-like or RWKV models, only the pos needs to be shifted
-        if (0 <= seq_id && seq_id < (int64_t) cache.size) {
-            const int32_t tail_id = cache.cells[seq_id].tail;
-            if (tail_id >= 0) {
-                llama_kv_cell & cell = cache.cells[tail_id];
-                if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
-                    cell.pos += delta;
-                }
-            }
-        }
-        return;
-    }
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
-            cache.has_shift = true;
-            cache.cells[i].pos   += delta;
-            cache.cells[i].delta += delta;
-
-            if (cache.cells[i].pos < 0) {
-                if (!cache.cells[i].is_empty()) {
-                    cache.used--;
-                }
-                cache.cells[i].pos = -1;
-                cache.cells[i].seq_id.clear();
-                if (new_head == cache.size) {
-                    new_head = i;
-                }
-            }
-        }
-    }
-
-    // If we freed up a slot, set head to it so searching can start there.
-    // Otherwise we just start the next search from the beginning.
-    cache.head = new_head != cache.size ? new_head : 0;
-}
-
-static void llama_kv_cache_seq_div(
-        struct llama_kv_cache & cache,
-                 llama_seq_id   seq_id,
-                    llama_pos   p0,
-                    llama_pos   p1,
-                          int   d) {
-    if (p0 < 0) p0 = 0;
-    if (p1 < 0) p1 = std::numeric_limits::max();
-    // If there is no range then return early to avoid looping over the cache.
-    if (p0 == p1) return;
-
-    if (cache.recurrent) {
-        // for Mamba-like or RWKV models, only the pos needs to be changed
-        if (0 <= seq_id && seq_id < (int64_t) cache.size) {
-            const int32_t tail_id = cache.cells[seq_id].tail;
-            if (tail_id >= 0) {
-                llama_kv_cell & cell = cache.cells[tail_id];
-                if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
-                    cell.pos /= d;
-                }
-            }
-        }
-        return;
-    }
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].has_seq_id(seq_id) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
-            cache.has_shift = true;
-
-            {
-                llama_pos p_old = cache.cells[i].pos;
-                cache.cells[i].pos   /= d;
-                cache.cells[i].delta += cache.cells[i].pos - p_old;
-            }
-        }
-    }
-}
-
-static llama_pos llama_kv_cache_seq_pos_max(struct llama_kv_cache & cache, llama_seq_id seq_id) {
-    llama_pos result = 0;
-
-    for (uint32_t i = 0; i < cache.size; ++i) {
-        if (cache.cells[i].has_seq_id(seq_id)) {
-            result = std::max(result, cache.cells[i].pos);
-        }
-    }
-
-    return result;
-}
-
-static void llama_kv_cache_defrag(struct llama_kv_cache & cache) {
-    if (!cache.recurrent) {
-        cache.do_defrag = true;
-    }
-}
-
-static uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) {
-    // the FA kernels require padding to avoid extra runtime boundary checks
-    return cparams.flash_attn ? 256u : 32u;
-}
-
-// saves the kv_cache state for future recovery.
-// used to rollback llama_kv_cache_find_slot changes.
-struct llama_kv_slot_restorer {
-    struct llama_kv_cache_state {
-        uint32_t head = 0;
-        uint32_t n    = 0;
-    } old_state;
-
-    // for non-recurrent models only
-    // list of slots to restore
-    std::vector> slot_boundaries;
-
-    bool do_restore = false;
-
-    explicit llama_kv_slot_restorer(const struct llama_kv_cache & cache) {
-        old_state.head  = cache.head;
-        old_state.n     = cache.n;
-    }
-
-    // saves a slot information for future restoration
-    void save(const struct llama_kv_cache_slot_info & slot) {
-        if (slot) {
-            do_restore = true;
-            if (slot.boundaries.first != slot.boundaries.second) {
-                slot_boundaries.push_back(slot.boundaries);
-            }
-        }
-    }
-
-    // must be explicitly called to restore the kv_cache state
-    // and rollback changes from all llama_kv_cache_find_slot calls
-    void restore(struct llama_kv_cache & cache) {
-        if (do_restore) {
-            cache.head  = old_state.head;
-            cache.n     = old_state.n;
-
-            if (cache.recurrent) { // recurrent models like Mamba or RWKV can't have a state partially erased
-                llama_kv_cache_seq_rm(cache, -1, -1, -1);
-            } else {
-                for (auto & slot : slot_boundaries) {
-                    llama_kv_cache_seq_rm(cache, -1, slot.first, slot.second);
-                }
-            }
-        }
-    }
-};
-
-//
-// model loading and saving
-//
-
-enum llama_fver {
-    GGUF_FILE_VERSION_V1 = 1,
-    GGUF_FILE_VERSION_V2 = 2,
-    GGUF_FILE_VERSION_V3 = 3,
-};
-
-static const char * llama_file_version_name(llama_fver version) {
-    switch (version) {
-        case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)";
-        case GGUF_FILE_VERSION_V2: return "GGUF V2";
-        case GGUF_FILE_VERSION_V3: return "GGUF V3 (latest)";
-    }
-
-    return "unknown";
-}
-
-static std::string llama_format_tensor_shape(const std::vector & ne) {
-    char buf[256];
-    snprintf(buf, sizeof(buf), "%5" PRId64, ne.at(0));
-    for (size_t i = 1; i < ne.size(); i++) {
-        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, ne.at(i));
-    }
-    return buf;
-}
-
-static std::string llama_format_tensor_shape(const struct ggml_tensor * t) {
-    char buf[256];
-    snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]);
-    for (int i = 1; i < GGML_MAX_DIMS; i++) {
-        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]);
-    }
-    return buf;
-}
-
-namespace GGUFMeta {
-    template 
-    struct GKV_Base_Type {
-        static constexpr gguf_type gt = gt_;
-
-        static T getter(const gguf_context * ctx, const int kid) {
-            return gfun(ctx, kid);
-        }
-    };
-
-    template struct GKV_Base;
-
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-    template<> struct GKV_Base: GKV_Base_Type {};
-
-    template<> struct GKV_Base {
-        static constexpr gguf_type gt = GGUF_TYPE_STRING;
-
-        static std::string getter(const gguf_context * ctx, const int kid) {
-            return gguf_get_val_str(ctx, kid);
-        }
-    };
-
-    struct ArrayInfo {
-        const gguf_type gt;
-        const size_t length;
-        const void * data;
-    };
-
-    template<> struct GKV_Base {
-        public:
-        static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
-        static ArrayInfo getter(const gguf_context *ctx, const int k) {
-            return ArrayInfo {
-                gguf_get_arr_type(ctx, k),
-                size_t(gguf_get_arr_n(ctx, k)),
-                gguf_get_arr_data(ctx, k),
-            };
-        }
-    };
-
-    template
-    class GKV : public GKV_Base {
-        GKV() = delete;
-
-        public:
-        static T get_kv(const gguf_context * ctx, const int k) {
-            const enum gguf_type kt = gguf_get_kv_type(ctx, k);
-
-            if (kt != GKV::gt) {
-                throw std::runtime_error(format("key %s has wrong type %s but expected type %s",
-                    gguf_get_key(ctx, k), gguf_type_name(kt), gguf_type_name(GKV::gt)));
-            }
-            return GKV::getter(ctx, k);
-        }
-
-        static const char * override_type_to_str(const llama_model_kv_override_type ty) {
-            switch (ty) {
-                case LLAMA_KV_OVERRIDE_TYPE_BOOL:  return "bool";
-                case LLAMA_KV_OVERRIDE_TYPE_INT:   return "int";
-                case LLAMA_KV_OVERRIDE_TYPE_FLOAT: return "float";
-                case LLAMA_KV_OVERRIDE_TYPE_STR:   return "str";
-            }
-            return "unknown";
-        }
-
-        static bool validate_override(const llama_model_kv_override_type expected_type, const struct llama_model_kv_override * ovrd) {
-            if (!ovrd) { return false; }
-            if (ovrd->tag == expected_type) {
-                LLAMA_LOG_INFO("%s: Using metadata override (%5s) '%s' = ",
-                    __func__, override_type_to_str(ovrd->tag), ovrd->key);
-                switch (ovrd->tag) {
-                    case LLAMA_KV_OVERRIDE_TYPE_BOOL:  {
-                        LLAMA_LOG_INFO("%s\n", ovrd->val_bool ? "true" : "false");
-                    } break;
-                    case LLAMA_KV_OVERRIDE_TYPE_INT:   {
-                        LLAMA_LOG_INFO("%" PRId64 "\n", ovrd->val_i64);
-                    } break;
-                    case LLAMA_KV_OVERRIDE_TYPE_FLOAT: {
-                        LLAMA_LOG_INFO("%.6f\n", ovrd->val_f64);
-                    } break;
-                    case LLAMA_KV_OVERRIDE_TYPE_STR: {
-                        LLAMA_LOG_INFO("%s\n", ovrd->val_str);
-                    } break;
-                    default:
-                        // Shouldn't be possible to end up here, but just in case...
-                        throw std::runtime_error(
-                            format("Unsupported attempt to override %s type for metadata key %s\n",
-                                override_type_to_str(ovrd->tag), ovrd->key));
-                }
-                return true;
-            }
-            LLAMA_LOG_WARN("%s: Warning: Bad metadata override type for key '%s', expected %s but got %s\n",
-                __func__, ovrd->key, override_type_to_str(expected_type), override_type_to_str(ovrd->tag));
-            return false;
-        }
-
-        template
-        static typename std::enable_if::value, bool>::type
-        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
-            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_BOOL, ovrd)) {
-                target = ovrd->val_bool;
-                return true;
-            }
-            return false;
-        }
-
-        template
-        static typename std::enable_if::value && std::is_integral::value, bool>::type
-        try_override(OT & target, const struct llama_model_kv_override * ovrd) {
-            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_INT, ovrd)) {
-                target = ovrd->val_i64;
-                return true;
-            }
-            return false;
-        }
-
-        template
-        static typename std::enable_if::value, bool>::type
-        try_override(T & target, const struct llama_model_kv_override * ovrd) {
-            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_FLOAT, ovrd)) {
-                target = ovrd->val_f64;
-                return true;
-            }
-            return false;
-        }
-
-        template
-        static typename std::enable_if::value, bool>::type
-        try_override(T & target, const struct llama_model_kv_override * ovrd) {
-            if (validate_override(LLAMA_KV_OVERRIDE_TYPE_STR, ovrd)) {
-                target = ovrd->val_str;
-                return true;
-            }
-            return false;
-        }
-
-        static bool set(const gguf_context * ctx, const int k, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
-            if (try_override(target, ovrd)) {
-                return true;
-            }
-            if (k < 0) { return false; }
-            target = get_kv(ctx, k);
-            return true;
-        }
-
-        static bool set(const gguf_context * ctx, const char * key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
-            return set(ctx, gguf_find_key(ctx, key), target, ovrd);
-        }
-
-        static bool set(const gguf_context * ctx, const std::string & key, T & target, const struct llama_model_kv_override * ovrd = nullptr) {
-            return set(ctx, key.c_str(), target, ovrd);
-        }
-    };
-}
-
-using llama_buf_map = std::unordered_map;
-
-static size_t llama_model_max_nodes(const llama_model & model) {
-    return std::max(8192, model.tensors_by_name.size()*5);
-}
-
-struct llama_model_loader {
-    int n_kv      = 0;
-    int n_tensors = 0;
-    int n_created = 0;
-
-    uint64_t n_elements = 0;
-    size_t  n_bytes     = 0;
-
-    bool use_mmap = false;
-    bool check_tensors;
-
-    llama_files files;
-    llama_ftype ftype;
-    llama_fver  fver;
-
-    llama_mmaps mappings;
-
-    // Holds information on a model weight
-    struct llama_tensor_weight {
-        uint16_t  idx; // source file index
-        size_t   offs; // tensor data offset in the original file
-
-        ggml_tensor * tensor;
-
-        llama_tensor_weight(const llama_file * file, uint16_t idx, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) {
-            const int tensor_idx = gguf_find_tensor(gguf_ctx,  ggml_get_name(tensor));
-            if (tensor_idx < 0) {
-                throw std::runtime_error(format("tensor '%s' not found in the model", ggml_get_name(tensor)));
-            }
-
-            offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);
-            if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) {
-                throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", ggml_get_name(tensor)));
-            }
-        }
-    };
-
-    // custom comparator to sort weights more nicely by layer
-    struct weight_name_comparer {
-        bool operator()(const std::string & a, const std::string & b) const {
-            int a_layer = -1;
-            int b_layer = -1;
-            sscanf(a.c_str(), "blk.%d.", &a_layer);
-            sscanf(b.c_str(), "blk.%d.", &b_layer);
-            if (a_layer != b_layer) {
-                return a_layer < b_layer;
-            }
-            return a < b;
-        }
-    };
-
-    std::map weights_map;
-    std::unordered_map kv_overrides;
-
-    gguf_context_ptr meta;
-    std::vector contexts;
-
-    std::string arch_name;
-    LLM_KV      llm_kv    = LLM_KV(LLM_ARCH_UNKNOWN);
-
-    llama_model_loader(const std::string & fname, bool use_mmap, bool check_tensors, const struct llama_model_kv_override * param_overrides_p) {
-        int trace = 0;
-        if (getenv("LLAMA_TRACE")) {
-            trace = atoi(getenv("LLAMA_TRACE"));
-        }
-
-        if (param_overrides_p != nullptr) {
-            for (const struct llama_model_kv_override * p = param_overrides_p; p->key[0] != 0; p++) {
-                kv_overrides.insert({std::string(p->key), *p});
-            }
-        }
-
-        struct ggml_context * ctx = NULL;
-        struct gguf_init_params params = {
-            /*.no_alloc = */ true,
-            /*.ctx      = */ &ctx,
-        };
-
-        meta.reset(gguf_init_from_file(fname.c_str(), params));
-        if (!meta) {
-            throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
-        }
-
-        get_key(llm_kv(LLM_KV_GENERAL_ARCHITECTURE), arch_name, false);
-        llm_kv = LLM_KV(llm_arch_from_string(arch_name));
-
-        files.emplace_back(new llama_file(fname.c_str(), "rb"));
-        contexts.emplace_back(ctx);
-
-        // Save tensors data offset of the main file.
-        // For subsidiary files, `meta` tensor data offset must not be used,
-        // so we build a unified tensors index for weights.
-        for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
-            std::string tensor_name = std::string(cur->name);
-            // make sure there is no duplicated tensor names
-            if (weights_map.find(tensor_name) != weights_map.end()) {
-                throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur)));
-            }
-            n_elements += ggml_nelements(cur);
-            n_bytes    += ggml_nbytes(cur);
-            weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), 0, meta.get(), cur));
-        }
-        uint16_t n_split = 0;
-        get_key(llm_kv(LLM_KV_SPLIT_COUNT), n_split, false);
-
-        // Load additional GGML contexts
-        if (n_split > 1) {
-            uint16_t idx = 0;
-            get_key(llm_kv(LLM_KV_SPLIT_NO), idx);
-            if (idx != 0) {
-                throw std::runtime_error(format("illegal split file: %d, model must be loaded with the first split", idx));
-            }
-
-            char split_prefix[PATH_MAX] = {0};
-            if (!llama_split_prefix(split_prefix, sizeof(split_prefix), fname.c_str(), idx, n_split)) {
-                throw std::runtime_error(format("invalid split file: %s", fname.c_str()));
-            }
-
-            if (trace > 0) {
-                LLAMA_LOG_INFO("%s: loading additional %d GGUFs\n", __func__, n_split);
-            }
-
-            char split_path[PATH_MAX] = {0};
-            for (idx = 1; idx < n_split; idx++) {
-                llama_split_path(split_path, sizeof(split_path), split_prefix, idx, n_split);
-
-                struct gguf_init_params split_params = {
-                    /*.no_alloc = */ true,
-                    /*.ctx      = */ &ctx,
-                };
-                gguf_context_ptr ctx_gguf { gguf_init_from_file(split_path, split_params) };
-                if (!ctx_gguf) {
-                    throw std::runtime_error(format("%s: failed to load GGUF split from %s\n", __func__, split_path));
-                }
-
-                files.emplace_back(new llama_file(split_path, "rb"));
-                contexts.emplace_back(ctx);
-
-                // Save tensors data offset info of the shard.
-                for (ggml_tensor * cur = ggml_get_first_tensor(ctx); cur; cur = ggml_get_next_tensor(ctx, cur)) {
-                    std::string tensor_name = std::string(cur->name);
-                    // make sure there is no duplicated tensor names
-                    if (weights_map.find(tensor_name) != weights_map.end()) {
-                        throw std::runtime_error(format("invalid model: tensor '%s' is duplicated", ggml_get_name(cur)));
-                    }
-                    n_elements += ggml_nelements(cur);
-                    n_bytes    += ggml_nbytes(cur);
-                    weights_map.emplace(tensor_name, llama_tensor_weight(files.back().get(), idx, ctx_gguf.get(), cur));
-                }
-            }
-
-            get_key(llm_kv(LLM_KV_SPLIT_TENSORS_COUNT), n_tensors);
-
-            // sanity check
-            {
-                const int n_tensors_loaded = (int) weights_map.size();
-                if (n_tensors != n_tensors_loaded) {
-                    throw std::runtime_error(format("corrupted model: %d tensors expected but %d found", n_tensors, n_tensors_loaded));
-                }
-            }
-
-            LLAMA_LOG_INFO("%s: additional %d GGUFs metadata loaded.\n",  __func__, n_split - 1);
-        }
-
-        n_kv      = gguf_get_n_kv(meta.get());
-        n_tensors = weights_map.size();
-
-        fver = (enum llama_fver) gguf_get_version(meta.get());
-
-        LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
-                __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver));
-
-        // determine file type based on the number of tensors for each quantization and print meta data
-        // TODO: make optional
-        {
-            std::map n_type;
-
-            uint32_t n_type_max = 0;
-            enum ggml_type type_max = GGML_TYPE_F32;
-
-            for (const auto & it : weights_map) {
-                const llama_tensor_weight & w = it.second;
-                const ggml_tensor * tensor = w.tensor;
-
-                enum ggml_type type = tensor->type;
-
-                n_type[type]++;
-
-                if (n_type_max < n_type[type]) {
-                    n_type_max = n_type[type];
-                    type_max   = type;
-                }
-
-                if (trace > 0) {
-                    const uint16_t sid = w.idx;
-                    LLAMA_LOG_INFO("%s: - tensor split %2d: %32s %-8s [ %s ]\n", __func__, sid, ggml_get_name(tensor), ggml_type_name(type), llama_format_tensor_shape(tensor).c_str());
-                }
-            }
-
-            switch (type_max) {
-                case GGML_TYPE_F32:     ftype = LLAMA_FTYPE_ALL_F32;        break;
-                case GGML_TYPE_F16:     ftype = LLAMA_FTYPE_MOSTLY_F16;     break;
-                case GGML_TYPE_BF16:    ftype = LLAMA_FTYPE_MOSTLY_BF16;    break;
-                case GGML_TYPE_Q4_0:    ftype = LLAMA_FTYPE_MOSTLY_Q4_0;    break;
-                case GGML_TYPE_Q4_1:    ftype = LLAMA_FTYPE_MOSTLY_Q4_1;    break;
-                case GGML_TYPE_Q5_0:    ftype = LLAMA_FTYPE_MOSTLY_Q5_0;    break;
-                case GGML_TYPE_Q5_1:    ftype = LLAMA_FTYPE_MOSTLY_Q5_1;    break;
-                case GGML_TYPE_Q8_0:    ftype = LLAMA_FTYPE_MOSTLY_Q8_0;    break;
-                case GGML_TYPE_Q2_K:    ftype = LLAMA_FTYPE_MOSTLY_Q2_K;    break;
-                case GGML_TYPE_Q3_K:    ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M;  break;
-                case GGML_TYPE_Q4_K:    ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M;  break;
-                case GGML_TYPE_Q5_K:    ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M;  break;
-                case GGML_TYPE_Q6_K:    ftype = LLAMA_FTYPE_MOSTLY_Q6_K;    break;
-                case GGML_TYPE_TQ1_0:   ftype = LLAMA_FTYPE_MOSTLY_TQ1_0;   break;
-                case GGML_TYPE_TQ2_0:   ftype = LLAMA_FTYPE_MOSTLY_TQ2_0;   break;
-                case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
-                case GGML_TYPE_IQ2_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS;  break;
-                case GGML_TYPE_IQ2_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ2_S;   break;
-                case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
-                case GGML_TYPE_IQ1_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_S;   break;
-                case GGML_TYPE_IQ1_M:   ftype = LLAMA_FTYPE_MOSTLY_IQ1_M;   break;
-                case GGML_TYPE_IQ4_NL:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL;  break;
-                case GGML_TYPE_IQ4_XS:  ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS;  break;
-                case GGML_TYPE_IQ3_S:   ftype = LLAMA_FTYPE_MOSTLY_IQ3_S;   break;
-                default:
-                    {
-                        LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
-                        ftype = LLAMA_FTYPE_ALL_F32;
-                    } break;
-            }
-
-            // this is a way to mark that we have "guessed" the file type
-            ftype = (llama_ftype) (ftype | LLAMA_FTYPE_GUESSED);
-
-            {
-                const int kid = gguf_find_key(meta.get(), "general.file_type"); // TODO: use LLM_KV
-                if (kid >= 0) {
-                    ftype = (llama_ftype) gguf_get_val_u32(meta.get(), kid);
-                }
-            }
-
-            LLAMA_LOG_INFO("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
-
-            for (int i = 0; i < n_kv; i++) {
-                const char * name           = gguf_get_key(meta.get(), i);
-                const enum gguf_type type   = gguf_get_kv_type(meta.get(), i);
-                const std::string type_name =
-                    type == GGUF_TYPE_ARRAY
-                    ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i))
-                    : gguf_type_name(type);
-
-                std::string value          = gguf_kv_to_str(meta.get(), i);
-                const size_t MAX_VALUE_LEN = 40;
-                if (value.size() > MAX_VALUE_LEN) {
-                    value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
-                }
-                replace_all(value, "\n", "\\n");
-
-                LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
-            }
-
-            // print type counts
-            for (auto & kv : n_type) {
-                if (kv.second == 0) {
-                    continue;
-                }
-
-                LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second);
-            }
-        }
-
-        if (!llama_mmap::SUPPORTED) {
-            LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__);
-            use_mmap = false;
-        }
-
-        this->use_mmap = use_mmap;
-        this->check_tensors = check_tensors;
-    }
-
-    template
-    typename std::enable_if::value, bool>::type
-    get_arr_n(const std::string & key, T & result, const bool required = true) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
-
-        if (kid < 0) {
-            if (required) {
-                throw std::runtime_error(format("key not found in model: %s", key.c_str()));
-            }
-            return false;
-        }
-
-        struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV::get_kv(meta.get(), kid);
-
-
-        result = arr_info.length;
-        return true;
-    }
-
-    template
-    typename std::enable_if::value, bool>::type
-    get_arr_n(const enum llm_kv kid, T & result, const bool required = true) {
-        return get_arr_n(llm_kv(kid), result, required);
-    }
-
-    template
-    bool get_arr(const std::string & key, std::vector & result, const bool required = true) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
-
-        if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) {
-            if (required) {
-                throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
-            }
-            return false;
-        }
-
-        struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV::get_kv(meta.get(), kid);
-
-        switch (arr_info.gt) {
-            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same::value)); break;
-            case GGUF_TYPE_INT32:   GGML_ASSERT(
-                                            (std::is_same::value) ||
-                                            (std::is_same::value));  break;
-            default:
-                throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
-        }
-
-        result.resize(arr_info.length);
-        result.assign((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length);
-
-        return true;
-    }
-
-    template
-    bool get_arr(const std::string & key, std::array & result, const bool required = true) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
-
-        if (kid < 0 || gguf_get_kv_type(meta.get(), kid) != GGUF_TYPE_ARRAY) {
-            if (required) {
-                throw std::runtime_error(format("array key not found in model: %s", key.c_str()));
-            }
-            return false;
-        }
-
-        struct GGUFMeta::ArrayInfo arr_info =
-            GGUFMeta::GKV::get_kv(meta.get(), kid);
-
-        switch (arr_info.gt) {
-            case GGUF_TYPE_FLOAT32: GGML_ASSERT((std::is_same::value)); break;
-            case GGUF_TYPE_INT32:   GGML_ASSERT(
-                                            (std::is_same::value) ||
-                                            (std::is_same::value));  break;
-            default:
-                throw std::runtime_error(format("%s is not a float32, int32 array", key.c_str()));
-        }
-
-        if (arr_info.length > N_MAX) {
-            throw std::runtime_error(format("array length %u for key %s exceeds max %u", (uint32_t) arr_info.length, key.c_str(), (uint32_t) N_MAX));
-        }
-
-        std::copy((const T*)arr_info.data, (const T *)arr_info.data + arr_info.length, result.begin());
-
-        return true;
-    }
-
-    template
-    bool get_arr(const enum llm_kv kid, T & result, const bool required = true) {
-        return get_arr(llm_kv(kid), result, required);
-    }
-
-    template
-    bool get_key(const std::string & key, T & result, const bool required = true) {
-        auto it = kv_overrides.find(key);
-
-        const struct llama_model_kv_override * override =
-            it != kv_overrides.end() ? &it->second : nullptr;
-
-        const bool found = GGUFMeta::GKV::set(meta.get(), key, result, override);
-
-        if (required && !found) {
-            throw std::runtime_error(format("key not found in model: %s", key.c_str()));
-        }
-
-        return found;
-    }
-
-    template
-    bool get_key(const enum llm_kv kid, T & result, const bool required = true) {
-        return get_key(llm_kv(kid), result, required);
-    }
-
-    // get array of n <= N_MAX elements, or a single element repeated n times
-    template
-    bool get_key_or_arr(const std::string & key, std::array & result, uint32_t n, const bool required = true) {
-        const int kid = gguf_find_key(meta.get(), key.c_str());
-
-        if (kid < 0) {
-            if (required) {
-                throw std::runtime_error(format("key not found in model: %s", key.c_str()));
-            }
-            return false;
-        }
-
-        if (n > N_MAX) {
-            throw std::runtime_error(format("n > N_MAX: %u > %u for key %s", (uint32_t) n, (uint32_t) N_MAX, key.c_str()));
-        }
-
-        if (gguf_get_kv_type(meta.get(), kid) == GGUF_TYPE_ARRAY) {
-            struct GGUFMeta::ArrayInfo arr_info =
-                GGUFMeta::GKV::get_kv(meta.get(), kid);
-
-            if (n != arr_info.length) {
-                throw std::runtime_error(format("key %s has wrong array length; expected %u, got %u", key.c_str(), n, (uint32_t) arr_info.length));
-            }
-
-            return get_arr(key, result, required);
-        } else {
-            T value;
-
-            bool ok = get_key(key, value, required);
-            if (!ok) {
-                return false;
-            }
-
-            for (uint32_t i = 0; i < n; i++) {
-                result[i] = value;
-            }
-
-            return true;
-        }
-    }
-
-    template
-    bool get_key_or_arr(const enum llm_kv kid, T & result, uint32_t n, const bool required = true) {
-        return get_key_or_arr(llm_kv(kid), result, n, required);
-    }
-
-    std::string get_arch_name() const {
-        return arch_name;
-    }
-
-    enum llm_arch get_arch() const {
-        return llm_kv.arch;
-    }
-
-    const llama_tensor_weight * get_weight(const char * name) const {
-        auto pos = weights_map.find(name);
-        if (pos != weights_map.end()) {
-            return &pos->second;
-        }
-
-        return nullptr;
-    }
-
-    const llama_tensor_weight & require_weight(const char * name) const {
-        const llama_tensor_weight * weight = get_weight(name);
-        if (!weight) {
-            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name));
-        }
-        return *weight;
-    }
-
-    struct ggml_tensor * get_tensor_meta(const char * name) const {
-        const auto * weight = get_weight(name);
-        if (!weight) {
-            return nullptr;
-        }
-        return weight->tensor;
-    }
-
-    struct ggml_tensor * require_tensor_meta(const std::string & name) const {
-        struct ggml_tensor * tensor = get_tensor_meta(name.c_str());
-        if (!tensor) {
-            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
-        }
-        return tensor;
-    }
-
-    const struct ggml_tensor * check_tensor_dims(const std::string & name, const std::vector & ne, bool required) const {
-        const struct ggml_tensor * cur = get_tensor_meta(name.c_str());
-
-        if (cur == NULL) {
-            if (!required) {
-                return NULL;
-            }
-            throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
-        }
-
-        {
-            bool is_ok = true;
-            for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
-                if ((i < ne.size() && ne[i] != cur->ne[i]) || (i >= ne.size() && cur->ne[i] != 1)) {
-                    is_ok = false;
-                    break;
-                }
-            }
-            if (!is_ok) {
-                throw std::runtime_error(
-                        format("%s: tensor '%s' has wrong shape; expected %s, got %s",
-                            __func__, name.c_str(),
-                            llama_format_tensor_shape(ne).c_str(),
-                            llama_format_tensor_shape(cur).c_str()));
-            }
-        }
-
-        return cur;
-    }
-
-    static const int TENSOR_NOT_REQUIRED = 1;
-    static const int TENSOR_DUPLICATED   = 2;
-
-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::initializer_list & ne, int flags = 0) {
-        const struct ggml_tensor * cur = check_tensor_dims(name, ne, !(flags & TENSOR_NOT_REQUIRED));
-
-        if (cur == NULL) {
-            return NULL;
-        }
-
-        bool duplicated = flags & TENSOR_DUPLICATED;
-
-        struct ggml_tensor * tensor = ggml_dup_tensor(ctx, cur);
-        ggml_set_name(tensor, ggml_get_name(cur));
-
-        if (duplicated) {
-            size_data += ggml_nbytes(cur);
-        } else {
-            n_created++;
-        }
-
-        return tensor;
-
-    }
-
-    struct ggml_tensor * create_tensor_as_view(struct ggml_context * ctx, struct ggml_tensor * base, const std::string & name, const std::initializer_list & ne, size_t offset, bool required = true) {
-        const struct ggml_tensor * cur = check_tensor_dims(name, ne, required);
-
-        if (cur == NULL) {
-            return NULL;
-        }
-
-        if (cur->type != base->type) {
-            throw std::runtime_error(format("%s: tensor '%s' has wrong type; expected %s, got %s", __func__, name.c_str(), ggml_type_name(base->type), ggml_type_name(cur->type)));
-        }
-
-        std::array dims;
-        for (size_t i = 0; i < GGML_MAX_DIMS; ++i) {
-            dims[i] = i < ne.size() ? ne.begin()[i] : 1;
-        }
-
-        struct ggml_tensor * tensor = ggml_view_4d(ctx, base,
-                                        dims[0], dims[1], dims[2], dims[3],
-                                        cur->nb[1], cur->nb[2], cur->nb[3],
-                                        offset);
-
-        ggml_set_name(tensor, name.c_str());
-
-        n_created++;
-
-        return tensor;
-    }
-
-    void done_getting_tensors() const {
-        if (n_created != n_tensors) {
-            throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created));
-        }
-    }
-
-    void init_mappings(bool prefetch = true, llama_mlocks * mlock_mmaps = nullptr) {
-        if (use_mmap) {
-            mappings.reserve(files.size());
-            mmaps_used.reserve(files.size());
-            for (const auto & file : files) {
-                auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU));
-                auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa");
-                std::unique_ptr mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, is_numa_fn()));
-                mmaps_used.emplace_back(mapping->size, 0);
-                if (mlock_mmaps) {
-                    std::unique_ptr mlock_mmap(new llama_mlock());
-                    mlock_mmap->init(mapping->addr);
-                    mlock_mmaps->emplace_back(std::move(mlock_mmap));
-                }
-                mappings.emplace_back(std::move(mapping));
-            }
-        }
-
-        // compute the total size of all tensors for progress reporting
-        for (const auto & it : weights_map) {
-            size_data += ggml_nbytes(it.second.tensor);
-        }
-    }
-
-    void get_mapping_range(size_t * first, size_t * last, void ** addr, int idx, ggml_context * ctx) const {
-        GGML_ASSERT(!mappings.empty());
-        const auto & mapping = mappings.at(idx);
-
-        *first = mapping->size;
-        *last  = 0;
-        *addr = mapping->addr;
-        for (ggml_tensor * tensor = ggml_get_first_tensor(ctx); tensor; tensor = ggml_get_next_tensor(ctx, tensor)) {
-            const auto * weight = get_weight(ggml_get_name(tensor));
-            if (!weight || weight->idx != idx) {
-                continue;
-            }
-            *first = std::min(*first, weight->offs);
-            *last  = std::max(*last,  weight->offs + ggml_nbytes(tensor));
-        }
-    }
-
-    // for backwards compatibility, does not support ggml-backend
-    void load_data_for(struct ggml_tensor * cur) const {
-        const auto & w = require_weight(ggml_get_name(cur));
-
-        if (use_mmap) {
-            const auto & mapping = mappings.at(w.idx);
-            if (cur->data == nullptr) {
-                cur->data = (uint8_t *)mapping->addr + w.offs;
-            } else {
-                memcpy(cur->data, (uint8_t *)mapping->addr + w.offs, ggml_nbytes(cur));
-            }
-        } else {
-            GGML_ASSERT(cur->data != nullptr);
-            GGML_ASSERT(w.idx < files.size());
-            const auto & file = files.at(w.idx);
-            file->seek(w.offs, SEEK_SET);
-            file->read_raw(cur->data, ggml_nbytes(cur));
-        }
-
-        if (check_tensors && !ggml_validate_row_data(cur->type, cur->data, ggml_nbytes(cur))) {
-            throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
-        }
-    }
-
-    size_t size_done = 0;
-    size_t size_data = 0;
-    std::vector> mmaps_used;
-
-    // Returns false if cancelled by progress_callback
-    bool load_all_data(
-            struct ggml_context * ctx,
-            llama_buf_map & bufs,
-            llama_mlocks * lmlocks,
-            llama_progress_callback progress_callback,
-            void * progress_callback_user_data) {
-        GGML_ASSERT(size_data != 0 && "call init_mappings() first");
-
-        std::vector> read_buf;
-        std::vector>> validation_result;
-
-        // 4 staging buffers for async uploads, each sized 1MB seems to be a good default for single NVMe drives.
-        // NVMe raid configurations might require more / larger buffers.
-        constexpr size_t n_buffers = 4;
-        constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB
-
-        std::vector host_buffers;
-        std::vector events;
-        std::vector host_ptrs;
-        size_t buffer_idx = 0; // buffer to use for async loads
-        ggml_backend_t upload_backend = [&](const char * func) -> ggml_backend_t {
-            if (use_mmap || check_tensors) {
-                return nullptr;
-            }
-            // When not using mmaped io use async uploads from pinned memory to GPU memory.
-            // First determine if the backend supports the necessary features for async uploads.
-            auto * buf = bufs.count(0) ? bufs.at(0) : nullptr;
-            if (!buf) {
-                LLAMA_LOG_DEBUG("%s: no buffer found for async uploads\n", func);
-                return nullptr;
-            }
-
-            auto * buft = ggml_backend_buffer_get_type(buf);
-            auto * dev = ggml_backend_buft_get_device(buft);
-            if (!dev) {
-                LLAMA_LOG_DEBUG("%s: no device found for buffer type %s for async uploads\n", func,
-                    ggml_backend_buft_name(buft));
-                return nullptr;
-            }
-
-            if (buft != ggml_backend_dev_buffer_type(dev)) {
-                LLAMA_LOG_DEBUG("%s: buffer type %s is not the default buffer type for device %s for async uploads\n", func,
-                    ggml_backend_buft_name(buft), ggml_backend_dev_name(dev));
-                return nullptr;
-            }
-
-            ggml_backend_dev_props props;
-            ggml_backend_dev_get_props(dev, &props);
-            if (!props.caps.async || !props.caps.host_buffer || !props.caps.events) {
-                LLAMA_LOG_DEBUG("%s: device %s does not support async, host buffers or events\n", func,
-                    ggml_backend_dev_name(dev));
-                return nullptr;
-            }
-
-            auto * host_buft = ggml_backend_dev_host_buffer_type(dev);
-            if (!host_buft) {
-                LLAMA_LOG_DEBUG("%s: no host buffer type found for device %s\n", func,
-                    ggml_backend_dev_name(dev));
-                return nullptr;
-            }
-
-            // If the backend is supported, create pinned memory buffers and events for synchronisation.
-            for (size_t idx = 0; idx < n_buffers; ++idx) {
-                auto * buf = ggml_backend_buft_alloc_buffer(host_buft, buffer_size);
-                if (!buf) {
-                    LLAMA_LOG_DEBUG("%s: failed to allocate host buffer for async uploads for device %s\n", func,
-                        ggml_backend_dev_name(dev));
-                    return nullptr;
-                }
-
-                host_buffers.emplace_back(buf);
-                host_ptrs.emplace_back(ggml_backend_buffer_get_base(buf));
-
-                auto * event = ggml_backend_event_new(dev);
-                if (!event) {
-                    LLAMA_LOG_DEBUG("%s: failed to create event for async uploads for device %s\n", func,
-                        ggml_backend_dev_name(dev));
-                    return nullptr;
-                }
-
-                events.emplace_back(event);
-            }
-
-            ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
-            if (!backend) {
-                LLAMA_LOG_DEBUG("%s: failed to initialize backend for device %s for async uploads\n", func,
-                    ggml_backend_dev_name(dev));
-                return nullptr;
-            }
-
-            return backend;
-        }(__func__);
-
-        if (upload_backend) {
-            LLAMA_LOG_DEBUG("%s: using async uploads for device %s, buffer type %s, backend %s\n", __func__,
-                ggml_backend_dev_name(ggml_backend_get_device(upload_backend)),
-                ggml_backend_buft_name(ggml_backend_buffer_get_type(bufs.at(0))),
-                ggml_backend_name(upload_backend));
-        }
-
-        for (struct ggml_tensor * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
-            const auto * weight = get_weight(ggml_get_name(cur));
-            if (weight == nullptr) {
-                // this can happen with split experts models
-                continue;
-            }
-
-            if (progress_callback) {
-                if (!progress_callback((float) size_done / size_data, progress_callback_user_data)) {
-                    return false;
-                }
-            }
-
-            size_t n_size = ggml_nbytes(cur);
-
-            if (use_mmap) {
-                const auto & mapping = mappings.at(weight->idx);
-                ggml_backend_buffer_t buf_mmap = nullptr;
-                if (bufs.count(weight->idx)) {
-                    buf_mmap = bufs.at(weight->idx);
-                }
-                uint8_t * data = (uint8_t *) mapping->addr + weight->offs;
-
-                if (check_tensors) {
-                    validation_result.emplace_back(std::async(std::launch::async, [cur, data, n_size] {
-                        return std::make_pair(cur, ggml_validate_row_data(cur->type, data, n_size));
-                    }));
-                }
-
-                GGML_ASSERT(buf_mmap || cur->data); // either we have a buffer to allocate the tensor in, or it is already allocated
-                if (buf_mmap && cur->data == nullptr) {
-                    ggml_backend_tensor_alloc(buf_mmap, cur, data);
-                    if (lmlocks) {
-                        const auto & lmlock = lmlocks->at(weight->idx);
-                        lmlock->grow_to(weight->offs + n_size);
-                    }
-
-                    auto & mmap_used = mmaps_used[weight->idx];
-                    mmap_used.first  = std::min(mmap_used.first,  weight->offs);
-                    mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
-                } else {
-                    ggml_backend_tensor_set(cur, data, 0, n_size);
-                }
-            } else {
-                const auto & file = files.at(weight->idx);
-                if (ggml_backend_buffer_is_host(cur->buffer)) {
-                    file->seek(weight->offs, SEEK_SET);
-                    file->read_raw(cur->data, n_size);
-                    if (check_tensors) {
-                        validation_result.emplace_back(std::async(std::launch::async, [cur, n_size] {
-                            return std::make_pair(cur, ggml_validate_row_data(cur->type, cur->data, n_size));
-                        }));
-                    }
-                } else {
-                    // If upload_backend is valid load the tensor in chunks to pinned memory and upload the buffers asynchronously to the GPU.
-                    if (upload_backend) {
-                        file->seek(weight->offs, SEEK_SET);
-
-                        size_t bytes_read = 0;
-
-                        while (bytes_read < n_size) {
-                            size_t read_iteration = std::min(buffer_size, n_size - bytes_read);
-
-                            ggml_backend_event_synchronize(events[buffer_idx]);
-                            file->read_raw(host_ptrs[buffer_idx], read_iteration);
-                            ggml_backend_tensor_set_async(upload_backend, cur, host_ptrs[buffer_idx], bytes_read, read_iteration);
-                            ggml_backend_event_record(events[buffer_idx], upload_backend);
-
-                            bytes_read += read_iteration;
-                            ++buffer_idx;
-                            buffer_idx %= n_buffers;
-                        }
-                    } else {
-                        read_buf.resize(n_size);
-                        file->seek(weight->offs, SEEK_SET);
-                        file->read_raw(read_buf.data(), n_size);
-                        ggml_backend_tensor_set(cur, read_buf.data(), 0, n_size);
-                        if (check_tensors && !ggml_validate_row_data(cur->type, read_buf.data(), n_size)) {
-                            throw std::runtime_error(format("tensor '%s' has invalid data", ggml_get_name(cur)));
-                        }
-                    }
-                }
-            }
-
-            size_done += n_size;
-        }
-
-        // free temporary resources used for async uploads
-        for (auto * event : events) {
-            ggml_backend_event_synchronize(event);
-            ggml_backend_event_free(event);
-        }
-        for (auto * buf : host_buffers) {
-            ggml_backend_buffer_free(buf);
-        }
-        ggml_backend_free(upload_backend);
-
-        // check validation results
-        bool validation_failed = false;
-        for (auto & future : validation_result) {
-            auto result = future.get();
-            if (!result.second) {
-                LLAMA_LOG_ERROR("%s: tensor '%s' has invalid data\n", __func__, ggml_get_name(result.first));
-                validation_failed = true;
-            }
-        }
-        if (validation_failed) {
-            throw std::runtime_error("found tensors with invalid data");
-        }
-
-        // check if this is the last call and do final cleanup
-        if (size_done >= size_data) {
-            // unmap offloaded tensors and metadata
-            if (use_mmap) {
-                for (uint32_t idx = 0; idx < mappings.size(); idx++) {
-                    const auto & mmap_used = mmaps_used.at(idx);
-                    auto & mapping = mappings.at(idx);
-                    mapping->unmap_fragment(0, mmap_used.first);
-                    if (mmap_used.second != 0) {
-                        mapping->unmap_fragment(mmap_used.second, mapping->size);
-                    }
-                }
-            }
-            if (progress_callback) {
-                // Even though the model is done loading, we still honor
-                // cancellation since we need to free allocations.
-                return progress_callback(1.0f, progress_callback_user_data);
-            }
-        }
-
-        return true;
-    }
-};
-
-// temporary allocate memory for the input batch if needed
-static const llama_seq_id batch_default_seq_id = 0;
-struct llama_batch_allocr {
-    std::array seq_id_0 = {batch_default_seq_id};
-    std::vector      pos;
-    std::vector        n_seq_id;
-    std::vector seq_id;
-    std::vector         logits;
-    struct llama_batch          batch;
-    // optionally fulfill the batch returned by llama_batch_get_one
-    llama_batch_allocr(llama_context & ctx, struct llama_batch in_batch) {
-        batch = in_batch;
-        GGML_ASSERT(batch.n_tokens > 0);
-        if (!batch.pos) {
-            // determine the last position in KV cache
-            llama_pos last_pos = -1;
-            for (const auto & cell : ctx.kv_self.cells) {
-                if (cell.has_seq_id(batch_default_seq_id)) {
-                    last_pos = std::max(last_pos, cell.pos);
-                }
-            }
-            last_pos++; // next position
-            pos.resize(batch.n_tokens);
-            for (int32_t i = 0; i < batch.n_tokens; i++) {
-                pos[i] = i+last_pos;
-            }
-            batch.pos = pos.data();
-        }
-        if (!batch.n_seq_id) {
-            n_seq_id.resize(batch.n_tokens);
-            for (int32_t i = 0; i < batch.n_tokens; i++) {
-                n_seq_id[i] = seq_id_0.size();
-            }
-            batch.n_seq_id = n_seq_id.data();
-        }
-        if (!batch.seq_id) {
-            seq_id.resize(batch.n_tokens + 1);
-            seq_id[batch.n_tokens] = NULL;
-            for (int32_t i = 0; i < batch.n_tokens; i++) {
-                seq_id[i] = seq_id_0.data();
-            }
-            batch.seq_id = seq_id.data();
-        }
-        if (!batch.logits) {
-            logits.resize(batch.n_tokens);
-            logits[logits.size() - 1] = true;
-            batch.logits = logits.data();
-        }
-    }
-};
-
-template<>
-bool llama_model_loader::get_key(const enum llm_kv kid, enum llama_pooling_type & result, const bool required) {
-    uint32_t tmp;
-    const bool found = get_key(kid, tmp, required);
-    if (found) {
-        result = (enum llama_pooling_type) tmp;
-    } else {
-        result = LLAMA_POOLING_TYPE_UNSPECIFIED;
-    }
-    return found;
-}
-
-
-//
-// load LLaMA models
-//
-
-static const char * llama_model_arch_name(llm_arch arch) {
-    auto it = LLM_ARCH_NAMES.find(arch);
-    if (it == LLM_ARCH_NAMES.end()) {
-        return "unknown";
-    }
-    return it->second;
-}
-
-static std::string llama_model_ftype_name(llama_ftype ftype) {
-    if (ftype & LLAMA_FTYPE_GUESSED) {
-        return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)";
-    }
-
-    switch (ftype) {
-        case LLAMA_FTYPE_ALL_F32:         return "all F32";
-        case LLAMA_FTYPE_MOSTLY_F16:      return "F16";
-        case LLAMA_FTYPE_MOSTLY_BF16:     return "BF16";
-        case LLAMA_FTYPE_MOSTLY_Q4_0:     return "Q4_0";
-        case LLAMA_FTYPE_MOSTLY_Q4_1:     return "Q4_1";
-        case LLAMA_FTYPE_MOSTLY_Q5_0:     return "Q5_0";
-        case LLAMA_FTYPE_MOSTLY_Q5_1:     return "Q5_1";
-        case LLAMA_FTYPE_MOSTLY_Q8_0:     return "Q8_0";
-        case LLAMA_FTYPE_MOSTLY_Q2_K:     return "Q2_K - Medium";
-        case LLAMA_FTYPE_MOSTLY_Q2_K_S:   return "Q2_K - Small";
-        case LLAMA_FTYPE_MOSTLY_Q3_K_S:   return "Q3_K - Small";
-        case LLAMA_FTYPE_MOSTLY_Q3_K_M:   return "Q3_K - Medium";
-        case LLAMA_FTYPE_MOSTLY_Q3_K_L:   return "Q3_K - Large";
-        case LLAMA_FTYPE_MOSTLY_Q4_K_S:   return "Q4_K - Small";
-        case LLAMA_FTYPE_MOSTLY_Q4_K_M:   return "Q4_K - Medium";
-        case LLAMA_FTYPE_MOSTLY_Q5_K_S:   return "Q5_K - Small";
-        case LLAMA_FTYPE_MOSTLY_Q5_K_M:   return "Q5_K - Medium";
-        case LLAMA_FTYPE_MOSTLY_Q6_K:     return "Q6_K";
-        case LLAMA_FTYPE_MOSTLY_TQ1_0:    return "TQ1_0 - 1.69 bpw ternary";
-        case LLAMA_FTYPE_MOSTLY_TQ2_0:    return "TQ2_0 - 2.06 bpw ternary";
-        case LLAMA_FTYPE_MOSTLY_IQ2_XXS:  return "IQ2_XXS - 2.0625 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ2_XS:   return "IQ2_XS - 2.3125 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ2_S:    return "IQ2_S - 2.5 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ2_M:    return "IQ2_M - 2.7 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ3_XS:   return "IQ3_XS - 3.3 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ3_XXS:  return "IQ3_XXS - 3.0625 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ1_S:    return "IQ1_S - 1.5625 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ1_M:    return "IQ1_M - 1.75 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ4_NL:   return "IQ4_NL - 4.5 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ4_XS:   return "IQ4_XS - 4.25 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ3_S:    return "IQ3_S - 3.4375 bpw";
-        case LLAMA_FTYPE_MOSTLY_IQ3_M:    return "IQ3_S mix - 3.66 bpw";
-
-        default: return "unknown, may not work";
-    }
-}
-
-static const char * llama_model_type_name(e_model type) {
-    switch (type) {
-        case MODEL_14M:           return "14M";
-        case MODEL_17M:           return "17M";
-        case MODEL_22M:           return "22M";
-        case MODEL_33M:           return "33M";
-        case MODEL_60M:           return "60M";
-        case MODEL_70M:           return "70M";
-        case MODEL_80M:           return "80M";
-        case MODEL_109M:          return "109M";
-        case MODEL_137M:          return "137M";
-        case MODEL_160M:          return "160M";
-        case MODEL_220M:          return "220M";
-        case MODEL_250M:          return "250M";
-        case MODEL_270M:          return "270M";
-        case MODEL_335M:          return "335M";
-        case MODEL_410M:          return "410M";
-        case MODEL_450M:          return "450M";
-        case MODEL_770M:          return "770M";
-        case MODEL_780M:          return "780M";
-        case MODEL_0_5B:          return "0.5B";
-        case MODEL_1B:            return "1B";
-        case MODEL_1_3B:          return "1.3B";
-        case MODEL_1_4B:          return "1.4B";
-        case MODEL_1_5B:          return "1.5B";
-        case MODEL_1_6B:          return "1.6B";
-        case MODEL_2B:            return "2B";
-        case MODEL_2_8B:          return "2.8B";
-        case MODEL_3B:            return "3B";
-        case MODEL_4B:            return "4B";
-        case MODEL_6B:            return "6B";
-        case MODEL_6_9B:          return "6.9B";
-        case MODEL_7B:            return "7B";
-        case MODEL_8B:            return "8B";
-        case MODEL_9B:            return "9B";
-        case MODEL_11B:           return "11B";
-        case MODEL_12B:           return "12B";
-        case MODEL_13B:           return "13B";
-        case MODEL_14B:           return "14B";
-        case MODEL_15B:           return "15B";
-        case MODEL_16B:           return "16B";
-        case MODEL_20B:           return "20B";
-        case MODEL_30B:           return "30B";
-        case MODEL_32B:           return "32B";
-        case MODEL_34B:           return "34B";
-        case MODEL_35B:           return "35B";
-        case MODEL_40B:           return "40B";
-        case MODEL_65B:           return "65B";
-        case MODEL_70B:           return "70B";
-        case MODEL_236B:          return "236B";
-        case MODEL_314B:          return "314B";
-        case MODEL_SMALL:         return "0.1B";
-        case MODEL_MEDIUM:        return "0.4B";
-        case MODEL_LARGE:         return "0.8B";
-        case MODEL_XL:            return "1.5B";
-        case MODEL_A1_7B:         return "A1.7B";
-        case MODEL_A2_7B:         return "A2.7B";
-        case MODEL_8x7B:          return "8x7B";
-        case MODEL_8x22B:         return "8x22B";
-        case MODEL_16x12B:        return "16x12B";
-        case MODEL_10B_128x3_66B: return "10B+128x3.66B";
-        case MODEL_57B_A14B:      return "57B.A14B";
-        case MODEL_27B:           return "27B";
-        default:                  return "?B";
-    }
-}
-
-static const char * llama_model_vocab_type_name(enum llama_vocab_type type){
-    switch (type) {
-        case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
-        case LLAMA_VOCAB_TYPE_SPM:  return "SPM";
-        case LLAMA_VOCAB_TYPE_BPE:  return "BPE";
-        case LLAMA_VOCAB_TYPE_WPM:  return "WPM";
-        case LLAMA_VOCAB_TYPE_UGM:  return "UGM";
-        case LLAMA_VOCAB_TYPE_RWKV: return "RWKV";
-        default:                    return "unknown";
-    }
-}
-
-static void llm_load_stats(llama_model_loader & ml, llama_model & model) {
-    model.n_elements = ml.n_elements;
-    model.n_bytes = ml.n_bytes;
-}
-
-static void llm_load_arch(llama_model_loader & ml, llama_model & model) {
-    model.arch = ml.get_arch();
-    if (model.arch == LLM_ARCH_UNKNOWN) {
-        throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'");
-    }
-}
-
-static void llm_load_hparams(
-        llama_model_loader & ml,
-        llama_model & model) {
-    auto & hparams = model.hparams;
-    const gguf_context * ctx = ml.meta.get();
-
-    // get metadata as string
-    for (int i = 0; i < gguf_get_n_kv(ctx); i++) {
-        enum gguf_type type = gguf_get_kv_type(ctx, i);
-        if (type == GGUF_TYPE_ARRAY) {
-            continue;
-        }
-        const char * name = gguf_get_key(ctx, i);
-        const std::string value = gguf_kv_to_str(ctx, i);
-        model.gguf_kv.emplace(name, value);
-    }
-
-    // get general kv
-    ml.get_key(LLM_KV_GENERAL_NAME, model.name, false);
-
-    // get hparams kv
-    ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab, false);
-
-    // everything past this point is not vocab-related
-    if (hparams.vocab_only) {
-        return;
-    }
-
-    ml.get_key(LLM_KV_CONTEXT_LENGTH,    hparams.n_ctx_train);
-    ml.get_key(LLM_KV_EMBEDDING_LENGTH,  hparams.n_embd);
-    ml.get_key(LLM_KV_BLOCK_COUNT,       hparams.n_layer);
-    ml.get_key(LLM_KV_EXPERT_COUNT,      hparams.n_expert,      false);
-    ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false);
-
-    if (model.arch == LLM_ARCH_WAVTOKENIZER_DEC) {
-        ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features);
-
-        ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd);
-        ml.get_key(LLM_KV_POSNET_BLOCK_COUNT,      hparams.posnet.n_layer);
-
-        ml.get_key(LLM_KV_CONVNEXT_EMBEDDING_LENGTH, hparams.convnext.n_embd);
-        ml.get_key(LLM_KV_CONVNEXT_BLOCK_COUNT,      hparams.convnext.n_layer);
-    }
-
-    GGML_ASSERT(hparams.n_expert <= LLAMA_MAX_EXPERTS);
-    GGML_ASSERT(hparams.n_expert_used <= hparams.n_expert);
-    if (hparams.n_expert > 0) {
-        GGML_ASSERT(hparams.n_expert_used > 0);
-    } else {
-        GGML_ASSERT(hparams.n_expert_used == 0);
-    }
-
-    // zero-out the array hparams
-    std::fill(hparams.n_head_arr.begin(),    hparams.n_head_arr.end(),    0);
-    std::fill(hparams.n_head_kv_arr.begin(), hparams.n_head_kv_arr.end(), 0);
-    std::fill(hparams.n_ff_arr.begin(),      hparams.n_ff_arr.end(),      0);
-
-    ml.get_key_or_arr(LLM_KV_FEED_FORWARD_LENGTH,  hparams.n_ff_arr,   hparams.n_layer, false);
-    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, hparams.n_layer, false);
-
-    // n_head_kv is optional, default to n_head
-    hparams.n_head_kv_arr = hparams.n_head_arr;
-
-    ml.get_key_or_arr(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, hparams.n_layer, false);
-
-    bool rope_finetuned = false;
-    ml.get_key(LLM_KV_ROPE_SCALING_FINETUNED, rope_finetuned, false);
-    hparams.rope_finetuned = rope_finetuned;
-
-    hparams.n_ctx_orig_yarn = hparams.n_ctx_train;
-    ml.get_key(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn, false);
-
-    // rope_freq_base (optional)
-    hparams.rope_freq_base_train = 10000.0f;
-    ml.get_key(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train, false);
-
-    std::string rope_scaling("linear");
-    ml.get_key(LLM_KV_ROPE_SCALING_TYPE, rope_scaling, false);
-    hparams.rope_scaling_type_train = llama_rope_scaling_type_from_string(rope_scaling);
-    GGML_ASSERT(hparams.rope_scaling_type_train != LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED);
-
-    // rope_freq_scale (inverse of the kv) is optional
-    float ropescale = 0.0f;
-    if (!ml.get_key(LLM_KV_ROPE_SCALING_FACTOR, ropescale, false)) {
-        // try the old key name
-        ml.get_key(LLM_KV_ROPE_SCALE_LINEAR, ropescale, false);
-    }
-    hparams.rope_freq_scale_train = ropescale == 0.0f ? 1.0f : 1.0f/ropescale;
-
-    ml.get_key(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor, false);
-
-    // non-transformer models do not have attention heads
-    if (hparams.n_head() > 0) {
-        // gpt-neox n_rot = rotary_pct * (n_embd / n_head)
-        // gpt-j n_rot = rotary_dim
-
-        hparams.n_embd_head_k = hparams.n_embd / hparams.n_head();
-        ml.get_key(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k, false);
-
-        hparams.n_embd_head_v = hparams.n_embd / hparams.n_head();
-        ml.get_key(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v, false);
-
-        // sanity check for n_rot (optional)
-        hparams.n_rot = hparams.n_embd_head_k;
-
-        ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false);
-
-        if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_DECI || model.arch == LLM_ARCH_FALCON) {
-            if (hparams.n_rot != hparams.n_embd_head_k) {
-                throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k));
-            }
-        }
-    } else {
-        hparams.n_rot = 0;
-        hparams.n_embd_head_k = 0;
-        hparams.n_embd_head_v = 0;
-    }
-
-    // arch-specific KVs
-    switch (model.arch) {
-        case LLM_ARCH_LLAMA:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                if (hparams.n_expert == 8) {
-                    switch (hparams.n_layer) {
-                        case 32: model.type = e_model::MODEL_8x7B; break;
-                        case 56: model.type = e_model::MODEL_8x22B; break;
-                        default: model.type = e_model::MODEL_UNKNOWN;
-                    }
-                } else {
-                    switch (hparams.n_layer) {
-                        case 16: model.type = e_model::MODEL_1B; break; // Llama 3.2 1B
-                        case 22: model.type = e_model::MODEL_1B; break;
-                        case 26: model.type = e_model::MODEL_3B; break;
-                        case 28: model.type = e_model::MODEL_3B; break; // Llama 3.2 3B
-                        // granite uses a vocab with len 49152
-                        case 32: model.type = hparams.n_vocab == 49152 ? e_model::MODEL_3B : (hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B); break;
-                        case 36: model.type = e_model::MODEL_8B; break; // granite
-                        case 40: model.type = e_model::MODEL_13B; break;
-                        case 48: model.type = e_model::MODEL_34B; break;
-                        case 60: model.type = e_model::MODEL_30B; break;
-                        case 80: model.type = hparams.n_head() == hparams.n_head_kv() ? e_model::MODEL_65B : e_model::MODEL_70B; break;
-                        default: model.type = e_model::MODEL_UNKNOWN;
-                    }
-                }
-            } break;
-        case LLM_ARCH_DECI:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 80: model.type = e_model::MODEL_70B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_MINICPM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
-                ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
-                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
-
-                switch (hparams.n_layer) {
-                    case 52: model.type = e_model::MODEL_1B; break;
-                    case 40: model.type = e_model::MODEL_2B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_MINICPM3:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
-                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
-
-                switch (hparams.n_layer) {
-                    case 62: model.type = e_model::MODEL_4B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_GROK:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 64: model.type = e_model::MODEL_314B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_FALCON:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 60: model.type = e_model::MODEL_40B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_BAICHUAN:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-
-                if (model.type == e_model::MODEL_13B) {
-                    // TODO: become GGUF KV parameter
-                    hparams.f_max_alibi_bias = 8.0f;
-                }
-            } break;
-        case LLM_ARCH_STARCODER:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 36: model.type = e_model::MODEL_3B; break;
-                    case 42: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_15B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_REFACT:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_1B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-
-                // TODO: become GGUF KV parameter
-                hparams.f_max_alibi_bias = 8.0f;
-            } break;
-        case LLM_ARCH_BERT:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
-                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
-                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
-
-                switch (hparams.n_layer) {
-                    case 3:
-                        model.type = e_model::MODEL_17M; break; // bge-micro
-                    case 6:
-                        model.type = e_model::MODEL_22M; break; // MiniLM-L6
-                    case 12:
-                        switch (hparams.n_embd) {
-                            case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small
-                            case 768: model.type = e_model::MODEL_109M; break; // bge-base
-                        } break;
-                    case 24:
-                        model.type = e_model::MODEL_335M; break; // bge-large
-                }
-            } break;
-        case LLM_ARCH_JINA_BERT_V2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
-                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
-                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type, false);
-                hparams.f_max_alibi_bias = 8.0f;
-
-                switch (hparams.n_layer) {
-                    case 4:  model.type = e_model::MODEL_33M;  break; // jina-embeddings-small
-                    case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base
-                }
-            } break;
-        case LLM_ARCH_NOMIC_BERT:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
-                ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
-                ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type);
-
-                if (hparams.n_layer == 12 && hparams.n_embd == 768) {
-                    model.type = e_model::MODEL_137M;
-                }
-            } break;
-        case LLM_ARCH_BLOOM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 30:
-                        switch (hparams.n_embd) {
-                            case 2560: model.type = e_model::MODEL_3B; break;
-                            case 4096: model.type = e_model::MODEL_7B; break;
-                        } break;
-                }
-
-                // TODO: become GGUF KV parameter
-                hparams.f_max_alibi_bias = 8.0f;
-            } break;
-        case LLM_ARCH_MPT:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,      hparams.f_clamp_kqv, false);
-                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 48: model.type = e_model::MODEL_30B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_STABLELM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_3B; break;
-                    case 40: model.type = e_model::MODEL_12B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_QWEN:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_QWEN2VL:
-            {
-                std::array section_dims;
-                ml.get_key_or_arr(LLM_KV_ROPE_DIMENSION_SECTIONS, section_dims, 4, true);
-                std::copy(section_dims.begin(), section_dims.begin() + 4, std::begin(hparams.rope_sections));
-            }
-            // fall through
-        case LLM_ARCH_QWEN2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break;
-                    case 28: model.type = hparams.n_embd == 1536 ? e_model::MODEL_1_5B : e_model::MODEL_7B; break;
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 36: model.type = e_model::MODEL_3B; break;
-                    case 40: model.type = hparams.n_head() == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break;
-                    case 48: model.type = e_model::MODEL_14B; break;
-                    case 64: model.type = e_model::MODEL_32B; break;
-                    case 80: model.type = e_model::MODEL_70B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_QWEN2MOE:
-            {
-                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false);
-                ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false);
-
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_A2_7B; break;
-                    case 28: model.type = e_model::MODEL_57B_A14B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_PHI2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_3B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_PHI3:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_3B; break;
-                    case 40: model.type = e_model::MODEL_14B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-
-                // for backward compatibility ; see: https://github.com/ggerganov/llama.cpp/pull/8931
-                if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) {
-                    // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct
-                    hparams.n_swa = 2047;
-                } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) {
-                    // default value for Phi-3-mini-128k-instruct
-                    hparams.n_swa = 262144;
-                } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) {
-                    // default value for Phi-3-medium-128k-instruct
-                    hparams.n_swa = 131072;
-                }
-                bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
-                if (!found_swa && hparams.n_swa == 0) {
-                    throw std::runtime_error("invalid value for sliding_window");
-                }
-            } break;
-        case LLM_ARCH_PLAMO:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_GPT2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 12: model.type = e_model::MODEL_SMALL; break;
-                    case 24: model.type = e_model::MODEL_MEDIUM; break;
-                    case 36: model.type = e_model::MODEL_LARGE; break;
-                    case 48: model.type = e_model::MODEL_XL; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_CODESHELL:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 42: model.type = e_model::MODEL_7B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_ORION:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-
-                switch (hparams.n_layer) {
-                    case 40: model.type = e_model::MODEL_14B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_INTERNLM2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 48: model.type = e_model::MODEL_20B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_GEMMA:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 18: model.type = e_model::MODEL_2B; break;
-                    case 28: model.type = e_model::MODEL_7B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_GEMMA2:
-            {
-                hparams.n_swa = 4096; // default value of gemma 2
-                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false);
-                ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false);
-                hparams.attn_soft_cap = true;
-
-                switch (hparams.n_layer) {
-                    case 26: model.type = e_model::MODEL_2B; break;
-                    case 42: model.type = e_model::MODEL_9B; break;
-                    case 46: model.type = e_model::MODEL_27B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_STARCODER2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 30: model.type = e_model::MODEL_3B; break;
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_15B; break;
-                    case 52: model.type = e_model::MODEL_20B; break; // granite
-                    case 88: model.type = e_model::MODEL_34B; break; // granite
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_MAMBA:
-            {
-                ml.get_key(LLM_KV_SSM_CONV_KERNEL,    hparams.ssm_d_conv);
-                ml.get_key(LLM_KV_SSM_INNER_SIZE,     hparams.ssm_d_inner);
-                ml.get_key(LLM_KV_SSM_STATE_SIZE,     hparams.ssm_d_state);
-                ml.get_key(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
-                ml.get_key(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms, false);
-
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 24:
-                        switch (hparams.n_embd) {
-                            case 768: model.type = e_model::MODEL_SMALL; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 48:
-                        switch (hparams.n_embd) {
-                            case 1024: model.type = e_model::MODEL_MEDIUM; break;
-                            case 1536: model.type = e_model::MODEL_LARGE; break;
-                            case 2048: model.type = e_model::MODEL_XL; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 64:
-                        switch (hparams.n_embd) {
-                            case 2560: model.type = e_model::MODEL_3B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_XVERSE:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    case 80: model.type = e_model::MODEL_65B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_COMMAND_R:
-            {
-                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 40: model.type = e_model::MODEL_35B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_DBRX:
-        {
-            ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,  hparams.f_norm_eps);
-            ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,      hparams.f_clamp_kqv);
-
-            switch (hparams.n_layer) {
-                case 40: model.type = e_model::MODEL_16x12B; break;
-                default: model.type = e_model::MODEL_UNKNOWN;
-            }
-        } break;
-        case LLM_ARCH_OLMO:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV,     hparams.f_clamp_kqv, false);
-
-                switch (hparams.n_layer) {
-                    case 22: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 80: model.type = e_model::MODEL_70B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_OLMO2:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 16: model.type = e_model::MODEL_1B; break;
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_OLMOE:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 16: model.type = e_model::MODEL_A1_7B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_OPENELM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                case 16: model.type = e_model::MODEL_270M; break;
-                case 20: model.type = e_model::MODEL_450M; break;
-                case 28: model.type = e_model::MODEL_1B; break;
-                case 36: model.type = e_model::MODEL_3B; break;
-                default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_GPTNEOX:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
-                switch (hparams.n_layer) {
-                    case 6:
-                        switch (hparams.n_ff()) {
-                            case 512: model.type = e_model::MODEL_14M; break;
-                            case 2048: model.type = e_model::MODEL_70M; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 12:
-                        switch (hparams.n_ff()) {
-                            case 3072: model.type = e_model::MODEL_160M; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 16:
-                        switch (hparams.n_ff()) {
-                            case 8192: model.type = e_model::MODEL_1B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 24:
-                        switch (hparams.n_ff()) {
-                            case 4096: model.type = e_model::MODEL_410M; break;
-                            case 8192: model.type = e_model::MODEL_1_4B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 32:
-                        switch (hparams.n_ff()) {
-                            case 10240: model.type = e_model::MODEL_2_8B; break;
-                            case 16384: model.type = e_model::MODEL_6_9B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 36:
-                        switch (hparams.n_ff()) {
-                            case 20480: model.type = e_model::MODEL_12B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 44:
-                        switch (hparams.n_ff()) {
-                            case 24576: model.type = e_model::MODEL_20B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_ARCTIC:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                if (hparams.n_expert == 128) {
-                    switch (hparams.n_layer) {
-                        case 35: model.type = e_model::MODEL_10B_128x3_66B; break;
-                        default: model.type = e_model::MODEL_UNKNOWN;
-                    }
-                } else {
-                    model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_DEEPSEEK:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
-                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
-                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
-                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
-
-                switch (hparams.n_layer) {
-                    case 28: model.type = e_model::MODEL_20B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_DEEPSEEK2:
-            {
-                bool is_lite = (hparams.n_layer == 27);
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
-                if (!is_lite) {
-                    ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
-                }
-                ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
-                ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
-                ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
-                ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
-                ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
-
-                switch (hparams.n_layer) {
-                    case 27: model.type = e_model::MODEL_16B; break;
-                    case 60: model.type = e_model::MODEL_236B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_CHATGLM:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                switch (hparams.n_layer) {
-                    case 28: model.type = e_model::MODEL_6B; break;
-                    case 40: model.type = e_model::MODEL_9B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_BITNET:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 26: model.type = e_model::MODEL_3B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_T5:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
-
-                uint32_t dec_start_token_id;
-                if (ml.get_key(LLM_KV_DECODER_START_TOKEN_ID, dec_start_token_id, false)) {
-                    hparams.dec_start_token_id = dec_start_token_id;
-                }
-
-                switch (hparams.n_layer) {
-                    case 6:  model.type = e_model::MODEL_60M;  break; // t5-small
-                    case 8:  model.type = e_model::MODEL_80M;  break; // flan-t5-small
-                    case 12:
-                        switch (hparams.n_ff()) {
-                            case 3072: model.type = e_model::MODEL_220M; break; // t5-base
-                            case 2048: model.type = e_model::MODEL_250M; break; // flan-t5-base
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 24:
-                        switch (hparams.n_ff()) {
-                            case 4096:  model.type = e_model::MODEL_770M; break; // t5-large
-                            case 2816:  model.type = e_model::MODEL_780M; break; // flan-t5-large
-                            case 16384: model.type = e_model::MODEL_3B;   break; // t5-3b
-                            case 5120:  model.type = e_model::MODEL_3B;   break; // flan-t5-xl
-                            case 65536: model.type = e_model::MODEL_11B;  break; // t5-11b
-                            case 10240: model.type = e_model::MODEL_11B;  break; // flan-t5-xxl
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_T5ENCODER:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
-                model.type = e_model::MODEL_UNKNOWN;
-            } break;
-        case LLM_ARCH_JAIS:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1_3B; break;
-                    case 40: model.type = e_model::MODEL_13B; break;
-                    /* TODO: add variants */
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_NEMOTRON:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_4B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_EXAONE:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_8B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_RWKV6:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
-                ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);
-                ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim);
-                ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim);
-                ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false);
-
-                switch (hparams.n_layer) {
-                    case 24: model.type = e_model::MODEL_1_6B; break;
-                    case 32:
-                        switch (hparams.n_embd) {
-                            case 2560: model.type = e_model::MODEL_3B; break;
-                            case 4096: model.type = e_model::MODEL_7B; break;
-                            default: model.type = e_model::MODEL_UNKNOWN;
-                        } break;
-                    case 61: model.type = e_model::MODEL_14B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_GRANITE:
-        case LLM_ARCH_GRANITE_MOE:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
-                ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
-                ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);
-                ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_3B; break;
-                    case 40: model.type = e_model::MODEL_3B; break;
-                    // Add additional layer/vocab/etc checks here for other model sizes
-                    default: model.type = e_model::MODEL_UNKNOWN;
-                }
-            } break;
-        case LLM_ARCH_CHAMELEON:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
-                hparams.f_norm_eps = 1e-5;  // eps for qk-norm, torch default
-                ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm);
-
-                switch (hparams.n_layer) {
-                    case 32: model.type = e_model::MODEL_7B; break;
-                    case 48: model.type = e_model::MODEL_34B; break;
-                    default: model.type = e_model::MODEL_UNKNOWN;
-               }
-            } break;
-        case LLM_ARCH_WAVTOKENIZER_DEC:
-            {
-                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
-                ml.get_key(LLM_KV_ATTENTION_GROUPNORM_EPS,    hparams.f_norm_group_eps);
-                ml.get_key(LLM_KV_ATTENTION_GROUPNORM_GROUPS, hparams.n_norm_groups);
-                ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
-            } break;
-        default: (void)0;
-    }
-
-    model.ftype = ml.ftype;
-
-    if (hparams.f_max_alibi_bias > 0.0f) {
-        hparams.use_alibi = true;
-    }
-
-    hparams.rope_type = llama_rope_type(&model);
-}
-
-static void llm_load_vocab(
-        llama_model_loader & ml,
-        llama_model & model) {
-    auto & vocab = model.vocab;
-
-    struct gguf_context * ctx = ml.meta.get();
-
-    const auto kv = LLM_KV(model.arch);
-
-    // determine vocab type
-    {
-        std::string tokenizer_model;
-        std::string tokenizer_pre;
-
-        ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model);
-        ml.get_key(LLM_KV_TOKENIZER_PRE,   tokenizer_pre, false);
-
-        if (tokenizer_model == "no_vocab" || tokenizer_model == "none") {
-            vocab.type = LLAMA_VOCAB_TYPE_NONE;
-
-            // default special tokens
-            vocab.special_bos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_eos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_unk_id  = LLAMA_TOKEN_NULL;
-            vocab.special_sep_id  = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id  = LLAMA_TOKEN_NULL;
-            vocab.special_cls_id  = LLAMA_TOKEN_NULL;
-            vocab.special_mask_id = LLAMA_TOKEN_NULL;
-            vocab.linefeed_id     = LLAMA_TOKEN_NULL;
-
-            // read vocab size from metadata
-            if (!ml.get_key(LLM_KV_VOCAB_SIZE, vocab.n_vocab, false)) {
-                vocab.n_vocab = 0;
-                LLAMA_LOG_WARN("%s: there is no vocab_size in metadata, vocab.n_vocab will be set to %u\n", __func__, vocab.n_vocab);
-            }
-            return;
-        }
-
-        if (tokenizer_model == "llama") {
-            vocab.type = LLAMA_VOCAB_TYPE_SPM;
-
-            // default special tokens
-            vocab.special_bos_id  = 1;
-            vocab.special_eos_id  = 2;
-            vocab.special_unk_id  = 0;
-            vocab.special_sep_id  = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id  = LLAMA_TOKEN_NULL;
-            vocab.special_cls_id  = LLAMA_TOKEN_NULL;
-            vocab.special_mask_id = LLAMA_TOKEN_NULL;
-        } else if (tokenizer_model == "bert") {
-            vocab.type = LLAMA_VOCAB_TYPE_WPM;
-
-            // default special tokens
-            vocab.special_bos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_eos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_unk_id  = 100;
-            vocab.special_sep_id  = 102;
-            vocab.special_pad_id  = 0;
-            vocab.special_cls_id  = 101;
-            vocab.special_mask_id = 103;
-        } else if (tokenizer_model == "gpt2") {
-            vocab.type = LLAMA_VOCAB_TYPE_BPE;
-
-            // read bpe merges and populate bpe ranks
-            const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
-            if (merges_keyidx == -1) {
-                throw std::runtime_error("cannot find tokenizer merges in model file\n");
-            }
-
-            const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
-            for (int i = 0; i < n_merges; i++) {
-                const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
-                GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
-
-                std::string first;
-                std::string second;
-
-                const size_t pos = word.find(' ', 1);
-
-                if (pos != std::string::npos) {
-                    first  = word.substr(0, pos);
-                    second = word.substr(pos + 1);
-                }
-
-                vocab.bpe_ranks.emplace(std::make_pair(first, second), i);
-            }
-
-            // default special tokens
-            vocab.special_bos_id  = 11;
-            vocab.special_eos_id  = 11;
-            vocab.special_unk_id  = LLAMA_TOKEN_NULL;
-            vocab.special_sep_id  = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id  = LLAMA_TOKEN_NULL;
-            vocab.special_cls_id  = LLAMA_TOKEN_NULL;
-            vocab.special_mask_id = LLAMA_TOKEN_NULL;
-        } else if (tokenizer_model == "t5") {
-            vocab.type = LLAMA_VOCAB_TYPE_UGM;
-
-            // default special tokens
-            vocab.special_bos_id  = LLAMA_TOKEN_NULL;
-            vocab.special_eos_id  = 1;
-            vocab.special_unk_id  = 2;
-            vocab.special_sep_id  = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id  = 0;
-            vocab.special_cls_id  = LLAMA_TOKEN_NULL;
-            vocab.special_mask_id = LLAMA_TOKEN_NULL;
-
-            const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str());
-            if (precompiled_charsmap_keyidx != -1) {
-                size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx);
-                const char * precompiled_charsmap = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx);
-                vocab.precompiled_charsmap.assign(precompiled_charsmap, precompiled_charsmap + n_precompiled_charsmap);
-#ifdef IS_BIG_ENDIAN
-                // correct endiannes of data in precompiled_charsmap binary blob
-                uint32_t * xcda_blob_size = (uint32_t *) &vocab.precompiled_charsmap[0];
-                *xcda_blob_size = __builtin_bswap32(*xcda_blob_size);
-                assert(*xcda_blob_size + sizeof(uint32_t) < n_precompiled_charsmap);
-                size_t xcda_array_size = *xcda_blob_size / sizeof(uint32_t);
-                uint32_t * xcda_array = (uint32_t *) &vocab.precompiled_charsmap[sizeof(uint32_t)];
-                for (size_t i = 0; i < xcda_array_size; ++i) {
-                    xcda_array[i] = __builtin_bswap32(xcda_array[i]);
-                }
-#endif
-            }
-        } else if (tokenizer_model == "rwkv") {
-            vocab.type = LLAMA_VOCAB_TYPE_RWKV;
-
-            // default special tokens
-            vocab.special_bos_id = LLAMA_TOKEN_NULL;
-            vocab.special_eos_id = LLAMA_TOKEN_NULL;
-            vocab.special_unk_id = LLAMA_TOKEN_NULL;
-            vocab.special_sep_id = LLAMA_TOKEN_NULL;
-            vocab.special_pad_id = LLAMA_TOKEN_NULL;
-        } else {
-            throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str()));
-        }
-
-        // for now, only BPE models have pre-tokenizers
-        if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
-            vocab.tokenizer_add_space_prefix = false;
-            vocab.tokenizer_clean_spaces = true;
-            if (tokenizer_pre.empty()) {
-                LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
-                LLAMA_LOG_WARN("%s:                                             \n", __func__);
-                LLAMA_LOG_WARN("%s: ************************************        \n", __func__);
-                LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED!        \n", __func__);
-                LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL             \n", __func__);
-                LLAMA_LOG_WARN("%s: ************************************        \n", __func__);
-                LLAMA_LOG_WARN("%s:                                             \n", __func__);
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            } else if (tokenizer_pre == "default") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            } else if (
-                    tokenizer_pre == "llama3"   ||
-                    tokenizer_pre == "llama-v3" ||
-                    tokenizer_pre == "llama-bpe"||
-                    tokenizer_pre == "falcon3") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
-                vocab.tokenizer_ignore_merges = true;
-                vocab.tokenizer_add_bos = true;
-            } else if (
-                    tokenizer_pre == "deepseek-llm") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                    tokenizer_pre == "deepseek-coder") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                    tokenizer_pre == "falcon") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON;
-            } else if (
-                    tokenizer_pre == "mpt") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT;
-            } else if (
-                    tokenizer_pre == "starcoder") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER;
-            } else if (
-                    tokenizer_pre == "gpt-2"   ||
-                    tokenizer_pre == "phi-2"   ||
-                    tokenizer_pre == "jina-es" ||
-                    tokenizer_pre == "jina-de" ||
-                    tokenizer_pre == "gigachat"   ||
-                    tokenizer_pre == "jina-v1-en" ||
-                    tokenizer_pre == "jina-v2-es" ||
-                    tokenizer_pre == "jina-v2-de" ||
-                    tokenizer_pre == "jina-v2-code" ||
-                    tokenizer_pre == "roberta-bpe") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
-            } else if (
-                    tokenizer_pre == "refact") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT;
-            } else if (
-                tokenizer_pre == "command-r") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "qwen2") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "stablelm2") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2;
-            } else if (
-                tokenizer_pre == "olmo") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
-            } else if (
-                tokenizer_pre == "dbrx") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
-            } else if (
-                tokenizer_pre == "smaug-bpe") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG;
-            } else if (
-                tokenizer_pre == "poro-chat") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "chatglm-bpe") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHATGLM4;
-                vocab.special_bos_id = LLAMA_TOKEN_NULL;
-            } else if (
-                tokenizer_pre == "viking") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_VIKING;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "jais") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS;
-            } else if (
-                tokenizer_pre == "tekken") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN;
-                vocab.tokenizer_clean_spaces = false;
-                vocab.tokenizer_ignore_merges = true;
-                vocab.tokenizer_add_bos = true;
-            } else if (
-                tokenizer_pre == "smollm") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMOLLM;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "codeshell") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL;
-            } else if (
-                tokenizer_pre == "bloom") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM;
-            } else if (
-                tokenizer_pre == "gpt3-finnish") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH;
-            } else if (
-                tokenizer_pre == "exaone") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_EXAONE;
-            } else if (
-                tokenizer_pre == "chameleon") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHAMELEON;
-                vocab.tokenizer_add_bos = true;
-                vocab.tokenizer_clean_spaces = false;
-            } else if (
-                tokenizer_pre == "minerva-7b") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MINERVA;
-            } else if (
-                tokenizer_pre == "megrez") {
-                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2;
-            } else {
-                throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
-            }
-        } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            vocab.tokenizer_add_space_prefix = true;
-            vocab.tokenizer_clean_spaces = false;
-            vocab.tokenizer_add_bos = true;
-            vocab.tokenizer_add_eos = false;
-        } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            vocab.tokenizer_add_space_prefix = false;
-            vocab.tokenizer_clean_spaces = true;
-            vocab.tokenizer_add_bos = true;
-            vocab.tokenizer_add_eos = false;
-        } else if (vocab.type == LLAMA_VOCAB_TYPE_UGM) {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            vocab.tokenizer_add_bos = false;
-            vocab.tokenizer_add_eos = true;
-        } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-            vocab.tokenizer_add_space_prefix = false;
-            vocab.tokenizer_clean_spaces = false;
-            vocab.tokenizer_add_bos = false;
-            vocab.tokenizer_add_eos = false;
-        } else {
-            vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-        }
-
-        ml.get_key(LLM_KV_TOKENIZER_ADD_PREFIX,      vocab.tokenizer_add_space_prefix,         false);
-        ml.get_key(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.tokenizer_remove_extra_whitespaces, false);
-    }
-
-    const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
-    if (token_idx == -1) {
-        throw std::runtime_error("cannot find tokenizer vocab in model file\n");
-    }
-
-    const float * scores = nullptr;
-    const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
-    if (score_idx != -1) {
-        scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
-    }
-
-    const int * toktypes = nullptr;
-    const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
-    if (toktype_idx != -1) {
-        toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
-    }
-
-    const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx);
-
-    vocab.n_vocab = n_vocab;
-    vocab.id_to_token.resize(n_vocab);
-
-    for (uint32_t i = 0; i < n_vocab; i++) {
-        std::string word = gguf_get_arr_str(ctx, token_idx, i);
-
-        //GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
-        if (word.empty()) {
-            LLAMA_LOG_WARN("%s: empty token at index %u\n", __func__, i);
-            word = "[EMPTY_" + std::to_string(i) + "]";
-        }
-
-        vocab.token_to_id[word] = i;
-        vocab.max_token_len = std::max(vocab.max_token_len, (int) word.size());
-
-        auto & token_data = vocab.id_to_token[i];
-        token_data.text  = std::move(word);
-        token_data.score = scores ? scores[i] : 0.0f;
-        token_data.attr  = LLAMA_TOKEN_ATTR_NORMAL;
-
-        if (toktypes) {  //TODO: remove, required until per token attributes are available from GGUF file
-            switch(toktypes[i]) {
-                case LLAMA_TOKEN_TYPE_UNKNOWN:      token_data.attr = LLAMA_TOKEN_ATTR_UNKNOWN;      break;
-                case LLAMA_TOKEN_TYPE_UNUSED:       token_data.attr = LLAMA_TOKEN_ATTR_UNUSED;       break;
-                case LLAMA_TOKEN_TYPE_NORMAL:       token_data.attr = LLAMA_TOKEN_ATTR_NORMAL;       break;
-                case LLAMA_TOKEN_TYPE_CONTROL:      token_data.attr = LLAMA_TOKEN_ATTR_CONTROL;      break;
-                case LLAMA_TOKEN_TYPE_USER_DEFINED: token_data.attr = LLAMA_TOKEN_ATTR_USER_DEFINED; break;
-                case LLAMA_TOKEN_TYPE_BYTE:         token_data.attr = LLAMA_TOKEN_ATTR_BYTE;         break;
-                case LLAMA_TOKEN_TYPE_UNDEFINED:    token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED;    break;
-                default:                            token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED;    break;
-            }
-        }
-    }
-    GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
-
-    vocab.init_tokenizer();
-
-    // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
-    if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
-        try {
-            vocab.linefeed_id = llama_byte_to_token_impl(vocab, '\n');
-        } catch (const std::exception & e) {
-            LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what());
-            vocab.linefeed_id = vocab.special_pad_id;
-        }
-    } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
-        vocab.linefeed_id = vocab.special_pad_id;
-    } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
-        const std::vector ids = llama_tokenize_internal(vocab, "\n", false);
-        GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
-        vocab.linefeed_id = ids[0];
-    } else {
-        const std::vector ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A
-
-        //GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
-        if (ids.empty()) {
-            LLAMA_LOG_WARN("%s: model vocab missing newline token, using special_pad_id instead\n", __func__);
-            vocab.linefeed_id = vocab.special_pad_id;
-        } else {
-            vocab.linefeed_id = ids[0];
-        }
-    }
-
-    // special tokens
-    {
-        const std::vector> special_token_types = {
-            { LLM_KV_TOKENIZER_BOS_ID,     vocab.special_bos_id     },
-            { LLM_KV_TOKENIZER_EOS_ID,     vocab.special_eos_id     },
-            { LLM_KV_TOKENIZER_EOT_ID,     vocab.special_eot_id     },
-            { LLM_KV_TOKENIZER_EOM_ID,     vocab.special_eom_id     },
-            { LLM_KV_TOKENIZER_UNK_ID,     vocab.special_unk_id     },
-            { LLM_KV_TOKENIZER_SEP_ID,     vocab.special_sep_id     },
-            { LLM_KV_TOKENIZER_PAD_ID,     vocab.special_pad_id     },
-            { LLM_KV_TOKENIZER_CLS_ID,     vocab.special_cls_id     },
-            { LLM_KV_TOKENIZER_MASK_ID,    vocab.special_mask_id    },
-            { LLM_KV_TOKENIZER_FIM_PRE_ID, vocab.special_fim_pre_id },
-            { LLM_KV_TOKENIZER_FIM_SUF_ID, vocab.special_fim_suf_id },
-            { LLM_KV_TOKENIZER_FIM_MID_ID, vocab.special_fim_mid_id },
-            { LLM_KV_TOKENIZER_FIM_PAD_ID, vocab.special_fim_pad_id },
-            { LLM_KV_TOKENIZER_FIM_REP_ID, vocab.special_fim_rep_id },
-            { LLM_KV_TOKENIZER_FIM_SEP_ID, vocab.special_fim_sep_id },
-
-            // deprecated
-            { LLM_KV_TOKENIZER_PREFIX_ID, vocab.special_fim_pre_id },
-            { LLM_KV_TOKENIZER_SUFFIX_ID, vocab.special_fim_suf_id },
-            { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_fim_mid_id },
-        };
-
-        for (const auto & it : special_token_types) {
-            const std::string & key = kv(std::get<0>(it));
-            int32_t & id = std::get<1>(it);
-
-            uint32_t new_id;
-            if (!ml.get_key(std::get<0>(it), new_id, false)) {
-                continue;
-            }
-            if (new_id >= vocab.id_to_token.size()) {
-                LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n",
-                    __func__, key.c_str(), new_id, id);
-            } else {
-                id = new_id;
-            }
-        }
-
-        // Handle add_bos_token and add_eos_token
-        {
-            bool temp = true;
-
-            if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
-                vocab.tokenizer_add_bos = temp;
-            }
-            if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
-                vocab.tokenizer_add_eos = temp;
-            }
-        }
-
-        // auto-detect special tokens by text
-        // TODO: convert scripts should provide these tokens through the KV metadata LLM_KV_TOKENIZER_...
-        //       for now, we apply this workaround to find the tokens based on their text
-
-        for (const auto & t : vocab.token_to_id) {
-            // find EOT token: "<|eot_id|>", "<|im_end|>", "", etc.
-            if (vocab.special_eot_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|eot_id|>"
-                        || t.first == "<|im_end|>"
-                        || t.first == "<|end|>"
-                        || t.first == ""
-                        || t.first == "<|endoftext|>"
-                        || t.first == ""
-                        || t.first == "<|end▁of▁sentence|>" // DeepSeek
-                   ) {
-                    vocab.special_eot_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find EOM token: "<|eom_id|>"
-            if (vocab.special_eom_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|eom_id|>"
-                        ) {
-                    vocab.special_eom_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_PRE token: "<|fim_prefix|>", "", "
", etc.
-            if (vocab.special_fim_pre_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_prefix|>"  // Qwen
-                        || t.first == ""
-                        || t.first == "<|fim▁begin|>" // DeepSeek
-                        || t.first == "
"
-                        ) {
-                    vocab.special_fim_pre_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_SUF token: "<|fim_suffix|>", "", "", etc.
-            if (vocab.special_fim_suf_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_suffix|>" // Qwen
-                        || t.first == ""
-                        || t.first == "<|fim▁hole|>" // DeepSeek
-                        || t.first == ""
-                        ) {
-                    vocab.special_fim_suf_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_MID token: "<|fim_middle|>", "", "", etc.
-            if (vocab.special_fim_mid_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_middle|>" // Qwen
-                        || t.first == ""
-                        || t.first == "<|fim▁end|>"  // DeepSeek
-                        || t.first == ""
-                        ) {
-                    vocab.special_fim_mid_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_PAD token: "<|fim_pad|>", "", "", etc.
-            if (vocab.special_fim_pad_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_pad|>" // Qwen
-                        || t.first == ""
-                        || t.first == ""
-                        ) {
-                    vocab.special_fim_pad_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_REP token: "<|fim_repo|>", "", "", etc.
-            if (vocab.special_fim_rep_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_repo|>"  // Qwen
-                        || t.first == "<|repo_name|>"
-                        || t.first == ""
-                        || t.first == ""
-                        ) {
-                    vocab.special_fim_rep_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-
-            // find FIM_SEP token: "<|file_sep|>"
-            if (vocab.special_fim_sep_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|file_sep|>" // Qwen
-                        ) {
-                    vocab.special_fim_sep_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
-        }
-
-        // maintain a list of tokens that cause end-of-generation
-        // this is currently determined based on the token text, which is obviously not ideal
-        // ref: https://github.com/ggerganov/llama.cpp/issues/9606
-        vocab.special_eog_ids.clear();
-
-        if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_pad_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_fim_pad_id);
-        }
-
-        if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_rep_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_fim_rep_id);
-        }
-
-        if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_sep_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_fim_sep_id);
-        }
-
-        for (const auto & t : vocab.token_to_id) {
-            if (false
-                    || t.first == "<|eot_id|>"
-                    || t.first == "<|im_end|>"
-                    || t.first == "<|end|>"
-                    || t.first == ""
-                    || t.first == "<|endoftext|>"
-                    || t.first == "<|eom_id|>"
-                    || t.first == ""
-               ) {
-                vocab.special_eog_ids.insert(t.second);
-                if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                    LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                            __func__, t.second, t.first.c_str());
-                    vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                }
-            } else {
-                // token is control, but not marked as EOG -> print a debug log
-                if (vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL && vocab.special_eog_ids.count(t.second) == 0) {
-                    LLAMA_LOG_DEBUG("%s: control token: %6d '%s' is not marked as EOG\n",
-                            __func__, t.second, t.first.c_str());
-                }
-            }
-        }
-
-        // sanity checks
-        if (vocab.special_eos_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eos_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_eos_id);
-            LLAMA_LOG_WARN("%s: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
-        }
-
-        if (vocab.special_eot_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eot_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_eot_id);
-            LLAMA_LOG_WARN("%s: special_eot_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
-        }
-
-        if (vocab.special_eom_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eom_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_eom_id);
-            LLAMA_LOG_WARN("%s: special_eom_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
-        }
-    }
-
-    // build special tokens cache
-    {
-        for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
-            if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
-                vocab.cache_special_tokens.push_back(id);
-            }
-        }
-
-        std::sort(vocab.cache_special_tokens.begin(), vocab.cache_special_tokens.end(),
-            [&] (const llama_vocab::id a, const llama_vocab::id b) {
-                return vocab.id_to_token[a].text.size() > vocab.id_to_token[b].text.size();
-            }
-        );
-
-        LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t)vocab.cache_special_tokens.size());
-    }
-
-    // build token to piece cache
-    {
-        size_t size_cache = 0;
-
-        std::vector cache_token_to_piece(n_vocab);
-
-        for (uint32_t id = 0; id < n_vocab; ++id) {
-            cache_token_to_piece[id] = llama_token_to_piece(&model, id, true);
-
-            size_cache += cache_token_to_piece[id].size();
-        }
-
-        std::swap(vocab.cache_token_to_piece, cache_token_to_piece);
-
-        LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MB\n", __func__, size_cache / 1024.0 / 1024.0);
-    }
-
-    // Handle per token attributes
-    //NOTE: Each model customizes per token attributes.
-    //NOTE: Per token attributes are missing from the GGUF file.
-    //TODO: Extract attributes from GGUF file.
-    {
-        auto _contains_any = [] (const std::string &str, const std::vector &substrs) -> bool {
-            for (auto substr : substrs) {
-                if (str.find(substr) < std::string::npos) {
-                    return true;
-                }
-            }
-            return false;
-        };
-
-        auto _set_tokenid_attr = [&] (const llama_vocab::id id, llama_token_attr attr, bool value) {
-            uint32_t current = vocab.id_to_token.at(id).attr;
-            current = value ? (current | attr) : (current & ~attr);
-            vocab.id_to_token[id].attr = (llama_token_attr) current;
-        };
-
-        auto _set_token_attr = [&] (const std::string & token, llama_token_attr attr, bool value) {
-            _set_tokenid_attr(vocab.token_to_id.at(token), attr, value);
-        };
-
-        std::string model_name;
-        std::string tokenizer_pre;
-
-        ml.get_key(LLM_KV_GENERAL_NAME, model_name, false);
-        ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
-
-        // model name to lowercase
-        std::transform(model_name.begin(), model_name.end(), model_name.begin(),
-            [] (const std::string::value_type x) {
-                return std::tolower(x);
-            }
-        );
-
-        // set attributes by model/tokenizer name
-        if (_contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})) {
-            _set_token_attr("", LLAMA_TOKEN_ATTR_LSTRIP, true);
-        } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
-            for (auto id : vocab.cache_special_tokens) {
-                _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
-            }
-            for (auto token : {""}) {
-                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, true);
-            }
-            for (auto token : {"", "", "<|endoftext|>"}) {
-                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, false);
-            }
-        }
-    }
-}
-
-static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
-    const auto & hparams = model.hparams;
-    const auto & vocab   = model.vocab;
-
-    const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
-
-    auto print_f = [](const std::function & f, uint32_t n) {
-        bool is_var = false;
-
-        std::vector v;
-        for (uint32_t i = 0; i < n; ++i) {
-            v.push_back(f(i));
-            if (v[i] != v[0]) {
-                is_var = true;
-            }
-        }
-
-        std::stringstream ss;
-
-        if (is_var) {
-            ss << "[";
-            for (uint32_t i = 0; i < n; ++i) {
-                ss << v[i];
-                if (i < n - 1) {
-                    ss << ", ";
-                }
-            }
-            ss << "]";
-        } else {
-            ss << v[0];
-        }
-
-        return ss.str();
-    };
-
-    // hparams
-    LLAMA_LOG_INFO("%s: format           = %s\n",     __func__, llama_file_version_name(ml.fver));
-    LLAMA_LOG_INFO("%s: arch             = %s\n",     __func__, LLM_ARCH_NAMES.at(model.arch));
-    LLAMA_LOG_INFO("%s: vocab type       = %s\n",     __func__, llama_model_vocab_type_name(vocab.type));
-    LLAMA_LOG_INFO("%s: n_vocab          = %u\n",     __func__, hparams.n_vocab);
-    LLAMA_LOG_INFO("%s: n_merges         = %u\n",     __func__, (int) vocab.bpe_ranks.size());
-    LLAMA_LOG_INFO("%s: vocab_only       = %d\n",     __func__, hparams.vocab_only);
-
-    if (!hparams.vocab_only) {
-        LLAMA_LOG_INFO("%s: n_ctx_train      = %u\n",     __func__, hparams.n_ctx_train);
-        LLAMA_LOG_INFO("%s: n_embd           = %u\n",     __func__, hparams.n_embd);
-        LLAMA_LOG_INFO("%s: n_layer          = %u\n",     __func__, hparams.n_layer);
-        LLAMA_LOG_INFO("%s: n_head           = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head(il);    }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_head_kv        = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_head_kv(il); }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_rot            = %u\n",     __func__, hparams.n_rot);
-        LLAMA_LOG_INFO("%s: n_swa            = %u\n",     __func__, hparams.n_swa);
-        LLAMA_LOG_INFO("%s: n_embd_head_k    = %u\n",     __func__, hparams.n_embd_head_k);
-        LLAMA_LOG_INFO("%s: n_embd_head_v    = %u\n",     __func__, hparams.n_embd_head_v);
-        LLAMA_LOG_INFO("%s: n_gqa            = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_gqa(il);        }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_embd_k_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_k_gqa(il); }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_embd_v_gqa     = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_embd_v_gqa(il); }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: f_norm_eps       = %.1e\n",   __func__, hparams.f_norm_eps);
-        LLAMA_LOG_INFO("%s: f_norm_rms_eps   = %.1e\n",   __func__, hparams.f_norm_rms_eps);
-        LLAMA_LOG_INFO("%s: f_clamp_kqv      = %.1e\n",   __func__, hparams.f_clamp_kqv);
-        LLAMA_LOG_INFO("%s: f_max_alibi_bias = %.1e\n",   __func__, hparams.f_max_alibi_bias);
-        LLAMA_LOG_INFO("%s: f_logit_scale    = %.1e\n",   __func__, hparams.f_logit_scale);
-        LLAMA_LOG_INFO("%s: n_ff             = %s\n",     __func__, print_f([&](uint32_t il) { return hparams.n_ff(il); }, hparams.n_layer).c_str());
-        LLAMA_LOG_INFO("%s: n_expert         = %u\n",     __func__, hparams.n_expert);
-        LLAMA_LOG_INFO("%s: n_expert_used    = %u\n",     __func__, hparams.n_expert_used);
-        LLAMA_LOG_INFO("%s: causal attn      = %d\n",     __func__, hparams.causal_attn);
-        LLAMA_LOG_INFO("%s: pooling type     = %d\n",     __func__, hparams.pooling_type);
-        LLAMA_LOG_INFO("%s: rope type        = %d\n",     __func__, hparams.rope_type);
-        LLAMA_LOG_INFO("%s: rope scaling     = %s\n",     __func__, rope_scaling_type);
-        LLAMA_LOG_INFO("%s: freq_base_train  = %.1f\n",   __func__, hparams.rope_freq_base_train);
-        LLAMA_LOG_INFO("%s: freq_scale_train = %g\n",     __func__, hparams.rope_freq_scale_train);
-        LLAMA_LOG_INFO("%s: n_ctx_orig_yarn  = %u\n",     __func__, hparams.n_ctx_orig_yarn);
-        LLAMA_LOG_INFO("%s: rope_finetuned   = %s\n",     __func__, hparams.rope_finetuned ? "yes" : "unknown");
-        LLAMA_LOG_INFO("%s: ssm_d_conv       = %u\n",     __func__, hparams.ssm_d_conv);
-        LLAMA_LOG_INFO("%s: ssm_d_inner      = %u\n",     __func__, hparams.ssm_d_inner);
-        LLAMA_LOG_INFO("%s: ssm_d_state      = %u\n",     __func__, hparams.ssm_d_state);
-        LLAMA_LOG_INFO("%s: ssm_dt_rank      = %u\n",     __func__, hparams.ssm_dt_rank);
-        LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms   = %d\n",     __func__, hparams.ssm_dt_b_c_rms);
-    }
-
-    LLAMA_LOG_INFO("%s: model type       = %s\n",     __func__, llama_model_type_name(model.type));
-    LLAMA_LOG_INFO("%s: model ftype      = %s\n",     __func__, llama_model_ftype_name(model.ftype).c_str());
-    if (ml.n_elements >= 1e12) {
-        LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, ml.n_elements*1e-12);
-    } else if (ml.n_elements >= 1e9) {
-        LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, ml.n_elements*1e-9);
-    } else if (ml.n_elements >= 1e6) {
-        LLAMA_LOG_INFO("%s: model params     = %.2f M\n", __func__, ml.n_elements*1e-6);
-    } else {
-        LLAMA_LOG_INFO("%s: model params     = %.2f K\n", __func__, ml.n_elements*1e-3);
-    }
-    if (ml.n_bytes < GiB) {
-        LLAMA_LOG_INFO("%s: model size       = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0,        ml.n_bytes*8.0/ml.n_elements);
-    } else {
-        LLAMA_LOG_INFO("%s: model size       = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
-    }
-
-    // general kv
-    LLAMA_LOG_INFO("%s: general.name     = %s\n",    __func__, model.name.c_str());
-
-    // special tokens
-    if (vocab.special_bos_id  != -1)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,     vocab.id_to_token[vocab.special_bos_id].text.c_str() );  }
-    if (vocab.special_eos_id  != -1)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,     vocab.id_to_token[vocab.special_eos_id].text.c_str() );  }
-    if (vocab.special_eot_id  != -1)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,     vocab.id_to_token[vocab.special_eot_id].text.c_str() );  }
-    if (vocab.special_eom_id  != -1)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, vocab.special_eom_id,     vocab.id_to_token[vocab.special_eom_id].text.c_str() );  }
-    if (vocab.special_unk_id  != -1)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,     vocab.id_to_token[vocab.special_unk_id].text.c_str() );  }
-    if (vocab.special_sep_id  != -1)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,     vocab.id_to_token[vocab.special_sep_id].text.c_str() );  }
-    if (vocab.special_pad_id  != -1)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,     vocab.id_to_token[vocab.special_pad_id].text.c_str() );  }
-    if (vocab.special_cls_id  != -1)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,     vocab.id_to_token[vocab.special_cls_id].text.c_str() );  }
-    if (vocab.special_mask_id != -1)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,    vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
-
-    if (vocab.linefeed_id != -1)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,        vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
-
-    if (vocab.special_fim_pre_id != -1) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
-    if (vocab.special_fim_suf_id != -1) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
-    if (vocab.special_fim_mid_id != -1) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
-    if (vocab.special_fim_pad_id != -1) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
-    if (vocab.special_fim_rep_id != -1) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
-    if (vocab.special_fim_sep_id != -1) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
-
-    for (const auto & id : vocab.special_eog_ids) {
-        LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, vocab.id_to_token[id].text.c_str() );
-    }
-
-    LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, vocab.max_token_len);
-
-    if (model.arch == LLM_ARCH_DEEPSEEK) {
-        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
-        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
-        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
-        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
-    }
-
-    if (model.arch == LLM_ARCH_DEEPSEEK2) {
-        LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
-        LLAMA_LOG_INFO("%s: n_lora_q             = %d\n",     __func__, hparams.n_lora_q);
-        LLAMA_LOG_INFO("%s: n_lora_kv            = %d\n",     __func__, hparams.n_lora_kv);
-        LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
-        LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
-        LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
-        LLAMA_LOG_INFO("%s: rope_yarn_log_mul    = %.4f\n",   __func__, hparams.rope_yarn_log_mul);
-    }
-
-    if (model.arch == LLM_ARCH_QWEN2MOE) {
-        LLAMA_LOG_INFO("%s: n_ff_exp         = %d\n",     __func__, hparams.n_ff_exp);
-        LLAMA_LOG_INFO("%s: n_ff_shexp       = %d\n",     __func__, hparams.n_ff_shexp);
-    }
-
-    if (model.arch == LLM_ARCH_MINICPM || model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) {
-        LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
-        LLAMA_LOG_INFO("%s: f_residual_scale  = %f\n", __func__, hparams.f_residual_scale);
-        LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
-    }
-}
-
-enum llm_tensor_layer {
-    LLM_TENSOR_LAYER_INPUT,
-    LLM_TENSOR_LAYER_REPEATING,
-    LLM_TENSOR_LAYER_OUTPUT,
-};
-
-struct llm_tensor_info {
-    llm_tensor_layer layer;
-    ggml_op op;
-};
-
-static const std::map llm_tensor_info_mapping = {
-    {LLM_TENSOR_TOKEN_EMBD,                 {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_POS_EMBD,                   {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_TOKEN_EMBD_NORM,            {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_TOKEN_TYPES,                {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_OUTPUT,                     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CLS,                        {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CLS_OUT,                    {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_OUTPUT_NORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_ENC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
-    {LLM_TENSOR_ROPE_FREQS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
-    {LLM_TENSOR_ROPE_FACTORS_LONG,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
-    {LLM_TENSOR_ROPE_FACTORS_SHORT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
-    {LLM_TENSOR_ATTN_Q,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_K,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_V,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_QKV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_OUT,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP_SHEXP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_A,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_A_MQA,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_B,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_K,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_V,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_QKV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_OUT,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_DOWN_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_UP_SHEXP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_A,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_Q_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_A_MQA,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ATTN_KV_B,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_Q,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_K,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_V,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_OUT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_DEC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_ENC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_INP_SHEXP,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_GATE_INP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_IN,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_X,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_DT,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_SSM_OUT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_W1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_W2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_DECAY_W1,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_DECAY_W2,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_KEY,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_VALUE,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_RECEPTANCE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_GATE,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_TIME_MIX_OUTPUT,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CHANNEL_MIX_KEY,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CHANNEL_MIX_VALUE,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_FFN_ACT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}},
-    {LLM_TENSOR_SSM_CONV1D,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
-    {LLM_TENSOR_SSM_A,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}},
-    {LLM_TENSOR_SSM_D,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_LERP_X,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_LN,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_CHANNEL_MIX_LERP_K,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_CHANNEL_MIX_LERP_R,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_TIME_MIX_LERP_W,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_K,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_V,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_R,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_LERP_G,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_DECAY,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
-    {LLM_TENSOR_TIME_MIX_FIRST,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}},
-    {LLM_TENSOR_ATTN_NORM,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_NORM_2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_OUT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_POST_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_POST_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_NORM_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_Q_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_K_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_LAYER_OUT_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_Q_A_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_KV_A_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ATTN_SUB_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_FFN_SUB_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_CROSS_ATTN_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ENC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_ENC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_DEC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_ENC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
-    {LLM_TENSOR_FFN_DOWN_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
-    {LLM_TENSOR_FFN_GATE_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
-    {LLM_TENSOR_FFN_UP_EXPS,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
-    // this tensor is loaded for T5, but never used
-    {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
-    {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
-    {LLM_TENSOR_POS_NET_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_NORM1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_NORM2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_CONV1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
-    {LLM_TENSOR_POS_NET_CONV2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
-    {LLM_TENSOR_POS_NET_ATTN_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_POS_NET_ATTN_Q,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_POS_NET_ATTN_K,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_POS_NET_ATTN_V,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_POS_NET_ATTN_OUT,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CONVNEXT_DW,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
-    {LLM_TENSOR_CONVNEXT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-    {LLM_TENSOR_CONVNEXT_PW1,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CONVNEXT_PW2,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
-    {LLM_TENSOR_CONVNEXT_GAMMA,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
-};
-
 // checks if the weight tensor can be used with the specified buffer type and device
 static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w, ggml_op op, ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev) {
     GGML_ASSERT(w != nullptr);
@@ -7841,11 +422,12 @@ static bool llm_load_tensors(
                 tn_tensor = LLM_TENSOR_OUTPUT;
             }
 
-            auto it = llm_tensor_info_mapping.find(tn_tensor);
-            if (it == llm_tensor_info_mapping.end()) {
+            llm_tensor_info info;
+            try {
+                info = llm_tensor_info_for(tn_tensor);
+            } catch (const std::out_of_range & e) {
                 throw std::runtime_error(format("missing tensor info mapping for %s", tn.str().c_str()));
             }
-            const auto & info = it->second;
 
             // tensors with "bias" suffix are always used with GGML_OP_ADD
             ggml_op op;
@@ -14644,9 +7226,9 @@ struct llm_build_context {
 
                 // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
                 switch (model.type) {
-                    case e_model::MODEL_2B:
-                    case e_model::MODEL_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
-                    case e_model::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
+                    case llm_type::MODEL_2B:
+                    case llm_type::MODEL_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
+                    case llm_type::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
                     default: GGML_ABORT("fatal error");
                 };
                 cb(Qcur, "Qcur_scaled", il);
@@ -17896,572 +10478,6 @@ static struct ggml_cgraph * llama_build_graph(
     return result;
 }
 
-static void llama_set_k_shift(llama_context & lctx) {
-    const int64_t kv_size = lctx.kv_self.size;
-
-    assert(ggml_backend_buffer_is_host(lctx.inp_K_shift->buffer));
-
-    int32_t * data = (int32_t *) lctx.inp_K_shift->data;
-
-    for (int i = 0; i < kv_size; ++i) {
-        data[i] = lctx.kv_self.cells[i].delta;
-    }
-}
-
-static void llama_set_s_copy(llama_context & lctx) {
-    const int64_t kv_size = lctx.kv_self.size;
-
-    assert(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer));
-
-    int32_t * data = (int32_t *) lctx.inp_s_copy->data;
-
-    for (int i = 0; i < kv_size; ++i) {
-        data[i] = lctx.kv_self.cells[i].src;
-    }
-}
-
-static int32_t llama_relative_position_bucket(llama_pos x, llama_pos y, uint64_t n_buckets, bool bidirectional) {
-    // TODO move to hparams if a T5 variant appears that uses a different value
-    const int64_t max_distance = 128;
-
-    if (bidirectional) {
-        n_buckets >>= 1;
-    }
-
-    const int64_t max_exact = n_buckets >> 1;
-
-    int32_t relative_position = x - y;
-    int32_t relative_bucket = 0;
-    if (bidirectional) {
-        relative_bucket += (relative_position > 0) * n_buckets;
-        relative_position = abs(relative_position);
-    } else {
-        relative_position = -std::min(relative_position, 0);
-    }
-    int32_t relative_position_if_large = floorf(max_exact + logf(1.0 * relative_position / max_exact) * (n_buckets - max_exact) / log(1.0 * max_distance / max_exact));
-    relative_position_if_large = std::min(relative_position_if_large, n_buckets - 1);
-    relative_bucket += (relative_position < max_exact ? relative_position : relative_position_if_large);
-    return relative_bucket;
-}
-
-static void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch) {
-    //
-    // set input data
-    //
-
-    const auto & hparams = lctx.model.hparams;
-    const auto & cparams = lctx.cparams;
-    const auto & kv_self = lctx.kv_self;
-
-    if (ubatch.token) {
-        const int64_t n_tokens = ubatch.n_tokens;
-
-        ggml_backend_tensor_set(lctx.inp_tokens, ubatch.token, 0, n_tokens*ggml_element_size(lctx.inp_tokens));
-    }
-
-    if (ubatch.embd) {
-        const int64_t n_embd   = hparams.n_embd;
-        const int64_t n_tokens = ubatch.n_tokens;
-
-        ggml_backend_tensor_set(lctx.inp_embd, ubatch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd));
-    }
-
-    if (ubatch.pos && lctx.inp_pos) {
-        const int64_t n_tokens = ubatch.n_tokens;
-        auto n_pos = lctx.n_pos_per_token;
-        ggml_backend_tensor_set(lctx.inp_pos, ubatch.pos, 0, n_tokens*n_pos*ggml_element_size(lctx.inp_pos));
-    }
-
-    if (hparams.causal_attn || cparams.pooling_type == LLAMA_POOLING_TYPE_NONE) {
-        //GGML_ASSERT(lctx.inp_out_ids && "every model that can must skip unused outputs");
-
-        if (!lctx.inp_out_ids) {
-            LLAMA_LOG_WARN("%s: 'lctx.inp_out_ids' is not created\n", __func__);
-        } else {
-            const int64_t n_tokens = ubatch.n_tokens;
-
-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_out_ids->buffer));
-            int32_t * data = (int32_t *) lctx.inp_out_ids->data;
-
-            if (lctx.n_outputs == n_tokens) {
-                for (int i = 0; i < n_tokens; ++i) {
-                    data[i] = i;
-                }
-            } else if (ubatch.output) {
-                int32_t n_outputs = 0;
-                for (int i = 0; i < n_tokens; ++i) {
-                    if (ubatch.output[i]) {
-                        data[n_outputs++] = i;
-                    }
-                }
-                // the graph needs to have been passed the correct number of outputs
-                GGML_ASSERT(lctx.n_outputs == n_outputs);
-            } else if (lctx.n_outputs == 1) {
-                // only keep last output
-                data[0] = n_tokens - 1;
-            } else {
-                GGML_ASSERT(lctx.n_outputs == 0);
-            }
-        }
-    }
-
-    GGML_ASSERT(
-        // (!a || b) is a logical implication (a -> b)
-        // !hparams.causal_attn -> !cparams.causal_attn
-        (hparams.causal_attn || !cparams.causal_attn) &&
-        "causal attention is not supported by this model"
-    );
-
-    if (lctx.inp_KQ_mask || lctx.inp_KQ_mask_swa) {
-        // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache.
-        if (cparams.causal_attn && !lctx.is_encoding) {
-            const int64_t n_kv         = kv_self.n;
-            const int64_t n_tokens     = ubatch.n_tokens;
-            const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-            const int64_t n_seqs       = ubatch.n_seqs;
-
-
-            float * data     = nullptr;
-            float * data_swa = nullptr;
-
-            if (lctx.inp_KQ_mask) {
-                GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
-                data = (float *) lctx.inp_KQ_mask->data;
-            }
-
-            if (lctx.inp_KQ_mask_swa) {
-                GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_swa->buffer));
-                data_swa = (float *) lctx.inp_KQ_mask_swa->data;
-            }
-
-            // For causal attention, use only the previous KV cells
-            // of the correct sequence for each token of the ubatch.
-            // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
-            for (int h = 0; h < 1; ++h) {
-                for (int s = 0; s < n_seqs; ++s) {
-                    const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-                    for (int j = 0; j < n_seq_tokens; ++j) {
-                        const llama_pos pos = ubatch.pos[s*n_seq_tokens + j];
-
-                        for (int i = 0; i < n_kv; ++i) {
-                            float f;
-                            if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) {
-                                f = -INFINITY;
-                            } else {
-                                if (hparams.use_alibi) {
-                                    f = -std::abs(kv_self.cells[i].pos - pos);
-                                } else {
-                                    f = 0.0f;
-                                }
-                            }
-
-                            if (data) {
-                                data[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
-                            }
-
-                            // may need to cut off old tokens for sliding window
-                            if (data_swa) {
-                                if (pos - kv_self.cells[i].pos >= (int32_t)hparams.n_swa) {
-                                    f = -INFINITY;
-                                }
-                                data_swa[h*(n_kv*n_tokens) + s*(n_kv*n_seq_tokens) + j*n_kv + i] = f;
-                            }
-                        }
-                    }
-                }
-
-                if (data) {
-                    for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
-                        for (int j = 0; j < n_kv; ++j) {
-                            data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
-                        }
-                    }
-                }
-
-                if (data_swa) {
-                    for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
-                        for (int j = 0; j < n_kv; ++j) {
-                            data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
-                        }
-                    }
-                }
-            }
-        } else {
-            const int64_t n_tokens     = ubatch.n_tokens;
-            const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-            const int64_t n_seqs       = ubatch.n_seqs;
-            // when using kv cache, the mask needs to match the kv cache size
-            const int64_t n_stride = hparams.causal_attn && !lctx.is_encoding ? kv_self.n : n_tokens;
-
-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
-
-            float * data = (float *) lctx.inp_KQ_mask->data;
-
-            for (int h = 0; h < 1; ++h) {
-                for (int s1 = 0; s1 < n_seqs; ++s1) {
-                    const llama_seq_id seq_id = ubatch.seq_id[s1][0];
-
-                    for (int j = 0; j < n_seq_tokens; ++j) {
-                        const int32_t tj = s1*n_seq_tokens + j;
-
-                        for (int s0 = 0; s0 < n_seqs; ++s0) {
-                            for (int i = 0; i < n_seq_tokens; ++i) {
-                                const int32_t ti = s0*n_seq_tokens + i;
-                                float f = -INFINITY;
-
-                                for (int s = 0; s < ubatch.n_seq_id[s0]; ++s) {
-                                    if (ubatch.seq_id[s0][s] == seq_id) {
-                                        if (hparams.use_alibi) {
-                                            f = -std::abs(ubatch.pos[ti] - ubatch.pos[tj]);
-                                        } else {
-                                            f = 0.0f;
-                                        }
-                                        break;
-                                    }
-                                }
-
-                                data[h*(n_tokens*n_tokens) + tj*n_stride + ti] = f;
-                            }
-                        }
-
-                        for (int i = n_tokens; i < n_stride; ++i) {
-                            data[h*(n_tokens*n_tokens) + tj*n_stride + i] = -INFINITY;
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
-        const int64_t n_tokens     = ubatch.n_tokens;
-        const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-        const int64_t n_seqs       = ubatch.n_seqs;
-
-        GGML_ASSERT(lctx.inp_mean);
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));
-
-        float * data = (float *) lctx.inp_mean->data;
-        memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));
-
-        std::vector sum(n_tokens, 0);
-
-        for (int s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-            // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true
-            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");
-
-            sum[seq_id] += ubatch.n_seq_tokens;
-        }
-
-        std::vector div(n_tokens, 0.0f);
-        for (int i = 0; i < n_tokens; ++i) {
-            const uint64_t s = sum[i];
-            if (s > 0) {
-                div[i] = 1.0f/float(s);
-            }
-        }
-
-        for (int s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-            for (int i = 0; i < n_seq_tokens; ++i) {
-                data[seq_id*n_tokens + s*n_seq_tokens + i] = div[seq_id];
-            }
-        }
-    }
-
-    if (cparams.embeddings && (
-                cparams.pooling_type == LLAMA_POOLING_TYPE_CLS ||
-                cparams.pooling_type == LLAMA_POOLING_TYPE_RANK)) {
-        const int64_t n_tokens     = ubatch.n_tokens;
-        const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-        const int64_t n_seqs       = ubatch.n_seqs;
-
-        GGML_ASSERT(lctx.inp_cls);
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
-
-        uint32_t * data = (uint32_t *) lctx.inp_cls->data;
-        memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
-
-        for (int s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-            // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true
-            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS or RANK");
-
-            for (int i = 0; i < n_seq_tokens; ++i) {
-                const llama_pos pos = ubatch.pos[s*n_seq_tokens + i];
-
-                if (pos == 0) {
-                    data[seq_id] = s*n_seq_tokens + i;
-                }
-            }
-        }
-    }
-
-    if (cparams.embeddings && cparams.pooling_type == LLAMA_POOLING_TYPE_LAST) {
-        const int64_t n_tokens     = ubatch.n_tokens;
-        const int64_t n_seq_tokens = ubatch.n_seq_tokens;
-        const int64_t n_seqs       = ubatch.n_seqs;
-
-        GGML_ASSERT(lctx.inp_cls);
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
-
-        uint32_t * data = (uint32_t *) lctx.inp_cls->data;
-        memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
-
-        std::vector last_pos(n_tokens, -1);
-        std::vector last_row(n_tokens, -1);
-
-        for (int s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = ubatch.seq_id[s][0];
-
-            // TODO: adapt limits to n_seqs when ubatch.equal_seqs is true
-            GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == LAST");
-
-            for (int i = 0; i < n_seq_tokens; ++i) {
-                const llama_pos pos = ubatch.pos[s*n_seq_tokens + i];
-
-                if (pos >= last_pos[seq_id]) {
-                    last_pos[seq_id] = pos;
-                    last_row[seq_id] = s*n_seq_tokens + i;
-                }
-            }
-        }
-
-        for (int i = 0; i < n_tokens; ++i) {
-            if (last_row[i] >= 0) {
-                data[i] = last_row[i];
-            }
-        }
-    }
-
-    if (kv_self.recurrent) {
-        const int64_t n_kv = kv_self.n;
-
-        if (lctx.inp_s_mask) {
-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_mask->buffer));
-            float * data = (float *) lctx.inp_s_mask->data;
-
-            // clear unused states
-            for (int i = 0; i < n_kv; ++i) {
-                const uint32_t  cell_id = i + kv_self.head;
-                llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id];
-
-                data[i] = (float) (kv_cell.src >= 0);
-
-                // only clear once
-                if (kv_cell.src < 0) {
-                    kv_cell.src = cell_id;
-                }
-            }
-        }
-
-        if (lctx.inp_s_copy) {
-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_copy->buffer));
-            int32_t * data = (int32_t *) lctx.inp_s_copy->data;
-
-            // assuming copy destinations ALWAYS happen ONLY on the cells between head and head+n
-            for (uint32_t i = 0; i < n_kv; ++i) {
-                const uint32_t  cell_id = i + kv_self.head;
-                llama_kv_cell & kv_cell = lctx.kv_self.cells[cell_id];
-
-                // prevent out-of-bound sources
-                if (kv_cell.src < 0 || (uint32_t) kv_cell.src >= kv_self.size) {
-                    kv_cell.src = cell_id;
-                }
-
-                data[i] = kv_cell.src;
-
-                // ensure copy only happens once
-                if (kv_cell.src != (int32_t) cell_id) {
-                    kv_cell.src = cell_id;
-                }
-            }
-        }
-    }
-
-    if (lctx.inp_pos_bucket) {
-        const int64_t n_tokens = ubatch.n_tokens;
-
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_pos_bucket->buffer));
-        GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing
-
-        int32_t * data = (int32_t *) lctx.inp_pos_bucket->data;
-
-        if (!lctx.is_encoding) {
-            const int64_t n_kv = kv_self.n;
-            for (int h = 0; h < 1; ++h) {
-                for (int j = 0; j < n_tokens; ++j) {
-                    for (int i = 0; i < n_kv; ++i) {
-                        data[h*(n_kv*n_tokens) + j*n_kv + i] = llama_relative_position_bucket(lctx.kv_self.cells[i].pos, ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding);
-                    }
-                }
-            }
-        } else {
-            for (int h = 0; h < 1; ++h) {
-                for (int j = 0; j < n_tokens; ++j) {
-                    for (int i = 0; i < n_tokens; ++i) {
-                        data[h*(n_tokens*n_tokens) + j*n_tokens + i] = llama_relative_position_bucket(ubatch.pos[i], ubatch.pos[j], hparams.n_rel_attn_bkts, lctx.is_encoding);
-                    }
-                }
-            }
-        }
-    }
-
-    if (!lctx.is_encoding && lctx.inp_embd_enc) {
-        assert(lctx.inp_embd_enc->type == GGML_TYPE_F32);
-        assert((size_t) ggml_nelements(lctx.inp_embd_enc) == lctx.embd_enc.size());
-
-        ggml_backend_tensor_set(lctx.inp_embd_enc, lctx.embd_enc.data(), 0, ggml_nbytes(lctx.inp_embd_enc));
-    }
-
-    if (!lctx.is_encoding && lctx.inp_KQ_mask_cross) {
-        const int64_t n_output_enc = lctx.embd_enc.size() / hparams.n_embd;
-        const int64_t n_tokens = ubatch.n_tokens;
-
-        GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_cross->buffer));
-        GGML_ASSERT(!ubatch.equal_seqs); // TODO: use ubatch.n_seqs instead of failing
-
-        float * data = (float *) lctx.inp_KQ_mask_cross->data;
-
-        for (int h = 0; h < 1; ++h) {
-            for (int j = 0; j < n_tokens; ++j) {
-                for (int i = 0; i < n_output_enc; ++i) {
-                    float f = -INFINITY;
-                    for (int s = 0; s < ubatch.n_seq_id[j]; ++s) {
-                        const llama_seq_id seq_id = ubatch.seq_id[j][s];
-                        if (lctx.seq_ids_enc[i].find(seq_id) != lctx.seq_ids_enc[i].end()) {
-                            f = 0.0f;
-                        }
-                    }
-                    data[h*(n_output_enc*n_tokens) + j*n_output_enc + i] = f;
-                }
-            }
-
-            for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
-                for (int j = 0; j < n_output_enc; ++j) {
-                    data[h*(n_output_enc*n_tokens) + i*n_output_enc + j] = -INFINITY;
-                }
-            }
-        }
-    }
-}
-
-// Make sure enough space is available for outputs.
-// Returns max number of outputs for which space was reserved.
-static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
-    const auto & cparams = lctx.cparams;
-    const auto & hparams = lctx.model.hparams;
-
-    const size_t n_outputs_max = std::max(n_outputs, (size_t) cparams.n_seq_max);
-
-    const auto n_batch = cparams.n_batch;
-    const auto n_vocab = hparams.n_vocab;
-    const auto n_embd  = hparams.n_embd;
-
-    // TODO: use a per-batch flag for logits presence instead
-    const bool has_logits = !cparams.embeddings;
-    const bool has_embd   =  cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE);
-
-    const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
-    const size_t embd_size   = has_embd   ?  n_embd*n_outputs_max : 0;
-
-    if (lctx.output_ids.empty()) {
-        // init, never resized afterwards
-        lctx.output_ids.resize(n_batch);
-    }
-
-    const size_t prev_size = lctx.buf_output ? ggml_backend_buffer_get_size(lctx.buf_output.get()) : 0;
-    const size_t new_size  = (logits_size + embd_size) * sizeof(float);
-
-    // alloc only when more than the current capacity is required
-    // TODO: also consider shrinking the buffer
-    if (!lctx.buf_output || prev_size < new_size) {
-        if (lctx.buf_output) {
-#ifndef NDEBUG
-            // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark)
-            LLAMA_LOG_INFO("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
-#endif
-            lctx.buf_output = nullptr;
-            lctx.logits = nullptr;
-            lctx.embd = nullptr;
-        }
-
-        auto * buft = ggml_backend_cpu_buffer_type();
-        // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory
-        auto * output_dev = lctx.model.dev_output.dev;
-        auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr;
-        if (output_dev_host_buft) {
-            buft = output_dev_host_buft;
-        }
-        lctx.buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size));
-        if (lctx.buf_output == nullptr) {
-            LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0));
-            return 0;
-        }
-    }
-
-    float * output_base = (float *) ggml_backend_buffer_get_base(lctx.buf_output.get());
-
-    lctx.logits = has_logits ? output_base               : nullptr;
-    lctx.embd   = has_embd   ? output_base + logits_size : nullptr;
-
-    lctx.output_size = n_outputs_max;
-    lctx.logits_size = logits_size;
-    lctx.embd_size   = embd_size;
-
-    // set all ids as invalid (negative)
-    std::fill(lctx.output_ids.begin(), lctx.output_ids.end(), -1);
-
-    ggml_backend_buffer_clear(lctx.buf_output.get(), 0);
-
-    lctx.n_outputs = 0;
-
-    return n_outputs_max;
-}
-
-// make the outputs have the same order they had in the user-provided batch
-static void llama_output_reorder(struct llama_context * ctx) {
-    std::vector & out_ids = ctx->sbatch.out_ids;
-    if (!out_ids.empty()) {
-        uint32_t n_vocab = ctx->model.hparams.n_vocab;
-        uint32_t n_embd  = ctx->model.hparams.n_embd;
-        int32_t n_outputs = ctx->n_outputs;
-        GGML_ASSERT((size_t) n_outputs == out_ids.size());
-        // TODO: is there something more efficient which also minimizes swaps?
-        // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort)
-        for (int32_t i = 0; i < n_outputs - 1; ++i) {
-            int32_t j_min = i;
-            for (int32_t j = i + 1; j < n_outputs; ++j) {
-                if (out_ids[j] < out_ids[j_min]) {
-                    j_min = j;
-                }
-            }
-            if (j_min == i) { continue; }
-            std::swap(out_ids[i], out_ids[j_min]);
-            if (ctx->logits_size > 0) {
-                for (uint32_t k = 0; k < n_vocab; k++) {
-                    std::swap(ctx->logits[i*n_vocab + k], ctx->logits[j_min*n_vocab + k]);
-                }
-            }
-            if (ctx->embd_size > 0) {
-                for (uint32_t k = 0; k < n_embd; k++) {
-                    std::swap(ctx->embd[i*n_embd + k], ctx->embd[j_min*n_embd + k]);
-                }
-            }
-        }
-        std::fill(ctx->output_ids.begin(), ctx->output_ids.end(), -1);
-        for (int32_t i = 0; i < n_outputs; ++i) {
-            ctx->output_ids[out_ids[i]] = i;
-        }
-        out_ids.clear();
-    }
-}
-
 // returns the result of ggml_backend_sched_graph_compute_async execution
 static enum ggml_status llama_graph_compute(
           llama_context & lctx,
@@ -18513,7 +10529,8 @@ static int llama_decode_internal(
     }
 
     // temporary allocate memory for the input batch if needed
-    llama_batch_allocr batch_allocr(lctx, inp_batch);
+    llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : lctx.kv_self.max_pos() + 1);
+
     const llama_batch & batch = batch_allocr.batch;
     const uint32_t n_tokens_all = batch.n_tokens;
 
@@ -18847,7 +10864,8 @@ static int llama_encode_internal(
     }
 
     // temporary allocate memory for the input batch if needed
-    llama_batch_allocr batch_allocr(lctx, inp_batch);
+    llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : lctx.kv_self.max_pos() + 1);
+
     const llama_batch & batch = batch_allocr.batch;
     const uint32_t n_tokens = batch.n_tokens;
 
@@ -19297,1046 +11315,6 @@ static void llama_kv_cache_update_internal(struct llama_context & lctx) {
     }
 }
 
-//
-// quantization
-//
-
-struct quantize_state_internal {
-    const llama_model                 & model;
-    const llama_model_quantize_params * params;
-
-    int n_attention_wv    = 0;
-    int n_ffn_down        = 0;
-    int n_ffn_gate        = 0;
-    int n_ffn_up          = 0;
-    int i_attention_wv    = 0;
-    int i_ffn_down        = 0;
-    int i_ffn_gate        = 0;
-    int i_ffn_up          = 0;
-
-    int n_k_quantized     = 0;
-    int n_fallback        = 0;
-
-    bool has_imatrix      = false;
-
-    // used to figure out if a model shares tok_embd with the output weight
-    bool has_output       = false;
-
-    quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
-        : model(model)
-        , params(params)
-        {}
-};
-
-static void llama_tensor_dequantize_internal(
-    struct ggml_tensor * tensor, std::vector> & output, std::vector & workers,
-    const size_t nelements, const int nthread
-) {
-    if (output.size() < nelements) {
-        output.resize(nelements);
-    }
-    float * f32_output = (float *) output.data();
-
-    const ggml_type_traits * qtype = ggml_get_type_traits(tensor->type);
-    if (ggml_is_quantized(tensor->type)) {
-        if (qtype->to_float == NULL) {
-            throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type)));
-        }
-    } else if (tensor->type != GGML_TYPE_F16 &&
-               tensor->type != GGML_TYPE_BF16) {
-        throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
-    }
-
-    if (nthread < 2) {
-        if (tensor->type == GGML_TYPE_F16) {
-            ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
-        } else if (tensor->type == GGML_TYPE_BF16) {
-            ggml_bf16_to_fp32_row((ggml_bf16_t *)tensor->data, f32_output, nelements);
-        } else if (ggml_is_quantized(tensor->type)) {
-            qtype->to_float(tensor->data, f32_output, nelements);
-        } else {
-            GGML_ABORT("fatal error"); // unreachable
-        }
-        return;
-    }
-
-    size_t block_size;
-    if (tensor->type == GGML_TYPE_F16 ||
-        tensor->type == GGML_TYPE_BF16) {
-        block_size = 1;
-    } else {
-        block_size = (size_t)ggml_blck_size(tensor->type);
-    }
-
-    size_t block_size_bytes = ggml_type_size(tensor->type);
-
-    GGML_ASSERT(nelements % block_size == 0);
-    size_t nblocks = nelements / block_size;
-    size_t blocks_per_thread = nblocks / nthread;
-    size_t spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count
-
-    size_t in_buff_offs = 0;
-    size_t out_buff_offs = 0;
-
-    for (int tnum = 0; tnum < nthread; tnum++) {
-        size_t thr_blocks = blocks_per_thread + (tnum == nthread - 1 ? spare_blocks : 0); // num blocks for this thread
-        size_t thr_elems = thr_blocks * block_size; // number of elements for this thread
-        size_t thr_block_bytes = thr_blocks * block_size_bytes; // number of input bytes for this thread
-
-        auto compute = [qtype] (ggml_type typ, uint8_t * inbuf, float * outbuf, int nels) {
-            if (typ == GGML_TYPE_F16) {
-                ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
-            } else if (typ == GGML_TYPE_BF16) {
-                ggml_bf16_to_fp32_row((ggml_bf16_t *)inbuf, outbuf, nels);
-            } else {
-                qtype->to_float(inbuf, outbuf, nels);
-            }
-        };
-        workers.emplace_back(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems);
-        in_buff_offs += thr_block_bytes;
-        out_buff_offs += thr_elems;
-    }
-    for (auto & w : workers) { w.join(); }
-    workers.clear();
-}
-
-static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
-    const std::string name = ggml_get_name(tensor);
-
-    // TODO: avoid hardcoded tensor names - use the TN_* constants
-    const llm_arch arch = qs.model.arch;
-    const auto       tn = LLM_TN(arch);
-
-    auto use_more_bits = [](int i_layer, int n_layers) -> bool {
-        return i_layer < n_layers/8 || i_layer >= 7*n_layers/8 || (i_layer - n_layers/8)%3 == 2;
-    };
-    const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
-    auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
-        if (n_expert > 1) {
-            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
-            // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
-            // for getting the current layer as I initially thought, and we need to resort to parsing the
-            // tensor name.
-            if (sscanf(name, "blk.%d.", &i_layer) != 1) {
-                throw std::runtime_error(format("Failed to determine layer for tensor %s", name));
-            }
-            if (i_layer < 0 || i_layer >= n_layer) {
-                throw std::runtime_error(format("Bad layer %d for tensor %s. Must be in [0, %d)", i_layer, name, n_layer));
-            }
-        }
-        return std::make_pair(i_layer, n_layer);
-    };
-
-    // for arches that share the same tensor between the token embeddings and the output, we quantize the token embeddings
-    // with the quantization of the output tensor
-    if (name == tn(LLM_TENSOR_OUTPUT, "weight") || (!qs.has_output && name == tn(LLM_TENSOR_TOKEN_EMBD, "weight"))) {
-        if (qs.params->output_tensor_type < GGML_TYPE_COUNT) {
-            new_type = qs.params->output_tensor_type;
-        } else {
-            int nx = tensor->ne[0];
-            if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) {
-                new_type = GGML_TYPE_Q8_0;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
-                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S  || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M   ||
-                     ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
-                new_type = GGML_TYPE_Q5_K;
-            }
-            else if (new_type != GGML_TYPE_Q8_0) {
-                new_type = GGML_TYPE_Q6_K;
-            }
-        }
-    } else if (name == "token_embd.weight") {
-        if (qs.params->token_embedding_type < GGML_TYPE_COUNT) {
-            new_type = qs.params->token_embedding_type;
-        } else {
-            if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS ||
-                ftype == LLAMA_FTYPE_MOSTLY_IQ1_S   || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
-                new_type = GGML_TYPE_Q2_K;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) {
-                new_type = GGML_TYPE_IQ3_S;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
-                new_type = GGML_TYPE_IQ3_S;
-            }
-            else if (ftype == LLAMA_FTYPE_MOSTLY_TQ1_0 || ftype == LLAMA_FTYPE_MOSTLY_TQ2_0) {
-                new_type = GGML_TYPE_Q4_K;
-            }
-        }
-    } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
-               ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M    || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
-        if (name.find("attn_v.weight") != std::string::npos) {
-            if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_Q4_K;
-            else new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
-            ++qs.i_attention_wv;
-        }
-        else if (qs.model.hparams.n_expert == 8 && name.find("attn_k.weight") != std::string::npos) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (name.find("ffn_down") != std::string::npos) {
-            if (qs.i_ffn_down < qs.n_ffn_down/8) {
-                new_type = ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ? GGML_TYPE_IQ3_S : GGML_TYPE_Q2_K;
-            }
-            ++qs.i_ffn_down;
-        }
-        else if (name.find("attn_output.weight") != std::string::npos) {
-            if (qs.model.hparams.n_expert == 8) {
-                new_type = GGML_TYPE_Q5_K;
-            } else {
-                if (ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) new_type = GGML_TYPE_IQ2_XXS;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M) new_type = GGML_TYPE_IQ3_S;
-            }
-        }
-    } else if (name.find("attn_v.weight") != std::string::npos) {
-        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
-            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
-            new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : !qs.has_imatrix ? GGML_TYPE_IQ3_S : GGML_TYPE_IQ3_XXS;
-        }
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S) && qs.model.hparams.n_gqa() >= 4) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
-            new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && qs.model.hparams.n_gqa() >= 4) {
-            new_type = GGML_TYPE_Q5_K;
-        }
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
-                use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
-        if (qs.model.type == MODEL_70B) {
-            // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
-            // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
-            // nearly negligible increase in model size by quantizing this tensor with more bits:
-            if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K) new_type = GGML_TYPE_Q5_K;
-        }
-        if (qs.model.hparams.n_expert == 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
-            // TODO: explore better strategies
-            new_type = GGML_TYPE_Q8_0;
-        }
-        ++qs.i_attention_wv;
-    } else if (name.find("attn_k.weight") != std::string::npos) {
-        if (qs.model.hparams.n_expert == 8) {
-            // for the 8-expert model, bumping this to Q8_0 trades just ~128MB
-            // TODO: explore better strategies
-            new_type = GGML_TYPE_Q8_0;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
-            new_type = GGML_TYPE_IQ3_XXS;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
-            new_type = GGML_TYPE_IQ2_S;
-        }
-    } else if (name.find("attn_q.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS) {
-            new_type = GGML_TYPE_IQ3_XXS;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
-            new_type = GGML_TYPE_IQ2_S;
-        }
-    } else if (name.find("ffn_down") != std::string::npos) {
-        auto info = layer_info(qs.i_ffn_down, qs.n_ffn_down, name.c_str());
-        int i_layer = info.first, n_layer = info.second;
-        if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S) {
-            if (i_layer < n_layer/8) new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS && !qs.has_imatrix) {
-            new_type = i_layer < n_layer/8 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
-            new_type = i_layer < n_layer/16 ? GGML_TYPE_Q5_K
-                     : arch != LLM_ARCH_FALCON || use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q4_K
-                     : GGML_TYPE_Q3_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M && (i_layer < n_layer/8 ||
-                    (qs.model.hparams.n_expert == 8 && use_more_bits(i_layer, n_layer)))) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) {
-            new_type = arch == LLM_ARCH_FALCON ? GGML_TYPE_Q4_K : GGML_TYPE_Q5_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) {
-            if (arch == LLM_ARCH_FALCON) {
-                new_type = i_layer < n_layer/16 ? GGML_TYPE_Q6_K :
-                           use_more_bits(i_layer, n_layer) ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
-            } else {
-                if (use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
-            }
-        }
-        else if (i_layer < n_layer/8 && (ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) && !qs.has_imatrix) {
-            new_type = GGML_TYPE_Q5_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M && use_more_bits(i_layer, n_layer)) new_type = GGML_TYPE_Q6_K;
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && arch != LLM_ARCH_FALCON && i_layer < n_layer/8) {
-            new_type = GGML_TYPE_Q5_K;
-        }
-        else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || ftype == LLAMA_FTYPE_MOSTLY_Q5_0)
-                && qs.has_imatrix && i_layer < n_layer/8) {
-            // Guard against craziness in the first few ffn_down layers that can happen even with imatrix for Q4_0/Q5_0.
-            // We only do it when an imatrix is provided because a) we want to make sure that one can always get the
-            // same quantization as before imatrix stuff, and b) Q4_1/Q5_1 do go crazy on ffn_down without an imatrix.
-            new_type = ftype == LLAMA_FTYPE_MOSTLY_Q4_0 ? GGML_TYPE_Q4_1 : GGML_TYPE_Q5_1;
-        }
-        ++qs.i_ffn_down;
-    } else if (name.find("attn_output.weight") != std::string::npos) {
-        if (arch != LLM_ARCH_FALCON) {
-            if (qs.model.hparams.n_expert == 8) {
-                if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
-                    ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL  ||
-                    ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S  ||
-                    ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS) {
-                    new_type = GGML_TYPE_Q5_K;
-                }
-            } else {
-                if      (ftype == LLAMA_FTYPE_MOSTLY_Q2_K   ) new_type = GGML_TYPE_Q3_K;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M ) new_type = GGML_TYPE_Q4_K;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L ) new_type = GGML_TYPE_Q5_K;
-                else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_M  ) new_type = GGML_TYPE_Q4_K;
-            }
-        } else {
-            if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q4_K;
-        }
-    }
-    else if (name.find("attn_qkv.weight") != std::string::npos) {
-        if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M) {
-            new_type = GGML_TYPE_Q4_K;
-        }
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M) new_type = GGML_TYPE_Q5_K;
-        else if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) new_type = GGML_TYPE_Q6_K;
-    }
-    else if (name.find("ffn_gate") != std::string::npos) {
-        auto info = layer_info(qs.i_ffn_gate, qs.n_ffn_gate, name.c_str());
-        int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
-            new_type = GGML_TYPE_IQ3_XXS;
-        }
-        ++qs.i_ffn_gate;
-    }
-    else if (name.find("ffn_up") != std::string::npos) {
-        auto info = layer_info(qs.i_ffn_up, qs.n_ffn_up, name.c_str());
-        int i_layer = info.first, n_layer = info.second;
-        if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS && (i_layer >= n_layer/8 && i_layer < 7*n_layer/8)) {
-            new_type = GGML_TYPE_IQ3_XXS;
-        }
-        ++qs.i_ffn_up;
-    }
-
-    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
-    //}
-    // IK: let's remove this, else Q2_K is almost the same as Q3_K_S
-    //else if (name.find("ffn_gate") != std::string::npos || name.find("ffn_up") != std::string::npos) {
-    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
-    //}
-    // This can be used to reduce the size of the Q5_K_S model.
-    // The associated PPL increase is fully in line with the size reduction
-    //else {
-    //    if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K;
-    //}
-    bool convert_incompatible_tensor = false;
-    if (new_type == GGML_TYPE_Q2_K    || new_type == GGML_TYPE_Q3_K    || new_type == GGML_TYPE_Q4_K   ||
-        new_type == GGML_TYPE_Q5_K    || new_type == GGML_TYPE_Q6_K    || new_type == GGML_TYPE_IQ4_XS ||
-        new_type == GGML_TYPE_IQ2_XS  || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S  ||
-        new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S   || new_type == GGML_TYPE_IQ3_S  ||
-        new_type == GGML_TYPE_IQ1_M) {
-        int nx = tensor->ne[0];
-        int ny = tensor->ne[1];
-        if (nx % QK_K != 0) {
-            LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
-            convert_incompatible_tensor = true;
-        } else {
-            ++qs.n_k_quantized;
-        }
-    }
-    if (convert_incompatible_tensor) {
-        switch (new_type) {
-            case GGML_TYPE_TQ1_0:
-            case GGML_TYPE_TQ2_0:  new_type = GGML_TYPE_Q4_0; break;  // TODO: use a symmetric type instead
-            case GGML_TYPE_IQ2_XXS:
-            case GGML_TYPE_IQ2_XS:
-            case GGML_TYPE_IQ2_S:
-            case GGML_TYPE_IQ3_XXS:
-            case GGML_TYPE_IQ3_S:
-            case GGML_TYPE_IQ1_S:
-            case GGML_TYPE_IQ1_M:
-            case GGML_TYPE_Q2_K:
-            case GGML_TYPE_Q3_K:
-            case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
-            case GGML_TYPE_Q4_K:   new_type = GGML_TYPE_Q5_0;   break;
-            case GGML_TYPE_Q5_K:   new_type = GGML_TYPE_Q5_1;   break;
-            case GGML_TYPE_Q6_K:   new_type = GGML_TYPE_Q8_0;   break;
-            default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
-        }
-        if (tensor->ne[0] % ggml_blck_size(new_type) != 0) {
-            new_type = GGML_TYPE_F16;
-        }
-        LLAMA_LOG_WARN(" - using fallback quantization %s\n", ggml_type_name(new_type));
-        ++qs.n_fallback;
-    }
-
-    return new_type;
-}
-
-static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector & workers, const int nthread) {
-    if (nthread < 2) {
-        // single-thread
-        size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
-        if (!ggml_validate_row_data(new_type, new_data, new_size)) {
-            throw std::runtime_error("quantized data validation failed");
-        }
-        return new_size;
-    }
-
-    std::mutex mutex;
-    int64_t counter = 0;
-    size_t new_size = 0;
-    bool valid = true;
-    auto compute = [&mutex, &counter, &new_size, &valid, new_type, f32_data, new_data, chunk_size,
-            nrows, n_per_row, imatrix]() {
-        const int64_t nrows_per_chunk = chunk_size / n_per_row;
-        size_t local_size = 0;
-        while (true) {
-            std::unique_lock lock(mutex);
-            int64_t first_row = counter; counter += nrows_per_chunk;
-            if (first_row >= nrows) {
-                if (local_size > 0) {
-                    new_size += local_size;
-                }
-                break;
-            }
-            lock.unlock();
-            const int64_t this_nrow = std::min(nrows - first_row, nrows_per_chunk);
-            size_t this_size = ggml_quantize_chunk(new_type, f32_data, new_data, first_row * n_per_row, this_nrow, n_per_row, imatrix);
-            local_size += this_size;
-
-            // validate the quantized data
-            const size_t row_size  = ggml_row_size(new_type, n_per_row);
-            void * this_data = (char *) new_data + first_row * row_size;
-            if (!ggml_validate_row_data(new_type, this_data, this_size)) {
-                std::unique_lock lock(mutex);
-                valid = false;
-                break;
-            }
-        }
-    };
-    for (int it = 0; it < nthread - 1; ++it) {
-        workers.emplace_back(compute);
-    }
-    compute();
-    for (auto & w : workers) { w.join(); }
-    workers.clear();
-    if (!valid) {
-        throw std::runtime_error("quantized data validation failed");
-    }
-    return new_size;
-}
-
-static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
-    ggml_type default_type;
-    llama_ftype ftype = params->ftype;
-
-    switch (params->ftype) {
-        case LLAMA_FTYPE_MOSTLY_Q4_0: default_type = GGML_TYPE_Q4_0; break;
-        case LLAMA_FTYPE_MOSTLY_Q4_1: default_type = GGML_TYPE_Q4_1; break;
-        case LLAMA_FTYPE_MOSTLY_Q5_0: default_type = GGML_TYPE_Q5_0; break;
-        case LLAMA_FTYPE_MOSTLY_Q5_1: default_type = GGML_TYPE_Q5_1; break;
-        case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break;
-        case LLAMA_FTYPE_MOSTLY_F16:  default_type = GGML_TYPE_F16;  break;
-        case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break;
-        case LLAMA_FTYPE_ALL_F32:     default_type = GGML_TYPE_F32;  break;
-
-        // K-quants
-        case LLAMA_FTYPE_MOSTLY_Q2_K_S:
-        case LLAMA_FTYPE_MOSTLY_Q2_K:    default_type = GGML_TYPE_Q2_K;    break;
-        case LLAMA_FTYPE_MOSTLY_IQ3_XS:  default_type = GGML_TYPE_IQ3_S;   break;
-        case LLAMA_FTYPE_MOSTLY_Q3_K_S:
-        case LLAMA_FTYPE_MOSTLY_Q3_K_M:
-        case LLAMA_FTYPE_MOSTLY_Q3_K_L:  default_type = GGML_TYPE_Q3_K;    break;
-        case LLAMA_FTYPE_MOSTLY_Q4_K_S:
-        case LLAMA_FTYPE_MOSTLY_Q4_K_M:  default_type = GGML_TYPE_Q4_K;    break;
-        case LLAMA_FTYPE_MOSTLY_Q5_K_S:
-        case LLAMA_FTYPE_MOSTLY_Q5_K_M:  default_type = GGML_TYPE_Q5_K;    break;
-        case LLAMA_FTYPE_MOSTLY_Q6_K:    default_type = GGML_TYPE_Q6_K;    break;
-        case LLAMA_FTYPE_MOSTLY_TQ1_0:   default_type = GGML_TYPE_TQ1_0;   break;
-        case LLAMA_FTYPE_MOSTLY_TQ2_0:   default_type = GGML_TYPE_TQ2_0;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ2_XXS: default_type = GGML_TYPE_IQ2_XXS; break;
-        case LLAMA_FTYPE_MOSTLY_IQ2_XS:  default_type = GGML_TYPE_IQ2_XS;  break;
-        case LLAMA_FTYPE_MOSTLY_IQ2_S:   default_type = GGML_TYPE_IQ2_XS;  break;
-        case LLAMA_FTYPE_MOSTLY_IQ2_M:   default_type = GGML_TYPE_IQ2_S;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ3_XXS: default_type = GGML_TYPE_IQ3_XXS; break;
-        case LLAMA_FTYPE_MOSTLY_IQ1_S:   default_type = GGML_TYPE_IQ1_S;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ1_M:   default_type = GGML_TYPE_IQ1_M;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ4_NL:  default_type = GGML_TYPE_IQ4_NL;  break;
-        case LLAMA_FTYPE_MOSTLY_IQ4_XS:  default_type = GGML_TYPE_IQ4_XS;  break;
-        case LLAMA_FTYPE_MOSTLY_IQ3_S:   default_type = GGML_TYPE_IQ3_S;   break;
-        case LLAMA_FTYPE_MOSTLY_IQ3_M:   default_type = GGML_TYPE_IQ3_S;   break;
-
-        default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
-    }
-
-    int nthread = params->nthread;
-
-    if (nthread <= 0) {
-        nthread = std::thread::hardware_concurrency();
-    }
-
-    // mmap consistently increases speed Linux, and also increases speed on Windows with
-    // hot cache. It may cause a slowdown on macOS, possibly related to free memory.
-#if defined(__linux__) || defined(_WIN32)
-    constexpr bool use_mmap = true;
-#else
-    constexpr bool use_mmap = false;
-#endif
-
-    llama_model_kv_override * kv_overrides = nullptr;
-    if (params->kv_overrides) {
-        auto v = (std::vector*)params->kv_overrides;
-        kv_overrides = v->data();
-    }
-    llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
-    ml.init_mappings(false); // no prefetching
-
-    llama_model model;
-    llm_load_arch(ml, model);
-    llm_load_hparams(ml, model);
-    llm_load_stats(ml, model);
-
-    struct quantize_state_internal qs(model, params);
-
-    if (params->only_copy) {
-        ftype = model.ftype;
-    }
-    const std::unordered_map> * imatrix_data = nullptr;
-    if (params->imatrix) {
-        imatrix_data = static_cast>*>(params->imatrix);
-        if (imatrix_data) {
-            LLAMA_LOG_INFO("================================ Have weights data with %d entries\n",int(imatrix_data->size()));
-            qs.has_imatrix = true;
-            // check imatrix for nans or infs
-            for (const auto & kv : *imatrix_data) {
-                for (float f : kv.second) {
-                    if (!std::isfinite(f)) {
-                        throw std::runtime_error(format("imatrix contains non-finite value %f\n", f));
-                    }
-                }
-            }
-        }
-    }
-
-    const size_t align = GGUF_DEFAULT_ALIGNMENT;
-    gguf_context_ptr ctx_out { gguf_init_empty() };
-
-    // copy the KV pairs from the input file
-    gguf_set_kv     (ctx_out.get(), ml.meta.get());
-    gguf_set_val_u32(ctx_out.get(), "general.quantization_version", GGML_QNT_VERSION); // TODO: use LLM_KV
-    gguf_set_val_u32(ctx_out.get(), "general.file_type", ftype); // TODO: use LLM_KV
-
-    // Remove split metadata
-    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str());
-    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str());
-    gguf_remove_key(ctx_out.get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str());
-
-    if (params->kv_overrides) {
-        const std::vector & overrides = *(const std::vector *)params->kv_overrides;
-        for (const auto & o : overrides) {
-            if (o.key[0] == 0) break;
-            if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
-                gguf_set_val_f32(ctx_out.get(), o.key, o.val_f64);
-            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
-                gguf_set_val_i32(ctx_out.get(), o.key, o.val_i64);
-            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
-                gguf_set_val_bool(ctx_out.get(), o.key, o.val_bool);
-            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_STR) {
-                gguf_set_val_str(ctx_out.get(), o.key, o.val_str);
-            } else {
-                LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
-            }
-        }
-    }
-
-    // make a list of weights
-    std::vector tensors;
-    tensors.reserve(ml.weights_map.size());
-    for (const auto & it : ml.weights_map) {
-        tensors.push_back(&it.second);
-    }
-
-    // keep_split requires that the weights are sorted by split index
-    if (params->keep_split) {
-        std::sort(tensors.begin(), tensors.end(), [](const llama_model_loader::llama_tensor_weight * a, const llama_model_loader::llama_tensor_weight * b) {
-            if (a->idx == b->idx) {
-                return a->offs < b->offs;
-            }
-            return a->idx < b->idx;
-        });
-    }
-
-    for (const auto * it : tensors) {
-        const struct ggml_tensor * tensor = it->tensor;
-
-        const std::string name = ggml_get_name(tensor);
-
-        // TODO: avoid hardcoded tensor names - use the TN_* constants
-        if (name.find("attn_v.weight")   != std::string::npos ||
-            name.find("attn_qkv.weight") != std::string::npos ||
-            name.find("attn_kv_b.weight")!= std::string::npos) {
-            ++qs.n_attention_wv;
-        } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
-            qs.has_output = true;
-        }
-    }
-
-    qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
-
-    // sanity checks
-    {
-        const auto & n_head_kv_iter = model.hparams.n_head_kv_arr.begin();
-        // attention layers have a non-zero number of kv heads
-        int32_t n_attn_layer = model.hparams.n_layer - std::count(n_head_kv_iter, n_head_kv_iter + model.hparams.n_layer, 0);
-        if (llama_model_has_encoder(&model)) {
-            n_attn_layer *= 3;
-        }
-        GGML_ASSERT((qs.n_attention_wv == n_attn_layer) && "n_attention_wv is unexpected");
-    }
-
-    size_t total_size_org = 0;
-    size_t total_size_new = 0;
-
-    std::vector workers;
-    workers.reserve(nthread);
-
-    int idx = 0;
-
-    std::vector> read_data;
-    std::vector> work;
-    std::vector> f32_conv_buf;
-
-    uint16_t n_split = 1;
-
-    // Assume split index is continuous
-    if (params->keep_split) {
-        for (const auto * it : tensors) {
-            n_split = std::max(uint16_t(it->idx + 1), n_split);
-        }
-    }
-    std::vector ctx_outs(n_split);
-    ctx_outs[0] = std::move(ctx_out);
-
-    // populate the original tensors so we get an initial meta data
-    for (const auto * it : tensors) {
-        uint16_t i_split = params->keep_split ? it->idx : 0;
-        struct ggml_tensor * tensor = it->tensor;
-        if (!ctx_outs[i_split]) {
-            ctx_outs[i_split].reset(gguf_init_empty());
-        }
-        gguf_add_tensor(ctx_outs[i_split].get(), tensor);
-    }
-
-    // Set split info if needed
-    if (n_split > 1) {
-        for (size_t i = 0; i < ctx_outs.size(); ++i) {
-            gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_NO).c_str(), i);
-            gguf_set_val_u16(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_COUNT).c_str(), n_split);
-            gguf_set_val_i32(ctx_outs[i].get(), ml.llm_kv(LLM_KV_SPLIT_TENSORS_COUNT).c_str(), ml.n_tensors);
-        }
-    }
-
-    int cur_split = -1;
-    std::ofstream fout;
-    auto close_ofstream = [&]() {
-        // Write metadata and close file handler
-        if (fout.is_open()) {
-            fout.seekp(0);
-            std::vector data(gguf_get_meta_size(ctx_outs[cur_split].get()));
-            gguf_get_meta_data(ctx_outs[cur_split].get(), data.data());
-            fout.write((const char *) data.data(), data.size());
-            fout.close();
-        }
-    };
-    auto new_ofstream = [&](int index) {
-        cur_split = index;
-        GGML_ASSERT(ctx_outs[cur_split] && "Find uninitialized gguf_context");
-        std::string fname = fname_out;
-        if (params->keep_split) {
-            char split_path[PATH_MAX] = {0};
-            llama_split_path(split_path, sizeof(split_path), fname_out.c_str(), cur_split, n_split);
-            fname = std::string(split_path);
-        }
-
-        fout = std::ofstream(fname, std::ios::binary);
-        fout.exceptions(std::ofstream::failbit); // fail fast on write errors
-        const size_t meta_size = gguf_get_meta_size(ctx_outs[cur_split].get());
-        // placeholder for the meta data
-        ::zeros(fout, meta_size);
-    };
-
-    const auto tn = LLM_TN(model.arch);
-    new_ofstream(0);
-    for (const auto * it : tensors) {
-        const auto & weight = *it;
-        struct ggml_tensor * tensor = weight.tensor;
-        if (weight.idx != cur_split && params->keep_split) {
-            close_ofstream();
-            new_ofstream(weight.idx);
-        }
-
-        const std::string name = ggml_get_name(tensor);
-
-        if (!ml.use_mmap) {
-            if (read_data.size() < ggml_nbytes(tensor)) {
-                read_data.resize(ggml_nbytes(tensor));
-            }
-            tensor->data = read_data.data();
-        }
-        ml.load_data_for(tensor);
-
-        LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ",
-               ++idx, ml.n_tensors,
-               ggml_get_name(tensor),
-               llama_format_tensor_shape(tensor).c_str(),
-               ggml_type_name(tensor->type));
-
-        // This used to be a regex, but  has an extreme cost to compile times.
-        bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'?
-
-        // quantize only 2D and 3D tensors (experts)
-        quantize &= (ggml_n_dims(tensor) >= 2);
-
-        // do not quantize norm tensors
-        quantize &= name.find("_norm.weight") == std::string::npos;
-
-        quantize &= params->quantize_output_tensor || name != "output.weight";
-        quantize &= !params->only_copy;
-
-        // do not quantize expert gating tensors
-        // NOTE: can't use LLM_TN here because the layer number is not known
-        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
-
-        // do not quantize positional embeddings and token types (BERT)
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD,    "weight");
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_TOKEN_TYPES, "weight");
-
-        // do not quantize Mamba's small yet 2D weights
-        // NOTE: can't use LLM_TN here because the layer number is not known
-        quantize &= name.find("ssm_conv1d.weight") == std::string::npos;
-
-        // do not quantize RWKV's time_mix_first tensors
-        quantize &= name.find("time_mix_first.weight") == std::string::npos;
-        quantize &= name.find("time_mix_w1.weight") == std::string::npos;
-        quantize &= name.find("time_mix_w2.weight") == std::string::npos;
-        quantize &= name.find("time_mix_decay_w1.weight") == std::string::npos;
-        quantize &= name.find("time_mix_decay_w2.weight") == std::string::npos;
-
-        // do not quantize relative position bias (T5)
-        quantize &= name.find("attn_rel_b.weight") == std::string::npos;
-
-        enum ggml_type new_type;
-        void * new_data;
-        size_t new_size;
-
-        if (quantize) {
-            new_type = default_type;
-
-            // get more optimal quantization type based on the tensor shape, layer, etc.
-            if (!params->pure && ggml_is_quantized(default_type)) {
-                new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
-            }
-            if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
-                new_type = params->token_embedding_type;
-            }
-            if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
-                new_type = params->output_tensor_type;
-            }
-
-            // If we've decided to quantize to the same type the tensor is already
-            // in then there's nothing to do.
-            quantize = tensor->type != new_type;
-        }
-
-        if (!quantize) {
-            new_type = tensor->type;
-            new_data = tensor->data;
-            new_size = ggml_nbytes(tensor);
-            LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
-        } else {
-            const int64_t nelements = ggml_nelements(tensor);
-
-            const float * imatrix = nullptr;
-            if (imatrix_data) {
-                auto it = imatrix_data->find(tensor->name);
-                if (it == imatrix_data->end()) {
-                    LLAMA_LOG_INFO("\n====== %s: did not find weights for %s\n", __func__, tensor->name);
-                } else {
-                    if (it->second.size() == (size_t)tensor->ne[0]*tensor->ne[2]) {
-                        imatrix = it->second.data();
-                    } else {
-                        LLAMA_LOG_INFO("\n====== %s: imatrix size %d is different from tensor size %d for %s\n", __func__,
-                                int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name);
-
-                        // this can happen when quantizing an old mixtral model with split tensors with a new incompatible imatrix
-                        // this is a significant error and it may be good idea to abort the process if this happens,
-                        // since many people will miss the error and not realize that most of the model is being quantized without an imatrix
-                        // tok_embd should be ignored in this case, since it always causes this warning
-                        if (name != tn(LLM_TENSOR_TOKEN_EMBD, "weight")) {
-                            throw std::runtime_error(format("imatrix size %d is different from tensor size %d for %s",
-                                    int(it->second.size()), int(tensor->ne[0]*tensor->ne[2]), tensor->name));
-                        }
-                    }
-                }
-            }
-            if ((new_type == GGML_TYPE_IQ2_XXS ||
-                 new_type == GGML_TYPE_IQ2_XS  ||
-                 new_type == GGML_TYPE_IQ2_S   ||
-                 new_type == GGML_TYPE_IQ1_S   ||
-                (new_type == GGML_TYPE_IQ1_M && strcmp(tensor->name, "token_embd.weight") && strcmp(tensor->name, "output.weight"))  ||
-                (new_type == GGML_TYPE_Q2_K && params->ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && strcmp(tensor->name, "token_embd.weight") != 0)) && !imatrix) {
-                LLAMA_LOG_ERROR("\n\n============================================================\n");
-                LLAMA_LOG_ERROR("Missing importance matrix for tensor %s in a very low-bit quantization\n", tensor->name);
-                LLAMA_LOG_ERROR("The result will be garbage, so bailing out\n");
-                LLAMA_LOG_ERROR("============================================================\n\n");
-                throw std::runtime_error(format("Missing importance matrix for tensor %s in a very low-bit quantization", tensor->name));
-            }
-
-            float * f32_data;
-
-            if (tensor->type == GGML_TYPE_F32) {
-                f32_data = (float *) tensor->data;
-            } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
-                throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
-            } else {
-                llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
-                f32_data = (float *) f32_conv_buf.data();
-            }
-
-            LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
-            fflush(stdout);
-
-            if (work.size() < (size_t)nelements * 4) {
-                work.resize(nelements * 4); // upper bound on size
-            }
-            new_data = work.data();
-
-            const int64_t n_per_row = tensor->ne[0];
-            const int64_t nrows = tensor->ne[1];
-
-            static const int64_t min_chunk_size = 32 * 512;
-            const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row));
-
-            const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
-            const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
-            const int64_t nthread_use = nthread > 1 ? std::max((int64_t)1, std::min((int64_t)nthread, nchunk)) : 1;
-
-            // quantize each expert separately since they have different importance matrices
-            new_size = 0;
-            for (int64_t i03 = 0; i03 < tensor->ne[2]; ++i03) {
-                const float * f32_data_03 = f32_data + i03 * nelements_matrix;
-                void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
-                const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;
-
-                new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
-            }
-            LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
-        }
-        total_size_org += ggml_nbytes(tensor);
-        total_size_new += new_size;
-
-        // update the gguf meta data as we go
-        gguf_set_tensor_type(ctx_outs[cur_split].get(), name.c_str(), new_type);
-        gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data, new_size);
-
-        // write tensor data + padding
-        fout.write((const char *) new_data, new_size);
-        zeros(fout, GGML_PAD(new_size, align) - new_size);
-    }
-    close_ofstream();
-
-    LLAMA_LOG_INFO("%s: model size  = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0);
-    LLAMA_LOG_INFO("%s: quant size  = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0);
-
-    if (qs.n_fallback > 0) {
-        LLAMA_LOG_WARN("%s: WARNING: %d of %d tensor(s) required fallback quantization\n",
-                __func__, qs.n_fallback, qs.n_k_quantized + qs.n_fallback);
-    }
-}
-
-static void llama_lora_adapter_init_internal(struct llama_model * model, const char * path_lora, struct llama_lora_adapter & adapter) {
-    LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora);
-
-    ggml_context * ctx_init;
-    struct gguf_init_params meta_gguf_params = {
-        /* .no_alloc = */ true,
-        /* .ctx      = */ &ctx_init,
-    };
-
-    gguf_context_ptr ctx_gguf { gguf_init_from_file(path_lora, meta_gguf_params) };
-    if (!ctx_gguf) {
-        throw std::runtime_error("failed to load lora adapter file from " + std::string(path_lora));
-    }
-
-    ggml_context_ptr ctx { ctx_init };
-
-    // check metadata
-    {
-        auto get_kv_str = [&](const std::string & key) -> std::string {
-            int id = gguf_find_key(ctx_gguf.get(), key.c_str());
-            return id < 0 ? "" : std::string(gguf_get_val_str(ctx_gguf.get(), id));
-        };
-        auto get_kv_f32 = [&](const std::string & key) -> float {
-            int id = gguf_find_key(ctx_gguf.get(), key.c_str());
-            return id < 0 ? 0.0f : gguf_get_val_f32(ctx_gguf.get(), id);
-        };
-        LLM_KV llm_kv = LLM_KV(LLM_ARCH_UNKNOWN);
-
-        auto general_type = get_kv_str(llm_kv(LLM_KV_GENERAL_TYPE));
-        if (general_type != "adapter") {
-            throw std::runtime_error("expect general.type to be 'adapter', but got: " + general_type);
-        }
-
-        auto general_arch_str = get_kv_str(llm_kv(LLM_KV_GENERAL_ARCHITECTURE));
-        auto general_arch = llm_arch_from_string(general_arch_str);
-        if (general_arch != model->arch) {
-            throw std::runtime_error("model arch and LoRA arch mismatch");
-        }
-
-        auto adapter_type = get_kv_str(llm_kv(LLM_KV_ADAPTER_TYPE));
-        if (adapter_type != "lora") {
-            throw std::runtime_error("expect adapter.type to be 'lora', but got: " + adapter_type);
-        }
-
-        adapter.alpha = get_kv_f32(llm_kv(LLM_KV_ADAPTER_LORA_ALPHA));
-    }
-
-    int n_tensors = gguf_get_n_tensors(ctx_gguf.get());
-
-    // contexts for each buffer type
-    std::map ctx_map;
-    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
-        auto it = ctx_map.find(buft);
-        if (it == ctx_map.end()) {
-            // add a new context
-            struct ggml_init_params params = {
-                /*.mem_size   =*/ n_tensors*ggml_tensor_overhead(),
-                /*.mem_buffer =*/ NULL,
-                /*.no_alloc   =*/ true,
-            };
-            ggml_context * buft_ctx = ggml_init(params);
-            if (!buft_ctx) {
-                return nullptr;
-            }
-            ctx_map[buft] = buft_ctx;
-            adapter.ctxs.emplace_back(buft_ctx);
-            return buft_ctx;
-        };
-        return it->second;
-    };
-
-    // bundle lora_a and lora_b into pairs
-    std::map ab_map;
-    auto str_endswith = [](const std::string & str, const std::string & suffix) {
-        return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
-    };
-    for (ggml_tensor * cur = ggml_get_first_tensor(ctx.get()); cur; cur = ggml_get_next_tensor(ctx.get(), cur)) {
-        std::string name(cur->name);
-        if (str_endswith(name, ".lora_a")) {
-            replace_all(name, ".lora_a", "");
-            if (ab_map.find(name) == ab_map.end()) {
-                ab_map[name] = llama_lora_weight(cur, nullptr);
-            } else {
-                ab_map[name].a = cur;
-            }
-        } else if (str_endswith(name, ".lora_b")) {
-            replace_all(name, ".lora_b", "");
-            if (ab_map.find(name) == ab_map.end()) {
-                ab_map[name] = llama_lora_weight(nullptr, cur);
-            } else {
-                ab_map[name].b = cur;
-            }
-        } else {
-            throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix");
-        }
-    }
-
-    // add tensors
-    for (auto & it : ab_map) {
-        const std::string & name = it.first;
-        llama_lora_weight & w = it.second;
-
-        if (!w.a || !w.b) {
-            throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component");
-        }
-
-        // device buft and device ctx
-        auto * model_tensor = llama_get_model_tensor(model, name.c_str());
-        if (!model_tensor) {
-            throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model");
-        }
-        struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer));
-        // validate tensor shape
-        if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) {
-            throw std::runtime_error("tensor '" + name + "' has incorrect shape");
-        }
-        if (w.a->ne[1] != w.b->ne[0]) {
-            throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)");
-        }
-        // save tensor to adapter
-        struct ggml_tensor * tensor_a = ggml_dup_tensor(dev_ctx, w.a);
-        struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b);
-        ggml_set_name(tensor_a, w.a->name);
-        ggml_set_name(tensor_b, w.b->name);
-        adapter.ab_map[name] = llama_lora_weight(tensor_a, tensor_b);
-    }
-
-    // allocate tensors / buffers and zero
-    {
-        adapter.ctxs.reserve(ctx_map.size());
-        adapter.bufs.reserve(ctx_map.size());
-        for (auto & it : ctx_map) {
-            ggml_backend_buffer_type_t buft = it.first;
-            ggml_context * ctx_dev = it.second;
-            ggml_backend_buffer_ptr buf { ggml_backend_alloc_ctx_tensors_from_buft(ctx_dev, buft) };
-            if (!buf) {
-                throw std::runtime_error("failed to allocate buffer for lora adapter\n");
-            }
-            LLAMA_LOG_INFO("%s: %10s LoRA buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get())/1024.0/1024.0);
-            adapter.bufs.emplace_back(std::move(buf));
-        }
-    }
-
-    // set tensor data
-    {
-        llama_file gguf_file(path_lora, "rb");
-        std::vector read_buf;
-        auto set_tensor = [&](struct ggml_tensor * orig, struct ggml_tensor * dev) {
-            size_t offs = gguf_get_data_offset(ctx_gguf.get()) + gguf_get_tensor_offset(ctx_gguf.get(), gguf_find_tensor(ctx_gguf.get(), orig->name));
-            size_t size = ggml_nbytes(orig);
-            read_buf.resize(size);
-            gguf_file.seek(offs, SEEK_SET);
-            gguf_file.read_raw(read_buf.data(), size);
-            ggml_backend_tensor_set(dev, read_buf.data(), 0, size);
-        };
-        for (auto & it : adapter.ab_map) {
-            auto orig = ab_map[it.first];
-            auto dev  = it.second;
-            set_tensor(orig.a, dev.a);
-            set_tensor(orig.b, dev.b);
-        }
-    }
-
-    LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2);
-}
-
 int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
@@ -20345,7 +11323,9 @@ int32_t llama_lora_adapter_set(
         LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
         return -1;
     }
+
     ctx->lora_adapters[adapter] = scale;
+
     return 0;
 }
 
@@ -20357,6 +11337,7 @@ int32_t llama_lora_adapter_remove(
         ctx->lora_adapters.erase(pos);
         return 0;
     }
+
     return -1;
 }
 
@@ -20364,37 +11345,20 @@ void llama_lora_adapter_clear(struct llama_context * ctx) {
     ctx->lora_adapters.clear();
 }
 
-void llama_lora_adapter_free(struct llama_lora_adapter * adapter) {
-    delete adapter;
+// TODO: tmp
+int32_t llama_control_vector_apply(
+        struct llama_context * lctx,
+                 const float * data,
+                      size_t   len,
+                     int32_t   n_embd,
+                     int32_t   il_start,
+                     int32_t   il_end) {
+    return llama_control_vector_apply(lctx->cvec, lctx->model, data, len, n_embd, il_start, il_end);
 }
 
 //
 // interface implementation
 //
-struct llama_model_params llama_model_default_params() {
-    struct llama_model_params result = {
-        /*.devices                     =*/ nullptr,
-        /*.n_gpu_layers                =*/ 0,
-        /*.split_mode                  =*/ LLAMA_SPLIT_MODE_LAYER,
-        /*.main_gpu                    =*/ 0,
-        /*.tensor_split                =*/ nullptr,
-        /*.rpc_servers                 =*/ nullptr,
-        /*.progress_callback           =*/ nullptr,
-        /*.progress_callback_user_data =*/ nullptr,
-        /*.kv_overrides                =*/ nullptr,
-        /*.vocab_only                  =*/ false,
-        /*.use_mmap                    =*/ true,
-        /*.use_mlock                   =*/ false,
-        /*.check_tensors               =*/ false,
-    };
-
-#ifdef GGML_USE_METAL
-    // note: we usually have plenty of VRAM, so by default offload all layers to the GPU
-    result.n_gpu_layers = 999;
-#endif
-
-    return result;
-}
 
 struct llama_context_params llama_context_default_params() {
     struct llama_context_params result = {
@@ -20439,24 +11403,6 @@ struct llama_sampler_chain_params llama_sampler_chain_default_params() {
     return result;
 }
 
-struct llama_model_quantize_params llama_model_quantize_default_params() {
-    struct llama_model_quantize_params result = {
-        /*.nthread                     =*/ 0,
-        /*.ftype                       =*/ LLAMA_FTYPE_MOSTLY_Q5_1,
-        /*.output_tensor_type          =*/ GGML_TYPE_COUNT,
-        /*.token_embedding_type        =*/ GGML_TYPE_COUNT,
-        /*.allow_requantize            =*/ false,
-        /*.quantize_output_tensor      =*/ true,
-        /*.only_copy                   =*/ false,
-        /*.pure                        =*/ false,
-        /*.keep_split                  =*/ false,
-        /*.imatrix                     =*/ nullptr,
-        /*.kv_overrides                =*/ nullptr,
-    };
-
-    return result;
-}
-
 size_t llama_max_devices(void) {
     return 16;
 }
@@ -20499,19 +11445,6 @@ void llama_numa_init(enum ggml_numa_strategy numa) {
     }
 }
 
-void llama_attach_threadpool(
-             struct llama_context * ctx,
-        ggml_threadpool_t   threadpool,
-        ggml_threadpool_t   threadpool_batch) {
-    ctx->threadpool       = threadpool;
-    ctx->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool;
-}
-
-void llama_detach_threadpool(struct llama_context * ctx) {
-    ctx->threadpool       = nullptr;
-    ctx->threadpool_batch = nullptr;
-}
-
 void llama_backend_free(void) {
     ggml_quantize_free();
 }
@@ -20522,7 +11455,7 @@ int64_t llama_time_us(void) {
 
 struct llama_model * llama_load_model_from_file(
         const char * path_model,
-        struct llama_model_params   params) {
+        struct llama_model_params params) {
     ggml_time_init();
 
     llama_model * model = new llama_model;
@@ -20633,6 +11566,7 @@ struct llama_model * llama_load_model_from_file(
         } else if (status == -2) {
             LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
         }
+
         llama_free_model(model);
         return nullptr;
     }
@@ -20640,10 +11574,6 @@ struct llama_model * llama_load_model_from_file(
     return model;
 }
 
-void llama_free_model(struct llama_model * model) {
-    delete model;
-}
-
 struct llama_context * llama_new_context_with_model(
                  struct llama_model * model,
         struct llama_context_params   params) {
@@ -20844,7 +11774,7 @@ struct llama_context * llama_new_context_with_model(
 
         llama_set_abort_callback(ctx, params.abort_callback, params.abort_callback_data);
 
-        if (!llama_kv_cache_init(ctx->kv_self, ctx, type_k, type_v, kv_size, cparams.offload_kqv)) {
+        if (!llama_kv_cache_init(ctx->kv_self, ctx->model, ctx->cparams, type_k, type_v, kv_size, cparams.offload_kqv)) {
             LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__);
             llama_free(ctx);
             return nullptr;
@@ -20995,442 +11925,26 @@ struct llama_context * llama_new_context_with_model(
     return ctx;
 }
 
-void llama_free(struct llama_context * ctx) {
-    delete ctx;
-}
+//
+// kv cache
+//
 
-uint32_t llama_n_ctx(const struct llama_context * ctx) {
-    return ctx->cparams.n_ctx;
-}
-
-uint32_t llama_n_batch(const struct llama_context * ctx) {
-    return ctx->cparams.n_batch;
-}
-
-uint32_t llama_n_ubatch(const struct llama_context * ctx) {
-    return ctx->cparams.n_ubatch;
-}
-
-uint32_t llama_n_seq_max(const struct llama_context * ctx) {
-    return ctx->kv_self.size;
-}
-
-enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
-    return model->vocab.type;
-}
-
-int32_t llama_n_vocab(const struct llama_model * model) {
-    return model->hparams.n_vocab;
-}
-
-int32_t llama_n_ctx_train(const struct llama_model * model) {
-    return model->hparams.n_ctx_train;
-}
-
-int32_t llama_n_embd(const struct llama_model * model) {
-    return model->hparams.n_embd;
-}
-
-int32_t llama_n_layer(const struct llama_model * model) {
-    return model->hparams.n_layer;
-}
-
-int32_t llama_n_head(const struct llama_model * model) {
-    return model->hparams.n_head();
-}
-
-const struct llama_model * llama_get_model(const struct llama_context * ctx) {
-    return &ctx->model;
-}
-
-enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx) {
-    return ctx->cparams.pooling_type;
-}
-
-enum llama_rope_type llama_rope_type(const struct llama_model * model) {
-    switch (model->arch) {
-        // these models do not use RoPE
-        case LLM_ARCH_GPT2:
-        case LLM_ARCH_GPTJ:
-        case LLM_ARCH_MPT:
-        case LLM_ARCH_REFACT:
-        case LLM_ARCH_BLOOM:
-        case LLM_ARCH_MAMBA:
-        case LLM_ARCH_JINA_BERT_V2:
-        case LLM_ARCH_T5:
-        case LLM_ARCH_T5ENCODER:
-        case LLM_ARCH_JAIS:
-        case LLM_ARCH_RWKV6:
-        case LLM_ARCH_WAVTOKENIZER_DEC:
-            return LLAMA_ROPE_TYPE_NONE;
-
-        // use what we call a normal RoPE, operating on pairs of consecutive head values
-        case LLM_ARCH_LLAMA:
-        case LLM_ARCH_DECI:
-        case LLM_ARCH_BAICHUAN:
-        case LLM_ARCH_STARCODER:
-        case LLM_ARCH_PLAMO:
-        case LLM_ARCH_ORION:
-        case LLM_ARCH_INTERNLM2:
-        case LLM_ARCH_MINICPM:
-        case LLM_ARCH_XVERSE:
-        case LLM_ARCH_COMMAND_R:
-        case LLM_ARCH_OLMO:
-        case LLM_ARCH_ARCTIC:
-        case LLM_ARCH_DEEPSEEK:
-        case LLM_ARCH_DEEPSEEK2:
-        case LLM_ARCH_CHATGLM:
-        case LLM_ARCH_GRANITE:
-        case LLM_ARCH_GRANITE_MOE:
-        case LLM_ARCH_CHAMELEON:
-            return LLAMA_ROPE_TYPE_NORM;
-
-        // the pairs of head values are offset by n_rot/2
-        case LLM_ARCH_FALCON:
-        case LLM_ARCH_GROK:
-        case LLM_ARCH_DBRX:
-        case LLM_ARCH_BERT:
-        case LLM_ARCH_NOMIC_BERT:
-        case LLM_ARCH_STABLELM:
-        case LLM_ARCH_BITNET:
-        case LLM_ARCH_QWEN:
-        case LLM_ARCH_QWEN2:
-        case LLM_ARCH_QWEN2MOE:
-        case LLM_ARCH_OLMO2:
-        case LLM_ARCH_OLMOE:
-        case LLM_ARCH_PHI2:
-        case LLM_ARCH_PHI3:
-        case LLM_ARCH_GEMMA:
-        case LLM_ARCH_GEMMA2:
-        case LLM_ARCH_STARCODER2:
-        case LLM_ARCH_OPENELM:
-        case LLM_ARCH_GPTNEOX:
-        case LLM_ARCH_CODESHELL:
-        case LLM_ARCH_NEMOTRON:
-        case LLM_ARCH_EXAONE:
-        case LLM_ARCH_MINICPM3:
-            return LLAMA_ROPE_TYPE_NEOX;
-
-        case LLM_ARCH_QWEN2VL:
-            return LLAMA_ROPE_TYPE_MROPE;
-
-        // all model arches should be listed explicitly here
-        case LLM_ARCH_UNKNOWN:
-            GGML_ABORT("unknown architecture");
-    }
-
-    return LLAMA_ROPE_TYPE_NONE;
-}
-
-float llama_rope_freq_scale_train(const struct llama_model * model) {
-    return model->hparams.rope_freq_scale_train;
-}
-
-int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size) {
-    const auto & it = model->gguf_kv.find(key);
-    if (it == model->gguf_kv.end()) {
-        if (buf_size > 0) {
-            buf[0] = '\0';
-        }
-        return -1;
-    }
-    return snprintf(buf, buf_size, "%s", it->second.c_str());
-}
-
-int32_t llama_model_meta_count(const struct llama_model * model) {
-    return (int)model->gguf_kv.size();
-}
-
-int32_t llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size) {
-    if (i < 0 || i >= (int)model->gguf_kv.size()) {
-        if (buf_size > 0) {
-            buf[0] = '\0';
-        }
-        return -1;
-    }
-    auto it = model->gguf_kv.begin();
-    std::advance(it, i);
-    return snprintf(buf, buf_size, "%s", it->first.c_str());
-}
-
-int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size) {
-    if (i < 0 || i >= (int)model->gguf_kv.size()) {
-        if (buf_size > 0) {
-            buf[0] = '\0';
-        }
-        return -1;
-    }
-    auto it = model->gguf_kv.begin();
-    std::advance(it, i);
-    return snprintf(buf, buf_size, "%s", it->second.c_str());
-}
-
-int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
-    return snprintf(buf, buf_size, "%s %s %s",
-            llama_model_arch_name(model->arch),
-            llama_model_type_name(model->type),
-            llama_model_ftype_name(model->ftype).c_str());
-}
-
-uint64_t llama_model_size(const struct llama_model * model) {
-    return model->n_bytes;
-}
-
-uint64_t llama_model_n_params(const struct llama_model * model) {
-    return model->n_elements;
-}
-
-bool llama_model_has_encoder(const struct llama_model * model) {
-    switch (model->arch) {
-        case LLM_ARCH_T5:        return true;
-        case LLM_ARCH_T5ENCODER: return true;
-        default:                 return false;
-    }
-}
-
-bool llama_model_has_decoder(const struct llama_model * model) {
-    switch (model->arch) {
-        case LLM_ARCH_T5ENCODER: return false;
-        default:                 return true;
-    }
-}
-
-llama_token llama_model_decoder_start_token(const struct llama_model * model) {
-    return model->hparams.dec_start_token_id;
-}
-
-bool llama_model_is_recurrent(const struct llama_model * model) {
-    switch (model->arch) {
-        case LLM_ARCH_MAMBA:  return true;
-        case LLM_ARCH_RWKV6:  return true;
-        default:              return false;
-    }
-}
-
-uint32_t llama_model_quantize(
-        const char * fname_inp,
-        const char * fname_out,
-        const llama_model_quantize_params * params) {
-    try {
-        llama_model_quantize_internal(fname_inp, fname_out, params);
-        return 0;
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
-        return 1;
-    }
-}
-
-struct llama_lora_adapter * llama_lora_adapter_init(struct llama_model * model, const char * path_lora) {
-    try {
-        struct llama_lora_adapter * adapter = new llama_lora_adapter(model);
-        llama_lora_adapter_init_internal(model, path_lora, *adapter);
-        return adapter;
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
-        return nullptr;
-    }
-}
-
-static bool llama_control_vector_init(struct llama_control_vector & cvec, const llama_model & model) {
-    GGML_ASSERT(cvec.tensors.empty());
-    GGML_ASSERT(cvec.ctxs.empty());
-    GGML_ASSERT(cvec.bufs.empty());
-
-    // create a context for each buffer type
-    std::map ctx_map;
-    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
-        auto it = ctx_map.find(buft);
-        if (it == ctx_map.end()) {
-            struct ggml_init_params params = {
-                /*.mem_size   =*/ model.hparams.n_layer*ggml_tensor_overhead(),
-                /*.mem_buffer =*/ NULL,
-                /*.no_alloc   =*/ true,
-            };
-            ggml_context * ctx = ggml_init(params);
-            if (!ctx) {
-                return nullptr;
-            }
-            ctx_map[buft] = ctx;
-            cvec.ctxs.emplace_back(ctx);
-            return ctx;
-        }
-        return it->second;
-    };
-
-    // make tensors
-    cvec.tensors.reserve(model.hparams.n_layer);
-    cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0
-    for (size_t il = 1; il < model.hparams.n_layer; il++) {
-        ggml_backend_buffer_type_t buft = select_buft(*model.dev_layer.at(il).buft_list,
-            [&](ggml_context * ctx) {
-                ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
-                ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
-                return ggml_add(ctx, cur, layer_dir);
-            });
-        ggml_context * ctx = ctx_for_buft(buft);
-        if (!ctx) {
-            LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__);
-            return false;
-        }
-        ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd);
-        cvec.tensors.push_back(tensor);
-    }
-
-    // allocate tensors / buffers and zero
-    cvec.bufs.reserve(ctx_map.size());
-    for (auto it : ctx_map) {
-        ggml_backend_buffer_type_t buft = it.first;
-        ggml_context * ctx = it.second;
-        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
-        if (!buf) {
-            LLAMA_LOG_ERROR("%s: failed to allocate buffer for control vector\n", __func__);
-            return false;
-        }
-        ggml_backend_buffer_clear(buf, 0);
-        cvec.bufs.emplace_back(buf);
-    }
-
-    return true;
-}
-
-int32_t llama_control_vector_apply(struct llama_context * lctx, const float * data, size_t len, int32_t n_embd, int32_t il_start, int32_t il_end) {
-    const llama_model & model = lctx->model;
-    llama_control_vector & cvec = lctx->cvec;
-
-    if (data == nullptr) {
-        // disable the current control vector (but leave allocated for later)
-        cvec.layer_start = -1;
-        cvec.layer_end   = -1;
-        return 0;
-    }
-
-    if (n_embd != (int) model.hparams.n_embd) {
-        LLAMA_LOG_ERROR("%s: control vector n_embd does not match model\n", __func__);
-        return 1;
-    }
-
-    if (cvec.tensors.empty()) {
-        if (!llama_control_vector_init(cvec, model)) {
-            return 1;
-        }
-    }
-
-    cvec.layer_start = il_start;
-    cvec.layer_end   = il_end;
-
-    for (size_t il = 1; il < model.hparams.n_layer; il++) {
-        assert(cvec.tensors[il] != nullptr);
-
-        const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present
-        if (off + n_embd <= len) {
-            ggml_backend_tensor_set(cvec.tensors[il], data + off, 0, n_embd * ggml_element_size(cvec.tensors[il]));
-        }
-    }
-
-    return 0;
-}
+// TODO: tmp bridges below until `struct llama_kv_cache` is exposed through the public API
 
 struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max) {
-    struct llama_kv_cache_view result = {
-        /*.n_cells            = */ 0,
-        /*.n_seq_max          = */ n_seq_max,
-        /*.token_count        = */ 0,
-        /*.used_cells         = */ llama_get_kv_cache_used_cells(ctx),
-        /*.max_contiguous     = */ 0,
-        /*.max_contiguous_idx = */ -1,
-        /*.cells              = */ nullptr,
-        /*.cells_sequences    = */ nullptr,
-    };
-    return result;
-}
-
-void llama_kv_cache_view_free(struct llama_kv_cache_view * view) {
-    if (view->cells != nullptr) {
-        free(view->cells);
-        view->cells = nullptr;
-    }
-    if (view->cells_sequences != nullptr) {
-        free(view->cells_sequences);
-        view->cells_sequences = nullptr;
-    }
+    return llama_kv_cache_view_init(ctx->kv_self, n_seq_max);
 }
 
 void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view) {
-    if (uint32_t(view->n_cells) < ctx->kv_self.size || view->cells == nullptr) {
-        view->n_cells = int32_t(ctx->kv_self.size);
-        void * p = realloc(view->cells, sizeof(struct llama_kv_cache_view_cell) * view->n_cells);
-        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells");
-        view->cells = (struct llama_kv_cache_view_cell *)p;
-        p = realloc(view->cells_sequences, sizeof(llama_seq_id) * view->n_seq_max * view->n_cells);
-        GGML_ASSERT(p != nullptr && "Failed to alloc kv_cache_view cells sequences");
-        view->cells_sequences = (llama_seq_id *)p;
-    }
-
-    const std::vector & kv_cells = ctx->kv_self.cells;
-    llama_kv_cache_view_cell * c_curr = view->cells;
-    llama_seq_id * cs_curr = view->cells_sequences;
-    int32_t used_cells = 0;
-    int32_t token_count = 0;
-    int32_t curr_contig_idx = -1;
-    uint32_t max_contig = 0;
-    int32_t max_contig_idx = -1;
-
-    for (int32_t i = 0; i < int32_t(ctx->kv_self.size); i++, c_curr++, cs_curr += view->n_seq_max) {
-        const size_t curr_size = kv_cells[i].seq_id.size();
-        token_count += curr_size;
-        c_curr->pos = kv_cells[i].pos + kv_cells[i].delta;
-
-        if (curr_size > 0) {
-            if (curr_contig_idx >= 0 && uint32_t(i - curr_contig_idx) > max_contig) {
-                max_contig = i - curr_contig_idx;
-                max_contig_idx = curr_contig_idx;
-            }
-            curr_contig_idx = -1;
-        } else if (curr_contig_idx < 0) {
-            curr_contig_idx = i;
-        }
-
-        int seq_idx = 0;
-        for (const llama_seq_id it : kv_cells[i].seq_id) {
-            if (seq_idx >= view->n_seq_max) {
-                break;
-            }
-            cs_curr[seq_idx] = it;
-            seq_idx++;
-        }
-        if (seq_idx != 0) {
-            used_cells++;
-        }
-        for (; seq_idx < view->n_seq_max; seq_idx++) {
-            cs_curr[seq_idx] = -1;
-        }
-    }
-    if (curr_contig_idx >= 0 && kv_cells.size() - curr_contig_idx > max_contig) {
-        max_contig_idx = curr_contig_idx;
-        max_contig = kv_cells.size() - curr_contig_idx;
-    }
-    view->max_contiguous = max_contig;
-    view->max_contiguous_idx = max_contig_idx;
-    view->token_count = token_count;
-    view->used_cells = used_cells;
-    if (uint32_t(used_cells) != ctx->kv_self.used) {
-        LLAMA_LOG_ERROR("%s: used cells mismatch. kv_cache says %d but we calculated %d\n",
-            __func__, ctx->kv_self.used, used_cells);
-    }
+    llama_kv_cache_view_update(view, ctx->kv_self);
 }
 
 int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx) {
-    int result = 0;
-
-    for (uint32_t i = 0; i < ctx->kv_self.size; i++) {
-        result += ctx->kv_self.cells[i].seq_id.size();
-    }
-
-    return result;
+    return llama_get_kv_cache_token_count(ctx->kv_self);
 }
 
 int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx) {
-    return ctx->kv_self.used;
+    return llama_get_kv_cache_used_cells(ctx->kv_self);
 }
 
 void llama_kv_cache_clear(struct llama_context * ctx) {
@@ -21481,1068 +11995,10 @@ void llama_kv_cache_update(struct llama_context * ctx) {
 }
 
 bool llama_kv_cache_can_shift(struct llama_context * ctx) {
-    return !ctx->kv_self.recurrent && ctx->model.arch != LLM_ARCH_DEEPSEEK2; // not supported due to MLA
+    return llama_kv_cache_can_shift(ctx->kv_self);
 }
 
-// deprecated
-size_t llama_get_state_size(struct llama_context * ctx) {
-    return llama_state_get_size(ctx);
-}
-
-// deprecated
-size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
-    return llama_state_get_data(ctx, dst, -1);
-}
-
-// deprecated
-size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
-    return llama_state_set_data(ctx, src, -1);
-}
-
-// deprecated
-bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
-}
-
-// deprecated
-bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
-    return llama_state_save_file(ctx, path_session, tokens, n_token_count);
-}
-
-// TODO: replace all non-fatal assertions with returned errors or exceptions
-struct llama_data_write {
-    virtual void write(const void * src, size_t size) = 0;
-    virtual void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) = 0;
-    virtual size_t get_size_written() = 0;
-    virtual ~llama_data_write() = default;
-
-    void write_string(const std::string & str) {
-        uint32_t str_size = str.size();
-
-        write(&str_size,  sizeof(str_size));
-        write(str.data(), str_size);
-    }
-
-    void write_model_info(const struct llama_context * ctx) {
-        std::string arch_str = LLM_ARCH_NAMES.at(ctx->model.arch);
-        write_string(arch_str);
-        // TODO: add more model-specific info which should prevent loading the session file if not identical
-    }
-
-    //void write_rng(const std::mt19937 & rng) {
-    //    std::ostringstream rng_ss;
-    //    rng_ss << rng;
-
-    //    const std::string & rng_str = rng_ss.str();
-
-    //    write_string(rng_str);
-    //}
-
-    void write_output_ids(struct llama_context * ctx) {
-        llama_output_reorder(ctx);
-
-        const uint32_t n_outputs = ctx->n_outputs;
-
-        std::vector output_pos;
-
-        const size_t    n_batch = ctx->cparams.n_batch;
-        const auto & output_ids = ctx->output_ids;
-
-        GGML_ASSERT(n_outputs <= ctx->output_size);
-
-        output_pos.resize(n_outputs);
-
-        // build a more compact representation of the output ids
-        for (size_t i = 0; i < n_batch; ++i) {
-            // map an output id to a position in the batch
-            int32_t pos = output_ids[i];
-            if (pos >= 0) {
-                GGML_ASSERT((uint32_t) pos < n_outputs);
-                output_pos[pos] = i;
-            }
-        }
-
-        write(&n_outputs, sizeof(n_outputs));
-
-        if (n_outputs) {
-            write(output_pos.data(), n_outputs * sizeof(int32_t));
-        }
-    }
-
-    void write_logits(const struct llama_context * ctx) {
-        const uint64_t logits_size = std::min((uint64_t) ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_vocab);
-
-        write(&logits_size, sizeof(logits_size));
-
-        if (logits_size) {
-            write(ctx->logits, logits_size * sizeof(float));
-        }
-    }
-
-    void write_embeddings(const struct llama_context * ctx) {
-        const uint64_t embeddings_size = std::min((uint64_t) ctx->embd_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_embd);
-
-        write(&embeddings_size, sizeof(embeddings_size));
-
-        if (embeddings_size) {
-            write(ctx->embd, embeddings_size * sizeof(float));
-        }
-    }
-
-    void write_kv_cache_meta(const llama_kv_cache & kv_self, const std::vector> & cell_ranges, llama_seq_id seq_id = -1) {
-
-        for (const auto & range : cell_ranges) {
-            for (uint32_t i = range.first; i < range.second; ++i) {
-                const auto & cell = kv_self.cells[i];
-                const llama_pos pos      = cell.pos;
-                const uint32_t  n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;
-
-                write(&pos,      sizeof(pos));
-                write(&n_seq_id, sizeof(n_seq_id));
-
-                if (n_seq_id) {
-                    for (auto seq_id : cell.seq_id) {
-                        write(&seq_id, sizeof(seq_id));
-                    }
-                }
-            }
-        }
-    }
-
-    void write_kv_cache_data(const struct llama_context * ctx, const std::vector> & cell_ranges) {
-        const struct llama_kv_cache & kv_self = ctx->kv_self;
-        const struct llama_hparams & hparams = ctx->model.hparams;
-
-        const uint32_t v_trans = kv_self.v_trans ? 1 : 0;
-        const uint32_t n_layer = hparams.n_layer;
-
-        write(&v_trans, sizeof(v_trans));
-        write(&n_layer, sizeof(n_layer));
-
-        std::vector tmp_buf;
-
-        // Iterate and write all the keys first, each row is a cell
-        // Get whole range at a time
-        for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
-
-            // Write key type
-            const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
-            write(&k_type_i, sizeof(k_type_i));
-
-            // Write row size of key
-            const uint64_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
-            write(&k_size_row, sizeof(k_size_row));
-
-            // Read each range of cells of k_size length each into tmp_buf and write out
-            for (const auto & range : cell_ranges) {
-                const size_t range_size = range.second - range.first;
-                const size_t buf_size = range_size * k_size_row;
-                write_tensor_data(kv_self.k_l[il], range.first * k_size_row, buf_size);
-            }
-        }
-
-        if (!kv_self.v_trans) {
-            for (uint32_t il = 0; il < n_layer; ++il) {
-                const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
-                // Write value type
-                const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
-                write(&v_type_i, sizeof(v_type_i));
-
-                // Write row size of value
-                const uint64_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
-                write(&v_size_row, sizeof(v_size_row));
-
-                // Read each range of cells of v_size length each into tmp_buf and write out
-                for (const auto & range : cell_ranges) {
-                    const size_t range_size = range.second - range.first;
-                    const size_t buf_size = range_size * v_size_row;
-                    write_tensor_data(kv_self.v_l[il], range.first * v_size_row, buf_size);
-                }
-            }
-        } else {
-            // When v is transposed, we also need the element size and get the element ranges from each row
-            const uint32_t kv_size = kv_self.size;
-            for (uint32_t il = 0; il < n_layer; ++il) {
-                const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
-                // Write value type
-                const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
-                write(&v_type_i, sizeof(v_type_i));
-
-                // Write element size
-                const uint32_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
-                write(&v_size_el, sizeof(v_size_el));
-
-                // Write GQA embedding size
-                write(&n_embd_v_gqa, sizeof(n_embd_v_gqa));
-
-                // For each row, we get the element values of each cell
-                for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
-                    // Read each range of cells of v_size_el length each into tmp_buf and write out
-                    for (const auto & range : cell_ranges) {
-                        const size_t range_size = range.second - range.first;
-                        const size_t src_offset = (range.first + j * kv_size) * v_size_el;
-                        const size_t buf_size = range_size * v_size_el;
-                        write_tensor_data(kv_self.v_l[il], src_offset, buf_size);
-                    }
-                }
-            }
-        }
-    }
-
-    void write_kv_cache(const struct llama_context * ctx, llama_seq_id seq_id = -1) {
-        const struct llama_kv_cache & kv_self = ctx->kv_self;
-        std::vector> cell_ranges; // ranges, from inclusive, to exclusive
-        uint32_t cell_count = 0;
-
-        // Count the number of cells with the specified seq_id
-        // Find all the ranges of cells with this seq id (or all, when -1)
-        uint32_t cell_range_begin = kv_self.size;
-        for (uint32_t i = 0; i < kv_self.size; ++i) {
-            const auto & cell = kv_self.cells[i];
-            if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
-                ++cell_count;
-                if (cell_range_begin == kv_self.size) {
-                    cell_range_begin = i;
-                }
-            } else {
-                if (cell_range_begin != kv_self.size) {
-                    cell_ranges.emplace_back(cell_range_begin, i);
-                    cell_range_begin = kv_self.size;
-                }
-            }
-        }
-        if (cell_range_begin != kv_self.size) {
-            cell_ranges.emplace_back(cell_range_begin, kv_self.size);
-        }
-
-        // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
-        uint32_t cell_count_check = 0;
-        for (const auto & range : cell_ranges) {
-            cell_count_check += range.second - range.first;
-        }
-        GGML_ASSERT(cell_count == cell_count_check);
-
-        write(&cell_count, sizeof(cell_count));
-
-        write_kv_cache_meta(kv_self, cell_ranges, seq_id);
-        write_kv_cache_data(ctx, cell_ranges);
-    }
-};
-
-struct llama_data_read {
-    virtual const uint8_t * read(size_t size) = 0;
-    virtual void read_to(void * dst, size_t size) = 0;
-    virtual size_t get_size_read() = 0;
-    virtual ~llama_data_read() = default;
-
-    void read_string(std::string & str) {
-        uint32_t str_size;
-        read_to(&str_size, sizeof(str_size));
-
-        str.assign((const char *) read(str_size), str_size);
-    }
-
-    // validate model information
-    void read_model_info(const struct llama_context * ctx) {
-        std::string cur_arch_str = LLM_ARCH_NAMES.at(ctx->model.arch);
-        std::string arch_str;
-        read_string(arch_str);
-        if (cur_arch_str != arch_str) {
-            throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str()));
-        }
-        // TODO: add more info which needs to be identical but which is not verified otherwise
-    }
-
-    //void read_rng(std::mt19937 & rng) {
-    //    std::string rng_str;
-    //    read_string(rng_str);
-
-    //    std::istringstream rng_ss(rng_str);
-    //    rng_ss >> rng;
-
-    //    if (rng_ss.fail()) {
-    //        throw std::runtime_error("failed to load RNG state");
-    //    }
-    //}
-
-    void read_output_ids(struct llama_context * ctx) {
-        std::vector output_pos;
-
-        uint32_t n_outputs;
-        read_to(&n_outputs, sizeof(n_outputs));
-
-        if (n_outputs > llama_output_reserve(*ctx, n_outputs)) {
-            throw std::runtime_error("could not reserve outputs");
-        }
-
-        if (n_outputs) {
-            output_pos.resize(n_outputs);
-            read_to(output_pos.data(), n_outputs * sizeof(int32_t));
-
-            for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) {
-                int32_t id = output_pos[i];
-                if ((uint32_t) id >= ctx->cparams.n_batch) {
-                    throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, ctx->cparams.n_batch));
-                }
-                ctx->output_ids[id] = i;
-            }
-
-            ctx->n_outputs = n_outputs;
-        }
-    }
-
-    void read_logits(struct llama_context * ctx) {
-        uint64_t logits_size;
-        read_to(&logits_size, sizeof(logits_size));
-
-        if (ctx->logits_size < logits_size) {
-            throw std::runtime_error("logits buffer too small");
-        }
-
-        if (logits_size) {
-            read_to(ctx->logits, logits_size * sizeof(float));
-        }
-    }
-
-    void read_embeddings(struct llama_context * ctx) {
-        uint64_t embeddings_size;
-        read_to(&embeddings_size, sizeof(embeddings_size));
-
-        if (ctx->embd_size < embeddings_size) {
-            throw std::runtime_error("embeddings buffer too small");
-        }
-
-        if (embeddings_size) {
-            read_to(ctx->embd, embeddings_size * sizeof(float));
-        }
-    }
-
-    bool read_kv_cache_meta(struct llama_context * ctx, uint32_t cell_count, llama_seq_id dest_seq_id = -1) {
-        struct llama_kv_cache & kv_self = ctx->kv_self;
-
-        if (dest_seq_id != -1) {
-            // single sequence
-
-            llama_kv_cache_seq_rm(kv_self, dest_seq_id, -1, -1);
-
-            llama_ubatch batch = ctx->sbatch.reserve_ubatch(cell_count, /* has_embd */ false);
-            batch.n_tokens = cell_count;
-            batch.n_seq_tokens = cell_count;
-            batch.n_seqs = 1;
-
-            for (uint32_t i = 0; i < cell_count; ++i) {
-                llama_pos pos;
-                uint32_t n_seq_id;
-
-                read_to(&pos, sizeof(pos));
-                read_to(&n_seq_id, sizeof(n_seq_id));
-
-                if (n_seq_id != 0) {
-                    LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
-                    return false;
-                }
-
-                batch.pos[i] = pos;
-            }
-            batch.n_seq_id[0] = 1;
-            batch.seq_id[0] = &dest_seq_id;
-            if (!llama_kv_cache_find_slot(kv_self, batch)) {
-                LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
-                return false;
-            }
-
-            // DEBUG CHECK: kv_self.head should be our first cell, kv_self.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
-            // Assume that this is one contiguous block of cells
-            GGML_ASSERT(kv_self.head + cell_count <= kv_self.size);
-            GGML_ASSERT(kv_self.cells[kv_self.head].pos == batch.pos[0]);
-            GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].pos == batch.pos[cell_count - 1]);
-            GGML_ASSERT(kv_self.cells[kv_self.head].has_seq_id(dest_seq_id));
-            GGML_ASSERT(kv_self.cells[kv_self.head + cell_count - 1].has_seq_id(dest_seq_id));
-        } else {
-            // whole KV cache restore
-
-            if (cell_count > kv_self.size) {
-                LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
-                return false;
-            }
-
-            llama_kv_cache_clear(kv_self);
-
-            for (uint32_t i = 0; i < cell_count; ++i) {
-                llama_kv_cell & cell = kv_self.cells[i];
-
-                llama_pos pos;
-                uint32_t  n_seq_id;
-
-                read_to(&pos,      sizeof(pos));
-                read_to(&n_seq_id, sizeof(n_seq_id));
-
-                cell.pos = pos;
-
-                for (uint32_t j = 0; j < n_seq_id; ++j) {
-                    llama_seq_id seq_id;
-                    read_to(&seq_id, sizeof(seq_id));
-
-                    if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
-                        LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
-                        return false;
-                    }
-
-                    cell.seq_id.insert(seq_id);
-
-                    if (kv_self.recurrent) {
-                        int32_t & tail = kv_self.cells[seq_id].tail;
-                        if (tail != -1) {
-                            LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
-                            return false;
-                        }
-                        tail = i;
-                    }
-                }
-            }
-
-            kv_self.head = 0;
-            kv_self.used = cell_count;
-        }
-
-        if (kv_self.recurrent) {
-            for (uint32_t i = 0; i < cell_count; ++i) {
-                uint32_t cell_id = kv_self.head + i;
-                // make sure the recurrent states will keep their restored state
-                kv_self.cells[cell_id].src = cell_id;
-            }
-        }
-
-        return true;
-    }
-
-    bool read_kv_cache_data(struct llama_context * ctx, uint32_t cell_count) {
-        const struct llama_hparams & hparams = ctx->model.hparams;
-        struct llama_kv_cache & kv_self = ctx->kv_self;
-        uint32_t v_trans;
-        uint32_t n_layer;
-        read_to(&v_trans, sizeof(v_trans));
-        read_to(&n_layer, sizeof(n_layer));
-
-        if (n_layer != hparams.n_layer) {
-            LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
-            return false;
-        }
-        if (cell_count > kv_self.size) {
-            LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, kv_self.size);
-            return false;
-        }
-        if (kv_self.v_trans != (bool) v_trans) {
-            LLAMA_LOG_ERROR("%s: incompatible V transposition\n", __func__);
-            return false;
-        }
-
-        // For each layer, read the keys for each cell, one row is one cell, read as one contiguous block
-        for (uint32_t il = 0; il < n_layer; ++il) {
-            const uint32_t n_embd_k_gqa = hparams.n_embd_k_gqa(il) + hparams.n_embd_k_s();
-
-            // Read type of key
-            int32_t k_type_i_ref;
-            read_to(&k_type_i_ref, sizeof(k_type_i_ref));
-            const int32_t k_type_i = (int32_t)kv_self.k_l[il]->type;
-            if (k_type_i != k_type_i_ref) {
-                LLAMA_LOG_ERROR("%s: mismatched key type (%d != %d, layer %d)\n", __func__, k_type_i, k_type_i_ref, il);
-                return false;
-            }
-
-            // Read row size of key
-            uint64_t k_size_row_ref;
-            read_to(&k_size_row_ref, sizeof(k_size_row_ref));
-            const size_t k_size_row = ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa);
-            if (k_size_row != k_size_row_ref) {
-                LLAMA_LOG_ERROR("%s: mismatched key row size (%zu != %zu, layer %d)\n", __func__, k_size_row, (size_t) k_size_row_ref, il);
-                return false;
-            }
-
-            if (cell_count) {
-                // Read and set the keys for the whole cell range
-                ggml_backend_tensor_set(kv_self.k_l[il], read(cell_count * k_size_row), kv_self.head * k_size_row, cell_count * k_size_row);
-            }
-        }
-
-        if (!kv_self.v_trans) {
-            for (uint32_t il = 0; il < n_layer; ++il) {
-                const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
-                // Read type of value
-                int32_t v_type_i_ref;
-                read_to(&v_type_i_ref, sizeof(v_type_i_ref));
-                const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
-                if (v_type_i != v_type_i_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
-                    return false;
-                }
-
-                // Read row size of value
-                uint64_t v_size_row_ref;
-                read_to(&v_size_row_ref, sizeof(v_size_row_ref));
-                const size_t v_size_row = ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
-                if (v_size_row != v_size_row_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched value row size (%zu != %zu, layer %d)\n", __func__, v_size_row, (size_t) v_size_row_ref, il);
-                    return false;
-                }
-
-                if (cell_count) {
-                    // Read and set the values for the whole cell range
-                    ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_row), kv_self.head * v_size_row, cell_count * v_size_row);
-                }
-            }
-        } else {
-            // For each layer, read the values for each cell (transposed)
-            for (uint32_t il = 0; il < n_layer; ++il) {
-                const uint32_t n_embd_v_gqa = hparams.n_embd_v_gqa(il) + hparams.n_embd_v_s();
-
-                // Read type of value
-                int32_t v_type_i_ref;
-                read_to(&v_type_i_ref, sizeof(v_type_i_ref));
-                const int32_t v_type_i = (int32_t)kv_self.v_l[il]->type;
-                if (v_type_i != v_type_i_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched value type (%d != %d, layer %d)\n", __func__, v_type_i, v_type_i_ref, il);
-                    return false;
-                }
-
-                // Read element size of value
-                uint32_t v_size_el_ref;
-                read_to(&v_size_el_ref, sizeof(v_size_el_ref));
-                const size_t v_size_el = ggml_type_size(kv_self.v_l[il]->type);
-                if (v_size_el != v_size_el_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched value element size (%zu != %zu, layer %d)\n", __func__, v_size_el, (size_t) v_size_el_ref, il);
-                    return false;
-                }
-
-                // Read GQA embedding size
-                uint32_t n_embd_v_gqa_ref;
-                read_to(&n_embd_v_gqa_ref, sizeof(n_embd_v_gqa_ref));
-                if (n_embd_v_gqa != n_embd_v_gqa_ref) {
-                    LLAMA_LOG_ERROR("%s: mismatched GQA embedding size (%u != %u, layer %d)\n", __func__, n_embd_v_gqa, n_embd_v_gqa_ref, il);
-                    return false;
-                }
-
-                if (cell_count) {
-                    // For each row in the transposed matrix, read the values for the whole cell range
-                    for (uint32_t j = 0; j < n_embd_v_gqa; ++j) {
-                        const size_t dst_offset = (kv_self.head + j * kv_self.size) * v_size_el;
-                        ggml_backend_tensor_set(kv_self.v_l[il], read(cell_count * v_size_el), dst_offset, cell_count * v_size_el);
-                    }
-                }
-            }
-        }
-        return true;
-    }
-
-    void read_kv_cache(struct llama_context * ctx, llama_seq_id seq_id = -1) {
-        uint32_t cell_count;
-        read_to(&cell_count, sizeof(cell_count));
-
-        bool res = read_kv_cache_meta(ctx, cell_count, seq_id) && read_kv_cache_data(ctx, cell_count);
-
-        if (!res) {
-            if (seq_id == -1) {
-                llama_kv_cache_clear(ctx);
-            } else {
-                llama_kv_cache_seq_rm(ctx, seq_id, -1, -1);
-            }
-            throw std::runtime_error("failed to restore kv cache");
-        }
-    }
-};
-
-struct llama_data_write_dummy : llama_data_write {
-    size_t size_written = 0;
-
-    llama_data_write_dummy() {}
-
-    void write(const void * /* src */, size_t size) override {
-        size_written += size;
-    }
-
-    void write_tensor_data(const struct ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override {
-        size_written += size;
-    }
-
-    size_t get_size_written() override {
-        return size_written;
-    }
-};
-
-struct llama_data_write_buffer : llama_data_write {
-    uint8_t * ptr;
-    size_t buf_size = 0;
-    size_t size_written = 0;
-
-    llama_data_write_buffer(uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
-
-    void write(const void * src, size_t size) override {
-        if (size > buf_size) {
-            throw std::runtime_error("unexpectedly reached end of buffer");
-        }
-        memcpy(ptr, src, size);
-        ptr += size;
-        size_written += size;
-        buf_size -= size;
-    }
-
-    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
-        if (size > buf_size) {
-            throw std::runtime_error("unexpectedly reached end of buffer");
-        }
-        ggml_backend_tensor_get(tensor, ptr, offset, size);
-        ptr += size;
-        size_written += size;
-        buf_size -= size;
-    }
-
-    size_t get_size_written() override {
-        return size_written;
-    }
-};
-
-struct llama_data_read_buffer : llama_data_read {
-    const uint8_t * ptr;
-    size_t buf_size = 0;
-    size_t size_read = 0;
-
-    llama_data_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {}
-
-    const uint8_t * read(size_t size) override {
-        const uint8_t * base_ptr = ptr;
-        if (size > buf_size) {
-            throw std::runtime_error("unexpectedly reached end of buffer");
-        }
-        ptr += size;
-        size_read += size;
-        buf_size -= size;
-        return base_ptr;
-    }
-
-    void read_to(void * dst, size_t size) override {
-        memcpy(dst, read(size), size);
-    }
-
-    size_t get_size_read() override {
-        return size_read;
-    }
-};
-
-struct llama_data_write_file : llama_data_write {
-    llama_file * file;
-    size_t size_written = 0;
-    std::vector temp_buffer;
-
-    llama_data_write_file(llama_file * f) : file(f) {}
-
-    void write(const void * src, size_t size) override {
-        file->write_raw(src, size);
-        size_written += size;
-    }
-
-    void write_tensor_data(const struct ggml_tensor * tensor, size_t offset, size_t size) override {
-        temp_buffer.resize(size);
-        ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size);
-        write(temp_buffer.data(), temp_buffer.size());
-    }
-
-    size_t get_size_written() override {
-        return size_written;
-    }
-};
-
-struct llama_data_read_file : llama_data_read {
-    llama_file * file;
-    size_t size_read = 0;
-    std::vector temp_buffer;
-
-    llama_data_read_file(llama_file * f) : file(f) {}
-
-    void read_to(void * dst, size_t size) override {
-        file->read_raw(dst, size);
-        size_read += size;
-    }
-
-    const uint8_t * read(size_t size) override {
-        temp_buffer.resize(size);
-        read_to(temp_buffer.data(), size);
-        return temp_buffer.data();
-    }
-
-    size_t get_size_read() override {
-        return size_read;
-    }
-};
-
-/** copy state data into either a buffer or file depending on the passed in context
- *
- * file context:
- * llama_file file("/path", "wb");
- * llama_data_write_file data_ctx(&file);
- * llama_state_get_data_internal(ctx, data_ctx);
- *
- * buffer context:
- * std::vector buf(max_size, 0);
- * llama_data_write_buffer data_ctx(buf.data(), max_size);
- * llama_state_get_data_internal(ctx, data_ctx);
- *
-*/
-static size_t llama_state_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx) {
-    llama_synchronize(ctx);
-
-    data_ctx.write_model_info(ctx);
-
-    // copy outputs
-    data_ctx.write_output_ids(ctx);
-    data_ctx.write_logits(ctx);
-    data_ctx.write_embeddings(ctx);
-
-    data_ctx.write_kv_cache(ctx);
-
-    return data_ctx.get_size_written();
-}
-
-size_t llama_state_get_data(struct llama_context * ctx, uint8_t * dst, size_t size) {
-    llama_data_write_buffer data_ctx(dst, size);
-    try {
-        return llama_state_get_data_internal(ctx, data_ctx);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-// Returns the *actual* size of the state.
-// Intended to be used when saving to state to a buffer.
-size_t llama_state_get_size(struct llama_context * ctx) {
-    llama_data_write_dummy data_ctx;
-    try {
-        return llama_state_get_data_internal(ctx, data_ctx);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-static size_t llama_state_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx) {
-    llama_synchronize(ctx);
-
-    data_ctx.read_model_info(ctx);
-
-    // set outputs
-    data_ctx.read_output_ids(ctx);
-    data_ctx.read_logits(ctx);
-    data_ctx.read_embeddings(ctx);
-
-    data_ctx.read_kv_cache(ctx);
-
-    return data_ctx.get_size_read();
-}
-
-// Sets the state reading from the specified source address
-size_t llama_state_set_data(struct llama_context * ctx, const uint8_t * src, size_t size) {
-    llama_data_read_buffer data_ctx(src, size);
-    try {
-        return llama_state_set_data_internal(ctx, data_ctx);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-static bool llama_state_load_file_internal(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    llama_file file(path_session, "rb");
-
-    // sanity checks
-    {
-        const uint32_t magic   = file.read_u32();
-        const uint32_t version = file.read_u32();
-
-        if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) {
-            LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version);
-            return false;
-        }
-    }
-
-    // load the prompt
-    {
-        const uint32_t n_token_count = file.read_u32();
-
-        if (n_token_count > n_token_capacity) {
-            LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
-            return false;
-        }
-
-        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
-        *n_token_count_out = n_token_count;
-    }
-
-    // restore the context state
-    {
-        const size_t n_state_size_cur = file.size - file.tell();
-
-        llama_data_read_file data_ctx(&file);
-        const size_t n_read = llama_state_set_data_internal(ctx, data_ctx);
-
-        if (n_read != n_state_size_cur) {
-            LLAMA_LOG_ERROR("%s: did not read all of the session file data! size %zu, got %zu\n", __func__, n_state_size_cur, n_read);
-            return false;
-        }
-    }
-    return true;
-}
-
-bool llama_state_load_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    try {
-        return llama_state_load_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what());
-        return false;
-    }
-}
-
-static bool llama_state_save_file_internal(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
-    llama_file file(path_session, "wb");
-
-    file.write_u32(LLAMA_SESSION_MAGIC);
-    file.write_u32(LLAMA_SESSION_VERSION);
-
-    // save the prompt
-    file.write_u32((uint32_t) n_token_count);
-    file.write_raw(tokens, sizeof(llama_token) * n_token_count);
-
-    // save the context state using stream saving
-    llama_data_write_file data_ctx(&file);
-    llama_state_get_data_internal(ctx, data_ctx);
-
-    return true;
-}
-
-bool llama_state_save_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) {
-    try {
-        return llama_state_save_file_internal(ctx, path_session, tokens, n_token_count);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what());
-        return false;
-    }
-}
-
-static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llama_data_write & data_ctx, llama_seq_id seq_id) {
-    llama_synchronize(ctx);
-
-    data_ctx.write_kv_cache(ctx, seq_id);
-
-    return data_ctx.get_size_written();
-}
-
-size_t llama_state_seq_get_size(struct llama_context * ctx, llama_seq_id seq_id) {
-    llama_data_write_dummy data_ctx;
-    return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
-}
-
-size_t llama_state_seq_get_data(struct llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) {
-    llama_data_write_buffer data_ctx(dst, size);
-    try {
-        return llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error saving sequence state: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-static size_t llama_state_seq_set_data_internal(struct llama_context * ctx, llama_data_read & data_ctx, llama_seq_id dest_seq_id) {
-    llama_synchronize(ctx);
-
-    data_ctx.read_kv_cache(ctx, dest_seq_id);
-
-    return data_ctx.get_size_read();
-}
-
-size_t llama_state_seq_set_data(struct llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id dest_seq_id) {
-    llama_data_read_buffer data_ctx(src, size);
-    try {
-        return llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error loading sequence state: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-static size_t llama_state_seq_save_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
-    llama_file file(filepath, "wb");
-
-    file.write_u32(LLAMA_STATE_SEQ_MAGIC);
-    file.write_u32(LLAMA_STATE_SEQ_VERSION);
-
-    // save the prompt
-    file.write_u32((uint32_t) n_token_count);
-    file.write_raw(tokens, sizeof(llama_token) * n_token_count);
-
-    // save the context state using stream saving
-    llama_data_write_file data_ctx(&file);
-    llama_state_seq_get_data_internal(ctx, data_ctx, seq_id);
-
-    const size_t res = file.tell();
-    GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + data_ctx.get_size_written());
-    return res;
-}
-
-static size_t llama_state_seq_load_file_internal(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    llama_file file(filepath, "rb");
-
-    // version checks
-    {
-        const uint32_t magic   = file.read_u32();
-        const uint32_t version = file.read_u32();
-
-        if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) {
-            LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version);
-            return 0;
-        }
-    }
-
-    // load the prompt
-    {
-        const uint32_t n_token_count = file.read_u32();
-
-        if (n_token_count > n_token_capacity) {
-            LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity);
-            return 0;
-        }
-
-        file.read_raw(tokens_out, sizeof(llama_token) * n_token_count);
-        *n_token_count_out = n_token_count;
-    }
-
-    // restore the context state
-    {
-        const size_t state_size = file.size - file.tell();
-        llama_data_read_file data_ctx(&file);
-        const size_t nread = llama_state_seq_set_data_internal(ctx, data_ctx, dest_seq_id);
-        if (!nread) {
-            LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__);
-            return 0;
-        }
-        GGML_ASSERT(nread <= state_size);
-        GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell());
-    }
-
-    return file.tell();
-}
-
-size_t llama_state_seq_save_file(struct llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) {
-    try {
-        return llama_state_seq_save_file_internal(ctx, filepath, seq_id, tokens, n_token_count);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-size_t llama_state_seq_load_file(struct llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) {
-    try {
-        return llama_state_seq_load_file_internal(ctx, filepath, dest_seq_id, tokens_out, n_token_capacity, n_token_count_out);
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what());
-        return 0;
-    }
-}
-
-void llama_set_n_threads(struct llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) {
-    ctx->cparams.n_threads       = n_threads;
-    ctx->cparams.n_threads_batch = n_threads_batch;
-}
-
-int32_t llama_n_threads(struct llama_context * ctx) {
-    return ctx->cparams.n_threads;
-}
-
-int32_t llama_n_threads_batch(struct llama_context * ctx) {
-    return ctx->cparams.n_threads_batch;
-}
-
-void llama_set_abort_callback(struct llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) {
-    ctx->abort_callback      = abort_callback;
-    ctx->abort_callback_data = abort_callback_data;
-
-    for (auto & backend : ctx->backends) {
-        auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get()));
-        auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback");
-        if (set_abort_callback_fn) {
-            set_abort_callback_fn(backend.get(), ctx->abort_callback, ctx->abort_callback_data);
-        }
-    }
-}
-
-void llama_set_embeddings(struct llama_context * ctx, bool embeddings) {
-    ctx->cparams.embeddings = embeddings;
-}
-
-void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn) {
-    ctx->cparams.causal_attn = causal_attn;
-}
-
-struct llama_batch llama_batch_get_one(
-             llama_token * tokens,
-                 int32_t   n_tokens) {
-    return {
-        /*n_tokens       =*/ n_tokens,
-        /*tokens         =*/ tokens,
-        /*embd           =*/ nullptr,
-        /*pos            =*/ nullptr,
-        /*n_seq_id       =*/ nullptr,
-        /*seq_id         =*/ nullptr,
-        /*logits         =*/ nullptr,
-    };
-}
-
-struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_t n_seq_max) {
-    llama_batch batch = {
-        /*n_tokens       =*/ 0,
-        /*tokens         =*/ nullptr,
-        /*embd           =*/ nullptr,
-        /*pos            =*/ nullptr,
-        /*n_seq_id       =*/ nullptr,
-        /*seq_id         =*/ nullptr,
-        /*logits         =*/ nullptr,
-    };
-
-    if (embd) {
-        batch.embd = (float *) malloc(sizeof(float) * n_tokens_alloc * embd);
-    } else {
-        batch.token = (llama_token *) malloc(sizeof(llama_token) * n_tokens_alloc);
-    }
-
-    batch.pos      = (llama_pos *)     malloc(sizeof(llama_pos)      * n_tokens_alloc);
-    batch.n_seq_id = (int32_t *)       malloc(sizeof(int32_t)        * n_tokens_alloc);
-    batch.seq_id   = (llama_seq_id **) malloc(sizeof(llama_seq_id *) * (n_tokens_alloc + 1));
-    for (int i = 0; i < n_tokens_alloc; ++i) {
-        batch.seq_id[i] = (llama_seq_id *) malloc(sizeof(llama_seq_id) * n_seq_max);
-    }
-    batch.seq_id[n_tokens_alloc] = nullptr;
-
-    batch.logits   = (int8_t *)        malloc(sizeof(int8_t)         * n_tokens_alloc);
-
-    return batch;
-}
-
-void llama_batch_free(struct llama_batch batch) {
-    if (batch.token)    free(batch.token);
-    if (batch.embd)     free(batch.embd);
-    if (batch.pos)      free(batch.pos);
-    if (batch.n_seq_id) free(batch.n_seq_id);
-    if (batch.seq_id) {
-        for (int i = 0; batch.seq_id[i] != nullptr; ++i) {
-            free(batch.seq_id[i]);
-        }
-        free(batch.seq_id);
-    }
-    if (batch.logits)   free(batch.logits);
-}
+///
 
 int32_t llama_encode(
         struct llama_context * ctx,
@@ -22566,150 +12022,12 @@ int32_t llama_decode(
     return ret;
 }
 
-void llama_synchronize(struct llama_context * ctx) {
-    ggml_backend_sched_synchronize(ctx->sched.get());
-
-    // FIXME: if multiple single tokens are evaluated without a synchronization,
-    // the stats will be added to the prompt evaluation stats
-    // this should only happen when using batch size 1 to evaluate a batch
-
-    // add the evaluation to the stats
-    if (ctx->n_queued_tokens == 1) {
-        if (!ctx->cparams.no_perf) {
-            ctx->t_eval_us += ggml_time_us() - ctx->t_compute_start_us;
-        }
-        ctx->n_eval++;
-    } else if (ctx->n_queued_tokens > 1) {
-        if (!ctx->cparams.no_perf) {
-            ctx->t_p_eval_us += ggml_time_us() - ctx->t_compute_start_us;
-        }
-        ctx->n_p_eval += ctx->n_queued_tokens;
-    }
-
-    // get a more accurate load time, upon first eval
-    if (ctx->n_queued_tokens > 0 && !ctx->has_evaluated_once) {
-        ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
-        ctx->has_evaluated_once = true;
-    }
-
-    ctx->n_queued_tokens = 0;
-    ctx->t_compute_start_us = 0;
-}
-
-float * llama_get_logits(struct llama_context * ctx) {
-    llama_synchronize(ctx);
-
-    // reorder logits for backward compatibility
-    // TODO: maybe deprecate this
-    llama_output_reorder(ctx);
-
-    return ctx->logits;
-}
-
-float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
-    int32_t j = -1;
-    llama_synchronize(ctx);
-
-    try {
-        if (ctx->logits == nullptr) {
-            throw std::runtime_error("no logits");
-        }
-
-        if (i < 0) {
-            j = ctx->n_outputs + i;
-            if (j < 0) {
-                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
-            }
-        } else if ((size_t) i >= ctx->output_ids.size()) {
-            throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size()));
-        } else {
-            j = ctx->output_ids[i];
-        }
-
-        if (j < 0) {
-            throw std::runtime_error(format("batch.logits[%d] != true", i));
-        }
-        if (j >= ctx->n_outputs) {
-            // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
-        }
-
-        return ctx->logits + j*ctx->model.hparams.n_vocab;
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what());
-#ifndef NDEBUG
-        GGML_ABORT("fatal error");
-#else
-        return nullptr;
-#endif
-    }
-}
-
-float * llama_get_embeddings(struct llama_context * ctx) {
-    llama_synchronize(ctx);
-
-    // reorder embeddings for backward compatibility
-    // TODO: maybe deprecate this
-    llama_output_reorder(ctx);
-
-    return ctx->embd;
-}
-
-float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
-    int32_t j = -1;
-
-    llama_synchronize(ctx);
-
-    try {
-        if (ctx->embd == nullptr) {
-            throw std::runtime_error("no embeddings");
-        }
-
-        if (i < 0) {
-            j = ctx->n_outputs + i;
-            if (j < 0) {
-                throw std::runtime_error(format("negative index out of range [0, %d)", ctx->n_outputs));
-            }
-        } else if ((size_t) i >= ctx->output_ids.size()) {
-            throw std::runtime_error(format("out of range [0, %zu)", ctx->output_ids.size()));
-        } else {
-            j = ctx->output_ids[i];
-        }
-
-        if (j < 0) {
-            throw std::runtime_error(format("batch.logits[%d] != true", i));
-        }
-        if (j >= ctx->n_outputs) {
-            // This should not happen
-            throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs));
-        }
-
-        return ctx->embd + j*ctx->model.hparams.n_embd;
-    } catch (const std::exception & err) {
-        LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
-#ifndef NDEBUG
-        GGML_ABORT("fatal error");
-#else
-        return nullptr;
-#endif
-    }
-}
-
-float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id) {
-    llama_synchronize(ctx);
-
-    auto it = ctx->embd_seq.find(seq_id);
-    if (it == ctx->embd_seq.end()) {
-        return nullptr;
-    }
-
-    return it->second.data();
-}
-
 //
 // vocab
 //
 
+// TODO: tmp bridges below until `struct llama_vocab` is exposed through the public API
+
 const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
     return llama_token_get_text_impl(model->vocab, token);
 }
@@ -22842,478 +12160,6 @@ int32_t llama_detokenize(
 // chat templates
 //
 
-static llm_chat_template llama_chat_detect_template(const std::string & tmpl) {
-    if (LLM_CHAT_TEMPLATES.find(tmpl) != LLM_CHAT_TEMPLATES.end()) {
-        return LLM_CHAT_TEMPLATES.at(tmpl);
-    }
-    auto tmpl_contains = [&tmpl](const char * haystack) -> bool {
-        return tmpl.find(haystack) != std::string::npos;
-    };
-    if (tmpl_contains("<|im_start|>")) {
-        return LLM_CHAT_TEMPLATE_CHATML;
-    } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) {
-        if (tmpl_contains("[SYSTEM_PROMPT]")) {
-            return LLM_CHAT_TEMPLATE_MISTRAL_V7;
-        } else if (
-            // catches official 'v1' template
-            tmpl_contains("' [INST] ' + system_message")
-            // catches official 'v3' and 'v3-tekken' templates
-            || tmpl_contains("[AVAILABLE_TOOLS]")
-        ) {
-            // Official mistral 'v1', 'v3' and 'v3-tekken' templates
-            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
-            // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
-            if (tmpl_contains(" [INST]")) {
-                return LLM_CHAT_TEMPLATE_MISTRAL_V1;
-            } else if (tmpl_contains("\"[INST]\"")) {
-                return LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN;
-            }
-            return LLM_CHAT_TEMPLATE_MISTRAL_V3;
-        } else {
-            // llama2 template and its variants
-            // [variant] support system message
-            // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
-            bool support_system_message = tmpl_contains("<>");
-            bool add_bos_inside_history = tmpl_contains("bos_token + '[INST]");
-            bool strip_message = tmpl_contains("content.strip()");
-            if (strip_message) {
-                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
-            } else if (add_bos_inside_history) {
-                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
-            } else if (support_system_message) {
-                return LLM_CHAT_TEMPLATE_LLAMA_2_SYS;
-            } else {
-                return LLM_CHAT_TEMPLATE_LLAMA_2;
-            }
-        }
-    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) {
-        return LLM_CHAT_TEMPLATE_PHI_3;
-    } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) {
-        return LLM_CHAT_TEMPLATE_FALCON_3;
-    } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) {
-        return LLM_CHAT_TEMPLATE_ZEPHYR;
-    } else if (tmpl_contains("bos_token + message['role']")) {
-        return LLM_CHAT_TEMPLATE_MONARCH;
-    } else if (tmpl_contains("")) {
-        return LLM_CHAT_TEMPLATE_GEMMA;
-    } else if (tmpl_contains("'\\n\\nAssistant: ' + eos_token")) {
-        // OrionStarAI/Orion-14B-Chat
-        return LLM_CHAT_TEMPLATE_ORION;
-    } else if (tmpl_contains("GPT4 Correct ")) {
-        // openchat/openchat-3.5-0106
-        return LLM_CHAT_TEMPLATE_OPENCHAT;
-    } else if (tmpl_contains("USER: ") && tmpl_contains("ASSISTANT: ")) {
-        // eachadea/vicuna-13b-1.1 (and Orca variant)
-        if (tmpl_contains("SYSTEM: ")) {
-            return LLM_CHAT_TEMPLATE_VICUNA_ORCA;
-        }
-        return LLM_CHAT_TEMPLATE_VICUNA;
-    } else if (tmpl_contains("### Instruction:") && tmpl_contains("<|EOT|>")) {
-        // deepseek-ai/deepseek-coder-33b-instruct
-        return LLM_CHAT_TEMPLATE_DEEPSEEK;
-    } else if (tmpl_contains("<|START_OF_TURN_TOKEN|>") && tmpl_contains("<|USER_TOKEN|>")) {
-        // CohereForAI/c4ai-command-r-plus
-        return LLM_CHAT_TEMPLATE_COMMAND_R;
-    } else if (tmpl_contains("<|start_header_id|>") && tmpl_contains("<|end_header_id|>")) {
-        return LLM_CHAT_TEMPLATE_LLAMA_3;
-    } else if (tmpl_contains("[gMASK]sop")) {
-        // chatglm3-6b
-        return LLM_CHAT_TEMPLATE_CHATGML_3;
-    } else if (tmpl_contains("[gMASK]")) {
-        return LLM_CHAT_TEMPLATE_CHATGML_4;
-    } else if (tmpl_contains(LU8("<用户>"))) {
-        // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
-        return LLM_CHAT_TEMPLATE_MINICPM;
-    } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
-        return LLM_CHAT_TEMPLATE_DEEPSEEK_2;
-    } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
-        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
-        // EXAONE-3.0-7.8B-Instruct
-        return LLM_CHAT_TEMPLATE_EXAONE_3;
-    } else if (tmpl_contains("rwkv-world")) {
-        return LLM_CHAT_TEMPLATE_RWKV_WORLD;
-    } else if (tmpl_contains("<|start_of_role|>")) {
-        return LLM_CHAT_TEMPLATE_GRANITE;
-    } else if (tmpl_contains("message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1]")) {
-        return LLM_CHAT_TEMPLATE_GIGACHAT;
-    } else if (tmpl_contains("<|role_start|>")) {
-        return LLM_CHAT_TEMPLATE_MEGREZ;
-    }
-    return LLM_CHAT_TEMPLATE_UNKNOWN;
-}
-
-// Simple version of "llama_apply_chat_template" that only works with strings
-// This function uses heuristic checks to determine commonly used template. It is not a jinja parser.
-static int32_t llama_chat_apply_template_internal(
-    const llm_chat_template tmpl,
-    const std::vector & chat,
-    std::string & dest, bool add_ass) {
-    // Taken from the research: https://github.com/ggerganov/llama.cpp/issues/5527
-    std::stringstream ss;
-    if (tmpl == LLM_CHAT_TEMPLATE_CHATML) {
-        // chatml template
-        for (auto message : chat) {
-            ss << "<|im_start|>" << message->role << "\n" << message->content << "<|im_end|>\n";
-        }
-        if (add_ass) {
-            ss << "<|im_start|>assistant\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V7) {
-        // Official mistral 'v7' template
-        // See: https://huggingface.co/mistralai/Mistral-Large-Instruct-2411#basic-instruct-template-v7
-        for (auto message : chat) {
-            std::string role(message->role);
-            std::string content(message->content);
-            if (role == "system") {
-                ss << "[SYSTEM_PROMPT] " << content << "[/SYSTEM_PROMPT]";
-            } else if (role == "user") {
-                ss << "[INST] " << content << "[/INST]";
-            }
-            else {
-                ss << " " << content << "";
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1
-            || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3
-            || tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN) {
-        // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/chat_templates.md
-        // See: https://github.com/mistralai/cookbook/blob/main/concept-deep-dive/tokenization/templates.md
-        std::string leading_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V1 ? " " : "";
-        std::string trailing_space = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN ? "" : " ";
-        bool trim_assistant_message = tmpl == LLM_CHAT_TEMPLATE_MISTRAL_V3;
-        bool is_inside_turn = false;
-        for (auto message : chat) {
-            if (!is_inside_turn) {
-                ss << leading_space << "[INST]" << trailing_space;
-                is_inside_turn = true;
-            }
-            std::string role(message->role);
-            std::string content(message->content);
-            if (role == "system") {
-                ss << content << "\n\n";
-            } else if (role == "user") {
-                ss << content << leading_space << "[/INST]";
-            } else {
-                ss << trailing_space << (trim_assistant_message ? trim(content) : content) << "";
-                is_inside_turn = false;
-            }
-        }
-    } else if (
-            tmpl == LLM_CHAT_TEMPLATE_LLAMA_2
-            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS
-            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS
-            || tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP) {
-        // llama2 template and its variants
-        // [variant] support system message
-        // See: https://huggingface.co/blog/llama2#how-to-prompt-llama-2
-        bool support_system_message = tmpl != LLM_CHAT_TEMPLATE_LLAMA_2;
-        // [variant] add BOS inside history
-        bool add_bos_inside_history = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS;
-        // [variant] trim spaces from the input message
-        bool strip_message = tmpl == LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP;
-        // construct the prompt
-        bool is_inside_turn = true; // skip BOS at the beginning
-        ss << "[INST] ";
-        for (auto message : chat) {
-            std::string content = strip_message ? trim(message->content) : message->content;
-            std::string role(message->role);
-            if (!is_inside_turn) {
-                is_inside_turn = true;
-                ss << (add_bos_inside_history ? "[INST] " : "[INST] ");
-            }
-            if (role == "system") {
-                if (support_system_message) {
-                    ss << "<>\n" << content << "\n<>\n\n";
-                } else {
-                    // if the model does not support system message, we still include it in the first message, but without <>
-                    ss << content << "\n";
-                }
-            } else if (role == "user") {
-                ss << content << " [/INST]";
-            } else {
-                ss << content << "";
-                is_inside_turn = false;
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_3) {
-        // Phi 3
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>\n" << message->content << "<|end|>\n";
-        }
-        if (add_ass) {
-            ss << "<|assistant|>\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) {
-        // Falcon 3
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>\n" << message->content << "\n";
-        }
-        if (add_ass) {
-            ss << "<|assistant|>\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_ZEPHYR) {
-        // zephyr template
-        for (auto message : chat) {
-            ss << "<|" << message->role << "|>" << "\n" << message->content << "<|endoftext|>\n";
-        }
-        if (add_ass) {
-            ss << "<|assistant|>\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_MONARCH) {
-        // mlabonne/AlphaMonarch-7B template (the  is included inside history)
-        for (auto message : chat) {
-            std::string bos = (message == chat.front()) ? "" : ""; // skip BOS for first message
-            ss << bos << message->role << "\n" << message->content << "\n";
-        }
-        if (add_ass) {
-            ss << "assistant\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_GEMMA) {
-        // google/gemma-7b-it
-        std::string system_prompt = "";
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                // there is no system message for gemma, but we will merge it with user prompt, so nothing is broken
-                system_prompt = trim(message->content);
-                continue;
-            }
-            // in gemma, "assistant" is "model"
-            role = role == "assistant" ? "model" : message->role;
-            ss << "" << role << "\n";
-            if (!system_prompt.empty() && role != "model") {
-                ss << system_prompt << "\n\n";
-                system_prompt = "";
-            }
-            ss << trim(message->content) << "\n";
-        }
-        if (add_ass) {
-            ss << "model\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_ORION) {
-        // OrionStarAI/Orion-14B-Chat
-        std::string system_prompt = "";
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                // there is no system message support, we will merge it with user prompt
-                system_prompt = message->content;
-                continue;
-            } else if (role == "user") {
-                ss << "Human: ";
-                if (!system_prompt.empty()) {
-                    ss << system_prompt << "\n\n";
-                    system_prompt = "";
-                }
-                ss << message->content << "\n\nAssistant: ";
-            } else {
-                ss << message->content << "";
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_OPENCHAT) {
-        // openchat/openchat-3.5-0106,
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << message->content << "<|end_of_turn|>";
-            } else {
-                role[0] = toupper(role[0]);
-                ss << "GPT4 Correct " << role << ": " << message->content << "<|end_of_turn|>";
-            }
-        }
-        if (add_ass) {
-            ss << "GPT4 Correct Assistant:";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_VICUNA || tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
-        // eachadea/vicuna-13b-1.1 (and Orca variant)
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                // Orca-Vicuna variant uses a system prefix
-                if (tmpl == LLM_CHAT_TEMPLATE_VICUNA_ORCA) {
-                    ss << "SYSTEM: " << message->content << "\n";
-                } else {
-                    ss << message->content << "\n\n";
-                }
-            } else if (role == "user") {
-                ss << "USER: " << message->content << "\n";
-            } else if (role == "assistant") {
-                ss << "ASSISTANT: " << message->content << "\n";
-            }
-        }
-        if (add_ass) {
-            ss << "ASSISTANT:";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK) {
-        // deepseek-ai/deepseek-coder-33b-instruct
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << message->content;
-            } else if (role == "user") {
-                ss << "### Instruction:\n" << message->content << "\n";
-            } else if (role == "assistant") {
-                ss << "### Response:\n" << message->content << "\n<|EOT|>\n";
-            }
-        }
-        if (add_ass) {
-            ss << "### Response:\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_COMMAND_R) {
-        // CohereForAI/c4ai-command-r-plus
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
-            } else if (role == "user") {
-                ss << "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
-            } else if (role == "assistant") {
-                ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" << trim(message->content) << "<|END_OF_TURN_TOKEN|>";
-            }
-        }
-        if (add_ass) {
-            ss << "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_LLAMA_3) {
-        // Llama 3
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|start_header_id|>" << role << "<|end_header_id|>\n\n" << trim(message->content) << "<|eot_id|>";
-        }
-        if (add_ass) {
-            ss << "<|start_header_id|>assistant<|end_header_id|>\n\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_3) {
-        // chatglm3-6b
-        ss << "[gMASK]" << "sop";
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>" << "\n " << message->content;
-        }
-        if (add_ass) {
-            ss << "<|assistant|>";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_CHATGML_4) {
-        ss << "[gMASK]" << "";
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|" << role << "|>" << "\n" << message->content;
-        }
-        if (add_ass) {
-            ss << "<|assistant|>";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) {
-        // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "user") {
-                ss << LU8("<用户>");
-                ss << trim(message->content);
-                ss << "";
-            } else {
-                ss << trim(message->content);
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_2) {
-        // DeepSeek-V2
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << message->content << "\n\n";
-            } else if (role == "user") {
-                ss << "User: " << message->content << "\n\n";
-            } else if (role == "assistant") {
-                ss << "Assistant: " << message->content << LU8("<|end▁of▁sentence|>");
-            }
-        }
-        if (add_ass) {
-            ss << "Assistant:";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) {
-        // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
-        // EXAONE-3.0-7.8B-Instruct
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "system") {
-                ss << "[|system|]" << trim(message->content) << "[|endofturn|]\n";
-            } else if (role == "user") {
-                ss << "[|user|]" << trim(message->content) << "\n";
-            } else if (role == "assistant") {
-                ss << "[|assistant|]" << trim(message->content) << "[|endofturn|]\n";
-            }
-        }
-        if (add_ass) {
-            ss << "[|assistant|]";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_RWKV_WORLD) {
-        // this template requires the model to have "\n\n" as EOT token
-        for (auto message : chat) {
-            std::string role(message->role);
-            if (role == "user") {
-                ss << "User: " << message->content << "\n\nAssistant:";
-            } else {
-                ss << message->content << "\n\n";
-            }
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_GRANITE) {
-        // IBM Granite template
-        for (const auto & message : chat) {
-            std::string role(message->role);
-            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
-            if (role == "assistant_tool_call") {
-                ss << "<|tool_call|>";
-            }
-            ss << message->content << "<|end_of_text|>\n";
-        }
-        if (add_ass) {
-            ss << "<|start_of_role|>assistant<|end_of_role|>\n";
-        }
-    } else if (tmpl == LLM_CHAT_TEMPLATE_GIGACHAT) {
-        // GigaChat template
-        bool has_system = !chat.empty() && std::string(chat[0]->role) == "system";
-
-        // Handle system message if present
-        if (has_system) {
-            ss << "" << chat[0]->content << "<|message_sep|>";
-        } else {
-            ss << "";
-        }
-
-        // Process remaining messages
-        for (size_t i = has_system ? 1 : 0; i < chat.size(); i++) {
-            std::string role(chat[i]->role);
-            if (role == "user") {
-                ss << "user<|role_sep|>" << chat[i]->content << "<|message_sep|>"
-                << "available functions<|role_sep|>[]<|message_sep|>";
-            } else if (role == "assistant") {
-                ss << "assistant<|role_sep|>" << chat[i]->content << "<|message_sep|>";
-            }
-        }
-
-        // Add generation prompt if needed
-        if (add_ass) {
-            ss << "assistant<|role_sep|>";
-        }
-    }  else if (tmpl == LLM_CHAT_TEMPLATE_MEGREZ) {
-        // Megrez template
-        for (auto message : chat) {
-            std::string role(message->role);
-            ss << "<|role_start|>" << role << "<|role_end|>" << message->content << "<|turn_end|>";
-        }
-
-        if (add_ass) {
-            ss << "<|role_start|>assistant<|role_end|>";
-        }
-    } else {
-        // template not supported
-        return -1;
-    }
-    dest = ss.str();
-    return dest.size();
-}
-
 int32_t llama_chat_apply_template(
                 const struct llama_model * model,
                               const char * tmpl,
@@ -23333,7 +12179,7 @@ int32_t llama_chat_apply_template(
         }
         else {
             // worst case: there is no information about template, we will use chatml by default
-            curr_tmpl = "chatml";  // see llama_chat_apply_template_internal
+            curr_tmpl = "chatml";  // see llm_chat_apply_template
         }
     }
 
@@ -23345,11 +12191,11 @@ int32_t llama_chat_apply_template(
     }
 
     std::string formatted_chat;
-    llm_chat_template detected_tmpl = llama_chat_detect_template(curr_tmpl);
+    llm_chat_template detected_tmpl = llm_chat_detect_template(curr_tmpl);
     if (detected_tmpl == LLM_CHAT_TEMPLATE_UNKNOWN) {
         return -1;
     }
-    int32_t res = llama_chat_apply_template_internal(detected_tmpl, chat_vec, formatted_chat, add_ass);
+    int32_t res = llm_chat_apply_template(detected_tmpl, chat_vec, formatted_chat, add_ass);
     if (res < 0) {
         return res;
     }
@@ -23359,15 +12205,6 @@ int32_t llama_chat_apply_template(
     return res;
 }
 
-int32_t llama_chat_builtin_templates(const char ** output, size_t len) {
-    auto it = LLM_CHAT_TEMPLATES.begin();
-    for (size_t i = 0; i < std::min(len, LLM_CHAT_TEMPLATES.size()); i++) {
-        output[i] = it->first.c_str();
-        std::advance(it, 1);
-    }
-    return (int32_t) LLM_CHAT_TEMPLATES.size();
-}
-
 //
 // sampling
 //
@@ -23435,6 +12272,10 @@ const char * llama_print_system_info(void) {
     return s.c_str();
 }
 
+//
+// perf
+//
+
 struct llama_perf_context_data llama_perf_context(const struct llama_context * ctx) {
     struct llama_perf_context_data data = {};
 
@@ -23470,47 +12311,3 @@ void llama_perf_context_reset(struct llama_context * ctx) {
     ctx->t_eval_us   = ctx->n_eval = 0;
     ctx->t_p_eval_us = ctx->n_p_eval = 0;
 }
-
-// For internal test use
-const std::vector> & llama_internal_get_tensor_map(
-    struct llama_context * ctx
-) {
-    return ctx->model.tensors_by_name;
-}
-
-void llama_log_set(ggml_log_callback log_callback, void * user_data) {
-    ggml_log_set(log_callback, user_data);
-    g_logger_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
-    g_logger_state.log_callback_user_data = user_data;
-}
-
-static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
-    va_list args_copy;
-    va_copy(args_copy, args);
-    char buffer[128];
-    int len = vsnprintf(buffer, 128, format, args);
-    if (len < 128) {
-        g_logger_state.log_callback(level, buffer, g_logger_state.log_callback_user_data);
-    } else {
-        char * buffer2 = new char[len + 1];
-        vsnprintf(buffer2, len + 1, format, args_copy);
-        buffer2[len] = 0;
-        g_logger_state.log_callback(level, buffer2, g_logger_state.log_callback_user_data);
-        delete[] buffer2;
-    }
-    va_end(args_copy);
-}
-
-void llama_log_internal(ggml_log_level level, const char * format, ...) {
-    va_list args;
-    va_start(args, format);
-    llama_log_internal_v(level, format, args);
-    va_end(args);
-}
-
-void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data) {
-    (void) level;
-    (void) user_data;
-    fputs(text, stderr);
-    fflush(stderr);
-}

From e7da954eccdf39ee795a6135bdb86f0978902681 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Fri, 3 Jan 2025 11:26:14 +0200
Subject: [PATCH 011/279] metal : avoid uint (#11019)

---
 ggml/src/ggml-metal/ggml-metal.m | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m
index 28f590f92..a85502ee0 100644
--- a/ggml/src/ggml-metal/ggml-metal.m
+++ b/ggml/src/ggml-metal/ggml-metal.m
@@ -2067,8 +2067,8 @@ static void ggml_metal_encode_node(
                 GGML_ASSERT(ne12 % ne02 == 0);
                 GGML_ASSERT(ne13 % ne03 == 0);
 
-                const uint r2 = ne12/ne02;
-                const uint r3 = ne13/ne03;
+                const uint32_t r2 = ne12/ne02;
+                const uint32_t r3 = ne13/ne03;
 
                 // find the break-even point where the matrix-matrix kernel becomes more efficient compared
                 // to the matrix-vector kernel

From 4b0c638b9a68f577cb2066b638c9f622d91ee661 Mon Sep 17 00:00:00 2001
From: Molly Sophia 
Date: Fri, 3 Jan 2025 20:13:18 +0800
Subject: [PATCH 012/279] common : disable KV cache shifting automatically for
 unsupported models (#11053)

* Disable KV cache shifting automatically for unsupported models

instead of exiting directly

Signed-off-by: Molly Sophia 

* Update common/common.cpp

Co-authored-by: Georgi Gerganov 

---------

Signed-off-by: Molly Sophia 
Co-authored-by: Georgi Gerganov 
---
 common/common.cpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 3e37039ca..4bb140ee2 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -889,9 +889,8 @@ struct common_init_result common_init_from_params(common_params & params) {
     }
 
     if (params.ctx_shift && !llama_kv_cache_can_shift(lctx)) {
-        LOG_ERR("%s: KV cache shifting is not supported for this model (--no-context-shift to disable)'\n", __func__);
-        llama_free_model(model);
-        return iparams;
+        LOG_WRN("%s: KV cache shifting is not supported for this model, disabling KV cache shifting\n", __func__);
+        params.ctx_shift = false;
     }
 
     if (!params.control_vectors.empty()) {

From c31fc8b966817b2f0b277fd28e04a189e388972a Mon Sep 17 00:00:00 2001
From: "Gilad S." <7817232+giladgd@users.noreply.github.com>
Date: Sat, 4 Jan 2025 10:17:31 +0200
Subject: [PATCH 013/279] fix: Vulkan shader gen binary path (#11037)

---
 ggml/src/ggml-vulkan/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt
index 6d46e5f24..9501de736 100644
--- a/ggml/src/ggml-vulkan/CMakeLists.txt
+++ b/ggml/src/ggml-vulkan/CMakeLists.txt
@@ -73,7 +73,7 @@ if (Vulkan_FOUND)
         OUTPUT ${_ggml_vk_header}
                 ${_ggml_vk_source}
 
-        COMMAND ${_ggml_vk_genshaders_cmd}
+        COMMAND "$/${_ggml_vk_genshaders_cmd}"
             --glslc      ${Vulkan_GLSLC_EXECUTABLE}
             --input-dir  ${_ggml_vk_input_dir}
             --output-dir ${_ggml_vk_output_dir}

From db68c93b57bfdf6da1fbdae81080382d6998cbc9 Mon Sep 17 00:00:00 2001
From: Daniel Bevenius 
Date: Thu, 19 Dec 2024 03:50:12 +0100
Subject: [PATCH 014/279] ggml : improve inputs log sched_print_assignments
 (ggml/1053)

This commit attempts to improve the log message for the inputs of the
splits in the sched_print_assignments function.

The motivation for this change is that currently even if there are no
inputs a colon is displayed at the end of the line, which can make it a
little confusing when reading the output as it could be interpreted as
the line below are inputs when they are in fact nodes. With this change
the colon will only be printed if there actually are inputs.
---
 ggml/src/ggml-backend.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp
index fdb4b986f..e2d6c4056 100644
--- a/ggml/src/ggml-backend.cpp
+++ b/ggml/src/ggml-backend.cpp
@@ -795,9 +795,12 @@ static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, str
     for (int i = 0; i < graph->n_nodes; i++) {
         if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
             ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id];
-            GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend),
+            GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs", cur_split, ggml_backend_name(split_backend),
                 sched->splits[cur_split].n_inputs);
             for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
+                if (j == 0) {
+                    GGML_LOG_DEBUG(": ");
+                }
                 GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
                     fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
             }

From 5e3b08d606b5b0caaea16541b504c3bba8f3ec1d Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Sat, 4 Jan 2025 10:53:54 +0200
Subject: [PATCH 015/279] ggml : do not install metal source when embed library
 (ggml/1054)

---
 ggml/CMakeLists.txt                | 20 --------------------
 ggml/src/ggml-metal/CMakeLists.txt | 16 ++++++++++++++++
 2 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index e33d97482..393506533 100644
--- a/ggml/CMakeLists.txt
+++ b/ggml/CMakeLists.txt
@@ -252,26 +252,6 @@ set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
 install(TARGETS ggml LIBRARY PUBLIC_HEADER)
 install(TARGETS ggml-base LIBRARY)
 
-# FIXME: this should be done in the backend cmake files
-if (GGML_METAL)
-    # FIXME: does this need to be installed with GGML_METAL_EMBED_LIBRARY?
-    install(
-        FILES src/ggml-metal/ggml-metal.metal
-        PERMISSIONS
-            OWNER_READ
-            OWNER_WRITE
-            GROUP_READ
-            WORLD_READ
-        DESTINATION ${CMAKE_INSTALL_BINDIR})
-
-    if (NOT GGML_METAL_EMBED_LIBRARY)
-        install(
-            FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
-            DESTINATION ${CMAKE_INSTALL_BINDIR}
-        )
-    endif()
-endif()
-
 if (GGML_STANDALONE)
     configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ggml.pc.in
         ${CMAKE_CURRENT_BINARY_DIR}/ggml.pc
diff --git a/ggml/src/ggml-metal/CMakeLists.txt b/ggml/src/ggml-metal/CMakeLists.txt
index 1bad27206..89fcde2fa 100644
--- a/ggml/src/ggml-metal/CMakeLists.txt
+++ b/ggml/src/ggml-metal/CMakeLists.txt
@@ -103,3 +103,19 @@ else()
         DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
         )
 endif() # GGML_METAL_EMBED_LIBRARY
+
+if (NOT GGML_METAL_EMBED_LIBRARY)
+    install(
+        FILES src/ggml-metal/ggml-metal.metal
+        PERMISSIONS
+            OWNER_READ
+            OWNER_WRITE
+            GROUP_READ
+            WORLD_READ
+        DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+        install(
+            FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
+            DESTINATION ${CMAKE_INSTALL_BINDIR}
+        )
+endif()

From 78c678517530d411b4263341cdb4dc28c9d117c8 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Sat, 4 Jan 2025 10:54:01 +0200
Subject: [PATCH 016/279] sync : ggml

---
 scripts/sync-ggml.last | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last
index b4ac38bbf..b67445ecd 100644
--- a/scripts/sync-ggml.last
+++ b/scripts/sync-ggml.last
@@ -1 +1 @@
-e6d93f40dffe8733d5d72f1d8fa6b3ca27ae899f
+a2af72be7baf5b1f4a33d34e77e509e5e85b7cd7

From 46be942214e295cd34660bbbd6b846155d1c36a0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?DAN=E2=84=A2?= 
Date: Sat, 4 Jan 2025 09:33:31 -0500
Subject: [PATCH 017/279] llama : add support for the cohere2 model
 architecture (#10900)

---
 convert_hf_to_gguf.py     |  18 +++++
 gguf-py/gguf/constants.py |  14 ++++
 src/llama-arch.cpp        |  16 ++++
 src/llama-arch.h          |   1 +
 src/llama-model.cpp       |  11 +++
 src/llama.cpp             | 161 ++++++++++++++++++++++++++++++++++++++
 6 files changed, 221 insertions(+)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 4e6c0f60c..d4441bbe9 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -3373,6 +3373,24 @@ class CommandR2Model(Model):
         self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
 
 
+@Model.register("Cohere2ForCausalLM")
+class Cohere2Model(Model):
+    model_arch = gguf.MODEL_ARCH.COHERE2
+
+    def set_gguf_parameters(self):
+        super().set_gguf_parameters()
+
+        self.gguf_writer.add_logit_scale(self.hparams["logit_scale"])
+        self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])
+        self.gguf_writer.add_vocab_size(self.hparams["vocab_size"])
+
+        rotary_pct = self.hparams["rotary_pct"]
+        hidden_size = self.hparams["hidden_size"]
+        num_attention_heads = self.hparams["num_attention_heads"]
+        self.gguf_writer.add_rope_dimension_count(int(rotary_pct * (hidden_size // num_attention_heads)))
+        self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE)
+
+
 @Model.register("OlmoForCausalLM")
 @Model.register("OLMoForCausalLM")
 class OlmoModel(Model):
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 273370370..cdf79673b 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -255,6 +255,7 @@ class MODEL_ARCH(IntEnum):
     MAMBA            = auto()
     XVERSE           = auto()
     COMMAND_R        = auto()
+    COHERE2          = auto()
     DBRX             = auto()
     OLMO             = auto()
     OLMO2            = auto()
@@ -437,6 +438,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
     MODEL_ARCH.MAMBA:            "mamba",
     MODEL_ARCH.XVERSE:           "xverse",
     MODEL_ARCH.COMMAND_R:        "command-r",
+    MODEL_ARCH.COHERE2:          "cohere2",
     MODEL_ARCH.DBRX:             "dbrx",
     MODEL_ARCH.OLMO:             "olmo",
     MODEL_ARCH.OLMO2:            "olmo2",
@@ -1136,6 +1138,18 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.ATTN_K_NORM,
         MODEL_TENSOR.ATTN_Q_NORM,
     ],
+    MODEL_ARCH.COHERE2: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+    ],
     MODEL_ARCH.DBRX: [
         MODEL_TENSOR.TOKEN_EMBD,
         MODEL_TENSOR.OUTPUT_NORM,
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index a60038385..fea4b21d3 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -39,6 +39,7 @@ static const std::map LLM_ARCH_NAMES = {
     { LLM_ARCH_MAMBA,            "mamba"            },
     { LLM_ARCH_XVERSE,           "xverse"           },
     { LLM_ARCH_COMMAND_R,        "command-r"        },
+    { LLM_ARCH_COHERE2,          "cohere2"          },
     { LLM_ARCH_DBRX,             "dbrx"             },
     { LLM_ARCH_OLMO,             "olmo"             },
     { LLM_ARCH_OLMO2,            "olmo2"            },
@@ -807,6 +808,21 @@ static const std::map> LLM_TENSOR_N
             { LLM_TENSOR_ATTN_K_NORM,     "blk.%d.attn_k_norm" },
         },
     },
+    {
+        LLM_ARCH_COHERE2,
+        {
+            { LLM_TENSOR_TOKEN_EMBD,      "token_embd" },
+            { LLM_TENSOR_OUTPUT_NORM,     "output_norm" },
+            { LLM_TENSOR_ATTN_NORM,       "blk.%d.attn_norm" },
+            { LLM_TENSOR_ATTN_Q,          "blk.%d.attn_q" },
+            { LLM_TENSOR_ATTN_K,          "blk.%d.attn_k" },
+            { LLM_TENSOR_ATTN_V,          "blk.%d.attn_v" },
+            { LLM_TENSOR_ATTN_OUT,        "blk.%d.attn_output" },
+            { LLM_TENSOR_FFN_GATE,        "blk.%d.ffn_gate" },
+            { LLM_TENSOR_FFN_DOWN,        "blk.%d.ffn_down" },
+            { LLM_TENSOR_FFN_UP,          "blk.%d.ffn_up" },
+        },
+    },
     {
         LLM_ARCH_DBRX,
         {
diff --git a/src/llama-arch.h b/src/llama-arch.h
index 446e72eeb..10bd619a4 100644
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -43,6 +43,7 @@ enum llm_arch {
     LLM_ARCH_MAMBA,
     LLM_ARCH_XVERSE,
     LLM_ARCH_COMMAND_R,
+    LLM_ARCH_COHERE2,
     LLM_ARCH_DBRX,
     LLM_ARCH_OLMO,
     LLM_ARCH_OLMO2,
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index ace0ba262..c356abded 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -786,6 +786,16 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) {
                     default: model.type = e_model::MODEL_UNKNOWN;
                 }
             } break;
+        case LLM_ARCH_COHERE2:
+            {
+                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
+                ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
+                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
+                switch (hparams.n_layer) {
+                    case 32: model.type = e_model::MODEL_8B; break;
+                    default: model.type = e_model::MODEL_UNKNOWN;
+                }
+            } break;
         case LLM_ARCH_DBRX:
         {
             ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
@@ -2031,6 +2041,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
         case LLM_ARCH_MINICPM:
         case LLM_ARCH_XVERSE:
         case LLM_ARCH_COMMAND_R:
+        case LLM_ARCH_COHERE2:
         case LLM_ARCH_OLMO:
         case LLM_ARCH_ARCTIC:
         case LLM_ARCH_DEEPSEEK:
diff --git a/src/llama.cpp b/src/llama.cpp
index d7110b90b..50e9191fa 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -1552,6 +1552,32 @@ static bool llm_load_tensors(
                         layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
                     }
                 } break;
+            case LLM_ARCH_COHERE2:
+                {
+                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
+
+                    // output
+                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
+                    // init output from the input tok embed
+                    model.output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab },
+                                                      llama_model_loader::TENSOR_DUPLICATED);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = model.layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd }, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
+
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
+                    }
+                }
+                break;
             case LLM_ARCH_OLMO:  // adapted from LLM_ARCH_LLAMA with norm params removed
                 {
                     model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
@@ -7633,6 +7659,137 @@ struct llm_build_context {
 
     }
 
+    struct ggml_cgraph * build_cohere2() {
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+
+        const int64_t n_embd_head = hparams.n_embd_head_v;
+        GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+        const float f_logit_scale = hparams.f_logit_scale;
+
+        struct ggml_tensor * cur;
+        struct ggml_tensor * inpL;
+
+        inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb);
+
+        // inp_pos - contains the positions
+        struct ggml_tensor * inp_pos = build_inp_pos();
+
+        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
+        // cohere2 requires different mask for layers using sliding window (SWA)
+        struct ggml_tensor * KQ_mask     = build_inp_KQ_mask();
+        struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
+
+        // sliding window switch pattern
+        const int32_t sliding_window_pattern = 4;
+
+        for (int il = 0; il < n_layer; ++il) {
+            // three layers sliding window attention (window size 4096) and ROPE
+            // fourth layer uses global attention without positional embeddings
+            const bool           is_sliding = il % sliding_window_pattern < (sliding_window_pattern - 1);
+            struct ggml_tensor * KQ_mask_l = is_sliding ? KQ_mask_swa : KQ_mask;
+
+            // norm
+            cur = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, NULL, LLM_NORM, cb, il);
+            cb(cur, "attn_norm", il);
+            struct ggml_tensor * ffn_inp = cur;
+
+            // self-attention
+            {
+                // rope freq factors for 128k context
+                struct ggml_tensor * rope_factors = build_rope_factors(il);
+
+                // compute Q and K and RoPE them
+                struct ggml_tensor * Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur);
+                cb(Qcur, "Qcur", il);
+                if (model.layers[il].bq) {
+                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
+                    cb(Qcur, "Qcur", il);
+                }
+
+                struct ggml_tensor * Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur);
+                cb(Kcur, "Kcur", il);
+                if (model.layers[il].bk) {
+                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                struct ggml_tensor * Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur);
+                cb(Vcur, "Vcur", il);
+                if (model.layers[il].bv) {
+                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
+                    cb(Vcur, "Vcur", il);
+                }
+
+                if (is_sliding) {
+                    Qcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, rope_factors,
+                                        n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor, attn_factor,
+                                        beta_fast, beta_slow);
+                    cb(Qcur, "Qcur", il);
+
+                    Kcur = ggml_rope_ext(ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
+                                        rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor,
+                                        attn_factor, beta_fast, beta_slow);
+                    cb(Kcur, "Kcur", il);
+                } else {
+                    // For non-sliding layers, just reshape without applying RoPE
+                    Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+                    cb(Qcur, "Qcur", il);
+
+                    Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+                    cb(Kcur, "Kcur", il);
+                }
+
+                cur = llm_build_kv(ctx0, lctx, kv_self, gf, model.layers[il].wo, model.layers[il].bo, Kcur, Vcur, Qcur,
+                                   KQ_mask_l, n_tokens, kv_head, n_kv, 1.0f / sqrtf(float(n_embd_head)), cb, il);
+            }
+
+            if (il == n_layer - 1) {
+                // skip computing output for unused tokens
+                struct ggml_tensor * inp_out_ids = build_inp_out_ids();
+                cur                              = ggml_get_rows(ctx0, cur, inp_out_ids);
+                inpL                             = ggml_get_rows(ctx0, inpL, inp_out_ids);
+                ffn_inp                          = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
+            }
+
+            struct ggml_tensor * attn_out = cur;
+
+            // feed-forward network
+            {
+                cur = llm_build_ffn(ctx0, lctx, ffn_inp, model.layers[il].ffn_up, NULL, NULL, model.layers[il].ffn_gate,
+                                    NULL, NULL, model.layers[il].ffn_down, NULL, NULL, NULL, LLM_FFN_SILU, LLM_FFN_PAR,
+                                    cb, il);
+                cb(cur, "ffn_out", il);
+            }
+
+            // add together residual + FFN + self-attention
+            cur = ggml_add(ctx0, cur, inpL);
+            cur = ggml_add(ctx0, cur, attn_out);
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
+        }
+
+        cur = inpL;
+
+        cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, NULL, LLM_NORM, cb, -1);
+        cb(cur, "result_norm", -1);
+
+        // lm_head
+        cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);
+
+        if (f_logit_scale) {
+            cur = ggml_scale(ctx0, cur, f_logit_scale);
+        }
+
+        cb(cur, "result_output", -1);
+
+        ggml_build_forward_expand(gf, cur);
+
+        return gf;
+    }
+
     // ref: https://allenai.org/olmo
     // based on the original build_llama() function, changes:
     //   * non-parametric layer norm
@@ -10384,6 +10541,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm.build_command_r();
             } break;
+        case LLM_ARCH_COHERE2:
+            {
+                result = llm.build_cohere2();
+            } break;
         case LLM_ARCH_DBRX:
             {
                 result = llm.build_dbrx();

From f922a9c542ee117550a168395c63ea79261f5c99 Mon Sep 17 00:00:00 2001
From: matt23654 
Date: Sat, 4 Jan 2025 16:10:30 +0000
Subject: [PATCH 018/279] [GGML][RPC] Support for models with non-512-aligned
 tensors over RPC. (#11047)

* Added init tensor calling code

* Added get_alloc_size forwarding

* Cleaned up and improved type/error handling.

* fix: remove trailing whitespaces.

* Cleanup and use GGML error logging functions.

* Handle potentially dangerous edge cases.

* Apply suggestions from code review

Co-authored-by: Diego Devesa 

---------

Co-authored-by: Diego Devesa 
---
 ggml/src/ggml-rpc/ggml-rpc.cpp | 140 +++++++++++++++++++++++++++++++--
 1 file changed, 134 insertions(+), 6 deletions(-)

diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp
index 431082426..2213aba9f 100644
--- a/ggml/src/ggml-rpc/ggml-rpc.cpp
+++ b/ggml/src/ggml-rpc/ggml-rpc.cpp
@@ -93,9 +93,23 @@ enum rpc_cmd {
     RPC_CMD_COPY_TENSOR,
     RPC_CMD_GRAPH_COMPUTE,
     RPC_CMD_GET_DEVICE_MEMORY,
+    RPC_CMD_INIT_TENSOR,
+    RPC_CMD_GET_ALLOC_SIZE,
     RPC_CMD_COUNT,
 };
 
+struct rpc_msg_get_alloc_size_req {
+    rpc_tensor tensor;
+};
+
+struct rpc_msg_get_alloc_size_rsp {
+    uint64_t alloc_size;
+};
+
+struct rpc_msg_init_tensor_req {
+    rpc_tensor tensor;
+};
+
 struct rpc_msg_alloc_buffer_req {
     uint64_t size;
 };
@@ -461,10 +475,18 @@ static rpc_tensor serialize_tensor(const ggml_tensor * tensor) {
 }
 
 static void ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
-    UNUSED(buffer);
-    if (ggml_is_quantized(tensor->type)) {
-        // TODO: this check is due to MATRIX_ROW_PADDING in CUDA and should be generalized
-        GGML_ASSERT(tensor->ne[0] % 512 == 0 && "unsupported quantized tensor");
+    ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context;
+
+    // CUDA backend on the server pads everything to 512 due to CUDA limitations.
+    // Due to bandwidth constraints, we only call the server init tensor functions if necessary.
+    // In particular, only quantized tensors need padding
+    if (ggml_is_quantized(tensor->type) && (tensor->ne[0] % 512 != 0) && (tensor->view_src == nullptr)) {
+        rpc_msg_init_tensor_req request;
+
+        request.tensor = serialize_tensor(tensor);
+
+        bool status = send_rpc_cmd(ctx->sock, RPC_CMD_INIT_TENSOR, &request, sizeof(request), nullptr, 0);
+        GGML_ASSERT(status);
     }
 }
 
@@ -577,8 +599,23 @@ static size_t ggml_backend_rpc_get_max_size(ggml_backend_buffer_type_t buft) {
 }
 
 static size_t ggml_backend_rpc_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
-    UNUSED(buft);
-    return ggml_nbytes(tensor);
+    // See comments in init_tensor.
+    if (ggml_is_quantized(tensor->type) && (tensor->ne[0] % 512 != 0) && (tensor->view_src == nullptr)) {
+        ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context;
+        auto sock = get_socket(buft_ctx->endpoint);
+
+        rpc_msg_get_alloc_size_req request;
+
+        request.tensor = serialize_tensor(tensor);
+
+        rpc_msg_get_alloc_size_rsp response;
+        bool status = send_rpc_cmd(sock, RPC_CMD_GET_ALLOC_SIZE, &request, sizeof(request), &response, sizeof(response));
+        GGML_ASSERT(status);
+
+        return response.alloc_size;
+    } else {
+        return ggml_nbytes(tensor);
+    }
 }
 
 static ggml_backend_buffer_type_i ggml_backend_rpc_buffer_type_interface = {
@@ -757,6 +794,8 @@ public:
     bool get_tensor(const rpc_msg_get_tensor_req & request, std::vector & response);
     bool copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_copy_tensor_rsp & response);
     bool graph_compute(const std::vector & input, rpc_msg_graph_compute_rsp & response);
+    bool init_tensor(const rpc_msg_init_tensor_req & request);
+    bool get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_msg_get_alloc_size_rsp & response);
 
 private:
     ggml_tensor * deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor);
@@ -770,6 +809,36 @@ private:
     std::unordered_set buffers;
 };
 
+bool rpc_server::get_alloc_size(const rpc_msg_get_alloc_size_req & request, rpc_msg_get_alloc_size_rsp & response) {
+    ggml_backend_buffer_type_t buft;
+    struct ggml_init_params params {
+        /*.mem_size   =*/ ggml_tensor_overhead(),
+        /*.mem_buffer =*/ NULL,
+        /*.no_alloc   =*/ true,
+    };
+
+    struct ggml_context * ctx = ggml_init(params);
+    ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor);
+
+    if (tensor == nullptr) {
+        GGML_LOG_ERROR("Null tensor pointer passed to server get_alloc_size function.\n");
+        ggml_free(ctx);
+        return false;
+    }
+
+    if (tensor->buffer == nullptr) {
+        //No buffer allocated.
+        buft = ggml_backend_get_default_buffer_type(backend);
+    } else {
+        buft = tensor->buffer->buft;
+    }
+
+    response.alloc_size = ggml_backend_buft_get_alloc_size(buft,tensor);
+
+    ggml_free(ctx);
+    return true;
+}
+
 void rpc_server::alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_alloc_buffer_rsp & response) {
     ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend);
     ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, request.size);
@@ -905,6 +974,40 @@ bool rpc_server::set_tensor(const std::vector & input) {
     return true;
 }
 
+bool rpc_server::init_tensor(const rpc_msg_init_tensor_req & request) {
+    struct ggml_init_params params {
+        /*.mem_size   =*/ ggml_tensor_overhead(),
+        /*.mem_buffer =*/ NULL,
+        /*.no_alloc   =*/ true,
+    };
+    struct ggml_context * ctx = ggml_init(params);
+    ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor);
+    if (tensor == nullptr) {
+        GGML_LOG_ERROR("Null tensor pointer passed to server init_tensor function.\n");
+        ggml_free(ctx);
+        return false;
+    }
+
+    // Call the backend's buffer_init_tensor function
+    ggml_backend_buffer_t buffer = tensor->buffer;
+    if (buffer && buffer->iface.init_tensor) {
+        buffer->iface.init_tensor(buffer, tensor);
+    } else {
+        GGML_LOG_ERROR("Null buffer for tensor passed to init_tensor function\n");
+    }
+
+    if (tensor->extra != nullptr) {
+        // This pointer can either be passed around client/server, or probably better stored server-side and kept track of.
+        // Currently unimplemented.
+        GGML_LOG_ERROR("tensor->extra populated by the backend, this is currently unsupported.\n");
+        ggml_free(ctx);
+        return false;
+    }
+
+    ggml_free(ctx);
+    return true;
+}
+
 bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector & response) {
     struct ggml_init_params params {
         /*.mem_size   =*/ ggml_tensor_overhead(),
@@ -1058,6 +1161,18 @@ static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t fre
                 }
                 break;
             }
+            case RPC_CMD_GET_ALLOC_SIZE: {
+                rpc_msg_get_alloc_size_req request;
+                if (!recv_msg(sockfd, &request, sizeof(request))) {
+                    return;
+                }
+                rpc_msg_get_alloc_size_rsp response;
+                server.get_alloc_size(request, response);
+                if (!send_msg(sockfd, &response, sizeof(response))) {
+                    return;
+                }
+                break;
+            }
             case RPC_CMD_GET_ALIGNMENT: {
                 if (!recv_msg(sockfd, nullptr, 0)) {
                     return;
@@ -1133,6 +1248,19 @@ static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t fre
                 }
                 break;
             }
+            case RPC_CMD_INIT_TENSOR: {
+                rpc_msg_init_tensor_req request;
+                if (!recv_msg(sockfd, &request,sizeof(request))) {
+                    return;
+                }
+                if (!server.init_tensor(request)) {
+                    return;
+                }
+                if (!send_msg(sockfd, nullptr, 0)) {
+                    return;
+                }
+                break;
+            }
             case RPC_CMD_GET_TENSOR: {
                 rpc_msg_get_tensor_req request;
                 if (!recv_msg(sockfd, &request, sizeof(request))) {

From 9394bbd484f802ce80d2858033583af3ef700d25 Mon Sep 17 00:00:00 2001
From: fairydreaming <166155368+fairydreaming@users.noreply.github.com>
Date: Sat, 4 Jan 2025 21:06:11 +0100
Subject: [PATCH 019/279] llama : Add support for DeepSeek V3 (#11049)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* convert : extend DEEPSEEK2 model architecture to support DeepseekV3ForCausalLM by adding EXPERT_WEIGHTS_NORM and EXPERT_GATING_FUNC model parameters and FFN_EXP_PROBS_B tensor type

* vocab : add DeepSeek V3 pre-tokenizer regexes

* unicode : handle ACCENT_MARK and SYMBOL categories in regex

* llama : add DeepSeek V3 chat template, handle new model parameters and tensor types

---------

Co-authored-by: Stanisław Szymczyk 
---
 convert_hf_to_gguf.py          | 23 +++++++++++++++++
 convert_hf_to_gguf_update.py   |  1 +
 gguf-py/gguf/constants.py      | 10 ++++++++
 gguf-py/gguf/gguf_writer.py    |  7 ++++++
 gguf-py/gguf/tensor_mapping.py |  4 +++
 include/llama.h                |  1 +
 src/llama-arch.cpp             |  4 +++
 src/llama-arch.h               |  3 +++
 src/llama-chat.cpp             | 18 ++++++++++++++
 src/llama-chat.h               |  1 +
 src/llama-hparams.h            | 12 +++++++--
 src/llama-model.cpp            | 23 +++++++++++++++++
 src/llama-model.h              |  2 ++
 src/llama-vocab.cpp            |  7 ++++++
 src/llama.cpp                  | 45 +++++++++++++++++++++++++++++++---
 src/unicode.cpp                |  6 +++++
 16 files changed, 162 insertions(+), 5 deletions(-)

diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index d4441bbe9..01b58f976 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -687,6 +687,9 @@ class Model:
         if chkhsh == "d4c8f286ea6b520b3d495c4455483cfa2302c0cfcd4be05d781b6a8a0a7cdaf1":
             # ref: https://huggingface.co/Infinigence/Megrez-3B-Instruct
             res = "megrez"
+        if chkhsh == "877081d19cf6996e2c4ff0e1236341e9b7bde288f5311a56a937f0afbbb3aeb5":
+            # ref: https://huggingface.co/deepseek-ai/DeepSeek-V3
+            res = "deepseek-v3"
 
         if res is None:
             logger.warning("\n")
@@ -3849,6 +3852,7 @@ class DeepseekModel(Model):
 
 
 @Model.register("DeepseekV2ForCausalLM")
+@Model.register("DeepseekV3ForCausalLM")
 class DeepseekV2Model(Model):
     model_arch = gguf.MODEL_ARCH.DEEPSEEK2
 
@@ -3870,6 +3874,15 @@ class DeepseekV2Model(Model):
         self.gguf_writer.add_expert_count(hparams["n_routed_experts"])
         self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"])
         self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"])
+        self.gguf_writer.add_expert_weights_norm(hparams["norm_topk_prob"])
+
+        if hparams["scoring_func"] == "sigmoid":
+            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SIGMOID)
+        elif hparams["scoring_func"] == "softmax":
+            self.gguf_writer.add_expert_gating_func(gguf.ExpertGatingFuncType.SOFTMAX)
+        else:
+            raise ValueError(f"Unsupported scoring_func value: {hparams['scoring_func']}")
+
         self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"])
 
         if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
@@ -3882,6 +3895,16 @@ class DeepseekV2Model(Model):
     _experts: list[dict[str, Tensor]] | None = None
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
+        # rename e_score_correction_bias tensors
+        if name.endswith("e_score_correction_bias"):
+            name = name.replace("e_score_correction_bias", "e_score_correction.bias")
+
+        # skip Multi-Token Prediction (MTP) layers
+        block_count = self.hparams["num_hidden_layers"]
+        match = re.match(r"model.layers.(\d+)", name)
+        if match and int(match.group(1)) >= block_count:
+            return []
+
         # process the experts separately
         if name.find("mlp.experts") != -1:
             n_experts = self.hparams["n_routed_experts"]
diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py
index fea23ddb4..56edc64a7 100755
--- a/convert_hf_to_gguf_update.py
+++ b/convert_hf_to_gguf_update.py
@@ -107,6 +107,7 @@ models = [
     {"name": "roberta-bpe",    "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/sentence-transformers/stsb-roberta-base"},
     {"name": "gigachat",       "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct"},
     {"name": "megrez",         "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Infinigence/Megrez-3B-Instruct"},
+    {"name": "deepseek-v3",    "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-V3"},
 ]
 
 
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index cdf79673b..9d0e7489f 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -102,6 +102,8 @@ class Keys:
         EXPERT_USED_COUNT                 = "{arch}.expert_used_count"
         EXPERT_SHARED_COUNT               = "{arch}.expert_shared_count"
         EXPERT_WEIGHTS_SCALE              = "{arch}.expert_weights_scale"
+        EXPERT_WEIGHTS_NORM               = "{arch}.expert_weights_norm"
+        EXPERT_GATING_FUNC                = "{arch}.expert_gating_func"
         POOLING_TYPE                      = "{arch}.pooling_type"
         LOGIT_SCALE                       = "{arch}.logit_scale"
         DECODER_START_TOKEN_ID            = "{arch}.decoder_start_token_id"
@@ -313,6 +315,7 @@ class MODEL_TENSOR(IntEnum):
     FFN_GATE_SHEXP       = auto()
     FFN_DOWN_SHEXP       = auto()
     FFN_UP_SHEXP         = auto()
+    FFN_EXP_PROBS_B      = auto()
     ATTN_Q_NORM          = auto()
     ATTN_K_NORM          = auto()
     LAYER_OUT_NORM       = auto()
@@ -498,6 +501,7 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
     MODEL_TENSOR.FFN_GATE_EXP:              "blk.{bid}.ffn_gate_exps",
     MODEL_TENSOR.FFN_DOWN_EXP:              "blk.{bid}.ffn_down_exps",
     MODEL_TENSOR.FFN_UP_EXP:                "blk.{bid}.ffn_up_exps",
+    MODEL_TENSOR.FFN_EXP_PROBS_B:           "blk.{bid}.exp_probs_b",
     MODEL_TENSOR.LAYER_OUT_NORM:            "blk.{bid}.layer_output_norm",
     MODEL_TENSOR.SSM_IN:                    "blk.{bid}.ssm_in",
     MODEL_TENSOR.SSM_CONV1D:                "blk.{bid}.ssm_conv1d",
@@ -1290,6 +1294,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_GATE_SHEXP,
         MODEL_TENSOR.FFN_DOWN_SHEXP,
         MODEL_TENSOR.FFN_UP_SHEXP,
+        MODEL_TENSOR.FFN_EXP_PROBS_B,
     ],
     MODEL_ARCH.CHATGLM : [
         MODEL_TENSOR.TOKEN_EMBD,
@@ -1590,6 +1595,11 @@ class GGMLQuantizationType(IntEnum):
     TQ2_0   = 35
 
 
+class ExpertGatingFuncType(IntEnum):
+    SOFTMAX  = 1
+    SIGMOID  = 2
+
+
 # TODO: add GGMLFileType from ggml_ftype in ggml.h
 
 
diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py
index 3023b539a..4a0a65e3c 100644
--- a/gguf-py/gguf/gguf_writer.py
+++ b/gguf-py/gguf/gguf_writer.py
@@ -26,6 +26,7 @@ from .constants import (
     RopeScalingType,
     PoolingType,
     TokenType,
+    ExpertGatingFuncType,
 )
 
 from .quants import quant_shape_from_byte_shape
@@ -715,6 +716,12 @@ class GGUFWriter:
     def add_expert_weights_scale(self, value: float) -> None:
         self.add_float32(Keys.LLM.EXPERT_WEIGHTS_SCALE.format(arch=self.arch), value)
 
+    def add_expert_weights_norm(self, value: bool) -> None:
+        self.add_bool(Keys.LLM.EXPERT_WEIGHTS_NORM.format(arch=self.arch), value)
+
+    def add_expert_gating_func(self, value: ExpertGatingFuncType) -> None:
+        self.add_uint32(Keys.LLM.EXPERT_GATING_FUNC.format(arch=self.arch), value.value)
+
     def add_swin_norm(self, value: bool) -> None:
         self.add_bool(Keys.LLM.SWIN_NORM.format(arch=self.arch), value)
 
diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py
index 7009a11d4..efe2a4aa4 100644
--- a/gguf-py/gguf/tensor_mapping.py
+++ b/gguf-py/gguf/tensor_mapping.py
@@ -276,6 +276,10 @@ class TensorNameMap:
             "model.layers.{bid}.mlp.shared_expert_gate", # qwen2moe
         ),
 
+        MODEL_TENSOR.FFN_EXP_PROBS_B: (
+            "model.layers.{bid}.mlp.gate.e_score_correction", # deepseek-v3
+        ),
+
         # Feed-forward up
         MODEL_TENSOR.FFN_UP: (
             "gpt_neox.layers.{bid}.mlp.dense_h_to_4h",                # gptneox
diff --git a/include/llama.h b/include/llama.h
index 7b305b299..a0d5ba5dd 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -105,6 +105,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_EXAONE         = 25,
         LLAMA_VOCAB_PRE_TYPE_CHAMELEON      = 26,
         LLAMA_VOCAB_PRE_TYPE_MINERVA        = 27,
+        LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM  = 28,
     };
 
     enum llama_rope_type {
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index fea4b21d3..007d79f82 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -92,6 +92,8 @@ static const std::map LLM_KV_NAMES = {
     { LLM_KV_EXPERT_USED_COUNT,                 "%s.expert_used_count"                 },
     { LLM_KV_EXPERT_SHARED_COUNT,               "%s.expert_shared_count"               },
     { LLM_KV_EXPERT_WEIGHTS_SCALE,              "%s.expert_weights_scale"              },
+    { LLM_KV_EXPERT_WEIGHTS_NORM,               "%s.expert_weights_norm"               },
+    { LLM_KV_EXPERT_GATING_FUNC,                "%s.expert_gating_func"                },
     { LLM_KV_POOLING_TYPE,                      "%s.pooling_type"                      },
     { LLM_KV_LOGIT_SCALE,                       "%s.logit_scale"                       },
     { LLM_KV_DECODER_START_TOKEN_ID,            "%s.decoder_start_token_id"            },
@@ -984,6 +986,7 @@ static const std::map> LLM_TENSOR_N
             { LLM_TENSOR_FFN_GATE_SHEXP,     "blk.%d.ffn_gate_shexp" },
             { LLM_TENSOR_FFN_DOWN_SHEXP,     "blk.%d.ffn_down_shexp" },
             { LLM_TENSOR_FFN_UP_SHEXP,       "blk.%d.ffn_up_shexp" },
+            { LLM_TENSOR_FFN_EXP_PROBS_B,    "blk.%d.exp_probs_b" },
         },
     },
     {
@@ -1366,6 +1369,7 @@ static const std::map LLM_TENSOR_INFOS = {
     {LLM_TENSOR_FFN_DOWN_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
     {LLM_TENSOR_FFN_GATE_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
     {LLM_TENSOR_FFN_UP_EXPS,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
+    {LLM_TENSOR_FFN_EXP_PROBS_B,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
     // this tensor is loaded for T5, but never used
     {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
     {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_INPUT,     GGML_OP_IM2COL}},
diff --git a/src/llama-arch.h b/src/llama-arch.h
index 10bd619a4..45e458bb9 100644
--- a/src/llama-arch.h
+++ b/src/llama-arch.h
@@ -96,6 +96,8 @@ enum llm_kv {
     LLM_KV_EXPERT_USED_COUNT,
     LLM_KV_EXPERT_SHARED_COUNT,
     LLM_KV_EXPERT_WEIGHTS_SCALE,
+    LLM_KV_EXPERT_WEIGHTS_NORM,
+    LLM_KV_EXPERT_GATING_FUNC,
     LLM_KV_POOLING_TYPE,
     LLM_KV_LOGIT_SCALE,
     LLM_KV_DECODER_START_TOKEN_ID,
@@ -231,6 +233,7 @@ enum llm_tensor {
     LLM_TENSOR_FFN_DOWN_SHEXP,
     LLM_TENSOR_FFN_GATE_SHEXP,
     LLM_TENSOR_FFN_UP_SHEXP,
+    LLM_TENSOR_FFN_EXP_PROBS_B,
     LLM_TENSOR_ATTN_Q_NORM,
     LLM_TENSOR_ATTN_K_NORM,
     LLM_TENSOR_LAYER_OUT_NORM,
diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp
index a07e9cf00..44670d3d8 100644
--- a/src/llama-chat.cpp
+++ b/src/llama-chat.cpp
@@ -45,6 +45,7 @@ static const std::map LLM_CHAT_TEMPLATES = {
     { "vicuna-orca",       LLM_CHAT_TEMPLATE_VICUNA_ORCA       },
     { "deepseek",          LLM_CHAT_TEMPLATE_DEEPSEEK          },
     { "deepseek2",         LLM_CHAT_TEMPLATE_DEEPSEEK_2        },
+    { "deepseek3",         LLM_CHAT_TEMPLATE_DEEPSEEK_3        },
     { "command-r",         LLM_CHAT_TEMPLATE_COMMAND_R         },
     { "llama3",            LLM_CHAT_TEMPLATE_LLAMA_3           },
     { "chatglm3",          LLM_CHAT_TEMPLATE_CHATGML_3         },
@@ -148,6 +149,8 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) {
         return LLM_CHAT_TEMPLATE_MINICPM;
     } else if (tmpl_contains("'Assistant: ' + message['content'] + eos_token")) {
         return LLM_CHAT_TEMPLATE_DEEPSEEK_2;
+    } else if (tmpl_contains(LU8("'<|Assistant|>' + message['content'] + '<|end▁of▁sentence|>'"))) {
+        return LLM_CHAT_TEMPLATE_DEEPSEEK_3;
     } else if (tmpl_contains("[|system|]") && tmpl_contains("[|assistant|]") && tmpl_contains("[|endofturn|]")) {
         // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
         // EXAONE-3.0-7.8B-Instruct
@@ -453,6 +456,21 @@ int32_t llm_chat_apply_template(
         if (add_ass) {
             ss << "Assistant:";
         }
+    } else if (tmpl == LLM_CHAT_TEMPLATE_DEEPSEEK_3) {
+        // DeepSeek-V3
+        for (auto message : chat) {
+            std::string role(message->role);
+            if (role == "system") {
+                ss << message->content << "\n\n";
+            } else if (role == "user") {
+                ss << LU8("<|User|>") << message->content;
+            } else if (role == "assistant") {
+                ss << LU8("<|Assistant|>") << message->content << LU8("<|end▁of▁sentence|>");
+            }
+        }
+        if (add_ass) {
+            ss << LU8("<|Assistant|>");
+        }
     } else if (tmpl == LLM_CHAT_TEMPLATE_EXAONE_3) {
         // ref: https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct/discussions/8#66bae61b1893d14ee8ed85bb
         // EXAONE-3.0-7.8B-Instruct
diff --git a/src/llama-chat.h b/src/llama-chat.h
index 364318c27..b8e94d9ef 100644
--- a/src/llama-chat.h
+++ b/src/llama-chat.h
@@ -25,6 +25,7 @@ enum llm_chat_template {
     LLM_CHAT_TEMPLATE_VICUNA_ORCA,
     LLM_CHAT_TEMPLATE_DEEPSEEK,
     LLM_CHAT_TEMPLATE_DEEPSEEK_2,
+    LLM_CHAT_TEMPLATE_DEEPSEEK_3,
     LLM_CHAT_TEMPLATE_COMMAND_R,
     LLM_CHAT_TEMPLATE_LLAMA_3,
     LLM_CHAT_TEMPLATE_CHATGML_3,
diff --git a/src/llama-hparams.h b/src/llama-hparams.h
index 3a76b71a4..a29f20ec4 100644
--- a/src/llama-hparams.h
+++ b/src/llama-hparams.h
@@ -6,7 +6,13 @@
 
 // bump if necessary
 #define LLAMA_MAX_LAYERS  512
-#define LLAMA_MAX_EXPERTS 160  // DeepSeekV2
+#define LLAMA_MAX_EXPERTS 256  // DeepSeekV3
+
+enum llama_expert_gating_func_type {
+    LLAMA_EXPERT_GATING_FUNC_TYPE_NONE    = 0,
+    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX = 1,
+    LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID = 2,
+};
 
 struct llama_hparams_posnet {
     uint32_t n_embd;
@@ -54,7 +60,9 @@ struct llama_hparams {
     uint32_t n_expert_shared    = 0;
     uint32_t n_norm_groups      = 0;
 
-    float expert_weights_scale = 0.0;
+    float    expert_weights_scale = 0.0;
+    bool     expert_weights_norm  = false;
+    uint32_t expert_gating_func   = LLAMA_EXPERT_GATING_FUNC_TYPE_NONE;
 
     float f_norm_eps;
     float f_norm_rms_eps;
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index c356abded..405e0528f 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -66,6 +66,7 @@ const char * llm_type_name(llm_type type) {
         case MODEL_70B:           return "70B";
         case MODEL_236B:          return "236B";
         case MODEL_314B:          return "314B";
+        case MODEL_671B:          return "671B";
         case MODEL_SMALL:         return "0.1B";
         case MODEL_MEDIUM:        return "0.4B";
         case MODEL_LARGE:         return "0.8B";
@@ -125,6 +126,14 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
     }
 }
 
+static const char * llama_expert_gating_func_name(llama_expert_gating_func_type type) {
+    switch (type) {
+        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX: return "softmax";
+        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID: return "sigmoid";
+        default:                                    return "unknown";
+    }
+}
+
 std::string llama_model_arch_name (const llama_model & model) {
     return llm_arch_name(model.arch);
 }
@@ -933,11 +942,19 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) {
                 ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
                 ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
                 ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
+                ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false);
+                ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false);
+                if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) {
+                    // for compatibility with existing DeepSeek V2 and V2.5 GGUFs
+                    // that have no expert_gating_func model parameter set
+                    hparams.expert_gating_func = LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX;
+                }
                 ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);
 
                 switch (hparams.n_layer) {
                     case 27: model.type = e_model::MODEL_16B; break;
                     case 60: model.type = e_model::MODEL_236B; break;
+                    case 61: model.type = e_model::MODEL_671B; break;
                     default: model.type = e_model::MODEL_UNKNOWN;
                 }
             } break;
@@ -1259,6 +1276,10 @@ void llm_load_vocab(llama_model_loader & ml, llama_model & model) {
                     tokenizer_pre == "deepseek-coder") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
                 vocab.tokenizer_clean_spaces = false;
+            } else if (
+                    tokenizer_pre == "deepseek-v3") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM;
+                vocab.tokenizer_clean_spaces = false;
             } else if (
                     tokenizer_pre == "falcon") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON;
@@ -1941,6 +1962,8 @@ void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
         LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
         LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
         LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
+        LLAMA_LOG_INFO("%s: expert_weights_norm  = %d\n",     __func__, hparams.expert_weights_norm);
+        LLAMA_LOG_INFO("%s: expert_gating_func   = %s\n",     __func__, llama_expert_gating_func_name((enum llama_expert_gating_func_type) hparams.expert_gating_func));
         LLAMA_LOG_INFO("%s: rope_yarn_log_mul    = %.4f\n",   __func__, hparams.rope_yarn_log_mul);
     }
 
diff --git a/src/llama-model.h b/src/llama-model.h
index 01c780c41..ce038932d 100644
--- a/src/llama-model.h
+++ b/src/llama-model.h
@@ -63,6 +63,7 @@ enum llm_type {
     MODEL_70B,
     MODEL_236B,
     MODEL_314B,
+    MODEL_671B,
     MODEL_SMALL,
     MODEL_MEDIUM,
     MODEL_LARGE,
@@ -213,6 +214,7 @@ struct llama_layer {
     struct ggml_tensor * ffn_down_b = nullptr; // b2
     struct ggml_tensor * ffn_up_b   = nullptr; // b3
     struct ggml_tensor * ffn_act    = nullptr;
+    struct ggml_tensor * ffn_exp_probs_b = nullptr;
 
     // mamba proj
     struct ggml_tensor * ssm_in  = nullptr;
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 909e04871..3fcfcaa3f 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -382,6 +382,13 @@ struct llm_tokenizer_bpe : llm_tokenizer {
                     "\\p{N}+",
                 };
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM:
+                regex_exprs = {
+                    "\\p{N}{1,3}",
+                    "[一-龥぀-ゟ゠-ヿ]+",
+                    "[!\"#$%&'()*+,\\-./:;<=>?@\\[\\\\\\]^_`{|}~][A-Za-z]+|[^\r\n\\p{L}\\p{P}\\p{S}]?[\\p{L}\\p{M}]+| ?[\\p{P}\\p{S}]+[\r\n]*|\\s*[\r\n]+|\\s+(?!\\S)|\\s+",
+                };
+                break;
             case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER:
                 regex_exprs = {
                     "[\r\n]",
diff --git a/src/llama.cpp b/src/llama.cpp
index 50e9191fa..ea78ea487 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -1857,6 +1857,7 @@ static bool llm_load_tensors(
                             layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
                         } else {
                             layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
 
                             if (n_expert == 0) {
                                 throw std::runtime_error("n_expert must be > 0");
@@ -2837,12 +2838,14 @@ static struct ggml_tensor * llm_build_moe_ffn(
          struct ggml_tensor * up_exps,
          struct ggml_tensor * gate_exps,
          struct ggml_tensor * down_exps,
+         struct ggml_tensor * exp_probs_b,
                     int64_t   n_expert,
                     int64_t   n_expert_used,
             llm_ffn_op_type   type_op,
                        bool   norm_w,
                        bool   scale_w,
                       float   w_scale,
+llama_expert_gating_func_type gating_op,
          const llm_build_cb & cb,
                         int   il) {
     int64_t n_embd = cur->ne[0];
@@ -2851,11 +2854,31 @@ static struct ggml_tensor * llm_build_moe_ffn(
     ggml_tensor * logits = llm_build_lora_mm(lctx, ctx, gate_inp, cur); // [n_expert, n_tokens]
     cb(logits, "ffn_moe_logits", il);
 
-    ggml_tensor * probs = ggml_soft_max(ctx, logits); // [n_expert, n_tokens]
+    ggml_tensor * probs = nullptr;
+    switch (gating_op) {
+        case LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX:
+            {
+                probs = ggml_soft_max(ctx, logits); // [n_expert, n_tokens]
+            } break;
+        case LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID:
+            {
+                probs = ggml_sigmoid(ctx, logits); // [n_expert, n_tokens]
+            } break;
+        default:
+            GGML_ABORT("fatal error");
+    }
     cb(probs, "ffn_moe_probs", il);
 
+    // add experts selection bias - introduced in DeepSeek V3
+    // leave probs unbiased as it's later used to get expert weights
+    ggml_tensor * selection_probs = probs;
+    if (exp_probs_b != nullptr) {
+        selection_probs = ggml_add(ctx, probs, exp_probs_b);
+        cb(selection_probs, "ffn_moe_probs_biased", il);
+    }
+
     // select experts
-    ggml_tensor * selected_experts = ggml_top_k(ctx, probs, n_expert_used); // [n_expert_used, n_tokens]
+    ggml_tensor * selected_experts = ggml_top_k(ctx, selection_probs, n_expert_used); // [n_expert_used, n_tokens]
     cb(selected_experts->src[0], "ffn_moe_argsort", il);
     cb(selected_experts, "ffn_moe_topk", il);
 
@@ -3976,9 +3999,11 @@ struct llm_build_context {
                         model.layers[il].ffn_up_exps,
                         model.layers[il].ffn_gate_exps,
                         model.layers[il].ffn_down_exps,
+                        nullptr,
                         n_expert, n_expert_used,
                         LLM_FFN_SILU, true,
                         false, 0.0,
+                        LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                         cb, il);
                 cb(cur, "ffn_moe_out", il);
             }
@@ -4628,9 +4653,11 @@ struct llm_build_context {
                     model.layers[il].ffn_up_exps,
                     model.layers[il].ffn_gate_exps,
                     model.layers[il].ffn_down_exps,
+                    nullptr,
                     n_expert, n_expert_used,
                     LLM_FFN_GELU, true,
                     false, 0.0,
+                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -4769,9 +4796,11 @@ struct llm_build_context {
                     model.layers[il].ffn_up_exps,
                     model.layers[il].ffn_gate_exps,
                     model.layers[il].ffn_down_exps,
+                    nullptr,
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, true,
                     false, 0.0,
+                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -6017,9 +6046,11 @@ struct llm_build_context {
                         model.layers[il].ffn_up_exps,
                         model.layers[il].ffn_gate_exps,
                         model.layers[il].ffn_down_exps,
+                        nullptr,
                         n_expert, n_expert_used,
                         LLM_FFN_SILU, false,
                         false, 0.0,
+                        LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                         cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -8142,9 +8173,11 @@ struct llm_build_context {
                     model.layers[il].ffn_up_exps,
                     model.layers[il].ffn_gate_exps,
                     model.layers[il].ffn_down_exps,
+                    nullptr,
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, false,
                     false, 0.0,
+                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -8539,9 +8572,11 @@ struct llm_build_context {
                     model.layers[il].ffn_up_exps,
                     model.layers[il].ffn_gate_exps,
                     model.layers[il].ffn_down_exps,
+                    nullptr,
                     n_expert, n_expert_used,
                     LLM_FFN_SILU, true,
                     false, 0.0,
+                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                     cb, il);
             cb(cur, "ffn_moe_out", il);
 
@@ -8680,9 +8715,11 @@ struct llm_build_context {
                             model.layers[il].ffn_up_exps,
                             model.layers[il].ffn_gate_exps,
                             model.layers[il].ffn_down_exps,
+                            nullptr,
                             n_expert, n_expert_used,
                             LLM_FFN_SILU, false,
                             false, hparams.expert_weights_scale,
+                            LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                             cb, il);
                 cb(moe_out, "ffn_moe_out", il);
 
@@ -8909,9 +8946,11 @@ struct llm_build_context {
                             model.layers[il].ffn_up_exps,
                             model.layers[il].ffn_gate_exps,
                             model.layers[il].ffn_down_exps,
+                            model.layers[il].ffn_exp_probs_b,
                             n_expert, n_expert_used,
-                            LLM_FFN_SILU, false,
+                            LLM_FFN_SILU, hparams.expert_weights_norm,
                             true, hparams.expert_weights_scale,
+                            (enum llama_expert_gating_func_type) hparams.expert_gating_func,
                             cb, il);
                 cb(moe_out, "ffn_moe_out", il);
 
diff --git a/src/unicode.cpp b/src/unicode.cpp
index 8ed6b1a51..7aca6544b 100644
--- a/src/unicode.cpp
+++ b/src/unicode.cpp
@@ -667,18 +667,24 @@ std::vector unicode_regex_split(const std::string & text, const std
         { "\\p{N}", unicode_cpt_flags::NUMBER },
         { "\\p{L}", unicode_cpt_flags::LETTER },
         { "\\p{P}", unicode_cpt_flags::PUNCTUATION },
+        { "\\p{M}", unicode_cpt_flags::ACCENT_MARK },
+        { "\\p{S}", unicode_cpt_flags::SYMBOL },
     };
 
     static const std::map k_ucat_cpt = {
         { unicode_cpt_flags::NUMBER,      0xD1 },
         { unicode_cpt_flags::LETTER,      0xD2 },
         { unicode_cpt_flags::PUNCTUATION, 0xD3 },
+        { unicode_cpt_flags::ACCENT_MARK, 0xD4 },
+        { unicode_cpt_flags::SYMBOL,      0xD5 },
     };
 
     static const std::map k_ucat_map = {
         { unicode_cpt_flags::NUMBER,      "\x30-\x39" }, // 0-9
         { unicode_cpt_flags::LETTER,      "\x41-\x5A\x61-\x7A" }, // A-Za-z
         { unicode_cpt_flags::PUNCTUATION, "\x21-\x23\x25-\x2A\x2C-\x2F\x3A-\x3B\x3F-\x40\\\x5B-\\\x5D\x5F\\\x7B\\\x7D" }, // !-#%-*,-/:-;?-@\[-\]_\{\}
+        { unicode_cpt_flags::ACCENT_MARK, "" }, // no sub-128 codepoints
+        { unicode_cpt_flags::SYMBOL,      "\\\x24\\\x2B\x3C-\x3E\x5E\x60\\\x7C" }, // $+<=>^`|
     };
 
     // compute collapsed codepoints only if needed by at least one regex

From b56f079e28fda692f11a8b59200ceb815b05d419 Mon Sep 17 00:00:00 2001
From: 0cc4m 
Date: Sat, 4 Jan 2025 21:09:59 +0100
Subject: [PATCH 020/279] Vulkan: Add device-specific blacklist for coopmat for
 the AMD proprietary driver (#11074)

* Vulkan: Add device-specific blacklist for coopmat for the AMD proprietary driver

* Add (TM) to AMD name check
---
 ggml/src/ggml-vulkan/ggml-vulkan.cpp | 30 +++++++++++++++++++++-------
 1 file changed, 23 insertions(+), 7 deletions(-)

diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index 020e61280..d75cd6d61 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -2040,6 +2040,8 @@ static void ggml_vk_load_shaders(vk_device& device) {
     std::cerr << "Done!" << std::endl;
 }
 
+static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props);
+
 static vk_device ggml_vk_get_device(size_t idx) {
     VK_LOG_DEBUG("ggml_vk_get_device(" << idx << ")");
 
@@ -2175,9 +2177,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
 
         device->fp16 = !force_disable_f16 && fp16_storage && fp16_compute;
 
-        if (device->vendor_id == VK_VENDOR_ID_INTEL || (device->vendor_id == VK_VENDOR_ID_AMD && (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource))) {
-            // Intel drivers don't support coopmat properly yet
-            // Only RADV supports coopmat properly on AMD
+        if (!ggml_vk_khr_cooperative_matrix_support(device->properties, driver_props)) {
             device->coopmat_support = false;
         }
 
@@ -2515,7 +2515,6 @@ static vk_device ggml_vk_get_device(size_t idx) {
     return vk_instance.devices[idx];
 }
 
-
 static void ggml_vk_print_gpu_info(size_t idx) {
     GGML_ASSERT(idx < vk_instance.device_indices.size());
     size_t dev_num = vk_instance.device_indices[idx];
@@ -2565,9 +2564,7 @@ static void ggml_vk_print_gpu_info(size_t idx) {
         }
     }
 
-    if (props2.properties.vendorID == VK_VENDOR_ID_INTEL || (props2.properties.vendorID == VK_VENDOR_ID_AMD && (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource))) {
-        // Intel drivers don't support coopmat properly yet
-        // Only RADV supports coopmat properly on AMD
+    if (!ggml_vk_khr_cooperative_matrix_support(props2.properties, driver_props)) {
         coopmat_support = false;
     }
 
@@ -8088,6 +8085,25 @@ static bool ggml_vk_instance_portability_enumeration_ext_available(const std::ve
     UNUSED(instance_extensions);
 }
 
+static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props) {
+    switch (props.vendorID) {
+    case VK_VENDOR_ID_INTEL:
+        // Intel drivers don't support coopmat properly yet
+        return false;
+    case VK_VENDOR_ID_AMD:
+        if (driver_props.driverID == vk::DriverId::eAmdProprietary || driver_props.driverID == vk::DriverId::eAmdOpenSource) {
+            // Workaround for AMD proprietary driver reporting support on all GPUs
+            const std::string name = props.deviceName;
+            return name.rfind("AMD Radeon RX 7", 0) == 0   || name.rfind("AMD Radeon(TM) RX 7", 0) == 0   || // RDNA 3 consumer GPUs
+                   name.rfind("AMD Radeon PRO W7", 0) == 0 || name.rfind("AMD Radeon(TM) PRO W7", 0) == 0 || // RDNA 3 workstation GPUs
+                   name.rfind("AMD Radeon 7", 0) == 0      || name.rfind("AMD Radeon(TM) 7", 0) == 0;        // RDNA 3 APUs
+        }
+        return true;
+    default:
+        return true;
+    }
+}
+
 // checks
 
 #ifdef GGML_VULKAN_CHECK_RESULTS

From 46e3556e01b824e52395fb050b29804b6cff2a7c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= 
Date: Mon, 6 Jan 2025 02:33:52 +0100
Subject: [PATCH 021/279] CUDA: add BF16 support (#11093)

* CUDA: add BF16 support
---
 ggml/src/ggml-cuda/convert.cu     |   2 +
 ggml/src/ggml-cuda/ggml-cuda.cu   |   3 +-
 ggml/src/ggml-cuda/mmv.cu         | 114 ++++++++++++++++++++----------
 ggml/src/ggml-cuda/vendors/cuda.h |   1 +
 ggml/src/ggml-cuda/vendors/hip.h  |   3 +
 ggml/src/ggml-cuda/vendors/musa.h |   3 +
 6 files changed, 87 insertions(+), 39 deletions(-)

diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index 3896f956d..5b0dfacef 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -680,6 +680,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
             return dequantize_row_iq3_s_cuda;
         case GGML_TYPE_F16:
             return convert_unary_cuda;
+        case GGML_TYPE_BF16:
+            return convert_unary_cuda;
         default:
             return nullptr;
     }
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index c180adc84..0b06be729 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -1728,7 +1728,7 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co
 static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     const bool split = ggml_backend_buft_is_cuda_split(src0->buffer->buft);
 
-    bool use_mul_mat_vec   = src0->type == GGML_TYPE_F16
+    bool use_mul_mat_vec   = (src0->type == GGML_TYPE_F16 || src0->type == GGML_TYPE_BF16)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
         && src0->ne[0] % 2 == 0 && src1->ne[1] == 1;
     bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
@@ -2869,6 +2869,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g
                     case GGML_TYPE_IQ3_XXS:
                     case GGML_TYPE_IQ4_NL:
                     case GGML_TYPE_IQ4_XS:
+                    case GGML_TYPE_BF16:
 #ifdef GGML_USE_MUSA
                         if (a->type == GGML_TYPE_Q3_K) {
                             return false;
diff --git a/ggml/src/ggml-cuda/mmv.cu b/ggml/src/ggml-cuda/mmv.cu
index a4b4f6bc1..ac45f2d17 100644
--- a/ggml/src/ggml-cuda/mmv.cu
+++ b/ggml/src/ggml-cuda/mmv.cu
@@ -1,9 +1,9 @@
 #include "common.cuh"
 #include "mmv.cuh"
 
-template 
+template 
 static __global__ void mul_mat_vec(
-        const half * __restrict__ x, const float * __restrict__ y, float * __restrict__ dst, const int64_t ncols2, const int64_t stride_row,
+        const T * __restrict__ x, const float * __restrict__ y, float * __restrict__ dst, const int64_t ncols2, const int64_t stride_row,
         const int64_t channel_ratio, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst) {
     const int64_t row     = blockIdx.x;
     const int64_t channel = blockIdx.z;
@@ -13,7 +13,6 @@ static __global__ void mul_mat_vec(
     y   +=  channel               *stride_channel_y;
     dst +=  channel               *stride_channel_dst;
 
-    const half2  * x2 = (const half2  *) x;
     const float2 * y2 = (const float2 *) y;
 
     extern __shared__ char data_mmv[];
@@ -28,28 +27,44 @@ static __global__ void mul_mat_vec(
 
     float sumf;
 
-    if (std::is_same::value) {
+    if constexpr (std::is_same::value) {
+        const half2 * x2 = (const half2 *) x;
+
+        if (std::is_same::value) {
+            sumf = 0.0f;
+
+            for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
+                const float2 tmpx = __half22float2(x2[col2]);
+                const float2 tmpy = y2[col2];
+                sumf += tmpx.x * tmpy.x;
+                sumf += tmpx.y * tmpy.y;
+            }
+        } else {
+#ifdef FP16_AVAILABLE
+            half2 sumh2 = make_half2(0.0f, 0.0f);
+
+            for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
+                const float2 tmp = y2[col2];
+                sumh2 += x2[col2] * make_half2(tmp.x, tmp.y);
+            }
+
+            sumf = __low2float(sumh2) + __high2float(sumh2);
+#else
+            NO_DEVICE_CODE;
+#endif // FP16_AVAILABLE
+        }
+    } else if constexpr (std::is_same::value) {
+        const int * x2 = (const int *) x;
         sumf = 0.0f;
 
         for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
-            const float2 tmpx = __half22float2(x2[col2]);
+            const int    tmpx = x2[col2];
             const float2 tmpy = y2[col2];
-            sumf += tmpx.x * tmpy.x;
-            sumf += tmpx.y * tmpy.y;
+            sumf += float(reinterpret_cast(&tmpx)[0]) * tmpy.x;
+            sumf += float(reinterpret_cast(&tmpx)[1]) * tmpy.y;
         }
     } else {
-#ifdef FP16_AVAILABLE
-        half2 sumh2 = make_half2(0.0f, 0.0f);
-
-        for (int64_t col2 = tid; col2 < ncols2; col2 += block_size) {
-            const float2 tmp = y2[col2];
-            sumh2 += x2[col2] * make_half2(tmp.x, tmp.y);
-        }
-
-        sumf = __low2float(sumh2) + __high2float(sumh2);
-#else
-        NO_DEVICE_CODE;
-#endif // FP16_AVAILABLE
+        static_assert(std::is_same::value, "unsupported type");
     }
 
     sumf = warp_reduce_sum(sumf);
@@ -71,9 +86,9 @@ static __global__ void mul_mat_vec(
     dst[row] = sumf;
 }
 
-template 
+template 
 static void launch_mul_mat_vec_cuda(
-        const half * x, const float * y, float * dst,
+        const T * x, const float * y, float * dst,
         const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y,
         const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst,
         cudaStream_t stream) {
@@ -97,35 +112,35 @@ static void launch_mul_mat_vec_cuda(
     const dim3 block_dims(block_size_best, 1, 1);
     switch (block_size_best) {
         case   32: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case   64: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case   96: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  128: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  160: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  192: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  224: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         case  256: {
-            mul_mat_vec<<>>
+            mul_mat_vec<<>>
                 (x, y, dst, ncols/2, stride_row, channel_ratio, stride_channel_x, stride_channel_y, stride_channel_dst);
         } break;
         default: {
@@ -134,25 +149,25 @@ static void launch_mul_mat_vec_cuda(
     }
 }
 
+template
 static void mul_mat_vec_cuda(
-        const half * x, const float * y, float * dst,
+        const T * x, const float * y, float * dst,
         const int64_t ncols, const int64_t nrows, const int64_t stride_row, const int64_t nchannels_x, const int64_t nchannels_y,
         const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst,
         enum ggml_prec prec, cudaStream_t stream) {
     switch (prec) {
         case GGML_PREC_DEFAULT: {
-            launch_mul_mat_vec_cuda(x, y, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y,
+            launch_mul_mat_vec_cuda(x, y, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y,
                 stride_channel_x, stride_channel_y, stride_channel_dst, stream);
         } break;
         case GGML_PREC_F32: {
-            launch_mul_mat_vec_cuda(x, y, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y,
+            launch_mul_mat_vec_cuda(x, y, dst, ncols, nrows, stride_row, nchannels_x, nchannels_y,
                 stride_channel_x, stride_channel_y, stride_channel_dst, stream);
         } break;
     }
 }
 
 void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
-    GGML_ASSERT(src0->type == GGML_TYPE_F16);
     GGML_ASSERT(src1->type == GGML_TYPE_F32);
     GGML_ASSERT(dst->type  == GGML_TYPE_F32);
 
@@ -164,7 +179,6 @@ void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor *
     const int cc = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
     const enum ggml_prec prec = fast_fp16_available(cc) ? ggml_prec(dst->op_params[0]) : GGML_PREC_F32;
 
-    const half  * src0_d = (const half  *) src0->data;
     const float * src1_d = (const float *) src1->data;
     float       *  dst_d = (float       *)  dst->data;
 
@@ -181,7 +195,20 @@ void ggml_cuda_mul_mat_vec(ggml_backend_cuda_context & ctx, const ggml_tensor *
     const int64_t channel_stride_y   = src1->nb[2] / ggml_type_size(src1->type);
     const int64_t channel_stride_dst =  dst->nb[2] / ggml_type_size( dst->type);
 
-    mul_mat_vec_cuda(src0_d, src1_d, dst_d, ne00, ne01, stride_row, ne02, ne12, channel_stride_x, channel_stride_y, channel_stride_dst, prec, ctx.stream());
+    switch (src0->type) {
+        case GGML_TYPE_F16: {
+            const half * src0_d = (const half *) src0->data;
+            mul_mat_vec_cuda(src0_d, src1_d, dst_d, ne00, ne01, stride_row, ne02, ne12,
+                channel_stride_x, channel_stride_y, channel_stride_dst, prec, ctx.stream());
+        } break;
+        case GGML_TYPE_BF16: {
+            const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0->data;
+            mul_mat_vec_cuda(src0_d, src1_d, dst_d, ne00, ne01, stride_row, ne02, ne12,
+                channel_stride_x, channel_stride_y, channel_stride_dst, prec, ctx.stream());
+        } break;
+        default:
+            GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type));
+    }
 }
 
 void ggml_cuda_op_mul_mat_vec(
@@ -190,7 +217,6 @@ void ggml_cuda_op_mul_mat_vec(
     const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
     const int64_t src1_padded_row_size, cudaStream_t stream) {
 
-    GGML_ASSERT(src0->type == GGML_TYPE_F16);
     GGML_ASSERT(src1->type == GGML_TYPE_F32);
     GGML_ASSERT(dst->type  == GGML_TYPE_F32);
 
@@ -211,8 +237,20 @@ void ggml_cuda_op_mul_mat_vec(
     const int64_t channel_stride_y   = 0;
     const int64_t channel_stride_dst = 0;
 
-    mul_mat_vec_cuda((const half *) src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stride_row,
-        nchannels_x, nchannels_y, channel_stride_x, channel_stride_y, channel_stride_dst, prec, stream);
+    switch (src0->type) {
+        case GGML_TYPE_F16: {
+            const half * src0_d = (const half *) src0_dd_i;
+            mul_mat_vec_cuda(src0_d, src1_ddf_i, dst_dd_i, ne00, row_diff, stride_row,
+                nchannels_x, nchannels_y, channel_stride_x, channel_stride_y, channel_stride_dst, prec, stream);
+        } break;
+        case GGML_TYPE_BF16: {
+            const nv_bfloat16 * src0_d = (const nv_bfloat16 *) src0_dd_i;
+            mul_mat_vec_cuda(src0_d, src1_ddf_i, dst_dd_i, ne00, row_diff, stride_row,
+                nchannels_x, nchannels_y, channel_stride_x, channel_stride_y, channel_stride_dst, prec, stream);
+        } break;
+        default:
+            GGML_ABORT("unsupported type: %s", ggml_type_name(src0->type));
+    }
 
     GGML_UNUSED(ctx);
     GGML_UNUSED(src1);
diff --git a/ggml/src/ggml-cuda/vendors/cuda.h b/ggml/src/ggml-cuda/vendors/cuda.h
index db9f6a165..1746b0732 100644
--- a/ggml/src/ggml-cuda/vendors/cuda.h
+++ b/ggml/src/ggml-cuda/vendors/cuda.h
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #if CUDART_VERSION < 11020
diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h
index 3205534d6..c905b15d7 100644
--- a/ggml/src/ggml-cuda/vendors/hip.h
+++ b/ggml/src/ggml-cuda/vendors/hip.h
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include 
 #ifdef __HIP_PLATFORM_AMD__
 // for rocblas_initialize()
 #include "rocblas/rocblas.h"
@@ -121,6 +122,8 @@
     #define __has_builtin(x) 0
 #endif
 
+typedef hip_bfloat16 nv_bfloat16;
+
 typedef int8_t int8x4_t __attribute__((ext_vector_type(4)));
 typedef uint8_t uint8x4_t __attribute__((ext_vector_type(4)));
 static __device__ __forceinline__ int __vsubss4(const int a, const int b) {
diff --git a/ggml/src/ggml-cuda/vendors/musa.h b/ggml/src/ggml-cuda/vendors/musa.h
index 1604b8229..6cc1b69ee 100644
--- a/ggml/src/ggml-cuda/vendors/musa.h
+++ b/ggml/src/ggml-cuda/vendors/musa.h
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #define CUBLAS_COMPUTE_16F CUDA_R_16F
 #define CUBLAS_COMPUTE_32F CUDA_R_32F
@@ -132,3 +133,5 @@
 #define cudaKernelNodeParams musaKernelNodeParams
 #define cudaStreamCaptureModeRelaxed musaStreamCaptureModeRelaxed
 #define cudaStreamEndCapture musaStreamEndCapture
+
+typedef mt_bfloat16 nv_bfloat16;

From 5047dd3546951dea3d65c02257d06c46c8662338 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:52:01 +0200
Subject: [PATCH 022/279] llama : use _impl suffix instead of _internal
 (#11060)

ggml-ci
---
 src/llama-quant.cpp | 20 ++++++++++----------
 src/llama.cpp       | 16 ++++++++--------
 2 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 42974f8f1..104f90343 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -22,7 +22,7 @@ static void zeros(std::ofstream & file, size_t n) {
     }
 }
 
-struct quantize_state_internal {
+struct quantize_state_impl {
     const llama_model                 & model;
     const llama_model_quantize_params * params;
 
@@ -43,13 +43,13 @@ struct quantize_state_internal {
     // used to figure out if a model shares tok_embd with the output weight
     bool has_output = false;
 
-    quantize_state_internal(const llama_model & model, const llama_model_quantize_params * params)
+    quantize_state_impl(const llama_model & model, const llama_model_quantize_params * params)
         : model(model)
         , params(params)
         {}
 };
 
-static void llama_tensor_dequantize_internal(
+static void llama_tensor_dequantize_impl(
     struct ggml_tensor * tensor, std::vector> & output, std::vector & workers,
     const size_t nelements, const int nthread
 ) {
@@ -121,7 +121,7 @@ static void llama_tensor_dequantize_internal(
     workers.clear();
 }
 
-static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
+static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_type, const ggml_tensor * tensor, llama_ftype ftype) {
     const std::string name = ggml_get_name(tensor);
 
     // TODO: avoid hardcoded tensor names - use the TN_* constants
@@ -410,7 +410,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     return new_type;
 }
 
-static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector & workers, const int nthread) {
+static size_t llama_tensor_quantize_impl(enum ggml_type new_type, const float * f32_data, void * new_data, const int64_t chunk_size, int64_t nrows, int64_t n_per_row, const float * imatrix, std::vector & workers, const int nthread) {
     if (nthread < 2) {
         // single-thread
         size_t new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nrows, n_per_row, imatrix);
@@ -464,7 +464,7 @@ static size_t llama_tensor_quantize_internal(enum ggml_type new_type, const floa
     return new_size;
 }
 
-static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
+static void llama_model_quantize_impl(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
     ggml_type default_type;
     llama_ftype ftype = params->ftype;
 
@@ -534,7 +534,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     llm_load_hparams(ml, model);
     llm_load_stats  (ml, model);
 
-    struct quantize_state_internal qs(model, params);
+    struct quantize_state_impl qs(model, params);
 
     if (params->only_copy) {
         ftype = model.ftype;
@@ -837,7 +837,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
                 throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
             } else {
-                llama_tensor_dequantize_internal(tensor, f32_conv_buf, workers, nelements, nthread);
+                llama_tensor_dequantize_impl(tensor, f32_conv_buf, workers, nelements, nthread);
                 f32_data = (float *) f32_conv_buf.data();
             }
 
@@ -866,7 +866,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
                 void * new_data_03 = (char *)new_data + ggml_row_size(new_type, n_per_row) * i03 * nrows;
                 const float * imatrix_03 = imatrix ? imatrix + i03 * n_per_row : nullptr;
 
-                new_size += llama_tensor_quantize_internal(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
+                new_size += llama_tensor_quantize_impl(new_type, f32_data_03, new_data_03, chunk_size, nrows, n_per_row, imatrix_03, workers, nthread_use);
             }
             LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB\n", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
         }
@@ -919,7 +919,7 @@ uint32_t llama_model_quantize(
         const char * fname_out,
         const llama_model_quantize_params * params) {
     try {
-        llama_model_quantize_internal(fname_inp, fname_out, params);
+        llama_model_quantize_impl(fname_inp, fname_out, params);
     } catch (const std::exception & err) {
         LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what());
         return 1;
diff --git a/src/llama.cpp b/src/llama.cpp
index ea78ea487..4a6798f41 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -10717,7 +10717,7 @@ static enum ggml_status llama_graph_compute(
 // return positive int on warning
 // return negative int on error
 //
-static int llama_decode_internal(
+static int llama_decode_impl(
          llama_context & lctx,
            llama_batch   inp_batch) {
 
@@ -11052,7 +11052,7 @@ static int llama_decode_internal(
 // return positive int on warning
 // return negative int on error
 //
-static int llama_encode_internal(
+static int llama_encode_impl(
          llama_context & lctx,
            llama_batch   inp_batch) {
 
@@ -11234,7 +11234,7 @@ static int llama_encode_internal(
 }
 
 // find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
-static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
+static void llama_kv_cache_defrag_impl(struct llama_context & lctx) {
     auto & kv_self = lctx.kv_self;
 
     const auto & hparams = lctx.model.hparams;
@@ -11454,7 +11454,7 @@ static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
     //LLAMA_LOG_INFO("(tmp log) KV defrag time: %.3f ms\n", (t_end - t_start)/1000.0);
 }
 
-static void llama_kv_cache_update_internal(struct llama_context & lctx) {
+static void llama_kv_cache_update_impl(struct llama_context & lctx) {
     bool need_reserve = false;
 
     if (lctx.kv_self.has_shift) {
@@ -11490,7 +11490,7 @@ static void llama_kv_cache_update_internal(struct llama_context & lctx) {
 
     // defragment the KV cache if needed
     if (lctx.kv_self.do_defrag) {
-        llama_kv_cache_defrag_internal(lctx);
+        llama_kv_cache_defrag_impl(lctx);
 
         need_reserve = true;
 
@@ -12191,7 +12191,7 @@ void llama_kv_cache_defrag(struct llama_context * ctx) {
 }
 
 void llama_kv_cache_update(struct llama_context * ctx) {
-    llama_kv_cache_update_internal(*ctx);
+    llama_kv_cache_update_impl(*ctx);
 }
 
 bool llama_kv_cache_can_shift(struct llama_context * ctx) {
@@ -12203,7 +12203,7 @@ bool llama_kv_cache_can_shift(struct llama_context * ctx) {
 int32_t llama_encode(
         struct llama_context * ctx,
           struct llama_batch   batch) {
-    const int ret = llama_encode_internal(*ctx, batch);
+    const int ret = llama_encode_impl(*ctx, batch);
     if (ret != 0) {
         LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret);
     }
@@ -12214,7 +12214,7 @@ int32_t llama_encode(
 int32_t llama_decode(
         struct llama_context * ctx,
           struct llama_batch   batch) {
-    const int ret = llama_decode_internal(*ctx, batch);
+    const int ret = llama_decode_impl(*ctx, batch);
     if (ret != 0) {
         LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
     }

From 727368c60f2ebf2d6a7473a4a9f80957ab063a8e Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:52:15 +0200
Subject: [PATCH 023/279] llama : use LLAMA_TOKEN_NULL (#11062)

ggml-ci
---
 common/common.cpp                             |  2 +-
 common/ngram-cache.cpp                        | 24 +++++++-------
 common/ngram-cache.h                          |  4 +--
 examples/batched/batched.cpp                  |  2 +-
 .../convert-llama2c-to-ggml.cpp               |  4 +--
 examples/main/main.cpp                        |  4 +--
 examples/server/utils.hpp                     |  2 +-
 include/llama.h                               |  1 -
 src/llama-model.cpp                           | 32 +++++++++----------
 src/llama-sampling.cpp                        |  8 ++---
 src/llama-vocab.cpp                           | 24 +++++++-------
 11 files changed, 53 insertions(+), 54 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 4bb140ee2..d6a7ab753 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -982,7 +982,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         if (llama_model_has_encoder(model)) {
             llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
             llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
-            if (decoder_start_token_id == -1) {
+            if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
                 decoder_start_token_id = bos;
             }
             tmp.clear();
diff --git a/common/ngram-cache.cpp b/common/ngram-cache.cpp
index a9dfb6714..a057ae45f 100644
--- a/common/ngram-cache.cpp
+++ b/common/ngram-cache.cpp
@@ -65,13 +65,13 @@ constexpr int     draft_min_percent_strict[LLAMA_NGRAM_MAX] = {75, 66, 66, 66};
 static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram ngram_static) {
     common_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
     if (part_static_it == nc_static.end()) {
-        return -1;
+        return LLAMA_TOKEN_NULL;
     }
     const common_ngram_cache_part part_static = part_static_it->second;
 
     int max_count_static  = 0;
     int sum_count_static  = 0;
-    llama_token max_token = -1;
+    llama_token max_token = LLAMA_TOKEN_NULL;
 
     for (std::pair token_count_static : part_static) {
         const llama_token token = token_count_static.first;
@@ -85,10 +85,10 @@ static llama_token try_draft(common_ngram_cache & nc_static, const common_ngram
     }
 
     if (sum_count_static < draft_min_sample_size_lax[LLAMA_NGRAM_STATIC-1]) {
-        return -1;
+        return LLAMA_TOKEN_NULL;
     }
     if (100*max_count_static < draft_min_percent_lax[LLAMA_NGRAM_STATIC-1]*sum_count_static) {
-        return -1;
+        return LLAMA_TOKEN_NULL;
     }
     return max_token;
 }
@@ -98,9 +98,9 @@ static llama_token try_draft(
     common_ngram_cache & nc_primary, const std::vector & ngrams_primary, common_ngram_cache_part & part_static,
     const int * min_sample_size, const int * min_percent) {
 
-    llama_token drafted_token = -1;
+    llama_token drafted_token = LLAMA_TOKEN_NULL;
 
-    for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == -1; --i) {
+    for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == LLAMA_TOKEN_NULL; --i) {
         const common_ngram ngram_primary = ngrams_primary[i];
 
         common_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
@@ -112,7 +112,7 @@ static llama_token try_draft(
         int max_count_primary = 0;
         int max_count_static  = 0;
         int sum_count_primary = 0;
-        llama_token max_token = -1;
+        llama_token max_token = LLAMA_TOKEN_NULL;
 
         for (std::pair token_count_primary : part_primary) {
             const llama_token token = token_count_primary.first;
@@ -154,7 +154,7 @@ void common_ngram_cache_draft(
     }
 
     while ((int) draft.size()-1 < n_draft) {
-        llama_token drafted_token = -1;
+        llama_token drafted_token = LLAMA_TOKEN_NULL;
 
         const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + draft.size()-1;
         common_ngram ngram_static;
@@ -177,17 +177,17 @@ void common_ngram_cache_draft(
             }
             ngrams_cd.push_back(ngram_cd);
         }
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             drafted_token = try_draft(nc_context, ngrams_cd, part_static, draft_min_sample_size_lax, draft_min_percent_lax);
         }
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             drafted_token = try_draft(nc_dynamic, ngrams_cd, part_static, draft_min_sample_size_strict, draft_min_percent_strict);
         }
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             drafted_token = try_draft(nc_static, ngram_static);
         }
 
-        if (drafted_token == -1) {
+        if (drafted_token == LLAMA_TOKEN_NULL) {
             break;
         }
 
diff --git a/common/ngram-cache.h b/common/ngram-cache.h
index 09c2b0319..dfe012abe 100644
--- a/common/ngram-cache.h
+++ b/common/ngram-cache.h
@@ -17,13 +17,13 @@ struct common_ngram {
 
     common_ngram() {
         for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
-            tokens[i] = -1;
+            tokens[i] = LLAMA_TOKEN_NULL;
         }
     }
 
     common_ngram(const llama_token * input, const int ngram_size) {
         for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
-            tokens[i] = i < ngram_size ? input[i] : -1;
+            tokens[i] = i < ngram_size ? input[i] : LLAMA_TOKEN_NULL;
         }
     }
 
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index e2e01f2d5..2e25b62f6 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -120,7 +120,7 @@ int main(int argc, char ** argv) {
         }
 
         llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
-        if (decoder_start_token_id == -1) {
+        if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
             decoder_start_token_id = llama_token_bos(model);
         }
 
diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
index 736035d78..9c3a0c367 100644
--- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
+++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -689,8 +689,8 @@ static void save_as_llama_model(
     gguf_set_val_u32(ctx, KV_TOKENIZER_UNK_ID, UNKNOWN_TOKEN_ID);
     gguf_set_val_u32(ctx, KV_TOKENIZER_BOS_ID, BOS_TOKEN_ID);
     gguf_set_val_u32(ctx, KV_TOKENIZER_EOS_ID, EOS_TOKEN_ID);
-    gguf_set_val_u32(ctx, KV_TOKENIZER_SEP_ID, -1);
-    gguf_set_val_u32(ctx, KV_TOKENIZER_PAD_ID, -1);
+    gguf_set_val_u32(ctx, KV_TOKENIZER_SEP_ID, LLAMA_TOKEN_NULL);
+    gguf_set_val_u32(ctx, KV_TOKENIZER_PAD_ID, LLAMA_TOKEN_NULL);
 
     gguf_set_val_u32(ctx, KV_CONTEXT_LENGTH, model->hparams.n_ctx);
     gguf_set_val_u32(ctx, KV_EMBEDDING_LENGTH, model->hparams.n_embd);
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index b5e477f5b..aaee47e32 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -494,7 +494,7 @@ int main(int argc, char ** argv) {
         }
 
         llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
-        if (decoder_start_token_id == -1) {
+        if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
             decoder_start_token_id = llama_token_bos(model);
         }
 
@@ -831,7 +831,7 @@ int main(int argc, char ** argv) {
                     // if user stop generation mid-way, we must add EOT to finish model's last response
                     if (need_insert_eot && format_chat) {
                         llama_token eot = llama_token_eot(model);
-                        embd_inp.push_back(eot == -1 ? llama_token_eos(model) : eot);
+                        embd_inp.push_back(eot == LLAMA_TOKEN_NULL ? llama_token_eos(model) : eot);
                         need_insert_eot = false;
                     }
 
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index dc6e6e67e..ad130d490 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -507,7 +507,7 @@ static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
 
 // format incomplete utf-8 multibyte character for output
 static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
-    std::string out = token == -1 ? "" : common_token_to_piece(ctx, token);
+    std::string out = token == LLAMA_TOKEN_NULL ? "" : common_token_to_piece(ctx, token);
 
     // if the size is 1 and first bit is 1, meaning it's a partial character
     //   (size > 1 meaning it's already a known token)
diff --git a/include/llama.h b/include/llama.h
index a0d5ba5dd..0f619aa19 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -34,7 +34,6 @@
 
 #define LLAMA_DEFAULT_SEED 0xFFFFFFFF
 
-// TODO: use everywhere in the implementation
 #define LLAMA_TOKEN_NULL -1
 
 #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 405e0528f..22596499a 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -1923,24 +1923,24 @@ void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
     LLAMA_LOG_INFO("%s: general.name     = %s\n",    __func__, model.name.c_str());
 
     // special tokens
-    if (vocab.special_bos_id  != -1)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,     vocab.id_to_token[vocab.special_bos_id].text.c_str() );  }
-    if (vocab.special_eos_id  != -1)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,     vocab.id_to_token[vocab.special_eos_id].text.c_str() );  }
-    if (vocab.special_eot_id  != -1)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,     vocab.id_to_token[vocab.special_eot_id].text.c_str() );  }
-    if (vocab.special_eom_id  != -1)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, vocab.special_eom_id,     vocab.id_to_token[vocab.special_eom_id].text.c_str() );  }
-    if (vocab.special_unk_id  != -1)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,     vocab.id_to_token[vocab.special_unk_id].text.c_str() );  }
-    if (vocab.special_sep_id  != -1)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,     vocab.id_to_token[vocab.special_sep_id].text.c_str() );  }
-    if (vocab.special_pad_id  != -1)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,     vocab.id_to_token[vocab.special_pad_id].text.c_str() );  }
-    if (vocab.special_cls_id  != -1)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,     vocab.id_to_token[vocab.special_cls_id].text.c_str() );  }
-    if (vocab.special_mask_id != -1)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,    vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
+    if (vocab.special_bos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,     vocab.id_to_token[vocab.special_bos_id].text.c_str() );  }
+    if (vocab.special_eos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,     vocab.id_to_token[vocab.special_eos_id].text.c_str() );  }
+    if (vocab.special_eot_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,     vocab.id_to_token[vocab.special_eot_id].text.c_str() );  }
+    if (vocab.special_eom_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, vocab.special_eom_id,     vocab.id_to_token[vocab.special_eom_id].text.c_str() );  }
+    if (vocab.special_unk_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,     vocab.id_to_token[vocab.special_unk_id].text.c_str() );  }
+    if (vocab.special_sep_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,     vocab.id_to_token[vocab.special_sep_id].text.c_str() );  }
+    if (vocab.special_pad_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,     vocab.id_to_token[vocab.special_pad_id].text.c_str() );  }
+    if (vocab.special_cls_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,     vocab.id_to_token[vocab.special_cls_id].text.c_str() );  }
+    if (vocab.special_mask_id != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,    vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
 
-    if (vocab.linefeed_id != -1)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,        vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
+    if (vocab.linefeed_id != LLAMA_TOKEN_NULL)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,        vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
 
-    if (vocab.special_fim_pre_id != -1) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
-    if (vocab.special_fim_suf_id != -1) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
-    if (vocab.special_fim_mid_id != -1) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
-    if (vocab.special_fim_pad_id != -1) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
-    if (vocab.special_fim_rep_id != -1) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
-    if (vocab.special_fim_sep_id != -1) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
+    if (vocab.special_fim_pre_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
+    if (vocab.special_fim_suf_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
+    if (vocab.special_fim_mid_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
+    if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
+    if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
+    if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
 
     for (const auto & id : vocab.special_eog_ids) {
         LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, vocab.id_to_token[id].text.c_str() );
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index 69cea2f14..ef5a576cc 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -257,7 +257,7 @@ static void llama_sampler_top_k_impl(llama_token_data_array * cur_p, int32_t k)
             for (int i = 0; i < (int)cur_p->size; ++i) {
                 const float val = cur_p->data[i].logit;
                 int ib = int(bucket_scale * val + bucket_inter); //nbuckets * (val - bucket_low) / (bucket_high - bucket_low);
-                ib = std::max(0, std::min(nbuckets-1, ib));
+                ib = std::max(0, std::min(nbuckets - 1, ib));
                 bucket_idx[i] = ib;
                 ++histo[ib];
             }
@@ -280,13 +280,13 @@ static void llama_sampler_top_k_impl(llama_token_data_array * cur_p, int32_t k)
             for (int i = 0; i < (int)cur_p->size; ++i) {
                 int j = bucket_idx[i];
                 if (j >= ib) {
-                    *bucket_ptrs[nbuckets-1-j]++ = cur_p->data[i];
+                    *bucket_ptrs[nbuckets - 1 - j]++ = cur_p->data[i];
                 }
             }
 
             ptr = tmp_tokens.data();
             int ndone = 0;
-            for (int j = nbuckets-1; j > ib; --j) {
+            for (int j = nbuckets - 1; j > ib; --j) {
                 std::sort(ptr, ptr + histo[j], comp);
                 ptr += histo[j];
                 ndone += histo[j];
@@ -1832,7 +1832,7 @@ static void llama_sampler_dry_apply(struct llama_sampler * smpl, llama_token_dat
                 ctx->dry_repeat_count[last - k] = std::min(n, rep_limit);
                 if (n > 0) {
                     lt = k;
-                    rt = k+n-1;
+                    rt = k + n - 1;
                 }
             } else {
                 // If k is inside the current Z-box, consider two cases.
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 3fcfcaa3f..a4c015484 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -497,7 +497,7 @@ struct llm_tokenizer_bpe_session {
 
     bool append_bos(std::vector & output) const {
         if (vocab.tokenizer_add_bos) {
-            GGML_ASSERT(vocab.special_bos_id != -1);
+            GGML_ASSERT(vocab.special_bos_id != LLAMA_TOKEN_NULL);
             output.push_back(vocab.special_bos_id);
             return true;
         }
@@ -506,7 +506,7 @@ struct llm_tokenizer_bpe_session {
 
     bool append_eos(std::vector & output) const {
         if (vocab.tokenizer_add_eos) {
-            GGML_ASSERT(vocab.special_eos_id != -1);
+            GGML_ASSERT(vocab.special_eos_id != LLAMA_TOKEN_NULL);
             output.push_back(vocab.special_eos_id);
             return true;
         }
@@ -1403,7 +1403,7 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                         if (source == 0) {
                             buffer.erase_after(buffer.before_begin());
                         } else {
-                            buffer.erase_after(std::next(buffer.begin(), (source-1)));
+                            buffer.erase_after(std::next(buffer.begin(), (source - 1)));
                         }
 
                         // repeat for the right side
@@ -1417,7 +1417,7 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                         if (source == 0) {
                             buffer.erase_after(buffer.before_begin());
                         } else {
-                            buffer.erase_after(std::next(buffer.begin(), (source-1)));
+                            buffer.erase_after(std::next(buffer.begin(), (source - 1)));
                         }
                         break;
                     }
@@ -1454,7 +1454,7 @@ std::vector llama_tokenize_internal(
                 bool is_prev_special = true;  // prefix with space if first token
 
                 if (add_special && vocab.tokenizer_add_bos) {
-                    GGML_ASSERT(vocab.special_bos_id != -1);
+                    GGML_ASSERT(vocab.special_bos_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_bos_id);
                     is_prev_special = true;
                 }
@@ -1489,7 +1489,7 @@ std::vector llama_tokenize_internal(
                 }
 
                 if (add_special && vocab.tokenizer_add_eos) {
-                    GGML_ASSERT(vocab.special_eos_id != -1);
+                    GGML_ASSERT(vocab.special_eos_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_eos_id);
                 }
             } break;
@@ -1522,7 +1522,7 @@ std::vector llama_tokenize_internal(
         case LLAMA_VOCAB_TYPE_WPM:
             {
                 if (add_special) {
-                    GGML_ASSERT(vocab.special_cls_id != -1);
+                    GGML_ASSERT(vocab.special_cls_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_cls_id);
                 }
 
@@ -1542,14 +1542,14 @@ std::vector llama_tokenize_internal(
                 }
 
                 if (add_special) {
-                    GGML_ASSERT(vocab.special_sep_id != -1);
+                    GGML_ASSERT(vocab.special_sep_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_sep_id);
                 }
             } break;
         case LLAMA_VOCAB_TYPE_UGM:
             {
                 if (add_special && vocab.tokenizer_add_bos) {
-                    GGML_ASSERT(vocab.special_bos_id != -1);
+                    GGML_ASSERT(vocab.special_bos_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_bos_id);
                 }
                 llm_tokenizer_ugm_session session(vocab);
@@ -1574,7 +1574,7 @@ std::vector llama_tokenize_internal(
                 }
 
                 if (add_special && vocab.tokenizer_add_eos) {
-                    GGML_ASSERT(vocab.special_eos_id != -1);
+                    GGML_ASSERT(vocab.special_eos_id != LLAMA_TOKEN_NULL);
                     output.push_back(vocab.special_eos_id);
                 }
             } break;
@@ -1642,7 +1642,7 @@ llama_token_attr llama_token_get_attr_impl(const struct llama_vocab & vocab, lla
 }
 
 bool llama_token_is_eog_impl(const struct llama_vocab & vocab, llama_token token) {
-    return token != -1 && vocab.special_eog_ids.count(token) > 0;
+    return token != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(token) > 0;
 }
 
 bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token token) {
@@ -1881,7 +1881,7 @@ int32_t llama_detokenize_impl(
     }
 
     if (remove_special && vocab.tokenizer_add_eos) {
-        if (n_tokens > 0 && tokens[n_tokens-1] == vocab.special_eos_id) {
+        if (n_tokens > 0 && tokens[n_tokens - 1] == vocab.special_eos_id) {
             n_tokens--;
         }
     }

From ae2f606bb598b287f5fb69c9fdfc98b86598c6cc Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:52:38 +0200
Subject: [PATCH 024/279] mmap : fix fileno macro clash (#11076)

* mmap : fix fileno macro clash

ggml-ci

* cont

ggml-ci
---
 src/llama-mmap.cpp | 10 +++++++---
 src/llama-mmap.h   |  2 +-
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp
index a99326335..a8cb9439b 100644
--- a/src/llama-mmap.cpp
+++ b/src/llama-mmap.cpp
@@ -241,12 +241,16 @@ llama_file::~llama_file() = default;
 size_t llama_file::tell() const { return pimpl->tell(); }
 size_t llama_file::size() const { return pimpl->size; }
 
-int llama_file::fileno() const {
+int llama_file::file_id() const {
 #ifdef _WIN32
     return _fileno(pimpl->fp);
+#else
+#if defined(fileno)
+    return fileno(pimpl->fp);
 #else
     return ::fileno(pimpl->fp);
 #endif
+#endif
 }
 
 void llama_file::seek(size_t offset, int whence) const { pimpl->seek(offset, whence); }
@@ -265,7 +269,7 @@ struct llama_mmap::impl {
 
     impl(struct llama_file * file, size_t prefetch, bool numa) {
         size = file->size();
-        int fd = file->fileno();
+        int fd = file->file_id();
         int flags = MAP_SHARED;
         if (numa) { prefetch = 0; }
 #ifdef __linux__
@@ -357,7 +361,7 @@ struct llama_mmap::impl {
 
         size = file->size();
 
-        HANDLE hFile = (HANDLE) _get_osfhandle(file->fileno());
+        HANDLE hFile = (HANDLE) _get_osfhandle(file->file_id());
 
         HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
 
diff --git a/src/llama-mmap.h b/src/llama-mmap.h
index 6bcddee8c..1da9ecb6b 100644
--- a/src/llama-mmap.h
+++ b/src/llama-mmap.h
@@ -18,7 +18,7 @@ struct llama_file {
     size_t tell() const;
     size_t size() const;
 
-    int fileno() const;
+    int file_id() const; // fileno overload
 
     void seek(size_t offset, int whence) const;
 

From 3e6e7a6bc2c4b980a0cf0fcb5cb3b79a965b5f14 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:54:25 +0200
Subject: [PATCH 025/279] tokenize : escape the prompt (#11058)

* tokenize : escape the prompt

* tokenize : update help
---
 examples/tokenize/tokenize.cpp | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp
index c97e22724..57d9d4312 100644
--- a/examples/tokenize/tokenize.cpp
+++ b/examples/tokenize/tokenize.cpp
@@ -31,6 +31,7 @@ static void print_usage_information(const char * argv0) {
     printf("    -p PROMPT, --prompt PROMPT           read prompt from the argument.\n");
     printf("    --stdin                              read prompt from standard input.\n");
     printf("    --no-bos                             do not ever add a BOS token to the prompt, even if normally the model uses a BOS token.\n");
+    printf("    --no-escape                          do not escape input (such as \\n, \\t, etc.).\n");
     printf("    --no-parse-special                   do not parse control tokens.\n");
     printf("    --log-disable                        disable logs. Makes stderr quiet when loading the model.\n");
     printf("    --show-count                         print the total number of tokens.\n");
@@ -198,6 +199,7 @@ int main(int raw_argc, char ** raw_argv) {
     // variables where to put any arguments we see.
     bool printing_ids = false;
     bool no_bos = false;
+    bool no_escape = false;
     bool no_parse_special = false;
     bool disable_logging = false;
     bool show_token_count = false;
@@ -233,6 +235,9 @@ int main(int raw_argc, char ** raw_argv) {
         else if (arg == "--no-bos") {
             no_bos = true;
         }
+        else if (arg == "--no-escape") {
+            no_escape = true;
+        }
         else if (arg == "--no-parse-special") {
             no_parse_special = true;
         }
@@ -363,6 +368,11 @@ int main(int raw_argc, char ** raw_argv) {
     const bool model_wants_add_bos = llama_add_bos_token(model);
     const bool add_bos = model_wants_add_bos && !no_bos;
     const bool parse_special = !no_parse_special;
+    const bool escape = !no_escape;
+
+    if (escape) {
+        string_process_escapes(prompt);
+    }
 
     std::vector tokens;
     tokens = common_tokenize(model, prompt, add_bos, parse_special);

From 47182dd03fe04a4ffda5d7f4c8a109ae0056cf56 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 10:55:18 +0200
Subject: [PATCH 026/279] llama : update llama_model API names (#11063)

* llama : deprecate llama_free_model, add llama_model_free

ggml-ci

* llama : change `llama_load_model_from_file` -> `llama_model_load_from_file`

ggml-ci
---
 common/common.cpp                          | 14 +++++++-------
 examples/batched-bench/batched-bench.cpp   |  4 ++--
 examples/batched/batched.cpp               |  4 ++--
 examples/gritlm/gritlm.cpp                 |  4 ++--
 examples/llama-bench/llama-bench.cpp       |  8 ++++----
 examples/llava/llava-cli.cpp               |  6 +++---
 examples/llava/minicpmv-cli.cpp            |  4 ++--
 examples/llava/qwen2vl-cli.cpp             |  6 +++---
 examples/passkey/passkey.cpp               |  4 ++--
 examples/quantize-stats/quantize-stats.cpp |  8 ++++----
 examples/run/run.cpp                       |  2 +-
 examples/simple-chat/simple-chat.cpp       |  4 ++--
 examples/simple/simple.cpp                 |  4 ++--
 examples/tokenize/tokenize.cpp             |  4 ++--
 include/llama-cpp.h                        |  2 +-
 include/llama.h                            | 13 ++++++++++---
 src/llama-model.cpp                        |  4 ++++
 src/llama.cpp                              | 16 +++++++++++-----
 tests/test-autorelease.cpp                 |  4 ++--
 tests/test-model-load-cancel.cpp           |  2 +-
 tests/test-tokenizer-0.cpp                 |  6 +++---
 tests/test-tokenizer-1-bpe.cpp             |  6 +++---
 tests/test-tokenizer-1-spm.cpp             |  6 +++---
 23 files changed, 76 insertions(+), 59 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index d6a7ab753..4fd36105e 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -846,7 +846,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     } else if (!params.model_url.empty()) {
         model = common_load_model_from_url(params.model_url, params.model, params.hf_token, mparams);
     } else {
-        model = llama_load_model_from_file(params.model.c_str(), mparams);
+        model = llama_model_load_from_file(params.model.c_str(), mparams);
     }
 
     if (model == NULL) {
@@ -873,7 +873,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         }
 
         if (!ok) {
-            llama_free_model(model);
+            llama_model_free(model);
 
             return iparams;
         }
@@ -884,7 +884,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     llama_context * lctx = llama_new_context_with_model(model, cparams);
     if (lctx == NULL) {
         LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.c_str());
-        llama_free_model(model);
+        llama_model_free(model);
         return iparams;
     }
 
@@ -900,7 +900,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         const auto cvec = common_control_vector_load(params.control_vectors);
         if (cvec.n_embd == -1) {
             llama_free(lctx);
-            llama_free_model(model);
+            llama_model_free(model);
 
             return iparams;
         }
@@ -913,7 +913,7 @@ struct common_init_result common_init_from_params(common_params & params) {
                                              params.control_vector_layer_end);
         if (err) {
             llama_free(lctx);
-            llama_free_model(model);
+            llama_model_free(model);
 
             return iparams;
         }
@@ -926,7 +926,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         if (lora == nullptr) {
             LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str());
             llama_free(lctx);
-            llama_free_model(model);
+            llama_model_free(model);
             return iparams;
         }
 
@@ -1411,7 +1411,7 @@ struct llama_model * common_load_model_from_url(
         }
     }
 
-    return llama_load_model_from_file(local_path.c_str(), params);
+    return llama_model_load_from_file(local_path.c_str(), params);
 }
 
 struct llama_model * common_load_model_from_hf(
diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp
index a3b21ad6b..dd75ff9f1 100644
--- a/examples/batched-bench/batched-bench.cpp
+++ b/examples/batched-bench/batched-bench.cpp
@@ -38,7 +38,7 @@ int main(int argc, char ** argv) {
 
     llama_model_params model_params = common_model_params_to_llama(params);
 
-    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params.model.c_str(), model_params);
 
     if (model == NULL) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
@@ -194,7 +194,7 @@ int main(int argc, char ** argv) {
     llama_batch_free(batch);
 
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     llama_backend_free();
 
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index 2e25b62f6..d34b03099 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -41,7 +41,7 @@ int main(int argc, char ** argv) {
 
     llama_model_params model_params = common_model_params_to_llama(params);
 
-    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params.model.c_str(), model_params);
 
     if (model == NULL) {
         LOG_ERR("%s: error: unable to load model\n" , __func__);
@@ -236,7 +236,7 @@ int main(int argc, char ** argv) {
 
     llama_sampler_free(smpl);
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     llama_backend_free();
 
diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp
index 18a945b33..4d2db5624 100644
--- a/examples/gritlm/gritlm.cpp
+++ b/examples/gritlm/gritlm.cpp
@@ -165,7 +165,7 @@ int main(int argc, char * argv[]) {
 
     llama_backend_init();
 
-    llama_model * model = llama_load_model_from_file(params.model.c_str(), mparams);
+    llama_model * model = llama_model_load_from_file(params.model.c_str(), mparams);
 
     // create generation context
     llama_context * ctx = llama_new_context_with_model(model, cparams);
@@ -219,7 +219,7 @@ int main(int argc, char * argv[]) {
 
     llama_sampler_free(smpl);
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
     llama_backend_free();
 
     return 0;
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 2338ad106..2a0916766 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -1526,10 +1526,10 @@ int main(int argc, char ** argv) {
         // keep the same model between tests when possible
         if (!lmodel || !prev_inst || !inst.equal_mparams(*prev_inst)) {
             if (lmodel) {
-                llama_free_model(lmodel);
+                llama_model_free(lmodel);
             }
 
-            lmodel = llama_load_model_from_file(inst.model.c_str(), inst.to_llama_mparams());
+            lmodel = llama_model_load_from_file(inst.model.c_str(), inst.to_llama_mparams());
             if (lmodel == NULL) {
                 fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, inst.model.c_str());
                 return 1;
@@ -1540,7 +1540,7 @@ int main(int argc, char ** argv) {
         llama_context * ctx = llama_new_context_with_model(lmodel, inst.to_llama_cparams());
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, inst.model.c_str());
-            llama_free_model(lmodel);
+            llama_model_free(lmodel);
             return 1;
         }
 
@@ -1626,7 +1626,7 @@ int main(int argc, char ** argv) {
         ggml_threadpool_free_fn(threadpool);
     }
 
-    llama_free_model(lmodel);
+    llama_model_free(lmodel);
 
     if (p) {
         p->print_footer();
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index 2691c6e6b..27215a42e 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -221,7 +221,7 @@ static struct llama_model * llava_init(common_params * params) {
 
     llama_model_params model_params = common_model_params_to_llama(*params);
 
-    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params->model.c_str(), model_params);
     if (model == NULL) {
         LOG_ERR("%s: unable to load model\n" , __func__);
         return NULL;
@@ -265,7 +265,7 @@ static void llava_free(struct llava_context * ctx_llava) {
     }
 
     llama_free(ctx_llava->ctx_llama);
-    llama_free_model(ctx_llava->model);
+    llama_model_free(ctx_llava->model);
     llama_backend_free();
 }
 
@@ -323,7 +323,7 @@ int main(int argc, char ** argv) {
         }
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/examples/llava/minicpmv-cli.cpp b/examples/llava/minicpmv-cli.cpp
index e9cbb51ed..2342bdd09 100644
--- a/examples/llava/minicpmv-cli.cpp
+++ b/examples/llava/minicpmv-cli.cpp
@@ -31,7 +31,7 @@ static struct llama_model * llava_init(common_params * params) {
 
     llama_model_params model_params = common_model_params_to_llama(*params);
 
-    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params->model.c_str(), model_params);
     if (model == NULL) {
         LOG_ERR("%s: unable to load model\n" , __func__);
         return NULL;
@@ -75,7 +75,7 @@ static void llava_free(struct llava_context * ctx_llava) {
     }
 
     llama_free(ctx_llava->ctx_llama);
-    llama_free_model(ctx_llava->model);
+    llama_model_free(ctx_llava->model);
     llama_backend_free();
 }
 
diff --git a/examples/llava/qwen2vl-cli.cpp b/examples/llava/qwen2vl-cli.cpp
index e86a60280..f3e5d66e2 100644
--- a/examples/llava/qwen2vl-cli.cpp
+++ b/examples/llava/qwen2vl-cli.cpp
@@ -310,7 +310,7 @@ static struct llama_model * llava_init(common_params * params) {
 
     llama_model_params model_params = common_model_params_to_llama(*params);
 
-    llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params->model.c_str(), model_params);
     if (model == NULL) {
         LOG_ERR("%s: unable to load model\n" , __func__);
         return NULL;
@@ -354,7 +354,7 @@ static void llava_free(struct llava_context * ctx_llava) {
     }
 
     llama_free(ctx_llava->ctx_llama);
-    llama_free_model(ctx_llava->model);
+    llama_model_free(ctx_llava->model);
     llama_backend_free();
 }
 
@@ -575,7 +575,7 @@ int main(int argc, char ** argv) {
         }
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/examples/passkey/passkey.cpp b/examples/passkey/passkey.cpp
index 09bba708f..ea91f376c 100644
--- a/examples/passkey/passkey.cpp
+++ b/examples/passkey/passkey.cpp
@@ -63,7 +63,7 @@ int main(int argc, char ** argv) {
 
     llama_model_params model_params = common_model_params_to_llama(params);
 
-    llama_model * model = llama_load_model_from_file(params.model.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(params.model.c_str(), model_params);
 
     if (model == NULL) {
         LOG_ERR("%s: unable to load model\n" , __func__);
@@ -266,7 +266,7 @@ int main(int argc, char ** argv) {
     llama_batch_free(batch);
 
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     llama_backend_free();
 
diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp
index ab91d0b40..9bfbb8862 100644
--- a/examples/quantize-stats/quantize-stats.cpp
+++ b/examples/quantize-stats/quantize-stats.cpp
@@ -309,7 +309,7 @@ int main(int argc, char ** argv) {
         auto mparams = llama_model_default_params();
         mparams.use_mlock  = false;
 
-        model = llama_load_model_from_file(params.model.c_str(), mparams);
+        model = llama_model_load_from_file(params.model.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
@@ -323,7 +323,7 @@ int main(int argc, char ** argv) {
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, params.model.c_str());
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
     }
@@ -347,7 +347,7 @@ int main(int argc, char ** argv) {
             fprintf(stderr, "%s: error: Quantization should be tested with a float model, "
                 "this model contains already quantized layers (%s is type %d)\n", __func__, kv_tensor.first.c_str(), kv_tensor.second->type);
             llama_free(ctx);
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
         included_layers++;
@@ -409,7 +409,7 @@ int main(int argc, char ** argv) {
 
 
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
     // report timing
     {
         const int64_t t_main_end_us = ggml_time_us();
diff --git a/examples/run/run.cpp b/examples/run/run.cpp
index 75b817272..c52a7961f 100644
--- a/examples/run/run.cpp
+++ b/examples/run/run.cpp
@@ -664,7 +664,7 @@ class LlamaData {
             "\r%*s"
             "\rLoading model",
             get_terminal_width(), " ");
-        llama_model_ptr model(llama_load_model_from_file(opt.model_.c_str(), opt.model_params));
+        llama_model_ptr model(llama_model_load_from_file(opt.model_.c_str(), opt.model_params));
         if (!model) {
             printe("%s: error: unable to load model from file: %s\n", __func__, opt.model_.c_str());
         }
diff --git a/examples/simple-chat/simple-chat.cpp b/examples/simple-chat/simple-chat.cpp
index 7f4da666b..d72f5bcdd 100644
--- a/examples/simple-chat/simple-chat.cpp
+++ b/examples/simple-chat/simple-chat.cpp
@@ -69,7 +69,7 @@ int main(int argc, char ** argv) {
     llama_model_params model_params = llama_model_default_params();
     model_params.n_gpu_layers = ngl;
 
-    llama_model * model = llama_load_model_from_file(model_path.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(model_path.c_str(), model_params);
     if (!model) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
         return 1;
@@ -194,7 +194,7 @@ int main(int argc, char ** argv) {
     }
     llama_sampler_free(smpl);
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp
index 3288c0250..f69117890 100644
--- a/examples/simple/simple.cpp
+++ b/examples/simple/simple.cpp
@@ -83,7 +83,7 @@ int main(int argc, char ** argv) {
     llama_model_params model_params = llama_model_default_params();
     model_params.n_gpu_layers = ngl;
 
-    llama_model * model = llama_load_model_from_file(model_path.c_str(), model_params);
+    llama_model * model = llama_model_load_from_file(model_path.c_str(), model_params);
 
     if (model == NULL) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
@@ -199,7 +199,7 @@ int main(int argc, char ** argv) {
 
     llama_sampler_free(smpl);
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp
index 57d9d4312..684ca054a 100644
--- a/examples/tokenize/tokenize.cpp
+++ b/examples/tokenize/tokenize.cpp
@@ -338,7 +338,7 @@ int main(int raw_argc, char ** raw_argv) {
 
     llama_model_params model_params = llama_model_default_params();
     model_params.vocab_only = true;
-    llama_model * model = llama_load_model_from_file(model_path, model_params);
+    llama_model * model = llama_model_load_from_file(model_path, model_params);
     if (!model) {
         fprintf(stderr, "Error: could not load model from file '%s'.\n", model_path);
         return 1;
@@ -408,7 +408,7 @@ int main(int raw_argc, char ** raw_argv) {
     }
     // silence valgrind
     llama_free(ctx);
-    llama_free_model(model);
+    llama_model_free(model);
 
     return 0;
 }
diff --git a/include/llama-cpp.h b/include/llama-cpp.h
index 1500cb2fc..11306b17f 100644
--- a/include/llama-cpp.h
+++ b/include/llama-cpp.h
@@ -9,7 +9,7 @@
 #include "llama.h"
 
 struct llama_model_deleter {
-    void operator()(llama_model * model) { llama_free_model(model); }
+    void operator()(llama_model * model) { llama_model_free(model); }
 };
 
 struct llama_context_deleter {
diff --git a/include/llama.h b/include/llama.h
index 0f619aa19..0295a51fb 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -413,12 +413,19 @@ extern "C" {
     // Call once at the end of the program - currently only used for MPI
     LLAMA_API void llama_backend_free(void);
 
-    LLAMA_API struct llama_model * llama_load_model_from_file(
+    DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file(
+                             const char * path_model,
+              struct llama_model_params   params),
+            "use llama_model_load_from_file instead");
+
+    LLAMA_API struct llama_model * llama_model_load_from_file(
                              const char * path_model,
               struct llama_model_params   params);
 
-    // TODO: rename to llama_model_free
-    LLAMA_API void llama_free_model(struct llama_model * model);
+    DEPRECATED(LLAMA_API void llama_free_model(struct llama_model * model),
+            "use llama_model_free instead");
+
+    LLAMA_API void llama_model_free(struct llama_model * model);
 
     // TODO: rename to llama_init_from_model
     LLAMA_API struct llama_context * llama_new_context_with_model(
diff --git a/src/llama-model.cpp b/src/llama-model.cpp
index 22596499a..7deb3683b 100644
--- a/src/llama-model.cpp
+++ b/src/llama-model.cpp
@@ -2009,6 +2009,10 @@ struct llama_model_params llama_model_default_params() {
 }
 
 void llama_free_model(struct llama_model * model) {
+    llama_model_free(model);
+}
+
+void llama_model_free(struct llama_model * model) {
     delete model;
 }
 
diff --git a/src/llama.cpp b/src/llama.cpp
index 4a6798f41..7337c34ce 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -11656,6 +11656,12 @@ int64_t llama_time_us(void) {
 struct llama_model * llama_load_model_from_file(
         const char * path_model,
         struct llama_model_params params) {
+    return llama_model_load_from_file(path_model, params);
+}
+
+struct llama_model * llama_model_load_from_file(
+        const char * path_model,
+        struct llama_model_params params) {
     ggml_time_init();
 
     llama_model * model = new llama_model;
@@ -11694,7 +11700,7 @@ struct llama_model * llama_load_model_from_file(
         ggml_backend_reg_t rpc_reg = ggml_backend_reg_by_name("RPC");
         if (!rpc_reg) {
             LLAMA_LOG_ERROR("%s: failed to find RPC backend\n", __func__);
-            llama_free_model(model);
+            llama_model_free(model);
             return nullptr;
         }
 
@@ -11702,7 +11708,7 @@ struct llama_model * llama_load_model_from_file(
         ggml_backend_rpc_add_device_t ggml_backend_rpc_add_device_fn = (ggml_backend_rpc_add_device_t) ggml_backend_reg_get_proc_address(rpc_reg, "ggml_backend_rpc_add_device");
         if (!ggml_backend_rpc_add_device_fn) {
             LLAMA_LOG_ERROR("%s: failed to find RPC device add function\n", __func__);
-            llama_free_model(model);
+            llama_model_free(model);
             return nullptr;
         }
 
@@ -11712,7 +11718,7 @@ struct llama_model * llama_load_model_from_file(
                 model->devices.push_back(dev);
             } else {
                 LLAMA_LOG_ERROR("%s: failed to add RPC device for server '%s'\n", __func__, server.c_str());
-                llama_free_model(model);
+                llama_model_free(model);
                 return nullptr;
             }
         }
@@ -11744,7 +11750,7 @@ struct llama_model * llama_load_model_from_file(
     if (params.split_mode == LLAMA_SPLIT_MODE_NONE) {
         if (params.main_gpu < 0 || params.main_gpu >= (int)model->devices.size()) {
             LLAMA_LOG_ERROR("%s: invalid value for main_gpu: %d (available devices: %d)\n", __func__, params.main_gpu, (int)model->devices.size());
-            llama_free_model(model);
+            llama_model_free(model);
             return nullptr;
         }
         ggml_backend_dev_t main_gpu = model->devices[params.main_gpu];
@@ -11767,7 +11773,7 @@ struct llama_model * llama_load_model_from_file(
             LLAMA_LOG_INFO("%s: cancelled model load\n", __func__);
         }
 
-        llama_free_model(model);
+        llama_model_free(model);
         return nullptr;
     }
 
diff --git a/tests/test-autorelease.cpp b/tests/test-autorelease.cpp
index 57fa00011..ba084a91a 100644
--- a/tests/test-autorelease.cpp
+++ b/tests/test-autorelease.cpp
@@ -13,10 +13,10 @@ int main(int argc, char ** argv) {
 
     std::thread([&model_path]() {
         llama_backend_init();
-        auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
+        auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
         auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
         llama_free(ctx);
-        llama_free_model(model);
+        llama_model_free(model);
         llama_backend_free();
     }).join();
 
diff --git a/tests/test-model-load-cancel.cpp b/tests/test-model-load-cancel.cpp
index 858535c3c..9095826fa 100644
--- a/tests/test-model-load-cancel.cpp
+++ b/tests/test-model-load-cancel.cpp
@@ -21,7 +21,7 @@ int main(int argc, char *argv[] ) {
         (void) ctx;
         return progress > 0.50;
     };
-    auto * model = llama_load_model_from_file(model_path, params);
+    auto * model = llama_model_load_from_file(model_path, params);
     llama_backend_free();
     return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
 }
diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index 0af85f002..121c2c60c 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -152,7 +152,7 @@ int main(int argc, char **argv) {
 
         mparams.vocab_only = true;
 
-        model = llama_load_model_from_file(fname.c_str(), mparams);
+        model = llama_model_load_from_file(fname.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -165,7 +165,7 @@ int main(int argc, char **argv) {
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
     }
@@ -300,7 +300,7 @@ int main(int argc, char **argv) {
         fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
     llama_free(ctx);
 
     llama_backend_free();
diff --git a/tests/test-tokenizer-1-bpe.cpp b/tests/test-tokenizer-1-bpe.cpp
index 0ff7fc833..5718fab04 100644
--- a/tests/test-tokenizer-1-bpe.cpp
+++ b/tests/test-tokenizer-1-bpe.cpp
@@ -46,7 +46,7 @@ int main(int argc, char **argv) {
 
         mparams.vocab_only = true;
 
-        model = llama_load_model_from_file(fname.c_str(), mparams);
+        model = llama_model_load_from_file(fname.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -59,7 +59,7 @@ int main(int argc, char **argv) {
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
     }
@@ -143,7 +143,7 @@ int main(int argc, char **argv) {
         }
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
     llama_free(ctx);
 
     llama_backend_free();
diff --git a/tests/test-tokenizer-1-spm.cpp b/tests/test-tokenizer-1-spm.cpp
index 9b0716a43..ac05387c9 100644
--- a/tests/test-tokenizer-1-spm.cpp
+++ b/tests/test-tokenizer-1-spm.cpp
@@ -34,7 +34,7 @@ int main(int argc, char ** argv) {
 
         mparams.vocab_only = true;
 
-        model = llama_load_model_from_file(fname.c_str(), mparams);
+        model = llama_model_load_from_file(fname.c_str(), mparams);
 
         if (model == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -47,7 +47,7 @@ int main(int argc, char ** argv) {
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
-            llama_free_model(model);
+            llama_model_free(model);
             return 1;
         }
     }
@@ -113,7 +113,7 @@ int main(int argc, char ** argv) {
         }
     }
 
-    llama_free_model(model);
+    llama_model_free(model);
     llama_free(ctx);
 
     llama_backend_free();

From 6369f867a410416239d9f20ec27c2b1d6a9fee52 Mon Sep 17 00:00:00 2001
From: Daniel Bevenius 
Date: Mon, 6 Jan 2025 10:28:17 +0100
Subject: [PATCH 027/279] llama : rename missed batch params/vars to ubatch
 (#10059)

This commit renames the `batch` parameter to `ubatch` in the
`llama_kv_cache_find_slot`, `llm_build_inp_embd`, and
`llm_build_mamba` functions.

The motivation for this is that this should have been done as part of
Commit 19d900a7565b8f6b0a708836a57d26966cb9efe2 ("llama : rename batch
to ubatch (#9950)") but for some reason I missed these functions in
that commit and only noticed them now (sorry).
---
 src/llama-kv-cache.cpp | 32 ++++++++++++++++----------------
 src/llama.cpp          | 18 +++++++++---------
 2 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp
index 53379253a..90b6c56ed 100644
--- a/src/llama-kv-cache.cpp
+++ b/src/llama-kv-cache.cpp
@@ -119,10 +119,10 @@ bool llama_kv_cache_init(
 
 struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
            struct llama_kv_cache & cache,
-       const struct llama_ubatch & batch) {
-    const uint32_t n_tokens = batch.n_tokens;
-    const uint32_t n_seqs   = batch.n_seqs;
-    const uint32_t n_seq_tokens = batch.n_seq_tokens;
+       const struct llama_ubatch & ubatch) {
+    const uint32_t n_tokens = ubatch.n_tokens;
+    const uint32_t n_seqs   = ubatch.n_seqs;
+    const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
 
     if (cache.recurrent) {
         // For recurrent state architectures (like Mamba or RWKV),
@@ -130,16 +130,16 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
         // A slot should be always be contiguous.
 
         // can only process batches with an equal number of new tokens in each sequence
-        GGML_ASSERT(batch.equal_seqs);
+        GGML_ASSERT(ubatch.equal_seqs);
 
         int32_t min = cache.size - 1;
         int32_t max = 0;
 
         // everything should fit if all seq_ids are smaller than the max
         for (uint32_t s = 0; s < n_seqs; ++s) {
-            const uint32_t n_seq_id = batch.n_seq_id[s];
+            const uint32_t n_seq_id = ubatch.n_seq_id[s];
             for (uint32_t j = 0; j < n_seq_id; ++j) {
-                const llama_seq_id seq_id = batch.seq_id[s][j];
+                const llama_seq_id seq_id = ubatch.seq_id[s][j];
 
                 if (seq_id < 0 || (uint32_t) seq_id >= cache.size) {
                     // too big seq_id
@@ -198,7 +198,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
 
         // find usable cell range
         for (uint32_t s = 0; s < n_seqs; ++s) {
-            const llama_seq_id seq_id = batch.seq_id[s][0];
+            const llama_seq_id seq_id = ubatch.seq_id[s][0];
             llama_kv_cell & seq_meta = cache.cells[seq_id];
             bool has_cell = false;
             if (seq_meta.tail >= 0) {
@@ -237,7 +237,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
         // gather and re-order
         for (uint32_t s = 0; s < n_seqs; ++s) {
             int32_t dst_id = s + min;
-            int32_t src_id = cache.cells[batch.seq_id[s][0]].tail;
+            int32_t src_id = cache.cells[ubatch.seq_id[s][0]].tail;
             if (dst_id != src_id) {
                 llama_kv_cell & dst_cell = cache.cells[dst_id];
                 llama_kv_cell & src_cell = cache.cells[src_id];
@@ -258,7 +258,7 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
 
         // update the pos of the used seqs
         for (uint32_t s = 0; s < n_seqs; ++s) {
-            const llama_pos last_pos = batch.pos[n_seq_tokens * s + n_seq_tokens - 1];
+            const llama_pos last_pos = ubatch.pos[n_seq_tokens * s + n_seq_tokens - 1];
             int32_t cell_id = s + min;
             llama_kv_cell & cell = cache.cells[cell_id];
 
@@ -266,12 +266,12 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
                 // What should happen when the pos backtracks or skips a value?
                 // Clearing the state mid-batch would require special-casing which isn't done.
                 LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
-                    __func__, last_pos, cell.pos, batch.seq_id[s][0], n_seq_tokens);
+                    __func__, last_pos, cell.pos, ubatch.seq_id[s][0], n_seq_tokens);
             }
             cell.pos = last_pos;
             cell.seq_id.clear();
-            for (int32_t j = 0; j < batch.n_seq_id[s]; ++j) {
-                const llama_seq_id seq_id = batch.seq_id[s][j];
+            for (int32_t j = 0; j < ubatch.n_seq_id[s]; ++j) {
+                const llama_seq_id seq_id = ubatch.seq_id[s][j];
                 cell.seq_id.insert(seq_id);
                 cache.cells[seq_id].tail = cell_id;
             }
@@ -325,10 +325,10 @@ struct llama_kv_cache_slot_info llama_kv_cache_find_slot(
     for (uint32_t s = 0; s < n_seqs; s++) {
         for (uint32_t i = 0; i < n_seq_tokens; ++i) {
             uint32_t k = s*n_seq_tokens + i;
-            cache.cells[cache.head + k].pos = batch.pos[k];
+            cache.cells[cache.head + k].pos = ubatch.pos[k];
 
-            for (int32_t j = 0; j < batch.n_seq_id[s]; j++) {
-                cache.cells[cache.head + k].seq_id.insert(batch.seq_id[s][j]);
+            for (int32_t j = 0; j < ubatch.n_seq_id[s]; j++) {
+                cache.cells[cache.head + k].seq_id.insert(ubatch.seq_id[s][j]);
             }
         }
     }
diff --git a/src/llama.cpp b/src/llama.cpp
index 7337c34ce..60728e5bb 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2540,21 +2540,21 @@ static struct ggml_tensor * llm_build_inp_embd(
         struct ggml_context * ctx,
        struct llama_context & lctx,
         const llama_hparams & hparams,
-         const llama_ubatch & batch,
+         const llama_ubatch & ubatch,
          struct ggml_tensor * tok_embd,
          const llm_build_cb & cb) {
     const int64_t n_embd = hparams.n_embd;
 
     struct ggml_tensor * inpL;
 
-    if (batch.token) {
-        lctx.inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
+    if (ubatch.token) {
+        lctx.inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, ubatch.n_tokens);
         cb(lctx.inp_tokens, "inp_tokens", -1);
         ggml_set_input(lctx.inp_tokens);
 
         inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens);
     } else {
-        lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
+        lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, ubatch.n_tokens);
         inpL = lctx.inp_embd;
         ggml_set_input(lctx.inp_embd);
     }
@@ -3149,7 +3149,7 @@ static struct ggml_tensor * llm_build_copy_mask_state(
 static struct ggml_tensor * llm_build_mamba(
         struct ggml_context * ctx,
        struct llama_context & lctx,
-         const llama_ubatch & batch,
+         const llama_ubatch & ubatch,
          struct ggml_cgraph * graph,
          struct ggml_tensor * cur,
          struct ggml_tensor * state_copy,
@@ -3165,17 +3165,17 @@ static struct ggml_tensor * llm_build_mamba(
     const int64_t d_inner = hparams.ssm_d_inner;
     const int64_t d_state = hparams.ssm_d_state;
     const int64_t dt_rank = hparams.ssm_dt_rank;
-    const int64_t n_seqs  = batch.n_seqs;
+    const int64_t n_seqs  = ubatch.n_seqs;
     // Some variants of Mamba arch (e.g. FalconMamba do apply layer norm on B and Dt layers)
     const bool ssm_dt_b_c_rms = hparams.ssm_dt_b_c_rms;
     // Use the same RMS norm as the final layer norm
     const float norm_rms_eps = hparams.f_norm_rms_eps;
 
-    const int64_t n_seq_tokens = batch.n_seq_tokens;
+    const int64_t n_seq_tokens = ubatch.n_seq_tokens;
 
     GGML_ASSERT(n_seqs != 0);
-    GGML_ASSERT(batch.equal_seqs);
-    GGML_ASSERT(batch.n_tokens == n_seq_tokens * n_seqs);
+    GGML_ASSERT(ubatch.equal_seqs);
+    GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);
 
     struct ggml_tensor * conv_states_all = kv.k_l[il];
     struct ggml_tensor * ssm_states_all  = kv.v_l[il];

From 96a1dc27c3f09bf1ed83a26292d571795bcf27fa Mon Sep 17 00:00:00 2001
From: Asghar Ghorbani 
Date: Mon, 6 Jan 2025 12:21:46 +0100
Subject: [PATCH 028/279] llama : prevent system info string accumulation
 across calls (#11101)

---
 src/llama.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/llama.cpp b/src/llama.cpp
index 60728e5bb..c162c31a6 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -12458,6 +12458,8 @@ int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int
 
 const char * llama_print_system_info(void) {
     static std::string s;
+    s.clear(); // Clear the string, since it's static, otherwise it will accumulate data from previous calls.
+
 
     for (size_t i = 0; i < ggml_backend_reg_count(); i++) {
         auto * reg = ggml_backend_reg_get(i);

From 09186fabbe05236f2b9446ba6c643cb737540d10 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen 
Date: Mon, 6 Jan 2025 13:41:12 +0100
Subject: [PATCH 029/279] llama : remove check flash_attn with lora (#11104)

---
 src/llama.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index c162c31a6..ebd6e3b29 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
-
     ctx->lora_adapters[adapter] = scale;
-
     return 0;
 }
 

From e6e7c75d94adf4d39e846d30807c531ff22865e7 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 15:36:08 +0200
Subject: [PATCH 030/279] server : fix extra BOS in infill endpoint (#11106)

* server : fix extra BOS in infill endpoing

ggml-ci

* server : update infill tests
---
 examples/server/server.cpp                | 2 +-
 examples/server/tests/unit/test_infill.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index c2e62ba69..127323e77 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -3797,7 +3797,7 @@ int main(int argc, char ** argv) {
         data["input_extra"] = input_extra; // default to empty array if it's not exist
 
         std::string prompt = json_value(data, "prompt", std::string());
-        std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, true, true);
+        std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, false, true);
         SRV_DBG("creating infill tasks, n_prompts = %d\n", (int) tokenized_prompts.size());
         data["prompt"] = format_infill(
             ctx_server.ctx,
diff --git a/examples/server/tests/unit/test_infill.py b/examples/server/tests/unit/test_infill.py
index ad4b8192a..10554db0f 100644
--- a/examples/server/tests/unit/test_infill.py
+++ b/examples/server/tests/unit/test_infill.py
@@ -18,7 +18,7 @@ def test_infill_without_input_extra():
         "input_suffix": "}\n",
     })
     assert res.status_code == 200
-    assert match_regex("(Ann|small|shiny)+", res.body["content"])
+    assert match_regex("(Ann|small|shiny|Daddy)+", res.body["content"])
 
 
 def test_infill_with_input_extra():

From 96be8c32649378a23031630a48c440f3a5d0839b Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen 
Date: Mon, 6 Jan 2025 16:34:49 +0100
Subject: [PATCH 031/279] github : add cmd line field to bug report (#11090)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* github : cmd line to bug report

* codeowners : (@ngxson) only watch dockerfile

* Apply suggestions from code review [no ci]

Co-authored-by: Johannes Gäßler 

* rm cmd in log output [no ci]

* rm 2 [no ci]

* no need backticks [no ci]

---------

Co-authored-by: Johannes Gäßler 
---
 .github/ISSUE_TEMPLATE/010-bug-compilation.yml | 12 +++++++++++-
 .github/ISSUE_TEMPLATE/019-bug-misc.yml        | 12 +++++++++++-
 CODEOWNERS                                     |  2 +-
 3 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/010-bug-compilation.yml b/.github/ISSUE_TEMPLATE/010-bug-compilation.yml
index f10b3a2b2..b85bf5741 100644
--- a/.github/ISSUE_TEMPLATE/010-bug-compilation.yml
+++ b/.github/ISSUE_TEMPLATE/010-bug-compilation.yml
@@ -65,12 +65,22 @@ body:
         If possible, please do a git bisect and identify the exact commit that introduced the bug.
     validations:
       required: false
+  - type: textarea
+    id: command
+    attributes:
+      label: Compile command
+      description: >
+        Please provide the exact command you used to compile llama.cpp. For example: `cmake -B ...`.
+        This will be automatically formatted into code, so no need for backticks.
+      render: shell
+    validations:
+      required: true
   - type: textarea
     id: logs
     attributes:
       label: Relevant log output
       description: >
-          Please copy and paste any relevant log output, including the command that you entered and any generated text.
+          Please copy and paste any relevant log output, including any generated text.
           This will be automatically formatted into code, so no need for backticks.
       render: shell
     validations:
diff --git a/.github/ISSUE_TEMPLATE/019-bug-misc.yml b/.github/ISSUE_TEMPLATE/019-bug-misc.yml
index d157ea307..1904e31fd 100644
--- a/.github/ISSUE_TEMPLATE/019-bug-misc.yml
+++ b/.github/ISSUE_TEMPLATE/019-bug-misc.yml
@@ -52,6 +52,16 @@ body:
         - Other (Please specify in the next section)
     validations:
       required: false
+  - type: textarea
+    id: command
+    attributes:
+      label: Command line
+      description: >
+        Please provide the exact commands you entered, if applicable. For example: `llama-server -m ... -c ...`, `llama-cli -m ...`, etc.
+        This will be automatically formatted into code, so no need for backticks.
+      render: shell
+    validations:
+      required: false
   - type: textarea
     id: info
     attributes:
@@ -74,7 +84,7 @@ body:
     attributes:
       label: Relevant log output
       description: >
-          If applicable, please copy and paste any relevant log output, including the command that you entered and any generated text.
+          If applicable, please copy and paste any relevant log output, including any generated text.
           This will be automatically formatted into code, so no need for backticks.
       render: shell
     validations:
diff --git a/CODEOWNERS b/CODEOWNERS
index adeba5395..c9fa34761 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -1,5 +1,5 @@
 # collaborators can optionally add themselves here to indicate their availability for reviewing related PRs
 
 /ci/ @ggerganov
-/.devops/ @ngxson
+/.devops/*.Dockerfile @ngxson
 /examples/server/ @ngxson

From ecebbd292d741ac084cf248146b2cfb17002aa1d Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 6 Jan 2025 17:52:35 +0200
Subject: [PATCH 032/279] llama : remove unused headers (#11109)

ggml-ci
---
 src/llama.cpp | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index ebd6e3b29..8ea6686c9 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -8,7 +8,6 @@
 #include "llama-kv-cache.h"
 #include "llama-model-loader.h"
 #include "llama-model.h"
-#include "llama-quant.h"
 
 #include "ggml.h"
 #include "ggml-alloc.h"
@@ -18,12 +17,8 @@
 #include 
 #include 
 #include 
-#include 
 #include 
-#include 
-#include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -31,10 +26,7 @@
 #include 
 #include 
 #include 
-#include 
 #include 
-#include 
-#include 
 
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
@@ -12434,16 +12426,16 @@ int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix,
     return 0;
 }
 
-int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int split_no, int split_count) {
+int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count) {
     std::string str_split_path(split_path);
     char postfix[32];
     snprintf(postfix, 32, "-%05d-of-%05d.gguf", split_no + 1, split_count);
     std::string str_postfix(postfix);
 
-    // check if dest ends with postfix
+    // check if split_prefix ends with postfix
     int size_prefix = str_split_path.size() - str_postfix.size();
     if (size_prefix > 0 && str_split_path.find(str_postfix, size_prefix) != std::string::npos) {
-        snprintf(dest, std::min((size_t) size_prefix + 1, maxlen), "%s", split_path);
+        snprintf(split_prefix, std::min((size_t) size_prefix + 1, maxlen), "%s", split_path);
         return size_prefix;
     }
 

From dc7cef9f373f2a24b851f0df7a618c5209e593fa Mon Sep 17 00:00:00 2001
From: Eric Curtin 
Date: Mon, 6 Jan 2025 22:45:28 +0000
Subject: [PATCH 033/279] llama-run : fix context size (#11094)

Set `n_ctx` equal to `n_batch` in `Opt` class. Now context size is
a more reasonable 2048.

Signed-off-by: Eric Curtin 
---
 examples/run/run.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/examples/run/run.cpp b/examples/run/run.cpp
index c52a7961f..2888fcfed 100644
--- a/examples/run/run.cpp
+++ b/examples/run/run.cpp
@@ -83,6 +83,7 @@ class Opt {
         }
 
         ctx_params.n_batch        = context_size >= 0 ? context_size : context_size_default;
+        ctx_params.n_ctx          = ctx_params.n_batch;
         model_params.n_gpu_layers = ngl >= 0 ? ngl : ngl_default;
         temperature               = temperature >= 0 ? temperature : temperature_default;
 

From c0d6f790d07aa78be15584ec394ac20739ade93b Mon Sep 17 00:00:00 2001
From: Akarshan Biswas 
Date: Tue, 7 Jan 2025 11:56:07 +0530
Subject: [PATCH 034/279] SYCL: Use get_multi_ptr instead of deprecated
 get_pointer in wkv6 (#11087)

* SYCL: Use get_multi_ptr instead of deprecated get_pointer in wkv6

* Revert "SYCL: Use get_multi_ptr instead of deprecated get_pointer in wkv6"

This reverts commit f62dc45f318e48d375e7734b34cbddee81deed52.

* Reland: Use get_multi_ptr instead of deprecated get_pointer in wkv6
---
 ggml/src/ggml-sycl/wkv6.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-sycl/wkv6.cpp b/ggml/src/ggml-sycl/wkv6.cpp
index 75ddfb86a..105db6f03 100644
--- a/ggml/src/ggml-sycl/wkv6.cpp
+++ b/ggml/src/ggml-sycl/wkv6.cpp
@@ -131,7 +131,7 @@ void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, const ggml_tensor* s
             [=](sycl::nd_item<3> item_ct1) {
                 rwkv_wkv_f32_kernel(
                     B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d,
-                    item_ct1, shared_mem_acc.get_pointer()
+                    item_ct1, (float*)shared_mem_acc.get_multi_ptr().get()
                 );
             });
     });

From a4dd490069a66ae56b42127048f06757fc4de4f7 Mon Sep 17 00:00:00 2001
From: Radoslav Gerganov 
Date: Tue, 7 Jan 2025 08:37:02 +0200
Subject: [PATCH 035/279] rpc : code cleanup (#11107)

Remove duplicated macros, use GGML_LOG_ERROR for errors
---
 ggml/src/ggml-rpc/ggml-rpc.cpp | 49 ++++++++++++++--------------------
 1 file changed, 20 insertions(+), 29 deletions(-)

diff --git a/ggml/src/ggml-rpc/ggml-rpc.cpp b/ggml/src/ggml-rpc/ggml-rpc.cpp
index 2213aba9f..63da2b86b 100644
--- a/ggml/src/ggml-rpc/ggml-rpc.cpp
+++ b/ggml/src/ggml-rpc/ggml-rpc.cpp
@@ -27,15 +27,6 @@
 #endif
 #include 
 
-#define UNUSED GGML_UNUSED
-
-#define GGML_DEBUG 0
-#if (GGML_DEBUG >= 1)
-#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__)
-#else
-#define GGML_PRINT_DEBUG(...)
-#endif
-
 #ifdef _WIN32
 typedef SOCKET sockfd_t;
 using ssize_t = __int64;
@@ -411,7 +402,7 @@ static std::shared_ptr get_socket(const std::string & endpoint) {
         initialized = true;
     }
 #else
-    UNUSED(initialized);
+    GGML_UNUSED(initialized);
 #endif
     auto sock = socket_connect(host.c_str(), port);
     if (sock == nullptr) {
@@ -640,7 +631,7 @@ static void ggml_backend_rpc_free(ggml_backend_t backend) {
 }
 
 static void ggml_backend_rpc_synchronize(ggml_backend_t backend) {
-    UNUSED(backend);
+    GGML_UNUSED(backend);
     // this is no-op because we don't have any async operations
 }
 
@@ -850,7 +841,7 @@ void rpc_server::alloc_buffer(const rpc_msg_alloc_buffer_req & request, rpc_msg_
         GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, request.size, response.remote_ptr, response.remote_size);
         buffers.insert(buffer);
     } else {
-        GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size);
+        GGML_LOG_ERROR("[%s] size: %" PRIu64 " -> failed\n", __func__, request.size);
     }
 }
 
@@ -872,7 +863,7 @@ bool rpc_server::buffer_get_base(const rpc_msg_buffer_get_base_req & request, rp
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     void * base = ggml_backend_buffer_get_base(buffer);
@@ -884,7 +875,7 @@ bool rpc_server::free_buffer(const rpc_msg_free_buffer_req & request) {
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, request.remote_ptr);
     ggml_backend_buffer_t buffer = reinterpret_cast(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     ggml_backend_buffer_free(buffer);
@@ -896,7 +887,7 @@ bool rpc_server::buffer_clear(const rpc_msg_buffer_clear_req & request) {
     GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, request.remote_ptr, request.value);
     ggml_backend_buffer_t buffer = reinterpret_cast(request.remote_ptr);
     if (buffers.find(buffer) == buffers.end()) {
-        GGML_PRINT_DEBUG("[%s] buffer not found\n", __func__);
+        GGML_LOG_ERROR("[%s] buffer not found\n", __func__);
         return false;
     }
     ggml_backend_buffer_clear(buffer, request.value);
@@ -952,7 +943,7 @@ bool rpc_server::set_tensor(const std::vector & input) {
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor);
     if (tensor == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1017,7 +1008,7 @@ bool rpc_server::get_tensor(const rpc_msg_get_tensor_req & request, std::vector<
     struct ggml_context * ctx = ggml_init(params);
     ggml_tensor * tensor = deserialize_tensor(ctx, &request.tensor);
     if (tensor == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensor\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensor\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1051,7 +1042,7 @@ bool rpc_server::copy_tensor(const rpc_msg_copy_tensor_req & request, rpc_msg_co
     ggml_tensor * src = deserialize_tensor(ctx, &request.src);
     ggml_tensor * dst = deserialize_tensor(ctx, &request.dst);
     if (src == nullptr || dst == nullptr) {
-        GGML_PRINT_DEBUG("[%s] error deserializing tensors\n", __func__);
+        GGML_LOG_ERROR("[%s] error deserializing tensors\n", __func__);
         ggml_free(ctx);
         return false;
     }
@@ -1385,14 +1376,14 @@ static void ggml_backend_rpc_device_get_memory(ggml_backend_dev_t dev, size_t *
 
     ggml_backend_rpc_get_device_memory(ctx->endpoint.c_str(), free, total);
 
-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }
 
 static enum ggml_backend_dev_type ggml_backend_rpc_device_get_type(ggml_backend_dev_t dev) {
     // TODO: obtain value from the server
     return GGML_BACKEND_DEVICE_TYPE_GPU;
 
-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }
 
 static void ggml_backend_rpc_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
@@ -1413,7 +1404,7 @@ static ggml_backend_t ggml_backend_rpc_device_init(ggml_backend_dev_t dev, const
 
     return ggml_backend_rpc_init(ctx->endpoint.c_str());
 
-    UNUSED(params);
+    GGML_UNUSED(params);
 }
 
 static ggml_backend_buffer_type_t ggml_backend_rpc_device_get_buffer_type(ggml_backend_dev_t dev) {
@@ -1421,12 +1412,12 @@ static ggml_backend_buffer_type_t ggml_backend_rpc_device_get_buffer_type(ggml_b
 
     return ggml_backend_rpc_buffer_type(ctx->endpoint.c_str());
 
-    UNUSED(dev);
+    GGML_UNUSED(dev);
 }
 
 static bool ggml_backend_rpc_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
-    UNUSED(dev);
-    UNUSED(op);
+    GGML_UNUSED(dev);
+    GGML_UNUSED(op);
     //TODO: call the remote backend and cache the results
     return true;
 }
@@ -1463,20 +1454,20 @@ static const struct ggml_backend_device_i ggml_backend_rpc_device_i = {
 static const char * ggml_backend_rpc_reg_get_name(ggml_backend_reg_t reg) {
     return "RPC";
 
-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }
 
 static size_t ggml_backend_rpc_reg_get_device_count(ggml_backend_reg_t reg) {
     return 0;
 
-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }
 
 static ggml_backend_dev_t ggml_backend_rpc_reg_get_device(ggml_backend_reg_t reg, size_t index) {
     GGML_ABORT("The RPC backend does not have enumerated devices - use ggml_backend_add_device instead");
 
-    UNUSED(reg);
-    UNUSED(index);
+    GGML_UNUSED(reg);
+    GGML_UNUSED(index);
 }
 
 static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const char * name) {
@@ -1485,7 +1476,7 @@ static void * ggml_backend_rpc_get_proc_address(ggml_backend_reg_t reg, const ch
     }
     return NULL;
 
-    UNUSED(reg);
+    GGML_UNUSED(reg);
 }
 
 static const struct ggml_backend_reg_i ggml_backend_rpc_reg_i = {

From a3d50bc022bedd6c7754c24749a1fef4d2d60c7c Mon Sep 17 00:00:00 2001
From: Diego Devesa 
Date: Tue, 7 Jan 2025 12:38:05 +0100
Subject: [PATCH 036/279] ggml-backend : only offload from host buffers
 (#11120)

---
 ggml/src/ggml-backend.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp
index e2d6c4056..d034f8b7f 100644
--- a/ggml/src/ggml-backend.cpp
+++ b/ggml/src/ggml-backend.cpp
@@ -761,7 +761,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
         }
         // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
         // not an ideal solution
-        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
+        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS && ggml_backend_buffer_is_host(src->buffer)) {
             int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
             // check if a backend with higher prio wants to offload the op
             if (src_backend_id == sched->n_backends - 1) {

From 017cc5f446863316d05522a87f25ec48713a9492 Mon Sep 17 00:00:00 2001
From: Diego Devesa 
Date: Tue, 7 Jan 2025 16:11:57 +0100
Subject: [PATCH 037/279] ggml-backend : only offload from host buffers (fix)
 (#11124)

---
 ggml/src/ggml-backend.cpp              | 4 ++--
 ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp
index d034f8b7f..dba7be33b 100644
--- a/ggml/src/ggml-backend.cpp
+++ b/ggml/src/ggml-backend.cpp
@@ -761,10 +761,10 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
         }
         // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
         // not an ideal solution
-        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS && ggml_backend_buffer_is_host(src->buffer)) {
+        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
             int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
             // check if a backend with higher prio wants to offload the op
-            if (src_backend_id == sched->n_backends - 1) {
+            if (src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
                 for (int b = 0; b < src_backend_id; b++) {
                     if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
                         SET_CAUSE(tensor, "1.off");
diff --git a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
index 622c63f1f..b311a5b1c 100644
--- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
+++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
@@ -4169,6 +4169,8 @@ static ggml_backend_buffer_t ggml_backend_cpu_aarch64_buffer_type_alloc_buffer(g
     buffer->buft              = buft;
     buffer->iface.init_tensor = ggml_backend_cpu_aarch64_buffer_init_tensor;
     buffer->iface.set_tensor  = ggml_backend_cpu_aarch64_buffer_set_tensor;
+    buffer->iface.get_tensor  = nullptr;
+    buffer->iface.cpy_tensor  = nullptr;
     return buffer;
 }
 

From 53ff6b9b9fb25ed0ec0a213e05534fe7c3d0040f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= 
Date: Tue, 7 Jan 2025 18:01:58 +0100
Subject: [PATCH 038/279] GGUF: C++ refactor, backend support, misc fixes
 (#11030)

* GGUF: C++ refactor, backend support, misc fixes

remove ggml_tensor.backend

update CODEOWNERS [no ci]

remove gguf_get_data from API

revise GGUF API data types
---
 CODEOWNERS                                    |    6 +
 common/common.cpp                             |    3 +
 .../convert-llama2c-to-ggml.cpp               |    2 +
 .../cvector-generator/cvector-generator.cpp   |    4 +-
 examples/export-lora/export-lora.cpp          |    6 +-
 examples/gguf-hash/gguf-hash.cpp              |    1 +
 examples/gguf-split/gguf-split.cpp            |   14 +-
 examples/gguf/gguf.cpp                        |   16 +-
 examples/llava/clip.cpp                       |    6 +-
 ggml/CMakeLists.txt                           |    3 +-
 ggml/include/ggml-cpp.h                       |    1 +
 ggml/include/ggml.h                           |  140 --
 ggml/include/gguf.h                           |  202 +++
 ggml/src/CMakeLists.txt                       |    4 +-
 ggml/src/ggml-impl.h                          |   27 +-
 ggml/src/ggml.c                               | 1276 ----------------
 ggml/src/gguf.cpp                             | 1325 +++++++++++++++++
 src/llama-impl.cpp                            |    3 +-
 src/llama-model-loader.cpp                    |    9 +-
 src/llama-quant.cpp                           |    3 +-
 tests/test-gguf.cpp                           |  371 ++---
 21 files changed, 1795 insertions(+), 1627 deletions(-)
 create mode 100644 ggml/include/gguf.h
 create mode 100644 ggml/src/gguf.cpp

diff --git a/CODEOWNERS b/CODEOWNERS
index c9fa34761..72d594b46 100644
--- a/CODEOWNERS
+++ b/CODEOWNERS
@@ -3,3 +3,9 @@
 /ci/ @ggerganov
 /.devops/*.Dockerfile @ngxson
 /examples/server/ @ngxson
+/ggml/src/ggml-cuda/fattn* @JohannesGaessler
+/ggml/src/ggml-cuda/mmq.* @JohannesGaessler
+/ggml/src/ggml-cuda/mmv.* @JohannesGaessler
+/ggml/src/ggml-cuda/mmvq.* @JohannesGaessler
+/ggml/src/ggml-opt.cpp @JohannesGaessler
+/ggml/src/gguf.cpp @JohannesGaessler
diff --git a/common/common.cpp b/common/common.cpp
index 4fd36105e..86e4e1e24 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2,6 +2,9 @@
 #define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING
 #endif
 
+#include "ggml.h"
+#include "gguf.h"
+
 #include "common.h"
 #include "log.h"
 // Change JSON_ASSERT from assert() to GGML_ASSERT:
diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
index 9c3a0c367..1256abb17 100644
--- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
+++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -1,4 +1,6 @@
 #include "ggml.h"
+#include "gguf.h"
+
 #include "llama.h"
 #include "common.h"
 #include "log.h"
diff --git a/examples/cvector-generator/cvector-generator.cpp b/examples/cvector-generator/cvector-generator.cpp
index 7c9f50228..e899c1078 100644
--- a/examples/cvector-generator/cvector-generator.cpp
+++ b/examples/cvector-generator/cvector-generator.cpp
@@ -1,7 +1,9 @@
+#include "ggml.h"
+#include "gguf.h"
+
 #include "arg.h"
 #include "common.h"
 #include "llama.h"
-#include "ggml.h"
 #include "pca.hpp"
 #include "mean.hpp"
 
diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp
index 058b5cc86..d5dcd20a0 100644
--- a/examples/export-lora/export-lora.cpp
+++ b/examples/export-lora/export-lora.cpp
@@ -1,7 +1,9 @@
-#include "arg.h"
-#include "common.h"
 #include "ggml.h"
 #include "ggml-alloc.h"
+#include "gguf.h"
+
+#include "arg.h"
+#include "common.h"
 
 #include 
 #include 
diff --git a/examples/gguf-hash/gguf-hash.cpp b/examples/gguf-hash/gguf-hash.cpp
index e96c75117..9523ec122 100644
--- a/examples/gguf-hash/gguf-hash.cpp
+++ b/examples/gguf-hash/gguf-hash.cpp
@@ -1,4 +1,5 @@
 #include "ggml.h"
+#include "gguf.h"
 
 #include    /* abort() */
 #include 
diff --git a/examples/gguf-split/gguf-split.cpp b/examples/gguf-split/gguf-split.cpp
index 9e3d44984..ef3ceb686 100644
--- a/examples/gguf-split/gguf-split.cpp
+++ b/examples/gguf-split/gguf-split.cpp
@@ -1,16 +1,18 @@
+#include "ggml.h"
+#include "gguf.h"
 #include "llama.h"
 #include "common.h"
 
 #include 
+#include 
+#include 
+#include 
 #include 
+#include 
+#include 
 #include 
 #include 
 #include 
-#include 
-
-#include 
-#include 
-#include 
 
 #if defined(_WIN32)
     #include 
@@ -296,7 +298,7 @@ struct split_strategy {
                 total_size += ggml_nbytes(t);
             }
             total_size = total_size / 1000 / 1000; // convert to megabytes
-            printf("split %05d: n_tensors = %d, total_size = %zuM\n", i_split + 1, gguf_get_n_tensors(ctx_out), total_size);
+            printf("split %05d: n_tensors = %" PRIi64 ", total_size = %zuM\n", i_split + 1, gguf_get_n_tensors(ctx_out), total_size);
             i_split++;
         }
     }
diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp
index 7498f85ef..f31989c8c 100644
--- a/examples/gguf/gguf.cpp
+++ b/examples/gguf/gguf.cpp
@@ -1,10 +1,9 @@
 #include "ggml.h"
+#include "gguf.h"
 
 #include 
-#include 
 #include 
 #include 
-#include 
 #include 
 
 #undef MIN
@@ -135,9 +134,10 @@ static bool gguf_ex_read_0(const std::string & fname) {
 
         for (int i = 0; i < n_tensors; ++i) {
             const char * name   = gguf_get_tensor_name  (ctx, i);
+            const size_t size   = gguf_get_tensor_size  (ctx, i);
             const size_t offset = gguf_get_tensor_offset(ctx, i);
 
-            printf("%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset);
+            printf("%s: tensor[%d]: name = %s, size = %zu, offset = %zu\n", __func__, i, name, size, offset);
         }
     }
 
@@ -182,9 +182,10 @@ static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
 
         for (int i = 0; i < n_tensors; ++i) {
             const char * name   = gguf_get_tensor_name  (ctx, i);
+            const size_t size   = gguf_get_tensor_size  (ctx, i);
             const size_t offset = gguf_get_tensor_offset(ctx, i);
 
-            printf("%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset);
+            printf("%s: tensor[%d]: name = %s, size = %zu, offset = %zu\n", __func__, i, name, size, offset);
         }
     }
 
@@ -199,7 +200,8 @@ static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
 
             struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
 
-            printf("%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n", __func__, i, ggml_n_dims(cur), cur->name, cur->data);
+            printf("%s: tensor[%d]: n_dims = %d, ne = (%d, %d, %d, %d), name = %s, data = %p\n",
+                __func__, i, ggml_n_dims(cur), int(cur->ne[0]), int(cur->ne[1]), int(cur->ne[2]), int(cur->ne[3]), cur->name, cur->data);
 
             // print first 10 elements
             const float * data = (const float *) cur->data;
@@ -215,7 +217,7 @@ static bool gguf_ex_read_1(const std::string & fname, bool check_data) {
                 const float * data = (const float *) cur->data;
                 for (int j = 0; j < ggml_nelements(cur); ++j) {
                     if (data[j] != 100 + i) {
-                        fprintf(stderr, "%s: tensor[%d]: data[%d] = %f\n", __func__, i, j, data[j]);
+                        fprintf(stderr, "%s: tensor[%d], data[%d]: found %f, expected %f\n", __func__, i, j, data[j], float(100 + i));
                         gguf_free(ctx);
                         return false;
                     }
@@ -245,6 +247,8 @@ int main(int argc, char ** argv) {
         check_data = false;
     }
 
+    srand(123456);
+
     const std::string fname(argv[1]);
     const std::string mode (argv[2]);
 
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 3cd0d2fa8..7a8a3156b 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -7,6 +7,7 @@
 #include "ggml-cpu.h"
 #include "ggml-alloc.h"
 #include "ggml-backend.h"
+#include "gguf.h"
 
 //#ifdef GGML_USE_CUDA
 //#include "ggml-cuda.h"
@@ -262,7 +263,7 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
             {
                 const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
                 int arr_n = gguf_get_arr_n(ctx_gguf, i);
-                const void * data = gguf_get_arr_data(ctx_gguf, i);
+                const void * data = arr_type == GGUF_TYPE_STRING ? nullptr : gguf_get_arr_data(ctx_gguf, i);
                 std::stringstream ss;
                 ss << "[";
                 for (int j = 0; j < arr_n; j++) {
@@ -2734,7 +2735,8 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
         total_size_org += orig_size;
         total_size_new += new_size;
         gguf_set_tensor_type(ctx_out, name.c_str(), new_type);
-        gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size);
+        GGML_ASSERT(gguf_get_tensor_size(ctx_out, gguf_find_tensor(ctx_out, name.c_str())) == new_size);
+        gguf_set_tensor_data(ctx_out, name.c_str(), new_data);
         fout.write((const char *)new_data, new_size);
         size_t pad = GGML_PAD(new_size, gguf_get_alignment(ctx_out)) - new_size;
         for (size_t j = 0; j < pad; ++j) {
diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt
index 393506533..fe8acc803 100644
--- a/ggml/CMakeLists.txt
+++ b/ggml/CMakeLists.txt
@@ -243,7 +243,8 @@ set(GGML_PUBLIC_HEADERS
     include/ggml-metal.h
     include/ggml-rpc.h
     include/ggml-sycl.h
-    include/ggml-vulkan.h)
+    include/ggml-vulkan.h
+    include/gguf.h)
 
 set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
 #if (GGML_METAL)
diff --git a/ggml/include/ggml-cpp.h b/ggml/include/ggml-cpp.h
index 219361af4..a12342c25 100644
--- a/ggml/include/ggml-cpp.h
+++ b/ggml/include/ggml-cpp.h
@@ -7,6 +7,7 @@
 #include "ggml.h"
 #include "ggml-alloc.h"
 #include "ggml-backend.h"
+#include "gguf.h"
 #include 
 
 // Smart pointers for ggml types
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index c714fc8c8..8630d92c5 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -241,12 +241,6 @@
 #define GGML_ROPE_TYPE_MROPE  8
 #define GGML_ROPE_TYPE_VISION 24
 
-#define GGUF_MAGIC "GGUF"
-
-#define GGUF_VERSION 3
-
-#define GGUF_DEFAULT_ALIGNMENT 32
-
 #define GGML_UNUSED(x) (void)(x)
 
 #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
@@ -403,12 +397,6 @@ extern "C" {
         GGML_PREC_F32,
     };
 
-    enum ggml_backend_type {
-        GGML_BACKEND_TYPE_CPU = 0,
-        GGML_BACKEND_TYPE_GPU = 10,
-        GGML_BACKEND_TYPE_GPU_SPLIT = 20,
-    };
-
     // model file types
     enum ggml_ftype {
         GGML_FTYPE_UNKNOWN        = -1,
@@ -587,8 +575,6 @@ extern "C" {
     struct ggml_tensor {
         enum ggml_type type;
 
-        GGML_DEPRECATED(enum ggml_backend_type backend, "use the buffer type to find the storage location of the tensor");
-
         struct ggml_backend_buffer * buffer;
 
         int64_t ne[GGML_MAX_DIMS]; // number of elements
@@ -2111,132 +2097,6 @@ extern "C" {
                    int64_t   n_per_row,
                const float * imatrix);
 
-    //
-    // gguf
-    //
-
-    enum gguf_type {
-        GGUF_TYPE_UINT8   = 0,
-        GGUF_TYPE_INT8    = 1,
-        GGUF_TYPE_UINT16  = 2,
-        GGUF_TYPE_INT16   = 3,
-        GGUF_TYPE_UINT32  = 4,
-        GGUF_TYPE_INT32   = 5,
-        GGUF_TYPE_FLOAT32 = 6,
-        GGUF_TYPE_BOOL    = 7,
-        GGUF_TYPE_STRING  = 8,
-        GGUF_TYPE_ARRAY   = 9,
-        GGUF_TYPE_UINT64  = 10,
-        GGUF_TYPE_INT64   = 11,
-        GGUF_TYPE_FLOAT64 = 12,
-        GGUF_TYPE_COUNT,       // marks the end of the enum
-    };
-
-    struct gguf_context;
-
-    struct gguf_init_params {
-        bool no_alloc;
-
-        // if not NULL, create a ggml_context and allocate the tensor data in it
-        struct ggml_context ** ctx;
-    };
-
-    GGML_API struct gguf_context * gguf_init_empty(void);
-    GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
-    //GGML_API struct gguf_context * gguf_init_from_buffer(..);
-
-    GGML_API void gguf_free(struct gguf_context * ctx);
-
-    GGML_API const char * gguf_type_name(enum gguf_type type);
-
-    GGML_API int    gguf_get_version    (const struct gguf_context * ctx);
-    GGML_API size_t gguf_get_alignment  (const struct gguf_context * ctx);
-    GGML_API size_t gguf_get_data_offset(const struct gguf_context * ctx);
-    GGML_API void * gguf_get_data       (const struct gguf_context * ctx);
-
-    GGML_API int          gguf_get_n_kv(const struct gguf_context * ctx);
-    GGML_API int          gguf_find_key(const struct gguf_context * ctx, const char * key);
-    GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int key_id);
-
-    GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int key_id);
-    GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id);
-
-    // will abort if the wrong type is used for the key
-    GGML_API uint8_t      gguf_get_val_u8  (const struct gguf_context * ctx, int key_id);
-    GGML_API int8_t       gguf_get_val_i8  (const struct gguf_context * ctx, int key_id);
-    GGML_API uint16_t     gguf_get_val_u16 (const struct gguf_context * ctx, int key_id);
-    GGML_API int16_t      gguf_get_val_i16 (const struct gguf_context * ctx, int key_id);
-    GGML_API uint32_t     gguf_get_val_u32 (const struct gguf_context * ctx, int key_id);
-    GGML_API int32_t      gguf_get_val_i32 (const struct gguf_context * ctx, int key_id);
-    GGML_API float        gguf_get_val_f32 (const struct gguf_context * ctx, int key_id);
-    GGML_API uint64_t     gguf_get_val_u64 (const struct gguf_context * ctx, int key_id);
-    GGML_API int64_t      gguf_get_val_i64 (const struct gguf_context * ctx, int key_id);
-    GGML_API double       gguf_get_val_f64 (const struct gguf_context * ctx, int key_id);
-    GGML_API bool         gguf_get_val_bool(const struct gguf_context * ctx, int key_id);
-    GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int key_id);
-    GGML_API const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id);
-    GGML_API int          gguf_get_arr_n   (const struct gguf_context * ctx, int key_id);
-    GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id);
-    GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int key_id, int i);
-
-    GGML_API int            gguf_get_n_tensors    (const struct gguf_context * ctx);
-    GGML_API int            gguf_find_tensor      (const struct gguf_context * ctx, const char * name);
-    GGML_API size_t         gguf_get_tensor_offset(const struct gguf_context * ctx, int i);
-    GGML_API char *         gguf_get_tensor_name  (const struct gguf_context * ctx, int i);
-    GGML_API enum ggml_type gguf_get_tensor_type  (const struct gguf_context * ctx, int i);
-
-    // removes key if it exists
-    GGML_API void gguf_remove_key(struct gguf_context * ctx, const char * key);
-
-    // overrides existing values or adds a new one
-    GGML_API void gguf_set_val_u8  (struct gguf_context * ctx, const char * key, uint8_t  val);
-    GGML_API void gguf_set_val_i8  (struct gguf_context * ctx, const char * key, int8_t   val);
-    GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);
-    GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t  val);
-    GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);
-    GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t  val);
-    GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float    val);
-    GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t val);
-    GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t  val);
-    GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double   val);
-    GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool     val);
-    GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
-    GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);
-    GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);
-
-    // set or add KV pairs from another context
-    GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src);
-
-    // manage tensor info
-    GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
-    GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
-    GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);
-
-    // writing gguf files can be done in 2 ways:
-    //
-    // - write the entire gguf_context to a binary file in a single pass:
-    //
-    //   gguf_write_to_file(ctx, fname);
-    //
-    // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
-    //
-    //   FILE * f = fopen(fname, "wb");
-    //   fseek(f, gguf_get_meta_size(ctx), SEEK_SET);
-    //   fwrite(f, ...);
-    //   void * data = gguf_meta_get_meta_data(ctx);
-    //   fseek(f, 0, SEEK_SET);
-    //   fwrite(f, data, gguf_get_meta_size(ctx));
-    //   free(data);
-    //   fclose(f);
-    //
-
-    // write the entire context to a binary file
-    GGML_API void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta);
-
-    // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
-    GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx);
-    GGML_API void   gguf_get_meta_data(const struct gguf_context * ctx, void * data);
-
 #ifdef __cplusplus
     // restrict not standard in C++
 #    if defined(__GNUC__)
diff --git a/ggml/include/gguf.h b/ggml/include/gguf.h
new file mode 100644
index 000000000..79ee20206
--- /dev/null
+++ b/ggml/include/gguf.h
@@ -0,0 +1,202 @@
+// This file contains functionality related to "GGUF" files, the binary file format used by ggml.
+// GGUF files have the following structure:
+//
+// 1. File magic "GGUF" (4 bytes).
+// 2. File version (uint32_t).
+// 3. Number of ggml tensors in file (int64_t).
+// 4. Number of key-value-pairs in file (int64_t).
+// 5. For each KV pair:
+//   1. The key (string).
+//   2. The value type (gguf_type).
+//   3a. If the value type is GGUF_TYPE_ARRAY:
+//     1. The type of the array (gguf_type).
+//     2. The number of elements in the array (uint64_t).
+//     3. The binary representation of each element in the array.
+//   3b. Otherwise:
+//     1. The binary representation of the value.
+// 6. For each ggml tensor:
+//   1. The tensor name (string).
+//   2. The number of dimensions of the tensor (uint32_t).
+//   3. For each dimension:
+//     1. The size of the tensor in the dimension (int64_t).
+//   4. The tensor data type (ggml_type).
+//   5. The tensor data offset in the tensor data binary blob (uint64_t).
+// 7. The tensor data binary blob (optional, aligned).
+//
+// Strings are serialized as the string length (uint64_t) followed by the C string without the null terminator.
+// All enums are stored as int32_t.
+// All bool values are stored as int8_t.
+// If the special key "general.alignment" (uint32_t) is defined it is used for alignment,
+//   otherwise GGUF_DEFAULT_ALIGNMENT is used.
+//
+// Module maintainer: Johannes Gäßler (@JohannesGaessler, johannesg@5d6.de)
+
+#pragma once
+
+#include "ggml.h"
+
+#include 
+#include 
+
+#define GGUF_MAGIC   "GGUF"
+#define GGUF_VERSION 3
+
+#define GGUF_KEY_GENERAL_ALIGNMENT "general.alignment"
+
+#define GGUF_DEFAULT_ALIGNMENT 32
+
+#ifdef  __cplusplus
+extern "C" {
+#endif
+
+    // types that can be stored as GGUF KV data
+    enum gguf_type {
+        GGUF_TYPE_UINT8   = 0,
+        GGUF_TYPE_INT8    = 1,
+        GGUF_TYPE_UINT16  = 2,
+        GGUF_TYPE_INT16   = 3,
+        GGUF_TYPE_UINT32  = 4,
+        GGUF_TYPE_INT32   = 5,
+        GGUF_TYPE_FLOAT32 = 6,
+        GGUF_TYPE_BOOL    = 7,
+        GGUF_TYPE_STRING  = 8,
+        GGUF_TYPE_ARRAY   = 9,
+        GGUF_TYPE_UINT64  = 10,
+        GGUF_TYPE_INT64   = 11,
+        GGUF_TYPE_FLOAT64 = 12,
+        GGUF_TYPE_COUNT,       // marks the end of the enum
+    };
+
+    struct gguf_context;
+
+    struct gguf_init_params {
+        bool no_alloc;
+
+        // if not NULL, create a ggml_context and allocate the tensor data in it
+        struct ggml_context ** ctx;
+    };
+
+    GGML_API struct gguf_context * gguf_init_empty(void);
+    GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
+    //GGML_API struct gguf_context * gguf_init_from_buffer(..);
+
+    GGML_API void gguf_free(struct gguf_context * ctx);
+
+    GGML_API const char * gguf_type_name(enum gguf_type type);
+
+    GGML_API uint32_t gguf_get_version    (const struct gguf_context * ctx);
+    GGML_API size_t   gguf_get_alignment  (const struct gguf_context * ctx);
+    GGML_API size_t   gguf_get_data_offset(const struct gguf_context * ctx);
+
+    GGML_API int64_t      gguf_get_n_kv(const struct gguf_context * ctx);
+    GGML_API int64_t      gguf_find_key(const struct gguf_context * ctx, const char * key); // returns -1 if key is not found
+    GGML_API const char * gguf_get_key (const struct gguf_context * ctx, int64_t key_id);
+
+    GGML_API enum gguf_type gguf_get_kv_type (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id);
+
+    // will abort if the wrong type is used for the key
+    GGML_API uint8_t      gguf_get_val_u8  (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API int8_t       gguf_get_val_i8  (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API uint16_t     gguf_get_val_u16 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API int16_t      gguf_get_val_i16 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API uint32_t     gguf_get_val_u32 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API int32_t      gguf_get_val_i32 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API float        gguf_get_val_f32 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API uint64_t     gguf_get_val_u64 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API int64_t      gguf_get_val_i64 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API double       gguf_get_val_f64 (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API bool         gguf_get_val_bool(const struct gguf_context * ctx, int64_t key_id);
+    GGML_API const char * gguf_get_val_str (const struct gguf_context * ctx, int64_t key_id);
+    GGML_API const void * gguf_get_val_data(const struct gguf_context * ctx, int64_t key_id);
+    GGML_API size_t       gguf_get_arr_n   (const struct gguf_context * ctx, int64_t key_id);
+
+    // get raw pointer to the first element of the array with the given key_id
+    // for bool arrays, note that they are always stored as int8 on all platforms (usually this makes no difference)
+    GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id);
+
+    // get ith C string from array with given key_id
+    GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int64_t key_id, size_t i);
+
+    GGML_API int64_t        gguf_get_n_tensors    (const struct gguf_context * ctx);
+    GGML_API int64_t        gguf_find_tensor      (const struct gguf_context * ctx, const char * name); // returns -1 if the tensor is not found
+    GGML_API size_t         gguf_get_tensor_offset(const struct gguf_context * ctx, int64_t tensor_id);
+    GGML_API const char *   gguf_get_tensor_name  (const struct gguf_context * ctx, int64_t tensor_id);
+    GGML_API enum ggml_type gguf_get_tensor_type  (const struct gguf_context * ctx, int64_t tensor_id);
+    GGML_API size_t         gguf_get_tensor_size  (const struct gguf_context * ctx, int64_t tensor_id);
+
+    // removes key if it exists, returns id that the key had prior to removal (-1 if it didn't exist)
+    GGML_API int64_t gguf_remove_key(struct gguf_context * ctx, const char * key);
+
+    // overrides an existing KV pair or adds a new one, the new KV pair is always at the back
+    GGML_API void gguf_set_val_u8  (struct gguf_context * ctx, const char * key, uint8_t      val);
+    GGML_API void gguf_set_val_i8  (struct gguf_context * ctx, const char * key, int8_t       val);
+    GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t     val);
+    GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t      val);
+    GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t     val);
+    GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t      val);
+    GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float        val);
+    GGML_API void gguf_set_val_u64 (struct gguf_context * ctx, const char * key, uint64_t     val);
+    GGML_API void gguf_set_val_i64 (struct gguf_context * ctx, const char * key, int64_t      val);
+    GGML_API void gguf_set_val_f64 (struct gguf_context * ctx, const char * key, double       val);
+    GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool         val);
+    GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
+
+    // creates a new array with n elements of the given type and copies the corresponding number of bytes from data
+    GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, size_t n);
+
+    // creates a new array with n strings and copies the corresponding strings from data
+    GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, size_t n);
+
+    // set or add KV pairs from another context
+    GGML_API void gguf_set_kv(struct gguf_context * ctx, const struct gguf_context * src);
+
+    // add tensor to GGUF context, tensor name must be unique
+    GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
+
+    // after changing a tensor's type, the offsets of all tensors with higher indices are immediately recalculated
+    //   in such a way that the tensor data remains as one contiguous block (except for padding)
+    GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
+
+    // assumes that at least gguf_get_tensor_size bytes can be read from data
+    GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data);
+
+    // writing gguf files can be done in 3 ways:
+    //
+    // - write the entire gguf_context to a binary file in a single pass:
+    //
+    //   gguf_write_to_file(ctx, fname, /*only_meta =*/ false);
+    //
+    // - write only the meta data to a file, then re-open the file and append the tensor data:
+    //
+    //   gguf_write_to_file(ctx, fname, /*only_meta =*/ true);
+    //   FILE * f = fopen(fname, "ab");
+    //   fwrite(f, ...); // write tensor data
+    //   fclose(f);
+    //
+    // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
+    //
+    //   FILE * f = fopen(fname, "wb");
+    //   const size_t size_meta = gguf_get_meta_size(ctx);
+    //   fseek(f, size_meta, SEEK_SET);
+    //   fwrite(f, ...); // write tensor data
+    //   void * data = malloc(size_meta);
+    //   gguf_get_meta_data(ctx, data);
+    //   rewind(f);
+    //   fwrite(data, 1, data, f);
+    //   free(data);
+    //   fclose(f);
+    //
+
+    // write the entire context to a binary file
+    GGML_API bool gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta);
+
+    // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
+    GGML_API size_t gguf_get_meta_size(const struct gguf_context * ctx);
+
+    // writes the meta data to pointer "data"
+    GGML_API void   gguf_get_meta_data(const struct gguf_context * ctx, void * data);
+
+#ifdef  __cplusplus
+}
+#endif
diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt
index 84101c32c..ae1cd2337 100644
--- a/ggml/src/CMakeLists.txt
+++ b/ggml/src/CMakeLists.txt
@@ -208,6 +208,7 @@ add_library(ggml-base
             ../include/ggml-backend.h
             ../include/ggml-cpp.h
             ../include/ggml-opt.h
+            ../include/gguf.h
             ggml.c
             ggml-alloc.c
             ggml-backend.cpp
@@ -215,7 +216,8 @@ add_library(ggml-base
             ggml-threading.cpp
             ggml-threading.h
             ggml-quants.c
-            ggml-quants.h)
+            ggml-quants.h
+            gguf.cpp)
 
 target_include_directories(ggml-base PRIVATE .)
 
diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h
index 549772c57..eab017889 100644
--- a/ggml/src/ggml-impl.h
+++ b/ggml/src/ggml-impl.h
@@ -3,6 +3,8 @@
 // GGML internal header
 
 #include "ggml.h"
+#include "gguf.h"
+
 #include 
 #include 
 #include  // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
@@ -551,22 +553,15 @@ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
 #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
 #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
 
-// expose GGUF internals for test code
-
-GGML_API size_t gguf_type_size(enum gguf_type type);
-
-GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
-
-struct gguf_buf {
-    void * data;
-    size_t size;
-    size_t offset;
-};
-GGML_API struct gguf_buf gguf_buf_init(size_t size);
-GGML_API void gguf_buf_free(struct gguf_buf buf);
-
-GGML_API void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta);
-
 #ifdef __cplusplus
 }
 #endif
+
+#ifdef __cplusplus
+#include 
+
+// expose GGUF internals for test code
+GGML_API size_t gguf_type_size(enum gguf_type type);
+GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
+GGML_API void gguf_write_to_buf(const struct gguf_context * ctx, std::vector & buf, bool only_meta);
+#endif // __cplusplus
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 2bbe5f482..90abc6ad4 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1588,15 +1588,8 @@ static struct ggml_tensor * ggml_new_tensor_impl(
 
     struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
 
-#ifdef __clang__
-    // temporary until ggml_tensor::backend is removed
-    #pragma clang diagnostic push
-    #pragma clang diagnostic ignored "-Wdeprecated-declarations"
-#endif
-
     *result = (struct ggml_tensor) {
         /*.type         =*/ type,
-        /*.backend      =*/ GGML_BACKEND_TYPE_CPU,
         /*.buffer       =*/ NULL,
         /*.ne           =*/ { 1, 1, 1, 1 },
         /*.nb           =*/ { 0, 0, 0, 0 },
@@ -1612,10 +1605,6 @@ static struct ggml_tensor * ggml_new_tensor_impl(
         /*.padding      =*/ { 0 },
     };
 
-#ifdef __clang__
-    #pragma clang diagnostic pop
-#endif
-
     // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
     //GGML_ASSERT_ALIGNED(result->data);
 
@@ -6417,1271 +6406,6 @@ size_t ggml_quantize_chunk(
 
 ////////////////////////////////////////////////////////////////////////////////
 
-struct gguf_str {
-    uint64_t n;  // GGUFv2
-    char * data;
-};
-
-static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = {
-    [GGUF_TYPE_UINT8]   = sizeof(uint8_t),
-    [GGUF_TYPE_INT8]    = sizeof(int8_t),
-    [GGUF_TYPE_UINT16]  = sizeof(uint16_t),
-    [GGUF_TYPE_INT16]   = sizeof(int16_t),
-    [GGUF_TYPE_UINT32]  = sizeof(uint32_t),
-    [GGUF_TYPE_INT32]   = sizeof(int32_t),
-    [GGUF_TYPE_FLOAT32] = sizeof(float),
-    [GGUF_TYPE_BOOL]    = sizeof(bool),
-    [GGUF_TYPE_STRING]  = sizeof(struct gguf_str),
-    [GGUF_TYPE_UINT64]  = sizeof(uint64_t),
-    [GGUF_TYPE_INT64]   = sizeof(int64_t),
-    [GGUF_TYPE_FLOAT64] = sizeof(double),
-    [GGUF_TYPE_ARRAY]   = 0, // undefined
-};
-static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
-
-static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = {
-    [GGUF_TYPE_UINT8]   = "u8",
-    [GGUF_TYPE_INT8]    = "i8",
-    [GGUF_TYPE_UINT16]  = "u16",
-    [GGUF_TYPE_INT16]   = "i16",
-    [GGUF_TYPE_UINT32]  = "u32",
-    [GGUF_TYPE_INT32]   = "i32",
-    [GGUF_TYPE_FLOAT32] = "f32",
-    [GGUF_TYPE_BOOL]    = "bool",
-    [GGUF_TYPE_STRING]  = "str",
-    [GGUF_TYPE_ARRAY]   = "arr",
-    [GGUF_TYPE_UINT64]  = "u64",
-    [GGUF_TYPE_INT64]   = "i64",
-    [GGUF_TYPE_FLOAT64] = "f64",
-};
-static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
-
-union gguf_value {
-    uint8_t  uint8;
-    int8_t   int8;
-    uint16_t uint16;
-    int16_t  int16;
-    uint32_t uint32;
-    int32_t  int32;
-    float    float32;
-    uint64_t uint64;
-    int64_t  int64;
-    double   float64;
-    bool     bool_;
-
-    struct gguf_str str;
-
-    struct {
-        enum gguf_type type;
-
-        uint64_t n;  // GGUFv2
-        void * data;
-    } arr;
-};
-
-struct gguf_kv {
-    struct gguf_str key;
-
-    enum  gguf_type  type;
-    union gguf_value value;
-};
-
-struct gguf_header {
-    char magic[4];
-
-    uint32_t version;
-    uint64_t n_tensors; // GGUFv2
-    uint64_t n_kv;      // GGUFv2
-};
-
-struct gguf_tensor_info {
-    struct gguf_str name;
-
-    uint32_t n_dims;
-    uint64_t ne[GGML_MAX_DIMS];
-
-    enum ggml_type type;
-
-    uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT`
-
-    // for writing API
-    const void * data;
-    size_t size;
-};
-
-struct gguf_context {
-    struct gguf_header header;
-
-    struct gguf_kv          * kv;
-    struct gguf_tensor_info * infos;
-
-    size_t alignment;
-    size_t offset;    // offset of `data` from beginning of file
-    size_t size;      // size of `data` in bytes
-
-    //uint8_t * padding;
-    void * data;
-};
-
-size_t gguf_type_size(enum gguf_type type) {
-    GGML_ASSERT(0 <= type && type < GGUF_TYPE_COUNT);
-    return GGUF_TYPE_SIZE[type];
-}
-
-static bool gguf_tensor_info_sanitize(struct gguf_tensor_info * info) {
-    if (info->n_dims > GGML_MAX_DIMS) {
-        fprintf(stderr, "%s: invalid number of dimensions (%" PRIu32 ")\n", __func__, info->n_dims);
-        return false;
-    }
-
-    if (info->type < 0 || info->type >= GGML_TYPE_COUNT) {
-        fprintf(stderr, "%s: invalid type (%d)\n", __func__, info->type);
-        return false;
-    }
-
-    if (strlen(info->name.data) >= GGML_MAX_NAME) {
-        fprintf(stderr, "%s: tensor '%s' name is too long\n", __func__, info->name.data);
-        return false;
-    }
-
-    for (uint32_t i = 0; i < info->n_dims; ++i) {
-        if (info->ne[i] <= 0) {
-            fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[i]);
-            return false;
-        }
-    }
-
-    // prevent overflow for total number of elements
-    if (INT64_MAX/info->ne[1] <= info->ne[0]) {
-        fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[1]);
-        return false;
-    }
-
-    if (INT64_MAX/info->ne[2] <= info->ne[0]*info->ne[1]) {
-        fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[2]);
-        return false;
-    }
-
-    if (INT64_MAX/info->ne[3] <= info->ne[0]*info->ne[1]*info->ne[2]) {
-        fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[3]);
-        return false;
-    }
-
-    return true;
-}
-
-static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
-    const size_t n = fread(dst, 1, size, file);
-    *offset += n;
-    return n == size;
-}
-
-static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) {
-    p->n    = 0;
-    p->data = NULL;
-
-    bool ok = true;
-
-    ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset);
-
-    // early exit if string length is invalid, prevents from integer overflow
-    if (p->n == SIZE_MAX) {
-        fprintf(stderr, "%s: invalid string length (%" PRIu64 ")\n", __func__, p->n);
-        return false;
-    }
-
-    p->data = calloc(p->n + 1, 1);
-    if (!p->data) {
-        fprintf(stderr, "%s: failed to allocate memory for string of length %" PRIu64 "\n", __func__, p->n);
-        return false;
-    }
-
-    ok = ok && gguf_fread_el(file,  p->data, p->n, offset);
-
-    return ok;
-}
-
-static void gguf_free_kv(struct gguf_kv * kv) {
-    if (kv->key.data) {
-        GGML_FREE(kv->key.data);
-    }
-
-    if (kv->type == GGUF_TYPE_STRING) {
-        if (kv->value.str.data) {
-            GGML_FREE(kv->value.str.data);
-        }
-    }
-
-    if (kv->type == GGUF_TYPE_ARRAY) {
-        if (kv->value.arr.data) {
-            if (kv->value.arr.type == GGUF_TYPE_STRING) {
-                for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
-                    struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j];
-                    if (str->data) {
-                        GGML_FREE(str->data);
-                    }
-                }
-            }
-            GGML_FREE(kv->value.arr.data);
-        }
-    }
-}
-
-struct gguf_context * gguf_init_empty(void) {
-    struct gguf_context * ctx = calloc(1, sizeof(struct gguf_context));
-    if (!ctx) {
-        fprintf(stderr, "%s: failed to allocate memory for context\n", __func__);
-        return NULL;
-    }
-
-    memcpy(ctx->header.magic, GGUF_MAGIC, sizeof(ctx->header.magic));
-    ctx->header.version   = GGUF_VERSION;
-    ctx->header.n_tensors = 0;
-    ctx->header.n_kv      = 0;
-
-    ctx->kv    = NULL;
-    ctx->infos = NULL;
-
-    ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
-    ctx->offset    = 0;
-    ctx->size      = 0;
-
-    ctx->data = NULL;
-
-    return ctx;
-}
-
-struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params) {
-    // offset from start of file
-    size_t offset = 0;
-
-    char magic[4];
-
-    // check the magic before making allocations
-    {
-        gguf_fread_el(file, &magic, sizeof(magic), &offset);
-
-        for (uint32_t i = 0; i < sizeof(magic); i++) {
-            if (magic[i] != GGUF_MAGIC[i]) {
-                fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
-                return NULL;
-            }
-        }
-    }
-
-    bool ok = true;
-
-    struct gguf_context * ctx = calloc(1, sizeof(struct gguf_context));
-    if (!ctx) {
-        fprintf(stderr, "%s: failed to allocate memory for context\n", __func__);
-        return NULL;
-    }
-
-    // read the header
-    {
-        strncpy(ctx->header.magic, magic, 4);
-
-        ctx->kv    = NULL;
-        ctx->infos = NULL;
-        ctx->data  = NULL;
-
-        ok = ok && gguf_fread_el(file, &ctx->header.version,   sizeof(ctx->header.version),   &offset);
-        ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset);
-        ok = ok && gguf_fread_el(file, &ctx->header.n_kv,      sizeof(ctx->header.n_kv),      &offset);
-
-        if (ctx->header.version == 1) {
-            fprintf(stderr, "%s: GGUFv1 is no longer supported. please use a more up-to-date version\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-
-        // sanity-checks to prevent from integer/buffer overflows
-
-        ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/sizeof(struct gguf_tensor_info));
-        ok = ok && (ctx->header.n_tensors < (SIZE_MAX/2)/ggml_tensor_overhead());
-        ok = ok && (ctx->header.n_kv      < (SIZE_MAX/2)/sizeof(struct gguf_kv));
-
-        if (!ok) {
-            fprintf(stderr, "%s: failed to read header\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-    }
-
-    // read the kv pairs
-    {
-        const uint64_t n_kv = ctx->header.n_kv;
-
-        if (n_kv > 0) {
-            ctx->kv = calloc(n_kv, sizeof(struct gguf_kv));
-            if (!ctx->kv) {
-                fprintf(stderr, "%s: failed to allocate memory for kv pairs\n", __func__);
-                gguf_free(ctx);
-                return NULL;
-            }
-        }
-
-        for (uint64_t i = 0; i < n_kv; ++i) {
-            struct gguf_kv * kv = &ctx->kv[i];
-
-            //fprintf(stderr, "%s: reading kv %d\n", __func__, i);
-
-            ok = ok && gguf_fread_str(file, &kv->key,                    &offset);
-            ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset);
-
-            //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data);
-
-            switch (kv->type) {
-                case GGUF_TYPE_UINT8:   ok = ok && gguf_fread_el (file, &kv->value.uint8,   sizeof(kv->value.uint8),   &offset); break;
-                case GGUF_TYPE_INT8:    ok = ok && gguf_fread_el (file, &kv->value.int8,    sizeof(kv->value.int8),    &offset); break;
-                case GGUF_TYPE_UINT16:  ok = ok && gguf_fread_el (file, &kv->value.uint16,  sizeof(kv->value.uint16),  &offset); break;
-                case GGUF_TYPE_INT16:   ok = ok && gguf_fread_el (file, &kv->value.int16,   sizeof(kv->value.int16),   &offset); break;
-                case GGUF_TYPE_UINT32:  ok = ok && gguf_fread_el (file, &kv->value.uint32,  sizeof(kv->value.uint32),  &offset); break;
-                case GGUF_TYPE_INT32:   ok = ok && gguf_fread_el (file, &kv->value.int32,   sizeof(kv->value.int32),   &offset); break;
-                case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break;
-                case GGUF_TYPE_UINT64:  ok = ok && gguf_fread_el (file, &kv->value.uint64,  sizeof(kv->value.uint64),  &offset); break;
-                case GGUF_TYPE_INT64:   ok = ok && gguf_fread_el (file, &kv->value.int64,   sizeof(kv->value.int64),   &offset); break;
-                case GGUF_TYPE_FLOAT64: ok = ok && gguf_fread_el (file, &kv->value.float64, sizeof(kv->value.float64), &offset); break;
-                case GGUF_TYPE_BOOL:    ok = ok && gguf_fread_el (file, &kv->value.bool_,   sizeof(kv->value.bool_),   &offset); break;
-                case GGUF_TYPE_STRING:  ok = ok && gguf_fread_str(file, &kv->value.str,                                &offset); break;
-                case GGUF_TYPE_ARRAY:
-                    {
-                        ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset);
-                        ok = ok && gguf_fread_el(file, &kv->value.arr.n,    sizeof(kv->value.arr.n),    &offset);
-
-                        switch (kv->value.arr.type) {
-                            case GGUF_TYPE_UINT8:
-                            case GGUF_TYPE_INT8:
-                            case GGUF_TYPE_UINT16:
-                            case GGUF_TYPE_INT16:
-                            case GGUF_TYPE_UINT32:
-                            case GGUF_TYPE_INT32:
-                            case GGUF_TYPE_FLOAT32:
-                            case GGUF_TYPE_UINT64:
-                            case GGUF_TYPE_INT64:
-                            case GGUF_TYPE_FLOAT64:
-                            case GGUF_TYPE_BOOL:
-                                {
-                                    // prevent from integer overflow in the malloc below
-                                    if (kv->value.arr.n >= SIZE_MAX/gguf_type_size(kv->value.arr.type)) {
-                                        fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
-                                        gguf_free(ctx);
-                                        return NULL;
-                                    }
-
-                                    kv->value.arr.data = calloc(kv->value.arr.n, gguf_type_size(kv->value.arr.type));
-                                    if (!kv->value.arr.data) {
-                                        fprintf(stderr, "%s: failed to allocate memory for array\n", __func__);
-                                        gguf_free(ctx);
-                                        return NULL;
-                                    }
-
-                                    ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type), &offset);
-                                } break;
-                            case GGUF_TYPE_STRING:
-                                {
-                                    // prevent from integer overflow in the malloc below
-                                    if (kv->value.arr.n >= SIZE_MAX/sizeof(struct gguf_str)) {
-                                        fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n);
-                                        gguf_free(ctx);
-                                        return NULL;
-                                    }
-
-                                    kv->value.arr.data = calloc(kv->value.arr.n, sizeof(struct gguf_str));
-                                    if (!kv->value.arr.data) {
-                                        fprintf(stderr, "%s: failed to allocate memory for array\n", __func__);
-                                        gguf_free(ctx);
-                                        return NULL;
-                                    }
-
-                                    for (uint64_t j = 0; j < kv->value.arr.n; ++j) {
-                                        ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset);
-                                    }
-                                } break;
-                            case GGUF_TYPE_ARRAY:
-                            default:
-                                {
-                                    fprintf(stderr, "%s: invalid array type %d\n", __func__, kv->value.arr.type);
-                                    ok = false;
-                                } break;
-                        }
-                    } break;
-                default:
-                    {
-                        fprintf(stderr, "%s: invalid type %d\n", __func__, kv->type);
-                        ok = false;
-                    } break;
-            }
-
-            if (!ok) {
-                break;
-            }
-        }
-
-        if (!ok) {
-            fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-    }
-
-    // read the tensor infos
-    if (ctx->header.n_tensors > 0) {
-        ctx->infos = calloc(ctx->header.n_tensors, sizeof(struct gguf_tensor_info));
-        if (!ctx->infos) {
-            fprintf(stderr, "%s: failed to allocate memory for tensor infos\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-
-        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
-            struct gguf_tensor_info * info = &ctx->infos[i];
-
-            for (int j = 0; j < GGML_MAX_DIMS; ++j) {
-                info->ne[j] = 1;
-            }
-
-            ok = ok && gguf_fread_str(file, &info->name,                          &offset);
-            ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims),  &offset);
-
-            ok = ok && (info->n_dims <= GGML_MAX_DIMS);
-
-            for (uint32_t j = 0; j < info->n_dims; ++j) {
-                ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset);
-            }
-
-            ok = ok && gguf_fread_el (file, &info->type,   sizeof(info->type),    &offset);
-            ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset),  &offset);
-
-            ok = ok && gguf_tensor_info_sanitize(info);
-
-            // make sure there is no duplicated tensor names
-            for (uint64_t j = 0; j < i && ok; ++j) {
-                if (strcmp(info->name.data, ctx->infos[j].name.data) == 0) {
-                    fprintf(stderr, "%s: duplicated tensor name %s\n", __func__, info->name.data);
-                    ok = false;
-                }
-            }
-
-            if (!ok) {
-                fprintf(stderr, "%s: failed to read tensor info\n", __func__);
-                gguf_free(ctx);
-                return NULL;
-            }
-        }
-    }
-
-    ctx->alignment = GGUF_DEFAULT_ALIGNMENT;
-
-    int alignment_idx = gguf_find_key(ctx, "general.alignment");
-    if (alignment_idx != -1) {
-        ctx->alignment = gguf_get_val_u32(ctx, alignment_idx);
-    }
-
-    // we require the data section to be aligned, so take into account any padding
-    {
-        const size_t offset_pad = offset % ctx->alignment;
-
-        if (offset_pad != 0) {
-            offset += ctx->alignment - offset_pad;
-            fseek(file, offset, SEEK_SET);
-        }
-    }
-
-    // store the current file offset - this is where the data section starts
-    ctx->offset = offset;
-
-    // compute the total size of the data section, taking into account the alignment
-    {
-        ctx->size = 0;
-        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
-            struct gguf_tensor_info * info = &ctx->infos[i];
-
-            const int64_t ne =
-                (int64_t) info->ne[0] *
-                (int64_t) info->ne[1] *
-                (int64_t) info->ne[2] *
-                (int64_t) info->ne[3];
-
-            if (ggml_blck_size(info->type) == 0 ) {
-                // this tensor type support have been removed:
-                fprintf(stderr, "%s: tensor '%s' of type %d: %s\n",
-                        __func__, info->name.data, (int) info->type, ggml_type_name(info->type));
-                gguf_free(ctx);
-                return NULL;
-            }
-
-            if (ne % ggml_blck_size(info->type) != 0) {
-                fprintf(stderr, "%s: tensor '%s' of type %d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%" PRId64 ")\n",
-                        __func__, info->name.data, (int) info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type));
-                gguf_free(ctx);
-                return NULL;
-            }
-
-            const size_t size_cur = ggml_row_size(info->type, ne);
-
-            ctx->size += GGML_PAD(size_cur, ctx->alignment);
-        }
-    }
-
-    // load the tensor data only if requested
-    if (params.ctx != NULL) {
-        // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob
-        // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
-        // the ggml_tensor structs to the appropriate locations in the binary blob
-
-        // compute the exact size needed for the new ggml_context
-        const size_t mem_size =
-            params.no_alloc ?
-            (ctx->header.n_tensors    )*ggml_tensor_overhead() :
-            (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
-
-        struct ggml_init_params pdata = {
-            .mem_size   = mem_size,
-            .mem_buffer = NULL,
-            .no_alloc   = params.no_alloc,
-        };
-
-        *params.ctx = ggml_init(pdata);
-        if (*params.ctx == NULL) {
-            fprintf(stderr, "%s: failed to initialize context\n", __func__);
-            gguf_free(ctx);
-            return NULL;
-        }
-
-        struct ggml_context * ctx_data = *params.ctx;
-
-        struct ggml_tensor * data = NULL;
-
-        if (!params.no_alloc) {
-            data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
-
-            ok = ok && data != NULL;
-
-            // read the binary blob with the tensor data
-            ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset);
-
-            if (!ok) {
-                fprintf(stderr, "%s: failed to read tensor data\n", __func__);
-                ggml_free(ctx_data);
-                gguf_free(ctx);
-                return NULL;
-            }
-
-            ctx->data = data->data;
-        }
-
-        ggml_set_no_alloc(ctx_data, true);
-
-        // create the tensors
-        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
-            const int64_t ne[GGML_MAX_DIMS] = {
-                ctx->infos[i].ne[0],
-                ctx->infos[i].ne[1],
-                ctx->infos[i].ne[2],
-                ctx->infos[i].ne[3],
-            };
-
-            struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne);
-
-            ok = ok && cur != NULL;
-
-            if (!ok) {
-                break;
-            }
-
-            ggml_set_name(cur, ctx->infos[i].name.data);
-
-            // point the data member to the appropriate location in the binary blob using the tensor infos
-            if (!params.no_alloc) {
-              //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file
-                cur->data = (char *) data->data + ctx->infos[i].offset;               // offset from data
-            }
-        }
-
-        if (!ok) {
-            fprintf(stderr, "%s: failed to read the tensor data\n", __func__);
-            ggml_free(ctx_data);
-            gguf_free(ctx);
-            return NULL;
-        }
-
-        ggml_set_no_alloc(ctx_data, params.no_alloc);
-    }
-
-    return ctx;
-}
-
-struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
-    FILE * file = ggml_fopen(fname, "rb");
-    if (!file) {
-        fprintf(stderr, "%s: failed to open '%s': '%s'\n", __func__, fname, strerror(errno));
-        return NULL;
-    }
-
-    struct gguf_context * result = gguf_init_from_file_impl(file, params);
-    fclose(file);
-    return result;
-}
-
-void gguf_free(struct gguf_context * ctx) {
-    if (ctx == NULL) {
-        return;
-    }
-
-    if (ctx->kv) {
-        // free string memory - not great..
-        for (uint64_t i = 0; i < ctx->header.n_kv; ++i) {
-            gguf_free_kv(&ctx->kv[i]);
-        }
-
-        GGML_FREE(ctx->kv);
-    }
-
-    if (ctx->infos) {
-        for (uint64_t i = 0; i < ctx->header.n_tensors; ++i) {
-            struct gguf_tensor_info * info = &ctx->infos[i];
-
-            if (info->name.data) {
-                GGML_FREE(info->name.data);
-            }
-        }
-
-        GGML_FREE(ctx->infos);
-    }
-
-    GGML_FREE(ctx);
-}
-
-const char * gguf_type_name(enum gguf_type type) {
-    return GGUF_TYPE_NAME[type];
-}
-
-int gguf_get_version(const struct gguf_context * ctx) {
-    return ctx->header.version;
-}
-
-size_t gguf_get_alignment(const struct gguf_context * ctx) {
-    return ctx->alignment;
-}
-
-size_t gguf_get_data_offset(const struct gguf_context * ctx) {
-    return ctx->offset;
-}
-
-void * gguf_get_data(const struct gguf_context * ctx) {
-    return ctx->data;
-}
-
-int gguf_get_n_kv(const struct gguf_context * ctx) {
-    return ctx->header.n_kv;
-}
-
-int gguf_find_key(const struct gguf_context * ctx, const char * key) {
-    // return -1 if key not found
-    int keyfound = -1;
-
-    const int n_kv = gguf_get_n_kv(ctx);
-
-    for (int i = 0; i < n_kv; ++i) {
-        if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
-            keyfound = i;
-            break;
-        }
-    }
-
-    return keyfound;
-}
-
-const char * gguf_get_key(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    return ctx->kv[key_id].key.data;
-}
-
-enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    return ctx->kv[key_id].type;
-}
-
-enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
-    return ctx->kv[key_id].value.arr.type;
-}
-
-const void * gguf_get_arr_data(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
-    return ctx->kv[key_id].value.arr.data;
-}
-
-const char * gguf_get_arr_str(const struct gguf_context * ctx, int key_id, int i) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
-    struct gguf_kv * kv = &ctx->kv[key_id];
-    struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i];
-    return str->data;
-}
-
-int gguf_get_arr_n(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_ARRAY);
-    return ctx->kv[key_id].value.arr.n;
-}
-
-uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT8);
-    return ctx->kv[key_id].value.uint8;
-}
-
-int8_t gguf_get_val_i8(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT8);
-    return ctx->kv[key_id].value.int8;
-}
-
-uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT16);
-    return ctx->kv[key_id].value.uint16;
-}
-
-int16_t gguf_get_val_i16(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT16);
-    return ctx->kv[key_id].value.int16;
-}
-
-uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT32);
-    return ctx->kv[key_id].value.uint32;
-}
-
-int32_t gguf_get_val_i32(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT32);
-    return ctx->kv[key_id].value.int32;
-}
-
-float gguf_get_val_f32(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT32);
-    return ctx->kv[key_id].value.float32;
-}
-
-uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_UINT64);
-    return ctx->kv[key_id].value.uint64;
-}
-
-int64_t gguf_get_val_i64(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_INT64);
-    return ctx->kv[key_id].value.int64;
-}
-
-double gguf_get_val_f64(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_FLOAT64);
-    return ctx->kv[key_id].value.float64;
-}
-
-bool gguf_get_val_bool(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_BOOL);
-    return ctx->kv[key_id].value.bool_;
-}
-
-const char * gguf_get_val_str(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type == GGUF_TYPE_STRING);
-    return ctx->kv[key_id].value.str.data;
-}
-
-const void * gguf_get_val_data(const struct gguf_context * ctx, int key_id) {
-    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_ARRAY);
-    GGML_ASSERT(ctx->kv[key_id].type != GGUF_TYPE_STRING);
-    return &ctx->kv[key_id].value;
-}
-
-int gguf_get_n_tensors(const struct gguf_context * ctx) {
-    return ctx->header.n_tensors;
-}
-
-int gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
-    // return -1 if tensor not found
-    int tensorfound = -1;
-
-    const int n_tensors = gguf_get_n_tensors(ctx);
-
-    for (int i = 0; i < n_tensors; ++i) {
-        if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
-            tensorfound = i;
-            break;
-        }
-    }
-
-    return tensorfound;
-}
-
-size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int i) {
-    return ctx->infos[i].offset;
-}
-
-char * gguf_get_tensor_name(const struct gguf_context * ctx, int i) {
-    return ctx->infos[i].name.data;
-}
-
-enum ggml_type gguf_get_tensor_type(const struct gguf_context * ctx, int i) {
-    return ctx->infos[i].type;
-}
-
-// returns the index
-static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) {
-    const int idx = gguf_find_key(ctx, key);
-    if (idx >= 0) {
-        return idx;
-    }
-
-    const int n_kv = gguf_get_n_kv(ctx);
-
-    ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv));
-    ctx->kv[n_kv].key.n    = strlen(key);
-    ctx->kv[n_kv].key.data = strdup(key);
-    ctx->header.n_kv++;
-
-    return n_kv;
-}
-
-void gguf_remove_key(struct gguf_context * ctx, const char * key) {
-    const int idx = gguf_find_key(ctx, key);
-    if (idx >= 0) {
-        const int n_kv = gguf_get_n_kv(ctx);
-        gguf_free_kv(&ctx->kv[idx]);
-        for (int i = idx; i < n_kv-1; ++i) {
-            ctx->kv[i] = ctx->kv[i+1];
-        }
-        ctx->kv = realloc(ctx->kv, (n_kv - 1) * sizeof(struct gguf_kv));
-        ctx->header.n_kv--;
-    }
-}
-
-void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_UINT8;
-    ctx->kv[idx].value.uint8 = val;
-}
-
-void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type       = GGUF_TYPE_INT8;
-    ctx->kv[idx].value.int8 = val;
-}
-
-void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type         = GGUF_TYPE_UINT16;
-    ctx->kv[idx].value.uint16 = val;
-}
-
-void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_INT16;
-    ctx->kv[idx].value.int16 = val;
-}
-
-void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type         = GGUF_TYPE_UINT32;
-    ctx->kv[idx].value.uint32 = val;
-}
-
-void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_INT32;
-    ctx->kv[idx].value.int32 = val;
-}
-
-void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type          = GGUF_TYPE_FLOAT32;
-    ctx->kv[idx].value.float32 = val;
-}
-
-void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type         = GGUF_TYPE_UINT64;
-    ctx->kv[idx].value.uint64 = val;
-}
-
-void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_INT64;
-    ctx->kv[idx].value.int64 = val;
-}
-
-void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type          = GGUF_TYPE_FLOAT64;
-    ctx->kv[idx].value.float64 = val;
-}
-
-void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type        = GGUF_TYPE_BOOL;
-    ctx->kv[idx].value.bool_ = val;
-}
-
-void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type           = GGUF_TYPE_STRING;
-    ctx->kv[idx].value.str.n    = strlen(val);
-    ctx->kv[idx].value.str.data = strdup(val);
-}
-
-void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
-    ctx->kv[idx].value.arr.type = type;
-    ctx->kv[idx].value.arr.n    = n;
-    ctx->kv[idx].value.arr.data = GGML_CALLOC(n, gguf_type_size(type));
-    memcpy(ctx->kv[idx].value.arr.data, data, n*gguf_type_size(type));
-}
-
-void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) {
-    const int idx = gguf_get_or_add_key(ctx, key);
-
-    ctx->kv[idx].type           = GGUF_TYPE_ARRAY;
-    ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING;
-    ctx->kv[idx].value.arr.n    = n;
-    ctx->kv[idx].value.arr.data = GGML_CALLOC(n, sizeof(struct gguf_str));
-    for (int i = 0; i < n; i++) {
-        struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i];
-        str->n    = strlen(data[i]);
-        str->data = strdup(data[i]);
-    }
-}
-
-// set or add KV pairs from another context
-void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
-    for (uint32_t i = 0; i < src->header.n_kv; i++) {
-        switch (src->kv[i].type) {
-            case GGUF_TYPE_UINT8:   gguf_set_val_u8  (ctx, src->kv[i].key.data, src->kv[i].value.uint8);    break;
-            case GGUF_TYPE_INT8:    gguf_set_val_i8  (ctx, src->kv[i].key.data, src->kv[i].value.int8);     break;
-            case GGUF_TYPE_UINT16:  gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16);   break;
-            case GGUF_TYPE_INT16:   gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16);    break;
-            case GGUF_TYPE_UINT32:  gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32);   break;
-            case GGUF_TYPE_INT32:   gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32);    break;
-            case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32);  break;
-            case GGUF_TYPE_UINT64:  gguf_set_val_u64 (ctx, src->kv[i].key.data, src->kv[i].value.uint64);   break;
-            case GGUF_TYPE_INT64:   gguf_set_val_i64 (ctx, src->kv[i].key.data, src->kv[i].value.int64);    break;
-            case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, src->kv[i].key.data, src->kv[i].value.float64);  break;
-            case GGUF_TYPE_BOOL:    gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_);    break;
-            case GGUF_TYPE_STRING:  gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break;
-            case GGUF_TYPE_ARRAY:
-                {
-                    if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) {
-                        const char ** data = GGML_CALLOC(src->kv[i].value.arr.n, sizeof(char *));
-                        for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) {
-                            data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data;
-                        }
-                        gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
-                        GGML_FREE((void *)data);
-                    } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
-                        GGML_ABORT("nested arrays not supported");
-                    } else {
-                        gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
-                    }
-                } break;
-            default: GGML_ABORT("invalid type");
-        }
-    }
-}
-
-void gguf_add_tensor(
-             struct gguf_context * ctx,
-        const struct ggml_tensor * tensor) {
-    GGML_ASSERT(tensor);
-    if (gguf_find_tensor(ctx, tensor->name) != -1) {
-        GGML_ABORT("duplicated tensor name");
-    }
-
-    const int idx = ctx->header.n_tensors;
-    ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info));
-
-    ctx->infos[idx].name.n    = strlen(tensor->name);
-    ctx->infos[idx].name.data = strdup(tensor->name);
-
-    for (int i = 0; i < GGML_MAX_DIMS; ++i) {
-        ctx->infos[idx].ne[i] = 1;
-    }
-
-    ctx->infos[idx].n_dims = ggml_n_dims(tensor);
-    for (uint32_t i = 0; i < ctx->infos[idx].n_dims; i++) {
-        ctx->infos[idx].ne[i] = tensor->ne[i];
-    }
-
-    ctx->infos[idx].type   = tensor->type;
-    ctx->infos[idx].offset = 0;
-    ctx->infos[idx].data   = tensor->data;
-    ctx->infos[idx].size   = ggml_nbytes(tensor);
-
-    if (ctx->header.n_tensors > 0) {
-        ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment);
-    }
-
-    ctx->header.n_tensors++;
-}
-
-void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
-    const int idx = gguf_find_tensor(ctx, name);
-    if (idx < 0) {
-        GGML_ABORT("tensor not found");
-    }
-
-    ctx->infos[idx].type = type;
-}
-
-void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) {
-    const int idx = gguf_find_tensor(ctx, name);
-    if (idx < 0) {
-        GGML_ABORT("tensor not found");
-    }
-
-    ctx->infos[idx].data = data;
-    ctx->infos[idx].size = size;
-
-    // update offsets
-    for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) {
-        ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i - 1].size, ctx->alignment);
-    }
-}
-
-//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) {
-//    fwrite(&val->n,   sizeof(val->n),    1, file);
-//    fwrite(val->data, sizeof(char), val->n, file);
-//}
-//
-//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) {
-//    fwrite(val, sizeof(char), size, file);
-//}
-
-struct gguf_buf gguf_buf_init(size_t size) {
-    struct gguf_buf buf = {
-        /*buf.data   =*/ size == 0 ? NULL : GGML_CALLOC(1, size),
-        /*buf.size   =*/ size,
-        /*buf.offset =*/ 0,
-    };
-
-    return buf;
-}
-
-void gguf_buf_free(struct gguf_buf buf) {
-    if (buf.data) {
-        GGML_FREE(buf.data);
-    }
-}
-
-static void gguf_buf_grow(struct gguf_buf * buf, size_t size) {
-    if (buf->offset + size > buf->size) {
-        buf->size = 1.5*(buf->offset + size);
-        if (buf->data) {
-            buf->data = realloc(buf->data, buf->size);
-        }
-    }
-}
-
-static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) {
-    gguf_buf_grow(buf, sizeof(val->n) + val->n);
-
-    if (buf->data) {
-        memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n));
-    }
-    buf->offset += sizeof(val->n);
-
-    if (buf->data) {
-        memcpy((char *) buf->data + buf->offset, val->data, val->n);
-    }
-    buf->offset += val->n;
-}
-
-static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) {
-    gguf_buf_grow(buf, el_size);
-
-    if (buf->data) {
-        memcpy((char *) buf->data + buf->offset, val, el_size);
-    }
-    buf->offset += el_size;
-}
-
-void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) {
-    // write header
-    gguf_bwrite_el(buf, &ctx->header.magic,     sizeof(ctx->header.magic));
-    gguf_bwrite_el(buf, &ctx->header.version,   sizeof(ctx->header.version));
-    gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors));
-    gguf_bwrite_el(buf, &ctx->header.n_kv,      sizeof(ctx->header.n_kv));
-
-    // write key-value pairs
-    for (uint32_t i = 0; i < ctx->header.n_kv; ++i) {
-        struct gguf_kv * kv = &ctx->kv[i];
-
-        gguf_bwrite_str(buf, &kv->key);
-        gguf_bwrite_el (buf, &kv->type, sizeof(kv->type));
-
-        switch (kv->type) {
-            case GGUF_TYPE_UINT8:   gguf_bwrite_el( buf, &kv->value.uint8,   sizeof(kv->value.uint8)  ); break;
-            case GGUF_TYPE_INT8:    gguf_bwrite_el (buf, &kv->value.int8,    sizeof(kv->value.int8)   ); break;
-            case GGUF_TYPE_UINT16:  gguf_bwrite_el (buf, &kv->value.uint16,  sizeof(kv->value.uint16) ); break;
-            case GGUF_TYPE_INT16:   gguf_bwrite_el (buf, &kv->value.int16,   sizeof(kv->value.int16)  ); break;
-            case GGUF_TYPE_UINT32:  gguf_bwrite_el (buf, &kv->value.uint32,  sizeof(kv->value.uint32) ); break;
-            case GGUF_TYPE_INT32:   gguf_bwrite_el (buf, &kv->value.int32,   sizeof(kv->value.int32)  ); break;
-            case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break;
-            case GGUF_TYPE_UINT64:  gguf_bwrite_el (buf, &kv->value.uint64,  sizeof(kv->value.uint64) ); break;
-            case GGUF_TYPE_INT64:   gguf_bwrite_el (buf, &kv->value.int64,   sizeof(kv->value.int64)  ); break;
-            case GGUF_TYPE_FLOAT64: gguf_bwrite_el (buf, &kv->value.float64, sizeof(kv->value.float64)); break;
-            case GGUF_TYPE_BOOL:    gguf_bwrite_el (buf, &kv->value.bool_,   sizeof(kv->value.bool_)  ); break;
-            case GGUF_TYPE_STRING:  gguf_bwrite_str(buf, &kv->value.str                               ); break;
-            case GGUF_TYPE_ARRAY:
-                {
-                    gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type));
-                    gguf_bwrite_el(buf, &kv->value.arr.n,    sizeof(kv->value.arr.n)   );
-
-                    switch (kv->value.arr.type) {
-                        case GGUF_TYPE_UINT8:
-                        case GGUF_TYPE_INT8:
-                        case GGUF_TYPE_UINT16:
-                        case GGUF_TYPE_INT16:
-                        case GGUF_TYPE_UINT32:
-                        case GGUF_TYPE_INT32:
-                        case GGUF_TYPE_FLOAT32:
-                        case GGUF_TYPE_UINT64:
-                        case GGUF_TYPE_INT64:
-                        case GGUF_TYPE_FLOAT64:
-                        case GGUF_TYPE_BOOL:
-                            {
-                                gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * gguf_type_size(kv->value.arr.type));
-                            } break;
-                        case GGUF_TYPE_STRING:
-                            {
-                                for (uint32_t j = 0; j < kv->value.arr.n; ++j) {
-                                    gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]);
-                                }
-                            } break;
-                        case GGUF_TYPE_ARRAY:
-                        default: GGML_ABORT("invalid type");
-                    }
-                } break;
-            default: GGML_ABORT("invalid type");
-        }
-    }
-
-    // write tensor infos
-    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
-        struct gguf_tensor_info * info = &ctx->infos[i];
-
-        gguf_bwrite_str(buf, &info->name);
-        gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims));
-        for (uint32_t j = 0; j < info->n_dims; ++j) {
-            gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j]));
-        }
-        gguf_bwrite_el(buf, &info->type,   sizeof(info->type));
-        gguf_bwrite_el(buf, &info->offset, sizeof(info->offset));
-    }
-
-    // we require the data section to be aligned, so take into account any padding
-    {
-        const size_t offset     = buf->offset;
-        const size_t offset_pad = GGML_PAD(offset, ctx->alignment);
-
-        if (offset_pad != offset) {
-            uint8_t pad = 0;
-            for (size_t i = 0; i < offset_pad - offset; ++i) {
-                gguf_bwrite_el(buf, &pad, sizeof(pad));
-            }
-        }
-    }
-
-    if (only_meta) {
-        return;
-    }
-
-    size_t offset = 0;
-
-    // write tensor data
-    for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) {
-        struct gguf_tensor_info * info = &ctx->infos[i];
-
-        const size_t size     = info->size;
-        const size_t size_pad = GGML_PAD(size, ctx->alignment);
-
-        gguf_bwrite_el(buf, info->data, size);
-
-        if (size_pad != size) {
-            uint8_t pad = 0;
-            for (size_t j = 0; j < size_pad - size; ++j) {
-                gguf_bwrite_el(buf, &pad, sizeof(pad));
-            }
-        }
-
-        GGML_ASSERT(offset == info->offset);
-
-        offset += size_pad;
-    }
-}
-
-void gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
-    FILE * file = ggml_fopen(fname, "wb");
-    if (!file) {
-        GGML_ABORT("failed to open file for writing");
-    }
-
-    struct gguf_buf buf = gguf_buf_init(16*1024);
-
-    gguf_write_to_buf(ctx, &buf, only_meta);
-
-    fwrite(buf.data, 1, buf.offset, file);
-
-    gguf_buf_free(buf);
-
-    fclose(file);
-}
-
-size_t gguf_get_meta_size(const struct gguf_context * ctx) {
-    // no allocs - only compute size
-    struct gguf_buf buf = gguf_buf_init(0);
-
-    gguf_write_to_buf(ctx, &buf, true);
-
-    return buf.offset;
-}
-
-void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
-    struct gguf_buf buf = gguf_buf_init(16*1024);
-
-    gguf_write_to_buf(ctx, &buf, true);
-
-    memcpy(data, buf.data, buf.offset);
-
-    gguf_buf_free(buf);
-}
-
 void ggml_log_set(ggml_log_callback log_callback, void * user_data) {
     g_logger_state.log_callback = log_callback ? log_callback : ggml_log_callback_default;
     g_logger_state.log_callback_user_data = user_data;
diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp
new file mode 100644
index 000000000..655ed600a
--- /dev/null
+++ b/ggml/src/gguf.cpp
@@ -0,0 +1,1325 @@
+#include "ggml.h"
+#include "ggml-backend.h"
+#include "ggml-impl.h"
+#include "gguf.h"
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+template 
+struct type_to_gguf_type;
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_UINT8;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_INT8;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_UINT16;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_INT16;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_UINT32;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_INT32;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_FLOAT32;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_BOOL;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_STRING;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_UINT64;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_INT64;
+};
+
+template <>
+struct type_to_gguf_type {
+    static constexpr enum gguf_type value = GGUF_TYPE_FLOAT64;
+};
+
+static const std::map GGUF_TYPE_SIZE = {
+    {GGUF_TYPE_UINT8,   sizeof(uint8_t)},
+    {GGUF_TYPE_INT8,    sizeof(int8_t)},
+    {GGUF_TYPE_UINT16,  sizeof(uint16_t)},
+    {GGUF_TYPE_INT16,   sizeof(int16_t)},
+    {GGUF_TYPE_UINT32,  sizeof(uint32_t)},
+    {GGUF_TYPE_INT32,   sizeof(int32_t)},
+    {GGUF_TYPE_FLOAT32, sizeof(float)},
+    {GGUF_TYPE_BOOL,    sizeof(int8_t)},
+    {GGUF_TYPE_STRING,  0}, // undefined
+    {GGUF_TYPE_ARRAY,   0}, // undefined
+    {GGUF_TYPE_UINT64,  sizeof(uint64_t)},
+    {GGUF_TYPE_INT64,   sizeof(int64_t)},
+    {GGUF_TYPE_FLOAT64, sizeof(double)},
+};
+static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
+
+static const std::map GGUF_TYPE_NAME = {
+    {GGUF_TYPE_UINT8,   "u8"},
+    {GGUF_TYPE_INT8,    "i8"},
+    {GGUF_TYPE_UINT16,  "u16"},
+    {GGUF_TYPE_INT16,   "i16"},
+    {GGUF_TYPE_UINT32,  "u32"},
+    {GGUF_TYPE_INT32,   "i32"},
+    {GGUF_TYPE_FLOAT32, "f32"},
+    {GGUF_TYPE_BOOL,    "bool"},
+    {GGUF_TYPE_STRING,  "str"},
+    {GGUF_TYPE_ARRAY,   "arr"},
+    {GGUF_TYPE_UINT64,  "u64"},
+    {GGUF_TYPE_INT64,   "i64"},
+    {GGUF_TYPE_FLOAT64, "f64"},
+};
+static_assert(GGUF_TYPE_COUNT == 13, "GGUF_TYPE_COUNT != 13");
+
+size_t gguf_type_size(enum gguf_type type) {
+    auto it = GGUF_TYPE_SIZE.find(type);
+    return it == GGUF_TYPE_SIZE.end() ? 0 : it->second;
+}
+
+struct gguf_kv {
+    std::string key;
+
+    bool is_array;
+    enum gguf_type type;
+
+    std::vector      data;
+    std::vector data_string;
+
+    template 
+    gguf_kv(const std::string & key, const T value)
+            : key(key), is_array(false), type(type_to_gguf_type::value) {
+        GGML_ASSERT(!key.empty());
+        data.resize(sizeof(T));
+        memcpy(data.data(), &value, sizeof(T));
+    }
+
+    template 
+    gguf_kv(const std::string & key, const std::vector & value)
+            : key(key), is_array(true), type(type_to_gguf_type::value) {
+        GGML_ASSERT(!key.empty());
+        data.resize(value.size()*sizeof(T));
+        for (size_t i = 0; i < value.size(); ++i) {
+            const T tmp = value[i];
+            memcpy(data.data() + i*sizeof(T), &tmp, sizeof(T));
+        }
+    }
+
+    gguf_kv(const std::string & key, const std::string & value)
+            : key(key), is_array(false), type(GGUF_TYPE_STRING) {
+        GGML_ASSERT(!key.empty());
+        data_string.push_back(value);
+    }
+
+    gguf_kv(const std::string & key, const std::vector & value)
+            : key(key), is_array(true), type(GGUF_TYPE_STRING) {
+        GGML_ASSERT(!key.empty());
+        data_string = value;
+    }
+
+    const std::string & get_key() const {
+        return key;
+    }
+
+    const enum gguf_type & get_type() const {
+        return type;
+    }
+
+    size_t get_ne() const {
+        if (type == GGUF_TYPE_STRING) {
+            const size_t ne = data_string.size();
+            GGML_ASSERT(is_array || ne == 1);
+            return ne;
+        }
+        const size_t type_size = gguf_type_size(type);
+        GGML_ASSERT(data.size() % type_size == 0);
+        const size_t ne = data.size() / type_size;
+        GGML_ASSERT(is_array || ne == 1);
+        return ne;
+    }
+
+    template 
+    const T & get_val(const size_t i = 0) const {
+        GGML_ASSERT(type_to_gguf_type::value == type);
+        if constexpr (std::is_same::value) {
+            GGML_ASSERT(data_string.size() >= i+1);
+            return data_string[i];
+        }
+        const size_t type_size = gguf_type_size(type);
+        GGML_ASSERT(data.size() % type_size == 0);
+        GGML_ASSERT(data.size() >= (i+1)*type_size);
+        return reinterpret_cast(data.data())[i];
+    }
+
+    void cast(const enum gguf_type new_type) {
+        const size_t new_type_size = gguf_type_size(new_type);
+        GGML_ASSERT(data.size() % new_type_size == 0);
+        type = new_type;
+    }
+};
+
+struct gguf_tensor_info {
+    struct ggml_tensor t; // for holding the equivalent info
+    uint64_t offset;      // offset from start of `data`, must be a multiple of `ALIGNMENT`
+};
+
+struct gguf_context {
+    uint32_t version = GGUF_VERSION;
+
+    std::vector kv;
+    std::vector info;
+
+    size_t alignment = GGUF_DEFAULT_ALIGNMENT;
+    size_t offset    = 0; // offset of `data` from beginning of file
+    size_t size      = 0; // size of `data` in bytes
+
+    void * data = nullptr;
+};
+
+struct gguf_reader {
+    FILE * file;
+
+    gguf_reader(FILE * file) : file(file) {}
+
+    template 
+    bool read(T & dst) const {
+        return fread(&dst, 1, sizeof(dst), file) == sizeof(dst);
+    }
+
+    template 
+    bool read(std::vector & dst, const size_t n) const {
+        dst.resize(n);
+        for (size_t i = 0; i < dst.size(); ++i) {
+            if constexpr (std::is_same::value) {
+                bool tmp;
+                if (!read(tmp)) {
+                    return false;
+                }
+                dst[i] = tmp;
+            } else {
+                if (!read(dst[i])) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    bool read(bool & dst) const {
+        int8_t tmp = -1;
+        if (!read(tmp)) {
+            return false;
+        }
+        dst = tmp != 0;
+        return true;
+    }
+
+    bool read(enum ggml_type & dst) const {
+        int32_t tmp = -1;
+        if (!read(tmp)) {
+            return false;
+        }
+        dst = ggml_type(tmp);
+        return true;
+    }
+
+    bool read(enum gguf_type & dst) const {
+        int32_t tmp = -1;
+        if (!read(tmp)) {
+            return false;
+        }
+        dst = gguf_type(tmp);
+        return true;
+    }
+
+    bool read(std::string & dst) const {
+        uint64_t size = -1;
+        if (!read(size)) {
+            return false;
+        }
+        dst.resize(size);
+        return fread(dst.data(), 1, dst.length(), file) == dst.length();
+    }
+
+    bool read(void * dst, const size_t size) const {
+        return fread(dst, 1, size, file) == size;
+    }
+};
+
+struct gguf_context * gguf_init_empty(void) {
+    return new gguf_context;
+}
+
+template
+bool gguf_read_emplace_helper(const struct gguf_reader & gr, std::vector & kv, const std::string & key, const bool is_array, const size_t n) {
+    if (is_array) {
+        std::vector value;
+        try {
+            if (!gr.read(value, n)) {
+                return false;
+            }
+        } catch (std::length_error &) {
+            fprintf(stderr, "%s: encountered length_error while reading value for key '%s'\n", __func__, key.c_str());
+            return false;
+        } catch (std::bad_alloc &) {
+            fprintf(stderr, "%s: encountered bad_alloc error while reading value for key '%s'\n", __func__, key.c_str());
+            return false;
+        }
+        kv.emplace_back(key, value);
+    } else {
+        T value;
+        if (!gr.read(value)) {
+            return false;
+        }
+        kv.emplace_back(key, value);
+    }
+    return true;
+}
+
+struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params) {
+    const struct gguf_reader gr(file);
+    struct gguf_context * ctx = new gguf_context;
+
+    bool ok = true;
+
+    // file magic
+    {
+        std::vector magic;
+        ok = ok && gr.read(magic, 4);
+
+        if (!ok) {
+            fprintf(stderr, "%s: failed to read magic\n", __func__);
+            gguf_free(ctx);
+            return nullptr;
+        }
+
+        for (uint32_t i = 0; i < magic.size(); i++) {
+            if (magic[i] != GGUF_MAGIC[i]) {
+                fprintf(stderr, "%s: invalid magic characters: '%c%c%c%c', expected 'GGUF'\n", __func__, magic[0], magic[1], magic[2], magic[3]);
+                gguf_free(ctx);
+                return nullptr;
+            }
+        }
+    }
+
+    // header
+    int64_t n_kv      = 0;
+    int64_t n_tensors = 0;
+
+    if (ok && gr.read(ctx->version)) {
+        if (ctx->version == 1) {
+            fprintf(stderr, "%s: GGUFv1 is no longer supported, please use a more up-to-date version\n", __func__);
+            ok = false;
+        }
+        if (ctx->version > GGUF_VERSION) {
+            fprintf(stderr, "%s: this GGUF file is version %" PRIu32 " but this software only supports up to version %d\n",
+                __func__, ctx->version, GGUF_VERSION);
+            ok = false;
+        }
+    } else {
+        ok = false;
+    }
+
+    if (ok && gr.read(n_tensors)) {
+        static_assert(sizeof(size_t) <= 8 && sizeof(gguf_tensor_info) >= 2, "int64_t insufficient for indexing");
+        if (n_tensors < 0 || n_tensors > int64_t(SIZE_MAX/sizeof(gguf_tensor_info))) {
+            fprintf(stderr, "%s: number of tensors is %" PRIi64 " but must be in [0, %zu]\n",
+                __func__, n_tensors, SIZE_MAX/sizeof(gguf_tensor_info));
+            ok = false;
+        }
+    } else {
+        ok = false;
+    }
+
+    if (ok && gr.read(n_kv)) {
+        static_assert(sizeof(size_t) <= 8 && sizeof(gguf_tensor_info) >= 2, "int64_t insufficient for indexing");
+        if (n_kv < 0 || n_kv > int64_t(SIZE_MAX/sizeof(gguf_kv))) {
+            fprintf(stderr, "%s: number of key value pairs is %" PRIi64 " but must be in [0, %zu]\n",
+                    __func__, n_kv, SIZE_MAX/sizeof(gguf_kv));
+            ok = false;
+        }
+    } else {
+        ok = false;
+    }
+
+    if (!ok) {
+        fprintf(stderr, "%s: failed to read header\n", __func__);
+        gguf_free(ctx);
+        return nullptr;
+    }
+
+    // KV pairs
+    {
+        for (int64_t i = 0; ok && i < n_kv; ++i) {
+            std::string key;
+            gguf_type   type     = gguf_type(-1);
+            bool        is_array = false;
+            uint64_t    n        = 1;
+
+            try {
+                ok = ok && gr.read(key);
+            } catch (std::length_error &) {
+                fprintf(stderr, "%s: encountered length_error while reading key %" PRIi64 "\n", __func__, i);
+                ok = false;
+            } catch (std::bad_alloc &) {
+                fprintf(stderr, "%s: encountered bad_alloc error while reading key %" PRIi64 "\n", __func__, i);
+                ok = false;
+            }
+            for (size_t j = 0; ok && j < ctx->kv.size(); ++j) {
+                if (key == ctx->kv[j].key) {
+                    fprintf(stderr, "%s: duplicate key '%s' for tensors %zu and %" PRIi64 " \n", __func__, key.c_str(), j, i);
+                    ok = false;
+                }
+            }
+            if (!ok) {
+                break;
+            }
+
+            ok = ok && gr.read(type);
+            if (type == GGUF_TYPE_ARRAY) {
+                is_array = true;
+                ok = ok && gr.read(type);
+                ok = ok && gr.read(n);
+            }
+            if (!ok) {
+                break;
+            }
+
+            switch (type) {
+                case GGUF_TYPE_UINT8:   ok = ok && gguf_read_emplace_helper    (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_INT8:    ok = ok && gguf_read_emplace_helper     (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_UINT16:  ok = ok && gguf_read_emplace_helper   (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_INT16:   ok = ok && gguf_read_emplace_helper    (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_UINT32:  ok = ok && gguf_read_emplace_helper   (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_INT32:   ok = ok && gguf_read_emplace_helper    (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_FLOAT32: ok = ok && gguf_read_emplace_helper      (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_BOOL:    ok = ok && gguf_read_emplace_helper       (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_STRING:  ok = ok && gguf_read_emplace_helper(gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_UINT64:  ok = ok && gguf_read_emplace_helper   (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_INT64:   ok = ok && gguf_read_emplace_helper    (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_FLOAT64: ok = ok && gguf_read_emplace_helper     (gr, ctx->kv, key, is_array, n); break;
+                case GGUF_TYPE_ARRAY:
+                default:
+                    {
+                        fprintf(stderr, "%s: key '%s' has invalid GGUF type %d\n", __func__, key.c_str(), type);
+                        ok = false;
+                    } break;
+            }
+        }
+
+        if (!ok) {
+            fprintf(stderr, "%s: failed to read key-value pairs\n", __func__);
+            gguf_free(ctx);
+            return nullptr;
+        }
+        GGML_ASSERT(int64_t(ctx->kv.size()) == n_kv);
+
+        const int alignment_idx = gguf_find_key(ctx, GGUF_KEY_GENERAL_ALIGNMENT);
+        ctx->alignment = alignment_idx == -1 ? GGUF_DEFAULT_ALIGNMENT : gguf_get_val_u32(ctx, alignment_idx);
+
+        if (ctx->alignment == 0 || (ctx->alignment & (ctx->alignment - 1)) != 0) {
+            fprintf(stderr, "%s: alignment %zu is not a power of 2\n", __func__, ctx->alignment);
+            gguf_free(ctx);
+            return nullptr;
+        }
+    }
+
+    // read the tensor info
+    for (int64_t i = 0; ok && i < n_tensors; ++i) {
+        struct gguf_tensor_info info;
+
+        // tensor name
+        {
+            std::string name;
+            try {
+                ok = ok && gr.read(name);
+            } catch (std::length_error &) {
+                fprintf(stderr, "%s: encountered length_error while reading tensor name %" PRIi64 "\n", __func__, i);
+                ok = false;
+            } catch (std::bad_alloc &) {
+                fprintf(stderr, "%s: encountered bad_alloc error while reading tensor name %" PRIi64 "\n", __func__, i);
+                ok = false;
+            }
+            if (name.length() >= GGML_MAX_NAME) {
+                fprintf(stderr, "%s: tensor name %" PRIi64 " is too long: %zu >= %d\n", __func__, i, name.length(), GGML_MAX_NAME);
+                ok = false;
+                break;
+            }
+            ggml_set_name(&info.t, name.c_str());
+
+            // make sure there are no duplicate tensor names
+            for (int64_t j = 0; ok && j < i; ++j) {
+                if (strcmp(info.t.name, ctx->info[j].t.name) == 0) {
+                    fprintf(stderr, "%s: duplicate tensor name '%s' for tensors %" PRIi64 " and %" PRIi64 "\n", __func__, info.t.name, j, i);
+                    ok = false;
+                    break;
+                }
+            }
+        }
+        if (!ok) {
+            break;
+        }
+
+        // tensor shape
+        {
+            uint32_t n_dims = -1;
+            ok = ok && gr.read(n_dims);
+            if (n_dims > GGML_MAX_DIMS) {
+                fprintf(stderr, "%s: tensor '%s' has invalid number of dimensions: %" PRIu32 " > %" PRIu32 "\n",
+                    __func__, info.t.name, n_dims, GGML_MAX_DIMS);
+                ok = false;
+                break;
+            }
+            for (uint32_t j = 0; ok && j < GGML_MAX_DIMS; ++j) {
+                info.t.ne[j] = 1;
+                if (j < n_dims) {
+                    ok = ok && gr.read(info.t.ne[j]);
+                }
+
+                // check that all ne are non-negative
+                if (info.t.ne[j] < 0) {
+                    fprintf(stderr, "%s: tensor '%s' dimension %" PRIu32 " has invalid number of elements: %" PRIi64 " < 0\n",
+                        __func__, info.t.name, j, info.t.ne[j]);
+                    ok = false;
+                    break;
+                }
+            }
+
+            // check that the total number of elements is representable
+            if (ok && ((INT64_MAX/info.t.ne[1] <= info.t.ne[0]) ||
+                       (INT64_MAX/info.t.ne[2] <= info.t.ne[0]*info.t.ne[1]) ||
+                       (INT64_MAX/info.t.ne[3] <= info.t.ne[0]*info.t.ne[1]*info.t.ne[2]))) {
+
+                fprintf(stderr, "%s: total number of elements in tensor '%s' with shape "
+                    "(%" PRIi64 ", %" PRIi64 ", %" PRIi64 ", %" PRIi64 ") is >= %" PRIi64 "\n",
+                    __func__, info.t.name, info.t.ne[0], info.t.ne[1], info.t.ne[2], info.t.ne[3], INT64_MAX);
+                ok = false;
+                break;
+            }
+        }
+        if (!ok) {
+            break;
+        }
+
+        // tensor type
+        {
+            ok = ok && gr.read(info.t.type);
+
+            // check that tensor type is within defined range
+            if (info.t.type < 0 || info.t.type >= GGML_TYPE_COUNT) {
+                fprintf(stderr, "%s: tensor '%s' has invalid ggml type %d (%s)\n",
+                    __func__, info.t.name, info.t.type, ggml_type_name(info.t.type));
+                ok = false;
+                break;
+            }
+            const size_t  type_size = ggml_type_size(info.t.type);
+            const int64_t blck_size = ggml_blck_size(info.t.type);
+
+            // check that row size is divisible by block size
+            if (blck_size == 0 || info.t.ne[0] % blck_size != 0) {
+                fprintf(stderr, "%s: tensor '%s' of type %d (%s) has %" PRId64 " elements per row, "
+                    "not a multiple of block size (%" PRId64 ")\n",
+                    __func__, info.t.name, (int) info.t.type, ggml_type_name(info.t.type), info.t.ne[0], blck_size);
+                ok = false;
+                break;
+            }
+
+            // calculate byte offsets given the tensor shape and type
+            info.t.nb[0] = type_size;
+            info.t.nb[1] = info.t.nb[0]*(info.t.ne[0]/blck_size);
+            for (int j = 2; j < GGML_MAX_DIMS; ++j) {
+                info.t.nb[j] = info.t.nb[j - 1]*info.t.ne[j - 1];
+            }
+        }
+        if (!ok) {
+            break;
+        }
+
+        // tensor data offset within buffer
+        ok = ok && gr.read(info.offset);
+
+        ctx->info.push_back(info);
+    }
+
+    if (!ok) {
+        fprintf(stderr, "%s: failed to read tensor info\n", __func__);
+        gguf_free(ctx);
+        return nullptr;
+    }
+    GGML_ASSERT(int64_t(ctx->info.size()) == n_tensors);
+
+    // we require the data section to be aligned, so take into account any padding
+    if (fseek(file, GGML_PAD(ftell(file), ctx->alignment), SEEK_SET) != 0) {
+        fprintf(stderr, "%s: failed to seek to beginning of data section\n", __func__);
+        gguf_free(ctx);
+        return nullptr;
+    }
+
+    // store the current file offset - this is where the data section starts
+    ctx->offset = ftell(file);
+
+    // compute the total size of the data section, taking into account the alignment
+    {
+        ctx->size = 0;
+        for (size_t i = 0; i < ctx->info.size(); ++i) {
+            const gguf_tensor_info & ti = ctx->info[i];
+            if (ti.offset != ctx->size) {
+                fprintf(stderr, "%s: tensor '%s' has offset %" PRIu64 ", expected %zu\n",
+                    __func__, ti.t.name, ti.offset, ctx->size);
+                fprintf(stderr, "%s: failed to read tensor data\n", __func__);
+                gguf_free(ctx);
+                return nullptr;
+            }
+            ctx->size += GGML_PAD(ggml_nbytes(&ti.t), ctx->alignment);
+        }
+    }
+
+    // load the tensor data only if requested
+    if (params.ctx != nullptr) {
+        // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob
+        // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of
+        //   the ggml_tensor structs to the appropriate locations in the binary blob
+
+        // compute the exact size needed for the new ggml_context
+        const size_t mem_size =
+            params.no_alloc ?
+            (n_tensors    )*ggml_tensor_overhead() :
+            (n_tensors + 1)*ggml_tensor_overhead() + ctx->size;
+
+        struct ggml_init_params pdata = {
+            /*mem_size   =*/ mem_size,
+            /*mem_buffer =*/ nullptr,
+            /*no_alloc   =*/ params.no_alloc,
+        };
+
+        *params.ctx = ggml_init(pdata);
+        if (*params.ctx == nullptr) {
+            fprintf(stderr, "%s: failed to initialize ggml context for storing tensors\n", __func__);
+            gguf_free(ctx);
+            return nullptr;
+        }
+
+        struct ggml_context * ctx_data = *params.ctx;
+
+        struct ggml_tensor * data = nullptr;
+
+        if (!params.no_alloc) {
+            data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size);
+
+            ok = ok && data != nullptr;
+
+            // read the binary blob with the tensor data
+            ok = ok && gr.read(data->data, ctx->size);
+
+            if (!ok) {
+                fprintf(stderr, "%s: failed to read tensor data binary blob\n", __func__);
+                ggml_free(ctx_data);
+                *params.ctx = nullptr;
+                gguf_free(ctx);
+                return nullptr;
+            }
+
+            ctx->data = data->data;
+        }
+
+        ggml_set_no_alloc(ctx_data, true);
+
+        // create the tensors
+        for (size_t i = 0; i < ctx->info.size(); ++i) {
+            const struct gguf_tensor_info & info = ctx->info[i];
+
+            struct ggml_tensor * cur = ggml_new_tensor(ctx_data, info.t.type, GGML_MAX_DIMS, info.t.ne);
+
+            ok = ok && cur != nullptr;
+
+            if (!ok) {
+                break;
+            }
+
+            ggml_set_name(cur, info.t.name);
+
+            // point the data member to the appropriate location in the binary blob using the tensor info
+            if (!params.no_alloc) {
+                cur->data = (char *) data->data + info.offset;
+            }
+        }
+
+        if (!ok) {
+            fprintf(stderr, "%s: failed to create tensors\n", __func__);
+            ggml_free(ctx_data);
+            *params.ctx = nullptr;
+            gguf_free(ctx);
+            return nullptr;
+        }
+
+        ggml_set_no_alloc(ctx_data, params.no_alloc);
+    }
+
+    return ctx;
+}
+
+struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) {
+    FILE * file = ggml_fopen(fname, "rb");
+
+    if (!file) {
+        fprintf(stderr, "%s: failed to open GGUF file '%s'\n", __func__, fname);
+        return nullptr;
+    }
+
+    struct gguf_context * result = gguf_init_from_file_impl(file, params);
+    fclose(file);
+    return result;
+}
+
+void gguf_free(struct gguf_context * ctx) {
+    if (ctx == nullptr) {
+        return;
+    }
+    delete ctx;
+}
+
+const char * gguf_type_name(enum gguf_type type) {
+    auto it = GGUF_TYPE_NAME.find(type);
+    return it == GGUF_TYPE_NAME.end() ? nullptr : it->second;
+}
+
+uint32_t gguf_get_version(const struct gguf_context * ctx) {
+    return ctx->version;
+}
+
+size_t gguf_get_alignment(const struct gguf_context * ctx) {
+    return ctx->alignment;
+}
+
+size_t gguf_get_data_offset(const struct gguf_context * ctx) {
+    return ctx->offset;
+}
+
+int64_t gguf_get_n_kv(const struct gguf_context * ctx) {
+    return ctx->kv.size();
+}
+
+int64_t gguf_find_key(const struct gguf_context * ctx, const char * key) {
+    // return -1 if key not found
+    int64_t keyfound = -1;
+
+    const int64_t n_kv = gguf_get_n_kv(ctx);
+
+    for (int64_t i = 0; i < n_kv; ++i) {
+        if (strcmp(key, gguf_get_key(ctx, i)) == 0) {
+            keyfound = i;
+            break;
+        }
+    }
+
+    return keyfound;
+}
+
+const char * gguf_get_key(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    return ctx->kv[key_id].get_key().c_str();
+}
+
+enum gguf_type gguf_get_kv_type(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    return ctx->kv[key_id].is_array ? GGUF_TYPE_ARRAY : ctx->kv[key_id].get_type();
+}
+
+enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].is_array);
+    return ctx->kv[key_id].get_type();
+}
+
+const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING);
+    return ctx->kv[key_id].data.data();
+}
+
+const char * gguf_get_arr_str(const struct gguf_context * ctx, int64_t key_id, size_t i) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_type() == GGUF_TYPE_STRING);
+    return ctx->kv[key_id].data_string[i].c_str();
+}
+
+size_t gguf_get_arr_n(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+
+    if (ctx->kv[key_id].type == GGUF_TYPE_STRING) {
+        return ctx->kv[key_id].data_string.size();
+    }
+
+    const size_t type_size = gguf_type_size(ctx->kv[key_id].type);
+    GGML_ASSERT(ctx->kv[key_id].data.size() % type_size == 0);
+    return ctx->kv[key_id].data.size() / type_size;
+}
+
+uint8_t gguf_get_val_u8(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+int8_t gguf_get_val_i8(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+uint16_t gguf_get_val_u16(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+int16_t gguf_get_val_i16(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+uint32_t gguf_get_val_u32(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+int32_t gguf_get_val_i32(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+float gguf_get_val_f32(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+uint64_t gguf_get_val_u64(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+int64_t gguf_get_val_i64(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+double gguf_get_val_f64(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+bool gguf_get_val_bool(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val();
+}
+
+const char * gguf_get_val_str(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    return ctx->kv[key_id].get_val().c_str();
+}
+
+const void * gguf_get_val_data(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
+    GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING);
+    return ctx->kv[key_id].data.data();
+}
+
+int64_t gguf_get_n_tensors(const struct gguf_context * ctx) {
+    return ctx->info.size();
+}
+
+int64_t gguf_find_tensor(const struct gguf_context * ctx, const char * name) {
+    // return -1 if tensor not found
+    int64_t tensor_id = -1;
+
+    const int64_t n_tensors = gguf_get_n_tensors(ctx);
+
+    for (int64_t i = 0; i < n_tensors; ++i) {
+        if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) {
+            tensor_id = i;
+            break;
+        }
+    }
+
+    return tensor_id;
+}
+
+size_t gguf_get_tensor_offset(const struct gguf_context * ctx, int64_t tensor_id) {
+    GGML_ASSERT(tensor_id >= 0 && tensor_id < gguf_get_n_tensors(ctx));
+    return ctx->info[tensor_id].offset;
+}
+
+const char * gguf_get_tensor_name(const struct gguf_context * ctx, int64_t tensor_id) {
+    GGML_ASSERT(tensor_id >= 0 && tensor_id < gguf_get_n_tensors(ctx));
+    return ctx->info[tensor_id].t.name;
+}
+
+enum ggml_type gguf_get_tensor_type(const struct gguf_context * ctx, int64_t tensor_id) {
+    GGML_ASSERT(tensor_id >= 0 && tensor_id < gguf_get_n_tensors(ctx));
+    return ctx->info[tensor_id].t.type;
+}
+
+size_t gguf_get_tensor_size(const struct gguf_context * ctx, int64_t tensor_id) {
+    GGML_ASSERT(tensor_id >= 0 && tensor_id < gguf_get_n_tensors(ctx));
+    return ggml_nbytes(&ctx->info[tensor_id].t);
+}
+
+int64_t gguf_remove_key(struct gguf_context * ctx, const char * key) {
+    const int64_t key_id = gguf_find_key(ctx, key);
+    if (key_id >= 0) {
+        ctx->kv.erase(ctx->kv.begin() + key_id);
+    }
+    return key_id;
+}
+
+template
+static void gguf_check_reserved_keys(const std::string & key, const T val) {
+    if (key == GGUF_KEY_GENERAL_ALIGNMENT) {
+        if constexpr (std::is_same::value) {
+            GGML_ASSERT(val > 0 && (val & (val - 1)) == 0 && GGUF_KEY_GENERAL_ALIGNMENT " must be power of 2");
+        } else {
+            GGML_ABORT(GGUF_KEY_GENERAL_ALIGNMENT " must be type u32");
+        }
+    }
+}
+
+void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_u64(struct gguf_context * ctx, const char * key, uint64_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_i64(struct gguf_context * ctx, const char * key, int64_t val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_f64(struct gguf_context * ctx, const char * key, double val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, val);
+}
+
+void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) {
+    gguf_check_reserved_keys(key, val);
+    gguf_remove_key(ctx, key);
+    ctx->kv.emplace_back(key, std::string(val));
+}
+
+void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, size_t n) {
+    gguf_check_reserved_keys(key, data);
+    gguf_remove_key(ctx, key);
+
+    const size_t nbytes = n*gguf_type_size(type);
+    std::vector tmp(nbytes);
+    if (!tmp.empty()) {
+        memcpy(tmp.data(), data, nbytes);
+    }
+    ctx->kv.emplace_back(key, tmp);
+    ctx->kv.back().cast(type);
+}
+
+void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, size_t n) {
+    gguf_check_reserved_keys(key, data);
+    gguf_remove_key(ctx, key);
+
+    std::vector tmp(n);
+    for (size_t i = 0; i < n; ++i) {
+        tmp[i] = data[i];
+    }
+    ctx->kv.emplace_back(key, tmp);
+}
+
+// set or add KV pairs from another context
+void gguf_set_kv(struct gguf_context * ctx, const struct gguf_context * src) {
+    const int64_t n_kv = gguf_get_n_kv(src);
+    for (int64_t i = 0; i < n_kv; ++i) {
+        const struct gguf_kv & kv = src->kv[i];
+
+        if (!kv.is_array) {
+            switch (kv.get_type()) {
+                case GGUF_TYPE_UINT8:   gguf_set_val_u8  (ctx, kv.get_key().c_str(), kv.get_val());             break;
+                case GGUF_TYPE_INT8:    gguf_set_val_i8  (ctx, kv.get_key().c_str(), kv.get_val());              break;
+                case GGUF_TYPE_UINT16:  gguf_set_val_u16 (ctx, kv.get_key().c_str(), kv.get_val());            break;
+                case GGUF_TYPE_INT16:   gguf_set_val_i16 (ctx, kv.get_key().c_str(), kv.get_val());             break;
+                case GGUF_TYPE_UINT32:  gguf_set_val_u32 (ctx, kv.get_key().c_str(), kv.get_val());            break;
+                case GGUF_TYPE_INT32:   gguf_set_val_i32 (ctx, kv.get_key().c_str(), kv.get_val());             break;
+                case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, kv.get_key().c_str(), kv.get_val());               break;
+                case GGUF_TYPE_UINT64:  gguf_set_val_u64 (ctx, kv.get_key().c_str(), kv.get_val());            break;
+                case GGUF_TYPE_INT64:   gguf_set_val_i64 (ctx, kv.get_key().c_str(), kv.get_val());             break;
+                case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (ctx, kv.get_key().c_str(), kv.get_val());              break;
+                case GGUF_TYPE_BOOL:    gguf_set_val_bool(ctx, kv.get_key().c_str(), kv.get_val());                break;
+                case GGUF_TYPE_STRING:  gguf_set_val_str (ctx, kv.get_key().c_str(), kv.get_val().c_str()); break;
+                case GGUF_TYPE_ARRAY:
+                default: GGML_ABORT("invalid type");
+            }
+            continue;
+        }
+
+        const size_t ne = kv.get_ne();
+
+        switch (kv.get_type()) {
+            case GGUF_TYPE_UINT8:
+            case GGUF_TYPE_INT8:
+            case GGUF_TYPE_UINT16:
+            case GGUF_TYPE_INT16:
+            case GGUF_TYPE_UINT32:
+            case GGUF_TYPE_INT32:
+            case GGUF_TYPE_FLOAT32:
+            case GGUF_TYPE_UINT64:
+            case GGUF_TYPE_INT64:
+            case GGUF_TYPE_FLOAT64:
+            case GGUF_TYPE_BOOL: {
+                gguf_set_arr_data(ctx, kv.get_key().c_str(), kv.get_type(), kv.data.data(), ne);
+            } break;
+            case GGUF_TYPE_STRING: {
+                std::vector tmp(ne);
+                for (size_t j = 0; j < ne; ++j) {
+                    tmp[j] = kv.data_string[j].c_str();
+                }
+                gguf_set_arr_str(ctx, kv.get_key().c_str(), tmp.data(), ne);
+            } break;
+            case GGUF_TYPE_ARRAY:
+            default: GGML_ABORT("invalid type");
+        }
+    }
+}
+
+void gguf_add_tensor(
+             struct gguf_context * ctx,
+        const struct ggml_tensor * tensor) {
+    GGML_ASSERT(tensor);
+    if (gguf_find_tensor(ctx, tensor->name) != -1) {
+        GGML_ABORT("duplicate tensor name: %s", tensor->name);
+    }
+
+    struct gguf_tensor_info ti;
+    ti.t = *tensor;
+    ti.offset = ctx->info.empty() ? 0 :
+        ctx->info.back().offset + GGML_PAD(ggml_nbytes(&ctx->info.back().t), ctx->alignment);
+    ctx->info.push_back(ti);
+}
+
+void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) {
+    const int64_t tensor_id = gguf_find_tensor(ctx, name);
+    if (tensor_id < 0) {
+        GGML_ABORT("tensor not found: %s", name);
+    }
+    struct ggml_tensor * tensor = &ctx->info[tensor_id].t;
+    const size_t  type_size = ggml_type_size(type);
+    const int64_t blck_size = ggml_blck_size(type);
+
+    tensor->type = type;
+    GGML_ASSERT(tensor->ne[0] % blck_size == 0 && "tensor row size not divisible by block size of new type");
+
+    tensor->nb[0] = type_size;
+    tensor->nb[1] = tensor->nb[0]*(tensor->ne[0]/blck_size);
+    for (int i = 2; i < GGML_MAX_DIMS; i++) {
+        tensor->nb[i] = tensor->nb[i - 1]*tensor->ne[i - 1];
+    }
+
+    // update offsets
+    const int64_t n_tensors = gguf_get_n_tensors(ctx);
+    for (int64_t i = tensor_id + 1; i < n_tensors; ++i) {
+        ctx->info[i].offset = ctx->info[i - 1].offset + GGML_PAD(ggml_nbytes(&ctx->info[i - 1].t), ctx->alignment);
+    }
+}
+
+void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data) {
+    const int64_t tensor_id = gguf_find_tensor(ctx, name);
+    if (tensor_id < 0) {
+        GGML_ABORT("tensor not found: %s", name);
+    }
+
+    ctx->info[tensor_id].t.data = (void *)(uintptr_t)data; // double cast suppresses warning about casting away const
+}
+
+struct gguf_writer {
+    std::vector & buf;
+
+    gguf_writer(std::vector & buf) : buf(buf) {}
+
+    template 
+    void write(const T & val) const {
+        for (size_t i = 0; i < sizeof(val); ++i) {
+            buf.push_back(reinterpret_cast(&val)[i]);
+        }
+    }
+
+    void write(const std::vector & val) const {
+        buf.insert(buf.end(), val.begin(), val.end());
+    }
+
+    void write(const bool & val) const {
+        const int8_t val8 = val ? 1 : 0;
+        write(val8);
+    }
+
+    void write(const std::string & val) const {
+        {
+            const uint64_t n = val.length();
+            write(n);
+        }
+        for (size_t i = 0; i < val.length(); ++i) {
+            buf.push_back(reinterpret_cast(val.data())[i]);
+        }
+    }
+
+    void write(const char * val) const {
+        write(std::string(val));
+    }
+
+    void write(const enum ggml_type & val) const {
+        write(int32_t(val));
+    }
+
+    void write(const enum gguf_type & val) const {
+        write(int32_t(val));
+    }
+
+    void write(const struct gguf_kv & kv) const {
+        const uint64_t ne = kv.get_ne();
+
+        write(kv.get_key());
+
+        if (kv.is_array) {
+            write(GGUF_TYPE_ARRAY);
+            write(kv.get_type());
+            write(ne);
+        } else {
+            write(kv.get_type());
+        }
+
+        switch (kv.get_type()) {
+            case GGUF_TYPE_UINT8:
+            case GGUF_TYPE_INT8:
+            case GGUF_TYPE_UINT16:
+            case GGUF_TYPE_INT16:
+            case GGUF_TYPE_UINT32:
+            case GGUF_TYPE_INT32:
+            case GGUF_TYPE_FLOAT32:
+            case GGUF_TYPE_UINT64:
+            case GGUF_TYPE_INT64:
+            case GGUF_TYPE_FLOAT64: {
+                write(kv.data);
+            } break;
+            case GGUF_TYPE_BOOL: {
+                for (size_t i = 0; i < ne; ++i) {
+                    write(kv.get_val(i));
+                }
+            } break;
+            case GGUF_TYPE_STRING: {
+                for (size_t i = 0; i < ne; ++i) {
+                    write(kv.get_val(i));
+                }
+            } break;
+            case GGUF_TYPE_ARRAY:
+            default: GGML_ABORT("invalid type");
+        }
+    }
+
+    void write_tensor_meta(const struct gguf_tensor_info & info) const {
+        write(info.t.name);
+
+        const uint32_t n_dims = ggml_n_dims(&info.t);
+        write(n_dims);
+
+        for (uint32_t j = 0; j < n_dims; ++j) {
+            write(info.t.ne[j]);
+        }
+        write(info.t.type);
+        write(info.offset);
+    }
+
+    void pad(const size_t alignment) const {
+        while (buf.size() % alignment != 0) {
+            const int8_t zero = 0;
+            write(zero);
+        }
+    }
+
+    void write_tensor_data(const struct gguf_tensor_info & info, const size_t offset_data, const size_t alignment) const {
+        GGML_ASSERT(buf.size() - offset_data == info.offset);
+
+        GGML_ASSERT(ggml_is_contiguous(&info.t));
+        const size_t offset = buf.size();
+        const size_t nbytes = ggml_nbytes(&info.t);
+
+        buf.resize(offset + nbytes);
+        if (info.t.buffer) {
+            ggml_backend_tensor_get(&info.t, buf.data() + offset, 0, nbytes);
+        } else {
+            GGML_ASSERT(info.t.data);
+            memcpy(buf.data() + offset, info.t.data, nbytes);
+        }
+
+        pad(alignment);
+    }
+};
+
+void gguf_write_to_buf(const struct gguf_context * ctx, std::vector & buf, bool only_meta) {
+    const struct gguf_writer gw(buf);
+
+    const int64_t n_kv      = gguf_get_n_kv(ctx);
+    const int64_t n_tensors = gguf_get_n_tensors(ctx);
+
+    // write header
+    gw.write(GGUF_MAGIC[0]);
+    gw.write(GGUF_MAGIC[1]);
+    gw.write(GGUF_MAGIC[2]);
+    gw.write(GGUF_MAGIC[3]);
+    gw.write(ctx->version);
+    gw.write(n_tensors);
+    gw.write(n_kv);
+
+    // write key-value pairs
+    for (int64_t i = 0; i < n_kv; ++i) {
+        gw.write(ctx->kv[i]);
+    }
+
+    // write tensor info
+    for (int64_t i = 0; i < n_tensors; ++i) {
+        gw.write_tensor_meta(ctx->info[i]);
+    }
+
+    // we require the data section to be aligned
+    gw.pad(ctx->alignment);
+
+    if (only_meta) {
+        return;
+    }
+
+    const size_t offset_data = gw.buf.size();
+
+    // write tensor data
+    for (int64_t i = 0; i < n_tensors; ++i) {
+        gw.write_tensor_data(ctx->info[i], offset_data, ctx->alignment);
+    }
+}
+
+bool gguf_write_to_file(const struct gguf_context * ctx, const char * fname, bool only_meta) {
+    FILE * file = ggml_fopen(fname, "wb");
+
+    if (!file) {
+        fprintf(stderr, "%s: failed to open file '%s' for writing GGUF data\n", __func__, fname);
+        return false;
+    }
+
+    std::vector buf;
+    gguf_write_to_buf(ctx, buf, only_meta);
+    const bool ok = fwrite(buf.data(), 1, buf.size(), file) == buf.size();
+    fclose(file);
+    return ok;
+}
+
+size_t gguf_get_meta_size(const struct gguf_context * ctx) {
+    // only return size
+    std::vector buf;
+    gguf_write_to_buf(ctx, buf, /*only_meta =*/ true);
+    return buf.size();
+}
+
+void gguf_get_meta_data(const struct gguf_context * ctx, void * data) {
+    std::vector buf;
+    gguf_write_to_buf(ctx, buf, /*only_meta =*/ true);
+    memcpy(data, buf.data(), buf.size());
+}
diff --git a/src/llama-impl.cpp b/src/llama-impl.cpp
index a05ba4f63..6ec709dd3 100644
--- a/src/llama-impl.cpp
+++ b/src/llama-impl.cpp
@@ -1,5 +1,6 @@
 #include "llama-impl.h"
 
+#include "gguf.h"
 #include "llama.h"
 
 #include 
@@ -138,7 +139,7 @@ std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
             {
                 const enum gguf_type arr_type = gguf_get_arr_type(ctx_gguf, i);
                 int arr_n = gguf_get_arr_n(ctx_gguf, i);
-                const void * data = gguf_get_arr_data(ctx_gguf, i);
+                const void * data = arr_type == GGUF_TYPE_STRING ? nullptr : gguf_get_arr_data(ctx_gguf, i);
                 std::stringstream ss;
                 ss << "[";
                 for (int j = 0; j < arr_n; j++) {
diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp
index 7743b4652..1c4e30878 100644
--- a/src/llama-model-loader.cpp
+++ b/src/llama-model-loader.cpp
@@ -18,7 +18,7 @@ const char * llama_file_version_name(llama_fver version) {
 }
 
 namespace GGUFMeta {
-    template 
+    template 
     struct GKV_Base_Type {
         static constexpr gguf_type gt = gt_;
 
@@ -60,10 +60,11 @@ namespace GGUFMeta {
         public:
         static constexpr gguf_type gt = GGUF_TYPE_ARRAY;
         static ArrayInfo getter(const gguf_context *ctx, const int k) {
+            const enum gguf_type arr_type = gguf_get_arr_type(ctx, k);
             return ArrayInfo {
-                gguf_get_arr_type(ctx, k),
+                arr_type,
                 size_t(gguf_get_arr_n(ctx, k)),
-                gguf_get_arr_data(ctx, k),
+                arr_type == GGUF_TYPE_STRING ? nullptr : gguf_get_arr_data(ctx, k),
             };
         }
     };
@@ -553,7 +554,7 @@ llama_model_loader::llama_model_loader(const std::string & fname, bool use_mmap,
             const enum gguf_type type   = gguf_get_kv_type(meta.get(), i);
             const std::string type_name =
                 type == GGUF_TYPE_ARRAY
-                ? format("%s[%s,%d]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i))
+                ? format("%s[%s,%zu]", gguf_type_name(type), gguf_type_name(gguf_get_arr_type(meta.get(), i)), gguf_get_arr_n(meta.get(), i))
                 : gguf_type_name(type);
 
             std::string value          = gguf_kv_to_str(meta.get(), i);
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index 104f90343..038cf58dd 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -875,7 +875,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
 
         // update the gguf meta data as we go
         gguf_set_tensor_type(ctx_outs[cur_split].get(), name.c_str(), new_type);
-        gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data, new_size);
+        GGML_ASSERT(gguf_get_tensor_size(ctx_outs[cur_split].get(), gguf_find_tensor(ctx_outs[cur_split].get(), name.c_str())) == new_size);
+        gguf_set_tensor_data(ctx_outs[cur_split].get(), name.c_str(), new_data);
 
         // write tensor data + padding
         fout.write((const char *) new_data, new_size);
diff --git a/tests/test-gguf.cpp b/tests/test-gguf.cpp
index 1bb5fb47c..611957ac0 100644
--- a/tests/test-gguf.cpp
+++ b/tests/test-gguf.cpp
@@ -15,66 +15,71 @@ constexpr int offset_has_tensors = 2000;
 constexpr int offset_has_data    = 3000;
 
 enum handcrafted_file_type {
-    HANDCRAFTED_HEADER_BAD_MAGIC          =  10,
-    HANDCRAFTED_HEADER_BAD_VERSION_1      =  20,
-    HANDCRAFTED_HEADER_BAD_VERSION_FUTURE =  30,
-    HANDCRAFTED_HEADER_BAD_N_TENSORS      =  40,
-    HANDCRAFTED_HEADER_BAD_N_KV           =  50,
-    HANDCRAFTED_HEADER_EMPTY              = 800,
+    HANDCRAFTED_HEADER_BAD_MAGIC           =  10,
+    HANDCRAFTED_HEADER_BAD_VERSION_1       =  20,
+    HANDCRAFTED_HEADER_BAD_VERSION_FUTURE  =  30,
+    HANDCRAFTED_HEADER_BAD_N_TENSORS       =  40,
+    HANDCRAFTED_HEADER_BAD_N_KV            =  50,
+    HANDCRAFTED_HEADER_EMPTY               = 800,
 
-    HANDCRAFTED_KV_BAD_KEY_SIZE           =  10 + offset_has_kv,
-    HANDCRAFTED_KV_BAD_TYPE               =  20 + offset_has_kv,
-    HANDCRAFTED_KV_BAD_VALUE_SIZE         =  30 + offset_has_kv,
-    HANDCRAFTED_KV_DUPLICATE_KEY          =  40 + offset_has_kv,
-    HANDCRAFTED_KV_SUCCESS                = 800 + offset_has_kv,
+    HANDCRAFTED_KV_BAD_KEY_SIZE            =  10 + offset_has_kv,
+    HANDCRAFTED_KV_BAD_TYPE                =  20 + offset_has_kv,
+    // HANDCRAFTED_KV_BAD_VALUE_SIZE          =  30 + offset_has_kv, // removed because it can result in allocations > 1 TB (default sanitizer limit)
+    HANDCRAFTED_KV_DUPLICATE_KEY           =  40 + offset_has_kv,
+    HANDCRAFTED_KV_BAD_ALIGN               =  50 + offset_has_kv,
+    HANDCRAFTED_KV_SUCCESS                 = 800 + offset_has_kv,
 
-    HANDCRAFTED_TENSORS_BAD_NAME_SIZE     =  10 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_N_DIMS        =  20 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_SHAPE         =  30 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_NE_TOO_BIG        =  40 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_TYPE          =  50 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_OFFSET        =  60 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_DUPLICATE_NAME    =  70 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_BAD_ALIGNMENT     =  80 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_SUCCESS           = 800 + offset_has_tensors,
-    HANDCRAFTED_TENSORS_CUSTOM_ALIGN      = 810 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_NAME_SIZE      =  10 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_N_DIMS         =  20 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_SHAPE          =  30 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_NE_TOO_BIG         =  40 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_TYPE           =  50 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_OFFSET         =  60 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_DUPLICATE_NAME     =  70 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_BAD_ALIGN          =  75 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN =  80 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_SUCCESS            = 800 + offset_has_tensors,
+    HANDCRAFTED_TENSORS_CUSTOM_ALIGN       = 810 + offset_has_tensors,
 
-    HANDCRAFTED_DATA_NOT_ENOUGH_DATA      =  10 + offset_has_data,
-    HANDCRAFTED_DATA_BAD_ALIGNMENT        =  20 + offset_has_data,
-    HANDCRAFTED_DATA_SUCCESS              = 800 + offset_has_data,
-    HANDCRAFTED_DATA_CUSTOM_ALIGN         = 810 + offset_has_data,
+    HANDCRAFTED_DATA_NOT_ENOUGH_DATA       =  10 + offset_has_data,
+    HANDCRAFTED_DATA_BAD_ALIGN             =  15 + offset_has_data,
+    HANDCRAFTED_DATA_INCONSISTENT_ALIGN    =  20 + offset_has_data,
+    HANDCRAFTED_DATA_SUCCESS               = 800 + offset_has_data,
+    HANDCRAFTED_DATA_CUSTOM_ALIGN          = 810 + offset_has_data,
 };
 
 std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) {
     switch (hft) {
-        case HANDCRAFTED_HEADER_BAD_MAGIC:          return "HEADER_BAD_MAGIC";
-        case HANDCRAFTED_HEADER_BAD_VERSION_1:      return "HEADER_BAD_VERSION_1";
-        case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE: return "HEADER_BAD_VERSION_FUTURE";
-        case HANDCRAFTED_HEADER_BAD_N_KV:           return "HEADER_BAD_N_KV";
-        case HANDCRAFTED_HEADER_BAD_N_TENSORS:      return "HEADER_BAD_N_TENSORS";
-        case HANDCRAFTED_HEADER_EMPTY:              return "HEADER_EMPTY";
+        case HANDCRAFTED_HEADER_BAD_MAGIC:           return "HEADER_BAD_MAGIC";
+        case HANDCRAFTED_HEADER_BAD_VERSION_1:       return "HEADER_BAD_VERSION_1";
+        case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE:  return "HEADER_BAD_VERSION_FUTURE";
+        case HANDCRAFTED_HEADER_BAD_N_KV:            return "HEADER_BAD_N_KV";
+        case HANDCRAFTED_HEADER_BAD_N_TENSORS:       return "HEADER_BAD_N_TENSORS";
+        case HANDCRAFTED_HEADER_EMPTY:               return "HEADER_EMPTY";
 
-        case HANDCRAFTED_KV_BAD_KEY_SIZE:           return "KV_BAD_KEY_SIZE";
-        case HANDCRAFTED_KV_BAD_TYPE:               return "KV_BAD_TYPE";
-        case HANDCRAFTED_KV_BAD_VALUE_SIZE:         return "KV_BAD_VALUE_SIZE";
-        case HANDCRAFTED_KV_DUPLICATE_KEY:          return "KV_DUPLICATE_KEY";
-        case HANDCRAFTED_KV_SUCCESS:                return "KV_RANDOM_KV";
+        case HANDCRAFTED_KV_BAD_KEY_SIZE:            return "KV_BAD_KEY_SIZE";
+        case HANDCRAFTED_KV_BAD_TYPE:                return "KV_BAD_TYPE";
+        case HANDCRAFTED_KV_DUPLICATE_KEY:           return "KV_DUPLICATE_KEY";
+        case HANDCRAFTED_KV_BAD_ALIGN:               return "KV_BAD_ALIGN";
+        case HANDCRAFTED_KV_SUCCESS:                 return "KV_RANDOM_KV";
 
-        case HANDCRAFTED_TENSORS_BAD_NAME_SIZE:     return "TENSORS_BAD_NAME_SIZE";
-        case HANDCRAFTED_TENSORS_BAD_N_DIMS:        return "TENSORS_BAD_N_DIMS";
-        case HANDCRAFTED_TENSORS_BAD_SHAPE:         return "TENSORS_BAD_SHAPE";
-        case HANDCRAFTED_TENSORS_NE_TOO_BIG:        return "TENSORS_NE_TOO_BIG";
-        case HANDCRAFTED_TENSORS_BAD_TYPE:          return "TENSORS_BAD_TYPE";
-        case HANDCRAFTED_TENSORS_BAD_OFFSET:        return "TENSORS_BAD_OFFSET";
-        case HANDCRAFTED_TENSORS_DUPLICATE_NAME:    return "TENSORS_DUPLICATE_NAME";
-        case HANDCRAFTED_TENSORS_BAD_ALIGNMENT:     return "TENSORS_BAD_ALIGNMENT";
-        case HANDCRAFTED_TENSORS_SUCCESS:           return "TENSORS_SUCCESS";
-        case HANDCRAFTED_TENSORS_CUSTOM_ALIGN:      return "TENSORS_CUSTOM_ALIGN";
+        case HANDCRAFTED_TENSORS_BAD_NAME_SIZE:      return "TENSORS_BAD_NAME_SIZE";
+        case HANDCRAFTED_TENSORS_BAD_N_DIMS:         return "TENSORS_BAD_N_DIMS";
+        case HANDCRAFTED_TENSORS_BAD_SHAPE:          return "TENSORS_BAD_SHAPE";
+        case HANDCRAFTED_TENSORS_NE_TOO_BIG:         return "TENSORS_NE_TOO_BIG";
+        case HANDCRAFTED_TENSORS_BAD_TYPE:           return "TENSORS_BAD_TYPE";
+        case HANDCRAFTED_TENSORS_BAD_OFFSET:         return "TENSORS_BAD_OFFSET";
+        case HANDCRAFTED_TENSORS_DUPLICATE_NAME:     return "TENSORS_DUPLICATE_NAME";
+        case HANDCRAFTED_TENSORS_BAD_ALIGN:          return "TENSORS_BAD_ALIGN";
+        case HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN: return "TENSORS_INCONSISTENT_ALIGN";
+        case HANDCRAFTED_TENSORS_SUCCESS:            return "TENSORS_SUCCESS";
+        case HANDCRAFTED_TENSORS_CUSTOM_ALIGN:       return "TENSORS_CUSTOM_ALIGN";
 
-        case HANDCRAFTED_DATA_NOT_ENOUGH_DATA:      return "DATA_NOT_ENOUGH_DATA";
-        case HANDCRAFTED_DATA_BAD_ALIGNMENT:        return "DATA_BAD_ALIGNMENT";
-        case HANDCRAFTED_DATA_SUCCESS:              return "DATA_SUCCESS";
-        case HANDCRAFTED_DATA_CUSTOM_ALIGN:         return "DATA_CUSTOM_ALIGN";
+        case HANDCRAFTED_DATA_NOT_ENOUGH_DATA:       return "DATA_NOT_ENOUGH_DATA";
+        case HANDCRAFTED_DATA_BAD_ALIGN:             return "DATA_BAD_ALIGN";
+        case HANDCRAFTED_DATA_INCONSISTENT_ALIGN:    return "DATA_INCONSISTENT_ALIGN";
+        case HANDCRAFTED_DATA_SUCCESS:               return "DATA_SUCCESS";
+        case HANDCRAFTED_DATA_CUSTOM_ALIGN:          return "DATA_CUSTOM_ALIGN";
     }
     GGML_ABORT("fatal error");
 }
@@ -140,31 +145,41 @@ std::vector> get_kv_types(std::mt19937
     return kv_types;
 }
 
-static void helper_write(const void * data, const size_t nbytes, FILE * file) {
+template 
+static void helper_write(FILE * file, const T & val) {
+    GGML_ASSERT(fwrite(&val, 1, sizeof(val), file) == sizeof(val));
+}
+
+static void helper_write(FILE * file, const void * data, const size_t nbytes) {
     GGML_ASSERT(fwrite(data, 1, nbytes, file) == nbytes);
 }
 
 static FILE * get_handcrafted_file(const unsigned int seed, const enum handcrafted_file_type hft, const int extra_bytes = 0) {
     FILE * file = tmpfile();
 
+    if (!file) {
+        return file;
+    }
+
     std::mt19937 rng(seed);
+    uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;
 
     if (hft == HANDCRAFTED_HEADER_BAD_MAGIC) {
         const char bad_magic[4] = {'F', 'U', 'G', 'G'};
-        helper_write(bad_magic, sizeof(bad_magic), file);
+        helper_write(file, bad_magic, sizeof(bad_magic));
     } else {
-        helper_write(GGUF_MAGIC, 4, file);
+        helper_write(file, GGUF_MAGIC, 4);
     }
 
     if (hft == HANDCRAFTED_HEADER_BAD_VERSION_1) {
         const uint32_t version = 1;
-        helper_write(&version, sizeof(version), file);
+        helper_write(file, version);
     } else if (hft == HANDCRAFTED_HEADER_BAD_VERSION_FUTURE) {
         const uint32_t version = GGUF_VERSION + 1;
-        helper_write(&version, sizeof(version), file);
+        helper_write(file, version);
     } else {
         const uint32_t version = GGUF_VERSION;
-        helper_write(&version, sizeof(version), file);
+        helper_write(file, version);
     }
 
     std::vector tensor_configs;
@@ -174,10 +189,10 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
 
     if (hft == HANDCRAFTED_HEADER_BAD_N_TENSORS) {
         const uint64_t n_tensors = -1;
-        helper_write(&n_tensors, sizeof(n_tensors), file);
+        helper_write(file, n_tensors);
     } else {
         const uint64_t n_tensors = tensor_configs.size();
-        helper_write(&n_tensors, sizeof(n_tensors), file);
+        helper_write(file, n_tensors);
     }
 
     std::vector> kv_types;
@@ -186,41 +201,49 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
     }
     {
         uint64_t n_kv = kv_types.size();
-        if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
+        if (hft == HANDCRAFTED_KV_BAD_ALIGN      ||
+            hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
+            hft == HANDCRAFTED_DATA_BAD_ALIGN    || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
+
             n_kv += 1;
         } else if (hft == HANDCRAFTED_HEADER_BAD_N_KV) {
             n_kv = -1;
         }
-        helper_write(&n_kv, sizeof(n_kv), file);
+        helper_write(file, n_kv);
     }
 
     if (hft < offset_has_kv) {
+        while (ftell(file) % alignment != 0) {
+            const char pad = 0;
+            helper_write(file, pad);
+        }
+
         for (int i = 0; i < extra_bytes; ++i) {
             const char tmp = 0;
-            helper_write(&tmp, sizeof(tmp), file);
+            helper_write(file, tmp);
         }
         rewind(file);
         return file;
     }
 
     for (int i = 0; i < int(kv_types.size()); ++i) {
-        const enum gguf_type type     = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].first);
-        const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].second);
+        const enum gguf_type type     = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].first);
+        const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? GGUF_TYPE_COUNT : kv_types[i].second);
 
         const std::string key = "my_key_" + std::to_string((hft == HANDCRAFTED_KV_DUPLICATE_KEY ? i/2 : i));
 
         if (hft == HANDCRAFTED_KV_BAD_KEY_SIZE) {
             const uint64_t n = -1;
-            helper_write(&n, sizeof(n), file);
+            helper_write(file, n);
         } else {
             const uint64_t n = key.length();
-            helper_write(&n, sizeof(n), file);
+            helper_write(file, n);
         }
-        helper_write(key.data(), key.length(), file);
+        helper_write(file, key.data(), key.length());
 
         {
             const int32_t type32 = int32_t(type);
-            helper_write(&type32, sizeof(type32), file);
+            helper_write(file, type32);
         }
 
         uint32_t data[16];
@@ -233,69 +256,67 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
 
         if (type == GGUF_TYPE_STRING) {
             const uint64_t n = rng() % sizeof(data);
-            helper_write(&n,   sizeof(n), file);
-            helper_write(data,        n,  file);
+            helper_write(file, n);
+            helper_write(file, data, n);
             continue;
         }
 
         if (type == GGUF_TYPE_ARRAY) {
             {
                 const int32_t type32 = int32_t(type_arr);
-                helper_write(&type32, sizeof(type32), file);
+                helper_write(file, type32);
             }
             if (type_arr == GGUF_TYPE_STRING) {
                 const uint64_t nstr = rng() % (16 + 1);
-                helper_write(&nstr, sizeof(nstr), file);
+                helper_write(file, nstr);
                 for (uint64_t istr = 0; istr < nstr; ++istr) {
                     const uint64_t n = rng() % (sizeof(uint32_t) + 1);
-                    helper_write(&n,          sizeof(n), file);
-                    helper_write(&data[istr],        n,  file);
+                    helper_write(file, n);
+                    helper_write(file, &data[istr], n);
                 }
                 continue;
             }
             const size_t type_size = gguf_type_size(type_arr);
             const uint64_t n = (rng() % sizeof(data)) / type_size;
-            helper_write(&n,    sizeof(n),   file);
-            helper_write(&data, n*type_size, file);
+            helper_write(file, n);
+            helper_write(file, &data, n*type_size);
             continue;
         }
 
-        size_t type_size = hft == HANDCRAFTED_KV_BAD_TYPE ? 1 : gguf_type_size(type);
-        if (hft == HANDCRAFTED_KV_BAD_VALUE_SIZE) {
-            type_size += rng() % 3;
-        }
-        helper_write(data, type_size, file);
+        helper_write(file, data, hft == HANDCRAFTED_KV_BAD_TYPE ? 1 : gguf_type_size(type));
     }
 
-    if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
-        const std::string key = "general.alignment";
-        {
-            const uint64_t n = key.length();
-            helper_write(&n, sizeof(n), file);
-        }
-        helper_write(key.data(), key.length(), file);
+    if (hft == HANDCRAFTED_KV_BAD_ALIGN      ||
+        hft == HANDCRAFTED_TENSORS_BAD_ALIGN || hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN ||
+        hft == HANDCRAFTED_DATA_BAD_ALIGN    || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
+
+        const uint64_t n = strlen(GGUF_KEY_GENERAL_ALIGNMENT);
+        helper_write(file, n);
+        helper_write(file, GGUF_KEY_GENERAL_ALIGNMENT, n);
 
         const int32_t type = gguf_type(GGUF_TYPE_UINT32);
-        helper_write(&type, sizeof(type), file);
+        helper_write(file, type);
 
-        const uint32_t alignment = GGUF_DEFAULT_ALIGNMENT + 1;
-        helper_write(&alignment, sizeof(alignment), file);
+        alignment = expect_context_not_null(hft) ? 1 : 13;
+        helper_write(file, alignment);
     }
 
     if (hft < offset_has_tensors) {
+        while (ftell(file) % alignment != 0) {
+            const char pad = 0;
+            helper_write(file, pad);
+        }
+
         for (int i = 0; i < extra_bytes; ++i) {
             const char tmp = 0;
-            helper_write(&tmp, sizeof(tmp), file);
+            helper_write(file, tmp);
         }
         rewind(file);
         return file;
     }
 
-    uint32_t alignment = GGUF_DEFAULT_ALIGNMENT;
-    if (hft == HANDCRAFTED_TENSORS_BAD_ALIGNMENT || hft == HANDCRAFTED_DATA_BAD_ALIGNMENT) {
-        alignment -= 1;
-    } else if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) {
-        alignment += 1;
+    if (hft == HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN || hft == HANDCRAFTED_DATA_INCONSISTENT_ALIGN) {
+        alignment = 1;
     }
 
     uint64_t offset = 0;
@@ -313,9 +334,9 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
         }
         {
             const uint64_t n = name.length();
-            helper_write(&n, sizeof(n), file);
+            helper_write(file, n);
         }
-        helper_write(name.data(), name.length(), file);
+        helper_write(file, name.data(), name.length());
 
         uint32_t n_dims = hft == HANDCRAFTED_TENSORS_NE_TOO_BIG ? 2 : 1;
         for (int i = GGML_MAX_DIMS-1; i >= 1; --i) {
@@ -326,35 +347,35 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
         }
         if (hft == HANDCRAFTED_TENSORS_BAD_N_DIMS) {
             const uint32_t n_dims_bad = GGML_MAX_DIMS + 1;
-            helper_write(&n_dims_bad, sizeof(n_dims_bad), file);
+            helper_write(file, n_dims_bad);
         } else {
-            helper_write(&n_dims,     sizeof(n_dims),     file);
+            helper_write(file, n_dims);
         }
 
         if (hft == HANDCRAFTED_TENSORS_BAD_SHAPE) {
             for (uint32_t j = 0; j < n_dims; ++j) {
                 const int64_t bad_dim = -1;
-                helper_write(&bad_dim, sizeof(bad_dim), file);
+                helper_write(file, bad_dim);
             }
         } else if (hft == HANDCRAFTED_TENSORS_NE_TOO_BIG){
             for (uint32_t j = 0; j < n_dims; ++j) {
                 const int64_t big_dim = 4*int64_t(INT32_MAX);
-                helper_write(&big_dim, sizeof(big_dim), file);
+                helper_write(file, big_dim);
             }
         } else {
-            helper_write(shape.data(), n_dims*sizeof(int64_t), file);
+            helper_write(file, shape.data(), n_dims*sizeof(int64_t));
         }
 
         {
-            const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? -1 : int32_t(type);
-            helper_write(&type32, sizeof(type32), file);
+            const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ? GGML_TYPE_COUNT : int32_t(type);
+            helper_write(file, type32);
         }
 
         if (hft == HANDCRAFTED_TENSORS_BAD_OFFSET) {
             const uint64_t bad_offset = -1;
-            helper_write(&bad_offset, sizeof(bad_offset), file);
+            helper_write(file, bad_offset);
         } else {
-            helper_write(&offset, sizeof(offset), file);
+            helper_write(file, offset);
         }
 
         int64_t ne = shape[0];
@@ -364,12 +385,9 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
         offset += GGML_PAD(ggml_row_size(type, ne), alignment);
     }
 
-    const uint32_t alignment_overshoot = ftell(file) % alignment;
-    if (alignment_overshoot != 0) {
-        for (size_t i = alignment_overshoot; i < alignment; ++i) {
-            const char pad = 0;
-            helper_write(&pad, sizeof(pad), file);
-        }
+    while (ftell(file) % alignment != 0) {
+        const char pad = 0;
+        helper_write(file, pad);
     }
 
     if (hft >= offset_has_data) {
@@ -380,13 +398,13 @@ static FILE * get_handcrafted_file(const unsigned int seed, const enum handcraft
         }
         for (uint64_t i = 0; i < nbytes; ++i) {
             const uint8_t random_byte = i % 256;
-            helper_write(&random_byte, sizeof(random_byte), file);
+            helper_write(file, random_byte);
         }
     }
 
     for (int i = 0; i < extra_bytes; ++i) {
         const char tmp = 0;
-        helper_write(&tmp, sizeof(tmp), file);
+        helper_write(file, tmp);
     }
     rewind(file);
     return file;
@@ -505,6 +523,16 @@ static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned i
             }
 
             const char * data_gguf = reinterpret_cast(gguf_get_arr_data(gguf_ctx, id));
+
+            if (type_arr == GGUF_TYPE_BOOL) {
+                for (size_t arr_i = 0; arr_i < arr_n; ++arr_i) {
+                    if (bool(data8[arr_i]) != bool(data_gguf[arr_i])) {
+                        ok = false;
+                    }
+                }
+                continue;
+            }
+
             if (!std::equal(data8, data8 + arr_n*type_size, data_gguf)) {
                 ok = false;
             }
@@ -512,12 +540,20 @@ static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned i
         }
 
         const char * data_gguf = reinterpret_cast(gguf_get_val_data(gguf_ctx, id));
+
+        if (type == GGUF_TYPE_BOOL) {
+            if (bool(*data8) != bool(*data_gguf)) {
+                ok = false;
+            }
+            continue;
+        }
+
         if (!std::equal(data8, data8 + gguf_type_size(type), data_gguf)) {
             ok = false;
         }
     }
 
-    const uint32_t expected_alignment = alignment_defined ? GGUF_DEFAULT_ALIGNMENT + 1 : GGUF_DEFAULT_ALIGNMENT;
+    const uint32_t expected_alignment = alignment_defined ? 1 : GGUF_DEFAULT_ALIGNMENT;
     if (gguf_get_alignment(gguf_ctx) != expected_alignment) {
         ok = false;
     }
@@ -539,7 +575,7 @@ static bool handcrafted_check_tensors(const gguf_context * gguf_ctx, const unsig
 
     bool ok = true;
 
-    const int id_alignment = gguf_find_key(gguf_ctx, "general.alignment");
+    const int id_alignment = gguf_find_key(gguf_ctx, GGUF_KEY_GENERAL_ALIGNMENT);
     const uint32_t alignment = id_alignment >= 0 ? gguf_get_val_u32(gguf_ctx, id_alignment) : GGUF_DEFAULT_ALIGNMENT;
 
     uint64_t expected_offset = 0;
@@ -607,7 +643,7 @@ static bool handcrafted_check_tensor_data(const gguf_context * gguf_ctx, const u
 
         std::vector data(size);
         GGML_ASSERT(fseek(file, gguf_get_data_offset(gguf_ctx) + offset, SEEK_SET) == 0);
-        GGML_ASSERT(fread(data.data(), 1, size, file) == size);
+        GGML_ASSERT(fread(data.data(), 1, data.size(), file) == data.size());
 
         for (size_t j = 0; j < size; ++j) {
             const uint8_t expected_byte = (j + offset) % 256;
@@ -627,15 +663,15 @@ static std::pair test_handcrafted_file(const unsigned int seed) {
     const std::vector hfts = {
         HANDCRAFTED_HEADER_BAD_MAGIC,
         HANDCRAFTED_HEADER_BAD_VERSION_1,
-        // HANDCRAFTED_FILE_TYPE_BAD_VERSION_FUTURE, // FIXME
+        HANDCRAFTED_HEADER_BAD_VERSION_FUTURE,
         HANDCRAFTED_HEADER_BAD_N_KV,
         HANDCRAFTED_HEADER_BAD_N_TENSORS,
         HANDCRAFTED_HEADER_EMPTY,
 
         HANDCRAFTED_KV_BAD_KEY_SIZE,
         HANDCRAFTED_KV_BAD_TYPE,
-        // HANDCRAFTED_KV_BAD_VALUE_SIZE, // FIXME sanitizer limit
-        // HANDCRAFTED_FILE_TYPE_DUPLICATE_KEY, // FIXME
+        HANDCRAFTED_KV_DUPLICATE_KEY,
+        HANDCRAFTED_KV_BAD_ALIGN,
         HANDCRAFTED_KV_SUCCESS,
 
         HANDCRAFTED_TENSORS_BAD_NAME_SIZE,
@@ -643,14 +679,16 @@ static std::pair test_handcrafted_file(const unsigned int seed) {
         HANDCRAFTED_TENSORS_BAD_SHAPE,
         HANDCRAFTED_TENSORS_NE_TOO_BIG,
         HANDCRAFTED_TENSORS_BAD_TYPE,
-        // HANDCRAFTED_TENSORS_BAD_OFFSET, // FIXME
+        HANDCRAFTED_TENSORS_BAD_OFFSET,
         HANDCRAFTED_TENSORS_DUPLICATE_NAME,
-        // HANDCRAFTED_TENSORS_BAD_ALIGNMENT, // FIXME
+        HANDCRAFTED_TENSORS_BAD_ALIGN,
+        HANDCRAFTED_TENSORS_INCONSISTENT_ALIGN,
         HANDCRAFTED_TENSORS_SUCCESS,
         HANDCRAFTED_TENSORS_CUSTOM_ALIGN,
 
         HANDCRAFTED_DATA_NOT_ENOUGH_DATA,
-        // HANDCRAFTED_DATA_BAD_ALIGNMENT, // FIXME
+        HANDCRAFTED_DATA_BAD_ALIGN,
+        HANDCRAFTED_DATA_INCONSISTENT_ALIGN,
         HANDCRAFTED_DATA_SUCCESS,
         HANDCRAFTED_DATA_CUSTOM_ALIGN,
     };
@@ -674,6 +712,7 @@ static std::pair test_handcrafted_file(const unsigned int seed) {
             /*no_alloc =*/ false,
             /*ctx      =*/ hft >= offset_has_data ? &ctx : nullptr,
         };
+
         struct gguf_context * gguf_ctx = gguf_init_from_file_impl(file, gguf_params);
 
         if (expect_context_not_null(hft)) {
@@ -689,7 +728,7 @@ static std::pair test_handcrafted_file(const unsigned int seed) {
         }
         ntest++;
 
-        if (false && hft >= offset_has_data && !expect_context_not_null(hft)) { // FIXME
+        if (hft >= offset_has_data && !expect_context_not_null(hft)) {
             printf("%s:   - no_dangling_ggml_context_pointer: ", __func__);
             if (ctx) {
                 printf("\033[1;31mFAIL\033[0m\n");
@@ -700,23 +739,6 @@ static std::pair test_handcrafted_file(const unsigned int seed) {
             ntest++;
         }
 
-        if (false && expect_context_not_null(hft)) { // FIXME
-            FILE * file_eb = get_handcrafted_file(seed, hft, /*extra_bytes =*/ 1);
-            struct gguf_context * gguf_ctx_eb = gguf_init_from_file_impl(file_eb, gguf_params);
-
-            printf("%s:   - context_null_with_extra_bytes: ", __func__);
-            if (gguf_ctx_eb) {
-                printf("\033[1;31mFAIL\033[0m\n");
-            } else {
-                printf("\033[1;32mOK\033[0m\n");
-                npass++;
-            }
-            ntest++;
-
-            gguf_free(gguf_ctx_eb);
-            fclose(file_eb);
-        }
-
         const bool alignment_defined = hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN;
 
         if (expect_context_not_null(hft)) {
@@ -763,14 +785,15 @@ static std::pair test_handcrafted_file(const unsigned int seed) {
             ntest++;
         }
 
+        fclose(file);
         if (gguf_ctx) {
             ggml_free(ctx);
             gguf_free(gguf_ctx);
         }
-        fclose(file);
         printf("\n");
     }
 
+
     return std::make_pair(npass, ntest);
 }
 
@@ -789,10 +812,6 @@ static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t
         const std::string key = "my_key_" + std::to_string(rng() % 1024);
         const enum gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT);
 
-        if (type == GGUF_TYPE_STRING || type == GGUF_TYPE_ARRAY) {
-            continue; // FIXME memory leak
-        }
-
         switch (type) {
             case GGUF_TYPE_UINT8:   gguf_set_val_u8  (gguf_ctx, key.c_str(), rng() % (1 <<  7));             break;
             case GGUF_TYPE_INT8:    gguf_set_val_i8  (gguf_ctx, key.c_str(), rng() % (1 <<  7) - (1 <<  6)); break;
@@ -826,6 +845,9 @@ static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t
                         std::vector random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t));
                         for (size_t j = 0; j < random_data.size(); ++j) {
                             random_data[j] = rng();
+                            if (type_arr == GGUF_TYPE_BOOL) {
+                                random_data[j] &= 0x01010101; // the sanitizer complains if booleans are not 0 or 1
+                            }
                         }
                         gguf_set_arr_data(gguf_ctx, key.c_str(), type_arr, random_data.data(), ne);
                     } break;
@@ -928,6 +950,17 @@ static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other
                 continue;
             }
 
+            if (type_arr == GGUF_TYPE_BOOL) {
+                const int8_t * data       = reinterpret_cast(gguf_get_arr_data(ctx,   id));
+                const int8_t * data_other = reinterpret_cast(gguf_get_arr_data(other, idx_other));
+                for (int arr_i = 0; arr_i < arr_n; ++arr_i) {
+                    if (bool(data[arr_i]) != bool(data_other[arr_i])) {
+                        ok = false;
+                    }
+                }
+                continue;
+            }
+
             if (type_arr == GGUF_TYPE_STRING) {
                 for (int arr_i = 0; arr_i < arr_n; ++arr_i) {
                     const std::string str       = gguf_get_arr_str(ctx,   id,       arr_i);
@@ -939,8 +972,8 @@ static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other
                 continue;
             }
 
-            const char * data       = reinterpret_cast(gguf_get_arr_data(ctx,   id));
-            const char * data_other = reinterpret_cast(gguf_get_arr_data(other, idx_other));
+            const int8_t * data       = reinterpret_cast(gguf_get_arr_data(ctx,   id));
+            const int8_t * data_other = reinterpret_cast(gguf_get_arr_data(other, idx_other));
             if (!std::equal(data, data + arr_n*gguf_type_size(type_arr), data_other)) {
                 ok = false;
             }
@@ -1028,21 +1061,6 @@ static bool same_tensor_data(const struct ggml_context * orig, const struct ggml
 }
 
 static std::pair test_roundtrip(ggml_backend_dev_t dev, const unsigned int seed, const bool only_meta) {
-    FILE * file = tmpfile();
-#ifdef _WIN32
-    if (!file) {
-        printf("%s: failed to create tmpfile(), needs elevated privileges on Windows");
-        printf("%s: skipping tests");
-        return std::make_pair(0, 0);
-    }
-#else
-    GGML_ASSERT(file);
-#endif // _WIN32
-
-    if (ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_CPU) {
-        return std::make_pair(0, 0); // FIXME
-    }
-
     ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
     printf("%s: device=%s, backend=%s, only_meta=%s\n",
         __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend), only_meta ? "yes" : "no");
@@ -1060,10 +1078,24 @@ static std::pair test_roundtrip(ggml_backend_dev_t dev, const unsigned
         bbuf       = result.buffer;
     }
 
-    struct gguf_buf gbuf = gguf_buf_init(16 * 1024);
-    gguf_write_to_buf(gguf_ctx_0, &gbuf, only_meta);
-    helper_write(gbuf.data, gbuf.offset, file);
-    rewind(file);
+    FILE * file = tmpfile();
+
+#ifdef _WIN32
+    if (!file) {
+        printf("%s: failed to create tmpfile(), needs elevated privileges on Windows");
+        printf("%s: skipping tests");
+        return std::make_pair(0, 0);
+    }
+#else
+    GGML_ASSERT(file);
+#endif // _WIN32
+
+    {
+        std::vector buf;
+        gguf_write_to_buf(gguf_ctx_0, buf, only_meta);
+        GGML_ASSERT(fwrite(buf.data(), 1, buf.size(), file) == buf.size());
+        rewind(file);
+    }
 
     struct ggml_context * ctx_1 = nullptr;
     struct gguf_init_params gguf_params = {
@@ -1151,9 +1183,8 @@ static std::pair test_roundtrip(ggml_backend_dev_t dev, const unsigned
     ggml_free(ctx_1);
     gguf_free(gguf_ctx_0);
     gguf_free(gguf_ctx_1);
-    gguf_buf_free(gbuf);
     ggml_backend_free(backend);
-    GGML_ASSERT(fclose(file) == 0);
+    fclose(file);
 
     printf("\n");
     return std::make_pair(npass, ntest);

From bec2183f2c8d37cf1278c11d1adb9311e9eaa242 Mon Sep 17 00:00:00 2001
From: ag2s20150909 <19373730+ag2s20150909@users.noreply.github.com>
Date: Wed, 8 Jan 2025 16:17:29 +0800
Subject: [PATCH 039/279] fix: Vulkan shader gen binary path when
 Cross-compiling (#11096)

* fix: Vulkan shader gen binary path when cross compiling
---
 ggml/src/ggml-vulkan/CMakeLists.txt | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt
index 9501de736..61de21d6a 100644
--- a/ggml/src/ggml-vulkan/CMakeLists.txt
+++ b/ggml/src/ggml-vulkan/CMakeLists.txt
@@ -69,11 +69,15 @@ if (Vulkan_FOUND)
 
     file(GLOB _ggml_vk_shader_deps "${_ggml_vk_input_dir}/*.comp")
 
+    if (NOT CMAKE_CROSSCOMPILING)
+        set(_ggml_vk_genshaders_cmd "$/${_ggml_vk_genshaders_cmd}")
+    endif ()
+
     add_custom_command(
         OUTPUT ${_ggml_vk_header}
                 ${_ggml_vk_source}
 
-        COMMAND "$/${_ggml_vk_genshaders_cmd}"
+        COMMAND ${_ggml_vk_genshaders_cmd}
             --glslc      ${Vulkan_GLSLC_EXECUTABLE}
             --input-dir  ${_ggml_vk_input_dir}
             --output-dir ${_ggml_vk_output_dir}

From 02f04301417e7fb44fa1025bc1b0aef866e2ca89 Mon Sep 17 00:00:00 2001
From: Mathieu Baudier 
Date: Wed, 8 Jan 2025 09:18:13 +0100
Subject: [PATCH 040/279] Disable GL_KHR_cooperative_matrix Vulkan extension if
 not available. (#11117)

* Disable GL_KHR_cooperative_matrix Vulkan extension if not available.

* Perform Vulkan extensions checks in a more sensible order

* Remove unnecessary #ifdef directive
---
 ggml/src/ggml-vulkan/CMakeLists.txt            | 14 ++++++++++++++
 ggml/src/ggml-vulkan/ggml-vulkan.cpp           | 18 +++++++++++++++---
 .../vulkan-shaders/test_coopmat_support.comp   |  7 +++++++
 .../vulkan-shaders/vulkan-shaders-gen.cpp      |  2 ++
 4 files changed, 38 insertions(+), 3 deletions(-)
 create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp

diff --git a/ggml/src/ggml-vulkan/CMakeLists.txt b/ggml/src/ggml-vulkan/CMakeLists.txt
index 61de21d6a..c0ddaac82 100644
--- a/ggml/src/ggml-vulkan/CMakeLists.txt
+++ b/ggml/src/ggml-vulkan/CMakeLists.txt
@@ -8,6 +8,20 @@ if (Vulkan_FOUND)
                              ../../include/ggml-vulkan.h
                             )
 
+    # Compile a test shader to determine whether GL_KHR_cooperative_matrix is supported.
+    # If it's not, there will be an error to stderr.
+    # If it's supported, set a define to indicate that we should compile those shaders
+    execute_process(COMMAND ${Vulkan_GLSLC_EXECUTABLE} -o - -fshader-stage=compute --target-env=vulkan1.3 "${CMAKE_CURRENT_SOURCE_DIR}/vulkan-shaders/test_coopmat_support.comp"
+                    OUTPUT_VARIABLE glslc_output
+                    ERROR_VARIABLE glslc_error)
+
+    if (${glslc_error} MATCHES ".*extension not supported: GL_KHR_cooperative_matrix.*")
+        message(STATUS "GL_KHR_cooperative_matrix not supported by glslc")
+    else()
+        message(STATUS "GL_KHR_cooperative_matrix supported by glslc")
+        add_compile_definitions(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
+    endif()
+
     # Compile a test shader to determine whether GL_NV_cooperative_matrix2 is supported.
     # If it's not, there will be an error to stderr.
     # If it's supported, set a define to indicate that we should compile those shaders
diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
index d75cd6d61..077452424 100644
--- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp
+++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -1645,6 +1645,7 @@ static void ggml_vk_load_shaders(vk_device& device) {
 #undef CREATE_MM2
     } else
 #endif  // defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
+#if defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
     if (device->coopmat_support) {
         // Create 6 variants, {s,m,l}x{unaligned,aligned}
 #define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
@@ -1739,7 +1740,9 @@ static void ggml_vk_load_shaders(vk_device& device) {
         }
 #undef CREATE_MM2
 #undef CREATE_MM
-    } else if (device->fp16) {
+    } else
+#endif  // defined(VK_KHR_cooperative_matrix) && defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
+    if (device->fp16) {
         // Create 6 variants, {s,m,l}x{unaligned,aligned}
 #define CREATE_MM(PIPELINE_NAME, NAMELC, F16ACC, WG_DENOMS, WARPTILE, PUSHCONST, PARAMCOUNT, ID) \
         if (device->mul_mat ## ID ## _l) \
@@ -2242,6 +2245,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
             last_struct = (VkBaseOutStructure *)&subgroup_size_control_features;
         }
 
+#if defined(VK_KHR_cooperative_matrix)
         VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
         coopmat_features.pNext = nullptr;
         coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
@@ -2251,6 +2255,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
             last_struct->pNext = (VkBaseOutStructure *)&coopmat_features;
             last_struct = (VkBaseOutStructure *)&coopmat_features;
         }
+#endif
 
 #if defined(VK_NV_cooperative_matrix2)
         VkPhysicalDeviceCooperativeMatrix2FeaturesNV coopmat2_features {};
@@ -2283,7 +2288,9 @@ static vk_device ggml_vk_get_device(size_t idx) {
             device_extensions.push_back("VK_EXT_subgroup_size_control");
         }
 
+#if defined(VK_KHR_cooperative_matrix)
         device->coopmat_support = device->coopmat_support && coopmat_features.cooperativeMatrix;
+#endif
 
         if (coopmat2_support) {
 #if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
@@ -2376,6 +2383,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
             device_extensions.push_back("VK_KHR_shader_float16_int8");
         }
 
+#if defined(VK_KHR_cooperative_matrix)
         if (device->coopmat_support) {
             // Query supported shapes
             std::vector cm_props;
@@ -2442,7 +2450,7 @@ static vk_device ggml_vk_get_device(size_t idx) {
         if (device->coopmat_support) {
             device_extensions.push_back("VK_KHR_cooperative_matrix");
         }
-
+#endif
         device->name = GGML_VK_NAME + std::to_string(idx);
 
         device_create_info = {
@@ -2553,9 +2561,11 @@ static void ggml_vk_print_gpu_info(size_t idx) {
             fp16_storage = true;
         } else if (strcmp("VK_KHR_shader_float16_int8", properties.extensionName) == 0) {
             fp16_compute = true;
-        } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
+#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
+       } else if (strcmp("VK_KHR_cooperative_matrix", properties.extensionName) == 0 &&
                    !getenv("GGML_VK_DISABLE_COOPMAT")) {
             coopmat_support = true;
+#endif
 #if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
         } else if (strcmp("VK_NV_cooperative_matrix2", properties.extensionName) == 0 &&
                    !getenv("GGML_VK_DISABLE_COOPMAT2")) {
@@ -2593,6 +2603,7 @@ static void ggml_vk_print_gpu_info(size_t idx) {
     // Pointer to the last chain element
     VkBaseOutStructure * last_struct = (VkBaseOutStructure *)&vk12_features;
 
+#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
     VkPhysicalDeviceCooperativeMatrixFeaturesKHR coopmat_features;
     coopmat_features.pNext = nullptr;
     coopmat_features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_KHR;
@@ -2608,6 +2619,7 @@ static void ggml_vk_print_gpu_info(size_t idx) {
     fp16 = fp16 && vk12_features.shaderFloat16;
 
     coopmat_support = coopmat_support && coopmat_features.cooperativeMatrix;
+#endif
 
     std::string matrix_cores = coopmat2_support ? "NV_coopmat2" : coopmat_support ? "KHR_coopmat" : "none";
 
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp b/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp
new file mode 100644
index 000000000..8c5dd1bd1
--- /dev/null
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/test_coopmat_support.comp
@@ -0,0 +1,7 @@
+#version 460
+
+#extension GL_KHR_cooperative_matrix : require
+
+void main()
+{
+}
diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
index 8111c0638..7b5044798 100644
--- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
+++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp
@@ -342,9 +342,11 @@ void process_shaders() {
         matmul_shaders(true, matmul_id, false, false, false);
         matmul_shaders(true, matmul_id, false, false, true);
 
+#if defined(GGML_VULKAN_COOPMAT_GLSLC_SUPPORT)
         // Coopmat, fp32acc and fp16acc
         matmul_shaders(true, matmul_id, true, false, false);
         matmul_shaders(true, matmul_id, true, false, true);
+#endif
 
 #if defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT)
         // Coopmat2, fp32acc and fp16acc

From 0d52a69e4bf0d6181beec7853307bdcdeec9905b Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Wed, 8 Jan 2025 11:29:34 +0200
Subject: [PATCH 041/279] ci : fix cmake option (#11125)

---
 .github/workflows/build.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 602cf5220..02a193b86 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -665,7 +665,7 @@ jobs:
           - build: 'llvm-arm64'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON'
           - build: 'msvc-arm64'
-            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=O'
+            defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON'
           - build: 'llvm-arm64-opencl-adreno'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
 

From 8cef75c743ba13ebbd6d380c531200c768a8b8aa Mon Sep 17 00:00:00 2001
From: amritahs-ibm 
Date: Wed, 8 Jan 2025 16:24:19 +0530
Subject: [PATCH 042/279] llamafile : ppc64le MMA INT8 implementation (#10912)

This change upstreams llamafile's cpu matrix
multiplication kernels for ppc64le using MMA
builtins for quantised int8 datatype.

This change results in 10% - 70% improvement
in total speed(ie all tokens/total time), across
various batch sizes.

The patch is tested with Meta-Lllama-3-8B,
Mistral-7B, Llama-2-7B-chat-hf models on a
IBM POWER10 machine.

Signed-off-by: Amrita H S 
---
 ggml/src/ggml-cpu/llamafile/sgemm.cpp | 836 ++++++++++++++++++++++++--
 1 file changed, 770 insertions(+), 66 deletions(-)

diff --git a/ggml/src/ggml-cpu/llamafile/sgemm.cpp b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
index 8fce576c3..c22a66287 100644
--- a/ggml/src/ggml-cpu/llamafile/sgemm.cpp
+++ b/ggml/src/ggml-cpu/llamafile/sgemm.cpp
@@ -54,6 +54,7 @@
 #include "ggml-quants.h"
 
 #include 
+#include 
 
 #ifdef _MSC_VER
 #define NOINLINE __declspec(noinline)
@@ -1051,6 +1052,704 @@ class tinyBLAS_Q0_AVX {
       } \
    } \
 
+template 
+class tinyBLAS_Q0_PPC {
+  public:
+    tinyBLAS_Q0_PPC(int64_t k,
+                const TA *A, int64_t lda,
+                const TB *B, int64_t ldb,
+                TC *C, int64_t ldc,
+                int ith, int nth)
+        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
+    }
+
+    void matmul(int64_t m, int64_t n) {
+        mnpack(0, m, 0, n);
+    }
+
+  private:
+
+    template
+    inline void save_res(int ii, int jj, int idx, vector float* fin_res) {
+       for (int I = 0; I < RM; I++) {
+          for (int J = 0; J < RN; J++) {
+             *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&fin_res[idx+I]+J);
+          }
+       }
+    }
+
+    template
+    inline void compute(acc_t* ACC, int c_idx, int s_idx, std::array& comparray, vector float* vs, vector float* fin_res) {
+       vector signed int vec_C[4];
+       vector float CA[4] = {0};
+       vector float res[4] = {0};
+       __builtin_mma_disassemble_acc(vec_C, ACC);
+       for (int i = 0; i < 4; i++) {
+          CA[i] = vec_splats((float)(((double)comparray[c_idx+i]) * -128.0));
+          res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]);
+          fin_res[s_idx+i] = vec_madd(res[i], vs[s_idx+i], fin_res[s_idx+i]);
+       }
+    }
+
+    template
+    void packNormal(const TA* a, int64_t lda, int rows, int cols, VA* vec, bool flip) {
+        int64_t i, j;
+        TA *aoffset = NULL;
+        VA *vecOffset = NULL;
+        TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
+        TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
+        __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
+        VB c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2]={0};
+        VB c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2]={0};
+        VB t1, t2, t3, t4, t5, t6, t7, t8;
+        vector unsigned char xor_vector;
+        uint8_t flip_vec = 0x80;
+        xor_vector = vec_splats(flip_vec);
+        vector unsigned char swiz1 = {0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23};
+        vector unsigned char swiz2 = {8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31};
+        vector unsigned char swiz3 = {0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27};
+        vector unsigned char swiz4 = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31};
+
+        aoffset = const_cast(a);
+        vecOffset = vec;
+        j = (rows >> 3);
+        if (j > 0) {
+            do {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            aoffset4 = aoffset3 + lda;
+            aoffset5 = aoffset4 + lda;
+            aoffset6 = aoffset5 + lda;
+            aoffset7 = aoffset6 + lda;
+            aoffset8 = aoffset7 + lda;
+            aoffset += 8 * lda;
+
+            i = (cols >> 3);
+            if (i > 0) {
+               do {
+                    C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                    C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                    C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                    C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs);
+                    C5 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset5->qs);
+                    C6 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset6->qs);
+                    C7 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset7->qs);
+                    C8 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset8->qs);
+
+                    __builtin_vsx_disassemble_pair(c1, &C1);
+                    __builtin_vsx_disassemble_pair(c2, &C2);
+                    __builtin_vsx_disassemble_pair(c3, &C3);
+                    __builtin_vsx_disassemble_pair(c4, &C4);
+                    __builtin_vsx_disassemble_pair(c5, &C5);
+                    __builtin_vsx_disassemble_pair(c6, &C6);
+                    __builtin_vsx_disassemble_pair(c7, &C7);
+                    __builtin_vsx_disassemble_pair(c8, &C8);
+
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    t1 = vec_perm(c5[0], c6[0], swiz1);
+                    t2 = vec_perm(c5[0], c6[0], swiz2);
+                    t3 = vec_perm(c7[0], c8[0], swiz1);
+                    t4 = vec_perm(c7[0], c8[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+128);
+                    vec_xst(t6, 0, vecOffset+144);
+                    vec_xst(t7, 0, vecOffset+160);
+                    vec_xst(t8, 0, vecOffset+176);
+
+                    t1 = vec_perm(c5[1], c6[1], swiz1);
+                    t2 = vec_perm(c5[1], c6[1], swiz2);
+                    t3 = vec_perm(c7[1], c8[1], swiz1);
+                    t4 = vec_perm(c7[1], c8[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+192);
+                    vec_xst(t6, 0, vecOffset+208);
+                    vec_xst(t7, 0, vecOffset+224);
+                    vec_xst(t8, 0, vecOffset+240);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    aoffset4 += lda;
+                    aoffset5 += lda;
+                    aoffset6 += lda;
+                    aoffset7 += lda;
+                    aoffset8 += lda;
+                    vecOffset += 256;
+                    i--;
+               } while(i > 0);
+            }
+            j--;
+        } while(j > 0);
+    }
+
+    if (rows & 4) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            aoffset4 = aoffset3 + lda;
+            aoffset += 4 * lda;
+
+        i = (cols >> 3);
+            if (i > 0) {
+               do {
+                    C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                    C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                    C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                    C4 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset4->qs);
+
+                    __builtin_vsx_disassemble_pair(c1, &C1);
+                    __builtin_vsx_disassemble_pair(c2, &C2);
+                    __builtin_vsx_disassemble_pair(c3, &C3);
+                    __builtin_vsx_disassemble_pair(c4, &C4);
+
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    aoffset4 += lda;
+                    vecOffset += 128;
+                    i--;
+               } while(i > 0);
+            }
+        }
+        if (rows & 3) {
+            aoffset1 = aoffset;
+            aoffset2 = aoffset1 + lda;
+            aoffset3 = aoffset2 + lda;
+            i = (cols >> 3);
+        if (i > 0) {
+                do {
+                    switch(rows) {
+                        case 3: C3 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset3->qs);
+                                __builtin_vsx_disassemble_pair(c3, &C3);
+                        case 2: C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2->qs);
+                                __builtin_vsx_disassemble_pair(c2, &C2);
+                        case 1: C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1->qs);
+                                __builtin_vsx_disassemble_pair(c1, &C1);
+                                break;
+                    }
+                    t1 = vec_perm(c1[0], c2[0], swiz1);
+                    t2 = vec_perm(c1[0], c2[0], swiz2);
+                    t3 = vec_perm(c3[0], c4[0], swiz1);
+                    t4 = vec_perm(c3[0], c4[0], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset);
+                    vec_xst(t6, 0, vecOffset+16);
+                    vec_xst(t7, 0, vecOffset+32);
+                    vec_xst(t8, 0, vecOffset+48);
+
+                    t1 = vec_perm(c1[1], c2[1], swiz1);
+                    t2 = vec_perm(c1[1], c2[1], swiz2);
+                    t3 = vec_perm(c3[1], c4[1], swiz1);
+                    t4 = vec_perm(c3[1], c4[1], swiz2);
+                    t5 = vec_perm(t1, t3, swiz3);
+                    t6 = vec_perm(t1, t3, swiz4);
+                    t7 = vec_perm(t2, t4, swiz3);
+                    t8 = vec_perm(t2, t4, swiz4);
+                    if (flip == true) {
+                       t5 = vec_xor(t5, xor_vector);
+                       t6 = vec_xor(t6, xor_vector);
+                       t7 = vec_xor(t7, xor_vector);
+                       t8 = vec_xor(t8, xor_vector);
+                    }
+                    vec_xst(t5, 0, vecOffset+64);
+                    vec_xst(t6, 0, vecOffset+80);
+                    vec_xst(t7, 0, vecOffset+96);
+                    vec_xst(t8, 0, vecOffset+112);
+
+                    aoffset1 += lda;
+                    aoffset2 += lda;
+                    aoffset3 += lda;
+                    vecOffset += 128;
+                    i--;
+               } while(i > 0);
+            }
+        }
+    }
+
+    void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t mc, nc, mp, np;
+        int m_rem = MIN(m - m0, 8);
+        int n_rem = MIN(n - n0, 8);
+        // TO-DO: KERNEL_16x8 and KERNEL_8x16 are having some performance
+        // issues. After resolving them, below code will be enabled.
+        /*if (m_rem >= 16 && n_rem >= 8) {
+            mc = 16;
+            nc = 8;
+            gemm<16,8>(m0, m, n0, n);
+        } else if(m_rem >= 8 && n_rem >= 16) {
+            mc = 8;
+            nc = 16;
+            gemm<8,16>(m0, m, n0, n);
+        }*/
+        if (m_rem >= 8 && n_rem >= 8) {
+            mc = 8;
+            nc = 8;
+            gemm<8,8>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 8) {
+            mc = 4;
+            nc = 8;
+            gemm<4,8>(m0, m, n0, n);
+        } else if (m_rem >= 8 && n_rem >= 4) {
+            mc = 8;
+            nc = 4;
+            gemm<8,4>(m0, m, n0, n);
+        } else if (m_rem >= 4 && n_rem >= 4) {
+            mc = 4;
+            nc = 4;
+            gemm_small<4, 4>(m0, m, n0, n);
+        } else if ((m_rem < 4) && (n_rem > 4)) {
+            nc = 4;
+            switch(m_rem) {
+                case 1:
+                    mc = 1;
+                    gemm_small<1, 4>(m0, m, n0, n);
+                    break;
+                case 2:
+                    mc = 2;
+                    gemm_small<2, 4>(m0, m, n0, n);
+                    break;
+                case 3:
+                    mc = 3;
+                    gemm_small<3, 4>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else if ((m_rem > 4) && (n_rem < 4)) {
+            mc = 4;
+            switch(n_rem) {
+                case 1:
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 2:
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 3:
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        } else {
+            switch((m_rem << 4) | n_rem) {
+                case 0x43:
+                    mc = 4;
+                    nc = 3;
+                    gemm_small<4, 3>(m0, m, n0, n);
+                    break;
+                case 0x42:
+                    mc = 4;
+                    nc = 2;
+                    gemm_small<4, 2>(m0, m, n0, n);
+                    break;
+                case 0x41:
+                    mc = 4;
+                    nc = 1;
+                    gemm_small<4, 1>(m0, m, n0, n);
+                    break;
+                case 0x34:
+                    mc = 3;
+                    nc = 4;
+                    gemm_small<3, 4>(m0, m, n0, n);
+                    break;
+                case 0x33:
+                    mc = 3;
+                    nc = 3;
+                    gemm_small<3, 3>(m0, m, n0, n);
+                    break;
+                case 0x32:
+                    mc = 3;
+                    nc = 2;
+                    gemm_small<3, 2>(m0, m, n0, n);
+                    break;
+                case 0x31:
+                    mc = 3;
+                    nc = 1;
+                    gemm_small<3, 1>(m0, m, n0, n);
+                    break;
+                case 0x24:
+                    mc = 2;
+                    nc = 4;
+                    gemm_small<2, 4>(m0, m, n0, n);
+                    break;
+                case 0x23:
+                    mc = 2;
+                    nc = 3;
+                    gemm_small<2, 3>(m0, m, n0, n);
+                    break;
+                case 0x22:
+                    mc = 2;
+                    nc = 2;
+                    gemm_small<2, 2>(m0, m, n0, n);
+                    break;
+                case 0x21:
+                    mc = 2;
+                    nc = 1;
+                    gemm_small<2, 1>(m0, m, n0, n);
+                    break;
+                case 0x14:
+                    mc = 1;
+                    nc = 4;
+                    gemm_small<1, 4>(m0, m, n0, n);
+                    break;
+                case 0x13:
+                    mc = 1;
+                    nc = 3;
+                    gemm_small<1, 3>(m0, m, n0, n);
+                    break;
+                case 0x12:
+                    mc = 1;
+                    nc = 2;
+                    gemm_small<1, 2>(m0, m, n0, n);
+                    break;
+                case 0x11:
+                    mc = 1;
+                    nc = 1;
+                    gemm_small<1, 1>(m0, m, n0, n);
+                    break;
+                default:
+                    return;
+            }
+        }
+        mp = m0 + (m - m0) / mc * mc;
+        np = n0 + (n - n0) / nc * nc;
+        mnpack(mp, m, n0, np);
+        mnpack(m0, m, np, n);
+    }
+
+    void KERNEL_4x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[8], vec_B[16] = {0};
+        acc_t acc_0, acc_1;
+        std::array comparray;
+        vector float fin_res[8] = {0};
+        vector float vs[8] = {0};
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            packNormal((A+(ii*lda)+l), lda, 4, 8, (int8_t*)vec_A, false);
+            packNormal((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x], vec_B[x+8]);
+            }
+            for (int I = 0; I<4; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    *((float*)&vs[I+4]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d));
+                }
+            }
+            auto aoffset = A+(ii*lda)+l;
+            for (int i = 0; i < 4; i++) {
+                comparray[i] = 0;
+                int ca = 0;
+                const int8_t *at = aoffset->qs;
+                for (int j = 0; j < 32; j++)
+                    ca += (int)*at++;
+                comparray[i] = ca;
+                aoffset += lda;
+            }
+            compute<4>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<4>(&acc_1, 0, 4, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii, jj+4, 4, fin_res);
+    }
+
+    void KERNEL_8x4(int64_t ii, int64_t jj) {
+        vec_t vec_A[16], vec_B[8] = {0};
+        acc_t acc_0, acc_1;
+        std::array comparray;
+        vector float fin_res[8] = {0};
+        vector float vs[8] = {0};
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            packNormal((A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false);
+            packNormal((B+(jj*ldb)+l), ldb, 4, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]);
+            }
+            for (int I = 0; I<8; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                }
+            }
+            auto aoffset = A+(ii*lda)+l;
+            for (int i = 0; i < 8; i++) {
+                comparray[i] = 0;
+                int ca = 0;
+                const int8_t *at = aoffset->qs;
+                for (int j = 0; j < 32; j++)
+                    ca += (int)*at++;
+                comparray[i] = ca;
+                aoffset += lda;
+            }
+            compute<8>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<8>(&acc_1, 4, 4, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii+4, jj, 4, fin_res);
+    }
+
+    void KERNEL_8x8(int64_t ii, int64_t jj) {
+        vec_t vec_A[16], vec_B[16] = {0};
+        acc_t acc_0, acc_1, acc_2, acc_3;
+        std::array comparray;
+        vector float fin_res[16] = {0};
+        vector float vs[16] = {0};
+        for (int l = 0; l < k; l++) {
+            __builtin_mma_xxsetaccz(&acc_0);
+            __builtin_mma_xxsetaccz(&acc_1);
+            __builtin_mma_xxsetaccz(&acc_2);
+            __builtin_mma_xxsetaccz(&acc_3);
+            packNormal((A+(ii*lda)+l), lda, 8, 8, (int8_t*)vec_A, false);
+            packNormal((B+(jj*ldb)+l), ldb, 8, 8, (uint8_t*)vec_B, true);
+            for(int x = 0; x < 8; x++) {
+                __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_1, vec_A[x+8], vec_B[x]);
+                __builtin_mma_xvi8ger4pp(&acc_2, vec_A[x], vec_B[x+8]);
+                __builtin_mma_xvi8ger4pp(&acc_3, vec_A[x+8], vec_B[x+8]);
+            }
+            for (int I = 0; I<8; I++) {
+                for (int J = 0; J<4; J++) {
+                    *((float*)&vs[I]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    *((float*)&vs[I+8]+J) = (unhalf((A+((ii+I)*lda)+l)->d) * unhalf((B+((jj+J+4)*ldb)+l)->d));
+                }
+            }
+            auto aoffset = A+(ii*lda)+l;
+            for (int i = 0; i < 8; i++) {
+                comparray[i] = 0;
+                int ca = 0;
+                const int8_t *at = aoffset->qs;
+                for (int j = 0; j < 32; j++)
+                    ca += (int)*at++;
+                comparray[i] = ca;
+                aoffset += lda;
+            }
+            compute<8>(&acc_0, 0, 0, comparray, vs, fin_res);
+            compute<8>(&acc_1, 4, 4, comparray, vs, fin_res);
+            compute<8>(&acc_2, 0, 8, comparray, vs, fin_res);
+            compute<8>(&acc_3, 4, 12, comparray, vs, fin_res);
+        }
+        save_res<4, 4>(ii, jj, 0, fin_res);
+        save_res<4, 4>(ii+4, jj, 4, fin_res);
+        save_res<4, 4>(ii, jj+4, 8, fin_res);
+        save_res<4, 4>(ii+4, jj+4, 12, fin_res);
+    }
+
+    template
+    void gemm_small(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        vec_t vec_A[8], vec_B[8] = {0};
+        vector signed int vec_C[4];
+        acc_t acc_0;
+
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            std::array comparray;
+            vector float res[4] = {0};
+            vector float fin_res[4] = {0};
+            vector float vs[4] = {0};
+            vector float CA[4] = {0};
+            __builtin_prefetch((A+(ii*lda)+0)->qs, 0, 1); // prefetch first value
+            __builtin_prefetch((B+(jj*ldb)+0)->qs, 0, 1); // prefetch first value
+            for (int l = 0; l < k; l++) {
+                __builtin_prefetch((A+(ii*lda)+(l+1))->qs, 0, 1); // prefetch one loop ahead
+                __builtin_prefetch((B+(jj*ldb)+(l+1))->qs, 0, 1); // prefetch one loop ahead
+                __builtin_mma_xxsetaccz(&acc_0);
+                packNormal((A+(ii*lda)+l), lda, RM, 8, (int8_t*)vec_A, false);
+                packNormal((B+(jj*ldb)+l), ldb, RN, 8, (uint8_t*)vec_B, true);
+                for(int x = 0; x < 8; x+=4) {
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x], vec_B[x]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+1], vec_B[x+1]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+2], vec_B[x+2]);
+                    __builtin_mma_xvi8ger4pp(&acc_0, vec_A[x+3], vec_B[x+3]);
+                }
+                for (int I = 0; Id) * unhalf((B+((jj+J)*ldb)+l)->d));
+                    }
+                }
+                __builtin_mma_disassemble_acc(vec_C, &acc_0);
+                auto aoffset = A+(ii*lda)+l;
+                for (int i = 0; i < RM; i++) {
+                    comparray[i] = 0;
+                    int ca = 0;
+                    const int8_t *at = aoffset->qs;
+                    for (int j = 0; j < 32; j++)
+                        ca += (int)*at++;
+                    comparray[i] = ca;
+                    aoffset += lda;
+                }
+
+                for (int i = 0; i < RM; i++) {
+                    CA[i] = vec_splats((float)(((double)comparray[i]) * -128.0));
+                    res[i] = vec_add(vec_ctf(vec_C[i], 0), CA[i]);
+                    fin_res[i] = vec_madd(res[i], vs[i], fin_res[i]);
+                }
+            }
+            save_res(ii, jj, 0, fin_res);
+        }
+    }
+
+    template
+    inline void kernel(int64_t ii, int64_t jj) {
+       if constexpr(RM == 4 && RN == 8) {
+          KERNEL_4x8(ii,jj);
+       } else if constexpr(RM == 8 && RN == 4) {
+          KERNEL_8x4(ii,jj);
+       } else if constexpr(RM == 8 && RN == 8) {
+          KERNEL_8x8(ii,jj);
+       } else {
+          static_assert(false, "RN/RM values not supported");
+       }
+    }
+
+    template 
+    NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
+        int64_t ytiles = (m - m0) / RM;
+        int64_t xtiles = (n - n0) / RN;
+        int64_t tiles = xtiles * ytiles;
+        int64_t duty = (tiles + nth - 1) / nth;
+        int64_t start = duty * ith;
+        int64_t end = start + duty;
+        if (end > tiles)
+            end = tiles;
+        for (int64_t job = start; job < end; ++job) {
+            int64_t ii = m0 + job / xtiles * RM;
+            int64_t jj = n0 + job % xtiles * RN;
+            kernel(ii, jj);
+        }
+    }
+
+    const TA *const A;
+    const TB *const B;
+    TC *C;
+    TA *At;
+    TB *Bt;
+    const int64_t k;
+    const int64_t lda;
+    const int64_t ldb;
+    const int64_t ldc;
+    const int ith;
+    const int nth;
+};
+
 template 
 class tinyBLAS_PPC {
   public:
@@ -1070,13 +1769,17 @@ class tinyBLAS_PPC {
 
     void (tinyBLAS_PPC::*kernel)(int64_t, int64_t);
 
-    void READ_BLOCK(const float* a, int64_t lda, int rows, int cols, float* vec) {
+    template
+    void packTranspose(const TA* a, int64_t lda, int rows, int cols, TA* vec) {
         int64_t i, j;
-        float *aoffset = NULL, *boffset = NULL;
-        float *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
-        float *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
-
-        aoffset = const_cast(a);
+        TA *aoffset = NULL, *boffset = NULL;
+        TA *aoffset1 = NULL, *aoffset2 = NULL, *aoffset3 = NULL, *aoffset4 = NULL;
+        TA *aoffset5 = NULL, *aoffset6 = NULL, *aoffset7 = NULL, *aoffset8 = NULL;
+        __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
+        VA c1[2] = {0}, c2[2] = {0}, c3[2] = {0}, c4[2] = {0};
+        VA c5[2] = {0}, c6[2] = {0}, c7[2] = {0}, c8[2] = {0};
+        VA t1, t2, t3, t4, t5, t6, t7, t8;
+        aoffset = const_cast(a);
         boffset = vec;
         j = (rows >> 3);
         if (j > 0) {
@@ -1092,9 +1795,6 @@ class tinyBLAS_PPC {
                 aoffset += 8 * lda;
                 i = (cols >> 3);
                 if (i > 0) {
-                    __vector_pair C1, C2, C3, C4, C5, C6, C7, C8;
-                    vector float c1[2], c2[2], c3[2], c4[2], c5[2], c6[2], c7[2], c8[2];
-                    vector float t1, t2, t3, t4, t5, t6, t7, t8;
                     do {
                         C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1);
                         C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2);
@@ -1174,21 +1874,19 @@ class tinyBLAS_PPC {
                     } while(i > 0);
                 }
                 if (cols & 4) {
-                    vector float c1, c2, c3, c4, c5, c6, c7, c8;
-                    vector float t1, t2, t3, t4, t5, t6, t7, t8;
-                    c1 = vec_xl(0, aoffset1);
-                    c2 = vec_xl(0, aoffset2);
-                    c3 = vec_xl(0, aoffset3);
-                    c4 = vec_xl(0, aoffset4);
-                    c5 = vec_xl(0, aoffset5);
-                    c6 = vec_xl(0, aoffset6);
-                    c7 = vec_xl(0, aoffset7);
-                    c8 = vec_xl(0, aoffset8);
+                    c1[0] = vec_xl(0, aoffset1);
+                    c2[0] = vec_xl(0, aoffset2);
+                    c3[0] = vec_xl(0, aoffset3);
+                    c4[0] = vec_xl(0, aoffset4);
+                    c5[0] = vec_xl(0, aoffset5);
+                    c6[0] = vec_xl(0, aoffset6);
+                    c7[0] = vec_xl(0, aoffset7);
+                    c8[0] = vec_xl(0, aoffset8);
 
-                    t1 = vec_mergeh(c1, c2);
-                    t2 = vec_mergeh(c3, c4);
-                    t3 = vec_mergeh(c5, c6);
-                    t4 = vec_mergeh(c7, c8);
+                    t1 = vec_mergeh(c1[0], c2[0]);
+                    t2 = vec_mergeh(c3[0], c4[0]);
+                    t3 = vec_mergeh(c5[0], c6[0]);
+                    t4 = vec_mergeh(c7[0], c8[0]);
                     t5 = vec_xxpermdi(t1, t2, 0);
                     t6 = vec_xxpermdi(t3, t4, 0);
                     t7 = vec_xxpermdi(t1, t2, 3);
@@ -1198,10 +1896,10 @@ class tinyBLAS_PPC {
                     vec_xst(t7, 0, boffset+8);
                     vec_xst(t8, 0, boffset+12);
 
-                    t1 = vec_mergel(c1, c2);
-                    t2 = vec_mergel(c3, c4);
-                    t3 = vec_mergel(c5, c6);
-                    t4 = vec_mergel(c7, c8);
+                    t1 = vec_mergel(c1[0], c2[0]);
+                    t2 = vec_mergel(c3[0], c4[0]);
+                    t3 = vec_mergel(c5[0], c6[0]);
+                    t4 = vec_mergel(c7[0], c8[0]);
                     t5 = vec_xxpermdi(t1, t2, 0);
                     t6 = vec_xxpermdi(t3, t4, 0);
                     t7 = vec_xxpermdi(t1, t2, 3);
@@ -1223,9 +1921,6 @@ class tinyBLAS_PPC {
             aoffset += 4 * lda;
             i = (cols >> 3);
             if (i > 0) {
-                __vector_pair C1, C2, C3, C4;
-                vector float c1[2], c2[2], c3[2], c4[2];
-                vector float t1, t2, t3, t4, t5, t6, t7, t8;
                 do {
                     C1 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset1);
                     C2 = __builtin_vsx_lxvp(0, (__vector_pair*)aoffset2);
@@ -1272,22 +1967,20 @@ class tinyBLAS_PPC {
             }
 
             if (cols & 4) {
-                vector float c1, c2, c3, c4;
-                vector float t1, t2, t3, t4;
-                c1 = vec_xl(0, aoffset1);
-                c2 = vec_xl(0, aoffset2);
-                c3 = vec_xl(0, aoffset3);
-                c4 = vec_xl(0, aoffset4);
+                c1[0] = vec_xl(0, aoffset1);
+                c2[0] = vec_xl(0, aoffset2);
+                c3[0] = vec_xl(0, aoffset3);
+                c4[0] = vec_xl(0, aoffset4);
 
-                t1 = vec_mergeh(c1, c2);
-                t2 = vec_mergeh(c3, c4);
+                t1 = vec_mergeh(c1[0], c2[0]);
+                t2 = vec_mergeh(c3[0], c4[0]);
                 t3 = vec_xxpermdi(t1, t2, 0);
                 t4 = vec_xxpermdi(t1, t2, 3);
                 vec_xst(t3, 0, boffset);
                 vec_xst(t4, 0, boffset+4);
 
-                t1 = vec_mergel(c1, c2);
-                t2 = vec_mergel(c3, c4);
+                t1 = vec_mergel(c1[0], c2[0]);
+                t2 = vec_mergel(c3[0], c4[0]);
                 t3 = vec_xxpermdi(t1, t2, 0);
                 t4 = vec_xxpermdi(t1, t2, 3);
                 vec_xst(t3, 0, boffset+8);
@@ -1299,21 +1992,19 @@ class tinyBLAS_PPC {
             aoffset2 = aoffset1 + lda;
             aoffset3 = aoffset2 + lda;
             if (cols & 4) {
-                vector float c1, c2, c3, c4 = {0};
-                vector float t1, t2, t3, t4;
-                c1 = vec_xl(0, aoffset1);
-                c2 = vec_xl(0, aoffset2);
-                c3 = vec_xl(0, aoffset3);
+                c1[0] = vec_xl(0, aoffset1);
+                c2[0] = vec_xl(0, aoffset2);
+                c3[0] = vec_xl(0, aoffset3);
 
-                t1 = vec_mergeh(c1, c2);
-                t2 = vec_mergeh(c3, c4);
+                t1 = vec_mergeh(c1[0], c2[0]);
+                t2 = vec_mergeh(c3[0], c4[0]);
                 t3 = vec_xxpermdi(t1, t2, 0);
                 t4 = vec_xxpermdi(t1, t2, 3);
                 vec_xst(t3, 0, boffset);
                 vec_xst(t4, 0, boffset+4);
 
-                t1 = vec_mergel(c1, c2);
-                t2 = vec_mergel(c3, c4);
+                t1 = vec_mergel(c1[0], c2[0]);
+                t2 = vec_mergel(c3[0], c4[0]);
                 t3 = vec_xxpermdi(t1, t2, 0);
                 t4 = vec_xxpermdi(t1, t2, 3);
                 vec_xst(t3, 0, boffset+8);
@@ -1321,14 +2012,13 @@ class tinyBLAS_PPC {
             }
         }
     }
-
     void KERNEL_4x4(int64_t ii, int64_t jj) {
         vec_t vec_A[4], vec_B[4], vec_C[4];
         acc_t acc_0;
         __builtin_mma_xxsetaccz(&acc_0);
         for (int l = 0; l < k; l+=4) {
-            READ_BLOCK(A+(ii*lda)+l, lda, 4, 4, (float*)vec_A);
-            READ_BLOCK(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B);
+            packTranspose(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A);
+            packTranspose(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[2], vec_B[2]);
@@ -1343,8 +2033,8 @@ class tinyBLAS_PPC {
         __builtin_mma_xxsetaccz(&acc_0);
         __builtin_mma_xxsetaccz(&acc_1);
         for (int64_t l = 0; l < k; l+=4) {
-            READ_BLOCK(A+(ii*lda)+l, lda, 4, 4, (float*)vec_A);
-            READ_BLOCK(B+(jj*ldb)+l, ldb, 8, 4, (float*)vec_B);
+            packTranspose(A+(ii*lda)+l, lda, 4, 4, (TA*)vec_A);
+            packTranspose(B+(jj*ldb)+l, ldb, 8, 4, (TA*)vec_B);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], (vec_t)vec_B[0]);
             __builtin_mma_xvf32gerpp(&acc_1, vec_A[0], (vec_t)vec_B[1]);
             __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], (vec_t)vec_B[2]);
@@ -1364,8 +2054,8 @@ class tinyBLAS_PPC {
         __builtin_mma_xxsetaccz(&acc_0);
         __builtin_mma_xxsetaccz(&acc_1);
         for (int64_t l = 0; l < k; l+=4) {
-            READ_BLOCK(A+(ii*lda)+l, lda, 8, 4, (float*)vec_A);
-            READ_BLOCK(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B);
+            packTranspose(A+(ii*lda)+l, lda, 8, 4, (TA*)vec_A);
+            packTranspose(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
             __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[0], vec_B[0]);
             __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[1], vec_B[0]);
             __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[2], vec_B[1]);
@@ -1387,8 +2077,8 @@ class tinyBLAS_PPC {
         __builtin_mma_xxsetaccz(&acc_2);
         __builtin_mma_xxsetaccz(&acc_3);
         for (int l = 0; l < k; l+=8) {
-            READ_BLOCK(A+(ii*lda)+l, lda, 8, 8, (float*)vec_A);
-            READ_BLOCK(B+(jj*ldb)+l, ldb, 8, 8, (float*)vec_B);
+            packTranspose(A+(ii*lda)+l, lda, 8, 8, (TA*)vec_A);
+            packTranspose(B+(jj*ldb)+l, ldb, 8, 8, (TA*)vec_B);
             for(int x = 0; x < 16; x+=2) {
                 __builtin_mma_xvf32gerpp(&acc_0, (vec_t)vec_A[x], vec_B[x]);
                 __builtin_mma_xvf32gerpp(&acc_1, (vec_t)vec_A[x], vec_B[x+1]);
@@ -1571,15 +2261,15 @@ class tinyBLAS_PPC {
             vec_t vec_A[4], vec_B[4];
             for (int l=0; l= 4 && RM == 1) {
-                    float* a = const_cast(A+(ii)*lda+l);
-                    READ_BLOCK(B+(jj*ldb)+l, ldb, 4, 4, (float*)vec_B);
+                    TA* a = const_cast(A+(ii)*lda+l);
+                    packTranspose(B+(jj*ldb)+l, ldb, 4, 4, (TA*)vec_B);
                     vec_A[0] = (vec_t)vec_xl(0,a);
-                    vec_A[1] = (vec_t)vec_splats(*((float*)&vec_A+1));
-                    vec_A[2] = (vec_t)vec_splats(*((float*)&vec_A+2));
-                    vec_A[3] = (vec_t)vec_splats(*((float*)&vec_A+3));
+                    vec_A[1] = (vec_t)vec_splats(*((TA*)&vec_A+1));
+                    vec_A[2] = (vec_t)vec_splats(*((TA*)&vec_A+2));
+                    vec_A[3] = (vec_t)vec_splats(*((TA*)&vec_A+3));
                 } else {
-                    READ_BLOCK(A+(ii*lda)+l, lda, RM, 4, (float*)vec_A);
-                    READ_BLOCK(B+(jj*ldb)+l, ldb, RN, 4, (float*)vec_B);
+                    packTranspose(A+(ii*lda)+l, lda, RM, 4, (TA*)vec_A);
+                    packTranspose(B+(jj*ldb)+l, ldb, RN, 4, (TA*)vec_B);
                 }
                 __builtin_mma_xvf32gerpp(&acc_0, vec_A[0], vec_B[0]);
                 __builtin_mma_xvf32gerpp(&acc_0, vec_A[1], vec_B[1]);
@@ -1589,7 +2279,7 @@ class tinyBLAS_PPC {
             __builtin_mma_disassemble_acc(vec_C, &acc_0);
             for (int I = 0; I < RM; I++) {
                 for (int J = 0; J < RN; J++) {
-                    *((float*)(C+ii+((jj+J)*ldc)+I)) = *((float*)&vec_C[I]+J);
+                    *((TC*)(C+ii+((jj+J)*ldc)+I)) = *((TC*)&vec_C[I]+J);
                 }
             }
        }
@@ -1812,6 +2502,20 @@ bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64
             params->ith, params->nth};
         tb.matmul(m, n);
         return true;
+
+#elif defined(__MMA__)
+        if (n < 8 && n != 4)
+           return false;
+        if (m < 8 && m != 4)
+           return false;
+        tinyBLAS_Q0_PPC tb{
+            k, (const block_q8_0 *)A, lda,
+            (const block_q8_0 *)B, ldb,
+            (float *)C, ldc,
+            params->ith, params->nth};
+        tb.matmul(m, n);
+        return true;
+
 #else
         return false;
 #endif

From a3c1232c3f475f0a77b9cc5225516ac31c567a06 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Wed, 8 Jan 2025 12:55:36 +0200
Subject: [PATCH 043/279] arg : option to exclude arguments from specific
 examples (#11136)

* arg : option to exclude arguments from specific examples

ggml-ci

* readme : remove old args [no ci]
---
 common/arg.cpp            | 17 +++++++++++++----
 common/arg.h              |  3 +++
 examples/server/README.md |  3 ---
 3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/common/arg.cpp b/common/arg.cpp
index c81b15217..27886b84e 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -22,6 +22,11 @@ common_arg & common_arg::set_examples(std::initializer_list
     return *this;
 }
 
+common_arg & common_arg::set_excludes(std::initializer_list excludes) {
+    this->excludes = std::move(excludes);
+    return *this;
+}
+
 common_arg & common_arg::set_env(const char * env) {
     help = help + "\n(env: " + env + ")";
     this->env = env;
@@ -37,6 +42,10 @@ bool common_arg::in_example(enum llama_example ex) {
     return examples.find(ex) != examples.end();
 }
 
+bool common_arg::is_exclude(enum llama_example ex) {
+    return excludes.find(ex) != excludes.end();
+}
+
 bool common_arg::get_value_from_env(std::string & output) {
     if (env == nullptr) return false;
     char * value = std::getenv(env);
@@ -420,7 +429,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
      * - if both {LLAMA_EXAMPLE_COMMON, LLAMA_EXAMPLE_*,} are set, we will prioritize the LLAMA_EXAMPLE_* matching current example
      */
     auto add_opt = [&](common_arg arg) {
-        if (arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) {
+        if ((arg.in_example(ex) || arg.in_example(LLAMA_EXAMPLE_COMMON)) && !arg.is_exclude(ex)) {
             ctx_arg.options.push_back(std::move(arg));
         }
     };
@@ -649,7 +658,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         [](common_params & params, const std::string & value) {
             params.prompt = value;
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--no-perf"},
         string_format("disable internal libllama performance timings (default: %s)", params.no_perf ? "true" : "false"),
@@ -673,7 +682,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
                 params.prompt.pop_back();
             }
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"--in-file"}, "FNAME",
         "an input file (repeat to specify multiple files)",
@@ -700,7 +709,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
             params.prompt = ss.str();
             fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), value.c_str());
         }
-    ));
+    ).set_excludes({LLAMA_EXAMPLE_SERVER}));
     add_opt(common_arg(
         {"-e", "--escape"},
         string_format("process escapes sequences (\\n, \\r, \\t, \\', \\\", \\\\) (default: %s)", params.escape ? "true" : "false"),
diff --git a/common/arg.h b/common/arg.h
index a6700d323..49ab8667b 100644
--- a/common/arg.h
+++ b/common/arg.h
@@ -12,6 +12,7 @@
 
 struct common_arg {
     std::set examples = {LLAMA_EXAMPLE_COMMON};
+    std::set excludes = {};
     std::vector args;
     const char * value_hint   = nullptr; // help text or example for arg value
     const char * value_hint_2 = nullptr; // for second arg value
@@ -53,9 +54,11 @@ struct common_arg {
     ) : args(args), value_hint(value_hint), value_hint_2(value_hint_2), help(help), handler_str_str(handler) {}
 
     common_arg & set_examples(std::initializer_list examples);
+    common_arg & set_excludes(std::initializer_list excludes);
     common_arg & set_env(const char * env);
     common_arg & set_sparam();
     bool in_example(enum llama_example ex);
+    bool is_exclude(enum llama_example ex);
     bool get_value_from_env(std::string & output);
     bool has_value_from_env();
     std::string to_string();
diff --git a/examples/server/README.md b/examples/server/README.md
index 3ce16945a..1f0a27d96 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -45,10 +45,7 @@ The project is under active development, and we are [looking for feedback and co
 | `-ub, --ubatch-size N` | physical maximum batch size (default: 512)
(env: LLAMA_ARG_UBATCH) | | `--keep N` | number of tokens to keep from the initial prompt (default: 0, -1 = all) | | `-fa, --flash-attn` | enable Flash Attention (default: disabled)
(env: LLAMA_ARG_FLASH_ATTN) | -| `-p, --prompt PROMPT` | prompt to start generation with | | `--no-perf` | disable internal libllama performance timings (default: false)
(env: LLAMA_ARG_NO_PERF) | -| `-f, --file FNAME` | a file containing the prompt (default: none) | -| `-bf, --binary-file FNAME` | binary file containing the prompt (default: none) | | `-e, --escape` | process escapes sequences (\n, \r, \t, \', \", \\) (default: true) | | `--no-escape` | do not process escape sequences | | `--rope-scaling {none,linear,yarn}` | RoPE frequency scaling method, defaults to linear unless specified by the model
(env: LLAMA_ARG_ROPE_SCALING_TYPE) | From 80ccf5d725571035b454659e3c1b4b2b07b65e71 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Wed, 8 Jan 2025 12:07:20 +0100 Subject: [PATCH 044/279] ci : pin dependency to specific version (#11137) * ci : pin dependency to specific version * will this fix ec? --- .github/workflows/docker.yml | 2 +- .github/workflows/editorconfig.yml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 41f1a89ee..f5af72d0b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -100,7 +100,7 @@ jobs: # https://github.com/jlumbroso/free-disk-space/tree/54081f138730dfa15788a46383842cd2f914a1be#example - name: Free Disk Space (Ubuntu) if: ${{ matrix.config.free_disk_space == true }} - uses: jlumbroso/free-disk-space@main + uses: jlumbroso/free-disk-space@v1.3.1 with: # this might remove tools that are actually needed, # if set to "true" but frees about 6 GB diff --git a/.github/workflows/editorconfig.yml b/.github/workflows/editorconfig.yml index ae86e9927..f02b7c219 100644 --- a/.github/workflows/editorconfig.yml +++ b/.github/workflows/editorconfig.yml @@ -23,5 +23,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: editorconfig-checker/action-editorconfig-checker@main + - uses: editorconfig-checker/action-editorconfig-checker@v2 + with: + version: v3.0.3 - run: editorconfig-checker From c792dcf4880461c2b5f3960584db241ac71a893a Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Sun, 5 Jan 2025 09:50:37 +0200 Subject: [PATCH 045/279] ggml : allow loading backend with env variable (ggml/1059) ref: #1058 --- ggml/src/ggml-backend-reg.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ggml/src/ggml-backend-reg.cpp b/ggml/src/ggml-backend-reg.cpp index 7ddd178b5..955ed505f 100644 --- a/ggml/src/ggml-backend-reg.cpp +++ b/ggml/src/ggml-backend-reg.cpp @@ -574,4 +574,9 @@ void ggml_backend_load_all_from_path(const char * dir_path) { ggml_backend_load_best("opencl", silent, dir_path); ggml_backend_load_best("musa", silent, dir_path); ggml_backend_load_best("cpu", silent, dir_path); + // check the environment variable GGML_BACKEND_PATH to load an out-of-tree backend + const char * backend_path = std::getenv("GGML_BACKEND_PATH"); + if (backend_path) { + ggml_backend_load(backend_path); + } } From 99a3755a3c518119d0156766122f7b4b796ea576 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 8 Jan 2025 13:40:30 +0200 Subject: [PATCH 046/279] sync : ggml --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index b67445ecd..a0921f1a9 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -a2af72be7baf5b1f4a33d34e77e509e5e85b7cd7 +c8bd0fee71dc8328d93be301bbee06bc10d30429 From c07d437bbd417f42b122e767ad42b3298767dca0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 8 Jan 2025 16:19:36 +0200 Subject: [PATCH 047/279] llama : avoid hardcoded QK_K (#11061) ggml-ci --- src/llama-quant.cpp | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 038cf58dd..466e7bc61 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -7,14 +7,12 @@ #include #include #include +#include #include #include #include #include -// TODO: replace with ggml API call -#define QK_K 256 - static void zeros(std::ofstream & file, size_t n) { char zero = 0; for (size_t i = 0; i < n; ++i) { @@ -154,8 +152,10 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t if (qs.params->output_tensor_type < GGML_TYPE_COUNT) { new_type = qs.params->output_tensor_type; } else { - int nx = tensor->ne[0]; - if (arch == LLM_ARCH_FALCON || nx % QK_K != 0) { + const int64_t nx = tensor->ne[0]; + const int64_t qk_k = ggml_blck_size(new_type); + + if (arch == LLM_ARCH_FALCON || nx % qk_k != 0) { new_type = GGML_TYPE_Q8_0; } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS || @@ -367,20 +367,19 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t // if (ftype == LLAMA_FTYPE_MOSTLY_Q5_K_S) new_type = GGML_TYPE_Q4_K; //} bool convert_incompatible_tensor = false; - if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K || - new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K || new_type == GGML_TYPE_IQ4_XS || - new_type == GGML_TYPE_IQ2_XS || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S || - new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S || new_type == GGML_TYPE_IQ3_S || - new_type == GGML_TYPE_IQ1_M) { - int nx = tensor->ne[0]; - int ny = tensor->ne[1]; - if (nx % QK_K != 0) { - LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type)); + { + const int64_t nx = tensor->ne[0]; + const int64_t ny = tensor->ne[1]; + const int64_t qk_k = ggml_blck_size(new_type); + + if (nx % qk_k != 0) { + LLAMA_LOG_WARN("\n\n%s : tensor cols %" PRId64 " x %" PRId64 " are not divisible by %" PRId64 ", required for %s", __func__, nx, ny, qk_k, ggml_type_name(new_type)); convert_incompatible_tensor = true; } else { ++qs.n_k_quantized; } } + if (convert_incompatible_tensor) { switch (new_type) { case GGML_TYPE_TQ1_0: From 4d2b3d88041705b20c30b3219838aa435e7ffbde Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Wed, 8 Jan 2025 15:59:53 +0100 Subject: [PATCH 048/279] lora : improve compat with `mergekit-extract-lora` (#11131) * (wip) support mergekit-extracted lora * support mergekit-extract-lora * use lora->get_scale * correct comment * correct norm name & condition * add some hints --- convert_lora_to_gguf.py | 34 +++++++++++++++++++++++++++++++--- src/llama-adapter.cpp | 24 ++++++++++++++++++------ src/llama-adapter.h | 7 +++++++ src/llama.cpp | 21 ++++++++++++++++++--- 4 files changed, 74 insertions(+), 12 deletions(-) diff --git a/convert_lora_to_gguf.py b/convert_lora_to_gguf.py index ed1014cae..6dea14a23 100755 --- a/convert_lora_to_gguf.py +++ b/convert_lora_to_gguf.py @@ -226,6 +226,9 @@ def get_base_tensor_name(lora_tensor_name: str) -> str: base_name = lora_tensor_name.replace("base_model.model.", "") base_name = base_name.replace(".lora_A.weight", ".weight") base_name = base_name.replace(".lora_B.weight", ".weight") + # models produced by mergekit-extract-lora have token embeddings in the adapter + base_name = base_name.replace(".lora_embedding_A", ".weight") + base_name = base_name.replace(".lora_embedding_B", ".weight") return base_name @@ -260,6 +263,10 @@ def parse_args() -> argparse.Namespace: "--base", type=Path, help="directory containing Hugging Face model config files (config.json, tokenizer.json) for the base model that the adapter is based on - only config is needed, actual model weights are not required. If base model is unspecified, it will be loaded from Hugging Face hub based on the adapter config", ) + parser.add_argument( + "--base-model-id", type=str, + help="the model ID of the base model, if it is not available locally or in the adapter config. If specified, it will ignore --base and load the base model config from the Hugging Face hub (Example: 'meta-llama/Llama-3.2-1B-Instruct')", + ) parser.add_argument( "lora_path", type=Path, help="directory containing Hugging Face PEFT LoRA config (adapter_model.json) and weights (adapter_model.safetensors or adapter_model.bin)", @@ -290,6 +297,7 @@ if __name__ == '__main__': dir_base_model: Path | None = args.base dir_lora: Path = args.lora_path + base_model_id: str | None = args.base_model_id lora_config = dir_lora / "adapter_config.json" input_model = dir_lora / "adapter_model.safetensors" @@ -313,7 +321,10 @@ if __name__ == '__main__': lparams: dict[str, Any] = json.load(f) # load base model - if dir_base_model is None: + if base_model_id is not None: + logger.info(f"Loading base model from Hugging Face: {base_model_id}") + hparams = load_hparams_from_hf(base_model_id) + elif dir_base_model is None: if "base_model_name_or_path" in lparams: model_id = lparams["base_model_name_or_path"] logger.info(f"Loading base model from Hugging Face: {model_id}") @@ -371,11 +382,16 @@ if __name__ == '__main__': if self.lazy: tensor = LazyTorchTensor.from_eager(tensor) base_name = get_base_tensor_name(name) - is_lora_a = ".lora_A.weight" in name - is_lora_b = ".lora_B.weight" in name + # note: mergekit-extract-lora also adds token embeddings to the adapter + is_lora_a = ".lora_A.weight" in name or ".lora_embedding_A" in name + is_lora_b = ".lora_B.weight" in name or ".lora_embedding_B" in name if not is_lora_a and not is_lora_b: if ".base_layer.weight" in name: continue + # mergekit-extract-lora add these layernorm to the adapter, we need to keep them + if "_layernorm" in name or ".norm" in name: + yield (base_name, tensor) + continue logger.error(f"Unexpected name '{name}': Not a lora_A or lora_B tensor") if ".embed_tokens.weight" in name or ".lm_head.weight" in name: logger.error("Embeddings is present in the adapter. This can be due to new tokens added during fine tuning") @@ -407,9 +423,21 @@ if __name__ == '__main__': if name == "lm_head.weight" and len(dest) == 0: raise ValueError("lm_head is present in adapter, but is ignored in base model") for dest_name, dest_data in dest: + # mergekit-extract-lora add these layernorm to the adapter + if "_norm" in dest_name: + assert dest_data.dim() == 1 + yield (dest_name, dest_data) + continue + + # otherwise, we must get the lora_A and lora_B tensors assert isinstance(dest_data, LoraTorchTensor) lora_a, lora_b = dest_data.get_lora_A_B() + # note: mergekit-extract-lora flip and transpose A and B + # here we only need to transpose token_embd.lora_a, see llm_build_inp_embd() + if "token_embd.weight" in dest_name: + lora_a = lora_a.T + yield (dest_name + ".lora_a", lora_a) yield (dest_name + ".lora_b", lora_b) diff --git a/src/llama-adapter.cpp b/src/llama-adapter.cpp index 9fd7edea3..d4879b778 100644 --- a/src/llama-adapter.cpp +++ b/src/llama-adapter.cpp @@ -242,6 +242,10 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char } else { ab_map[name].b = cur; } + } else if (str_endswith(name, "_norm.weight")) { + // TODO: add support for norm vector + // for now, we don't really care because most adapters still work fine without it + continue; } else { throw std::runtime_error("LoRA tensor '" + name + "' has unexpected suffix"); } @@ -251,6 +255,7 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char for (auto & it : ab_map) { const std::string & name = it.first; llama_lora_weight & w = it.second; + bool is_token_embd = str_endswith(name, "token_embd.weight"); if (!w.a || !w.b) { throw std::runtime_error("LoRA tensor pair for '" + name + "' is missing one component"); @@ -259,16 +264,23 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char // device buft and device ctx auto * model_tensor = llama_model_get_tensor(model, name.c_str()); if (!model_tensor) { - throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model"); + throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)"); } struct ggml_context * dev_ctx = ctx_for_buft(ggml_backend_buffer_get_type(model_tensor->buffer)); // validate tensor shape - if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) { - throw std::runtime_error("tensor '" + name + "' has incorrect shape"); - } - if (w.a->ne[1] != w.b->ne[0]) { - throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)"); + if (is_token_embd) { + // expect B to be non-transposed, A and B are flipped; see llm_build_inp_embd() + if (model_tensor->ne[0] != w.b->ne[1] || model_tensor->ne[1] != w.a->ne[1]) { + throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)"); + } + } else { + if (model_tensor->ne[0] != w.a->ne[0] || model_tensor->ne[1] != w.b->ne[1]) { + throw std::runtime_error("tensor '" + name + "' has incorrect shape (hint: maybe wrong base model?)"); + } + if (w.a->ne[1] != w.b->ne[0]) { + throw std::runtime_error("lora_a tensor is not transposed (hint: adapter from \"finetune\" example is no longer supported)"); + } } // save tensor to adapter diff --git a/src/llama-adapter.h b/src/llama-adapter.h index 5f1870cc8..3448656b1 100644 --- a/src/llama-adapter.h +++ b/src/llama-adapter.h @@ -45,6 +45,13 @@ struct llama_lora_weight { struct ggml_tensor * a = nullptr; struct ggml_tensor * b = nullptr; + // get actual scale based on rank and alpha + float get_scale(float alpha, float adapter_scale) { + const float rank = (float) b->ne[0]; + const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale; + return scale; + } + llama_lora_weight() = default; llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {} }; diff --git a/src/llama.cpp b/src/llama.cpp index 8ea6686c9..97e716cd6 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -2545,6 +2545,21 @@ static struct ggml_tensor * llm_build_inp_embd( ggml_set_input(lctx.inp_tokens); inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens); + + // apply lora for embedding tokens if needed + for (auto & it : lctx.lora_adapters) { + struct llama_lora_weight * lora = it.first->get_weight(tok_embd); + if (lora == nullptr) { + continue; + } + const float adapter_scale = it.second; + const float scale = lora->get_scale(it.first->alpha, adapter_scale); + struct ggml_tensor * inpL_delta = ggml_scale(ctx, ggml_mul_mat( + ctx, lora->b, // non-transposed lora_b + ggml_get_rows(ctx, lora->a, lctx.inp_tokens) + ), scale); + inpL = ggml_add(ctx, inpL, inpL_delta); + } } else { lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, ubatch.n_tokens); inpL = lctx.inp_embd; @@ -2617,9 +2632,8 @@ static struct ggml_tensor * llm_build_lora_mm( if (lora == nullptr) { continue; } - const float alpha = it.first->alpha; - const float rank = (float) lora->b->ne[0]; - const float scale = alpha ? it.second * alpha / rank : it.second; + const float adapter_scale = it.second; + const float scale = lora->get_scale(it.first->alpha, adapter_scale); struct ggml_tensor * ab_cur = ggml_mul_mat( ctx0, lora->b, ggml_mul_mat(ctx0, lora->a, cur) @@ -3967,6 +3981,7 @@ struct llm_build_context { // feed-forward network if (model.layers[il].ffn_gate_inp == nullptr) { + cur = llm_build_norm(ctx0, ffn_inp, hparams, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, cb, il); From f7cd13301c2a88f97073fd119072b4cc92c08df1 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Wed, 8 Jan 2025 16:09:20 +0100 Subject: [PATCH 049/279] ci : use actions from ggml-org (#11140) --- .github/workflows/build.yml | 2 +- .github/workflows/docker.yml | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 02a193b86..c85999b89 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1237,7 +1237,7 @@ jobs: - name: Create release id: create_release - uses: anzz1/action-create-release@v1 + uses: ggml-org/action-create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index f5af72d0b..d71f1eb38 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -97,10 +97,9 @@ jobs: GITHUB_BRANCH_NAME: ${{ github.head_ref || github.ref_name }} GITHUB_REPOSITORY_OWNER: '${{ github.repository_owner }}' - # https://github.com/jlumbroso/free-disk-space/tree/54081f138730dfa15788a46383842cd2f914a1be#example - name: Free Disk Space (Ubuntu) if: ${{ matrix.config.free_disk_space == true }} - uses: jlumbroso/free-disk-space@v1.3.1 + uses: ggml-org/free-disk-space@v1.3.1 with: # this might remove tools that are actually needed, # if set to "true" but frees about 6 GB From 1bf839b1e8b9d043306c65eddd9021fe4337733e Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Wed, 8 Jan 2025 18:47:05 +0000 Subject: [PATCH 050/279] Enhance user input handling for llama-run (#11138) The main motivation for this change is it was not handing ctrl-c/ctrl-d correctly. Modify `read_user_input` to handle EOF, "/bye" command, and empty input cases. Introduce `get_user_input` function to manage user input loop and handle different return cases. Signed-off-by: Eric Curtin --- examples/run/run.cpp | 63 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 60 insertions(+), 3 deletions(-) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 2888fcfed..61420e441 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -11,6 +11,8 @@ # include #endif +#include + #include #include #include @@ -25,6 +27,13 @@ #include "json.hpp" #include "llama-cpp.h" +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32) +[[noreturn]] static void sigint_handler(int) { + printf("\n"); + exit(0); // not ideal, but it's the only way to guarantee exit in all cases +} +#endif + GGML_ATTRIBUTE_FORMAT(1, 2) static std::string fmt(const char * fmt, ...) { va_list ap; @@ -801,7 +810,20 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str static int read_user_input(std::string & user) { std::getline(std::cin, user); - return user.empty(); // Should have data in happy path + if (std::cin.eof()) { + printf("\n"); + return 1; + } + + if (user == "/bye") { + return 1; + } + + if (user.empty()) { + return 2; + } + + return 0; // Should have data in happy path } // Function to generate a response based on the prompt @@ -868,7 +890,25 @@ static bool is_stdout_a_terminal() { #endif } -// Function to tokenize the prompt +// Function to handle user input +static int get_user_input(std::string & user_input, const std::string & user) { + while (true) { + const int ret = handle_user_input(user_input, user); + if (ret == 1) { + return 1; + } + + if (ret == 2) { + continue; + } + + break; + } + + return 0; +} + +// Main chat loop function static int chat_loop(LlamaData & llama_data, const std::string & user) { int prev_len = 0; llama_data.fmtted.resize(llama_n_ctx(llama_data.context.get())); @@ -876,7 +916,8 @@ static int chat_loop(LlamaData & llama_data, const std::string & user) { while (true) { // Get user input std::string user_input; - while (handle_user_input(user_input, user)) { + if (get_user_input(user_input, user) == 1) { + return 0; } add_message("user", user.empty() ? user_input : user, llama_data); @@ -917,7 +958,23 @@ static std::string read_pipe_data() { return result.str(); } +static void ctrl_c_handling() { +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) + struct sigaction sigint_action; + sigint_action.sa_handler = sigint_handler; + sigemptyset(&sigint_action.sa_mask); + sigint_action.sa_flags = 0; + sigaction(SIGINT, &sigint_action, NULL); +#elif defined(_WIN32) + auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL { + return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false; + }; + SetConsoleCtrlHandler(reinterpret_cast(console_ctrl_handler), true); +#endif +} + int main(int argc, const char ** argv) { + ctrl_c_handling(); Opt opt; const int ret = opt.init(argc, argv); if (ret == 2) { From 8a1d9c25fafbaf4182dd0b785dd6303ee40d55bc Mon Sep 17 00:00:00 2001 From: Vinesh Janarthanan <36610342+VJHack@users.noreply.github.com> Date: Wed, 8 Jan 2025 12:54:58 -0600 Subject: [PATCH 051/279] gguf-py : move scripts directory (#11116) * Moved scripts dir and fixed pyproject.toml * updated readme * fixed README urls * bump pypi gguf to v0.14.0 * retrigger ci * empty commit - trigger ci --- gguf-py/README.md | 8 ++++---- gguf-py/{ => gguf}/scripts/__init__.py | 0 gguf-py/{ => gguf}/scripts/gguf_convert_endian.py | 0 gguf-py/{ => gguf}/scripts/gguf_dump.py | 0 gguf-py/{ => gguf}/scripts/gguf_hash.py | 0 gguf-py/{ => gguf}/scripts/gguf_new_metadata.py | 0 gguf-py/{ => gguf}/scripts/gguf_set_metadata.py | 0 gguf-py/pyproject.toml | 11 +++++------ 8 files changed, 9 insertions(+), 10 deletions(-) rename gguf-py/{ => gguf}/scripts/__init__.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_convert_endian.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_dump.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_hash.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_new_metadata.py (100%) rename gguf-py/{ => gguf}/scripts/gguf_set_metadata.py (100%) diff --git a/gguf-py/README.md b/gguf-py/README.md index 24af96a17..37a75923b 100644 --- a/gguf-py/README.md +++ b/gguf-py/README.md @@ -15,13 +15,13 @@ pip install gguf [examples/writer.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/examples/writer.py) — Generates `example.gguf` in the current directory to demonstrate generating a GGUF file. Note that this file cannot be used as a model. -[scripts/gguf_dump.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_dump.py) — Dumps a GGUF file's metadata to the console. +[gguf/scripts/gguf_dump.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_dump.py) — Dumps a GGUF file's metadata to the console. -[scripts/gguf_set_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_set_metadata.py) — Allows changing simple metadata values in a GGUF file by key. +[gguf/scripts/gguf_set_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_set_metadata.py) — Allows changing simple metadata values in a GGUF file by key. -[scripts/gguf_convert_endian.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_convert_endian.py) — Allows converting the endianness of GGUF files. +[gguf/scripts/gguf_convert_endian.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_convert_endian.py) — Allows converting the endianness of GGUF files. -[scripts/gguf_new_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/scripts/gguf_new_metadata.py) — Copies a GGUF file with added/modified/removed metadata values. +[gguf/scripts/gguf_new_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_new_metadata.py) — Copies a GGUF file with added/modified/removed metadata values. ## Development Maintainers who participate in development of this package are advised to install it in editable mode: diff --git a/gguf-py/scripts/__init__.py b/gguf-py/gguf/scripts/__init__.py similarity index 100% rename from gguf-py/scripts/__init__.py rename to gguf-py/gguf/scripts/__init__.py diff --git a/gguf-py/scripts/gguf_convert_endian.py b/gguf-py/gguf/scripts/gguf_convert_endian.py similarity index 100% rename from gguf-py/scripts/gguf_convert_endian.py rename to gguf-py/gguf/scripts/gguf_convert_endian.py diff --git a/gguf-py/scripts/gguf_dump.py b/gguf-py/gguf/scripts/gguf_dump.py similarity index 100% rename from gguf-py/scripts/gguf_dump.py rename to gguf-py/gguf/scripts/gguf_dump.py diff --git a/gguf-py/scripts/gguf_hash.py b/gguf-py/gguf/scripts/gguf_hash.py similarity index 100% rename from gguf-py/scripts/gguf_hash.py rename to gguf-py/gguf/scripts/gguf_hash.py diff --git a/gguf-py/scripts/gguf_new_metadata.py b/gguf-py/gguf/scripts/gguf_new_metadata.py similarity index 100% rename from gguf-py/scripts/gguf_new_metadata.py rename to gguf-py/gguf/scripts/gguf_new_metadata.py diff --git a/gguf-py/scripts/gguf_set_metadata.py b/gguf-py/gguf/scripts/gguf_set_metadata.py similarity index 100% rename from gguf-py/scripts/gguf_set_metadata.py rename to gguf-py/gguf/scripts/gguf_set_metadata.py diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index 9c3956256..92d7f22ec 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,12 +1,11 @@ [tool.poetry] name = "gguf" -version = "0.13.0" +version = "0.14.0" description = "Read and write ML models in GGUF for GGML" authors = ["GGML "] packages = [ {include = "gguf"}, {include = "gguf/py.typed"}, - {include = "scripts"}, ] readme = "README.md" homepage = "https://ggml.ai" @@ -33,7 +32,7 @@ requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry.scripts] -gguf-convert-endian = "scripts:gguf_convert_endian_entrypoint" -gguf-dump = "scripts:gguf_dump_entrypoint" -gguf-set-metadata = "scripts:gguf_set_metadata_entrypoint" -gguf-new-metadata = "scripts:gguf_new_metadata_entrypoint" +gguf-convert-endian = "gguf.scripts:gguf_convert_endian_entrypoint" +gguf-dump = "gguf.scripts:gguf_dump_entrypoint" +gguf-set-metadata = "gguf.scripts:gguf_set_metadata_entrypoint" +gguf-new-metadata = "gguf.scripts:gguf_new_metadata_entrypoint" From 8d59d911711b8f1ba9ec57c4b192ccd2628af033 Mon Sep 17 00:00:00 2001 From: hydai Date: Thu, 9 Jan 2025 04:03:28 +0800 Subject: [PATCH 052/279] fix: add missing msg in static_assert (#11143) Signed-off-by: hydai --- ggml/src/ggml-cuda/concat.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/concat.cu b/ggml/src/ggml-cuda/concat.cu index 2f42b8a95..aafbaf803 100644 --- a/ggml/src/ggml-cuda/concat.cu +++ b/ggml/src/ggml-cuda/concat.cu @@ -124,7 +124,7 @@ static __global__ void __launch_bounds__(CUDA_CONCAT_BLOCK_SIZE) uint64_t nb1, uint64_t nb2, uint64_t nb3){ - static_assert(dim >= 0 && dim <= 3); + static_assert(dim >= 0 && dim <= 3, "dim must be in [0, 3]"); const int64_t i3 = blockIdx.z; const int64_t i2 = blockIdx.y; From d9feae1c06321aac9662fd4b4249452dccaec553 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Thu, 9 Jan 2025 10:07:33 +0100 Subject: [PATCH 053/279] llama-chat : add phi 4 template (#11148) --- src/llama-chat.cpp | 13 ++++++++++++- src/llama-chat.h | 1 + tests/test-chat-template.cpp | 6 +++++- 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 44670d3d8..1347ec156 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -35,6 +35,7 @@ static const std::map LLM_CHAT_TEMPLATES = { { "mistral-v3-tekken", LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN }, { "mistral-v7", LLM_CHAT_TEMPLATE_MISTRAL_V7 }, { "phi3", LLM_CHAT_TEMPLATE_PHI_3 }, + { "phi4", LLM_CHAT_TEMPLATE_PHI_4 }, { "falcon3", LLM_CHAT_TEMPLATE_FALCON_3 }, { "zephyr", LLM_CHAT_TEMPLATE_ZEPHYR }, { "monarch", LLM_CHAT_TEMPLATE_MONARCH }, @@ -73,7 +74,9 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { return tmpl.find(haystack) != std::string::npos; }; if (tmpl_contains("<|im_start|>")) { - return LLM_CHAT_TEMPLATE_CHATML; + return tmpl_contains("<|im_sep|>") + ? LLM_CHAT_TEMPLATE_PHI_4 + : LLM_CHAT_TEMPLATE_CHATML; } else if (tmpl.find("mistral") == 0 || tmpl_contains("[INST]")) { if (tmpl_contains("[SYSTEM_PROMPT]")) { return LLM_CHAT_TEMPLATE_MISTRAL_V7; @@ -269,6 +272,14 @@ int32_t llm_chat_apply_template( if (add_ass) { ss << "<|assistant|>\n"; } + } else if (tmpl == LLM_CHAT_TEMPLATE_PHI_4) { + // chatml template + for (auto message : chat) { + ss << "<|im_start|>" << message->role << "<|im_sep|>" << message->content << "<|im_end|>"; + } + if (add_ass) { + ss << "<|im_start|>assistant<|im_sep|>"; + } } else if (tmpl == LLM_CHAT_TEMPLATE_FALCON_3) { // Falcon 3 for (auto message : chat) { diff --git a/src/llama-chat.h b/src/llama-chat.h index b8e94d9ef..3a4d07ce3 100644 --- a/src/llama-chat.h +++ b/src/llama-chat.h @@ -15,6 +15,7 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN, LLM_CHAT_TEMPLATE_MISTRAL_V7, LLM_CHAT_TEMPLATE_PHI_3, + LLM_CHAT_TEMPLATE_PHI_4, LLM_CHAT_TEMPLATE_FALCON_3, LLM_CHAT_TEMPLATE_ZEPHYR, LLM_CHAT_TEMPLATE_MONARCH, diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp index 51bfb155b..f1f9aec4d 100644 --- a/tests/test-chat-template.cpp +++ b/tests/test-chat-template.cpp @@ -78,7 +78,9 @@ int main(void) { // ai-sage/GigaChat-20B-A3B-instruct "{% if messages[0]['role'] == 'system' -%}\n {%- set loop_messages = messages[1:] -%}\n {%- set system_message = bos_token + messages[0]['content'] + additional_special_tokens[1] -%}\n{%- else -%}\n {%- set loop_messages = messages -%}\n {%- set system_message = bos_token + '' -%}\n{%- endif -%}\n{%- for message in loop_messages %}\n {% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}\n {{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}\n {% endif %}\n \n {%- if loop.index0 == 0 -%}\n {{ system_message -}}\n {%- endif -%}\n {%- if message['role'] == 'user' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {{ 'available functions' + additional_special_tokens[0] + additional_special_tokens[2] + additional_special_tokens[3] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if message['role'] == 'assistant' -%}\n {{ message['role'] + additional_special_tokens[0] + message['content'] + additional_special_tokens[1] -}}\n {%- endif -%}\n {%- if loop.last and add_generation_prompt -%}\n {{ 'assistant' + additional_special_tokens[0] -}}\n {%- endif -%}\n{%- endfor %}", // Infinigence/Megrez-3B-Instruct - u8"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct,将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' }}{% endif %}" + u8"{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|role_start|>system<|role_end|>你是Megrez-3B-Instruct,将针对用户的问题给出详细的、积极的回答。<|turn_end|>' }}{% endif %}{{ '<|role_start|>' + message['role'] + '<|role_end|>' + message['content'] + '<|turn_end|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|role_start|>assistant<|role_end|>' }}{% endif %}", + // phi-4 + "{% for message in messages %}{% if (message['role'] == 'system') %}{{'<|im_start|>system<|im_sep|>' + message['content'] + '<|im_end|>'}}{% elif (message['role'] == 'user') %}{{'<|im_start|>user<|im_sep|>' + message['content'] + '<|im_end|><|im_start|>assistant<|im_sep|>'}}{% elif (message['role'] == 'assistant') %}{{message['content'] + '<|im_end|>'}}{% endif %}{% endfor %}", }; std::vector expected_output = { // teknium/OpenHermes-2.5-Mistral-7B @@ -137,6 +139,8 @@ int main(void) { "You are a helpful assistant<|message_sep|>user<|role_sep|>Hello<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>Hi there<|message_sep|>user<|role_sep|>Who are you<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|> I am an assistant <|message_sep|>user<|role_sep|>Another question<|message_sep|>available functions<|role_sep|>[]<|message_sep|>assistant<|role_sep|>", // Infinigence/Megrez-3B-Instruct "<|role_start|>system<|role_end|>You are a helpful assistant<|turn_end|><|role_start|>user<|role_end|>Hello<|turn_end|><|role_start|>assistant<|role_end|>Hi there<|turn_end|><|role_start|>user<|role_end|>Who are you<|turn_end|><|role_start|>assistant<|role_end|> I am an assistant <|turn_end|><|role_start|>user<|role_end|>Another question<|turn_end|><|role_start|>assistant<|role_end|>", + // phi-4 + "<|im_start|>system<|im_sep|>You are a helpful assistant<|im_end|><|im_start|>user<|im_sep|>Hello<|im_end|><|im_start|>assistant<|im_sep|>Hi there<|im_end|><|im_start|>user<|im_sep|>Who are you<|im_end|><|im_start|>assistant<|im_sep|> I am an assistant <|im_end|><|im_start|>user<|im_sep|>Another question<|im_end|><|im_start|>assistant<|im_sep|>", }; std::vector formatted_chat(1024); int32_t res; From be0e950c91cde2d8488ae32162b549d7023482f0 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 9 Jan 2025 11:15:15 +0200 Subject: [PATCH 054/279] media : remove old img [no ci] --- media/llama-leader.jpeg | Bin 199945 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 media/llama-leader.jpeg diff --git a/media/llama-leader.jpeg b/media/llama-leader.jpeg deleted file mode 100644 index 0b4e6e1cfbd442f1d945f90d5d668e19252ccffd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 199945 zcmb5VXIK+k8!k+BBcK9GN2CO#gH#DcdNoK(NC>?XngRg{f}nuX5lE1#lu!~v4@hXD z(tGc)ReIf+fP$hYzTdgd|8M5UTv==8nftld)3Ro*Kl6Y7rC~EOGBu*1rKP2Dp`J8< zmT2^8nE(0b@;{fDFEd?cVPR&y#>;l?>eXw4T-+SI03mTPQ6W(g30ZXo2`N=+5m7}0 zMO6(g9UUEU1tW7qZ8Pg(9qHUMf?8_S~?nf2F6SOP&;|49ed>bBu6FuTC|_aFW62t)UbBo z^eYLBBZ`kJjCNnym;%C*pvqj3Ef#uF1^*A-jhVWmt}m>4RsagS9AYW`E23;DFJ3j) zO9KpvG$Wa;<3c@2AAy!%z;?U<$XVYvT5;>acCBc~YJc>-BOQ6(%sj;pr6=LS|KcRGed_FZYd>FVnP|%cfdm8s_=zgJnGP zJdtFLm(5@s&WN8_te+@Zl4wwdnSXYz#xvDNkA`h`i4S78+6Dr;_=lRlCghRJnx`BC zND`y-lD+fK3%Raj`^;AlE-2wmhWWfWLeOp=47sme8#s{k`qdx#EqRa`rsF(F8Xp-S z85bE>W&y8C*F4-;pw1WDt71A|eCaEb$iPZ_HNR_W*6PbR`+PJQ88(f<5(y<$@X0E; zjTPsd75$ZNrd9=zm#Lg7yO2nK8I&D^h6*t_Wbr|dXQ6@&u8eraRUe;b$oTm;M9$l3 zM<$G2zWqE`(*BsTl~p-73&iE^KlrHJ(|`7nmtnF=4ZlctBa2bS9hnd2LYe)5c|MhX zz#*N*Tfi=pS6LWa}D&FbE^DBbb>3$vVfb%6=XrnfbwTA;fmL z;bmN}R0YW1+uZsStBedmHpj*nyVr6$DbG4{tLco`@W*kAV1I+H5 zR~&+xQNh&6y=4zgX3W${v4(DlVe;JUD|H>w^KX-1>tYUgLq~jDgFeJHzmlXrCSz{P z^LBvY85WI=lxmP~)0(&6JuBB|pU5tMkpe0&*zS0c?>D*QOUU@BD1()e3S>=r^?B#D z>rNumIIm)+9=pVV3Lt|Dn29wO00Rut3tghar}%PprwY~S{bPi;bD$6$#W8K{WyZA- z8p*}-Sx{JcqaF-gS&fOkRF>fn6@;mA+=?gDfj@A$7fKlgEa$@Vp%2r0wj7`cOge0SZwy${ z#L>DN#=e^HZv%}9KL@D{Jh2b(U*ra{TfZyghRK0*9L&Rp~J z8>!H5(qoMsicJw_f=P;Hwtw3%Aj?e{Zg+QEBGKUWrt(HPuGw#2N#6$B)r2$+WEoS1 zc1{CQ?XbU8h;Y+P{5rKid;GG1&vL=g?!9>B9Ec182xRF9($H zA|?-oS)~FidpjJK8ulEA+GG>ac2Ubq?&Zt>hVH$eKq#6!Me%OvCC#R4o@#JCI`V!p z=3(D|%57xy4Eo;0ErsJs29J!Ws;;8Ul~glTX<%oW!lCKavP=XoBhM96VQOIGFwq4t zAbTnY?@T#-Y=LDYGjPC!=}1dp$W-BMs>5^=*Nkb1G{RR(1@S4ll1q&9S=prz+dHma z-YO_R9M?MTqilx0N6Z3K9)?~$KvaiEZ+mia`;1FfuZ@Z6gmv0*N+~y}Q8M)?_G>_B zE|?jw;@_;;32`*f3+O}m`!_qRu$3AzKC9;uc$Vvx1-hO>Dle!izil1UgtnR0KkC!MJK3kmi^O4~c!9u0; zQb9vrww(lNJ6(M<8?gZojMG_?k1d9I!Iu-$4FF_r>H{~53xLl&^q5Q(V9fK=uiLFz zFqQVczJU$N4Wx7ACUy=krlfnvd(Uva3zp`vQ|1En8-4YWJx2D)rg^+fO?^q5I}~P! zq2|j;hsl}kt&-2AkOYt^pwgp(4Pkm&h?OCe?yvb~S~0KRm`kmR-03r8!1>|5uMwBB z^|FaDqflaA`Lmv83A1F5Jiv4Wv^De@+8-_r=XwS6=U@@Gg!-uht4+#mFTnt+di>=o zemlNo%9pu?3cmH!8QvVLYV&b@rj=P7y zKT&O}(32<$!>5X?EF_Mwk`}N{&5e0WS4&tzMA5aX>&9r`i8p-B%cN?r7ZvJ&65%R< zDV3pOHW584{FeZj&`ajQN}HnDWa&OL1gT&*OLFC~<$CE-eCx?(4sdY_*4xu~GZIo~ zM@kNno$-ZAyCzExDjHJLTrkXRz9|-)%p0e=5`qabSG+lU`!416W|wL8p2n?vW|Fh` zba}_CnmfV?OVId3$#?zIt`dBvX`Rms7SHkW{PH0J?Ppb7&0Sc5#+g1|j`JB|j`|u| zg1J%Qb{@I0FTpZhBe( zn}SXvmx)j!O^On3@>4I)oLi$Le`mA`w@buL_UzrN-%4xTPIIjB!6Zwm-U0Jxf89Fp zbOJ@$pF})=o0!;C^HL0QqKX9!A9qoKBw;K$?^~EH0lL@MYW%`M{!Naop$>hbe?Qf zcwU7cvBd4YvUueN3xN2kS}*f0&5-%;)O$tNPo||bf1cN__qbA>B{q<0OZC|Q>$ucQ zOvd{RTBuhEv=JMxly9I!;sc@vC5NRo+sBo5H6VoY#0<0rC(5!Bm>negD=!ugDLgL;)TE2 zk^ss~!f`RLX>@2AcF6Z5S$@^xHd%a!WZ~VTQ^97(p0Q%q4HB5%D$wo1g!z(z<__ge z*#kG(hF)_*-hjfc9nh65& zhhM*X&#bochFUs7d*|Vvxi`VSnSER^om0hPX@#2o#LW!x1>WHu0bkSeB0crB_~N-@)a;6B{i5ad`z!K<*>m3#{*06w1@l zYgp8cH{rRhR&{QcBQiG>XC;Tg6<#Uy%JcA!td{a4Bv~}416}t3e=B2m=KBmjk}}en zCZBX&lf(w?1Z0D|&Fu9Eb*4c*$d~=3U5BcU6nQLOVK(uF8rXlmXlCZq*wj~?Zyt6;_4TLo&zQZ}FjviD zB5AYn5hk|ehIz3`t6A=R6(P9PUpnWG`Mg_MhZ*FDrLtaP$PNBghmnYsMvs)ENb{m4 zKI!HmVSg{qrmiuWI#g~m0CCR?eTU3Ix8bjf)|~J7zZNs&rBZ&$ z`bFk!bFsZW9{N~ho~zzPs@Bj8fh8Q5j@h3y|P$4 zF8Q+Uq=4}=;_4~0zgTcTx{t=?fx0_<@^WOaRh`kDdOr^TYV3Yp*&4HdIQyZ`Fv+qw zRIkm5uYQ}`QrX?kc#aj>rteh!>mCAluLmjgNKHPg}ER(_q!-p40OoyofzjXr*7vB zxj6Nsp1nEn>kaE=tlkuuCC=6X$1P&c3LYO|sRG^6^)>5xYo8_=7gu$Y8E9e(c$>k) z%qzwVFr{&!Ws|g~|Jv~sJynnyWSMNyQZg2<*=qOFkxD-gogTf^}TqffKcnpY;!nm?zHmujc7=k(I#S++fnOqGOV)~g}0C9>O zY&^oW>&ZN{&!-gIOiif{Y z?*5#Olx%@b-pLL18!PI-6g{xZO;KbTw>A|P64!|vH_pI_GV`&<8w)+-p>fd_laZ0| zL&!pf&`BT*{oTrZKdclw zhjTYv7jF=LmhjAr)YHEh`+AiquJw!%gS*St&K@LYy-sgGVemFoA@2*G|K5T0*JP*tf`_QLnN(&GYijd*pWR2tCVN0U?DC)5pap*uqG@sk}-V+oVFeeIj-8>OW!5?bx*7>)s#A>ojBE|EvE86JWBI5b;$squ20B6t=~ux ztbb^88$@n~WcRlPuU${iigt2Jdtoj<+Rk@r)mV@SlglDmsv~Sg`o=V9K zh78%qWCHJhbI66Iji0QCy}zL+VG~iqmuMPNi+(7(((%yK(tF?4JS@_fP@Z~AkrQ;` zL6~@CN+_M}1J`Pv3cctMbf5Sl`vaer@&cCRqHs_O{I-%}TC`fI>^UcB=C$ozYyV!Y zJ5QGL3N4};{ISC>c6i5oWAAQ<{Iy!asE1)+Ii}@|U6sGgw!W3Dh>nnBO!h1ek|3qB zM5jH_mkPI&{jvht z;~J#X16EF$&@;2W&s(J(ebk24EYy9&ZFZC$sRk%t$j{OK#dXdfPx?~nO8d+TXWp}Y z`*Igtoe#RqqzfX-Uzf}C)(t>iCF7lfjSZjulf3wC&S9s7u$T{N+6;f(*19*&47{7| zyX8L{SerUa_c6cMTxMgfShnIx#3E!ExtfGJUO&|w3%M!ZC3x^{^ps%7>q%~;a^w(l=;=2!>x;3S&gwPaaVnxO-71x#PdB#(EQ;PuNkI1CWYBGQwQjv3+@uap zzOm};-k8|C`mKuMF!jD$(H03PPU)#azFUGSIO2ULuOwv+UNC&x$vxq<9;n8HcKCO z8H3Wn5VlrL=h|;1%VzPDb5zUU*E00Ih56RP+upnI}LkUB6 z9&5exLe0aL=QTP1RESD8)efDH(heW~r;$oV3H*L>%_(~vxtUYHrtmUzsgUgbSQfq& zr4bQz^S?}w;nLyrTFJfl`@zbH)9s-*FT!%$qSQ}s<`hW|O;iBM*+&C4rJN_<|6T3t zIZsgtdx*?Rd|o_;Se^T)xezANz81Uv`1+HC);6X2ZGWuD4lf}-l2tXD-b>stm@TXuajeZY&&=Upv%?QA6mhNJ@YcgOscqOyLUiuT9ayl9>CFp?<=5sksfWksWXdwmA<{gQh}|R^Y*k+l{sXirc?RH>f=Y0V?Ue~ z;)s6y;m?Q;^WD~=S0j7m3NzG`pS#Gr??@q(_9(}zj+*BI(P#G?1H8QI6}Bdc(o-)x zafB@758@`}9SYR*tx)rPZDR9l?$I^itk*g`zuc_b056!|I=m%x~hV5?ef zl`K1cd|F!Tw~&tep!>7W(+2$e@M6s#gci#=N79;V%96x_QDAEOYBEM>dW+Deo_~_Y ztyKDEY3rT1qn-$Kw@yJfseX0$zo^^!(C}|4P1HuCU^8B28jXxtD)1qCMS+RtAq1bv z(d34m-awJj_Ky9Wh!u9)Rqen=1X_SbZ@SX+&y*3g#fp;;Z_q3eHx&!uQg8I;etSq zIx@1R@k=9cDi@zr*<6&_CJh3i>et5XjUrNg4uVOj&*|c;PRR0Kg}3G8`KKak1L>5f z^#)B$xfb8MXxYI9Yj3XadmVdcwTML*krYF}QD#s;w=Ka>-9r_IXUMsrJjL03vU4F~ zBH8GS4^xL&U+pmLYlL=hMmeMBZ0FL$77!C4=WWfkw;TAN;r0fwa%gVE3M;?2QNG32 zt@U5XTe{{YS|RQ$h~vG zbzgumLj!(Wyn0DVi^t%{?<%?*>4>>Tm<{=P z$d)gfc$H$ew;!m%f2awZd_>L7!h0+d&9(OeqC(n;{gk`!hr4Ik z-Sb=Fr6Uh7x)SqX;%`HP7nQwjDpXUpCKfTj?n#DcJ&tfJdQj4ZB}Z4xbg4Zr?cX_k zdwngp8J$Bgy5c~rHT1?bO-(i*e>shCjCgY!RY;*kgsm4;H_a7#loI969MYY8g3wsi zt@Ah_)?7~Z{ycl#q(O}Mk2*>G|Fi)VAfu0E>>>s{qgL>}?Dw1$NgI z1rc5lCUINTJ7-ax!fol5Q>t^kCknj^&O_dS1>dA@jZ+pNw4mtf@g$dIn*(1hY7f%X9!h2H!WIv?D3!zB$84+GMM}Wxs*)BIuX%tKJsg z+{TQ*@7>amohKLlp$UQ!SjK0zH0*l{qP>jH+c)}G2PN7bPnHjV>6_DbrhbM9+3i?p z@(rWTeh$Ki3+2>r8=x%ALA~M0=#y%$Q(v z8}6)?i<>Bnt9ID2qw94FQOml)28z4ot+bU zt4}2|aIXe@VG+M#Kie4ynln`Ppr&Q&Xw6zU?(rNx!JAEv&<3jr3K@#vT>N=0+7LKfSVvSz4{^SSblU~@$ zL%&&%z;7beZwAcGo@aG!UEHpyxb+svKv?n|?;^@T$MeXFVy~5ayB@m4PZp@UphlZVrzPTB({d%J@T$EUYhKBrT zZ~FoCJB_^kmhALOac8LXjS9K*&B%Ehbuub4H#X`kwoT2Zdss*NDKG+l2L(6GEe}O$ z+u1BEdrud(q^gh?8+9M|c+N86uZ8XlEi{S`5%AG_WDlciSP zc%(Z5-g;iL7ba`Bpm~ND{B7*$U;ak_@buBclZc#JzpAaMtM9Jf@{SL8yyiHGx3j4j z*z+1vzcyAg+}tx9(h@D%!3CF6hh!K(y_wi3u4igv6qYr)_cU6zz%{EK~!q3_pN-KsbC#4;q_OWW-ZR!hK z7u?sUmhvsye>OQf&S|B-R{8aZMq~O+^Iea5I(Q-9s~Qxke)DNijda>;LV#&mf)4JJ zef!qa#J+fe!yKiKOp6X=XvE5!TR9^GTAi4q|1?&$fhyqefzQDoiv}P#i@@F5ll#ku zIT}kR^;4zyq9r};_5@2NGzFBJwOZiqe`vgu)7*@LqtfBMNuiT==!-X6P6%gB#-K({t4|6HtqUN~FHS_uV=ii`84V3km z)c;oUjh+}>QNhOD4hp~bVDLYOfd>M<_WPrTz2z136LW{(BN9nnL8aI}ie{kWb#Jxu zUZ3xezNPj#=h?Y?e@%|C`%Lk}W~tU(cs-Y!3%oLX5^*!oII#V^2JRCQP-rH%arWf< z)cvi*00=Dn0ov=faqZCV&nOp(RY$YI;<(Cm8`!&!Xy^O^yW1^2N5ue2_{_qWQ%xsl zpzG;*MG4g?KWn=0+U73i8W+*ufX{?W|BRI2lr%|4wnit}WcB7CVqIGVQhe3*Sbe zJaO9MZmzdATwE)f!Vm0`pR6JbF%Q3jnnxcR`>Ijuch8;Bx&Iwin2?Vg^_r9ZRU{w7 zoRha|ueE)moK!#Gr>re~4*TWg#E`YH8)UN;b$f&i75#N4MnGEIXQ_5@BiC4Dsq@n_ zV}?!Vnxzn}%=6z~`?u8kUakn7D%lM=tnh|~wXNREd)rV2@OCDj*IKaUUEANN#V4FYYkhDKhN-!(-G$^^d%$mV!NX8%Wygjmv7OwMI2} zw{(H~C#^R-14Y$Z+i}>kMYI% zDb`m4A$v=ySx(Ma3SzAE-c#V-5MFI6B=qZlk88TLJ9*|XuP~1DK*#X6i601cCiSy) z4GRWMeo87|?Q$)Ctrp(oL7pBMsyfb1S)(0I^L|Wt0$X=Udhjk4H~sM;QG7Y3@7kLW z+Ak#6zMi}b{!){2K`J`B)wcO%OUvonC43#p(aUSd+6p_-;xVy(x?XY)?P&1r%ng#S z#Y#ON_$>TfaaX28Vr(L?L!IlimKwDyN-lreWg*-p^|b_Pb`=g`j8MOv_*tB-#`D~% z?#cCC*PTGj#AVN?`Xg=jwfX1>mDD}?xBs3t@^#iq_kCsoXJIfs(V_nO zvS@J5#KAWBO|Zx65ai&p6)$2b{)Klsdra?rA88h8PrkG0u5fNOXZZ(4j7y^jaWnZ*V_w{6_f+<6 zzm|7nfejRL2S(dZfoK@EOqnPz8^H}Wt}ecOZxbs!mQ|V)uKF9fa>3<2o_FfrrA?5> z!ADCsmK-_n9uK==&+WFq{p4w1m3Mx#{moIbktd~Am>4+ZX@~e)6mqiDTklxeqVE$n zLB4Wye}q0td8oKyrPALY;y2>y+3$<_sWijiYTxAhT&Jd~#;-J>eeWdifgW+gzmy)1 zP)f&$K2XERCZWU5w7TrTxa`x&knK)5b9FlHHMKB#?$@3(#yL6S1oThT1QrIp0!qX5z23*j~Z&>@aTbX1I&sG3vRe>qi~1+0i0WSn$azX1D+K%IPLCoJ?_fg(?Uf zgyB`?A~xPOPJOA%&1LP9oFuI}CYDs6&PUAUd23IBaw8H~Uk|DVJimXX#8G5?AiBnf zAd)RRG1|~{y-21hZk+InF|aZ6x^~k<1$y#b1$uUa$eA-n{vdjKC@NM@8PwaAqvg0&NJD&vB>iF)to?z<^yai`71xsN*5|dc zmD;MowW-c0TG~%7u*Y-l6n>z0MI?)w+-uTuV5^r!+SNeM8UKTSZDwc29Eyk|LqyKe zXp-M1q%ffGPQxx5_xlBhF(jdlZl>n`4~xZ(vt!=L4=u%nhI4yw!-9@Sr8xm`$EUR6 zz}za?RVkk~XHk1((>I{z>pj_?1pT=?|dwBOy%WA&wOgQP;?A0*j|dg9r5t(@^+VSUZ+Ew_Bz18zMQO1`wyN4|HhWM3Y1<#G*~u?hEQ-ECBvDeZL|dSr zg{Xlz1LZ@%niCk0KpnN6V#(o2>Za^fq^G2 z3;olh&j-%_SOZRmG_7LCJZL=mRwqJs zhGY2%-i~-Dq!l52bUZ`j?EHADExk0So4o7Q95Oe_a(IHy+_m$!6eNL@%9E=$)Priq zxO4I$)jz#37U+|jG3Bbzl>v-Xoos8AQ+^dbYpphL)M$9CJ{B`=I`Hr8ptXDQR1`UY`faNRW24pPzOUCWe zs|IHqOYOFow)-Yf>M+XYfvCA0>a4nAWN*Ncc&>Tc40mOriAU&6$Z#n8hPFlk3>A}0 z&HZM=!Yu;c5r;DZ&FPij8_L7bF^v5}P#KVDSulTnXi-#@QL^T2&CGHC&io{F$0IJG zPLEcI)ffzyR+4?xI=qlS(k(G%tgY^$)f_rkn-)me{hBO&1sf?nQ>!QVG^nBm&sA{0 z?J5XyQf=-OFxHw0hiqT*AW3aPijt1*b>+MH_@;rhI-`HMy&RK8)8fW-lotxQ?Ps{o z_Y^e=2?#fPUtahNZ;f(HepZX_)W+$Sld9Gal%J;tm$Fyuoh3e)bD)mtqw#_9o}mu& z;Gn!FAxKR>p!Ff3?7FKI+2FZstMV2XZ#CB?=-?pNkh}64Dg<7SbGq&9k@=p$@u@)| z!Q}lE=S%CJSg%KcRpzDRw)J%@fdxnRJ#8Vcq_v%;Hk#w$;1lHTn0Jv$Paa}=n5gmC zQ9IpG&M}lU@F91tJWYvwO2hJ}cyf zBu?eSEBqNf-|a#2?s`iZu3@eA=0xI=nY-fvsp@9hM40NqNbkxb;A^_hz{Z=Ys{WY< z25Ey3VHxo_A6t@XUPp?ZQ=)iSOK#v<=2VDVC~EBrDPPtYQgF<#VH3DM653>Y;;l}< z^>x#gYsnwvK!(Ge08#_82`a%3Vdou&Lph3vZ+oN;HthrNbM6EX^VMu#8>LXDnmg{s zBHV!a@;yp~lg|rzh@SCw$q1VsKPwhaMvd{o%@TVFqOAXs%q75$&L)ZR=8e4&)Z(78 zbeP6cao-O4B$Y3C0b_tVhhmXMiPt{HtABT2E2c{=HD~(=2I18=Lz#59&I#u~lCG~# zI*?E)pYBoBzlpJZJE!|3?B!WC-oE8UC`K{vWk_;B43{mV@n8?F7kX)w)U@>K%S04M zwL`(X!I_X@XbarKcq^w(BJ&hm7WFRjYQdQ&_jU;J%YNwd{H~``?HxtlpyAfYkuS+2 z0UxbOm1}LP(zSdWhT}-ZMJ*m&IX;^!`OVPBKcwJdF|mG-Z8}Jb5MfBx=wGRRu}_fU z&@UY)EgFF{j9!Av2bUpvYO{48wn7Q5#}BJpEiew4k2O1vL?|C}3WDcm4lE;_a|{T3 z=reKP=y9#7#kj&r**)?bj>Yn)8TVEXVfv9Ak4o_=>3!@26B{KCK zAle)Cd)59Z@*Z{J^VADlZ`=o)Vn;>kT9A*&-G37{-tL~E+vcKJ9EaXgel&YIEKEAG z${XGA6jP>1XrnND?-Hxl`ck-$z6Jtohg+maANr0sp&E+^#vGlZkh2$1ndD$qRJ#Y7 zP#F?s>&O$bJ5c5B-QJnZ1uIEzROD6vFBO#G{}1mY$4T2U=IXkd*?|h}E;YQ^@3cHr zd%n6~80&F4C>t>$-SXsvm0EHJTdCaxH^;&W8C5Hplvx>b(b>s`#!J;Ii4Ww(b{kTX zw76P}CJkbJi->UhW`WsD#mn#Q345j`^R9gTo|EKD1s7U;m+{(>AX??&_mkCviJgcB z_vL}H#;Ug)u<@EZ$%>HSX2$z`UKLtb?1zX3lV>oLdFq!rhuhZeY};PA4@S7O>ic(V zI7G1}C2g{wj7E?m&Nev$EnG2=&kg@1COiLtJ{m+G*$Px9~kp3@^?9AYr&c+ z%9|VGWaPK+Ji2o)JDddF8n`)V0?c*si|g(THup>ryqe9{d|2NUJQu-sq(B4K!kKgO3m02 z2;;sD=Jr{#%PT}D^nl86|0Jkor>(Vad^OFhLp50Spz=f8x48$ey55NUH8yWzhTUd| zL8@-kEg`5xf%iim8NyQFL0yVRh`D$CSuPWnMdS{mG^K=wlPCT%6w#;={K-@CE=&|= z(%h5EepAX&8T$I}i505CxF~J3#e3F+?#q%@$fTBOh`%0;LnS)$TPd%c2MV&Th$y#- z=n+=-8wgW>r!NYKlll}2^%1*6C{A@F*fMVqGS}cTbn%`|n)ZV(RRj_|@aDf67GJ&U zC-?E5;Mq;Pn*!pO8{arS*Q^;ryt>-nF%XfnfJ*a*UryqjMht%9FlBnK6OdB&xX!1{ ziU!6b6Q@AaPbW;D^%^GbE)&b1VWNjMF|pyyt6sHd!dU=YAD)o$jS%_Tyo8wkk_>(zp&fmVqXwT&7%tg-+KBlPoOLkdt5@?1=h9 zb9;^aUACfYOLppto!Qxb!*Y*NA{|7}gzf!abP9yK%)Y^fv1T{HoJ%Uh#3W8pMreU4 z*2B}%K#$2KmJRyBB#pq(EtHXIVnTCdDJaZS3_-l?eTVs!aR3^fnc^A%yz^nX&%m0I zxTdNG#DUXwp-l8xVrfUEyM&GZk`s-%SUgBPAJBcr?Hj*UTL)Aiub^ z8^z|6j*)js^%)a%1~FHKL^9$tdxFbTbg$CLRL8F7NXC08=nT*^TxJ7sGV3zMW(YsK zM5hEukkaQ8jZ-k|aS<_b>vv^{bG;N3XP9r8Wz8vJlFaaO!mMAI3BbViGUH`~Nv0mu z+T?>lM!bs%50mh-*G(R56VMC}6MCT=?7rI}DbpU2G9`GKecylCGg(}%O@s_GT+j@e z#zw>su_gTvg*FWJ%9stP%h-fmggGuT=?b~TQbFi3dBi#5dF3jU# zvdXj9$8zvk3k%aI#F*$^24qw3Fasp=LVZLy>RepPIOu6vX=!|fg=u5n12{|=9_d{+ z;CV{j-6hXPL(4!*PfySAza3!IU0&2JUG(f63~a*JMeYbG>WV5T=`h|<*0Th-aOzX{ zd@)gXg3&V4%?stRA$qKPxNwUd+;zIvwAMXtRo1cwBKOWv;P1~6{A_=JNf}6i-+@3n zQj2IQgWIBiXol>z875=x-f7&GvaL5#Nh-T;V|l_+kiJB~|3(Lxp`U(ax*_m9L+S#= zqykj7ucCSVqO6r8@#i%Dca+pBTIxdMLV8|!7vXDo*Pl%X4{3mjtebWHzJ!*V|3lNu zNLT9u#_N<97EIgMFV32)WXc)f?f%eoXIoLMv1VQW+7l5oM|{k${+^37_@DZ}(NS5u zSd%!tdqiiq?~E!cgA-CSB?VSezyD?KI=I!KCvkxTcSspTWmH+0MR5`Ao92Wz@cP+bX+@B}lDX+RS%JYhm_1CLzwKe4V!&nFG$NObm2_xIZ*ih>I|S)5&Ro zeV3{6G@HaN-JeuJU4~N7YB2SA>oPi88vLYM?fJ$nLp+3Vs}V7Sht#_7F`<}@58^mw zED=|rGF;M}CV^}eYr>0*XK8&Ch?(E-bZM&C>x~Z?*r=MUF+S`2L-S0c^k=tnd10dA zjp%=M8C0=m89MuLBHnJ7`1GXKjotXW9up^G;+_|p`d$V$9U(3$j1=16OyhaSADU6D z8MQgRW~L;4p>~4);j>$T8B7d3B)@W!iP3X5L@h|hiirsM`aAV6XkoT0_8sEW;Lh#K z)p}YVm>8344ga>&UMl8e)lKG1>ZY1Qt)s}a(f1Ci3zk1L@n)}>;;epd*ex-u+WY%f zFAL?>t+&PhPBgg95PfnOJw^AD85ZQD z{*MgC?DUFma`|&hM2}m&PE<0ab{|s>p|XRLMu1OE|InnZ>xmxP6C-amP3GUeY2dMl z4noYlwJ~F&UW$oQhV}#8A}7<127)iLpXvzJy%;?bolgS7kL2`q=+HnRVTEhKW{|G&``5-=i@rH5W1s&5pkJ1i2OxmEt%J_gm zRoMDo87Eb9r&;QAv*rMSxa7)4gVG%)AhAq1);d}mT&nlojZv%oO*rudb`Nxvl-$bh9Ni^S+ys>!sCyt}p{Ij~%=-o5|$ z@#2I;wwcs-Vs!q`W`gtLUsD0^P@U;qkC85KU6Ab|EXbS_Zm*5E+r}o*kFyz!BW{r* z3`zoF)dU-Zlv<;f4oyi8f(_U6$_J@v%1WdtLh>69F4c2LTkE7 zl!36^;ES=Xa;s-bC|SX360hBQXJvR}L!eirFOe@v;=r6UNtpe-sfrqP>Y#FY?B$<6 z)c+f2pA4pk6~kl(5#j~%BGko^aomV=N}6h$pz_WPG}6wByJQ1#+9Z&-&PK`AE*d_A;t?m!zOiy&Y8$%D%50#`ZTv(A;#+BI;c)g^tmx7w0?DclHsX0 zI->-n5wj)W?^$sudYaZ3oFq~EkEoT!c9uzo8y;a>54~+r@X0=zYJz5Hf2)(2JT^#` z3a+K2b`s`Sp3KRZ#3Tbo?O2D{yso#+T~}svXW*LDn5@+S>0ViXP4~g!b5;%_6gv?2 zj>Kj;mED`rbZt$dv@OqQB=V~Qf}yh^*~;xsNzXH?W8N_9s`mphGfzB5v@LwQgU1b{ zC~*|h_OOjOm3)z&RG~@+q^0s$&8NO!iPqGWmERn$t$Pg0I#{1e@=TRl@GI(F_p~^K zmh#CG{tl*&&@+C-tvI01$3<*Nsn`iy{Fs%2=={_75+z9*DwGs|R0xwG>-7`kd8`SM z?WKVTrwl|2jgqye2K%!7asl&Xk4W|+{Z0wG`d5j`<ECogxF=s{C!y|h z$8tE+TV*pFZKX}ykQ>jg>b$LE41Kl}|E$NF_R8{U?*j5FS_NsLGV|ou;EP}Mt%f(k z{wvsjwdM7$ejI@+O19gPC?(2(MUu4fI@Ql@Mjud4tMk|k*fTk-{Rkj}hw&epN%g1a z`t$=o>-KJOBzfJRG3T6O>a8!EaeCf?H!`Zz+B6ozP4Qdcbs6scn{8$6zOAuy!h7+| zDp=ldaMoNbQ|$VwR@GtuxwLDpqZ;Rggn^AKQJgRmzT+#-Hq7*J6(fCgczuWm*K4IsS4J+onP}Ope94gw4=Jo{Fyz$n3e{(@re-h@)_1g&l6i?2lL;DM#6Ll{&s9XhVAT=Ehs!wur1dTcq# zlM0rq5JIHit;ZUhL`(A=a<9s)v&Vlg|s=jZG+9-)iB!B=(7ygNkS8Sr%4V)Dfw zn)e#BC-qEmdctfk?ITEXcxW|*g~7faDnvEb(5X_xWNHTToyKOu>EgP!=1QnZhAJm%a%3vf-02SWCTi{kOIgUe9X#x2%DK;odwuEB z+iAML5vUqWEja!s$E<}AL(TGkMVkGsLZqN2Gyls*IzlUZryesQS%c~lNcCuHAs{kM z72h;TDGzIA6aB5jAuy*JOSx$)G*t7c<1nKMqd^b>4kTYPQTh88sx%ZehW!sJRWwEl zBcb9>q}DMqfAh>=41fJVh%>;Mh{MDv3z(w|9PR*pNfXhj0dTa*Urnmqk~uj?o(4ji z{8q8EK{DfJ@jTkCh(wyehm>~r{Fi~-@_foy5f3{?nJ&7XO zRV5Cu;UHZuRbp=$90I!S|)7 z5Bq;7GNVPPFyPj-b}LeyBT}|6h424zwP*iBlXEnQh0~jl9i$t7BOI;$m<@oZ-|~n? zi?G|`C-3QXE;sBde;bkZnzBv`H*o)LAinF0B7grs0DVA$zv)-mKd0wU; zv2^s_-#}Io^pn-|e~dmkEyT_kE5da?e8g=b^1 zN756wIRe+@ynUAt8`iYxEkBbXMaWWP<4lpRM{7rrZ8y|89BoU&>bCyDmfg`VT_!9k zWLx$r9KwuAR231mTB_K&p?_h7{+VCM9&zkVmCXPbxy*AM$C%+ftqXC3?}U8=or>ep z;dw<>is6HkUuRJ#b6=3PArxQFl|ZQR3n52PSk@6B#+7!Uc_Y+(lR{Z!$Z9ZS{-FYg zsE(oy0BuTvm<9jyXu08S@oe zQVyYZtJN{6$mdJ%zfXnk`weCf^@B*8P8K7Q+o+lClTWEIsHSA%zgOp3W^#LcF0gY- ziYH9w=ps?W9A9t8r_SC}q-#@N9==z(p|~s9Cp~BOLiR zeNL!H9--ahPod?mc3Oz}l4KJQO;6Ijfn&7%jT%#!x?3xvXyG|4N`myxc1UWU&(;uRW)x`HS zL2hI?>DiVZm$x%El35d@=$RtvwX(6Plv9ga8?`btzDt^3_c-M9_?zVL$P>0|cM%j1 zG5godg)zBmblQnn^Q>#5{{Zx3Ya^UX6mjC7{2BeE9@$%fxxK9l0kGj%zd$@!s02p% zxGbKt7vxu^&!p#?eAuSS;^Xw^k5ScAYI+Ioe6qj`bpC*AY%-tLds`vofk1aLslLx! zoEkr{(EkAJ4x~DUOwq8DpH`$NH=P`-9%Xh7{>xsp@%=!DE5^srd^$4M>aVCeM)&ZX@EHAkR_WO&FcR<`9EX;cVzmc2D8~dJ z<5D8Fb8FFFw;!V?p9EDkwwvtr6NrAT#2E{RQNeWxWPiv?Pk{KdWD^_oShAnK;qwZZsQF_~F8;%$ zX3iNh3yL>I9pKk8YzouEZl`Zd9suz2L)|MvWBP+y z!yz3!yRpM1;e7I?tYHPw#M~Ho+?&{oPY(#{`cIEm$NEZ_h^4DG7qqcA+3MHGoCz~G zqIwR%!r<1<_Pa}|&BOA{%$wB(n-pUd@JnPN1qtFr=JTc$^6nn|V~bWp?$x@=4a9E# zTzu&N00i)f*)&dx`^n}m6Gl#u@`8`HH>19z@|6*%h)Hwl9klY~*111UD^kGCB`r7P zf4Qr~VAzm=hjU#WzBciQp%XzbXk9|*P)E@p4fGO5&3G<}(iG?9)`mqOd6igG=>w2B zuBD=A8G{%0$W3vb+IX7S_8w*JZv4~f&Hl5*y(yuGO6NX3oJbS+kWwR>36`1c+k@zx zrPFD^w#Xl=bKj~>p48D8ouMsqYqrEhT>5NIldN2vLqYCjfobE()Ebw^N&Y!f#@U-RE`{uk(mk4XMI@&ux7i<)vjTSway!e*P({vOHUN@i z=m1GgbuDkPY5xEutrp%BKyzbZYI8wcS#K=iYBfQlPpY&Ca_woE^CPQs9S)_F1o?Vy zY^->sPAjOgwA;s5ER0#0pq27q+2zP;8suEk8b!I($IYAbG_t)0fKTdl2$Unb+tk+Gn-TDmN~?Om@_y z@grQF!tkR2sQHZgb>N*oqD+S2t}W9>=R7wCykw-Z4V3A6aZ9btL_63MlZGXu7VH`t zj;LmNV2`5{M|2;V_Tn#W)>Ix(OmK?P{wCQ}6UD4->K#zIH`wk13NecweIa>?@G@}U z<3W84sX9E9np(QO4N%ny8!voA%wF@tYrA@_kg=GiGL`h%)Dkov$RCZgYek9>4=|vEiSRpK)JwWPUU(99CoXZm<0+Ecm7T83R?HuE+Gjonc z#?&-UDePoHf? z6}nXUSM{FQ^upY&aMVfwpeSrg%q;lL2inPGp8QaeN;6XM)8j?$FLI5}CDz1VV^V%6 zuMq76RF1Rh3qpQ`UPjsT@U`svw2A)!%kvk$R=V*uhd82K6OYnw5xeok+i2mz)S78H zST4nCWcpH7uv?uYq;p(c$BsD*Nh_9pjeboAjJ{@BbkMbff~zn%Nu`;#o#J8I;F(KI z;#ZJQCWAE-HS4D&tE9$?j}gjf#r<1*TH@SO;-uHUCqt>ra^7EfLvg1qHZYIvFgbM@ zVc?yA8gc3VHu~HYIoe$=O_}FQ9B#@MFh*C&^I#T=K;FgF1*ZTC!q!2EyoH@KIIHBI z9_x37q6pFrY+^%dxqEH1M)nyGa%i(5!ZYa`e;h0^*txzwC3QYCY3H3-$^`y2#*zww=?s^_M(p|RmHfsI3e zO($|=YQ3!#4>SR_ou?9=;_*emqnHM*-v0m#MtOVF&S}KC4-tzx?$)3!M9dVA$=-v) zyW9BRe74yb?k#FP?KrmhnEe$frn$DFWjgHj2~}yl0pt~am&DhqEp)O5J5qCFw#M0s zM}h^Ixh_N6<4uS2&t_5X7FWRSIxblpEsZ%yoyu|gdUH!#*{RNyQ}&3O^j=yUo8A3T zoh2OHWRJblPS(lOidOW7EoMUUg|BOqNs;kt9v$-zq4BI6MZg28VRl&;Owz*^K)0gSvJ$}@fMPRq-1k2zM0$Lig$W~p zhS=u0%wu}u0R2a6<$cKa2MBW#xEK0#O#cAMZEDRAQ`^8QCtDoyojMYZjhbmPFMOPr zcR4%Ak*!7i;sR+4+DaipY4Rspq7hB(hgta;6Q-p~)%t?bKMh2{;y7~@yvzQ!LX(Iz zpP9ti^TiJV?k;Vj(-^Q^4X-;N7Odn7yAuo2&Hgsc{98fq)3BEpCp)@xjCxPiV zKAk%=ora4wXLQTDh~u*85|tR&;ETOy*!0E4Dos8vRdkP zmv(2ta>|!CNCY?(n!@DyI%Tq_yr#6s<|lBq<1II=Bes#PeA$qslc?DH809a3>|EDz z4-2A6n(foNpCjIFG?FxO8q}any6uAOX+O+kNkH}#=PLw zO$rfaDGL6?amG42py5unG`$$7zN;H+SWD)K)}S0}k8@5=m6(#yzuQtQgNpd+^V5-O zgn-UVjDexAqMU7iwRX43mlH0aVXP!GNheNsaGpAcjH5C=jc!V)ctP1B?1(f8<8a)J zlH}(+x#1Mi+Ts%`oSo>=X!?rnJifvE_5yDkhNx`(rMlcqct=SO=oki^#1_l&mBC9RILkg1DtVDMClW>#e#x{9G=Z1 zTJkLjF?oqOoi@4Ur&4Zsbh7FczsJ5fJuUKlg{1!gC?0uMsZC%NAHDI38!REJTCwLd zM7W4fFB372{^baD4w1vd8IqaM9m9fQwks=Ii(bNkpo)&So)eVPzPGnM~k9v5$${Gbd8a) zX@9e*u+by&2hz_c$XZ|W$nI-ZD^vJP<=^Y6aROD~{{X)#yB-5na5tt40rH@4a95t8 z?d;TmWP`&v41fOFWf%u!hl=KdMDUR8DSJREQH^P9 z^#g}_flz>uu@DK3zSuC<-a}*kzPMxh%NfeDf{H-ur(+{mPzOJZu>3;@hCq(fp zH*haO08t8q8tE*^Z%gTCljJQo`7RViD*;~<_$ohqj;7D~9ee!Pri@=GZf|!m#7SrY zJF+b==AZF7wzQ+Ur57HdsU(}RLj0TIJn#omvqDL&zO5f|m*2_DGb6TU}Y2Wqbmpx9a#*gv3K{C=!gu5Mv^E#Vk zx$pWOn9=eUm;9+(N^7_UTAPrVaeiV$FD@+M9j!yenzr{v7G$228!e4j;L(nCPZ$ui zuMAvekNC9?NT4J5I&?YF%gA zBqXPDi!w9+0Ewl(raa;hq~cp7Rv$6RX}=s^<86F1+)z){XP-pcikefDO8)@!dSgWo zI#2SCF$6JR6MIjPpe4_Da(Z>;{=XxIr57%xmj3_+{Hp+JpJkKulcdb31}}3@NG&g& zO46QXcXpkf*UwX#S({ujE?RCh(iG(1^?rygTYZ3y^$P_(~L%9W4BMK4Y4Hr=FlF}h2959-4-fIpAx?$K|Z zxX&Mx-pH70y{{T!b>Md&qHPb}W zwL+RLNN@Q42<_s)L~!HWz_H?-R87V`R9w4*4i9@}S#gq@LDT7wU}hR`D1VQQiQUgS%hS9&gR^wjzKC-j5j{W_NzTZ>sdKoOf|&Lvw77nx zG4-aBOpNkc(=a{q&(lU+i-Bu1P!JiHRxrY%{L;6HIg&rC7M>Fvak8a=Q*|w{TixYV z1LNjl{{VwX!XX`~8&V$A<(?0a)|B2GAhkSHqJq_1OP@f>*1AVy)9OWt=(w---VgFM zpPhsRe6(|;WgNNd@*Ghq{vYId)AH%KF6~FP&I+v$KgXkJd1?BV8lCPgpN=5$wP_#&=@gd}{Z-KYTkhQkL*0?oGFHA`@<4x7MO{fzl z&)Dt@WO|AAKFwNWWy#Df`!JkQ9XQKLP`I@KWPBQU*rk^zUevY=LS8hcu?Ns;Jk*qO=eSkK-CrP#-aiQXVwpIfsIaEeL{hgG*L6i)jE8zY*;} z>%O1-jbnRv4MYoK2WJH+=9Zh6OYe234&qizXoMjF!$BW%t$-)+f8Gn(NI`XQZ;xwZq(#WgY9*lqv2>gyUTfA+1PS23}k1JRI)FY#+x4)t`oQtEi3Vz&9<0} zOwA#G9!QbAzH*#wKgr>y(N>+#oW|gUmb|0Db8&Z5w2oQ9dx3d7v)$-{q4PE--T|tO zNGMi!C%I#?V%0tYoc-7qE1WRBdXvyC(HZ8oLhGr7~(QM?y>U&K0p$trssNk_Cg z!f&@meub+lPZASj&2|Qo`FmJAZA!1v^F4G=mijdR0G>h%IM^*e;bo>$By3cP%S+}_ zf)=Q*YE_v*o5(ac>u zYfwUQIXOPDEpoB54q9msS&z3Mb8c2mJTvK%z`=I=?cYkm`F#yIxE>6xS!6K_z~Y)| zq4ss$N4y=b)ax=Il*t%~-ryAz7e%(DJh(dx=G=)52&SO)udU zc?B!26ah`)Xa2KcTMu?IMm8CtcFu0(AE?A zaiZ=UWQyYs+EBc)(!ta&j+o&@zp^$0oOx@hUnP8PrDI<-lppAcpZSH}E&|sp2Q^t% z3LfoFEn}%;l6yeYJX$_-Yqa1du{En26+zAxUYt{G9temM+1TjYrf@HdF1x@YP>(41 zHIr*5bEj0OAkq+iD>_Y=88OdLvR-y4(etR~PBzQlq3)cHqtPkb zM@jzxiPS*}loV=4ghGrar*N*uBFG^d+W22HLuJgxQq#%PlTpev6PnQ0xy9c$70Z-~B0V}U&pcf}Mw&jPjcBV^dw5ua(Tz|{a@51rFN*-iI64uK z)W#hoYO`}&g2}5jXj0S!NxM zN9E1{IIaoknyj<8_;8adIS9VywA6ywiz$Il_~8O*V?F0yU;!>%BbWO@-7CLPn6xmX^Adm_ooT zQDZI9hAc=C3XG5udZO*Ac|m;7_=so@5wtXL?p?ch((R&Z5;pTe$-pj|RPeDVv?q4y zi0*Q7J4OZSIdvv6auI(r9@M~Bm1AxR{@hA!be^b1c- z$2TZ^w1+wT7jm9B`M^0JekNDN2w;4e`X{R*j?gf+7Pirj#j!Qru6be(1+HOw_BE(; z6VY)|K9h$qNL=@JtU|yPdr=_{4s@MGV2e{)`g2@1ZHH-?o(q%M#*+w7`fam7F{@2| zN^1>RqBgGKN`NE5^Tzkh@2DxggW8+q`Pv4DJ?;b`pcM;!t+Kjn97$~+K**vNr-r-- zz3I)x9izOY?q4hxFi-6gG@*ALqhOLyauNo!f~}xr3gJ*xoXa>;EiDddP6!eUWNkh{ z)L?UtCCkD%hQZm(DD0L1*y6MpBNI!Ci<7h?DVzx|7qo53?zV?W_0}#fkP+2U zzR~SXyaS#MtlNvgpmRay+&Yo6XTakim6p}PcY;YPm5yk{@&=Hm#v)BQHKe#0BP%YM zWa{>GLB>YnWbI89psuGmWJt6fXTmttVYw<&bsVsA{BHD2ZeH-7{+mXlB=2_3uq>29 zqEtr!B_#DbL&!c=ZCo-jl&vO7YB1tT@X{B|bfxzOgwSgWM0UvD<_=moq&Pr=O)M#5 zXtd(ir#le$7proySHrr9+95=KmqQk{gfXpXYvc_wyEIdsPkw6}Acmio)lENp-E%`i zU9_#a4j`W|YK+r>0bWS5J0%d(MTY|51eCwgj;F{NWs+NQK$4p3K$7;ca0TULUaH*Q6Gb)d4HG}VCZp}hAfaI| zkavU+9M_c~rPBd5W^XlYBABpgMbrza65@$1X_C+)Lruzx=~{eb7X-)j-yq4aX%W$I z$Cw*}cWWYyThsy0a359j1`e~DV3o_mw2d7{HnxG32wy%?Wi1@E`LyCefe2HDSReo# zGELw(0k8t&(mlz#FovEWvJ3`pUoI)cmH{YDVF*Z?Ngx#KD7nBo?aa-cCSX3129x0r zT{6jGozEs<*KqM9C8{>!=UG6ET+3apB)O}A{TM08w9w#rUnm>g`>{1W;zrb?3Dl`l z#YY;$CEO7Ns&b+QS!`gq@T7B`Ol`z6*)tn%^WbUYxg-@W4eytpI0}L%6y8QF0udbG zQ>-IkW!roc&nuE`%>)vh+C@b!3TF06!uKeLW8HE^#579N_>b7ry*j|tv5a)@IQJ4j@a z?RaZ$kIJ?2bm~&4HB!YiDzq9CTAIY()sR`D`dQk^%F1t#^>et9vP_)Ph;n>x#<|ck z-)K8-Ys$=7djhL-Kt#|MSIM*xYQeYC#9<(C5+e{!Z6ie*PXxy1hA#=O&L_O5cBVA+ zTE@o{57KO$27t)0Y%@O7Uq_{qk4o03JT;TcXXI_d90r29Oo`83CPvC@Q{;0~By5c( zRW1WWZU@@jGhH%Ea%t)uvCdYx?O=9DNls|C^qZNCpy$&~xXY5ysxz?ZIzi2fcA26L z>e{dw3%gAf^E)w3l(jY1v0SWab7Il%UIfx~z{%|axb)gVZK04}5l-SJl+(nHwCY%@ zRa-C;uyqd|p=7dXG#$52+SaB)JQd{( ztSoXP++B*s!33;$#)pQ7lw3hH5u`61PBLU1ZrJQ!HMEx~CG2yF1;?gez#@nODX17R z$Pk?ODJ4$i(i?avHAfg|qOqjiJ6Z|FP;;%+To^$PLhn8vFlm#o8IMrVI*n;jiI$+Y4gOGGke6v3oRnM`hD76Xnb z9(|6>x-RKTzykSN)E{R%Maa9AN@GJH9OUeF$7(^TjY8V8X-ChNv@xQEWk@y7A+!S; z#AX1^se7b&l$(?$J>4f|T{9xVp;$$s&8DsbAUIwr&283U(Y49OHI<>l2G?>8#Q|Wk zH??cQS;$M?xZqC#8|q>uBC>{XX0Z@DKSlv#jdc|M(oM~3Jd*y zBVl`^fJ7u~%BMK>*_Tu!HzY(>WIVC2AiPONn_P=P^IH>f624df-c`^{`M@&sC+Sfd_ zt%Z)fTxR+sQd=MPGj}oyWKIs@(gvQkDTQ#-;P}|>I}vt60>?zkk&G-C--rR$*2vu0 z<7nzIOw_UQ$>zyL+-51|%!^3&wXzB|lJ8Veww|{Qb&I#e+#}elzd_nk>uDjS9Fbh? zJhz1#tW~bYt07+sCfX#*pCc3J%0617j$E9^uj|k-PKzGYSaT)Wy*6Q0YYuvDvT+nz zh*;#hb}%5n9K5S>M74YqLz{K3r5fP01@PJ`79~lYG4s4Ee&X4LhBSp<9v5gH45cdNHrwu&QI~fRT zg*1>#iIxYwt~g}&4EZE6xrVN!Va35_)|WW+?agmdJ^8%Iaw9hI<&kpdq`)stVqU9* zw9<)0u-n?xfeO>QY8Eli-jy#4;BpSqAFGlsvD-yb9vFb&3LF)~2T`D&1aanUHAwth z)^cxM=tmx%eN8}anUriGxvWFfx?oykEIgM)?p<`(8!c+YWljY0dSM2Fdu>T-kqtx$ z$NB++E*3G!FuCQ-$MUZNKvVW>u~Kp*7EaeiriFI+Qvlzm>9JX^rmzjdO^04w24ljVE3#CYi!K4r{!pwDgRoW1~M-U(1SPsjUmN4LdjG3fi`< z%ZWsQ4I=HRSHoC(UVyD++h zukD7rCcI4mk*sT%EhmMN#8Ch+y&r3-6Her`KFJFb6||QIlm!kij+kYi3EU?PjpW%l zIVP1E$~W?z!mHY-9kS|0Se&tMgfP>O?fp!|Y-UrfPOvfXMqM15NZiAhNx^#wLGEIX z+W!C}F5H1>Zr%t@c>+(O?x%Q^$XxUUNyIMT15QMcy1X)5R>w^(6am?S!>OQ1ED+|I z4GG*Bdmp4DImJqErt(OqTWpq$TWZyXcj)lGU&BElyMpo&HoWZ7!#P-jv87xVB8Rc4 z3|IR~WW}s;Tmbo6&~+9JuWD#()=g<8h0lqyj+~7|xjsCY;YS-D4rQ(2s%jv#Wr`6r zu9I=Z1`Mkn?||;twXY$;H$1Z{@+PcPTGuqEaNm_32R!2GJdO~hkt-XBg&VPSgP8eq zFQ+JOF}aWc5!O0IENs%oTzW!!W|t>6b}_E-NXAC@Dp^?ysXN+EO3EB2xG~wcq}O85 zS{?$oBUx~WYQmkg zY8=*Sn8m0%Sl>I~l`yW1R@)YoAk_roJ~GQD_<&47xZ^;;*!xEwZ)D! zP`0Nylsu-MPdj*D5J;r@Wv(c{yM*D+z#=Ss@8^GwuVYvjTI8-Qp3&vqjXbs7h39yJ z?LvnMAXcJPd#(2Z{#M-$=TNfv#*|0OvKWbfKir9&q&t zE+IQy)g!u$hx&OkD6P@XL~+fNn$;$fH!IXayBv(piO!BUI-0yRa?==bArJ!wS5brB zypO8odASIb8Z*rVm1>5VUi@$v^MAz{?K2E^x}16$~&<9C}^dt`3kPH`#EAqNy6 z%VfpPAg-+}%IzyBRD4S$mN5}BC|Jfh?{RlYuWMLWYXthN5BEFco;jH233Uu@Cd62x z)aDxSgzR&Ha(*vFJ5JXI>Kh9SBgYas0NVy+(*kX-+Bqi*Ro!&HTiUydvaJqQ0^4E7 zwLi&6m1vB#zX`1`acOF4{{YV~1o=Ayd;AUBzELXuKP@zQDo$I!jjx6NoCHGfORXN~ zje=rxx#150SZZ|H6^_9}fpwpOR!I@70nV-|4D4^b)ZEL!TnuwP=h5cwlMv5jDuJWBT zqEC;i){K)p(}92E@&_)XUI&*oy{cQ^sNZ61<~q4>ZFa9}syK0p#Gre_xTJjF z6aqBA*6kEZJI35}L{zIn2ot*A-(67+U6KcY>>H6^G#4N^r=Idr4Xk^XPB3g}HbKYx`WI?vGC+(f&Q_`Vz0{N)gjY5YkHI%?Jo-V%o;kfK|0D zDIU~9ZL3Es$<19wl5MpEu~$mdA8P3p<)IR2^1LT_E>?*b!fEdCi-;xV94O*}1!t4> z6EXoFKiPC(h@Qz4bJpr!8rb>~N~6=q8zicc<0*BW&5!9dp@q?ax{hk<2QU%Pbkx%( zo#E4T3LMEU>5^_DwmDvVz5+; zEIJBJW+YlA2O25koY8$Arv=KzzsY_qC$`b|61t1zCby6fj)TT1h4L8EAp@(f?$j`4 zKDusM+o5hcXr?U)Pb<)4lnBvWL7R;2f1|1lx^rk zE2uN-%Pcg2q75g0KSu}z{jDFd91oQ)EmhQ!N{XXwu`;2a`J<%d1&sA=qD8FTogiTU@z)*Vi@yQ<|jNv?ZQ!R#3^b+cqHN0DNv);1{Z3L=|BdFDQYXw=Au5wm`1t zsHbdk%PDi38!V23NFM1WD%FH4h~)2dZ-)foxCkPS#YGvF&24PefGYs7YOREV$DpE| zx`j2?F$e(DXghLgWWHSMcTAuW;RgbVPc(z=ImXMawz8(!A+My=QAfEr*44{Fau~6+ z(s>4~4-%|G@(N`cB)HIL4*fNUoEy3Je@FIk?OalZhPd6F_F2H@YhD!U42WH zqr8F%?yCG?(x2cixfwASht`m4brlQ461OOf3zw9|AwRts*ZN+^|q+v&A zY8>{`@vu&+BaN_dKmc39bva_1-h3hG0?ZKf*nBV`n%4zNg2e&G0`?}BVP4do9ju(x z2`iKewCX52mP>@^5N&3%wC7tv0Nz99M^TyFGD=6tKz8_1nh#EOBak;pA&EnQR9srQ zYUHm8yeQh|&nvNxcGAnIr>>*lBV$aN@~+m21lp(n09yolc?XuY&U}XwPlOL^g+IzK zHF=9|g4zy52P3-^4JtKsjVBRyg{LFSO1oUHo=Y5BTDc{@-1#o0lW2|{xJ}xZ&&35> z+PYT0Jc=laBf;^ujsya+ZkCQ?2Hqm~wPgodE)7CZ^(ShMOp|+FjV}ly5ekiyN<_(c z$fp||wGuBrT_qwx5xUn7Qnr`I*GAL4vD|r5@Q1Xjqv2mUoAON?|Wna5sr-7A?FJTrlAf zYJ%{uC3Lc(h$|2q@k1lY_NR@!Z^CFrq$h9XV(q}C$VZe@_z8?MIjte9;@lHsX*7wY zCaV<9NqXyY=^c?Qi84U&*03phtx@(H$u-EMCMzJEQ9PMl8{0LbAiAf$#3s9mQFqA=ad#p4j_eienuP8`)OG+ic_QqO?khYIKy z(s6T>Vu+efQ#e0A>oy%VQh1v(5oH%N7?M02edB7^|03ayk+#Oq801AZtNhm<$W@kCa zMtSOWwNpvMB0V{~oB)gxWOL3ay3kgC`^pO6xSy+xaI}W_L6dGHjB8~ zr-8!BG^g-T^0Gwm)ZyRW)outlm3#~k4ZJ}_Be#??38bx85O_~H2>W!*r4v81sJ{1N z1h23TI{}fTc~wj2$pvS@^J0f8k=TvxGEUr$^Nq1&5%Lc!m2`z5LOWjA_D1xrEVZhX|&>7%brW5#qGOS zwX5dPcRfciz1>z_qJcrwk8-n#6p9W`UPq?sIM`n*&?qMhO#>R{(d7;hS#9n^&kKyc zUUG=?tz~e;3*2LCV`Q1lGl6j=po(Bc9(F6{xuaQdT+*wIiUoN^RqqWGOaj*hPy+n` zazICsiKUj*oCq8)6Nd`ZO`zITuW23h^RXx7*cw(`2M5A}JkwgS=FoDwPJJg%qm&R; zV&ML9W6=~vr>{Gm?FUkKJ4VpDVPc4-*yikf4@Z8|H6yQLeBTaZ9B(0ftsUgS-n6U8IK;)jsXB1oje%U1bb+OIoZc#f&(yVNZwMY**GT3!5o@Rd{=;lc7f>g^ zi6LO?|lMS+V_XD$VMU({vl{L}_H>lPk#?A<%_CxqC3pIS7KA#((Q&3mU+OUY@ zg>=4tDF794s?cu-xsUVz0P%c)>3p9J`1og%XgE^IxVx0^VsE()1@V=vu?;ATN7D(X za;&RdDqJ8f4=A0j42n-q+|mT`SzDdIA@J3PDWtLBuk6 zV`xfWGGqWc#wHNuG&lyZwSrLA$VqZFwW#7Q4upjkpD;m~QAyKeI;qa!pF06 zXO*ZU`s%4f5UiZ!cnwc>*S-2#!Y||F=Op83pcMo~e$DzVU7)E0TY!evg>8Gh*l7YQ z03Sf~d?LOV#@`;H>G|IO088iF56r)b7~3wHz8WYWB-sTM_JK`2-o&d%!nA*Ow)pgM zx%D3nXbAcAd~Q7>#@`;YdEV9jh5A}DGdm{)_Z$;H2AVcY1kspWPu%i?2$g%2!U@42 z*R^W)zq~ggSpfde9-og*{r6(n(|L29+M0fjo*6>czS@4^MFU=+DDkzU`#yfn{hk%t zdB4B$_b>J@W;+KLw1uYCwT+it+H;GK>4fdi{L;P<{ZaaV9+UPwf%k{oKeh4t9yTAa zJvaKN)BnT(EfD|$0s#a81OfsB00IL5000010uc}cAR#dlFfu?v1R_vT6GCAWU~qAP z|Jncu0RsU6KLK8?h(i7q{Ddv{@@%rp`G`XK7rx8ce<>}u@bA>T*?yDt5QVX_%Pg|X z#r!4dU!?t8JdYyF9uvba;-P;BW#YaymLY!$V&2O5R(T#rk!Ad=EV9clDdx{I{zZwG z^L%#s9%G(w;~{3tFXK^Le0@qEsa81F)%+!9irEprRS1-8V)^!Y9!2Jg#p1B|^^p>+ zlxz4p+h>vFEXK)k>mg#SYK4m|zfSXC5swj#B99UhibUPNS!`pDZx&g8rTUk$%P%oj zNjHf}zl|k-KFcp{X3O~R_^PpKxA8CdYcFDz9&-Nx(ktNC@yq`JfEV*swJ%S^YPQ?- ze1Ef#QG7ZmYQ1zzzg0AcUm`S1tSFCS#FDR7MZ7MFck3vWG$rfNNTRbI$su+=Hz@E) z{d$s(p>TNF^h>c0HR*UVv0LiO!rX_1J_;y9;R~`}+)5+aKXw#v((MZTsB3{f2^$iz9wk z$I$Hwe`Egun2dk1_mou(s(7}KGw(9OB^Z<|L*%R>v;P1kIaF5LJjpwFQeHM$*u^%w zIb|}M6_D?oOWb(#M@xEtt|)S88d9O?w z#5_f$y$L}}u}(;D$={-hAw)=_ZEN-sJ@M?2jZbO#>toFevGhl=k>V#O`sEs;&HM&d z5quGNqN{BC6CR?v^gNlTJ2h>@)0d1-+DCyH43ZKkd=d7GvPb%qPueVf5_U&5n=jDt zV~bg6NTDo1kfeERY_nHa$Ab4W$%;aBd3hlr*-UnYFCt43CTY7x5-p3CYG+l_B1hJY zY)iywYn)!oBA86QcQl*-|Nn2+dQl_xo{bnq?OBo7BO$RztEg3bwe}{8+G-TBV^z@F z+88xU5UW+Cs48l!*`N10pL2fSbFM$HbDeXI^UN)e$K!ds|Ga+X%nAwY&OhPKEFoV6 zo(CD|cYv2Bn;j|U!t#^HGKaJHKW>7}h2%ywf+G25R%r+jMN6H93)PPLcTmdKQQh21 zIxoYNpLZRXB?k`ZhT3uJLpQH3*RlW20@~FE+O=~l_xG>=D6b8$Ltp3`e=j z;kRp0AtgF*wU;5C&NFf}vMVBGH&xlF!zb4-_%(zij~e2D zEkg*iPQD#nExd+I}GfnF~W1 zw^;6=>E4(l`~y;W)c5~#G!Fs<4+`W0)l(@3ISx0Qb&{GreeLCSlzVcmoL&IyC%&dq zj19f-cw&|d|1PU(d2xGZ7R%VgxJmwDLmLN*tfx;E{}Fk*Uz#YfEHszx$?@()C}{V! z2PmPy&->6yS84k@>xnizvMM2S7Xz27^*=dSenJwoTFky$Z8+!p_%=TZx01#-D*H`lR>Ql(?@ zP~KFIppgLChlI0cf%gj8I@#t!V}1b^sDV6zdsS_;Mxu~1A9(qFW=K{Br5>H6U5fU- zziX&^bG06p3dL4Yt8=B84l2+2EnS`54VQv`hI^MRW8$XH*rnU$btrqw%GuG5Hualo z%|Z6?&HdBJACUc+3sC^<5T!f_c+UKEUENnZX5B_^MbX4L^&gRr5K|ZmAzGYu{lxju zUiph|WwzT`v_a;9W!)bq+4*mxsnqfzJ80DC7{tixxdFfxV-v_dY}MSXs!f|L1MGGm z9PtTU{Ad{h^+bylu4k$a3JjLq5e8V4Lm$_hhYGcGtd~Lb-;FhUwu}_`_Co4!hmrpg zRc8T)TPKHSehmMZZe|F}VrS9T`Nm&umnt1mW9ZrZq)Y?)bgLv}#{5qp)#IF0;PVGJ zT-e`eh3mUFB{y8Wh>^j;o$`VDx#2$G-DcYGlFor&LAkRbbj4qz4 z_G4{Zj5R{r9LP7|>pFQ*b5Vh-b}-7y?G4zan(=E3*b={!|LVG&Y3WoT#3pMoBBANI zw?;aTXNJzz!ag2f-*oyV^hv7ZNXk-V&Hg!;&zZ@ZNv^n$DG;Cj(svOz_|giUQOnD^ zRmKsXrJkzb`b2iau=%RC^Y-y(f^t~}#hBY&@I)AGTPa->C!s0-)LB-L$6)V$4wYf3>r)7*q!>R&J*vMJe%81^G8;?h4oP1>?Kv~m{7-Y z?x4Dw*k99S@Q;iP)ta#4&19cLf{T6T1jBEThVTVLP~6t@ZV?l%beWT~&eQwZU93*; z)h6dIa5r>%=i2sKljQrAkQn&CS^Y9iw9zOsJ zKKeiWAo(AD^iTZNkXKdg6N+c@Vq)>c9%;|G{)mr52{MS+H$6G*Gn^Bz(7!YH44^Lt zfOX$`u_geD*}_haunc>H)0I@ft$XbMIXHEP-sT-Jqi)ape-3O9&9N)G2l2lXwP)N~ z(WpUYl-P7%5*j6+ZGrB&d&0U?A!N~pPVMy=e36G4JB)}M7~Ah+JZPc@E_ur54?EC$?*93H!1WbKsvM+B#O zj<>(q-)Zt-d(9eI%P`zWBaXc;G%Ej&9g&4DovAj>v_x*#>wSBAXOVLH8lb~`VFTBF zzVu8!`qfSA4fMs@-#zvo%&?3|#r^!^eEH|t*|Av#2ZquB;Ff)4I$!L6 z-=7`K`F?)u?;=}92^_;Mu{&6`(8AL`5ZfDPyi+mjMrYECf&u5x+~PX*4pozf{3?nH z5iy^u&7q4SPS1HEp_~ddv}O%zaKq<{*tF2x%o5Gx({C~I-IC_H#(#0cD5D#kI2zwkL)ks zLr<4XPlE_FE*B@2xkKvj1)?WXl<(eiwbJhLx zd@AGVL?$~pImF56v-nId|Y22ErJFJ-t3jVd3nrRqJqzlY@xnLKbg zyUq-9?WOF?YJLVSPx!>aVnG>KzY~2sXvVN*c`hNH@OX z&GJdfvZbePAtTRgTiU`e8HYOMO>BP+(C%+!FZe;SN2>Qec16p0C8?}?#g72zSiu9s z*rA@64c#oN`nL3uFIW_48KMyeNzL0WMu&QbuG`k-&8xKd%DMXmLrrTFP&esY4;hwZ z;F9eEXjc%EBe2V=n}V&QVPXQLZu#}Ol;*L8WjQ?N9eEnO41uNwTYI`BU8)!Ri5PG* z$@XzMtfbcj-qH5(n$yKQ?nJ#Exnkr>8eKQZga{{<9j#)1u?%*CXR^Xj8l*-c@4<5| zFYZ6k{t={pH$$CF*Yl{l5aa;q4}wS~*MvC6{bH%HA-!a!3CPIzaqJZ_>7imu2J|_39g$mXY%F%$-E-_mt~Zke7c&gzg=r ze44#te9AV1B>z4XuHelJKS;*}*T6%juaYQBAYCHEkizdxY#A9;E-hN0>(-4YF?oPz zufChBzfoP(7>=&vOmjs!bBd$%*0ywJuUz8i6Y6S;TeS50yN&J%`pM;tw784byAI_P zcp8>KE`=rpk%cKkGj0e0aqT~M%%6@Yji$OrzRJyt)=JjSs*rO`I^X%in0M?mdY}7L zcB=9^C3ai=j-bAR@Q)M53T+2p_I(4>R~6nOySX;3s(0fdPBkf5?ecXe_ItPMD$vql z2TXsV%xhVeJayny8NMOnrQlK%Bs&;Et^Ez+p?WQ|c@#tYhI^+v!<0*g4SnZVD=QN_64P(dbL14|Iv_DPs zFC~O5n=wV@9}UZB#1!%!#h0EjR=G2OGhXMA3>NRYq{(&$5DE zSC;caj_{fhfZHd3fi(J@!%yEduEoRl74faQmgw#$Nhwstv~b(ICTT|H@dDh=8MlwO zwZ4p3sYGk%AJHmvo5jmzeyn2QAP2l67>{3x2MpCOJp(o=p2g`fgr#To@s`k>h>yFx zRCmaIuRSd-Boo&h^1TYhF}S+cVpadpGEH!y$M;xfV(=mmn%w-D{~+b>r{%*Szh2s` z9rw5^vCou{;k+}N?oZR*p4jV_brjqAIK$0AQ?r);cWaOJntwzt8w}ek+MA;s^^)PY za|9$CCF(Sv#}rsEWJ1t7Z0L4C2(K#t$|>*w1<>J?uTZpHg_{5Gr+*0!=7lH={@`U9 zyS7SptY0z%ZYC?I3r~Nai9dvEuWT8h01RzZ%#gC*1|LI|ZsHwLxcw*n`!4+!46SKb zuJtndalR-qT>L86!eHW$TPnd@m5B>)ex}emEb;=!*yf?e?&Az_V}nmhgbtujW3E_J znSVcTy1%pR#NsWtHec&FCeU3|?wa9HsR05)P>1%Wf0(zd(CWG;XO%`_?l}N=%cGIgCa?D1Xgx2k0?sM7iFj8 zB>sBYnWMb%KCevhdbk`2PhX6#%xcxP?A1!_%>KMJ>?bDUJx2J`tR0l}Hk$JV7~|hZ z_NqLP)oawo`!wy>=TXR&9xkM9y&5-T?Lb7(2W1nz|Blw?5&>|i{JDbs=l;D~3SnBK za*RWSJ}k$M?v7q+|05!VXGQL`cYzMYaDvWSbPLxTgD&?Tyj#??JprNDMh(e3R`!zu zSdL6TX)e^FyejN857K*&Ox-Q0;04OT?gaR^M4tPj%qqMVe3YZ|JZsjEn%bDZN-|n5 z6N%mP+>@aDM--5kZ_G=jXqp%}OSp?`H7Bj&$E`XCa-Uo-!`0VRUMUuJ6ScAJ3`gXc zh9C2Y>>%EraE1@ry0)ks*ft%wP5x1}-`xIN@<10%lgYEERVAfmb){{ zp<8p!Ycv$pz7=2 z4QL&{ESC7ILz8rb6idpQ--FQ9;SP?`>_w4uv8x(>V~exfk<0sMKH-4Y7`1t(sO`!M zp5jTDFNKHyh+K7?9cJ6V!FI((h_g8^|D0{V;YL#VE&+m4(~{^RoS zgbfUt*Mc!)(&J(!r-?1kCJ1CZMY62IylquDq-4e4_`Bbw(H@#avd|oBLaxl8zv;wp z9AcH`il0j|2(Cv7%+tiR9}3^ajGZT5+MHZRlBogqcg3u}o~dYPR9|wuxo2gJF7Yk< zGx=@RkK@^Lq+2O__}Sg2A9O(l0>K(SQI9<%FzCK$jpESgh7QftnQsU4d26R8s&BnZ z<`3w?KTmuuZ@-zj5xho)R3^+I{(4!sOO>O3Jk3Y!QWR@2)Kb0mtV`kW_Upi0c3obu zUkMlCHKHr zbovY0C~`e3TS^tl$ag$^PIIzXcfx0EDyLM=9u|Q9M@QjG2GJ;1d2tg+xYH-nJlVe_O!XMulkwTM01L_ds)pQ{)hXF06^D{VGo)isVS z6!$Nl42F|%_qkIr zVEvUQ@k|vpB-Vjoj2x)0WoJNVAkCH0Z2La?$Z)%8cmJi{$KkTr!~zemKx)mZ2g?jU z!aMlL6Td5QubZ1{f7C?4gR8naY~3B*_Uw$DsY#y4dt6PuDpW}CRO+f~yK#m^DFxrL zu-+D_Iej!Inc#8O;64sI?dYo|g+r$Q5%G11YQGK8?46o64|g%M7l*54lTecVSb1r= zv8bYTCAhP_G73xfSk?qR$`Y;Ms>Gox^=qowQ|Ik>JkIDSo->=jV7`-B5;2MrsbOR4 z{k_!ux;Ldi<7&3sXOE>(jfwcr&%nalrB40(8{NOpBgG6r$r$!<*snd1S#$-PiR)iT zEntW{;yR`>*G@{9We*)bBh#u%ds8z~#C4h_5j*X%g&>#KpRoQ25K(kS_;4)@qx>TPeq`CPiSxBtL#mcBqM5msmn3$on z{bbug+6jW6q+*6Nk$LT}u_`=~COkAvkq7gJ$!mYZmP4Lx(kIaPcWj~%-N<6pMxt>j%9pTaVOO7*i1;?9b{SPiQx>6vYuh5}odIZ7M=oB&JQ>%p zu_BMxQu523>@i`wOwY`2~JH9Fr!d+UApf^ zG%3gYn#-^_;00z27TD<{gf2W z;1eE5^PstM>f-*_8VQYKTbOn!nb<#~R+YV2LC8-|_nvAGx10j4P?0~**_R@+{vsMV zUQBLwH7ev319uzgP923A6RcxjG%Xu0lJ;_0Re&5fHrwYH-)d(zAcj3Rr9!JoJCFJW zdxEU&)`K-0I*eS~yV7d9{N5xlguWR+eCfAF)L~a-%~vFdWc%+i;_NYocE56-#%%<4 zf{>e)n9XHdcIco%RZ91y=t&Zsq#O?Dn^yluqpv>fBJU>gt2DdC%tB%Ur7Eblih?b; z`L*6+5(;k~M+%8@e^4m~l`2&z43vWSRM1Z&n5ELoD7jjr@1y zQ!iErT%^WCSY351D@o-|H*6nRLISl~+}HzwvcZ^&rQgvN|>h0<;NBXHPJ$= zx3PIHVr%1MeDm^4*;8Kz#EaiN60qGM#k;uwei_mOsS#K!$}`?5?n{ z)rg#8!lZO!p#G0Qi|m|yverK$cNOe^&fp&0_y1^e1SK@&ax1A3IS;FUpv0Fe^(7u9 z6AYeJ`~2q-NXDE~EmUpLV$7rKjJFEViNjPsVla=6mo+WAXGl%T{^zay042=kUbw{t zcA_kzvU63+d@8MUR+JtTdv!o@_Mxp2BjKM6+%PpOr!0`Pq)?m^5aD#SEBr!yS;HrH z)dt6sErew}8J`|Qaa9C8qiz7n!K&W4eXK_23`L!H*d#o4x|!YD=`k-YloW7bZi3Q? zb(zORkMlLS)e_hPDbCSCD*B{2>W$JpV^k>ygbyj)PV!H%u!?LPe%P@WYy_h%-80(= zwDg)5$|f40>9#U1T4#1YdT7(3L07aN-he53Cc6A4Gc^dws1!FrR@=b57I-V-M)K%B znPE3Rm%NvzYmX|^b*Jc!0|1VIyFc4ypd-rVFv^VePKT~o2^k0$u|RDFAIW_jMrljCawg**Qb1qhlU*6~&v&d@eHx-0! z$JJhlvf2fh5-@Zrc0NrDBJJs95Zx}A?%SerUVFthgwCLJWY zO6Q-LOZQ%5EoHUpeVm*d;QAhhP9aE6s@-a{C8(*aQye|P&1cfdH~y) zYN8l6XNE%RtBkx?i~Dh+4w&SIG8Jjcr_5~OEC<@JKotdl+}5MtiyE^q#w|OyL|~>{ zWU4^F99*|=%GN;azWcwo+e;*+A69;+bEmNv()^nhUT9B~kozzw;koGJ)e4zxodntZ zzP;%2iv=V4`aOBud#qGwNdXjw)81gWB!3sJz-(g7e`W$3q{L}r%quW1O#+;u~ zn7`CaK;kOq{#T|xP5#oo8-W(40)kNGtrnv|dv?^O2*A4rfM7O@wh6GZRXsJ2K|C7y zTI8=}(jE*gr1x2QWz6;V5N9NnpFtquUXNGJ{q!{o`Jex%37d9&>dp)jzMfzz=+{?_ zBvx}xO4=%_Rpg*c8ab)hVO0ah?H8IM1+5!UiR{$S>`pR-Y<=}DwYv{YcqKFnjg;wOlG+9+iKKJ~O+G^gp6D5iR++Z!8k4t#p@yQ#z`jVYwh8UT0!Lf zgeaN)YRw9mD@*y)Nawq4mEkZtnY%@pfhM`D)&3(Smvv2(E!E#wlVL#YT@AlKzs$b! z%IY$)7!yTL$lAA05-bSrbByiX9@2yePksxI$0AE2DljwOBiq?h)`G1XIEd;mBEM7h z(J?~n7fnJB14!Q^2P{IX-nZz??w=1)K+I#Cv59gIsdKdMKlL}fXq23S&Hf`IrvE_s zmeP5DMQy9^QbB$X^08I_`{PjW2N2RO#W`n<7B{}V)e(adtG`}|poc^swa2sXVfp7W zzm(g4rdX{2(z|dL28eKuN~XJ5x>YPH03UH^h{xUWob~|HP4fHxr1}0?G*bXBc|XgC zJonL|&2&xfS-!x@!@EMFk-p=lRmuTc;Sb=XofV97iyZqWCl;&MIXWZkex-c2lF|a| z`XzEUhdSE;4wAa<&NB37V|!hcJ2M`sr4~N()Ee|;pcYdMFvbM2X&yrKWxk~Zkb4yt z#2bFZyK3%rdD<7e@1N{o2+YX?ZuBzunq5l1V0Df5PWr1OB=FQ2)f?@fB1oVtTZVnM zm2Y5>lvS?(nWh@UWbf1dMKIrW(jQ)B>XTVU=5`^79qRj1qesimFP7nO8OgH@4!EJHMondV0bvosr zgfa`N4d|Vv9OHjP0x@OGM`CGI3z)AOfexsVhN-!_Q@gT0S(y{yJ13eWC>{r}c!91O z?UN%Vj(h%nmhVLs_or^3#i_J9*-T8O=~lr<&7dAM9V-HG0m@eN=OkaH{6HILl386K z81TS>f*crWE^NjcOm!hTTfY=-&2J)mfgnn)dqIHE|8$k5oFMZx$9!ESa*eb1*<~}* z1^gpY{thFF#YMS=-?Cd<|F(D>eJ~PNRY=A`mQ^QE_ReVZZYuA0IR#)i6_~wG9W*?>Te-+f3)vdlnwmGN% zoZ#qyz0av+LmOkj0B$6A$;05yd?6xoj+#R^lE|@I*Wt~ui?e*C>J}(}E;?ndU4K-j zL=H!hxn5K87bZ-bYfc6|Ga;rjJA&YV&oeRZ0B&l|tqwmhX4C`k)#6#fSLu?pP`|>s zb5?jc^m!C6q%A9j7|BFWV~Jn0N~4v;W}G0KQ)mt-`Ke(l^ZI`x9w)fJs#U2-aaw|c z>GV??$JH@%WOHoSD|0e#d1KE4aNf^CyG z6u##D5caC2;QntGRzVpSHV`5b?ZTekpi@El@0zN~;U+pnq(yzd-UQHET zz-MR%?|&Z^j!mgftd_M4fsaQiFx6v${v1?T*L$&Rn+ZiZv+qY4;y41(d9Q;v3|A$7 zj`(yI{}FzZm&mu}m^7}5K`;8Z;wl`FzBYoai=^+ksVk4SDr)AgEok!)teGm~{Pm+N zCc-BU8_ecqcJMoK%WH43I~|}nC4O0f-aldkF;daTn&s`F28(;FoojseTZJ1?;M5a- zDtSA^)$HBtTGxzZO>oE&zt)I^{} zmoXe4>idVrWC1Y2XZ*-2KIzrVR%PwZ$0ip!?9?$K8sd%Nuyuf}Uax850C_3M^gWCD zHhuKQC8nAoD3yY2O_e$E*3zh+H=&g`R;DLwVebu9l18GDy|b4 z3retu=TLqR{*Y;W0eG6&wuuBxp20zZ?vfPAx%QJUK*S`kTKo-t2qcoV0ecg_Zjx@` zi7r#17J!P*S4+>As6f+C^Tl~g;ZkqLmwiTpD!KJOkW&)-m2ZZ=zTtkw{R1GZ@ora@ zy+#2>@87T&s$P@CgK?F5CrB6bd(f!Tx=XT4G8@hl+F-`JWE!wumJ1e7_{f;NiFvGh zvdqa>5~YR%qYz~y&m~bo+9GE+tRqZ)xy}ukQW4ih9ZZgEe@15!f1jTsm*1Tj`DK^z z)(Q~MJvVGL8m&6()nzjjCm|O5Toja|nDExL6Adi_fNYoLHZvYaRjdj(jQP2w#AH+~ z`cf?c42x4|o;Qm9`g&-WFHK?SvG?NfQs|pWOyy|2{SO^ga`IIRyczUPio~eujXrlL z?{vJ|^OVxE+JQ%pqQ6eok9uk%5I76QYE0+1x~tCcN<)j+2G?7fin9wTULk@Hos?X%(z$*}|*c)(op%ja=mPac+1vy94IGjAWC zT^;^!il^abjz~4;1ePM=S$>jEv`15B{lo7ik6odC+swQ3{*)sA5iQj={(Vqmycyam)uOzMH*wttWoSUQ?T4X) zdoL&4-fa=|*QMh1OaJ?X*IS|E#Ie6b^BbO5O&YsSpxmA*NlG$JQZtOufn*QhzI69E z4r!!HgOU^k4^g@V^Cjj*m)<9aZ)>sdWM;_q(IF7xcnkF{QYgsc*~ZBqE-M0wGtv}; zPd~Fo#&f1;S92=7DfHG&ZcxL;ZcnP`9ls-Bpgb9!hij9?S{iYM`&Qvf-%ilyGK`Bjz$MMi#3vBYn8C2Mgmf zD9?ap7cZ4pZzaD?!RiYNAs;?A)q=ih8m{EhD(hlPk(eb5N}&LIwrvZ}L3vYlA$KI@ zMa|6?ED9F;nQ-~mw+qu0_<~sy7Om`OBfvS3ude^|RiVge;b+ELyPTQN38OH1F**ML zhYgtWPAz@n$<4@@TFE9k{d0-8i~dLrS-|%PzrLyx%s&4O>YOxO5wX4oSM7WHMkWUlVtOX=iNV^Cf1VYjyZe>{*cxZ6u$Rx{FCt zEAJrqIOmwwB5|8XWMXf3Ep4^<;b|nxb2Tl``&y zPq`H6>|2Q2la|A$)p(Z5zyEVC=%ALNoaMo0Mf+%A8!#~+?`QKKa%h1JlxXK^KM)HR zOIXmZ@hV*;tr2ayGe6YKf|(0y2F};mJfr;n&GE2Qt)-$osBpw8)V*yiE>p55{LQx9 zFQoUmXp*H_Ol#w-Em4vJ)2pt4H<(MkJ0Inrv1H?K?Hqib;gxTDSI;sUfMMvMOQhWV2NgBOURg>sb=!Kn-tDbNQzA_#| zVYrNp<0-mQR$K4c%P}}hbF+#=g0<`%3$~gFFyhY)aHNARZH^&&_ul>BmeZF4p2@FT z!ro%gz3fznAitMHY}9WaY?mrEiQMZeHv0TY>~h$)wx2_-V~@pg?XQ7f-Y+!Ahq)W4>1%Q z(G5Oj)P1({7M~1>7m3JcOR)v|_ekqn$(6aU$ZztwMqvD7r@dry`XXlAJFhM7 z$L*Lqq*Tsn8CCs84J8k`-@)id!)jAGrBq5&>G`H+B_?VSvhvpYH~NZR`FAdY)yf#z zqFjFh6->>|e|UoE#}^ehXIBl5fB{AU_^xUYJx{jM?eZPWW^SX)$6~kGcrP1DqEPjdE^zwEW>1H_<&qw zEiF1AB%C2urlQsJvHSDETzBKI_&^a=LJt75Ce#nrmtNig>B5FNs!ds}j^I9x5Xtve zNMHZdGzd<=zq#LyTT~G3mR&$n?|3lMA*BmLcY$sm2Y0?RN@CE6&ft z6!VvfVMZOIcgVKm+-#k!nJ0W(Eu1f(7$IJ7)i)S#VF7P~;^cdcHImI@O z`B+*qhUAqQY-nJhn*HmRiov``y1$V`xdffT2&=M358n~Jf)CbS4u-aYkVt0gnxS8_ z&0{KJQzXP@ZXdWCnaCvBj&YbWL*{x3LX?_MYu%?8-+Deok4OISD>W;3IzdV)eXXUP ziF19=alEKmSKW{OM|8DlX4D2-C^5x6X_(zSikFDeuR`7w_Ry>S5u%dwEnNr}d(;3m z6mQQH#*yO*Mz2I$Ut2^+8M`hSVKq|CxKd173KR_B&?HEaK8fl<_KSvM zo{9|Jjn^Er`)ZNo@Pu-jo0+;U531|1s58(zlURz7JRaJnLLm1FObXIi6)@5Nhz5Du z-(w;T);>;zIK#ln*7{j5Y044$v<+Ls`0zUl$VO&HB&r+OiC*?Z z!Wuar=Q$5HW>9O=%Rw9d5oMu9zmN_f@Bb#&()8~OpVOH?meZ+mmu;Ge7z@N=*XSc< zR@uLFSzMSL!}IQ&rLK`xy+3@Xl747UuT&?e$zlu6MaV8bW@`!j%IatG2r3^W+HMMJXX1sfVJ^w!<`cxGix>rf0)m-2bmj@@>^6y3;Vwwnk zNcR}ecIvu_5Ihnw89ErDe>{szFRDJ-W8DKuS4BIc?4BjG;H~R2o8qmLKdl$`Mmsa} znsut69ED~AltJ;Zx@gP{ao`nZ`#n<386#^IVUUO>_{<*L{7@)-4b1a-3ol#pkJEAC zW|dUX7XMP*DbbcXXX7_p$|3jmACY9N(J3oT-2plxFLE4!w`G(w-J@wg9pP@E)cPvqnqvJFVZ@BJN08EA=$a$5K!ba1w)04u*kscrf}hHnn6Pfn z0n(tpFv@4tB}**sy+j2KP#ym%BQB);AJMFPQ4dYPp+z2uFgB=YLePtQz+{IMPXJK2 zn_uG!)!Zv)i%IV2pw-6TLZyAx0IYPFEvjwwXk7ern)Ciz=!9Ipd&c7vn5TGXs!FRE z2t)RCz0m>*V?8r3l2_E(s{^G0p*MBOZ(HQ+aOK+=3^vOfeF@f^qRntCrTCyO5bW@J zt>5W^Z_xXm98q<__GKT+YgnumhU2-FvZShQrvR_&_=0DWL?13PPH*Wym?R@!zq@|o zu;{i@2)j*^_3R7G8R}|eU$0s8wQ6WbnjXRhGRuf;w#rtnz@wt;9U%EAAPFWLl#r4< zPu*52XG6=JZfV;oDv%H3g-W_1-i`L^b}!qLl(-z}mO>Y~mg=zw8-W84M#tC{B8r*3 zx%OD&fHP=5N^@^awj+hy^EV-rVdO+HEn#sI|08+@BR)-UIJUAexlMWy zF9>s}*{&)&ye5$*bq|5DJIb0hZjp(RdfIF!GjK~2Lw}f7O_)iqR~_kEJ&bmYinjsp zU(M<_z{Vd_#b;~V_=9rSOYI)M+n^wU<=aZz1z2Zvms@U7jwCOJ86%v}Y>f|X)k;2N z+?AN6xlsFeg?Wa_@2KV^^(UFg(KbWg#Z3*HzY4~Ba(jGE%5kLKu0t!u1=^n1>{9#! z%YBLuLDO?DDSiL@fY@S~ z3n2Kj`M#(+BDT_|uY7*;;RB6kLYaM0PK5h^p!ijxBlCF4R8(0!wtybPoI$cpt95^Z zoped%8H2{1?Ow5jokW+n`aT5C#K@cS}1` z-?6(Doq$k&|l8^Cn^AjmylqMLMQDbmfzP zdsuY?XTBNdwLI^HUyDWP*N$Iu>SMua-po0$G)aB(7`*?7!#My)d=-EuNfcCywD=uw zWD&=WaHi!N4Txs!l`Is~JjZ0%0TO0++RG?JIaGV6@@#tkief>h!>^w!d1;at?Koz6-0ccg!z_oxW_b!o1HXrqSv{Be z$Aq@oI8vFqwIO851i(#re$Mvt2yG#Umirw9hv-{PkST2axW$#5w2% zumS0QvasLudL0Xdor?9x(xQE;gdjO>wKPic@7|I?(z$L6Wc>(x1Iey=C=|qk(KaY^fTa~4OGZO}-u?NWi z`j;l^Xm&Ys`Y?4-?*xXDT;}tUl(On~J|V2P$lnPW*KnKB7;8T2Cz#5=AIclZ#W$ zEpv2Jni7;SY`2U^=|gSD&1?{xP+BYUE~eY?dY;AOf?rOibE`}f&lreKpDv`PoiMq7 z$RrQ!`QoAtHR49|cMBr5Xx|@J|D4b&mi8Q-QGxq>^lzZ7MZ_ZwCF0dAmXyQhX`l?k z-O8_R?>iX$lCB`EV=5GCU=#| z>tQ&&n`3U3ZRZp6zg6#F%-IHo0(=|5JH0k6Rt8+Mwn|H!FZ44hS=Mx-oW5F798k`Z z57{m+{V>JGoZW7ej;Is6RJc2{_D~M3p_fB$gJ;@lk=p4Uy4|VT719S4?5_)g1Afz> zOm^z*0IBZLvWUR_@v3z9}bJoi}+>rsD6kl(mC9*N-+F|TvZt;-}~ zu^Hs|y(^47F`cp3Z(9fxyVh~}CB5D)-TJi-xg^P43;ftP%v*qI^|FwvCQ$u?s0$uZ zmojv~zK&A)m@a}?2izLyeFG*Z3@Q$3fGl{3p7xLu&^3`58@>7y0dc@{>X~c$%}^>( zzjFCUge6@ip&oU7tD5_)fp908uriB1jMKw+R<|1(-Htrz%^9bHiSrXuo4Y05+dW~Y zZzSH?OGXpTt)xaYg6^JWLFvEz4r~oKbC#6n?)EAOLQjmiq=9K zeUj^W^!ndPq*JNf#%|uBDflhY-NlERxG39wB%7>u+w^!n9hP@tocYBPt>fxX6XfLn z^3*xX{qBU8B>8`K&f|8A1&O3Ec<$4{+Jiu?g{}urK1617ER2(Dhu&|c3;HT$mOl}p zI;ZvW9W97|Oy%&?FM5mCe?-V0zIdFID7Ee5N=QeechOOfP+%&uOS>yj^T7c#NjKr) zp^mig27(`MXyMmhNu=ic@*#p~`_@mMO}#ye+D1{9$O@+g%Q-5?KcPX5=D?_d62@L1RRoE55L~i8nc6M+9&}&g|(LWeNDwLA#6cPaT2 z32vkoQ1uIoIepk(s5?^-t3Gqch-KOT>m=&g`aUTdBe}~)<_~d+WuZx;cag+w1uPqc z8h(OLXp$wptCO&{j@29EmCM$4JE7SvW$6;@xNS}LlsNrt`#85~@J*&rVIbUWk>bXl zK010aR3)NOZ+gDf_;I_{&?EO1H^8x#j=NSbONPX4GFAcG!_Juge*G|m=&$f#_ns^*0tYNj zFCcP(ATT9DAom21mm(jDhW7t-uJIH7bW($rJ-|Gx3IcH`_SJz#{}DZ1@*;=v0^F%r z-ImUdZhl8O1ssD^Ea|8lwN4$7(z6YDYJ@=JJ2Bg~E{N%HL@sJOS}>2nc#_N=?TDnn z#H>g~ze|d7doSy$noaM~$#EE;n=uknbK9f+N961bS#8%&S)m2d|{cU2038nyFB3bI^6>SQq zRE3{Foa>$G-NC+Igt;eYMwf;S-pQX?ImDZM*7o-=s!m^yuPxDobRl_Bv>LGlrD_f4 z{|4tw&o%U=8f5JK1jgG+?zgj#D7GVvZc zlCDb$HAS<>aQffNv?sXV1!+gMAYIz^ns5YoPWg-++{TQsvQO!sq+S4yJTuBxI}!O9 zX1K6dxY+)*MAS6o&b*b@VP$ShI3^Huah1oUdtUag24=<9QD65omY$imw!0>46eSdWDam9+CARiu06Q=^0*5kf zm1S?xphQ4v?nBYA@oBiHGwQ^u`VF&DBP#*1ns+2tqti%U%2AIlJa3$)VBmRGU)GRw znra{EeRPDwf#R6g%wS zBPBm~BT(xFxVe{Gu&<6MalcN-lzO-9&sHU>sDUU5PbYQKQG=m8_>!ruoR8To2dtu} zUc{dQ3}j*tD3g%>*1v7ekiHVKO4&}ld4Vtwy4xv|)7&828WP&!aUeJt>&v(pQ(O3g z?HHLhBQ?Q5XY3Z4&`@xjYnWsqh0ZWef>L6ct9m`eb>-GAHocbOi3bv=Ht|aFU$d6z zp3<~#1Ja>duO?V5JRONA=mM-!~RW!9-b zX5^;@6TJXUUU~ISM0W)sUn?(whP+S-s`K3X3DMh4!v40NIzab?L|IRFYU0EjIxE`a zY=1is5+mMG1OW4<%B!a2D{Vighm$9Y-oLVZ@}nOHvv^8mZAK{rc|x`~3dj z^WM7~=bpIdaSU}E5vvtht0on$g>MAOw5@a_Lm(norsS0wej7AYhSBZLF{}z8j^yMM zmJ;ZRhXanGQN3{OLoI+%^&Sv=0l9=e0uORk>q(<^R`(4IF$q#9ex`9GdE67sPJG3+f!?^%9?fvTY`KI^Iw5tnrEW9weBEbL|eB_pZnCUleRSs*jz{HQ)up z<5szbRu$V_O6yA7E8c^4i`FP_U09}=gmR7@W9B(LnSa{rm=^LxBa%JUrlCvJ+}keZ z%lyk4Be~zTe9Xm^pP*sH_%)-gyTEYAOG8z^{87Sb-A1&I8}&CqCBZDPqBuqQkCQig z7~cBW&@h<>$EdCe$_iMOq8=n_99m1x(MMccg^Tnn6T8}QXi*w=XuPgQHS|6Nm?Dlc ztpCQ1$xORM6yOuYPtXG$xF0yUSs-q1mjFb|s_Cbbs7@a6e<<7K`-mdG#a-DbUFW4Y zbcWtr5D4LTEM$4kJG*iCvC@8b8t~DjVtpMZ2-&UQ!M@j}Y@8+D+PR689YZUAmw}<~ z4cnArf#?{UE0Vr?r~$b#uff)Om6#D6ZfQPG^DdOyds?nm%lY0M63tR7=KYhYjsQKM z(>JYtHa~aYheD z`EkM)r@sxn-_fXmWa%!Ddb)cUM&zL)GoqWchHHhx*Kp+OzW&WqHvX=#w~R_F1B>n> zn=$yX3$dmz3$j&Iy04sryjGcJg02~U?3DYqM7ue`{JT;J$l251p$q#7Z~QiUf##=XCI>*JXJ*$M~Enngp5&8k;N$Tyj#uSmYV*ilD5(olLq|{<%nQ(0+aF2 z?D*i;Uj1!Qid|r)_HwFTFJM8xb}Ulkr&2{*n?Hprf&!By%_`DSPsaDOY~EgGkH;rn zJ|!MO{plB!SQ2;KSg${P_SJ9F2BEpi8P!B`Uj>5Z_G-ZSr8Yzy4|5z;e&?kV_NQ&R zqF;*rX4tm!PCQ*F|#0- z{Fe(MHdclfCg-j5{=OvD6L8JpGIx;ob1Tl!c>YVws3%7{{z_FvWv}n+pZE=QJS+n< zA11ux>SL1-MFBr5g@kpnsvE>^S6^wH$53vcTg`@i>Ybz1V5cdsz)csuptBD>CAkfh#3NKS+%vy!I%&VWTIo^ieM~l$b z=vF%&74YG3eSiI+y2_kQp4=$Por4n)9FM@~w*EqUP_pzTd4fs;_6R0SD$-tV6zU>x z4?|NIF^4SM1S0}uvycyeQIkE}k?DLtr6xP0^svH1K;Y%Xl>sCgsS?-h9IGD&lZj{r za-cW43KgB}XjZ~B zuCBCCgLb_Wh+I@G6YHq>qgoz4u83$gBvYqwQHH%q3!o_S6`~M?j&ag)R5Q4h@ma1Z zOYMjQcCf~I1Gf|;BHAXETd~n2vV=s(!W8M2x)yZE`AZ3UwQQ?7DF0EkC)$%OkSG$1 zkNqHSW4Q)h>EBlofu73yp9ZjoKAopbR2;Z367ypDdPM7%X-$HikzOI~?l5JlXS*=> z|4^WCl7swjG4+rKfR9`uSv6AK=61W&woYkq(+W_QfQ`*g>u<@nPYB+2nIt^YINm<} z6hYR!X}yL=nGP+6Q@rs8RFhHZ3tw<55iyL-Rf2p9D&Bo8?*LHa@|=8PwvqN8w9_NO zlJ3nPwETeL1T)=hO-m7atOSXQeDQXRu5gnhNsir_Fy6oqR)owmZ|!ds?5OZNq2;lu zblJqiFu~#Ca57nQyZZwcx5j{#O|3AH@q&9Ai%UiAcLj{kD)A)OX0%b+iGAwEct~)~ z%J-xdUWcu|bCuGLzZ7;eC151+Y1rxAYK7(YYAw&&NILA?Gg6@Qb3H;rV?t*Nf)+08 zv`@#|6kxO-0#ZU6xJg+JuGZ*aQChC#Pls)sTdZCL$5ER#dXYDD@`s(pWZC$e};qoI33h#FIoX_w%lJ}`G z%s4nd(esKhZ`SnHc{(N6b@Cl8PLcWey8lSxa5FiAw9(Br&m!H9V(>oV#Z58^4enEH z7L@5WxI)Wczm`bzuClBdV2VTWO=7*7|VyG3yO-kUU?TbeJ;zzObE^467a1ED{h%Xi;O|Y}9OHCffch@lNI?J?*cN}87 zN%?X#&(*(38AsQdqQ)fjdnFd!s97Lm`<^(qH!Kioz9N!CMa)vg__okQ^E}{FYAD)0 zFBNPZH2!_B*^C85Ldy4zK}ZHt>Qa+kiAoK0IjdzTxh3Y?g*}-Q7!39+)hH`h0CVLO^hginl4nJ z{w)?iw)EE#o2*%WS%lXn-o{5YE)`o7J>y?@+*OFx17Et%k;5B znc;TpRLJ0HGsu;OPulBt-!*x!QQ?+w2kq?@>la3pZ4FQP+&x@~xjm6%`fX52Ys(D8 z`Lk0*{qPi4LPX@{0x|-zrjEa2R7F-pSx#;vz>8-;^Py3N_9~Kq+z_qGo9-YIm5EIS z3UMc(z;Y3gw8XccBdc1&%lJ|W6LtzoDKmL;`!=_>9Cq&1|z4I0rihQ13-DdZT+~m8> z#6J?*S8(g0R7tqaUJkNeE%$u=Q-$1x=Ib2t+Mn)$-HuVU0wAA-k0jz)mHccb+B*Kg z2TmzFWS$qqe3UZt0dUWZ+B1$jQOvv$JmkC}^A%aJZmK<<)~S`=8kzy4IGv5;F%8B(xj!JzLK_S=GQK;)0{{v2A`!_lyX8T7u;bNJ1R42QRzH5 z?XebTg6GO2*Fk--WMj6fYYWjlMqTc_g>PGoR5xQ4J5yQ zY1Z0Xz_EYtw3K+SQER#H6@gnq3rJpZP(T{TTl;u2>1!+J0*)`$n>T7OTyw=CYjyob zadE^9tlGu$)w}jOy#=!N)ysz(*}pJ^my&db`e#$kF-p@AZ`90P$N+Yu)EgCo&!HfI zK3Ia#fxxzd&GD%wXx<){fEfrNkFG@~z$&*sXDGcp(^ddJCRhSM+WUzl$l$sYPc$ z>Hok0HSw?q4|PE@Z$ie(8Ola(u}J)1Uy8=@IOiz_NLy95bEJh-vKM~p7U`&8>)*gg z5Z=0(#xHVNs_YlAhPWhRyR8`%>bW|dtM~u(*g!HQQTo}sZFC+3|MX5Z-B3NDi9o)J zvZ>2ckQ#V(UZsv<%Ar-a!^EM1Wi^V{T5$`dqI(xq1(7%=7D2?mZf4hx%@5y>?Mh`! z*I{_$Y51WWW)-3Mtx4MM?2FBp>;Vslgvl%d5sBAyTS&c6ILuuX7o__RAPCfys1%ix zz(m=^8q4KTL1j^^BtO-Galey^K{vu8a|v|b_G4zMSB22)~09^ z>s=J(dYZ^en0Hz$d+il8vehiv5r*VOVfX1qxmy~${eOi{C*=M%doQNOw~V)`NfuxB z14_6l(AL0hvXL46`c6WpU>Z!2jSIQfZY!vG*lj;DAdKA@`W?1SpCMxM0)xGVjm-D~ zTjhbB@z2w*Wj5|KleWnGU~h8MS2s}9JsT@!MBV*TxWgH;EQ4;f0$FmcZO3FI|VZ%R^ul3nJdOqQ=gCM{x|a;`J18l6UyM_U z={y+L`EsD}9}3_ja1V;Dz)9z!No88fm~AUfK}%M??9d=}I`dAM+%SMyeRt%`ZE1+i zn(|=%FPt8s?B)+YW~$=WKPXOBMUO-CY-xSVlooWAb-r3Oy^O~kazGHRp#=swcFq)- zATKSLU81Z>Mrl;mmqw8B1O=^2m8s+E9+aaxa4~+(O;lU=pk^FLWWn7~BLWrGQ^y4< zW-3ofH}^R#7|7}C;h1O^{Tavtb#m1}Acdm$K;$8c_S9<@-@+jFHDAM|CO#vtbp)Y>h2Yt5sBt<8ir;B7!8V{x zZWp{1Buh{@itY)0{YLKoDy)nWp_o1i0?kJ#{vu>i9MzifJRRw%$xN&NBAu=Ll%ds= z(J!TXr!6g{6Q_rKw(eEiD(v-YqY^SRTqSlsHnAenxMj;+NkivZ6_512!ubERzSZ+F zS#aN9!+xjy4*L%DdYr#fU!-%*QL*-u4GbtrQKA5hkxVzIgRko&I1i(K)PGH~izsr( z{98By(`{2@v-MsQk}T7pfO{Cb!H5*u>1IBhSjq^rDpQ%ed%r6 zn5Y6of#mzYE8;fwY6XEDsYiBkF(=V(xtN1p;)Yscu`i=&1wWZ4R!sB6Y80acq=H;3 z99pw@t|{aR_sS`%MuQJ_we-g|^~8V`DEB+M05&s_a&~|IT9%MorGkM)PFh5J#qcY+ zxoGQhveQ>@Dspf!m)LrDf2~f@t3=7-&O&jdJ!;}fKUFCdL3h3P+AURKRmJ9dN(V;Y z6{%Z!<{F=%XuKy3G-s1U6K7u%ZN2ew)Ys&Hgtl?6VHyLdS-{=?Jl}X1yrE?``t(~y z;}D$|Bg-iSi|eR!VjhtXNK2vhFzje`0~HrLgzvf^VI|jWmhFL?5PuZijfjV3Yh_5g zOH?ISfeiZ>Km*>eg%F7MIBc4;Zw+k5Cd^0@5fA&iMa!%{1Xg^kSRmY_R{2}Cu^wL@ zKU=!Bh`0k8!R|bEqi`t(&Q@4Kt0EllRA0sj+wa`DugCRskGKxn56ipB{Y0UhnoU2+Tz3LvZ84%X&N# zm1pL7JQPFQ8)qvT@8DjJrd} zehDM9SrE;C9x-p5_vMM{L`o+%*@F@Xu3? zoMJxo9}1FI1RzGziO`V1A}l0Z`u`A+C`ee5thS{)aTs%2ab4dz-^|v9jMo28Btn&D zxsw}>>E)Ua0^?9WW0y(2a=*g}eocHMsVPVNAIeU5O4UWr9dLR`CF4oCbO1Qd)Tcu- zbWR<}%owx9ERzA;lq5pxdOK2ym1Y^pr$b=}K@M-Msh^blA~wHW8(51)ERo^fz`z5% z^5=Ru#4)fg3`rdSrJ9g!xXO$2He6A8{*Cq1Alt~MHTB3A%LTKlKfb|OkKy51mp;i8 z-Z_2Uode_dBL55k=mEPo5CptCx`#?%)F}%@z2?sV=po5g6ytY1<|RyokrA{|)25_4K@nZm41N;twfunNG z9I+WwA|QNiZcMs^U+3}in%Ezs!k;nYNT(Z+fegcOt04P#;->eci49y6v@qEV9fZIiOh1ukcasS;=7dRUp4-9iO6j+b4?S1V$GEFVk6z# zA<0HnoN`|H3OpEQ_7ztShgx8F{F)@V6#hb>l0p5z&w4N{)pCKFGNOZ=kBilwNJ*k7 zc&Wh?h$t+UG3<#H%H|KBrU_2SI^eVDi@?68TKo^?vNQ;b+A@rk|E5PCI6d;eT{&~- zVqP|W1CJx8{tpFHeqLDPx0ovk%7oTUQz&~N={kQciDg0)(%bV;H9deY)f5C!o_$vQ zU$07}dO_yNqn{=bEH`R;kPsBy6BTm>fw~*)e#SVaP;{b5aPc`)Kayxf5CWu5OBtLV z&w5fWx_%Q9>F;l&DL)NyoCa|+$#Lb#dSz8EXFOO2lS;)M))$1{k!^-WAig}nUPQ2w z3y+-2XzZC6x(I|t9I134E(RO&WW`8p-LR0+i@wrS_kiKIdAv~EfJ+WNy0GWS>G`hV ziXk*M#mLBd1Mgk-1HaW!__N&qrfH)nV&0l5Ia6EQU+8dP_E~^S3pPgLapM>t2F6=6GXrIh)AU3p=C{-w_a+Pxlu z1eXqE%~4J)y`fwf*|AFIGI&x^;i$7h@`Ermw<^tF;YjLW$SU5vl0uo|=#{&}9_9;F zNAC9!;vxoFG^YEx>BwJGKu5l|$P32g$F0`-XWnVcgsdaiO?0N@t%J3ZY2;EpAZ14v zPL~`T+iT#0#|s)dCTB`7QH9kt)2oeMJk=W}-MSdQkfVUedPr7Wd>*xkWeQ)S=hCLt z6QOnrbo?$9DTPFMr~RX)?`oOojv`4%y&Btz;pIP^3|(c!5}B{5196cBBM0UgvxyX# z9&)yA%GYc(8Zh6{1h%;E85cBrUAI1=l3P7fTkVc#9T=pssoc4k^n{vU<(RdcM~!D; z@ftH}RZOU`LD;P0l)#&nW=J&u_1t-S&q0nF@+ER*qTYl&S-rX4cF^Xxl|)UDB9jtw zzw7Kiyg|xB%<&i6O#ahMHfeOo9U~G;h9f_n=7GyOVdx}`&ZISdP>4kwXmm5#!$Fu3 zDu6M1tM1Rmf57yR{lv$QEE~acbmKL$xo{*^`T;3?kmDQ6Alre*WoCw3ty=x83_CDA zpG`dISJISkVEoxnR>#fF4V-4Ti2X6g8=Y4+Fu!1t0RR_k*<|L8>r^%yud`mZDJ53u2*A!fR2A~X6Np=VQ_M>* zV-3t3ABt*=OFpwJmaFg06U*lJ56qPQ&<~QF+t7Jpz9m`&IVH?Hyhz^?6{qymV3hux z7y@0}>>&MJgY`=X#A!E%59AL==Om~H+IN~-)ow9G!w!q|UN!#BA&_(FRH~(UPs`!l#XwQmTi+UK4v@zb0 zGZ--qnV(C5g_QFNc6 z-{*dP!edEJV3a1MfTNLje9e@MPsx$|Rr zm7vV9`!$qhww#a84&@>qMjM3#@h#T-4L2ka3;BSEUEZ z4NfGpPI%uQg6R+LO_O4E!Tb*;tABuNZOpK0gDpP#_V!;=g?O-C51TBE@Cnx>2&tQ3uw-o&cbFzumwEmg$Y*UZ zmq|kNkho1K@-%x<0A;8;)OX|Foy}i=G;2DN58G`R!1@6zzAyT#pIj)(5@L!|;=Ze{ zM#7{oWHufmIy2J!5{3i+cz_!SKx52&X6Iq~_k&6FOG!S?sk{7!5KbyBc?cqiI#nU1 zazz@wy7XiHK75JzLtN?2Li|l-;);>>U?(JN-k{$!x*PS)uy>I5&XHNh#eAhsQ6M`2 z+^5r}pTj--T3-=S2h?qU2F}Z};63r}h@5i@^_`(=iBZ8xpSG#(f@~@23m=(672*z| zNRm036bbg&)vfMI8)$mZK z%wMm$8u0G@zd{_$<;TZ}g>XiPXLSUneWPlsf)B?lk1?^0rO{mw%4PK9F@ zEITfEFux!c-0Z1ft>&>#ht@XqdRxyAG+k&Lq`-ORyHVt}@sE!wx*_TZ+|j%(1|mk^ z+zhE)>k&QOyylE@{8Q~I_tr>Cr&C31_pwzS^BR}w-|X%a=tu{u5_hQ6<-qr(=2Ka` z?Ha_ayzZKOCQzd6Y_`=a#Re`@<(ah5-LKFq-Pq20T#cco{}Q#gu0MmUxQR4wgLLK2c_SJi*Ttj%$V6& zQ8vDX2?*sVq@GG#GnVkik+63G=Yuq5{G;6yr8=)+<-uW*QZ(?&U|h_Wq{~c^i@*N8 zDv|IfU&JH+wCqmnTy5H)Mw5@6x)GIiPIO^h@?kd}j4=oDY?fLFPL=y8(C13-^RR<0Wtu3aO2 zi8U!|;T%Akyiuw;R*2q(^4a4>Dma1xSr3(w?b;gV>`+~@c4$tkka0m`7IDA`EB`=z z&4l@+36+sSUb({%o452MF-64xXQ>?gZ&(@hl`S8>G9_cnQXs{T>e7bP zAgv)38!g|r7Se;AD()wbg_!>{Q~tk>fb*OH;X2c9armATRe2G;Xe8Xof8kR4tS_tV zIb~-?>8C$Lb9;Oz7#k_*x{LgAkYC9XMc(}$yZ)axhHOPwC*7&t-juaLM^a}Z&Ci5_ zHyzMEwAEuKz5Gc2FPiex(b50@(f{>#&0yZ^y2BQz@ghj;D^?E4f-Kxun$dLxJkJN| zd0)HP#ts6ycQWlQLq3hI+)y_`>G{pN-w-hWOI)8~*NoHLvX)y+uAW{e7(+H#@sM%; z1*yl={jcqA@oPI}{495wSMh8)OZJ6Ai>T=N&aU=~nm)RR*t?g%9|!1gPdw;*MQf8^ zn*MJzcZ>zCqFHXTQ|_YQi+}aHW8GDL6R?=mgpw#;2eFq;&X=(9{D%@Yw>WGbdDi+3 zzbsrCt6c3VKKLuvlEVwDh4w_I+0emKs~OlmT4hn%m;B&Flpq}SGy=j z_Ww}AON3Og26$#`g4FtU8f+Ko6}^r&O5u9QwJ&+y?h3;+HUp&XI9-%(7Zc+2mq*^) zfDfBOkV^#If=4b$)B;X5&C+6rwZF$|QE7S9ZEkRC{oT>KepAIkxX<^a{1>{`iyU0g zqn5=D0XYIyE2_=_;bWhs65`BC2yl4x4-R6*^+NwsOW=A~Du*oM@aA{w_k)Zy7`5+T_k(=0F=|qQ1mv{oVIR z8yEtuxq~r7%jbd_idfLY_2BaE401Hq^zesh{UxN?iTqzhCL_3l%et(f75)Qyjp~6% z>Gpj2-|absMRpG=o8p)u1S&W1q-=_58h3ap>*l*^yGR-V03DmLXaV5GO5@K z#b=)7DNgI3sB57K>#%5akuhfvteoHpvpm+7uNK1_9r{np?(aP!vv!YB7nO4GhuED6 zz=S`4LV)lnX)wCtBrk~AM$v9?QDh3t?!Vsqzcnq|M2%_z@AHMW3^-cRbo(~Mn zY*SZL#h!|RHD*VtR(M)&jM;kPyBzf3v0Q5OnAs;tfx0<^<#5_P2YAu?P@auD;ZqBb zQ+vt{)9KK>{w8zuYYGD=1l<^5IDf}?>F*`E7s{Im5UjJW#{URw zYsRT5$j0?e$?7#cbeT6L>;@j)#by1Y2U9h(<=8tUm4pDFCB4hLWfx#6H*IxaPi!zC zRN*yxL^9?Wa@VA>7-J4tHA**Ce)q6$MS9S~GskolduK7- zpV>zMtV$uK`q1`$`uLJdEd2w8Hv{(XM8QPKhu0>Mk7r}9Xj%MfTPXA^!md1d#0))23G&_mU8s!Ee0LF zGPq}>O6=X_jhMxGJf!}#n+L@-ZZIR*9to|(2s@dVi@t9mK@B~^F?@z)+M#t zgUt<2URK92x8LU2U-BFp&~Ui|L9hLVL{*gCUizI0 zbfKUdj&DoM$ne`5pm5cY7;?L2;+N5>dXl2oi+Je3iwWp&1opJxu8LH95@H$WSop_x z)#*xLgQLE=hNsY0WG%rz9F)SzX6Nt!_Gm_^3pBlp)ll?`a3E~zu>$S=CdA&hMs<&G zk$Ow8VRBX@GqGHf7H72XF!8Pu|LNIvuJl(K%+K8Eg>j?zGhsNUeAG;*_kSq0(^c<| ztF4;4-PhR;yFm#D=ITmC>HOm&S>@6dGU{k$Dt9%5lw_UnZ1`HQX}k*>(!JyJv4gXN z(J>!9vHp~F26iIj(`1F~M2acoZr^i0*4IACtW&qgWeeskpNreim5u1eAJP3PG@vo* zu`+Acmf#R8zRd!+CK)e8Lyo2JD^n7~dz}XjqZ8GVY!6lQ9smpwYV|I>U6!($svFp&eUwKtnFv*T1l1DfJBalE!slX8CgQZ0Oek^-{2emt&fp?l6PG5 z?9H7pUGkeV8po!z5W7_>LU41(Na_siy*c03A!fFGPZ?gTzsZRWvhbh0mxrbc#7?{A zBZSJv??k(uu($Y^DbQ!EO6_pf#CD&?l!)G9yK~3nO2SzPo;W8vD@{V#2Leikv~p|N z?Fy290s8d0Ok?e07&uTlRigb3;T+u;O}eW_W5*i@s-sI7XGT% z)=^}EjPAPB3_}IpUed`mRSzCJL{`9@_X#&}xy1KZWn1>5Q$yU+y2p(+;O%R-A?pIh zu3unXj=uFEcZuV1V|e-b?h!+Gt?f9`+)R%Bpn@r(LAZiW`P=%$?VwXJyWQ^snRA~9 zSwT?@C(#wj;0hbRc*tGm57GK;=YZ8AoPX~pm1!s1u$#^O$_)<`h_eri}}R`GBn0KKrd!LYyU@Mxd2Wq|uKD)K%_QO{$p zEiDKZs|p?u!+mCgx?Dd6K&uYdGwb^|{b>W=8ZCVeP~?_L#_ibWQ|vW{?re18V4)hE zE*!Qktm8j8m`(WSmqih*Tx?qeF zFMLR3(6KU^@JmloMk4MJZ{!~&bjS(+P2k}Uk@^ErkbWU}2LRn<^Kz;H{f57MLDFpoQLj`(Qi}2hN15h$j}luQOd@ z^g-ifI{4|_)X5;IL^$<&2YrjLfN1e8jxxO*!Jhuu$fwZu0nRZW$QL^6*wt?z(VqHd z%C2-1=J@5>OwoJ5gLQ2sFLw!JKk{-$wk6qo7}-?>lfHk}*|rBdbwO!1Bvolv17@@i zYj1dRa^H(6fXeW<)|X?b#7TgjX9ex69q#3s$6^n%CJ4kgMN=I1fpz^=?Aq}q`*zG_ zXEM$bA?r%#y~06g{_6qHKP$Sl6p77J$!vHA!e$IfzR%l`EtS@cUwwxC2Upxqq#aVO zoxYU59*AKUp3Rn-#vj{s(=xM{+q=}5m_5d~$q9t(EEE_6$#D~VH4U!8eSB~Hn1~as zbpdLB&GX)+R#_#Z3c9+azKi=qHi$)V7q+JKpr)R%Oh< zRjtO$P+@yJMU;9*i}sAWTJ*zxmU*&5+9eJChkIbyhBByK4bP=b159be?06kxg3aBX z$gznJjaKje4RN_qkm=Zt{Oi8{=~dQeKZyquo;JX@Ds9!TOHTf`KV`FL-u;XGNfqz< zoIS{vF!<(hu9Q^oJgDr@<-Teg!g1^gIA*_s+jz5xTEgu!l%kVvPq-1S{5LH;U$HWL zrs*D+*4dKK)gEQIoBSt{>7Z--YgXNVB9iI23h=IUb&jYy0G|{@WGlFMBkRZG1_LBA zbo|1DNctDrPm> zS?$HV!zI-q;VC?z%z<7++dc@poMBmB5_W#m8lly`1+xNqjR>6VEOS+Gd+~@BpRJgk zCu8iHM<+{`Yj}B#fK3Tm$2FwtW+?#V@W5~@L(J2Pq?Mgn!Oa3Zh$A_&K;D7^xhTazd5Xo zi2FDSqmvl>Akq8=7tDxiIQ9Z%SsW4&REu^LAxe$^jZr*A7THe!X3Yc~wy3&!#uxsh zJe;=DC$Wsm6yG~JzH^zz7Io=_Dxb9@cp)XP2+X|jvd5qmmbSE~2V&$Qtm##u*?)+Z zL^>)`Q}H7@74zE6SvdN`mXn`){vN9DCoC6yyAs+ZVs?Li?2{}{L&tB#-sj==2%nbN z7NV(v*0}r0Eg(2_bdDc~=&LfA;Czv&z4UT9u|KqTCZ(d$T)X6^R~;jMlB8$~%<1Mb z-<*alJCs!vHIb50haQ&<_+>m+lQWH&Lp&2e*zKG4}X z;nv;r-bTftEEzmmM~%hNX+X)uWLvmbnnssg?8W2u$HG)O6mY&utZmD~c%LnTul+Q_ zz=JtoY>a5nE8e3RZmW>cKg?|4qBW^g1>2M}h|n|7d9`PkZ#Tnt2Q8OEgKUkmK^m{xb1MSnCaw|Z%Q zr&6IOaDPNt=!t@Skf&##T|D)3f?2sqCWuhW9fZf|+sEdu;c1`P2)C<_nmW0AHYT)%ZNk=(0RK!6wU&Yh> zljWzY`1n5g9y0Y|;Fp_V;6A_FSiZFz)tphJPmYf~dZgR!wjkvLX(*Rl{)gPQTFsn2 zzF*h{U%H0JRNLaLE^OaFi$NwxE1KO%P7TgWWa#yd$YFJzjeg+`yciZo!fx&GN!48o zii#ngwYH$6jTKZbeOx&Tm~#c%-I8+Ty-T@D23GIwAa05RUd+cXwFNtXRHvdHYpU~& z#BGwEBf0j~1Yi10*2KNMzK_L;N72kU)AfTQm{Qx~QO58jSW!NQn?iF;t!TS_0YO~C zL(cPfdxal1tP<82@}ZA>%WA!|*JOG_VbBat7_=)_<#Zu<+yU*6@d%TC>uPp}UzT_} z+#x&G$ymNc(?sy8G+-N-IJ~k->-8WP^+RmK%>P2{%1kCpgu-Ru-v!SflF0`l zyK!2M`MM#~cl;bK7_esnJ*TQsT8QtgIeFl|*9C@fZ7f#DSipN2Wv(UUm|39OAV$Kf z#dzn8zt0<#9rN0P_Dp(dmQwgEY$kDDcE6mc6zSwLt?qVISvzU#=QVRJO3%Y+us&)t zOE@kw-(#8wm1o2VIuBHjdw|ExC@QuFnIA?^9u(XQ*u7mz{F`{Lt{4b?Udd#(Ose<0 zy7eGvraMOLN(>2+_*cj0Sm%OB#bpz&JbitHg|bA3nzma|p*{4t5_E!MmRABZWv^X* zHDUGD(f&+}{=~~rkMb7D%9odHU9B0SZBwew&1&NO^qp+sK*1CHaqLfcrhY91QIh+^*K^A$0AD8YW7v|q9)2hWjzfFLpI2kZid)XTci zrosYtlq<05#iBd4p=EOm$Ewk~4F%u(p3<5f?#&}U14z3MbNh-HQS41_cn8%9$53}{ zn)C0>EHiez+M)m8wo!UBEv7Hr;HAs(QLz>%dxdjsvsLpKi#%8u(nEp9on$(tx^0>7xq8uu zKYZJ4@S&<=CLawf;}DAZ_KJs7OO%nJUMP9m@&_Qq=dp1a{=9T@A z^?Ty{+T$2j18=gG(Vv=jc4`J&CpE4pdWr=8%h38cFU}V>#UEQL|D=CoCW=dzyly#ZaB}BB&r z{kQ1`m6rV#Wxu?aEcA1FSUNisB(>Ih^$p>jUYq?rYa(zr&-H|)+H+A0Ys{qSJ`^XQ zXY)1WVNJ@w0C|!SyGd_f~m*eXY>ftBs|fJMWd+o?~ImtKZLcXYvk%r8asB7cs!EJ zV6Q{fl%80oPc3zGs=srH8fTOn%-nSyIb6OwL^?7RqP9>jG_)+*>WlKWobLEC7@*aj z2frXhjUzhI`IrM5C=^gWdaAt`G(YZaPH@NkmCg7ilSIgPYN)r?sd7g#DwEVfW#*3Y z+4TO3?{DYeiQ8{ozhl{kERG4IhxKVn*mi9vvPMoP8R*yE`)m|pxDR$O+lI9LCp*%S zJqT5!k7Nk<7@8lQzB%LHy&4OmssB)xb7d@@cpYyl=0PWRqEQ77(XSJI|H-iVtWwtP zlvMj?mw`q)f8#wf6k)de#Vvs5WrY-X7Y$AsPz!!Iu})cz^3K@jsPXLqs2ZO?dgt<# zu&dk9PMptaJJX4YCB@@QZ`*tvTijYRwrcnNCb5*02zFGHJFtx;-U8=GGxNI*I~d@4 zqibFd@8{VI4yvzsKL!lc#DY>eg?D`eCI$#ClZM8!h-X=&M+X|=W&fd!7C7I;(x^R^ z|F-%?)4~v!-NP}_Ic{xZ8JWEeb0&}imE%iFzEO!x|(C1!&$ zJwT6*^{}UxP7S6Tq4^AH1iW+i+xVQeAuXPlrcOmb)H4NbPHsr1^d4yx)R9&0sFg}( zsJBA7Sf|yY92!47;nXLdQx-NK6D#SUfon3J=ddKJSWNJs)pqY59r2FxB+SO;adM5H z(R<;Q#|Jm?XkTJA3_9bCghMD7xzoz2|Q>ku+}e!h(|mXD|A4~Z*yHP48o;SJ#9ua29K&ie+zlOjICr=dxbaUf7`vf$-O6#wA88cOF+#P%Q1D5MBgb&MvX`1F7O|>kjid@f{z2UE2M3dUQ4H; z1(a^|#lTZIH=*J5kIDf>4g8y(4`IHK1U8(jvSXQGF9R+F*ceu?X9|g_Q*Pyq42;fQ zWVIN#KJ3L_O%UjNhGpBLH*Q1wI(VBx#e)e$A2Bl)yOaEvTLYT>q+m8u8j7qPxWvv1 zHo21mcfCNtN0DuII%Xr#?G(ImuKAW)HgG3=6W8S#M3a5`}dU^SG7yb3oL+6oM#!A7RdFrU1i(D{{UuE zWm-MsK+^XWen>_p+k&2sIEJ&`IN|xjSnr1o+*#JR?N+~tuD0Xd#r&nf6(Pa`bF5zf z0C{%TP0;c5odg(nP^b(o9bWMavS4w4DN|*-I(V2Rs_eDqP{MrOi5(sKP46%J{Xyp4 zVg_roZvG-rIj)>az8f#;5@5aosZBN~)Beulc7YcBHIGwoNm>eE*S*Y;G{nyV+1?GH z-plO@j&!<|Rm!YpQG;8ZMA6&!oZgD9@6h7WXIonL)SpmF`hQ&?~S!P3WhK z$8v2Fxj*E{ed!&-%vr?6uXmO`mrj&%snyqi#KXj_S-3H{8LlH|yjxQ-cab=$zet(c zj`E3%7~V5YOe=90sb;4C0N^9#UqlH^3hwuR)TQ|AMoC9*W6qN90fWQ18c}kV9oMgD zx|=u(_qeOL6SOpzYQ3WwS95ilaBsvP{CYJ=(P;DReseT7YJj-YtBlPXEodGd{{YmqCKxK_y}U#kUMO%c zjKd3E#!GA6mK~sNmFRS`0^t*S)0d*zYIJSp>{lN+>arph$3b;|CL{zDqPcHv{{YB< zHHM7;0NJIoIQ667af1u4{bn5$#RWOb)=|@aUZ7wBfUoDNRKeqF@!}i=j70^?{-%F; zA-=vJl(E#~r{&wC7@UP(8u*Tuzh-4EI8ssba~w4aW%KFxoshM#XYWwKyJ1rKeo;#+ zipGxcP)1w|^BHa}tnPG0bS9R(Dz*Mjk{%^J&R+S5$}O$6k0z#(u%o}T8(zEj{b9o^ zBdKfBWpSt)Ei^=qj(f|=!SVlG2CuKq4eeL; zFp+W28HjW(zm zFcBb9zA7p%Sno44FucX6fznvW$n=5M?mZ!7K6$<%a_2fe&=XuVIF+q+9e=5IvSb<@ zGnE@{)8FS6)}ZO0zbR*UyTH>7RO$2o0A)B^mZmoDSORqUMY&j7bkw1~GVs$adUC(a zEU7sjWy^fwEu(n2dOJ#Nxt_N-FR5Sb0e>;KcZrbG#wQU+8J8JA(>1M_mp$0n%Y>y5@-0u}{Lf;<+7DsSXsqAiwDR%pJgNLApyA01+#0F`^n;8MBI zt3=Z$We)N}g0J{9q3w0W69@~Y%(oi}v1+?7;Z{p#uLxLNH4FjhvtwC&s zi*xP7c@Dw*7d@q2x?4BR=Tj!?_bBJF3nJ`Ztvd9DOxxPKl89Ol#LwNz29zgbr}mYR z=>G_w`8`x7GNz@nvY zhUOO zN+b(Q9p``)8vg*lDDAg^rc~w2EoEFcGZ^8wHScvA)wBm*wb5r1nR=$TfN}sEwnv3v z)pCGyr0T%L0gPZ1h-*H~zKeD1-YBt~G@k7r8{yCS6)Kj_Zl_PI;8#?JJ`lAPb&8~% z>KX}acm2lPTSF^+%h^SmQEno%YE{>7^p=(5bfOt%Wn)2p#9fAuq^A{p8~2?i(_2JH zAxu0WSk_Gy6w#yi?-0h>y)xT0abLX0&|nFd!Zq3}D#+bszCM!?7E?ijv_@S=)4Zf} zkynTmYWooKJWW&mW-Ovmg0PO(pEczC&lUOf%?*Pl&C7HWZ^()up|$Vzm)R=q2T|wb zl^Q5448Yd_lv)AQc zg9gp?RhWsKv&X;UCW62Px^JH|8ao<&rZ{lb&PNIMi9;_GtMrW;;gdYo%A#1N}2)C*ia zrKZVWi2O&RvpG+>`!dciIJxJp3c17+eNU9DjU4V-XQwi*UAjzI8XJCw|>m;pf(#~_l5Dl7eZzC&{Xg^(MX!M)@M00!UJi_SS zq$?+39i%@GtMmB^JjWVWI8`M(&%%VrG^B!18p0TGkHG zcvxxk5iHZ53eo#mhzR9%OFG~QLC24tIk3>`fUG=@=Y&0CXIu7#S z&ka1+&JC<8>U5S{6O0$1`vD!IH$chI?Hi*`0AziM_%U6b6=`XI{x*(f%%6)z&$^#bQn9Gh}WRh!s zBljri)Bb;uFsF27R;-_Rv`cHPD;N9B;fDCHXzJd9-ZA0i=!|GQc*_XuJ~qlH8w#_V z@RT~(!dLJ2FP`%}%-1Vlx3r|8mu)=2W31Gr1K+<%MiTzfTLPQzzqr9Kww)(6janx# z2kO+c80ry}6(gq~64ZftVo6A2zoIgoLz-eIbNd1$D~7FIHXm_O_u!gm4V>km(o1{}?BZxc7v ztxKc@dOuuT)2;2yyUs0am5d-$p*z*dyU7)Upi#dOYZZ$64gHCv)o$<~-_8XQ&{)aW z%&y@jIu`zMHC3f~ddnylwH^AvdDXu&Xrl7W^4Qbu9g$lT?a+D##BBt=gBN0@3t|J|r;g{gP z3?jpfzhk5#ii}u%=1wIxF~eJzbh&y)CaYlIda?GDc2#aseBs_x4BIjF!d$uCEMg2m zkYSsvd0#!E5CC8acq^RQ_zn6SCe!x&F|&g-VJOqUKs#IUDRqyu>D;WfxLRM-aJ)uM zI=W18-X+6vT#eRZ*?4M+j}q>i7EO!q42{~o)xX@-HoCRpG|e%HBpYm6cuYzGZFA-g ztm}$Zkn7Jsd5V~}8DpDd`bW5-;tT~)VY%b@@hFlLEqcn2&xjb^!&{5->~Mt3+V)sIS z>fPHl(kyLyo{zvy{5(Zj;5F78Iooil)W)1H`Xm2LzlWKJO;i0061eC4#$a0xKcwe^w%#RU5ozWsD1^gVtb9C7@ zpS#*-Y-GW346C*#6^rdDq{7>oJi+9HuQ4AEO1Zq{q2}x5IIg+S-*z-->wRHIAu{s= zpdDCXEjl?y4wz1`#JQ(hsQ8UwVgV1K$L2lBS(nS2#7b07bDNE=1m;-H!LF#wjwd2~ zwLXLqY=+1b3mafr;6VlTpM;0Vir!bzD;Av{xzqB8PhyU0ZRM>zutA#us^3422AghEcjwxaffSU8F#-9X<|!g053WgHq`G&OKj73sTB0`?^YV7gmOdv87*0hJB*I zzE702MbhpHyyWu|I$_uP%nM#Z6!?_^%NH=qNVQ%|q#V;r-Ia(jXz$gEzWu*{F&(W0 zR$4aOZ_$%YT`CJsH+N&^9sClM+O(AZ8fwbIi(yyGq#LoFw6WNT`rDdSMNEmZiG1N~ZU+cGu>p{08RU zgK7Icq78IoM6&naR9;EwOD$C$q0qgN!_SDK4TcysrPf}4u=0VU;j`#Q4jqH3R=}nu zZ@e0|TZu__`oPx{l}tK?qW73o2y^Qz2DuQaf3!~LSrv1FslgCUpBj$BZPo5}qaM(8 z8#cjJ9Lsd!<;1p1vKqq^Xmy%ykXRYF@8VYw78P%GmCR5Ag}B#_@!?dT^uiY?()waM z$ND?|=3ZQ51#p{>4)@OA4ungA><=B|w)EMb#O;g+KGz+wc`?!#Kh)m0GGhIwb$#PW zuxaTv2W9AmC^yw({^Dgbjtv4LEmQ|OyZ!4ffQ8RX{=Ng#kB^T>;CfAx(T3Lt&TNz8 zP_}eMorMv&#(9Zvd@(OeaSDNgoG12$J(>hB=3&h3(`?LyU1ttputqxSZhJI7ap@__ z#c9oADfI<)mj=ZiP+C;AQDt8cxm4N16j=$}xiAr>Una%v>C~xJLbBS$^8V^o11%BC z`mr)>VKH5yONcnx1-7bxQnfL<3wnK}4I*^yDiAE=AJj|Y9pzS7?J&TdaZ%ab+zwHS zxsdM}g^+=F4f>mgpen-5zYZ!bTjT38ziFq!=4~5IP+#uTDnf-d%vs|T>Ud`IfbV?N z2)cIS2j`yXEy=9UZ0_iPWDO%_xEE*laICG49y1(NyKd863W!CG?cQW6rc_;C4N5?8 znrV#4Hf5Azw~5I8b&p8CUy?!Mvo2ayDymqTZv@4`oWWVmL(sB->lZ&-m!qGjqJV*B zpw)H5qF-UGxU1}aqR(Y%&Fk7>x)oP7%}Y2q`Ho%)uID&x!ZnVO8`v|U{>j3aEMsvn z43*|QSD(C!gPir58FayZ^MJSp9yL+OscgP|2&26DF^r60;@_Vzvw7(2ce$rA46t8s z4vzIag|i4z2cpot_271k8DlK*7h99_x)jRv@h0qnoH# z+FSzkVF<_c?iC$YP^Ct8uyrql1IH5OTX327jtcK8SJE(f1tb^b7s}$(v3slG9|q#P z#aHhUOcpWHU>iIcnJ72M4`Wib5IA6Kqb%bby3W#v6!S1Ohi=F=C!?sCc8JoCb_il# zwTGEw%MLQv)?LzzuJctvh%}j}T$8&Q^tihaMC6>yo=~N`YTFl*;qCA{z)!3J6kUPE z=4_j$sYeSxY1M*ips;LKFYO*Er-Ktl7ijIgdfeN*wK+PYs#C-(Azx|BBSX`3Y z`)e}BwRhNJ$z>GiBzlN_I`o`H+8N}t>oYmJ)K`+myhOla=AijC7gRe ztIHev%oTa7r&-j&eJ(Kt&&f1f1HL8!RAvw^RdJUxzVx^Qaa%D(^@lSMc(3h#zIQ6L z;MBJjWnDU}-*|^PN(cC60Hzw~b-BRaud<@bUR|W&5SeskCyjPl0ar%$<__(dhnMa} zQ7nVI55#PMZA<#I6+n6a0Lgci*lhH~UNKx3ZR@GV1>T%J}}~)X4Tfz9yP4 zsi<-cuKPcJ(MzgZd#^RP)ZA9E{ZQSwR(!^oAZC51zQ@GI#q;OMNwV<05F)_j%A$v zqqqFJL9F;-u7mVoZAQE5GLooKG5n`#iKKVT`l-YNEaKoT7vdEGmmboiQPLLhcM-@s zH7hFXutSR4F?cfa5MIkt$@HQZB$p;g~St2z-|YK5~c`bQB#4PMG#i!WFn7 z99%sM{7c(?p($gurOh5?P0)2}7f`F`iIVJEZj26n9;F zz%_nG9F4j%qggpP?uZO?T8XtOk5y4|Im<}+Oz^LH(CaO8-@H^>CT759K~pEzZYOi9 zajU=S8=NnsUAH`$WH34NF5{@D3bXk_2T_^88gn%rqcsZN_;~w77KvO8OX6gSMml4r zWhG&pfzNg}E_16_o{*{-bj2_(02JAeq(V8h_qUzPrk);yp*;Yqa&Tsfcs4bcv z18`dvnlJco*_mHSYnnPH~gBh!=ex4fgFVD${g)qwk9;Eh&Mcte_V3+^}w*Y?;bhyj}Cj z0A=-M$I<}khF;!)g|xACH5_@=K!<~v;}G6jVbSJ@?f&7;m$%IL1A}(`nTu$m*ULXS zeHxx`+6Cg`tTK<$kNk}RbvDqd z^UMZ?yLne{_Z;nfjBGb%?;C|!a(Zl?3!^*NYT`c?bbRH*YOBOFG&8!?EtIaW9$#5} z(Ye5HzY_&(;}p#*seX8wGUA^#7VejO+W=z^wxf(3EySwr-#eK@O-Cesn3zGa%xoLT z9z0A9IMy)lbr8ddu~{DGV{Vf+HZ`j00=XOR^q(Z$wKdbd}?mr|rP& zE|zM)dfZ=A9YFSf*=S3O#g~tl_b_y%9lzwTvcXdc){bSm7Y>kd@75GYGVqHrok6s$ zutFT;sVqZ=tQV}&C?G6eZR_c9KsMlqLI%6O5M2w-BR50J*_e=GJ>d@t7+KZ%nRuH{ zG(<$?`4I{#^Ys4!lb?jfw-B;hM_4c;GmV@~0;@kbvEkMf0CY7l>vy_}z7B3!&^Ce4 zShbn?{{Y!=R^>)F5Ohh(<9mp7pa^rs%Il6Lfq1pPAj#9j-HWlZ>LtC+GOX5RH+o~d z;|$D}B}^nM)oIMd3_=xZ&gxyk8PLb(8L+M8g1SVL09Q2t2+-iI%sDfnJ{k7XE{lXr zEHu&n#bs-t+bF|D2WjpqFm;I%!FDB3l%F`%Ji~BKaJqKKL zj4Ojs5#5kE+n~hgqs<05g3JNYR!l=&bempcVcN_}+|s-0P-$(pB`vxxto!0Kb2P7q z9i}%0gkAEyp-QdFza$J^AJkXj&s;)Uerx)VBVsk*7soQUTg!>I3K^zTT2iKAZIc~U zTe3G}v>uG~mL--?Dq)9i&L&!OFOfX?kDM}~>m15+zGLj2HSyXaOXz;da93y)VH_`zGT{;>oR3+ zx??jeHLPs*_<3YL(+NSyXp;j8kZA!;-#7h1JO=P&h!`1tjKo>aHIHbTZdQE%0FkB4 ztGJpA~yKoh6~;76%_XDN_TzcG5+Ic=$9+hK&W1>I-faK1DtMt z3>#-OKr`c-ilkI@<{!{(wER`VT?w4s3>`KDIIx5`rg078!Q z6N<*8>D?&0Ohv{f(PiJaS-IRUrU;D+WhG8^eNOobN>MGCoiu)|}Zf%2!L~S$4PE5*D=jnOs-AaD(2JQF~4>pgr8i zfj5f&;Ju=!SUPm^GZ-_@;~prpOt*;8Ku(9+Cfd$taC_X@z}(J%bVL1}yAiI{ORj#g zV=frDQybj5=rh`&sw+S8_r^YYJfH z%p2^1lk%K`*k4Y6S#Ve>(gs_`a2RF@S=zA;w%xZxqPi6bYzs9G{TW>U0AH2@DP703 zq-DUPf(Z&LJU)5cco!2lZJzPXJKlMhCsVZ09Z92kJ8E^A`NX^-e4_HZ?+%CLOQ+Ng zVq>O-w}W~Hc=1pv)qg6o|LF?fd{yqgN@EN&aM{00eTHNSKB`2`$MIUZ<$_e z;!weN-c|M*iM_ek6Pfdvr+-9lBNd%`!&C%WdLkB-Ifg|A-(Q?XpE7mXr#@o51#MeB z-jc+DW-Yq*;-*luczJ8-5iA_CT*}i8^!#NG@9f;pY^H}hHxvSe+I{i+QzEct{)|Eb zqelSoaj3hcQ+_`+Fc?(K7U=ewUW48kmQwk7mtR_ zw@CEnR=`+nvk(d##n0vd5nGz2{-Zx7TDSKTE}&#}%(gA40`&+jjV%Qh_9>{}Qu?A0 zvrjF|*lR~qi1z;gEcW)6=MyV*my%WEYp9LK25W&(xwHy1|#08wVB?P+k>uf*X|EZ0t-G6NR+vmd>tF zG!r{^HBeC9m3q`%fGw>>-LhSsJ7WTsomPIZnzEdl{6`K0#wSDcE^nR-g|qWr+|?|% zsVba2)GM5XS(J&Zl)l>?HT^}FRp6+?inlQ=DZPx@8DzPD3_UT=I5BsQp$UN{b4m@W2q{3DZ`~|4nfxrTCdE`-1&<9N0Zcn&VX(U zCWtOnYmZhtK<@MV{!F_=?$&9wH5nadORCr$Zg)IyYj@ zOc)Tr5WMpDmyPK_bn0Ao4iv7AO*_4yDLo7B-@I{`E!&8OjJi|5tmmfgBeiS}u59(~ zH_O@<@nx&}`Y^*~)Yzj%Vz`(>w@Xc%uA74v-F0!8_&C<1X@Z{e@0^bCZFTjQt;|V7 z^?%IANz`s=Q2k-r*hZJH#9WnzhiQUUUA&!O4yhAX7lDrw<*Ub7s0+`Fl`WRdim|ZFB|;sWaJ^d1O$hV=jdJd>6gy(ob9vMSf>~1s=*po`0O97? z{GnZo2lqG$(ELAl*`Y%=4D)TmSQT5 zW^;b3Slyb$%FWv@qWT5G;?0IlA8BoDo#pnpUe6o%_ue&b*0<(f&T>+HBV<|L<|~(3 zfoii7{{VakwL4rcml8apTdPsfaSu8xxlsj2J!Y^Opdo~))S~R!kGu{vycgxnR@m9(PLqn?hl?q8jO1^37R9AVF(cr<_HthR*PU4!SCj;C+H8{hkUegMW zTt#NP%>1aY-eV0>uEJ?PNr0@*`j|_Map*AFVP^Lj6oHy(b&H-*aYv77^&YOzf5gpl zU$-CBBLeH!5JPQczX5CXRMPoBYG<=O5=o z8pG8QU|!mPLL?n`ap?yv6;R6c6V6TMayaL#ZuE7$)kFzwcbjFVf60F%gwa*@l<{6= z9D(wd?i?!>dj4P}cK{y3P*9E=M#dlOj=8I5-PzHz1KSUCM^ zem0m6*A;^dF}H_4^CgeLzrO^?mz(6H{f(h~;+ZpyG_xfx`{ntQMthd~O8})f{6^_V z6U1}sx!qsiQw?n+^B*&hQ^&Hjfx~t9_hKKQyCH|4X`A{hq@d8ZlpL`f!n*i_97lBSMsoHJ`Mlb^NP{7Ohd% z5*u9uwzgs?IbRayuwCPrud}4La=E6?*p@X8SBbJEI@Pfa-?7*A7N{ihAYL91GezwM z3$?lWuN$2kRJ&xnSD9sODAMiX1?)5^5^tvAMw(>IbyuhRg%e?$8quDyxjAC`N=pvP z`{K3Vbno#ky`6BVs+{#|y2aR6SDwTLDiw76=KP;IbVGB6BH^IAir@(K1@xwYgzsA04z_>iPk-?pzN*OC{wQT-3|1|7(EW0 z6`sY#bW)e2!JJ`c{)1fV-e1?Oy;RIIZ(i?cAt+YfU4y*Zgm4ec;^N%oT6X)qW;rdS zGM}$5BE$orG?gAW{{V6_55Nn%2X=9Km$BQ-8UZg@KIc%ugQrGv;3> zNO8vk_U z7I@-N)h>7)R{YE}b3$8Q{{Sq*3s#_~F8oSTsOZ_QJ)?G?3rpWqJl<@L4pzP7=k18+=o27V$_5$kN>pyyUvBrmtSJBx4R+IO*UTx7 zx@!771Sig4oxaoSz<%9nyy7%k;nI1nOyJ^@yXcv~)vR^fnUc`kNBvHA)HU^BQjQu6 z{{UWKC}MSse?}r{$nW(2;IVDD6_E2TZI92dSb+XMwFP*nt9tHYyO-Q<10F4|oi4+{i$|?y<~uuk!YxMTQN?Xg={%mbCCTXa9S z+6z{D=AtIdm^1G#FPwkavZ9pVJ>^!oV?ARKDvrIQpep(oQL>$v51-;ZRhd=OEpv&l zINqO#F!InF>3zR2P|`Ho59#?-H5IF(UOvzmE{-|R+B(NJ7FkW0#-M`N?wT2!)GEg? z7PT>NGp4)!zOt{)MlF;uv2j>Kb3^A5DKkF)g-4e0jllOs+zq`C6H3k7*NWr(l!?kW z)q43wZ>szo`cChz_3!9HYdZ&>{{T=K4_Ew@8XLgDQL}U$&9bHHOe2iW?)=068nbOm zoK<_P-fa#{@3;M!oqQ7BNiSYsAKX+l1_f+i6p5k z=>#yfU|Ydi>jm;Lz4JDnLHxbtiKgd|xfC)Q(-OHWkk~@K+rq2fW?@ewI-Ii^&V6s9 zBw>|=Gp$B4)CA7G;lUmhJ_DpV!U2tb`Huhw zGIH|#ssm0MjH$(U%Pc}eg{ywPlQ7zzXU;syc*g#dQ02WoQKB9XKeaPP1Fzu=P@$G)G=#m49!!Aomxde#H67g8xhgR}upA&z1%HOn~ZE*3+gU)PDf@GaGQ zwJb%{bVkm5R^z0 z7&V)1t#c~2@2O_D>(&cwUM+wwkj3!l=LQf@5rnZo=;BgyF2R;;YqIlGb*r%CI=~l8 zJjWPT9!=D(SUD_3fV<8i795Xk+z&$CAHC&_h8k?SYpd&7W#1!w+{gynRdX)(K8$PD z9k&44X0wT(7FKy4(D!v{&2n^A{UDPjOUtGguqbtO@z;oos-csge#ww(^Z6Zy{iOrQ zd>ZzNyP>vS&uK(J7yMK4f##)Oxsq?`sUP(EI9U;U4F1;g>`)qI~UY*`a<2> zC0DeevAgaapm;50F)09xF}31sg&9&?2p27CBpY7xgJ5aJp7OrM$-kisOO?6}P9vb0 zsBo#d4Z2EoCeD5Vq^6q>Fj>;t2h7Syn-od8bX>?yL+?z#7mnw}K6 zceuMAt_6joXhrpljjS$@+wvHiFF0@6WtQTds8mZzJN>V zU%n;09r+{OFD^oUVyYM5kD;9uRxSBv)(bVo<#Q(t%?=Fm6GVvGMbLu%_jvx!XPy<|Je)85qs_OUs`9&RK>lYQTRrPCn zre%l9a>1~KRhVOg`?g@ZYa(CmOs)Z?N7f2jEswMp<`X+no)VbVHGSpUd6ga#>6*QJ zLN8~y)GTPGgKcBlJDQB15~2heRm*8&^eUxRQ&q9Y_Jma3r>w%$F;-SJxRuCX64o=X zX_A}U#J1yAZ0Q-M9{ot;l`(=*i*Jp-b0`&j_=1*nF17TAn-t5-={a#Q<*9W}@~8co zRLb`%xakVcNTy(vgl4(@{{SJABW2$Hr6!`7`%}NnxpAY_#!nwvii$gKa$q+%eUk&( zaWa8W&@%prAPgZ&HD7PMGc`&_)DLaLNG=^;Mf~+Irz$#KuHQejB_b$aO5D46ixp0D z(~mbTaS$AmU$9G$I*jZ&@76U{SnA^X8T(4m9SpwGnoy_fFQv(1-mb%vB_OK12Ufps zR@hPZPuzN&jt{IViz)@2MCFt#0!6Ckv3**Z`-e1Sl&Ddszj;OCmCflmaAM2naA64B zv_UovG~#4lk(x&usk(CtkfG1Cvt%A6j)S2ASA1biJkTog_?7ozdFeJas>{kQqb3DG z?FYXDQ*I5>M5bFN*`!yx;e#B3i05j0Bn25fqR2$Q>`Vnq-0U1>L@jmNABc!r6z9P( zBltkF?dTQr7I@bxeZ{`gkb{aVJ|n-iWy%vXr;&`79Ueb>M9Dp>5y?+5YB8!hoO@3# zXSaFXx(!?M?FuDyJK&DW=qeMH7l+`C2W>4!Y;5o7Zlw0~mC79^jrygL?Arxm7p_$v zfYO_>n|ng5TW(X)j^JVmX}>v2#Lj?O{{RT7!iRS>2AQC>eWuGJ4i&^ojsPE+q+8%e z-s4FH@;_hu0J=Kh4lC&|d*@mCy5*4_K}$N;)*+zceM^F*mE^&v#Xev1ZQMWGCkwk254|SrgjepPp{5m zS43AB&k?3IY~GW>2ROX)(K&jp7S-l9hzF32$+oL_sAdDKYws{}`NmYavjV_`JM#>* zD;x+J+H(H@B4!;TfoZfBhE%JKiuv!_Ulv0(4``M<{-vM*`5gPe$4KN%lpS}L6FH3v zpvCbw1xy#Ur|6j39a@i-*VPc7U??HhK%jGX)4=f%30Po5+2G8H8*; zPts=8uSR%09pX~}K63m+jy`aJH1y7Ye-KPIh7Fbuq+84QsFJ zV7$=cA8>2LK(*pF8G_qu%g5dgvjkih&ySpRBWiw=piY<^hpomq16awP^E7bAUAW}c zL1xy^yrIa>;I}yyh@dQF;8a*M2K3k3bj{72_mtw^)j(cwua&Qu(nC`I^-Z zxXT8(U2L5ueN?8H=b`0`bprt1{c)~7bBSWvtLwxN^;0XCaHb1J7XwTbmLGBRfs{FR zC9xKd0mqhLMy#65t0uKu>}DIus-m^e?jpJldA;~0qa&_4oewI$@HU(R_!-aj94PgF zyv8gR=zrAGb5QM-^!*~@$~pbSamy%5dHaZpu&sIBA8U?~G63T`s7Y%SEnZi}#aU4~ zwUzC-^=eI_tmAF>fQ{VLYcKGZHBYYo3rQW4EE>$*6HY;oujdmcf-Ou#KnUqig4DaacO{{SJ~8HHCoeB~Ks zyzj-c2wLS)#^bvu56&P#OYW(k7GS;}dPJcwfqzt}72Fn}J?$;Pw@G$nZz>6+spbg+ zR*=JgKM*p3TV?y+a#I~Qd)qHU`vCgEXcOv2*gJ7P>1R3Peo?OWG!f`B9g9@o-S^>Mitj&S<+^W)@wA`S0aacj%9;wIP zcd3y_&TIv*OK{6|a&J1+peda!{KwZwdgsJ?HXQFgV^k@+s=WF6$}MemA1Frcp@6sN z1jernMv4{Y#j^x$ImzB6bZV%qX(f6}ys16HbM&nXmdc1WUF!4J0?ICsId9CoO`t}D zr|x{_6JE`R`%27@Lm2B5C|99rp3xa2n%-tWT9r6)s8iqoV4hK%)v z(-S5E*148|eK6z1uQqtleClDf131^t0m@rCN(-tS-h150+-DC&3DlgI6{)~`HO7ZQ=#YU2ZD3QI_a5W@56tk zO1DOvrD@mo03benlkF*!luKVKnRI#%PV(5BmRH~Vj;j;Ybglcs?&lmo2bkdTtsfbN z9UCI_4NEuS26yip`x6C~$;a9`0ymYPls}cEdFBizc1uHe^;q|oo0K;BeBgQGz7wka zZxHcY6wN9(>NH^aRHo3sc?)8URJUw#rTwW|s?n_Z%m&HUDuG?#ZEMHr`|mZKC1bl% z%7>Vir1G5r;&xxw3tF6iXj-g^(GA7pte|^}beCLEA!CBG!|OEwy=a?h?4A3EQVp$) za{#?68VC<1#MxJs=Q77uCd)JF`*C8+Jk-5n*w&aCXbsz#ajjENPcg+u3)5fJ6Ugb^ zxpjzM4aK>CINhvxC|w+7 zV}UVCQ5s+o-KwV1n#VtxP+_KOm8P1xtK4eMp{)Drpx)rgeX|jfVibR*>;-mS_#mU$ zWUhL?VRELsYKta`Vq|ij{{Xi#g)zHnXAS!T`sd6xLgu(@rxMhtDdt`-ikOyASnu>9 znyjkJO=bmDoOaAr!y^gtlaJ~WXcK4e_l;8^YdC9s{{WJoR`Xl&F%A@0e-JKJX|85c zj3XB9toM}O=?^y@Z&g>x^OR$lhdrWJm4N6n{a`3YmQ!t8i8?ZMrenA$Y4KNoNy}Ld z1Iq>xWJdJU?d=h05WvA#eceW|Z370f`KC2>iGxMonSaW|AY-a%;C8uI;Q-=^#ujS~ zL>7)1?rJ?)y6817DO@PdJNSq#kz%~S%#JUI+Ea_AWVq8;$UHaSk}0KDE^F&4)d1yQ z+(OY{t(^7<^8{S2GX4JL&f*oB&-bZjmbfDx-A>YmECjSS)V@B_#h3zEwySuz>_Vbo zvri#Ul)`XUE=B2esbr?M8*S#gRC0GBu}ZJr(aX$O^jV$>N&?}f9dptq52Z#%1?Oz& z+10D)IzWmGi-X<{btz%Ysxw`;%*osqRZgRi(iS4S7z)F#^T59-Ox}g;d}VmU)4VEpF@HE-SvI5S@pk!+Rs347DhB zHekNrCK1R(1yN-m61sF2i}aK&EGEqK%;071!c+Cg8Qh}LhJcM)YnEPFmi;B`Z1FPeozv|W z5LKc70H0}tky_0;ju-7p(6NiIWyEi^&BVe+^^5yUaa~vGD=7!Q4M3*%Jz?HW^DpNH z%Mb;>MT?Fg)lgjo_p7)*(Zu(Q-R>+l3gs)O{pkg!0=ZHiAol5!ApG}^)xmPzfx<7# z7+>lbDbEn0P_8ChI=Gdjx0=#n=Ou_JR+XDP*O^-anF&5@>YGQc_WjprMy(f>+xuFW zk*27OV*dcSj>lephgho)SvOp7nX=zYA1aAQUvT5E1E115UrS@d75@NW)(MxyW*g|Z zFT?bfsbaN_c(2^WsUgWy{{YV3V|8d!WrulNmhKw=a|X6QFT|i}5clN*x#J z2L>E2iJi%1byO?|Mf8@r8_x_Ov1#&xs?BA#{>7mhUviCqb3^DWF7fUDN+R3FX@&iz@2{B8TCH3kVe)}l7HrO^ z*}m`r-SgAtOGQ=7itA7V2E7ZuFSN=vvcx^z;-v*}=(gL=FY6j|nL$g>Y(DoWFSC>- z^sbwSi-MPjUN!WV>S+Bmyt4R!DkGj2V-*Q-&kif)H5{v_{3E;wZm*+=?)*_Vm&mDh zfZbQoGo~m_{GjK>Z=}E28rG)!Zx;%i2s4k{3JJ6>fIRlc)(fKvqZIgE_S{!Hyt@MG zimJ(oldda7eHW{!>`}Q}_WEJmR(y-x{T?7qvE&b|T3uTiUQ@YJ!Pk?1c+=Jj?uc(2|_DbAIuO4(y8vFhGYmV{l7_D zqfO*{YEr))Gd8@;WrJAZZ zYGYE$G5y1hfT)?|Rjm)oXubDhQClwa7onY#AOp(5Sr>+~VCe(BeKi_)22i+h!FJ8w zrz=>k6EC^RxoQ^j!n#uJ<|tDVlZ~zNe7aEaiaYnw-H#B>{d5u8Z0GK92 zbB+G_nXAFpLG3M8s`R>|1%pLBri({Ejvyrfr4EAlhRFPkR?WnN69PPe8C^Heym$wf zuJe9=v8}V420C|w@D06tt;~4v!q*)~`-PzBUK(6|ezB{}?V;s@TWiqwz9P$D7<5+o zh_+!Y!?o~5a^2bhudd~QG1bm{KGAq(3;O-y0*C6|N?okJWvpIHW>9WlrsG9R56N4C zuvlW|#drN;apkG60nc|>cFo_XXpz~)vY=WA8hXbNK)1kZZhirb$=9^@ z!>78dNuMyGVOGU<@Ygc8s6WYz)Ul@uyDN;AkJ`F0rk%!4nINl0&ye}l7|pt~H|^SD znl zR8+Q{h$zulyQZRXe62jD$!ZUZms+rSl@LERGUW{h?kKk@i+2HTvaw|m2A6!StuZ7ltgxr|?FE^E&i2M)AXaHB!z3y_*|q&Q^@w$0&o|x% z`wo`gVhjvQ%^}1g&7R{G6u~H}EA6PQ6l{-g!pk9Q2-n#E09WqESxoj{md?@Vb!55W zii)A-t~|@!gKS}31OSo(w|?~!<^f?1dybX!KmmT}rw9hE!mL-;BGuSc>)tq~!6n1u zZj}Wbe6#&SKYl?Y&VN{;R<7e7^)s`}p(>u=y<*=;v~qt^mm>U~I!jjyF*Tpe&wcHr zu77$)=v~@}@6KJbO{+&#-}Nw+yq)@0!y}Ugy6}5JU=5Y2tAYSkR~UVFUnmkRItyRF zyaJ6PIA|)?9fIc6DR+pmo6OE=tE3f?9`=XApE8%-Xyyn>2i#h_4wwDGl zc6c^r8iMS*Zu8Wr%pzv7Wv?-$9f~7w%8L9z$ci$N3;AZ2?-pL`iu&AAxo#@Ab)Fz& z);Z5TwNSY`lf`G*=_raRPR+l(EO$$1v1<8s(o-@mpfgX8oO^&?Yih|&d&4HyMI%jK zxZgJ#vUUkYnJL-KK5BlT+~FW`0b4tDg`(3^kKyn0nd)~=b~JdlSjpD{!;!tGUd+ax z;V)caiz}Y9uWvk&H|TUO*bmg!8|6=EiC_pThYj!bncJXPs(IDi;Emz6$JPvYa~t39 z+GYUp9>dRH?igA=&KZnQRv+Gx2-Q0W%?J>>yf2wc5C*f9ypTgMO}0JXNlq2!Dpe~=tB#E?D%DJLK;jY`dM3v5^kBST2Q=Fo+g>8Z6`(_0ccXM_yftDq<^JJ> z^CfNp+NrX>W&NVOc2hjC41Q`Ky7}r z`<%1OMmvPLS`yCx06Cy2aoyvXgRMvHa^pOuR#(Z$ec_rt|M+q?wCZy ze-kF%MjBRduStMUAbX{%K}ZkQRZRVef*nNKif?mhKQk^}b%rG;Y|$2XyO)EQ5Oy@J zyIB59hQAR6CP$FgWsw%jx$eU86^lT}FWbmLdU(`k>-WkQ8RvdzlLEFdX z6%`C>v=I*+M%En;cNSuWmxbL!IxggBd&(Z-$)}1uPC&>S_MSfQ!tQ z`)P?azJO>PzU;tQajtT^d(cbPfeSqAU%8bL2+%Q`fSSNP4u{7*;&VnZv^4!8R&WDX z(Cy|>(N;r#OY6^k^qmwTqoDV%sWUzEIOOP;;-|E~Hz-y?TizY(_j56_-Nyd_ZhdjD zXVwG-Koqbyv+zL52OAdeZ2R&HaM z`mJ(!&*l?FFxK%OP+6+Xu%}Y%h_!YmmW%T#OT(Q?G8MYaMyNB(5QKM?EN+X%M1!FE z$1mw_WVO&@Qvs9ZDsN6|{*lbevV`0{9zIgy+H^B6(;Pv4FtD2WhhOR*IyeLj3Fsem zy(8ZCp@ojkqp2)&js0NSHzFhR)TK!Nnt+wIraDog!UD21aewSv7!_2kOI0OQ8nin^ zDXmvB%S_%eU*rXz+x!X8dkf3cOekQ>3hqoWXUclW`^^ z+B_dF^LBhf;*PJpqJHh*sLuPSuw^@S3YzwrfpjC%tf^vF+l^jGPZmq+*_lul{gK)* z0+?4f&-DV&*H?2gl+fSKzI70HgvYXJ04iyp-?Xfp2D31bq4Y=6yhnbWp%ntI5TqPQzPWuqB1dwH443IU~mT*aVU(#E}eN-qb&Nrm5Y zQ48_c02r?LiNU}nIMSMb?8s{JHOT(6JQwNt(fhnZaN}ZNMs>&gE?9C-r|m0iWi$Kc zbO_7EbefI;4hop6QX@|*{pK!K7MP01v_VWm)YdwaGzpA`%Jz#0yu3FMT6F5J6YF!O z)>YT;CD!WCBbvrHnQA+LRr^Njo<@$95sa^^96>&bXf)vQIu@12=CiJ|eq~JedJ5ap zQ3r46Ys97y{U}uEB?*WRrcML<+77%nqeFl$stPs2;0RE*OXFG0$lFB{AFrTD(xwuclqc zN;jdSzLN1&;{9bcSJL1hS;_uDwC)Trv;p!0$GIIf$yV@+kiRFu_0= zz@fj+I?6^>kzc>uy|l?PA~xj(Ld; zy$m^Sx<&ik>^{1kh}Ht(e8!n?#L7m_na$XK^@i$&w;zvbMWvZ-x=!pjD!#$SCAuPC z^)0aSO>YSl3s}cav3jj`H7vuKk{Wb(`-o^$Q^eat7er~kL;Q_7HpTnDx`u(Vz;yP9 zcb`AeD0a$lLh{1P-l6$Xy#xCv0mY$8zsO0mm2cnPS&F!14$&~?oON7YT$W_2#K@?k z+Wbvk0-f{zN>gI{@@Roze)D{0Bz9Iq%X67JI$O<@_6#PE|HVM}-?PLnWOF2%F{ zMJ`o4u&r1Oot-8!@G{>r$H7C&{@I)25xkrepmk0-=2WA!&tq&xO3n^?5I4oOcDaMD zrA;q>@CgP_4ZWtQI1|?#&T3q>I6FF3%u}1Qe02QBjTLIR#0dA9Z*>v`(4>V!*uNDV z9tnW6&9XNbs0y4fiQ0myk5_X7{lUgO^Y(`>nk-&(p7Mn^!CQ+jtMZuR=9Sb&&Pwkb z7EH2gbG0ofvD3xo98eqRJvz#}0;5Xx$Mr3`-n&&^H7*)h;+({6wTyDim#J!V2U?kc zyVzqH(d`TXH+)PDd7I2?3|&dsxVJMh2sqRTTy%_n+z<&@kU-888FMjmuO<3H*;BCG zucjN>W|-d@r;Oqd<@2}vfT}vc_){?#^fLVCF-$#RqCe-DS)a`+eWL#OyMFN_OlJgD z9cW>TV4&j0G2zPDm%=Gcwo9S4^UsM?5lXRZrhlki!nr#duj(w#f;ah%+nne($2S(6 zD9asB>Bnd=wA)urbI;CD1wq5K-Y9}cN%m)k;*g_3SXy}3KlbH@1mK!-`*fFFJsz&P zrd&L-6_?$&T9%2+S_I;{PAJ_Or?K7@FV@xFHN3Dc#Y-Pyoj_~(gqA^)lrOGXr?=8c; z!gSsJA;s}B4H~axbxT_)^9;*JCH2e=g$t>PExzR0GZ)ZuPOy3I3iAr23+es2k(?pz zpYAgZla~;wtBN(5g2BjpMeLJts;XA~F$g@}NLV@l02p11V&PDx#AiyI7Frr2laVqp z#8=rk?=Toig( z2Y)$XG@OU(#fQqmC|S$eDu@f91+Vv{Mzm}LyucTxTC&B@s$Q;E4N9_F?0ulM$_hUT zN1W4|)5Yd!cSl1l{oy^w6Z&_6ECD^eJ>?uq_GMJbgvG=sF_mho$kfla#D_g*Q-!L{ zR1vgpzH z{rf=?YZxne@gE^?Jo^VRKuLo@?=Ow;1bfQ$cfac{>hb|mCdjidg>1x92MOfQ~|4K_n5|_U@;uEekCWoBdcC`cP(}* z7WA<}O2YeZ*O)krVcStp^;;qMV#yOQ=q_ULr$D-%K@h-H2 z#m?opM$1dC$nVe)b1;j(uCr9i8^3AD*5-oo99Pgr4K3ycmn#)7NDh;+po^oPA*YdI z>T;QSb&FnwvAOaTEW3S)?qJyJiqOknNBxWd!+yr+ka#6xopPC2<|aBW8aqH0qM9+x zt;<>BDYWIf7sg@>h6k75A83hEu@9v^r_OsU%ZRqJd5(4HbH7EEtqV%SFa;Oeh)K+J z`y$u4CROSC%vvlOaPRl;6>8f8pd**GpsDfl<^qDn(fy(RTp_Rkf}0?;94| ze)lEtRyN{R(=a+;E8bLrSl~QI7RPP>0CM1m*HCCx|CrNoGlVw~luP9`Lu8@m|lY#Flc$SLd(V4@~+x`_yDQBQ5h+ z-z;mt*4h66Gf}||bmjf#24Jzhy7_Cg1=gnWh5Bdxij;CLrSb1Dz;v@%v-ahZ)6??( z>ke%VG80bt`N}Au=<@yV5ce8QKSz(usN}T=UdNfDBOt#J4IMygxcO!ch&5Gv`$A2g z#mvcW-u-3U`Y}DXh!mU-NLr$FPJJsE(pf`=_JGB=n(F|P?)Z;ZcN46!%G?jYHpYzI z7tmD@JwU@Vos2$^I(dK{VXo4d_QD+?xu1ahAgnsfS9_JZgHKBg4OPbFRSh#V z9TMKra)qg(P_u6Hvv)YJ1ik}bd5E}OABd&Kj+}Ly$7_MK)%s>-j<+j0(<%eA$t}|c z@=H%x{{Y1oD{|n54u%g(^zkb1=NlQ~UrlwF;S?yNEgWVKVQXC#-YZhuYXSRHHbZ0G z*GR1**f%h(+x?j~(gP{zq2qfU;9nSxw3E>OkwLudt{&AiD|ehzuA7x%R_@R8ESN;R zYX1PZ%m*~zdF+2v$++8G9Sr!64el5I@ht$l3m$kS0eus@0=%agzoah}b5OLPPFxS> zBsOD(e{(5;iou;FTRI$Lo0{vu*fK+{C*khB~}> zl|qWAI_vrQMVh2@(>;0@yhj3#NwMRn{2&_@&;aADXqOu~4(-=p-xH;~aO?|!UrNBI zU;D(St4&(z(3Jz2wSJG;QrUfh-r#oo|U` zEN<^<&+mCbi@W#Cu(?L6WA_uiy!4!@m@HQNz)6KB`_CiL;ab7k7ZS0k!C{YNI1H07 zM+Vp{(QC>G?r{!14@qMLYGFD{yQ$SW*5gJ+FC-#M$%0rk_BKq7og#Da2NB4#Cy_ic z0*mLE>bw%pxR*K(m0byCtxu&EuYmnvwjOL09=Mbr^&>eW(+S|YMxY=AbO?-)ki*vK$ zqr9fmf5dvin_lH=1vFmd2i&L3APGwWmAk(d)}oxGtiLcqG}9hqZ>Uz6wB3)?EMN)Avu{o=@N$}(>K>%U2a zcG^5D@TF$&wt6m6Q9r*L!QfamJR;qZjXY=N5$&wb< zeqTP&&7qrd*`UUY)a5%hQ!Reu+9n&rBCD5JETF2+!*?DaF(PbB%ZVRLsEoq7A9wTP_+$p4Zz57e-fPZa4?i%#1T`t`-nL=`C&L)Ge-5y$M zTlvR1^x8OgWl|ktsP0#x{Y9i^)G$hqqrX;Qvr}adP6MQ1-7ms=@#w3>V^l2yH66a^Lt9I8X!fpogX$4C9(cX0^Z?>PVvZ9^U8L;p3i2zVFYWW>moEk)0^8Ic_Ue(b6TVS(OV!n}q3knV^Gs$W?`8>NOjuT4a-gOQSS z_k`6=uW?TI@{|sMn(2s6FzaRd>jGf7Id~;Z3rl`q)Un+)MzXPjfKVFZwN}M>$6-Cs z>Tia$7#_m*OSRT70ql@jAmqULOnWfD370qxUy@Q+N=}G{vS};lDjswR?o0Y^pR!sg zZ==e%CTW)fnF`zfYI7YgKfsPt)x&Os)mK{{X!u1Ej)JtL8Nzm zB};a4{{VW+v{OYjL$BRME?$BC`#=)ca*s)QLM?}{=3^yET-UVWJ1du1;In1;f~Ggk zLBW7@vwmhFyCWT=D09PpKWYM^&LRT0mr{Ui1KrF!w#@vA$EI&cjM_1TUVU%Bsi;}v z78#rYw6q1?#}TSsZg#Qt>2t)_SNrOSCoj-N-h)#guf5KLQ)$$#_tns+va9>hA+^ZZ?yj6O?Q7Bt< z-5UAas=X7GXdT7~Z?8FSk!&B~$1&xj$nKdctQYtpL1g8KAU2%)m7-93vsB_#eD*}4 zGSA)~X5jT|KdX#Zk7L?XQu$w<#09Fa2haPNOO`?39vJOgRoA|$n1xQPeR!D`)5Qrd z#havI`Vy;Pv1Pt%_m(W-Djr5TDq5VDwOq@|tyV({d5O7czW)HfXn~iVo%6-SEsk!v zJo`$y*zU1j^2|35z2Ccmn$X}mHO^SRpePJie;>Ue+5oQ=^8M6RDS4*oe|*egvwS7W zTB7m;`kD<p7jQ8YYU)^=7?C~?=4}B>M3x!&SfYi7G3cxGL+-oIs8Rc zeo-ztdtPknK%ncV8Da%%E_%b3KA2J)L!`-uHwMo}408`&JvWYKZVM585NPoWvSVM| z?_(30xO=5gSF%+oBh6*ZXKpE_PdJ;>_luZO%lSML-6XE2WmlzpI?f5d6G$E4=87di z%PVc?(6>IDX^rPH*so}TZBZRR?Js1|0Ai6%bl!RT$2<%&{w3WC^A77&y!_(z4lcp@{N+H2u%LbCIbL&)(%I>| zT4ICGd1G}{yYVXn*YK3-D9z(VlAx5lgQfSxNezS_n7%vi4A$~e@1S=70I|>^%Z~Fg zKX)b?S!m1hsX%b)sCCVl^B15yaDtW>ES}C{g2dwfnWzXrG{t*4x87KnDb*WqPqaQC zJ7+XX1F~Ms;p zWwDeCbm#s50AL%8-?7(h{nU z(+-}G9N*QMn%a8v)4W)=R`tZ{;}b#ywcn$T8P!7+;b?f{YV_drdJH82Cz65h&e8#3^Ee-ePxp zMT(L?h z8>qNYjUz1qn!BGPDOr6Zs&&EMQV!%a%2eBbztqX`htIsg*OR27Qei@wnuZtL%Uj;u z{QgkfBUu~&0J8vxN^bk^ZLMA_?p#YnJ7V(mYu*4WC4tHLosz!EniNyqx_9*UV!Co( z4=fC1Yvl`Kv2ba}WBZo08h-CHsMnj-`TPlpyY-#G(E1zKVVuh_7BGie<|d`u*!s0A zQg9##gLt-D3DBi^@f=Y#qVV--BE=;;;+fHgSnO?*1gGYcnMtKD7stw7azf(npA(`K zgPzlF%5IZbYf{ZUJ9(V{0I`k$+s~`XEN9Ng-CTX4KyTTn_L-en4mkNr;ea{_>tDYp zpH#H5UdJ1Qz--f4Gg~Ds+OXpojT^dC`cwOlB*vps?u{#t9O1LNrcOn zhg{Uj)41f~3tOvPFc{}^2sf&qbf&BinZM8N!pYe6PBn1V(@il~Int(eb?5h(k+%ly zD&CsZ+_Y-{0HsZ94POlN$7r{J?R}>}SdBay=2+EmAPx%E8dxgcrQ5urg&FXfEVQq< zj&s&$Q+|eJ0>!^3VBj2T+lH9^B>SWAeJ2}~`hBL$fKk`)55l##SM30*yb2jg)l}@{ zs90d7l(p7*BBN5LFG5?F6SIgg-8!FXdLKEbDQ4Wtvka-8P?UVjIl6}hxrl088~qA^ z84iR>()>#+HR5g~qgNQp4q$g?W-$Dch)PWZZ=@AD&63xpOmvNyng~*3Jv>ZM9XmIffTO@J<56$hXpL&)lkXfYp)fIBAY5G9lQP9!PN70L08sW9-hL(D zm}+fBtM(GPl6mL+{-$nTWDg!9hPul-$4HwCbr-wD)KdivuWs&Fo4$wr9qe4EtCQuzP7!GN{HE|(?aKcBqBuRsym@$*EDyGhwf;cg zb&N9f{QRLnsq>Sg`ly8h;1Fx2*%IMrLH*n~me}dyX8cyEC6Tuh$6S0L#@`!oP%@d zJq-5~oL-1LeuOZ~K18b3!E!+^T(#7rs`i&^WJV=8r;33gfuj~Z4^B#n(>c22!g{;v-_TpQ63NJ6~GbN)dd;XI^-G%G6VT2A?KVPX6so723 zq&(Pf-N&5vm7-B=Z}JL~H!0KQjYjC8R_`3j)rR-O>s2tJ!~Ff`QO+HsI0=5eZfV-I zT;wXW=wIqlT~hsz-qQq+VCZ<4Sx$?)*iKte9RKiIsRB zo}uB}E)l@_n&%U2Tp)`0jm``3;LGhReqmmU{KPxyWjTE(Z2th&K`5nE14SzPpD9Z` zz*uje7{sqmb)ZwfBL1}Hr?=hnbk*c|43yg^kM0};T@Y3%nAapn$ zYvmdPX^K5N-)OR3sifw#eW3M&N?tQ68g&If}QM5Z)VuJ*zJH1g9N+^=I?=kCB*>j0^z z%b2>htw!Dw%D5@QkH1f(D-ZyHecgP@OaW30>W8R>u6}U9{Nq)^zVX?qmcH|ir$TV# z@2F!hvM~MNtnvHiHv9BC^sXbuH~aaDp~)LQh$&uW>(G5HgE#2to7N{qK_=<9Nm$S* z14MC*FIP6g(biGJHrZa$O&8uGj0Zw%%&@e)*Ak;TVXe!S<>^rd^1Od&grOA8d(5jR z63fW*60rnqYsA0-sZzq_9`SZ{)R_vmnuu1_5#9UCo4MH~zvQ)Ox4M<^-9c9p$C0^r zHJN%jn-1X{(o;1zGMEx)YJw)x|GV4!)Z@#N(42NO9hdtuvKS2xrcZP)>^Go$Ifcj>R-)=1y^d9 zK7F88)G4>_d_}cB>E(pTZ121uZm2}1|h1jn`EdUW$AbFH07Ipc2<8ChV2j*c01T54yJW3q}f0EXv3=0EWZMDp@ zoi%4zhyzN!K04K{c`*jl1;VMgf=gtFw&axq!+so731 z$`E05+pft>cxOHGsJ(9HNx-) zIzRRa&Ktiw`pYONvZ&!(*X|*%n`zzpsOzTStyjCm#i3CF!eQULA%9Qavp@p{$*wLyb9j_Z zp3tEa)i42NV6lL~hFwwTh?b3F$(m}O^2T!!p($1O;%q1RA#Z9d%y(`Yw)x{yqaa^@ zI))AhCr^K9&FQ@yP66)y#-SNxregFkz`z#9XDc1|f?&Vk`D#2C9&T=d^;r&Lf zT&vGRui|K^68rg|CrA4+jCX^?7Xq%=kn8^dF%p4BUfq4dSPm6BEYcT18%XMYAJFeO zj~F@Zzq>EIAasB@>onQ#?+Zo!Jp0F{tlb{Zc#p4m(KyEDQ;~a^tjL=pzOyRtGWq3k zprA&e8s)NV0NwP9MqVDc=4QKegWd%;M-6=?`DMpRd1?=_l~^Tv>ww0w_m$r0x_;M8 z$A08%A4#z3MLKZ|O0u9A^Otb$?Cb6Mh@^ntL&x4L?{2Ahf3m_za~)5${ij~d4zbU+ zXJ>U~Zk!v;xN40PZ{ynKgO`?h=`$t`CF=f{U1F(bFk#bvVx-F%P;;Jf?J*{ZJ-+h7 zCdlu__JAZ1cVooHOV9z1v&0S~Z8twzsa6Yl&D1g3b9sMkO)9t;@ItCx<0sZQB&uuB z?;7p&gQeXHE1sB&0`Og)J)v%UK7H{M12v87tg3}K^?vg%ExAKOyB+p%JABfPGQ`uo z4$uhZ50Ye0$bpPrbWX5Nl2O$(PWL;d{SUkpf+@IVkS2^v;4^W1h?;JU*4XV%Zc)Nz z0&Rphxyr{#qU{4$IN1raWx<;Hh!qnu3&hTdqiS^NEoRQ$As}1Xl((3haT};Td&O?` z#myrutjmWoxZ#eiEHKvE?(+7{_bC3LrrctKR=sSU&<&8f?)S{F>A|1h_6h|wb!J2b zwKX$I*92`M-zp49V(084rm^tu`^Gk>Vc%%j>e@Mq$jVeqN};oe(?8kpLK;3=d&?6i z-e&^nQlB`gc^Q}}^qbrY*%IVtoXl{_fJ9Y{*~`2vZU9}eN0H2p>)xZRh1LC>O>Ra) z%C8&a=__96O`q`o<_5xaf3XxcPUb2Oj_ydu(r>22e0%DpAXwU%?abV%S`f(#rjZS` z{>|*xh1;$-DU^d4z#Kr{?UeVH@xx%_c92qv*gsW9vJ9MHd(n=#m!j9PZ&wXq)@57Z zira=`!=*L2wqGyn%;e%4F)p=ij0Gj}>A90-v(lv<4PgHOumG#Uk{c^e+>WmvmgKFQ z%Hp+=W5lGtw5oC$8S62vRc{FShDCznuRS_O(>Hv|(ew^wyo^f1J!&5ndxWqC37gDq zp;z7%OFhX=Ax-WM*z*YvpIO3Zp#ag#?HU9x<`TV;$62-l0#$;AUVTwGko4vTi9@GY z36@|-7Wsl@L5aPc#^oUaa51ZkU^|JNSiF*VLn-#ERK|7V{DZWZ&A(DH5K3qlQ)Y+S&v}sr z#^_X~iVgz1yg{rRcV(|IQiA5pV^8neD^l#h!D;()9VT%1VcDUOrjBvBR~ya^ z^_t8u1&=!$tIfXB)Lmklyw_NZP_mw!k^N4Qv`Ltd3AU?nW*4F=+qdJy3!#Rn*|V!8+AHOf})F=;usv= zQ&CT}DzJ%RK#Yq|!!x?k>QgzGdqn_`R14g7dyK>mt~mqEn1CuBRCkMO;##e8N^GKO zT&AWp8Ka-X-hAaYMnaHsbo3VrO7sULN`}p}*#gS%L1zB|ev*)QW4ugEt-Xl$2+B;p$-GTesCSVR%OVSvC0-61!KYyqK zflSaifdfl>uijp55O5Ime9%^GEvIGfC^eTRh{>|KG+fHr8R(j1LxjZXaoUTQUA+GQ zajb7r@M&OC&BR=nC0w-@*8I{a6&-I8db{#Ka$7fBGyrMDP`Ys9<8pGMT)@;-vxpMA#fYex zz_=YzxmW=U*&T}PV{m}qwK7JfC2qf|QC4*KjoW&-eqcipE(0Zs7eMMdw{s9?*?nU6 z<5!?{D1>+DjKaMK3WSYqSJqwa4mmmfxo4!cs}NEzs#WwEms5eo@ddYNX=i(iLg!gG z;#Ug1_I*6WHZ{7^qAc02uuBG>A!%`BX3Cf-Z-ajDNHaSEJGhIzlL=x4O=W!vV6wA4 zh^a_EO8&7eoO=x2Tos+02I0~dx)q+0)tlj!T~UN{DhO)o;5F-lqk0WG#8G<=n~GWV zoEq;i#%;$i2wy}Qfk3TdBv6`I)U@-6yHI$Ue4d7Gxw1ZTfMUzH>G{VF3)`%`2aV6| zOa&F&WxIdbLop+#Uum3`FHLpC6MRF;_4%K zOa}`!;%BxxIpPN!m%gQA3f@|jyA-PEeLdinR|=w|hD(?7pWB)GTw&hlmk@R-%t!={j~n6@#P;L=O=bL7ydv$DP>|a(?f5a-iwNEkngP zfz*RAwA-1qUq&&%S>8hypj5-{I@4S>OANITQ*{D8_%H~3#69aB(I)rkX`MNS=QBP? zNv@ZPQCRgm!pY^RH(kysfGG89T1OtBk^zSm>+snp9SSD&Dx1%#Sc*_wW1v0AXXjeI_B%QF@l(0=RO2 zu(tzRU^)xQLBAeibgNiSu&Y*HkKR~#tZHfy05DT|E(Te{_r%4q_m=>4T;E^cXpYBG zoBjU)lGH){sNt4(o9|randwr?#RiKaWv7UM8Y>-QF!FLsveB321p{Ki(SghcO20*q zppnHexa>z$2wAM=Vyana+|pB&H1FL(dnuMuyLp$&Y7lx~rQMMVm}Rjh5pxvbLE(wg z!AfhaO2?*9qUX_+3yQrQdJvm>TT1-S+36b&!Hjk%L1-PzHv_DV+`$S?D+I|b><&;| zc|c9zyW+b_R4t{aUzAZzJXpdNZ?OFKi$=FmlQL4-WrpQt?NPZ>lb_$TuMFL@S;{Ot zdo?>>`2ji}2J_d^f`F|-p%4VG>u}3lM@#dUF7%Zva94N19ftn^TZ-g(jE?!Zcd4kr(jAQ8q;2GAwvg$Q9 zwGr?eqOX}q67AA7XyesmVcgyi%&T0^EAuI%P2rV;O>*PJE{yUx;$%i6)aC^Wdcm03 z&vYGt-fHF+R{F7~OXg5+{&On|v)VZ42OfxCcQJJ@5W$6;yxiUw-+XnP4Cd%ADj>DD zEB^pygwoYorW$cP8PzkUev=A{{G%4O&bg`Y7qI}#6Aojzl{)Bx$=)VwaH=iCbm}-> zQ5@XrP5RsBnH;6U)eXYsq6X!3v5>(N(bvLbiH|XBDbx~*@p>^9vb;*ETXhA7Yo9{L zqn}e*c!W7i^)Q{2Jsd6PJ|MO*95(lkfZ%_~>7mthKM++le7ra3tm|aV>H~^pVX@-Z zET{n6PGaD7##a-UL(LM*$Z@k(?JmW)PHXYvR}TkU{-so*A1*z-O-GVARH=BS%uB+r z)3iv~3_MXTHXn&}djzg{23cCn*WCaEkaUfyL|jueXUYl!X! znV+<5muuc7qmzvNqAvc)NZQJ)nT_+%@fRbLtz24g`xi0`O?8!-1IFbB2K088z|v~3 za_MavyneFhTZESI`lUw$E9IFq&k&g4vJM*b>O|<(ZH5tQ?#*<*t-ps_jl8o`5( zAmprT=P)Cdz2i3n*72IDS<)C-i`aYX-XOsJQvtEZXp;?{c$aPi&pPoF7r(?WCgw5L z3F8z;B21KYsbRs6?aK5HRPJQIIW_Keg}yy8|F6OKM` zZq0Uc6zn<;GQM#Y-8b}2w>dM8@|;xQdb(}_Lklp7_u@FQIuUK=kCa-9&X^Br(`QB`#B=P7iTr$|FURjxe1^eNCmr;f&BW^AuRFwiN&`g=;dT``D<2yd%W@28ii zN!1P){>9wiwtUOc0zb?i*ldL3dX%(OP*Sf6@~M2jvX0}f1)5A;*K+X@jM5P zurB;s;PyF#UVKfq9DcPIl+?ylv#~M4IOLdZ_x`3eUU74PpzF&s%ekej58v)@!5h<0az_S}8MtaW=2^**KfX`;fe)_z z5xy>E6scw&s3?!=Ese)9B5Cwmx|tJD?K9{Fg|wE-G5L?-C7!uKA5-8XFVk}+O9ZKP z(w`A%4g|wUVz*nE3)OL%qaBuRUDwZ52b1qAp|YQ8^SO94e~HvA=QR6A02l8qGvF^+ zH0Qrhcxb9~(>R;yUDA}gAUIjAZ=_+gaql)e#hdhBqG&}$#P@SC`JT`>+;Nz=n{+je*n2?eWo)@ zHJ_`@St{>|ogX4uVWv6f>n^Ka^XI5g%Y7qnCL`%^c$m>}!9whFDz`_1#gT5mNA1fX z4X;wzTowm0(wM1V8DQHyvnmu^DAvz0>2mVqgc^mczY zG5F6>X;rqfT9~dbUq&3u)_tJg4RyG#+4DRzdtkFK=`R;=i@4wLVmVERiiw61=d`_c z)v}%jY(pk4leFh{cYsyh;_JL@8PTVYd7aJi=5IH{u_?EgxMxK=^^QJXsyj-erP8J? z@KiZXyv;MI^yAQ(jGq$&9CkKmQ+$18y-jm2ZOqKv_T2vfo0NQCiB6Mfjw>wX*Ruyb zj%DyhbB{uv22;iPnVERkNcxah*{Ni8SIn(z>rvd=4dUi1mL=r4Spq3)E6{6ip?7|Z z^lW@MCzDQya&a0;5aa&<0cUakBLwcxYmMEPrsr)E>Mc6oQsuUS-}YogUrk2w?glkQ z4^7JqCHIQDK4V~)6fM=vCRuYd_36YpKG>I5eCDv>-$=o1>Y~5t0sPog<8gYBX65>I zn~psfHLpR;a!2DGXgkZc?&7<7=H;%Qc*J^3a$8vA#QDpFUUp08(~UC~nfH$o)$=RS zr#GKOE+?pTyu|9~h~?zy@PEH(U^G6xVJbB8Vp$>Za2&G9#Jo-kevhe-M$0kZyj=PQ zUZHLO05Sd}bIlUA74ICmOv>hIKO*;|%an$mQGU0~Nu&!StzF1>xdJkNh!66Plo z*thtO(e*sUuJ`CV!RYcCBz@~%i{$>(7q3*@wN|n{W;jKciNs~=)%89|26P_)zgX&E zn}6md4d3)~+FW~ViQ@qV@40XGjeJ-^Y=7R6E zO4KxtT9=h^>GIdwR(;UU5mok%9+&YxuhG6sm+ECQLJ`05@%)*%*^BofNVdIx^X2Pu z;Z06I6Fk3JymJqyrf~7ZAo9NQ{{TeLdX9YvdhzId2k{O1TP|ss_Sc#A^xwb3UX5n1 zyZRZ0OmTNNE^mI79D0cH(k;cHPZmW1ljw7|Ihd04j1hYipW=N@OT@f--=nCkzve&0 zdiaK7kF9z=`SibCW^w9H{=Q>nZSW6XJ|aKEx2F2agj%um;%Z#H{1txQo@dw6y0`1w z088FIHu#PD914E%e=^6#zfM2(A6q=7dUMy->zw+J!GH2!%hzm8Vcf0XL^(YVhmTj_ ze}`{_#K7XC`%};G=hx4sD+KX7d}DL}+5ij#0RRF30{{R35SojOrBRthP*0(S6db+%n zksh+0?7+X(Mjh+dv9>!PMDEJAidU7U7}|Ly14`A+y${hZv!brXp3)!2n`rdx^erI% z2ulei#P`1E4NFHacT7T_%s+QCS@J~86M%Ad9bv%fNnv@heW{f4tQE4K@CGa45BP2`=_=48q!HXa(eoV=D2F2@vi z_zG0bvHLUz#3a(ms#XP)!t=u|N-P8CXurCVWrPqxMi;(Mtd+Hs$G6b!x2wshQ}>~t z1_hc{1B8o??=YHV&Xw9o_N?J=;DWs^-7FMF2p&nQz|kBntdKzNQ3#9fpw*~!tJiSu zir3Vu&xigatKOXbeyf<$^TVbhog9!u2sTZ^P;1=EHA%S6CQy0WZL)cmVJ8qq7&`v| zF3IC7uvekMvcvoLx-)=}dacGVM;AIvpx|w?=(5IbFvNbp_~>|cA@u?iOQE3|d%G(f zOf#8_18}%VB1@?RN$Cu7$M=_~JB;b$X|p5YlO@r>rYdiy0%JIgPtOPsA#8nau3gO1ZM&;=VopxND{wxSi% zo)<>b>(4y1&vJHH7obiYR@2I_O#J#Cj&Bp<#w8OVf2`R{Ga{{ZIIhCUCq z7NmkoAOHiqU!);0D5X%hmOwtGmZ8W>RUwAWzl8{i6||FL!ii4f4~wGg$*pk>;vj+u zDu4@oT9GDAwlTGXx>XnH3t9%ZwRz@1B#}%Rhz+ZGv1l?KMY|`$Kp;S9q5{$^_bNzBs1`~H0-=T`MS77UO+`_!s1TH15&{V^6ct2}7Wx$s z5)cRo_~QM)?|pA_z68RuXLfdWcIS6?XFMv}saJMr;tSbh%O;7X-sw6V3JKExE`h&f zAOA;H`)rc9rT9NnoE}k4Z*M++aMF*n_I&&8GaC`F?Y9R2TA+~ylfs0Gx{ss2+$Z*q z%23&H!;&lVH>kDo3jF`vR`KGt`zb(XABWq%tX(&9_psCHoB3jM^WStQ2#>_`^T+42 z%{-0z>6+7F42pw^{}-pr=R%4fN!d?0wRB&22$Eb2_FsG&qEY+AUQ}L>`xx`tUD#n{ zZTn_kEB;|1H|XvObGda%aZ#?AZ>5(O-0ZzgK&$huvybl6Zq`JYm1RW>vA(71fAv1! zad=Ia^y*z`>&qvR>hiD0&dGiXSOEt=o>2bwBmUBTYY3d3mCcpdYqakU&~F0m4nED` z@r>inr+;x!;eKkr?T)_mg^=}8#MtHXLhjJ5)<+Os(!poD6Xi!4uT6PNUWz3C^S67* z5?{;VyqJ<@^+I+0>ag#USZLqRQ9rsoK&EB8yvaaC&8&K|@_PhrVyy-xP8G>j*Igh&n^< zZ}TAh;lLyKm!ZS6kJC&PPd-bzi_e|b-<@8%g8Z~cQ3rnXuHwki+{a)D$WjDDfB9HqyFZLDNpjTMJ5Vu{x~^Zz|`H(R;Y3lkV8{72#*AsY@-1{+{>J{b*Cis(B*cJ>1+pb^Nx>lhpD4 zs~XzAgXCaB%tf=&Jeke|GM(Zv3p-tv%hV?#}u6T}oVn6-T$F53YiV@qu}#1NPt`@dkl9E+ie)$(SswcFtk%m9@1BjLtL}ST z;hfyKRw3Jg2L6izntg7x2>T6~Z(|yGh~8b(ydZlK+n!E(a9i(7v{|QAas0G6rnBZ+ zF2$LY^Kn;?vB;%8f9kA%#cindmG=q+@4u=mN@PRYSOO{~E%Yw+>)G1n&v?so99y@; zD+`;~Z|=BoPYo3EDy`j+srHwCcQ*P0cbvq7;3ccvTfTR=1ESts)c^O93;$7))AN#l z3w{*wY|ZP&?r<@;W2yzBq1U6wW)Wtu>~8lQLnBq-D!Rl_RLMp zG+)n(*9CZ1HnBaK{s`6nvG^1*RIGycXd4wA*#@tVzk0I-`ZW9Hz3;6-;MLciTM`_Y zFYL_Z2RXinUN(eD(5a19eKq%??Bpouc)BR{>5`N8J}Y+id6f4wP!-;K`SOYTfs0oq zP4)i~7EVrEjN@I#VeDI*YD-$qTzWOV`7Tj4W+s_KKdk7`TG9l}d1?D^3ocC9@lDh( z;8l~+_iI5Lej>gaH=j}4s;`f|I20d{)-G}O-2@A%e6_7RMBzy`=URN zjUcph2gt1tKK-kEK|*%b#|9i{S*n*qiu$H}MZtpL5Y$WTjB=?P`V{1~jY_V`@YNC$ zI3opBPQ8P_>G&wbX(y*bxRI3Ejeq*XtY81~;NCk%>=YjVK|&VXs4L!I*nVv53%Y{3pLkAD zujsUR8b9hw*}INq0rFLn9)FdMp1c#|;q%&K0eU-E&YIgypf<1*#%3sWYmoy!?58IH zbacX=W$Z6Uh37^$-W;AbJ59_wJWJaNcc>PPe_p}I@-~&);r<%fS9GFJJzI*r4t~@Y zTsAckZ%G=!o^evoH9oFh|aTd(NQJNm3NFA7?CzT$l-`y;h_ zN4z1Q#~{ZeeY`aKbBv{QrU{eQly zZjc34@5`jg0OML7z-bFEA+=$jKB(nV&*#k#6#esZs(-Khii3mNZvZo3Y184b*(7u> zR%esdGH+|TZwf-y;;$DjJ0(>`UDi3BYn7j&9{C=`zZ_Brh8S`JM z_dEY-*m?0UER1z#| zPmFu-Shq5$G?eID@uRL6ma8sZ;fnVJ-etL!%w7?p0*?cuoNGg@GWxr@0ws2MKM6A6i_%qZIDE7 zq$K#3vO4*C@IxHjh-3qne%;culHL+LEjWwWI1yM!5}i1iZeRz=w|gtp=ocrF$55sr zM9IorB+P_+(quLF$_SoacwQNi!#Q3g$DpcJiu-p*dYN%s_n;8`SPFRj2=uuf;@36ppoR|qoQ^%-C@0n z(`sb7lrpyDBjy_D2tayWCv03p(v<2iO;z1WFi+dG0-omon&MmJHxcKuIT7R=7$1Q2 zf#1_sqm-R;{elUbtR12@PfV`8H1Y`EUAI69x92$M1|B){4lGDq;j#XnRVhjSHwhw+KE<9XYZ}w*o??%7PrFKamwUy!%`>Y0D7&h)(k%YX*#^ zA#R_WZKzbUF^G}G{AHHzFec1PH?^BRNMdV?;;wA2fEZ<#-N}tCJ4lk58f9))crjEg zLN+o}z!$Gpyu~gRh$q-ebdhEKa6~(hVj`#Rgzub7pvCcR#gO)+D^rnS%^-qnb{dx_7}@BfpveBFyKh>x}vy1 ze<_86O8N7Je*r|&g(1RfCM~n;pzgkZU+J0mr-sIf-vFr@xa7-Tf5j#_6Z~44J^M z@^)3(uFZ3oV;kIlY|M8_1{|D|5-L!=(<6X4MkKUPQ4JffIWM;?jjelAUSh`$Uwov> zjdIz{E^)83f`?)ZQW}K?3o@pMf|Cm7FysC`MguG5jsvEV(^|Ys21Zxr7MWEcByqdE zib#Z9pdn#90fSJE? z@3Ph`eRdaCwY{4KbDC?%_Z?l&LI$LTpVbK&^nXkY4cg^>3=qFtg|XjS>02c+kYgMZ z$XbQXkE*?Kbl!?SMoJlP$hu6>*tfe0ZmL2yC%OsDnZvW7ups+gqX=yPLxyEGXMSlO z0j?*`H^_?$o+3BhXhGc!ZTU=LpXldn)&{~Ap+mSeo`RjEs62%UY_@?I-&GEQlEo@b zu1215sS>5 zrCilhT&lv=(7#}n&RuWoeJj*>$_`xKlX`Z=!>r~L3cs~Hvc~e;LW)23*$RnFzl+uG zYjYmd_LE@84>Y6^IhF>+xhL-+m@06iU3raNzoN~X3N?$|KU6L(^c|Dac=0_`Pot;Y z2`CW!sWB;mNRB7xN9$VQkU^2GvfRoTZgT80o z&>K@#9lrsJ)2u95pjW5@kPM_+wB9lE?s;dBYp4AsH*gyk1Px&~?rE3~O`2m2O-uNYXZuU_eO>j$Dd9mtC4Y=LtN&o%I6g}@3T-N z*A33c{m?FLRQXv?REwQn{;(@I^>_lDx05;M+o?upNQ@)|w~Tq4y1!{L5P2zP7R?VFpbw22kCeZ;cytY*9IA~`$AA2e#NRq3p@@@|1y_3O$}sg+Lx zb8Zk1JL~ug)sK_3^a4l^`t!8JVL|HWL4iOL{l{V(HCl4U7T||((4I^qe%jV{6u9=) zCB;ggQ&v16cRa!TWlCdd)umXXJYepI#_;@$L)>mu6$hwtN7N)as!C)H>@<>vF`hVC zs_Nkp!=Ycetq#ZAXqr5nsIg0T?=#FS4;S?g8ww>9v16`srNt;h2Cf@+laAu(@@Y9@ z4{_}11Lsmd=^*+%T&3$cr?B~Y0dg285=0{Xt#FWkTy@;KBsT2m8@K1)^;hxM^0j_= z`Gs?MEr#!0i=r<(j*7m#b<>qucUmPd|+#^G4~4b5VscF!+%8PSFM4Ior;@xF2VAj=5u;t(99s zxQ-WwRH{@8{>t1v_TJejm(-?iN_yFly(A7?>W}H*Np#981Y2uQ6t#V6-!KX7`~jv7 zuQ;rX_Y+4}d~rxY&CazQi%{P~Y16YaCcU(&nRVe!Xj$)b`9}t>E5owx+kB=DQv*E} z&)$Fj{;j>%;{w!&_DU}BSz>g(M{B0(f?>@f`RsIb)VruE%s1~T;^Vc!YNCH=&^eLGsQj{mNF%D-Ext@chzE78#?v%;j(ennmHTthwXV>X0%dIUzn_D@SBuVtYyZ%t)N1Hs>rf-sMn~WL14N#` zv@RLnofE#frDIe<@O-6L8psZ3Ex3L@vkYISF_{X@mWJE#FJP%X^;o4gI^HsC_v;D) zo;uleJ!T2I`-+4t_v4p59>z7V*A-IOiR$c%rdWX%-&Js~weEw{qrF#DK>52d5P=pG_~OX3V;%uYiV6TvY( z(7(*DaMUKh?%MT=4>OOFP`hB;&Q9G7a_(@tmI~dk)Ob}>moeGZyeP&ejMG_AWQ2gX z5rW1M1pmZApP5TrcQ5ktk4Rbzf8zZOSeP8-+f!)Cc>{|0#*Dr zctMg2D(kVK8r4RD%JR9QkYTY_Ii@#E#b@zFA0w09QlgL2II+OMgVP0$&vbNa3Q03OO&HBx z2K4Mz`Gn*xkHq8h$n)e2QvpjP0k?%!Iotje6UhwsB?(_c+iR1GMuA^rZeNV0F5soG z>1eG{IOKU5-^gmLI7RrHKH2RJ)O0Ll=%{ODBnE@c*)Z&ie76w74jaP+{mRl_8|$_k zX`ggs$xhlCMvMWc6#7Ec?Ge!hBfQ=aYS@hY{EHw97Cdzuy*nTG+A|yGHx97nGn4#C z#eA9kW@_L_#t^c$kr-#hQsYsE)5_>rXLW2DJJbwd_poqQ`^?Hn5c0F2tcHyNO8NYD zrH=4^2ZB$d#(r=78YG*J`M7&(*AF`*QqlMshVO&S0n=JN);w$;^pqJp&BQMnrQ}$S zoY?pc7&zRfdq(iox7`ePhPD=8Gw~YFDj%<~3{(*f={FM6gjwWE-&BYb$(&lN5QUV| znMITP?1N1B2@SF{b}8<(+kS#tw`K0cJB3$;c3{E1{zqS?nsLFA>ywf1`i~riAN$Bv zT23{`_GFqp>Qki&@Vk{k#!|Q+j9Ry-JR>Lf6_`H6ibu1|jOxeC)*2_m2LmP`kgd+U z)Rn*%v||NpOr0mbh!FAQ?wgDK!6tR{n7G#>W8J1V>>c7ARE3v_k9R|)60hU6{=K5GateA(CGbbM)BH58k_GI#bj;7ZBk5mwxC zv7qVQi->*5_w~!y*CPx#ebAHncyc6(gR4c4Tpc8ybLmH!vh!qKNEib6xS$fk0ExT| zYn*``wFfrRhEjeUWb4Ekj5978ZHH{DmPR#l#AZaBUbrdgIXaNfgoMLTVo`Fg85X(mF_TfaB)@8_SUJ#d9O6zH8LSTtoMJ{R)+kup?NqF}Z z*W}cisit(sxc@v4It{fg#!g}a!4FAq&J}x2YtJ(5&7=lnw&sL}!_W4Be*+laZY6us zIV>C-pVIh)PyGi?{STx1A3h}tv%IW`f`%kK&a1az!&CpEQ=EU$DMnimE$>C31~H$i zbB$u-?bmFvV^1>(9Odu1r9Xm^KG$-kHe)X3{7>gfE|mZbi6DvcV#Y6hR|?ijr)g z%$CkBUgR=cWXUah?wsg3>r8y#rf-ZGt3lURoti(Pse}RP*W;_o)9GGp{LPOl!WE`2 z?!qlb7ZanOvtIl$n?Ga%>Etga+p0?IbvHmsvnB$Xrp+=RmOm+C4A*~@A^WMSS-Kim zkGn#U$^YF2YtA7eczPMrlO9e}_sMtBNPuM7u^OGf6_#jm;b&Gvty6^Z&(Z`)Bb4O) zo?J^YZc!ItBtrW}hF1Al z!j7Mj?~u#BnI*U6xi}CKg5OBRV`*RTL$j~z?CMI-M)qFM}7<0@j5TAQ^qp3|`Hm3^Ir>xXOo|w%okSm{d zuHRku@8slHR_2G``O{L71q>P@UG>L04dSCp)1J5!cJJ={WFh2qonEdb-*N+@NUttj z;#Vny%^F+U3+$B59XH+wqH=8;ye{YF*5zsWZq)%LVLlc!*A#rbYb|myMyw5bBQ2ku zCE42M`_m#9S$6J=bCH57N{VFDi>%4U8Cv6FgK{KI*&Z?aCADY$##?Q@r^)BLir84w zkPp7kRxl^U1qDt4Arldj9?xUMRa5O`hX8MtbdW+BdfO;%8#vWq+*`$)TVhqwR10Zp z(YXfIYhoW#Vb7Z-{1H88}`l8}~&3O_W_FCj^DDf)(tB zu>srQii5^KU@x=C<)F0DX|~9g{mTOCSEB#=)bnP}E0?x)7TDmM!5_pe5I?aDY1m6b zE^x9RO7ODv9;+-VIT2Vj5VK}5p(wk@4rjtOGxwdQeuFh6y=)F?pdS1RFp-1iAp*N- zRaI|+JX91b>fow5(_n^>keXAL{&{Da84$_Zx;I2|&3Dy`b^g_aFcUzJt&U7r5e)#W zOmncBty0}^a`4b{BTMDeX=dnz#+-XNPJ>)lHs5KKH;w_M*7Gy*fm@Aw&M31r1LKQA zjM=`sGa_%M%c5{9)6m_{+93~-S)~>=vmZ^2n3|oNx1x94nuP0yxZCTKguymO>y)V< zyb>!wjiHseqOX|YPpTO7f)2+)m9Dm=Qtg&fC&FNPw(bM#fP;36W|eOEd=PXTB6^%A z{;?~3o7xJdDYKnV)L4?@6i4dG_d{T$a(@=mtfe z)Zo9YK-3QBpVnP`?gyr{RMf}$FvBOj5@7@8LnFAw7Bi| z=J^shSw&Iy)JOthn{#+Yq`#FS?c1@Y8uC?p(pSDBPo7p+9W?R9Pb~duThvDC)zb|= z{o;}q3TYxXC&q=JHqRRG$ZZ3H291ORyTmP#vvJE2h-e&F#!h^HB0<6D-Na<0?GFt) zt(m(mUr&0@_Q~3%Y?wb2v<>2#d=k07D}7gGJ1T-Axe!iT`tfyYOI+=qvP9FaV))k> z*?HrvI)DBy@xAQ!@~BchFGqKSF{(;90K>)|>NyHHe7ixBEIet_rTBGcHSQbM#fD~J z)duu2W+fZrdN{uD-i`{qZhBFE*+C=TV=A|wPid%B!W@kf_LKzV33Z*jxif4Mbhl|& zw>;qco?}!ACnhX~V{s)t+&#zWn68cvCY%V4B@E^Xi^raKbcXf>0p-39ezVkP3bsV& zjT3-7JWsFebht22T1S}t(l&^JIECEAMc98H_shUX3Kv~)jjmv;Q$&&EmSO|{MoiSu zD~;~i#vMIWXl{ehHvG{k+hn1EhgKc$+d^U4dm1y(R?M}eb35GCsTt?V!>g!|BHO26 z+v;}WH+~FjAC61zX*>n1eg4H-w?n^An%ga8rVJ>O7q_(cJ9Tl_d$5=8tjLZS_A{N6 z;lwQt^gH}cfIc-*S~j1Ps4e*@LUC5$ux{390w=T?nj3v$Wh42Er`T z58DJjgBwltH>Gc#40rpn6-#Pdd;Bctno%?c*~FM-y^!f|X>v#&@as0l<@ZEuTgZmx z2K=h%FrVZ6RQ5RznnH463f~DHNlKRI_jsKQ-OsPIxnq6}R49AA$Ut|8l~AcE?uD<{JFG=wXd6G?2M)zWqp@^Vw4Z$hstk?YNI9IUEn< zT&x{To|&_km|6K;B>E+ODA7%BTMl|Via4R>`eoBeVOw{CsrMOBHfkZcp}P1@JhdzB z5qFot^b+sI2R@&_jS`=~5KNdY+LOOfCY-zQ2q&4x@2+C_hL+Z^u^pz+e0!qLCBI|b z5Vf!O<)@y&2<_kA9Rc_=MR%OPtb3GB&EeWt_-%Pckn@0*~C<1eHbOkokAMk zg;3Uo?FGyPqIBA-)`|XY#(FiDgybHM!lViH==T`UB-VJs7HZ@l%x&8FUB0GquydS_ z(`0ddeawxVPHfibgo&(%w4e{{pGdI8c1-v3a)GD9E5$aNFU=AcYP%~NX&AW+8PPLa z6G&Zg`l$-uYvY!w<+F)VB^zNB7o>38!evF?M^`G`XL=G3FLYfxDiL{W_Oud~f8a9J zs=trAR3+J3xpT{GGj?byO=R#}T!8qL8*be$?!d4vQcpUEx~22V%CNXIHwCN>CkWf9 zOpo8{)|K>28w6%}z3x0a1oY4N){qN^bOoVPO9qNz(o;=7=y zNPtNgq}S5rkT$F#bh#wsxVt&X%6 z-c{z#(xZ;r4J|_PIyK*W@1dUmoAfaEb$W(qr}ByNu+|0iIGqJ<%9S~^L=9SsJX^tU z6q)AqRwM3uMzYT)B4vNn#jZWxzI|{Ov!@f7s;zSO5V!7R&^mro&&2uYse$5n((4C@ z&hCn1z)`mZJuOhIAcpDD*3p5;-=w;KTl6qPB|%n3Q8$M7Tg0;Z z#!n`wR-wJADh=^2rhQa2rJ;1Z==gUbABtPS9zxY*6``FjG1xGS?D@<0{o;#U!RsCI z(Kj#fy^w9;4Sd2oc5S&W3x_!dd;S|x%-I*$#g{l`KC@z!8XvK#aey7sL#H?c{k5tU zToo>vL89%4MV=5r&8-1W*PSP-Bvb8jTx(oq%M?#kLc@uplK% zh=D!}zwTUanfL3gmYeu~z9nw?X;W-b(oxRghu?r}l?TA%8H%wVqO#;5U6KOSul;hn znjuXMz|7I}U(Bj&QRAo4!Ht7uLB-ZzNc>_nE;kTuqjz6y(Znq`;!9pe)eEVVI>$L- zzWs99@!<819`8YfonM6B@JJ(iXLME|r=yq#RcJ%5v1CKkqdFJ3(Hup3TGc=LxXqNJ zqB2@$*S~Z%>)57TjS9FeMr=m%y;=?c`fUdJw3yq89g^VLEk2Nf%+|0<8eBKpK#x9d z&5m>nHR-dKb+S4n?;6Q4Gxa8=*pavlbaMD30;ClpAkoOtO@VNck>#oQb#m9UMpj^a z%ncR302h2^!_r#d#$@`f^%<;!!sJF^=Q&!AWMZxZaIAaiyY4@<3ii zj75%3Xl~xg4alzmxuD&d+3!I*+jmX44Shp{SanYoROf>FC^?i zM-(!KHJ&A>t$iOzj(QMV#~1FZp(P+K*JZ5DMKzTYN!7?wJ*xjdzq(R;bOt0Xq`Lfsgbi#T69NZf`C9nys+_ zQs(P~!`?;c>a)y$bVm9`Z*%-S8_W+|T%FX+@o(`&=hb`{Ci~807#K|m#z}hzm71rR zx~T+y_7cU|B;6|s=RB#WP(qyk#`14(IhPPTqS+^AJGO~7wkqw@Odb~e4DDd#4Hl3D zMe{`O zY~R&FPtQPfZW&&KTSR3u^=>%7hxZkOt&f9_A#7#d$|W#=cZ)nn&ZR2R&{ACs>SXTBV%Y1_Nzdl8PIL4Hpwh) z7)I6k47#3M&hgM6wCxk3nyHacTQR7B7X80U73i$FX$wdliT}nzBG|~}j z-B$b#GTmLlrP}O`{X*=gUUIZBg?dhq1A3MfXTqOv`=Y3GhSeuA!tBg&7{~9o^15eu z`YNeY#R&F8$lw}4N0Xnkk@95QI*CbDJ?S^q`G$7}9I@1I+S``PE5A0g&I;WjeUc}w zDVgdA{ItIZ%bEe?yck#$G_EW34F7W;M^%O@~Axg2pP|n|^zma=QK(R4aLF_rz)=cy3}97m_UXdc}2}*!b2z zkupli>w-9MKo)W)#YHe`B#oLWNV-GcJ5$R+>)H;d6j#xlYp*|a5&Uz<%pf)mkgTLA zu|y>M89{st_t$Y9Cst9;xzKEpm&WD7(pBc@!|b76Go%|kb&y5qlgHyLC(y)8(Q?N>Lw7JOV6Kxx7d%- zx-opR5~;5e?&ULbUWt9Spxp&c777)+95}Z4r|iytdREC<;i$^I6(U3(DE4`B;S9~MZ%?Tw=`(qU(mOFD%iBvU z5D)cbig!uYcC~%X7QZg2zyXhOM^q>@Tg9*!HiN`%H3LH4Hk#an@Ar=L&vX*1+&}wL z+;gOjss1eE-t*41Ptoz)yWZVK{GxqA`G(;4O5(N!5N-L_aY`I2b8@W^>msWQ#?FB) z)Q~Z8B8?pfm6dG!F2T{-=w|dXWWTN=KB^vcNbNX~Ao&p1o`0dtU7t!{*Hd@Qsx13h zx{viV;fp-ga%aRyar<4&g|`~zsgBgA-W*$sxPPnwcz!+Pbnq_&M|#HniS?PYT@7=Ua`VC zuM3J`aQ654-wczNo5TO zKpM!AKgP#4nh&T4dXz4c7?(ZF?nrj%OiG;p!f$|jBn{HQ*q)ii0hv{m*uJel+toz> zKQQn=WROD)pu|Q4*?ZOU9*fqqvA|iTBGlqPK#&8#1_TMjc9RcQD)W*r?lc@z4web` zDuW5Xf7o`!T}l?K?4PGK7=GaNH6AGUc@83xfgjN1)4Fm#+CY1I+Z0eeybg^^Na5o| z4oW+pzDGbq!TPoONo-01p^`9ji?obCbboF>gw+#G5etDyVB74k%S{F|aEWtsAtBo8 z>gpPd5DNIuMJ?ds;sTI-wo|h4r(wgH zR!JxBdS%y1$iScYJ_iL=>RE$GwX;2@NbPf^sU!U9U)^wfHUUAy`WwfHSOS4?#9b1B zo(9mZcvw_2UnUR;6me1K1tBMee3%7$_HJw@g4EgnQo^RPp61H>QPs#%4Y*0qkp0!{ zt1OMoKlRz$|Bqb^5{V?BXXAmc1qMePi8z&PI4bA3J*Qs*R^qs0habgRMrx~2wnUhu zyj&o}?`5D+d->&kd6>1_ty(XE!Hb||azsLH!EqpeNnr21JiMg%H-M1B#RXV)Q{hT% z2C4G|Gh{}^26=fwY({}4APFumRcZm)zNA{Q* z_`b5h=0p(ee?$S}l7LsG^I(%)wtq*Z1OuY8?(JX<12R1!x*LIEqHKtkP#{zs4{s}gJypY!rg3`vI>k=rO2LNkNq z-dB>1?to$Wq@+K8{zG6~fwshfLGLJj5dC4&d1DhkUm&u0Rqa*ffk%C9Br&4A|FZs| z86#%lvfN@?$!VyJ`sOJlKU}r1S)fT2JS(@{#?jUrNejt#J>o(`p-^`^PL!C$%ZU@M zVDlAUA`d0};qZUNPVi+{!0zn-SS%?iW$R(4yV8#G#rjfNJs_p0u=~g_D}a_;|W^{|Bnv1)a^@keR`+` ze`Mxnk1m<5MbE;(_SSOsq#)HE!Vw?XcR(|d-ds{FOK$CDQH-g`KaHpwpdHu)OXj?@ z;V=rA-TosfN#M`!Y*>1}N?w@>I#}AOEnVE)IX;>o3+2lcaPSQWJ?ir|U2c;Yys9wQ z42Q#!#4b9JkF9JV(5-}o6d*Ty%n(StKwdW43$UeCs_V_}*FOYD&yrFFfVhNiFE4xh zQYdwqBUPtt%TH?bx){{3_U8h6&dbXSr_QVJ!IrI-wYa1|0&Dh&!beuOpP5#rwW|n_ zCIo5foD`n2uZ|psiA5W{w|WrGJP(Nv3Da9aU3s{8G@8T`Ox3Bi(7(@2f@&}n`alSF zze2TnK~T_tB(3X1{f{pnYI=Hsx;pbuKba?b;kZ|MjhpL7br{kICysc#Qb1zeULTO^ zp-V$__6T;O6NvYu>vNdlAy-9-=?7pmiL4+^EYdGC@RnZa=`C5vW>hkUBl#Tf@K7C& z7}<;m?U%N4IyDXEPQiz;vlupj$|fM(u+aXI_k3a4J>*~=jsTJZvKd1Xzo-291pQy; zs``)A|F5F`k%>=pNeL7=2x>dxl@hxTuhEmglf0X?;23f%W&1#%B1#b`;Np{KXSO`o zBi5Xn$%RJ#+_PL2N8O$_u%ZnL@nT$wV z=ws_8(oKqiURu|c01A28TX94Ac;KXAfmJNhJRzRM@jqQFU{&=TX)w*%Vq^C!P9UU* zA^{HRp{D0xaR*QOzbXWv7i(R+=4v1mZn!75dhlu{mq03oZFm}3vM7#nqk%TUtlK4> zNTFs&Jlotrdtt$%WqtaC{4=2OcGJ=(rgIA#jn1^<8BW|vqfh=MHft0dn=gNpSzX&D zZSAZ>uTL`W+b(w@=2F}Q?$5bho`&gqKr=Pib3n--=|TVWE&EyZZ@`v$`hhtq8$O@I zSfH>FZbQETsxLEA?C$qe_0^LSp*=`z%^XS13|I&nUCAIOakn&KM_qY{Y?&Djr@*sK zm6A8Tt}4C3{|zS&3Z&;S;BY;eDB6JW)fuM^@-C}db-f3R-0r#KXvbV1yRVy%X4~_&d{?2@W z5`l{PIf|45{VjYH?pz;&S)>(fU+2?ys|91%;j=l|R7Y9!_OluwXPxbg2 z<2<`GRxAO8h3JZ+A0ZHQHRi;w(=@i|eB;QMPG?*+^$V`+gk%;^U zE~N_>FGukl@iicXud^@4M@akrl;pHTVx1x$RofU zlJwAPk;FSmPY2&C@lH};xOvXvk}1MqILpf(7vDRr!H{;;xTnu|X~0WQ>|wVtiQl1w!h5;llp}*5+w%rO^rX+D8$<#O z2k-++G!W-(rGjS)ykU3^E?k)H-r*N5G8Y<2x(Crb8z1H^IMc=H~# zeOR%6H|A-JYsOwVE-4cP|cH?ptJ{JW=z0=6qS zcEmmErXLUQeFj=m-6!n#7%hupNhwIn4L}(Im|~T?lvSchKH1u9+*3u29F!&l(S%7b z7G8&oKjKKB73(mex-v$Dx8*?K{{~~$0<%D4%RE(`_m7eK$`HvkUs7ad&5OdRekBdk z6k*K_vbH$^iNunGrSz^dnx%F7ots-QizIB{hhwoQ-i&Sbf*}BS!x1W4vpJ9g8HZ(~ zv&!c$=o9ZV?-Q{^R&PyRVNEBL@csVTJBs{q^^(S(su)!4 z12X{w5+F;?W+QyEtpvHTgl3Ieu~9)Tsd6wF?984l;7BBn$Fhlh;@cMzmk9kgV4H19 zByN28V~?w7>^FNjR2|%q&P{_NWv|Rw{V=Oo}i11^TJ7HQU|tM5^h+arJo=(&6VE%=9+|3Tr)+@dX#hA41d)b zpw4grBaKRccL~j@rO9DwB~qCo1wfl=)$#kP7-?4B1dK29TnI4cnj5DShcPjV>?V~d z9bW>Hn@uX_1A?1hutgHyb#LAkJ?j>HAYf&S%vmszU zFz#RyhB(;+R=mSHX#`~3Uv~Rn)QDJgES{{2hvFG4f^IX0P(u0;Le{L2$t;$1;aeFIZ+3?v2Lw23^OFUJsR9 zQ-Bs7>IAw=ng0Wf_UUEx>SR$d-L*^`?GqmsRb zsrLiIw$kdtw(il(uLIK);b=dRAx01kfEusYy_79D8ZH9512sV#qZP_IV8XZ3E26vE z%LjC-BV=kQoKHhIB3_-Be@=vHNO)S3n#6@A-Gj=3xHR}NJ`@h~G#{ESeG)o&-f;#F zCy;>VY-|37@*6Q2 z5_{5Mv8P7QSu{1LQV=La=s;lw?%U-$&>NI*QKCnIk==%9YCGswF9hw#h?$fSeOV-Z zU%ReacZ2UZ0YR@}7@4>82CEU!T~ysJLO;|{9lp>r2-OB&veBy`{7bAghU%wCvxnmz z+Y%=qfkQ&h1w~taj_O-FFD5f`Sn}FcD=n;=SrFjZV%C+&o);v9?ERFD1k0-id8Wn6 zBgt(az;lM!|2z`GjEn;i$N&ZJkuU}A6Gf)y5Pq(;RWWL=0z{>e+t z4eerW*%9}5Gopzem|N}f*^#iG(98z0QO#+Yd4m#(%xZCUCBt$<4ue^^V?C{q2rW5B z6qcUQ&~7L%k>G+v-_Cw}5Vw`8k%X1`Bk*Fd`H|p3^Wq0&(;(+b>&vA1dF~UO)%Au} z4MF4cGE+5B;**aGJs;2-;8qN#_>DPWl0~dzEL%o=OCkKA@LN)W<1TrMy3)B>u zZe6TEdN)jVfNlNFOA`wA^eJ0{T>5?eTnM@@-7vmz)GSp@O03=e`v)lYG+FT8hu&c9 z=VH)qf)T7)+p^owD&;xja!+S{HWP)eZIYUXKd#O&LU(#D2+?b}3QNrNreDarJqIAT zfu#vTNPF|7-+=G9jNgD+TDk#J_U;^KAI9A%Jnin+zTbe_DU_Yo zop~ONTSchb&$B)B-vA0*t?nF{P@J150N}J*|*r*P-j$V5E>=W4tKlU@(>0z=|*;{yYhvD zBv5%LrHe=wHU5Ov#Rwuk&ec zt1%A~kT^IAndtlM7_xsbLj(Zg>r>YZy(vR5wv&HkKH1$6hXe+98JSgwH6^S>g^`+~ zFSXX#9GHrVB5LpFP=b?1H)p0{q92_Pcpn<>8MNSd!E3|7}S+KRpirQNW@Q_E^n}p9K&LWvvX~K6#9)7&qGPe_8^6TqHA9A zJ3>~eOMGV;U^*eA6f&QqbiGOazOcx9s_7g_s5&(8?*0% zr8o7WVYoIr{B0Hz3uHvJ^0qmDcY#W$4$23FYD5W66vjLx9_49eY87u)HoLu}+2|ca zDcuAXgaJ)6-o|HMj}%J6Rn9gtPl((NLsW&P!!cO)J%zz6P3*MFE%U=Xuzx|MKmIaJ z)PpGYkNg5imUINA_{J7n<#XA+zd1Z{MaMlQxj1#W08$>YRY|)5o|paD)G;~&ZUb9_ zq=8dCUjAX?ZW6(^yv~b?l*g>B_gfVNXW&w3FT(5aAdq`d@cJ3K=@)0TFUfR zLQJAnCr3M$6LE6zVtV*0o$HJqY?3C1k4BX>&G4r;#1TcyIj4Kt+QPEIv~!H4K9QZ}Ax{U8bG zCNR}WE<6M{i&C|N@i?-qF-8qCWcwn^gQ`Rd6fTUB#HHy-01~Ih9lr#u=Pj)fm|O2a zf@%V9Z;CsjIRUf227GO+8<)=VaIQew0yA;yS7df=VOx2Gd6Z^=JxZV}UVs6Rz21&; zd(Zw_U2>Nw!%lrPj2+~QXD0B!6fM3qa-`nk0bqO-m9a^k#e#8IW0i1$dI=HimV5Cu z&i|xRU|MWv+zPMCG%(V8ED` z4MpBjz0q50G|kjvzZK^(dRX8bHM(-jkzfG04+Fq-I8Unt(<~a__A!%Ov@>K0e z?AJ=yuV4Qr(;2PCHb<(A|>gb4F@-d^dGN7Y7sfp@d9ZYoKo@)VVGFx z@woKL8mBN|P%8(NCYq*09hN!uAK=%8++G(l9xKaUScW!IFJsvIQK@F`#yM}uH$0hk zhg;}|3_IE$7Wqg;xTCmlg=jDAgwPb@aqBF1;?nQ+yY!;|*tn}59aCtG2m=Ld_Sn-A zXz^ww9;_TAsDhu`e|}#Nn}C;?(6~l7#2ym?*6h8~3!0Fp{eajvZanupXD(jtkwpQo8-=P2MxqA(|6&WvN$|9IB9X65@Kq!Zb>6(f>XV>IWhR* zm6|}XA;>sA4UTzk#nDuQ>yLS^VcJS5(FwXu1d;|+7*GUF&1ai>WAf!8bqVcVrgaam zpQW{fP#&t}Eg{K%oL-xQt1^e#!=PZ*{I_`?l$iiY}U-9`@gh zi;2f4gKz7bj!zTMH*Y7zRKge~w&EUC=u}rS8ndpmq|?sMtv=yl zh}Hc#D=3zqnSgmDz$0Ntc*9X$Tu++7N3$8FWmr$Jl>{rEi6y}|FnK$C6LN};kF9~$Jhh~h3O|*0{ zoelcsx9XUl>+bE-FJ*A34#)u&+|KfoQ>JqLDY^rt9k5tMNR0Ln7 zv-zn)u2jvuk(mhXwIYu&1LVpx{BZ+dM0D3Uhj%bd2U##2GVk3N4c2HOo837}uy4I(2dXu9Orh))+2~FW6g?9Iu1; zCjKyo?MJ7+e&7j0)G0Y&o%TYG4aCW`iJ^~kLAi493^RXkog81N>?KN#n&jCNYRx%u z5qv=1QDcZAwtRyq-KDM8E7huUM~}Yv(^aZ`^o`exBeMA|XYM z!C zoN*cK6`AtHx33+&E^0_zqdd4LUTE{lY;H%-n83B8swWwm;A7bUNwSXJr@El&vzj4Ppz1D7x5MqF zQwKpT`|GV7Alewhf=UDj(5D2N#81HSxjP!!f|@5{<`kp-IdvwdAN&^|rWI*8i>Ys= z!RhqZ&)%K~w4<=i;;4UvH#!G9LVUM$*cdvN+czW*(t9AoFD|e6QmBrJpozPsyBu{d zyojJ4LSgsS*EdHi?`Nh!7Z564yQA(iIHor}Z44cP(YYkffN`%#LpUS5DhwEj;Rn7(daUE)VCdH`9*zV2ZT!Moe=UR8c)SBgg0NC~&XgA7NA!9%2 zwJ?dRs+M&r?6Mq5s|shJtB0%SWZFR^jX1AHd#VJwKeWn;dPf zRfcY=nwZ5XTcs3=45xzg(Ep zsfnZVGe7l9c{SS!y-s8z-EIUpq?*9Vbm=e?h+zTlZTQ{Fb9z^Y6iYU>5Qht(#Niec z#83a%6)a_uSVbYNd{$!ewm0iH1F`)BH?up*(D@ChEZv2?CF2N}_yn@7P>D6<)1u0r zw-#Vhad|(kEe0tm!JPb#1`qbtq+~^wZIn>9g%Y2RuMeBYgai>J`k|o=QiV`&5vF&j^I4 zwJiJ60WQfeKNGC+X&BEphnU%Q}U^V+7m{2yI}JTf+raW_dzO?nZrk6Z?JDc zQa_<(N%-hxwWkXgud#ril@Z57*SBSElLW7-2s2d=-X9#ra`0Qdj@#_8D`6JgT%1QxwBI!1dOMYSJ!+%|MSa6Sh+PFJ9-*^Sg_Ja>D3#i_4~ zyPGkBvKQA>kx{G5u|!o8?~(oGgEwY}8rY>E&19wkqh@Btc?|^_3IH zkX}pvGuYztk|=pn{-p#3Z6>jgZ9#D!FK-6!6Xs%I48>ZHdUWNVQtl_#Y+N%qfw6BJ zg7|Xi=v4Ty9Wg|SzdKbd`LEJ=&j?fQu7$_J>|o#~7Cx9%=`@^k7I@j%jqyc0h?@~2 zP-_IfRjsjz)H?SoOMDV+bD;Kl0PQjt{n!u3feVMPiHxsm9wiLgC9fX(KsW|;xM$D& zW_fVROAdty$*6KDNZx7st)P25KCvB7cMlqY-n1JscUOVUWxhJd-Sa=FU=F2ELzQlCP3|r;LxZLfLByiO5?&KjOm@F43zo zHM!+9q>hQB~AIkU&I@Cu#PkvCv)5+;!;5x+!t%7R7 zy!RS6&9mdvK(xilggcasx2YWh7E=t3YJYHI9ecyaMZTF-O+I$(AoG=P7qU0){+Z{% zkSKC%XH-k?@tM)UFp-8Kuf5uH7*5~veO#)nwf;`e!ZROQCYs|#hL9Y^&NOlF??K&)7#l8)raaf~2egeUL^v`4r5-LM- zz>O2GOMK)+uX-=QJ)6!6a^k3f&}kabB+0E6D@64_G}Cb%IzA|&VKW}aF6t1FH3=cX z@>qY_+}f^CV;-8QNYtM2LCz{8^~abViz?W)smx%q^b75fWatP}p~W(Z>!lBpp`)4u zACXKD;kjZBm2B8nYwKVw?yxKb%i5|L>`Y}==ni8UUJjoQUFCyJ_|EWLs2Q0!vz1i$ssCW(xqwUD>9IdDkM)cJwviPxo!^J*brQPI*b3gPh_SZ#+ zbn8KJ{{Y&WM9I+P96{>fqA~FVO(Ay#GN$T}Eu={dy`^!}tuGVRT&*!z{s2DWE{DqS zEMt|2$`$?@)Sd7I%tPfsS(ZRbp~%z0YUZdahqaDN##kVUQhcf+a7_!<(oEn>ZIT?{ z0kSe%dn1I0 zX?nWxxQ<~JpUp9m9<-n0il-aUG!d`Z{XJ_5a9slb^>^Szk;L3!Um85`Hy6IHot>6x zi1Wt}-Ba*gj@w!4+aH=6#J@l{`_+OWkTe16G7d&SYGF3)#~9v(-HFRzsto+kuAqa< zM|wBL=;mz~8x`>Sz3N|cSY_6jr7W3(<&b4h>(=O&VG8VEWiwqse=H%V7c7=^h-Fbg zPzO-^-#!nnPI&zv0B(xC8Vijo!&+61;T40LD70WPHMYwq(_SBeSTT6MxzLD^10J5{ z{`RAmAU4FR#P`1KjXSoYwbXK-dQlC-je4ZNKat+=ZFf7T;J4Pm3kg$&>%QuCkf%+T zW;~mX)U<=J^3ItSKQL%EfAiLJ{~NZS2waH#i9%o5fkHJ|_RkQ*Z)+atb~s2B^a9<`ej=*H^5GiXcXnZbCeTsmJO6X<@+C4&buZQoAqkRno!S?f7MOL;RBeiG9dNc7K z>B>|(kx!b>PCgZYiDvqsW&u^DIRo9a?;!9~sO8ZVU5BHS<;P1Cr4q3^dG3M_y0GrB7T+*8Is#rk1;k)` z0#dGYYy#+E!caU!HbR?mMo()dQQYl5mX|2qTmAdBc*3O|poXnEUKRes79e!NUOhx8 zx=<6^=UD`txk`R@m=z~$ZGDUs=Seui%%Kot2iRgYf~^FecXupR4V?l@_@e0h<>y?g z|AnoL!e6P>gh@3AnsZ-~GFAp{jwhniD=bihRB_IfTlw(;5a zFck#5UqLm_ded?lq~THdu3utk9Hx#6xOkH*mpl6He7)t?Xl!6{!mjn}B;=Cdujm!m z?l#uJUN?!_ppS6c&WSZo5uMhP!2z#T->3cF17`}4JZ6*Z*O66+Wp0&c0TU=&TYCyv z4k!G8#Mb!=x>~{@6=)QdS?F|F9hV!0wzJnTx!^)q(mqz!s;G?2SF1p-6jqjjs>kd* zR3Ri~A|HX32QSc3DF&lVpsdhJ$F&S#yh7Ymp^t(2Wo2*s3J0<%U3Y78+7Z_shH#9m z1EgwVY>YJ;uZvcLe_Jw;WBupHec1pCliL#n^1s2}X=9+rVCqIj_*{5UjF$9T)aYt&^sKO$-6-9PuMLeW~{ zO?HVeA>)x@vAPRz6G%M(Xt6g7GY#Eh zTf)$PfVoRIwD)d!sG$ zYu2*P^!PzhTPDv|30@e}lcv1FGWm?k%+x=Jmp0A^lKtVm^%KCI51uj` zLjM7Lh7|PFLSVz(@=CMud5V3!X(*8oY#S}h*iKVkxDPd0W8hB&n6sb}=6y-$6>_u7 zbeTIbZFLm3c0Vsn;yo3baDPn1Lcq-s{i$Tpl-Bx#JR@mj62urTm8irqh@cBl6sTS( z=)UarnPR%03wmW2FM1rsv@N~3R3U!dgl*+*$i*2{N^>b0?JeHSHRM_c`AlG9GhL(U zvImxr1DwWCDlf}Vz*N{Mn$Vn*O(_UxVfGYd%gLrJl9-g<-EYlf4%HUfx+cR87bUV6YEy`r>9I1LPBKUK$z2I<9aFOjx*ZGd*iv`nT6qL=4^*O@$owyMA)!{;Z zzWS?5EF5|j4~#DcP!v)f<1YF(ex(ap%%!II-$2T|*fT!D8Y#?&r&3bE<;?%aOr9sI zrD}N0s993dQOdVISTA!V`vv@ubOBY4Q8_uPQ{+Hpswx?oHRe2C&GjFqvJxk5KD;zH zX0YwYWWunhNkRXr*lR~_JNtIpX#FsTdYiNScQT5v3&wA*rXIs!rn1-A65Kywy;PQVX_*6RF6E1Kgjv=SJxP%UEhNCDWqXq zk&{q8LI|-gO&0^g(%iQ%!DPPza2f9ID(G7Pun75zQHF}zfxq6*%(aWpP96s`p z$}NC?mSizr$J5p17^~1o$2q+l!+V)XdTb%bw?mH~uSUm@xumtj#~eV+%-)PQ8Gjji znsjh|;og3-@5Anc^*p{13dOq=gEo0X7Rw$w!g~Jc4TVYC{jf&%1RzCUgd|prwt%z10ERUIsJ8vICBi20ceWrT6BzJ`|?WizJ zXne!CTviXTW_sy!Phh>{wYE&c7=d5)m3gzC*)x5POpvXTmvHmT-}Fv?)vv=%w#IWG zHd`+rlWvj-F+ zyDgK$?MYSr$)8};4FZV#tR{J@HS5||(PZLeT;A?y%;j0#N!F_hX{jBex`4Ktl6Ir$ zGIJ1ZGo3N$WtJZ8O=cHiXS)=?FK^6DC--+BMk??0pY23=D&`2pVsI?KT8puB)h0f_ zhWhIH)`6$SpkiIvu`7MhSEF|Y;16^CYW<>rzwOoU=na{Ke+VCNhbS^+zLURx2?f+>%j|(kCQ>uqzfbJ_QxY{&!i7OwXbn*+*H;1>(29rjeq|C8Qqdx4&yawq_O#3_T zNR`a9t(UzvPIhwk?$1om!)}AP~)VtuxRjOcE)CyTQ;Vqbc#^vXT}8M0%0?Yo(wv6)~iBy zdxZKQ;8@^G2bu=EcJK-MUN(7-?UcC89xj1c_N3W63D_pz*^`p}FwP&tCv}rEo5MU* zg)p7UE00MuE1ZApZg%3yMt%=lKzZQHvdSQ(d0JGQ1=C_?5${JlwAuX++JyXVYUq_I6oAO2N=HKzr5 z^&tNp1RB+Y_@v7G{rx)puKFi_xv6~8*_MP(e^eYehBIj+m4ZVFvAvBM<0uTzxv9OI zM>(j9L|;j@x`@)ME4{fekrFV$j@LBY;2ZoEHF>@>+a2ewd`_lh!J@xro;~44TqZ|; zAJO`3`O$B={ud_9u#l3X3k|V6$p4c#!umeiemYX{I_PWDW#O*V5#{-d(*FPuaHV7_ zzaDZ^G+io{`t@CP*_-RlH8U|xoMw5ejy3NSvF?&O6s(xn+r`wG!~1VI34;xyux;0} zVFL39T$IbQ010QE1C9PAUXudRbDh^4@~Fy^)XF(r)4Z8mcD7|#sUsDGVi0x~Q>{a$ zl^~foYF`I@AI**RqA}58Kb`YEX`_B~Fwm?f zA41G^fIsU^bkd zfCg7I6$k%+Dw|5xzeKYUu{T5PBnBzJW@T)eh5tb^OYtumb#7ySJ^O0#UbG9XT*S{X z)(^8$yVWoz>mEV$3>Kl|H0t<+6M1a@KiC9wi~k!cXOW;uR9P1*j2|%EFwPnAGnB?h zTWQxDO&kB+yC)`EB3<0_OXJS)Cn8}@Mxxt*e}F_5@Jjxb1WwOxnmb?N2w~oNC&Vd2omH{BPp)tw8G}({nJ^xec+u!9I59 z@3xlS8zZYmJT5;YOS(CBtiS+iSc$mbrTXbX>l>XhL+F*Po>0@p#{7i>6QC>XGqq(C za=l6Id(wXZQ2iaJXRp@B>ck80FD7ZT&I>j_WqEV+#Uk94_C4%d7W?0P`kxy6$KwN1 z<8+UOSxD|Oyh^TbXWU-GOX{ihm|Yvak&-wXc+!^4xkx%Ah|!{V)^r`dZS+nx5Cqyn zK(F+l{nZgBze-8_rioTK*Y$^AJ=;m-ZXD9NI3CwlkBv<3-P|&?E?E z3^~^Q53ouLc=_ohtsPxdoq%bolP;5G>u}uj*KWkz9X(|gqe#^XW*8~>zc%QIBb`w1 zZGOIuvqG<@W56cncUm%dGs-&YEB(wbn`N4r*DSv6$aK0jFA$GJgh2E|^WggL1CjItn!@8$72Ts%Ld6r0G?>F@a5FSyF{ z+G$z`$0<1r*Gs+O12@oP<*bb|gr;u_8Hotbo(QF(4K=wX=IoX#NdEwh_78dhZ z3{BE5eNFy=_6<76#oXKLJdV47eeZ3b8?4&veRevcqh+*BJRRfeD5H)#7*M`C@({7# zYb5Ruw(=^_<=|xBmuRMA^sX2zfoJ?xwX{LolbMXAXWTH%UR{MX8#(_0+)Ut=V>HL5 zKepJr9`G5BeLN);Rsq7vkx19~#gH{y3-rGOWRC2V=`N`)D$x9zQ)kzcVqFYp5?N5- z4z*cms^Zwqi{+@K?{a3fvgBNz1&xdaQHDy-Aue^EOmNDMozv>jNl2h+ZGLV5(nnDi zF6OkCF}E02v!{{uPqXWJvQ?+DGPLIVA`DN)1iU}e_KNU?tz=AB8NGcQE--d7Wy4f1 z^`%|)^IZ}zH;oKS)6n;YZM|_N1iPhQgrrZ@pwGSQA@4P#3Vbb`3d$E}C~8+3Sh_fM zZs$})RAefCzH<=$iPp_5_nXh~3M+NfyWl2Ry3mZlX@H##Cy4Y;-y28(fazw7z^LO9 zp^bV$R_J_NDcY5&o-Na3YQ@j1t+Zfo;dXt?ywIQnW6R2FoMG#1;PM39_PM#}%!LXG zMqG_Hs9L1Dusbu4Az~CjvEE2YmHKXvsUIffSan}`^!h!_SX_4kZ25bW!dGn?y`9rR zpE19@W4-?Xzaz{5zdy9J*Y;2{iGgX9>Z{ifE9tvyca4_bvYGC!(cqJ&8os;f8`pP&rNu+g>IUOf`$1OhkAJe>+Z(v7 z3ySiim|%IsOvMM1t!tOVlK4=)=>UhKHOIVYqUzz&Sljn{5#?%aFD(X>&(`@X)V z7FzjfG-OsCKwF`pY z&8oyVuZ&m*yAnjMzhry5CXrry1!Dn8g}q0y!x%0fq&-2n5BbVB&gyX-)mpICMqLD> z0wX?5JKbBeL~;Fo^_?~J4Wsh(AVQdml7WDHj-S^1qBLsWUJu(%b%4yyK8Sv{GS$yE zwB;|wm{~0vhC^EICH}6d$(=Fymj30uMXYQD>8Wb=JXY_9>3&MmWBOUu@GGA-dgw z*)aPr%2rqX>v$-?0 zDMv(mrc+q(*80TO9X| zh-kVaUx2dB@_rIVlPQNY>Gz+bboi8xDpn=^Ot*VpxaTtLp01Isx+S;dZ7dL0Vt0ri zI7KGxSnI}-IT>|q2m-v%9SnW7?PK#j2yAmop=NS52L(pgdUO!1ds-uE;fGbGNjK_H z)h4w!`&>_>$JT@*GubYuFNuAFY^8(p(ERbF_KHu6v$Z#DsQ#52qFny@t-D8BRT*~= z7B9dxfECz*cinto%h8%pTr&-l5y#}KmEn-#Yk)YDb{iX^KKaXs z-AhN6w(vzTI?5;}Y2P>~8R^hRRN+u%oa`Yo@NVfvt$~qnAcbR1(Pd|@oc_l^F1)=! z6kou0pgsXVZV8Aky;H~-zV+N#@cNu+yYa|GLFue# zdm5vPh@kSX1C0{NGK6_k<# z*q%#K8bie;e355tq$E_(A)Go?6VFyTp9+TyXPnhkLpfR911NQm$(DYEc~pTTVnA5^84C7Y!e7;c8VlzGvKy6tPBpis8=h zM;3nH4%wC{pVtuu$17im*qQ9J`2(*ZzDHh{|0(uuKYx}kYsX`n&relXs7k7Kdw&UB z@=kENGDJ~AG`&jH7A|L@y`v;_c{AMBwe-a`WWd3h*g!SWP&-+V{PbrUdTt`UV*0&e zCv74IH6oJ7%{<0|TT_(B@M+mVbVu z_HpT6*?jVfz+mvGe;EG4pr;<@&Ka9tjIrYx&i!`uNKCebqYb^~X<+ez;4nOv487~i zR(kUu@UOwymZGqCZr}1z#_UqY%!jN8?6>r(S_CrSvW!k%uVeLXY?z5_vaWlD-E#C$ z%N3tO@t6F;^V%2P={z&RZd#kpDl6@ZCs9O^j9Rvymrhv`;zVRc0nnaVi86M3)b=v9 z=;c;7d{=cZLEiv;`As;)P_E=cf8=Z1-^vfF+Y`eX*ZuQ%zM`%l%b>RB(gksB!F>@kWdmbrA6f?2bs2(G6 z3OY6pNus3vgNXKmn$CtPk%rm3{FDnn&Ime#lkit$L64wiTpxT+(BzjWo06x;WGme=Fw;8`GMG4YF0jv5K2Oe14fjf zdG?3B!m|G?QUP6K%?iD)Mc;M_!Joz1qEzZyEb$l=;A+qy3XY5Z)>!?Fk4D(;T5ORR zkL%-J(mxFXy_t0ariw+y^JO&5ksOkPd~b)QE>I3Xaqs8u`kse$uJJ1P&?^o!DESs&0L1<#=;-;bi0q-#BuW zAr?VXYDp&1H);VV!%E=yu3x?7PD8!qUGL@q3ec`mKE@8^PqXS;htR&JT!;X=W6tKvHQIVokk~6d%2pUS}8?6hOYEGscJ7D3w;vku5$w|KxG#a0vbAmB^=yt1wFp@ zAAr0jkjY%AdUyAU>5{lZ*+TBPeK!1xX2P0xQ9=|O4}trA+$QmgCC~WB`|^r4VSMo! zjK8Pha>O-$fcWKS$uT%-Hz8ohC_Zarfg3lrK%$IQdAK)gFS1G;9SSs@QQDUheE# z7|eIvx?D28$>siLkb+|3ruk`qtlMgDxU)}V+w|x7CnaA*o(l|6DB9c))Bdzl@BYb4 z&SwTPPNP)+DjVqJn_3g}SenIAazQs))*(`WZL6&)7BVUL5~f=lT)bZfybM&GF^nWa z0yX*i{UYy$o=gDkYb=Gisa*&9*u<9t=^Q#F9lj9qRAZtMQjUIV{QD>Qh_=zi!X!oR zs978((j)^U&~V zlL-CLRY&iy-v?7Ap3&9k(c0=JEkpxCXhh;N9n}Vz{|b)n|H=$$=vgd$Hox>N>{i8_ zH)@z$F9P3L*6iwyI=~qOKXXz=oYQoN{S8ja4T(PYgDk8Z-SjSpXh0@hyGSiV@lRFu2 z+w%s>a~Acm!tJlnKy|s<@0jo7OPa1edSkHpXO0wyHVVfCZJtj%8Cbuxb1M0l1bTb- za#F$);_a9*4Ey&B{ z4bSQ)6UPEi7fvgk$8VeNbDSL(gj-9Q)X2F%wVh{fKZ~s^*l;m;8Vb)aiaF z!)P{kMX)!sNmPy9GV#P$g!_A^-JhzG(^2*E>k?Jp4$cWG{8E=!EicrP%;w_tZKRKK zOK)kW{i_A1^XjXrVJ-bs&(eOInZa$u6KwPXe1tZBxt5E-BtV!JlcK9)YqA}pKaZ^6 zu?cCt+1)ZR)q2`sG-m)FemjI=OVl|_fTgK zm(ur?a*?U)a^>N~wgHgJ@HK{=Q_6ZFzNJ=r3=7Frfc~HuxYAyqX1Ejw5vYn96O13e zZl4}<*|jc=I1%1`+wkRp7Um$|adM16gJ2&SeRB;Fxsx=J$<3GMO)X6myjL5TrQ}M9 zJhRT>u_n7z0}SqHN_>|PNFV2%IU@*q!1r|ase?6yiOETbYDZ*+EJ5CU)V{p#ceiYW ztm$|Qu&wz0k63C(DVLoQL#s_3qlf1z4Re@RDJM^U(?OKQD6s#Egk?lgM8c?B;U)dF zNF;S`sTypD%&|oP9NZGNN1{{r)7kFGRg3*2B*lG|uM|d#Zub2E6@Rl&z-*hWN8r?Q zzNpQY8z^zeGQE3)VMNSo_QNLP|9nYc-ysYa#TH%<;fi$qr)J7yXDW-1CJzM!J!4ZM zE+QB0=ENMt?h_Vm|QDVINPp8QN>|v{@m@0z07FKP%?F+NluEh z)4%ANdfYl`ub#tw{dP?a4%RZeyR(!+We>4xiHPt$abvue*%hY-RT8>}`+nT=B9W8J zPgc46dU1DeA90eV=ac|Mwb{wzSRtQ;o9pDi{|D&Kh*q^{R6G_J;ETw=?v3pmXop99 z4gnDD(0dn@tN?d7Ur9~E5hbJEn>O?F$pEtOSxDX9)Pvqvs68F&k4%2AK8wl6;Cb$L zzmA_+Oimbm(5QLL_qdyI=AS$GK%a1~9Q&qD9LhQa9U=1aP<$P$p82z2n= zzZoB4G$Kp)>0rk>ewH0pXqEYpMb%CmbkOi3uP{q`l$EE|PIUUtf@dH;ftp8s__#=o zP2k~t-QTW|H~Wio?Tfv(G&j?&dEe+g8oOplTeCNxDZPKZMk%MIz3Nhy_dXf&Da&>6 zKR|n7iiF~a^>?jp$=@0?e(kzw9Zx{ zBMETivBDZTn48gaTJ$IP?_Ce+Slnb!HX_88e(@3d*NeH+OiMU zT#^>^k#v=tntU&4Zx4>s+7sYf7tvp>gocTQ5~AVs3k0&8i(j;UbLlR|SBkT7Ut#eG zksoU7$iQ^W+FTBwvy*CcL&YZ=Tu%tTi zsL+I`v^hiiZy242+pQt~ok*_>@Cas`Ii+pyQI>lw9v*$M4q*r|<@M0O9+3p@eJYrA zdd7FZWd?FtZH^AHA9|&0pz!W%JCIZ}D32V>WD;u1FU({EiB)qA2Wv}6PY!3QR|I?mYzQC6t2(!kH*4fC zLc#%){8@-T0ts$$Z4?INym>#o^6-OeTC-#Q? zmUza_dk5z^VX-uj1vxNCzG=e#!Y4s<_NUf=qYy*-mqmK8H#Zbra;2hS1j>9aa=73ES&r+d>sHh6(WFt6PyX@Cj+) zYlEEX!_vMCQ%N%ViogU=X3p3cDSsbvM{zM^#Vb;~Es2CvqkKPRWd{Uz5jvf(LtZNS7<05!nGdKTdCQKb>8`C& zBLzu0&&{x(GT(|SibZL%M&&Hur+Mu@BRSzgL*~ zH7fm)_g2{7)k-_3(R+JoV?+Rlzti?t5?LAdha>1DEkRI6QC1(=$uAl4TISLX*IRr- zOQg$=udgon_#I5@x^s2jy6=Y+;O}1Hl&DLP`J^NtPBm*;Ctq<%leoA36g<<`Tp!R4 z$dzJ#NycFXe*ibIrOCFi8<)E0;}0r&Bv)?}&U{gbwM@gfnzWAaJ7YU%Te#LN!6hU) z`r-1!SjT8oBlgD$agC-GaQ_@~jAiKY-vGKgwYV(lc z1lqX0e&pSqo|RzTs{!rw0l0%dY5GZl?g;ya>Zp6<02Js<6}Z1jIu4E_Bx>(!-ea9pb2zBm^Nr?1~ORxO>S%IQoYq<|* zyg#(8MVvKR2PuU4>U;kNGb7)}(xWh7J| znw&9#PGrK)nPb0e5mJI*C`i4FRR)r9%u`kL$J*i5_UH=BtJQSvSU^0FirT1Bg~K!% z#eVH8U8!hGw3nceFLyX_Wq<03AJ7Tgl=LE40t4MPGCA7UY$U^OmbIKrE_MC9lJ+}@ z%WuAcO-oEnTx83Q2lWtE!t&;~y-4)tQ5Hk-?>>@Su30`@c=dpwd@8iZkyhxnltC#N zAaQ5AXzOVI5n{8T-D@PU(b-Q5kx9yz80{(b0XLri*;{||;M#$WU6G*>v2>W|#;bKn zl;+2KBJVf)?Mz?5qS_wO?QAn0t@HO|7j^b0X>4jy)>NJ^S7~NVo<3`K{SoC5nYHjn zOr@EDleWlxO(`xn6Acg*#^f9GCLK%=8AZE17}%JA)unQA7TWeX*$?``+sK0;!WV16~G-2 zflO{JDUBqMaNE-yrRTw3lE)qApdLlv{)`U?#RQu+iqa#PKdOH13658WWFL}?HV1v; zdtmb`5aImQgpmpbBoh?Y^!tp|O7|6B)xx5gm^rtry$`WU?xy}MBq69r+^e&o?s|Jt z!-a^xHOk;-U0dgOkC5*!M-h9SVID?gjggZ{zAn@0y?-3YP5h)@62P^>GJ=h4aI4)T z=I=?*pPX_o5qjO6h<#z+=_T*|gumYt^3OO(A zYPpU_+J>!y8_94lLx=eci8=gvkd0SJ=fl~EPPdBC65b6ZSj5oPt^!m_moyFFll}eHu3kp=A1}K2VB-hse!s zdC~ppLj;;H`8&GOWxFa{ph#%Q!uC@*8vizC<<&;@`=@vHmi*XW7!V{h>A57@6M5sB zMF?z9z@6HAw4cY+5ANp(~P7+!rqpAOVrW{5P`rb1``JFuQ>bS5^x*|Rx-K|~xrVa~y4zMj zt+F(Q;ch>DvGLx!8HV(bTvPjc7rK(l&5+ zUb<`2-N|Q|=3tY9b_L14x_@9`NWHcF{jP{5@N5 zyMUcRq`dYtqSQYO9w;ZK-uKbOg^}&oNYx2e=qm5*LK^DL=iD;uvOkccYMLtVqu;C1 z9u17hnzynh{|C%KGrxd_i;b~4s!Bp@5nECTuQ>`qpts5=%-^gTK9wsV1afuP;)2ga|hOt5560s*ufpi90? zS+GW=5%@k$YX$nAf%*CKn|Mlz`Zx8KwYHfX;{!!PqpH_uZT|qw(Hww*u(}KcYAWzh za037Y08JgZh^vqb5UtLL?r|?%TJ;PV7|qFM=s-(&=D_3huyffvrwrByV_`W^O67+MH=sCTiezaF}?2Gl%-J{ zSKi1wM1i7&J3J?N1d=GhrBHrYn|qUi z44f$J9vu@;Ib>HN7ElO5s>NhnB{qPN+Y|tP9}Y+g4Mh$?9vu{%QemVGz|sdj^HXci z@TI~Qoz)y8BiRQKR9Yon0CXtEqkvivf%+iuBNo0g2E0R`jhzDbCk7gt0jFgtPlqUE zNyDsE(M~7#hAE^9?I0e9_3tPVifCSA<44no$OKg{?-Pe$Xn=C`;QZj|OM&_TeUlqt zmYLy_F{4=U4*SPXDJLE^I{CvyWz;2yz6te%9SG!Mdcohk4NM8t&*%5Y?=7SNoyWs> zJdwQMjtLIImtfWb5xQ;%ri_Y3`EifE763I{TbKfnfB@$3Xvj$VC}Ap!YKS4+>pOB_ z97C4<;**kjRgxb#w?JsBJNS6N9c6V0gy(M0-|qohkaZ4xex@Gbr-P@15}l_9KQHsv z0@w%xm4>h7hD;GDpp6S=jS~$hDcu6V4)5;YizFzo0NPsW7Hf=> zWChfp4?CT6lr5-Y3Gl!5&D`DBO^EvzP4$5k?>Vj%8yCl(@FZCqw?Hn}!feL%Jw*cn zyMX?(X)sq{088JI{JBX0TMf>v=hhoh83f!0{6E>pO0`zW`QpD7p-&oJmTsegNIx5z2OMD zi_$ZAM5RX|&)y@GZ-)^C+2au;)8hnbE9(Pcv9~NoXAd2jur7cC`HRm~G59reZsQ+X z*StZF^(6_&YkZRTn@LL4OZE7`Y)jNYec`Oo38|!S$2qKlV2>>c^HV4aBM~S4{{Y-V z!jL5F)7S9xy2kiK{I18oPK!GE3_bol|FD1Bt%|X>8OzB8Og+x&g~2L93?3OYpXiE{{XDp zRM9=8pw*q@Cur|SAMu=m6?Orj#n9ND%}_aZ>xd?xh@6chgd13cDh|u$s`KH_79r#a zG=&LxZ)ZJ%ARdTVaJo0)j>#yTrtInKTJY;8BB1&KXdC7i>nLp+baq?o1~6bW6oJ^q zC`>2_N)QAN2wgU(8ZL{4idnI1 zuFEgVxsarZi=^w6h#-2{q&~={u7`#`GS;}NbQby(jdzUFZ_{p% z{loJBmzRS55YDG5b?5K(gw&f&P>09< zWpEIOgGE6bRB>ql0YRp;YZT=wri!|9LpK6Sv35Omg+g{MMBIOP8y3PtLnHHWelooV z1)>#l5OYGFoku7-9oorhJ{+D0BNYeX8QUe< zo5DP)-yS2Djg=6W=4%2?O&wVI#E!?1(}@WI*cSf)oH*BMLlvPa4L|Ss!=OQJVOhT; zc$LB1Ny|C;{bM2oKq`H`VAjQTd*h#dV{vwuN&d*{V<^r5WSlpx_ve1`1&Y<(T#G0% zIhEJn^^qjbhfI_A`Y;QHZfn!}!(5Y7y>6J-_F>-I!)z}&pdf}{?3TCdHYb(Y9VXCk zaMa562*wNm?crrBdEQ-35+w_Pz3hZFzfKSkE@9O~j*d-aXhk&|+V&Gy$63_DZjw0l zKUq7IVv@Xzckle+Ezue&q&bhWrv^{`mP0S^vj3*#KM<+WS zVrPcH*@YFN@(IZdxicPqLTTUam&Jh8CmfddjT9HHOUK8YqG4mG&6AO@Kdf6Bvnw5( z5kV?#ImA%4jR+8-$7rL9whArzTe(R%LoG_A#w8)7VTR7|28*)q1%jOuZn{z^?BFy+ zBGPDzSs)Vt&^Yia1T=1N69|I=?L7c_biWr7p)!`HRIwc_ zqTs|}0RiQMOkq4;SoBqkwFda>j`eY*IaY#2yXgJ66?dSn`o=&!@#Bo~gC96C@pAHo zs%-#vDmYOou}Zld6Y`kF&}pK*uYf#MeTG$E$M(pjI?;hd~0Nc3{bkEz)Dq^HIJTyM5 z<0FS0Rp{54y^=e}Agz!i@F=g711zYcQvv5&9{45_{<^6hXbX8d-l}3yw=_cp8;B2| ztag?xQjXiuUR@J-LqO@TVxGZHdDl6m6f}iirnp`3gqUbwfo<|>DO;znvm}lj0oWu{ z>ifCI%Of6#O}5v3;*{BegzIPgxK$m=qyG8pyxT@Vd#{x3p0W@zq!l_s@x|k26*9oE z6Ug(gh3}k8N+F1szo_pSiZxE2KAM?u5(R-~{{XpxNa|^@b?f7-Iv;Z3I3khlh06z( z?-9XYE-mX7UF5)1DB;4mA7LCs7 zIv!71c9b1n=D!&-qp$|FBOYEqIU+W456Ra@sRQ7lta9$VyIri43>-gl(T_HOf?^}m&&ajl~fhLk| z)Nfx-0?;-9Is=-l=5Ht?-a+OI%RYHNc+^wS*2^@WE=2K@aRfvwJp{d3JbO52ZbnTPdg)fI=qec9B$+V)U5c7O`#a^a={DX7r z7--1q)qHpexI9i8WP%rvyWpoc6+Px!f?e8?(|0fcFlZve9TB}Y_*`nFbWzsJ=Pjv> zF?>f7Q|wAO=wH(xm5$L_+8W!|SRa@$xo0S4XXOB5v=%rKNh8L>;_;Fqb)A!TnQVvY zI=r11NUogW8ED-C4eX18dcj`H_yx4$xW0APHBGb{gc>Qa)s2H6Hm$T3sfd?Adog&y zKJh_pdmpPhcpW zdYs>k9P2LPRlosl;8=Sg$`C`SF}AtY`N?AdYrl}uEi|*^FN&`I!uagg{z;WHZ3S!} z6yesfBJ;WgX-wyQc*HVvpgMZvUy~Xt#Sr4NR;Bx5VhWa7Ac}2K2g#0fP)6J|^X|`k zz?u#NmbP*@DLFFT!cjLwe9{`d9OM(YK!U@a5z~6(A1f+D!EHSK@M1y&)Jg;!{!;@? z*jLSWRM2RCv1kn?@ZYtcP5H%I0cg!6OcdKao)gFquJ?>0LR9eN_2=UXo$lSHpN4PA zk%Ll((lYLR^_rEXS+%x(F*llY@r?ek&ShK$#F!WuQpCWez(c9CJ!Qwu7SJA!x7PKJ zVYij6o}>Qq@$mz0VZnc_YDl9KUjDu^*Co>cC!X?dS%T7!2eN*!GzfrzhR6EBQU}s( z)2F?D^PWZpHVxC$7vmNzfl?j`;ofZoP9g}K9?N>e;AsFO%;%m%>l{QOyf$jO7ZST+ z04#dvAe1a4I(3rTxD}4MoC!Nz5Lw!VWzbI(gTHywI9@AaaGY~)@kj;+(E1){Kl_z( zi0psH1-b~*yq}yBf=DF`x#makU=zeZrw8Pp_OUv`?S!j;uwfvaIOljEAV`6vfAZv( zNkgZr=d57@)aRepvBPjaXumz+sFs(Z5${}a#t9k|YH`2bzq}(#X#q!`IzKr-M|Jb& z-~Gt7P}GZT`s!Ud%nC8>bwT4^rZ`x7cSBw}H_u#P2f*Y6Cl|K22&D5>SHF_O0B(+5 z5-r#?HO13$g6l57Y0;r?T&I|!1Ir0{N}ihh%Ruhb2*^BuDeE7zxXPzWaKGLlF%c-l zey=R=2O&cSzYa=2jL?a-L52Cim&!V4J-$y^mZ752)vvXX{o!G+J7Vd!?cSd_dK$2J zE(Anm<+Mz3Txn531w=Te^^=MP3b{yUBv2l^NuVbk<0b@Q3q(T3lT%$~3|R28lPZRQ_r@7u zRH#u9HUoB{30h%1fd(oC+B_eZ0mZb-EaD*L+5t?9kvxkRD$3Q(A=fv==C< zs8#wu#wy*jx&z-D{{R?I89}&9rJ^Ep%4-@dC7LY?4d;&WN~tJXd(nV|&;SHIVzhQh zuC*O^z8@wW;7y67Kh}5GY1*xPbARe&3Da;gY#oeSF)5Upz6PxmOUKqF4~X9fg(Oq( z<-qS)I$m|_vz#r(;#_to$62l$2F8Yudf(r8&$O&pmuJ|YSg)$-#xT?C+Xlw4oIpXn zBgOv!>Ip^`k?^Nwx7H;a3Z}!SeQ()%$eo5NQXEA%hgxw#e5wI9fJTW%y7iA@CE-tp zn1!dTvwcvZt#VH0x$~^x1P5iLRqMy3&Q9(@xZu0c;jFohy6*isNTAqt!G1*J=N^Ll z9zWC1oU(ubhbD`?-`01ERt0hwUUl=A5rle$<6Bd%u+kGRKzw?BVtpr3HKkJAT-K9*+yx!^ZU*6 zhW!!J2j>FDToo&7dFP*5(J~NK67(LuV}=#2$EF5qCmnZjk3v>ju@-s!8p6nl5D$=Q zqmJ-(jFW(9TO+>6^VH?k>9s_8LVO$K$Or`Grrb86c-w%vqmWIGsdz`cAQ23BgQ;gf z)G_3@K-?19<^C>e2B;{Y5%zDdIb`TIz`pZNp+Zh{*I6(nz`!&=NPS}yZ76|)9j1@Y zAz;{_|qPVMwQ|@snzTfQ4RmpFHD* zvj?>ou9Xk*o0_KBG>?B@_OQFd$QlPn?LA_$i6fD{e~*6|MM7$XolZlBT|7)~hoK8& zhN1M$;{q0dfVxeafgr zTVAJFf`&v!yL1`{gA_~f!VNDf9yN(fKnrOA9PIP+hBOc?EkHKi_pDCJmj?0An49x) zK$1fQFF_8xZwPOt7Ct|H^@tLv2r&D0N}kES@k?>+3#SX-yll!9WYK;~^RcdB;}i{Q zPK5=fwM|=t8>n|zib~17=J2rm2O6z7@68R|NOB3tJ~`{{$8%{yRIZ4y$Z&O#(!pS$ z424vc1-Y`u>osb)J2pDk1|f@W4f!f+KHECWrZlUDYTgGGN0G=VRsI@8tt+>~K3N_zJ1Yu+eI^1wVH-QUJ+zy(lLv>Fg9^)4e3wp&e+ zo4cCD9^Q80j3OdEWakt^_k_VmC=@^v_s%_$5&$765c2Np7Z{*Ii^z1% z(o9FdbWkcHUBKk{!IE?~QFp963!%uni5FerKBN_@4tgTjThzfwjbpa)(WMKaT{^{E zU}Q{UdE|fNEr}4@S7DWxePZjA1Aj!{)pZ!^C6$^}?`c{lJpbCPQVXpLYc zJTF~m8ED!?Hdb4CHLOAkOoR^>k0sYu(+ZNZ1P@Ad{{XC%9WqU7uUw2kG-!w$Tj~=c z(N2*qtK(|?+=QV-J&3Q+<&!|9D|AmF2mb&wR>8Bn2ITts!T_bn5D1saIrEw%NK8Ek z$*yup98M#{-+X!d%Y-twq|ho!*0}eYp;ZLY1Uu5t{^Y{xE2^U3UMqq? z00N>kaUKKn;|zoli%Y`)0OzbB%si&`rT+l6g#{fH2V6t@#2o})fN4g~vUXO3V9+}x zE9>!R&LB83c()84xx)tLG=Dkzn1}~Qpo)H9oMQq;iZA#ffIP{e88ogLduKp5v7w!! zD*ZPlsFY-WpaTNuq5d;`8+|enkpY8T#G@BT+f4di-Mf1ziFy`IGMso|`Q}q&w_y z)<=V}vY5BIa5tBhc;mcArnj(-{R=p6 z7|2@;NQVZQu>(w802!-b1t+Fyrd`~2=nQu07oE_)vLR5xQ8ambaR?|t4O$-u8aZ5I z1wm7dapprRG30OwvHD~6hP5GGLqz%BY_>5n{{XQ4kBoMLd#p0j+6Skue{_A$*(U!XBzmegxt<{{Y-Znguku;`iUi2IiNI3-Y*vgis3! zI5}T_FomEOOarZYuCf4~9z^oJzA+|H}B?E?(Q(?fjfBnFan{2bX_wMfm z3D`lpJA7P4$?XYWnGPOsq;~Q9gx9gJyhKxUcY4Xp&l(X>?(y}Oj7VyP2%1f_)9VC4 zgWhaMoj-Vt6`L)4_uGEC$zc|Wr2P26sQ|h&YEK`lkH`Z^C+Gz2);kb%aBT{8uLH9P zjR*-mr;$S4aqVOsW)rAud(-XCJP;&>2S4f0edKe}LqK!a#tVI-Tyqup{W9`+lVRb| zd*Gbo;{Fgi4xRy9+E+uKbHNQE)D^6##lRw+*K>HjQda?GD2DP=2vuBvA*3bCW0Va?Ig^ zz4YhrXAg!s?U|Bp4S+xN{2v*-L@JOAHYdq9doW56THsTThtKzdlsPufMz8M+U>r8x zc*Un`S*$|E4qjWw+zgbUh#@=!!>xVcl~5rkPJ^$>-|}TJR(5XZLrG(OAW4!00{9LgTOx zu;=I3I>|733A@v~@%if*T}2QZ-=7W>z|vSk1^nNvr}ANTn$LglF4Bvs4Mp72JHs^k z3O-!uG=BZy6&E<5*Ujo`%t&59kN^wMk^EyFt$|R{K>18~wRX1z57x4@j6D)N=tbb> zZ!Q6KO?pet((uz5R$Czrt~S%hS!lQ*X`=b(jpOht4QqtyvElu=8^RbJoNbGA_0AJu zPzt5tKznb z%W^EMp{5lqi{;;JO%O`9%T6iFzpUAaI7Eydhu~pq6+-TmHmS#^So0AhVN1%etixSE z5bdsVKmmX%4HIr|dH~i8H?RIVuMJJrUA}I<7Z`&>fam8VXvj6}jx|TT4WKb4Q$uQ# zZ;x{vFBOduxLX*fM)qKV#zCayt9aoV4~z={Kr~J&yIC7(HqEb@mrR<1iM^g)KHS)3 z5cC@R{_zoIl$GF}vh(xS5&?tA*DLUT z(6YTwAvXFN}(s$?YyN+RoC(<4er+-W+ zXuu}xqsN!5EhP4(Q>}A{XGQ109Zlby0Gn*6jjEKUpTC^eMm29B-E+^68A_5_i$!P% zzPr`OHeN(07Tz`!hBm?%v3FlDI88~k3IUBid09MT(tsju(b04sF}Vtr^Cl)HVs0qk zpw~NW;~7O6M_v_K&p)j70-*-PgHY;!SP*sh1@j)8srKi&r8rvL-5 z(c_;cGDAUzi#zix`tKjGg#o@g`~I6p& zqg%nPafv^Uu;uu`sAVlYIs1-Q{v04Qs83;ns1_+n<9A>Ale9Lc0ki##8=&cTJN2yB zN-!vI&;6`Du{q(F!T$hnHmIS4S$)sEmZq3BcfrrRV^fGz_($BiZVz;kut!5%$#ET3 z03oBYzQ+R7C(6kZcGWhTKj#M0?}F+s4;SZ+^^lmc*;gEQ{{XpGfqVg`+e&@Kz=5d} zi&7GDjYfT8W1$lzsQ7>GY^@><&TF3V8dV(2OBu; zNq}%eM~*+dK>C_HBuz0nniJ<%&J>o^NAQI^CyWH`2gk5L=?t0_h`@o){0A0`a z^XDWHMr@kC{&~v!snG9Fc)L<6P3o^-{c}LVzDSmTf1G#rNLsWTc|g;^V;1DVrSYTG70Ta%F)m{FsYe6t^Ats(5 z6E=s?Ci6`qmT2O zrpE_@IBV^ST~DZVZy<)|DC?$;@2l1kr!vx=QL*^Qa)K>+8#pfn`26Gqa(qDb{xKjJ(HjqVtOH&h-)>Oo1zF1F00YXL zTxb!z-K;;L&aKxNrH0w~&Oj-3=DByNr%y|RS_`afK7IcHjAXn75#s5=2nBU~9&khi z)vo&SfTN4YpIIAKr=aoU5mB!Pj~IB36)u(k0KZt$>7frG3E;2J4M7OniPZl9Gc*|*AxERE02v{BARxZa*^2l9 z)WXyZ1t)yoM!91e1Yya47hfK6fzTWv1D+ysZ;mUPG*A?|O|7@yIb1FU*#4vOFfPZs zk@=I#!~JIhbQKyS!RqaHy=3Z8Rqr$<^Rk-f5myQ=ws?oIE?g5H8KvopHR}N;l>B#+?8+Li6j^ z1*E3PYIoC&5@@nl^~e5U5#z_VYySY;xS1Tf>ovgMF(pC5o4!F~yqjBg(eia~Zt-4= zAr@+Kx#wsIL2Kob~CR6Md(nAjX>6@@y2(j;uGj*V7a6O&P-~M3C3bKz}bRRg_D05l? zr?N~@EMRq@=c6oC3QJ=(UmRs`qV@n!z_^wrNhmJ(?&Ie|wVrRouhswn*+XIP4eH!l zclU%@(6g*Gg4z>l1KAH-!6}#?O-J6Bw;_}bD-DPpA5#oSltSnzcgnhQ=kGaA2=XDn zj~NbwrCX%$t+JJ^!cnJco=pw;&Qr=MQVWwxajXv#rtByU*oNMFm@1yQ>yaE?U)DEz z1dtA?_&3G}(4bR7wL4YkI)cE}-TimgZ}iaY>t|O+E~l^vx?Ok=#v+s*gAO|5&M-A% zpdS8D>2p^Jf^gJ%@#V!K1+ig$WD>;^6+7FH)WLG8qk!Jta8#&;@0G(9YUwz<9kvY3-&Fqp0P_}up@u4+Zn23xxZV3^D>d+8t^7j%^6Lg+Zom7E z>mjF?{{V29wKsdy{#s9b3Hx z;{7;EsFI#A%ePJc0Nt;~MJRK5_{*sXhlStg^Nvc=*FnBjP@C%-(XmH_@A_P8F6r%E zf8Oz3C{Y@gJ`(Hw z-=|oJs$oeSFQ>jWaio_joBse--@Hds2wa7R{Xef*3Xtil8-n+XOH#|A1?c7H859>> zf71#zA|_+g{h4Ro5=|RpNTa z=Ib0yA?`+t;NQoLcz}QaEh>3O;}6S(fb@`Jv1CnsG^PO}{z|=nh7dz#%0`6Z@|Z|} zsN*^$cJqi4h$<<@lN2+(oB{-`>HEk*wgnDXQ<2{NVlC56HoSc60tGiEa$C&^cz^4k3!WTX%6Jq+tCeKTa)a2l~n_G#z29oF4OtedjCa$qn>eU>8?&1qGku z%Y-NZLAQVBA+^z|f5m~N*-!(N_ht_$1mw0gPY*vh1Og&1!g)h; z^^i220Cz80a?&hrd*b%tbc{^^QR~KEU;?!s8P=w2^2tI0*7#4Zv86PS(5~$(v%!K8 z!<8p5nvSe*1h6rbZ8bgQ|ZJoDyD5qqO9i#<&+VEEt;SASS#!Sm^aJqHR~3G z!5Zn{-oA5_Nj8?gp@h7GApZc5tl0qmLi-Ps04udQIdwVt#mNCeEmN~k9&pfkZpY{Q z$9-b@hxy7LZMEkr?L^cRXW@(}6MGLixsef_nEJpMwRBnLXyblxfy!Y;u{@J!Iz3hWRGPA38 z(Zfz^o52uh+kQQ`#b^)>g_<~@yMlyl0C~6m`pp6rZt6~tANM$gsHBTz@b!QSplKgp z_s&_d9ZmxK{{Rj=$8rs8zl;piUDXXU#j}?A#*FbLcPMIu5T?FE1Q~#NArGCNTD@=( z*;e$VN^yReVcrq6Qp0r}ZsVi)s?~hl*?)KovvZl2e6fc6I`fv*!;hC1n(fDL#tMn$ z{N(^@8**KO)!7EC(8rFpn*hiljB8&p4 zDt$No^@m@A%2V2UXVxq=2CDfzWcP$yVLaeXPGO|=))~W0Dk37B>F%ZH9TaL0a96l+ zu7=^f%bv>)3k^I?E-HX@UqTwUYdJf{fb)e9H^^!}H1&jaKyG4;){{`1d}VNG)U8VL zH;NDltHF2|RPU@xl~M$*p-(3_HVI0hmL|#5*PKLjQg--${{T4xL|fC_zJ9vF9iSL> zt-dw)_Rvi{lw|(^zOV*@kVQ_gBmk8|d9Z~?yjjanX-W5pW%a1iXGnd znj}FVImsGw@}3MsRVUD{dyM^K{JfZN>5uVY-uZBTOi{r^mE|%T;!2@m&u;p_1R@bw zJarA$cZNGZh5?LF_`*{|luiBhn}q_p3pwUs65d@A%n2?X2B#pj(BRs@CQd&Uc{gbf`hPhU@U8p75F!5ncQbZwM!M$yvR0HSc>pKm{bUHF2!=4)DLPIJ zP$LwArwDe|67T?^F9*&=(YhTF6XekIv(8Bhm;j5Fon>1CB~)lJ@_cKiXH>CavJp^i zz4^sJ_R`V{18FtGA9$-t5Yan0(Rpin#R5~27WYGY;|pAf9Juw+`QGoGYeuwqLhXBS z_A17t;+^^L6qj8jCmidX)&mh}8tB@0;(EBY0E7flJG8e5L7_Ci@vrM0>aDJh$I1Ty zbB7r%C^u_JiC}lWaLR7Py{a|0f8)kXp|w)CDg-%7xyQfaX)^2QCDDtlnV4H{bi#Op$cmJ9R_wn za)6sokNd`L2A9*DqJLOu1Du>q6~&gRR6WD|VgxOS5OAir>GEPin70D*8P4~S!K)$^ zj`;1+doDW=U4rXCb&8rmSTkE4{{XD3!BGI{?&)-Txb}jRv#tw%Ul_6Jt6Aqn=NrIS ziK{z(;s^mLf%rSt-xyq}Yy?6m_}Mv_LKxdZyMCNv&j>;~2VdSMqje?FCqoZE4r*4& z3^@@{tTnw|?b(0?Sgrzc5Z5i`3_xeP5S}#e-fkC&1>;g%#=br?O=24rCo`v88RX{y zK|owXM0B$AIffl^S$I-+!Sj*3ly^ID#X#Tx0CRIN9izs!JYC?_EjuHvXyeWis_j*(2Ta&`SG3TTy#Zi*~h~B#~cqv=M|7%?2P#T0JWXL4rYV9@PFnTu;9|Fy!1(clm?>$Q+}%qi-D2| z4Yb5d#u)WC7%~CyDtLLp)?Ebz_G)~%VE{@aVtFy$DA5OO>lY)Zjsw5(&N&ij^j8zQ z8DIdkQob!bcaSJlxun|7;*d2_W$~``4sXJptMuQGIifH|lbP{@*fgT#>s|8e5UJmQ z2H$QeauDb*g~y#=-Zn&K$kGpITJyY$ISmx};05w%)L9YzS~xi&BAj)#2I+(v)joK(3AtqX47m8-=dgbv51!s0i@U6X1sT zyu47LJbCo@g5$qaU^NjRUOzc~q0DuCYrXF1>k{rtAevbUANLs^>;$^mwO)N&$%>mW zYNm=pd%?o!g8Y>IPQ2nmcvA<^0KD||jT%9=gOPe311@EtA2v8Y?#yvf!frftTj57| zP5?pJ`61@$FO4$|O9&womZEIn`w?*+d&swhGmy_dvti=rd+RMXh_%qJ{24d#gqiQG zF=+9JA1*s{)7E>-3~49MTcgIYA!>{-PsUG3Pd6b1n)vsU zRw@#ez_#hCagos)PO?%(#)-lu~N9p9BSfti? z0Q0b>xcs?Zpz;ddO{4np-XPgah(1oV1@9aO0S|ni;~e_J+6NYRAC5JMJHpeeb#=W) ze39N;RhhL)&I~wxQyQX5=tpD4#88wFV(14-tF(P)_ZT9khrXQ63hW|hWRNICiydB* zNqEIJ%xqp>QPlW4!g64eAd_ylT^I$tMv!~#elorfc_3(3wX#o+G9$%QG-{ri#6Eh> z(I(0Rjx4%Qybcsf;G68cFaF&%NPc)546P^-Nh`uD~wDwzj3iW~o;3-_ z9pbu>sk-tOd;GZ|H%ld4gc~?Oav(KN#@fs9(5);ZeU=SpK>zkEn zp)5Ms;w4F_h&FbA+~ZH%hQ-=m@>*S3Cv_f&thR}R2bH18iLktC1|UmOoi%$Fl-6OE zW=c95FJ`-b{{R>r;EL}4`@q#XNNwvGAqA;_`EclXb(j8s#~DDk2ucRqo-$KWtTpA) z39VO;E^&)WqU*2wM?7W7*FYw>%={mWvN$w={{H}2qU37V9;S`F{ACUuh&n&-Il@R; z(eD6n8%GEK0CBZOQfSwIl={P3C0D-x0IZe-Gf;v!-(;%i$SH?sntGzO-N*=y@4)WjVeli|skMKpk|&Nb7k#wY{>S7vQX>yrVB zf!bk>da#$HEG%ZR3NB^*hFkPM&Q!aA2xlW43MQ1Sk;>P9r9` z(_98N8XDJ>A;j~~ZVk&Gz6X^%>f!fhwo^##O^&(W#wALnfCRf%~F;*4(%oAz~qU=KC&Nu+PLOzRHC zbtb(1yVp9x1u!7q=N$||6{zok4PEWFeC)}9Nnr;!YVJy85~E}p9y#xf)F)tNqHukF z^2h{Yr2Cv@Wv8{u_5L!$WC9vBUk^WwL=4a_p;v^s@c0a~1bOW~vguITB}@UvA!SX^ zSUuXTdB@f47lFmVL1xv~2uaB;K=eu)2u<09fDZ98ENYPD2zd3ZVYaqk%eh0r0RncvrW8WQ*zU(PCoHfvX3oCp*Jg07##{{T3JHQWVL z=U(vBpw$iIjhcVPF@h>;08zr~ZP4p(Et}GY+lARtvmek7;t2-Eu+c=-PH~5!8Uz~# z#7phK2R7;6Ki~J9v&we-I1(2qA~Qj2zfUIy2xM@@Cx_pMyrK)OJ08Xp0D|S}-kx|l z`r`y9iQKE+^2uwrXpz3T{{Yt|D5awEd=U565_CZF2+i+}^XJAgtqV^y2bYbz$QA&z ze~vByA5bf~%$NF&FbCKjByc^s*9yC3(wm4>TzzF{6&lL|vM zA-%w{JGFSoW`RRxI2G#OfyPr=xk%Cuhn!@jDodn%o^08iM#!&ez;*b-qB2>21upLx)TyP7+X@JDES)(>S&)<#jBGBwY0oR;$tm%Il;}aiQH8NB?{_=$-&{hS12L7_}RF}E0 z?-rtVfT~z$r)&Kx;(eB5%-;WoX8f*Vem-fZ}6K<}TP{barf z6X$jHx9<;J8>Uuk(bWxEw#@zP>Rbs2@{! zyVqau6gIIAos!vfeV+2`*06^wrWJ)@@i(*FS3#6{0UX}=m@D0sj)4uqUXOYft`vZGXD zgc|A{N}%cn91z_WH9&yU$2cxiMR8h78*h;?1wkY|Hg1&f`p!$V(}~(6`+s>mgp^Dp zL2u2%KRY6GU3)(!^x}y`ci-z5dhxAbC}u2I)O%<3g!qfl`|qB;V?Y&o@z>{*7y}?O z@H=P0@$a18fzq|qnh0SnDT@~=5TENg4H#F-~z@oY{N(@kLj|UEZjE2Il%f4MC+G~FC$M|R9i>=tM(hmFxcsyx5 z9(8u{M>SBfe?M4TIt?YYZ8T* zmC0M_=U5db(rt)7zwcOgh^lf&UNWKwVy?$W&LEbUkSC{medLmelbTLHKlz4IMw+${ zkN(V{Y)(W&T07^Htag#^q=T&l{;`#V5)3c=VsAEUZxf7rdh#)!^MsY80a3iy=kHFFBfZ5(|`DDNE5qjXQ@*eSg zj=|+~sqE##*Wnh%7W)4H59VFa(bI$&9S4+M5B~TZR)A)|vK$wF>Bc3ItOJBYM!Llq zkdO$DM{u(7awzNw6`SBfAAYbF(h5nj%D6Abct|#K;lGD-TwxmlsBs&TRM00H8^nffj3mf-Lf4h4tzf4Z2ow=DD=E(y zDo8QXo5x85eIKmkM=9lDg>B#o1I)>qyUy_a=P!YvSb4@cpc^=ddnG^T7Po05vnYJT zv%!gCDCinAn|pBagb^g!v^;Pv`OPj41&9`W+%{nC#HHWcC*jUXbF6e?evC^LgM@O- zB2Dw!_XqYlF(^HjcAXv1Mh>1(cc34b4oHF&TtDyq$LK#(=i*?|O4oJhy*uQb;%HMr zviuYGjY(Bh;%(7x18D?8aIgtAKP1-JJWplYHz!rg)ipKq3eDJRvM+d}xI`|GU%%cM zkpVlu^vz&{I8F1eH{Lf2=O9R8C8`>ku@~ z8tR~>J%t-ei z{o@3A7L?hLFuoH3Z|e#~HQ|)_#sgt;-UNVnaz@isId${v33O8pqcjd2UVqj& z{(pl^?6rnSvSBC)t@ubBca3^Z@CySPQA(apjnx&?xKOsXrU?CigvesTscMO&QC{{VgBQ9y~? z_2&M(Vu?pNMBj%`KL=MOUDK(;Lu=ama3pp0BTDmn{{R?(OmsC=Uyixn4#KAi@Spaw ziLz!qgib_w#T1Rjgul1_w( zybv59co=-$zgUi43>A3SpI-8se*goMZuPBVqq!n1{<+B5-o@V|`62U_fK@bDN&VuP zGu2L$oj06kp*9H>ASK4hk#?&)Tw&4z zY2xT*XBssyqSGX%#x3P99&ll?L9?%M-+nGZ8z`zJJ&A9Q@zJWS75hvNr6Lg$717`h z%XI9Tx^9$-v&OtSaAnpF;G@RzW22Z>*2MyyMvDf3det1lu*}muL4ig{k4uZrL8L-= zHP@FA9yg#g)2f;*aMB9W2Lu(}WZNGbWcKtcBZBW-4gl2zpmI%DzC<5YS zwWQgNR=_;&*Ysl5*fc~~JQ953L;A_E$5n7}fA<4?#}d%wx@vR4X>c{^&7#m929Np3 z;o*hw>;BeR1qnpgJj?$8OaU9%ZoP>;{NSo?j`VzT{{Y|6)mHS^8dvYnc?C)#t@E>; z_5T1EY4zns){N$_t`7s?%1{Jx^Y@WgXocq{--*Zh#_)nIVZZpx#xu1G@A2a|4T%Sr zuj?!ZTGU}lIo#(qZW@}j+8!&sM9R_%n>H_B=NG)ZwEUg*tTsBy#MOa-n3jOL`osiK4Fif4P-?vUz(hd~2R7Wo_Fxj7t) z=r2@@mb)T7I=NCkBWXa?c!wJKauHHe933e`lf~B=#Cu4Q?48HfFLP)EUl_T~nrXZ9 z=HeAKc82vHvgJjpgpUUO?_W616c{DzT_WG>5y}=FI!n1$YW0G&S7<2c9BXwt)*b@e z;8nr`-PAb1*BfX?+nvqdgT@9r6;2wDMnMRJc`swK{;=gzT_i)_t$%qy5Mqcg$VZ#6 zj8G6?5e0ZxRo@+b&QZS9pmEpWgdFw5Yv>mQ$dC9y59QU zSQP6)$LCVd{@@afprF3%ug8wCFB%32Q@;f`$)i9O!44eQT4%bn3S1kU(p>l;|Gi%9czU1PaG?B8dd2EKl<6$d5Z>Bc(T zvF+jYIQNI5l)!P})Rp?fbP?hAije5pN+@Y|4vYjC6q{%Wbnc;WOeu&O7bi-pYFo|` z0`@wUIDe0L>X}EKoxU~X$5w+X1vn8WCq3anm3g}QUOw@VcA_AlJLHM;Jm>(134AB+ z2pAB=0~HG)PnygK2w;$li4t%Dz-wNcd!uDtf%xfV4vEl;==lgJ=O_qer~_&oYY=7< zN_dD-0P&}JRK3>dMZVI**^fv>UP!6t`1boD!($IavjKq&(&Q1<>l3}o5gSJ;lHT>WXG+=L<=Z(6Fa z-VF)EybatD4seJ9G4N160e-N=j2h821BZM2%I44z0;iYF<24?)V2v~h5}7t; z&_$zDMX0H?zIwqZX07BjeUJPt^Kq4OA2cP?Sv(kdDK?jqQeN1Z!kP`H=dne8BEOc;M z1nJ{vUnUqJ5F2n8e3OH$TgJXHD#rZeDlbjblXyY(h$$j5@nDSDEhS< z^P5_NDzw4$TnN~L15cFK{l-+3)Z4C_&2sNppv6aKufq7aUT~|(H(u;*?^s4L0a`V? z>D#_zj&HSZ&l=`$CgwP&6;1w`r+5mlqzIo1CV5KIYXxQ)3 zl%AXiLWq11-XwVdj+w50Fc|_(`n&%C9~S@+tby2f2T!b_5K<>vd3o`yCZw7}zbfQq zG#df*A9Er>1nlJTzn-y(0V~kkc}Qz4F?mHz8t?YR4`R}j@qUk8>{Je%1{v?1KdU8K)w-u513UotG--lQ#2r7yu z$0tVmKJx2ksuw}}my?XJtu!E7Ktzwxme2(uLT%?bbZ$-6CeUJ zy-4uekUgANBg8xZ0L&TvZ<6xs(PCm1Ya2U@O z%H3m9VTvGv1DAE<4Oo67W0OGhwtZr-sH`)B9H%F{$9T&ayMZTqE~Jp>mle0OkW)it zj;72Ih1!I2o8KPvy!gE#@_?hhZp8e3&;zM~Hh}p9gU8ses>dst%-zjetYqVl_mlx0 zotTt-;w+2{V2VFqj4GCkq~>5ogo^LW^Mn%Q5~($=rr(ziTSVc}(cNd(ZJ;~>x&Hu5 z&KTe{CW*~0^Dz*<1sakY^Z4D?1y0~9T<~u7-wq%U#TrTzeeD4Eyj+TI0&d<{XZ>;! z1ptGdmoy9rp-nqLyxo= zjSnlafM7aMgb7^WaD)LswWHziV+2Hq?WP_l{$q3@32^8eb?02%x(Y@vf-1EQbFO&A zB_+`9c}6o@XX6pLq+`9*;t;zIy~DD8R8Z`6bFJ#f>Wo_D{7>E^1geHx?t#Bpj@Sy5faM}(UljHaCB0*v6nP6D zJn)_{e=tNqH@jy&f9?**cJ=cA03BlhX)@*n9$%3!oQQByRk1G@h8~<{E0I9>tL57e zigC39PmQ9yH&4?*{BeITFr%FZF_$MpLt+ zeIK9mnqy8$2sFd{^^F*i4G3P(&HxcqQ&r<$Hczf|lxtgU;O18wDcTI@wI<*5i|Ues zL!JCO{`*xVY^XdkbpHU|;DBkqC(B#kf@$*LM)0WY(t4DB`o&T6V zl6&X(nsMNUJ4w&%&ETa*rQYX$y2a9{4kvXt+g}(B2IPu06{VZx=KxhH$>LVg6!4+m z2vj@GA_{TrzFg&*rIE4bE}A9ZuCVY8MHLsYEHD?7Aq^q?zB1GmN)v~>fnTo|K5!~gsn$CsSOQGj|S%O%z^pXRJzVF2P!rCIU$$pxe6`s1+Ncw%S1rSSK6EY1lR;hmliy?|U+L z(0j^A5^_c3MzHB9MLU7$$Ti{W+yPBSsAz-98&|_UV)Y0iWTy+!ePhHpfc?BW>tEEw zinIizXv0g*53APk7`1~muJ6nL09*(G9i$>`);rQOdY}2u2b`2awfsABfFL%#f9?n) zPQ2kcNR_DFI_oTL@YSCu{l-ysjjBn=HC+)lijUnCY@k}0HM1n z{{TAP{KnD=oXbySsHQBPfc;YloB6nNv99CeC-0N6mgKJb$Q zkX#&mfX7e+T(JOnHp|0!$EtL0G%oIq7{DnL2USEsE$4XTscu@;^meJO)?rJMWI#{> zNS1lmIjtzVMQe0a;X3?f;3#d{dDhN_;(g@;QVMw!cFw=2!NK2Av+l!>wl_-@60f$E=f$25W&{kk$gItDx@{3UAkpKoS-+ z0Fc<}@ZwTv$~jFncz@nWoTAJTbrfx>=W+MfJz|DzNDx+~+V#IIby1NV=>nl_UU$BP z4{z{e1#51Dmv~!&2%zx`y_-!{Ul?H5Hy5rPxk3sSeT@fxav)F$A!jYvZ11cm=z;5Yz79X~si`%RlLhfGMCOxyCP9tVAI-yPM_S}f2p|g$O{@_Uhbff+B{dvS{ zpb?5cnWo{80PSeqt$yo~1cHkH0FHmWsV0>WkE^?);R`^gklxv?%JY;;i^<8@3;zIX z$&}C)H>1V!&LfKGqWS*-x0)~|YY;Smf7cBZ!GUJr*!X#W7{Q=gfCQ~laQ^TRZ3ww| zK>0b%K%CeEr);z5C^%iTr#EjosDP1s{8IS4F>>z-L^La}TE$l*1XGenltJj3xuB3MS8$@8cLgAV;?9tb6CqOjz&)dgT|7 zTE=%+7K9@mbCJkFp$#Ip?D2}>QVbCn{{VN45U;?;$&}nuqU>-`b#ieq(?q=H`I*)M zkUMqc>2bZX-<$+ceLQ6w1E?9#tV9DbHn^2AWDk?QR>vPQHK<2STgR_E%NRM@-3N)N zarAeWJ{FuLtxf&+i?!HLlE^j$JRO+|(r%DumAm+s`p0ZTe!v^n{rbfOG8$G%IM(v{ zq!Onx==n5jSSKr}4TzqAqDSuXa4|u!T?4KjrGq1=f3fTXn-R0=^byAtUX3aUDQX19NssHgn`h( zcdI!OoaVXcrBd21guQv|c(Z#GVXo)bU!2z^R0?s&{o@r3=1^T%=Ued{s0F4WgUE8! zT2B~vxSyec7!Sx?W-NnHly57TcQ>x@7`3;={VaL&8&&J5KC*6MDrWb?1v$0>wR zZs(4evx8k==%}0|Xghg%a(E-G-xXJB;M zPW~~7hwY*aZ!r z=Mulv8vp@#?dL3|AVA%ZivIv+6Ho*~{))$bvH*aCr&`yZcmDt=g7`pc1n8dhjFFkqx0J(2=6*mn$8+~gaoMF@At$_1R*gh1Xiyod?m@_)3ZT)W&Qff3Q0w@F+_+Z#5kQNEiTL`Y#3?KyccV@ZxuGJf~z1ou6gw5 z6rTu5r-{GW=5uiP0a_mP)|kwl?l{&bF3lWf6{{Xld*QIJ(y3p~KQ>je}T9lUE_-Vni6v3D34>A7bU zF1&I96klV-HH5WjNnlYrIB)UJQBs4529UrDJ-3`plMS_$3ZCw|V*qXtMb-AVK1J`SI%jY#LtoxBADy zM~3`$ikn{n);q5bu@IUGTC;Qe3&Q~&^%Th%?Y2e``%g$8vyP2(#P zfDLl3uG!-YiU96j0QA3{76!U4uZcJedTlGh90s2_q!Xbc*-+R1{AEEmqOaTjd}25x zG@eff`tQ6*C_18%9&59rd}9r|6F{{OX!#}yQ6IEVD4RfNvuh)YtHJT|3a6jTZIi%H!SOot7 zTkqm`mS}HorO+OqU1GHYM!f5WZ@*f{J5+42`)ir_`^im`Qb$gq?+rb)2%tkxKiP_e z3IGT3ep{SKBd9WM-FEl*z-TvS(lQk%jN1jbR4-c4^WfiD^c@7+wrxI7f!46wD|=rE z!F_Z2%0VhB4z~UrzQY9r7iCK!c#7WK`ogBN?hSNCpI7tFF0#FqKN{Wt09OdAp$OU@ zxSy_ZRZ%)PuN{Be%`hMt7;(FI#)pi)`qPZXTTqGK?|FG^CdjHA9j@SJu0a5hY|PxD z%8&%c8uBG8C{~;uFIaFDb-So)qjlTXoIq;|v{l&@PN6(X;(!wlC&Eo}UB?n2jhmUU zQ?)}z44zZ~DCQh6eBcysfjXT+^V^RE>>b&rnm@c@%<@qDuXq|`;GpxGDuNX^fzLcN z^^Xp4*^Rp!8N~R#WkG4$1@_N48Wlzx>Qen=ZfU%E&x~=UwLJ&qe;ELXAbHdN->lle z-;8MQEZ2SDBsD-l@;4WP1zt+ZU^-OLDd&KT{bg7UsW+t#q~5vBA<^U$qUMSiN>V8kU-ZB_i=ypBOUGxd zuiHTUyiSbufGPt(M51b6A2MQWiUQI(HlFle4THVEzo!LBFO~Zf{{V8xgGhvPdj7oo$C8VMS#-WN zzI+?RsANc@pFKdoCyN~-=!>(~#-w|&*En=I@CPqG9n5daWD-3C@A2;i$b_MNE}t0? zY+?`#q*X`>F?7ebMLlisX})s1T(uEK?Y(Svlyypm{3(y>peusUYasfn%;bbAW~c zbSgJ%eEG}V5D>P)?AyVbxuO8p*npZSU6|#{Du{C935)GCVjTwWfrnB}yIg+JvS3Lj zpR9s4DQW#C0`%yJ^@*|^u^w<02G7nFgE5CiH3M%N=d9n%igBWVCoMfE&NwZIGdCTw zM#>gLywnH^ceeikgAlL?krC28W6P5RgR&EgcYnO#c*$qH2e{gD>jQ+U6FrL1)J}sF zM(`CaSirjd^PDh8WTuI;-~RwJb%X;J^yoG3>l$G|rTC6q$Zp&eJUFBdV7v8+G)26h zFH>jlsfU26V$?SQIn##}1cDsD)VLQ6i69Kn4#%Qki%hi2QkE*K;$m?C!AD~2iSM6` zF#rONkvhPwQiVk4&LAUz07z$rpnNs@De&5k+Kjz`@r5U78ZAScc3zc;)7o**ILdpNfdn=eEyF{F#-cY4Faql zz3Yr&Xfttj7gyh(ZgMmoj<=(KS`1NX5F^847V}BdhZ>EFt`P~OdDj?Vdx9h(RG(ab zymf2Tg`KrIdj9}8_JLKVk50qpe~fe?(M{0}KCe6L7O?d6$xWT?49&Av4DCe0gy&DJ=x0U<4!2hnyWN z8Uk>@#nU$&XbDybK{;;Re~xV}9?!$c{d1LZ1^g9A@!PFFoT@W7lVXM8@$UgGM#?>X z^g1R14!fb@X>>GC-c$`C3QthFAAWLO76g$^MkQD)$Mk?j0RTfc;OpbZ7_g;kgc|AN z>3LrmOJ6b2@5}FBjNif$4x-mvPVt6DNFI$)OhHL1Y z9Mt2)^>GhyPQxX3zuE64%_6#1@e`lw;gDQFbKtLt>mH;rS__~T-tZG+R0n-|;Nt{3 zL9;~d`pw7(ATg?*K1s6Mupf=?bTtUb+Yj` zOQTgyT06V-T-hCk0a841_{dR1LX&e(d*_^%^fd0B=!e!&2NbQ%x4Q82f$wILyG97D z4Ls9Wfp7|RytIe-oIU7@kO-@W4W0U!G3d~>w&g>azgcuGdZqW?j<7+cYAqLJbh57R ztYl6p=?oejJaO_tvIYl&mX#r=b?wKH$Wju9jTHV1c3?Hv^iM;c3-TL?3w$xAB94#{ zn>ILyU?@-nmUXMYJHVXoVNu*b8h@U3gH-@eD0jYF=HpZ_K+(B4=jRkwN~5xG&%z_8 zSOl?y0zv`=?sbC+gfvv>O7im8URt5F8jY;W6cHCaZkj9k^;`B#NXeKo9Axyo4FZf`<0_ zhi_T$Rr~%7SXV)nr-d*n^H?+T7Yr3Xa0La?R04_LD>N8_qbGpV-}}HpXp5C(y6cU@ z=NT@LgGTu0Q?%8@+8CiKaTcV@xK2eS+8z8F&3IH24sN5HJw>D4&bE1;AL|`SSX4Je ze2EkMyGd&yh|Xk zAHBalWd`xUb{VZ}w0##68j)>7puKuZN=)1P+Rg z>NWj)^MJu$0Ei8=x6$v`ED?293%cZJD7unN@& z&d0lZ*r5tqC=3sLZ7&w!Ohg^fKrV=+Iy85Vtz`wHrJ}K?QN3Uwv>SdU1#eF8E+rrV zb_n+~<@wx!P(Ptx2iQiR4lqt;;0K98q`oo`|4>^9N6G5#6-r0B_ns_lAiYEasc}KS#zcA_5!WO7p3+ zi*Oy=VDeVaed)o5N+ip*Mxkr9e>n!8+@e=d3Y+gYagYoXY!)OQ8@s$+6uyG+K$kIOI@i9D-hW*FGk$ zQ=rPg5<{=AoZOZwo=d9h$cM*x(@>(>s+;kn>j0t+6{nAXnUNut8OAkAJ9*wE=^+Be zK-AfKYmMQGipNOedtEKQ@Bl|HC%A-=8^0Xm@@=31(|OP%!}h@3Vst1QQ(@m)Tt0%L zw_HP#OYtxU5CvB#P7_bQaY^|UkwM9(Px-v*(c-Nkplgl?8-ht-G7pVg3HHm%u@j^A z4D&xZa9a={Ksj)J{){3B0jb6jVD@$KggFO=dH0Noq-Z8LnHV6 z8E^qo>bq&zFCAovuWgxqt@_@u*@3q$=JX1>wloUfgEd+jmoaadBL=vp3VTfUpR0IB!MAX73_sM z#FL5$0A6P7hM#=l(+C0Czs0yhASjW(8Z(u{q{T(eL+Jz*@sTKlgh54nYU?(tq(__U zyZ{|hrn>Ovr=W5Q?EPl3O9b{uxZmCcHV8(Oy~auZ0L%acQU&3=Y<%y`_k!Gk(dDgH z{9vh;*-j;!51bOmZD;^Ce6`o+C03omUxSbPj1{N|HoM%1hU8v|Nn3YrkAA-+T?wP2 z=+t++&Q=!av`ra5HdN;j(*QTFg@p?0Xg%N{& zEpfBJ8D9x!ZoU3&gV3CsIOQt+m<_?$Sa$I}{y!MlQ_htAy_4q>5Iay+(FWC7wX4=u zWi<&wXfy;kr>Tmg5^4iQc{KB_cO2OHfUHMk(C4G=#b`O`;HrH_x#KIOqYyU3$;jw# zWra{7!#uhgr*AJf?aM+9OGpV~qO-hqT$Le#yl#XY?*`j?1t&t-4qt_bk0IDWRlnP83 zT46(0>{A z`+uBd0Et&RpeGL(ta5wo#HCH!JH%9JFQNIH*VBNw#Oeo#ZeFi`@@km44*2I`{k`D2 zcq2w0H95Oq9x=xRmJZ zyPo!?1k_aF1lVhcJa#$6q+{haeRYfGO@-*~2bRMopm91b>&FvME-a9=D^zu}@s)!r ziu|jyvwFgc)M8uWGn^4j_N$Y*)}3g;dqN9EI437Og6VMn7XZ`@b-4WC;-Lf(@M@ov z8VKklS+Wr|$9!Z>xhDx-Bi^;fjx*G@O<2JV^RhwR&8TcS7MdD}3acG>r%L5;ZUu|n z51vEPv z!rqQd)Iex-YO)h<=D6bRe-5*a6OqT#;es73Xq!{cDj3Zq6L((lUVPyXcme|&=e&@q z=01LMJB#K$ddpOL)+A8PMKymr;Oh|UX9&3os-*P5x`m?XH1IrI`D61-0_1ehI-i3y z?5cR)4j7j~6q{%N0C2hm+U;M1wqYVc9f>#VCq-GJxad1%wQSJp=$)smK$r!AD!Pvc zi!+O4BMQYdb-#}CS-dn*pbhcg80Rn|6Q0Plo-s&cyc5QVQBl`HhQC;(P(oseJaAh2G0sT4HHNw_tyg(iqQFtx!%~ko;~5c2KSew9 zCuS7%6OxE*JrP{fRSwMSZG||m5xqO}&N@5f7Nq3cPW|FxejwaWC;sm?kRmiEp09fU z0EL1;)ks1xpXVBpn*wp)G=}HrHkhac!8RJ^DfRQ4f6O68n)C72T(!PQ?mA=HQFI;Ye;8H4GbJ6sVJ?}Y~JvAo-vA&PaLLDH95vfy}0pjvt6^sTqeCyr3Q+?xw zBtfFZ-qYugtkGUtK%}O*=lbo)=E@s@1@d#{)=+`K48qr2HeK==xg}DswyVjwwFV$S z4GvJFfY-uG*5z_ZG%78+@VG$(XlWf8I1rBct~8j?Rphv_4Yd)*aV#NO!ATG~vzdT! z9-!Qa?5ivWJsB*S4=7AFl!z{tG)SbnjE4|9t}h@X{hn~vK@c?2HZeAZA6St{s%gN9 z@Z1Iygn;>8#Pk9rhC0iBGSy{Qr0euOIj^WQ1z_?7j`(Qh29^tVAM^X;yhBl){N;%Y z7s21^<6$sZpwR6!YeNn4=mpqKE3zM#*Q`O9nsrXb$8Yu50E(EUSIMrc39jYIrBEn2 zTtVlTpL0&AZB<3F0O22uV~0=>MJ=ZHSlPT&eP949d7(I(JL4{>h`?%H3&A{6_1ZxI zP7#Vv7hZL};n2z#&(>E1q2+qq*9YPAkeUT7E8GIhrhDTzKzWYO1&?eGPK$dBG<;w|3kW@*;JbtnmBU3G{1GfjvMyU~aQ1q2-PdT}Hh*_97>xbf!#+)Q0HuuZ2haq5G+6y)TeHS>yQBSZ-u7R{b9 zL|{Syk7;*}J&DGuG=O!!ap-XZoOp!Q4xd=0ZxySTif0Q58`Exu{bh!ddrHg91>kXa zo;*Ot+E}5+_xs1I!Ktqt6NknZ1#|(AlIshsK|zhQx+2gVcX9w7p&$@DY8w>R^Kywf zZnQoANci3Ys1vr)+MuTa^QaLZ462+_OMENOoJ#~bamhYtG(IBSBdDsm6bH9JwS;#^NIqnH-PEB=SRDMFn~Ku)Z88)7yuwk z%cQ|-6!z?JIGrB)JYpxqWal=U-0ufsz>SHyH=w=G8^y+z42je?E#z z4JM|@u-3CfjnKrZO7v?IYat+yA}Y}U4nx%DV|UD72c!IZ!vi}lbnFtk*TvQ%kpK<$ z>fn(_MfiLx*u*7Fdt21}*-HhL3jts9b~zwd4XiH1k+a{djf+n*k%RI6I>i94&W|aG z$RaTWBs)K>BeSz;bcZKA@7{I1D^3QqJ$K^x$d*J(=K7!I#wtxxR`(q9I?kFDOdS@* zq<;SLPcj)3dr^KjXVx_dwJOofWbcjXJYpgQS>$N)!tFTx!qFgK{sW(Vc4MoWUu1PT z=H7Sj1OV|FxJJ!By!*z$SESi??(_X)*PEzKnzVUm{j72YMyRYv@E2OSOcc6Np{cgg z4;nskgFvJU1f=DNdn|>)4I-3Pk&O_h9FE->0jSf|bdaGFz4ke@(UcSW=JDRGYwlxP)cywfNkx(cb2fauG6V4!6QW{a5--+{zPa#c--uHj3kO6S? zdXYuX_`qcy_Njr1h`-z0M$M#z8k5A@jx^j zG*#9pb4gd`KHdJYqDH8bxgRc{8A!Sa!hs80^Mu2pvV2&+9OQ+gL>hPX&*KjRYlKES zIn%8;bqFC34S)b$)%n7~DGjN>@42*j`IbY8uHC9t`8U>El;#FI7RSBo#K=UFouHRZ z6sCh&91~kudH^?^>2d-Vb^icfF;oI5Zij!b*Em>o=tziAY^V|*@Fu82Nob>?>Ndky z8qhJo5LF&@E&+oI*8@e7AE$VXihPX62!Lm@PLY{@u!gk3dx`dHNtT(THjj02!XbQ_Os`|48W?2BTMD!I6eEzoeb%> zfGZD!526tT3ZjG?ueJA(zQ}J=ZLc!@fUIboGBvYSL?c$~&74SbkJIIoxQC2~-=2f{l$A@M#EyOgC6>q+BSnR9G z8e%Nb%YYq+$SZlwEo}T}5Z=avjk}Y_9AzPc)l+W27=ly*L25Ae1#8_A_oD{$#`yijlqo`F3=9n875~ z>H0xE={yrQE)`b}g5Qq1#$k%=0J>Co@!S)P6WSz1mINm(*Svt14#C;VmfG|Ct})SY zkO35WJ&gJncoY%PFoxyrhNdHGLw0~Po>7;EWXCisN1?iCjy$|%PXa&?yD8>={+km< z3D5)%m#3NDP8ex@Mlf{DX_75(ES zL34SazZwg?f@A;*vV4^epE%(f%JS~7a9(~~nAO$O+pcu`?>Hi5pi0THKu;5{abo~L zMlQF~Q|x%k(BcSQLs3*8ZZ0?tq&|h@j~mKiTN8F1H6i8kywKr<5D~#!T?Fm};u3Ho zxGO|MgVtYOD@R746&jUJIlz)LQ58y2VsyN3G?a7*a-isnuRu^mgjIvjll6}g1ra#J zGKE3>r^WqXlR)Sxs>e;b=9lLRMODmtba(#%bd#znSSI~wpZv=J5l}hs`}}6H6jUU< zdY>QqePjd|ov9mm@#xDCg;{Q?%o_o&wR(;qikV1fLJP;O?3(9BeysFvy`u{wI*cj^ zz1g}Al+phHxP^}(Qeolbo1z3&91VC3!Qi-@3F4}e4Ef`C;(2B*Gq z9Zdiu;GgdVPeV|tY6t!Egg!vmI1%u&@wc9GX!cZ4Yr>CP{bt{^Qb9$~?r+bG1jSYa zofKZ(UExg8fyow}bP08{0my0tblTBHx}LX&qTL$>WC8@=uX8uxflX#8uJ><;Hb>3~ zt*r=Y=bkP!GXz(H8cNMOcYq^M565`}(B8IO7-9@;e4&3B(?e`Win~sCidsB_&^t!Y znY`6{2f@4|)0Nhd##S>6gMuO9jsT68`|)NsY6XSZ|U7A;sT!oQZ{EX>>rS`1_dj2v7hOG+2YG`OOxD z+GzcRcSeuNO5LN#8=4Ut=Y4M)r2L0wxzhapXECJF}Nq~`<&RSb}0xAMnc3d4n*R7dB_^HrnrS>_)`F7 z)bt%FX8u2{VN?{+g6=G?olZCMi9rIv1)`e$J#Sb-3)se_?Vn6Dz@-Dtg+0AnuYpUc zyrm>IC-=@)Z6yW6!4cmK=Xea1do2W@bVe_TJ{*v6(6>g?!1RnWd{8Cd;8`cGCs|Uh zo!V+DB`;FL)>HzBaOMljo4uzKdLg(d?RNwOIoV-)bTO=JY_2piMbmwiE8Ch{ZY!hd z+gU`&_DdEj*>*v@cp-><4%@{VvTKqOTFS0HD7i#Hl&RnY)&Wu%YE361>)dx--vU&D zqoTaiUQKs`En$JWS|CRI-QF-jBP5B74qu;mg;vMW1qN?NBju8C^ed^og-Aix5H(sL zUdpoXo{vTrf&<4Tc(tBQxFaqEZZ;>D->l_eg9e8)Xb1hV)Cd^TDEJxr1d<00+hQmU$2YI6z~t32T6gAs6no}V+b z5(_%Ee;()g#?%y0k$zt@Z;o@i0F)Xo>hg7m--s>fA!^oNZd_f40lPOh`OXMI&vja_&Pyh?U&TjtjLCr^Kn4b?dJ#EfPrqUET=tcG{piQ6l2u@UuQhvCYAzN`8}AbMbW>OqF?T3Y+O1# z&;mQCs{oc@K>;nGWEx@R$&^-vNY-b!3F z+jYb@so#8IEp_U506YBp<2VFTmWNkse-rb7DXryktK$Cv-QpOMEeRZg2`g&y`5yT( zG!jQdYxVV&lqQ5p@cQxVJV4P8d%k|_`p&a}NOmt<`|*~fn@zhG$%{LPg#z~NXWj&D zV%nU6zbhofT7m5=K7R6J1rE=OI*rySXIM(7J8h}Q>E2n5BChhlt>;a8Zku;zAh*dkr zdw{Y8Unq5>J4e?>jdF0bYE952F$>ie^0FZ*%uGxEWW{fpd z+Ew2t)6w2HY85ss%@!Nu#u9-b*l2^Jzg=^Z(NSq=Sab({zP?EbwjQ0yHiqvV+({tO z@F2H(efb|4xKolCX+cpFMvp!*$rVA%t#U29zCG`FASfWsr)}^~_?U-H1E{B02WH!Y zXb!^FQ^DXQ`oRS6dmR#R6dKgyoP*qT^;~tD{lxLWm~GAb@8tgT@U)dQI)f=noE)cUWXIswF2=x8M1ApWrH< zD^2ma5;~csGzb<2;^3`MrqV4)$|aWQHdOTFxs|L(c2hv(`fWu^TAZaX0S0lbFrq}! zVh)EqhxL>Noh}-_*M|<^y#|5ycau9XD!M)S{{YNY7_OuXV(*Xfl|b`JB5e6;_F}h$ z4sL?)VK?)vG0Kt@fv~n$tJ#d@eWL>Uxecf; zg4SMo{{VRbFcAVTPUL#=g1}M+6pvhf@CiVqz(C`3{`ZcuGem9ud*cRbO}$e{)}q;~ zT-s`pU^@d83i4CNEyzH4kdnCsPrK1rplpg(a5^{xJN}93TMYo?mmAc=%8+$Q`{wtL zR)vw65LKl|L*mQ_=(wZ$ z&U5o6T1P&33-^pm!nGw8RC^5EBbtFnFM>Kn6KnvRKxDsm(~iSY(F24?gA(B$&9aanvFvfL3O#^%)P!r?^-cO@B%SuLs%NE=i%;Jg4SK?fN{ zZB2~kjkAI#i0_y z-<)bdpi-iSS7y+7=L0LMH|05n@#m8VAi9JK2t5*~V&DdHizL$0)Sl5T{>N^3+1wcq0pbSl$?HQC1Mm2mCm z>$3r72@pj#JGO!w#IPkr z!~vkZqQN8pp_TRk8r#_^u_3g&X258AKb3cFB^x_?i>&q-WP5P>#85;@w<3#5Af zmrel?M1)UtFxvO4F5(GOUAzJ-jMS@nL^KEHZk)iy;B*A6**L zNKtk|qqI@e2}Ac@@ERfR3vRk6pBVh5Y^<7J#OsVGQiuVovhncw$rLoS&mq>!n6SJ7N{A8A@D|GV9>yL~pLNGKR=$&B&)GaZpHG zVDWTP;Etl1Cv_4FeAI_49#qZLgpyd(nPBkRBY|%a zS%5=m2_4>Ym*7@kYof&Dc?oc7Z3zQoS7y8cb#!95=;LA_d^?dg&p42EjkN?2vryWR z=Qe220w0h7i>h;jeRUSuCbp~tTOXFDbi{B$Z8VEf_aeak7v9+8uE%D!(h^%j3T8tA^Pa z^KAkhU7PyPGO1}dPIa@48o3w{3d%X}lOO^r1Q%E0>y7K{2!;)AIq2W@{zOoCIU-3l z72WD&>3Iqr?=pknp-fXDE?JcQkeDr0IbOORfw26e8e@;&wO z)~6X8ksR6_*WzCnoVV##_dx@D*4eC(iFg~tuYF}w+5{cDlXKHNWJE*=Af}F2lU?K> zWi)C8XP82pj}ASWRphjsO4)L zjFjvxK%oBsJa1*-CCw;XGzXBcXe5*=VR$;xwjaD*t|-vxlpTU{%@j?nQ$(@Uh)tZd zLvtD?iccg2g!&QTCxXR=)7!uRx$Mt|$2$g>LlLdh((WB;nuj?8R@viX1QPd>t zf6nrtJCkxB<7RAd()_h@6VGSH402Xt1CVP;pgulu`6dM7@zxz7ffWQ&oRQ2+*}Rzk*Oyz(KLdDkPhWnjx42 z*qn*4BfKI`BFcpEzhH0g2BX!Y;pKrBZ@p_YYjov8hYB%?=O$fLxSKRcIUNJZa2s`Q zosf#9N}i!2E@}ZF&>biVRM#!jopaI4EA47o(`E$EtcV1lfkY8=yfg%XP?4%SAR_X@ zFDX-og4jW)HqpWF;zRaF?W(mluPC=i&g@bbJxxj3I219o0y3?R7*Z5Kus&WfePC-Sbl!DNTb@Nu|(m_zm=MD0dVh7N0bn z`It)N3xt$Zu{wBu49N*H9pM&)dR%^#dK}h->&-%!RZa{vih3e?DPOg`48`pkDTI% zxYbT+@z-5wz}x~G;GMkeI=o;bb%NL)IQacOF#u3i5aWJ-<;V*@So0u~%YY`}8$4eD z#})OP939Vy8~(6r!~|#*Eg8$Vtfqo#MY-rad|U*;Vn|1sh%n9p^QQ8@a+i(((joc<9 z(n*>sY23dg!E^_}p}iY8)E!&cRRDqxdI-BV>-tm&P?{n<9m{{bfAamx!dgLm(JiYlt8N_ZaW0)hLg|hD;>Yca-Du4w$@!PYVgB9!r zsQGTaOaxL2QZW%thXvX1ymbiG&;(s3w3ncE*B% zYZk_u$wh%7VkHh9k&r!*$#fofdE+6{#*8L{09+02opWQ8b}ZY*mb4O86QKeK(Xbm- zZg-#ro`uocmqgt)+5|@v8 zpnwq%BsY|Za;BUCWEmd*e(@4PzA10y#<+_~SFRsg@MYSn#>#&10h-Q2SFXhMUpPw1 z-CYCN6zEyrIN=s&N~^Rf-i{84Hxal00Kebf8(2Ut7jHB3^M)VucZ$Cljg@zzd~BWL zCX~>KdO+X4uwP1n68E8ebASXA2!2PBem?m$h1dk5lfvxq`78BG&j3ao`X=qd?zH%U zu`c(nN4bb9CZVWnZ#sdTvy9&8BnOz3a0gwti!lIhDKyfHO;t;SP$iLGT|6%AnHd$1 z&IG-1esFk(bfgtpN6yZ$l`Kz&7gD+`dgBN;)j-7-1*POx7gDDBLXrVlctHtX{2xh_bR?ds3rlYPC00yqhpCp3*?757gc;2sLo*nu7>UP z*KZn736`#Q?_Bu+G&Xg($RL|4=Qa>z1G*wdf`UbH0uo&)A&nMx)5CEgVbBm4Kz5f< za>i0hO)6c*lBh7oLyZ%Nj7thl;IHHte%2)lqo_8sJ`%23yf!AmXD$KkB;#G@yh@c{Z9iBoKo3j)vvr8#JUFsJ@)Cc}64P*=OtGg* zZxEE6BZ`7*alA(@W18d$WIKh}UF(lqq5)J+lB>ZrejGV~fwU6$!M*qRa!X?636`o} z^*xw5L=>opK&OYC4jYvspB7FvmJliuK=--p{{T0l5Yz-<*WeTL*kMK06gJQY%gomK z!OJLxK=>Y`lmwnM(bzWF@%hbMf|9v+q&=8|@Bj|YfC;5Wd)MR6Y7FSua|U{=w?%G2%U(h0geh~!SNLR#&mNkHh|92f@W{Q%4Zum%u&H8Gchjx1vw7ek2M@sTr?3Oy1#n3Ks}6K zV1?!a_~SLmKpiI{4co)-22-y%t7*$&VkV~!X$@l_jp$!i^G}?9e1HSX0d26>&l(D# zgE<@zG0;-#7KmjrTeMV4j~gda^cH!IZznYd;o+Y0#_sIABx6Xg4C3;IK}FbDow2IX z(qJWU07F8Eu68au-fEyhDiB&lZl3RqwH_TYQk{_U^X1?HC1py3Wi3}iX*$JRHn!q# zh3ai#A;FZOz)&NIS+H-9B}{i9RH`JFfYpFOd&f`$37#KbgM@p*0|M zB77dJPf3LYM@xb-1Yg_p-?s*c!fJ7=_IHp7j)HQhzIT+k2z3cq?FaGB2LJ*DQmQKL z51nCp*C-Q%Z29@iV+b&VfK5+X!RIKs0jNJ+nnmrtE;4yOq)~fBj zhUTn$gHnuwD9}zhC%iU9Mz^N10d-n)iDk2G>MF6cxw(>3rni1#(J(Bsi zE3vXbYyxRDM$B|iV?~&_>)>E0bC85HLV!VfM)<|BpV1=#FQj_QckRPkXS_= zBw>UFHK!nRWJN9q2mrMcp~|j@4lpL9v!rO{)p(xqV@(kX=#i9o3-^G8t|rIQ6{R(7XirGl(wRObZ5?u-)4!`ocW*jIo+O{S)x zsm6~Pq95<2#B;<$n3 z*XsisJ$PW?ikELV)~FpATCi zufBEF3yclCDSjh4orn0%_i$_su_mRovv~<{N<>M@-h94n#SGO0Vbh&&d%)9GIg!1A zI^W|R5R!}spU)fdh*T2x56*}ZO>vtnC{{tGwEqCJ>nE<*H>eBwX?Wu|&=X*=bz(m1 z_lBerlzW5?&OK`d6m}+Wb|lkPe>mz*>?DVPP61zov-d!9&qeFwyoWTP_1dW?J1xrP zqOIzSwT1eN0*oh}kJAc??MKn&wuz0_q!X0?&Ny$i+~g6e_MF6R=|4mkfieS+GR8VJYqHgAvou=ktX2*3&T}}Vwyp% zV)zM8$cRvvMv&jC9)VGfNPR=-=zbcbNY2Ac;Hv0RuLG@(*~O)a!)Z@laPHp)mS zunO-<#wRigv^TMf*pL4EDS)?1-ugZ?K6cKnHHpTeFA^<)Yg6RAP@hB%_xcWPH&-H*L zphrySZ#-yqzVH&@bQN$AHP-U5G?~(3O;c~2qTwF7u}h@a!s8*X710Xc8bMEwb`F3N ztTz2?67H%l^N7_QHft<)<0Nc% zvWUIvr&HD(WJ$0P>CrElfe%0eN3E7rbqrr0EF0|^xDPt>fOl0uDyGzqhnsP+ctE0F zhXpmQI>f=l&@TtDGl$+0h9ZU@0HBt>Fxs>zRRvlJp0jcUlSCA_?ih4s+BPLh?6fZQ z>j9_&fDvdxb)Y(<sx!qFa$J|Cf!MY8L$yRZ%*ni-T9ah?G>zn9gxh#i~+pjw*t^ z4-Nvgs{ykfGf_nE;}Di}$eCS=hK0m}Y|>z91rGfG0QVod8Yl@+0Bg%$a2X^-3Fo$x z;k{g?-B>%*6UWbOcY!BB3P5G>3$HsdSVdH29#cn|I@|9il9HCE*kO+-00l?N-yhc; z<7eULzpC2`a6CKU@Mum*mExkI5CQw8U&$If&;BKtQ zur$%%XQJ+6MNpGSx+)H1UAefhwGjib0>XDv-f+op7!=|3EE>5DS&rtHWNEH7)6ESVL~* z@?>D0X$~hVLxB7E&B9RF;0}!?`TnlTPQ$)A0rbzA-Y$?dv@mq~?+rpBMDOwOi$OL= z;{cR|VCCWUkMtlZAmE=^fQk{bT<_N(tZho}4Cf})$5dz)i6TKW6ZRK{q0{eub8(KLuC?T^z6R8RpHS^ofhk%_> zNTns#r_S-j+KtdK#NScFUq$6XbZ*P*HR}Z{8$+#LdBL8dmO*0n>s~5gcD81@s%?3* z{ZN7++*YXa4?W;;QFx*`9wL49l*b{@n+5Xg^K#7_{D@c1zgaM-C_KdV-rlev-b&Ko z$9-of55)B6<2rJKbEannc7kpH0K~=GWD4|g`pXI8@Bu|U>)>!FBS?X~9`~nmm79Iu+yXGh9YPW z1_~m?LUVx~z(n(eD|tha%@>nQy!7A+s~w-`1jJ~`i@_1?Chrdu1B12J&qRZ7Cnv@O zEONrhqT)pwrpn+Ts47njP*VH+b1I@;Mz!!nj=RfL8WbFzZ4M;u)OUe+s)m5mcs%P| zWzDN+eNxR|x7W@-ASDMyK-g2Rc2J2R&?0c#bUvqejKt={P#~iIo%1IJ)4` z?c~S6DhwJW{w(o979wlx1vL7TsuxzB}6=+iA6omdCAokO+PPh)?aS$+Nrh)<9Ds#lAvwV zxq0(&)+yjM(-qBJZgmH_HN{?r<6f^EWFpbh6)0P4PKd^AO}9c6 zNVMb|E4)gRhZ`9{(eXcg;tfb?Km(O`$W#wACJLq)!RtgrMFp}H=^<ZLAly=LY@o=olLL%=E%BMh|Bf@gC=x81jjJvXn5>$kVr$$vs z#BoS4-~zU_8hXi4w4=c04 zHh)>HWa&ksBg&-i%j*e^TN7rpy`Sf;OqvqMPEds!)jz?`Nl`|mQPY!5oMDB9^ku2f zdc0(yEz6To{3gD0x12URJU1N(JSv}k;Q@MD-#`0{(P+GAKj-~p(l&HD%B!HoJ0GVD z@qqw|qhY_?{N%&;K%jwkhTn|at8taq{sHg*0HaiqpmYQ1K6%a7qEmo5akS(-sDgQV z&Cj39MB4lW~Y-3tfCO<9$>Q*0-7lw2lI;bA-X7A zudZ<$6%y@;Y#!JEQhE8eQEONU?yvF&L{#PopXTtXmIr?-lHlC@=W`0Nh}jC~7|j0r*=0 zep~aK${_SZ@x;JjXvC*h9VLEn(*eL~fm6bGFb<2c$4@#C-aqz*(FNZxIWQ`6fsCz^ zQ~q)a3^IaoJjC$~1XU3=3h(D`vRzRd;M4oV)&)ZKT&PhED(g?AkIqb8(~T&z=ZWRQ z5CK=KTullHwkvUK5JIWr{_%=qL@WOQedVIk348M57KKk^wvNvufMA|ul5GNkt-cs} z1fW2W7iANYCZzS0X$G@sCe3X@@tZVtBf7E<&z*116H!18u~(0?mQg_cemlkCT=Z)z z1PUJR6iAoY;>Qy6>A+P-vT4HT18GYi53Eut8M1O>o1!7i=iYM&Yyb|f^?`#V5YjXv zFXNNJhpl#?Qj8(nz4z8u3A}s~#1 zaC9hCG^T^A52ND;Iobo0nyl|r#xr*aDsi)uP=D>AN@zzePdcbgc*ODml@J72jhM=5 zr3Ze(K(O22E-|R_>ofu?I}y=)Xa4{*hy;2k0C-KVwBH#d;3bIgino+ zId^|K5zScmE|dmwrs+Ar0iltk6S2dy*UKtMu+FDW~f!6qP4te$JiSZQ8Z$Bgl>0? zB}YV?3BDq$v$8y7X@>+2T40L-CBHHN!TCRKs7YndHIdFo@AaxP-3 z9F1`K^OT^%f!HSPdVM#-v0y`sDwYhXr#H6{8!R)xR_Qf4e({131mlx$PWFM(oa4%< z1QE)I0mOQ7x^sg?+ECY zJSSu6Fm~z|Qb?~KSc$z2F5X|pD|&U_Miv#XtWupW9)k|;5QXSiZtP&wb5k9Z=i4d@8_i=<_K{x*ZcZeEdY>jhQ)&Bsr zL6k<;i(l3ia4|>jILQ*b-VR;-WJ=p0c@vPtxE({YC_x|yzOb#AO-HYIgbkNV?*pyV zOF16!Kuu9G5SlcerZklqMsw>ZOK>9i{{Wn-3#tcy^}tXCsha`j;rC1u5+6VJ7!r^G z27y5L?;QvLkq}GssgPPxp+`;kj-J*Liu1gr&<)=E{aiwkq@5F&9P?<#I95bq%U(P> z#*MIyppp7N=L1*@J`TUd`@*7{4$74NvZJUO#f%D|}!U9qTt&jDP zZ?rKAaQp9D;{_DJ0MKE9+ad3qr%gZ^Hx`@fDSVqd)S#%7lD`JE%*4E(yk3|0tdl@{ zFj3h3-f>n8I7e3?FF>MqgBP{~uXu`9%{P-K7K&5k#v+FI=HMb&>SnktXpH0CVXuBM z8p;qVmr$?yI_Cg;H$eCsoL+8Ff+MU5aO2Uw#;SnPLX7L6Y5aFRAGBs(jOm)0spO_sIRxAXD&$TS$VE7{@g{{R^V zE;33jVg~^=jn;xhp>}qslZ}|I(^ru4wBGa^;{)v>I=-l(vb-EF90=*1%9FZ_cJp

?L-r+VkI>A@D#HJgbfZS0+Mq17BE1 zrh=)|hgW_sB6x|y5sDqyAYcu5I)J+3an!-YQV?{QE-nHA_9nWbz~h?ph6xZeeQd#& zm<0#Af93?gfga)QhOKP3EaFjM$5KRG9`jVt^S)~U&!i6A2w@bRn+fYa~z<>m>C9Wxlb`3a< zm^nk06yF?RpnwXBNaJg{GHsw#0qlH95&kPAnR?b(4LPb31DP`6dq?(vAC#&m4*EZ;PpIa79rU>y#A0@nuW(%vDjYhGb#OMu3$IzU2a{Xu#OMaLz2k<$o_u3PG?0Bi z*8c!lQbYw6>hXv}O@P~Z{{T2ugir#u>g6hhBYo=%Nunz3K7KMHEkeCsaaaLB>sn9A zj0X)M?Xxu0Y+q}w{Y=~h>8bfJ=qv$V6AT45R5-8cPx|1HqNU4`I{yI7HeP^%dxo(N z+6nC${y%ud<~=7jECdy6t#O*|hNC1;`}dN#DxOUQyPUhr3G59gX&nPOm-{F%PywkG zNb>Q|7b?Qd5Ri8TU6b>9$*8b8F&CF!v40xmaRZjV@DQ;(gvOW$q24jODu`NliiIk4 zX0@x1m>0CBVeXhPl1^VioM zXLt@bh;~QTMkwZ{C2R({_UPVoD$*bZI&Cli0CHIp8RY4!2M)YM)&&5NwGH@KX8FII zP_zl(MMXB(1IJj)4Njm!ri*&)SIGpeqT-CHU=jy_vm6FSmC;slcI4|33?Teun7@7S z;NJulLOG)k!SRyEstjf z%h`}_X;DFWS-h6#UrGh4DX%ovus&iyhkGKsta8?K`6c$!a_ zIr7sTwAPdp(XG=5Q);>u(1%>>qj;>?wkSk>%eq zJYyx)Hf*W5qiWxSoODG=)4rA*wd7UVp|3C#aei{C}(;DuUr8pb zj;^v$V`^!PCRCCjzCY`MEku2delmg`+2?Qh#M-D$Gq;cLHqcQw&ja;wnOaKMoN6?H zBS1y^j&oW<38TTMd|XlN3c3kjhek{#0rMK^zbDa+N(&krtr*o%(>8oN@zyI?hNy(u z!QM^l^)TJqsr_Ss17`=zgaAANFudUNmKvjgSC^c{pAqvozQuDkHI> z;WekiVj#HETsSm4tH${;AkowW0D=lu+W2CiTU%mlu+RZn7o1rp5>dQ{RM%ykt;$j4 zO$tXr7WvKmX`a!SO?!WyO*js z)*KGWv|>Qo>ZX_P0KAZZ!*T9BBYuFQ{ILM;RbHXJs8_rbmqINUW`h%{QmK7~XBVGY z1|vEk+VgeJ3104dIu4SV2_Ti-cDf%0#BwfRzf7K^{o|J~8ZQePF0cM!gA{OxBLv?s z<;GwkNwz1LD4u)D-8%+G6q?u|qt3ALs&-Bx1$LX(j*KuRqTgjsDO#s)jJq6$CPa5l zt|Cq*ZFCxxiAIZ8M;h-W6BJkuR3M6i28&)ZcVlKMIN7v;V6PrmlyuNn6a{G!(1FnfT+a1_7Q%N2ZAbv^ zxuCVaQh1Fb_fY4UTmp?Hqa0}P0eG?VnmUNbbWMkvA>T=WsK$W} zItH279P^UQ?hINwculi>aAAs#1rmd2Q=`Pj6kAn@SUkFc{a`78R07?d{{Yf_X6PBQ zPPfD{re&fRJ!GnCiO!Za8-8-+MQ8(v%^Kr7)^0I8^fdn5^@8z636EF|R9KT_ru`q* zZIM$$l@*_EBM$2-G{wwn93NS!6P_F`4Wp+J;P7K2^OURY(SfR1VB8M7&6u6nG-r4a zg5;95e?*6AOO(${{R?-2^rh{;6x$@`3>MPgGkq%;(;ZA_~3uPymWstE8q8x8$(Kc zwUg5D2a|V??~EEk_7HXO{{VPK>}f?_dzd;C*2nWRSOzYW`oPu%aOnEYS^{J7qpz@< z$y7z(tVx1RE-ipFCV*%qvtQ#Uj7OY!^N_;F#v!ER^@|{B%l>e)YIK~KqieIX{Nu<0 z*o}Ss<+5(ueT4kto5IJ}NBGNW?u)S8ro;DmyWe66$*M`(Rhjf`-WuIskvh0>=;F1r$(9*n7u;K~1Lu&|WK84*;V*C_%+sH3z>Y zFBO5~oDCmeJL3*FhXJyeZEl@TaHzc$gd1k14okkd&D!3KPH6M+bJp-81<63*A20RZ zB2l7%RNPUB^zeL*aBW#oe2|>=)=VOlX3dfg?za;n5yr6+Z0~)a9`fyk*a7Znoqz5q zGl)RY_cm?U8L1>eCQ5yFYmA@)iMk?eY`Pv$js!7*5C|0@(ID-Zuhj2>RHb$O3Hs-<<6SW2v+QE_W%tOXuBy1LOwd?jR5FCIBj-d*> zHV%q61(U&vrjaU64Hu|;N;zZG~?haa!G7NX``d(t~fH<00yc@?_BqejiSQ-roZ#n5{U;^ zgt@1sbuS+ta4e}H3vXM>9r8V5#6V(}z(2q3o74>*O|y5bInsDU_3%;HVHG`u>{{XyTbaV!5*4ztk7a%KXtfa?H zc_vgnw+T-;f>41qfdS}eyn=5}Uc1JLv()Db7kv|V8=iz|Zt()t?0BZxhJl^mbN=vU z#Dh?^!#~!!$w2H4@?Bx3EN|llh^Rh1;D7?hNs0nBZG0vaA=@M8Ap}cr4_Vk01TLPn z?>F#buKM?lA>(rKowE_1`M?t*@%!rt8roe3wfMk7QjrfGm*W`>TR*&?oZdDD;ftoq zK(I0AxV4X4fq zNwkF8bMuct1vD=>o6jdWZ~pNuHBUIc4ueR%dU3!oiVvRWoT8Q|9x(!mb<4kuxp>fU z@##OTB-9bVUfj?GtlyvhV^c{4QL633tbJaOuvG7H{biiQKqr_3ruDLT#3n8)IrnQCP=+KM%Yd}xlLLmyEkPv{1 zx!bAGWDIx&p-L209zOA+cbc)nMeOffXJRv;2!z^c2}d5-NX%LYcWQ7{jEo*JTRfv_ z!*DRAvC>75;0plIasf(fOypTcHZFkx+gtTV2Tq|gQK7x)c|^W2h}o#updkz348Ip_ z)5>Ta_k>JH1S|1xN)M#Ph;)TdeekY9g49sHBHx*h-FZSoqFP%x_m7%p)V^r`6DI-y zP~;?djhiz|&8q^4x!|X90@wr6%a-s=hpSk8kA@%@JA8Xrbl*whzcP=oX3L=MY0O;Z16+{!m4x6aLd50-$bIq6?Hnkkhh8@f2uVTU zL`RoAaKi?wtuBL6qi(o6^t$1E2H@ckoFaCP@;L<)w!lV|w_7x8c)D6bh0T_{7nbqs z7OGq{a6PY=zrN@qEwF(!;aWO&Cldl1p&?a>O$U4q?^7(QG8)+EJM!h{u@oo~*o~?q zLb3JbzQA>~I@b6&*dY97fwgJbqTk!2Qbol$2$q1UAM4*3q6QA+iuwKHZ8Wi0Pv2O@NlXc#`Tqbo z)*WzyzdFW&1w}{0zV+uFMbNNx2EYJt&o2^d5j40)hK2VB z9Eb#>=<8pcO4Ude{{X#Wf(`J7DA!~C<*hjE`Tqc4ypc{;;t|_1Z1i)id^AGu1&4OG z9ba!)K8#v#j1ai68PPLL240+-uZfxfJb#QJ9fwb>4bT*bnqKld{%;2+IP&mN^0ZQse0H6qq!I}-!yFL75Vh5Wv`@jk|s1Lm5JAne9 zDrT`~G-uFbg4N^v%lGHTRM=lW=IC?q!gU`fBZ@?tiR{$VeAYj6hgh z#?K7E$N<=r#+W>>5wAydJJYgKvAt(+T)j{`;JV*#TUisU5 zc*a8L>7vA^{{UBb+SGDUEm3t@b$`Y+k2iPEQfY4!Us*vVh|qcz3q5$}1dz4d0lpnp z1C{iTB=sreI0RE|dgGkjqHRKG1D%fxqj#4fXK^aK2Lq;?{ZHso0nIID$6hUSV71{= zyRN=GWoF7!l@Jg}zlY8UX-~&2qE%1SwqgTsgg0FIEZ#YDy{O?xh# z$<4aftFll9KyXJ#m}0b0i6@}zJLq)hOQhkwj*agj=*^0;*zP^|<;@@2c%y5efKnWA z;^cGfM|_XBPA_`7K9xnIU@IBcFakx*!*99Z^WHV6fn*5L&0wu~@ZqaSUG*NPgOU8zJFdb!G~{UKk-aSvN`~la&4s#s z9}t-&AlyetBFFq<&@Hx%RME3jjpc4>-t10;f*cQcApiv^Faeg;FD3g>v69nwkYlMi z9-dnAvMPzwi4Ft9P3pXFTtb4HnnQ8H2Ag0dL^?&lINyrKWOmTqB@vs(wh@gEm{3kk z2z9>U1DN94dn7gis;%x14hYiAL4?t@D(`oVf}k za&bt;^0lm`!$m0&7_NeWIvoxxtY78HbF*{6o*s~T>k*`BDZfpZXR`v)(Qw}oaYswAZ69SvS1)h-_8NC2hSLb0SOOy z!~(-=xao zHtz6Xo6$-OOOZcp;NfT|m&OR#KO4g_G?JOhl#_e?;2I#t3(w9BDmy`~K7XuEkr2)2 z{%;E=(9t=5a4?&7G^clv!FCSKA8&b30t5+b=JkKjP$ezLtCAl$arr8 z&1elH*wgiYpvhr+77+)Q?7ey4IW6b{emnEV_vag=iKPyhPIYVO`9K5>0js034++;E zI8HPRh${6}*S;WNGm9MSIReqT*0{uJ6dO*QfbD%t)^cQ-CC7pGq1OlM!-zPp_cm*^qX$Yt4vu#*@Ig|CM9@6J(EHAi(DS^$ ze2DaDxnijhYtpUxU|p&@?G3KL+<@;>Bn(+Qo%CNgTbO`P5j}?~@s|8yAs5P>L({OX z1Uw0$9D$&Nlh58E{{Wug=HW$j>eD!BNyMT$GN+zi@Ca|M7KNRNGnW)V10_e$MJ<&( z#=~WnXR}kkS|(Z%0F@SwjYz+Y2%;~jNH}uPO)eV*Xcf{2T&;XP*@%qA0z?r}#?!wW ziW);@S;LcJ2+}+*D7vDGh!V%a!gqobK_L#&ZAir0^rNpRMN^RtMW@eMNTb_IyauaJ zX?n(G7+Tv^7L#Y2Aixcfu_z+!0b77j+ej!?0Q4ehVrbys6?;a&D?!0G$FsUPEK_9L zPhi_QXUbYZ)VnHH40-9t4zXxO>p%nvx8GaZ?W$a?fu|Og+uvH!V5pSU4*~-;As*+v z7Akd$jA*i14v`(VL>uM3!iRTC*XfT^+ z)4p=7Dv>BV@$L7G5hzlU&+mKlaYlu$^@?HIDt-Zd+#xex5Q+fIKr_GnkHyV`;3xHq zw#f(wXCLPmzzkjM38(}i*oTs<-1)fmC^Ta8b?3Fdaon0)J!Jt@J3R4(7`(12qsH>h zo-iwcs{a63K+*~0oJp{s7+ge7IrWoJdaeXjNG=TsL?iE~78=5beIfUa#L>8+I9FQc zV_W6%XC~K!2euy5>l)JXqka$fmIW1{y&ZhwSV(qFN32+Yp~yqyHu8yD@ripEMDP3< zNG6foFV->$4Yb|ynn9(lJQFkubF%XHnv+W*wE4kUx~aZ$98{YR4~$9<58g743LDl2 z)O|X{fkAtuz(XGmhm52_VsF>1Nmh_Uj4c(Mr2ha|YoRL5T;5!w8(s;{5-Q(gGb zJ?5Df(i}fHJuRG%Pv;t{1uYTo{o}F3Rc{`j&QV4~i9c9FMin|po@=eck2sR5yQHMi zw)F4kd2K30HN8#~Tzq)WL^(WSrv~Wr#cmO&pwI^|Q?2pWoI+_0hb7~CPJ7^&2ofqG zDQr`P1n-Zaolpd8K=H6AGhXnTEE+FdPQ!fdIPF-{2)NrzQDMPwF+{Z7NR~mbQg^ou zQax2md2XI$@wpvW6mmBo3=)GHhW4)yGri`NvN|Eg&Hn&^-c}hz0gK9Uv&R!QAV7-} ztQtDg?|ahpfUUcLvYX|1faZA?j0(o-S7*NNL;xz;qA143=sfi;fan6OMZ-`$hO*>Q zr%OxJ(Gu(t;et{;!xy2glZ^)Y?>6!hQWmzc){b`Zs1OL0f^^#Q_Bwmk0fw;Xp~+*1 zk2;R{!QcQ}O>9VxK%7D15)q|MLnwhB2zX9rCcO&*?P5cfm(~Cf_jd^=u9KeInGVK- zP(q#pg;rN?B~@Bz(IMWi1`hz3cByF$K!AlUk9~=Z0w=60Et*PS6i~sI244puBj>TTTf?a8-3ydhz>C6PdZMlzD(!}9H#J- z?<(K`At4jrEAxs}+6438)&)V;p@T=Bd3xS1G|D7&)pzU8QGn3heV=&Ni7Yy(YU$@# zLZz8n(m#=hHV@nW=MfT)wXWx!pj3l|t)(@&Frg{+Q-w#mS=2Fvm;fySr^x*;yM&2p z9XIap^Y0P~)4nmHJDNRUHWPU9o!l6}@@D;HlT~!7#wegimSsvJcy!=4C1&q9FjeJ( zdO3frlCi3PjlbSasV6La^OjS2g6|Gs)C|8)@P$1zLV?@w2LuypN$Y&#Fj|Y0-#uUz z;6rrHFL>%_Kxoe=>nRW>p)R<_9RRCz;(TVOBbh$TM1-lbljjgZiJ&~>m2t8%dT|mF zK3NZ(QluJ+hOOt`DkiKJUze|}BqT25&OzCH6H_H9S5R~x7$guuNE6+{H1EPX#`rdK zd2uw0IC?NiKonQYiSj#@cKjH$f%?E2X?7cM6WZxEC;nmEt@Eyaa+Egg>k`&W8M}$Z zG@zI&fL%QtDy4fS2&Yqr{N!@7v~c3mkEBQSkrH{WBjo=8y2n#-<@Nsn7;QvG{{W1p z+K9XM7=2hxookz-Se*EjjKJy(6QiD+@-D7Du$U+~Q?k9m);XgRRbJJnJe~Oa$~C3Y zvv%Y)Cq}o%SHgRQ)Dvk#;5KOyVw6U$X|#taO@1Ee6T)r-gC7BVA(9cXD@K_>S}CoU zdfpfcYHYI7G<11o>qa2r5Ti_y39fltfQ&636o4BL zXcghkPG}2PgmUl@AH$#(fj3^0;r-=jH5K}arp2AS^@(32ryU)sN0-L2b;n>O6atU{ z_xRvIacm^6sc(F_8-NnB`^gG8;|)5XfRr39+f!ki77c(B=m4Wbz~PO6X;hxtky&>G zTXD-+v!D*Q7koO)+UpZ7?#9A0E8cOD34p*&^2Z;&9-^q#B)&r=w_ zQHuM&@yjjXyQ5~g{eNoRjSs_5&J25`MQ?QR-_7GU?nL%Dy9Shh=QT2l3$CFGPym_? z;ZcUl1iClF#?JBptO}BX6K=mRY{g_K850cg4V2>GlV&9Bwl51?uFOq4j>sd!L~@cg z?=A@^B{FDt9$M2qf{X(sPKekw*8$welH?>-fagrz3+7_dfT21YH&S&S7Z4tJIL=46 zXuegNgv*rLwKWa^PR$l6T1^l%Hl$ZkCv%L^!U~Ti(h|GwbGIl2901YM7TTNN9to)y zj}8&3JT>+?GQ$Zfk!jSHC5g?zZd+ByHg6Yv7M^iU;l>rj61+V0VNM4GL8o#-+M0vb z&0R)sC!WynXWh*RXwBYafAb%PKxeIflkCE7dJ96>IO1{VFKoLCKQAxu0H9&OowdGx zKUkG`h7KFeZ<*Iv>!=YZ9J=1$rvOn>iP^j;?o?aG`~Ltqm_*_){$i|QJs36IR-ZSB z4A5T$bBr+B09tuz2A18O%wlSLE}hi6Pp?{s9z5bhSyZ0!7VtpIO&(kk(OyvF0b!l1 z=Ln8o26Ku6F6BJpwV;Wy^X2OhKsY9Yj3u-*OL&ziMxrmNk(PJn=FSILu-ACWP)I|5 zGP*#}aqi$OV4#h+^@t@mW7hm)QZ#r;0}=?8X1}~e;7QJ2sKehFU;%0hb%FwvHL4!j zi0@E>>&_kz3<#&1?*$9xX8tmo1swo)k_OzAU)C;y9qB%%AZ+^$I5i>V1i)Yj&=4`E z`yij3Hv~G-HH|TcVU9tpTnEE|p(S?dKCx(o;0C_&_nX4$`^}=$!)_%Pa*zDp7*Ys- zx#s~WhY8QuIdnp_Lrlm>OtTh(Maz zE-CLj+Z2b;dA;$5*yb9W3#Uhh_m%}Ehm&Edb`$3~tKpI$0O(#FoJ($7<8Wjmz!d<~ zP`(bLQ5Q*M4cBtXZajek4!{EQwAtGXG}kV4Qxrm|D4l`E>16@Ips##jC|xK(Rfd#t zD|tH`3Iv5WpwX@di22A445tfJds3eU2uh(Ug@;l6J+ZSb+3&| zzgR(%Lo%hYvZZ%$(IOiPb2a|3Jwn76evmnKmA+8o!#x0YFqn?TrEPh`c>EivTxcdGR%nL2&s2L5+lPY}WCnSx6Zo za&(}>M;p>)+5iDfbF|k3r*d+FPzj>6k#+a@ zl|UeN97=6kyLZvt#4zZLh*L(%H1JOGa6}D6oG?@X7F0F{STOPxw7jg|n=aXcVUwYz z(pK#dNj7wfV2SU6FDWI2IB}Y*+_^HbI4h0Uz2UGT(Q6_Npj|v405v8KQRV^@XPp^B zq5)AV7SP^L!HLYxKxl4(W~PNc31-6QL*S(VGJNgG%1F|QN*j7| zhPZccXVyAa#IyeY0JVUlk>ToMBNT_cplG=mZmoEGoWsU52zDZPo%9bJ?^sdr2x)4P z?dSWMQr6aJPAPrjof`Lwn1jwMfmZ9l4SUww@Id}gXp4S~Cq!3KojFeM4bz09bHmD2b;<%Yy~1CP9NB|KLEEk!w;zUC-< zrwj(DOE>2=dLB?PKn_=oAq9LF4Oj3IAp+0T^ z2~Dd{#ok|HrCJY@c@@5J4W96Zg-+0zQ9|JcUk*Cm#8}q>l)hXEK%9Xt12}Oezs^dm z61!Xk1WBezf-x#-v>ap!!r$?YNL&|5kaQ(3n}R|YQeV6*5n^;Fvn=A@O^4Pr5{+{i z?--db^y$k!Bcv9xjFFUtjtn4G%6EJY5h#TRr&PcKmwbI(L?%rW2cx7snx^o;g^`7o zSHg4P_{!l+WhzK%B~5Yln`oOhtM+lnJJ@-01hSSb2KTg5{2WX;5&;P~b`8;bM)!qw zED2RskPf6;`mwR3*^B z2NWqtl`yMzNJo!TeVQPF*6cRWQiV1evD;Tl0)q-@_yYj%j`&rcY2@SQ5Ur#}lOJRT zj`F`iO(@u&j&R92z!r#GR6sFrH)9wuuALI_M=YRrN=AUyHqxs3$7&yG0&~v2b1-G? zAb|*WZv$HA5G@EDzEiz$8D6UBO;G9RFYj2Qa*&;yf%k}k#FdNV5`%;)?ZQStyvtC3 zcme}jAshbyy|~&SB6F>I#DanbcFX-_kY!96wZw1jbn%lI6MRQmVN7QwR36{n7LiTZ zZ`Z6)C=|NuH7oG-j?tytle|MJ-Kvn;1aCFp9c9u2DW?s#U2+WV-gAWoU_^}}TgdA1jpA%Nkqe-5@NkD1 zGkXY?sG}W~V(->DT@e}prs=YTO+L9~XQT>^VSK`U0b$AnMFDCCq3EElwm2{*%o-n+^f9FAd#QB0b>ffS>_lv;@}G zS(H_Ga|m98XW9K>LW80<{eAk%;?X1r4tI(ZDun^_fVNVAs%vvy{db79LobKbK0@Dl zOAX##tp}9k-mS#c&<5to!b%r?Cz#f1Iy_-Z+s8P7q8EoanM&3PdmyMj=UkbxMydr2 zQ0Z4~#}EnwwzPbWb;lj@%AzP57l)#6kH%^cKxTpTJLuzIIK2RrYfkmQ&rNJsIUvIh^J^^-{Ga(x$7sK%}3%zZs#1|>c7gG!TPVu}TWG|WT-hQ)ck zV+k$jhVVqFs&nfQV+>Ttac%({uxK?M5#+bLbx=4-*c7wQybmTyjDV;|J2*A6`1sC4 zrwERpI(?j_-3C*+7eqLGT|bg-MKaW}7Jxp^xH6d8Hi{uSP=QI{VZhfCix*{1i?7cd zF!BwpkPl(&=MjPhsU;V0hqR6Jj@!Z}gtRk61zBLsoT+H~|N-3On~OCK542-uV1=f*1@Uo&$hCI50s#7vKj+gS@k7 zlZAU7U!S}~3U;)x+MYGu5D}A($&HXJ^2rUOC`d1OzM zvkXRuBJ9Ov88sZ(K1kz91${Zsike3l4KBfn?qvV~njK(cs%%CkC@{$_5u$s~1u;dU zMAnAuf$|waWReb0$7{oBSOgI#A3AU)cmF7AVdcTm7}+c{9;jvkU+6DK_;fR zkI|FG%89=SP5CAxY7towT5v1RM8S=q8bM;ai~_3hJV~h;eU2!=Uk@AO9i+XKrK&!8 z;$6&m(J8Ly28{y_DZH?zlAT4+IX;AnxWUvQD2||Q9vc!bIRH)JQfb(t4xMj(X2zFt z3UsT;CW+lJij36+UdnLy`ox69GKc5Z6LEx8-u?doTw)YlDM1Y!zvDE`VTe_I7temO zDMEyF8h^g73e*lTlgA-dH-0Wx5sEXTVsZC1e9q*6isc=^SGNN5XmG;Aj?d&dw!tL3}0 z0Xdz$%+@$W55dp<;{c(hoT&bcf~c|p{{Xd-8%`cyoB*}pSa!JY12k?JU4RbLZgUE_ z9Ar)uF$c~xXbVIc#-EHpYA_(_`omiQbAXFgq;-ay2v9y{xci8_OZ{QCzjqRD zH&@GmLKlbw?-a1~?0lXuu!A^UYR1ikavcf;Y4wQ^(iJQ38EFpT!}o?;CfLjBb&W>= zDAE|L(GX2X@x}~X8!=zJ2M3inUYch--o=H zaM4b&1rZ-QG19a)Jz@nmSh#Y!aa@B+ZI1-)zA!R~T$y{bSTBqLme7oBjx4dfaSk>HEeXC6VoQUGAwoV58z-UXFW z1_-LWbvWlKdVyLUk&Qj`FKcslgJjW98gatz2+!;QT(`g?JW6HjO2CWPE8QJTr6vdh zK1HrHalUn&qQHRct##HC2VxVw^5f&IG_)eWlse$|kESF%g7NW#(4mpxb*LLPwjUUq z;kpoV!-B5Nr&J6Ce0xAS))oPBl?@NKYscd^Y+y8W9s#|(c=3=;p%SL&=&t5fA%Xxr zFSPN3R;H3Cfc0`Sfg6)}H;i(GXg*)>B47i3E#uzvlaN(p9!d9$tY`>BlMa_nBJ}#j zN)IAEd&q6!8D;MV14MQw7|^N(){_VoUk*|!N32&zW7Ofm5JvJnxFkb7jBr3p9ibEJ z07xL}gn%e%%fx9 zf^;Qwt-EpuB7hD3NgItHtSVEfG$&vBesJ2*v|XGAsl7Yg%3+X>OLX(TVjnmp396Tw zhX(;=@!k#CJ0OGkU2DzW@FD?ZL7*viJL}#)k4;kz$fl?C;9#2>P`@Kvf9n!vojW!V zbZNhgS7S)dZK*gZz4P%TUKm|87u4fML{xpS9v7diDuAI{#6me7hx)}pF;p7(zhhQW3J0GQG` zB@?I4BL<5N;Xwr!cyWPSbiLpj97-5aXv$!|5!bBQdkokjP*WPlG2?>ZQp|`-!={6` zeo^z<+tiT1RVDj?*`NDWb;a{iT zQ>rgN807$Bmv^kSoX4SAug4j%*F-{IUm3<}g=_QYt>jhYX!87VgIfgX$(o6 zn~w5s_B@vd0t#wLjO_!j3F8?(N**-*Aoc4ur#mG;6cJoPyI0;=LjVSp-6z(TJ~$?Y z85}iEl(4+e;{*d~2^|o60X4?n@l-(g?k=f1>G8oc2uWKAPV_f}nY8V6oc z_`FHUoAw$)=r1dHIgdT%A~gV8b$brcruf4`Nei5AXhQBn=x& zX^cn$B_0PJ{6ARo&6kW5R%`rEcs4Ci)7$TfG+<;A0;o;c7nwia9VAg{>=9Ar)Rp+a zJSk$7&JMe%*Wf7>l?lHei9T^w%?Ext&Q8oij~}jY7^2Xn(a3cNzIel@LLU~VEiZlg zVm6GM;U9zE-%Grj1gfG=9dK*8>tRRLpFJABnMJUm5-e*F9_F!72<^uViFeG_)3< zS@sGqA6RLep|=?XEp>duD5T!(Ozi;Y@CUOby49+CaRvJlOVQKt;3}_Vpr1eE0ub2pG!jvlaJsYOn3(ojn9;;pY@F)&(cy}jcs3-^d=D7)_$jh1O}n1wsN;%iux z9n3m*$~|EU5FWfZl2A*@h6Pa{j1oNF99DyMxWQbUb}FS0UIZBX^nBpKkd_fZ-<@*q z;QA4#(mUDSsQQvm1WG|r9z|}{^K8Rv9zIo|N0cbn&jcC1} zoK^;pAgFIas6&unrPI5pHfFt$-nR9KN-%6u@}9kOxpHB=+C7pldf&%aDrT!q!aJt5 z#`8pEb)em&h?Z!l^(N2#409eRK6!ZT8y#8}=57E}! z`hO-BcA$4_%=Lw$hiHR_y9W;}$fOinJf_dj9AnWnl!TP&;d$iNZpFL7PXnpP*6}># zor3tp)3|pYUl|TXdE+I-VP1n*t?wmnQa4AAJoAaJ!FiU>@OT?cfQciFCRanhSb-|W zozGaRdK4Xi{a~|D0CxC{+};}MLu3x9{NT#>1vvC%8g+Fyd^i9C2Q=5tZG{1Ta)F1| zXj>brxr+@CCBXqoI(kO{Yvc)`u8%bC@zyI!No(%k9U|`zm|N^!q^C&Yo29{MdUAs`OT1`DezJ~%6heMa$Z|Y% znlzvii73xoIsInQ3A5lrh#*&JYc5hnL0E0K%^S|WVhxp15j~O@iTK90Q0N>H9Z!E7 z-c?DB5ev{!>T|EW8_A+gLso7=mGOMy9l-7_ST(2QZMwKr)5K0f0wiB2tdkDq9+%Oy z*De`?H4$S%x@eF5G@4hOF+)R>cV!sPIyQE<~lR};6xNnQ>OvQVnpn6 zqVr`i5Lagka$}TtPtUxNRT`l9L*585cd}`dj!hSSxWoi4&;YMZP0s|>*}^1N4>8o# z=W2Nevk8*ZkkNtL{@xVf_!@~(u5S8?-bw~y;eD}6g4ca(plw+VY9yB0anEjQ+r}Dz z)YLB@cm>R;uHg9P-#l}Wf$K1dMIL|98NC&5-e;I(YFmCfp@Xw)NL6 z)2t2Ao>zjp6N4{QLqaL%eqOFBB~C%)zw?MHwO&Yn-}jtti3WqOA2-%$JF1Ee_d034 z_liM4MK>L>>71PNd0t7uym?2EQ^36PMAie=U;=RJc1&`B4Fzvp`2PSd zQ&teuZT;Y>0@W41H5t2<;u-;`Ge@5OFtknFL&(kdImWmRyL5mdd~*KrIMLV&Xwl^J z_s$mxw1khI?l%p#Ysv47Y9e-T2et;8#vpd?W{55z303Lq1yt16yyOTYLi2(JR{#L{ ze~c|eMj(3Y$7T|q?knGW&KYMTgU5{ADk0G~>o4#c+*9u*fF-O&^@-3&r`|Oq{p4M| zB1PxlH|_GFxljqdody}CyYrq2@()iy_DysY> zB=-lxdppEA$e>Gd%4pR1lLoRPw87(qvq?5y`3n@QP+uvsZtc8aVyFmI+~t#pmt0Pq zKt#D|?yJuuWAbvRz+tGp=*KGkyv*V~3kL8h9>Sjbvjgd}K$|ZO=(=ttsYQZH$S|(I zy%!y+z|3L7?bW2`Uh%a#Y6nU!p60xEjs)0y&{fvZyeA`PcNRGozXw=5F0BCN=*x+> zHgUm&G`VfUzoYixPp2q0h2Z)F7r{kxI0ADH{NkBW(DCG)zv#pQ;_CkZpT-Hh82TL= z!DC!DtWFG)6odzA;;6@pw13fpL=v}#hA#$(6Pcsq2(-`_M;}uhx=|C)E9b1G91xOu za7)ld935govNbM)6wL<7W-4TuT_!rUG!DelKlgY#g^G_eBC76A=1?N6x6iy5p`cOs zhzKW1=L#^<$&AMC-W{C|{eDb9WlL#RL7;cc{{T2OEVNV8Rli<1>j$8m>rE=@k)cnV zA8181171is^)-0b>TkSkEd4~*uxp^v*g~SGP<}pYsxfB3C^6o?JacAng7gIZJ9Ryt z3{r`)p`9=ex!3D8!+>IfMwhc+j7TXA-vPklQ?mey&J(4spcH2J_%l^RqX2ee3j6iE zwWvWYsk--BfWSmXt3FUt#gQGudTx^ldVDPC#UK}w+?H$agqh+W608G=@9usdT;~n)4 zg7L2WW|xwK*7b6mmjkbJAVXHhFLZr=Mhol>E{6*n#CmH_kwr#h#WNe!8m_8 zV&o%5)8W7ZM|)0Out{|~&n$UiJ>e4wH7Ndg$J;9+Q}J{NgmYyq-6WHtN4wuvn%l2Tm&6jS=GFKzSS%Zz{o) zwl>z>yZP@Z78MX9Q$q3@ddiWcEg89{=iUea2NP+%o?bb{WtQ&9-;?P3X7E{PDzSX> zPltSEM2bxa+G(9Kbnj|#`P|76(sJDB_ z-jYL)k8dAtMLyG~f$ifB+^Ug06V)+71*)UB%kkq9T{RCa&nu@q>;trh_ko?16W{ze z+#FOj)0~Y$yhp#hIFu+B0e|ntJjyxOj_?RjpP{@$Dwyk%VG$bf))-kOJtzC#Af8HB z(Ek8t0NEY`lm$`JW8=v4=K_~UlipJo#L99jL9GK*%ZnieZj(|t-L2yVCLRAa9u%S&&e^_3`2FrK1iGVH%*%Rvor7rYLMFc_S8^D2>lzCtN<5A|RFMvOOFlfaJ zMhJ7m&;8yD3BfcRwBk-pV1op!I;-I0B2riY%0)Sb%vlD|&NutVLNzoXd;Nd&005yu zM|Jw?{{Xj;WS|r`JaLJMH-XxBi3Di?P~YPSQTDd4IkX8q&T~u!<9y+X0Z@9vni~i% zDlBoM_b_eGQ@_SiR8HH*HY%Jf{;&;zoSo!Eo9)M>LeX=OTr?f?>j9vxgwS2L=Ql0X z6m>2W0}`8~z8v8JMX349Q&(WUw-1mj>G*Z~#G)1=OyNiC2Jzzs%K6KKh;aV#Pd0eC zqE?581R%rM8~eov5=RC_X%#eCNxA6#6GEaly{+IOxy`OW$+N{{U#~ zdesTdy!F0)<1WdiRXP|apNZd`ssex%JG`)LrN~&(4Jazh_V(fZiD^oJQo7Osz`M6x^7Qr z7`x)9fuIs_Y0>4zKv0CMeG99$>l5x$9UK#L@-lA)Sz3vzQSqi#+xBY!08asgsA<=u z{{UGAK?%`D{{RopvDwMjwwzy-}HBI_YRu{;A`ybaN&&Eco;S~}i92?6hx<)*M&<(wbh5~SM?fAug$DvQCn zVk-a*M=u~)rbHVqHS*x2a2eOZJ{ZB;{KJW?|oMFg_*bbWrVB?)A^M?>&`Siw(PX!O1= z-Fd|tFjl!I4gBCL78U2*pl#e%z|~3{QslSAU*dXH_|5fUNIxuig!LAS5bQ z7kBlCnGvc)@yPMy)+tp%D2CMk z;|>74nB&jZD=j(|TY2LOfKbwiq0hXY0Je2QPoApqE;hCq81 zl)!d%wJtPvkYv&x*=Qw#)D9*KMEyryG!jabGa`Ep1 zfdmgGk#v0fp7D~UmB8MaQ>~8+ln8b2oI3nT>r*rX=1xCPFi=k4$ zZ@K3LX?ImxU8Vm3q}CSMhT5J6yU_7@?&g#eYDT=k(eB^8*qsBW|MbW5m zANQO`9XErlQZ$3pcm3ta4uIEOe^}&VjXcl$`@kSuz zmBAQDBp%E{y@x)!ai75J7jtlc?D)$HD`%%6!`3m0)5dUzO~-rnjBsiY`~2hpG(j+o z)O5S&&LG%I2Q~NcPu4^vp|mE9@N)0%)kT!eaKCoFc2<(p?yk(d?1rL3B!3Y}!qvbf) z{{V7Ou$$$%!`QrcoorUU!F}lClh>S03JtHK&3nOHjqO`+{pM^!V%|&%v^f%d{d0q02m+;>c=Ogp zG?aYr5)H1jUs%|zD_0PWUQ=T(LL(jVf`kdzdAd7zoaKeR24IkZs`|q)NIjhL>o=1G zcVKajs}R)6*yAAsAVi&e!kxp{IbestVBrzi&N5ns(~Kae!;AL3caT?pPZ@Im0497n ze}^)dMwjCRX!2s9n>J(r09f~i4K<20G@R?+SwY1uHIHlWCg)3wcHk-n?^#lk%>#e) z{{XlENgWNZ?_W7XM?%1EYWJ>8P_nRux)w_Ln3N6yMuxc7&+ne{6<#6fZ(eyn;{#*} z6<=-qVkD-7XdBq{Ul|(ZmqHwCZroZGBxPf>zgNe%tTW36y} z;DvN>TOS_&F@SDB73}`rdBU(Xqo6mC`k$A_qcPgvXcG1d6QSK>YeS@)LE2GQ6i{{Z&@g0369ymvV< zAGI`prU?a2n-J^I_x}Jv0kU&mpU?eqBZe}kGyeYa?Wn_cy87Si0O$}A06)ig37|E( zMo${vDhRR){{Y|h#ABHO9ks>f>)tNSk>K06_r@-;sz_MjedW%@U@Um~@q(S2%L7pO z{{T2L%j`Bh&sfnCrx%7o*>O)TYg_S^G(@HYzmNOQL5=PYH~ zZtCy6yPh3W2++dwi5NJcbTgcd5GoPVC+{>09%3%MY}e_?He!l>iTBBnff+`OMLtpc zVn#7#eUS}dNYzQV5IPEPv+oc=w&wnDD-1wh3>8+rt~ojikA4gl0;-3DF70^ric9*2 z5oMg2D+|*orQm#GYi@+cn$#k^xVHgMSk5s9Ic@cZ2nk#ku%YkXIniu#xTQ&aIaH#$ zeB%&wy7iEr{NohtV)*`V^IYeg809hCOcwLJZ^?-i#c*dBK6AXR=PmCR7`41toVw3Q z9HDSBA=m=5nkoMPImG~qu?6w&WCR$LS{~f15Y_~M8q@1Ja)qtaqB8Z6mOn#%Y43~y zfSeSZapKvMV5_=?P4~fpNkj{3tgnpNBmq_SpTGUUasq)Vz7Nly{;+fckkja=#v4jR zBZ~YTTfIB;7zzn%#rrw>&W5F08inI_;xeH{P#>Y$`ND=Uh{XHVy-$gpd8jZb<$Qmq zth6gaunB3>#u+d{0*8DH*8KY39fWPyp!&7OeP~$&`Fr`9L)esY4?lRectRg8@Y}wO z2|T7z2W75sI|3N6d+q-KSU5DM(Zxte{u!(skL1QLz@{s!l$oIQ6BgCw$xG3!G*^K? z)f^HKSdJ7`DV07CTLepwe;DjR>Xeh$@8_F`4>~a7 zf71f>LZX(x=l7Hlnj_ynwKv8jXaea?dHFx@nnMsY6x-kZaZDKcQvO}}$dxVFD(n99 zhz_h6&(nf7jrJYj#6!hA_`oV4uy4oyVERXGf5X;QT4QHGo7O5?EvY!gl8~NoYHIZd zpS(t}DaI=Lz*ktt343wE z+vJ`xfF`+p<5Z7<@2pT&=gtLZJ2I^fGR&6+(?=6=quyM(aKZjZE_Le%hX$Vi0FRa! zKJwy|7cFpMtXupIs3qBgEhs6#e~n_=010@WF79cGd7^6T4rZq)_v49(73^9n^tgy< zEFwVlj%Mp=r1szMEC`?s8xJ48F$q!>HLrxqqhcZi_B!#nZxCSDqh=z1#c7H4<}a=2R28-VYKaV6A(nva76VpQ917vv%Gd`=uBH4-&ty2 z$mbOPbrpbW8Lu4omL|sRg80@7nmZa+lkdD$XzaX+H^#nmSEaRMP8W#pykBIO!*#tl z1!{G=aat-KXj|_PBLmu6{{YijEi;`Vv+ofBxEd)L#tF4qw(?(pUyPP&%fY{#IWb)e z*QS}EsJd?FonoWDL@zVv>k+fC(l{(uoq;tEUffhIA#9&_5^rkknnM8z?-4z7&i8T( zifclD+QLGiSUN-Byi;k*CCTEh0zo}foN5B94|&)Z!^<{)XXiFG(w`>ZR<-GjV53pt z$ou#pK}Kwt6%r-3b~s+pTFe(#ua2;`t(n~D)BT!QV8((DLSsJH_H>Cj~Os z2rG@iXywLCq%P1CqaXl)SKd=zGQeF-ic^x{$c)J#`!azYjwDgZh8e8m`8dFKVBpV; z(9qnRpk?0?caGJRVRmQaRpj(55KVe51*Wx zQRI2wj`0IdI0+eu)d$SbkDquUY|yJC_F^F@C@;?tn4*_QO|HK3iWiQ|2&oy&YchY3j>m44ooqRaEA9S>lG`#D8uV`-vCVpiI-)A z=>GuB^1Etxatwr&;obl;FA)C#+(Csb0pe?BLyB?;_3!-Tcmr*t!{Y}+r;|4?FAX<> zsH(j6)+(E|b-p#58l$dAhpYsRuJ$x|&kz-$B7S^p0#y*=$#N(E0C3a?lP>IYrFDxX zH3q9Ie#mOG0uw3b0Xz8pU?nl37Y&UJi zepea>8(X{?m!l1^zupf%oO{+-;P&BG^gno#aCL=PUF9DWHdftCT0^%KG{Q(uFn{+b zDX%yijhs3Ae~W{f@A$cMj&OfD!@M~4k2e=CT(=ft*9R8|JE>0cIlwy@Q=gw+u_I_h zW_pQ0+<2vkek~&&RKvTv2NXe%$emY=>1%pYI{fJ`g_jl#mH+yTor{ z+U~#g#N|9SM&4t94FOFK6m)6x{o|~yHq-Hka5Z9`eBwfZ&>Bh0kSdmPCL{GE-i13*gtF;<*m#szuS z5nmtUG$srTPePY%A4F{J6 zX;)>YHr-pNI+`$)uu_|g;~F|D>t@~M2&ym*FVmU?n$b$U_da-L01BQ+8~4vnFc2v0 z;vbw9MdOM+V1oJ6dH(>l`o~ZPk7q73s?PiR$763ijKK)eX7!3Y*Q6)4--eeaE(-W=6`t+u8xj!0wNoSo|hg#AUign>B>;06`UVGtVdzYP5%Je z&v5uKf`MkYE@%|xw`>0ZoqlkZ!VZ&%6t=(<%ZV6O-W+h=oGn~v-VhefxPBhKaF7cG z^bha)#ZGbY^NV@T5XIIlGRGFrobAWF9I*G04O~I7)7B#L3A}E&V0*>43{H#{4m!oW zSr;_L!ONHUzrdzm%o zk80m?TvpLZ7(L}a&IZX#-r2Ab`*4F{Pb)v;I0PN;lD@mjYk2fjjY(JXm_b?v{`qp0|La8gEx$IY_DE z&V}RW>l8pmT?biIVj7%V>kG{<38Ux62BqfGdGmnjK^-*LonX*OeQQ|A(1Cg7n2@Yf zNbH`R-H4ZJU`hhCD*mOxoGkG*kp%_h;QZod#e5GN#UaS)bwBHdfCV0H?+D-ywW@h@ zeTXpM86;Ilif!?V5Q%vMoEG)IF~H_lDk@#B9O65IAnbQ*Id-{TDxCF27`J%#awNGaeu@sy_i`ZB4o zl}Pu39k1weF;TJ(&41s#tFS5oU7s0Y5m1QgVyF@$NB;nGXNJ9xK5-s#oQvey4xTP9!tjaT$DCr3kBP=X z8hqIC#!iZRnoQs%X10Cjpq#+t6zEjJ8EtyJdcqvLedxstG%L_x-6AK6K5?Awc=0}a z$Z^BSagfp7>C=RA4^8ae{{UD3B5i!=eB>J`3hMs=?llA;wr_s*LN+58r?<>o? zZPm?kL(pOa!)08zM0L(Cx3o09W}!{>Tw3S=+;C!=uA4yP>*&f#h2o9@-~b>04MEc+ z=kb&k42eE6!Vz8W^TsPTD5 Date: Thu, 9 Jan 2025 11:21:41 +0100 Subject: [PATCH 055/279] model: Add support for PhiMoE arch (#11003) * model: support phimoe * python linter * doc: minor Co-authored-by: ThiloteE <73715071+ThiloteE@users.noreply.github.com> * doc: minor Co-authored-by: ThiloteE <73715071+ThiloteE@users.noreply.github.com> * doc: add phimoe as supported model ggml-ci --------- Co-authored-by: ThiloteE <73715071+ThiloteE@users.noreply.github.com> --- README.md | 1 + convert_hf_to_gguf.py | 57 +++++++++++++++++++++ docs/development/HOWTO-add-model.md | 10 ++-- gguf-py/gguf/constants.py | 20 ++++++++ gguf-py/gguf/tensor_mapping.py | 37 +++++++------- src/llama-arch.cpp | 22 ++++++++ src/llama-arch.h | 1 + src/llama-model.cpp | 11 ++++ src/llama-model.h | 1 + src/llama.cpp | 79 +++++++++++++++++++++++++---- 10 files changed, 208 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index 0126da89c..a71015256 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - [x] [Qwen models](https://huggingface.co/models?search=Qwen/Qwen) - [x] [PLaMo-13B](https://github.com/ggerganov/llama.cpp/pull/3557) - [x] [Phi models](https://huggingface.co/models?search=microsoft/phi) +- [x] [PhiMoE](https://github.com/ggerganov/llama.cpp/pull/11003) - [x] [GPT-2](https://huggingface.co/gpt2) - [x] [Orion 14B](https://github.com/ggerganov/llama.cpp/pull/5118) - [x] [InternLM2](https://huggingface.co/models?search=internlm2) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 01b58f976..5562499aa 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -2562,6 +2562,63 @@ class Phi3MiniModel(Model): yield (self.format_tensor_name(gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT), torch.tensor(short_factors, dtype=torch.float32)) +@Model.register("PhiMoEForCausalLM") +class PhiMoeModel(Phi3MiniModel): + model_arch = gguf.MODEL_ARCH.PHIMOE + + _experts: list[dict[str, Tensor]] | None = None + + def set_gguf_parameters(self): + super().set_gguf_parameters() + self.gguf_writer.add_expert_used_count(self.hparams["num_experts_per_tok"]) + self.gguf_writer.add_expert_count(self.hparams["num_local_experts"]) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + # process the experts separately + if name.find("block_sparse_moe.experts") != -1: + n_experts = self.hparams["num_local_experts"] + assert bid is not None + + if self._experts is None: + self._experts = [{} for _ in range(self.block_count)] + + self._experts[bid][name] = data_torch + + if len(self._experts[bid]) >= n_experts * 3: + tensors: list[tuple[str, Tensor]] = [] + + # merge the experts into a single 3d tensor + for w_name in ["w1", "w2", "w3"]: + datas: list[Tensor] = [] + + for xid in range(n_experts): + ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{w_name}.weight" + datas.append(self._experts[bid][ename]) + del self._experts[bid][ename] + + data_torch = torch.stack(datas, dim=0) + + merged_name = f"model.layers.{bid}.block_sparse_moe.experts.{w_name}.weight" + + new_name = self.map_tensor_name(merged_name) + + tensors.append((new_name, data_torch)) + return tensors + else: + return [] + + return [(self.map_tensor_name(name), data_torch)] + + def prepare_tensors(self): + super().prepare_tensors() + + if self._experts is not None: + # flatten `list[dict[str, Tensor]]` into `list[str]` + experts = [k for d in self._experts for k in d.keys()] + if len(experts) > 0: + raise ValueError(f"Unprocessed experts: {experts}") + + @Model.register("PlamoForCausalLM") class PlamoModel(Model): model_arch = gguf.MODEL_ARCH.PLAMO diff --git a/docs/development/HOWTO-add-model.md b/docs/development/HOWTO-add-model.md index 04c5ccbbe..8fcd70811 100644 --- a/docs/development/HOWTO-add-model.md +++ b/docs/development/HOWTO-add-model.md @@ -28,7 +28,7 @@ The required steps to implement for an HF model are: ```python @Model.register("MyModelForCausalLM") class MyModel(Model): - model_arch = gguf.MODEL_ARCH.GROK + model_arch = gguf.MODEL_ARCH.MYMODEL ``` 2. Define the layout of the GGUF tensors in [constants.py](/gguf-py/gguf/constants.py) @@ -79,14 +79,14 @@ Depending on the model configuration, tokenizer, code and tensors layout, you wi - `Model#set_vocab` - `Model#write_tensors` -NOTE: Tensor names must end with `.weight` suffix, that is the convention and several tools like `quantize` expect this to proceed the weights. +NOTE: Tensor names must end with `.weight` or `.bias` suffixes, that is the convention and several tools like `quantize` expect this to proceed the weights. ### 2. Define the model architecture in `llama.cpp` The model params and tensors layout must be defined in `llama.cpp`: 1. Define a new `llm_arch` 2. Define the tensors layout in `LLM_TENSOR_NAMES` -3. Add any non standard metadata in `llm_load_hparams` +3. Add any non-standard metadata in `llm_load_hparams` 4. Create the tensors for inference in `llm_load_tensors` 5. If the model has a RoPE operation, add the rope type in `llama_rope_type` @@ -96,9 +96,9 @@ NOTE: The dimensions in `ggml` are typically in the reverse order of the `pytorc This is the funniest part, you have to provide the inference graph implementation of the new model architecture in `llama_build_graph`. -Have a look at existing implementation like `build_llama`, `build_dbrx` or `build_bert`. +Have a look at existing implementations like `build_llama`, `build_dbrx` or `build_bert`. -When implementing a new graph, please note that the underlying `ggml` backends might not support them all, support for missing backend operations can be added in another PR. +Some `ggml` backends do not support all operations. Backend implementations can be added in a separate PR. Note: to debug the inference graph: you can use [llama-eval-callback](/examples/eval-callback/). diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 9d0e7489f..cf05bf47e 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -244,6 +244,7 @@ class MODEL_ARCH(IntEnum): QWEN2VL = auto() PHI2 = auto() PHI3 = auto() + PHIMOE = auto() PLAMO = auto() CODESHELL = auto() ORION = auto() @@ -428,6 +429,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.QWEN2VL: "qwen2vl", MODEL_ARCH.PHI2: "phi2", MODEL_ARCH.PHI3: "phi3", + MODEL_ARCH.PHIMOE: "phimoe", MODEL_ARCH.PLAMO: "plamo", MODEL_ARCH.CODESHELL: "codeshell", MODEL_ARCH.ORION: "orion", @@ -940,6 +942,24 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.FFN_DOWN, MODEL_TENSOR.FFN_UP, ], + MODEL_ARCH.PHIMOE: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ROPE_FACTORS_LONG, + MODEL_TENSOR.ROPE_FACTORS_SHORT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, + MODEL_TENSOR.ATTN_OUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE_INP, + MODEL_TENSOR.FFN_GATE_EXP, + MODEL_TENSOR.FFN_DOWN_EXP, + MODEL_TENSOR.FFN_UP_EXP, + ], MODEL_ARCH.CODESHELL: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.POS_EMBD, diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index efe2a4aa4..7616c468a 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -55,7 +55,7 @@ class TensorNameMap: # Output MODEL_TENSOR.OUTPUT: ( "embed_out", # gptneox - "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe olmo2 + "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais nemotron exaone olmoe olmo2 phimoe "output", # llama-pth bloom internlm2 "word_embeddings_for_head", # persimmon "lm_head.linear", # phi2 @@ -68,7 +68,7 @@ class TensorNameMap: MODEL_TENSOR.OUTPUT_NORM: ( "gpt_neox.final_layer_norm", # gptneox "transformer.ln_f", # gpt2 gpt-j falcon jais exaone - "model.norm", # llama-hf baichuan internlm2 olmoe olmo2 + "model.norm", # llama-hf baichuan internlm2 olmoe olmo2 phimoe "norm", # llama-pth "transformer.norm_f", # mpt dbrx "ln_f", # refact bloom qwen gpt2 @@ -108,7 +108,7 @@ class TensorNameMap: "transformer.h.{bid}.input_layernorm", # falcon7b "h.{bid}.input_layernorm", # bloom "transformer.h.{bid}.ln_mlp", # falcon40b - "model.layers.{bid}.input_layernorm", # llama-hf nemotron olmoe + "model.layers.{bid}.input_layernorm", # llama-hf nemotron olmoe phimoe "layers.{bid}.attention_norm", # llama-pth "language_model.encoder.layers.{bid}.input_layernorm", # persimmon "model.layers.{bid}.ln1", # yi @@ -152,7 +152,7 @@ class TensorNameMap: # Attention query MODEL_TENSOR.ATTN_Q: ( - "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.q_proj", # llama-hf nemotron olmoe olmo2 phimoe "model.layers.{bid}.self_attn.q_proj_no_perm", # llama-custom "layers.{bid}.attention.wq", # llama-pth "encoder.layer.{bid}.attention.self.query", # bert @@ -165,7 +165,7 @@ class TensorNameMap: # Attention key MODEL_TENSOR.ATTN_K: ( - "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.k_proj", # llama-hf nemotron olmoe olmo2 phimoe "model.layers.{bid}.self_attn.k_proj_no_perm", # llama-custom "layers.{bid}.attention.wk", # llama-pth "encoder.layer.{bid}.attention.self.key", # bert @@ -179,7 +179,7 @@ class TensorNameMap: # Attention value MODEL_TENSOR.ATTN_V: ( - "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.v_proj", # llama-hf nemotron olmoe olmo2 phimoe "layers.{bid}.attention.wv", # llama-pth "encoder.layer.{bid}.attention.self.value", # bert "transformer.h.{bid}.attn.v_proj", # gpt-j @@ -197,7 +197,7 @@ class TensorNameMap: "transformer.blocks.{bid}.attn.out_proj", # mpt "transformer.h.{bid}.self_attention.dense", # falcon "h.{bid}.self_attention.dense", # bloom - "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron olmoe olmo2 + "model.layers.{bid}.self_attn.o_proj", # llama-hf nemotron olmoe olmo2 phimoe "model.layers.{bid}.self_attn.linear_attn", # deci "layers.{bid}.attention.wo", # llama-pth "encoder.layer.{bid}.attention.output.dense", # bert @@ -242,7 +242,7 @@ class TensorNameMap: "transformer.h.{bid}.ln_2", # gpt2 refact qwen jais exaone "h.{bid}.post_attention_layernorm", # bloom "transformer.blocks.{bid}.norm_2", # mpt - "model.layers.{bid}.post_attention_layernorm", # llama-hf nemotron olmoe + "model.layers.{bid}.post_attention_layernorm", # llama-hf nemotron olmoe phimoe "layers.{bid}.ffn_norm", # llama-pth "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon "model.layers.{bid}.ln2", # yi @@ -265,7 +265,7 @@ class TensorNameMap: MODEL_TENSOR.FFN_GATE_INP: ( "layers.{bid}.feed_forward.gate", # mixtral - "model.layers.{bid}.block_sparse_moe.gate", # mixtral + "model.layers.{bid}.block_sparse_moe.gate", # mixtral phimoe "model.layers.{bid}.mlp.gate", # qwen2moe olmoe "transformer.decoder_layer.{bid}.router", # Grok "transformer.blocks.{bid}.ffn.router.layer", # dbrx @@ -310,10 +310,11 @@ class TensorNameMap: ), MODEL_TENSOR.FFN_UP_EXP: ( - "layers.{bid}.feed_forward.experts.w3", # mixtral (merged) - "transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged) - "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx - "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe olmoe (merged) + "layers.{bid}.feed_forward.experts.w3", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx + "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe olmoe (merged) + "model.layers.{bid}.block_sparse_moe.experts.w3", # phimoe (merged) ), MODEL_TENSOR.FFN_UP_SHEXP: ( @@ -342,10 +343,11 @@ class TensorNameMap: ), MODEL_TENSOR.FFN_GATE_EXP: ( - "layers.{bid}.feed_forward.experts.w1", # mixtral (merged) - "transformer.decoder_layer.{bid}.moe.linear", # Grok (merged) - "transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx - "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe olmoe (merged) + "layers.{bid}.feed_forward.experts.w1", # mixtral (merged) + "transformer.decoder_layer.{bid}.moe.linear", # Grok (merged) + "transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx + "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe olmoe (merged) + "model.layers.{bid}.block_sparse_moe.experts.w1", # phimoe (merged) ), MODEL_TENSOR.FFN_GATE_SHEXP: ( @@ -387,6 +389,7 @@ class TensorNameMap: "transformer.blocks.{bid}.ffn.experts.mlp.w2", # dbrx "model.layers.{bid}.mlp.experts.down_proj", # qwen2moe olmoe (merged) "model.layers.{bid}.block_sparse_moe.output_linear", # granitemoe + "model.layers.{bid}.block_sparse_moe.experts.w2", # phimoe (merged) ), MODEL_TENSOR.FFN_DOWN_SHEXP: ( diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 007d79f82..eef66ed31 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -27,6 +27,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_QWEN2VL, "qwen2vl" }, { LLM_ARCH_PHI2, "phi2" }, { LLM_ARCH_PHI3, "phi3" }, + { LLM_ARCH_PHIMOE, "phimoe" }, { LLM_ARCH_PLAMO, "plamo" }, { LLM_ARCH_CODESHELL, "codeshell" }, { LLM_ARCH_ORION, "orion" }, @@ -584,6 +585,27 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, }, }, + { + LLM_ARCH_PHIMOE, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ROPE_FACTORS_LONG, "rope_factors_long" }, + { LLM_TENSOR_ROPE_FACTORS_SHORT, "rope_factors_short" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, + { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE_INP, "blk.%d.ffn_gate_inp" }, + { LLM_TENSOR_FFN_GATE_EXPS, "blk.%d.ffn_gate_exps" }, + { LLM_TENSOR_FFN_DOWN_EXPS, "blk.%d.ffn_down_exps" }, + { LLM_TENSOR_FFN_UP_EXPS, "blk.%d.ffn_up_exps" }, + }, + }, { LLM_ARCH_PLAMO, { diff --git a/src/llama-arch.h b/src/llama-arch.h index 45e458bb9..2e5f97b77 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -31,6 +31,7 @@ enum llm_arch { LLM_ARCH_QWEN2VL, LLM_ARCH_PHI2, LLM_ARCH_PHI3, + LLM_ARCH_PHIMOE, LLM_ARCH_PLAMO, LLM_ARCH_CODESHELL, LLM_ARCH_ORION, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 7deb3683b..7260cb155 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -76,6 +76,7 @@ const char * llm_type_name(llm_type type) { case MODEL_8x7B: return "8x7B"; case MODEL_8x22B: return "8x22B"; case MODEL_16x12B: return "16x12B"; + case MODEL_16x3_8B: return "16x3.8B"; case MODEL_10B_128x3_66B: return "10B+128x3.66B"; case MODEL_57B_A14B: return "57B.A14B"; case MODEL_27B: return "27B"; @@ -661,6 +662,15 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { throw std::runtime_error("invalid value for sliding_window"); } } break; + case LLM_ARCH_PHIMOE: + { + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + + switch (hparams.n_layer) { + case 32: model.type = e_model::MODEL_16x3_8B; break; + default: model.type = e_model::MODEL_UNKNOWN; + } + } break; case LLM_ARCH_PLAMO: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); @@ -2094,6 +2104,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { case LLM_ARCH_OLMOE: case LLM_ARCH_PHI2: case LLM_ARCH_PHI3: + case LLM_ARCH_PHIMOE: case LLM_ARCH_GEMMA: case LLM_ARCH_GEMMA2: case LLM_ARCH_STARCODER2: diff --git a/src/llama-model.h b/src/llama-model.h index ce038932d..424cb0f52 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -73,6 +73,7 @@ enum llm_type { MODEL_8x7B, MODEL_8x22B, MODEL_16x12B, + MODEL_16x3_8B, MODEL_10B_128x3_66B, MODEL_57B_A14B, MODEL_27B, diff --git a/src/llama.cpp b/src/llama.cpp index 97e716cd6..ae375bcd3 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -1212,6 +1212,50 @@ static bool llm_load_tensors( layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0); layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff }, 0); + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); + layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); + } + } break; + case LLM_ARCH_PHIMOE: + { + const int64_t n_embd_head = n_embd / n_head; + + model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0); + + // output + model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0); + model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, 0); + model.output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), { n_vocab }, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = model.layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), { n_embd }, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED); + if (layer.wqkv == nullptr) { + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); + + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0); + + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); + } + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), { n_embd }, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), { n_embd }, 0); + + layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0); + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0)); } @@ -6266,7 +6310,7 @@ struct llm_build_context { struct ggml_tensor* attn_norm_output = llm_build_norm(ctx0, inpL, hparams, model.layers[il].attn_norm, - NULL, + model.layers[il].attn_norm_b, LLM_NORM_RMS, cb, il); cb(attn_norm_output, "attn_norm", il); @@ -6281,8 +6325,7 @@ struct llm_build_context { Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0 * sizeof(float) * (n_embd))); Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd))); Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa))); - } - else { + } else { Qcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, attn_norm_output), model.layers[il].bq); Kcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, attn_norm_output), model.layers[il].bk); Vcur = ggml_add(ctx0, llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, attn_norm_output), model.layers[il].bv); @@ -6326,14 +6369,12 @@ struct llm_build_context { residual = cur; cur = llm_build_norm(ctx0, cur, hparams, - model.layers[il].ffn_norm, NULL, + model.layers[il].ffn_norm, model.layers[il].ffn_norm_b, LLM_NORM_RMS, cb, il); cb(cur, "ffn_norm", il); - // FF - // special-case: the up and gate tensors are merged into a single tensor - // TOOD: support into llm_build_ffn - { + // feed-forward network + if (model.layers[il].ffn_gate_inp == nullptr) { cur = llm_build_ffn(ctx0, lctx, cur, model.layers[il].ffn_up, NULL, NULL, NULL, NULL, NULL, @@ -6341,6 +6382,20 @@ struct llm_build_context { NULL, LLM_FFN_SWIGLU, LLM_FFN_SEQ, cb, il); cb(cur, "ffn_out", il); + } else { + // MoE branch + cur = llm_build_moe_ffn(ctx0, lctx, cur, + model.layers[il].ffn_gate_inp, + model.layers[il].ffn_up_exps, + model.layers[il].ffn_gate_exps, + model.layers[il].ffn_down_exps, + nullptr, + n_expert, n_expert_used, + LLM_FFN_SILU, true, + false, 0.0, + LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX, + cb, il); + cb(cur, "ffn_moe_out", il); } cur = ggml_add(ctx0, residual, cur); @@ -6353,11 +6408,16 @@ struct llm_build_context { cur = llm_build_norm(ctx0, inpL, hparams, model.output_norm, - NULL, + model.output_norm_b, LLM_NORM_RMS, cb, -1); cb(cur, "result_norm", -1); cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + + if (model.output_b != nullptr) { + cb(cur, "result_output_no_bias", -1); + cur = ggml_add(ctx0, cur, model.output_b); + } cb(cur, "result_output", -1); ggml_build_forward_expand(gf, cur); @@ -10536,6 +10596,7 @@ static struct ggml_cgraph * llama_build_graph( result = llm.build_phi2(); } break; case LLM_ARCH_PHI3: + case LLM_ARCH_PHIMOE: { result = llm.build_phi3(); } break; From 8eceb888d7b7f5e93d20a4f85ca6511022b87040 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Thu, 9 Jan 2025 11:28:29 +0100 Subject: [PATCH 056/279] server : add tooltips to settings and themes btn (#11154) * server : add tooltips to settings and themes btn This commit adds tooltips to the settings and themes buttons in the webui. The tooltip will be displayed below the actual buttons when hovered over. The motivation for this change is to clarify the purpose of the themes button. * squash! server : add tooltips to settings and themes btn This commit adds a tooltip to the '...' button when a chat has been started. The tooltip is "Chat options" which think could be a good description as the dropdown contains options to delete or download the current chat. * rm tooltip for 3 dots button --------- Co-authored-by: Xuan Son Nguyen --- examples/server/public/index.html.gz | Bin 1205858 -> 1206458 bytes examples/server/webui/index.html | 72 ++++++++++++++------------- 2 files changed, 38 insertions(+), 34 deletions(-) diff --git a/examples/server/public/index.html.gz b/examples/server/public/index.html.gz index 36f9c9fe9a68d6843be38c7b798e29a950ebcee5..3640a7a6cfa76764d93684e9051a32c263932c8a 100644 GIT binary patch delta 912289 zcmV(=K-s_I%t^Y@Nq~d_gaWh!?Z|(=mR1$5j1VBkEbF!d2oORe5|U7N+cstpqZovw zu-wo0`F!H}l51#0V<( z_Vjx#&bQF>lqfMa%(hg2-u@eJ|J6dTxOqm;Zwn`JRL3j8Ha|V%2zuv9v3dR!{Kbt> z#&&ewI2X#qVZOWh0cM#2{^hdRMmk&)1U~k_I5u?0Lp*%{R_UJ~@;@HD~hLI7Jr>giqety{~7HpPxX;8c{F`nOUt<3G4q`Xtk zK{xs!S7-O20P^{R7`6-Gs6X;h<+`n+qL03 zN+^E6F4u~yis!v(wh~yzd6SA?+q7aM{QOdErRVZ{;;!Uw+g*O~d_@=+t zj`!13%n0;Hv3-0oTrR2~SjEffg1f~0gXSrW^e_+Zkl`I^+sH7E0K1BI`T)<{tIgX^ zGWVsB5hkJM=udy;^LH<|+sXoqLPva0hhK-4alzC-mM#QLoBDTp3D3`0;4kmthuw5M zDqh0av7F#b8b&*zXQ3DSvfOLN9#U4@lxqWd6jkh#UX~@X3k;hJ@%9pUv2{1^))M~B_eUz&k)1nL6iqJGu*LLF6$Pp- z&MRk430WFOw=o?WwMBW3Xc0fa;vau|-K%nJ5q&=9 z9Cbc+8iChdl~>t_z>gKhm!uCJwG3BvR+G}|h+{({~ah#T!FX$?O=Ut1&hA-1ZWua>7I$zl0z zzK*K{LO%VnPrO)`$fvEomSu-Yx^Mdp)yXSsqIz##UT(bCL$_XbSLe;|D0VhF zxF3I2FOE)Yi0&#jf7{x>)6Pfs!+C>G+q;o0ePr2x4BgjLb-m*;9-JTGr>cAR&bnXo z0Ed391N>Xhy&H;w2R?gw@UAHco)!EHTM_BO24Q2vuw6SJsNsLyV&Sal?MmLxSjr#fX}n7t{v_k{Efftb zDB39>XV1cU+PC1#{?WV3dGVFlHR6Nj?^YFg^9^kYTk;mhKX0^V{#LrJ>;|n!E>(P}`3&RMK=pqC!hCN3 z_?U6@2_fAX!|odDIZv7Qd_|y@+fHh{YzXp~Aly`TdL-}oz-1bFl5@4Piwo1;JKOdG z_5Ar=B=26!-fpPLl$|tu#CgZj(DI7PP06D}N&V*eY3uLdnDV_hs)Jec;exuqjR#?2 zI_ibFp?~+a?cv*M?OzFqg8@5oz^Tjiju`_RTI z&*@G(=AzoComY(gEp?+xvyqYep{{t3=sRsCB-^_qa5S^pTrYv$hWY}WDm5Jye?Dz2 z*>>L>^d*I-qmkPt=c+rI{l*7M!rXfbNb46T(2eC6f*|fI2c24g{WyOU;=5Wj2bthe z$Z)A(b)3awj-KBxH}7ux3f0`V()enMr~SN9y?T0iLW=wNL;7etJ45glE&gETd8t7M zlfjal{6b9r$Ci@sExb2Nd;3ay)JM?{rq~RRf}~;e;qs5Q=~wlqad45er5S%|7W}1I z@Bl-vhwjQ;@uE*yUWKuENFLV#%b^a4WB@?3%K9s$@Njayaq&F?R%5?e;P`#RI6 zs%%qtnOmjrhxO`?V&N@nWitq;m*5hsV7GV9SwZ_ict$?RPTtAqavvGLwE&tDd;fIemY-+Uw#9bKd7i`gU_* zUN)7zB(|PEYNmBQy^p%syK^(z_4!Q0Z+lzc-m0(q)A_+(a();;^xU`4)tBD2H(+vf};zB-HrCs)5{Z7I4OMErbNr9%C@?42@rh+uaqfc-b^AKEuY|M z`FZzmIse>W!+y#C)6cxri9F-q`?FxH8CB3~B{r0|doHBFYaTxS{A=o(veZ>IsXxDh z)UL}D*f5Pad5H}@(+vH^4>$YbDF}aGwpm7?8C!Y$+rb6!m#viI zB?_Y*89DJ8{CVRu`1xQWxK)JMxp*rJ?RP0#LU0v3P%<<;z5zE+tCIRr$pP`_r&j_y z(KmTVspvOTCn~G3yp>8{P~vD;HZZa@R=m4qgA@I!kEzz|T<(3pURmuo(krWdNU!Xc z`p`%F{`-Gwgk1C@F|;tyzrP>JNyW+c_TwFNm;F$*g4Oh$ols6{Rlc_q@1VOf8JVUT zN%Fl@k(~}+_T7{59>U9>IDr{{XE&Tv(&z-=+l_Y-UUnl_;=l>4@9ar_x)Ad3URduU zyzGe)$6@@P{V?K_YK{1N`|&Qq9jV_EPeA%cx9n_q5zT*lW%oSs< zQ#0R zjQ@X{U0q7Psy}9o@(n!0kG-AmRq(C;%kR9$FMMgiw}hR1H@kJp`?)}4H)?#r9>;}t8X6)PX>^yTgM16|I}`kZ~J?(_TV;}tt+6w(Pk zRPmJ?=HqqeXXBlJY``z>gpb#3#M;@_?Y)1RKj3Tn&tt0cklo1x7AN0A9Bvg zLH~_};iD(!`vyHH?}kSVd#*n;K5&_Ljg=WIzL7i~xsGqZr?-chjrjBPSNIA5_zFPJ zU(@h1qqy7e`@Z9SmhNm@IoN6bcfX{*#lwB65r>U_a@Q?<(Oglc`=O$Mhp#hxq#J)n ziTif%mdWQs%0D-b;djY@Pvgw^_uC!e%r8ft^aiG|8@XV3Z&qi)@-A__Z*^=V?S4QO z_HroFw|t$@Jg z5~-cf>3ITzolg|@>E!S|j7o9#UJ=y3tiB-8@+)#P_od($rQwZS;Op0&{`1ilYRSvr zdEVY~?A-_qA2#K?RnvnuYNw7Le=oAIZ^TA(|I}maX~W8nG;e#|d>CX${7-+&|4yBM zQQ|nv|8d^^ZA6mG=n<#OW*u7IO>x!rZSNsTVW22S=d|dd{DFWDH$fq)64%&u2`sG@Xp5u~=ye#?AE9^gS zRn&LcTk?q`I!Ov*Zcl|Z{s~p z+-Y#$s_e{F^Dgs(ARcT9?)uF>29>()Kl}M@Z(0Ff_yPZcm6UWVPonZ(QO>(i@7sm> zDf!F3>l*vQ*YkP%4t7uHKA#^bc+dCqBjEP{KjsSh8?e6T68ib=EIfY}csA}I_fvj8 z84pqNvw8U_%%;?1PP*?Q_#SuN--Gw&ER`wIl`9K!eq({4Km3*D_VT$+rEjjDhd_!# z-?Mf$@IQX(JcRb4m2;r!|L6bv-~aWH&){>X-WPB8LR^WSqdIvnoVyEt+9*u!6L;Ph zzFlGA8yp#ax6i^qwF`fst?P`Ztw)nzk?VcmV4>8AZ+1+)-5#Ekq$STEML!F<;Y;Lb zt`Qd(AV*WuFn&9WTf2kbi2{zS*pJD~OFj5p{@5+9b3XJh-%!afzu9}d+@%t$;2@-W z-uqoS3bD3@&%R4~;@51${}ZE>->cwcSwB3M@H=DqWgCZM6EJ_l4!unjx6{$RS>nsP zB})GAE{eQ;m@8WXz8L2#d0V;P-LBv5_1zBN@9Te5H~;beX4u0YnUw{JPy)SR02nU| z#mkJhQc&&_Zz}&d_#nCTQnE2eEF4+9U2~wZF}FDnmi_$nx2G5I`D@vk%;scvlF-|- z7Ph~PudiOH6t91Ut7ahAywd-oB$1&N2GOY#egU7VPvEH~;-xO;iQd*lh}WGrf|JkR zB)ID@ABu-ZmqN@z@G-_*@q23-{P&+eK{WsXetKFsMoxvxzyGwcr-h%2GLHnJV~T1D z((tLqycRC60wezK5$=PZFzSMJw(G=Eda?&t3!(A^B~O1_;lz^@m`>oN#wV$O^1t@{;ux@3#l`DwFE;geOm3Ho{;?5R9cKV4huA3;(~nv;ufLTa9T(oP4L?d(&u_ntS{=Ce#8&b!u;)${+^tK8 ziTjA$m!j&OOlK!eyA$tnSK??|^IP@xD9-p95zRGipA zeuyA{$m-`un>=55-#zQs^Mi`G?_t?V&F{MU-N~nSg)dy_o@w-zBLZxh(D!xx%8Y-R zjmGGEo@0*{+}6|kIJNnaijmg`eWzRXEkl3J&+=|IF2-=H!}4672cU4vsJU7664_y3 zJiYE_?CrkOJh-^tmZ!7uMtMM`7LnGAP(P>NFKHMSUKO8q!96ZD({~s*ev>y2C3gw& z!JF819Z~S)mJMyxEzdi!-9`IE!}EWfC~=Z&4$yy9Y351jk&WlzqYrB@jXyYs`v*#C ziC(@{HdXDmRTzCKRGY2{hxS_$yQRgAl*4xZfqfX?@s|mmyFNIPi5(tS=tpPiOj+pW zB<(%g?WgJFc)XmFt%rw_lG@9kK(GEAyqeybrNSfZCMLF{c%ikseI0zpZk~U&n=)Va zuaX-IStaqq7QFE6cB-3sMEi7fJ$}~@?Pa;0_u9*hErlqhUr|~F_S}U(ts+&~gYZ8# zlljl>>HhEM|2h5KX_RdOO)Xzyqi}|76b9WV6lEio-*OrLc67>i2KD@WLYGcpwq7)! zUhV*&*oYn0PGKWsx)P#e3--)GKFd_{lnx8U)OmUzx4 zgn^y;>f6iSG{59A33x(3*v=hTal72X9i3*ER?@Xb6`E_?!oO34&c`Lrf}mLI1sT%Z zw<{0EjV98!`t*JN5SaEQ$AK_0g$A{xhfUgjS~ohv34COL?`vf>05> z=*(@)wGt=pTS^6h!iUQ)n!weo;~znDe%t<%cKohG=Lvu5$M1jjr{{8%R{dIF0)cET^ zbtnFNoO=I$+Q%um8BzlH^i)=;u@Y&7^Gc)#eM-1;L{DKCtnJh z{Ph?=;yVq8_Go|pxToD=Adbh|p;MPhf1P>zGw)*Dt{rI?BYCgTf56FirBm2OecgIT{(R{qdAm$cpATpHC|^`Bi+BCoIZyVo z`14aC^4jr{K34js&)#?aV(r2L#g_Urgdk7*zx((RC;oq)KNqgbo}WMOyd9sTDnrD^ zHg>9E#fI^@xG5J>!FQ_J0UaY%6txu+hz;EK5?y@)K5f#TFE7{yb+}){MJ$X>4Q0+t z!EV}xzytKHK<`k;9{;{2Wz40LNA$?2f;GDfA+rhS^5tps&-2?~>*lX@bAR1z{n{q} zp8o@#xmkaK%I;XU@>i@JqkDHH+a+Fvx!R)Nh`7r>-nNy_GbJ|m8c5q_JfE~o1Z+ESGXmx| zBH4p2+9?^GxomiTyROt-zFYq~72H94)!D>@BYJ-w?*_qHP|>e;>U&$Cc~@(He!B?N zd^?zCVf4DSd|eoE>S%>Lv}6c-w5{D5+j`2u*7ox?*uUKcDWTrK9*iQ?U7<_7U*ffUAaOv?(@^;LUgF@wnfG@!}x&ugprt!~(J=RO=L>JY5em%4;JT&FnVZDEqCKQD>XYS?(P%y7ciflUzlG85lxWtEg z_rAj(s_H7Ldv$+|7@m7B+A$S$!&EXaeaHLEKGKiO%(u+`ueD;n z9%i}#`QEMkf6-3&o^OBOVRl-{2e#^8CY66ilgfip{%sS;4_m~1JzGMT zm+_LS`-z&!Rz!B9&3*OG|GeYl0DCRyqLwrwLu-@o?${e05H&+nAl_Nc^7+wr@O zVye?!a{6{Wx6nF&&s~wjo-VAiZRM@s-|zq}QJKqE=!Y78t9|cv=E_sH+^F~8vwJ1? zYmClAS zS4X7#ChyCYJeYrd_0<>X?KI;R+2_D-H$c$yj6IBZ#C9}P+oWIi@#}x9A0LayN7dV} zuYP*W*!bNzBwzGulZu|C9!ulpbZDPi9>x8pDm+ORbm8svQu+0@5O^IDi;u6b+q2z`3nN=pFFiq#rX>g?^DoS zN~tON3%+Z}w#>dA`P0i2zG+3it*R`~%J&UBBrfu=uNzuk)OIB5yBe)H%-=Th#NyW` z$ijvR^|E9BT|}AKlsy0)EiL0P-(_l{cb=(*Ugig1Uklc1^0I$@61;8R2ODl{vU??b zeLa{v7d)eb@$>%fv%$K_I+olaJ1=^UP&LxL7?$<jQc#?$=jKiC&;LOF69egY@NK z=(D$XJ)D3?0nK(%pG$aN(>M#XBGA}38_<4zExqR69@8fN`l?zlYGGXi0>Im^rbztx z3EW0<+*JmBV`YC2DJq>;R{O@vYNwUe?p9W;h!g)gUtgnwT$~7_!Anu}_F#-wrJ@Ha za3zq%R{va_%N!mkBMsx8qHFr+Z$$XhX+(JnN(=2wl0A?md zJ;#1^Y|=I%cs~?(_blx0JCz*5j=#MZm3#;I0+eha4~g(G^%XuQqori@>+ANZ{B{;+ zz7QuBD*eETZpSP5{CSsIsD!+n>wdg~&$lAe9}%9#k>A~5FYsP3_M1eu-w(=Whn)>f z;iusV0XBcFgwDGUAvgPf4)?ouZ_DL)+i6lA4?{1DpN}n5H-}ANBuOc;*kZAgq@^S) zR#qFfW+~0ij6E~;nCJwB?5dQIC~3h+DVIr%$Wcn)8?MZu;e`Qa;ilus20xcRpz=qp z`~jCgkn#sw`q+5E*?)xP56^j1JnzfK7&$G~AH{#hTbXc2v9a-jb?oQq4| zK4XjGe8m^$n&^$){&}YyR+M>x9zP9EaNa%;PEh}@J4UmWbbr@A-l_>;^X}MWs+n3? zS^j@k7nNUrH5#|&)NC-lB${pb#^1f-AozE&=%2JuH(qUTmb{%sLET@2m)i&OA+FlS zX5tpcBes3{f<_dsaBjnF%kNxEKxVC0!%8Fj<^MI&;=lJ0yd;^Lq^D@!h0#LaFYXsR zC=@{r?0qQLSmj}?e}1<`8+(#4en}PU-NJvqkt!d=uZP+rn3L@*?%i^QrpJCH01!b5 z=<6dOj#@B?c5paQt6})NaDn0*@3yB1I`wX$AoEotetasN_lI?!h3NyWGyG^Hc1#s( z-+L895CHu8aAP*!Wj{FNNm?9o?=~+sEMpnHKQ1d@4ZU}ZEehupg$KijN9AD%&k29r z$EUBnMiCez0R8JD8|(23qQuDb@Pl)rW{#KsxAXoM+dAcYe@IdzR=f|cDmC*wBmMmT zk*6PaD^f!g6!`jqqiMVPJ~#tx)apa!a-`VR0Ztu93q%MuC&D?@~`-`&#|LB9W4D#@MvkY{Hyqz@X9pYIo z`xx;oL)<2wr){`NJj=x%dug6SJowNogWV#Y4Ho*^qcBdD zAT3Ge4gZ*YO$ZMH5WJDXd)B|cJnXsSe&OHCd&dV__3a)!e!WWGLH&K6JZ|BB$D79) z*!lPKIq0QVkB?8yciEE{67Xp1Ng^Agw-qe3M{`$W*ThmlD<8AlL zcf8dXK^Yp8|0dgmUrJkkXJ>S#|MyE0Ynw2yn<%eEW;G8@6Z)l4wLsy|IzVu3i=@cn z4+kYA32%N@ro}fnBuR$6_lNRH()Rj%oAk9h#qf04ovD0_)=35;oqpY}g)N&5Ns1;} zr$IJchiO{_O=W*^Ie=Pnd)}zjtd(9xdt{R2T@YF^(Ku;iqNUKYewPFqM7lJq)d10@ zWlH*q&<~w%tW_ICf7N8t#DTaG9j#eYw5Riytk#8L8UWrPPlh?%uVbAc=({Lfn5L3^_5`_oM4M7*fnjPuwCA)P<j=@#2J*9k35!!ui>ZN=YMcizj!j3lwwQ$%RuSS{MbtWb` zH-rH7Drmo#j)rJFk-0Ri;R6XXV9BjjF$kNrk_4Oac1-tOSy?d@Y1an~FY{x@<&E6q zc}Jj0XSIK9u~aU5voM&XsDpL0a1O=_s4Lz=9s60MsuEx|HMH@3R*mM0Rsm%?G&{U2 z_IYZ8p;0==Ggn*d>)dM7(s&$(dfVmt)0&y0!Ah9tJSG@yW`NZ}LrRryKF_hC#Ug2- zH|bEOEwWlcp?SMz$supaUBxFL9Px;WxpW&cNV^*pjW^z2Sxu9*O1y=D`_o8e%gu6&$DINRvYWWnj1)-0RSE@{?v7i?zp8b5zf zfgw1|nU<2MD%JzXsczL(AMAqcTJgd-kuwO(sTECfGEGu*zEh|Dsm(W8sVB_jPSmn6 zWfIcXTutW9nm`X*eq8HYc83|a^?4OWRf5&WOO#2c>rOM!*HBaI&Z{^U4(4d+B>mKz zYkk{cEZH#HBP}2>&Gu?G&=1}9aB6?^iY5v`VhRx@MF84avgD{UNevUJnyu-q0=Gi7e}q1r3nuq#~-?ZK0!8-{#6?zue~PX!fWOm?Y6GR3E=E0a+V1e$|6S?h#?B}_Oc zxf*NRakTNdwh~bgG;HAUGO~^l_7jFhs44 z?do)%jIy-Wh>1CftiaxQ)N_LBa;&IG)dxC^LL`~qS)hq*3Z}v2MwNfBR1FP+Vk2#U zD5xis^%CHy>ZH180!?$>S4@2#4V)@vn=LD*ZMa=)A!wdU4CO3h0wT0UH}kE9p(u(n zZ$>;PP{TD-)z|f)pOaBrgfq$>)|GjknfFjEr=gB0O}GWLgkVt$NS~(a7?!u8^)%7D z)ivR_qz)x!WP8%@854i9)vbt&K`X{tB{Cy@Ni-}YqvR9cI?JF$mkx=GpO1p z2X0H^q-I}o<+VstMwMKOVcnyq6+{Q%5a(SvT`?<9)Y%Td$N|dpo3yZy+y)P+5tp@G z#Y-EQGg0_-T2)cR?rQv;Plu?c%ZxOb^3$0DPZWLR@O;Q~R6l>`S%~v#>Nt9|jY@5sYEOp+p3jMy;gAHXPRuP*EPXM;X)~jr4}N z9L(D;rBCWwKMw)equODP^eqXNjI}c!^Ig`}nAECU<56Oyqg*l-!B|5601&ve%X=ej zKGFU8fRU$DF0p?qbRI^^tm~2<)Q9J87KpX3BBX^yaCuV=rxZ<~R*)^)1)lPek+?-5n zK(((efoiQOhms(-oqWJY4Rz4MM%+vZ$X-oN7{QzcsJ`env!pt2aMluQ)c{F_=hnb& zIApIg&gM*S&c{pAWXcrGrk;@%*%1s$i>jZtZb<)!zu;9S}#Yk9^mVLvMKl9h0ZX02xJN-&K}0lP2wEm8aH| zq)kCnhLLB&6tdkqTt_2j3YudFoCZt{VfcTLN{MDKr$wGop|0pRpu9SaCRtFm7`QT; zn_y8sI8deP%Sazf3#cHiK7|k`*sA;QV2y>Q6sYFi{t5g;(CZg?3 z+4j(`1uiKjQ=%3$v>_m`m2iN}4P0a`bUOAX0d5Q(8-l=mis)%8N==mxdwefID};a0 zYOj#)cI=MhwP{4vQ_uLC6hMYTzm3377&I*Inyrm2NFtvFwn{ z$gc~2A|>>EZaC8sNDXH27?L0W6wqKOG93dGmR4lRh+<6_{%XNh6%C14svU$>-GY#H zTNVe+j+Civ#hd531J#zTS=W;}eTaW9CgP;ISfiH5VOkgsK&-_{0MT1!)#fbdv$K8$ ziN(Yal-Xnm&bv zIwBvBb81emNF+(zY?zXk>xh4g;gqhnfW83ENqgLv5q?fCX+d=B(}?qY3$d0#hn$iE zH-MUqqfbaStoQi=id!-!aKKoDaGQc^vxYISlqzFrDOm$^M#y{&2*)6&lF1Bjb1>FV zSU|#1epMGbe5=DJHa@fGia40HB(`FYTU<7Zg(lxg_?m$<6Ozd32HAhsgwR5Ad*$eq z-bb3M+sOJNLIu>sW8H?q)Fdsc)&*0A2!0H~(Zrlo`3{1#5x^PQJ$>FbY8ZvDmm%B2 z+ZCzp+5W0EYyobHsx7_(v|R#PFBpB!S$!>QJ8_fa`xLmWqjt9Fh|X|D^VqPL%YcO~ z+ge{1>QKl~kS>Ri+f;wMi31O$Bpmbw81*bXahZIoGkD9Gj~hV<{xU-oB;`@vxr*DoLod-bB-?17sLt!-Z@@#31fUvo)~}U}j7Vbt&Z5>xH}? zaZZ?Y?FgvTWa@t*=15J@hF+f)Yx9EmWjfPw7k+8%g$1uPj)M@ zz7j(K0W_^nSgD=?qlpgo(_qz@DC-f?5o92-`aNu9Q++unh8ThKx)gA$j#H;Zl1v=A z;{@w)#__i001U-+vdWO70lMF6d%ixHb{*RbRK$Sb8NYuB!w!YhJa6G)yH8r2En#&u zGkLe`cwN>pViz8>luJ(94aDj9I(Tbtz2hSu2jxbfMW1%+KXeM3;oUvMd__ByyPa>Up&f@*F-2+LGKK z4dU6z)W)J<`-qgRWHPq8B;sTFfZ+yXWaN*LQ8<5YP-vHG&ZfMBl4jV6kOaU}QJkwtH|tH9F;*6m2RFc-Id9R6KT8kmcS_pL1I$Z^*70M5$77j?=wx*1hAmvTNnKcyXA>L_e%gD!~p$iWEf#tifNmn~fTwhWX8mjY2G zwvzz0#X4<-jAPR{2oTA-H}PE>TXTQfDktZV?A6i<2xa{`+S6An*Ib1ZJAyK{ZONp@ zF4|ocTQguKh69JR*jB%#lfo>D?5Z3nLqmW7##uD*Q5^}FmPmq@Wwhqr%=P+~tXQkz zY%bKhiwL(TODU$ck#%s@>`kI6TYP2<|!vG(~;GfaPwEEKNZskot0j z0-QrnZFE54vg_MUh)zVDaRYyQ+41;^`vN$Wk7 zktKr2DXlV?^P)tJO>syq6Qvu5JxdW615a2xGiKVFVM{sjX~5{~iV&oR76niwErU_8 zb|?q-e0##RRGT*|*{q(LH9dx0$BUfV*k1tim5Gn*-C4VywfZcC&!c};OYz}ZjW_bO z-EWL-q65vjK@;oSnLfxgPF+&*vY#u?IyNbH*6DejHpCBeleWBA&9D|(vwKSuoJF~r zuH*$&SK4r$pwkQ&&;m)I7JxB4TxQmF(TvCS-|AScN4%A5n$Svoh{HkU1l zu7|5@vf2sVsmgz0LrMS@zC~5KRO%`u%+thLjCztARwX(Vx%ym**_s*GyLN`g?wp#Z zhC7%gN-7eg#W3|MwwmJ_&Q_W|4_|x8d@QX*hV#uSjgITOgG^~Mg#e$UC?6GIe%Z=Z zN?H?cwb`uI1-PO&@jf$Z2tF6ikZEns)Q>@ITQbBk@o7&91FBq_HWniJUi!m9&l-i3RFxL_Lbq$gN16YHEw z)saaM8fwf0lL{xZYpmZNR_cV+&(~h9&L()E^znbRzl!G|*`_+KLc^>q@w&7C#39>I z$%dcR<2nUcqE12Pip}DRXe{WSEr*O{bkz|u>>}|prK*c~4U?*r&SG(4b9z`+7H$X& zEz}|xUOq-g3lZ#T{8}Tau;)_jijI}$3PrQf#lo0o*}k)04vC)B(5L~Z=kSPU;CkFM zl*xaz)?tT?+@Od#8&!rP*kF}jW!1}svCoIouIam|I#@1=PQ5y)Su+4JBTA_bG0p!Z+bD>TvBb*)LqhQdWY;G76c30NA6L@A+ zNO47I5ZtIUG9opAX<~&^&NZt|#E}&b7~+33p$#si=2QY0himdH5W@^@)|>SJ#~ktL zARKD>JZw6}9z$IgW>{gZ;`IUB(?Z1_3O&XK+H$)t3uD0&DK^to05mq&-HIaueoy8!6KoLdqAnb-ic zG-$r|p>?WNo0GcW_op#BpAA^S?2Zg=&;)@X_u$UV>PD`WyK%GSgigZs=akvPjj>3# z_+=;#QL!82SvXxaS`uFawML?IZgqbUjq@OH=F2ukHrqljX~{$yF$=!6?n)TzcT8_uk$_5rXmg=76DkX{O<*_^ z4gKn{HB}h7*YauzOfOeqDsZdBHwIZhsx-3&F>j)7F60!71Hr6X9U+p4MvQ;8M0z5~ zTNob`v_2h(Bu@$sDvw>N?e`YClr6!!O6b5~3Imkobh|?@7$6lx%$uHSp^W5bazAN@ zo<{e=Zd~y#&`1-u52{PN&P*z}PRfmxQneZ2@N*p$RC}O!7mgH2}GHqmD7qWl3(62KV&d5&HV$xtH_1ghBa2ow&7DcH&hns4j4lth` zBNGr2k~kS>ZhuwD;r^($P-#aMeZ9%4h%Mt;okKW2WY^xHA&K+ZtWHkHoNqQ^&sg@x z14NaxwkQNBHjGSVpe3`#0_h711!1_X889idxQvjjDrwL9f$jI4CdEKNCX6HX39=OB+gz zU1vZ~MPhuNLQShm$Bushge#-E5LvEQulqpaSeV%pRy0iVFrQ|19|WBsFh?~Au-s^c zE=&pufSIER-ZaMnqoL^2sqf*8%jCjvUY}VM(ylN2q~)~1**|x7N}FU zLqp24t(sGy$BGT21yuSX*vKZBLZgaRr?4zvGQH3P=k@wvM#X=vaGq%$g}2ShkXTAX zBDdKJ9W}7FI5y=DMKTwL}G@j78itEP-sqd2ifsAuN*z(raf)tm+i zrZrbpSwWly*i$EVD&Qj38)6+{=mCCn#5bVf#BH_f9ggy-iVZI_UmI3JJoR}__9s!V z#hL=#&}s@8)LMUStrw1DJ#96YjoNI|0hg8PBARE$Y909XcuaM&=71I)HlGDy1;H=~ zjtAJnT$^yigBpIXYA&TQ1!U8nHtQ|`mf(_3yVsRh#2m@Jyv{Q+$B%`&9Z~I0G-D)d z&CGH=sNi}W4;uok57tIU?~m&e-VCCNG-ij94I9olv1xxFhzUn_TYR3i1$+=HT26~S zC}ZKe-RTS`rl!cLu4<&*ZF#6vpK%l|t8t5GyOuFiXPHPhLD^T+HcT>e#TdKQG{jl4 zTAQrV_G%OkSIhtdHFc>Zl3>)Q<9s$?M`Snb)rMqC&P7A6Pw@t|7!Q|&kyNkC5o==3 za5!6ZxS4;_n8yY(pCNF|;*!yf1Ohzu*{)E_pw&>Vh4N@EMC}@z&2nr4MT2Hf6=h9v z`aj@jzXeyOn+h-%W)Eq zh3eQz2lL3RwE7)u46%Jht2LO!P3V!R3K(0pfT-3ohMoCh?y#}3hArwn$ z)TANJBwk(^q`T1ii9S;5tM4}t~iCd5ae-7kM>d=;PffjR=5FKp_lv;$H#GlK?eCqRs3deWpj)A2<`Mo zbkEI3eL?Vg9Gld|Xl_ad<~AzzdMi=EN>my3s9_^ZXCqU9dM&I$4f|C$o+S9p;W8nK zeQOzyVGj1AursJaHseAAW7LjTqZz4xp_o;-ySzZO?B*mh$ePkKn-ZZB4m^a| z(8su$=bF)a*_@76*qYEiyplyorN3x3IYw#-t-x9htXZ?c5*WpR5sq>VzWvuugVlEJDNzZNj>a_jd?`7Fd4xO7LO_@IqC7!2<$}zu(A-BiZS$m zJ%eb6%z{&aR^9HnL)mtLdItgcX){enLmry4$a1Ny>%w}8fif*{QWupOoDU~WL_pNl z1m|al*F^+-vP{&KF^a-*?u9Txu4giXx75)p;xlsQN=}*@dfK;pV9U)SWz08{QBNNA zBR}fSBrRz}X6p1HW|&Rbb-mZ)yX&xjW)9Hxs5P2vHHVmWtH26jT!mE$c7S&!X;lkH zjda#FIMz^oszbt4r6#TC-UL%737@;;sXvUl?m9_EYpOb_s2!0a#siITJj{i|$()1ZNvnr{N-KOW za;?RfUi1EFsq0pjL|xzOFdaSaXns;1He7l|xN48Cad^C{nKY8L<8{yp@(~ndi#0I~ zdlixl#~{$anl*YX1E4wY1eD!XL{sn<)isLJtNBowu~T3+7Rf*l+k^=Z7^K}F_i&&a zv|VEugi-^yDqzp(I}zPafD9%= zbVL(GeZSpXvdd|=J2qsg1X0G#YZ3?J^ zrM&P)DIx)lJWrOqq2QTElT~0aTQTg&fEMdea$?^d1fwxj={3fEsH^C6!B0S~2U6>y zg?O!I;I%bKO1LW6p3MN2rs;a6KZ?ffs39b4)#)}`Yq8=R{CdK~c`fcv5m90$<2umG z2mS)-#A+9;q8wjqR>Jy!a@w0~vjD9VKF=A`>YycZeKWNUM)EzColvph)|0jyiHf>H z!dQcc0b>Qe=^BlCNKB`5B}mx$000Jx&|MCru{84#F!C}uGK6HEb)AL0l2vxG797k;WO*VD-2vrk z`cTFD1gH3Fdy?ic*V7g?A&>_WFL|AD7YS<+x-tP)%h6Rs<9-rNpIjHYe!;i zcs+GiecZ&sR1<-C(X7;o3Aw5%B1~p8bme;hIoH=~4B!w7Odu|+;847BR%w?+^lEht z#Er#bk@=9*nwh=O8}s=xjmfTCnT+byu``}8Ia`(&z>-~mbBxys5)bYvNJUSXzkn-}wCU_wesS6a%97ZU8*OB0Xl8Vhz7zR%@U zCmJuo(PGx4+ORaMsIwSYfPUBuMueEJ&ALe|wUtmYYTAMcJJZ$>$6CEwm`;as7`0v4 z5$u`PZdV5$^-23#_n-FiK5zOm)^?Qyfbfj#g8xQ5Skz z6=S3b%|==tj|HortB%|fbev48F|mLl+lgtwuP2@srK7>hS7ez7$Jwyb>o$lCT=ErL z*MshVBr)+Z7CW&D86=&9Cgst(hY`*)>I^UMnSERrk)fGBn# z7Pu|ptqq_3q)5UC9npVi- z7t&6xU@XGHlZr>*VwM3us0Z6))EezonzN>V`2iKW`eEx2EipK4sL12iaZm{H@{b~i z%MD?9#|0ohwR+~ag8n|rj#ryXknoL|88qRj;>a0$u@$Z&BNbbZwsg|y9XSE}37A;* zaRIrOobRHOpnY9ZLjMe5r`vjPqo7jHiQza*(fBPa;MBmh9)_-6PCtQQEf%wFA2fD< z#PPM;JF5{GR}RJBak+k!*w={R*oJXU!uett*5{CknQ=LdC<8Y8PH8~X*o_Z?9O%4j ze;|JHJ`2U$KEJy@t1ENzOY&>eCCg+uE7(Dvo+hrRne7UY)BA3kn-9k`p5Pv6NY_Er z?dNfot_=doaERga%pxXcuOg@{r(IBg!W&?{_mJy=eXn#JC8c%%pidMu%M`-Gl&iO; z%uT$5Djq3y!X)QG@+9v`m1AIDJ@9sX?gNxmJrS9D;&JfyN~U#<2m73KtP|FBf^8T&Kd3_(dInw8bxq|GSaR((moi!ImKKI z!&V#vY#ks*%SI&8ugE{1?6xD#loh{#oGKequ?Do85esnHuTaA+X*5N+7_v>6nGsvj$w z?9sfXWrFh!}2FVK}-eZjbBCG(t0TzqG*(~ZL2SB?nnz3pw{oXFq>BwN1}DBK6|6P?h3I4o)8(w&(%43kXwlh$ATVN!tCL?o2;r zWMl8vsM~!C++y;KTN8JVn#1cTr}Yt$dZ=`b=;Yx{j#Qj~Ay4J-YJsi}E-3CsF@~W) z$C7jw*a|{A<&dQZ5gK(8>rG%ef(N1%eYVZ$tCJw^d*=xdszS$jIojkLoI<1KAIpJU zpx03?%fS5_iG?*hssofo@ejjzev$22T5fKAKjbZykidg-yp_xh{7nGsreG1g1_`G# zmYio4E(6AYLa`%6{23h!{)S(MLaJFi6BENqh#TAh*7a|kG2sS`#qqG5kU6RQ~Vy+VR6^dis(WR(BG@_I= zdwrb#dbJ;ldcrY@ij&9doryT0BzmSIWBch+jgh3rY#@THm*Z|uMd&C8#gL%LH}c@6 zLA=yw2Fk-&30p~qOxllz46T=3F)0p%{@umC2FQ&P7Tw{qBX6h6Jl_>_!o4*&kKi=& zQBr7sYS8Qd+XvQS;vVK+Ynyu0<5g5)42m{3W_sKyky@6y?tVyRlK81(w&$6;*7vY5 z2HN*G&wt7j3tqNpQoAUiIJ4`D9NAg#W zldDtH);N>EiP%C|o?qHv0c1{~u2_||DI34ST`Wl4b}QuW4QxUHfg{^sBb}40nc~Mp zhLwAXN-aPs%6s$3{IFU`>hRlFZ#ZfWEceNBIyzPj2VDa(m7!0e0Au0qTU@n6L6*ON zPH6{`mPB3Z^`B}YCF*xQ7KQvUr)5f!5$icWw0e-Lc$lVvWVXQxLdtVPz_P@dExdqf z;Dsj5{TEb*lVBDapMWWX+|O3Q$2@B40(}+n z@wqd$_u8er*k$PI@KK{Y55@7X#dtm)P0s`s#o)MHde6)K8DO4y(3QGXb^se08}9w) z8oE|U(VB-8`OsF^M}3HY0SYc||6R)5JpkI~-^x|bsG$fALF$AgjRMe2U2??ZYXV22ZeUtpu{zzIx8PBBucUARwSghurA``5n|1 z{;abC;Ih*Qdm*QPeq5+%-DR-En*jb+F~W+x6Thhndm|;&DU)*3Eo5P*=zw z?Ly!t%|NeAA@6bkGB$yQE-FYH47!mt)v%Tf__f0PIz?{fSacWr=RP{g!)C1E-HKvu zPpY+lwvRU3qJsvRq#?hTlu~E$z#EO@{UnY)I{BVvtvu0x=v;`#`_<7!nYv`}I_Q|+&8&Goj504o^U4;P{$P=aR!dX~nn|Hd)8=N8 z&_ih!-6?7L%E-GRKfdjDkKqPE4IeE57zr#7Rba0iX;5FdObfLV%YE_HKx%r>iK6e_ zCyJ?~)EL3#`}C`6D`6n^2`+<&_xtE2(JPvWP?-*YqB|Q%qJKLOQ~0ZEk#%o1>f>hJ zH;0uimmhG(A*Jla{@4sSz=4i#FDHl9XMa!#qXn(hK&5U5c=Ns_-q^N!U$IYC6*sy1 z+t%Ujoe`g7$yq@H+t$_d;S_I~C~S2PiNK1HP2S0qPOxli#*Q#DWriR4oU3*t*IfuqX5KK#vcR|S^dz%cb;rpZl< z7*CJ&TrPh74FBQm>j7dZQGZeWOHaB5hKTrte9W_;h>*3_uJLqDD;Y|pL(^fmD|cdx z9}}ZJPWMiLzbS~%PU9jRrC-2~#On@~?on!gS%Je~a#JP95hSNbx5JTE69UUQl+{|) zC28K5^BTWQCw0XX_1BfJW%NXv--{ffH-es$$Xz3RuEAm)J8Qo-ydR8Oa}>qtEoboY zW+jopY+I5JB}A2JsGnlvuIgfKtmPvnBRfvZO@Bz-^z~;ci{b605oPUsy3jiFDDkF$ zlVB}-%H}a%FPTT046XWZAj>uYdW4QX4zVpxF}eZhut<=mO#kSm7bgC;;@U4H0e1M5Mb`TByQ`@- zY36LJYgbzaVw(9UYafF=2~sKfj6){!8#kQA^K$1W$fy%g{wPGIA*^quM>-o9&%p31 zMjDqdxqWu5q)6?=+nmL2O3c-N!2iuuGnx^Q+>I+#;{4f1gpauRBW@o#-_i}bj+KKF zr?)4gy)YjnYzHX>@9u2T0cYa}<@xX<^a)X!102O=a`w-1*^yL0^ZX|@NBI8L4D!Rshi^H=NS$D|1 z@{--tGpwxuzRup+BSk<$(0Xu$$Q|HF9cY0S(c<%2y-@aA8lHyc?NkX>ubh@_#egIi z;UH;?b+I@GW&-yIYJ`4^z4C(MG(i$B1FlqYtlFAaAq#}xT?f|*c{cJ+>fl;?Xt;}h z5B#8I`2|zBF>WeoO3?m)5Sisum3!)LDLTeFCnV~iLn1K0;Z>NdX^Tj}X(~P)$J&+X zqjCo<*@@gSyBU}DpUYMo5I29nEP9)=8ZzUllR99jc;<$}?bOp=rMD|?{u~)LX=L_V z5m-JfbNKijZv6Jg#vGt;{p4_|Vp}~y0*Rrh{JEqvE(ZR=>?D$Z!ND4UAHUfF4A8m7 zXtlg@N#84Um$cRH(_1{4)af0O7Y5cF42pM19YBNSME&pU1PyFNn?P6Dp*N~yx~sCg za_E)NyGg_feFTN+Rxtf5a~mT~-&VYqi}+~O2blv&g?39sZa8;H=ck_3{Uw9n>b#7D z>6xgc$(V0``W@zf7-pBVxt`MS4!#KaxYhN9h&(PUqlIM=Yl#Lj|CscqYrsE(luo=D zr-OoTws{mtJt0Ot4jvBUK*K6|1e)KChI0i+e*+$9K`SB`?f-tqwxEwM_Y*rp0lmZ~ z_S(u6H-dj|roo?CFy$=oQgZKUp)2$hM$s2lpvp^mxCP?cCnwYh5 zhhq#^q(>h~I_d?hANGsl&V86ZwUKfzrLf0npincN3Wo`|R_#xPmzJ267&Cgk`Bi)c zt4K#Wrt&?1K~;ukmMme46O%~5&7wQ-fIn%{&fCp*)+CZakT_}fn|D92O5yC1jo5Xg zI(zq~zMHU^wv8IkLt@8y>10YzV#Gg#j-?n-wp^vF&{SyW^hhOiER-C2ZO!S>B!H{% z)}q5mHfq)vDHR%OV6r6EMXDzJ7gbWY`T|B;(|o*t42pJXDj{TG^$JDzKb5!ncM0mf z_FJk}lrQbH;P4muQu)iOIK~L2*bv$XWTjr&aj&=fjge(gU2A!8epfKBa-q(ah<*<6 zP9^WrnNb#?I`49QK_+73SWm5GoIyZmeQOoHr(@A{O{ETyXSqT4{q_odSHy<(Y|I&X zNgJep7Ar1aMlkyFs5-hmk?xU;nx9NFIg>t~E_-(E`%c2S6gc!t=hOk950D>*TjJ;S zj84aGYs@5`wXI<#f{s8L3UUn87XgC#$z#jaI#1uI!IY*TA>jXw$+wNdI|U7!8HPgE zO^E`zOt(a%@LepBp}q6=ffib0GR$UydiV{0WLkAs`%ZD6ZrMnU|AV&=)hnYCFan^O zV`<&a5GmxE`RQb@$Qjk`mVYl@4BItZ^n(B$Ru&z?qadfRo{CS=8uL{6;bfy&Kwa*O zEq*rWHZ|OoMisfJk<`Hesh_$;;1aC!_1H5dAX7w1lgkwJoh89u&8$h1|SNCyUh;ua&WXr8kMc1q@cQ+oMQuBg+5oJYrxxSZ+fd z^pgkS)0m}&W6%J$*+&M24st>DNh62C6gAb)Mox!t9cx?5Zbx@=dA#f6tS6adrY#EC z7JTm&4Yt-4bMZ&_S~@8_M9cTQTYoWI?n6y5I92&d%)qw!i<>rOkVh~rR<5Iel4zOV zoPFNK%8JN-rfiZw3(r@`Z}K@AnLdIdDHsoYL5e(LM)|cXkbB- z+XkTRS?qxzm;gqM7ZwdE?$qv7lDqew{00Iw9+Vi&4&rWXaB??T@f=|Q^!O@a>I zU$W7HMWwFROJRpBki^f?Xoj`(lW?}UEDvSQDX$+D?lNpTXc!^$WRz(aBoi2G!~vI^`&j1FoBF=R}#ov*Y*%+umDv!v@(yVeQ*Q!S%@X{g$#Z(JRO(96}_~ z+k)a;vAps^#^4+}e&!!A(kI1nrKbq>Fq25`6{2bt{{t@t=c%z4dl4spVmrreK1~5e zU?s$|%rT2@k~8Lickfnu6_E(bW+{Rr&dx*6*^STefeF#0?u5N+XKrL@IZI&vqIcti zO2U*nv<5@?#u3ZGt`_|m&Q%?$IWuX%tUIIhj?txR{B)oj+w5^U`EKweQuArQ-j_LP<46Nn$axu7Yw%lIt5^*VV` z8x~H}Nk?cJSERfyL0>}?>%&)4k2eLf8c^{FoZd{=3|pqDqNLoBwSft_)U@%_IDW=4 z+2<$`6N*89B7XcyBz9rVVfU(?&np*-)bFcM+IMN!h@rV@#xBz-*v4pN(lvAHrF8yM4^>pu{vbO?{(e9^McUfLa6`FKE4SZRCxkBu)XjU-m@A+HI~? zchY84`6Hg{_2!=&7lsC--39KQOA2p^Q4&PAP!?%_EjDrbIW=SY{Qg<<&l>YF!2ruD z`gu^RFbkP;;4}yveIJi z2+@QUekLp=v?2!!)ln2Z(IH@#TdKfFV}dazZ#ATrD#}B!1jEzPpNBY`+z|t@n^Gt= zybeZx`8}kY<8At;eZzuYK*%hYcqW&wYZ*TwUTX`ZcRX{ab!SQ`n-Z@W8mCVMz56=T zX!NCgz7;K|)jGZTfY={1$gV0U+R43DW_!{Tu9-L^1%evCM6DRYc27fA8!#%ldKR2F zMAitrC|K`*8M0hV?e*Dq2@g8@pz%dRgvBv`y=y;8uaIiHn~Ab-c~U!}&AFc2n-)`9 zYSE;RjklL`5C2ZPb+ucb#vnAa!0d`iQH#q^S7>ERE-1>)k|wrO&;{B ztn!(WdE$E`w*mFi#U^Qnu6Lq|F$HIb2hEd=>kev2v)5l5A=>`4l33eGHs0WxTZ#OC z1%1P7w&;$KWYnKZPp;H3)(ZT)L~W`>kgImbQ<#nND$q(@3b9Z%?=o(2MMD35!sDs{ z)pHIVjwlv&8r|la(o}A698+c5bBj_S(6s?P1`volPIh11Y9&y3ul4|$z`;#<7k72<1bHs zHw#TxqSfh`I3ck)4r==0*YGxA|0>*W3mYNNS0|%Us(8*>%&!GWX^8t0KsiW%n^JX& z*WdnayhaMHOR_0yAoPSXVZXm>TG!G#Jm+5MtdVFb2|K_r>W8lr;fH(psLI3>;_~5> zJv}|>*W2I=YP0Id@FXt+Ctu!c3e|OqmSoZ4Su2GOw&&2})#@L_;{P&q{YBVec|HKM zxAouK!@auhXQFd&QPGkZ6}IAk2D!b!>|&ESdeZ@O2Aei4DX?2!;?zV9P=I=K%YNc4 z9#kk6eBff!B`BF8<|G|B4-Ym*-_@(VWxV+f0u(ts}s;a@o!P0{xjq*pwFgLu; zb1tVONkm%Cx91t$*DA!Vw01NNuiF57m*o2whh%|j> z_jQ&E^H$NCSU1ksV$Q(6E>ZTL2?!Iv<5lEAT3(@_->UlukiQAze|7Lq{scA|aFu+9 zWaY-}ucWU|(88lovY)AcPAv!T(Gd5}c&^mtc7Pl5DgeNe z>(Iy077}fSW@I&ub`6vFE;9Kyl|+lRCHMY@owA-$m~25mDBO!IIPOEJf`W%&uCRSnID!Z0@q=g*76iOd@=#H|zNzn*xQ~B5 zQ>IJb3a~4}v~s1FB{C7&&!sYSp{}JAPx?_@Fke*IN^hNU8%4hT75p^+vCh+zXFMtZ zCZAjELDSR@s%SfZT9PRuxIKA!ZYd-Z?`6DfzC}$=m9*lpE!EcHTOVMiN9ufSfCjru zc_$!;p%K{ma>`Cwc@=)qfYF`nu2|2G!&UF|(=B;_WPDtI4n{UI)<*BW7^n-V+-=QP zk-q%R3fe`^Yd3^(0$GX6cLMs6yC{cSge`NUlZ$=8HgK5YHzHkD?r03264$<=W_~Mu zZ4Dg_vrpi=c(Cwy@@H$2p!67L>4>ZxL5g}hZ|6j0h;JDO zLtk-r3L6566Jbd*CLUO*FeJG;2$7AJz)#$F>@HRU!ZW(%7SROp90m&tXjQ90jQKHK zTjg>#3mcOZKOidG8P8RlxXY!5bdC9_;s#VlUsGa#t8a{IQyHaW`~ATOVztq|4scYY z_R6s186!L6?jMNGT-+^GrPa{87N)lB0A_@k7vj{S6L%UH8=F2Y@Oc>Q^h%?7_09Ks7pSDk)6Xf8{2-#0qm)#UG7<<=NbI@4@`GqCv4qZ)aU{M-|W z1$t=x^|iKlWXS(Yx>|%9vwCjJG#gRt3PeV$VGgSz_%~=bsd;kRSc(rcJsTwC1|$-i zGNdcX*r}Nr1^_QS^IN%4_=hG6862Hz390A+C{0J@Z$lJr?B(OtE_6C;iJo|WQ2Cf+oFT9{GeBx^RMN?2 zXs*0h^Q5wij$k94X$)D6IuCq*ce=RyHS7HV0e5$Q+*)3uJD>Q8{${MeacYKH4^TN2 zAe?wq2`~~u(jt%F@3WN)?Mg>wIOGMQ@X#5r;*7&yEvR6q!*wt)B>48heymslO?Q4r zZvio00&cIxnI--j-#;;$%6)w?h58Z`+}pnO43!%Pu5VQOEyZ7iuCqvg;o8_zp0*E0 ze}0xLQ0XnS*^w%^c}bmJt57r9;YHXh#mMg^4SFC7>70vX26l>nX%d+dCJ-$C!yW8s}4y0Cda?)RWDP3X^Ieq+IY>Irg zhz$0XA?FWq18MzjY&!>)2>on(%z<)U#J+?tspa&TSfqZiKb!$XL;8)a`Y03V+l?yS zX_ZU3*_~R44M+uDa5rFLt`%Nprc?XmT#RtR!;yE&jfVKZ^DiNP1ku)8QLtG4mwjvR zG%bIJQ-wnB@f5{(z86M>Z^GQZ6ZpB-Gi*;ip-H>QYI$2Aa+-^-Q}~746v90vL7{>S zR_Pb5^hu_L7KiQi`8hnsWn#gv=Fvi)W-(;CC+F>v*L1#@tULcgic$L429wXxcIvJ& z^DWZIh^HW%bz`@GNj4RKrVX-Jgwg;`pG85Ja|$0Q6;f5(A1+HWuvl2WPA$>a0KE#2 zR|YDw)WI?Qd|T5w0@gO?HW3C&4KW?5_xHS4bk;5F$s;Uz)hCWy?Zm!|C z4EukDEkeX3gY9)f=YrG-;<~jGX2`}PIzc_+mM0zCas|69OLz(S{yqFhnkfL-= zKU+?XY152<6iR1}!{vM8MI}21#s(nfSPFzZJ+>v$0A9(nd6?HewRga9A80&D%k?Q2 z9Fy)#HshF1&O8D60Sq$tsbVIK-O9N zIqnxVM%nLfby%}zY!?Y$W6Bo=vWl**gnS!=!lHzKv)J%v!Y^($GrMcOyfE%|CZUTW zwT?LmvC~3%ZylY3Ii;IXT-&h0?m9w+T#JeTx8od@!0@6CIOSJQEfu!e7-&NRu}BQ9 zXKCb0SO6JQc(il@A7FX*MZsK&MxKM4_|fJ@yy(l(sIj}$YXxQy@f55gGicqPSeoCo zbYRkds?3>hVlkUCswZ=(h_Rj(YK8=JuKy}_x9vz?2p8a^1Z6vNQ`eN=79$po^jAnq zb!k7^jtXsEb+00js+u@C`^8MY2p9arD>L2mRfpagXCl}QbBG9QaQU&8x3W(cvoZ;> zi{#?Bj4Mr^zc@ahBlo@SB8oE&__w_*w=Y6}Iv*JT{z@_$b6y4_x-)7_bdr-Wc^mYL zGaFuF>Vcc)6WJ$!N9IKB8+lTs!5$qIpP_~7rlr672|DtEe517L;~RJGj7l!D*cf1^ z>|ob8B}dL_QR7h#b6U=f0?I^;XOsMB+~r|!*W_3peuIJ!o<+j2uH2hO%w>^5%kh(c z(S{E#V0kb0Ec0&rnn{)+ls!K1wnr)hw|eCDB$AJ>!JuW_LkQY>UyI$pMvh$Ui2kTc5Zum zyF>U4{Y~VM9W$gsEtCqJuIC^eUAr!SxKXh-+7-5wrv$VNB@cPl#_~jieOK0w%O-iH zQ)lMwWe9O6Qh|Rem1lm4=;D`+p(T&TeMVjxz;U2HSa$>NAZP%sB6r?PJqxJI)|974 zE{p`_E{_u+|1NX_!f0BIk&cA#iHKZ*)kBjThy`RMeJ#wZP-( zmN$(qa%;675~j3a!ef5?I7H5MMSEwLv}pH#v;_9C+Z8|agWui3vI#| zxL6`vOV6B{^}P;UMFdcNHX8o*x`XaPMx}#d z_GEqS=Fi|$F$uT2nt-+@_+g=cbhJfwPO#6~3ZlQCivupW2FT)?z9nZtqSz$M9)ia*;ys5=>F;?7E#n?8(N=2ulF;q;XLo*T& zjHL8vw7c~wxbUIF?eh|(G z;ok@cdPNLXt?xYhaxpCBF!3fx2RP0(WxxZ#G9QpO!9iDAiE%X$o!jmq2G zV~3Ybv5iTcihVmLUQe%o^0&%a4~%^!%}@v1PYc20F0WuZwTbgGv~VBu#Tix9+rSFft)Wg$D9pYp(C z6G_AxX@b)7(Ail7WvYXt!ci5>3mLO0zLh~pG7G>;3&YW z#*T5B!;n_ezG{k-cPVrNh$$E8S^ivvp?(1LRf<*EfG0SN8sg@&Co6260QnKsAWT%q zL=q^8bvy@-p*eksr|#zYBLEoc)f(YK3ltwEAS8_Z>M_-SBW@`#@3>Gbg(QZXzzVVX5pdhW1|)#mKu?j}pP~&4e{$`ubNfeWS`ovU z7VJB&1fmFil264ptw~T<+E_@fNki{H9kEDg4QT(-`YU5CXQ=5~!oXqU_2f{xYKv^W zhqP7wpXPdxj_0Wc4A0VGM)CV{?ADzSyBfQpN zAjh3`rO&jD!$WJ7+T5`1DLY6wmkFg)`PdJlm?^Us;2IxT?%$H(f%P_94@qzo#mVUu z^`x7B<7~xSN`4K2{IHE&XC+ttv&$STPx`apJbl^B1l3qN`|cHnaDJU{hTf;cAh3#@ z-C~9S3(Dz+P?btOl3UbtfT+%-UXV^l%b&Z4m6K_f4&-7Mf8iJ0_HT;IZso2F`%b|m z_RzCVR&cEjz{GA!3ZfxXp4G;m9Y9KIyK1 zbJStte*@`b-dGw?#JKrQ;d);QuEjdf!7TZec@ z!F?n??Pe}@P^G*4n8MeXxZ^q*Z!oXbW=#_Lp4uWh*yi&RN6q&N8wqTLbV{`<1<92W zY)yCtSD)F;h}z}-6|gzNjqV4KVN?r$n^5cT_K|Q=kx`{YWKNYgTe)K)gzw^F6L$e} zad09G9Z6U(<4h`+7rE3ZvqE&ACXVE!XeA(Viw32`#Zle+r+?bpxjdQ`b_o*qLLQng zfXm?MugKz?koLTET9Hv-1iS8++pK-8##M2y4fN6(Idb6lj22 z9??HA{8=x)-V+w1SFIY1#f8XKg4aT`)Q=yA^ii}b-~H2Xe=<}}VaCIM&#zQ+%YjXK zWTBqDS~3r!4G|MTL2m)V0~r|~!6>42D218vg^!-}x7b!l!o4qAj~TK<$AC7;>&`}J zFIBmI8^yQ3TN0NjF-L)&(!uN%-NEdid1=|>d1lgEm2OZEeE;!v-8vidy!BRsO!?se z?kzzE_ujiV_ue1Zs}J6PFMDJ_5M=6@(jdqbd5HhSi4xC7;XPl(d*(fPvLmLP%mV)` z3y4^v#zX1f>>dF!jP`Fg_2BEssQ_237PjbC%tdxjS-S>4xqn3N*q)n^#mw{vhpU-r z_7USD(n&a={Tpyg2>A|K6ur7+$si5!=rYV^=g9&iZLcQY1I!A4#EC4*Yki{j8(q`X z;=j={@+I|otXsw&zY0l8rs_%4uffLJOt>s`ri~_H+m#{4kI#Mq1Sd6u(&Q+8b|Q-J zUCV5bh~WUe?y@3#>4Qg(BiD0_I(E=xCNk@A5%|KWFcsq)eQLM4sL%x3I5Iyj9`sE_ z#4RjgTTVa9&!07a(aWsYa*F7piOPf$H#jTq3tiX7Gr}UO!ixdlV6cOO6pb`0o z4Rd;U77}R*qGSp8>LWw)~s;yS|}o2Zd%y$ z+c+ews@ZZFjN=U}v2|ETU48_htS|CsD4F^4n%9+th@b?ZZ_}?qAC4}v{PRt)0}m0n zt%ko1u#J#^WbII3$MfAy<|<7APzTeuU{O9 zFFFLjo}PbE5{wqeu0RB$+%hcLr9rflOaeTN4SmXZW7>uK1jY!&*+quZZKh%Y_Ke&LXOaci4!{UrD-}-o4nnBTv&iwgkYN=8?BxB!$X1bpq_MY z=3^imROf!VO~K6;t#E-zY}RtmfnR21FEwC;3(>6`(@jCC*=!JI3>iRKysz8qGf7p3 zL;X>6_I$Ic$kx6sZ|K11tQY9aTXKYSOcW${xsWtWoMPWRsa)35Y2(iXiI|NseI@7L z$BqbpO+P~?Z#(_u^N88+o6n-L(Rn90QokQ?KmMHA?T?L0g=m77lN#b$k*L}U$YnQF zkh)t(i|a}=J2thtyFS3gH;k2Qy}Mu^_oW3;`Z%sogXT0+V)E8RUHYnjY)nO(*Q4U}SY9&bmxLSWN`Aqmj3P9N zq~*TLdFJYvhfI#E8pyLS8`f9Xx?Vl--(~ z=GzGFJBHds*`!K)Bw`=YqxhRHGCVp*4`mu9CTs>NYS7`QXt3Vxvt<>qhoCDI!g^zW zRg|fs^BO7Ble?)y=8c=uhPF?v;sX({;HWoe(?v06WQT=CLn3M66q^9@eks=HL8^J% ztB|a0KBlL+`-`C*{NNn6&;4>!Bc|V;P3^YB>8Z_Bu-H=A)}!zMSiEy!qMLTB$w#a| z#9XNZX>wkY+#Zc!3w3N;r+^jqw!_4KAue8q4Fp#F9Gc_HzOVE?LvDh7jb7=d^_vZ|1S72SNi;NT^ju`b6VRtbHK zJ6aPNO&yxX)h@_P17hX&BgjM!IE*7r1m#JZ6$d4_pjn)hpkKD$~Y z5(*l(UbZ+OvUZNMCCi|n1#^BG+J%d*Z)Baw+<2kUF}N3hoc?B#^9y;us4n%5283Vi z2Q-LL0eIHAndx6QHaw82gtqzQwZD5KBZxGrs z5auona@7MZm?Rk-v7rDLe#Y3IK^%}$VU+T`$?oPN8moX6fLF|FijSCoAEoxR{o6z7+b|tf5y8{871zbVA6)-0KYf`ogHp z+_T_~BQ?xs8~yn&v$ zDTRQoz%onKxf>d=B3)lz-=eE17nR(J9=*f{k)>Mk)RssCNk**P+2ElCN&fm2+=72u zap2t$>jIo5T00w2E(|S66zx8B$g%?)y%Okb2QN}iJ(3|TWoF8oo^(+45+UsIWcg&Ys&!`Ckt5_74GzYF=bPlWXwoJ zQG*3dq(Vz+LoVAhOD5Xh6$D~MjPbGW<1~rnQ^V2ZkT+QJR6KHP8z{}I z;ioActDTK}^)ViQ-!KB%pkpgaPI2K7;Fn)`D&PI{h*RMX53BiG$As%y^XqH1qiiyd z_Z$xF#_@e4U1IEcexmtvH(P>*vOLAF03Rt8*xk}TK&mUheCIno+oUb?qUqL9ZA8_5=dNZ8^nlAgq4OP9h#t4}3UE=3L|`cz?WqwOuUON`^1lc7|Z5`Lzq< zdVFb$hZ^FiWa9{(RG@A*K?3x{Fg`lPg|nI{7!&pp$p3m9>+5nh$ed(|sON!t8AdYR z+f3vDKS030pbG_p-G(scp}_mH6n}E)v0t!LjXJDMjtadP zF+1UW&iqA(-3}trf5PQ8SplQNr`U6@JVHBfWl46dXEu;yBVdc}D{+pM^#^p@9=}S^ zCUEE-@6yCvXzBJi=^Q+V4jeEwT4n~z0g35Q!P^_6F(QT*AsO<@mMjekQ>8!yfJP{N zVaJGzr@zi+ep|3W2U(f3>pHpC&@5M}jYuJ{-EiSRP2k|nHw%EPS`)j^M2~H!F`XaiM!Mhe}4dCq+#$6OQqc9Rdg7l(P1>m zjp^i0aS+r-A;Al><{%#=m*N{#$E~p+CnCfhCZ%6C^7)~jS)6B+(2T@!r6t=;d7kG+ z$OF;7Nw}DO`b{TSO*LSRjSn-yL~xeB_#+;j>-F3KbMh2n;_I}Z7iPp15870^E%DGL zQjf3+f7xLlCv4IN!>J)6B&aCZuY8fm@@O>OLGe)Gx1OFDB`XLA_eTH1_Dj_VK4+yY zUk$R`jXd8YbwVy3M|m3L*W3fbei@{w&RYE0Ng%+{io_ZVt2@)N@-$RLDCLQ@HVGs< z>1>=?+-{&ecd=$lNX6%W|9X}W(ysn1xC$sziU-`cjWwW3~B$G$O8KdC(Mcf}UGa87hpv z&Zq&;%3orY4*S@Wvi`I#=rJUl6l* z0QlwKQ4I~2^$@|zu>Y${;)jssRr{v!e=|PC(~H!C+R50f0=2!kSpD5*?FCvo4N9xg zYZ-Xm=~lS>7oxjFfS=-*mMmkX(i!6yARsxUXC_v>$mfn#&O@X_gwmpwP0F+JW44aX z7#?mauYz=0Zr`ctUjg4r+uBn)gOO+Q{MDzJ+pefImbe&c;*BLE_PsjgJt)J{}Jq%9Z5kb5mHNOuzP zIG8ro$B_;3fX;0)klG&B{jlUBf8GhzZk#v*69VK6?wkG!y7{yoe5J!UpmY`HS-t!z zqg~+QDxpLhcnP9j;=RhAeg|q<;7EP3rFpLK6;Wk#-kOZ1=OGEv*=yY`ECBtP`gK=h zYOA-j%5L~mepRghmQ`aF$sDy=+8J`y7FRFDX*U4?=99&Tra%K;I<4Zvf5o)G*Qk)h z57*6I9gliB8NKkW)39|LH1ZP4W}^}XBl2)@?054&VIVO2vRNt3 zCC)a|Hg~k1Uxv@4kPfM;p3n9zrfGpz<0&UE^QjtKycPM;_CG7Yxc;PXeoh7&VeQ!x zc#dIiDRvtq$sC+~YAxnQe(>y(XSmnTyP4(9u6%+C=={9t@b~uhSj&ACLm6PNfz2sNy3E zEnL-T#@i_bfD?j&Xy%@lm&$o=|Lom-iSM~ljMWqbQHn|@(8Z+XR07~3@ke*Q3wT~!Mxw3+uHJA6OT=^ zumbx0R__OX*Uirje?k}ou;VoEp4V^$^gT3Zb{CG4v76lgB=0n;cUy0|T}fDmLUK;l zyCXe)Wk`n+Nv#*{C4qw1+H!McurYtbMN<4PHOC?t4sw-_;SjQR^1_scDBjSm1oSo_ z)4D3rElU!8tmeCRb*Cc8^5P}Kh(%&zv^Yif6ZN~HB(&@Me|~o|gF*bNiL~pq2Hg8y z*4-Kp8(ONsOyXFJiYFUxri}6#{v$RfMrcwbEjpjQ8H<|q2ca0h>1S}6JSI}EqhU2C z3PLHA*WG5-kGCOt?gcby%l4xW3LplrrcW>t5$MjEWp&(SDzj-ey-@QCUif&{N(qNn zl`qI$>rUu)f9+e=TpXtA!*p#V$B}RpzQ7A@n~29+c#i`B$i$0)1_m zto+iFfIAejmXU{d_@)gW#V3>5!ra|()7*t_Bu)Mzf0HtRdHiE_87`ft?)2*Jd8|$q zM^k()hU5ws=lF|V0Kg$h4q`g!I%Y5YTL`i!;2tJ_&>)opiaIRa0o@nW% z$6b>MvNoFb?~-qHW1q8b{dVGV>+Plx8^aA|0pd~eljM1JZF6bm%h8_Sd!~4?omlYi z^zVqpO@T*ogiIhGHWk}eb){3BHY>+FDh`UDe`QE4IVSJP2s$!;^yJ8}{Sq+A!_70Q ze()W|w+gMXCpSQsUrnvG9LB~+PkV=BihQwiVvQ`Z1|$zI=`4NsdG$0wOrU|$X>oO% z@p>koBd{4$R^pXJ3m?0%epJe+l~s)+o?^7R8X5qH^@##x&5>B-ky}yQYSb&X_$a>DM$AN)H(FM?8;)_^uo~JHfs19%c^e@?qmDd4!)Le;Z!BQ0oZ$&?NqrLs61% z->WrN>v)=;)CoX9mtPA)cy!MyBb$LQQatW6h?5%WQeTQXvwdA5;R!9oCrUUck^>dF zEeHBEW?KvdQl`v*n_(E{udrtlz_}j>c)H^uCW8P3&$*@d;}KF3II*i-RuQwM03T8)xe)P;VxRRj$IXltlR(imiUNZ&*3sJ9B0qX*;`-f-8~5z&L_J za$7vj{RA?;d$Dci1oWbI^oFk|cLKS6=rLv_4}uINtv}X(_+v>SWOXo>e}JHN;?a02 zuzcJ{EYjbTs+93t*{ex^-{OH>KN%&xJ9|j{aexoh!_P6KiMF!fGGJqu&<>6-(+s46 zJirw}Wa)70^&e~;UXU3&aoBreJ&Ja1W2k=$akj2gh9XUx%cC%XFDa4CO@lRv(U9%C;y`FWe=H4i{zUJK7R3tlooHh+=fFm{u(%$ z0H^{Tek%JH>;3)=9F4zHZO8;sWzNTx_GT#adbmD}4tNOcv*iT{Yo0O$b<3A93^Ywp zZ|+QAK}GUe<9>Xefi1dh=YhqB9Oqm?0ViivHe`jaV^^XFKaIcpf0#K^cZB?@14l{^ z$)(~>f7tE$ke za;l&py<#Eem;^pNf9XBcvVIV?H?nD*tq!2q_I~u_C%Qk50PrT@qLV$9bUs_X$ZJci zy?K=lEH@tzZC3%?)b!+5<+`lRoCv~n#k`zA@ZPufQf61ItIa-q_YttxoKDDQxlqH=~l=a9_CX3>ygpg_|H0oJf2WORbf4?aRle$JBWq}gyjlnU3 zkZ>MI66pIpT8!}2ErQPI@kH4TP|O!v9e}`E#2~aHiNm%G0~(C#cD?d^5Z#wJ$eL## z-tu+xRKqK9D_xGw!~?@g4u5k;z9zBlc$@Zh4X-J~ATullu(`v~Rfs7<#cW=#+;U(u-Iy%)!c=m@yy_5-K zN*?;|l_bSjt}6E#vwd1i^xneap13H&^tcJ~t5;S_ww51L)gsVN=3Fbjm?c7ud~&`4 z`%`PyX>T#xnK<+1BUZ29Ppa`f8Da-!?pWz7~MXaX4QYUI*vbCZ`)aj z@6Sc`AY8TUc*#$GIC3gK`jF5Be8q9aSh6nH&k*?N#{1+2IxpAh<4+l%F)-k;7-8`|3Tj86j>BWy z@coHlA(Kh6jU6!N246!Y_ZRA>^Mm>M)>sIn3okB${!KHJS zwVSz!sUL4C5!|*67@f7!RZL>OsLeTPCIi6Uk&lFXoerr82`M$+SQ?xFYpolb%9>s@ zNt(C1AFC}uM?feN8AcmH8c6diWc8sy{-vBl5s&ZMThTRQLG`?GmBIn95(_(ie`#mm z`4&*s6~0rT?V+^MM&5JD_fBdrtB+3E4Yvf%F%P zd18r&!}V@CA#E|5>P*&c@pro?p|FnZS~8jcROwyCFB@=P7F)g+Z#9-{sofQ%b#aU1w!+j!b4mfGkQ7pRA2R&Lz#Re|zfZsLB*N zh`t{8lHB^1dr#p{YcgxCZyUnNwO|fq_g?843YhFNe;#;KcO`2%_j)o+w7&a_#wEry zx?0WUwKq;1mi5c6B`riV)4k^m`Ieh>e~hIzPoK>U%}^}Q*`1w1XQFcf90Pp7i=emW}lqw%X2NSoU5)rr^Bomq-=4Dsqryw_H|?%LqINTF2HoM|!tQRV7sNZ6*dKqEd&qPF3yJ zFluAHfaK_P<G+oW?f)Tf5QDg|N6Ij*{AIHAOC(Yb&dS{zy9;w$Nwm^U-n=1GXKE8-v7?C zqav{XEFoHQ8-`{6ilQG3gZ3?q>m!|)EVjsueamNXIvhGAd(Pm)jmY9xMDo{8Ee zjRb~~oqs^&`w!F++`F+e35NcZD(1ggNq?&Fz;7RvPd#$PY#D~VjekDB{3)ar^+2N& z+*a}*(EqKNfAW7yvS&la`6T{QhhggWuT%Pi777s|Ya_jL)Pxv@1^;Hse1AiS_8FOK zmDxA_XZ_%R^y~j#Y31l@L{XVr5jkOl&=~<=R4x3@R~4+nQgXrnDN~j_W4dMIPtM1F zFVXgauODJgXY<-qJ-o*E_q+>D5$@3E`I94mjvHU!e;75$Ip#0lrmO;mfU`14_aqo( zH$J1i8c>%jAzPvX11zGTtqWkj1Q?QLqRc#@t6beL95?ybp$T{qPi#5xRG93hdixlV zWRTHlL_ih+PkWfCXT`RIK`I_is(C)taW+~OZdz;UN()Y$Y^J*7LnX@>bRThr(wJ++ zZkRy?f5F;|xd4-+omV`!b{IOnrggYTg zOd3&cW*i69(v6%F^i&};%;w{-mr3&cw=%jV-rwUhHDfSE_oB#aIQ$jGT;pLLs|rKQ za2~I^cYz9J#*=V`lMRaJk#15f&E0I98#YQAf6#Tf>`W8jw#ZC~p>HMr?7(RCJ4yFx zER5HkM8K>w;TV&yf(SAse?|f4#7sgujG}WMx3Ttl zHp6AKjQ4}9FCYL!x+kdpwPp7!$0N*6xY)vCAM;^vK}1=LIv3qN^r+Q`V~_BdlI6tx ze{Camxtmc-k?C9j;IR1BzkC?9T1F4|<7fViq2ue2QVpwPK@dwIBLU*zJ|#V?@MrLD z{RjJieuC~%NJ%cVXuvSSmEL!Ev-EJN4Ri<&;&;Nt2+&~h<)%zMf~R%$Q>RzRx4%6I zkwZti}E{#JYopYxVX+ME_m2hw<>uqFZ&K zrWff;DJ8Tx$j51amPaIL4C0f$$_Frsz2vK%(`X@}xDj%x9~wnb*ljY#=|7L>i56sCuyir$F7r+ z%!huI?~gQ81*7*Bg>oe4DP7-jLa};v=Nwlguc{Qe>W|}<hqK;PT#^TJFSHt_NiI=Vcri?B%X_>J&NXTzS;~e>t8J>5#{T z0;7tUanq*ZlA2xy?db-#J$yIAB-LWy8(2LR4bTOs>a50WbbVSAXtqg=|J|vreWyz_kk&{B2n`^2kJCZ2-~Cz=WDk4f6otpt%~_VlmsrI zJT9oq_!fFAV~Mf+0@TjfZiqzM(e=0%VS*~wI>@(C-+VE$Bw(?l9_3(sq6|;yb0-`O z>A>a6)dgD<YP~#8=Sg8i2NyX>0uY%wUn!O&Dd$Y;%~bP1R(y{d>E72;wd)x-5tMniB?9rQH~ z$y$OQbpG;*ffwYo<^p<|k)iS7QJ%*b2Gm|Nk51yc2=e}LTQo?d?!u$Vn{TcK5) zzqL^_+(*Y{UW^O!VUVJS^EGT=G}U z8R*9loM6s#qUINp>Z@nF73`W@8=*Wmu`ht)+Y${UFrP$yD0!yE2b!l<(R$e3l$rXU zWkimszYOT3e=9eolDb{b=RIq1&`gBkbwKaR3>qEUO7oWikG?lSc#_Et9n8 zR=QF(fjtd`Lrlo26>@KK4Y;}LdjAlGS-x67vB2>t`eM_j-cY`go`bf<#f=5luEGAKCNwjOh54 z2o`o#1G)pYT5xJxVB>UTU}Apmk?mlTx2sVWw{eFh@>FscnO8t#1+n&ch4BL{PL^Ed zZ<%u!H4T_li&e)jt5O_}MLO(lm6&JIDQvREd3H(;LoltCXaGXa3MPlxKvU#aL0}iK zs*W!$f8F?%8fmKX<0@*U3Et#n8wi*K9*$+J(IPik2rBcNN`Ndx%UIt8O4qq_e54hq z&odj@XYz9OkrC7`dn49|#t27gv^f+gp%+d@L35!Qee9%=bi%i2D-40C%REM$<%FUa zT^GM+!EMv)KF}XO*FM>m4tn*5>`UeAg*Or%e^S3N0;3$GD3WB@?r=QFb=3eY#4Pow zN(q`{B?7BwNvh=A@-0k!;o=IO4Iqf+j5Wh7?nc0DnMCYkdHVqw4WKb$7$+wr&{$O+ zQ#c)uae5YT-!wXqel9ZYl7kItyy$)s!5VJ6P-R!otH6Y0$ zx5%Tl?i~wP1=iiHFMW&dBt-bhOSmL#S%qQjq1%kXQ$>flz72BDQ#`u?}TUc~H z8nDRazH&+9*X!zL)Gwby@NQ1lNr~$b%BQ5Ok}L(#F6_FnsFwyf^6I8McnGkge`F8M zhapvIZUTb|tn~>L!`QzR34XKfWX?p00(`;vnYEv|A<1N6PQy)B$Mm`{fz?5$ zF-O#>UJeJOmj-3EEspL_fAjl(;nSZw9DGk2d&u4~dX6XuBtdg#+`KT#z zc&?}N`7;N(1W_BoC1()>i{G>(+>~n(a8-f6A6Zk`YNL4}GAX z;=-KXdqqRs&H||(+QZNndpvmN3C_5M`Q#8qaRqmN)1Bc9xLcn&5F5jVvY1_e=>Iv09LUXf{BVc4&!MQY>cXzw;e||&yus<8)}fA8D|IrZ>)an zC?;Z;AEn(#Y!((uqcxR2m~X~tv$YH(ynVELPw(0<36a1?BruqQoTDNL#7OJjNz zFm(4c@A z5|o%$c3+bp?hxVxf)@v(qy-*~C!6y(ARemM1+MRSpPVz8E;M9cG8Hc+QT@p02X!_y zGc;Z36bjVItZqlEpngni&oCk`wdCn_?iPlvO3#AnLG&{TPo4tha`p_r#%^eX z5EvF#e-(eaVF|HTwJj^)UK0R=Nyb zC2j&a^aTnyKxSMInqB-t2y78sGxqq1=hV~w96Hjau0^fiGCt%VUV6NEX{Zl zZg)CRPnu3KBy;XZRfy7ZF(7k8oNEc_wsmCY!7^4BuMV2D|NXP7UcL^RfJn(Qux?{8^tC9*Q| zOm~5REwb{q6Wc;U{F;DF167{(-H5V=1h?T*uuRNpfg<9Z5ja1~(t<7xxJTNq3} z!3Cs)trCM0^0W}9bY!GDf5XK`@Mk4r+QZL#C_|1%(jbAtnLzQI#>|ako)KcmwrWTtQxuZp-|dFf>MKBtbc58lm8*;1C6=;R*-{o=|PplEP`oNCott&<9mz%ObcBA~z}q+hK36dE|dv`fV;#@FvK zr9vKqrp)NX)~=-HII)n0D+^y~d$ELouS~M$mAP5VcszBif4HrptqQntAe1!F7qbU= zeWZF5bdR2|jbS_;t7#wE7RCPf6PAA6_H zxnU0>H9%{bX-L_2h=stMyiu!m0d~IGZS~6QBxnf7e~Jfv&l`q?IA{S_ZeeS(EcSy( zRJcjiI+AwoUw`_iv5Nmo(OD!}2n0d&KrC?hEOB>dhr2tcpO_o;raP*$GV>kIEkf58 z5Hb2cZ!;(>WT1liGd{w=2EM)UK zSymVee{lx;#htzuz>8gOOw2v73?q_wD$e_cO-vgJh=`MywnB^L+zXA)6{mD3umit$ zAXNtYO}XAWwmu6l!L!4S`&Q1k;6FM5JQ1=kXovJ_EdVizD~y^KX7e>iZ5*}fw|0vl zOyAx%b=b=q0@{G=XIrD zFq5tE^o4`$7rzt1^5lj8yn>Pliesjp2qD#2&M-01$JU1>TM8E+;!5}U)Dbr3Q-Kd{ ze~5B$2wog4<)zHbgVyMf(dTMZ(GiOPfwk(rixk@my>M8EKq`6VObL@xewu=l4KO0etxQG)`&WA4Aj^gfsL9b_BF&6e@{aNadqV+{4jlQ)TqQ>MP%O_9(>?`zkx;VtJd3)B35cM~5WJ6)V^#fhfj=&P8 zIwr+ ztcrT(TS96Z8e*XEK`5Cpo~}hzk^SzzYHC{*Sra-2@Oj(bHj1@gAw{~Cv~FF_Z<(7% z5Bv#(Dh4WZy!B;sw$SRKf2oZ5G_w$^KmUjvISJ6=r}c1in9(#PcP8l+cav&&thxPK zTMkV87*0FR_~B=|b)n2wk;aky3MLi)+P1mA_QhK_XF7%`7(?`Wh=UiDfR;TA)`s`w zd5(M?8FDoWlv@hvD6m2{p^kgDof)Hh<-YOiN$HxynBR+X2IAiXe~i*943U0AArD3+ zWv_UbHAS)(dcF?xH%FuVg=>_uv@`LI445;cfLW%%zP)jJ+svIkiD7%##_}Sk8<~je zQdGbMkA!ftPk6RI1^^51X-tt$%it4d7~a3O^t}cc9Wiw`JlArCSw$M*g5$g_SJV>D zl$$v*WxhS+@7$`Fe~51eAogU(%sx$*tjF>TOGqWH1kOYo0^5m5L4rdm;x`Cd1BknZ zea3oo*WCyzJa`X72{yzFps@6~yjL;UcD+g#G`bRTnZy4jQY6)wcX{%c>_?Jq$Udr} zM$D^Ct5VtSxuZ-x=1_inw)Jqb)IM2e(s{~u&GPytdtOdte`Vn#eg-FpE}pVCXB*~< zqDkWG5BdbhP;Tkl3H43PDK?3}_bj~Jr2qjQLiAKt35AfJlSN~yWU4|(O|A3Q|$PmgbWT>!Yo+M~~Y zZ(22MrmrMfSy9gMmY%@Mgilj1N2ekr)&#-u%5y=1m*CeR5vKs%v zO0MgaSRDbXar3M@LS`{>|D*B&S8ZkPDkWrzZg_n#50}9Z(Ih0elsWyAo6M>czq

{$zuxrmMxK{Yewh^_Yx}7U|g!Ie_9S!S7qYQ?{4B;$EogvEepeUu^&Aj z{?~OT)+t_0Mhk4PX*;oo*BPS|#mc({&;K!32r!5!zJZ^^R0w_12|`Sq6uFTeOErHF zKbj37hiT8;>ynFFZZ@F*WqGyPD6oBhjUzy5P6ENqRyjS2xdOMB1?Fn8bl@{lPLgQ4m)dE|G zvTqAHL>DK7@7=}I!m~7TC6+CRG~xO!f7hGa%~YLR{#W-EI0@VfgM*+>E-)l)s?)y| zC;$UeAS8Mfz^|;V@e8SOf5$|*(*py#YIjEK6$ti1D1&DlqACy|A||w~A;!#9#E_k>)aHQ+#XiEBe{3~Q z%co)hdWH-onJBs2W#|Jc5AI3oguiTye(VL*Od*bzTEGS0dVYK;1;*OM93mp|5NAql z81}=d#+G*3(ht#waSgiQ{A^wCw>29#6&cKhseqL_wJkM}`WihCOkw1N0YArZ2L}tb zjeR$IO&H*l4WbN>L*x1?)6c#ue-uaYRB!`~u$r!-*KIEHcJ7I@IK=?bI{nV+GCS#( zox=fvTt)0qT(AZf%iANREZKs>V|w`wFhuu=p$BaDf6cjSrG|{^ zLzLV*x5o%4wG+ZU;um2X0=AJ3op4Q+Ye9ikNdSa-9YPV4l@6^I$wDf40hr1!H-$OU%j`tNXG>OXMA0KFwf-1TA%ZEzrFC~xHjk`Cud<_wliPrLM=qOgV z6dn<=C+cPLYhae_B%M*AGiJ9E~-hZmO-Ei0v}Nnh)B8uf=XV~5CpPD z8mJeTIZbuW+OI6T1^`eRSIN7^k()1dugjX!wg9OPg)A)L0(kW)hf8qqkOG7@e4qli ziD1wXBmH9?4EmMzwT#WK`X<6e?}<*~tXybTZxoG#f478sD%M^Ah17kgPw2OD09dSV z7d|Ed2`PX}y}e1PFpyZNu47NH5G^908NZofDc%7xOv$h+Z58`>*$R|K$9mv4{{~$1#^DguTejVTU*bZ3-HyVkYIKp3Sk1Owb!aNEg9X4?hoekE;&*1+cw@ z{?)k+N#8TpM!^y@jY0Pu@3)vocLeVKqPnUIe*vrRXLM7uJ4`2uL)Tdvew3Q+<_pqisb34_@8pi@B2CoM+3;x9sEx>P#eP7!o>fI( zFwZa{6K13|Mnd4_^%_HTb%@^>u^}oU35#ww4&9G-eztf-{l2IKzUJ(cmYzjGc$biU z1QvgQgZibdZ$rw`2yc7J-?Bv~fD@(u4KIswqB%n%D}VPzTwg12aBJ6vk6$o!K}9=R zwc<^CEdQos!tBQNXdwb|J~FT6x0nia*;?=09`^7;SADb_d^n{Xo96P`UA59WGU`PY za^8L{lLbDgvh2zOo8$aOY=Po-g#AG1@(zDxB}BD_s=IOZ)DX5Mx~hVUMBfsrv3rQ^ z(Ok-pg9PqoIBVz#=21A^pMbmv-@3*O{csvH98q0v*Do-qv7|DY`&joOovU*Iu!r>; zy)C8UI=64Y!fi7qna<=DV>Yry%GRR*kXL2u-7;h+;`ETQo1_gPMK(Uv-AL&7ImUnQ zWT%9Y&$gMX1HrFUXq1Z=8@`Jxx_?)-{`r&zotHScT2BE4y&!|Uy8|hS{x=S{{uW;R z;!3KvR??6WBn5rGSk-bT+Sj-Lfz}%5ZG*7IlU!v`HvQjD7JiL*PM@VUIur-#d-yrt z-nreQ5pP+lUc5d)D!xV>>cI9g${2qa07Yt3!;(~bI4<+yVf^cstP;a5WQpyjBCarr zwY1F3PUDbqqxtcJ^)YmgO%9$`av~=6g&;%)l(#IMW+19JBl|u1a@hA*(o~%Aq2w{z zFXZeKl$S=r$VTM#f->PnoR-8%N{6vjw$}E*NvkSuwj7&vSQrURzoJlWP56IjENnsv zrv?za`^^1zLR|jy^XZi!i`r9-jnPZu&FmQa!KU{@U9Um*TVcr9LEOjKeT%xUI{(A4 z{1Q}qeL2Y~dQBH+LW0|QansOW%sxnsK%bYeXw;@&OAm?XdPLSlt6w0el~g36wuUDV z3IW?Q)frw7Ji7yyRunL%mV*OT~78S@`e;@ozTbZB}D*1iP9)Viq0Vpx9LyQc}BCYU4Cch*j?rREVI1X_L_ayCyDkLn>-s!w8^>F`%A^p>gWu)d(=h3`FA z6cG--714Y5326K{C7pkn?OE7%NrkpCn{Y*<>zgN$c<0z?)YgmUqBUL$n7cRObw?B}f~so*(Xp-VY-Kq0o@4>7LD+Vl)TM$Z z21Uya_u86?9*iE}5W!j>SCO9~>5wq4k|+~VP@;WDn-ul4CPjY?iYQtPRbH7YGv@~H zTjHe<@2YxWm~y??r%Ky_hr&SM`BC9VF21UR%nQuw-Reh_^0Qw2+zmRb^TpdBxxs>q z_Np||LUM*>b21b!YmQFcJ^lHfCP|`pJhxfelf^Q>APPujsoR$ps7$}!N)|b> z5*I!kUmBfbE@UzNDWpXQX}(X}SFNfnVF5KmE`L%A{2ew*!7UDx=2hO5W!KMo+RRGw z;8y&PZN-GiSK7<-FH!b6>hg#qwT zh;G}Y0m+hs zdMsU^hld>&ll~N@%M?MFU+INRK-fWXilW5C%uA7+J}?|^dX5{g+7E!qJywTLrGP67 zJj$lM;kkcyy@qrXm_U^cULSQk-p@agiGi3EH6BFuOtL1K!b5=l2)&B1$wtU7cskoX z>N)j?8z#-KvkfVsE;thmw?~5XrhKK zGZm|BJJql-`@nXIDq*wpD70aTbTfzs*Y3E#1v9pQR0azs%z5EPbd2309_z3sQ% z2=nQ=GzIVJ%E&X^YidqyhozcmN@yh^5Wu3PwH{Ew?a_$#ahcDK`5qtXn!@Lcm%6@W zC{=%?REb|U4|Q6GEJhwSTG$M}QX&LMAe~Z?``DC_RLBENVF3B5urTO_$_kHMM1whh z5HT#Kl)@zH<$6iZ^IBl~R}lf7mSy(-b{B78hz5SldLzS`ndxuMVQ3}L*X@cmp!)u6 zAGQ%9v1Pj~dZ-_EOAcja5nRM3$r7Jx`lo*l3lttTKezf=2kEfhK8K^b&P{a)H@&!$ zH)DB3=u~6z?@?=*f05(qMP}NptV95eM-Tz*e+6pu(R@fS^Bxk%ka*L52yBliwpJ~+ zj(Emnu@2KMCg@NeJLvekHnPCwQ4&QA&Eag^RuT5ftPT9>mEpHE>D`^#A-uV+wrBR49P2{6@nvc;{Ay`oof!VtP`fV_v z9hse>D!zR_04};2S}CF+G3uo(DV2(^HkYd26NH-dAEA90Ch$W7E{(8uN4{zLu$rwN&TERLhJt=368^ zVIcRP?NRjC^N(*0%O1?3fZY(vio-&`!-0MIyPt_58QgQW1leOx*LB>R^uvESg*6-N zd6*Q~=VBw=#km&>7a|Kr|DDPkg$j$&7jA~Z2H45>>zKWU3xZGPh%}Xt9_-y%2!jRC zU*h@6CPf{DF(EcSB_pMqo_xTP6~GeFG07T!IkG~mXAOU3HJQ)ATiX#UrIy9iw11;&sXcD^k6iE)spn&ceF5X%?^f@WiM<<(5PY63LuGl|& z8~*!aO*UP|Fz#Y25OgALNM|VGAltJw;1WpKB=vqhdk--#(I2IYG}e(WgVMTxjyF0r zu)X0nmGen*l!-0<%@KfOTRrWR3dCo2OLuVglv%AqchYS%Yngi~apY)s%yk=QxE9EOEh0`Y?XI=yILk2N(%o#|ldf zSq2UIPNAqjL?RD8>#HzASrUhGTPJ_m^FEFv7wXkC3b43G zxIvmdZPl`QKk(G9E~OQ@>$*TGg9`(u{O)*@$SwRK$dh;|mDDI|vNrsfUqJ|eMiFZH={6&e#BaYn z;|TFa+Jd{0l-B&9O!$a={ONB|&$NumM0^D5d%hM-m4~e%=QuJ-O%Pbxj>=8sw>A}l z*#o58Y7R{3vH4+smb8gji%Xq|R%1Bjly@FGA#F$fiXc=T=8y!?;#B%)R}tcOdk1Se z2j9RszgBIS)#bE*($sJ-R>~5uzNg9h+SZ=RxTa9DWKP=#rKY{45k(5~O6E3m(C@RmtkUztzFjn;pQ)IMP~qF$x!WpWp_* z2vHHg8O2I-pkmrR)qhjXCxd`g%C`T9#Nrt;g!BEjN?6fO7^Tjr{WTCEfC@IGCw)kbNwxu zgk)MKQVS=C13DOIiIyQCo3P1Y&dsX?fK0)4wNd!narB(nEJNOc1vQZw&H;hV4Vza2@Uv7*8A+@PIf&t_;Qgdy4M z-j2htEN6(^c@YgCNevCteO#@sAK6QB7&!d*fE1~JHef4fk>Y(4d}+EoeQ%19Qz31$ zc*ezBBc$nqP4V)(4HqAHa3c0zvvCVFG6zi&{ztycy4FtLB5GXP3LHx?0@k16M<+l| zSBYUecZsYxPVMSTZjWqzwOC{ql&dhVfwm1gAc)<2WuG-m?=CNj;Wd6?UPOnbR8Z+68b{5>UZD zTNsWXOP?iH+Y$pvN!V?#FnlUvEICFdP^XqjeWzl*Cibe0ZrNCH_VVwU=AdvZL*x$| zRr&Y?RFCl~W|I^2Ead^fp7&fN-Ys5tMkJ_WUUWBN|@XuW!W`5Tu(+=h7#x{{3&&>)_bl) zxHPJD2)!AiRvi{7FpmR%ZYWv{lMO}|D#sD+R!n=-9Y}9z^p>U*@Tv<`!bN(2JbJw9X%AI>=S*&4eD}HUn;A*11`L#|0*B@F z>=8Ga?gbDRGL+jE`W2ijbaNRE>r^H6U@}@v7g4~zxf9VQ&!B$lB9NcJFT|JfZ3}5I z<5kU8!Z8X!MIZrv9SUuKOQa5ay0)qGqEEcSt~+k!Jam4GJN@L){aSQzlf9S6a4Ob2 z4ISu0$g?hyb0~=9=MvQtwBJE;IF~#F@l`k)L%RU%1lUq?7y!3lLZmwxO z+bgb)r>Sn+dwTUxqE{_j)(p7MIOYb&xohc+N0iqX=o>ls{vGdh7hq-iWLZ|$+dR@t zWI?GJ41|!D5&h&9ujRdgU&HU_x+*_g;l`Iw*`n2^uS+*7ZHzx+!IqZff0&r=<)bhx zEb{$;yJ3+{_^DH&a|_R@uFEab!DOD+{>H$LI1FZy1ehj&#NMQ^R1rrZ1F!>=4yK-_ zSJ2qlq}uoh4<^^mNj9EvgHz$#gaOdy(j|%ADyr>|@v2^8=DPRuRI=OMon)w-q}4wf zcV}_*eidh!?sBJm?23s20w`@)>YRRsI)!w}NYc+>Nc4m-?gMl5)C;ag@tE?eTWfti zbqA3C%7E*Cnk{bJ4tFNyy5=|~Gf9!MxBcplx)G1&#wm8u96JCoynJl^zRjngam*$j z0guEW@Jos3MM<=!Q-v$`fxyFBr3(wFrTkKq=SL5HTNs?&vfW`|Eb@nzHF=?U=_lfv z$$@;7XZ^(#fATYeOAH<}V85w@b&%xwqW~NSqkhVNN(;^!EVc!9X@8ZLd*}~XR$F9& zx2&ok9)_|F6^T%Z^b%P3&tr|$zT|7egv-r-Aw;Y}0$Z+w1ih^|zpGt;-v!iOug{bF zC<}SHA07@VbZF2K-Lbmvh7cA0y<3W4y%o1beT!{d+Am%an}Zs7Z=ph$h(7tNsWlXj z6Re|u*rR(uTG1#GQ`^P*(Air*@DqxvW+&Bv*S@P(EN{7uLW!U2^ZsW}rBkF7Na*3V z4ngpp!=)n685w!3+i{C${A)c`xYh|Cf`ol~@ODMgKr^qpim=+?5)&+m-zSrnSWW2! zU)f)QF^Yvmy#^AkLSL?Wpf?2+f)0K|J@?grJ#qRwM-0I}D@5vK!sGg|Pnk<*{+n3B zGsRw}kxEpEe8j+Uq&dVL!z%RwO8tX-u&C&-*~twL{)a;A4MQwCoig*+{{l=M&xAkjh9`PMb2)}xYAZ4jY!4xS z9^*p8>1|a{%t=m|0i#2t$o&>_y4-z#Ntb7#_^?Apt0pTng{?Bz@?_V?_~erxCF~#O zmGX*knZhrEh+e6~q($>Ca5r*nu&*z&3tI&`Hsv&z&MfhXZ;lqs8w2j0g4DT(JG@!( z8IM(bj*HkTYf<1{y3SVZBR`LP@&(Kd5K4=JRDbc(3RVmJp|Adso9!zu}VM9A@{}RD5J7}gdrJU zuxhowcX&|rq^RR46?afMZxlvcKbajZW%JA?key|Xh;j2;U;OX|{_K=PQi+ADGjbq@ zF#Wj-IWgM|^B&~Ma(G;!EDB$|kLtVehS8~Sy!vQifF75`4IN;~8`NQex=6|4_e0Wk z=_fAb#hFnv$J%eJFKiA$1@J_FeA@)|Kqt($T=C1+4<#oAh2UgL=e#4*w4n6y|Dr(VZ!8h~c#a>7Lug538k=I6T#R7~8g z60Aw|ZUN$}S3pPi@(i9p_X_%3JNRMUv`+-Sg zp<_9JunSbOl|3VPBlAaep>R7uBS&7e5=DKCRW`!%M72BZGgR z0BVL}m9gGFgQd(7UtVs1JP}inj&i6X(a*JNyCJYz)4p6(%$#NwZ{%Aa3p4x=Y1 z4t)Y{z)L@I4wN(`m6J&QR0)oJasy6wSw3~$RZIIrO&otyX?7?dw;2LwF`WiqwqmmU zvN9gipnxsgJEA45Xc@vI@s$70rjWRR&%%|YRQpDG2sg@q15V~a!ZpFh zHEYLsaP~}Nf9B&urTUZ63|<{SSl~pCq&xYkXR7(deYo>l15NK5Sb+Z*o!!49*<%Il zGEr>cwykuFZl?ULZ0wBEHDFio8L-w)7}T;DXx3*RVM z%r`3M z#qSIWWDYA`DIONjv z66{383+hnMDL6d%l^b8`L0WCdE1`0cSaMmHBd)4{J$8N?jaWhG8VxBN4&M0kWf&N4_DL;Ph&roB0Z_p!|^#V;)4t9SG5vj-XM)hZA- z`5izHW`T2CW#Z>&3Dgv7%ef8B<}Wx_yT>)W27}P|b9Z`cq}4IMs$kep{i$)LyPt4g zQadAmN=ru0v#8}k14O&iRHk1y>&5z(%bwAq;tEL9C z%whR=S(UKKv-p`pg<05@w>ti2(FO*t1bP*afs6P_0E&J95&m-`v#l5aa5POdkc?rC zwU{!3rf_}jIK23N^WdE%Xc$W_uds)-TL=GtqB)|(Uui{fKWvt04A=CLZ@CgA+e*jp zd?<7~I5pT}hj9<_(Q}cCMzRG1d%isO&UNBVBG&4+%ys;qryA2nsch?d=pL}*s#K0p z6p~E_12Q`6>}@P@2(^2(@J&;%thEU})mwb7tf9cl}W&yPruw3C5vX#UCNCHTW)FKxzH>2)S zC%C|k7>;JGCtG&@Q34;M6;;uwPuDDeej8<7*b4cTh9}VrhswV!EHwn;y^j1TjXUb7 zWMXL(x%2<0-V+f}-Gt}Z!AIpIYF$?a_^}n!3(Y=7VvNXv-i?TnzH9hSP`0#^HkmY* zTNc*lK8;DoK$KEHIH82-(q#l7#y{3qh>Z)hYyZJ}#<{&2FQtPka={0|wz|fD;NXzMUW2NWU=@)SyMkb69ASd z0r-8?{#Da641V!TSR}e2ONNi+Jz5UF8vHwRz!{5gbFe1;4PJdMcu_JnkJ zT?}sm`Uw6~^!?QLGRfA~(kNUW@thN>M}r$EBA>OnoX$?h+-5@)oDHd**4K`YWUx}q zNZ4rD@D6dxGOmwctsBdkBf!*1A$9&c5b^G!5@RbzHL%IGSML72`j)GIra;=%?9C|; z4S}iBr`B<$1gE0-nLj3PIqG^eydWqQPq*_5|3$V*W$!l1+*!-cK7_>|c1PVr!+ID6 zE|PUnQ4DF3@{!(-C1F5atU&OkIJf;Z^*~dj%xnhs{z|Y*mfwmNK@B}$JO#s^2;U}oM5S7}L7zrYU-f4_;JBn<_y)3I zcT8$XB5Fzcv0vD(LgZlN44$S?BPdWj2Vp+Z(z#dX8Y&y}ifuL0S7@x{y? z(10~bLZHnf#(CB=2`wd{v@c6UDhgm|fq#6EDUTkpuz~)9bw!>u*R!kx@El+y^JK=O zT~gn>gdrZ578}ifHe|!<$IP#kF90C!x5b*JHE8pQq14^zVPh>q34IG@+tqDNq40bY zHKSC6z2tS6MZ!k7?Ka;R)HuPu*Xtj`&ng^t1FO|YQrZ83)*{ZDJ3@}`=u*qDHh@ZJ z`93|IE0=N+`c2>p``^1lZb-OtJUdy+P}L7BAGH8uS4Ve$xQz4kjf9b1Gy_%d;A^0= z#1K8MM*z5)g5W2(UDinbQP}T91(|H<6y!oaT7qaOs?_3KN!&@;G|&4jgvx}-!Z7S7wr^A8*?%{#77{M$*4TcEu<`f4oyoyvYiKI z3}om8jtv`s1$M@vHa%XC7x*-cl1v^HU6zg+^^vsbxPbX(KptVho?E`FHv?iz;;QAmm_vGH z0nwa$ekx$FcZl8*trdGJXSwbgSbfc`p=vC@4Oi4ip42^6@I(bpmt_%%o;-=8Cj%rYm5vIh6iLTuqJakyP;K8@6@hkJ z7q%7FtyMVG=k1W1j_F=N2^YgPH8=wV`Cq1Yb6O0|6@g?!dXG~Z<3SuisQNr*N^Y%a6@YV3*c0)1Uf(#Waq9tZx^4)2R|9yW zi^b2Dl*HyV7WWlR#Y#*5Mx1Bi39vh3`C>diQNaXo%a3Jo!p57NHjKT3>$^nNk&OF#5&z~#42eS5m-3LgASCmtGzs+ia;%M0LhM){q>%D|g z+7||VPe2!A2AAR7`|A6wV0YcZ|^Kgfl1 zDxGuA?wqrabNc$l-Nox3AxfnRQIJ4#W*A~T;ld;RJ|vmnbD|_e+@5hVfO~%6UPj@6 zLTM4p;P`N4FUGWJANH4Q*S|!={W@(ig0avx^t7@eh$(zifsi^+64LuSHAHd)xUtsh zKo437<*8PyXR|&$#Bh_0i0;s9efM;b?iZa4R-GGFBbyXQPXzxv;SI&t3#0vm560hF zS|!elUrEf$P#yeKmOva0@j0`=wa1=+#&%YhR9zNauuo|6O;oaoAxXiEggI=?yt0Tu zVGKOOYMJ44VT`?*+-6`ReR-a~HcA)|2PRKChi@}{+&Be?cUZ5;{vFQRNfX)?O~|sJ zD`rDl5v|SF&149~hME07B>~4FAy+^yf}shMphe7uk|3N|We0xZ3dix~plAJmyZ2Yz zmbL)3fcAAR*%f7FnoZz4EPrYIAxsN1Ww}gig*4XqYM=Y+p|Is6^cEm=&*0eoxp#OK zv*6=s8EU}{7uAWlVlv7qu>{1?rK%lzhH4h&UJ__uRYAd?N&$GAt1!f>BV2ZI#z`X&#JVb_Bf0 z!HuEeMLVXhITi?bNA)J4yu7AyV+wvESaohHMg`-Fin3TdYbqevJcz=7Cz@BVG%9N) zCKDl02^*FaDYT}?`{sjR$r>QK@VD?7NR_iv`^jLuV@PWpKaL-I;DxG-ht1rHLl|dm zRo96EAv-c@d*E9a1$-R)bADQT8njO&J>36+*|Nn5sexyz$~~}<+4wnUd^V=dFmR>g zs&_|4ZNRZC8{m{(aAY5U4HVFDMSbyJ{9(4KF41ZezC?9Pb$$u@s4A2{R%#Oz{@0Wt z5rs-^XApI>GnP(bb^3@K0hL0}DH8#e;KhE})(JvON$n8gYUp2OVM#rx+Xa_cW7hSN zEV@p>^@k~3A=Df?d-t7|j%}4@z44PRX4jbk928g5QR$m>q< z)F%ScrWuI7aQ+&9f^xdVRdQ!d8rovB=_5}%4R#_wr2=oOrYv-M4rTXgRQD(-?Q|CN zP7WUm9?-MStJ@`W8hm$9KU3cCVo*P`D|uAwCkV4M_d~bDE5A1$FV+&I&%n# zo|xM=*VK7lLf3#?XVc&flItB+``*-@UmMO(bmSRY7m}!d$h8X)v@zj#t-#fb`xlkx z+G4l|DmxDENz**g`9pXz4aF>-SiBTvz$fI*&QZ8qt7SZqBSB~ZXFfPFS2!lADGpGj z0cMe;p5RgsoADz}b5tW5V;vJW>hXnkas+ox!fUAMAEc$hVBUQDGwIQ>Nu|oE#M84M zx*N_UF}-bnUvMdv(OzY+|1e#a@BMbmeMk;sD#ZnMsXxc-71JV_hQShk6cbC}ymCrr zlhjq*>Is!#ZmM;FcwDn{&-l}M_bQgCd{#4$Jcj8~N7wGOp_gu`! z_w=J4qsnyz6xaMgLUyZQMm>g?Uf$82MF|6shEC72@a44whLMsRN}P%iX9z8u*6XLr zMZ;lM+F8u4a1ry>Cle=slnHk9dVgw?j(nOj)Xe}4Gg>9ET;x3eA`KP5u!f)>!ZDnT zhi1NirK@7dz@KksglH-;k{OWexZAu3Pa!`VmTjI>G*~Pq?qr+(Q}vnlkQFGUf2MS` zX59uQr|9RjE2%G03J|agk7>UtcFkk!ej&4B=IkQqMxv+HbC(`8om^^ZuxEK>VOI~L zv-bu*`Wnt4^#SU(+csdD9MS8^u_qcU{4M)`=1Y`Y+*ReyV=M5dqJC1a5 zP#4qyIa}?!IaEpNX-o@igsjzpm-Xn%pL(fV+2+%8iuhB}31d);et=ZNe@q3$a!$-! z3kPfs2S0hqQG@h7tUn~R2XT7_-?;_y%bo#;V^I=tO%+T+`wRdD*SQC?4Z6Y#sMLLb z@`6|Yo8PVh?(ic|aMfkG0$Hgg>6|5czC-dFA(4sU#TwC3Ul9x(QW0F2ekwH5izi`; zCqwBHC?$>Gc3drF!{OAr@+=a|0Cm0$5Z;({kFeZ+TOkG{BlZr`l6U{y7okwX;a`_= zZE?0+VM0|>i@F_G)+_E^F}Q|pNdifKiDza8zNgZZF2E)Ss4L#7QJ3&a%cTt-cb%%A z$TlhN6bIXCdjP$fBzGUAF4FFYekm>6=8*A}%;-Rk;e!F6X?{s@JnoY|%JS zjdedc&KU)iFgOA5Y>n_9n+BfQ!qOjso6o)?2~ri?lr=|%Hm5c7tBfswo9Yu!3JC*7 zP$+VyLjiXyi7v^Dg-uAha#gv4Aqt=Xd|$C$?DJ@h-Q+TCp>u8o?aB&Vs2lr*S9vX6 zPJ3Ocbs>;#eMSkB7C#x)6S)8uZX}*Q=oo^Z&z1UgkzST*B@elp6Y zr1xLW&stJKWche!r^q^edn~0qJflDTxq*1cilGmb#??lcdfIcWfh3TzmeRQ-Z*(uL zYyOY;D+N6usPdnG=ipdJyuhq1wi*w?OE^Y6it!lzRB+j!p)w`$(*wK~A@yb*AEau9 z60C@lHh2sC1_eRsesX4~$zCoIAxCwe1T0u25j1a`wi~`-6m}Ta0F<66NmBZM?GF;Iz`>Q}XV#ZY!Kb9` ziiEf=eLPC}#={WH+JXFW-=7NHa``IQ+i0IvN#fb*)ffd5)nHkaNVDZ^aN6W|eAiw+ zJ@y@MjUo%ckhil@W+YOr4dwg{t(xpzy|WcAO-*@N?!*%5n?DYhMHZSjM`Sg=6%EX% zhL+T2b@_UKkSEl4GQYjAY^`= ziUa#3KajxVlag6Jblt3hew4ZVeB5SBxx8qpmRk`Bcpkx>spq4?HY!stZS@us!? zIWw$(;_N&i;2YHoTo;K?ZM!Z;$=TtKP2DB@Gf6c59`rSL-*J~M%mV9IeAfK>bA{E- z8Id7$Nn#_6_aR31b9ol)?W?q0eX&hfhS)%CpIsUTCb~`P6-ZR5rR+9G?WSdGf|D8y z`UV&HE|N$0x~abG9%&i9^>`gf!;EsY5^)TF4#(BbGteI^UQ|(>1ax3qkO)$uqQqru z79%Ue)AmCf`hZgbI-?6s_etKCg4GjC=N8%AyTB&NrDw7@1oxdYTVdPQbp&Ifw1y!i zi0i=FyL4qCG7@#@#@aps8yV-v{HdIVcKKfUg&A}n;c+{%ij0>bHIK|O_ zCwnl(-~9QvkubOaYb8+?I@+i4-?}-kQ|$f;AX3y|u;-$9#f!$0))sT2F;-E>fw5Nn zi-xeSy((@IANSD-1BFZ6ZcX{Vgzxg5JFCcT&$pn4?IAdey*113YOFEEOn)BCym)W> zEF+76`uttlSo(6PaQ9!PEL+bjfNG|HQ;2zFNw&Z?bgKAK)^UMysKqtXU{qwp|Le8# z`wcWl7o1(4d{J&9+V zMZk#QORx{aESkJFh)o6m4Vt1)@8v~N1n1H7p`cWz0C5(udQk8JsWkDIE|AAjwRkss~>DMgjE?`Z6T zlb!@#v#G4}j4GU)tPFgD3*{LwgcHtbr+9m0W5$O#5V<;0t=js2tx@rRaSFbreb}1U z7zy^CgDo|v{;fH|yvk^Rk&v)nyW6COD-lJq=m*2o>wwX%;$HMJ2~{xi{K+KL7{o12 zbs9{OOd-~9n{fwMlg4Iidr>JncJgp3$Wzg&4v1 zZuG-)P8y3Lh)otB))u#UI+M(0?JyQI3iP?^jTLO&sh=E>&o2~zDIF|*|@aWl<_*NuOA1-_g`%4RKK35KGVw?;a_Bb zBlY1Rd@L`f;g;fmjA6?Qs|HZ0$o~|^5f_+D`r*nv2#ZZ4#zAr}Nlq!nj}d;2=Xze3 z083Kf4Kq&&s)6y3rmi*~{D(@a0S1eQ!^lk~$)x#uM^FW^KExbA1bXL3GNtv=y@n#!_eh(T!eY)Yi?tfI$8 zAnSm+Sqn>|Tha3uEf@GOq_=bS zjy(x6=dZ6EA4c;g#tud>&LD>DpCMUBB~Kg$6F7@3*%RKwH4HY4)N@4Whqg_C@K`#uzVCl?H|v|w;uuQdCtSa zW_>`7JA`n9=CDD+2PH!jtpiH(`go;Ue~Ib-DyJU{DKsncrrU3zwd zG7P!c57WPOUT}d}sED>0Wa^p3#Xc~h z_x?HZK7q?1bTpfd$7y%b&!78u`0dj;-Iq`|&zemk#tcwBDg=m~PP!pJs@C}C72}|P zDrELuetHrCjK^12v+2bWY5$wv5H+~4j8bM0ZkGWrpfVCtevGNYO(4b(@~GRG%C$(n z)B2`Y=E|k4raBqXfHp?y6Se#`@IaBZLcDVKB$R1y^Pd~UU^G>~ zX1pV!CQlI6yLOCP(c}@mcnwYE)8uAaJu{WLR8jsId!&c%#~0aZ#3jOT$Vp^(O5TkPcmWh4U{FfX8QNU`u;5%NB0mXK+qEQw}+myz;K#i>6j8H4hWJjCo{HoSYn_)fsS(9G5YKp9na z9}$4igghgH1*z{s%$=rlAUL;8SulXGWn+ecgWh41kohj`l9Dh@LbjcnhFk?xIR!go8k^W>f_ zHrK0KJUW`{{Msk+gRxa_*fJcz06wtAhjqU0VWephWe7u}SI9RDhys~fk@=K$;p!V2 zMIWhw<`MIYw`*bwtnYV!D**zJ`Xt1|aD1!6b6E5%@aw613hYO_nJfvsAi&w0^YzPq!+{~Svr=;kDwJVGdS_8XJ8a#= zLhSeKk9Kt~O%?d;uB#y6TG_TSk%wH^B3+-~Zhw~_Fjv?F{7i{6I^<5vo3n(iK}j&a z0yR-f8JiYDwkK_WFScFTPPnP`nO;|p9ZpQrUCb4tcZc|Cks1LcB=e1f9G^F={jco; zzy+MQW`uJ9jC0E}~EU5p;Xp_S@PD&HE2Jw&4C<>u2n{ z13PCH9=oIUU+-bMLdlL}nU+GjTUPct3ls|Y6M^*7NU2Xm9EDZ)A555ev4rSGALXXt=lr#0*K&MsN=vm9|+Wf zSR%{c80a|7>N$?9D>YOa)#K6t7QkcIWe(#eMNU-53#_e$Zk9yna@C{KVKiuw&>7G+ zPV&q`^1GdXH`6uGvv`%k2m|ZS)adexbop6lAFjYN<_k&3!B5W6F1Su9*DvzXb}BxMtOr8|ldf!Efrg?EOi4;np**ZB!y=L~orXU9VZm zI3COl`Poax2?-J{Q28xP5 z);tiI5$j4jye&tsAMN~(O|pNctAGXIf2lw-{zu{9EvxVG)GubgR4_m9T2gk|b`S-J zTx^o&p&W53RU8lrJuO8vt^(+fGlz*z_5hE6BIu2z*Bz3uN$lpufie62yrFhAI&|}; zqd+$5mzKBQ?P`GfccxcEHNaug3k#^q2i)Ta$SpRZ01fIsEK3)EUc;*zdx1;_g;-0P znm^oD^(X#dDQ(sQWxn)Ohci1Qa46+nb)UcQgGL4P;;8K#K<917pNsbWE;q!qb3tK$ zc7eQox}Wi=Mt?REMZZXP58U`_ld|d3?;y!OL**}yuhXjgekQ+n{Go%6f;0hBPKVEW z6P9m~rM|IdpWnZ9-BF_i+e&+(g{Y}>UG03vMCa+Q2^MpE=N?{rJa!t@dd?vF)kZbA zAxgE2WTdY()?O4@6rhzq0x|1Goim(&QrFdug0x2p(a#+)>oflpeEm|tL%%5>6U9Xh zG%(S`wguExVIj3c6TydM;xvxL>f=m9QpDGgHdWNA^=Ap^=#+uAQFM9D@%`Nd4DuT& z`Qy&nHH@JeZwul_Se@NJ`rW|EQ}GTtmb47jtlXIQG2tcJUjvDhu`+|0FGeYUgQBYp zVM;TP@}xdy4Q%L8s~mBP3(vW=cK2oCer2PNv-L7JUO-GIL)36}bR@}^>Xy|pj^n4V zq~1dU7YGa>Y#5s{rpQZPh08qGI!<05%^t*blZQDv0L63J!;Y9sa=PQ3i0}5gs zi{UsiZe5!1U}ZVyt(|jOR?5{Nthh`4wJAFzk;9gOH0Gh~YHDeA_r~${`P3lKtrf6p zv~?~Vp!cOHt|>M0?%&t;DujZ*`B$D5y{h@+>)*I34>(i2BX|7>MN= zLu~XBN?skT`d;$RsOLL8{L=O8(*l|>_eH)H5oE$B20{e|zP;6wch8kw1@T>PNP>^; z5Bmr&6Ie3LomzRCR7QopfLxY`bA|G?Rb!|$@2uO3SjtC{_(;ct)5j@vYeYSIW?}Kv ze@rso++BBBGUq3{kz6x>)G=3dSU*;j0*eRP!GOV>%$komo?y<233@E&fxKAWPbg1Pk}`YtHXyDEi}6g$tE`L_e= z)W_VCVL zZVHJC&(VRTy$rI-CHj!9Lr0gzjwbvhIqfgYglTZky)7o3N~lXQ))T>I8MGXVvt;6M zYLg|hh*~dSd?j3>GD-Kco$w^`b_+rSmD_F4!k=eaoFIZC7oCzmcXTp<<~ zeNgkd1+$PNX34xWTDXH_@bO_c!*?aZGB8r0ZX0 zX4|?BBK;L;5Y7;Zqk3{n)|W-Mr#IuwMNBW|6S}?b{H?!%yZ4ejZ$NsQ5xwG*iFvH{ zR`n|AjkwW&iDEnSBClMT^NYGJ$jr8Fde0>CluZpaV}c1b;AFw-p$*2iNL!VJ_IZ{yNffu{y?sIV6BR91>lg+F3n zk*K!9Tq*6tN1D|Iema|hlL^Z`K$C(GgocmX9hQWDgAcwi12Tq*5R5+~-lWfLgSA5} z-Wm0kF@@f8uJFAA`yb$f(cLDw0%jn^UDKhwA7I3KR*rt&6m~qaWrth4zd?{TxV~NW zV7+7qUR%FpmLI;%VSw^f8dlb=oRH$%kYa6u&$kEwPb)O|M3B&CL)?<{w-8y0Nh_vE`nlKrSc z)tOGMqvMB$a#V{|nW5R->CkfKEZ5)@yXb;VjxdUPl9Lk3U|(V&p25^*`qAKQF;={P zskZiPds8_Bi9dLXUkafvgdm@ix#2E2P%+?1n014%WqWIBJaXUyW<`FW&>IgC2o%iO zfkVXmnYs96>v(LdnGW-V@m0EA${%vwweW!oq`KL-jSc1y09~eIDCKy+B-as~bsZk9j*My-^-?Pj+3xmn zA_g08!`h-V71aa7jyQaSg~kcMxvu%=Zdn^en|mPVu2XjpjK_~ABFi*YQc;nA{##v} z$O*!&^L6jL5?8G>d?*Rpf-lQj5<|kBIHTD`2lTr-acclwxrl-QNA1UR`S!bPqg}W= z6;UG#Vh+1?dK&r@Ac2KrWA^gF+G_f2~jCONXStvTAM8FMZ}gTDBJmx zj+Gt))2hkz*;IVfygiAak)Mr!=#Y-}v$KFDU(l;zr$&eSGnTj%=%@t&@zd9}VNJLV znCzZA9SOHBBshFGDuZ{3yU;nR7GnGLvvS12cO5Kr-z0uk2Wk4M*p0jqkgs$B&n?^G zdxWE(y3viBYXB-_3T5S{&S~2U>D6zxfHs=GV_oVoOv6a@WW8K?m9D&h!v<^GugT?# zuPy&llk|%d#7jCzhkEN>f|T<^#E-|Mw)AGnlpF=sZ;nd^k7jWf$A_sZ&U5s=SME&d za0q+5%k^wG<`Yx~TR;&c!`>#EIqasVlVpeAx+rx|XJ;$fgUSo_0jkPCPw49W>ijdi zP4ks9-HE&53XadcZ+7*6f|r&p-e!ZFw;Am?uX0ZEpj-C&tPR8U4rFAcjJrpad*$w; zZDEi@ZsWYK0eT`&nL~v8^M#NQ!sPK+GDlNs$7uoVP4OTtaY_nFL+DF?NeJ__I+7Ne z{?%AEANjh}&N`Kw;2Rzt7sZ{=dP-=Zzh&Rto-Mq8@)nN7IHQw)=!-3;T0euIoQHh@ zzQqqBW_BV4p6zX4mW58#ZJI=SiU12`me6${^=-jx-QDftbTRHm19NcAqRkah} zd1MEhYD^};{L#8x)w@621`1X}!MZaXgTacIZ6ATIS6wdt!60*qH)DIB}N#%9>O*g z^rUl$RGwT%DqQ`K1-ET=C}a#r#Tlh6X15rHe$fw`Y~AcDWa`k9{cyMzwQ)9s1radn z2E-K0`lZ>P4wc?kJ(}Re$J{ZYLxDg@%?mG5Zp^vw480^`Q|Y;Q`K2&;|4?6mHT4uO zUuroDMkQ8%dA4@{Ds>zd+3{F#4UE>`Jew%&UW6ht#JYS$>zTQ!Cgkxu@!KgBjYUO} znlV2Y_qv#2NNKtQyvk)`#U=B~X7IJ&IGoxo9RzHOwytW>f%eIVEgem=RbeO;UD2U6 z7;iO(x?J5TtDF8qo5KV|8=5&4L=gcHd#_g#vW9nmo2w9rn}8WC?xD;UM=)gpS5nXi zMFx7sxtoxENZ08^wYEE0(t}V2)4`xBiLy0i7I`wa6%2=yB5hk%Uoa<|I*t$NPNolh=(h5j%eXI>ezXudEWZH< zrGg}Xo`p@ZGm&#`Bw8*Ga#oE%m|Q^+)J3uqkZj*g)bHDbyi+FVr4TG7Xa%!NaZo^J zb@vgnn3zS)*He33$(shR+jWN-2u0l07a)JB z0W5}4tv;}=2LiMYy_dB-pKL<4=ODK~HYeZq2d**NvI#YUni<52prK~r(#F8JfO!aRnh>*%t5J4VWu%k)+DIf--|!!ufO)^*E-1 z`B2OIQWolsG$h;X@vG@QCPy#+Tv4jbgZ*QKe+sq4H2wny@y+dZl_gTF`Cd$>){v$s zi(gw-cl_o>`t`p7$s-w*LsnkdwWTSlZ3zxdt!ohZ-^z5O#Gzo#35aNq-xJE6%FeMu z=lDsbL~eW#ZV(R|qZ{g#o-!!uVC&4Ux4N+{nR%`m&iWZ{;l0V0>w?mXE3sKtT}Q~R ze_LQ(9JQUM4>$`dtSvK0QG(Hp4IC@HCH+J`v8rNkrD^j#yk_ZdC9x4@2ff;IyX)Zk z+0A#1+GR0>JJ_tqWY-QZ_W_kgTY@^QW#jRu$%dF)wg>9qlO3 z;`|+L;Z-B$+P=B0DWd@69__l8@8Fql`Z5mHVABvz;UFRvkgmR=)cfVgAA7tjJl5E( zmclE&Ye;r0LDZrpsP*l5krmw1y_jZ+Wl_}IQu<*CZau1_AF1}d9n)#6&}+%;LJm~F z6g|LMFg&JmqW9;(Z=~A~)o^8-f5|EZtgCeW9e6zaG6p1$ajJp9XY4gY*Kjp;2g)GL zl-a-t8_Lad0VNFEzL|CIr?p4TWSeiWR=uZQhs#l<`L^^U%K1a7XD!<0Q9A$ zh(*|k>`I^s25Vrm66csOE{+tM*!Knyk1z~RM?u4tD@hhn2rAnvDB`<2eu|P1uu*;~v~PjKHf@-jz~$Q5XZFcN1G_n0 z@`1x@M~zdR!c>RL62@LC$DwQO8vH8dgpV{ov79IJ#NrrtHS@?ODXZK4Qh&ZUDq0I? zg1pWIimgwM9yR%tzuaJgAoW-6QAgjM= zFM;c@bL}sP82s;UcwRG{`-ymIHtOQFKMgQG=X-nSG}Q!_`DifP0%`g@H{TuY{Q9zF z2BW@lwaT4{`E9e1h6KO6u?2=YKNz$t29SDOwwD5Ud-Am(3XFMRcUY4t1b%SbkaY)1 za*C1@2L*Iyf1JAnJ#pZLpCG$&Sy+ez?{ck0)S;+z+fu57ymh}rE`#d!_*6NCq4pA0 z#e{|Rj!{pA3-guGR3TLNd(-+26y(#7BZMp;Sg~&7sX-vguftK%iMr3AU4!; z5eX(f@Y)_hFOkaTCkagA)9oQCL-Koi9kITY9|v5D+_ZBi2q~`=6o2Jkd^5to(fb$M zW*HRmO;NPN*bLAx|Ig?n2MlLe7-0YR3j90%y8sw6BfjZ;xx;oMilh2xd!ByVyNqf(!M(I?D^auXRInXrwf{i_a>xe0syek>G#-!(Jq;^zJ zV@84vQ3IoS^;c};=k`%Hu@)Z|_A~LGPd$X{A-`v6qSD*%0TghknLP1nIV&NI1*JR; zv9zk9Zd;$ViwzTeDx0t5QlP3@QN=?6>Zo~K2Bgv0XJ4c&Mp=+FH`I_ z7n{dry1CvFh2*w7rn@{{j@MAQCXg$!^VA1hXv*tn;c1O<0%5BdM8MBWVgeD_Mer!@ zb_I;PR-|Jh^l}xe5g_Zjf_le%WV?pIf789*la}o3IMll|7Vx%{HrlhBVHmm35`-NR zi3Pv{aFk^6>Enb!s=&=e=vIrF8BHfx7-A}O=@UsOKEGp|G-8`<(ldwR8OX3(DWz_S zS6;(oSoW7lCZ-$ugL8l(FZwhYd~<93>-;rRLC z!`NKl>NoSZ-|)d!F$;pj+HCr48A>4dS>tnZOlYs?>4A`co;)UrJb6svJ1oN>TXKkE zS3>P94Xs4bfXhV20h*F%2hp6irY!`=Mco*7JJ>c9z1YZYl>D!JYMTjuoIfo26HLbD zgz92(Nb~4f5FYA;KrTi&Lj6-kf2j47q=Gm*!33pKg;HCDWMVaj@m6NnR^?q+B_dYA zomQi?Rz(ITCQ&D56(=TzCqpkM;o?sv_)carS+d(wpa$V74Y(}SqA){3=p4Rl$mda! z|5WJVuM$PvhL37p+;vyiEzLHH*gOpA?4m&u!x+0z7_`*T6^NeJw3(z7f9RwH&ok~g z)C++Sf$-OQseisC1#$1hSm5Hr?FVS!6h`A$d*eqn^bDNwxW4hc+WrLT&eQt-4{LAM ztgh2^iGGz6ebpUygs3>3bCop!g%(;2D&8q-fvBjc1%CPyh3?<*f8OVPyZfAr9oqqi zO=YfJxpEbojO|2q4Rv|te`$8xY*+Z!L*0BAe z?#4_y6}rmtmelRn>-}i04wFlJ-0px18KO%H9x+at%_S1+$WE!$}~tSz_g znNImU?t@@%I>Y4Dd%UOJAthURUbLdy%DqgJ^JP}OPjR_U$dXLre;GV3<}64~uBTWr zbV#~AKijAC{Ce#5Z=L>jF(f$(etU`9{{9s`d&czK@wKnJbMbV=|Bs2T*K8J1Wjh=;d?+oZVl~hKj(%hnFu{&1L zwo2FLtJ_bXqiXONZU^jO4yKlKY)#AzSodT@ni_N^1bV=Ez60)L>Q=~{jagoe_vOo( zKTp}K7hItd!R(Yy20?r-+@9fVkKh;U`}(>Y;`m0Lkw5V)yhX37qzEnE zj|r!@4Ca#d;6Xfj>e1}(EP|a}*y((AuxIHlVXMU;XPbAVs%?H9$h1W)Ubx*;w@QbAQ{H@fDlL2|pjZq(<4i_t!>f9u@J<7fDgNND5wGT^V#_KMB!2GL2~ z4|d#XSsk;I=J1elH#nD%cl@52E8&hKnofLo9PZrx@VY(?g`yi)W z2(l#Y@8A7dkR~I?jh@Fm&FW&MTBGhZy-Km(+k@%R6Kq}j?t0O(+vEFLwb(tqrgyKH zfAvn-RR?w+CB6ET-|MN}x?ed77WC}*{qEvq9rueUKh(YQHdyZZt2!c6+c@pp)o~I8 zXEzK*3l0ZM9u{~xf7J8i>pja7PR*TtXoYq}U1!Q)RdklG^IF*ZoqIsr6bAP@?5*QO z4i?2WJGt9A z;qUA^TF?0LS+wR>1mW~~J|AasJ|AbVF@be^?d`Mv+|SD-OwI8nzq$VS@Ln^z>U)#< zCAf|+_&T_p!tttjzK)lq<)(SJvzn;YBWU6Fd%f&i@A^HFbtgSg^cJt1-?EO+e>gcz z@{QMzlgsoqvqW#lUc3J0Y8RaiMWG)A+tKqrvxk#w@rHQ~EyG36^~hAWbdl|;A|(LzwMs&7O5mUZquGcAbVT8**4pRyJ9`trNOosZ%0^M>eId39$p{~QQNBH$-|tj ztOS$ujjjD6^$cDPw)2%Ig3N6{e_sm^rOU4C`(2~9dVX#k@3}kftJ@5SbDxb^MUf9lTiRk(Wg zmVMVbycX2m-dFc4YMX9t_3T%5zD`Aq;@&9Ub|0g9r`Y~fxDL0|)m9xAVJGdp2mNsI z3Q6}Svq!@9R#n?pc{IE}u4W~V(RDM74A{=1B6Mz4avb$4hs=wej}a@lLU{o}si<4GnpAZ zG#O0f`P3U8pRiq>hY}C(r+eM@UjyfOq{VtQo(}Gb+o#V~?8ogQEjzJ)>~?!ux!=sI z_Pe+Atz#J>i0$%VVcFdl-J$m!w9eDmi*CtnJ>MP_Ru3QsSO3u&@MY+u(XVEwUhASfgeBo*W&|kmpVDo_j_=Uk+$J zy-z(-ZiFbg8mRb+6~20p?!-ek|Efsgb!{{*7TwOxpN7`6d~Y`|`k?%>aSyWx-Cer3 z^q^TckY z^~#v@`zkV%73K5o4V=fg=(^=|7`MXy{1A>{b#L9Axvl)UVWQJuG;tr%a57pLoBhjh z=DmS)ao*R9<5ZXo2NI3PezBXhwhlin;zj$=?so^Y70%^4f4qmwet8L>+x9~BU%S)l zP!iwe$k+YoapjhxhdIKQxk_>nydXjJpjjIF-nz5EFu5A09fR1oBvF|f_4GfAnv77q znlH!g9|elYXJ3y+@9{;`z%NI$kI%W2fBJ9eMeFV)T6EBQ`4}ecw*S5!79G)(JF~YK_fPQJE;y{-s9&xokNDG1 zpXY$C0YvTzotOCo)I3~fYoE?9Z6_8MkY0wC?9)G;ku+^l#AkGl?Jz}Cwt~>6??g&8 z^HG;7et>GrY$XSYuQ0!!;|0pr%&wRIIkCN^+>$s?e-?wY%VR!qpK}$5eQSaiWuDDE zvCc|w`?9>jm=pfyt#P16)u`30Pu!k$#D0D~z3TgYv0N;y*y+So5Zs>60uQYYH(Etz zWy@E`R-;;a!6Hh!1AjHxPozJWwp||F@ymwxy*D|`RFw|m>-)HMgI(4+uPouv7>~o3 zEZeXcf8NiRo?(Rj0LqO$Vt4c^EHCPwfEb5>t(KpJHD*&5!IL7cG4q9 z*vI9Knt8oEt5aW`i{7z!jXIYww4BbN<6YX%*6{B4`wzb+GZg#I$}C&eE<{b zzVEd-(#!g7ANt2?G0bPfYy5s$`>K4e%+XuriLuVchOFHi zKYFd8V=dp;p-Hz+Ru%(q(7&NM>(WVD#`dGNfGv@g|bB}WsO-FEF^kQm;L*pK_C zf1fQ?v_PN$gy*y$E$2DBcs-{z+-H7?ZWK-#gP!26k#9nK^-iKzoTLI6#Xe7iLYa6y z$pe?CfoYCVe2XU5EDZ`{K-M>7nb$K(C{ zMBZ%nTpE024*Ekio!;+xnLVG+=VoA=e;dyxi*t|pZZz0>XR^5T3fBtyo5ymxIQK++ z@Xo#U<2gGoyMv*AhvDTVizwOeTkJaS_xrH5hs zes>2gd7A`fal7e%HhH~ny2smK_loT_-`ehNeeS25<>C={AD3Z^6#dyd8$7p9e{+91 z4fpeVzaELK41yCzCTFS<%sRK~&>e0b`|-28ZeM7Tt)`1xwJEzpa?N_%-SfRETWu(U zxf5R1AZZQpHF&MIk9v@_+Nh`x^VPi?j9No-9keVpO%AX2aD>f(`cY_AILfaZ|FoX1 za?1}ECuHA@xr_(cRbIY-ya8qK;d%XDyiz8mjfok91mg3U&5jrZ_y-QORx`{;0A zuP%c_@3OuOH=9z%k6BtNf8JL6M>^|{JDuV&^LBOH8MSTWGMBwwhzeslw~zAC@$92t zBzssKqCFUEzMRtI-keAgf0whl*p%HV97ltRRIzkF*z?{a#2_C~NZGmQM?B3u;e^JQy}XnSOjWoLq2q|8;*d zT(8%4chKE9cB>jqdBCzm)t~LHW6BTJa$99Y9G+^{@6WoU-mUezO+22%DCl{;`_6hl zd)@vfLe=2796j6aW3%3Er!(~A(V%D3(cw~Vo+&$w%%a=_^}X}dIxf74yQ)pu{QYMy z8WiOokK^&KvgeS2{`;3D-UNhy56+0aOOxKu?1q?Gb5lFTPv`S8TtBtiI8y)~xozx4 zy0%7{P!zxu&6o4eX|Y~X>MJ?dpFeA6K7O%X`DXVij$8tCh<=m#U~7+$@yhAXcP4=6 znEFV3?xcGM&{Gaq0L1S$uSd^#**UGYk5MH71~2_peU;eb_l~cHa}xl6vvCdpE?c2` zy+S8{GDrZ(&C##dI*x=|->iWx05~88z$^NuLGq>nuTN)DbIBI%eOben>gCZNf?sOt z04X67XalwY62WA3dW2D4v^X%zv+`s?a%5R@_TKTEALA~6$2~6;7q{jMBoo{6#nP(c zl_g^bW$fZ+anCy@UGE)#_vkU{T2&m-G||Vrc^Y3VNZ$My8E$P-S8@INm6aw-SFt1e zyx$wuamT}a1}ecXHCUb$n4=Ww%fz96xt{v|!1d9cRf}a%1D*L*c2oueRXRj}#6Fe@LOypvP-Kf} zy+o9N9~Dkb_n@eaDj*;kAu4e|a6xL0L&;}EIIaGC$SpMDNR@Z0-&5vOmz0AIHj#BQ zFa#@OLzi<9cD|4?s{Ep`L@X_o*v;?|nx!t`X6QuPfStfz;R(1A&{&H z5h&_{v@93FB_zfa3BjhdLFS8jtL2OxJK`~j0^bxv2$&Exe@%>Xft2urH+StcvgdR( zLqHKIrN0AzMk?_kfUY=sJTh310bO9FpEsuMyR)#;!Y3f6pbCK{or4b(3I)OXa+ZFv zoE~>BmTZ4;9uV1O1S+~OI`j&n`M#m&A6uPw#fjtyylW1AnIAr%fFt>t@9NCLb;1U@j3bEeRb|OBT5-BVY)IwmoAceOiqF}yUPIKyE!YmNU;Znu~ zNrn-BETmgSNP@W{V&phrE!t&{Fe$FMqAta|m0e?NneoT@>?TAqWc!gIASVc-b#&ru zB!r7(d0+a8kA3$Z;ocSbmN53|1cZ(Sf6vNIz9!}a= z^$~ybvs=eL1^UgkW-S~dS!8Kyc>Jt6dTVUe>-Nb-Tg~no=Ri`_{Ql={!Tz}YA3gu) zkJYz<5fD~4uyzN(TkL1W4O}T~0@6XL={K)U=^n|15l;{t=6wVVgJg6T<~n~7i~2r) zmZt4i?DP4OPNxPuCqemERwXcEZlor6kkkU-oi6%#7w2Myq+TDvbbf#=A_(a3Tog3I zk~O*LNyF2)OyAwm$VVZt5lr;KGiy^A{!` z6T>6%2;tc9g81y#EORsd%^_HQfoO%2t@#7*5qDp@pGdyEYJ{=wk*$d2N3Ockw~ z`f&V}`%R8pk{Yb$k(EYGEa~sZ->mZs^JUawRkE}c@921HzC*=(y2fzjwjIHL7`%qZ z_G3gq#2iF2K>-Axb!HNY!U3VCpjZJiA>Uk{f=t7?uZ)Qh94YE;(1=r6QXL1lVoQqG zF4mYT6rF}5nRMdBkvXO!nzYYG#}hFI8SCHrJqbj4AE(@xpnO|#%R-YU3Pv%`kAQEI zASn<6N->xO1e;PtrDwQdXnDkc%P69%#2~nbRiuAAa9UUvL=>ys!dahSAuy`?=1#30 zCx9gu!S*bLJ0*0+*bL(|azF}aid5<%#vtJslu77(ShZgu^lgyw5)0~qP_C6pg;W`~ zAG?SkItR&I`Wg-#9D7J;GTg+KqvI-8XbeoiQGzGnW~ZxhD_KSZ3q(DCsiVvbh0ga6 z?pE=NYkbkT@+YPonXbiM?=1}PsHl7V!4!68ns1S$%RaAPSkQ2}^QS)l!uY1nYh2RD z{t;uu!~AHWcwwbfpi0ZBMPY$tRvkv2 zrZ${&TfYWG)L_-@7+bWm+DOKsB_o^qj%AT@Bx7Mw5l=x-(cp*>|0gjcNG%5#eqv?; z0#OTzoSv^G#lj&V`3x%38Xbm;g66tKge9HzLmdkoLr}gzFps`}hKoo|MALti;F9%uKP=?N(hok-C)i; z>V2?>9-G(Yd5)ETNs-X&8QDZci{;xe?bMGLvR4>A4nztBDGZ4yXISP3H-y`?jt^Mm zo|Tz15Nws0>zO#YH=@?7j-Tu`m@Q<4P}SP6EIL7vL{DX|h|gnW6bKaatV~XI z{1Fk%Esz#mS%RUU$Ug;9m0JiQF%KkT`X2$#hbsr@4jE;C3%xZjNhY5d;WK(1BO#fK z-*Us)V#Dh0CO*?C_Sw9BZ%@`D@}s2xIP2ZgauLJc(EMSKU#2-j%t9%%1ciy^GuuMt zT=#KQvkFD=U0@8Zgu8^vV-T#51s{Uo^`zoD4|Oi-b+JGFSoHqM+iz|gU!J$JbluTh ztx_1JV)5yJy&Ak&Y7WP0_Oq@vKYlofchtEXMJSD;CXU{0#eZtcFTVJzevRN5k06T; zk3?rTOMdke*1yykarT1PrzA1FEcz#Y816Cp)E^Qj(62@=2#?4pW{6=pFbWmleriiO zLGI)4=Y=4&Dq|!}4`F|_uOo`2LP!?Ez(kbz;*=)@|E_ku<+iCc>j#n) zA!#rEzW?DNL<ID!U0QZM&H_huPb6}nz<(%_MxSQuZ7 z3E;3YR&$x3h}6gYZ{Sd+2$5X0UruX;K;PCs419?!?Wg?Q|4*I=3`hr^)z@_=Qs@nk zb=IeUxB$Wa8E+ZN*bIrprar738P}RySZ~(!od1j^evIq6jCx*5gR8e-Wq4>riWp7^j2ZHCLR2(CkPrm?!#$NCNQns&BVq~WVKqq) zb^KlT(gHQIuoR9y4up;oPC+FU%V_@7L-r(pb+#&L-Vb$0PV>GueQqODLD9mh*A{ENb3grIYLC)Cq4)wXzCeW6k0y< zYKlk-g2=xf15n_!hcW8In%~@hC?2kW1Sc+bBzoL1cMjuG*2l6|q@?Y%|EJ8osjD@TRADI{kb2@$AdrF*`c~}+rKE~n zs45#n!^1Fz`S?geqy>BK>O9y3o`E}pn`-!1R=7`*I(gd`4gWHD z(^DlH?=%Mg^uPZ$7d{+Hbmt(>|HMh}U)OPAy^|Jb9;I>p&tuXkYV?=idQa1TBYd&^ z;!6IX;p)k)3;feZqAF_YGp%F9WZcvRD~%Sghcy+t>xw8e{H2IbM3$6-^X$F?o z&j^&{;;8(SuX0Njr?4#7Gv7oskReuXE3Qn6e4fv6TFi|dJBxBgWJ!rMsB=D3kgwuz zthE$K&9}-c(z<|TQ>$78)?s*mgRnqQmV{Q!2NhGI4kZ$ikL=U?gSG#=eC`u(NEJM1 z{>3KA4t%khLWC(8pFTz;{v_?6=GJ2Qkj4xoB?Jr|vo;j}71nATP!mh!7*}G}-tYx1 zzyvYvAU_Wp7*?PqW_(bS2hdXY0XLoZo8@{PhSb%0?}i$Wg^eoB_o;M$YgT!||8cDO zwEAeZKN_z0U-pNzr#Zd#W_T;(3HS%ssux`KNi6wM@ou=FiRNoSI@vpp`*_O-^6*NHVHNHk1@su@^Ps7g!5xxs_ z21mjXaMHl7^Qo=!H@BKg@ma%^ z`k>f{e=B~r(D(Si`@9w+4G-&5AGF4kHMj%x%k7g3`>gMhrb*X-Q_b4?pg+G7Z?2C? zX`t%@1j)JS_p1>$8x78}<2S7ktc{hgC=w;3wQw|Vr-aWwT+18m1C0rcAjKz;#u9-$ zK1ga|QQ9qNj@=M&vk>L~IF4b0rH@pM9-_qPR1oO8F;db-M>L$tlEZ3bc)B*js4aX( zoBHfe#|x0Ux5r3-P0CM=c3M;6Tz8J3pmd(b45wm-! zVaNp>+1-1KkJg@2zgg42@)(vLi8S~BJ+t{cze&Rhn%clHtK+EA-ZgLgBaU3bnmC~q zU)yMufTE6587r*Za#88NnvOwI$Ie)Qg!G})nOLf+|H4L*UKgy-Ig+bgr>W=hhE@3# z&Mvowu;Cbg-gZttMf-!KK8tKUZvl!hW(4GD+MH)GZ^eT3b~kfFWg(FI#iKo{4z2fm z=pjfnA49gH3s@|dgQ^UuEH(a8G>s9z<{Z6-oA&cF@?%ZkEb&W01UQG8)L!@w;5;6C znJF!4{N(GKr^)a6i5~NQ`&~DPf96z8t>qt_Z0bLMd^pzc=Z}pJqGxFJ&oR39)~~yq ztpSI}Exc5gm07a=a^-x(7;GvJ z`K@q~#ZLkhgCX%QDi8^)?8Q!dC%sOB^JJ8rjQw1(A{fuMp}|5VT;6LL;?|<7n)(xhGQ>l52XmoJzJBMOvq}S)eaW!_O{QS|Wess|0y|sq=Gjsar z4?}Krh^fqn*-bUf^V$Akj&wps^#GUeR85u4Gyl8LSp2LQQ@8CpVc`pnJuJ zn_%4+SWtYpNxRe1Ix3Pe!^&J(mE8uP+7t?mMakRGVQZ+=6bpvkfFB(5HUSZP!O?tw zFM%OAqHX*JgaT`ZQf{z7>Xt8S8wMW0qz$-$xkAmJ#+`tf%^ZS+<_qe)G@7P2Tff&yLEEZ-vf4LZ>RaDu4$6G z3oF?;=LNQuVs!cF`+l6pt3u)Kb*A%wjfIsJfVrIE)SCp?frxtXT=?Eins)5EydGmvm+>$s#k*F;HlL*MLw;nVAT zGhnH-one&fBtG!hWm-PZqwU-bg*L+ zca=CInH*`Ayqj1}OY-D21eyAOYbekZozR53cMgmo(QsC3<;l%WLC>>}aUlN0h}{AI z+57NM?^Az!pII{_!;Ow0lKxDm^=FP|=|1kQ)X=MQ4*o;$A9fH)89|{dYcp@uN?Ed*IJp^}~bMcfRd~I&PceK69{7E%>~Th@A#g-jM;H z*g5~gmLL5@C0~uT$%8#q!g8}jYbjm+=DY1;#4yB#Tg#1;(rKeR32GBz5K7d23Z~1C zhC@1QYmWM{1@Y8oCppFP=po@^$Qc&7KP5gz=?>2o6X&IPaABu^j)?VOHDQ^&fvA@D zF|`6;9*NQ%SK+A|WxLE62-I%9l#0Pmd?X!nxy&sP#*B($3MF777_lPyVyeJiZAK)Z zm7V6%?8IX`pa((KocP=&PwLsB=nFYQ{8-Mg#aolws5&x7L`2d_hvR!yjHfaZaAJL$ zCffEb(G)ezLoZvFw61z z0Us5bSw_=eY_%4$E5|rhA>0%K$zABAO9YZ25zj@PACT@99wXb5#h==u_DmoU#S%%H zf^S;0+MHR4TDW3#Y2_4k#76VuuB7MoqbU8Y;etvWLKqr`kwa6dbp_FG8MxAVqLsr= zVN2@@(m9oXD>C|z^u&)xq58}VzB6Ra!@e_U_C%jP!>2f9Bi& z@HBjKH?n-O_~dZBvg-Ka5ux+X@Q%W8gQWWT9$$@O3;pYmefp2JtEBav^F6tUV;3(Z zZTOmywxl+K2qRz!Nf8HziV;MCgBy>DE1mT%LtuS>ybK(pTK9*N8$g{+)05~mpsTIO z-?~SExYVGF2{d~01i_!PfW*eI^*Yd>hayKfn=srT9B>GIQ0%KEYt1^QKU&g^aCUYJ zmh?KCMH)YzNLG_9-dQrXwN{Dc$4fL5h5#vM$=kd$63n!=vPFvU?g;zK{OLsSo}|UD zsXtPGtPn7Q)kyox2momh)HG`RWda2{@9=%MUgRCBH5Q$7?91VPY5m_>VZ+(p;xp3? zwXc9(e)pM;?s(K|t&a!LxyFBV2kh4I$|IV$i)G_mNt2%U78l5x+0xfr#u-)xJ@w?V z(G7*Mp+ey$9br2k1xT1724*1@&EdSrL5}o)ZG?g5R5`77bMC1{W#6_%%R$w%Aav(o zC@~yNho(HbNvj4Kttg2(6vb56?_X;s*bIy0_1+3HFD>b$a%(G&u+JM064i31U^%qA z3lKSmAlje9B!wWKao?O1#W;lsw|9It4NNozt2>d=8dN!=X`ykCwOb`=bbLF8y}$u~ zxJ83|r|Zn#cRLqbsyyYkk-G z(y)XzAkRufBiS~X%LXQrlO}u~+II!@hAf}`O$5kl*P17Nr-bk*#}dDouoyL6LI7#j%f6^j-gAhYv%mU5*fc28OY6 zNN;S|SYvb$Ax|1NEOjGCF{qKFJ%Lz87(?Vu&Q5DAVhRB`umqP2j*v>`@YOXmH?w5X z?K+BrZW&rIPHGb`)e#Dea> zGPm)NCf#4Tu4m4TFC+=RJ;CCazv=5Y)mrhVcT1cfzi<$mB~QF~KsKfQT%(-qr|;^}J(pBu70@#lDqN7t8*2BgF)(WIOk- zhk2D%Oln*zCddsIAfHPp#C#qt^AEx;nuBDG9 zvj~o>Ko#x`=fpztiWGH!snR-S6kjDpOUNE(Ew z-uQx&4HC(kH12u`KmPGAe-LP>`NMv-yRI_fX#M~J`*Y^_TkHOR_=xn(;>UaNuU@5h z{GLnA^^?vQtA6|7jvv3tr`9BqRq?Nept(eBHStvQ&`Klw+p~WB<_-U7_)6&2e8rD3 z8cR9VJRqco<^dB_QAy8>sm?8^@-?_bJl;V$0uya;TmUYHyqA7EZ%I5@8AM z6f7YKsU?VGYXk;=+CBKE7X;p)0srXM@*v^JLhuWm>9qp!wHyQPW4XZ2!Y}J zwK1G=NF}QzL(w~;42g~I^W5f`Z@G?0It@5`Tm|gL4yrIlBWLuV8E9r zHr_I%Vn8bS%$D@AVWZb!w=R+708b7qF;#zcWn*2J_3IBz_d2Lu2m*`sl0?@BR;!kf#nypHd(Qlcvg;RI&z4yB zZiK(}fDHqGO_3_M{);mt4*qAp#2g9&ueDAsE0_CX9(6cH`W%YbgLXG)IGudtekxHe zVr2P3_due(F%XlqLu%&gxx!G-4MGz`Ms1wJZ(TJlwBEl5hikt-`^=H7(SGH>Vr;HH z=UdN$8lRTN)1P|%#ofgIy-y?Q&4KIw^*5IOXmNyp{p5>&?l-?kzy7Z3!_WTNLqkQs znydC`2wV3|s1l9-MjMT$;p5>~3v$qh%P$~^*HpR`SB8W1{8KPSMKZ-QoP4-IzHaz> zl{;9L_yR)v^l+jvPvs2DnA-HT2U#xNF0Z3S&ZsOIQ8BeUiwDR@Yl1aqYs|WC5Nvx^ z8dH#ehBudM840?umhqfO8c^*sJjFBJl!1y{bD8Y5XQ1iTd_Li|yzIOEVJK-$AZcnc zC`me=cR_x>z!ja)PoCOX85VT%2Q1ew>_-z2!uxW087761CNmQ?bW<3u=zqrg;>ZS?!+9a z@!VG4?hM>Q5R2dxZ@o*W1=_EmV@q-e3)3fD_gvrk>l(r{-fH`W#mR_CP5dB|bJdaP3CBPOK3IeSDoi z^ObK-|Bt;=luf6A=&wX3|XD^WUwlnqZD?|SE8rYs46ct`=I@~uWNW`1* zWLX}1utqY90y5z>T~>3F(Rg)&WX8oQ1o=Jmni8j7fxm_?5cV%Nk6nCbr`5J2YugK7zB(o-UNl%E?Kx5n?i2{(tK*lFLY#VkFro-WFE8SP zX2t}`$hCvgMOr7OoULSe(>{}BrUe@Ivg zy@OEu{>mJy#`kA@bl1ka@YxrnYldiI?&66?Bl?*Yh(BZCCl)_7CRzQ&8dHC8gr9i! zh4z$PJ{}dW{b=W}zv*XxpP|ODUo)dfMSR_b(6tHzLQAg01mc7vk)RXK zm6yxo97;TWM{TPzV;spl@|Dl3(w^eZ4oTrrS#atL8-(rS+=>ur&-HdiQlBxATaePN z^OlUgles0;4(@V>veCrCBaIu@T1l4$bg9>PTAVmWht?=6A{MZJafD#CIF2o%VoVI- zpxlB<7HzFcRv?+PRtbZE%1}^?L4@FCVjuClMbS%gM<`T=ovI+@s*e)TbK){=8eF5FM3zBLLp>_$c5o&V!$TE>3 z%rh?xyIu9ae6IBSAK|KZlFuk$9p`)nqIv%xJZUh0f#K+f!+!J#{43Z)tIy_Llmy{808yYeE-}{Jt%^i;Fxx{~jv~9W)6eJgeo)m0`abUl;J8|nq7@iC zrSl6IHV$BdWqy&M#O%!l>6opbbBO#6)BD?Jht?Aw46g*-0AQtKho()RJAh+c^BR8f z@FqupNi&1}-q$)_WN+SX7)eSjt{o#GC)}eXy?=Ao<{l`lV_1etR*0*oQOd*Gjs9I>sb0Y%FBqM)B&Q%(az? z2^b!5?H5f&?P#B71rb7M>(#MuA+3$u=IUgutvYs5ZV{)7_9(Di3_P%;-pp%1=02Z< zRPXhTwpMM<_adI!Qn^s8o9GFSMQmx01rmv)_u{z6WW>VAJcUV$vbbZluH1vFc+AE)<-sPt=E5N>6$YKADtsP!>_tHnjjp_&~mh~LEyWf zn8^Z9C6oANwvZAoQUb#k^gQ}VA#Og0&)XfyO8k)Ms%QJ`x939do7Wo49R9#>dWL5dcv>n^6e3IAK1)KHSgo~&mMDFoBtNW6J-JR zSzx2Mz@nTqd;O}vYQ+wgk1MEzW94;|3aIw5p!dV^3`THZ1|a0rA#?^R_VhyW079IN zP83yrA4aR0VzWqg9Vyk8f|@~+AWN`73;ReK`BW(f*`wmP(?ZdE-~_ z?EN`w{-ujmP2FAm!QFo_>(^espSkBBd$)ep`llxU;z&rXfj~r<*6vQHwcZ{IwV>#*w1QU#w^HmBf*-?Ee<%Cjw zd5dC?k?FWn`}-0tCu%}+Fv!OER+gKeDvPTqOvPf|Stqx#wkdr+)@S%>EO8wY_O zve6?0?IojOb1ej4dqkSOPc`+~$efL!;8-OXkBLvDiP9IKFfAMi69Hm4-azDnNH$2( zgT}med)glAd7gFwD*)@S^Ul9}tdirey>NO>)8yl8sy@9Dt?18O?6b#SFaav6xQwmA zGe=Op0>d|UNB~hluD`M=1>K88f9$=ZI|p=MSgw&YsrE&I3|sw5Ytxa1@#qa=y6g%u zBZAsaO@nC)OQfWSQ!&xLOedpmvqwU4ty}5d1vk9r)thr zy&vGq%NxA-_4m@?S4}nk@OCwO*kN7tEhH$`d*%h#o?z?*j3H;;r3eZSf0(_ougO}C zc36*GMeOOEwB)+nO4)&;cvC34*NL?kK6_%72Pn%kCt#M|r;Sp6@$^h3F>Qju_p*|Z z3RYPA*`%rYaIZa%@m}u*!a60X)(jhu20}l3)soMi)PVn7=iuW5`1;+gD#m*ZYwb}7 zSu`FXQYCtSlY@AB?)Zicf3g&~*^7DH^o|N=s5m0%@XYsx6H4WTiR4m+v&ux!76_%I zlxC)|^j>#SH2crVhreIitpfXUih$(0!s>mLOMR`CX6F5SkM>`CyuX~g_s87s*Y!PE z-Sp~bFGvW!$AY?LIIoIZ5L99<4FsZdj15(~@B@Flp~qCm(OAzT*4~V{dN1OZlIv{^~ce39cPOsBjBrX-^3dz#4>f8b49>PjC z{KKZ>gVuN^eqv$8e-lS=hJr^#WIuupogv9=ta5iwsQbA-NFHDjVUUr_L*;63x}JBC zrFdbTjGCvDLVy4CkMIARpQqwyCjQ6Xs)px$efwX0ufLdnjEpzX#Ma(%{k_yUn*6`4 zeOp`dI@k3N;-E!Q>fub9w&@Pi4N%UoND2k2Rt@m$ziXxifAxOWTJN*@4%WI=K+|ky z=4OmBvGzpN8r0uEk9(>Ucovu|iAFnut11qB%ZdX|ps+ zp#n8cB1R=+^iC)Wibn??5@0?__#nbvM+AX{h0ahdKt!(If3}3FnkZa9Os&QjanG}^ z?to{9pg%3k>bgErj~(-5)bZcD8pgI?+j}mq>s=scf9<}jUCIfpqPswiux=bs$-C#5 z?5xS?CpjS^e=VS31einj)*BJ|#D#Jlp6o$;Def^3NX+PPi zJyO9AkhTm2-qqP)xEZf#T0XP$=sAP^6FIlfh}nYk>sc@3Tqqq~-b=d88voN3S!yJ?cf19y^ zek_RZbr_n3G1*z$>5k1k(qcVnXFQemyW$2=6T$odNwMhfuAk?mVqGqG$j>7?&cwB5 zKl`yJdoFuzhpyJt)eO#Tw^E$CVi>x~b*)CPM43~P#gNysYMew+e16qEtMU8+lWlVj zoq7+#N~cPaX1>Jwh@dqIt~SB=f3hZQY7sk4-1za8W!oXfXx;NJmLRhFk*emjJhvlT zXouqgoXCmr++Y#A)2GnqMAx`3Xy5r2H@LX+YCD{Zwr$6#^Fds4jyjvs<=u{b42Hhr zw#Sj)FfIJ7F)0gvE|30he+=k|xfkheXn7>!RWFr+^kTbe;Q6`dEB5= ztHh@V#hbW`+E9w;39MNngu}Pd-e)GGvSxb`%t6%wVP@^8(UyqBNxIK29F@0&-NP%b zqXDJz2#5vGh8f+Jj{$1qJhUq}D^42MvMn2GKiG6v2GetCt<zCJXvujfYB5P#c&-lxU9 z62Jd(p02f{KKESQ|2U8MJzey^Lx@B<#Yt4*p(qQ)!3T422?#>O>n4!7V^`AiQcD=` z(Os;t&L|^DRp>%Xjdmo$lj*r&dW61iZ1p4qAv`;go@axA+{F8xe|*6Ia39yV&pk0E zuvQ!{Z4}Cq=K}K}qITGBq?qHNpj2o70mv7Gsouw3d~ojLG|#!T%Bd`hxdYbM{aM1U z_w=s!oA1PIbaYOo@X{jK!7nW@`a`#i{-NVV|61G2U+G#ZYBE*Qg)cJG2S@xoQ$SEhgu zMpm|E!>KU&xriYxE+Je7#&m4;;}nqK{f^^jVUWq4_9qFVe>5f|C{386GLz10mxo^( zT(b0Tx>_Ai#r@XSD-XJsqR&L1E4}f65obLab$%yfubx=@w0K#)j$)|TWh#|JMIMFu zXih=8s<(-G!utJcU&>ZQr`mN?fw|8t zIh(?sS?@%ae=!r%^DcQv?_L*T$T=hV$NQaC={y(WfSyWn5%7ff;uVh?cX3=-&(r#j z#m#5F=?HFhPNa{S${)3hUXK#jwvJ{XFX29ct#hna&s9X|#qpSIm_3gaQ3a=DISEDg z{L>xQO53~nL!LW1f6P2s*c#(pQcpDb@uiHQa;A&de^E#;`GzQ&*rIfUp%pVa^q9&W zs)_}v>I;L9ReE3NP}SAWX+EQ*aS1Bd+B@T2gef#l=Vsda85aGzE`De&+;V*1+KD>u zyN`eB`t^E<+8?;qK2O5y&nkZI3-CYboIGfyEt`|CS{iK4qb>I08ABfVG!5tj&oizT zN93Kcf3RR>Oq69q6%Bb!(tJe5Zznjcs@4OQKU0Odxz=%Hl*J$>P+1KD&e*vO?bMo6 z5HI!2nuPFR=I5gD!CLR<(woS2QJsYcGRN43(UbL-&zEZ@Bh8Guok)N=DE*B_rAQ3 zIqfbNo$DM_k&NF|J;^ejgN&v`4ebSDc*$h6snnwMn+fI*pJ(9PY5N+e!VR{gz}`aW zk5#2Van1u^Iiu}8`am7lsrd?J?Sr)LN@`xp$0+Ljerg$)5m{qh zm*{vciB5ga;bw2jn;rZj!VBtl=AgfuH7ym~ab>Q#W*=}0eW_gMR0htr_KhYSNaVfa z9}@6tTt@H!x4&~AotV(Nf~X;Rjdd|cf3_>{&Z5jg<|`t&AKFa?l5PB5OW)*WkWC*V zjjaI3fW^oUk|JJLO##GuhycZ~Wddt_~z1ZJ}=#zAI>%_#Df5iN0 zP8}%U(o;SM!K=wu`S6B$;%tx_50TB27! zBYXd)`r#9iE`BaBp1b-p@|qLt&J8wQW!~6gjEH)t5Fu3MrfgtryM- zmVsb$YtLhy{ePKBQ5U$K(x?PB#uY;?qZMC|Tv@8fjzvA40^Yf*Od)#(tX z?C5Vd-#G?_Eh}?|q6^0HYp{Xum}#jvT}nT3PUUu*qPDL1NE5bjRKcM#e~)zY@Ms(x zN)Zc=^9IeED}ouh#faURxyFR#5DicUZVY}NC9MmJKhH8w?J8pNxeY+(v5h|7d1Q8Z zrj79~CJO4Zt4Aci*760aeE!EeJ3Q%ZAn#%b?F;BVdg*)u(#4Q=)2$(n-;W5Uz9LF1 z^NR}Cg`!|ojt4Ja4`WV7f36GclbOXkRj<#1;C|rb0X=`X>^!25Dqc9aLn_;VC>}&P zW0>b9$&y5(L5-W3C?+G(m~ioPR~GKX%^4duCXvNzW^+4+=EU)NovAUE;Rl|x7`1N& z@&L-@npcdo{I0j(J-=IH^Yd#Q#RTHCOS`u*$Ez+0h*3ag>(gyLf94>_GUt@glkmi7 zom119x-B8eXdKAKr&z}mjXAAyhse^I?nKb45wS%S(X@PFq+Ia%$RT<2&Ir7x^7R<# zb7VGjb1`kfCQt}ciTdQqM$fCeWBy;QC&Rp9+Hu#q9AVF6z-R0*OuJs^vhvfP`xmje z;;zpNSDcoISXnmRf5Ho?Bnhk74OAN}sH8NlW>mKCNfmuURl45R9=sS2V_P*0ih@+K zR#-d3ae6lR#_@gdB1Hfzh-eBYx)XM^7b_x*$3ls0zC&KYYaYqSq&p@-lreGv=*}Sy z>LH4niHrpxOPxo{4Ql|OCf3n#Pxst!cFk$u^)?fKes}%3f6hmV!t2(r)1dlgV+Tb@ zr3~p#3{WGEB>5nQh7hey(7a7fBM0WKMbhE#HKs5QPH27D$DO4S z-$&oMU(6fse{xf)XyK@Krtbpf82a9uK14>{MhoO6J?G>@oYuU1-#mchKyDrh0YwJ5 zerGlcp{68h9H@jBPg=ZxE(PkGr{Z3}hFl<>m)aPXoG64Uy8vH0&bFh=jpKaEyFK+| zzd@E)TEi87vbQ@PN(=jYkPo<4*|eZ5bBe{59!>WHtW1l%D!i_X?|_if|I zcHyWfNoY#}cnDSQWU2BT;An^_P1T5R9!uwHOH+041EX|DCb~7Ygpu)N;X2Gv$7+EO zgSEwT^gYJyjn941we%Yn?PuJ7#hnz$|G(TcjuDr9iCzG8DzABkZPCSuG}?XZ7MGVu zdf7yVe+k`1JCcZys3h2(n@=hWmmXH@Q06W@35Q>z05Y-Rz+F3u9uW#2B#DkQCG*O{zQs)Gid!N)DAz zpk_A9?`ZuIgH#zb$XP05tm6v@`Gz^_8pgzYf0_`;BXutJ8F22WU_*t*OY?EIIKL;! z=h(-g6clrtPJ9s>mw%n(P~I?PeO^W9zpi-�P%Y78Bt*qRgYKF08JRjOeXK*VpiL z&gbg$^mE;4w{`(&4%A3^89^j*maOC6qwfL5Z~UPfBa zf7g+D85e==C^IHta$B(WQ${STl!M$zEC5w0%c*K}UbF)O$V(|4&z@6xk_!S+OyQI; zaU6`&=!wc^$mRZit(=rD*7qRV*!Ag@rhJI6(PpIiHWL;_1W-e;@b`6*5s8U`BFlX6 z5w}34h(yjwM(bmDFGXahb)v*bD1~(^e@f(Go>Y;mMUJXCs-bp~%ShlvgmmIe2u_v6 z-2bDSMm3Rz^c3<)B<1IQ2tsZ+6fW=c)puCes&bT-oN0S)clEiRe}_7MSDZ;PcH9wIT??Aqb)cfE^kUrvz?ihs>4fWu_TW zCGEtNq(4VKf$kiku8P|v&lKyuaB^b#8fJpy`Ut%S$_%Msm!)J5xt^QT==-=dAZ#bsgo>f8(6g$R?+}LY{hWYr5K5XN!8jx2&)>intO}>s0dv ztHhSeWO0J8=}d_&uSDu7*6D$6%?Tv-(YyGoCvfbnLbX9#`3dE&7qbemaT`m zSVD^?Qarpr*4}2qL)b-)FS)D3Fj0m!G(mPj&BFri2ql*x;$yq`{DIjAe}&I$VSXI( zF`X`4f5+Y8mEKN7moB&6;c2)&=TjLC|M1uQdon*vK+y--PbXqGxSx*8KBqQ^1aTiZ zbxu6z+sK|-BD@Sso1b~kDvXO!mv{VTd^X*klnUbO`8W=CUyl=e+8{UcDMJjAG()0w zmn8GI@&fYfm{Yo!Z z812>~5H*MGs5NC5+rY{>3Y0iJ@N4~trmu+8^5aZ60Z4`Mn-iQ(e}|+|FcD1~GIK{n zPOuQ&*Go~uR5xTIlL&DDUzl&5{(rz@5L?{yq`)&!qy52r4lhlQCTybfj7j-QR74l1 zRp8zsRW)5GZj*Dpz#NN5Oq)XfK7-L@22N&6rfg*lOmKgGUD>igp0iIgb-H34n5?z7 zJI$CyMU8rKl)$hKf0ZmKeG;xLxeL6 zH5X}pn99Zm)jT^%MCjeNT%o@!D|bS8W^YSgC4w*_pQ(%pk-W^(HPA!5a4U&YkOn!m z>xAWAoP!a|lEjh?$+;ae#J8b;jQ&tG0F9@N4qB70t1sa9l`T1H{9IhYcikO|zLK0&g-DoS zj1m3E$E+>Dww&l zq^a2wn{c`nXeTy3E-_fqr284}1F~Ejn0IyM%f!pAe}qZy?C-Jv2HP0RhIpgVIyu1E$k1<=j{VY>Xkd20luCC9$UoGB;`ag z(q8t-e>1BstqrZ(1(lAKZEIiL``#oO%@DV3Z{;wl3Xvc431R@AU-vQM1L-OEedI%- zw$6UJn!B$e*dgvjOeRWFn;|dHL_VmSJFiP}n&0^<@urU8Q;;IK6l79y=d)fj+y1Dh zJJbo4E=l;pQ#@YpBcbmV7ei;yhG(2-^vc8yf0xU9-Gm!%^1F|V&Kdf<{I2KiYTI(t zMa&Aa+KIC*M-poS6i}^-=wnR5|u40sCL=s;Ykh*K4IH6K#JWW2L% zQZ=59xrd6SAUJ9ILp)6O#7|)?Kb1xY+UUff+tHwJ#sMt2fA&$XlQ9qh#ED$&s z<531*u(E*Z`%d&aDsa;x@XrPK=VeJzf7?lwGz7+MDQyDNClm3e%a&uA);MscHDnPp zvck|9{cxO=Fom^*5ankPUq^fW)N|?HG`N53A}yTDHLm@^y+TaI_E)WB*I_O68KPz- zPxDHxY={=@^dTc=4RTUEFHLcopGN)9Ppe2Y6ECk%(Ou$un+KL1%81p?7paE9fASI? zr&67rt-1BiB8U^Y4YSx@*<8hW`N1ozrn|i0TQi_ZL`lZ)(tKFsJ+d;}+B>T!3eJ04 z*LMNdq&ge`z-voC1T3%k!v1-o#|q*zBF>JGl+EF_nHtYZfGp_*3cR0U0*<@Qy@7?C zTG7T1*5Q>&iEQW=wRceeS2*HWf4>jTn7hFIyJ58Wch3SO>bL@B9gi?&oqdw@O2N2@)Cn<5@884eH=639_*wGGUwQH%y?H0h}Q`d-V*oUfXFBBzIoXe z4Hkzz8@c2%K#l8=LgA3jBM`yj!ZRnh7xTLr_qkPrXu3RN1(E|e-GE(#)y(0 zFWzH$y{mn|y9nrayzXn;T5M5>N^=UNzw=a{QdrwDV#Y;8_&MroOhx4DzE+$243wpF zhc-Bi=ac2a*JBFv5$2`lP|2wStpjJi()ndbiZ3iEAv_^R?y2UurdlIfNQcOIs~l%$ zj|KgpuX-QM4dWenMBbI|f8IGqY*d#+bSzhvfeo$ah!J_MfY@ITOO3A|Tnr-FD}0V1 zz+^?|V_{wx_sh?_=Kg9koTU8CLwT?~TQojo*nG!AEoF}Sb|@1}MT4=QIgzDL6}Lq_ zeYcfKJYdhEYL_#GbaVDM!>H%T@A=HyaJNsg}*pE_ceRQvpH&KKn&N2e6OEUNPx?=&|xrf&a zzNclw7*m3tF`im3o}IDOVm?=1QNl9alk=*LICql#XC6n#sO-nMSb{w)f8gSG`S3^H zMr)L_Ox$fHY7Q1Of4Y;m@lS2UALrUGrn%-1|J8et(}6;okEvwz5sHvZ$3x>QH&@}^ zlYY+h;J#jz1(HP+xJ%4+Xnaoh0&-VlXcUWLcRTnJA|x_@t=>v&f;@VVp0j)PAsO$n zi7Swe@h$@!)+JB@=s@)M4(=0yc|g14N8R*^_CLPf`lvvCe`*STj?R3np7~iUT^dPP zs;zsfx43g)*{=9DI`i{%5Yp(`OhAXQFEmmtT4T zou843Z{3eyf4GgFbNzis-p?2?D&>oJsV^Z>UqRlyIs#*sC`fDIPEv61(wJd#eNEzP z;hXvJL4n!i3=OZ?YMNLt^wik2dJR@ELS26u&?s?t=EjrFGw3GS$m^jFd~out;9c_% zF(0FA-W~7zo9}enzkSa;U03V}w;`N03=l`ywA$!i94u3=WYStNe3%s?*?7C#=Kk-XnwF zlwOW{?Evg{_Aoz6y1eN>%)7=HUSkrCg^SO<_Cm)YyZRD$zqs_5f^t>Z6j~dS=)iJm zGZNY1e|aASN$wLB*|dHIO7c0i*D*zn3so@G3uNaaNg)zUMZeYz%zE(a{cC;Z93t5w zOp-aMHiD{o@=l}Bl~LfwAqSIvh#Rq`rNNBhp-!LD2(UG;S|<6l*pmk^O$g-RuN;Q z^q1QrbqG6szt;1T?jmZ+AOSB2Abk6+iFM<8pX1Aj{qmQpJdb*Pj%)ndKO^pkYb=X% zf294GK5o0lrf$y%z3=tY?>l`L?VH`jSV!qJ7QXRV!w3n13qjf5%jJthMgi;!?m!z; zh%gq5LFB`5n+P8)0#Z%Viwc~oo+DN{OwaSZ91%8se413_=|!27yN~9Bo^-xh*lB&t z>e1AmeXQ)AniyY&05Kx`0w+>8?_nxEe|F)=P!>d;3?#CMO>so6eyJ^e*n6u&sc2)eu!}U3p5$*NQ{_?e7fBiXs zY8ZDmjjF^+{jHVQmt?-!G0#pqz#mVgC6=GzBelQ1vN>ocM|9+9@yWIu;aI5M{!FG+z^9&WwHZS3l1| z$+G_)Di*ijf%%z}@id}$C^l|0e=NE8_WYOa`+oi(Hn7h;ezwL)&2mhk&3~Ck96O0DYseR^YzDc&I-P!HONTAO=Vl=DY9m1#$;ysf7 zQ%GyK->;k!#cXE{|M_R1pSE~rAn+Kl3DjaUnK41dC=9DSa79hr#4^2Pe-t^}6WBr$ z7o52Jxp_yc=^LVXC#;#!^Jqq^+J~gx#Ke5DNzn>0b+4f;X*eYHI{tb@T{El$AP*LF zypI3-;3(d=LA@a$y?#iivG-kH{bPQ~IXXCKSAMU{b+6-j+5K4G9rC2!)p|P3)>HCk zO{rXN3ykr0W$NzAHN)7Xe-wg6A_iI9v#K3>71sQRyFwO${b+?rWmZ{gCS+wzVAjUu zQaNdcahSrKNQw1E!YrXj}fyXamT2HZQx$nvf$id%Xe|#99_$oe`8TOex z5oCR$UVLQZon%Yr*%sv}$!n7+Ga@2#Sq&wna>-B3BhS&4pkBNxnJr2WFO$i1kYj$O z5a?eDYzd3!3DAeU=__X4zScOSw3uPerQ@`fl`m04IqG9ePDENrk%|qUjx$^OPqrEl z)Yq6Y0(1im{eh`be?$`y^?@^1duJfVZnP&CbK`qfOjH?DCCQ&-9oqeS(3u0nIH>Ru z_B9aHQz2G|!`O*C-9uo@7hPTn_gY^4IkzMeKe(>wuUp#hjbzc??_XnTw1`ggt-dDz zB-MzfTmI}MmJ2ER)12wgY$q;`&lW8QF~8Ufu`rQ`8jEC{e?Rl2NeQSWKS$cBv%dx_ z_-g3pU{&BwXj{c6RuvRpOhHu@f-tkNLRI3r)2y0@r;XqY??2p>2-KyTY zRWxNJT7eX2eintB{^K*OFjjCX%R!+r@8>?BTfjSiI{Th)?2DlD4&LofI(In~tZ8&U zM8RTO?nosTf0*;eIj3?tdJ$#e!usI_ia?LIvK2YWf*2rR#XhpL`H?BT$NMu{ud@oZ z$cwQ1ndQ)AIPpa7$OEjsIjlPojefizT2Et%L7mR9UVY$g--})r*L>O4M|S>cAHjwX z!gKWOkQ2;nH_xikJE|U*Wu1e?TnYPh!B1WCU{VKe%&f#rxy zzec$&{eeEmoBkY&>pA8Vc;SwX_!i}G{5*wl@QJawP+lNdW_*t5yVCi$kOI?o^oBpd zJ42Doe;6|QA;S|MW?%HUq%Nk#`r9$2N+b|!gs<9^Mo%T2a9P@2_{bhN^4Epd2+6j>LcJzgMELaccXFtmNJLhXTY2L(WYBbF!YGqJR_tSSz@P)`be=Cq}f0k2S zO2QsRMRjMWja5VIyYq4dqR#1#ols_{n;ZU(MSG|Tl}$`8>4BXwZJljYag&4^T#-XAKqb^UVNe;)OEI8!4M z!5y^d@9R3%A|_q`POZ~_jgvb2aErTs>p%Tm53%j=>DWUF`IK0{5cB$BJOf#Jpqw)Z z-vHE8JfMDDDN<1gBK>M{R+9+CqIruDbMDd2qXqR7mRSW+TV!U2GB5qN=f;JsAWO}N z%%5LvxZ(Ht3*`Yu@4suk7C^b>^57aDLr66*6E_f1GD$MqzJGcX>(Q z<@4Yyog;`mCt~smBD6Xkw<3fm;SdseTw*WA!kN%`QPGnlMMR-s@LIuKu<;bRFv2xZf1I1jh7-Z#U{!S9K% zIFZqp=chU3J_A_@o5c@s91b$pcK68#`c2Ue_3{_GF2LCz_HY81sM8;*erd0L>*s5% z^G%23USn5&>WSg+6E|J&THi-$-!odWBldNU57-+a3a5E(6IpwL_XMh_b* znw5J(^%m|DW)L z0}}oR{wZ=-Zuw@^3HZ)!+-#!zFFCu;X8Z>}5TDF!e>0$`C~|j&^mawzpPPcE^$2sE z_K;bm;^*KD=eUx`ppr?Dp=Nft0%-~&*>RhXhN8fne4awH^|5a<)!O45OPpGs-I)Q% z$n~n`IaH&9$YB(oBbm*u2$D7nQ)}Wv5vP}^(s`h%^^AFidw>P7qRRvAqBAyP@8&Ic z__eP~e^%~&8T}7)^>6q%%KUHV$p0tbX8s?~4OVq72B`1_-fHkVUjDwn2d~(U`bV7= zZQY!7c#dukul^$afg#=I2cH&4UxqJ;OEMOPN2Q!VgE5&3VLpS!$LZlH*n^ZTH1bhM zA);G}`RvWpTIxPYWckt%y1XBpMx4<1+_PUSe?|axv!6e0Q6et#;GE+m%sws=FgI3Z zJXXSWWWmOmOI8K(_si|%K2jix&Q7U_eyHG8^JUOZ^;!v7xNOB&2Hb%#_9_;Fd;jT( z_kIGxP3uUBcu?Zr)^D%K`nd5Pd+XhNgKKZNeH2r|Y?@YEe@GfD zM81QHVphgqLsDQLB-H0@!n{|rwB#V`h)MaCj-S?~Di1FdJY)G^-W%_*pcOoYcV&V7sK}}C zbnkAr{F(|Yg41j|an#WT&Y6AsaHkKstE%a-M*Ec8=x$b`LkG1+K)=_*{+)uqcH# zr}QcO8k(-V^+#MFC%*_l%HwR~gb6frV^#c@_l-BD3VXlJj4%Qvpci4iss+iZX9FjYtPdTkMLIdw&2S+Dc&Xs2^+qiuX}YpnjMzdj|lHP z;lC_Iu{$KpCZeVUWXO}^TtH*mDrbw?OgO5@Y{WmwViJcUULJBrf2`4z3U*qhr=-OE z0rp0bC57tQKZEd)_XBeN_#xS3GGYk@fjHi6Ry6}(2mz}0nO%<;6z_Y)B1zk) z{dws5%8~5i-|Y9k#Gap5U%JyszV^%8UhSV{_x8TW>D(|h3*Afa+jqw}QumdOcHet& z=U9n;PSpQ=HmN=0e;>$a040SD#r_^Fhmc7sPDQ1Ax*@MYmCck#OBxlFK;zlc^QQSa zrd2^~)neZ<3MX!#OUZ~*0v3#r^kd1X2&qIKKwdRj!zk3upGB)|%Tbq+QNpay%(p~Z zq{9BjdTztP3*zyWB+%u(@;*+ffz|pPJG*OMtGMD2aUU6Qe?5ygJOcI=z+B6%cDBQQ z;F^1R<7$?F#*sTnd%Q+)EJcTN>22TLFRWgb|Ec?o^ZJi-=We;jL)s)QRdj{esZaP z$AGqcc=ApbS?otsdtM!jQ+ak}vkT#++$;AU_77`F{Xz`O1@ZxMDCmMT9)WrS@64u{ zJ3RPHtVK-bVjFIw#mrL)kxrDM{w`uW^Y1tixeuv{{c^G9Qj@V=jiv&*Kc<7alcOiZ zrtsrrf2LLmc`afkbbfp`wPt8PHvpgDT;m(vBlsNcZu5H6DEr+@`g=cLwQAT^OY~PR zv%J2q`h}(Jw5yK(KCMc{GVffTUB8{$%R$2g^YZy92El?1Ydndlx35-gKOENNZoXtH z&uqoK=pq>6DKXc`xAEu&%&GC2Um_(Cn-jr_e`Wmc;nNn*YW;xvK*;y6H69p}`G}AiJEE*s;IFBM`%OVj`5J$qWlUKVicqcD&KgD`B*Ox&xp}eT=$4cfl$%q6CDu@ z;izD#Xnd`k-)!SekGRX5O)d@M0&YFu`kw69{y;~+(siLfc*i>Dc6`t<&zyHhV7EWGPm!N5U&Htul@3jVqcW$E zG&{_V0QLY9eC-{bvn`Cl;9Tx(%&Vlle7RRK98d$71)HH+B-MH?rM&V|9SWne*o_3M zA~8L%(pH7;Jxn)dYm4$2OO3rlJh#jBfBd&GIk$~1Tc2!83B4YDZWs&P->%YDJVUIP zw#NzU`EA7!h7q&DXSOn)hQQw$7*C%HxkWs*L1-;X8?e5_Lr8RP6vWh6K-*k@Z`#$k zG>59g`J=Xa4ih+-9tG(c%V@&DPmPr#1JqVm?@vS2(nc*v;h9~1_h`IJ?{3&f+Ul#9Es42+t z%Vf_Vr?h;Z03R~JrlS5BqCQk%f9|JD3Ez3}W%qskXgq(mFY{&UBx~~S?GJgCFN8q8 zBmUkda;3uGi)Ymelv>$rxd|?E9ME<&McnhB^T>;9-R~#g`!Dp#bf3j`Ww^D{o| z@&v>aP4Dx0N|w7cw=x3tf1*A>_kH&M^!a#tSnm1EG2Ppm!eS&Gi0A&cIQR+n31taY z%|>XBK>iLQn%mM0pep86Hi9J-9$~!1|uivKitbQe*rTOvj&4e+|Y;M36Tml_;dOd3qLTKb;RqE`tYPrSX{!J{4OV zpAqpk=AyMv=EtxQC zLB5FQL)o^{IM6xbK!*ykDQ~Opy;#q{M+{b|w?#Xi9okdmOrmXGFpLjwe5?Mx|M~YU z{^M^unoGHtmd4KPH6Lr(;CSE_G1=q6S^EIjeH)YhfA401U5$P}XZ9bBV_f|8*UCBK zm=5A^$Mm#<@A)xW&Fja#ZJcpW=srlGz|L>ui+^hMcRDB6ab13Moqze6=(nP;K69LI z+$M5#xzD+_kOl_Z_4StX-p;9hJ`*I*ZOJ`00T?Zx}OR5@$AOxcDVoi|9IB? z5_i4Z`<@M2b7xL6vE%rf!d@dKa%A3FlmwH<`N5WKV!ZOk*L$!zc(V40tg)ZLT`Ukc zyg4kY{Nf7cQz0jy;`wavr1oZfSIpUyI220|e{2EIW=x$m1^t=wEA-R^5)ubz;S+%z zv4{v;Q5*OF$GUaxb`7GQQn}t1iuoMeRpw#6l`W9>rq}tk!F4vid2jh5zV5v&I)nY2 z@Ai$cDLk|ZU7yOrbGomwPnJivKRAZiKS$Q&(2bd4;)rMtz8R9`Jl!wz*ft(N8Ay++ ze~}0C!t8KZtO*G3=O{QZxE~nHh%6J_$72pT_StTI3XQkJ{?h2fpTILdtOgXeOHn)CYWOK$RS4#;&s{F*N( z`=#^b^*sjr{V}3CFZ0@nG2Q1dueDv+f6FqoWl5Ghq0a`SvSG5NkQGA;m-F-{w%(A@`a+16FbclAjYVm$ReR4-*w$ocpf2!vu zmy}V?bpM2^q#xJ^`c|to9oXtI#N;fgAwR6mw56^bkq%SX#Mny@d$ZZby#edX2W0Jd ztyqu8yTxNuz!qVoJZL+4;c(MiL7oKax20U@{Mua}5>3^t=07UkI3mp{j<>m1-H<|(%T z+Vus4?Aky5XD_C%P8g@ZG?EZ!u)XRPfA_J~b>KanMURdc-KSDc1ZwMLL)|aRc%Keh z8AUckB$G}IOQ}SvcC5g&AFf zT^o|E{_N)z^Fx|1dNe}tOw>tmwVvk)HJOgs@BLa+(7JDESI-fdB5qikz6UMISo`9t zx$%XXoTrB<3kyzE%e;=uPiUsGfi4s6vkf=>SQ@A>0%B^tcb$)if1myBMpSbowRY;2 z$Jt@hHQrWnte1SOQKKI<-ehjC-4S9!h&WjLb**v`o52J<#&tfmRTxn*ZY)^~^y}%y zJ`yF-4+|di%dnvhVuMX8TA!kRP|y3mjT36bIm6Yz?D5Mt4(3%82Y%H~HMaX6Ic`JE ziyO6U${4}Ewa1xBf5a#v(#yNC<^cs|j|6w%9cxJ)Hu+UW9(*lKQt=Rki%!s+&g(%# zhO!85`U~+0zJ!)4y=eW-POA*>f0wAvXGgs2R>u+_4uO-%u{I*5chvcxUPtF?L`}&T z02WGbEjztQ-RpV&ldGn6pVD5-e)j{{`O~NBao)E)_Ezg+e;ohhnDYdC$@MvgZ$3Q{ zc1frbrC*8EtE|+Sjz_j+@uD_o9X~&QTg|JO%iKpE`eKIKuh!J~ieDC*cU3#Q9yY6a z)AP`0@#1TI8SMV}z2K!2Pz1Kc+B@;wA))Ka^-ES^|1(A5P`$30DZ7UC>*odT59jF_ zdvRkCC!1HVe=F!Y>Gi{ly69Zs!Aq>vAtKfHkskpS{vEuS-~$}L@gu#iPB*VfeCtE< zV=jBGc|R+Ua4aG!@~!@x5;>W5NJJ43^>Z$gX6055nLar0fm;6@BLk08#T^2Equ-Ag zC{)dAzHcbKYc-Kb(hQ)cYw5iMEg-=9xcGCdH^1Vle|h$kkF3829cPzxEoDqnKVG@} zH_W$a-Lt!OvvRR^s%?Y4I_#S)wo2@u2<|I~Z=7c4PqP>w*E8dC#VLt?>+ElW;>H@Fwv-TT<1 zQX*E4f9@Z)#a;z#Dn|iq9$Vubb50SV=;%xK0BSB9(Ea9YkT2WUAl+Vdg&!rxA%JXd=nq-SHTHp|IG2Jcj9 zU&(9U(z?OgD*6~QkPRG4-{FWUjgX@cq9B<-fAWCl1=h$5Gy&dqTm>xu)(5!p;C}FN zZn@WQ?tPz+g>NoP<;yU&(GKmb4@$(0+Jt4qd<{+opP1&v22Q7ElRG5mp}7pavwU}? z9A}&1k%9k!IqM2W#67c3#5i(-gV?SyXBHQAzVUVrrN}SO@(24QsO@@#JFKxP6S3T= zfA^RfgOvAGM~kNtD9LCGw@-gK7doBGcc*ABi&UK9BznZ-NR5 zAD#Ubx<66R&H7cV@6X=uS|j}C&FW_$e|Ua~Kut*Hf)r2Fds(_wUe!CS*CS5IA)kPK zC{ByXzL}wM;593l4vsG`HuQB!q+1D=KhJ=>r-o1OZuJ@+!-IH(MY2vX=C|wOD^2qm zD8_BTTES>c2-`;jaUne}>q*iN!AtDRm5WYi{9D7~roVR8W9`q>-__9cdvLDrf8x2W zT6X=J?0il9o%?8dSeo8)S!dV}=5P@F=zrI@x%-=%SG=c5=$vFDPHYmN%PjpGP3<>Y zk&6_tZ6tYgrL8#EwZ-Gp+GWLJQE;yP7FBCqqQ9+qEdgs{EhnQ*XcjS%x)y~RZLyzW zpZfBu$3qL{&r$a&!M?m(evC6Q?B?aBt<~Gp4(As`$9K5dt}37PpS$~^&4v1gI)8rF zbrIZWRX&@O#=~AfG_ejZd6ia_J(KI6kj=)sYaTadxh2xgT-3P|9}rwn>X3W-`=n}J z$cZDyQ}5!sw&cV=i??W}EYFeVp#QVnNDa^^0Gmlf(65_4820(16_ zkNe^}AFwZ`my>m>Bx)s;b#P%%(YE*`*2TBQ{q!PV?JRHhp#T@sAD0zsQgA|(V)`+b zL41xLDKJvZd2CyngYeFi_J8Pv=d&#zPrD0D!ffOi6`$DB`y{ljSW*Tb<1)VHIqtB3 zOQlfKQRn|>^=B*@ zOj1?7d++Z4&UraCYK&kc8(CVqEv-3cVpCPw5Q!;Ks=-@+jhwHdd=aN0@@D#Q@qb+B zgQf8s6LF1&{!={p&aLH}eQtRIt#B({qB7zh=oHtHozLOj0MZz=Bx0KDVmegB9lUr& z_Bt-gJt#h5>48TEAAf`U_y!A8j8hXQ|3%fEN6V2X8$Qf0IeUbSLcd7&BDi*4AC~Tg zQy*B?w;QZHYI1?C|5th34)W$* z2TJl5k9CciM_&(Z;ziwA`t&UiT)95nIBE0582hd4@wq?tpSvOhnNBcv&{6yqbYhb! z`quUwW`I1=V}FQ)2?IUr9LIMZT<1ed?ZeyM&`B9lwnL;vkW0f+f+IKNNh#E)B~)?m zeL6hgl0fQ8lK02YsiOzp2?$YYLY9jq$RsAxdoH#60O|WP(P}|arVD+XIwfP0A^%I-D+Za4Vt`#X3SY^@N#&5h0} zzrCNarR#prjhlDl=kNQHE@oRbKsfE{$gBSAb&A&6u=KT?$nQugUej2-D&V z_b6vQpdkqK5xDR*aFyne;aPDmcxR22F?#~iB7e3F3&g8+AuxEAuGE6+6=93H1i5&EAD;T$z*23KBE z#s4`5q8r@e^Fhxj<<|YZKy3w@>VJ-ZfR%QsKD>uzIb{$e)AM}tBrChKE&l=jVQ<#c zIDaR8V*Diun)rvCKI1KR_hj}O+i%yly=ifdE&k!BpW|D+y7GGC&7;$`B|6h0&{g;O zt6H+x-kM3gF88V8&r>?Uvr)#PFF(0KNeaetjKs&vbOGw2tK|atjAU4MA8vH}BBEcAaPyvO-+1pk}P~3*% z@ktP#F{wOpPIpm}Fpe0-bDap8BvfI{UQh}?c()z$s*%_8w;15@8Z{}Y(L-!;p9MI` zuVi?}O$^lUvF1OpHa~3E=q=~s8h>w{0LDshe&BO&d+U?D?v=;CVMV)g@|uihK1mo` zN$XoIdg$8DJyFl6SqxJOmCIG&u=8RYAdmY)LJ4S>DQ`Pv+YrzCL08OK0#Y%wdVG!_(M_to8`8-J=)IHrkO z7I--91bD_q)%0$SSZ-zd0pctO(Fuj@bGMY@xA@?Tp7-7UhX36BZTu(s6<3m7^@>|v z%0}m^)j6|%axvn$#W#FKgPFhLf&Ml=)Yp&E6w=rM)sryl`Euaek`E9l>DO`|5Pk@e zr&r(0Z~3~6U~F$G7*p9;R)1eEN%RH3LVN0OaAw4V27e-jKY{KhIpLmleh4e9M<0|FA=)3MNFaX+0|K8Gl6HOwoUXNqvlp z22%y98n8t;vn8LCMOg=`d}D}}PpY$sDluDF*S0C_vv>xHnCR#|IJfPC7$W$k-uK{) z5byX6yNLBFF0r8B{Yt^&5(jXb?|*VnUUdM9aCXyP7B`LK?!}*S)%?YAuKUYb#?_b5 z50%%m5tL(aSR|D7Wq)3fh!DPY>^#Zz$$e9b=+QKpX@}=Zge6SF!kW-1 zAt*L*t8+YCs3r;fmQQN*Gr1J>U`(vGA5B2eiew&VSA5n5e*33>ugD0A$ zpp=|CR;uIw)Ao9YF)rx&^(<{eAiHTc{WeG3xDhYdqgn7fuYWAY33W}KH##L(tlZ`& zhS8ioA!ekF!@r(OTsc?a9kE0ecRRz~OAn@~(O|LBwhbkf5?$ar@q-Phw z|Gdp08~M~|AuavMPDz@tuNX#`oj;F4jTWGj*))E9a}YE7I_L}D{c6XpZZL-^v7cw6 z9kUQ?=jc3K$$x9xil-po6T@lk?9aFM)n5YR7Wh|-EAs#z8a|PvaH(*9a)V}86MKzZ zD>(jPeH(5jqZ5rCEF+CL&rLII+dUyPa_=Psg)FBcRB%L5Tw1u zxD(u$Gsh`8WT=mmkDugvqL4eL9OGFfB=&S}4cTIT$bUeI{kC7M1!BeEc|GG54bBN; z+G6kE&>YXMPHZ`oa!RIs6lOZZ+M0ywfzW&(+iE1KDkyckUHj zAEdWBGVALf+7jK%I>W5dk+VD8jOdL&_!nE#tx@CK8Q$=jnAU)b@=RwBhzvEq~uiqZjlm9}D6in)x#dXPQkFYN+8^|A3)C z8o8^$aD8XdR0-UhJsEysGcPAQC^LsW$a!g-`6!LT#fC!lm-H7mbmOuQ1cB_W#A}OCS&OUP1b}d$1S`IN-E5&Hx zI=;3iv-<&|orl;@_DjU43LQ+wU|Fk$6)mW)?2~ndq~VDVHF>~u>K6sb)zSaHXTR|V zqrdBS4E{`(A&<+4`A)fflb%aH7=Lqxv0zGh^T&Iay7oFwychJrobH8RW-dhhwABNf zqn|bB58iy_@`dYi)#%D?aNqH?Z+QPNycyqk0dPHZ{)X$kaMxGNmX+(&ZjmqMvuT~# zD2B(?;3F@s7g5WX|V0>Mo&5p+CuS`0|zM#OBTb{jQ?Z<$vrIr)5%s zuaSft$n9M5m*4)TX@+`5=6nqIk_~6tHAW5nC5H1MXkt7rb(@UMcohG@ja=g3%=6!_ z>yUO`crxd<=LU0f(|d(yC6m(LZVN~SosfBQpUjifLdAiqHn}&2@ylwstq{1pS*H^? z-fhcu^2(;Y;p|*eCL_m}X@877SLXfHU_$cz=rMU>Y~QEUx?3dkc>pTr+TaNZ0Wo26 z`4rWe<($%Ra?XU$EP9G$UiKn8o-e2K6pC>^D3m>)&OaY)@1&AWxz@*>B6M_&#&Gxk zunIeY2M-ByKI&x75QqM*@sQu~i}dV3GND>dlJlITaq2iEAY?n|wto_YsI(K!?Bo9Mg9q1;1)Y8Y<)I=nlFX+hun1E3_Izty z(of_2PC1#@pSV9jetjat*&m)i`xavd-}w-sjg1P}1O#IeKR=CX3Fx%R9x1b_DtHY0PyVWa&K(ZB95d5-Iv^@a^>A|Va&tVHdDsSOwx$Ct&vLjIq- zhu7hT$jZIMsZ{W0(JA6tFLBx7OvX4LH)EuT4G-^iuZzC4T`*ON4MbTk!L5xRmGFiu zs1Y5!h=fW!58Zi%);7Ej51k|WzRLr2{PU=THVTa&@P83w%*6`hEB_o1KL2A3vnT%) zr+ld&={{m)HN5NuV% zFa%q3R)6|kw@Z6HqARC!WOsdqe!hhI21{d&{}_X>8{W59)bafUJQv%U{@KJ3-*i|C zArOv~OgVx6xi@xuAjgLXIX*yz{z#wI zTDb)2KSyWO@}*Es1ENig;T)d&cd3fcpPG{u2!FErdU(fv7)97`u4mK*h528`mP%7g=?U_LT$O0olE&gNg4N-w*5}jhPHW0`vQq zx5GX1D?Pn!AMp$s)F^({jt`O}Z=#Low*yEpwu2KLWpizh*R%8uW32H^c`@HE7dVXR z)PIL)Ko)kz&E8TPKX-!h!$a^X^8Z}UBd;8EkksZl^=IF3|c+quq&8Wf(v`UfP_JQ`~sK%pPAN*2@JT=FbpFi0qbqZgy+ zr>8baACR+yN=YW4^0{e;&O4lNdwA2)Mt{3!DlqML)itnBSwt>t6|;ssHL-XtCpYr< zDub!%w)!)s-xz+hCNA>a=&1W@%7}8lh~gZSqDNUW!D6+5$|b17CbLt%5HGe0IE@z) zRLMZE?hb>6**iR zTis-Ysj5uEFS71bB)$Vb}7^mpdM z#k1d@M*b=6gL|h_B%X=8Nk0y==|XwR=v341xR@~*-uS=EV`sSnL`@g}pJ}9_ygu>Vp zDH)OS%2Rp4M?Ll~d@N5GxAr&A1)BN!C+{{SxGx31#qsF(IHAqs17tK(DMP)+8xOn1 zVZ-UO{0px2a!aOR=bqiY5 z;&ghA3Vog?V)Sg|c#VEPw}^`U8Q!d6NDqQTNyhf0@G&|ReG4zc$1S-ZKSfIUg&YM@Fs6ARSTfI0yT+wu z&dD?!m+Q4d;pj4b;yUxVR$(;PDI`|u#awcOKYqRI;(q#TkL&q9^5&Yj{+T>C^?ZpL zy4p!B;s30jyZbS6Nq4VhR#5)_!d{W?zMS8q{9W zi5jT^efc}*o00HoJo*BO`!7jJ0lA5-p@UD-?8$KbHl|S7qJPy@XnCWaT79Iy8mIE$ zKd76(b1upMah?3-_!jT~SzTb9@t(Kip1REQHujmQjUoHN`^q=p`&ah!Ok|e+&{6vt z?_hY${-Xc>;7h|8=jM#p%;~ajF*JCFLT>#X4iy3`Lg@50NFj)CBqNybl&d9>iJmxWK>FyoIG46f; zIp?NtXHQnvn}F`arVH0`0lrF`>5&VM1JcvNVUZw)!xr&Va(wJ~`oBJumhjbL0_|%76VpzpF}~%y@9J@cD}D0|)Bl z?J`DuZ}$h}Em88{@c~2q6T|c!@0awhd_#X|*C*6<`-^e-C-+dEIL*1FIlH!QOoGI-dGPQH2rrd%8O%8q(4fijsI-5;$H&o(5=#Fr9kxEIQC=j#oh z!5$2+^@>8zUV};zTT3i$<^dZXXOHXCevCYV$IIUj{T*mpH(w@fW1$ODy_LTPaVj`>(~78=b_#h5M}RyYbKY{(oCJ zDc30;+s%mFGXjBq+q0mCv66OCo)i1dp1qn_rNbqzXiUkayh9a3S};JaZRAA1?q4L@ zcjohx+WE85wxg3@b)!DPNq=dlAAHx}*UDSBxaohOkHl@x+{i^gvC*AnS$@Hyn=zYd z@f4f}%(U$fuFQ5_(19YrabZai+kY>u*JVjG(dx~87`!uS2`&TIbNsvejXv6dJj6-j z&ops%Vvn|dBLB5E((#YH;!*M=CYLstN_<*}cTOX!%jNR4Cp3~?Fhi=Vizbr6Ml}6q zC%MrZ3ufY~MYREG{n`$D6DJD%hPhW6|Az0xwbS-He|9@Zs$}s$)AJ%cq<>l_{^opl zZ|XLPZ6s=p%QkkIK{mU_JNwiA9rvwe8Wo3kX-RlWk3MoMg9@PtiW$G?=?#0ccdwkV z&V%P~ghQkXOnRb2ctKw-@@xo>BiZy+EkH_o-rVAxZJGJbd4x(MypxLmM&rxuFZ-(5 zUj``7=l+VLFj@|U&2Ajo-+y(c-EZef;T7|oi~mlS_9XvHUE6El(eLltoCVlqCc3Y& z2$)n;BCC&N9kKZteOG%s+%8$y<{wlCyD`XMZb9xwkB-oTPY<$LP-G|2B4A3olekK_ z6ak@C4R2QE$rRI%B>zwKZ&UiH1D~Au{CwQK4J*&NGD#-tD?V_gy+QbK>Gh1G4f+D93_TlPda*VfuOg*Y) zZ*KYXB(9K$=-r zDs5SEo1Yrnkbic9^VDEmOOJ1mAGx)eL9CP?M#wta`Oi~2*zbM{-+lAkvVW}-*%1) z8V=;f*YvBt`qf7Ix48P>^w--s?kn$r8CQ@)ujTn@$A6_}4WVybCe7K|dQZU$ym#&J z6P_0(b@-$=4D^`m!1vA{hM9HRo`Tr#G-jJ zoTwy5V1M6?$^UpP>bwsyy6$2V^ASOOo}5!D(H>CC{T}90Q*l8A0%$vp~H z*N&`bH`j=7n~~x(B~gI(vPK&XlE8-|@LVz)Tl{7JuZ;#{CZ|{%$(z^nA08Siy zg?~XU|6RMoUuzp|e)97FB~bH=F`AL2w?)6+iQ9h5cg z9OTy&Ij}VP!o|Vcx-%v_j=fcj!+RRS%VYmd;hqDrdED@h*iKhcQaw}l*w*Pq&%;x; z_0-9Okemk{)cZmI6Jekmodn!N7`=pEq<;maTH!dN1H*Y?jW1fcmwnuWK>ryx`|0ez zwZo|0L;sPl_QxLXS8lG2+x|xS*P01`X}=?~nxWQX$|zI>it;_#1|b_gbF639c}V>8 zfc@4^`7*WNpLb09kN*)D%l>P}V0Rf4!V<9#mB9UCz_2a%AvrbsL4RDM zb?(lc2gOdvYv{IdjoJLC<|fwXMGNknjDO-b)~klsly}78rYQEcd zVL6g)Rz0}2!Lhg`7a!r*e##cxy+_;cD&*E~%H^b3t_a$4E$qn5u~(l8?lo!Z|?5!PUUDy7u`=+mkX-g82MUe+ICG$Y!mB(utn$1O(}+FqP`+L z!)9J9JnJXG#+?rM39Q6LpW>>CoQ=ctfVKVd^4(Qu`WueAU5{Pcr0M_P*6rsjFH=@r z+7`zG;;3K;%KAjSk4NW@J%7w5PL!5Zg-o*Gg+Q7F`N#(qNE@VFoU|awsJt??7?e0RGA5HT z#;UL)?N2H1P8hd|vIkRUg^tl5*;m2L^~Ru)dr|PXy1&jx54xF;$A3WFe|WBJ>gD2Z znP1i)^D9roH+%1f@rD0&mgL4~r~;!wn|uvl4`V7I1`elhaVyBvQ=A_)jF3Y&Kn@kq zJmk>18>#Exz`^rk_!^!Jr>z{`jpX3(Y0l5^OjUovNqC?W$1C*MQ7_L%C%x7m1S$_| zt1_I?2b{Zq`2C)ctbh1#o-hArb+ip!)cJBHe=#}-t#3Kt5VJw&1@Vvnyl(pHFV30g z;vd#YV?R&e-NC(rl_VyfD^SVoHV2wLtcmM1*qq<>k^iJ!?X-So?4RGTQ>G8rzv)?b znGy}Ir%rld(Q;k(A`(i8Hl4qn&Rcv({7qJGVJ zxg*-1(R0@Yjj?zZp^+ZCnkP2CB-+fwGjr?b?xI_9Gk@nj);*soN^%IjI`5N9Y+ORq zxBrEn_0!xU^SJt8QjLjn_w(J+F<1f!cPa3DtW&6y-P3%}pQV-rxw=17@}^RU$t>)A zcAXEfG1edA&M-FhtH|8cFUxA98&~{_E&ZYUh#0b7aK3U+jhIa?>(J(`9QOiGtqyyE z+JG&8hkwO4vc(6vvx`1^Z%>Xgp@I6(bk{dh)C;@Nc+I~ac7T+%=H3C#x7{3cG6h+6 zLyx|~^JxJ(yodixd)>g@ykeV}Yj5$o2ktSYC?uTw)M7*`Nq9+LpYDTLx8qI^$DK@( z;U$jrJDv}*2mNn260F;1-CkVIV(P^D5vOpHj(=_V!P6gh{-YgE(jn(x$Ez%9ISDUV zL(?|R8h;iNf%*jwiC$)ZJ$}5)EuKGVXFsk>o9Id~WfNE+j~%YrD@StC5BrBl(AY+= z)(pIDUSwvh`tU~2PW7yOWi;SJi)OCj>Xw#ZUyllAo4bC$f0sQX7pmd##kr8ceBZg? z{(qX-24(X|V>#5gR`&A?44N@ZPe!1)M$RZ)rwVeX_EJ1dbl`9}S@44KqgOrn6Yd!i zpXU?p*3Nk!aTrYC3n)aALR5>1Zg_Bx*p6on&zd~8JUGo*zZ2K}RXhH-IL+RbHw8Iu zg4UmVssEq6Do@`&-}Enk-G{X}UBk6-Hh=igIv^LE-R8UQWpG|6>^&{WDL`H|Hik!M z8A-p{tL3MaEG316l2hQ*QxY)5Z^^(XWc0e~)5%7U!rd~rkHact*^GpXhdM|5t{8q9J;X@uY#Fa1UW>D0nCWb;`JPyl^Q$~YY=&A?}5r}$A6h~ zQ4BVYU8BEZAv9_O3Q6*E2@==Psg3i@-Ru6c?{>_-88tOTrn@CEq|JHlYR>&>Z z@GZoaMd1_Yr;B!Gx!yA*aYVex$G-7vIs2<_;{zbC>~&nbVhb+oZ~Mai+4Rj@n_l)* zKlaVPe*WRlxNxVo=h?H1#%3K{aDM>HWd9Tpej2?6d9h-;_xesaAzk}u_7m>Ak zUO*wVv`H>V7$-Xd)pWTCw@Yb-$ke!hkRe++S`@-2G8r@A-A^@`SdmS*`@Ukbw?CW% zJ)iHN7b1&@98dqf#$#%Qh}Fo24O`UCu0nEOqQB!uyM(iZBJVW;H^KIC7JmxUP@jgx z)_@TCcsGx-hOhQRAzpn1P6@9+_-eDKD?h}gu9KVQm}#4v&&Y5@;n)@|{-Be2!%v$% zaL~W#nHtWdU$jkI{W`;cs}ek`P1-Y-tG1n9&WvBF3wKnBCr4%usBAV-S%<5{=KFl< zkTQu@$03t_exjnde?foUV1L8Oh$8lYMHt%>&?z*!#~$UIIN49IuZp*Mm+5ZjwHK9c z%wI1&xn!i4>$&D$R_-;%WirN5bNk}=Q*UIW`iKn#HS%7yu%~u{+K`J^HR7ZoiRAiZdrV71JDIeuBH3Hf!U*LR=ovO?>uK(>};!TZbF* zZ^u6S9Q+o8+_u{v=Bpn%wGA(4)88&W;kjp@AqrmIvq*RP>VGp_M=OP6_8|yszP8WS z$)}dx_yPqHg`Ld7n%11a!IM+gd06*KPLH@T`x<@R870Ag_O+UwfVN zzx%qohEtdZt$*J7#n*kY?E(L;36AUQi8;snJ}!OZ?f%Kb9T)Rz9mdr;cpScN1lr6JLXi^Mq3iiVOK{QIm|z6YpZ@sq!sKEwBOhxH@V za~wN_1F9R!fZC89JjEN1y!^0pkc0b-&a%oUic^ZdJbz_Nki7C|OZWFP3iN}UP*MOX zk+IZiz5pYu;(*Z$2B8K4T>oLjn?kgafDa6Q%7LEMO=$We#rfi zzNbEJ444$5R`)SG!SwA!L%!I@W5*RQSurcTz4+3bc|AFZyHPo%GjN~O_Ay>g-4A%9^}2FL)_xopo)_^$ zO1HK5oE_?VUwPZ|{f&Nnc{=!z@n_F_98|nGaerS}l|{R@zbucF`r+R1%CD;K^a8fm ziZL&c`m|uqTkx9I)3bLMoaMRR$YbcH-7#B~VV?EWX0;0Is$a}rzb0cCjLzHl$+~)Y zVjmxqo*H(}T1`2Oj^}P?`T}>(=6*aqWOwyuSWi!*&J(+26!n#nXIwn(vdv`+600 zzwVv_cYUxQ;OX=Jb-L}usk2slheJAh%s(Abru);z?@i82wk_|SMYJrUoz+`^Ej#w! z@WeF4y9Ns%u9NIZk4BFYrf{d=PyZf2S9gId4;V*moF@4H^6yR=+#;gf!^<_|^nZba zw?aJ@OTq4TJ)o(E3FtkzD)4}Cl1oT%QARXgE4stAN%Qu0fhY{nUBBnhiAS%pne*f8m6@O}A+Y+zu zu{=-M9?aBtAL~0ryc^4)ugD6bRc8ne>bR0e%Hxqg)pt8dqO4{@9R?S88NaR0Y-^S({`9NOn4zx*CF_JZXp z+xq$TK9~1%H^8!{zWSxP?thF*nsz!ae4<{WA9qIOAK!=A*`+;7>_2_%525CZ%kxcn zOU&<@@^^2)ujn0+8GNbB^Dggmr~vVoSe~73?!Ww=9ZDFGS<>8Zi$9g;_Ws}6=UaRD zZ91>o`!D5TxdMUh^TXbay@hxN!(I$Fx~)HI;qQHlYyJOrZOT7HMt>}{cgds#jqf)B zQ{O+!|E)c~wfD{bzS&Fii~S_}U)k$#_ArBRXzagpryF}XR1ip$4v8i+{2QCUvb~;z z;el?}A9r7Ofh5l~>i4=|oiUBe=KEj&O-n#+MO4-$n7-fBFEc`GS||^}3rM1L0x5pE z5tlUVivz2M-=3Q|OMiQb(*kA6(Shom6)e>B12Sa5pwlOHS^B(NjnZ5*kC^pQgYuU6 z*8p#9Of=b8fYSw0B*2^tMLv*p@F3|C6((KnJMf8`=F>HFK!q`%ia5)If)7N(^O8ET zSXTL4;VAJOVk3(wb%`j` zBBrh<%0ZFD?m}#Ju`Jw5>;}cUa7SXVicRf3i-T3XQ-7}#AEG!???F^aajJb+e5OUe z@G+*dtZ5Rs5o~Rmr0x^n?&+d-SLybPK7pI%YCdyoC3?{@c^b{PBL?FNZJj z=e^r6H!t$@9{QW;AgG>_0zcuzx<@^@d-oV|eWLy8TrUR;qUzuAFuC z)YHS~ZumCtt-cnc^|I65jTei@<*d7TT6mA?o0tsLkh1-yfBpaeZsV2)s#OJKzToqv zpad6nlO#E6vJIaXK3L8W_KOAC32~S&e4y^axu@98oNyz0VkKfngT2$hgH7ZM^wYAG zi+|4wcvYcdU(wT0=#7Yj++P9nDJd)Dl9zaFTrQ}iC-!zJd`?&r+Nm|qmp<$hTjiC!JTIm?F!zBr_XR{Q zDAx14m&R1nElE>SsfrTwJi3FG@S$g(!IsMDk>z4ut|}1Q3QjB?0wmi)13#L3#eWI{ zdHVD#a&PV*m!J8O0Hi|*WN$Jck`#;iwoK(rMRzMi`5&6NVP|Rr+0d*5kLzJRDFn5k%4-T?NvHobW->aes^M8uKyTCrN;J51t z?U%6BzdWOuH{Z{9npxo(-#_Y64ZGiMNBA%?{SHWhc8F~w9kMib*rNtRDy&#TGSR>u z5Y}kNQJ*K30&?$>0A%UXre5Vn;`3KLq4cPW zx>^a-CMnj}+^c)q#((yWk`I3csqb@Y$JJ*=%BqrqNq36+M2JaW`98OsbX`?4HR=60 z2_7_UJT!B!hJOXsezC{7RQo=Bc|-WZ@hdRnSEl~j&R~CKNIJIe?`kM`&m|EIsSM~>-ybo?eG`AOwES=iLyk{RB0rm0k=C^!g z+iu>wi@$TtA1(gHdklei!@qdXCLQdL%VvzrJ8>>fA%F0>SMbk@rIlb>MfG35KaL&g zOnp`?tNc70`70P;8(}^FbvVZH zdf|wbnMdQ0RvSeiN)l|>Nl|Mc&)PQgt;9KT+uY-v7!X>)C-Zs>&o2TcSnZkLtAD>o z(EG8lvVRS(EeMVsQiJ>eU)B|};EftAjRcW~+(P1e|GjDokZnm;bREN2+ zma2j!!+G=%Hl}cHzOG{k57m-RG)cmIiS-K+OQR!RD>J99fc~NGb5Y}SN;N4be;jX998a(uLvV0xhky8}Nj%Rhv#xOeWHa2|@n)Ul$PLbu zsT0Ws7GssdP4B-_Zs%!jsH!12IKd#O3?ty zk^Jyig!w)^c=fM&VP)2qKl5_mhoLvvti zvS*t$B;jH1m}_FzkALp>j;T{Kzc9U-kSZ6HIKq0tXXCt+FIMgQjeVcPq^OS741b7i z>Pz98Wa#1k!OVjxt_cTC`cacII93ejN#dIR5|eSWZpC#rpW6h}(|3PGynmDK<8MPG zXBErvI`DnC$FX)6T;+?RFrS=p+p&=1H1eE+5I2LymH%(p1& zGxDxB`6|o@keGa#?*{fJ)Ol|VL`nXr&xP~Ljd``^g-1h~!d|#N) z&G(xozrxRDliz&aHu=rxU6bE@elz6&!m%9B6yJ+OgmvnaSPq<@G2gp+FMoN-*StSz z^6BRN6O-?=d4JpFTQ%>0$%pGs7g_wSViAwTv=nN0vHymsZs9hH zTSh+LLz+hmUkMO-dd<6tjwe?;BAFKt6psmH1cehO&3yooy( z^w}1kN5xp4B}%(G8t<=>hGM135ylcJCcdydOHH4ub>=FKM zi0|vmKRvg?zkdmSzrc7V&S8e<*VKYUVL3BcOHyGuGes7r9u+xb9H%J{{=UTD=PM0 zO9_>!pUbtcn&(e}^kJOATEh|BB{k(%@T^D7kO@9_B^$f?mv*G^^bpVE z$sX%B)4x4uhUHl*Vqy7>ojD19e}FL_MWQ{G*#EZpKAHJz?(5>Ud9Sg+Yts%(+4R>~ zHs_Z!O@DOLZ-4vU;J4>e08c=$zm4DjrQMqMv1MbY|Ehcx@I881KhKx`kShFZ-m8!K z*H|AWUsEnANp!i-0=7rH*XJW0RFN_8HF9~6HAVz- zA{5`C(YSw-?;y^I2mAo>o_Ly95L?7o{0nA40?BKb1tbLCg;`2M%?B_mVW{~7%qlVk z{s^-HnQ9v#;Y2EwSeFP{bRfw{47Ll&9uC^}O)!D9Cy@~~>P zK-OS);VC6+Z4cn7BrnPy;w17a?K^lH5d|17D~Qr)tO?1LGe)B=q~k$a;UE%1Iv^YE zNLX@GDrbhBeJh;>EHfO$m$2NEopv%M z0Yv+BXV;`ZKR(ak95CUooo%&r=9^WoTU>v?kGQEaadv`OFa zI@Fi+Sx~p>=QjQPEq!oJ4;gpUC%0bp*VS^dwCK*Rv)yAq+il;T$MSmby_DB`xAc1N zBuOsmHfhV2@bi`e9M~{HVP@;_dxBYU?j) z-#?mN(}fPMzrO*CI`hwL9NlNVxpm1~2Uk1Uh*Nld)`s`knAEF2`O4m+!>V_muV9|9 zcI#@{K2Mz5XKdQvs{`Gp-+p70wCR7L+FH-fyk0JrU)iqx{v68n`FYVjtemTDi#o8{ zvRSwHkZpR%(l))dvrS~T>8*{6?(P2Mc7JPUqT9CPCh527Nt@ogrKh*_jcU`|cHS&m zn_jl>*K6Bxlj%17e6>T-4{qt_TY6jme%l8(>)Z2h_0w4SgU`>Vk% zy=>EO^_SB&{Z@bXZ9m;?Nt>Ru?S3EK((PON^?vu(&r9Cgjhv}Ay{*5}Zl5pP`gckD z{L;2>>Fx7rTYsf!>rZZEn;y2$7w!As#sT*>4wNn{Z@NF zyWJnMTYCHZtZkn%Y}+Sl+sA+9ZTplr`;J)q{=Ds*rR?3(vo?KlOK-~;@wR;YR=!*R zh}!mb?OXdKZTm#CTl($&lff;$Y|~r&Dd(;IM7Q>{+x9z)>-(L9%{4t@6Ub-L2GBF1 zgXZ)3+vob*XRG<#JTIJO*YbA5vv8NYZMAadWx9JDkW%5e*AMjC%65O-eA%C0`(9VJ zzdgq}qDk*X$2PsZw%weyzqNtu{MirMu_LRm>67I8?k)+hb6m;$*LRn*wS7&Wur_^g z9b4o~^>68_O~3XHIm=0#ew_nKKDf>ya#nWR^lRId_WwiJd+orkBU)m=V$9QAXbk0a zFAP|rlyjC|4CS1&4F7+-u}^oO9>4>*o`hN;#S&N~MXvC7-?x1k`4?_?`QP}z`i||( z_}}=y{u{l(|GT{Y@AChe!}@>CVS}La%s+77zx99iO~+&If7JgMj{lAS^uKZEZ=4nW zjmQ7OAN_CqpJT@#-1|3f{C)o)_HVrZ8~;b&{DTV|^RM-P^@o2iN&O%H@xN8`Hm1ja z|8K{3mdCr4|Ls5ipZ~L(r{jOG(!Bh?|9LgG!~36V>Xp&|#ic`AZ|Tys`G5U|?#DZn z|2h4sX86Z9ciGm>@t-6F{nx(b|9Ni1e=pJ_{m(xjc+XXZ{8yIlC4v2q0-{B|{s+bN z&zstxEvP>q{ZD^>6uTb(f0R9TWEh93cP_d0KUT5(=PtbcLzvd)t3R7re}bpuzGRVF zuiZTRy^G+?p~1Wim*80qLMLbRv5YqW5;*=`{?hK}gvb!HcW7L#0SCy>A};K!O`}-{ zv_mHOJyG!xs0i#iCH)#mT65rYtJZDr`LR1i>5FO0a1wu4yY4kjUB+h4{&_LvuAxxV zo3ZIn0pufoX`trs72$&_Sc2wYfkk43Zzl7po&(I19?Y%~$D3ZrEm&-FGkzvztM`Fm zB9pe1_*2Iv1wx8hr?MT) zae~aCh;4raC6B|3Qv`RtPJB!}anK51Ex4-4fWo+_YiRXm2hJ3FipY?* zEc2yLbK;zDrA~m7Di8!hbOXr6(1^1hC0ll0w!`dv+$l&kffI0f=ykfYV>1K%IrGN^ z=bln$GUyWGjUKOo#cgP>qYTtV1FrN-0$uD-&0K#C&5((qILq%=q7vE?1+L=!;E;U= znsuIPeVm%LI_T|)ywurFf3@YL!-dW2Re7Rv#30g`ojRM(*%9IA9926xF+SAb>Sja=v##QJ3&f6gl^h@0aVFx z{Y3|Q*bRT%HTSa{8PTF9r}*WE4chArCo^}KPxFWd{rZXt zn+29(Xt$%C}Z)$k?@bi4+WGP?j$t=BJ5cGYG^vI<_YU zA7UvsPPqEb(g3qA2BsE3AsBzsud}B~Vt@4+y7kb8-(6(pofx+K(?zJqSk%*HoVi8T zH<&kIZQXudks+0i@t00!{b#|aI-l>K_MYT42EViEQmM<}aq;_P1U1qLcr&ZwP`__) ztF>BLaR}()*;LZev1@Ee1H*0R1`GJKnVp~SsmtQ%HVn@W>;^8;BN|g!f z7Kdcj*}v`w?4iyrw=Kr&yrwZeRa};>z0Q^FYN<)03s6cTHRe2n7NRI<%q*;tmB3_6v1rUuY2c_fhP_6OlF|(J+x&@Ay=+b08@5dI_0^z} zDU~@>7LU(1KhF#*?}6^8Tg5&cV`YXWclrB4SbwDI}lu#(GG>S%us*$~elTo)$s6EpI3 zpRUo?vM1*G+Q>8Q3AQ{|Jd64_`1{jn>cOnvU@aM=JrJhrwLj^_p=IXVinH@!?-X2% zqi)eeyZ!)Zg=gO|nrBy!da`A7UP{aO`XVDuTI20ZLIGSH?LBB=Y_)zFKfKQ;d>toJ zBN_V&-1d_Z7_om_5V@jF%1ksT5z~`A8f8pJz{K0ee-%#}&f3sjA00aXHJYTzE@2z# zm^Dcq#LC3i<_b$eptb|7^%&6U-*RRL;?fneAzJ(~t^psDq~g$bjEI=!btGQ~6U})p zIuUM-AKdla(cFG`78*dq`%Rdt80AoTC0d)fk<3UN{0e`gD@rVApQBQ%@8$1n4S8}8 zP4BXI=-YR?LWeW8a}3 z*tvMq8o?wkN2&T8DH*8VHJkcEoJV#M;qgrUQmx45U5}acexIjqwoVf^1@FTiT~|8f zjY$=2{Oo@+1#mUt8Ihg)*ii8qg6<)75;}e)jXWx&Uf0Vyzo}Vd%04$1NcakTSav%tnHBTYq~|Gp1Z(+82KZ!q1v&`)0kJOx|EtNOr6$pJ&Zl z-}o8j*Ok~O3=7u}b{YkKe4HR6PTSse%QENf<>P?@>wIlAehP{yNbFNy15o7ga$B8_ zcl4&^!r*WvuC3?GUjuW2+Xx9V>Z=t9)lCe|PI_dwExVe8IMc07kUhtphWuRqWgjyw zfha$;ThAu@SZKE?dayVR_s)bdx`VSLLN`3hH3CmN5Ae~}gFyhw6DL{Q~ zes!k0&PN(!(};CzTcrt!^8O7}s_!VNQ{nCfzwYsl^|KmEe6iB$vfb5ws4GLX&vLaw zqLsvswiFlv?ok@R!Li<^9wRoEH2j)&P%(d}+Mho{F$Wb!oDE=Dvkx+V`+L}DxAV7J z9E*CJ44K>Uw6Wd~*x*Xig7&k|;Svc~7wYGnEerw1ee=m=cN(X$fIx-vJBBIO&HV%U z7}D$Dg_~>#HL^%EXF0<*-e!Y_E)u*&W&T;uMbPv1#454vKR#WV4t!xFX)~ETysUp_ z-uEc#+%N{jROWtqEoO1fA;ZoD^YLe(_q->FRs>`ck=nr}WQZ;AkhK0(;zC7`D@TA{6FX8vtAtyTQk z#QHS-Vco|}-->YQA~EQ`Sxq9P^$vf!&J6TCLmbfDzd7J}!-y|n( zdD=K+{oPEk-G$xlYoBNQ6rN!q1ODi-xcfq%i3g#n7PpB1lHw%EHBY_76|aAeil=Zv z7Z*05y0QYKKUXu=2O;apn1wlGE;jpn?2Bt%XD8L**yQHkBJ_4pWL*dwY<#?G!+6-s zGa8MX((TQ&@{qPw6FIs3VKSI;C`E?#Gwg31a6C}c?Sq~0Tj@1}&;S9OM9U|SshrRR zi8BcLQg^)-ofRRUrx#<~vnGFIZ}dH>1YEG(q{wrj`MJ1TfeApE1)<&r5~Kd(Lgr0i zyDrzx`r=O!W(4T9&#im89c976GQAy`r|2-0DLz;9Ebd`JWF)l;=`a{)(`9FrI&O4r zsU$-(vfYgsXln6l5|S2G1Pzn)6TOY&qr^%ALs+KBEv(K`WLG*d_JV((hP?`;ZjzX5 z{N5wuPI#D}E^~R64ax^3cGFqdvR(CPAhY?$xj|Y^&%Dk?H=(+LIFZ07C`6wZry@&` zMm!xH=g7MKMtX423UwW+qAu59uxphB(uvMAxkDy z%hE-LBXZ}wQlS^}nJa%^owX5bcNXM*n|{qPhy7y&mN9G`hQ*5wAv4ChZBE6TNC&kH z_a6o#ee)cE9%~}9HbwdqQ}c}u?Lrw{CS$^gCe7gGm5QVd=oy2~Qr)oxvD^iLC=kfW zkarv@;ak<44RB8QDf8OH-t+7R;j*_ zm5up!8|IF*>%o6>;hlEiFFV_(tphB}(TA_Wwlb{EAn3>JeO}SngK2^O;~4;2YS=Td zc_PtkWo2^92g-*1yAW0CWs?c5)#BadD5dNbvR2|V9Iq zD3@aWGpKfUSqcJ6*bMsp&Jg9KogBfr{uT68x;T~RSdD)YP9MthNf}g*lnEqk6BT|E zjLo~=lx3sPTZmouL_cds&)6~V(r!6eeRh-Zj07GK7I6p1Hk{sDR5DWW6m{A6Ky^Q0 zc!CgDFF>zQ{{q#Ce8;%o%cUctbNqce;N{vJMj}VvpbL9h&sR$CG{ep3^G%`O_yGi| z1h1x8Kk|RPy1c3}s=cIsP5rDDVo%w-T|~-uHIWpnNH+LBEcbrBNG=5m&Etw@2SX0p z2MaKK$Z$A#PCJQ7&tPJGuhd#R8qU+KueODrOwIx{N5r9++mSdJBVuT7-3VI?CY!0L zY{x|sNQz;g+Af~;j_gm=zUTv?zCdFLVe?f1(=UHf?7OhQZPJ2;S__}(LDd^mL?aq8 zV9e9JF1hCZEHMc9acugT%$I`FK8;OxSv258bU^1W-mA*_5k8)t;DdGGpGo`Z9%8rS zo^dB7R|zm#8yV<(7UubNC;TKufA#U=zuZTEzWPLKCr@0iI^p?~cj@BC>iiuJCs2Pz zuIzuPQ7SIJszRho%CYQc=D)!Y5(zyG!gB>x!)9%651JtQ+81g;cG!*8Xd%T9S0xkI zA+obE$Q2VQD&AVA<|(e#z)hoT(7%hF{^jwYMN?kQ*&r#dT75;GsDPitMO{WX>B^Qo z18uoRdJbJ1ilf(AO<*ok%*?$!>P&77Sk-?eC0r0`J9EysR`E?&h(xNpzHqGWV>ssz zCjge8NX)mN^oH#&3l{4%nv$kI7-)ZIfTAd4B;Qv>)ciu18ntUEamo8`njgv$uK}}& zqhSmPgBOEOh{oCkN&Eaoql$6FU7~4p?_ULyoPKbaTHRSzTm-Td7*mh@UIk#-=7TcN%8e*eP>H@K1 zKM=a$T~7TZ)H7|A=p?cch)$!4Z3gnCS!{*ogHUP!N~C1Sb{zw-HeIMZhP)1_N=e(7 z#qF7+*hLJmf5{VJ#|}Ikd~@2OBeH+XN}`N<9>d$v#3mG3a@{X6RB!!7(6qMEJ*jN< zgL-`W;?R$2;tce87(H?z#X$h`Vm9+fq=G^!xR(ZD=O>X_IUK`-&sDctFO@FkgiX*Z z?7bY;u80EAi2!0(o~xm{Ld4L3Jg7{GgqoRs7gl(=@l{Kz`P#Pq5UM|8Ct-hm!)-qQ zB8n8{jBIt@x#F>(O@phQ@9YZ)C2%?i^wGfJ2eJ$XO_u+z0E#Pi0*z2Q2Hfhw@kPUE z>5v))r#T1g2m+|+ zd1de2If(0^0hE>D_c&NPhL!}#+GThKA*Ut%h{S>w$B_@LGDDYI&O-?98yG$n4%}fM z-Vg9ukVExN5Jf%bIziEwJ1HJ`m2iho%mR8}r94YGhXcuRQ1iEdvUMv3%Xa|VqX9Nx z`%~bRYk!9>uU-YbfP8;^?H<*e5lU_En|8UH*Bp|aZrRo;zs&S`VzV!o!B?;ioDJDe zz&xN#e#4Q(7X%>M%k3G5`3d1?A?P-^poI{65iC2}FRe5x&=@d%3&So4Tmy-m#gIIp zm=wslDKw+X9H;7Hbi}OqkT>fto6PIcQX@n%JbJ)dNHv80T77>^T*i;mFlw!U@2V_SXqi$GHjxGZTI%P( zr8i;?@>rJhRY`xx{*kbiI!?)jQWs$d3MlM=fD-r70y30e;E8-4_b7U9}H%&36qT*_+3+k)f446}-4IZJ) z9ST}HfM|3EqEQUFlj30$UD1Gg#`;UJF9fgO-c%qCJ=QSzcICBT7QX9|am}fE6EHaJ zuF89@sV{#eU9!RoKrt`8>(w#M8R1iCIs+m!b4jDBD}xrkFhzq-qE7D`KsY#MUW`H- zdOw30UtrWHaRUS2+YUcu$_V^~aF=kuJ+Xvz1D45(vf(+veqLhW3Skd+&25iCj}`}j zPKLBdU40LI+9yvz%58eZY4p$Onc-$nMos8G^`R*vbx^n2{9FTY~`DjY9*BpSV~1V?$kH zXXj^3Rq;AWtS9eOz>vEPJWSpahjL!2#4xUP)rCv6%Q`%el;HZhGEw?FRPvh9vNq?M zy1jphqr_08w5H3x!*d1E`UaT&d@W)W{3NyoqA29eAuJwXmFb%T!ciB2 zq1BiU$ODo-04vwZ4{@QJafX-j;5(0gRm^|YC~Cu&Lo2V}G$EObfhj9C=)Qy=gcu5LYCD%mQzWXE1F^6j}&U#eu1JsfV3d8SyhU& zr0Q?6w_Z{o`WG8NoDjwH^Y%ut4*`EV$5GTwKSRzKV!^nE9lH!~GinxKs_WB9gUXb< z#+mA=#-0!eBZvsc3sFmfDyj`jY*apkl`cITkM`4@sj*F`*Vx>-Pw-^Z_muil&wzgq z@Bdi2EXfCXyMP`Qh@MR`|E*i8ia%1UzNfhKUlFfe3C=&!_@lW>wcP_uv6zYAG+ zZLfi_-CFHotPr7&vpW6Vr;;LXv65z55)3Q7OWr=2$F*ALvXrC%exL1jmCmP;H=e>1 zvKp1l6MI;R~lhiuv}eD$7r&8uCn-p zRuIG~@L#V-fToO9^$v&KYPEl5ja6xRGHEFlAm^mi(lL@u9#$n+xyHuBFdTNdug!~G zga@g}p+3w-S3_1)LUhnJRBR!Mz95z`aX1fkQGA_mpyaW7$fkGLJCRpZdNX|`76@^f zr9QV(N{#rP_EnlkDSA+=FojA=ePs2^It(-Yncb}$O_TG+_=0PtH-UdKJ^-3M`Y`$- zGvj>bC3RDcxu_YEv`PPLKNX;Zhby`|Gz&=jaJ|UE=ZlR;VdIxI(-uNF+=n*oW|8w? z#7@Bv&Z2a=gK)y>jmo2X|H+UQh*#;3-WXg$BZ@m>c_g-u!?YgCrFb*t6rgLLWaEV;x zNuHaYZ3{u=he0m(yuFtrff7gs=D=V)CuYdQW8>)B`T#4;l?{J;kzWaqf=@4~2Kur7 z?ZaJB<(+3ewQhTxP8T43zJ-y(QPRz;Z8EYgr59uG&zs_tx_!LbyEF6#hn5>M#cR_F z2Rbp+_eGd6hUi41nko|c6+%@~cA$YYqUuFP+%AeMVa6|GPZed?8(qn+x*&Z~qE?Mi z{E2VOQkB4)vd@3~r1snI8A61ay2HUa-F_vbtV2gHuodJbdVU~JeMw=c?&ti5h#XQN zJarQdE&V}b<;~9qX8`>!W5KU65+h~#XgVbkd+DiAIvrJ1UUIR4c8Ik2RXw5>?t`3p z^XiH64Qb-7+Vi^opjz@uIv;iPJ-9bk@M1fDtU?x$U$}n*!&Ge*G!%wIfy}Ghdl!d6 z7r+FEqLtS4Z<3Fpj2GE7-2zuv`nYv#{xuXo@Gp}PI&k|kGKmCtA;kn^7h@#Vz7Y^d zoMq{DzcV}8^^AUWeLX;HN35{Fx6^}+E&n7m=)mm*)~N)d8Yn*2E1Z}i_5QKeI_hj7 zF5^0eFob{V*BYL4V#!#y?p^C-wjH8B6S0aito9rmI1^+3O+WgJ7WCi#gf3wae9sW$Pdu@w{ZH<+p_r7=2sh=Qa3xmP z|6Mj8!@M>TW}|Qig3ab~f|%vrzU;{(64r zUw7&Ll9xrO1u}ngWpwD?z8F9N=!=-nH5q1AbnzQ35i!0O6dOA{< zmDOZ+Oo@WPyo&&<6+VrEV&!C%NV_XB(t=gOXcpvLmx!p!yXh8_q)+li^3G5^fk-^0 zpHF1k=6C0~WX*YEZ)VEiOxI#B!u~pnf&a|473Y}{uAhIu!cN6+DReARnFPZ+5E}d9 zc#c@RH;AGBus^@24k0My?(4cV5i<8dRjxBt?Uhi_9;5AsqFXF`Z+b1|$J#vf7+y7M zK=0=Iz5fId+5)gwWDzBv?pCdf`(x}kQbis{GW#IKoAf_+j*^e_I~7BHtCOsKNs31a zG8n~cZY_T{0HqkPwy3KJbhSbPnUx_2Qu!&u?C0H^=lAiyqFU$t*F1sSH&NO+Gac>? zBNQInwziXVbtfqGicJ+0J>xx0na5x4!Zd`6PljfO6NakLt5wlR$B1{7UpHl)bN}eE z(IRC5s05WxQECUxP^ma%ElteQfYg%K^0p-XpsjzY@<(`y4I|YIQ4N3xb$iWfU7G89 zWygV@jeHh9r6zUw)UL@;8D~sn)k|lhfk1E;jl)&#-E|IyS%&j_2LJ#8_+ej2<##kW zLC~2jy+`(Ge7n9?snI&X*QYp;zS3oTV^H)bRO#-Y!!b94s+fF|Y%_LPeYaYb1gorv zIuL);#kj%P@H~VeqY+s|p8`ZJ+#Ic)t|Z2n3dj&WqJ__rk3mz8%0_hUNML|3NZW^z z+Xo{e3O-nBi>5iI4j!VaBG7UV1^t;NH~@XxF{UlFW=s3}?F4bwL#3?>+lNoWFDVi; zv_?f5#nRi$ze%^<*=4-l$uUMT^n+E>FQtF-pSKmjAddpknv(Sr>%~)rl_>Ctb1_7g}mRR;x}ZiH2OF=M6x|8#D?;_ zOAn(ThHb&({4l(PNyTPM>QBU?rA!xcu`(^JwP)q6=I!6%C+3y4NNS#~RDQh4j@zAI#k0|K5MhHnbcd z;>qYZCLX5x^5P$(`(QIhqBec-i!UvQKQkZ@3?p*2GD@Nt=FUrUzdk05#twK2IiAFe z5cFn1TQaDjRZ0n)9fK;G3vmdm-yP&hic%&&$n;A-fp~tD-Fd+>{8Ztv;nm`U#zarAZh9lWTsWPQ78=k-Vgm z>j0>Tb#@#!S1Y6kNAfvo1QrboJgl4nnM4xnKWV&j-df&lazAIT11EogLcVr(2oI2q zh@u!#9|a^-*L`Dah>WQ&1+dJ5A`M0uPug-$7oNvS1NAl^X9j7TrfBlyfz02jYu_zh zU^@@@>x{`%%C0t*31)6ASuJ9PpjG=BlOgk-bw;3gPm3{-CU^S8279<$Y{{t*k-MUv z(yZdmxo5VVR(SXKp9Fv4$&nk%bdeJP6q1UuuJJM4Y9!5MNptL;sFH{p!w)ZY@N3XOKydQfIl8lDYfHJrwS*O~iq z&h+d2Y1iql+l2k`14VSRGo@SBdErBv?gU9A8v&kB=;;&he3O5O#X=0gDa+14k{?hF zkTUNb5M!X^)!(2?q6JxaP3-WSd}46oE5g zd)_uXnsA=+%}q~(0G0bcmt+@fWs?*jXlWwU`Dn)#uonT&exg$_q~gYCIpr>kFjDTD zO`%56Ouabk(-(iw^zbe{PHQGp+d!)np+P}UJAU@0(p|UW(xIu2PSCqjqebg3V!Jg> z2qHO8^Q5BDh`5F@#nOoq>_ppC-c{t7@;FQ`>s59b8dtfRwJso#vxq#S@Z92`Rpf5D z;Q>1?#=C_Z!2_s3V1S@VI;#4?a+DTynoehau}Qko&Dno6I?o}T?G+2>-05Ij6G_p9 z&?Y{5!m5$}YU4V2GycRPD&6WEE~HkpSuu(iyV_@9#0^Oh)M7&spr_^n*N z7xlAFm6?Bi2MjxjRe})`d)0;g5+Nk7s(siTL-qj75oDMD@Z<=nv048fcJ>zr+^@Py zeC?X~JZhO=d|v;7cV>>q?z4WZrZZ=}QYUMx=Pcz`E#=n8LkG32!y8y}{sJ!gb$7q( za_NUq^a~5iSSajGIv%1MaPkt{8LL-O+U;;C0Vsc`>BGaJlJK@RVHo*dEPt2h&7m^z zBF_8ekh>J=51{c}bEP@C_k{={1Tx0>1{Hlw(gy%b|K^6;Wh`QEvNLU2(f0CYR98&{ zb>K$6>Vey{b*1wp#>`pqn{{Ec8#b-918&=+IUcYwS(Cyn+F$F1e_n>5>H7SOl{4jUY~Jr2qi}{Vx;NC|rMV+Dwr}~e*1sotCf8F` zHfA45(7h^pw8=(kgE0p|#2BC4ff+_3F9QcuxIR*|`08J5(<5&!I`SL^jK%&~}KZ=(&vIXFO@!*lmVJ>71dHuu|BO_KX zh7F_cEKS@RoPzS?2nD!}DC^`+C#J;W`2J0M{eU8SdLq{#`--@6a*_`iUCxSh<90~; zmu=QmMC8z;+X+Z{YWb2iq5KeyHQHg3TRHC3zhZW{3`7zc3ZKY|kUcJn6!#2USX~}<2Cwr!IW?1!%?c?*Z4Hu}KzGb8%3pr7Yx3a?iRD|QHKPYBra|gkz6G+1x zGBsMZikS#YJ^L;}uyc|(t5Iq%4i>;;CEeX}iK+w!7!zSG?XH;Pt=G`foKe%wMX-OnuapoyOK z-t%CqO(Gpr-Smb;!U1GIT>E(Y1kApPFQRKMXg0!jVi?{%jf>Oj5u~@E0w`)|XZc=# z;UGIpqlt5h^2*0)E>X!Xzo(pCN+VECl4XW@QX-XL9FAJDBLsZRQ;Cw8n-hE?#^Tq?jR`h7#Mm`-`qaEWY9F0h?{edO-xdim= zq^@~2qnf>;#fZ1{lvL=A1_FWb?NjsXv0 zyRzXS8Y`=kgNaCr^oSmnBf*IZ$><$!_1_JzP>J&5;o8LOA{BrF>d;IYI0e6? zhhF(UUP>-W1BZ5y+K=hkBjmMzdT++~3{v#3nkU@s&@PEK(Qw*o_kaURthgoF>8T|A z9136gCNjs!vb8a4>JxYL>_PuYu2cX}sfI4P4hHOk8~<~D_e__Pn@a8@+l(W|F64{r z3?M?RFobqCZt5y*<4)ZKzfqzsF%q8bEWOtNq(7$Fm& zR=i3uCzv|9L#$?Ie&aR}c{&V^P$h-U{Fl{h)EYBb_rYKwgKz6Q&i-2vurNLs zBUf;}i;7L81G}j*FScNRN~fQ|(0C%Plb)wqA9}r`8rM`fH=C_xE|L-)TS!rce4Xi> zH=7NseRDhXXP?e<&ObdvnOMiTWE@PQKT7YIDF<(Uz!d+O8?R9Z2YYogTsri9u`_?y ztmJPjHKL5&wX_N(y`>K%Qj0zE0q3fMF>?~lwM^dZpVJjl1b zZZ2vVqkKEu$w^m#opEebw|0Q!hY!E?lUa!ku>`BhpFn^2TZt!=FW%iDt&49{VpqfF zMO}@RUz%b}or=}{OvS#X0X9_tO$eVsUkYVuulQ)KQj)@T{Gl24QR;~mBe~g5L~%^t`Fp(LTA z6Nv!i7*q*=fZr|ro>>j!ltC>71$eT5kj!TosY_#o?PHO%leej3QD&HqS?+?-6g9%i z-yes?!ePRHoW1-juJKoM+8^7d+?GP7r?{6fWCqZGUhUFs@Dt8DG1#4wEW6w}ewGn? zsJUEMt$TI2=hiOfetCV@-+#s({W;G8(tCaa%CanfpW*MnGUxsmkxk$L!nfeshWyX- z7Vh8J{KvNKufq?|{%4{cV2tF7`ZaM%p*7BGqVQb zwyrU}&nJIu;2xV%?Vx42m|rV!X0VyDZhd7~n_q7hV`z*K{$viZJ=ZYI$m|zL*IL7{ z{jaxwnS}X%jLB#y!`b{_Hh+KG^Y`=S5r75xpB|>P1uhqM?o-b=0moR3LcHueGo`U~ zu_%WLz45ocF}Gn03){zK%Rtzzi#8OGY|GcN%rR`8iu5h(8I8U6PhiGzSL@s4nO*0S zIRb_HaXuE)-N%kaYwO0J_I^Q7c8I*ybrp%akn-P~P zL_^v`+6Y=j^fiCOs%})&UQyWGP0rK{l3?*cL zsD#ka*yt4lEl;`4d!}p>JmVG0K>!NDREzpL91@k0{cyygh7%xyUuISI?itf@#f29$ zX(GrX@(p%{l@xc`{t88?oNUmnl=_y-2bdd|LvviZxI8=suQbRLW~m^a(AbmB*-k0t zhJrta8M=`+)HLh92_x2N$-U>#IAYj;Bc~hR!pP1&WadGH(l}V4NQg>hdYH?h`>K2T ze|dY;UiE!uLGZiGyV22z0AahWk*4f8iL-&TL3lHQ0L}&h0yv>H-+kn8&TqN@=RQ5H zmgY!D#BtfNYp;r9i^XVl-iPzac?RBdztwNWzHN)@@Ma8IlTwMp?eg{%%XE8xYMSH0 zzOFLZr;Mr3<=VW0G15-X4eb`k%t#VMb!)#g#QtU`^Vpat>!Y)HCi|CrydOtjx%sus zTVEaIPs*`(^*K!rNLS>(fmShQ8d4kkMlHMwEcdjzJMDXRP3{Zj#@vV$*CcqBXX}O! z3p7#%*%`ahzM8*JlVhMX?(U3WdPk2O8o` zc~94;Zaa63l$!H@r@?QrlA!rDxHNC%D%H+Vufh5)-mZE(8DT+Zw^h-`K)TuWTEEYh zHc0LRCXCZ4+|SVmildboN zyGEZyFaDa3;-{a^o33^r$)<%=ai--dOJ--&?5;*}T$@&ZH+yH1Hx8Wfaeeb|-a3Ow ziOTc9xxKSzciXu_oAcEJv;FzBbnQA$E*XmAO63b-A2zqkzB$VSz3E6jv`o;Z-GpcL zT#k1}cuAu3bhREpY0a;@#p`P#Xe3T;ddByoR<#@8lUr^?I~pD|XL)`P-w~(_ysDC|s&l7maVtj=5(w2z{%rGWrYxSIs-$4qAgQ>SnRE z-Zwv=W^b~&OlQT!D#os^<+I@vv_QWdryG}7_4z#6c8+`73`4IduZOWwZIu_=lCBu7 z+TaFc$Zddr7f*-C_TY>UcCahtVQAq+icyxL3sDR@3`NL$CavWsRjTr~BQ}P3X{G zKZhO|@5(0aL<_&RZ~IIaV(rx11DY@#y;y0k4)gaRcB{O8wqdtccGIuu;JNFijlEHD zC8s!l-<(^1o*u&Onxg&7?5aim7-jpWc{&&E;fdgx*Vgs$rOvo9oIF){PA9whOWA*A zs@mE{JP5vSV|?_T*O{zWQ@Z!oBw0Kj>fJmS^V57WEd6ArgU`OaBs@Uno_&4Io#)Iu zPrc`R|9oBZkrvw*`y7tOV{w`Bm0j;RW4A+pzQ#JY@Rfq?+OT$&O>%9G0wL(OSE2in zh7IMm+r~+G)%}xxb-TKKAEGa<#DZ+kdgntH2gqbWzT)6UEH&U3UX zzrwLH0DU^RW$(5j;bIBf;|u5ec-g)5gK@bp(0lQfP1^Z%+qQGYzrJcXouZu?yw2_0 zir4ySJoq|1vsL+4jcBS2pLK+N?ZPy(IVrYyCZZofZO{`Jq6tSeC~kE4KrZzIT?$ zhP;|z*XQ7oPZo+XZ;WiY+05TbepHy%7;nq0hxa?FW{G#cn4#Z+neIosC|dY`>(Y2U zPXl$KyecaiCg#zbo}N*yEw)yUzu0n{clSU#&C>r9kjU+yZ>+nAgV#VN=eC9c3h1LL z8mAxs^^CY`usS)Tb7F)Ua_9y^gS^w6qot2VM7SojCN%_~rk+5}^_(0~R#Duo{c~#Q zBfi&?GL5Ha+9V^}dagtgPS6y8#Z84;r|!_yHL`GQ`kTf6Fg_^y=6HV&UfUV>Zd3Ab zT)j)))zojIoln+_R0(1y!mG@w%)vTHcOHro=B|mcN6SlI3Hx=Ml-H*c-A%)o`GRk+ zA|EH{vlERUy|u@{a@6&Bu!5?{&12;gD>wydaTs=g&E@p)cQQ+m zTk2$4Xx4KRvO%2Pue;4iKX%U-uO8?$R?ddD-uPNnsL}hdZrB80m4#<%`b)d$uWoTn zPTV;RvANyW$?Ij4+wOd7_DM23w%2O&H8|Q|m*P<_?f07=k8~cA?0ml7T~3;9y!SAl z74zCW#av(Eut^7pwDRMB$m>qJzNjy>3hI2<N3q$6dwocV-n70&27DaGu>?;+5D{Dz83s6BFe$=HXR;4ruq8uwrsU9 zircjwFLWg=CfC~{OupLd<=rm6wnxTJ;AE=gm%U4LNc{fz$=CjWpdN&~e2f{%gw<3H zsCb#Ajf0i+62BJ%O_{&;?c(Ha@?ec)yxrJ(d@JJJWz)4|MY+FSuDB*BusLu#@3-zZ z2Ll1Cztb-q7W^+4WT z%3aUS>zxs2Tf_I6T;z!EWxq1<+^+=eSN;LO{@jPVe!buL>e?%{JqIw_{?y?4-dGe# zoSirFAa``@+cV>{@=vbxldBoCzCC&->tVI^N8m63yFXxmI5?)p;(gFAcJrPC;6Wcl z?@YPU5A`eirmFaBMo*_Zv@N z07XE$zXtQUHfGLC9xxCA&_^`UOa|D^n+?sAp{FHduSh$+P1f$4%IlH$PTyY0#$J(( z-6DCrS-RGJ7U*rVf6)t9ZWcOr+a%T@HIPV5nx|(gPYX*VCgQ09VKRdvI$Ljcw%(D6 zYPDXqrLVDag&4aS5Mg_?4h|wZAgI4gMja5sf-G7QCQQs&;-JzYSULCs&6Z5H4A}W4 zE1gX!&fxRFP#!xh7y`Bk+^~5V#oX6`tVt^nR*lIJxQlrif7>iUn7JumO$WX*-)Ky1 zfN{%cmCzI_qAe;dAg*@dR|CisLwv{>FAqo{k&AgQUjq28sH3F2wwd;`K=+kU-qp|h}0@vEgLUl4-0x7P$k zTB|O+kXEKh2vL+tps72(VoUHF4+&S(+Ngof7d}Z0f5PWyji-*?mbTH6QV-T$YP5;j zLJAqBIvLitTa#!g3QVM$KDhnbSwP`n2G&My&A$XNnwD7-|JDcpJ&sWB=GJdKjr2F& z=vfpYk;wWF&OSIEdf3hTHdz$uZSr;{9`kZfx^kKr)FkI>1Y%(oM?s8CU0IOm7Bm6N z>NT)(e}@FUB<~3&U?E3=g&P_(BAShpM+?jhyhmzu7##;XqZqaHNm@4yxVoV}Gf6Pt zB0ewu;bE#H@4gK9u+N%tDVm@inEjL+(=!J25jG?Gk<<70+(s)w`#DgaeCGYbKkuaH zpO362Z-n}}_dG-eQLKQTX_v;btcM@@_0G)-e?+ZPVAh%`$g11`aYi)7J?VljfI%A-bdJqow-9QJ$(7QAM zQH#hg9~cNCk7^ag6Kl@}g5>xjoFMdd;W}83JLLT?l-6k=;Ns$!JYXJnZ&PsO=1t}P zf2L_KQy(3bj=V0pSikY`_6oP_b(Q3gUos)>80H3UxkFHWS^8z7``*&d5`sgr)7pvW zU_v2ICUzJdomt}(q$nY45Na1&;-h;Ee{KMRmP-Y`9EF&`h(&PeVeG0%&?WNc(v+LJ z&o3sDIXX3fEdTj7TwVK;cd73@CyB+4Fvjxem&i&|?4_+>o z#AA;)dPFE)>%VP4(eqdSqqn4Nx$ED$P0C$QgFEn4_L=Ef+S3QFWq|gj?RM<6e|s~X zpk(O<)CT6SsE7;%5k`XSsfe%{RH4O!L@US!bHyb=It`_=My{uGN?A4!FuQVl$Pu*e z8T@DotYp3*EJ5{)eGh}Qv;>0haLe>7)M2>S&0ImA-$D~x~iK6^s`hYtC+hrO+o{KKsNx`?hgBw=ARv1aN*-xf zJ!H+rS7?kx9gugLXoyW#IWcI!bbF*cH)~bX##O$(PzUpDZTg(*Sj5Yge{V>pn80YK zWz>M2h<)zz&|tn5s|SZuAFbTvlyubBD$O_v1_I{hprq;zYGzHQF)-1E`t#R&o+0gKPV(Y#jAfrd{d@v1_fVUf8nM=z`$tU;w<%n*Rmg* z@xjlx?xeqcxZgh8HxB>8-5<8+7oYyvvmVx=NaqU!k&Z%LLxfUXxbtN|#yU$(649M5 zqSPP8S653hSLqUi=nSShI!_B5r#Ir$uVZysEwS$DEVT)qzxDB4*E#E!B3Us%NYF9P zFi%&;d+y^bf6WPtf=J5@Q>X-3O_5Ln%DChm(;N;DcWr*!y~Rz-l!=b&y$SP2|56ZN z1~cJdsW&M{IY2&Kb>#*F{imG&?I%ka{*5OlkeIzp=gH-p zuT0%%o8B+~_VYe|58b$MHn8u;m`5gLNy=K)!Aimnf3Y zpKYaffKzIce6`jDCOoQ0D^!y=0q}XKVgGICkx2^V8Zh@s=kBDx>+%wt8BI;T)j*UQ z0v;siHGSwb(mMqc;LF=|a@h#g(+5*sXQ{Q)DMoA_d&RU`F&-02z5-IFHjqpJ?N$!br7B%&GB`UCk+gBLvKSO7AlJQEJ6kNf_T=j0dP>TmkP zm)~)Tj7Qvp{^n8Bm3~#0ZoD4vfBR#-Z&A$G2ehdp`R13xD)r&ztZ^9PxASTaN#>|19}SAAa-m<(Hq{CGy_cL5Yr0 ze_Cp+-meJ62nh89y2Olq1%ZoUB)1GmrodgQvt(sLN;J7)wO{T@%T&it)ER!ApWAP*XY-8;mrr^9cb>X+?5Lfg}&r-S!q!{79ArLF0C@DE*T zT5o3lLw9uPUop8CvOZpLI(pSa?bEOcfR(FdJ2x>T7y+HGJH9ixiXf~^t0RvHf0wm^ zS%COZm$p>H7I;h`sTcj+l}G(9y}Ta8jA=9?GhZ}Inv;WpsEgGm*HBhBvpUiIr~sbm z8VjCXoIae4AnCiRhj!*F>_l)>_`K9mP&&wOoA-h=Ao0oQ)&*B1ipLHMVH4ZFR~gfE zpB;?15@?^S*I@89*HUf|=-99|?Ix zcD|?eaAn2Lw0)k(R`iQ3ovsGu|nGuZ#n01Y=+mj)Rw*vEee`DrKsNq^8 zBGSyfb+=5>5JI!3?mIjebKsL#?5o@>wx?Uje0h+YzWQ2#qT;(eaFC>cooMD}@+f0^ z*FWE9H{Hv)x%Pq?L!#29ufgU`3_SaJ0{!TSIRFz%v_ouiCmjFpoo5?>SP zP3p_zYVicM*O!@;(`A0jVwJzGHK{X%&*F=t7em&NtKqY5avm2q?Hp7_b2AjZAo{CxfqkO&|r@A zbuE^H=TuznlU}|YTxJN$k;(XRFs@dZ>B^>1MqVUrsC0tfn2B38t0)s{asP#!FvwZODGbODE{#rL^~>NahY@yh6pl@HId7XynQ-yua<&Z(l3@ z@r!=w@ng$BJpS80^}m1d{D&47=@;ni_j8}EaE{2;!3RA?f3Ee4Mc1ICXIPbP&fVYo^C8Y10j?Ip}NJa^?A#{~U^*ohy+71SQ6=n}W7l7^_ zvv?oZc6$4b^N*hWga0Gt4;&Bwj(`8qDSzaytS`oo&N3<^ADM@8PgOQlHOFcV!gsMmo6PHP5e**K6&srxdTa>{{YwNW$=Cv8O zY^z#`hG}gHwTe-SXa-9SxeP(-X*@{`ocOp^6CJ5ryNfoauUs&4Ev-$iArcQ9xE!%M zA#L)6j1a=3jmD%66qI1P(=(T88Qff)+*c;l9utga%G=0>mLa&r^La39v_?4ch-4`b z5As+3f1r=OMpR3`~!KRWtHX1J84p05M>OMl-tR(`7KMlxe!v4yp_ z#lCt%yiOo5XFj6@1i^?$p%K6;d6PF}yudsSSVt4!Wv&}@T{}h;F{0rjJ?daC2}`Un zoEI9WQfGvS)4=H5R^~<_T{&1shTP@XVs40$e=fuK5irHm5XhLN#ypV+3XKL|TmvEV zdrabCf`r5a&40()f7toIY<&Me{=31MVv=sjdkX_=1@FW;o-5t^O$7J{=8qf3>x7YGOqXJ`-QHSap#?Uh6=7qhyVyjC!e_ z<(?LoFry~njOa#^K&gbfCiP@Ah#T^?3F$I5>bJ!b)974{w#uCF2U{VvY!}&pArF%>?c9Ie)}Ca@_HX(_c3B;}d^!aQctkc>j7oeeId$CVtM);7NYq3tnhWA&c{xNc%u`(l;&1(?#31mdMHZWid>lS7%4&F5*WSCu? zt%XEFQ2w&p1h#|MrF2C~ET$U4lp<4c1I&AStpX4&@AI4U-tR`=dH0`K!!6t&y)EtW z=nuUA#G1cx+D2Oy^egF4>`pZx#|I9fl(-01`OFn$&g@7V~PPS zZ>e8z<;0eH>LX^~A*inie)S27Il&VxwC;0QI?hn*_k{~{H$Y*yI)SmOj@W?rv|{1BSLhASQHW(j^y=WB=yQ& z>Xj&~rCvdwT+vUSedVkE64Y-&C)+jHtw(d%b#&ZAD`xbKTs2v|(OT^mEc(z#2ph%(F+jNa}M32Hh% z*W+Yey#{n>Qx-U|=onLe}UAsIQ^LanmN!2QW3DRL8D7$eV;4*Z?CM+}aQ^dUM*nL7?l;>qxa$7isEhvkq9hk& z4;zAp+*(1yf3*Ns1tKbWvDioZ+6dWxh2DyJ2R_AKX(e#ca51+_)}(kQT$jHc&27GS z1|tBgb7Q;lNqfJC0CR2nFFz;I29whwMNz2*KRb8vUv*Q^z7JZDlusEzJ2SYzQ-BRt zCBFnY0Yp&E!D8-HO7u;xk6ASG%Md*HiZm=>2%{8if96JjOFiLmhUsN6g{2MJT(jkS z_h|!1SRLh;DNlec)C?@{ID($%e-V$fd?X;~1U1mv*#ma4o6(Lu8gLtG z1aD|r48+nJZ z_OtaKkUgxnbOsRVStZB4bzm)Vxei7G1%mql76`}L7Gr@FkF6arsbUL^J#2KO?<2t@ zfpYP|DY$oP1T7j_m^wfuEGpx~m=UB!o}To-O95-7Co|CPobU!RjnbOucR==xe+U$; z1;Sf@Z6Ich^k5zHVjbvP>L7@QxqzVA(>;Q9@QZej2GXX1O0#E@MUj>e`e=YR4U`4T zT`n_(>}&}6wN?Xq(SXhyNN>&_EC)kBPp$NdCoo8I-1plM#MkScfVIbc=QB^7cO7Ob zFeS!zZgf(nzWK{>oPWxKh6Sf2f8q_0yTmh_0kPJ={iH6E0F~4M{-GJ{8x2YPa$|;I z9hZ`>Y;DBb2Hc8{bn-I>E%^6+HrSW_K;O?V9GwMpQISMGk#l$Uu!8k@J**suU?|sOYv{z;19ST418-oV1$W6aoHsBpSalYJcy`Z+e^`2XAO0bM z;0;3g_6{iNu!;Z`dG2Km(zOI*JCU$~hiNwwByXKq&s${pWE})TTSvtaj|AyjCyFyC z8h+RLO(yHf=u@xbt_5Uiah$g#v$O{EjNQSvhi3s~DIkw=!{*qE?3Mtn*d37G^MhMO zdfoydMa_0_c<2Mx`UO z=LN-60UNrffxdP2f508ULhGI_fThQJE~7Y1$y#W+#D~($JcH;}ZSF>A$-XkjAwY5SXfSOWM!1|G?Yb6nkn<}YhiISrsA(AM>#e+NQP9_49ETX6 zdWqk(ueXD)#kjA(IS#Qt^=d=4PQV%-4W2jDz`~Gkio2f1f7-|P0!kcs+BtbN8Z-@_ z-y1xa`m62rR|o%|&#;#*vD@2nN>WZ8ShCSl%4AL*Da*NF2HN(t8bOhSeBaYYqkr%q zS)ww=4Ul!Ckz2zeq(^b$>gAqR64q_Nft5Y`{|e6S6u(c5{49e=U{{QNWLGG#Cjl7kq`0`S0hC1rUj^k?=b4)ZgOKXuP4C;C}b-?L-4e zleA(ZS9SI<>2)YdDvDL^&j6(CLLOUreKo0)N`0M3UrO3hs@UBU2-1Bf={lA6M8CtP zx1m_t#Q4)@Ph23jz>0g{CiRncz*n@E&zt?TO~jote=YpZlfH#P6}!~{;RYU>-C>bP zA4A&JIDaDyIz`Vn!ijEF75>=GtF+Csy-7!;@XuJFaw*{auXqeg+x+9l5H><3oq^6q zdMWXXh?g0C8TB@;0qbAjo)lF28aOHZGf|ChDRAi-QU>H$KI70O{~FR?{8u!P`UHZe zXkaUKe><{gpi6x%{fd7zZt3qOA3uk0@lyJEd@J$dHwBH$zs4hV(F)7&@OMJ*JWP9g z7@aAeeACmNOTJkJf513kOWjQ|GN^Lw$CiZr(P(NTd9^`l!S0}0d%#xop;ZGYT4R;3 zuq$zf|E!}9=+<7^tRL85u%; z$N!oP`?G%|D^fl>Z0Bd`hx=M$u%dxjOV+&)N!W<&`?4DUivYBbhH#+C(EK~wDRWck=N_ZGAepKUDrht=D)@l z=AFROe;|tF$Ccwy0FuaS)?Y{cweL6-ecuoI>(Bl01E0$4=mXzh#~=9qdi{a#f3G)= zBl7@Alh0n`?%l^2UGr%4vS|bXAg+YlIgX2@|JuWK5)RM@PBe|o6GT1Sp@###huh@c z8yNY9n>P&y@TCuDfV@VZYtl5FeBY`Kl4W)IOQ|{sE2!g=Kl>R{W4A5|Iiyk z`fn@z6il`?iKPA9o*^XR)4Z3_zD+}@gnvta_%}S0KHo3+L+}kB{(>L%{Rho&-C_2; zy|8BpWSe8p*UG&iM8Z!#@K3>V30uqlf5Xc0hreLw{aAj%Dze|NKV{}5fBkq(y*EVq zl4#KPoj(PWbfWNqpCK$^x%BD7A6Sh0zJI~GavZ;4vwqC;Kk$h4zVGfEFI4I$82sQR z>pt)woM77b{R_5~egA^Z<(R`?u#V$MizMUDT%hdPA)uNc^McA4FRrjX1G?RVX6Z=% z!*_gu%vHHruyzQ7WU_7Gf0xY@4Fi+ism$lyKG&{`K(4qVfE4>pI7K+WwE!7@a<9*Q z;!c(;{N(7TJdxk(0SQ_Q3>w9`e5sv08E9bmqzO36bv=0khg{FUVg0Q!BNs?Af9=$i z10X~&O@{ub9muU&tL-{IzctQ~mfuu3ed!GZUc)H(j;n7W0iXQaf1?_p1kAvQV9RW| zUJUn#HQWb-nULQX_J0?;ew7hG13v#< zW(fBuH2jqL-}M@^I!Kb=Wey4qI52@;U;-Q&>cA*rD^Lw4lgY~(3>Rz$T;`8m*-|Uz z_m-W+n#(}QyrvJRe*)8m9YYr|RG@%D*>1;BCCiCG6<{InTc0vKW!L``&uu|te?C{0 z&kZEJL1526`P4+Nr!p998G3+O%KA*c&)+hXfIgSc%fJwyahenoh6ET~mJ9hk=U$*j zw)Z)D$#EX!I$vWhcQ(C9x>EV6C|2coIB2z;k0YNS%lYdZe>MVX{dfRf!ANDhOwMoD zmtAlqeT=!RFM%}j`H}2*4d?{KmF4Ce51s5M1v$S`;_LC}_+l>cO(7Ms-}z_$TK^tN z+G;s2MdESWKR=T5czw_F`3t{hBJuc?OyF>M11|p;`JksSk@NYCEB%a1cO_k}^8TKq zLjF2^{=58?f3f=MUlVW;Xbffw4o43;|3g3jTH-~OaQhxFDXchL|Nr2he=)AR#A71y zX#5l2k$huP*b4ZP_;dhl*>1;>BHK|w!8d+-`W+-44LRNqpY1tPmm*bVsF-k~i&g{!&KYocX zlS#g=WdD&s7LcC)D@l*7#Ls_zF8OD_Js;?agzvuf$Lpg%pzY<1edZH?&+oq)9|bv& zMB?Qt>l21@`Pes)q%m`pzZ`903& zPkwX+e_8=aAcPP99psnfS`ChXmxNEmhdI!TbB0#&tLil&mk1Zka+s@b=5$1 ze|CcVFAfNre~!q3-fonDBd|DJ0CUOzH^4IO0}nY~M@YFPvcI>K2amxBv{ZfvJdxvF z3YdeYAmE4L{#*)|$iWb~K576k|3824O7OTpe~*vudi>x1_y6brk~n!U3L7Ai|nTQ+!4o|XOI6-73IG*_w+yhd4|&H z&qI{lUxz5&=YJidgl?C=UZMRfOr52v;tk#kmoRQOBCj``j3Aew-jkwE$ ze{{ddD?Qtz7Kq6=<{A&Wxp{FpTb9f1qN90rE>Nx;4w|m{VK*Iil}Ylux^CNQ)_CsM zcz<@i;b~KM*ZXa_xW~K0OtJLW;5Dg`2)-zD&3vG{&9Rw3xDeS_HN0l>J=~1FPm6Fk zjt|d!s|2GbjmFD6x)F4A+41Tq3&;lef9}}B^JW~)GQ7XtMvH2sl=^v0CQjT1*tT{ zo@leJc7wpQ{aMv2i`i@G2h&Bpe}9YU`j{n~1ojp;1VGZ3Ll%KnHhVon(@DcKe>EB% zU(G2R9IdmP@_YfQGb`g1nCqPu(`t+TaAwW5bU04U!OXsuv*#rgPKlS#{@j|P*~u|UPm=%UPV=hxwmoGj}!!|JGIdT}@;)7vrKC5QCxd0$uVf8ot-u{|Ef zvo0c{yDq0wD>CL{pzQ|xEVEyS$$KGO0s8T&-)!BTqRXC{b?ZOP+9b*B)j{%5A2rgJ z?z##e;q0|LyD$dA<95XJ$K(uP+*U3J&PIDp!}Tf&Oee4Qyc~4nd|Tff->N#7@1JUj z;frq9Il3$-Iz4E{IHd%Yqk%qbR>2y!-E?)V$TSyPlkt@9SkZ`E zt~9}ZcwoW%HpkszqM+ySJl=hweNk#lx0tTq$5@%kRUc=I@0O$?uk+~Zi3Pw);wod&8Mx{}!18c@e=B)Cv7fuJqgLBq zcMG*Dmb;j2b`)?CV9F0I$(w8BdOGqLv1+^tp34VbpyTf0l?hbEk{`pAxY0|w-C|33 zjxTiz8|{4tV_%E&!>yhPSo-6As@CRVLmHpwOROAoHMmR`hP#~t)<`?% zb4Z%^0IzYXwNJqHf0CmlSIFxT)SA9xyZvgvBKA3`S(0wI-c@pMf5)bEg`rCJm+dkzj&x|- zXUNn=8{NWfrl}cbYp)vO>2;H$-3xt*z4%(3Qqd%tVkeUUF7?0#cyzaN4IXb3y0P|& zi;HbrG}!^IH5+L>Enl{3S%|9>ZF?VkPWC)GZuNDCW7vwKe5nqY_t(jFc6j8(`O)Jh zUbHblRZJj`e^&=Z(2AkW{(WQPih7Uxfgcl81%pZC7PK5}EOd8b;7g1l-UT%t=3&+i zXj_qhfZPkNg3`#T1vkSH5fqjVvkJ4FS=TyCZpbh0#aEI&=lZ&uB?lrV!anH9*X;@0 z)y`ke?yfp~9ABpgn=ZMam+OKr8xt+)-b&8L+cGn_e=^+Jna)~eH+`543B}tjb0@n@ z>Qk^~A{N5=BAFepuK6TPqGD~|HvY^>v;do{>(S+Q{g|zT^-!Fb%y@l`?7Aed9-F6N z*{$!XGox|l;5M2v-JP;l(+m%lUVy=B{(HHctO2M4tl7AXRzU52pAboVVb**KYZ!k%mn z!-X?<_V3$tw?Xd}0Trg(ql(H*)o;7g5*Zx?$z^eKEYfcL*;Os*&6?}2$LeVtTX8t5 zs_3x&Qdtw4A#Gkp+xVj~-Y)a(?w!%&v}p3Fe|{JY^T${E;>&E`mP&R>-s1q()MN?Beuh;_r9*vLNuMD5_ zf9GMHI}>ugyq>d`hf#QVlhgG0ZV!dCHqxcLjJa`{_B!-?mOcU-nb4#nUm(a5Fby{BIXqrDG;%3S_e%_+!e?CH2 zHFmSOjmN{l#TUMx+udktW}}(O_NStLyjo>BR@WLw&#S_4cfgA&O>h+DJKwv4emUrP zY&F$9A+YT%Ti8B(jJhmwhk=N;etY*sz4zwV*SOOmYx8QfSjDDbxELPx9+^Bnck=Y` zfE!bVYr5`%>5Twe@wpore@p7Z1%`y|UH$e6@FHjh)Nli%bp!HGED(fB4eFCgZo* zTNLliXY=KveAO1Yyq*C%DB0PM(p%=v*e;UCns3Ya0;jL>u_~R#aymW;?pA7=DEQ0R zEf}4?jJj6r3WrT=Tu1X$fT~S;_mXsz<8#JJ(~oW|9`EJHuVToYuuwTYOM-S0{pfVSLJ)~)cFe;W+CbeFEIV3LQ=A$fahx)f%`vU;A~JmZ$Jak{AT@||b8 z)r?f=_`p9OcsHNizXt2_qLQ}%fwaisX;UKI9#7c>y5kW?#4VxlQMc6c>P6b zrzQHWs?qiCN|4m8ZmJdV!s=%sF7Mg6KQ>@Hv2U^vzqieb05*m|xMzzV zjhw`Id}U6_Ms#Splcaen7po@}dbZ^P**8SuSGjQ#U8rEst;@lEb+R0)+pAe5^a=AC z8Y7RkT3$A0BRw0!%pdeGuo9-|ZU%TO3mGz%)p>9MTuJSIe>be25&|q3&jd0DV&+^l zR#l-YgF9!B%vrb1+Dr1PnIzR_nKcKeGkP@2I+-GZGI!h72u~LIyfs6ZWb1+A7n#Hp zwdGhFdT*vj*??oDQ;#A6ag+4JQcLAS0)FF{g5{~Q#s{8<#yRqN_oRkg@sbOsAQZ-h zl&OIC(c1Dpewf$G0(@Kp$%vzPnJAME$1 zefRW%?BU@eI1l}Q&@^-=%p;fYegZZcG;DM znT}r1QwwdbHE$5K`7jJ{7`gZjbt&I0c1*mpF>6i*eLow>*EgbQoxlO0-AS*@ z8slDF8^#Zxq&KL&ZY*o5Srrnykh~IC)6$NIr1l`#0|Q16qD~E+fUXWzpH~}@-dEWt zfuP<$RM1{$;B9_%dgYH1VDXp*a6qPmUHZV9^X2q(G_Py(jUl~th@!~7__c5 zDR`fje-j{!IGytZo?N93Rh;0Al3v5}V~F)SphbFhw|h3~dX z06a=VZO0jaHwlQ-gW^0FW?PIFP3=RE+PMm}X*WwFb&_v-N_L}lG&fME`TI!&w2&E? zh~%u5s zFNCatWOyLjNJY`ZSYY&`={D+rmj0CD&;|q{q^eNooB8@M{16lK>90wi;kcMlSXT*s zzM8c5avBoFnXiZ#N7V2T^V|E~KwGr%Mt~$t&{)booe@kD-{vMG1J6m8`818%1Y$?8 ze?*5{{2O}s1L<84Q~}~*9)a_5$s2h`2Fx4x!DWoSsfB7&Qh7Kin@QA5@(CoyZgZiJ zJZ|$x{|NC|E}CY`ci))&xiIz_Z9fK}bi_AYP3m5+)bMy}3~ z(GcUNkY14Fr|l2#zQnp8MBPM=++fMLe?X!~(Luz6a1lX`4~glM?5vNPkGN6r$iNH% zd8I8xWWv1@R5HOILcOlw!I$+buS7WJ0mF=4(qXrhV_Pj_0**xNa-tDMpsq~bBJg1j zZUQnsqaWOcBOo`wpI}b@IaAZo>~#@JLQgWNr`JvCo@GjL{o5R~27oE5GA7>4e`AYn z+?y^bqT!LHMw5gxrOet`N63fbD8;`?*J-B9%oN3-Kt5JS;Dh+rK^SC?5yvspL}a0P%YUt^5a3I)fp| zpf5jt2oBI)MuNP_5}9P}w%auM>lNacS*b8HFuU!c6|FYc{euzv@f9IlMWyjd>u znq=PbY@g;NKHHp3y##-@&~yULvks_g^VLXFQ=jJmO9jZqsqMYW{o*kS zy(BI0F%L({u*T}}28tDu2|}fiOxDK)_Gy;ezcbga?-f7O2O55*hT`@%G%1zY7L`(8zbZOBtq$F0UIjEzTU>VUQr^371q zrKj!{2M|83i>6pfodNZh_Hwa0_)QL)pIyNUn9+s`W|aBtF`J>t%W!AE%%?JqDyh1% z_u%MYdWaE$>y3fy4iwD%A4UNLPr;azt+>JDrXNx|zRJMLLXr)-N>I4hniE7S(j2Ix$@S~*7a0DMd=R2=+rQ^@&D|-)N zBov_(hL|Ntw$>C&{lSRq?^}__2f1H5GQ*1BT!hX5#riMH`Q@q62!C07C z%cnZ+G3Ar%DCAC7I;vg@+d2B$z+=XFw{RDlkHbt@%Gi`Ivd!K7>jjNM=0&6p9=&`wi{%WtAiBDW;Xz5YNtRHRSC&{jhZ;<}ee^!Kc4&D^-Zn8@<>Qk#&nyCmRd-`HJKx~y;4HQ@%z--@$epD zs`Prd%BL-UTX$4A>zKcoWE(>srNwe9+vn3{a#5qp8D8Y zZ1P$SLxo7yvx(aaRtXP1_(k*|x`JqnVMtzV+R|AAq{+N9O7y`x_bBl}!V)J2);wgp z^mHmNfz>5EA&|gY41&ECDH>G6zQaheU7BBu^>Y&DiegFD7ww^0G%Zz|?Itqde|Tif zTyG0ecWTpV=Rt=X9Pm+CKf*8k9eu8LgHVX&z5`A-1<)i8GDddKQJnM)O*cVjC)h~Hl zmM+%=}x0`f_!%eHRf8!imxFkBWn9~ucO7*2V z09%sjhKs8a)IX5cNoshEAmKu>DvC3Od0}CoUuWXOu?cHc;LPf1#m8h#4>1 zc{cgpS}eXX>PCB?udvc)kK|!Cd>yp5JIRN$dmj08IK^S+DzMunm<6+Nv*C-}k9Y1o z{nTY*4evNtT=HzwF9p^zod--9IuAT7tDmMKd5TXYd(U{j05f&F2JSMX;srW|TUr2o z75g*?qs>eAvW6qbSCxAlf92Ot81xovi)=<+CXuuJ>#l*FT!RF%RnB6<2)V~LSQ1d= zp@h5?3lB@ehe+DKWx7uUx~aBo&GQ>u*}3Y%Zui5~%R>5PS~B0hV`P^?ihYEmG4a)i z{#m=7`6fobWk0h;CXqm)o}5txl{19A&Va!$+B4$@K+13zL}2vNfAye=;#-j@snX1sSPrb=W_(6&Y($f>YY{L8(c44b z_&ZlsysRcebQOKVfA+_yv;pouI;cB`V?&Mgf&Z#{mHv2_A5cYnEm#^~b7(M)JEK9E zCWvdW2j5OpE6z+wC5m!p-oN2h_kQRdnQ6!uN)bXc^ezQ`eTsia{`6%YM~}SVR1_k^ z7eEi?DCbEazJ+er05d_Ul4@!aafdFaWHHhgn@0UqaKN6Ke;{@YClM1-b#U8GngBI= zD(D>m_A4JtNW03_rL2J$cq^+K_QbYf8Y44FKRP7FhwqJgMpbX~xsPIZC3X?R12?58 z?HcjLhuS=#!}{z(Hpgc;6;Gu%x4L-3GY2s>(VOvv&Pyv7Nfn{HxaZIk_={5N~ zw3XmnxBJ+ie^>?T(wG$rNqEkrJW))5{+7KMfbQ-DEdu_|)6+wemQQ}K?_X=lZ|%QH zbijVRIdUpvn36;o))+uEZ@%c5fN%$d-C^sV0KNkU`uLt7`jUoXHkQ}bfZ})5C=vso zJrjJ2Bd#@pD9#)I5bT{cq(PYxzf2=l8Xc2u zR(!y{q$Z~X#$Yfs-@c7F&3b>Vg*Bq~8WV{32qUx_iT^Q24b~9Vk0m{-tNod3zcFI{ z&_c}>fBP>C3Ww`B$NZtjhvE`-xfJhFwr=$ZJdq=)@vB}jwIbqC<#Paq6+>En!Sln5 zj#{frlZkmiqsz;icGbhsChWV!8?J%c0=i;X`_!walc~*_r%WVHXB?hMXLeV9?Lk3~ zf&h`aOSi4Q>b~Hxf?*#Om-5qPj}#Iw73Epef8NGP`m;eC+8obDB`W56_1d%NZ1$(N z-J)>mhV#f<_3Rl3zUju=l*HUf;{DnDF&PxEC4h?Jx7| zfALQ%Z!EmT8w7))!x4V~?ts&jj7|J4R>-i{r^X%agNCGgZ)fIbypj9s_UEo4~&^bhfJ$O@nnR|kBNBV%6 zSlVlt2zruv=O;<$3!9#pYQHwmURV9Kf9opw3uI2!T@iE3o1Mg5qRB*L2|B$vQ(*qI z`nH+ctKE@L``!5ztC#vHhovt5^LK89$0Pcp5LHF<(2}MaRt5ySJWaZIZ4Z_l@VC^~ zmk4v(1>FInb(ccq=EVSz5t0#ab>fI1j&**Kvbg0aGp_OrO4kAosYB5iK7zM7f3fSt ztj6Lgs=25UMS8p89T31WYT7>#nj1yOQa7L7+#XpuvJIb&B)rk4nlloN$G5V~&vFC% zD`YIQSR6aSSP9<}O^Q|23aw9%^6w7*C1zxJETr-eEDHL6kfpy^Xv|1D zlB|&bP^z$K^WVJJKZj0=Jb3zd_=obc(%+2J{Fn)$edaV0|F~iQpYi#Bf2M*YXv$~E zKZ=#VQ}{=jBL7ijoFhMu(Pa`0Re#e({>dpo{=TmTBxcTX&k^&_rsC26ku>o~l10H$ z|HglnD-5d#|NVRaC|HVo6pz$l80jDHi}ZJD!Y)Nq%cysbnt%Bc<@g_lRfb__t5`o9 z{Zp{~y*>WPW9a|7YUMvlf0oKz%7_UQiMjw%K&`*BZ||*h+$kh)d665OsOy6BcnK6I zeR-*YhXBa)f4n?!n1n~e>L}T=kGpD%ruJ7zvN`4EfMzVr<#MhMpy43qES$z4MWV>V z0VDwCvw}in;jK8$>vd;B@-ydHOA-_*Ms9&1=8GdXG|J&vl1liopU!qTH_qtteu&q*XP^m7xHvK^D1kMSgd3WECDrnf z(XLDJD=FIOxX%^k?bK!$dw*1V9a)Cm5wti!3ng73-(3*lObKO-tQDAM2vJI$ZUL0^ z`EX5)njGKcE`~1@+a+>m;TLKkUWxZzG|%)X_gl87oNW(=Vr$z*F880>V(5zFD|eFSiD21Cq>K} zhKtG=MuG6Nf;c2cTr|l7DnmF5Q-ZxokP^||+ayopDXC39zLkPk2rSMmj9t7O){|mV zOw{Cg6JN{X91q<9kbj{~)Xo7YetrbY%G|u#iUP8^22e0*s{qY2IgE`?*mlIOuNg6a zC4Zf7`JimgIHE97jhsv@5rt2B^von(=<2r{G3Nl~lrqPwOZPL+Nw<((t63(p<7WZ_ z)sW^zr3bUC*XIjCR4#40LE(@HHw?sm(Ys%*p%geNy83x)S$}!T7C}%%eHo_)31yDC z9L1|tnKeH0!^#W&b;=;iBPL6sh!GIP2I<=jar{cn4d)GN(R(!O~BL?Rq=?A7?#UXQu8wTRch= z?%UI=>RaL5<)?SnlXf+<%Md(PsA1CtMlCi8kx0dlh<2#3Sktean`g&-a(b4a*fJb{x2zk%?Hp(==4cvpJLqG@OvpCjMg1&cr#D~ zD`0QG7`UN9245Ae8$lko4e#wVy@5{MmIW;|+GV(@jhBFv+>sh-yYD`CGsJ+HbLm|P zp(*YcEPqo3Zd=R_2R9xJ!~qq9zv<6nXc}Y~uF^2AlF(U;-Vn$ z$7IOTz%&TJhu7QU`-;~#XORmZ97&u-Rfd6;dQf-YiADS z&`pg%TX-0zUSQqnuf%A$l#K7Zip?Z};H!RE;P>>06YwMMp{TZ8;@pW zy7_uq=mhpGV&M<8$l3^_6aaCLzrzFDR##}06I4GGHL8+`0yTLaui?cKTMt5vga`+O zU1uzm7$0P5*SKE64ig$fbUJ3H7<26%gooYFqvg$wMEpDyY3C!xh8kQk`$e9z>VHGn z)s#6eO0jH4Vq42-Wr}XD-w7_!!{T!4@)v~Ne1Ke>-E43m4PXqF;lN9y+X@Eoq2xbUrF)39UyL)L%Bz*yrXH`u?KE^9cLc+tbZzLFUt~1 zf{=2{`(=f2%CCG3DBbr>E!f`d0*e9LWC(>$j|kvSn!5PSr3u;oN^A_P>TLpM>PPO8c`Gv~qAjB)yDnH>BQ63&t$D$Q!$6tgp^wk63w8Vw31a|LQ4ITmlkB zM5`A443zhQ3$Tp)oY*$*k(YAdSowr6@e2hNNitvKF^0X5IIa+2aYu?tlsu_>n}*XHD8(-3 zLU9OqMx867dzqJTu>334s@&_83*v<5)(tCRlS$VNLhd4-p??uecr1@ZKd+kTXRx1M z*l&((w-qn1vF2guO|?heJ4sU;X^n+PK)n8&?D}m5Pv9R^k@rj)+!Eklc6(G3n13k>P26PQR%URX#ccJ*jpsyT!Gq81*e(6@L`1%)8eZ+@A!ADR+kQ!o zdS6i7i5tu>BfYG;hQapKTjgLckqx6le~_ISBcCxdj*9{P z)FWM=BQh)JVkx(SsC;pQPR)P2LmR2HrT3UNlKgVE>woceDZsf(T^T|Su-TqNOd^db zshbh!{CEYPQ6GnpA@crc)B3~>zU%Yi?_Xq9AeQE0n@kyM=Z)zm_2dW7&y{QW1AAA& zVDm;4`u4kzuf{+n$ zN0i76q^IvRAy{lP;Mrl$SF)U)tm2ulgp{^^Pe3h!a7o6f>nL*#33SNoxYtiOH)Ktwaq+)6v=&5^uqn`Ml zJbz5v%h0()J11H%YAt-|WO(8)4YW7^@mrm@W#&fANMY7ZpHFH(y-8;Mkh8pUsm_v- zDVgU30UVV2!xtbCRve^=Z1r$1hFe6W-$;^aYET4HS)49njkpsW&dZd@E*VNX2cy<0 zHEuoJ)7!0c4~Q>qfHdLLej^z)ZU>NLe18(JER(*kML>XD*k#1;MJMF?$N>zj?6%wH z{D;K8m<7_$g-&AlIB#6MRZ1rDM@v{|o2s2c27xEijQ-*g>4BB;>F+QLjx{G)%I-y3 zPF}jejD_ z%hzE=u-d_D)b3+dlvhy-6V;uDk24X)=2GR}a%1kz##`M19ULn1YYHqE!?Il~x#TKT zIlmuCGiclH2b7m2p)zp>QAMOmYVNb}OrpuORn;%*e`ixy_E^T+i^zdz7|PD?u^lr_ z^O_l6mxUv8_TN(76VKDWlomyR!G9gstPe4c?g<0otG3@GfB_z*!`XuI0T6aNMzh%Y zU*8FRfa)BY#uS$2T#k^0yFRjyDQgP!FDvR52(%c)e^gEka-lZ-HibrgCTL{XN&6WJ z`j;{>^wm}oQRksRW`C%P4jbe{ z>L{pfIDbC7^E&mGD9%8;v2Q?H;js;94bO|d$qtd&ORXI7;#}M z9OcA!5>Wd@WKkP*E~ZPHQ7Ia3AlB!}Z>#tfE+qh-FUibMe?(W;jE$aKe)~#*pq1b* zhWB$sJ|h;S;2X+YD+_-CUzZaz@>JiON$$G{i(Ci@)ct3QCNp9D)$A(uGy!E`L`W@NtII zu)6pF45FD{^E})1h@fuo7UKT_8HJ0Ba%Sz2}uu$Axts0{jK1u7d2n^OLPL} zxny@WGdxP`bg-qfWq$$@dVx{(N6Gt!=wG(UmQ0YO(q+BgJs)3uv-Y=Q?i{qIWTRZa zJQ{NfKVGTdUH(qW>DzR8Oy70mD$Sge%uyJWp=LJp$45WJckfQtY>~^azD22b*TK!r z%0W-@4W3%%QY?vHbbY{qoom())U9iK*cV987<_JV8&|=nX@597_2AA$W{Z=#W!9iU zPHrRIA`mN{NN6TN(E@K+GbZ&&lRyH+I(fyD&*&!e=l@Zb8w+8EZC2MP``#|)d?P|o zGo^<%DkMWS3>V0!#~Sup@^@k8F+_DXKN-BjCaIqO0;{v8v0I^0clCW>MDlueToQ~T z*@MS??#<1WuYV`tXJR>CncY| ziO7D$+=^=b`bDc#b}!N(h#Agc2d7GMWGqQcH!R{mL4WZ@_p5~>fFoZS?=|Ma?n9q* zfo)CFEu;RPRjC8g(-zV+6m!N&HIeMw4by{P^#ewMHvbG5CrMVMau}MDuQlg1Pln*A zLEzd-@opMu`rOTLbe3TO4o1Uo1P*Xo^T%O;NHpKsWsA5r8LevO=r(xz(`hr4W8wn9 z7}xr3l7F1&w*1p}K`;Y)cxC+vxLi$pO^K|}AwScbGY+-ZJr_NC-j{VWjfi)!xg|r$ zWE+j!Ncd9u({&^1ntu|u?EX0KM!^4=oMVNYY@Oy|+Lt-F zhUF#3FHVYX6`7l(UiWp&YfVuu5=do5%?DYsaNg9!kkM??zu@3iz6>}7P=-4YO6#{gJEn6B=>EtL2Q!?zdIcFzo(M$^W zD}S_!*(;6=425}7UN8PT(ppDs3^)IOBdxKEepp%H|A|uhpOMybpYh2&F|)vmMhT*a zxxXdh*{Sj&`Zh8LZIuy2Pv&k41aiVCZgWn zg-AC4%r8OMUm`IP7=nsmL@!kXeSOmjHw)b=FcqVvC3`|lWiEXp>DkS9Y?DT8lZ|@j zP&@+}b}OaSE%C~0cnr(ViDY8Bp+7hW81kZzqro?~#xr{&=2%V%&f|R=q=C$BGJo}R zzg`;7rph{r#K@q<(Enr${5u}o{Kf4mY{LHLry1v%WrX%)I8pIuq3uSQ1wpzQI!mEz z@Bt|sBrtZJ9fl!5L*L^*+r&Jsj>F(vWIe+w6t#>Qi+(ecN`q)fe@F*G>zI5s*FNRQA^jDPp$Ck7II6A>E>>yoN+_RAHxjYNPidUx_#e- znHaR}-ScN23G9>8jc-xx6h1QZAVTRJEK(#yWePpa)zE+SJ>4{7kGW6bBgvR&o{DRq zKTkiDpX0D9{)0k4wbt{tinQMMs?-*{W%9ytu+xg`x)>k&8t5!-9RuR?QlJ0z! z6DpLbB&u}!O!CX9wh93{8Go?4$HldKWgi(t2qNg0w4)K}w`2kqa`{$Q5neSP$hNGm0CL+%&) zkY=EyuwgNCJ^e59D{Bm~k*n`UJW9wpGt>a{EZCSNgcRy7Yr*SXXMenNDrXrdEJ&|s zwYlLj*1UD+OVsjF)&~xzM095#MxTDpCQgK_g_CyJFd|&5W~FW(U*pQ9+>==41>q^I zS2LpS?0fGMUoLI|mFrj`EJfa1gCAGBSSq5cu`ip)N$%DeY5Xf%0-0|}ex3+tPA;i; zNQM-JXf2CE2)V4x^?%}mXd76k;fFPlyb|JML9ql0vixpA&ky%9i5C1%_ysop7Sda7 z_HogZIcTW}Oxn5;t>9Y=ZR2CHu&Jc~$b<=O4KCqduY(fdoaWfL+`#@8NS<{hGj|iW&D9VT_L~fC7aZeYE<`J zE8#QH;HtkWD2LQd&>>W1>L9gqn5vK(nD;r<_@a{c_glIEg6u{{pG1b8p(l$TBdKqm zc-E5L3H&_+%YV7=r&FOi>-ZS3)8xf>ggMqq+;W?|KUJnqxmD<4s<~(4R7$sL0T5eD z_=u=}Q%aW#MW#-vSwkpcsl}W2k2Rnnld#2$zWXzP9LPmJWWtqQG<-ueF zom;9AKb}t?N_APlDm%rsE~sM)@mLY?y&i|wI=$Cw!G8lVB=il#QM*f0^9bKvB!jED zC?q`Hg4*BNIFk5b3P>k@b(@f*EWeA?SCOo~gi||t?p_@EfYY&Zv^%AYtMJh zjGLz#`{0~>tSv*E!(?jNARMrUzO^!5r+gppLKYPSD<9`p_9ym7Z2X~_ zaIP+qv{jgCu-^b(_slUy%=2A!9$Mw}{x-zFuI4?GLeD#$s^98%MEeQxM36nM_NXE9 z9SNTd?}=+g#6~mB_eGaX}I~P%<3Q zNm&4A0O!z6ud}knJbQIp+wQ7p5qVY_i2`fU37d@c4Vv)aybq+GHY)TT>2q4LxWfgP z^M8}=lKRTf7`pj<{5OI!0tiaeh%AjW5pNOdu#3m18Hv(QBZbW``5ox_K-zrYPkrJK z^Y+eeR+kdM$VP(k6Jt&S&!`K^ffmCaO$tE|SW&KW^=lL&n^1By5)%42uuUF&s-|f8 z=O(1cxDH?@e#s2R=!#LJpQJew=K1C4wSRO7A8L~}`Ur~N6WQ3>oHdTKmR-b&jmj+w zqZt|#8Bm-okJPg)^QOx!x&hBg(3+)T@>@i=78m24pRZKZaW%fLv{LbGjgELfhm?MT z&KuS)Y0l29eqMIHll`)SA^5Uw>P#$;?B_vGz8MIzJ-W-QNEd&bE2s2284o%e8Gmm3 zC)f6|ZoZv4b)c3s2+@ZvQo8b}j<7v{=?@nAda@>y-w#K3dt7F(xfp2v(_q+=#fRq+ zqG8E8GrL?r5&H-DCFe>i;2V!zH0jB!)ZvOcB!@EnEH~gf*f_1K2@IKZ4(AEF~>>v-ItMz9Dm7zfOeyw zbeuJa(a<%h0i4K{Nz`XB^K`0q9N<`K_io;QUjN-p>NO@8-f6sGHXZw#V(>hZF``JZ z$M4_k*O6^7$(fUp9TUlKSzU=4UlP8eYgo%=5#cs!=8+ZYZluD!xnda;pY1|LdLwr% zB*SWQKKT_PF)DkK27FRC_kT~H#pv}Etm#H+h9^ry=O`W1AAZu#ZO{~uKklf@pi@%k z`vQam4{qQ^$h$0I;Prt>3~6AOZ!hiXXi5xpHw3h8-HNvJf3ubJPpg=q7(8=5w!%~Uru#cyQ9r%Q`xPt2r|;u!`Vx{p11bbkw~*H6#-@x&C6 zoL+vLhzQK%>J~0BhTMW$SInnf?rQ~eKTF2+Zg!#YvXuSy>J4)z@-zW991B09{u%g>-Udc9X7%aaVe<<^gh|g%v+OGw>ECZPcBT`XeEM_d*_?G z`l3)HuT5&ZlE42;ZU`cixPPG7edQ%q-iCFcj?mufKI{y}S$|2f>l{=};YWi!L|yqq z*GAlV5f^f4tcl^;huIXr4+=rsXEcx@GY}J(jY9nsaVCS{&Dj5&1^hoyz~lHY67WBe zHtR1Njk(#zhmFhl1$37Fk#grhWLqNrOZNSdd45OX@6i$0O zu~Qx?CxGq@!++j?SfRyB|3UKoTif3MYJ%D+mxmGQXHWiT1~t7R9F427w4tk6$48tz}^mn9xNxeTZ z?~gu=|LM=3NdymRRtbL&ai3V!$xU z5j>*Z7To`m!T-069Q{WC|DU41rU*s$PZ0O7Ao0IsqyJx-@fY;!{h!kDWKg>T{%gZG zN@#oqYJWUf%{w#Cbia^uoWWO*Q*auYX%xkH3Xi!Vk#{grX!eeO{(?~NMS22H6b(GIE1}l7ySNk#I6@LjWCYi#$O29ad@UJ`*1c^pVm|b8> z=TD-y*MAZ{#`_;~wtobN|H^LvmstG2WWx#iKO}{)`8oeX9`}!A?>}Uwi7fux?%#F! zUozwWkjOQ~p8$DM|B;;jBcJ|Pdg}kDY}e@6MtFFavUILde0Q(dC!oZ~`HV+!4KA_7 zH-E8s53@O@VYv=V40it$vwPA_dGeih?ET}pMsfICQDYQRq)3UGTT#Uq{}UIYF0&_# zB>1v$zkH(zmRrbtw=jNh%&e;%y1N;D4A~G1&i?w|8q+R#*Q8zf0eBSzV%{;(y=l zBm@XY!%?{D64Y=|QBebS^>^PD9pfEit~uA*y=zzXxEVkmLh_vR1o@p90sL~tO!H8Q zEmTDZx8geV&Cv6eGBAh>Uc^HXH)_>BtXgp}P$a!<)i>_fdVB_Dgg6Cr0p_RC^nWkl z-XDH`1HXRB;|I9=5BT>VP-DU4Z_wF~?M%TQuQk=m0!*GE#OFeet>Ns<21-YdoxnWv6$f@BUz{6BC1vue1z~RgJ22Gu~C^7~3Hz>D!+e>@xq&-~S zF$sB?*K(DHlo}Y)4Ds@yQ%40%0|Au;AfRnibB|45QmImhOg`>)lo&LEQ-98MK&cLz zxP=^t#%n$&)ydxK#RgJg%BiiE4uLS_fI1MA1tvp_gzoBrwuv}dZh#l2(bpySvF27l7rEdkQaQk7Kg<<2a7YJ$1Dj`0-LB*GB9&do$i%cv`u zcC|1aY<1^BjxQ>ebCb#$;)SJh`R+qjI8fRG&Mmc+RY0XfJdo6`FCK=njbMgjT*zk7 zH~uF&2ICP>*8+G{a|D>HtkO4_>u-?t1*p@(`2Yk*U*NyO@7M4f_<#HDThfijzCkXX zKLFFeZR;DP{R=E~>b_*;A_uAgV;Dpo-*KQ#RexwVK5}#|QWyl?!DO8VeNw5s!4O8rXiyMhnGQ|@&Vf3{zyy#hM6lgj zR1t7uLI+uRlum#W9e;wx$9`wS8yFjvWPk_|$Qzqb2cv;nkfv2^pB89nb0B`vfL`N@ znj0XH6qtw)P5cH~?dh+1rhf&iuADbW?I$KU0xqJQsr`?kHWxcc^G-@vJVd|2eXoG+gI1?1ZUWfc9A#cwdyi2y{0tT286 z)8By3KLFs>7vOwdOxK)DLBJnw-EJUfjPS+=j3&03u31}7A)rJ&J)~N3=;)NLu zEQJ8;kV}=wS@A1c?KVyuSCaV#6&{aFATe|mAC(5%mk#h7R9FD4|B5xga>qYD^z_zM z^+$g-|F8bVeHHcZ`^fOs#!N%g=MzK;as4z@>u4|nKoF!4YsQwik0xR7taT_KG(b=o zN`FVARtmu6ON@c#<94$_R0A}MjZ%srWs!A_(3KJs0te$#aUC;G4i!z;uSoKFquN#4 z5S2tab&yJlg)FTimZrQfkn zyq1JZe$RZ74o=4sXub1n)dR`iU=pbj#2_px@uN{V(9vUjXdJ8(3Oahs*j%-C zn#L6s(1z-)@r=RWMB7zLgGquYeyJsP^qx6b{Me9!VywuxptdSLY7cL241Yd()ATD& zZ+_`m_|~I8@t?eP@MF8q?;P@*cLI2QBK+kSPd~c*1-fny>B$`aV&{K+!#5cFpEc<# zh38V>-cya#)KmLN1C1Q+$6zlYbyX0#aFBEeS{=c3tCo?gRnZR8cB^tp1q>5Hi3ZEm zyhw7HumVHI2ppO%My;y`bbsy!>`16X^t#YNDHF&e4G)EbkgHL@(NRgzlT#J&?(kTW z-mH(VmO9V~D1@*GR0-V2&y(i&#Dx$zs!wdxJ363>Ye6}OeW#sm$%HK3qRtFWt-!+P z`&iSB zAsFC=Qz@BKkrY>w1AloMLxwf=WS?gPAZp1B&?k?3=4U8cEy`_xb(NZcww~3?Cg_QW zXftE3?W-d8F#-Wo3Rs#}n)LF%j3DSo*UKE-bx|KtZ{r1XOi>^40nj_%fKR?gzVfm^ zwW6`*nbb3`&vq&idLlRyBLac zCvSGqNp?V-4t~w1-jr^%=;&xZ^Q~SnQLmI5U#AO?2Y(CcT?4AWa-Q;gZS^a*v7-9&1JfUV6ese%uKM_eH$QRdj6fZo{^Vag z)lJ%1fMTTG;0=`W&8)gTI2S9E5!3Zqw!X~Q8ly-bSQZ;mfd{&(dUCbZz4eNd8=>6R;4yWN^lRPUsy?-@0%Qf=oPQ_cf7efumwwjlzjX5}wtec3>F@mi zuQg#&3$3FOsEgrk^Plz9yFRJWzvpncbSD3#!+3KHoTC}VQ+BkeY88P(q z9;+(rGA!z0w6r|NLc2_BK;`x7z-Fj_=X0vga07yf07`3%5s_bJVx53R9^4Yd1!xf1 zzG76o*9qS-=a)Zx`-QK1 z^7(Nu6&Q5j?ocVgyxT#*5n_^92RK-2d^Kom-ES+SlE*5Qmo9AlASSt~N}a_W8-LL# zLEN!1EHWLNf$l2-lz>D^akcB7K)YPnVDy=o( z7`9Jc1eDyEkr^6w=sFNnEon$ts(%4$k7zux&5~fmBQI(gSv&{^;)3X1&-~JnzpR^n){W_}wIlh|?mje;Ru&&0Nxy9R z-4E6OqEl7+YyaaLV<7to(KG6uvCR|{aV629gnYmS*ttZcw2er+AOX3#Fy#wF zx|$l%Fl-RuG%|+WVx_s7$_oss>={F<>o7B}6k|9yHH)_huy$-$_)0JEV)t@{LFa0p zx@b05I@E}|hx+!97r@^BQGfcX#eH?g-qH^lkb&9o%=46_bK!zQ_nzdId7fclj&J8718IGIP83{XzM=uIG z$EZ4_!)QR1U|*%SLA*zzl@u|OK_=gB^~{b3$$}!z!t=PN)Dm^J_k`>ykR`xQ}}K;SQVTqN0y2tM>if^T{$^32pk zJi8|m7Hbd9_QlVDqQs{P1id^f`VJd?=V(;XVF?@;kc{hcs`3;9&gPgoDl~7k#>Ac> zb1{|nUd7hn+yumMX@7C@6~M62C9}!yPXruYpfegYHG+{bU`DHOYaUb|?97nd5hc2= z(EwZFXnb+jh{;;u<(q(|`uKhxDSZbut^|6#ns4fIAFH1?so!6M846*K7h#VcXK(-g zYtQe)r?;<8W9_?+6Vfjo5_>a$Y}WYNZ}_hF4BGGYqNo!GGJpD&8-3O$?63O|@BV`? zc~lPc5os~UPFlBn+@}^4b#;cJoe>9=&D!gwOz#~^qRP}Mvmlkukg|gbIs#}{I!zm^ zy%NDs-4Mdr{DA-|0B0)X9oPD%u>7`Jr_vn(W*LCkfSD&#?B@aJ;*`L*U_U}4e=GZ) z&Z$d)D6%)D-ha!fR`O*G6b;1pIj$KoI5AKVMJ6q1&~z4L&B)SRQ=`;1L8?O5`uc!# zvud__wMulZtL{q{wO3)!hw%)fLD(~X+w%>6<>McDylvXoKpH!B@hjgJvEWbr{U=`h z#RsOtTaMrN{lwEVmE?u#gjqhz(KJ(BXCSOW|&scfMhYR~eqlAAPkb5c>N1W2=Mnl|vM-uk()& zW54UTUw?J?Ke{dWK|VU%)7RHty;Sy5kkB; zs%T|EifvIl*C-D3WO6nDMjA_1EcVxFq(kaJZFZ>yz%Bw#XZcPEIWYm_vW3OF2(676 z{{jybUw9H%ZsC?c2 zI{GkbPaID%QR3y6H#aWOi9dF)efs3Xf5sz<7)f>xS(Hr_?JIJKCouSF4o5ILx055O(xBY#kf zw|&TJ4?dz6`}!Zw(m(p}jyT%;|NFhFNHjWMUoQKW0}=gMs(4fDeT7oQ#^do=9*@=W zO#aIk*g60@0W%%PrIrMM1xp6hqh@y?3zK8TUHIUZc4S$FtT?VXsN(WL#j{|Y4sN9n zxaK}#fSz#NBS z3}gvFq?N}cCkde9K638>?KEaE6ITK>d3?nG)<7ssn+Dg26CjavQx_}2~ZE{MD-I_OOt?G8ca!! zzxs<(%75N)d{l>Txg;L(I0#F2QEw>5O161BXsM z4m30E0JPutBAu()$%7@>zklzCb34r$1-i#o?A8wuf%JIHGF{6$AU-77)E$5{(c>%$ zVAH`&C6bxhInZnbHDUv7CR#&mV9*InJ5bd01_+TzP6E+~#_SyE_7_jV(6P+MM%ob8 z0VZg5KW2GHu}h`6e*a$rnButZKy)Pm;s`ses*okd(}h4YbAZjGmVc%i$kW)&669ti zZdiw5nAiYXeOGoEW`ATCOgS6|jw0vQ2^GaIqt%onG;kEShzy7Wr{tXb^|b-Li{yP%eINBGN}3yxsqJu9PmrOy6zur z!LbowetBeP1Ap|GBB)iBTN0hJ4t;b1zz26Un05%{Vs1$wTKj72Pk1YVz4BXPk0S5`VcR+sIKas4LN;v>hOcR82GH97q;d=*^uA>C` zIc(C1-a807@qB_1S0&ea>x?)aT$JR1QtA5ELn1PBmN?pb82 za~!#hkP9O=@QFlfI|qS{?@?l7oMAsRZQy2sE895;ZG86=d=J0d^g$Et;a7WP+Zf=+ zjT0LOxDj&IPzUiX$pN6kMk1?M1sLH?d@?dd9l*(3ABLDFHikl11pjDzwFqhJGzZkG zaNPl|{B2OhfKexENzq4twSa?;0_4Yj+Cfs&3ThpYg-yBSZ9{~mc9H6$mPA1au!(5< z=4(VGi-Sb(HNb8i>2CV)mPsP`r3Kx+gGJX-99zSb>wjqIK^fib9AvhQ)lYldb(DVE zhoOi5yS=b&tbf|2uA{}f-Kd5h!FT(?wz2tcM_niQ{4M_uWGtBYVom?(fVLCyL)fj2 zcrA41+b*>ev}29c1T8q&;J1&lb8xqj!#W77H&2(~lnQ-84zq6n7G+nGxVd)-2*l;J zwZs54aetK9ARrQ9ehxgr4^`My0uj-X;LSvb3<}(I+E`X<;|QS#uZAusN6{DVItrMW zAE}03?AzKosBIe~g2eo2*HP!)ZU#dS(YyU*+gSUw3;w3>b_)$X9Nz60+s5S6F8JHL z+wF?8wGCj3rraJlOKp&*2&x|$d+*pQY_CJ|Re!`%2MydF9^4lGOU1%}8E?D%4|+zX zSSEvr<&60QU>%sN=~CFF@Y9ecHUz15e9J0DZt219Egz@)rGr$~{mA0L-*P9o_K@P( zj~P$=u6olB9CSdr{ePu*5od1xnYa7a^)LD;{x|dRI0zp9VV<9M0JqXt&V|>NBX=Q% zT7S@%b`GXC0t&&!f1bY$0G(ptUuHIvH)}NI9^z~*`1~K^1mXa@LRM<}V)ssMgbxK; z6*0SlMkhYLdJ{hBWK~U3`-58&t*nGk>j19ljmUuFSVKatZPx*ibnqQ-@jE6)pL;sW z0lfj64)m;NcIhL4T$qw@7Px!w$d}N0M_RHWWFAh^u~LJD7lo z=}QNjEZQcK`Hq{Y5wY1dh*Z>R;OeOjbm3S3QO87V9=`J!UrlvdcOX{N#HWc3FrDK3 zeXfa^mT++T2i^w`d*>XLtEVB$$r4yp+Hz1O95r9#NQC}o>OcD1DX2f=@HlLQZ-2+P z4n)FVi@XB;lg>btm7hEZHe?A@@n7Ul`rwqwU&d1kU!>SJ0NM>~gl`dc2ZNN^-|~EM zp*O4FG#zMbS^-9ITKh{eHn;T9l&`Bn=qlF+7j?inin#KTmCk|Yr*;Jtk!y z<_0!&QU>}2W(fjH5-fTL;7IsaIt<*ax__mm`o#Pjt&axo4G&Aq=WQ>o;D62kY(5VT z8|Trf!pB4Hi-#Zk`Frk^|64XJ{La6% zmA8Ema<$R`afn@(A&8q*#5a;ZjKmMcrh`ErXe-iNpDCEU*5&-5`uJP_$ z#GUx`GrwWzf!f~e8|J{Lf`4B`?LCU@}0Me zJR5304ecA4TGf;X)&HbjJR9H$FTqTlheFmtd+)IH_M3j-2tTchJo@|jb`Cw;2A0Sl zLXp>CrRx{~e%0MO4Txj-ImTVb@Yi)Bj=!!uANoNY(+~aoIQr1PkAKq-{rkA+I6@oCA zVHf~_nmA68IMzPL!7u>&b-fbDU)MkML>y-y`p@x){&W1H{~Qlu902&jj&&yH1p$8Z z^zt4DfLPFND={zpMSs_Q<{!Q3y3hQzH(majKm5?ipZS02o~jNW}$4^H266>Bvx zvX&4EdWJsq9VRzBNATVEH!UHe?;lz*=I{?Ke)l{4;vkXJSc|s@qfg@cg&li9)KO{-tX&QKgXP7K-+6afkAIh02sEc8wLnl3n0B~Z1(#+ zvKH^s+WWi#q88`dxB+SDi(UP2L3stTZisF!4Gy z#*~zG{B<9|ekSTRN$z+MV9&ZSIF=I*$+QOy;*Q4+WPcF*;6I`yG(e}o7r$rejPZhr z-vO2<1VTgq)2;%I0O_Y41C>DYr(FY> z!1$+K|Ce?boC!SroO?i}SbQZ3=n5zRoCAY^ErnE4HwVDAP1ju77Uia5U>>D+yN02_Z47AVJGMc z26ZvMoIwfT6yzKZqCKO4iE-FsTnGS9%olfJ9Dk9RX9C~>U`L#HKyN_yz7LCew&FbH z;4b8tiTN6l7*8qYA8=R#jf(53c<-8ubDe+qVi3_E7xUeUc_Qhb#+$t5z~D-eBUw93;B3J+~)v`{_p9>NAB-=y}tPI9#_PL zynh;T{UP|DQP7F&!PmLXKK-u&oB^W%I}RQ~uIYQ+3~b->W58zcDaMs?FaoIg|3R<* zXuFdHsd|I{u7H{672pJ^&U%PFnyaL05=#qyU=>IXQwq z7vXH=t4IiSy^`Ydh}WbLj*Cqm-<{sZb#%=&=LRtb;Bi4UkO*$zp$J zkusQ;BPbK55`5ODN9YIcwSY1Ssj+)Vew3fH_4uY7BX}BiPbjHwl!Kf(B z8)Ser6Xl>!5A({pWXo+At)-{bPv%=C+)dBk-fi?? zdXM(Y`E2Y3VQ$iLw`@04I#TcP_91_HyLEBfgI$|HlfI_9y%n!jDp_i7yQY6IX;DW0 zA?=lZTaMb-IIM^3xZ2)UigBNAIC878)<#>)4V{<}D}kFVQ0?(BTdj)OeRHeU*)7Xb zB^pw-!nm5I(-C;>4&kyVT0D6i>!}wUuJOTf+RkkaT(oQkyJc9$b}fIVoju0suvDP67(byN)5-(H$D%*~yW^#p%XXHryARq;cilX~iN& zL$c1E=T{!#`SQM~S*+Y51>e}=QVxToNOgU=?%`%?$gBEsc#ZgLJM(`$u%906o$c7i zW<07-aiQETuv5ozcqF@EQUIjjO7VNR*^Z@`l^$@G$J<*RSk0_oEF|SNn~WnXx#mEA z?2BwtoQh<;*gr4hv7BYAWq&`6l(ywWp?I^eYc** zMISuIFYj(=csE2zX=8u5Q51SMM#smK$5UgJo%iF~FwW4}ox_KZ>&0|CESLOzEYC9C*H>S_-6nlm z?lW4Qd#7OclYJWRDU<@^U|y#lZ+fqPrHw@&w*aW*ST?CHIZb~Pl=sn%vSDN%*XNC- zp4_75c{G)baHLO1;ax%YRY}zI^5)D9dv2-cKJ}h;5y{(Re-2K5yE&44rRDl@zD4JC za~&e1RQ9J}?P-ccb>6|w_3mT+W{0voq`F*Qg3&y`RO>)hi?uXq8fS4jW%F(4?jK@%*umO~dy z+^!R=nD!f~!q14^EW8NCH<>bB&d6e&J?nU`oko&|dEeld)ttStZ}|G~grhMQ$c6!_ z(PnoqmgDH^UrL9;^$-m4Vt(niEgy=l6!mPvZgR&It5`HQ=XpmbSD(Ad^&Tv!sm#j7 zENzwB^wocl{aX^!a=mQ)=V@`5l=(V%E(7R?%|l6uw{@*JlR`n=@fYD%th zn#`8$#*~uld=$eqrDAB6kw%u5Z@l~Z~&9{Hj0Lpfj9`q;*H5k-s(}b)x%PRyh z4y@9z2Jxz~+g>$OwF9!=wJvVGY3yz`%K)`Op}2nYyzPcDJuTS59gY3z#8}Pr;$qjp z*$g{#zNuZGOxIJ8g*kaxO97c1T&C1XysOJhEqv5zxr>&hT}Zy$Hf6Rb1KZZ(cIm@= zvWb75)44uv+!eb`t!}q!ngh^W?4s2riEpz<)S156aE|jie){)rA%~-7U>}p*khjPA z=`k46?sYVut$I_?T6KWdrN?{6<%_I_-Y~4^{o-`p4+s0Q8Esda{XqqE8yYi$AM0wj zs4L8d~q=^ z+2lS{?(mqvrDbYX|7vfk>^ExEugxjw?w5LbJ#PHL*3EOg+!Z!!^yw+dE_!s{sVslH z>w{6A)6DUm>S3}9R{nXvxm~q0ZM$KEA2BjtXF4fXZc>D7XEcn+j+Hp zf@XQrx)W>;$G+slaNlny^S0R5n{qapv><8PB&hDse67w$N<=ce*3E&OY)>-$a#rK0 z?tJ(x z+nPYVjj740Ye4&gUo_@wi|FwVJ)2;;w1_`|5vBgAh)yIJ%mP0=%Lj|~#b6B|ocZ|} z13t9J_i1!1iJ=LFV~=dhW_2k zfvp}mZnm#+WO(vCx7F>6)!`ORFPC1bj$0or;P%kyN)cU*a9jIr2(f3>p`6TfuSisD zo65=dGIjUrfVcM2XjgyHyaKqBE-OwE0nfhi`ZclvczETU2BM}=?vZnVrx-LSuDtZ* zvZ0|7i(JqZ)?W3e#pbvR-yciS&rXJ~z9-*D^s#?>~zPllRFxN-Z+?QGz zwWB%JGzri2AWc>ds!!%-u;MGQfwu)xNQ+5(Q_rA&^o|v$gE!k0iLPY0kI#Ekl17V* z)35T5kGCd};;56kesV15ped;qTZetT@9dl&i-qfwL({Gi zG}GtP)-JOf8E!kR3pX=qe>ih>9pzxbES?O(WLucW#CSc{_iUp}_QP(SspGf7sP~^` zRFM75k5KlI7JpEVbEeykCKMG;cgJ4Gvz?EhOIn+b&QQ z5L$O&E}wt#b8?WUzHcVeNxLZQWf|+GX?T59AY&U_=sZ7y;l>u5?i^mZKHuU8b*9_r z$deWzmy+9VlQ{+l(u_wRnCt???~feJA)>h zXNRcVX3Wo@;CEkjk5U(#(HUmTV-n++S4Z<^g%JhSz%SDOQ(_ z^ST6TH9hRDX%}pp5@~3YR!+)hfq6)j#gjbpoBvYu=_;H=+4#8%v_i{6T-&>#y-t+) zX2!N;4LxOJujMZXLmVx(;VkG(b#i+SsFppWyy|712Azx!nE_3!+SSwbdc8hNU6sUm z^yq)o$=JLEN~$quGJ~gUk~pW^`jl$@__7K|^EmtXn*fY(qXx7ts?M`7mNmeh&2=_Fwn2kDJlv zcq?UnpRDGa)R#6n4ZB9#g=n-6WJiuP^BaUS8?=_0v&RI~P!9s60HY|pVhjkXLc47W0KB zhv&74H%~RdXX}vf06Aao&f=;jMMw93;S5d|^!0R_A2~TD>E;Y&-E0p!AJ^@=1EYU_ zb1(`A*Z$_!OHU)5o`S|PoM_6HxX$)g<=)eH91wSt?#9y6yf04YY|)xmnw+wy_OSWp zG(23eXl{o8WKTDklp&2GWiwn~>(da#UAW9Hu*^>xd+io4{l2;BZY|NpMG?rSU92VN zT!|h(vw{6q-ljCX= zukyonKN-T%+!W!loxb*)_&mECIZAR3BQ#&0{K33AHOLyNNDqr!jB@17(4v}bo^v{T z1cAA^CPqB=7cG^YYdLOqrwtryPiJWtyIbEgWpXORe%Rw`?dDy6A3bMqzkPoxGgXRJ zicQ6RHTUFxIX|lhe!j9Bv(DwX8_u1yeaM6NI_4Inu6o#wrn||(8QsCN?b^6&mfmY; zC`L1ryVd=EsDdIIbUt>b$>m&4R)yjw+G(^~v;E$V>xHO#)%q-$su%yLdn^r$u9s{B zmdRmqT%0$UN^JEU-_J^;OXYtw4Ns#HfB9ha9G>gB`7}*1D6@SP>3-ZL_uCokS}|{* zaCE-kEYEvs{`r9S(II-JyFOoRCz8G98)a^8P@|nMyJbI33ja8%sL>86KM?>kh_oyLvhWPkZz{y+-HEzX#@F?hk(@gpAJSS=b(H zBTSq!A7AG_OJ}R&zILZgAg%c6;gko~uXI1vEoBPBMK>>zLzMBOL@@BQ+4eBq?mDSd zQ=9kuwk+`#Uedko<0qTX=XqMs(0FdR_bO6m6UUqe%gf!u{C1jU<6&iuAh#;x6_o12 zS~Yw+mgORU^ug}BoZWxqlPw4HFp+2DbaP!C`P2g3(xWQN2kk1}WPeQEsx$hMdQ|e< z?b6#jlhrJVpD+mx5T+3Sjc2@vu1Zmll|o>_o^7%1-ax&HSwMb*Qb-csd$}bXbPcb{z>r-uTe%SG2iZH~rJT-xD@_Uafn4m!IY^Cdp&2%}js$5_C_mn5a(c8ES&E^YXYq z<$3TpPG8+lo3P|gokucYLfp*UoXlzg)^j7;jJfQO!WFqlJCBX*qgg6vfN(uZWn;QR z);56=IAY+P*3S0Y2ljInxuJJVVWSv+QX1+owU&Da)&A(Q!*aS=pRD|~v%GrU-fzjZ z-!33KIT3#eiHy!+(QGzNcib-VA$7n(+i&yDV}I@AS+_CAN9#4|npM59PGvWZGMG%r z=5@{$R!JR*a*OBlNiquS={Py+-Hd9g;T=&+{Qin=dnIH`9DCPp6^`hIkNsiG9&0IH zF2=_8xvQ#GIPwy*+MIXG)*z8o#6|d$*#`L!KeK=JpubRThm(p=_2UK?*obSRKeqdE zTi>+i)NGvSDo;wRlOT>#yuEmpGcBZzf7)t%8&zvfT1ShubMw!YI*a73`>-qDiy4|< zhXJ6Al^gS0IC(81XOHjnwTN7|onX(`3M~<flGB&4$oPG_oMorBwDKbkJpi-)3kq}`q|r@ti!(5)Z>Y<h*L@hNWGLKCD7lg~wv$(H8>iVB?#6*LqiM126;m1o z*zD8X+s~%)rO8ic^=wr0)hf=(u(~GQWLkPkymyADpZ0e(o1N3Cwg{qoWzYPVYNvmz zL3K7KvcQjPo{mm#Y=nK3J`&aLpF>tYjxySIovXrhXLUoC&#$_-E9aAt->-^1*_PQ( zE^BS}xa|hd3?)cYL)tH3$!?aU4$Jla^n|^uE@z9?_^_?>C3}qG!@g`D&zAb?ie=S###eufKOKL>R-5*`o?9>H1ctG*&DKG|B;|5Y)oOQG zpJx4H-Er+TRwSN!n>O^SM|hs+=an`It!_S^A!#wL(C%>ss6U*xH9ij19m10%?P9PP z??8;Jyc<=wQ8qY<+0usJuACg`U23c2xk_@yalmUX?ZV2`o0n3B>7~q#(>#B>-1x-E z$rX;MJIyyM9C?fcizSIxksimp;&zYvbzRtKHeQD;SsdzB)f}YzyfxBcx}VM_)nenK zml70LRIRMwaU(BEA4u2X&YWszDGXN3%vKs)cb6kEPSQ@DUdQklNu!Gy0_Td$#%?dV zefQ^*>TO8)vr&?X;de$Cu+EH+lq@p*rYgYM&>lo%ix9vKM!6Z*La-?s(AMcB}O+v2YjeXLVEP{$sIi z2D{iLcMjU~N^O^`2Z6iS)VJBG4Z%dE&u3+ftAanHde*Pc>$R*(v{HZOhskMKufyw8 zE5IsQxsSDO(sh2F@8{`iS85g!(f=zE$!ngtxdg)q#0^x1#x$<#5Uio2quKy?#R zH9y>or!F)_0#_SM3mukldq@1lm=k7!$Y4`^7Q+v7dmQG&uy(h zVh|(yldOwy$TBQ6X$O30ud={feK176u>OGsk@?5(W0c$BD9(Oe6r3lzt0#)|uWw(L zUk-;RRCI}?si=S2MC`1QCtx&n+yE40J zyPm50X>TL4oW|HgSr!@QIy)qg8Txsq@C7BbEIB?*hg|c4abyLb9(mpMZfJT%i?{3? zY&WH@f}FnQIou#kuYfZ@8!6hBfEeYvol7Fuu)43Oq$q!Xi^Ld(bb%Dbm-P-4O~AcS zh;IuwScO{#X!;b>6?rb3u>s0u!}kDsIR1`Fn#t)6C+zWSgubt1z&CSamAUCE5aFA* z!1-p!k%jV9Z9|LM2id9E^SC}EKC6O0s_o{>6RlBn=|h)wCNczrpNRa-+H-#dTt|E{5~WLR(rgR7cXXAYU5Qo#*jg1ODlT=X|=E|CTY~oUkZXU*IVU~*e7|A{$Q8y z@1TDzBUL{6@x-%4OfE=dmZ%3onvP>(${ixu(|d=F;|}ezS2T%=Nx07 zJ~*X0-|!L3!vc44L^|~dZl}ebQJ~JfOkcrpW!D2a;Na5U3U(#&;@IS&2uGw8FO1@G z+MCj;D~sz=Ls<49FUob0oOanmErNu|U(kOelHl7~AjH-$$%DA+S(!74yji~Evcc5U zLF`K}Qby7smBf)~Y)OOk8c7aYnOoM@*$-<;TjoOIQKM@L_AhjXGaBM15jy} zv@#l-f8+nkorm^HD7) zaI%sgk|OZ+xp{(d|70xJiZYm|jgCj_C{BQaRNdB@TrG&#q$@r>3iRv-&oHx-nN2Qq zh+zJU@U1vu1sLVx6vP7erHjtIk|gpe}2lUoZSkr(*-VZ#vzm1-6_D;$dm==_JcEzGckYrTDu1x zio6NeZAX5L-hO%eUXxQm6J8$$*`o7e6lFh$7|G_G|e0?@{<&Khr)ZDD=d zdXS&wB(e9)5?s8sLKu6|`EH8dPKyp$4nPYy3+Gv&a%bGOQ{xkdxK64VgHg!|I757~ zYS8nMFiy>u;SjR1+nl1>Ktz9f*G2$VK9}Q0;Rwow5#x5BR%f+XJ5VWUIX_3VzQ79Q zGxrLsKt#NVm{FIW)uT+ATQW(=ZZ8R&bH^e2HKWG)vw&%Pa5#9tWkSH6kb4^A-h<|L zC{0%rRhbi223Ji<81a~kuR$JB%m2K>u5#neqq=JS)B(jkoeFrJ+%10ttPUw&{+(%5 z0H+rmvpB`8H{x4M2Q~03`UJJjhX|-2NNs3~gXuOtAh{WaAXfJP8gxp%e>}YsQ+LG3 zhm>pNBZmE6}=Uk%nQ|hA9ae30g756R2xxO7z5-3{ z!CDNWaCBcx_M<|(o<&6&`foRt^BQr;!`{PkBRV*nSH8vH^A&$f#Q*f7<)d=H-yBYb z85!6>SIThv+l25R52pOj@99Cx9y{#u`TG4fw6wNsO#5XK%e{g6$Lqe>`Q?;du3{{A ztO<&ofv%m5>>dmUsm=ri+21y0PQyI)4K*s(Z@*9Nh$={?Dvj2W^x_Iq4ge%4x)cpn zI-|tu}HAyvA?QTs~@JGkE%++!#h7pmqD~| zlOZYOp8kOhOo(H*_?a=UswXu2@RFJWt}v2pmQEDQCxL9ffVGoUbcA#*@Pzs)j(c(A zq|mpKNpku5DJS6NE-*A$^t`*Zb2*`oP8c0MjLF>h9(sSQhxqOCL0pAHDpb>M3Y3Av z5s0FL|KkkZL;#k1Lp0iaau5daTQRL=z+HgXJ@Z_(HnEmd1kUmg1E#vQR!*mP$azIh zGjmi46G^E8zg6F0y;>xGP%q)vM=4l>UyG}=SA26l5&k)|uL{0WA)8o!adhUOYIk)d zT^XdU96f)TwQ>wBgme_l{(a@)>F2{(Z%3DQM}3-@$*$)L+=nPiX|(xVwZB$55tLtO zNFw)R2=l!%7cah$DI^qFIU(l<%!myZq=4f&%@Q3HDa7g^a)-~;5C|mnp?d~4gn^KC zj>~VD^z`;vue_Q(^U5s2>R|e_s&abyuVCC)D%gLLY$GC9DTVTRbUE@Tg%)7a5HwZ| z%-#u;FtXePZIJ_S?{@>zP?ZN&Hex%cm*y~)pcc?IHz0M6l4A>x2#RaG3fd>oXtgxKf{liu?+S_UUk_ExJKLJkPes2Q zl6imANW&8PosY;Hk{+gB|3jnbJa6gtmCP&gRZSAOeh9=xfi9Rd<*E3bO$cSC4GIbw zt<}p4v?<;Z0_Yp=PWs}gkSrKAur}|K=gKSVD~HjSUrbzPoz_oNfFiZn)>w41!_J4F zYwgjF!h>D+8F{jC7OQ;t7R7ZZSnMetdFp=@59ZGX&2>}kfKmEazf>2n$$%nu_qrY*j+C_-Rx?YFj1n}vX8fg%SMzqm()#d4f>;oZds#qN1rd>g*~7K6{ujAU{-ZiG zWL6TVW~!VsG0*NnfMz?@5cn99%pRyK`|HlXT2@b ztU*^6ONfla=C4NWLLW7CVKyvgP`g|@oW#i}BGJ@jE`WYz0vo6A6l7+<*%yBTOG1>e zwlgek>Ih0`FnlXXJZWhz=7>f^{2hC&& zw!`s`?<{V_twcq!A*`7Yf)D|6dV>;29Tx3R?vx(?zg>X1>*|dm&do^!zp_6kx4DA%0f@f7}kJWv?!-cb&Ec%FHNERiC!Sy5;pmQV9<@xNsF$3h_`tyFxr&`dDWp5UdZwr4M_kDgb@ngU; zBscEy{TrnCR~{+SR2xeqS3HcI`nA;QLF-P)v0|AJa*_cSF8B~jjG%ady-P#g(+F;0 zj9jEl%J-vRJaZHUS0}cvKK1iKE2q~hzLs1F;{NMEWlv45t+!LbF@~XS&Mry%7Y69d?Z#qr6)nT|J%h!`aGWkld3YJipwy7 zIqO`{>ZCltTg!`H-Hv+_$h)#_msy4cjwEEnXiRU|S?56yQulvHOipc8t6UO4pIMaY zO1*lWHgkQv8+X|&fknIf=Au7d>MZ9{J@})yccM9n5{NKRD_kqATkT6CzP?BirhOt6 zgQ@k>S>LR3`YC>f+2xMP^H-EhGm$7Y#(CoWpbjP;q!U8uI$krY^!$|z3*nINaM_q} zZe<&0*o4<)q9uQTI-cIy7L~05&9gRwTK62Y1JN4HYCd_On}1fwOK(Ts7#e9%XJzeK zgo4wieeBmlrRZ7+G#7PL*U1VVu9UNPdL)6dAmPXh5q+m3FX!^DTY(KJ<%cgrHcayc zdS+2m;N6Phk&AG}gqd_LCg?;=lOxVALQC_1(!RBGg< zA&PeEJeY`7iH|I8<8`KYy}_1twxj|=RMk%?lpe(x#OaKOHz2065P`AQHFkD|qDAKd zH&BFe`>g0tTt(WOC3JHY+TF-$qjF2sJC~7-0`aVIcHw;+M83bRPymN)h>D++ZA}n$ z83ha-?c09{JhcR4#}ycUdZ5ycx2$8WzHlP?iC)UYA%LGMto8ZZ&t#1TVm*vdG zgWBMPNk{0gvvk=K1OPcDC}>>zkI26qW2^AXLra}@tay^AoXSZG#cu|<*#Hd*jM3<| zD9R+0ZPlwfmUs*ZuDe>RMr968WsI@Fp5U<0QOnRa6iKWw05@;DroxXAr`s+8%_qAX`G5zd#%(mF%X z$F}5{C2?T0Vqt`4Kv~K9V_Xbb5yDv zaO{5$EsERPcLSPvyh~e^;^|@fE1YD*m<3L@jfjY4VS6YUz8-6!wjJ1nf!lPGJdN5eyf1yr4bQ>L0Gqp+NoO)>T4)$ zGg|B_B+I1%?&Yan`b$kT#HsF`fQE|KWD3&xH}U#y7{AaMqus{N^Q*Wn%tHpe&Pb#V z!#e<~3`70M!`e@E}QG)sSf zw#yVhz|3-?<%SmBB&zLBwHwj8bIBqt|Q2Q(Ag<(pTyI~5-u zc)!`s`<(|j{BHjC1YOpmP6g3sTsLw?e|2f38d&oQ33;HZ5NST2-Z#l$wM?`rU4FK2 zfrFRf{)iib#_OSpGn2uR`W1f|rL){a!vUWdD*Dn#V2{DWp%@;B%nc(W8EP*buS33x zew?YRsgE z{)*;!SvHH2;#9L!O+XCyE|F?~P?;u5rxFE31d`nb-IrWLud2EGH$Q)|GokYhd4y0B|)b1mOF!RC&Lf`pMrVf(bWFrMGD)X7dnF47HsuUfkmQ2T65961hbG}%F1 zCc>7e*^dtJMJHdk*_AKDxl{IhSX{nx&?eT^3(`r~$M|=CjDuTm;|}Z>y!f&a-2DLZ#h3$0->+n09o~GFLSAhEj%2 z4l?X?=KW;mu_)|F3|e<<&F$vG>+5Mhhpsr3f-dMqT#5tyi3Wcxz~gFj(R1|PWsg7@ z3EYh@?MYCJfO-SQUIs^8UBHcIi5wMTuO;>8q}FMFC3m-b>DV1%6Omx$H8h;G9J=tk;F zA>9)U1&M|Sry}j1VGC5-wELvmu&=TD1S7|5w+U;++`~^I-2p+v$#jX}hh(E6Ck0X_ z`1G|p!WvWv-$3dYzdG+->>H$FN+4vIccsYm8yt0KD}R6S`4Fo6sATM+f1*NE%~{LE zJCL%#(r|7p34Lv^DfSI6lC@Rpt{Ijs-aV)gaiyGJ{jtfB^h-v7+*b0{L32%~kb)td z0^=f`P|RWFVud6q<9jNKEN@!ya8>Bx4ij3954Lr25@LVd(sC5F>f@7sd(R7)G22ie zwc&QsL!W;65&=v&ueJ*-E2YYLj2MO9+vPtrbePPQ$6P zNDgzr@M}4gkg5W|yws@3Fdk5r6dV}Iwi5XjL%G68u?4oy1gz;{tOXpgqc8gwNZuYj z(b5}cdHSIJ483e_kZa5MK{$h1^v%(WTO^$xM>Kz^!{o@wU7e@_X`QA}s^eO)q+TtgAr0;3WHT*oEDed*7z$8A(r@gJ!^Gl;Snv>P#x zqv3zA0$2qw7yK<2(wo8rrTOqCE*g`?U*A$_LtQmD|LUA+6iuq*kZ&+n`1J|?!RCJ$&VIUEfSrhYR%L@(&UC`^t8M=CN;Bmda#IKu83o}j8eJZ`e{~dZ_o!J8TH>_V2)vVhPbm)JpP{Fi2rUPb$>;xQ93FoxHHuJdq3a5QhI;=4_=hEg$#caQw7D|dLOt0 zn;T@hQSsn7v|_Ti_%6Y<(FnfWWgj4U1g;mXEsguI$!+{Cew$ zUBhqZ41v+(TDU!ZuC|Y6z?qxsY@pUirmUFT3th9jvQJ4+_HZq$35)FYpZ+1zYSscmL$;6Q(WEJD~eV9p6pA2grtFh$f;A^ z#`{&Ipa8Y7^QW5%3zR^zz~0G70NCh0k2h6dLos>5h()^wi!Kwj-63JK`RP)e$zI~B zdtQhQm$ozpBE`;cW`%+03>wa{`O?iyfNqqvD*LRxiUS+izr%lZ-yv?sl6lX|+ZgKn zn2fBFGzh<%i#HnCMKYsLG*|uxi}ts{^?9M>r$pnS5D;rH4Y;K5z*PF_=}NJUlGDxcd8)RVMk35u3eHTlgwYAQUE1e*no10vr;iJhH?30 z!kf8RwW|xgpt*l4+OoPVQa|dKq`wJjE%go^0Bt0Wn9K0%Lp|%cG=!z)-C~eR}Gobo0}k+6TAp8SQjJMzEB%R(#1S$5=9 zjzaj@N)N?H!`LyT3gp~F9mgYbE~OahJL7i{!osx_z~68&{3*?7SZ@*(lQB9?gY8f{ z2o9=!>=_7ubPdK|(cpS=A-FJz7I8kK9v}kr04@zhq!;kMeX~!?lC4#5%x*iZs6YaI zCjKt~m{@)Muf5tadi-35nDN5>-FReHJI^TK33GS-x0x8t}w<_=P+q~ z4SDmCvi=$vUrHY=y#xrEhL|)A)q_85-b?(76tbEBdW6@_X*@ zWM6+zG3LbxIS9gY6k*l6QSWFtttfy3x|Dsk-xZ2`gndhd}Pre?dJcKW}AN2>5#hy#5pm`UUndQ!> z9iEN+k&JImC}%km*x|@!>rjcafX>CCMCI6h-&fza?*fbKP%_!uCnSD) zO&L0Lesw!|B|nPZ5i)0=U9ME0y)PE>ReHl7CNbaw6Z2U0OJw8yN@V0zdzxR1YgH2i zWuTZ@F2XcHo<0kHsExi(Lp<72M2mk3w#P*6xIzyEJ8d=mF$dNP4j#Y12f^LgYq-4I zXSWX6vzfPiO@7y+XsX~qA8Bqm4bfeNutPCUUyE1e1r#<+kf18nhA5?sXgf6!@Lg(t z&%J(d7U~*eMbA}O)ANKdaAsl{-;+eH%x(^eK%?uO@tk7hV(%feei`B1+=zcUUaAC< z8{BXa}6#~c*^|XVRK482uUNL z(nKMR!*^J1=oEJMCnl>y^CvXVBT6M8t3BN>I?{ugfuc%d=O&)F*Fv8xAa;XWKipfeF( zjRd*CsJ{?5dw~?Vb*9*&^&Xw(Dcc4!qx8FN@E+c-TvwhNHmmN~zA>tq-s~qJ#HTD4 zG(V>KHX2;J^T!$1N40=^sP+K(ImtEq^rI}9njsqdN;!&67dKVLcIJQ8P^*lXEmY-@ zIp-CnD5w6*|l?Eht;X3lyjm{UK0CQ1JX&*UCN{Cr(G$+8LNGBd( z8G$5}Kh0e)KMD2pfW&_ZkW&+#I4tRgXRvIVCDg`k(gDI_r%#MNW5spbTPor!@REZW z0Sf3!SrwyViXM*&sY9L=a?6jp*IWwM&g|PQpJ=?@5DJ|=W4D51q&(4%VWI_!!;F?gum=Ql4d+ZH<@W@u0`Pxhas>@m^!{GbNtJJ1 z8)?#RfNnLV)fJxNzaq2pB}7C4@T%yBc#|r_=YRqmLY-L=ohaFCm*+fxBiF>araYc{nE0q&D$Hfu8qo34t`l%Yh?n`z zHL>J2m?J~(QwM+41srm0kxCxyTWoWjxG^^H3N{|VR&f~h@9#Ym6LI|^eD_Z9)rBvd zUUUZmYFCJ6aS_GEd5B)UI--F?P}G-$PkdNJHSTdWo$A|tPB%F_{At68$lXGv_zi66 zJnPYekz@~5u9Y7x{W^C;I#KhxwI8%Dqyw;fByPU}$C4A@ z*=&Fl<3`nz-W;Hz0i_ZP7OI!i36|t14%}0%8vOj^UQ$_P?|+HF1AV}0yBy;__c8~{ z!|~Y{LkbL_E@&mf(;)!l63k~NSb;#1N5Pxe=mG(t=vF)6je0qCbL5Hn?MigiXFjrH zDtW^FB;J2oH4#WQHgBEb>l!7$$3mlTh0R8qE%ipukrtDRPXW~~nP4jWBD*jStRc9z z`z{Z9D;jW(@vTo;)gjtpLIBzfUj20HINQ}h@yY~{yG=6BAKys*F*mMd_g363Rz3!* z>TK)~Sc?t*Kzf>SeZuhNDx}PiuX|iw=89xTXH0)`qu9Nd$k;(PflKlgrz*oULk5Y; z7YDCX&mjT4D$r)501na*e)`FgYRSqE^tk#>Y|;c_F3hoG+f;xx7Bw4^&>M#=*Hz4= z1Xm^b(NZh7OWh7GE}ZUC1=HORB&KaUBS+7!m_Niv%iH~da*s?%cN3NmQCJ@sr7VX6 zIj?`(F32jG>y;H}?D{H}c|Zqg|48KH$*bUqaLRk8WKm!W=_8!~2Hw&m zsE8{{ma(dRxz05KGG7QUV+tOchu1${MXY~r*$LjP*sg$a+}24e@gZ|KT7+!F6`$!4 zPqu+ZFCKI7zll$$7>t!_T40n!^qT+=DhKF%%Fi7=QuLv|XS@(3%Zo^?L7O!{Oz}j5 zkJYDbbAxhucHMM=?hRdPg;#X^MM66TI!U$##Dw$;mLQ=ie8U4LAf&&nhIBbL1+0HI zGB0GPGub10v#|Whld{84;a?ti@HVSpXTJo`zp1(erF>4b`pXz0431gR1Sy35yt&<` z76)O`4WBIn-yJw+UP2GPF-_70HZyO(u6hcQJN0v3 z?P}DbVPcMwL?41+lASNK$K>->899H_eoxk|mFnv;#MNHYMN&tqGUhbUU~uwhP>5Ga zk{yofouWuOHOLw%|A_2@c2zK2a7ra`^FYS0*k^#c?+xChh1v~J`g%b2u?~$YYob}5 z4IagqnLHqC^TPA}UEEC{ZrNjSy_fyuRmmN^hFfJN?;U3ad{J_R1OzZ1Pz!${OkgZM zfRVQW-SWr8(9?v2$JKd$$q;I#N;GhV;0Z&l=%fh~o%M%R8 zs$r+wk4Z-BJ7ySd_ieO0XIPk9dsB2&*84N>0iDvm%OLQrgyofH2rO=tq?SMBX6R!| zZ5SmU14eREtdHqJU<*r|R~dgQuU+mc`LjHQB_j!&UXb|g^hSWOhh|j6QY1OMWiCa( zo2n;-a{H7E&f-rW=mUiQN_i1u6=_jFM*e*Vi}t;jx7wwm%3@V(wZ1mhQ^5w0C_ZQ_ zMq2(INS2paf5zk*Sm%gDybG+Zss%UTO&Ib5{`_s;$Q8zPuG#K1N5+4N!L1DZi`fgC z&w)$Pq}xz*cx=M~F!_&$5F<+63giS6S%*?PO@y8?o5PaO*krl)<|w%8n`z z=39b0UHNE7uOS=$c^BM=K55F9iXo$~!t+$Lt4`hdOp($gcm+`!7Z;x;2d;zky(L)h z?Ho((oZfyJqoSD{w%dPGB_B+OWOfVoH3u?UY6`v)ePo#EA`{x&Fi2!I>9J$k*pb{F zjz>jFgO0>+&>$Q%txC1!q*o~6H-ngG&JJj~Ib1OEZ-U4)qs_6IBj$OJgMaq2R&&78 z?XN|a63BQZWS$Pp1Ez$YL__MU&6k;!K0@?CE$A1F!| z`>)<;IB#Wl&_(gtMxnW>d+&V2Nof46o)={+pk@yqkl{JWaszr#`lb*gK#>us=9YdJc`Mn5xw-$I zLq2}^Ou^VHy4!!C?J7Yvk39`n1Xyzwn(HZhUG9}bjz#+RKRsG z+d97~6x9b(g-Z%zYnmp;B3^cyBZ9b>UG3Q(n2{yqIAZ9p&OxGw=Z8=X^BdH00)JA- zIL)A5zqad3@(X9%u^pR|W3FH!6RNzZdHg!knW^+(L*ai}Hq?SsjQ^TD^ulF?GYO>L z_WX&71q1G$XmvC)AYo#k?X! zpO}YizUqI}*ky&H+P;y1+*N1*6qV~kRM`K%@ia$MNwoE;m{Ws($ts9m+@GGN?lnI!k74O;$zYea^e}PB_e0n=6A^Ja~ zQeT9i1L^HCna`_BLKCaQ>6ln)%7(KeUaF&75E^XH5X+byLdto^j+W*DWjNO-!uGmIa2s9LX4h-Zf}e95S*gG;yh8CUU?8Pd6E z2}AG)ItQHHsTd$uc>Qz_1hT>5yHSz_!zd6ox;}2uo+xeR#Wkm(SztWTsN%4??s_mo z&P-D{yL(E1u=o@2t@^G*lU;WR#%E%7G{+}5s&1`a9@w8L>>d!|8?gZ{wspQ@4ae(+ zkh?26ESA6|U0yW}jj~Dmfdeld{w8}Y6K;@Ji` zEXJMP(x!JNU4|Sek2s3jtf{ziQ}10GT;BNx<1=)BW)1_j>4qY?6#=Jdg$MaWa=fE~ z+);s80wlWkAu9V?@#d%mp`t-mg9k|G?*js2gJiAa6w)__5X6B}-!Nad`HdllUp57E zm2xP3fh(m}ToaL}Y_+ZwZ0QaYl8Q01ICNJo^2{ItR6WK$1OW^B|}O zN@pj3VaYW>igKKg^1LGR4yQ;dWh99K5YNP}8mCJusJ~1T;6X^PDQ~4nWa2@&@iJOb zDSjS}xCJ$9-bfzzeG+f+f~)rS_*=lwv_jWtC|v^UjGyJj>eH8a7~k|ZVTW0nr}KI* z+i5A8o6mlpK>OJwDQ+P(=9=h>%-~{k_JCl2{hiCC$MjjUQlkC9)+!!u;G=DznJ%5I_VF%S>?nJ8Tfr{ zB7isaAV6X0yJ;iYg!}Zy((2A1=$#b-N3AjGc4&_NtR~}!&cy*zY1%mib)l0dS-fmipQ%1tqj_-$mgf7ck z`UVh-_<0Uv06_Vk2kqc9v!7G37*S6SG#LQ4IR7z#^dI( zj56OPG_L6^G7Uof{1{0EG~iGCo$>aa@s3c=1#3u&*R(8DhffQ%(@EOll|a7!3n^kl$G0r~2NV19oR{ zR4BmsG&&p~P&yWgR3h^ehsrvj60Mp<&Y-Be<^BMf@PR#VInwPs`zsEA%rO#aSx5C= zh=A)s?B%>KBxi111{L3eoQ8~_I|536k`UF~cMT35o+P&;nG+=?2tQ40$Vv*={@9`Z z$N<${{plcPQq1PQhJL=aWn@@pcqX|z$}?bPyhujH4o!t$i;@kp@d?}L&SiB}&33oP zLZxv7!DxM1P5EpL;#3lU%y&qIje}6=N8Pd>+T%~)j(yE|aP8I!umMCb#Y)%xM!j(l z?^k?Wt-8+z65361#VwP&=+s?3Emi(puY!JE8;pbTwdBLF##`?5ELG;EzVJ2ePmq zfQ~rm>gk2_Tioglm+km?(=@I$`iUR>{=VKnywvtoG$M7dOckP?7%aP@=PJjc2y(gG zVKeC{G+9v-6>~*@Hm(3$K+%E9eOUiP(+?E(eplMa9X<`8R5y)!Hjx!pw$n@j~pkUVinjc|B z+AbUp2c4T%+Pml2)jZoWlFJ-7s)%JT#OxLP79^Vk0NRLu%D;T-U)toAv>?CN6<*Ee zGw{ue(R(uV2BVZ(p-I|h-{q5Tz`r)jtU=(pE3cu&`Q6{Nd5|hzqt0t)$ zGh@BQrg#PDPrTUC#!oE}IR?2OFrr>GID;+S*pDFVoz$ha0H+C0@+g>qP#+un2uwpX zg@Qt5Lgp)f6^rQNuwL8LY%3zZiNYXzKMPu-sR5%o$&haPmrF10&G|?m#2du>S+T?1 z0s?|I4#ou{)7)c7oS4+OQyLxeSZ-AL%y`j~;C$U9bMa#%Wu9?Gm9zLzBm`^OQ%*?v z4T<#Va9PXleFk7jQS=NH9!&|Cgo8AnqOwW5FHKB;;Zd|GRsudi`z!9e@2DF4?{7j> zg}_Pv0VXKfCJ0!_0pOS0>rnL;z~8ykRta$;UobOTNcxJha}lTj2Z-`q1R^Mfm7u|K z<@CQf`;sk3b#2=Za)ZLyhNeVaBWmbTnHp@cP4V?_;E2q<&pY>aZgwIEVF%LED(0Mm zrB>O0&%3ll^wtkL#fQQOx}ZZbmddoPerCzJtx#H-dXuTMRqlMg!;I4h{iPcX3O*|E z2`j&iD^%YX@>xFKsac(0ac1uwE^{wK5RCph`NJ4pubuY>ukvm=_(TZhMSo1=-JfUy zTkb(~Zl#61s1A|61EL97d_=4Ebd5`NZoPMZ;?U-OA}&`(olY8V;+X!BS8*LKiT=v# z=9D=1O40@0D%Tf5o5~x@MHp*pZerLK@|Q`G<9W3@j}ay>Wp!pavBJg}Pdruc0#RX5 zK7m1$@AE3is<}8qGp4Vy%PVK0X#LqvzC-XvIN|Fk71caZZ(mw2+Ew2|YSg>Sw&qQL z?5R_1qz}jHWBz%IlPNzzVcPgQsC;R;C-b!-$4NfjY~3N#=vGY0i$k2GSmE>b5wmv# zNu!bs?-TA_5)z3k+eN6n0zM?mC-!jlp?1~Sq4n)#`MoJbqkfApmZB+U0b-;MSrwTKJ$S$YY^s;XxXPgI-i^w`AvX}d{f*E91c;IwSa zz~|XShMW@AH14j|wnDtzlShq0XgKsJlWf6BiKt{)9#0qjc`bLBcV6d!PCVCt!k2-a z4N)~&Dg8(vka(Z%=3Vv|d7YRtl-*sNd775`puhIFH{zaaZ9bp6p0gH~xB%-~=VGw^ zRkNYH58(P(;PV>!@}(JF*H+8$mgih@!>Ep|J2(CeA%QmqXSy3B2;)|X+U=vNI(C2R zfOw&1Ta#K;ZcM$4eiJeC(9NlTuwW(!DjRjk_-x@>4&$ADUpL?pO)doC&`t7%nuAy- zJHO)Ta(kAdrixD?uRof@rJBUOMOhLyj6Kav6{Q~8+>J&cckN~PXuG6d@=ESWI@WPT zyv$3o3bR)B@hj&NX2?u#x7Yjd^q+ecXgCpGU(=OA&Anr%=cQ%D;`2U#C2c26lo{d(%M@C4lJX}(&lW;vQB zam1Mi^(7InOc5i$$FV|xmof@}af?*JllpPPG=6|?#Wu6c#WN-Nu zSK4QnxVJ0Oj9MstQVdr<@beOA#o4Bw2`|^x=@hh8ceFg$+Q*#1YnpCLI6VoqOJjoZ z<-OG0>iOdFcwW11$kXB>8jT;`Q~jptv~5y-e@$#urFBa(^$g;kzg)?51d z;h<3KZVv+IUWtdlhVh&j>H}@(>Z(42^u}!Ql8OCixD46Z^~U5lDf*@9wrLL4^I_f- z4)J7dYWxQAHh!0>%%94*x5<3Ey&vo~VqK%UE%YL2&&RTV)S4<}oXwn3eyZDmJl{k+ zY$@7~28YG$`rehdAq*!+_+-UowNSL0_no-5T6#5-fD84YWXC;7Pt!dt9A;yJo2kSJ zzpQ?A%c{?s7)DcCZ91yaJndFu*Ve{i3rclH~vhbBO^ZxHtJ=K zwmrq8>KE0QmK*?)t}bfg$PY6U*_ZelLLp5YMCop-y$DvKx3$#(SEG7NFNj}4u}{a_ z@cIzvT6|QNABxwDQ*`3_!uPf%^frUGCn|WJZFOFMO0-sPUMii7T18m~wk7Zu#c&Gt_JszAA=VOneBJ9pxy?2HAUZlC%A$!*s`A0 z=egJX>zgbhnO#e-M);To&z@nOq9Q-cO_BIZh%d$E{bDX)${j}C#=#EWgx%$8m1@L5 zyfL8B!iP>aM|tvcd5*O~ea}NM+{N9(=#cDxalj@m=#;zx>4`CJxp{qmit0M{CrgVH zVb`wLl{7DGK#*_C%L3}1HulYWCCe!?V%m@h`{nvwz}4|Hx>fJQr$X^_a@h6KUPZjR z5?)5TzP{oN6@ya|%8qtF0fCZ2%DcQ6%ox|MNIW+Ijxyk+nj(TNurMQrDUa3XbdKkL z58tQ*CVLieAikH>IczwU6{lzH5naV_SwiK3q^gBdRPpC--7ZC zakE5uOT5ODYkLAMTSL|wi996ooTn8y-y7T-yLeetBfp81ZMZ9Dt+YJMLxs{%h}JrN z+}?3&2+4CQC7-DXjcct6X!h0G^g6`Z1>-U!;`+H3gXvPkm@J@s`KTu2iHpU5#5b$& zGV3zbLXTD8aoeYcqXm$n;u>E;F`cKXBP6@rfas_hAMg5bVKW0t!SnWP;?8o5RQVL` z!<`{OVS5^l#JyYPWKP4(Oe>kXK~i(UXSUk;V@E+Yhs`ILF;7$2IYW#W8yxV$_`OekiUC+4ZHE(90m^(_cDGI13edY9PF8We85J?h( z=3EG_crwlp5bt`b%%qR=&oK3t&U+wqvC^Bb)pmR)jd z*gW-elg~2=yxcm+BQi963QL2ju=r(Y&sno*?l#sJ{{}MKYj5I0@jmX7?_&2t5a*sy zXzaAlBlE~e2t0Xeo>!)SmwS3*BHiHIT+k}fLW{Z%*iFXC&BpmpZ5r>I zlEjxZl=3AJK$0@8V2+bY6}h@7*%z^M$&dr)*0(ZM5^Vuw_oFJ}Z4~#v!=%XH>c* zPu=BnN}@NIor1jFajt!fcVvzG)tu6)6kGDlF(@-$G3Kd%C%B%_>5^mSTRI8hO^U26g<7gHRa z`8??%eiL7R*dv+);cUbKy>%ZQJHxBH^F%cVWL}WrGMs}goY3RbvTcM|X~DNoADzn$ zkIcK~2h*by+!vj;eO!_gvEsTEIQleR+I3R%PqpYhY$lFi)wsbp>b&fc^0px6JXp$v zSCzr}$J#0Ha0)ITco&+doM(1^Gc$L-yt(tEQ@DD6hrAO5=FH;8}J@Z75%WDedoBE(2=o2=+B&3 zLv4_M8Z6|*3t6=9$X=HG6y0Fl+#Xq68C10|ry{XQ>2VUc01_7zJ7lC$iJ ziKgY9I4j-dbv0N1GU*~sJ{s>a$?J9D+SPlQjB$?Mdr-Wv=hdB)=(?M?H_xt$a9w=w zeStG=+a0&A#%kp?pY!xZ@jUL%GfKLV!>MI|o^_<+%=M;xvY{BBPMqSC5xg;UYvrrZ zh-y%s$od zOBO<^lx|A}Sfy3CrF*l-BMoR)*-NH#%T{uTH$p^SxHd8|#8ljgM-GG~FO{gVIh{v~ zZLGqDPj&kS4096&J>AewXHsFRKFL^%0c6`T}yZ$=JVo%;jk075n_X!?&L+*F^ z%It2Xhi83@3tG`{6ibj}K7rO?JN+%GpQ!Xmr`$pjxk4sQ?U{rA8TeS)i0I9K4rlzd zlz7CyjOL*Wxkj6SC1I!YUMp=TY)DJTVuL?5QfZU^!r~mt5?;H{cBhkeLspd$(d+wm z(*2t*_2FsL&R(|nvr;4&qeRjgj2FAps^=RY)hWy zQ!bq6!1BB4#sLb173l0vHh2R?XTo9j`T^=ZjA_WuS3Ej>uwF4y2`RES%2n# z+pJdJjh}mzy|qm3K0IH4X()QLDYR#Es=IhGRmX)VhRGF|V+j1@?Gr|YT2*SsTJj)g zQ1L!qTa%h(64J1Ey>zZ3pKrhL1vLSE)=WlJs5pab>kX0TdwAPavQ>)w#B7tbNOG0J064E!Ko3s0#RKX2D&qxy4vdL;Qkdm`DvP;{G`eB$=!s|8 z2@kMD3)?d2Vim|<&|mP174R$f7**L_MoPaZ4)HMKD#Wd8HW7~Syos}n_FNI}cG_uH zLhp}{cLK{r4$S`1M8(ThE_V%gg;C0{YhcXrWKVA?VY{4rbtArzb)ae1vRsWwXC!AC zcjFZi?rm5uQxKwm%q2$-BWaB#1p0>4iOh6k(KY*h85V}L`qZT_{S6rJG!~vsx~@%^ z?+8KV(&D}wo6q-RX7PQVi(GwJ6-YCLsJ9tN4{5<$DB#C=!Af&~MjWJO^(iZ?bMjR5+erGjmh* zpnf-vvkMy=Rwsq6A@An`=RCcZz1vhQoS*vqm>gELr&^E0Hmk1WxZu*d1{d&Vqx+0Y zxGY~Q)1=9(lo!VH!+OEgK~!?x(-WN>XC+=Xc@Z~%-THBU?-wT#8Q$Ib*0U~KCT_)K zy*|ufbZRV|2K6~zE8aC9jCJX=2aH9ny+y#YR3azZM#ID_dK-Odv-8^Y9p+}~9 zrW)3JOj<_j`&+=-yANfjb?)BT3E$TsE^r-+wI;WpCQ!R*twi!kKKXHwi*P=w@Y`+LVuE(2rQ9 z>-F=zx&ZHNr7gU?i|y##`HDopZtA-m3ZL%4w!W0`Lq}WNK57rgtx`I#kwl=bxn&J? zqxW&(pefk{)~+37?QPd+Um3Ma@4;bk!X==8komkWQWb5jBGGTDl9_I}v+KYrv}Q5< zNvKSIzXta$rZq8>CHgIvPalsU{cwhPhdSORBK2^Bec5&Dc^BPAzCEpG^#Q-#9*=SF z6%LCg3@zfFbi?uO;;|<2RAI#zwf62IO*&9GxlVJzpZ9oq7LSXE`B!bCE@!KbV&mn1 zZ?kaoKIMld{kNUb^ET?QC|p5?YuEN(MYmp(wujghfuwA=z4DB+qn(T3X+Mpi0v_(Q zJ*GYP>b*S^ec|ZRda)?&@20Mfel%3j^E;|!7Bvr_Lpt|MCJ8c0f3W7 zx@D{->)EM);JP&g<>gt6ULw0Wr>fYX_uE9cDDq#m z=jAWG%JJBcuNU^7lW`@3p|dvc{G1b{_mtz>%Gg!%+i`TN-fY!bMVT3ACRX;RU0DWjFTyCEhgs84|A1w2%BOb}{~eF~1#{BAr=(^lEp+ zC^m5=z&I1ZaK66nPj}rE0Vi%R;_cYy8)37-mAGEf{lsQfzD{)fbZ`*_1}2Kkbk~QB zl^zLe?{M@yA?^J%haPBGI z)m~gxiOZV;@9QO@KX`hoC~8$jA9GI6Gh5i&r{@Ff zDyK%kN6hO*O;X>`cbXwuJGu$LMkgb3bI|F%Ed-_Zz8U@1c<<@^^QcLG&Q}Mucggd4 z8E&Pej!B8oGp_?yC>8c0165Z+pyOfmSj7fHu*Hu>usx0y@n;hIQTq_=yYSGraouxS z>7V+(=VmOtE4{vMJGzN2uW`CLiz=GI(@pcbK}nlr;L7Dyc~6A=m9!4Nrjt&R)2i8= zLe}9v*;33r2Yo${dUCOU+&5b_p=NBgG1QnEmE^VYD>ApGUg-&I+~bzuoBLo61~1({MG%8Q05!~-yA~~Oa-4#C&?q}?jzbmWVDbc%s;>0^u@LP`&nVFtr z|9m^%pg(#F6vjwwE^n4CKvlt?%cg#gE?Qr8l(?-V=R%aQ*QNsS2G`LRcUqw0!wS zvw%n~Z6Nntihbxe-hZV2PFWaix7v|bgM~EUU0^Wf4?l@Su8i5Lj>Nc(O`e~t4C;|< zY%CBiycD@$$v9~)QQPIWLnf}XG}QWB!)@wgM=#Hz6}kzmxo@Q-Tz;?IPET=CLd=7oCX%t8ll>8&z3r zoM*Fl6V&H_D+p6}t{xkmhZY=O-P?Oo>GjZlM1r0S?)E?P*phlx|BQ&+tS!c{4D7*X(?$DrxHp!oIu8N#>t&ABFw% z%o$tUk*=Sf?}3_^%{;3YHRs*;XPiDAICKurxG-bF-EL>fUdI>4^y(*%Yq3qEyYf~) zpP)EZGC>UPg-yj%(pqHl*ibpEsUF;5OU4gE z+~XIS#tm_~FmR!AGfZnj>2_hefN9S`TDUGP{0 z)tSkqib^^8B-yl3Q5LuO!-o>*U}>?h9w1@Z0<8b+wenxBR$i~aomK$g4*|gZwg*{Y z=@R-!Z&S**WB|DA`^!i?I;U{oPALF@bNp>5VmJAKN-jmLg#fT02PFe^Jis9h;33?FGZyV;b5NC`vQa8BP2|Qh!dn11`}_Sx{B}b5c0K_B;O_VP z@Erw27bOHE9OFOk2kcWOGJ7uU0{@741gO@HvlrgFF<}FT7{*|>h(+- zT(k)l66H$g;G*}2i%5N!13@}O6x15L_vAOgODi4-6~Yx3;R=u$Y{Xd*(!IccMya?( z$GB9G0&rQ>GXNtHe(s||0FL{($`*Zh;f2cc;jRx#&WIl2ApV(RjF0>ovbc z11uw7fYGj(I%&A5mp54yYy_u;znEeD1h^f*?#1dbYQ&OKMK@4AJl;ambiA(33|^V^ zw!GV%zIa3oG#*eY6xq~i^P-DhVSswE&Uz5&D_D-M7mJho;32SWDx#EsHC$u$Jy_~3 z*aByvbO*1EozH}G^)$AZuDSb(JZ~St#b;NVVYe%}*Z^E^zukL)2SgkwH2YeB3P7^M zBPf2>1^>?5MiL~;092!`0PhY`7m9F1GAhgWVQ<8W7#_i3(P{t@fRnBQ#Bb{2`AYrA ze$iP#6ff8)6iOlO*u@urRzL8z_9m?|FF5MVRiPG9CovLYN~m4q9kIDl(n%VN^@4E$zaV2p8c+khTnyp?3zQ1kFaCWT%WI8&71L zt`J*3&Du*88~3HEY=t^H1W;gFrx(CI)-Qqfo%iA$}D13W{?EJi?^fTHT^0URR8 zRNd7J%xmF)5zn!r!=N#^qRQSXWcd4czToJ*#VSC005~4=)j`Q`>Z%V4ihRLFyWT{* zv3ix^>YLE$o6&gRoT89C3mkwn&$wa|Ea$**OYB&j{39B0K zRljsJ*AL}=Riahlg*9Tqa#_zM6{myZ%R72VaLgn(+K_-FWP3g+k-(Pdn174yG|Mj1 z2w3l!{~aFl-@H($B3}SeBiGsaciy+RkodHo^(&M z83~>u7Akn&BqZ_NLJHVx;jONBfkSZ)GavSUa4?)_VJTGr(uX~)um3N+Bs;P%*x&hv zziaZJw)wG#4N6pCQ)#eGC+6{l+7KEM0eUV~Z*Gp8% zu&pk8uqBUuH(xjoV}RU(`P=F3b5npssNPvvYWM5b4l&JWiup) zD#!RfJ9B1li$OH+o?pHa`l^COhgk0w79ZgQ%?OE#r{Qo7vcCV#nh|Am2d)JK(;DblBcOB{W8g=LxI# zbA8YLjYGqyO-<@bcu$QYjx} z9)4ot8!M#oxAtV4ut#U!%|f_Dwg9v6z~l$+0_}a2*c?hM5eaj>BzXHV5H1;PlJU4l z7&SF^yC0X_4w2vXiAtIJQu3zj3GCpNoV2A48LVm z+Zow|XDH~)C;Gp3*rOjGtfNN$#v8>R_*&Vc^S|x!t#doY>Qz=8b=g=wN%+Ag9bBXE zTL*q@D#0crjV&5~v{G*51Bcp(tDj46;?iUhe#S*FNoOMuE~{tO{Hm~XJ41Bk$^|$0 zxX8FJ6`d7?I2CZVcbDv?+Qllu8q$ubM{BeM9&GdqQ4vR*CjhjRGQXT4X@3O1+(aH9 zR95g&yR?>w9#Tm^6b^Wn-x`XoBW?u*f>dq!Jfm%3P;6O$^Lbqr#Z2G?Uo;7O1T|d; zonN!-a+P=Fi@-o;uQ#Z|51c4rC%}?O?Eu#v`F#P)w@!ve^TXo=^o@;PS-v^!@Gld5 zvHCZ+9X2(=H%7jg<}55fJntu7A`-Z5T#sd7mUIx+qo+aHQR}i2ylmE1)LZp&5OnLr zLBgx6fkQQa`Y2O%&o0Ala7vR45u$R!!Jiqb%6YIgx9RB!`Uc)7ll>L{Z z{^oXg%v4AY7c4h;kF14b=c4xTiy4xX@4^}!Q>9fbnQdS%OgeEaVj{?yaIHLA({ zUrh9Wr>_0=h2#2{zi-*MJ_|_crCo_tq_-@{RL(IMq0VToYDJ6JKt=jRzc19-3!Ca) zc9pC%$puI%O$bUP@pg1m*a+@!S#ewzfvOc*Z^pTj-Xu<+92@uea=lpRfj6tyz!E|= zh=yfyi(}R68?N&R?1wI_?Y*=Qe+D*2;_SJ9RA(R@IN0F%pLIS9RVMw$aRI)y=?|Xx z`tfmn_?s&apE&ZsPyOzdjgr22Z2R-MZ+-anmoG>Ci4P!>T`jjZSWM9dnX;it^GW2i zmC99fEU*io@@1mf?WFF@s?;c#N$Oo3~^qTAg)1rMMtjm2XSXD}^;|&Y2m}H}4Sb(2dx3o^3^kdb%Nl-v(xcd8c23+^?vK~kDpkjd|;ttuJ4268vppqj}~_L z*^xu4=!c7J^7p!NXdsQgStTOA7<-dN_{~2;{b>DM-qBp&v&mNSO|)$Yg!jj(L4B3Ombeyr_>uhcinczZ|OPmicF1 zD3T66?z>KZ*ZsizN0TmpB>w6Tf9fT9;I41}{Lw=OrOw`8^()deWE9`m+xY?lDR!g{cO;q0@7JP-h2HD9-# zQvK>U=)m^B*xm%0Kz?i>d#?XkR>59DZOd2WQIOVAE^3 zMl3aZ7Zr_`vr-)-8R%8KAwmz=8pOfgkL{WXxN3Q7gjX2T<7u>X#Unisz*Tg;*IT$4 zFKKHXuF4z{rl**HjCEsKe69vTw3i(4SIF)N3#u{*&$%jBcqBmH69c?6I$yG!yJ&F@CYUR_cHJ=SNE|e>u-D=KR6< zUmp6kt#riWfosYm?mSN_k9f@Z^=2SgFBiTnMX5$H;(6Il#!ZHl%R^oi%h6|TUgZI9 zu@GOxb>UgtTVxy5{2G})Gru&aowx;+a9vap(V>8UmZuJNWS3*AdT3I(h%uX%)}W%K zFncw)z$R5Uczj8g;*xXam?^LgM4svu#PjUR7Xqs%u1I`tt__q087x0emV;BD^Rv@z zU~)^AvumXAAX31hKjcBM(FTE2QzlpOq^g|m{q7y(YFK-b1rVuVpA8OYX{D_+W787} zmW;W7wz=h?fOq6U4Cq{dG-L1d*~Q~02U=PKrK7HR=2uanc+pF@OXbfx1G;3N2nNSG zqlNnF#IW)X-o^j$hHUuy@+SYAKS_V|Hhul#fG__1FHF0=%a9QVEHmqHqSBKCAFo_L6GkCs&t_{E-a$aVLukb8gS-WqJ;WkKe7DTcQCkr z!@slr#}-~FmHH3o_}c6jd+rqX)vfCc|L%``bHQ&M{K+w2+)8os51#0OQ|iJtyCRwe z4pA|`>3XU_StJBK^()SNp{K=_eqgtivZ9*>gxo|FB~^leoRy#Oiy%8bk~zh6I1o4$ zDCANp`89LoszhArNf78PKsowiORk!$>FL~B~r=@L7EuS7-AY<5AV!XY$(u2PXu9u5QX4#U?1!w-K z%a*p=csqy@kHiG|B|AK{M{aNwMO>nFsVU=qU0u!Q) z2Zzjxcu^OO9Ld5iEWdtx(m5QdD-bdegRkGFhFyO5<9}+_S8Mv!P`>q!FgS|+r)K@; z&n1$CDm!$QU#xZPjsDSpw~m~U4gcD^DZu*Gz<%;+>DB+nWJYs0vWPY#r#RLvn*#+SgI8@QC43g`?Zqh}*sblf&n0F7T7-Q}IbAoI?_C}OLT zg-jJZaexl}V=v!d{lodi=YO@TAKm6pj{L^@p@&wUci8(E!~f{{|I)LJ^}AMI9(?S@ zc%x2BZso{lICS-Y8n_LdokpejUEx9x7(yLK$eS&rUOb0*bQMFUN&h$*k?@d)>aO6R z6752F?3M+dUO1$@Ji1H7*E*)UIHqj$tmBKxmE|P3tpUnK@;sRNp^*lO=_WP4iDwY+YY5gC%)(>wz>dcSs@%>IF z{oTht>dc}Sg)?qCQ6@s`JGq30BdR=v#f<171rQ;@CUaI%-$)gU^$(|O9S*uW4R z-O(DNaaa7vH}R2g)>8c(ug66P-}VVZ`q`g>|H4keFZcLe`#)Umhrj*g^5|@p`hgqR z-~EK(&vQ9H`&i$2Ie|)-er)#lxd!4F=l#`lfWv5UNtYV0GvX?jyuUaKY+0?H1s-Ko z&{B(k8Rdk&GZyDUQrS_=BD%I~`lUA|C2>nBSAdafy-@LZCMB<{BYGCrMGzLG3wZe~ zS+F^a%ua+2q%GUTaukh{ip7qf3`pFSl~A>Io~_0^@pWahPK#O2*tQrK1V_|7NzZfJveSLC0>x?b+OX#gJy@$A@d|qp9epV z7nVKtC+^}}`z{q7`xE^CZo@x(_^Yc5)$j8G-+21f(tq}A{%8Xx_{T3vyJZz)#_Y~$NLFKGz@;%adg`VYlJrpzx2r1Sz^(wOV+T3`5!o?cInF}-`vgOF1hHYi)z@@rq{eNLAhG6Xn zY{`bTW64nK!vf2w$|%Vyb&+0uce;|@D#t9nDq_1*N)(t=#LOYo-Qqn-f>tqH=}NF3 zZ_D&<1*u4SmXqO??LOJwtDt@sRb!SSQnJ=0ekyHiiTgvDun}d}8TX&{{94I>$WPvW zlqEiA7uvI5xuhRoUBY@7fAz`d8!O?lcAAZK2I3fh=X1$xfyTJksDorC**~A6h-$&{*o{=~EzlT^calwT#r;jp zD%df+i4$N;7@mG0laq>`fOAoQvT;~gsG0_TV9-WS;gjJhCbdEm4=Xx4;_9e*bo?M5 zU|BFS`_1)hl<~ZRb>RB-ANc$yu8~H+b0=SV?7p$hQP+@PIs@p--HQnRadn91UCfo)He6esapi!e8oJ*CuTZ8cwWj@l(D|T zn*Y>-XjU#MKXJmZpYln6|E*@{cOBK|Tif$pC&oYZ8KaKpv9&LpFzYSSzk`0=ZoT%c zp{>?Hb;U*Y0!CuuiY2}rBig5`+C$5RW{>ms8Q~feKc{ikR7F=KS%#Km?4W=Vqk`3J zPHw%SIe;7T?SkWL1 z9V6V1V_!IDasjUZFt_7fs*ZftrLNTn1?ov;O$@6fH_^NS9$>zfT+4)6`ptjAN;ree z;AQ+FC9 zn%)Ih@G?a4d?vi(13ZO=f4VswWnSl_$btmA3CD~V&wcAe@=k|uorw0QUX#~1{;b>k zl-p$9c7JmQSbHK7vlb*pO(cYh2AL+A$`eA4AdRWi7ejlK7CRzML`+V5Tm^^1idxS zi^xkC=2fn$i%9({jb-o2wrYD*S*L0(*}2`(IOFx`$A*G%fqjN%48}Nue|+jS*!%=@tGwu^L1}DR zitQSu&RCy=m@r}J!L%K(d*s3Ur@KDj&f@-Gt^5@qjwjc#NaD3--&)%)Mh&YMK1mG3 zmJcONC2J+g7*ZFzm8Es*t*M*N@w@PVkYf~6WhC$MU=i>med@jFLI&~}ul`d9pzsgVfB&1@=D%WcsRn)^@*0s}vJ1PF zS`?{AdU=xG>m$k&F$l9J%*4vb>};Pi^FZMOp{RIa<)SjE11U5S%65jDe|AB4ji)leYnYQBjeq*%DsrjQXxqtD8FMrj+ za9n@Mf6kXbMt-*YZ(a&LJ8Uc6CScain+I{^TAnP6y`2S$;*ripT|=PpRkiI%$!a*i z_~zuZRwYWW-gY6AHZIYh-i;%RkS%w2idpJl@ito=mxrvjm|JZ%{_)!kurP^{+Ziuj zFRYFAWzs>;*vP*lKHnJLRquXe?N?6lyDwnOf9vZRiA)IQd$BT*7hg%ezNuxVKrh)i z4G+3kGqa9;QYb8_?V}^MAz__BT~_Ua&i7r*HahBiUUv3|G2izTJ_J@7&tv|<3$ro) z=>IKU|C*oqj5>X;w~j3>?T;mn_4EFZzUKeG zYl#n+xzXNy*SdVo|7#7WqC9+}kT@>1q5A3f@+T<#PrJ}+JtO;Ai?Sn8*;UOL!KVUZ ztEg_Ownh5eH8E+C=~YY%>B2z^Lfz~Xf8yl160Ld5)=|AR#%{Bvb@KATuCB3` zB9aYyR~f1|@fFf}xe_1APkcaP4uNCGEC1B?lRihg?*HPc^e1-yso1PLITR*!Tl$_pltN>`t)0iSf<~#OHVHP?>dGwwEMsKRz34k zA4EUaX~V?J9(oby=HlHBY|WB zi!X-kDk!g9z3A?-T;080(&_=tF{#k2*cP&vO+6Ai>df=mz~Q;yyrG(uhg}iw@PJotz$=wE$#C3wt0t)e}SBxWqZSy+2f$5 zIlb)hK5q=8ORmC02_PXV^&N_f%5>buqWiIa%i{K+XIjGTGJ06)PyN>QxBrDCGWvYK z`82-rc%9M*^PjwK_)}klOiwZ}Fut^KWVCjoyZ8)1B$`WGEIq@0XL-0M46`=Ts!T;W zL4J{{7`wYI5>?Dne-r1asrBLAl$&?_X;&I+ExB$d+O#EVC;5E4o`xmDcVsTN!Vyjs zNdWy=4}{2%zxt=v=db?tuiDk$`Tue4Cdr7$9sOYblRrUHvwp=;rOpfr4J!W>JkxCv zcX%%C)nY0k7M#?si8MEU$S#~-B8{nM^>DaVtf*24cY)DEf8Y!^VtY;Pqcw}WH#?JHddADsJPV%Kga!M zn9xzL@7p^R@kZ&h2*fEn$ks!T#C}&uA)Qu4!QDg)tBvJTajSGlaX+q{1z5)?Z|k)W zW0ZTC3!AlUcC-eWf;-mti5~;L^;z7nL1S*|w_kX%z4%)%`YZnbOQ#P1Qp>da`Woqu z*LMB#e}#YLqV*z1!ZKU=jGDOb1%FoSGLXRS;)4N}X3fO8+FPp5T7DcKlSOtoREirw zCK487xJoKwnMU2(fSq1x{W9{V*3;rKiQ>*k0FGR+gHb$T=Wm#sTf=@>ob;{>QRkYp zqoJCoOPy^D-IexA)=svCxYJ8ZCZY3|dF?3Af6H$-1X6;g9lXc9Jga`yuc+u>@*kWA zqmBI0^IW&bTpswEYOFC)cuX))2fI_R?X*peTZMOVZJI20%xoPKwJUZS+>L4-I?x5U zvyfcSjF6*u5G!K!?OKI)PRsQ<8q&vZs0++blf%Y6Onv6kRRZu)Tn4WP@ZwB?U-pWQ zc9zFcerk6Atj)jv?>7dCf8y&L29*7kr-Lmz9;Mkx!q>k5IpbebRBeCKWTgw+`9UbMLJpBUV20AdN zyw39X{aH`@kTWa6ed_Q%c`6Ee*_BSa zfyHd7`2GOkejHogw~fEaaz7{sanPV^=1VduP3Ga^kxX8+NC;*t7PhPkL2ll9EBcx^h!`s+A+w`exU%%xfN3 zslRA)C9%r3+~O<0`g!d@lJUIfmD{jy9qzX#!RiCPw~*W2Uu!m(^0><9e=jv-{Pcn( zZ~4J`y5HmlLJQI{Cv{naSe?sLs>%vU)S=xe9>L{|J=C6sk;gQB(!Z6wWJo226EGCA z>~iqB(5$E-L+Ag-FShu8o-+XWf5GYc2Olnb@%$%Wr_myfl=q7gtqRai3>}qjgCUe% zpB+^Q@fI;U()WeMLPgBm4)KhBwljEwYWd3SFb)Pim7*X{8X8JlbOCf)uO0%jql3MWy{zKGjAUoLbOQF`o`N81`2&kdxd z{ZTv?P0d_hf1sU)DXZEL=haCmsJizhQIjjt66sFPBwJUz*63Qa2kLJ>)yp2LLiwy` zxTgz#`Z8v*Xiq1GU$37`UOx#s5?f>&Mvi+MukQwkSImA6c>VuR%KiWRq+HlGFrK9V zfPD772T+cD2dco&U74j|WbzUeKtqRBNL$!5g#u>sf0=;<0S|838<1n<0m`8!U@+YU zT;YJ9S5Wiw1Nc4T_cp&LneKuNf7&@b5<%!-4%;=@nXUqg(F5QL;u{Ghf#?h&y!bZ+J-Ic2z5YGaqkn8vGv5P3mxw z89b5ojR!FLM{s{%$!`L3yZTGNftn}@XJagvm|2&&tgM$RM` zjK^y^j=T;jNNO{Rp|UgffIM-#yaiomE9xjr!5YRLR949z)Cbq*IFv0Q7O7#$Z64h=SLAuCzl=9Kpl!Lx<9{h_IB45(_^v0w%6Fy^p{e-fcr4{R#C zku&~pLCM_6>5$jt)C9@~Y*2ddzYdmX!%@8&ZK>2S}q$TAVPSiQy6bauIw*nNKpc%!i!ye!7&BwWr z<5fmLrP@|9SkdGy&uNe^f8k;H8(HXDijhK1K*r#7 zK%5TYFry?g=!|3u#vT3g0;ntKIDIL4b&T^6pI0TXqXWpgXXoy39K?U_(}arm17kmb zXvF!K&J^OBfMPuBJ8u!(DVK?e{JD2I z{~|dyfBGoG@9*KIlSNzO<9BDcgUi434mH6UFd{{smy zes#sr9_TnHF{f8(46{z*^Xmo=^8m4F60CUbvqe?gGHYyCi=jtoY(JPLQNHb6+p>6U z*Sw)e{n&moO+vrz>$YXp*lrA6`Dgx}4qcELDp^VKvV%}OP*34<+nU(QT7-VdWfYTV z5Me7bAAnMqf9sg)*ZdvOxYOJtn^ldTrM6G4?~ZNvAr=(qJ7&re|@vD-6)10dt>{{#IWY`X&&Hh zVwANg&nU6JbG#aDZwC?X%%b78v@=b38!LEo{bh~%E6x6y&!CL`tQ>7QMjGWdwzA7n zCJQ{ua!XJaMSQO8jLwLp4&(d`f8oJi&Ju<5mGXezcn}5LP>E!esDsX7KPKQ#9$x=X z;YAa}fAnYEQ~sV`@NV>9jAI4JSpCB|zwMwrew;bNCiGMEHjvOL$g`E>mdah`4*xuU zt~+8sd$w}gnt8f54ui% ze+RW?3pd$)0M-v+upc~Pj5}00f2(g}u1`1~D^R&^2k+G(FU;G~uh9RFXNZ0wUq44> zCAX8Y>aqxv109s)U+__?XDfGY3lOy^Em<+zLq#v(^)sL4R_1&o{sjhinCor~nOoS} zh}(=v<^d+A$$1raR5UsmB5qf{b9Dd+f0gV-<&OKB9Tb0Q#GJ-Cc^ z>BTaBU{e_n)(>yh|FxVCQb{qai!v)mJu9#O74OQ*%<5A`I)uTlN-^fa^cz{TrO8PSL-^75~&9$8XQat+wbWM+!Xze@Y!w zxlhao>A+ImJk&;)(ao?M@yYeveNxO#)8Q zzFHrUGSVyKKFP}9>xa<}#@r{AzU{9LRo1JOxt&D%C@&t;zJ4qBvq`8R-X7o012X7u| z`Cfa&0PN@Yf?xmq{)N~0b@+wfC$H9kNLO=xSXbH#qr?wnZw=3aj_q` z@A%OduKSKZ7~!7Z@hc*;2y$ z{tcUPy#3#>@`%T8*l|52{FRRRi1+%3E?6D=9)9RT(=YrFO^Olk->~}F_ixyS)PU05RPzf(d@kLgb!eTt~xxn!kifoI9>(g zyzU?mejcnK9rG51U=C-DanN7)h}W$*f7&q*X}$5i91|;pjz8%zc=CD~ zi^p+g9_2v<)Bw^B-9Sy@kU<8D1@U+vz#&5=|IdTbK&tOOuh-4~N8U@ohQGd-$J>#~ z;bVqg9FFn)j!GZ_RP&?8Py9S~l|abvl?-+rQu%vge}*2QF*q_b@O^BD4z-{%a5MA< zRe~cRb;K*<_yj!uJ8*n0j+fm0y!XG;FBXA~{dF9l|55m8q-TacpjI5eaeNko7pH^M zK{h@I=)(IwG9+?*_rU2b{ifeBgP!B<0DsN`gZWlA}ulDf8E9j6ueyjn|mgh~9A3FZad&$4|JMxJVbNB%?i?@&P z6u$g8@-Z}A9-4o`kMeoYVm=Qy-X1eljPk+9rKKEx{c}G5 zkyrQ&pWrq4T;56dFX=8pvVP@L|Nf8X(MB6cxy>Q~++x+i+5SQ5)rt^19I)aLlU&e;O`dMg|X_elq!d;PsE(P<~FG2yy^+26HY) z4ud7gEo_13$@msJgt#7X86Q{5lj}O)ujzHj0wVJBX}3Jt7BJ*-z6=3{oFBrD^LGVN zh7P>`W5);vGlpkJWGETi<)(3{Me?163 zmTdiQx8ZmD^iO;CyWL@b_obNU<8^utx6}Xfzy4qUJO4_||90M|r~mWk>QdChe;9lJ zwEI7l$G&>L9{+7@X`e%V`j0E`qW=EUor)}f{)Z%r|E+zb|M9OMp5b4knf?0o%u(E^ z;}H$t?4QV|62|(c4gkMT|GktUf4cB`scemtj#uUxi(dAQ8JR)B%-7j*4)_zH63m@xRT`t?`Mr? zY^9?&+%Gpv@8#DiP=ovaP)+8i-8_9SAU>Z%=>@`j8_Z_&m9>}GY2#LHf9HsDyjtE1 zuT~r72EA3mO$T?Y+?|W4%$CpP+&!KzZo8W=o`M^hvM|-I=lw({b|vL_$@WL;UmrA@ zUH0gmt zJN8KFR|z>x&$ird7K@d9e@&EmwsM6v%PoJc7vPG?xi{{saK74|H}1{M@O^EEXS#g> zE*GAZ85ieBg&MVIIecHr=klVjj%}kX7bjL|j~GsYJ((=_o9Cy6N4p-#iqt3ZGbhRR zocZ0x_Kqp~OwO-~p!V}50oty`_4Ltw=G(XMK+bHtXYF7sKG~NDDdHpi1YBE* zR|_x*#(WDdG-l3J8$Wwb4%EkUR?x{sMsXDw)9B?srVF|1uhZ2o30tMkhU0!pE?fUD zlDQR%C4@1&nHQNc#R!di5fA}~pQ~7Yu%}KYuzAr=K zf%%EurnOcx?WG0kWD>oG7oBg0^H2)wth_7_v(pjQ(`|j+!)F3j+bW(@TNup!B3M)O z+_!9w*s-m4Zl?RJLc`};%mP`Th3vh#(!?YiFv zUbOXHg-S-Le{|Y=6-l>vX7+TkRlp_hyH4+=v>FVnH7@ME8>R!@-NpWnk8E?FrD;8B z=AKa9h2=szZ!TFqEqwLE!F_v}U%*8?M$%RD=; zywh>8(4<4%WxqxmUVZk#QrsIae0k3=-u!X7>DN7Ve}>($n0dF1m_#o8Eu9%d<$_^g zLj8|}`{u4B*?y_+qiIGOIz3hQW`{R+8H;(IKaFBb+es2VdM~IB@%k8=C*eBt)+C&R zu5BO2t9jyk9>}xbB-hJDK9EbXw*C9dE#fJzKrFzlZ2>PPKJ+V-(SYE zq}uZ$f7Yoa-gki}s{}13%Ve9c2c6kxgk{Z1)X606un4BFqRD!>CW6QgZ$ogdZ$pc! z)8Qp{wTlY%X}z6TljlLGJi@b@Y(i<>zzLg~MX%^o+491PTXs@EpGFGbd2YtZ1(G|; ze~0I!Hel6fhAgWhm2uN0p(ZYzH5Q`9;1Zc`8Z%&pWstyHxOsWM#SOYe0h>luV}< zTkcgK)Dr<1&eFa1fvB?zm6O%f;$8lik%1obI#UMtzHJt+_Xi{G^F>7#E?~ z!1Ne61!DO+KzHbPC7j`SSqp&?Z)8hDf4*jh4@t9itOwJ0I9Qc(yri!~g(J&1RY$y< zl`2M`OSKQAxnpj2<~j~gRIC<>JUde*&~U zYXhBK#k%TWTVb<`>EuwA)7|Zmo|EP#H&HIxDeE&->Lfo;#Ol1;%=XK?Ri|^|nih_^ zUOQ{Uza-vm%Pw~{sdPu3wa7i~p0(L{(`SF2r$HY2hu2XLo=g9FNWbxPM#c(=pf_y0Dt8f9e@F?uwfT zu{Ue(x57vcROyY^$y*4=^Xz`Ve}t&g!t{DtA-X7cueIUO%lhrAmsNx&6KdYQQo&Yy zj&RCa+uv^1QzuUH9;BzAb%!cgvpp5Mq8_L)41FLLd)tL2J011(OI>Eb%mGikzBFYJPv5}TY2 z70yN85{OAB^@yZ#cCoCORoz1)#_={F?R3l%Daf`~TE8?<%zBWEf6uvzQF!m%!#zg9 zKEtAjlxKs?15?!@$Hy%4Pnv#rg~bE6P8<7IYG3i@+!(c~wEci-n5$%(lWe%ePP4z| z>T=)Vsr{LE)X*Xt=aS?$iba<4pjk%PVc(JR{F-*z=OwacLo2jFTc($8e~Q`aZPDZQ zVp;R(uAGsRommt=e+2=(1zVb&@8u(p%KjNe&nL~3Y_o*b)W5GL16XPfb9b1gc`F`H zpF&?{JH}-LZ*r(x9ba~RUe^2S6Txw(wzL-O?mm@oJH6Vu3s7c@FcKH1-8A)fpChB^ z$3lxEQQkUD_wi&gsb7!jb?4ic(=M;?hhRFNRjd8w>{27Pe=n4sLR)e+$x^Tvizr#Q zWhy%Q^6?R4riP7J81jfPG9Cj7(O9BM%gsN+fNO6A)ma5 zx3NqOU2)?je_qZ%Au-X;-se$l4zt_sdPWvGtv{=i5uZu&oNj7lUVK8s>9Idts`|V< z?XOe&=*Q1Wt}x0S>R)GzIu=8mmk%}{S~R4a!)Lvh6uVNb4@h33NyXC_{FT{HOJS(J zbzd#=YVnd}Q+3tU=BMgLvt|-#i?eguKVAX6pR?(CfA)g+sXRKbxp~by?R0p*oiLaR z8~^p3zvs8;m}C-u+TBpIBRL4`-RnL7gz|AoQFjx%YcA+JPn6#)QS4IE&He1GVXFw0 znM9j2y<{Ddu9v$mZStkkE}8_%{UE?GPg?2Kd7YDDv{yVsz8r)dm6~H+l>TEEy=qqK zq)5(>e`i$Bo|6WnV?Mt{%jfIkdwwj_yMFnUUDm`gVVdOUMLk`?0bbKpc@jdE9iEfs zTHY7))eZTJ4A6o}eb(O=!}R=F3tC2Vh7_SY6&G{at@GJy(ls-)e6v7&KGWy)X#4BW zYnlpM>GGfpd@hq^a&eU1c8fmO&1rGSg%=gGe?;27rBmz2wY}ZNmvVVtyli3fuJd(s zFJH5LesT2Wq&-X~^Lg}GZ<{2W9ZP>+KaA5`spe@@PkRs-+V+*k)A?a3zL#P*g@t1( z&USMzG<89>C=bkMecjzoc^;_w#oBC@g;NXmyn7gTnO^eJJ!|BogwHDkhbX|w$9xao ze`~kx)(7GBF7zh79N+igh^E(mM~6$km}qM_it})JG|Xm)X`V&NiH0k6x-si^T_kZ~ zP2q}CYvnKRCmbwI@35ouqjqonBD7EE!Sr+A-JUjVar639S*-7#)jm3=-lO^R6H>D1 za+4n0dvXXq>Rx>Ix;}UM{<#y@kvM+_e=~;|>qD)FbMKYz?w45Gt{%t0J#?qqDzOr2 z`{^A?x~9HV2I{UqqeXN}U#%rA7PCw8hH&wGmpVEb=%PreA><32O0%w>2;_o4#UsBT zFJ)0I++~@6_&IvngWYi+OqL5%8T3iEb3Tz+o+VFwnPqO??d~Jf*T>%w^S&;ZfA86I z=w2(kTuzt}t{=s)cgnMMi6_TN-yLziY|h$kb5-tVn;d)9GoY;Ul0BPxr@+NOJ;Jl| zb|wEEOz`}p+%C;j-WNgD)#kg{wwe4KHZCqQaC$PopB-geAD5d~746zST5ZeMyciB(6#$?DB6b~cD55aAL#f;Dc@ZXwCPbiC)azs zH;QdAErAIa*^lW-b8K-|-_KW>9nrMuSTgu39kz$XzK@oV^(UDJV)vOT zuScYW#yyqd`*6C}eL1_&MR$8pq8S$V@dh>b$ha@*c7GB4tA1H!_Pk8dZfOpuE7^F# zsZ17UbrUnok3+I}MmMjPPS<;|T+nUKK5G^4bC&8?$tTUV(T-X8e+i1&yuF6@{(ii_ z?$%89gsFZ&<>iC*Pu1_H#py6P-1oJ4`rtMlE_;*i7Y}47ul2$!%DA_PxqlzY8Ewik zr+2p6i@_EW)qLG{Q>e52ZYvSpbZ&*n>%2TJ*FreZ)a&+(RorZM&mdD4a#4HRX>LX8 z;k9U#>Y?Xr8&g4fe~DNr^s_nOwckjySRF3Mx3gQ{GN0MkGL5utBHU(|^`%bi-6X-? zN?sb#_L;gm6m#n`TkI6kq3`Il_cxX2B-+aV*xJd2R_$hrCMRlpI$so5l?{zf`r~t( zQ9-xYf^F5qxpduRUxfZ7etU{pM>?d*GZpxZl1gXj9~kVZO^%M&f% z!MS{z!{)u83)UunCbpZ)*Y#O_AEYxf>f>y61I3$kjU(x6x7Zkdr@mJ5bLXB{ul4?V zPmX-YoN3!^e`vOA6$!;*8?A=*K{rz;**nhsyo@i?^`g_0$Mf1W;WYf5mSELIV!hoD zacY|?&(q_%y#~ihoS!!<*1c1HP`lqhg#L{6gYT*nCWh|RNhXe^bYget4n}e#El%X? zaBTJY6uA>Co-ZaBBBf<=SarF6-ozVU?lB z%hhG`z>m17_U>hVnofk-XLz8!X;q8R6|!bPo1@}h(cae2LpKSkqeRF==y7!=%FEO2 zXm@famLb`eEo~0d&3z?2m;0UmwyYDJr(<_ki=O?CI%ADFZ#9y;c=r0 zD{qTwBY~?Cz$=$6B9@vWpw}gzCp@dQzDwJIl)v zf7;AYCQ5OLcLLrYkNZ+3(P@9Wb;=AIb~VJW`3GFhJ2j17#ShtsN>1P(d zQC`e7#hu8X@QF0(-fXM)byWnD^HH0C>d3;m+$;~d*_QsUcuqP!V9w;4-?yURRYRFa zL4sKl9-6b;D)c1UR(i{$bmfrAa>iuGI3hZzzbp<_K;*BtIgNIWuoNbaX@ZE?bbuP>c zZFd*9<^lJHbCxE?VM$fgejc~roF&l^Q*<2!X{OJnmn@BDW+Yd~Qs}25fvZ?vRFZg)L$EG)=lx_`&eJjvH-)vU z#6o*_#!fY7?tR#u-V60(ZR)Tx7S1r+wBCHn+Ipv!)@!+abo%}h*9ZIc4hpLE+u*4h zOLuOp>c&#aMHNk)_hx@u=8uscb|l zmT}b&N99qz^ryIHb$ngFcg<8vpgiN(!+ttfe>W5Km$kFK**v#YoI|iE?8MO)^mwsv z;bC)EuWkJPC?-3&v-(BtwSqqW?R~x)IMc#czP8%T6QfmWJ++dl&!hA);S&ZpaS}ti zBz``#w8re{H6inv_F4C5e?}L8cQ>^(d^jqgl27GL`rc&SZI?RU{^g?7-xw!-x$K5Z zQ0&mFl(ol;eV)jB8Ah5mBBOW_?1JlAO8kTgar(CIyIMb7Cl9f<(xv)37U8p%V)u5I zs>kf4M!V@bUcIgu-IH=9>B=-EX}28q+H5yJ^dG@9UqjjjkJt6Oe_0pv=pK8ESt!n@ zlkh#i>eJ`2S{b_i*{0GoUN-M)y2&J{5Un=UQ0f28)_JU24|Lo0UEDJ=D5s`!j$65> zoO9OGoAY7&zi{0XNGw&Apb(21W71q1mvJ{cSQrVGKEPj~t~;O?d9Wdk%<5M5lTMW> z%wb~??n*vtv7}LGe?ImJU6#001$)8Z7e;jVUSl1o8SKU=$Cfx9>6}VcCX_alTUZZ* zhX*mG+6wMr!?hHMA9vZ{c<0_NZeX1((Y5OdJ&ThXpqMK%*JNNw z0MCvq?ODb#eLMA1NZo+BNXj?C0hEtxLvX7(;wloJ^RE&w1IY%^$7N@ghe0?;?)^eb z53;z^8m{HRf6B%{DWKd3fLF^wi4hfnHNC5$3P(|nzsP|lz;B9Be`WF%2(Ak#!I|vg zg~1SXJo+TN&F{TDaojp75E=LAgb7iUiD>AF03RXckDD31H#Y1|iQnEDBvK=EHSRC< zITUY?#UabuX_VrA2x+RVJL;_&WN(qyKS5x^X?eh{e}vDpWBw|I2U0U@iKSb{K_(18 zBXRZKu?jSi!a~WWXh8u#Z8;vh`vsN_SUi^$_NbGH2F-|1@~XpJqhrQV@r?prifA2b z!9oPLz&yEae^XvQu>dvnQ7+?5S*u^H8x)B+Bbs#K(6{jZ{iL6P9BUJa&7~i$YI(?2 zS~QrWeMDat+%ANv(1%^yjEzCz%A|a5XegM@4ammKZK+%ddPdhKwFa`ycQ3A(K zf2qDM+9u{fn}%UnBsj=XGqZjN|v5 z-O~GrX#+vNk|i$lPxc`ox74to^_{FKf6}Toy=z+t8(R;+ZYH|aBZ1<48jEB4DJ~__ zvu{Lgu(1&ZbamClu$8Y`#!X)!OH!a~jqHhDPR(+{V9nk+z&KNHZAwTf0|4* z@5b64Z6SO?p|IE@NWR(XmF$Q~VpH%QC&>|UtO8Xzt(PoBWs&bDS_WGRJ_@N0dRjK` z(Gmr9Y|sH;FyyX8dTuy9r(m+@S8gv9e$Y(Q@RMgtr&bP8%4i6L6^%Vw4xf#UgE^@q zHBE-(x20it@jq9?j|^LNoUkJsf5wBhP{yGLel~m zkbQYmrIT9`iN_JGj`F9Sp^?v5hPIPBhWd`m0@g#9kcNQBgh2__TYtpSf1bg`j-Lcu z`%FDYcuWzr9Zc&cW$K|GQ=wiqaxgzR0?M2EJv~apA^KYe9Hu?!UDqiq`4OD<={O%O zgRm%~>6S5;*C8t%fqmjZ#NLEmb9Fa~MSNV5H?Yjg<99{T(NbZhtec6D!fJ1HmSnly zTGiUklytpVLY7K`!?;Vye_(07yP*J78(W&N)%JuWUv493!fL9Od_-c?y& z)(BNT(ufP)l=|jC;Pmk`=3vI*sH#L+-tf{QOLsJZMQ3IZHuz!qY98*(_Gs3vQpsd| z+M?*5mmvH+<0P$@OxXQUXX^z2ama68>b1mlY!VAC!_We3R|W#@e>3e(cF)t26|>oi zzHL?kD)p&zpQv_We+sl$(b6>mz-?Bz2@@G34Rt2`&}hFWKY7}~4T{p2qCGIz54Y(O zpd=7Yi<%s7Goy0o~k11OTpTzTaD9)ClD zb(`y2;67#R16Y+#f4!Mrn=O}9`YkK3`FocJz!s~kfF>F14GtU}Q^hD)h_0ZYvR{!S zh#cjCAWsN8daeqB)dUU!?Dfgfzby+pXw^7>zMZne=~B=EvAlq!E-RT=uSprkNe@#H-1nvnB$RPeoLV3~=#+G@9 zLE#b^kRm#MZddmc=_JOZWhdYr6uLXyI~I#p3D+c-{t}tc2&Wy)@4mP`5HlQ*Z@&K2lY8Nix`;}Qe z*N5{Lf8>Ol$zJ&}R`NEP0h2a(s+7q=hjPw>^3+#kQ3e2PyOA5*_s^b5Gs(4Pjw*h z5fL;2T7`8xw)-t6-|CNkVEG=z^j{}3h%fM=qO^$O;TZ4G*YLRv%X%5$OY&oD1Rn~_TuW*2t+1o7O8222r;%9h8 zDSD2?A~^;9%sNsMj3%X6*3}(MUcXG=-ysl(T(9A{x~>6k`2)dzk}D+?icBtEi~y80>gT}~DkV<2^i1?sCy-dwMc+l-6%f0RT_ zCZu;%JljR2_^mbiV`R~d5B^8u>coXc1W&5hX8=J!zQ0MCD!}^Y-1KU~cW8<6GyCI% zL4?X_Zb!v@E$*{wT!IOTo9J5@qy?C53ZF615pL@iKWg?T2k7~k4IaVIFn`Yo_W>{Q zbn~LvMO%GRHaAy4j-`Koz>0X~<)v z)vR71fhKTo2P;HEnh}9hj|$6)qB|$A@-#t$Xb*};a{#srPjB~!sKt&70i>2hlTO*A zl@#ukL=Wh(c-(q?-P1&UCWL$g>TbP_uMk7`#EN+fSo)lL{3O*m zb?;{5Qs0G7xVE%}9 zi{i&nOGTqwgOj9UktagufxaSPR#|oFHjCwCKbVpV{Ir0s`z5l0{LlfuvbWRqMPuEhe3uv3A}Qb5xH@4>N2@jz9}ZhD>|Wa7ScLVew7Z` zEY6loel*=+i$&C`zKNYa?$BWxl1w5+qz817)8w&ms09y^9txAM!aI|JDrsU{;Ss22 zN$N2!t68a+G;2OlRDl&$RM6{!9kd&n=n^sAhBlK*gOVjFY))UW*kJf2}B$@boyE+yOGxGy;GoL;Y-5&5QKv0|%eUv7wFB=fZT z{Pipb*d1RL5w~9NMpgd1>j0If)r9{_L~L-f4l(!~0#26X?SCfnu_hmRTew?aocx2~ zW4+*ID3IGsGLq1~4f}EC_0=OfkQuLRr)hsgKRA^9A&LEn zS|<3Q1E&YGsDG&S5%`$Jw8`9d_QW49aD~NAh7~8oI%o)3KGQ@47dSP$7hwR;%_{o= zT@nwI!92}-jj#4xr#4_j0!5Rx`%G>rXcp58_9%mLdD{d5%`re4j^o00Je!8JsB100 zRoOEiA%k0nUU5SAQeIgKSK~;Wnn_ui{#FV8J4~NT)_-yo(a6e_XCZmnJ5=#*%CO?n zPYN`R8#AO!s2-RL=N7!hf$d~mCYX{Z5N{&G)+*0RNLZ5=?rI?Fd{hQTlxNU-sGU7V zGEuXc7ebBO;#FLP7Jo~Ejk}bm>Gd3~4MJ+JRIk_Fz^NrYfhM7SVCLtDkxjdjPAe+D zr}s-S6Mx|X1x~RY`sMX_6m9Z27*nle9N_tLlmOjCkm`g}H#yX%|BhI+n^NftpzuLB zl%9~ALfucY%ukXUl4nl3F*o*x{kJRF$=@+mL2O;Mbc$vJ!0xhpugNfNJ4WZX46y~l zUNsvy*)_|m;Q2&kL?A2TSUJ}ux`iDGY-FhVX@5<{zl@*8^$f%fd-e66Ded?m?J|Jt zxy4nb{!T{GrMkL{xjHe*18Jc+b>MYBoMi;9$TcO57AckT&CpD=Lv6XVbB3R=~>0!jms!Ncf)7E2aa!@m6EOrw3x5Y5-RF* zYJVGCn*j~kuqfLEtldVbh{zU_)60q@23gQI&ClezUf`Q+55TH2y=rgth%BA=N2`I|cDuuE-6-PH!GS%t%DF!;G(H;*feZODNp<-05Sg5opVfillV z+rVZpv^7=}2S}x&NC@f| z6_axEoM|a@rl#nm69kAsaVyiX2f#DE&_-NNu$U*`n0{65+T6U~+vvoPdMGp|?)DxE zO;-wFdyJ%*BR|B#%{bHnh2R7@tA5V{do=Z`Wl>geKHwQeQ;r&tt(Xi1TtC*Od4DLU zNt)}ibs>XfB_@yCOQKrKoWz(unTX7VDd=!w_`-^=M0sEadGz%3END{$W zT2ZVMgPQKI0aQW=Y}@S3YdjB-O6>U8Z7+n)29*+5pnOzkuBgYu822@Ov4aPpauH7|YcjftMsz(-DKuI?ukXwOra9T~ zJjc4U;o_xxOyo#T`~sgf`HsDh23aeHH$mt^4)j=M?nhDm-FL5wP-4mAbrWu=0&>a(uSDli8F>cBC$1w<~|^GI3by z&iH^6yE*<#m(>PzB>oGA3Ov%oWZ?n74*ag(l zg&dWD-vCg8gq;aKOA(r%*fBhlIuqP|QpTF?O_x^%L;g{a*eE&MF#G%yzA08m6!ClIdQIJy9JtfPYh4GI+9(U$WOjT)$({P)CHr-K*uwlnh;Fd5- zMWRD`$4VKZT?(GP`8Zq9$N_;ZY`*E zEfc)e05eSHC6**iyN_xu>S>Dfoou3dOg=_8F9+J}z_esh=y|QS1YQ16(UAT9sZj7OM#L*vXJNyH#qdyW`e&VX|HVt z8JUb7bE!D~>ix3_Z#%;Fv8#Xoc}Q0|4KUv^5DterSC8d7yQ-xEPtgw zL?@V&@LwMN)c>2?KV2~rzX|t-&76e7pO||8XBPQ;A?L`hWA>>8LnXs7D)m=X=>APg z2W9>T?-^qLMH7wx97V?8EO;)kaK#znru@?!bLswn^u>RJWO;{}vr#+}hhdC=4sYqN zp3qAQT3gGVq2^z1#EbkrujhZa)_=W57D8s<Khw-%^qflMwyB{aGD%W0N6SY%{Q^w~ywLC;1pno${twx*e z0>7#cflhCk-W4o!>(oJjeC_qu+2?LmepO9#^J5^rvQ$XHe1D_lthz@SV!qXNDR$Ox zP$huV$ll=f_WQLs8CQ+$qqSSQI$=<4av_$vuguM#?{9PS`l|Xe(=h5a<-XD6-;GOiqc{XZY;BDSDvf6M&t>pb>j)YZR}ksR0BQO-n88>mkl_T;4j1o0B)iBm;>vM z-V!zCY;KTP+;f@eS?T3VB@sq5@)smak`>BVi8b^QKI3m5jvz58dVly>Uh;ue6)1eB zC@JRzv2&eCSJm`k3Zhaq2hYAa*cl`jD6E0Ht5!dc1xa`3S|)18_D>rsgmo@{S(O7= z^;HIA+&Jg}urzE?$-clCwOYC*!JeX^P|j*GEb_@^rb{UXa`dewq?%ck^|_V zlUXFsuZId+mTiVkYJa|}LEByJ7&%h?ZB+D&UxvQ+8awSa06t#wL89)-W}~Z?FP@L? zPR>16dLcK2LQMVKX(eHsk2voZ%pU;J%^F8~F4T~9;91$2xf6_6KHr1ng<2EZbAwo~ z?>rB25i(nYdP4Uy9+rxvT52i~V>jg5$QcN;d3w+z%J&K1ubc$gYi)3|yW z4Wb4(lCX0pK6650RFU~@7`x&0?_u}?=4X?bz)C6Ir++0H&U)|C%B$O~n23_{GKD8S z>%!RQ9cPILJ$j!X0xDU|+tWlhRYZRKR1nqqBLslE9{m`36hz)bA1R77^b*d{?V8jE z<@u`)0bck)JntK|obn|LqA}sVx*l53j7i(jFAn!h>Z^XDdNi)Hk@hk-zGBd3K@EStjREE1d!+t$IKv$gF_rRP z#iXa87~9LZx#RkJ6x9A!(PY^33hrdc(np|_?0+~_ehuY}<4^#f5F`05PkBVp+7SNg zn|lqBN&rX04LN#bl(VXlebr``1f=^CM|@3Rw_uPJW5Kg_%mR26b7?|x0wJjN`br#; z+gB|z+DNN&t+w`1RO?8uT6TOKV#D$`pK9S5Qk*StlLbk-legc)z5OZ< z$}nYZf0SeHMV%7k@EK zSX?%=8h8YZOqj>1rYu_&d5u55(BB+vL zl7rlt;A|??77ZZ&04|S7mov~4 zmM+K(ACz36^ja`qkxpn-u%=wROGVqhdK8P20?bAkWLZiDU=adlHndHi+kbe@mV{b# zh)c*O`mdjP{Fp>J#$|N3f?$&cNvt?+7Kt7#Zi+>$m+#A`Gf~WT7^9fgQtb0EIE%z@ zTvK30BM+Bn2=f3jbGzz^JfvD|jO_?%zSo%$wJQovB@3ByK+fYQ%Jz{DVH;24_wA?(%Es(S zCkyi?ei-Itb832N*FY-w-FUM?MmIP9tLE!QO%k68bwTqm*B%(a`=1f+DQ~vmCalx? z=94#Mjmf21f~bws1V%WLVNB_)3zgC;Cm1&ObW>sO00pJPB;8~mO+{eW;r898!{3?= zF0Q4D<*0cHvlqsD!?F-!* z1o}WH*X|8n8@dl3uvFAFh-nPoVO)Ak#R@@0A%0WJbAx$2j&B&1290m(Pi6zNuPm6P zjsaY+(QhR#r~PgO?GJj)DTAIN`xQBaB0il$KQ)jlb86p-Uw=h`I*yL(YEzU(Qo_xJ zIJYiiJtuV&sh_e0nimZ>b(^bDwaN4z+!J_B2Q&gi6XtBphHiX~3={e^Vhv+Pu%eCE zOzvIuE=Z#&s1=qkY)aJ+=^z;5NSVOBX-2y4ka^k!N8Z66C%p|;^7jo>t5BhhD`l0x zvysrycE)`7!GA_t;@nDnr>j$M`ZJaOy??jo6qw&KAHkg_+jo*Y@+$d;=T|54fSTsF z((x`LdkiB*W$Vn#Yc}F&8DLfGD^EU!$Pp23eeL|I0W|1tXAD>q7ZTBW~@50Xt{%Q@nHeQiyQf zw(>5atQ%?jrSQrOock233W4WiAIP9lQdmTq0(yW{hu~`u2DXV#x^?zXRxB(Ur1xm~ zX8Lq1tA7a0;F`Bjer2>QS|;F<`;fPAIObY>+8IdM%XRg>qt>A-fbhAjgppdO3bh6a zI(Qux+Mziy5_vd1PlaqVBptsY{rrScMz|*KMt3lD=ea}(}m41sFuTZqA(u?+t ztKr4^W?^b+s%FS@W+D+lBg2lv^bpH@j7+HSJRb6`vPd199tXunF*#R$~2`-%(qov_^m+%)0=$ZvYu02;`p^%-o+`zK}4 zvqK&|zb|c$W|c#?0fhqulHSyNT6j6;TYm=)@UmUHM*T=a2-_U_r{i1h!NEyLkEcDN zzj4|Wfd{JSWtU-|6eoeKEUWe62c@Wz|B@^rZy$jWreaNyuDd#~t@8k_6_@SvZG!BX z+@0DlhyZR3u743Ju5i#>Kx;$^wMIW%&tSa5+KO{c7SQsp50hq_X;&{D_Re#N? zA&l1?lYQ4|z+`7DTmx^=21$kY;P_NPuC(R>=jY?KhaHXt+Hm)LQ^L=1%zQ{5C!@=Z zbu;E<+xj`&`WS{UdT1~PAKM@|osRq+SuVM3X2G7?Zy}~A>lr@qdeAgb7yLpnjmUgT z=E6-{AKpnSSj<=uQ<2(xS^V*n+kZa1+7t9n3`AR#NFeo?%Y^YHukK+D>J*Vkh`EiD zEfd<5t>u@Lx(Q|cOH?2&1BQUfGH__#iqOiMkvLu*Yq|US6Z{+e6-_A3PenuNl&rN>=>r1U3zJf8* z;5me+>iFau@tyN{v2{anI=&dT{jTXUmbR~Tq~uM;w{SS(6(yA^JmLBLs$aRXMh)x~ zKqZ5`zbaBc8*t_e?9RKnEV|Qq}=>T4SE?D0@2^NbL7xolhHt zjnHi0PH?Hnqo7k`HGkA0IvST!^}HaMB==D6QRjpwe-NOw4=;IWAyNK4;wU`t8)7%v ztwV|FVg%jgdCm((6CGs-5l^g0xr%CDlgEUG9E+)7xnMj}JD*ks_CfZhsO3#umntlD^?GwZeUL z&>)4DCk;?ev*#>`b5Eef78JZJagJ%Si60Z;#jxoUu-a;1GcpC?!x^_aNg`qS8(WKTETTV5WO~B^^aE01$c; zT_MPKe>0cf2Kp}^lSF(f+nxAyfNILDYvnVZ#8zAW=_Ymm*z-s0a}*2{Jf!ewsON-h z&g3g!A$wO4a)->#*G%3yKNh|Edz^!#($bV88=w6~HGk0xbm|OHh4sn5U2?Vp2|ExJ z1{Gb{ye3rt7d?PE%MK=ErNtQ}fjFD?Ek4E|bp@q+?YTMI!=}C!z9{L^Vblcp#2zAi zp7t;V|_-pprrH~9-a;Vv;Om%M58@`Csf@+k#m>dWtTl+UN(uo;q>4PnBl$fTzvnKm(`5xlVERyRHNwv8 zm)<0=Fb*BUt-rCY(iSDT4pzhS==*yrkn)6ALVxs0pN7I4=N*-HoKaU39sD3Rs`)n& z_)!mZBscj>AFVkd?qC>g^jOtqkc(`JnlUc7A8Q1~EWF#LRjX-EN3bWm0wD7k$#Q$k z;qj?YW&>F&vN=Q07kbAgqI@D=@Vu~qAE#zX$^Vk)x=`^tb$5?(@(K#yiYwJsvlsb! z>3@V~-7+ZB=T=PlmyoXOfJJ3AhR9y+mCszjlLLUQ+1K(HH}aQCrj1f4v|gf1h>W%s zaUe|{v3@L?`iSJ)NZ?<_ymD=!bEHixHDVwldUcVe5lS`xHNTvn_$v$tF!wMm+(qN_ z*Dw6@qm0PzL5LG5xA*#x-<~2CJ12C!?|%sz0Oy4dBC6-&l$gbQj=5u;GH9dfl`ENaZ)b6;j!?$rK3c|-xMGA|A`=V9y^vhPu#Ds@Pu34f5i zk5SzH;gydXX?XK?EP0|adv2^#6|rjHNA$U@GDw`=-)z8_i{YkiB?*~WBNgv12*2eU zghd{*MBwK}ij}97v}6UdrLKzdm4jcJ>QcXkb4XAzIRC)8d<}G;Op%lHwe#rWJ4wr) zfUiAN8uq`lElP&CZc{ zf4Pl{n{kZW@5J5sa3TOg)~#f0wdlZo&KsYqNTf${Br#Nz3gfNzX}2Num@dKc082Wg zqCK8`WK`no~Pa_7O&M|!SRDZ$7X<3e%DV` z13nfcJJ*DDlKNs^mBUv|tA7(7AU?Tnu)s?3USTXmBw7koOWE%%eX?~1lDCxxLS53A z^$iU-UA~`ouVJ7N=fa`Tj9;GUFOi`yHEGs3xRD;`rr>zVoGVTT05HT zJhS)`z1pRCitWKpk!>Tu@W;|4b0r?Sf9>iPYoUOlXK=OGcf@_m*?)2bQB+4I!DTxAc+!6_8jqp`}2eOM{esdp%WS5ZEV3%vALWX zm!+dtG74x9v2WKgNKwG%+>&%v6$V1o;P5Ei51qi(rVDS}bqt-D`lz-#lfBEEegiFGBAdAcI+A99> zTJn;qAT(zfj{`EkZNFX+uPEcxGK1@9hPNgKZ3@%byM$>0KV@rRkMjh7II4Hpd@>r{ zumSfzhBbKpXf102%x4xlj#oNP#mET)pW+Z~w6hRlW6uyo{|+Zzp0g0~?Bm|QS~ z4RKr&Uw?&e+1(?r&Emn8OU?8>4~)LW?cR$D90W*9bjeNfERnw?)XaesPB=u|G;+1+ z1FiXca~qzWE^UGhIR^OsG&B=i7zq^6by58GYR?Cq@;DOm2VAOCT8aFY1cr1oc0ZUL z{%^|OWZO|?*%tgDH2?`o(8z{6b>VE&gPx0M--G;r?^ds=iceD%Q@>WoRXwtuf4x0ekLS5r06gSEsaGTskxElWHQ6NL_US&7RGY3h{Rw6DOqJxutNs~%Q!!!$e-F+BTb}|jB?0;5a zzANVNoL>IxPTtUxyfx^~QL(kx&tn$V=lZq|w8XHnTF1mI|7>KM_&F=|mdKYXr*7o| z&<7w_ps>-b&uVEaTaGD3cP22VgrC*2ZWAW|u+g;>EcdGNPWSn^X}3IYSL)J>{0%T2 zoZMGZv_ww^kiGiHynHjTK0Ylr^M7Bj^T&1>cnXh)u|{Uo44)J$3^9zduvAMgdD}}3 z$OFDd0wv9s?uyaUzM|mt8qf8)YciRex+wcl%q;Rs`FS;}A1fV^r>AaUIeSG0NfSZW zTk>E>H7Si74f45qU6p&Bk@u?@3;-rvDkyI(>UP2Et$d@-c9Ucc-$k9Qo_`=ofqQg= zYTlnGmoYkVZ9s63%q<|o>f;8l*4N&e$VFtnG|Iap(3{8~7Y8o&nOJ5~A{C|*#x-8t z@Yuu;Q9C^y)Y`V5Gh8Y9p}q)}$2ml_a1EHIV{!6t$E!)bB-fa|)z%N3bXiZuEZ@!3 z{X*E?)yqF_^Ojboh{;=hAb-VyYL54jA1Z-A#rz{?#E|X{O}8z#e=zMpnz?6n9^V|= z@|2kCz@0)r^wy!B2KPxio7jAyZ=^lXM`gh@%bF@e#vMMHVXlXGk#c;uc;%#wUS@UK z#gp^mq@LErdMY2;YdpeRkfUz$H62H!w-T8c&SA@Whp<}5%sxYK<$r4}W35N(-v!)d zS8sMcxL9ms@;snWsBFT!rau(6Y%}dxCQhzcmwc-SKI6L&RFat+b*=HE-b@YlIS8#Z zI?cuD^Xb+5<`tcXdi>N`N*sDbnYZ5hSd9|em)d!~^_ZYG(iMk;NsM~O>~A2s6EqhX zz`}hbE5Ks(;`ZvB`+rk5IcoL2d?LI-6m;J#E{C%&1isO>4&NZ4H^2^_`}>EhmUbU-b>K=ZqQ?jxcF}Ixb#`Ql@#^qQysiQ zO1#?>c}gGdh(M5N&3%A}F_YL0`7)En8%W^H3wiE6g~w2bmw$T&)T=jNqxw4CEV-?& z7u~s_w|$vQf;g1+atW?X8=j4_90j;AQi4S`zxqr($oP2-adxD7CAbwk~w324$ z)?WS8D5D1ti}-mq)`G4>!C#l9>(ft=1EK9vGsh>gSKq*&FGA<}MjS2Fk{0|n2Eg687^!x|z}0rcs=Co7SlXND zuHNjlxB4O+p}$=1UYj=YEJ@ihZc;7{m|=3CjFy?t>q~3aM_%2Q%Tu<~%R1);CgR?b zY4udFEdNppIc>6n^V~XVo%5wdrqc*Fof+66uYYF;fh9YYoeb|2EZjOyUe}>tp&V^lAL)mJ_uoik3h$eJ^7F+&z)>xNaC=zPLlJS|)h%;In|sAjXs$v&E<`9$zVB7`T|9H9m0 z+xc~yRjI4x4|jT%vK7evoslGK5f> zWv%Vc{IRhr|79Ne`+kVL8}rdB@PDzZ?`Lx&d;KVdXPym;RC1S;&Ks8zuhLTt2FTZj zzVxIaT=PsiU+Yd^s&?35i{`CPigWEQ$MF7SB-sf*NUAK!8Iyr>nYwCgXP^rAX^`35 zCvRs3UK8dB3VWY$#gx~BKfc}h@|2-Z4f5rxXeAL8MX2F3L%lq^3Lfm?s(-VX{A|)A z^A(3>UudYW6*eV1(KRlAB}4nL-8=0Pc}GL#l^0JZsxOo0eBewcbdSj9)RAF@=#@OU zM20gSRfE=-?K+hq5fasjhyB&k(m4%uHS1{QWt)dr%uttnLo9Lln2YMBo2<L%%~S~Qj1)X8cQTJn6lwj(T0 z%cbM!pr|I9Ep_qGSaD6!A@{Iaf8m332Mawo3+HwAABcZR0+l*oO@BD2(rp5rU&~BC zZ{}(h*$2KHC$E<&J!u@;5y&0UPJxW3$`p6a4KpSXKmJm&hPnN@W_ zWz$7(Z{KQOLJqm!B)A-4Fg4q7*1-m<9p zPvxAw-K008fXWWHyD1xP8CI3@;Qg!|oC!2T0Dx6;iGPgMIQbb=`I^1V#^WxZ zlO8z&*|$Zy-IAsl8^fDUCuG!2{J3ZQ7(IJA4=?q5iMG)VnQ=Vya4^l+=`lY2yKegD zq?LsfOQQ%+KMqh%8%A(F3RfZ99uO^~_p@ z_wzJVMP(>PW5VJ@?GnJV!6`sneZV z%l&bybmS;eLX_$S6iv+;pVxGziF@;LBZi$~naGyqWiuI`@1JP8G2BoW>)}b`$8gRQ z@$NVwP;=ifZ
m&fajt9T0|wfZbcH*o`PHtcz7__!{gjdGYV+mS`}** z)zinyFMq;}E^gO^$*{I0V<%EkjQ$cbg)7&~$I57$9_!CLBNzASv(+dmL&|K`Ne7`z zKW;435srS|fLW*=)o9N<1Dh>3Sh0W_Z5MG{LZRhVPG+Ov;-`HcLfvvh0Xn91(pjEN z-#JtT+f%4UD-P!(uGvjF%jFuF-Aj@#!tru@-+#zOpy~GmFfW4=1j@4rcj_Esrp7z> zlABia54IXphIauWy^j{}RRz?Um9b8Fze<(jRJk6Wn1zp@r&yq5#4{l6hKszl#DNrbb<|UnCll^nRtE!q?2Ik1 z(SOIaS+-%l2cPT%z^4TZGXrLYhnJrZ# zJIBppB5tJ5A38YSOhkvkQ!{kRLy#XCwSQbr-6c7Rou&u+*k4HYNC5d` zi-mquf%=*BNxQzX^i$VSMzDuMJM%Z{*neCs(^QTeR*NKXD|#SSA>R@YIcwfrPojTu zo*|XLs%uUsb$&e&-9yJRYizK(-#&+NWqM;u&MUQ|+XSS`g%0%Y_1135oIRRxTo(u3 zh%m^tWbxhlO3b34`Z51=u15GCIRVP|HH6^rQ5RT%dOtz}XOsWu5RL<&vcKTV*?(Ub z7y~{8mH2sGm|Hl{^s@Fv^cL}&K1C1}Nr4p5n7;>3hz zneifx!iSyf5}+f;F0YdAMFebC$uHvmuf4wKmssT`ilZkrP#TkoSUxlXAb7Cj4pMTD}9*Rst z>+EV0?}?bqE;>3hpj&t+n_8GrXNopqIZeHo|KO^(^KuXwg^L#liq$!fVdawI5~eESS- zZKuaoeCcPkVWacGNYcA@dY4D)-J3{zG5q@jjhPu)!XL*z%AR3nu+pie+V?KZPFG87 zUouC_+^Mcj?PXZz7)SP^4lvn0O@0l6)^3hqk#0WGm7Z3iKZgKHj>~odO!FX@pwK6 zale5G)ykp(K`M*rRE+aSZghIinkk9)AQ~(w?)^{snQdNVRmb)s2@dYTl#=>t-VI}b ziU^ws2#~>t8PdmcQ-2B^a2NxL6tfT%r3##kv{k)xpp1FpPp~+eSHF(i4ArP9y2~#^+!k^N{xMM)QdC&!=yCKg$jQ@dDc?<}~gR00BfQ zN|PO*&n2KvubT_#`}5U?t?hJ$+AEhWkgK*^VD{ zxlvYuOh>FA&wuDBzq~aIS5{aOc*;hwvdxa@5dv z3k>gYX3T_YNm*c@fM7q|{I+wGwWP|56ZsJ_Cq`wgdwuRm!i=Yxzja4D8IDXEox_wA zfRn|B=pEhXYvg+=yy|lAJY5<~CqXdvx-BmlzKhd(DSv`bIM|&89OwjFIZBuEXo>bm z68nm{b<%Z{pQra{D2aO5dx7+;Qb3(}5huymzC$39T*Q)K&UEfsakwTcSEep2lO8UC zM%=ZA7ol$!QiRd3e`JEAvY2Q`On z1TsE(HGj;pIn|FgQ;VKa;4QohdiUO}TC{Xt;i0g_(8lDmRY6Z?l6}9_x|Wbd*?%N-E%|3z$=*+7KqmDktQVbpH6Y7z z=HS*JV=7N!oxJ8&e4eqVfOFEqroV%7;rJ51`G$Vpuj9e%%8xBDiz@Pmw1;E=+^#nw z-eRY-^T+yp9tK<3p1FI7dkqJ6bvBPcoYBgs~+~%}( zE~X>tTAB0LEuN$f!j`@b6QV%bEkfId^o=-9H=eF0sguTIam_rXhfH-_wpcavB}6CT4si?OGublYB=FB^gmi!&oK5H@?O1Vqn#Xvqa&9ARtBl+d z9onAha&GB4Na*FDwRZ{Rn)kS(+1(4RN%Pu~L7S=dOGB^brKTAlLo{8I1f45P9Dg-} zM6hQY>Y_KS8mTA3n@rm*vd91eG_Kc-s$%zTT=s>{K=Z*(WQH$Q#BjM8bH zFxp!aS~`02dLS}bHn_t>rUP>yQTeooQ(La-#Lzx}CoA^UKAWWW2P_(*s(#kXNZ-b1 zS0g|u{1rFSx!-E;DF!NyeKF1yD}UvB|MECq)lr6vV@z`DAx=j=d7jjVaV(qjQB>oS*Vjmt}Fcy$sQ(n-|F(VM0=m9cS}(XOmSw*n`@wm-fvBFKd>Hdr*}3rGEwwOm?ngjG;dglb-t9JO2z6T zg*fv-tCK3+PB}XHt{Oe{(PP@TG<{mxb;Y_W>U;CFWh!dVW28>Kcl0X|To>k%C;EK| zuDV{G=c}6vui?ohVLq8Od}p@_=+hK@bXFpU%tTJv=H%!3p8LkRo_|ZRK2*}?my&h6 zW!iEc2$`_%u@IOG7CfsCgcDX>wXywyyG~m?)wl6@eO;n=K{=20(XelpQ4ch~KBYkr zv_5K!GUD*l0bI=$Vr_^=EwzUkW7H)n^n5UvZ>{V6K-Fw7ToM;OQF?N?_*o<7-_1S7z)|z4VxeO_Dg970eQh)fGhmLZ`*Am)~3xQK2 zAF?zzyEM%O#riyfe4?b1e4WR1_5Ctr38KFZ?in3mYrc(Jx8BY3hYk6TJ^J^}OI1O|XP zwLTIQp}*C7IrhYzxIWO(@&~(uxxgs<6oEVMk3aQDr+*fa2^&pPi@1Xgjh$7Tg{4F% zMFC-h?0Ov2EfW*6%@@kE$$Yk7|=(Ro!Yv5)O7y_%~H~ zl@*F^d~jXdxQ)BGyLH#CGo@74ula;ldd8zkJlc9$-_PV|dap;|`>|Nq3#DC{jDA+q z6hvesRe!?5N|)-R8OK$*7F5@gl@d@M+b#rGpq6C4v*!>VmXkf#DPOnNN&fWV$XglT2y{{ z$P)Y9PMd!kjH%z-g=r{bU>@{mp=5M!xRD$-K%Q)PVdaNnQ%n9~PVte3&v$op_xhyU zphqg-Q5dt>F*oCV?2f0~(jG4!OHO&6*6ftO+$}TPG69#rRm>)y^}9be~hAZpu&npA!TC%>VPBb5H(j?8*E6ck0P+*TDQ1 z|1*CqTnc?q6K1a30KnhA14ANdpJKv&rETImM`}MtVOTy^B zQ%b&5N%qMl-)0bJ54X^!KTK-q7y#qj#E^f$O=9cFcPB?s0O0NK;^v>JB;P3{002_` z{yvBizfU27->D-2K=C{EL`(N>)IRkE ztU&PrKmg7FphegOC*1@Gt!;-_R46`?jVrhb02DAG2oG#PTLEZ*SODd10}aj!hpK<^ zXz^fX$A;$?-^(SMu5{ji<>b3wek|yv@=}1U(XkDx~XeK89Z9W((~Y)RwD2! z9(sNZBT=0P7uX1nYB4BIq;a)F%npB2BrvE7Aod|NJm6)5#d-!_126%EKp+A4y34p` zu2RwPs2W?bMys);CRkCWuBoJ~O0U-Y8o&2)M4J-qYlTH5i)DH=T->V_s^FFeXj4vb zjq30Htx*q4&)dHbY|;L?PFWRc|2L2|2ff9Gh|^H5$=-HJ(IL;Xv7N=hek6a6lM?!3 zyFznk)e2+XW;#$;I4@~l3kJ2JeLnEdiW}gAR*jJi87bW^3UoZe;?NcH^T}#s=xq^U z3FXt^NDqtyly36yB} zj}AbxK@1?OM)$`TI~I0d+#`RXP<(!&1*wq8*)9yeU`oDxUcTKvSte{OyutMCeoEs! zK;SAbXnUPneeHTO>ePp&;K>Dox_5U4sCZgITJ^)tWz)Ph zxbOFiHv-Rs2PObS{Ll9bn->T+HX%qUquzbxk?Klw{)-m4Wf*YvHl=?`ak&zaVDxS( zaSjbp{rCd&fo0*j-OW;lwn8qeey;;(I0OCzUKq&V>oDMZofRF8Ig-^aI;mB963diS zHzdI=I*IcaC$d_lLGKg_aOhlgqD5Wln_AL}22w#S=~(CHlVT4y4JdHt0iklZ7FH#d z;M|BCQ!3k>DUk8ywZMO}P3P7OAmFaVyU<0@41k;W`x(Xket;v$iY>^3Bb*dlIDu$} zCJql9S0C0Qh5A}Rk*g0RPN7j15P=8)40&Ru0ab9?-k*gpqyZqT+LHE0z1!_S@GtEL z!g()})dkmv%R79V&Qb|o&K6ubWIFXbVM+jc!H-~!pt#2$NH%{^eDTvNZd!toXKzKy zHQ79T?JJxJ2JKb)f)>Q?mXRzxP3_9mC^Y~2$ifZMz?Y_#9E z1PWo*W+4Pl+5rp z#hrUVR4dgN@8N%+nBLtd-#( ze-X2cz}?%YSndA$;s@BNwA)m)@rLRhD?6WfUq1PE5yvAd1?U6k^bK$%Y{%#>Vn%-i_W1RSx#K7E;IRPd?iapL zzJ3wsv{bkB4`1z={^4JcO0f9e=Vlc0jVt>@iq)pAg-UJ}EhfjObt6R%AVIJw6*dYE z2g+7lVHVnk)%*~^A{8JI6z(U>b_6-VS0N{WD|WxtaQEAmhUl-&)6>`1KQ^e9BB~9S z=Kt&7tcZVigDQ-gyCMk6;!;DW28AeMFEi_+GO960C3NgfmB`{AI7bd>_E;yvLr^OW zl|r$kmB}?XZ8?%YqX6x=&~{wW#STUVNMOfBWU-~&LRW`P>M1H4UHgijesMk^qit}|ISP72h@Maf%Qx%C9T>=CIrdp5PxFxJ3$PC z4?z}i!mp8K3d&+$PNCEfF&$V0*c+bxO zf3bfm!JXs&;$-(5&Fx%a?Eh8>`+ZRc_w^s#KqJdG>$*P4L8>iq>)1i98rtFuxQ|Z) zakYj1K?nTM>V>wM*h9c8!`Cb-5ov^#bJl%_Dn>zNh&Der^%96bcxnl9giIlO0Fb-A z5d5*%CSo8fYkCw`rB~cE08XzGADeQpL0o@;AQCTJ_=T(q)&Y(HtCs&@ny5Ls}s5^d)c|s(4 z$^c_&rNB6y5~=jLaf2u^YS7-rhyIR>Tp_*D0b(SHBWZS;D>_B2%w%UYQLJt6_}+g$ z4ArGq#rBQRQ?Xo7Y_yHO!!;W&K^%C^j_2DRZxoP&U1upXP~MJ^3V7J@+^Ta_tKa)g zkH!*(KN@R~w=W0%x#vCpcYnfojw8#DZ)lB=b~&l((V@zwVdRE&bcdBJ{@g*KkkS^W zXpK&=80PJH-(t9^ma1gPPb9ANbEAJ|UpvmwhaiE5;W7giK(yn`=0VEsIQ#Pdj`iK9 zh4P~}OT25dRVj;5afDFfl)u_jtmsOq=n6z=QYvLqet3Q+NCk|utnRv2{>HQSN7p+4 z*0sX=tLxSud;E>F8vMNmoS6^rC@MG8j>IvQREXzaoy%Tkl0YR0&`cw#81;XE6H?nz zz8jcE!3p54z!dNKFx}TF!M(7lzj^zl;IuSWf!i$V31`iRpPxU0*9=#u*w^tU2oJ`E zM(yj+i?1K`|6QN|Xe+Bgf8x*oa#j7S*}p!fdC|sUQvJ2zN3VuNXwXa9$X4Dr(U#?e z-BytchBchxCaypBJyjsl?X-W81n{z}^x>>Up~7a6ocZE5=~=B&VsRrWI>7tUPdDO> zbrHN8;O_Zv8MJTy!|5t6JMIKh-O}(U&v>4=4<|`@tmYR2M;?X@R^}8#tnf;y82X%G#lGrRi=Thi0%y1mj%s^1 z5f`thX`Z%C*K*^{f&g^RVM1ZSjuaxw2~ucX@CyOa1D=;f0np`Q4mxqMW0{Wm4@}qHo>Wn>3_c}%W!#BH4emFY(`hKA~wCht$ z^Kedou@FgMqeWwldbQf|lK#{>oAMvI1r|zTrb>f78s*BU>~B}oT|3TZCepa!g{ZjH zd?AQC{1WkKO7`rFbI}URtOvfBi`_lX=9v&yCefgmaz)xIVp@N5l~0)?SsVQFcDS*D zI_~@VZ*KnO5rD_Sj)oKhj5s^JInI1DY#c#MdPdWH zc&;f0Nb|@OB2P?oy?g{n0*(H#6L)=wplvX6ZJWcX%-LxTOX$N2_?*qO7?P{K0$Bh< zDe#_~!P(+xQSe>P_r%oD7d%)+8M`l-`wiOj<&8La+@*ieR>04DzI-S@`7(rGp7?6M zJ(sOkflfvVe(KzBor8a4D-HhS38hN^XtEQkzj64*90&oHwnx(PzJ0HvT(C!3;2vV| zOj2i~MbY=>+AE)qZ%@XVAw;S(EVtMME*8x$DtK<-^m%lwrExR6b**{4YxAHeV2toy zpZX7Eb)$b?Q22vN95qyXYBahwMSz`cSoO+l`Fhra;-^l$01bjC4fdx3#w~sQ@Xt4| z?Ky9+-F&#Fzc}A(pP!i8d6eII2L9Cf|I@d|m%q~Bhp#*dt`VFTeCO7`IWUgRVZhSf%uMmxOJv+w!b**^;$|k2gmIJ#b%SdZNH!(fJUz*{ z>9oSLVwhrBGOkgR1MRN|@yQo007@}ZsMG+`-Wc@sS^C4oa zzIQwLyB$bmeC^<-fjIxH{XaF#?oXOi)-TWe&4*u`{n6)D)ayn1)j7X>bo|LN-&pu^ z`rdy_`Cv6A8n~0_ex9|K~NkCG6*b+Z`8OH zeJ`|ZvJPt|be!l3;f$n$i3A%fIz=|lW6*ztexMAOF$2565~k1UI_~{3jyR;;GHc{s zx7d6Bb(>Y0$8TK#x8JzfdGi|=QUA-mCvnehJ3s98)n3zWzj&F2-l+dq4*AQud;JBT z+$=#~)ZOyl)u6^9)BD^KIxC#H+Dp*JjH)|JHWZF>nFd6|V0H5{ut6y8=R5r9Xa|1_ zkl@hjSk1ZuwR+=|!LhLQ>h8U$$K6^ncINS|+vk_G4kPcPJLb?Te$IseT2AMTiwqbH zNa*V4#SNVKoNi|ywneM&Iy@(6;xuw4)^0#R0KN>Ue(Q1XTg!d5h>QQpEkz{%2VS=5 z^lOJb&JK`I!*87Zskb)euQpW*aYlbo8N^kwIjm_;E2Lc6hP+#pvg091Q0_WM`rsIf zmVhO$C@gLnG(KQ*)J2OSwBxNwZ`z6JwBsA74*IyR*NcW<+OF5}j{4v>Jn+>>80GNN z_P28n0|REHb2e7^<4hu#9&X-zFan7CBcy(2NpHj%EB+o|A^=AKDhjQG$<2StFTbV} znh#VT7y`whJFS4tk{;w@$qZX~xs5R1D#g`4=M-Z4 zq!lRVOLCA`I^q_Ea|DbqX0CAvo<^bNg5bSi8RaMs#P!2cpcwXi=>Wdib?l^K^rvt8@7VWYk#=7n7pVWE zJOAj3-`?N9a?VdK+52?Ls*3!w#D;yAY^_G5?EkGEKvh;pxH-~G92Jr1u1#_>mUP4*4H)l`Ql8~NEezj$5nsmsyjq2iGi-b z;i@`b`a2VxIAd)E&t`vg)8GB*dCkP*uODU6RYJKyKm7L11+?%dR`>p|v7!Iz%OCFi z>0kfVufMf(@Eh~uH|P9IgMt1z$hM#hGg(T`Pyi}jEKKtppnb)TUi$3gh>uzm4|fel zDUb0LR#_8678<316QO^UqtZTjWmNIS?5cRwh$qDvZu1gLK~@)A#(0FGFhc>nCA>wE z;NNH2Nqcdo1AT=TdBzm3X-8Rwz(~DPD3&TO=t1VlBlK+9B?Zg>#MY40+%g9H0%sRy28G=FmpZnz9)v7`ynDEsj2Uco6I( zvImjHs(lhUYRM!DU>`8NT#PAir-(07j;lc;Kk-LeZOsdjiRUyIO8PEf8%R9ba9zZ5 zaS2oyx4eK`dGCMw!4Gl9INtll#hz~$QvS)eUp$0ATIwH-`Da}}Kd1eV-b%l{^6$F8 zx%syq`f8M&H}{^4M;hEO27fU2uRhsVr|xyrziL66``YdsuixG{|5Gn2KXpPOwK7#q zc1d?hE!-76rV_+4=9q#jb}gq24c|b412b<6248xSQzL&9%i|<2I1~V-%1d;X;2=Oy zn4%;2Cg*96Hf&1e72L)g6%WMH(l~f;!;)rf`pJg8;a`5q2rn>j1a z7zti6Xd{0`x32dTq~RjMA$CbN(fmx-;BZ5$@DiPokilr`cBBb-cuHL4>Kn3^%COM< zvzsaBL=852$92BrS}MM{hTj;B7U-)j|8Cof*$I|HfBvnDJk7P;o5V$Xk+o#ELC0Kx)kBA9PrQA}v#N}^Pm zUq=%R_#<(QWrA5%;c!>ip_X7oD1n{gt+1pjGT-(VeYL&y2vZjq#jHc{F z`3OnIcE&*LI`MJb>k=ID_RK?wWL!fcs8`#E1$VEM2;2L*pLU?3QeInGKF)V-V^VIW z-(r6;*jzP4YKcCGilg1m96_{T8n`2d-q+5!_}Uq+%6DJFsMGByZfFiI-=4;=KBxIV zb;%E}(9*BXcD*kBwP&&SAZY#%U+aYeM=`piGVM4Bv|>&b9{W<{q98cmH(!`VwM04Z z&W*q^qsj|BC<>mN6$9WQ8^ zmUlk*?;PEWF#W-yv-1fp@B9t6or`|>`Tqs032+QeUXjmp$|tHoCe2U)clxeo(6;XfD9Tnq6 z01s%FNV7X(6Wg&8N*rs8Qfac^ag|qq63B4#uvN{-sF1`L1BQzth(NHkQVBWN&Xr=^ zbceeu&E4Q|^c1#X!+mVs_#y;L8j5(l>^g-nqQ5b*`AWu@Q7kRv&03hBe)hrR%|e^@ ziZ;F9lesk!T z8$#nxZ{v^t*g5-~gMWLE+uuGL9QpZF*r^HULXu(gv^YT&T%)oYPlt9B@MR%qesj@5 zp-9Uh8~Y;~3t1z$0Kr9hTBelDH3c`*w&Ea1<6~7@i~YRYX!z}a=1G4lE>Y{j5HhMW zD<<5~s<|^QIkxy{pcc-9B|zjrK)&rYcuS(^3zAPk0oySoYvIJ2Qs8-pGhneS29Y1u zw!gFH!aTHfcH@P9S<9^R6&dW06KG}82`jVtE&kb0-1XfDb0dcE%g-f%|Ll|P+U!p+ z_K)8C(>I~zukH7~QuTjFm;5{Lq3RnWJ2w@g%e$bK?UhvB2gb((Z%20>ZRHqfq>iBX z5FHRmV7N#SMY-c#k!UZ@!^m?UeQ@1WJ(muqpg;nUmQl{RMmCIdDizfmx613>n>f_4 z9CQ&61}uD?I|?6g((a+Z=CO-D-d36lH?$fleAg}Q>waSHGyi|ATmR`Bw*Rc>r-$?p zcB1{~e|oe(J^JG>W`5%Jr=KSMtGE55Zwoc&s54TPq1VY-GdxcdIU$fMih^a_x~;+l zm6u{3%}V2sXcR_sWksoaOJ+Gwsx!9QT9lDfTuK|hRP0OU0%#KmlHlS*j*24elpCs> zR4?8@
faS*U-yQ4o~C6dT+KTRK_g5L>Hv+jrbSPIP-8u`9ImUI)S#^1l1;tM{v8 z{wC_t@yS?~pXYq$RD#Q2 z&iT_b{6}xhdoBO%AO69}YB&>>eJBtJPAdMf}jdY8Y9MJl5EH3}WAqI6T9IV7j zJp}TG2f2I@HfQ*@t*D$+oH_>43Fj5C1bN`1zK7|gm-`5aM%D*M|Aa;GWbM~KJI}rZXe7}2>@~_<_ zVf)#`{Hj-GY@llMP^O8 z`E*1N!h?P%%<(E;+0JK9FB~G$eQfg^}>8`nqF*L@Ydq4f(n7aXTRk%t`v~R*HxfW z?k1?ak1a8#MIWE|RYUu?>LQe(ua|zsLayP@tuX5i~Y*nOgQ?vLN9-i)Mc;jT0wU`%2Q{3fQxtTaj}Fg zu{_zc%bzbu8uNFJ-bYz$RQ^ywA2Sz`>W41#J&f)l<~Iuieusehu^;1Rem1YF8Xl3a zP5MM1OD42EEf$aa4G|Cr=LI;;J?6XKEHU(kJ{c!z8U^ULpqvYT%_85d4 zb9NX?b;WxYb_>_sYD%IjnkT1P{ahL!J?i5H%SkRGfW>giCBND(J*QTfqMPugRblx_n#(>TYmB#8?!h$$NPT`{z-W#f8c~5;PPN9ER95MONZ8-k&Exz>S zx>2Gd(vn)+UjIk$Y3-grP9rNB1&!N0+-vZL?n|RMW8WWfRJe}PE~jeG?^dWgKrUxn zqpDOC-k%}xMu;fXJXAdJOd~FMNUU?JQ;Q?St*(DfrCPuiF< zGSr-+nP<{EQsPJ1O?-e3jqrBG!nV}SHS=mS0`i4E%y?fUy%xbL}1$%gE^)ldXn~&YokhBq}4Cqi|I5KqYcG`YGNOrXKW9YglTAGe3|VufEc7}M~7MNH>u2qxOGLh~_yiYDyRKGww&8L`wGuq#afljvR3p-VqK){os17)W zWVpV1!zm_NQ+(da^Mx}t<8LnVr@C3Rv2={Gm!6#m7=H=OfSuT}1XH){l)^KT3d;b`uAKPCuoyy*_t{;k zbm{WPbMRgG(-&haV=WUG)ECUihnWQR+EUr`-QgGRlQ)7tsC6f}o~acrI#`N#RN*%K z)An184}Z`;25Q@%TXl|(Yw!>T-4H$(`!;-sE0u8YHmlt^7VXUH6EPy<`Q0etQZjEz zw6UVdKAal>A`x+w7{!U*ucm(vFFe9#V?xiSz;@LQ_sb~`;xh!5#ID__CN#L)9!YNj zVq&3)ooA~Irx2soIOYVZ^UhCdfvX6j_a_6rZhuWK1#a}L*K$)3G313i&J>9u_@2le ze>$JJ@+QnLI+%m2qZZLa+4a~n(NIKVr{}x@6Of_6#`_I4ukY zR1TbE*HEC&O`!vEIiCaB?+fzHjnn zY1ZP>dY?I=qH2s&3^wui*4?PuKsX$}tn!bCg|5ODCF3Z6FuQ243G>U63kqVwAxjdp+_$iDMsvvN1*NKI>k_P!95n20U0sgKnF2MvUO<3p5 za7~f%r>cIl82_kfPo&=geCP(}M1O+xut@?VQ65AxS;#ql?JV|59+Nb+6~alQubibj zQ-0)}l_9tMp|Q~7d8cGD^m0blivOk#oO#KGlM(=bw>;+WjlM#h!V`Sj$twMLynvEL zTNkgsYI)-so1K#Yn(S;#Y5UnyUX~AkT7CaB6+CbMwGxvE_jqF;kL)dcH?J>f$N&&A6OUcRvXmRQrJOMFGWKb)uw4K>$65U ztLxUFT?>fnB?l6}%*vwpZr_ZPPHYul(w}oejg#jC>@oEm_MD#z+<(>fGCQ!lau6bp z^s8(U8-Bgzx>3ajs9{@t2bsfBM|MT(sH-5#$i$<6BscGpn!dP(mfVeDUXtg&E_m>x zciGn_({|@&*tn*&Vt1CgSYYS5WV}H5_j3Z#l1LZN)Y_bSEoDPb#b+J|^ceqyI*CT( z7#+?W)(e%z6hXtsVSndZOT&j7iO*h$v+t4cO4Zb)%PI7LHI}k|KxKbu)+mH!;-x9X zn9a%WuvoZLh`pC}Ka{vvwNP$d>(<%~krhXBjV<;*abR~XWLsKo660OiHnW?dbV^VJ zb15#QR7Oe`n|fqljG6@{dl%O#1#&6|+LGP2VHj^=?CoiwM}L#C(mD1IxE9~`XJ!mG zIdV1+745xh_6ka>!i9o?1P$!xrBB#m>}~X{VoD0FW)?nY3=EGCeq%&HLpZ$Qs^`-Q zV}ZME|06KBAam67X1tDNAMq;OV|x=iYyA8GXWJXV4C1+gZ}ol4#)fl4nVV%9)Gz`uFqUKE7F@2%2R_7tWAmXj&X2skCTi{Gynp%9u|%OT|Q zZ;i}P)WbP6d9PLFIpl%P0yr^N*}4?n)3l(lBx!H^$bViJfq4#oiE7!o0Ch!nRbLWT z)O`sQ&YTC20`jtee9)(8O5-!kDa~3pe=CnJBgKznQ)vfEI?qI>P?>R256w7{axE8_`v;|Nde!aCg@7N!O#~1Clw#4 zjP8tdVt>eU#|5R=x)VPEpdQwDDIH1#UyK2*m|7+B<-j)f7G*i4ca;@!;p! zo!es5^Jm87K=mXWvg~R<*Mp>~eMt1ZoL1C$Nx=RQ$xD{|_4qg%z95c&WWvf#$bYdt z7{b}mh2~bXc#;b~kG4Ze4V_bvqfzQNdDU5;8SD`7m^MD4Pxd!t^~}>&ccsvCV;h=^qag-?Cq|Lg&wC$(8jMaB>A1+x5&41W05;?-m zt2~sgv&o0_Hh(dIC3;h4(ofw(a4jme=6G8V6%c5oC4LzR6;$k1&_&apKWy64i4!YD z2n<%cX~-FAX_u2ouVh#}J$phQ7Wx9T5R8NTN{180%+Z-I3e$=-JNw7tvkmpGxA@xq z(6V8ZKGj2y-sOV7iN2XC?tj;eHpbDIX`wHY`WDlnf}xdII!t+#9D8SriUl{F39EGB zg5azlr-dxZXDsN}#?@XXSrBuVv&8hH;kBY|Oh!WLsLH1!T&eqLsrIBoTf;3y2ofPY+6pUa;*=*J7E zP zPM{E$Tfa2&9z~|`AJ=fXEpJ=e6RERL{5AuVcR_gW zrWdgUIeQ@C$x2&4UVkK1pjs8wL#i!{L$?vd1H_Z6$twLqmH^BlS#JA}oHC#VU8K7z zL91|Uf4z2>1W%$e?jEx#29^-iBMeK0!||^>>!(#XoGjURZKTUh&XcLx0xDvv4f>dBBX+eu()6 zEq@uHw(;)4;bK&fd$mt7IP*x82~7EgDL*>R^_T87pY}U6R~9!ZI;mpM2~6ioKa5jK z{~gq4Lyt&J$5Eau#VC=jDI&6ZaFE*Bn2lSxFTLQBRa6OeGJOmBBd??`#WJP{3d~j<5z@>^*Bj)*4lLZSSJ^Lw2Bg?W4w)lIt?&rl1X6QLrjfY4r zR&RWheKhyBHlLO73ovCMyC>J%HB-a>oYfOAVloJGnp8d*a%JjUJ|ML!43bmVBaokT z_1+NAd44N7sJ-}$E}^ocj9qi07}i=EyuB0`9^A~Hcz>thLVt|4-MPN6&1kgGlryFe zKl>kXI#)p1ukF$)aiL-rd8J0IPNBZtf6;xGX|yoeurt){M$Ceg0hp!Ipj;Z1Cr|mc zOAtXnetKLv9UJ0qj(MBvN3}NQ0upJ;HHE5rDINXcYp1Q^_OErWJSMJMj3O4DrnuSj zDP1Baw|^g%^7j74v^Q7GDX{LkIx4$tEr6qpEeJ_`EW`}clGrp1ACNJZimWfZ#^GsX z(7D^Aw&0(LRVOn;kOE&|@!>h4vLD5$_0!agiM_$JYK+_{ zLaJj!DPHW^Fo->ZtEIQ83J6@c`dFLK!R_Mr(0^e-59-`SbKJ=tAmAj^h6i@NzMQ}p zu*+bq@}4Wp@uB$a>ktm0FWUnp^dOONBI{zu-deI=i>uOP;L3GH?vYIw4&yi~RPhOD z5`VFUHySzU#ELL?>r;-SnMSu@g2GX{+_c z$}r#|5gdNHL<(KptXX0q2Lt(BC!4TUzkjRx>qpY5uoJbEkwwMHTJWEVtL$^kY(~wR zLHOqr?E9`=;)hXqjnAwu5TJelCJW~CJr3BCOz7?#YMazp-vl6?mE{kJ*x7fRCFI$l zN*T_WOL>bsN|SqyWG`}bUPZ@lvc2CY|0>SyAZi+*TviBHa*Y53#QnGLfds=ocz+bk zx32dl^A6MlHJsDawa#t93^-K6&C*n&<^A+KBamJ=1(ijy2CZj*HZY-ua2xO!`Us&0 zjB&2GsU@1f#(j3y=$TLHeZq|B9i=Wlv=R92 z0agByO}PqDpbjRWN}K=&KZvC^1u0W;NfQp&zZ(bjx}6s_H#MJ7%V;%eSAW0<8MOgC z>#XK}eIvM9;!SH8-)zat?Pg|)JhRN~vbJ@m@G_Y|#FTa#+4IKVQ}L zK22aA6y{^JuLNE?V@~qf=W}AVJrB_)B^fTGZEQz&R!goS4IhJeu~SbVN9=Gae!e7< zPH7kf=lD!EW3@~d!p42Cl6b*$y--8Xj#FtMLE%^LN;yyjfQA+iD2C7=$ezAnKk?Ce45Sw*h6q7Q>8=EyRTf1lsmXd&)_*VC z3R3v0z8HPvh~S67On)(X?ZXN$Sw&@exmjK{NqqLTAnNN%_gdFG%Kbo#?ppH`U;D@7 z4%p!beU5m5$CnFLz0*s6iTTd5w*k*+4Fo~xbEQNLhbIU~ZuvxvF1zL zH08Q0f=sMP+)4~<_9_l@jshSH{$O!A+?U`}fbA+!!aRaM(?|!MG^Qr+ z0T_fDGk68UqpCtYgn@2@s#@JVz=1EI3;1+*k{*I(&zDn> z4TERXqk@8U@KbZTiT2u9q;GK#{UKAas<0#@f;T_knIdXN>*Y@Ql0w^ zfB}591)HH^37+vVBSRlF6kxH)6J}s@pEP0$v4Z)YPJO0a^4?D4MgWIUVS%8Dyn?c? z`4E$Yk*DsDc8IUX*$VdR0-*e`x7v&RK zd6di>b^)m*(5wmn>$>M)7me&MYG)KW#^c?fI2t0i_}0vq2|t#OEe}`pOAGXl0e}K* z%TM(iy%8UH?bz&q)_cCx_F^pJk|d8hG@<5HjDM@%ty1DK|NOq;DAKr_Xkk@lXb@wl z&Pkl(xSl-7=-pnuI#CEh2`!N2q^`3F$nRM7eZ{frX-1A;WQ-9X6#XAINq)*=%)6VNOIWJM2(vYBwG~W%lk9c`S)c%C@#@3<}R2g z`+wyvG3Oo02lSg*&6b*b`0@m)6x8UjFHld!E8rw-0pXFbe3{kH=;avDYOlo9K{;I| z$N9%J_H<7&t0}&_vggP$_Q@tI9n1}{ilFsIQDgMWWpcC!cw{rUQn@z`g)3N&@8&gx z8ub)KNLXaQxOTV`dj3*(z<8*#Cugu9{(r$u(KN8ADEYrFdd#Yi0r(wEl%Az4REppW zUdx2!E}*eHvgNK!$WcMk9+I))+c&OZM538nP1m~>(Rc*nm~8yG-2X-UYd;6-GCl!r zVHp7&Koon3^-B=UeUEyYU9+3!DFbm|{+av=%?6Ek9O9D0Z%_qZ&~YWizPBRj^M4}X zu(NQGbApj-BtU!j(@KpY2ai^Bz(M!8-k!c4S#R=mVR@Y4Kyyx5OoAD}ee4){Gr*zGOvZqap!k!O}^^uTLA+=VevIvXK+HERv|sU|)f9Kor@L$$iI+-$Zo!QH(b)PT#@f_3X`iFycJ)-Ar9zAsO$mO}{T4MCy*U*1k z;=9@Md_zT|$|x&;exCNnupvfaw?^P}Yi$6>2#EavwS_9wOtKkJvN!QuNY zyfHeHrbPwcKv|Z}NYqX9oD(9GxF$7i4sQBJL+QRs3oOv^JRGwsVtw zt&24G(4|ZD)DlE{%ySiv}5qZUv?tYm{rHC}Usm8pE?53qmN$2Wya zaRMS9`Qaqv=VIY}H{jeGHC}_4OTr#Kg6E&}tYSAH+9OUgv=4^IJ%37*iTArsT)+?8 z+j@c=-qZTg(Bs0vpuJK8lLst)x$Q(cXma7J<6(9XRT+~PC-Lh|L3%O%oQb~hiNRM( zZJ#k#qUDcbd7EyqT38MOkkxtPajhEZ!Qfc7K?Z-guezTYfmJZu!`ASSqV8 z(^Z4ZBB*bKe-@h(zBHf;vYnC+-G(nH$>j`5kr2tvK^x*(84{bZsaKL{WI&;O=e922 z4_;-jZ$(7#%$I`KbtSDKXBz#BLYtX_tT@jZh@HGFeIZ+^XQ-0P*wKGqcN&`=5`nF+ z*X6ameWW@h;-(H6=}Cg8a_9OZCN?FkzBRL2v(;$x>;0s?W+7okninIG?qKCQ`%J+c zRL_E#xsizbA+MQT+M4vIJgIYT{R-N9aBx9p3+(n?w|{sY^}f+?bR_kSgDG-{=~Czz zr*F(t34=rdXCPJgKrnykH{TRisA|c{mcB$#FwxNBsJc+kv_{k97gx*27FcgQX=vVt z1BRo$oOgt+kO^pQ=POub1Mz7OxV=K@?n`q}v}9Gp#H$Ve;$ayox9Zc&pTIlTD%2rhwI zT(M2N6JWouq>8w@(Jjcj#-szUDwEXA-*fgd(+Hf1C^oOozc(eCmrt#!N|b=JUPEPuUU8=3Xb+j&aG)eHn}v|!dY?>35at2=sj(iD&< ztt<7RFHM3SlH8Ldi%S5Mu%>IhIT{4W|GF7+sahWhmoJCTE9T{mHSHj_pl^E3`QlN)6+QWQH4yPM!+3~suFXW!i z71mcfNGV^&)v(8v^8WJh9Z}q4frj<18cTMo84hs1wU|KNB5`|Nyfsw@#vo$ud7RkG zv}-!M;dM|QTHKXLFGP1Kndy|*V;AXMne=&VR)2U?xmb6dqr%H#`Q3mi@w}N|^bD<8 zWWY=wxRO#O$n$*ru`4dXy>!{3LV?-~So3iVa4TO{uVE@f7gx88UF z6^8e8fpfb=5-lF&>Xi-7l)x2zazFMzAxeP!FLlX71dAeTGOrywQpHn*%E8T)~ z4&7l%7S-F14K0qD56HQ(Gq8Q0uN-V-kJ~8>LcG@83$C&a)!yifp$$R}SupSxb3%~y z{2n3$QX~1>MdfA3+rxLY-8JzLfbWh8+kgF$v$&vu?aNbF6r4GM3L=h@(l&*tE2`%q z`#3L#J^DCuy7d@tTHNG}^!lmVHWTQ|38bQgVY`zCw)`eWy)Ri!YktcnpC6Agl>J59 zWj}ZbQ9cs>Z>z*~GHWRQeTH%D7e=?_asA>h=+R45vApN(8-}1FKMdHjmbNxh@qa^O z6utfUCSkTcEZp$@`f)$zmIGV5cX-WP;4O>zrg{9%HEkAso9z8N292TDN$rx9ms=>v z8Qlq~0_W?gty_&rogu4DGr+)D05?F$zx}m6luxhAbMu~W-gB(-PEZQX@S zW0L@!!9?^4)@+jr`6~$zjchS8`m1*1k(0{L7nh&m$7@Oq8)l6$h9-jmffp$Oj*sM%0z^8_h{OLaTgIsL+8H^6~QUrP!%Y!<-AFfV6DK zQG6&#lDolkKLP5v1WI@x{Bz4KBAkk_^VMI_(rq$&FHI+KzyhqgC^)7h@Q=$ChKdkc zO`p(tL+0U!YkbvJY;>#`yjm*1-j9D0onkCx`Xw*6FPI{778oD- zzv3SGTb0@N;7JM~tALZ{D8kIm(w;cQ%W`kKQlS(!PyDmn=AKgi3>d2sv$B7Dp9H}~ zu}32=X1Br3Jf77ixr)Zw1BZj-Sku+PzTleNmc-6WPui}~YWg6gw9}?qK!%dhn$i$P ztwL}>^!dC%V3mbUJwko_W6}#r%3MtGxpPhI^p&9WyM)aHKoBu>%BW=;>ZOzI`w(wp**ol)SNE3(-&3F6PT_x5MNGBpz-`Ex z@Z`0Xe@L3hMYe1JW=?=A?PX^rG%>Vn_uc9@3!@5^M1FsKFbv0kZ`TwPb;Z^FrKT94GQ4W zd)~@0(e5-TMIe7E8RR#QDfF%T{J@2hfJk+)nTG&q4}O^bM3wlEE4^U`V9A_2;+yQY_FTqJ z>0k^>8e3LIpUs+92Rzl8sdcddk$fYMXYB(iYQdycNwt4L%*z9}%2#uaj#WxggS%lZ z*OqmscW}l4@t8Z9P1MB(YLmq4rP7CrN&q$ENX>xC0_!0mJs(R795VrQ zGa)B_!ixp|9ESsyaW);(0(`w=i|M!nMQeu+;HVv_{N~b4(f2aU7Dl-O{S|?MHrwCZP7BWXQD3NIUkOrf z;4Xu#6CAUQ9K6KzLuuE(lM-(ql2Y0?_jO+UeqEqm-eg!MK(t*~-7jfhHsz{ZtCf2x z1SmP-EL9Z3qGJ{`gua^MDR%&kh4BD>q9K0+dFYs0A1i(f4Bor$dA1d&5|(Pfk`3lc zn(m)vHgamx-*~<3(Y_p}LpI3mFTqcTjvnG+#h&c)Qa0RF8KYV|G27Ss9KYKuBJq@h zp6CeE_iw;}KnJfyI9i?dm1_otM>b!g1vp-79xCHxWq2Hj+pVOKJ{S z4X&14Yf>nqH*OFN!94C5C{iLwap>GzBr(ZN!Vr zSrYQ&pHb>8J(&OU4I^*pGw(frg^ETS(=mtWvNn)QEEYa7z2 zH#}7mWpJ<}d4BcwU&3>3QAEvE(?p)y)%Unq-?M7md3+nzJkkm{4Tf@P=mCk(cwg>>Pgbw#bQ zqA490PcH9F5|#sc`f5qU)F6L78W53_J^5*-q7D)%f6JcQ1 zxu78;?P>HE?E#nY2Bm${?ip@tQ7Ld;OF)d~3JlRUN1dzR@Y{v6 z1Zhb7iapY5Le+hE+PR@~vAYowq|svNdkNCVS>KP@p^0ZZhM|0^*{OePEYi|rW4pC# zx+9GsRe3VENs_YwEYMz<=YSX*W=A?lzE*6liJW8FM5bk?#`{qa!ppyS8GkfRC=#Qp zb0D*(MAUG`Ih(RHsxgpw!cFLIOAAzAXum^$Z+69npb!f}liuiMmrL8>^L) zOM&bR;Fe^jK->=jd)77w&;7mNQ(!R{s+~jL$e*td<+d;f_XU4|k8?iu(Se3ZXwH7y zkiyR}WeXcVwFUBKS&j)rktx*siS?*7fhou|4USSoD<*9y#^ZgFr|k-bw)rVH{feHoNTon6hD8~e zX#k}(JW1D23yXi?1s2M^ExeUxwIzy(xcs()qQ>_CKXEM#M<>F&C32?0;aQk-z^~~i z&z7{%iw_^f&u{nu0YT9Q(>0M-AeE7>5r1cyPWA@?cgU2i79Q*>a+0mV!RH9mc3iGX zl~um?5k~)$>!lR-Y=JfAR?v{MuO1bDKCBpaQ7m$d7-P5Y3KGqu;mqLQUs(YYT@_{g^B1{ zNYB5lPb7bsO6KFUewbj{T?+nKzNG|^>rYl-6jG*=)yp%1+e-LABF{ZAz7#=wOxaOC|DJDzW~QH#L5&Su+(YJY9U}F zCS?EP>pOO~g~9A@C7M-6j6`pVWE(awqwn8D*~AMAtjEX4z`}r^bM`X* zi5P^ekxV*HBrW32Yo$2?(WV&Ms=C9P`q9OyLB*XQ5kc4h;iH8XM!iov&WYf;#2lW@ z7KgiSNT(y;qDXmgwxvQ?VLevM2R%OfU?JHF+9NUyk&q4ci+lw}R}a!Xd^V5bf8RAp zJ-B~tQ;D?R9P|xK;U`B6&ttc2*r@30AMjS=O}CRL{&K&h)!uX^;I+!`bbb<%F6(bZ zm+5vz#JG}ao3KmEmB0ZHpb6OKlBLQ}!fSoM9nFJV7=kZ2Z;7I#7O+7)|0P9ayV7byI`|y7s{dzti>f)qZ8d3Brh-N+T3f$LSuyO78 zKrzUgICj_JR3{P2EYSs-x0ZoC3u^6P{0`{@C0!TGs4qZ4-dBUdRc^;&D7tz`@r{Z! z^5&-OQd_rd%R3qM{#g_;I-5GfhrI=QQ*I?O1(&o~7YvU)$vm2Xi2`^;r@Sq z`u(#Rc01E>h-=6bk&|?HsvTI4uHjTH5%0jsPOO)YNm7v;I*2!XnGZa_{*{BN=khsl zDjn4RAIGuDSu74q9+y0UaG0~UesTz(<;vX9!Ox=k<1o>lTxVb9_GLOn>>KF1nbPBT zqP%y`!MxKgN?9g%9VMraUPr)c01tmU3+-a({*5EA>ZPcOW=6Du z)s5k-mXI=>Tb$y)>)HD%(RrJL4kV;>D0hP*gVR{QSIyeJ<$L$@YKicH5FU3xu}$C##=XcXh}1gRRio-$b8F+ljd8{ie@R z+42UWl>)+qB1--6!bF+r>@Apmi=kii0;;>|_?04*t~5&a1@ElM#b$~~a^*AEw8*Z` zIuU)B7-^z?Ac2?yLK{L&uD@h9Bi<6kgiT8xnxCvB2@zx0TM{n@elve(aWeSqoaJR> zDbuve`1YT{KGe6w%Qs2VOk8jST!f8d$ZW18w39i41+N?7aYvp06lZNDshofBD!Uv23d=&Z-;nWg$;yq> z`nIz_)=H=2_iYpMX(KLOGZF>bEsj>{UXA6K62Q0~-b+m&&^JcL5i zq9`v3FOYxcnq1Rr)+MwZw;eWtG^wYg4pr+>ra&S#F(UZ+dlxRJ%Cl}P*qw#Mx?prs zF(Wva-}aZYKMn#r*@_GhF&!p({I`G=t%L^&p}Z;z6vV-q3Dqd0ovUl zP%fX8+x1VD4^^}bhu!mz>Iag5U1{b91~1ZJ@_2ua%C$&O^N7x!^UQMT6khDFGO2CdyREP8`SrkMk=(K&()Fn_r}$n!LT+uoM!-b`+60D|zeI0ol9udXuL~ z?gOKqe0}=%*g1iECy`A=oD4vR1!4}Ya209sEnyy*KHj}h>AIjy;xV=9836Smr zv;luLE|r^Epz0_gRScmM6vw^f-djS^q}MoAgxj#4Pq4hvQph?+rGgamO6hfuQ>CqL z?k&!GK&DS{?5h+tNmgcRKYOZ!$BN9^F(fc*boYKC4iD=3n11l-DCdT;=W43Q**sBE zaO+D}P)^N1)_+e$tY$j`HrtF8bIdAh0?2>0lo$%C2d(9fUCrCdYw?l!9Rinx=!iW=%bH9~2 z4hYQRTT$!;!qBF*1KN{YcywuEZ&TrncN)@gm)ociVomI~(11HA7TLl55k0vgS;>F> zquy}dwR7sNc+gc0LVy@|9ZYB~Y77!@c0rn_e|jz9B8XX^+q{iQgNSj?&5m`@$mt42 zGI?&Hc+xDBeGsnc63xOB{vrXkRIn}$tZc;1HT4u+_IfxgJ`!tJB|rV`3qlh0E&fytdcnkOT7M@XaLBJx1jFBA4MJ>h@x)7XTLzT#)^QFY(iVFMUqYIh|9ba#S!~3-Tb{ zWiT~xX9b9o2QESPd9`kAUNQuhtSFpEu&Q~CAmM;Tx_<8cr5;#bNY5j|H;W6Oy2pX{ zgZK!WM88opjhbggBaxS*uor)5*qs?czfqsKQfvoEi;Kr~y!bvaIs1nK3i{6Q&ZhCl zKJ=?XG5CA01{LEP4_Um!PUqf8<3TYQmaljHt>@#%BkWvX4>U{n2Dhpf&=Og{5W(Y?NT2WnLgNv(kgfledbmk9`JwECaZc=y#u&p z%*saO$(Asm37?3uB19l}9!Sw5z$Ym7v*%Es*8m5{rPLPSVrel?^ zu_s#9ItNKzDvC{Ylu3WX0rDP8e{N~CoJR;JEqh-DP5$mG!6=5PqEN@0Hutd0k56E2 zKL$7?H=jcMU3#kl+YKx%j2F-QC0YAjKbZ|#v5Y@dK<)5(`{}(!!~ni)Exm-rY!Pn~ z`p!hG6s}-pF&TnX_LuBKi4ZLczB9(q#sO8loj_tw-A-Biwse2{g=>?KL9*&Y8#ogh zz?F4oTTvB6Vgt0)_ZsZ*#B>2$i9dt9d%QMZO%1ww;q3CH+o9Fu=4@LEyBnMefa}}|NmoDOGP(*C^<$FgQX{%YwP5}yv zLx(8UK_`1LI8%uAX!9647|agCT^UN}mEX-Qx@-i<`QX@x^V}sCQLQ>=TQ#%Mg>-Evf zKaom=4xWDkqs#axfQ6uW5FZx37sA<1q3)Ybb+>#z zU6#*ol?wePxy(k83OoIVP+qThS#soczye%;oeFPRaUBx9Uv7+}C-v}5Gt-iaOKqTV z+9D%#?I|dca*^3e?su>1H%Aq&>k@bfrp$?PZ)ShE@fcCH<8lGomzc6b>Izcg^%Bgz z@N=L-Q){#DnN(1%1;#sQ1swi!>_qoLFuGahcdjs@Dfp(r`Gfl2VVbRiBOKj-k+2JU zG)B+a!kU|u_Fc*TVo;CrPR(HwxckGJr{|L3baXFj<$#R#a~J4qFh9>aR72zZT}vIR zn2~?LK*w@V;J5uj-eZyjuyi;)8TI^rzZk23iD=hR;d7OYuTTm@!3-R&5@>g>yWWiq zpWu;YqmD<5m7W2(2*;-=U8!j$^Q|oW^mU5W<12Fu$5Bq@$9l{pS5SX7tKjtDUmDtN zm44v!_6kw+Ri98Ng>eR>R(l@4UA;?g{TaMSe<>Iv65=*NbgG0EMw8Wc1L!>EeG# z0|_v;p32GVQXqS;8Lnj4yZee?w*pwCVTa;x(m9-|?bVD?n5DD;KtjKkV28za1+`Q^ z4=>-TYxelN)@(vJgS|u){hptkS`R+*zlA6n4VL1;KHQ+i`rj55+OZJ)WfNQ>CiO6| zwqa&wxCrRi6I9X@d@ByTA3(YJ0^om)0kUW0uys7Kl)lK29^r_~tI)Pc_4RAo*7|ND zLU9#MSl&{GA`s78Ch?a=$JEwg3Y~uQ^zF;(-W6%ofs8^!OpT1FARNoyDO4A~;_~cy zGk`SJ#djn{x`*p9S98tAQLX6x!be=4-TX0z2y%tUY`8h#)D=HB*ATj{@ri#3Fv?EA z2=8>b#`{x0H;RD$=%$NB{S`W;iF|ykRFGC-QsJM?2hk1FXZ?I;YUPohFeH9fAm~Uo z35j`(VtcZUfzkpx#zVa_#)YOqkKceFiw8|*J<_koD{1G8(G1FNt{+7|a&gXXM;d0j zkWK|_%$c=TwMPWDoVFQ&s#Sl4dnwk*aGcGJT>dx-;t4_gfoZPo(E15afuw_bwK7>C zun2&0U%QCH7a`nf|-auy#ydwO1QG4hYGWZn}Nl zQ^Y3`a|Am1SDZCRk#PUanv7R)BbFo+*Ez)wY_2d6)(HFpS?d zNKI7nYzo{_v2s_mT@2-Hf;mVP!x~#WPsKVNeD?8{)XSNr>>U-jnEtdDzLOlwq6 z6mvZ4E8b$N%&J*fmbtsCfpOygX!-R3n2R|%@S}j`N8?HTJCsD$1f?0bGfO_a39G^W z@O}Li*ZDi>Ce(jv4?_8a1mhW=NY&ulGgE=SVD!)dGLJV~toQ&L4+|y=%+F49BhU9d zkLeeoC-9S#9ANS7s(3&7lW3pz4O|ghgJS}9L-W#VJ%G#;+7j+oUA1}V7nJC!fy;Kn zo+()*&1bL)nf>=Dt34g*T5GqANkm)uX=nRVLmf>b5g>oAMHer^`jn^DsEWUDS?Yg# zm0IvO*$lSwgLF&Qq)Edm7)Z39^>h$I+n=5M!LMFp+11PkSq4fq1YmOd2>}b)jPSob z#N{7MIt}MNWUw4Z6~ezbZ!_h&eR0KjaQLQpK}PZxyzvp26}(T<%Ac56au;8xdy z6qtNU$lEUgwuW&uaLGp-b^BvcTr6G}G_jR24& z=8}R_K!xLpE}fZ?c-kOz0#HKEblE!Ya1CeAg+c}q13GO zJoXl7pC-cK6|v5|lqYw(fOwn+-N3ryqX#&2qq#(8(o56&Hz-2i-y`2TkxnidXR;}B zB&wR{`rD@uEfJD;H#3x5sqc{o>AHXO`p-JP=yEvB3;sDW*#d0QY8fF8PNm#r90s776lw-#}8fi~qUk-(`39VMCsSA2gjM!f=e z$k%3$+@aR!T9M+HIK_Qh+PKSLr(}udmO*18k1iK||9oU_Px4NZl6s;m)? zrXPKbt%l7i9*vf9t5VCeh8y(Y_9|0N+8;mL@NSsE@Vi@BbrO^J!jXAHF9{4ewmFX( z{b?b@CF9)l9U0&Pwip3}p=urb{_KkXa^)C&4UvP#x9SL1w-o+a+dY32fCVm3vrf_& zZd6EAD7rnBJ0}Pp-Ep6I=jGVXS5X9+tW=FAn@PcjS6_fYNcCmz|>) zKKn_^29@mOcd+#6Y{3CTn+2kSW$4(4t`y-i4R4_KaH_H%!BodqkXOlP;eA;64V8rO}=X-5w4esLcZY!)IL-t@OfC(fKNa5jg&uuBYs)MYn%1Xr%5ya+6%m_{uP} z8X?z=n4d2TFn{Y|%lxvR>o^(N2MdKW{f+Brpq+}-BbCi5*&B*o3f9`S(A=>I)Fx7F zP>hKb1G7lld3J4I-th`3q{mlh(9*;*oz8CmL7UPaWqO&w{xG5b5h^u1a`@i(AsdKnXfW^_)mmxPGzQ8V{cl@1RXUO!G-X)udr0 zK{xZlu>BL`Pq-LcIN(&FP*2zthmGW5#NC{llxU@N+W(kA(w+l=Ay4rt~;2OV=&8OqJ;!$cs&S5zQX@%pJ zd$fh&zVbLrjWiSO(I!MLC!z{GtPJ8 zsMigho#p&0KN^rcomWN1E^-J)s#qqx2BYYwZd;pnc}KB%@K(nOdjhip=LV_05pSlh zHCumTtXWD1(Yo{gG*J4vp|fd2cbyp+`u8fqaMpQwjv{F)FapgW6aq(2BgR=BQ|oBZ zv=dc!okhWkECPDTAD&kq>AEyXXpS~<=79ka{-xl>AeP+EWSO}uDw>iDcoi_`@{HYn zf4%DT;VYdE!S$LdeX#iK0zf*knJGtPTFQUVj)2>7TgsR8dK`AZ+%W3{V8}DTxf`Fz zf#VocnuvL~bW=vgO9mw1K-d-mtJSiOtM6c2sW9>8-(nE%D6ZNVhd-;8ZC@|Ar@U;* zixT(;Y16y=<~ZA4MT*SW5D0G5%(MOk{EV241~OJpSq z5Z2KkPkzCzuOp_MSvXYzWN-44LU7A~Hv*M=oK+6);0vctef2(7*R?G7ukttv!f^U>%*G1`Fb-iWMR zh>7*Nns3!m0n=KiT_hu-W^-IF?K~YV>eECDxy=Y514P}eHbL_rih+OOri-YE$v=>n z0XDgF{JEIgk~ptKMuhaFJpIWE*}zh$=m*089*iLw(k}XA9n{zw#!&+)2JG92!q8r| zZrmURn(p`W#A?zNc4WL{b!+5YF34YE1I(nmX4fy!x!-Au->CYHU_S7ioFD6)U6M2b zm6>~Vp%kKpO9fL!Ms|Myu6Z7d*U)jyFrS&SHi@uajBdrpW92;}%RAu>4>bq0G28{% za|z`pTw$hUlmpz>75b!dvTOmDpGZi`Y#TI93)o06Aba#8${!cHmabhx=zj#>@rl?fB~Ogt>o;U=d#fN!K$U2==tB zBI?Cd%xxu6qu)*~mAeSiHgkI-xq{42x`_a$verH8=%jr;9JWo-1P-2%A+{hnLAJGB z-kwYg^j*1lrPThPV01IK1X11bKqteQ*r*5?Q}z^@@>%=Q5R@4lNU*}l0qzoaJdA>E6_ec(g1&KgKz6oR$+Y2NJ`lqzq817 z*KKgfv|NO6W8~cy4Dd&@Tqz=R6_+Pw~|KX)0Hdb4C=coxW4EtDr@%Y~)igei8 z0*=`?{dfJye^Aq3rK{uJXi{ZvA?QSmLRF!qeCU6QERt8ktDwNxPms-CW*vsQ3_^si zijv6ofZ%6;Eh#X72bK3iepUS`X8szA=d{S%0N4d=UoRm$O4@+1gtDwis~o?k*0-#!_NG0mF3<`x?$q;AK{ ztxkWYsvU(CF+O=bT7MaTBB`gbBG?+%kLGFhXKok&LlvS8^`&vzfPL=Q^Yv}X~98q;F3_WV3h39b;Wf+X(sSwn7Z1Qa#GfF==l=kWebPx9q*Q5m= z8|#%8M1vwZ!T{4ZvC7d&m?(Ss3<1R#_*2*1d}#!gfG1JMmiJmI?WR7AvX@GwCQ(=lh%HNUxPB*Z^CJ35| zT&KJEjhrHOto>-DhUt5@k9+FY?TEZ@+&9d7ycSpq3)2c`1*$57Jr2}~^?_U&!-)Q11$noL3cbbqOdxuVqWG5| z5N4BeyEF{51Q0YxYaPXb%0eoe>J3Z^!fHt@YfOyNNh?}-%|dNiBEYxRmymxxDrk^x z-ua2(*jUYf#p{_a<~^Y;N1hoL!SfXLK7XUo{h6H4Y{-C}MOp2p@C?R%ZAPyuoPJl6 zZq_&)ik&;anAG~`MpF)PJVntLW=B2L16sESx6T_h@Pw3J$Tq{<>L%HUVXna?*?f0S z?&1=;UMCzHoA^UIM@ea^dQN|F*OF2;TY0mJ$B&DWmvF?UnXWxlBxH^O9|psI!d9`b z&amNyxs(NL7uPao)e^n83Bbh~l(`-XmH0iST9*mF1M`Dht)@Cs_!2+<+Q5pP zsb_QS5%3&}>r0zp;|aN|i)BimL9n3A8*-DV<4(KB?v$mdWM`H65psXHE+XLpgRkDf zf)XVbv&g?_Z${q0fcsbZeDD3aK;d22&W1gca0BY2tQE27Y;LrTfn+Fkmd>uoXwneC6xLpFe6iuHH9 z$@(7q%BoWok4-l6;|g8ddSpJ_U8PNtL1Y7rBn}^WTn=FO3AKNE7|nH8!pXW}aC^(f zjF4bq0ybdqFgvcD0ndL`l%3coTjTp?r5F>{fTHigz!;N)g_)(A{iCp5ozcR5#;#zB zxs982(4HXHL50a)`E;j0Egp0a`-a6wqaWg@sNt0YQI_O21r3_x%IHg3c<{0=xSx~a zGC;pmtE3EHq9%VYb0gpQ(0cGhata7MDHARCXeD>(Tp@N*R@bB_)P6@r<4S5H<3QQC zY(x94wBuf%TRtmC4&ycD#Qa0xw&pFICe=(1!)yA1_yp5}3(h{3RE)#Zn=gjR6TE1% zB+XbWm1zza=km{DK);00x|hCgTEZ*yJh2vb9qn1O+nj$i>2FWB5!s0rjJa~AL+tkb zaW&Ifv|ON)Mi@bxAqMeG)Q?tkqI4HIc=2S z6WlWSVZm$GSB+cg`L)i!D)wa#vuvBfU|Q$UGPBs{Z$TUcPzaWs&~W zAkiEH?W=!YmXB~p<=4ec`O#t)oe015xS|@vLQKIiEoV65W-PcO# zi&;qC&ol8kr^dmy;MxJsOuFJ0(-VKpro1vzjm3Y^>W}Aw&@k?v>k0&)^Lvb`Y+wWh zZhf}#dZ7b271e$dXAK?B=v9Vlw^fYFl`!Z`tQk% zM3R4fOD+gooiBq693T^S(^Z*<=LnHdK{!0g5|~FHgQ_T&)va6X2TRD!h-2{pvkABl zS0EIPGU6XrtEidzT#XfIQ3-@Ybck?`L!-8R8ourHnOr-kXf6@)rZ+B{_ZX2P#`CMT z^F%i|rOf3xpS%Zb5a?m76x#@;7F*)2J&u1lby!^_O;xbogR?tLap$^B11^6O@%>#} zYxZ7O^9nHNv{|5-z$d`TywZbl!8&Kx>`zs}zdYF?j&(uGjGA8WJ(s6}%tp@YPq{Or z*`CFunAub_*+K2DZYil;3>AN! zNuv94yhNo~_R@(z2}cpQ#42a+U^>;r|qI=>?6(9&e7u!%WvszTi27(q<^)4%Eo2u5l}ofC|<2d@PO(Yxt| zC;nP2Xpg_M+>k(Kzg?CSEF4{NPW69o>M#ylIH6&!P4h+yNX6_$b0k21`rtob`MqAE zYZ4Ngs=a!!xMYexYg@4XtR<4}vl9YAsRwWaEv~_IVoRt5=^|4`LF~rcIZ7aVe;&y~ z^Bu5AUVE|5BMB1xp%=rXf^>l=2{dV>f(cuZnQ%WZ!HAPs$j#Cc1p2)QPH2C>zH3w* zRDzaEDh+m13T3eVn+82QqPeot0@AQ*w$>l!;h@mqTem2K)@N%S(9P+77US%9A-!L9 zMeN+N(fs%H*ek;*6<<0wTJy>p1hFLQ*9B(7+OKz)_Yp+T;ad6qI!=DXgdnDWro=OK zUkm%u=quoFlJXjiq-inPk2il2I}g!0KXW{xAYvzu&t>*7+H`F23;BIWc^4bS63>NW%4$e<9k}OOG&F?wdb^e zM)NpHTg*0a8o}C*5+90HnOk#Gem@A@wTw{J$(qS}%pDqZmDp6gIvjt43jtSjG=L|l zB@K?M3Etq&r9Ge3h>n>jMS9s^S{n2>pU-=Eie{Tqv_csk%1VfP3JcpJq^vT|cv;E< zk#Mw0T`5dV^Xnkoqbj$$FZ0jKLHac`i^mQO`}41Uq)B|y1)nKETjFj>oFr9%JHhE> zqYj_Eo?yyLf;yQQ{{(-FcMje(3S8*4MJ=arDO9RFc}6Qhug{#s+{i9B$FTRobB$;` zot)>d*-+bxfiRTjqcwe|A3m zeqEA#54Ed7IPJ@RE9p{;D~NJkY#i6`>hcNRU2E>n4&0s`EB7*4;fHJ&^9QAVT}_Q_ zWz7ESEhzgJTOV77ZXy8c06^rCUY?;xWn^%X%VJ12%QF|wUvIgv@$>T>6bERnYXy@J zi0a%!0UneqHYtBFu27s!ZFSz>*TqV|!HtoXF~05S41;gs8pjzjCoC8pfcM<}9^Q`V z-D78SV|1}CxXZV7=&t;Q5!Y5pF?Yde5%pi6ZAowJ zjbAexG(`fCTr^?w6}cKSOMi;F78*F5=5+KTrB=jy`I!4om?!C&x%{ku%WhRoB1ZXc z*;@5YGRZFkQizFs0n5TGL~SJ*!_-b5nvnD$3K9Lf#uFf>)LKMLz#Fe_{2?8k#^KvN zc`}2rym)`oq@y5=Bs}_kW)r_Rq$^7uU&W}Wk6uC4>AxYNHPRd*DxlwHu4jU%FH{04 zQcuM>gJBTj$1}AS5%;D6+op)fet2W3ofPuzRV@> zMHGJ*vUOt5==YEZ=-Cg^-G`JXvdNonYSFAB(LqUs9ho0p0$~vhEIYPS47KUGr*WY8 zx;%A*>hVeFlTSb%n1wQ5-XB7e`b)Ed^?A2UMg$R zOxY8pk8q}`g6=xSNml9|l6r8tVE$`)(QRuUlV!*@r<d6WbXsPn70K8^2M~Q8*+w z%j1E%2E%bV+EYik-KxeTuWk^ibwoikN%7Ab{1XPEH#xqKAMB09&9I^~WXWidy5oOQ z=~tm}A=cXQmlNiFN(wf}jaa$lHd>$pyx;QzFvC!LRd{%i`%u2*`O;TrrN?C8o1J8} z-{lZ9o{=PU3@B|jh)~M~_epAo{5k5vFZ6qX3raZ04~3M?ycqKvlMvnNdnWBMb zGCtgPgJF5(WyOT4NouiC5`m%-CVc2CY`2LEhKj@!pa>8JCBeNZ$iBX1Ve@}hX&qw@ zCZcc^!&l)PX(1*5Jy$iG5ZeF+=iHqx09$gP*9$Za}C=`2#{>i^xr?1Z6rCvN7{M~<&I=3>#bmztD zOooPvDQ5u+vF$1Qv5H?pj0guFW~z2+mdXF2=&X?>24EogAO~g&&CIyY%*^c9U)?LD zl4iO;7?bCM#vT||$jpHj`iEN@z@35o{WA1RWeRBGA}QBuo?=1UQcr_(#?S`M1Rq2} zCU@H*e7n!Kl18*r!Sa7cJlYnvFP|YJ+bs=%{jQ5B8i*oR@dfiUNJ+^+{UGRzj!dYq zO-bWIQPP3XLUQA2+2Gf)X>cd))v=yzJ~r)~iE(meUdHVOdHqcm<_#p#|{D@b7H8ZQ~Dyu0ej$Q|^Q5iq;zkn6{l zXM9N2di#^hlHY&G69|FjTH~#$3|-D3;FXMbd0_qbBvPK|p#=~g2;93}YcTbrVd@d` zQC$nN3|UJbsp;2J1nVZq4BwaPl$=dEVznDHzN}Vx{_}@$zkp?mDwY04gWerT^aFt} zF9D~#4&Rzt@GAuySL1BDi8|#$>LeGCA~IUr!8QDs+7?b|&DTjfGI%0{zdsX>Oo6Oa z{O^kr>?Ty7M<~3K8@<@p6cpKVxQxmh(#dv45nQ%fr7|dpY1tX^O_QMh$OBL*ntE%e zXMC{S+xg3XV0sp$Y)ls%{#{)I{&M-@<84^+$D1&dALbHixXOAkW7w7$!Cy)< zh_h~MDzfY0N8Y6tc!vI9aO7)&05wv@$L1Zc_jYFZlQnXKVyduSMJvLnqcpTyfaG>eT7-T$pND{T z0r~oWi)oCPYPCqB@9h<@Rb&jJQL4aLafJzs_D>i=NT&nj6?H@zN^_+-owBkTp>fSB z&}nvenM3Yi;1@4`HhoVt?!Nwo zXpPWe_Qnbc)N;qGK5}w>JmcB8^WUZZDskk0HJSD%Vo#h}JqBWI$dvtI=$DWIL&0VS z5q&;pZ-f^a!xl903n%=@(l|Fq(t5ul-8n9soyFWXdhr_j93v7KN1Z(oQOIHn1L~-< z0_x^HC~}dH8eC0N%(UY^N!Al4a(D#WyEFGu$;8t2Rod3sVT50i4=O3c+PXW8r}aja-$EVQcj91cvMCsf7pcbsZKWg3{m@`&KV*j|dYC4~UbYMV2o{HA%4 zLRZV2Vsoj?iPvwbuIpNcu59&_K!rM3hwsJo0{Z%g@eR?;AKGP<$Kx0gF0%t(`c~N8 zb$PDc(f0(Fa8*7VOG@Cab;`dTV(m)6Y~-O7coo88ku3G?+$L5e2t?N)KlC;pQBS;{bgPowBbA}AD0!9ofrfn`^=WL_|ylLEY z2i&Gg#6;QbhbZN~T%u|>wHw-pgY2PPdZLng$8QDVmwT~*|0xA1@^{{e$74T=WzkT< ze{7q4hsWtH@B;Xv(z2{7!$iSUMFS*-KzAU^hX-O#2-P}ji~IWIz-#D-#8MFLry23)vh~#;rv>05my`7 zT{eq2+3`Y932ZJzRU!70o@h?RIi+TF5!~bL2tfRzielK9|DLsEtAQr8up7JMA>)*> zV_AnJy@3SJb+VUr=r3O86JaNe-b6}InXSRsL2J-lLa!OR_r|==fl<*5Is^C|G$#B2 zFgqrHN-Q~agqNbK6H<+Tldo*BU#t;l;%5c%B*|jAEz66%V)JBv1$QL&?MXg3M9B+> zraEYE(pjlBn^I1kE?%YGJ;dQ848~TXo43>cZ5w;L^9qz1wzfqhPZN_f_X5=yQb%33 z_=UZA>)A19rIYd{m;5lQ{py~X?B1&_9di&vb^_0=puEL;g=ndN9|PDNe<}4`jKmL5 zCI2x~;Vj!?M$eVHVO##DHv+%+v8Za2IvJBmiM5gz&ZQhj?ps!`lkAj_4TM8um#Xvu zW=;kVLpMlIfKzGJNJetuyS0wuyk~G21+OgRCe`- zb(M_9MPM?|%N-hj$a=5(oXVykeryNPRyUf;Op6;P&5&zXn7|_9SV7dfgTUS({=w9| zYe#CZkLF8#fY`)!3w9Cm0L+z^8cv(M<2(U27^CCJsEc=Hdck^vR;*Z>qS$@|Ce z2tF}wd~gH&(ZhU=4hs#V3_2x9$ZhwfM=cZ|v_ez(6T$L-19GfN9Q-mH|}2 zejB}g<$6-_H)_UD=pMYUMGO7469E0Bv?ha)9?2&Fdh2!?0@r9EKg_zG8o|pyjpILw zA%Us!Az(GiZ>UZ7kX3|3WDy8RTEeVXTxZpAH@@$7{HDn7s|N%l3p2iNtCh0feptB3 zpeiGgs2_EIHKJB_0il@B%t-M&F4;zlyc~zOi7dj?=^IW0s`XI zra>yYdg}J30YtT5zHY0(AC@0>%|dA7~|9sPf&sHlI0gNH8Dz;&Nq|7)<(>VIhWzi!)#Y5LT$#cKTQSjmQ zOVagb;TD$F~bmmis*=0{<9^n=+m+MoWAS1_-!nYh4Zr?xSEIu=}PB6z4#>q zwi$4!FqiROMW&s;X@V=FV}(Sg`&bkABeH1lx+yrklNT9fIxb-yZCjoS2?Q4No<#hA zpgvjkbjI>RjY}gS8NAz%CyF`qopa%BIbQ{qm#g4Af#1L!u%ZE(aa-=tST5TN;Nf&L=Q4%7sDuj9KU0q%LX9Yx>6~o7Claa!BCq5SNYUnfmd~; z^)kmT8RB|y-8e{f6IJ;jVE0sqxdP;CHfZe3yPVlVM%ZWfi_(VBh5?*^Mfttyi)32% zO{ulfM=*-O$k}&ZJ{oDJW4GjW%nJQT;+*z=GgqY2i2EFf>D&5?HJ)TXsJqU8qX-63 zt|?oa;=*a;PL~7ajT^wxyM_HIMz%L)88`fRHF=WMLG44NP6=KA=nY?Qy?E}USuAGH zko0kB@mzoM^-tw`Oy+znHg?%Ly`&*PdGxm zW)JPVSa3!yu;9=AspLbId3q1?lr=^I{$ft!rY6eD53xUjflliI3M7hu^rC{>C*Auz zcY%8B%~=d?*7XMR1lYML-!4~w$&GFOcsMzk?-hFvT=p0s*Akxy$M#_1Umrj5+ZFO? zt+<=o_e_o3hvkCirc@bi-%A^wEBrvCkTsB+ciZdg)rK17n>hfgr*9%aq(MzGqZy zeF;sBt-^0Wp(}Vqi*b+*g-0w#e*@)C<#g^gqUmUYe_EwbOmJr>x}dW5sAVi|2lbqR z$Li6b%kYV>8ZklWLPLe8tR&^6I9jXJ9&oNtTj@Dma8s`9Cm*1Hbv;gvE>s>w@N5W8 zaoAIb!YlFBoZ;L9rKSk(bPY>$vpbMYTEiGO0%L zIdC9dkTqQgdKw0QAO~pDvsVB|=W;LM_x{{3*=E@S)s$1l8_+W5Hn^(U5mJ23+X7I} z{t{|EJGZvf9#3owRhYu6yAFn*gN|Xt-!AIL<{RVdAJJi%yu{hm!_ucHF#p-lcxDtj zrTpWCw;(8gT}p07X+Yy6u06`fBFUgemVpaN_Oft)A#~B>impo9*7@cxOJ#=v zyEqtGXFX1WTgkI2LTGwou-UcwX~YSM+oQG?V6qO*u8Ow&9ykg8L){ThidmASBh`(I zeCi916)p}r*vE0NEJP%B6^FNO>&#!z-FnwBM0Q?~8<1^tak{OgG{<}L|L^RCc_Pk`V){(UN?g;}wB$Eh)~2pgh%9rLyk zNYaK&nsgZjiB9W%V{d}Q!KK)&oBc!YHmjg&sU1QEFa%s8D+wzJG;mdMqYrsMp%p)W ze5j4+H<$rr6XlNAiPJx0eqp&RNNxo$!13-37l-qu-Xq``J9GPG&ClqVa-#Q$<2Z* z&sAE6Y1K~Ikarf(yif^IFy1P8*@beYu$p{-vw`(nY5wH}E$yzQ4LTSD_mZe?taH1q%xSC0TWj)`v2ZSd)nI8T*xI#oNe9 zn*GM0@FJ^U`xUCTkzLbxx@t&+0YmME7mEbjrK8$eIi1;-9_(ia(&s3#H3U8dNTln3 zbLc_<*)`L<`&=i(Q1QL}_Eg`PSRES}TGvn{!gWM%&c!M}W>Rlk#9GgV_B|Qhc}K!T zbni@oy;+=Akaarkd)a_sQ7A?JCag)x_tRcTYgQ(pR|-HEMYwijF(WO6o=>5pU{LSV zc)PO?6x(lu(u4}X*XNbmfUrjc+>+UU0{;x?N|iM{jL!LBehK;=HYYS>V&38A!;v}~ zv%PXJaC0k{WdK$j-T5&~q0}}x=!hX=$vGzIywKDK>qf5Ix1V2s1k&S- z0Ckf+v`wMXPrHtk8R)UDG zG6KK$vu;u7(zNll&ML+JW=mqs*XcW)Vm`oyYZ02?ivK<}zgUa?F>V60@j8qks=jJ- zQ{}5@2`Sip$DDIh!CjPKPUlsB;L%Wnks%LJO5Up8+92c#Nh{38_}E56jbd^r?jnv| zIRNm$TVsxR4kF{Vyf8j@sgxDwE9aqUl;-NCnfl<&@M*?!KyRBAYD}%fXB8|&hgFPS zoY~XpnA|I$4_TFQY&;UGdCXDT&@p)jU))58=`JqT=W~8h6PA}U|eEWQOUi_ zxBw)C&9axZB|J7|@VD%E;@n7msBHO%h>0P8h! zF9Hn{%rV;!c4BBBl>7}WuZ^>=I8tNj5`Fda!5#ba*Tvl&gP!&r88Ew z@fhYysY*t$KsMoKczDBq9hu~25e(Oqu)~vc0`(x~$c>_BZRP<{U+lcx_Aw9+-pM)< zD3bs;RKK^SmA^1`>Kq!CA5f%Uh79Lzkein0n)2PXB{N${IOl%PX>O~`_$)E$JG}^G z8bihn_mHx8>vnccr3WKY3t6vdv)6d1+?qy~|2a>+Wnj+!>L_=AzE8J3<+N}Lo}`D4 zb0;o5g|`*n6!(fB5R7AmuHD-gO=kOdgBJ~?9hYxU8n+gwjI)M*qo}f&0$;zDC2^Y+ zJaKoXsP0Jh`vp*h1>^4bf3Z#Ucl;k+HRz8N(f4Cl3syuNpv#L4VY`vIVUQuQ^rRw%-DiDYy( z&+7&C_#_456%NPZ@`MDOuj&jU$$D0Tt}VsooZyBJg)m)aXtU)-Td#xP21vgXfM9Gd z<9e?dMcus0S^5P>Z*@ao#yvAjKX(54ecY7!X)Zb-s5A?I$MvV?|E%UE)zEhc*X>?Dc2@Zz=A6 z-PL1*Ah(Qv3MDt=5vQQmz+1Eh@e<2}M97;6UlI%57#=)OPmxTzdel$JodTk3i9Fuj zS|g4eP(00?qrl#P)LvovS&4BF3Ngav!MhWU{Q7bgdMM${sOb>5$>Oe3L;6 z5YNhr_*<}ZLEcKH1i_|P)31F9nDWvG`7xzx{jg_2F~ezn(>5p7_y zU*pO6m~7V(h7}yX1F))4J|(!7wGROoBPC0Ja$fO_Uvc)US$26VYiqLjdaeAjMM@(# z3kzAB2iM;=k_)Pb4?&Ywh+=G=r*_eNy2&X@3MS!LMkk3?KWvOVlw6~5v+-3yLwe(Y zb#Ud}93P9a5D%Hpl+ruTLRvXQCZ2DLlHQx&l^;>A$lZ#aT3m>)_q$$AeH#A6SNdyz z7DH2+m=(V!a52w+ zGq&5uXga4PYls-*8LYv|h5=!%p>kWgZr(Xkugu(6)*r1rFa__3Z#a%fUvP_A16)$+ z-}!3haG%ia6!!o8P-j&lfK2**n_HiM!S5P5DAtpnP=C@QzC`L?Ur%1JoK)$N0b#U- zkc*9efpsmJU%HZS2~@Qr5I$=hPUMf-@pN)72N3SJp>F+&XwkvY6YcMzM+(}LXQ+{Q zu=Zp@46q!U#(oRW&$U8KX98~}j(wx_Gl)?!R0g{MCka&=vpNu%>#lT)HcB&phsA;h z^;3eU=0CNBeVS!)Il}6m^Tni|haeOS(d8Kyk5iG)^7=ual0oi@1fXopKeY3c@rwPA zySq4JT^ZQ+>6x~#2f4~FE$FB)fAm>L2PR4;uT=rzf)2$?c5xvuTwN7BR`Q~bzdB=cqf`t3AVM*2*gQFjr=Apl(#)3Var{^`W87$IrX1_K2L^RnJsxHQ2-b$t*iaQ# zrgv|Furl}7UM3kKt6ba<+rOqHXbTGRDMYH(UC}=9>+hBy~5w_@D%tqFUmeZ%g4!%2Hn&m0fg1Z9DkGTDKf?3(HZ!FBXedD z*rIzd!IvS*`?%%{yqU-42S7d65aA17I{)J{A+gMRrLSnAYfmW+{F;#1#D}OC!2p!R zo$iUfqgce7D+EZBYdH;n@P-n_hqB*<;*sv1y3HtlJDVSNbk9#@I7MYx{43^^rDXO{ zVTvLNJ7YJK=NtgQr!hVrlO7e+pmalE*Pq0pFjYTi=#nmHtho7lpduh;WFs)#W=1@v zRa}AflOIA}$?gsyr1`=La}oN1xr0K=YoKy`NPp%t(C}~S>a!DnSxLT3zO`u$JmROR zj!z*^E{WysvJi2%7@XWEQ56>=pg}|&LHddc+U6-MMn4b-#aW^yi6=z#+<>SF=nus> zD4NLz`z6_yB%?~C2*}8u`m~f1j6&F{Oad>=2-U(dK+W8vOz96zq=-m_+>??NbhTxYqb%~V*05Ti83t6 zXn^mt;tDqdyqRYTz+5$IBIMecpEMe;fwk8!H;K1Hhfo`T6Kc`CjB34mu5+-%W&xa< zyh6#!gEtbQZEC-m;3wOtsglGCqVRS(&pfPUQq>Y3u7qvc>F=*P6j%r+L3JIDLFlME zC7W=H8>p-8A;_aa*ZImg5G@W1^rwpAkn;&gEUM-d^EGR)bKM6o*t-S%1(QsEONW6QN^t46D~`}l z+9@UP^$KP46NUnDJ*V5pjQ)O@7&g0z&Q_Vs{Mn^abHc%LHY;>2suoA| zgL{?|{VS$WbI3nYpz*^!$Mn>-&w>}J3z zo11K)?p<^C1BHnJIkTl7Kq>BN)N`sWC3v8Jm%ARZh|Vf<@{miG&n>dc)QxD-!Awkc z1ki0UW)Pd}3>Zr3S8^zL=50Rej?PThzjcX!{6R&_J^k8PRqragEyw9JH{Uepny8h~ zN0jgmzwg?nT!n369i(fIL4yy=@7F-guO%b=>Ukr`8HjE^!rF z#$3hfT~JQE2N3weuo+MP;#$^~AbD7GBfbQQN&kWSoPBefDh`^kX}~8^&aQpktwyAg*taSKgw}03 zrJ67+?eukJ!_XDK0JFNsjxaTU!AS!ZH@Sk0rTxT2P=UhXb6PR=qy_H3p~CBXtF0Ed zH8G|_8rLB2XME1nlo?|y2~RZXkwl@}J5F`J3vtSuYLUG|fLo$f3Gk+Lxp?5T_kqX< zI!Ps2-eaD7-zBu|lT$y+L*(j=a=c%N# zD1)Ib+}CM!PM;0mA&96E8Hrp@<}iToiZ1emIjk}zC!_jtindfva|I|R`meS$VzZ*+ z?blR`4Fdkaiq<}It*@h@fc7IdYEj%4)c53A*ObJg5j?jk&KF9+zKzm-ry3Fau=H^v z;B=cTc-W=j$;~wj$IPU$9}I95dhR;^?kXJO1{A%0q6VSn^Y3d zF~p--rpDA)P$~DO^8v#X%_S11>=+{!)*+zHYDw zn_JlOkei=Z5o~f+SgnoWEtW-D;)6c&n^A`mwXm@6TG-NmymhePw|^fyodBsaO*-LI z^MyhZw3*C^YEl&05S}ioL{^Uf?-pBs%7~2NomF(7hF5&{3#qzLna4H!DhU{j?c_x~ z7?6AP67^RRF^U!#e|fz&A&=M!rg(XRO#kN-f)TWi~$a{mMQLrZ<$ab1xq$G+wLk;g{?-S-WW`zgS(gqzP048 z3lRbb=!*BXjkEymjl5_y)RfK|B5pcQW)Qv;ADRuQKx6e%*;Pi~T*w#r*LLMA^NYlU zS|>-b7?zNEdaD&jc@?)HwZ?nJh^lh!l5$+X3Ziv?ZVm%dT}_FsX_uEgC9=7(7bU*? z{JR+|j0d_t^(ptk563?z-GcEspyf_5;Y8uf&@e@QF~hlVKOu;UwPAI(x^JoXsMh zzEgwPD3bE}$3d-nBm))o8+has?=~HnJg4yu!XNL4W6uy?d*j{J1RQ zoeSNa@cR=!Z1YQ+3;r}@28<*26zoInGma>*9}{5ExFdrF$bzu^EY=98i*@lkTI@WF zE{!k+&kR@rS5u?_+y+c2!{wXRO9-0%WKq~DWB!xD^<&D6p#_t3mo6w_!@WN4(8quJCHg*-- z+aM&NKc#^{O2n;PZ8(fxRQi)~#O9v`!6~wp@g9E8v*U^_>p-L+K0YZDP+CrlT8Id; zpw$)U>ma_~JcvCIK|IKqR1Zy#eWM;>Rmm}vctVpyJp-}sdnNt=W2z^jg z3lhP9GjpW+ZZbZMiWpfLnLse5HzC|=`#+JsC%O6LjV zGSK|?dCQ=Uf`eX{B9yOk!u22^HzO8&`aG`M!o0zXy2lr+-Hy<4&WjX*@bCrUMqQkT z3UXYB*PqL}|KwT3BQ_gLc2j%BqRxjs$?^Ws*FcAc!Q)zqb|#{_pE1NPuaaB{x?kO} z-$7Yhc_`R{9dk7Z(QkNv6g>^X8a}508$|lCo@&aHbjz9w3sBV)6w!893)nac6a`vw zKI2V=f?1VZ+S`+qlV6x5pftyAVE}3-EVl|^#C&rrj_@HB^vsBewSVfb5=9t>HjtH( z`w}7$&yDv zH=zXA2upRU{Z1LtD2+6^yEKvhheU@a)PR!E~(K~G(}Fw&}QqCd$YWNK9yVWjnXXWPeC8dXET-*5bqA1>oUL1Ev? zSd6H@9_A_qna@I-_t46X2BqTLdaQBTM2H_GeV;nWAizC z6hf*Rb>w8NDI+M9ydtX{gf<4^|M&36)Qt>f>cTIsTKh$aM^(ia0pz4Ids+~$bnO}N z0#I4>NTg!lMt3KLAmDwvn5?^l-L)y7^PRx`h`MG-{YD8!5<3bHEP^(FIJKG9d@&;Z zLMBJ?QURfV7B)()0MU+_MbHP%x#(5}`N0_v2Bt#8<;=odpJ;+VMUe~~tW)1DI;uFO z#u?2f{@_wT1;bM4^UE5V0I1#vu%e}@<21i=KlaDIq{h--$?HauRYRcJvw`J`Wkcp4Sm!0bCY!0XtzLRK zhPXeTVfofjN+f2po;40J;9WzII|f{kre4xQiJ_ot)di!2NPo5R#(lR&W7wU|FDSyM zF1vP}38@vy5C+_O(7uA^Z9S8o9iytdj70P zLkbR}C*`Gn87^A@B7ZEVE$xrhW2@k>q%SriWMJ-h)<+)_m<8m34eOS#2EvV2vm_aY zwT}W2iaK)5!N3+UR#8bDX*<**ekrYL(Qoq6cA*xy?y-ALzF}M8S+o$0adp;;QjNa z5kW(o&~zmW>HN4hbsBLNM%xlh^RIZ>+IZ)SN?)^Ane`3C0ZA1;z3rJKtXAbfwgf(Z zU&h6MufMpbvppQa3nY<>CJs>Zd;!e+f$tix!Ni>k0tX^_<8!`d@K-HPQ&PZTwo z*0ZhZhxR?q+Dm`m%OiT3lfEjCH1eN_{a~Z7db{u1O!1c_a%dVJw?%17TTS)8(r= z|68Pcy(qnr`<|!r-`4$9gBwK+h^5}^`-78bC%|C8NcnU0Iv*YH^+aWt5gnh>fmYiI z76ML|2wk4OreT9jsGId-W_NiRUD}(h6wx~>^ZR_55M_^;+I}8VV2vCOFa~jd9NXp; z-R~OJHF)^86Wd_3Z8gekDs69IkuZW~*G=sY-u55*|9J(1kVWF|KD;g>0}-P5nU%ba z%`F=xo)zR8O5~}?!xr0WPI;if>C+71b~7G!hY{)xyw~|vPxD>_EKV~i;*cRlu{f1+ ztCq=p@OFLeAs993^5CCj7O_8o&hxv(>uqD& z^~%zBfDSmc)NFAOD!2kW$xY=c`{L1+#oeCr0XpC`7b}I3ET5&cppX}ThFiNG;njeh zy5(K=9*qj!Qh!HEMbI^$RL2lbUp<*omqgBsjxJl<;m+Dg+1s>42waTM{UfV9S@5YHk-vo(sUSgn8>>t~EV z@?4!Ulv4joV4(+mHMU=W(NZvTK=%^a6c*rkYIG_oxJ0&rHz2TnQQfEMQkyRwkdLit z(e6x)L{TS`KAdP0zgiZoVmd!>`Oz*p-|7yytUOsRT_k2U8q)?cD9LF%({erl#jHzD zO2wh=n0#`(F*nX-D&MW)YQWtyQyd8lLm>(rlKm0}m99A&i`A<~U~Xvn)9*X<(uofXu$Z16%8aat`Ipl`smDuMiF?McMtkBr2OXvet<&9K*ixG2igJ40xG0pV$`ZvA7J3+ydAX#oM@= z+vr>p4US8^wpKZR1z;4X8J4V2$&xd9?|dJnIwezVa4Kd)v31buR3}RnNR;9{#U<*G zUn+yB^RFmK;Snzt{3T{}4-ILn&D+vnc1o*B%FreSXkvlVZcA?zW(bym4guhw z4(a$qmzGF@g{2u?3D0nHW0K8WF`N%HKf7G))ZB*`j}SzEkP(yR=h>-WUwRWyjaX3I z*QXM$3OsWV?LrtBcBqxH%Lo;0Oy`9ip3`Aac6*9*y zfjWJE=%w(d#Q7vDzETcm5Pmo^2f-s}Aer*Q*1F3?;crefLH2!7nLjUK>m(rLe{7w% zu7*msgdd210PjM)_nr*z`SI@QNBbPzJIS;#f~b=gtg6-Zc^{_)8=8~S4};~mh!`37 z*l8f0-tHN)Kln{>?{BIsu`BqC^EV8#Jm5)+fj8Zo=@}84oG`ZIA3&VX_j~tzVT25l z@kZ11T`zl#Hbs>4hqHStNmiF1%-Bn6f!mskAErrvl57jGR{K+$0vg0eLA_r;$9@W z#GF*ST6l%mxK-WfXre9~OiJfvUSts1=1NtB{3OFM2eiz(-o7Hy$yRx%qF}bk=KZQ# zqh_#wtS2wF_PexM?)h5Kdk$t>t$Eo;H_ziDeJ0aSo$zGjZiREDA61V~FB}IlP8t%r zCo?_hAoeNLXi|DL+*r=45h4H8sIyaPD>@3ia5cI4?pjO0;(Rr=!MdoB3oS{IPXx}; zJKgW1z_IDXG77jBt~k)9O44a4A=V`Br_p8i9q)CDc>*(SsewH${-`+JY$s47E<#ELS< zB|ovKsJeSDg1vDzc`B#n;V$ zUDu4?9un^XDVp^&6V))~@s$mv|INQqb-UEbWZ;fmnDJoCP3hF8NiKX6-UDJB``O-k5)qgRq-S z`RGKzJ#HHvSbQz%4eI-osa0&WlCx{P%Has>>0K_W-4zr*J_iDsb5vHvL zwkrU>gD5T-HyAGIS@-c8T{EnGzE$DA!=8gW`w=w<$@4dbI+}7dT3Btig$J6$g@o`%w91BSheE;~Kp!P)ZR+ z?UBzA>Gm`E6S1)Vrgv}hGwfuu)}qw4jkq}1%#fpMZ-f5F*kGcZ)x&LP2*O>f)<4_{&F9?Nwjm4)y z?D%?I=w_ki`8yK>qe#%1e(-6Iw@LG-a{p8tHI#1oa>B?V+cx;0uhhLQlkuGMrj`^Qv@*;M;VR=m?M1CDoqNF_sTPjqY zNJ>&&tFMxI`*k!y4GNkSU!zE9PxdZn>rtdvVCQB2aB6G~poGeQ8P1s%W4J_?Z%S7z zF`j47LVo1EFO!6<+WLXxjF$P8Ei4l2@OV05Y3-1$48Q#3@fCvxRQ|OvhJP*sqWTEE za8f%PcAN+h={+uGwmqbQs>DIif=z$}R8TxxX?JK&&~)0&9FOr zNMlS8tRm=g%=5JR7I3FCqwZvtH#SEXl0K$dmO?J`l8eO(C>l*4wH-8yVqxL4fjobJ z47_g`zd&;#sw0u~o14F!_6qHZHl2ZFWIuT5Am{hXQw;rop~_-Uk%F~5rB6xET}{yj zsN68QUNtrJ`Yf@rt+7bhu$8^oQ7PydQFqoIA&2>|Ff{cP?HnRe&u{xbpv>=b8#PKN zM?le8=ixx;;%z3%XW)~cpO{ks@z+oz`O47%p zVxueT$j!fhkPGydf7x!&GoZ>CV#+bnNYg{n1e_lv4r{l4oH%*RD?q@+bvd&{m2(1v zJ%HV(Kb_j8p31^7QJg_FJxwjn^Po7_{8|9M5S1yyCs4&o632bcadA{ruxwGc)>?|) z)q5k~8}PqFJn zUw?gZMoaGavbfkTw?{R&`l^XSYvc9)6tUBJ@4pDXsc>6MjnFu9=hN%ZH5B7Aj-C0y3@C)JEBl!22ZSWB*KZ0f z9xmAQKEE~@{|u^R+D775OT*o5JYx>qWa zj^jxNHk(Q%^(p}#qyqq#xNr<_wcdNF4~S`#2pJ5r&$WuQwGiyooEG}a>NH$q6`7WM z9Q55fO5*L4lCeb+Px@MmIW`h0g>!3Ej#C=eI zsp~D0`zehQ)H@IN0I**IKd{bVH2X@a0~pAsJu&6v#+^iqQEFMRIg}LC3Jd)8R)cae zz6>9({yOC6-=b`($>2aMXAIJrQa)}p=0%-p!dWctGcO*b*AFTzYey9+{;_1F5U-8U zXklV)nXAGP`Y zobAR;Ycw!OzxA#Z12`mj@Ux*NSn?sQ*{pP5Q!D$$ZVnL&C^&s(SDso_c|o9m_caX< zlE{yJ?H2U0qkO%eet__5a-n*113yT8&Ktfntiq!dY|q`tQ&I&UUF0V`_1rOttbpWK z*mdoU?F)r$O&2AC1eZ~S+3SEFcE7J>t3>Q5MV^5f8CUT)`)BIqhX_Z_Nuv+xfFLax z7U1OKkIi=4=2;D{J*X7U9POxoe6JBYw)h=SNMKRNh9;YMP1>_n=_%e8GUOG@-qT`#R5u*5ts)_` z6A1PK#nzz+uQB&&@3uXM3uwy~YC;85WnH~ucDFU?Z4Qf4z0lN5=Ht!Pv?+}|!ACHq z^`p^*(F&>Luy+=aou(#Do-07eY$PVJMKd?#NzVy1{nM`)~#66h<$9kXsFrzN`DL!b7de!nA@*ASd_VB_9m_WN&p^*gVJ|z^DKh zx7%}O1wn@qFa|{`zh9kG)B7Td$N<^3CB+Is-Fo(`>UE7)C~o7|zbGnrD2oab-eUz{ zlM;F0Vj+6h41*L(2q`QB~y{n}-@mBvqQ(zdYqbS4;ovzkfH>&w6-pm)0l zxLg!tet6$~CDyyP6A4tz{Q{_}H70>`g-#hqHR}{|$sLg6Mf{+Sm0yV%^8w_}$}uY! z&h8t!y)Q>yu`Nq7Dvdgxwy_M!xwN@LwRvkN$^9+EYnU8KTP1xCy-2@ZC_d7vfN--3 zP^&@koorBO+qSZQConbsc$6^jY3DLq+-dh0(8W{r#t6f3KdX=!v zA8(E2g+ecdE`Kw8cR#jm#}@}Zo^6a2IiGQb5IChRP{~t&dWS=ms|wtT)&c*1RdwUN z7|C3@N9RCP@d(z03{s0(cB{MTLIDUHxh!+HD=6vv1(AjkQJ(9Um=*x%Nv8Rme0ucd{8l@Ej^!Pj9K}@ox%~uO9=^FEt$x?i zT(%}I&C4>lQN*8K`Gc7HTkAHf4|#b7j>4X;o@i>Ac1kt(=1! zH~{X*D0OS}vl>_v&I~E_@7`;+>SjZX5BjcIT^$9~ixE~|0oCdSW2V(b>nlmaC)nP1 zykH}Lo*=|w!qj1$-(=fzFPDx}8#~AsrUJMcIuDwuXiv{}#ZW4`g7!oIX(Tx@}?Go#o9S0D`&(kA}2t8J?b;Luq{jMhST`E+ZsIu;=J}dL&a~ zB^dV9mM)+twoQ7UFI$GrE6$vr89x7xk&2FgG8p1ZP7oR#)rwnpxqJ3v>?UNPV#)aE zcmjs2@{zVxO9X^iJ-L6{K882B9|xs*e}-^~SM-@>m;eGDi2NcR=-wi~?&S%}_`aFd$d@@uS95rU&ZGz6=sewF zW|{zHd6A<+qOM+pYREgmNey~1Jn@DrwK3vHHokuVH!z+T)R z%GV=%Y4co?(E1#B649k_2FV28Ybh#!Wv!L59j14109xzu!}5p8(^c6h({)ir-kZ#^ z54}yW()Q;}ylb7I=LVin3SyD%KJlHGFr;0c%72YS(~^kEZvdkok#4)xLRn`BPlj24O@4=W zuiA+kuN3}DhCucB1Sp_@;a~Do(OM9&QtJpQDZ3Yv%}dfx@S?3ydRI4R-Ylh!9Ym~M z>KLpXPbz#9bdIS5rm5PHf0L z(TF?Gt)Bl;ITe3b8m_j3=Wi02Ntp?P+q99HE04t+C3>?F;k=WK`<9etFnf&DNCFK` zuCq`@U)%`>0?_I6Ky_t_Pjez)Y#y4_7TA7U6UnsVmU(-HWWP+I0{Pc}8_6K^B2GoQ zcUa|Mf1>gNT&-l|GKBgsu>?NWfWYz$5`qmR6HJxNsbU#OyED6XJaE}mBVGINY6g`HetDebesiXAP0O^L3KjzAW(7w#P zJ>{YV2>bW$X^Rbi9mV{AI(J1r;%p7=IaxX%E;tM@y|Jjn%umbchU1=5MTw6!BQKsC z&|eww6lCUs;#?kU$q=j3LV9jz$9i{JVRL%np@z8a@n3Yc4DSLjBNJqO0_z~(U|noy zK#o^K$$?!>7b(h0Hc%x4*>ciDm`fK1%1`pps5!bZ>?5_5V-wwf65@eYCFE=}q!HUJ zCyT0%G~_t7u1dQoTE{F|MXJYau@isHy%wh{1v@1mWpU(whv6?@Rh4;|&CK`A^^ROf zr3nCAK%~E(QFiY)%xOz}K2gtWZDyry3drDoZ|baMDR5m-`EYrKd@RCvPm{489)P@=q1Oel<80`g}DZf#+Tc3@<2& zozpX~V=|j0$PkdhvQwrX?336PyD)PRR+SjVQ4sBWPvB5J3q71JfAi(yoPJXb#-4wQ z^@iBOI*uKq0Fu5_C6lOa+qSQ;A4ox(QRnIp#=6Nu6F+mb!llU&_6Dal=zq6{nzFBARpJjMxP zAp}rj*u|88r!&Vxe}3UzjxxbbRLXX*VJ2H;?3M2;6#?5;Y(LGyKt7OQz9XnR4w_=y zl2stDvh_KRCsMzD9+Y8FKADH@EJrVGTc1>N94nEeLZ%lf)>5u;xj|@W=WGZxjkDp4 z!3uOQ)m5yC6S9EBT5TGgOjPMJ<7|u!QMHw*G*EeJv?eN9fBZ?;3CQ)&CM0ns$*`5| zdzy+Db%gJWrVQ%a3~{z1Oq*^qO1!~;vB|HVwkRBw3`}zZ zOOp6je*s*zwe`557I@hocVTCgmwIhy*?7^YSX|McP%a?J^-IkN zDAD^^ma-Oeu@cXbgqaJy;Y-f6##Qt!jrtZz5ceB_>rmb;+fJ+bXADI}-CPX{rNo(? zf8IEJ8jVAzfLS+T8`cz+M0{)$lEbm#Ha(JUM=qhDWn-98U)@yksMTaB2@)}=e($Hn z;Ob5njolh0jaOMHXj1TCFuP*@vY2`i0E?pl9y(#c#YSn}EL_3AA}2S|f8!xkyAfT5 z0F!J1B!_HJOEwN|hRdCall?xY55ctE`d z535n-G%4ZEE{hV6=CB?X>s2@){X4(m8XVfdoX!umIG(&flg!y8A$-&Z+o7o0WbgpT z^$q5YTMIXcGMyV}-KDvMf?KNe_(7;#XPCE>nOWq5;+@#86^Gx>;m~lq`8L%qf5tOM zjdPxwL653g&olAgs98jwd!ea%kSU%Fh=Yxmw7D%_%#69!dF+PZ!J+Nwsdu$qOrUyL zDrsRppKMCo{KuD&XwsCLCIjFpqmm#OQ9fyJ^ClB2Cn3(o@#3h^LZ@iPbgKBQ?F|3aM{@s|$at3E96B;g-qhZu7~Bb^+N6=`8~BAp=7cF61I zC+OE@kBc2;`n_tJ^g;gp(t8-Z21!vB{v<_>ilC`u8i3YfLyE zGr*_yc)R$r@XDM3MlnbcB6~yxa}v=RAL57f$ynN|SnoLk9gj4Y1615SWuH{svqwJY zN{-7&7_1?dh#J+&$u|;SOOrYudu*nx;ADR8Q?NXNj9|dE=rdxee`33Y(I1y67=`|R z+gnnW1gQMvt0tU5Hiw!|qIB;WiD7=N=4#Pe*u4hWmb-5afnQYf)3BZw1Wb}c z7IcOq=#OvM?<+lAe_~N#r(54@a}ivUs>H=MO^7!*7IigopN5U?7x_T>yl~YY+=RYC z{c=oF`1Vt;5-iiS`BDpMflZB36gD&?o&81{H>ZQfyP=hM9e+VK{fG2ZOMqx*C{Sg`m zZg0LT9!NZ?M5Mk~nd>yd29CptTr%PkAl5dyTl8x7L!#?BR?!~Tt@ye$+v#Lhce@1k z$8@viB)Fdn+p$KK#}30u$^POQ;$z_){H0&$sk^!CR|{3ceI?woCGr*@z2ZN$GHDFG zhE)9FChE;=f8aX3-ng9Xk4GYv#iN)UvNjzcs?+N2ghiJ-TD;sxsB|KD*N!G=&0mMp z&R`Q?DA#=03-0aCO}GeB3luc81+n`XXzCfil=vG*a=fG<*EWOs=10NyWXFwPA36~e zK7gLpFUQILM~ffGQIhS1P9pJ`VeqX2^w%KLlSBeCf6_Lcjm!n2H!MeJKg_BhBNdBA zt6R-YVB!a6P$tUq17*MM_=-MZVL}8G1$#$WUIK$3*Rh1QypH!UOA!JWULu?EDAz0! zdN-=&X0)J46h-nzUt&(^uY!@^XU@r^s z4rI#2*Jdq`W&DsCZ#Ae-hnb%V-oAkVkHKT6PVk&@_n<1i+ek z$!JrTKL_&1s)uZ^JX%B%o445$5s>h(J&(gF{8(8dxp6F`8$^c2=rk`#E{2>;A3DKAYu_VPCVV&j|NulG(p* ze<^XCp?QM~a;aRwLGoyV$^{bAQCisZDyHjE;^>{Kl_6^)5|$`EmTsHsGsf#lw2f-0 z^>yHR<24L0O*8gSgJIV?(p%%GbDkP>0NV5X1qzE;E3PED6%M<|lzo)TL~n+n>5WSg zI`O=%XQuQvJfy>!mv(WFT0hD)%!kF6f5#S2R^1iD#|bMB{lh>?NJ3qu-vl)DSNY1$ zr_PACgdz=g+J*1jCs%*KQ0~*b&fn3&BA|DAHSAAWFpK=K`3bggPADB*%yB<;N6u}rUSk(+Rir_NbBYC2gLaxsA&B6Pd$*)Z#Y6)o3VXi_SgCt z26YTRVnv9;G+4y@Q>~TSqb>_E^5yBKT~piYc2?rU6&E`1rl)Xm&Udh5fMfAS^c3lja4%P_Ls$y_S6C= zC|m<7vYa?{&QGMbnj`H7Q~-K)0l8%u@_Ef$)k8SlOH6C zfUk>Zz0(Eohq+b;iThb?l1ah=3a~5kr&UI#0e7VdfUmF{HFXI+^eY&KWf+E)7_McQ zfcbYQf0u+&puc;?H}^kpD|0Ds$ApqwU>KdP5i&gxG`Eil&Hw-0AzF6?rV;O#Ep7n_ zg!wLGW_}sXlVT+^T76}ne@rZYg4Mi)bp1s2C&O)za2>$(SVcMZ?sO{qOI-$M$w30# zeRLX{#YzT+q~@*$$~JEClgHh(Zi$rY-3xCpmgYk9sXV^9dx-8q>sqn%E!D*O&}nop zpw;|RtXRpZhJR~YA-E;F#lUmS&q2 zIgVeldCrKR}_H(5733~KAU!Yd; zjE6Q&Yc?$>GTPM~NkcWcNGggcT+TXBGH2|b-aWP|kVb3z zE0Yo}>nOcTx`z9hQHlUUpB@5Feoy_)Nw-s&2&)Y98??x6p2y+~LNP|4w;r09Ub%tE zvu=n8Ayka9ZnB#KOrObz%%FVHjiJC>B^6{QiS8$we>F#5vQt%G7S$gHwXJg#ST--Z z-9CQYI#5_MJso9J9a4WXjb`@=8-1X^mW?Xk>KCx*loC>l7&4~4sqB6dG{eP*mfISf z0UAh}m(j@$Cn7&cOzI`jP}gOQT;oVA4bkPD?;uIvR+JgQI#Dn3-nbpibFEZ;RustRG3Wfk9O7H)1ibxTk(WgxrH%i>O9-AwMTFr&TJv> zMe3+R_NPigk$XRAHV4-(QgFN_EH0OrUF^!kf8ku|`U$s*^h8ojw{fi6^@c*m+dW%F zFJC?3TeP?wZm5^y1vIe)L4;pm9rTr2DfK<qqbDt9CyT`v~FY$X%;VA&(ViXCP zTesk=^x{chw7vL~^Yhd4GKVs8+YGgu5Q<-M)?sdda|msDxd#-T(*Z}|HUU!lTGjiI zfBQ0b0qa8!oN%q|8I>dfs6cz$rbKxfnqjh66w$l~Ce&R)A%T-ba%iNsgnN$Jkx z$tX?BAX0u5BZ>jT^%BFqb5n?wx%7#oe^Phu$xRxuO*ZHrZ``!N{+^~V+TUaF0?*tI z!}88;kav)v+cP$b#+YUiuuUPgPN99w2I_9tO?vTBU2_|@FB^^t%ZQq>u-DxNqcHp_ z3!d`N{BnchFEw~XhQHAY9uYJ|^vygFFy#OHEki23JWrC$B09=1kR5weOxq0kfAVu9 zVZ~D$;I?-0zVvS6vW=e)5Tp)uUzbQyhqf!*yN~+W)M``QMxD=ZRFid0 zlqgfrNMj@BX%$Zw+QxHO&c_tSEJjltcy`pErBc%NoAPds`zEEqLEQOE|w-^C<*~5Mi33|XHDMz`E&uyWnTcyzs~QD=qtBDp*4lA|ifJWv>*x?|zuf4RPj^`lr?3i+i3 zj~|C~UIk8h=ySZWGx%+?+|S{~>!d_RqJhqo4-@YHV5Ig3y#>E(wqSweq#d#i9^!>` zFs_J8bWr#;ogT_29d*d zV_2TpFKdFxTkux#e@hEb#-i08_PLZn8QCJu5<6s?nTGiryZ56EtB-iwg?7#O^2iJT zoU$@_gk*tU8IZoX`*3TEwx!NaGkHlUlu{|X?C}W#t@OP4d_vsphfqa=#|0!XUX_|+ zfcWdS^RpC_x;%TUk2w-52$tTDE#a_7gT6!)UGdMIB*tUCf9Pv>iD(g9bT3JqJ20Ol z@{?EexPC3z|MsA=v8_{>J_`~~k{!2X842PN8e;BB=hlvMdZ_bdn0ToIAR-McwSgB| zg@}Z0QM;^}Yo|O7R%)kX!fOM?2kmN$AnmX_OMcA~ermq4CXb*5mIevgcoeVQY=MBW znzqVyg-St+7Y6pT5bKq~gIuel%7n8TQ}(2-N`TA8Q3PYc_8gV%wNX>^0|)HB|{c5|xxL70P_?Amn3_fcf^4g6e$l z>D|;XmT^n$Hu5`a)n^ELG;L=V40|Db7IED}g4hI)0e@k`ny2RF#=3aBa@bluBh0)g@qB}=RzI*&1(>K;jfTPQB_o zB4&T)SAQ{6Feen#N-&G=3WyXT)WYT-PdsEQF}a)xAbipn8fk_Uk=1HOE=J_*HQV$0 ztT~twDAa7%xgiS?QZ21%$7vE!=6x1}uHc7L=JN%EL6Y-i6y-ctW0P~e>U3t*os(Bl zaUL2?VwQ(q-Gu@3vNUn@;n!KoK6p^$bzr2}Q-4(wf^+_MT*R%31YU>!hKS!yzh{Ov zpRMIp(n?v*sOo%frUx{|HNSY$sM4LtHxjt}5Re%**MKfBD6Q7jNH$n#WE)7j+`F_g zEws#sp#sXlAFH-HAsuL9BPv!<%%Fv9;JJ9%D?>jOWm+I=hUCp)|A($by`Mx95!Fuq zqJJlZ!e8rLNazfx$dQ>*#*zVLjXp0-+{@oh{a<>|tpg`#Lx~!ov|bXrx=7UL2AGDh*oP3L;#6 zS$X?4B`bVPGe?agD%e0}3i}$M>QVm0)_;KZvMw)ziF}fAt`8lOk7LWK~$J;x!TL9mXz1!5H*uMu-v4 zmF(FzNysx#^Ph`DAHNU{OMy=Flz+#B>*M9?FUY_I*kCkBGdKn?eswXTU3V6@#7LU% z+v;r;09vSn0vXLqNw%Wun1x?WoU%bu7eW8(1iD!l?X%?Kt2<`6KrKR|?hR`eqhcs7do%YUz^$Kih4-%^aJAXU4bgd@a z7#IYP4i!c@Hcubk#a-LlLJ~G&m}Gu@0!nM007;{)6w0N)7;@Hz`Kmsr6K)S4P0y(C zIB_AZKg}mzT14H2!t~jP5>x5L%)gb>;o7@9oxghLW}rb)|DwSRes~*)1bdJVoR4O# zmP-s3`ql>FbIEpY0|ZcpI)5&(M&j(7dk7>zQ0y3hQKwuGe}DQymq>h*a&4DLX_H!l zu(t(k{55@Z42KJH;xl)_n|qUU?DCL|{Z45+NH)eG(P#9{;|+3}3w?J9_tNvCW3~X# z)duTm*YM-aO<>|yhsa6uS4tk~E%&dYmk1RWNb4<;JPdFr-G83}i)-tuBhdKO z09ZWTxepuwfW!q5gBk&$&bg|yy1zBeY}&+BaBq?N%8$ZSsI4vg?)Gl69^!NGzda<< zp$crwS7qH27f!BEw)IF(3kC>$XVOiQv`9aI4}r=72N-}CI&5=mq?PM3nxpb zvBJa01cyiXl+6~){<%umZpZCwl{4m$*BSEV{*XiLvK~RLxH3!R3`FtgD%nX#sRQHq z2jl*4Q^XrRXBL0Q8F!5^}@o2qkA=B&3{d^=qg=ic7#{>tzJfgP*B;ioi~^*-?wbiWA;ST4HBm3~ZM%D?rl?EhSbVFcc-aQZ;V z?3h<{4|4TSzpeRHz?}ecuJ4o?o5YVKkigx597a)8rEQ$18GD)OI?mHFNmJAW$AqC7 zy2$lBE;37ASASIH*o(axd-T>c%~O)$_gj+Z)z)t4re$5V$g0}qwzRe@z5Os4H+newwxt*gujk=XguZE3y3o3PV&1+XbNlLnt$hGqu?yVG?rh2-S9UzzR&w= zDw>9wpt5ZVxQ_OJrN-Cw=QJVG6h&E{5IByHNAnAUC_=O)P18Kd!Y~|LAc)c z^E_FP#_J|}9)rQlwDBW82Kf$0~l!J?P}iEP1f>HwA1qn`sj*ud9P!u_^yr zO{Wjb>m}dM1GY%jYV($L?#USY`OBGLGI~>!$F81kH|Bp!=R9WNV)cNY)7SOat{}5< zlV_vRe6)JV&!f^jF6zz9e3)-v_tqj?ZI=hDo6-3r=a82eds%mM-bTKA{#?&rqK^sx zocRxn*O$V-s(n0ro(Ha1Ot47bu^&DH<^SVXxc33s0@v+;Iueaw? zJd#HG$DM!k_4HhZn8%HLxfsQfFZeNkQ&*HOUv0a&zVLlxC)3C2wP>!!cpkp)Z*>aU#(yGlU?nu;i~wGTF*Kyx_NBjw~yuGoSdVt5Bj_k zxEm)q_V_h^sH@~Ddo|iDrVsgN&>gva4(5;UF};86&AMHrbI06%@Su56D*Bi<8!Xlj z4p%h6Z+mf*MI>l|Exyj7*R%;%>=luT;z`fPh@x5%%k?!Y^*^RMv12#)o* zpl^RMJzsZRb|!x|f&`LUk77O&5)Nza{)KA)qm zj?CS|Bna0NxtP44AJ5e;HnxdViO=VR%@qno^|7X7I}vX5(#$aCsgmJ|r^MclcJV|$ z9lFbg%^#X^;raCjfCB8tV}SYp_uv1^zW#qWzFhy?e^vE<|6l+6|M>hk{I}Pa>&ySM zuTR(U^}7FmcQ2Q>NArL5zXh=Di&_fFXu`jEwR?}>4FK=l|1h=vYq~)X7bDyO;P>Dd7I=}G_of&qU>X!XMuSfbq#QUJW6o&F+Z8uIpV6fKu58NXdx z*iit9wg40%SRga~w*BX-)IkM1xn*qlT{`p2wDcjfS<9xk>rO1_ zueosBbmEh2>UZgsb;3afFWkaB@G^DnSS_%;_}G9oNP%#`Hm-m9bR#y!!GU8{FNnB; zB^d=#g;p!@ipnHobz z_5KIObBV@s@d?y~HJ}3EeYQ!Ob!3P=bnJt6Os}bpTIR4kU0Y4Mu}a?hQG9ZXOz)-M zn~ryJXGMJYd4F#B$t|>9rZc}wk8YcuSQD?U{I*9jOIR0|Zoz+#{l4eiwgR!S@@(Tr zBJ^>3vLfE?LIc7rN)A;S1v^#Y#ph&OrCq8VVyxhh zMpPIS$<2SWC1S^s(t>l^*g<~S<}zhCUy9X}&51Eh&g_FkFx}c5^Poz%>om17)RA^k z3eYQS2dv$p%~#tn?$^uP1Nf5-AfS zmEG1&`>S}un|`nKKAc?D@5k|%zn=LxE!hxz^xl8We97Z}AKOg7@2G#L!zm^QIf9(o zc209IkBdYJYhl+5T%H$+AeuLB#}5U!H&aQ?6Zt&g=Fjzc}A9` zuV*uxN&b3Vi6!*4sa;pl16gH^mx3?Ia zS|~U_du_I|rTIpu`P7CB2g&f0pra|5kz@!R0cV%5WC$W!)8D>|4Y7MTCq*{+I7xFW zq{&VRhEe)v%`RwpZ9%9>lvJi#KPyC!~vi0){foFNey zTAnzvzAQk!S|KBF5a=a{Lp!e`Yh9NsWe6JrKf{+uWe6sJUk+!%Tn{8ZzlaT$F$#rS zS%I+59>2wRyF5c2Td=NbMO_QU4m>U^okxUU^AQLJ?+PTqOGkhqUR-7aKKzce!l zPH6iuaqr*t*{SL;x2U%arMsn27LwHx3$gTafsy10lnAXb{Yt`AvCzlM&czE=xkQ=_ zqMaImhK|E-n<6aDBVN!1 zh5X_0!9HoO+b@;%imI{O7%>)YI)4v%u-V%t@kA| zlE5*4+9SS5uQgwGBKPd|yjbe>x3q(GKB1n2J@}P=pIDem)ba`MVuR_m?_wSMXTv?w zfHyHRgWc$w3tuW`hKL5ap%x@hDbz86?x~&UbhHGyK&z+y_&Oi;=Z5q-fd#b*D}8QS zx#;~^b35xpelz`J?fQ6_4cJ55MHzRN=+{nvpP^fR(BqOjR#{ukZsWCFSxT;~V*o*! z&Ykx1U>kbA<|~xaaIq9*C0PdhhX{mQWH~4*&+ZCw+fm$N;EdexQj?}WSUCHlfjn@- zCClLQNxRDh3H||j7K0FUrWmz@TB9I?cG<)U%%@OM2LUIR(DzNo&w?%WItQxv7kKc0 z-@QM)`|e-&{Eg?PH=pSI#%emYxcBwyF-h9_WjeXxhr91Q#*V*AoLE3DRxyrqlrQb= z$wF@Xz!!x+0A?)$&S+x>yvS@M>pf~<0fjg)d^s%$zp#*f!2XpcL_Of#Ra}aMMGN3DAN;vd1=G?{DshZvcOqtk+|~0OP?3^JVI{b3SbXd z0A9p7gCQ?uVSgVQPfb`9&-`+ZxsG#see_yIJYh}TSz#6+nQb8FLB5ScvLT~~$LSSR zN)Z^gErbQi-L8|<%Yhy~LGjThdfHBfj{l4x{Z;hj;dOPbkhRM*?I}RO!+0EjV3*JXHazjmZ06^;{rb!IvOG?-jsL{EK1L3ES@Ya0 zIYD9e;3Jb`#Slozt-=zX%v_RZ8V^&DaTH8~Rg{B`)WLsk_^aJyXu!(z(Kf|NX81=c ztB;4U&i$MsgywG~tPdWJ7j9!Ta*Jdxf(v;+Tw_p_p!|{{DToyS-{h1)S3Q7Ur{LOb zrhX$LwzFFJ=y{7@_N<#vG;gu&mzrq^Eq`mzhyx;10*c3WQ}@?+Zx0LL zkciZ3Y3&y9VKniQ=4tMf6K;|C!#cgXH+%}(UKY};bl!$OcrxIixcK2~vKi|!Vt=ah z+$k+JKhgXC4bS^H*WJ$j&EM(&DKMNNJT?4C`eh|jhejr1`FxK>-C)ct*sdu^FUO(j zf}#j=Sa=~{;q54-hZ=)OZILqyO4|%~k;o`WmfFCrtDLZX@L>5OZ8U$zSK|pt6PFSW zKm-T{-nCN`uT9q=`3>*ha?3m$Fn_nkY58|f^DCFhdV5BZ_DO%_kQ>^u>FwO~xsDZx zbsD_B34B?^g}Db6`zxpxi_)~n>|vkGWQ=9C38nNxpo?XaF&52VXi&32nWM0XQ7xHm z6%RfXm#Z(Sw@57U+1 zL5RvqfJz;=F-}$vHtsG98-M02*Qp}Z)ILF%$9$7LyFk-Z*NEvMJ_cmI_litgraPiHK z3Q_T2`4JNF8iCkYl7GKaVX*PX*XUDn7Yl&`Vd-;k5ky_IdU_dOFCl~iSrEHRP{Hv* z5iFFCMVK80Pq0!hVlBt@Cqbgp>k42SWF>632k)TQmH9~YxHh`^w44RExE2m)o`rN{ z<-$ptEJngQ!8)Tf-#v4btwG(NrzjG2)aN`Zi>dmOIEXc#C4U&F3vFNNYV-k(fkOqE z#_d#q#_gMX9_e@K!7Z|C*t@i|B;3!Jg*J1a<^p?{7{#+=JpPyy)UYKMo`axrVf^?w z{%8^^3_*ZJn|JmYJk%OM`&2liRtlF0<-$X|>*E}y0K$2L4+UJoSjV{)NmHbHe!ln< zuikiw!3TW%0e`o=$3TtSzOh1b!+NB@hi@G4Qmt@?$@=UTCEJTq?ro*{7K)J&kp)7> ze~Xr83exaNOA9Mx2=aM8)F0*tx0s`On@K8qE<`S3s9BihuH;7tE<|&~1k+FJU{O0X zQwwnmiCD)QM8j%7E-wcT?90WmS`6nN#^ag@;XUbmVSl6jUvdsvBegvocz<8KKsR6B zW1g-*XLro=6BllLiS<`r=N4J{_j_+SKzCnTt4YbgHSZfVnWiAZupQ->MkC#0`C!*4sQr8WahSMiSihEFwuiE zU7tiI65K{X_KIO+`a~>fJ>5}hTo+Jt>ubKYB)u$>;OG;Ew}R_@3BgfOBpAjbdOiTf zJ?AIsi&M&(uP9!98fZUzdIkXmu%=5EN{^Rz1e}3zz#rMyBA1_U2p|DAm%wib6@Of< zrh!O&+rNz}*yGH!tf$k*dQEPA~$IZ~<~n#a1$)7`N0EjZwbj6hUNS zew=?TMU3Z?V#`kqOFE;uqPWlNsK&vI5qwmz()n|j-txMi99Gw>c;4rBNq@#qxaPUO_CN9J4gix?HBca{k4ahlkDP-iulM{9`HLyJAd#Ur-zZE zbCU58%-8Tl7muH>7l?p`#&)&NCyPkL*a;1s5fef1l=9BT>zt=cIUn{&QZ&v?gmP=2 z6rzw({?hXegZ)MDF2M)QH}KaRPdD}T^@Myf>_b*kU0=HG0dI8eCrel~yoUvStx99} zfrr;QN)w})7#Aqb5#(`3!+%T$Q-9C$d4Nk3d1~=2kIe;+26ljIZq8f9B1_^FNnpoi zz8XytKYOyWVE;6yZCXW9hCwYxk3jWVyR4)jbHykEwu~+6qNV1`;G|x(`2w#p>~=C9UuC?+8Gi1}TV9lsNO%umF@2pr2z{OL zo!7l#cWG4lMBV-y6=ZX#ICG71@*I0TUMAu&Ku|?lE z)Y9BPJfE^W!#;U1!+**kL_m=Q_5gdA!Bh4#ISW1^dVOf<`=SP!@&QKb`HY43W8m zE^b$vTNQM)O+*TcVMk*ypy2A*J5l&UYOR3gFMAyH69@Dj{(q`BSs|V1y&v)s&5zJO zVgd~Z-mXI7%L#G4*PK4IFBUWx2>lEB^}6N0XH+$p@pW?1oC%a{r2l_fs{a3RX7>Ms zC`Th=_Sb9c7`CnT^~U}6iu?7#w@nWFlGC1egl~RsT}K>m;?kxtFH}R!Sietm!NY9On)>z8!xIx zD7bW-iO+?q!?%a{g@mGnj3Eo9?>qRt=1&FGBmdhjE#+anwZsQQw^)xsWC3j>+~>zP zeZKpK)&X;FgdBhpdNN;o7R$1cF;Uu<`wtB-#*AHf+2PMAk zPy2Wrv+EYp`{zZAJ;IJ6Y$4{!a^FChr*|(jm>dwdiK9}lr2p|yYlgPsb zB==||C^VEjULU@f8g&RCe^Y2o{0Xqt{1;xtCkITVd0cw0;rv~Hy7Qe~Y7IP4`ppmh z@F}q|Ke&>M);S}JKs@`~$NGfLw*v*XTL|Y&Y&VXlj3X(6*Ou<=!kSSba9PxdJlZxc zM2Sz6)fT8YF&^8AMY4w=*-l>*m(nTo)CtS_^nvv^0p4-VBIxldf9N7-F=4^n1I=sx zeU1m-{&(J|<7+~DZOed5`}m}BtL)F}PW#%O|1u4l$At^^p0DR`+vNGOQ28=6&WY8U zOQQ4RZy(5P$5ztr;%kV61#ff&M-##R=>#HOI6*b{qgYUR)9-Y1;tJ2y{9UZjTd&`- zw#d)tw$?GY5nEfLf3Nx7W9o^+KlWsL%in(0lzZM?=M{!8`7I6)`JUBe?GVTNz1~GId*Gp+g`h z*9(?(Ri~-vTCV%jam#@Vv!(Q)stP#SV1wwgacxXch4!TEop82u3^XnA`X{@W&1V;X{8l7c7DMK zu|C&Df+rRT2gJFGOE!0#xpe{`5_!R-y^e9{c7e*UFLhoPV~~u|Jf=tmCht-s%xlaP z4PT5Ze|O2%MuL8!KyC+I_}gm<7erWfa5`*((7aR%nzy7PTcdBd!F+fQv~eJSv7I#1u?Fhc!cvf z<~cPNZY6Av$2P*-1(5;IMWSArcdif+xcL&rxINmr8GaJ^wu&khN zL_d`+DB5Xkp!lfsfo&cCop*-Q%`bgSn?4(!h>k1#x(aSMW;-5NB-A)&SVw--vwUM0 zf9QF=tQ@ph{7?Md+h;oSTRwpUB)ci+*j`#YWHJ5FyzUB*RzGYNS(#;e&AuQQU zB>ot1*J2biOUqZpSB3n3J)d5u`6YWL&aTG?s2q<($aQ>*p|2zN_yB@G^B+(qwnX~P zAM|*j&%;g3Za&4w|GWp5$C2OZ_y4g6e}xtBmX>kpi)BP-DPuXyh+@uqCUz?{w=l$O zOH~YHdd(cTBB}4AcC{V=i+I06*KPxsP@egggvRyMnIlV1GTeB$YASybPWP~VIKcZI zUq6Z9;b8@`wm=FW8tVbV)o8Y<0#H()NefGNb*eJN$$+WGM_klpdKxgQGfKghmoRz= z3hmf_+CS*D5$ag`hfezk zCVJgcbKpoMIGY0)+G2NQ!Wit!#3wKuA3P ztS|;<*@#joF9<}stz^oe_ahj_Dn+i=#X+X+4fXccWWg=}Nu-z5FRPk7lG(f}jX(8D z;-*(p!#cPFu3FQaZLGmh8bASQ-8V)H_AV%*(unc?e_;$4b4dgGInKM$R#ZN11cJwG z$j|q48y2S4Lpl2I>w58xf9>-K8XOPjwZxZbHn)jEcG}-KCdT^hA1RPNABoYxZw@i| zw~lrHPVuQ_2s$-hb&5d9@baY4n`BMzFFV>_2du^f%BS(O1kzu}z!M}oRzYb#^NtVt zkk$ARe+m;d0y=$e@gjcq3+4>^5cUa0_O0n3Y=YnQFXgwMpva7$8f=-hfsP*5ZrCE@F*M47$I81l;y%AZJax8jMGVrRw!mj(4~uni`8nBazUutZnm4sTy*CkF+^hJ^F7G8A;R8UhWW&W zw7r;KRwA?E-K1#sRC=39EsiW>-6gYuqJ_pK0UeFstC^?v9qQsw4_#S2bQ_(<;~LGR zmvnsyD1YRLm{}+$NQo$o^Ow~cMq>8Tx-lGyhy|n%Q9_%Ds8>Yvp;Q&drFin9X1wjL z))v#s4Pi?q>+Q|`8b893;;V!YAcnTLheZHH+Z#lEO)H>%-H9g_=1a{L_)+{@D?h#0 zWCje&OFy{hO1frc;u}{rgJh1@lZ!p9HL^d3mSez-i_hldmnA9bP*+vI^f$_| zV1X|U4{8UZ)E)Yd$ao~3Psjwg5|qw&7p>gic>LSG+%&l@Pb~B+PtoVN5svXDz?D+39u-C1cLg9S04ma2YS zsT4sATb9p{ZZO}`h@DVWO;=w}TK7l+J&k?EJ|W8>ltZ6KCUwd0{GKLq)XdHB*|`>{6%KjWnk7T{r6Mr2T&V^eYtbljAF@+?FM%O^2GQ?zg#Vt$f5! ztl)n`?*3bM*lrK3kq*Y$yFYDQ4(iiy_|ipcO`DpczOK(KyRQ!l2ULuqsLjk^pQuSA zba>6N6=ZG|jf_ehL1dMsY!B3OgX`LbfF&M{J&=6QQ1-W>+1xK350~&;$EkH}JQ?~x z?tO$=GVqM|x^17Q|2iLk=3%tgN%4sX_C#|P80RrTxgEj%j$@1>>chLq>KE$roMKMoqDW$$pwefL44^UGSMBMi-HCk!_bl-NPV;mCgU z1tz0t^)T--`+1olQ1(g!9zUJNOQ3(@?Rq}-^;n?()Ae+>`1cDB-1-VSez)DpLNa}Q z_0RRakC3Dk1u%b&&avCfU%dbbmX`;e+n}KD%`4mp@S+t5jz<`;o?HQDY*iOyk(*~X zpGOY3oL;&F3t`>z$XC6lq4K6c`hJn@J0CB3N2ByVasF$o`@<$?Zkr8U|L=b~&ONq= zXt!3>ej#Fy#q&FA>ZK(v2p!TVLTWRkk`e3rPGz+soqVvG#KJK7c#$Rx1hcp>h8R*E zVz0a9U8TO6H!WYQ_kFT8%Z4(BC||6C{cORtmPuM_>fPMdp;^?!eFeYC9=jrK|I zk#n1Fna)iDQTdr|>ia>^WIJxizt*I;G4v_rhgZ&HD8a<$k@eSJhYYpwwkqcC?r8M) zd~k(DqaOFl@%Vxr=}_HQ;q|zeZm{}CU&UWq%WP`3xBj_1z8va;hAXK{3hA5*!{1P& z;z#zdCE{@h+nH^!vlD-$E3C}yHCIclYNWrNJ`d|dvYsb6OrQ3XY8;3QNuMYytD=gAWk3iN-T_G|sTk1ZNKcy@bn z<{~o!+>li1IIz?Ak?Z?#BwYJ`^kBZG`aas&{=vHG?7LoHX5C+$;)$2pptoB`JFfsQ5L`Wtj0<^d_ra78^x zGLLMUYk#hLzqPy~L8}9r3kc(r6Hoz)QOhye#>#%%G;?v_c)8d)Ltb>$dC^JdMKsmd z*$_4K7Yyg^*01-S17)-CdJKHm1SY-T8fs?Wbts)X={kSZz;*xXp=#3g!%=*aXiFTL zaR}1-t>e0{$i;Tg zmw-{OWW4YSHljjvfLyF|&9yK}@_FG0bqoQ+wpDwUD>9-C42`R(Yd?O1 zjfvlsuIqokbhiBFCz63DPB*`>=yxxz&UbIM{h>zQrPI26Tok&#B+blK66rb`Z&Tmi zTv8O*l!dT&rF$hG^HKQ_yCB5QS8>HKJD@h6VC5(DymWb!ZPvYi-Hbx%E^S;zRBX0gv53-mj8EardsZhxNju-7_Fr0S&$^U9aC)+zR9 z+j`8{Enkd|h$GxxMPzYb_kW>w?)PVXVNXy)IfC?q0pkL=W#_2R(Lub;u0yV9JI#E- z+seRZ2pG(shqJ%N&rk_;&!5OQPk-kDb$`(C@21)8>xX}8PV_x$yX!ebREbFtC}{^d zqsxCrbW}-eA%#mj9xs&$11Whr10P2);mSWjAXC^WjRR`T;%nB%y=}T1e$9of``B^X zcJLPCp4;ttt??Y%7<~Z>^>thF*LDBkD}CKd6f&C?tnas3NOmH!&&`Y5z5LDpPJ1r= zuh*Hb8)noquG#)N)sH|fj;8Pn)sXuICpa~}tC z^AgBcpMXOW%fYaejT5P=?d7y?^5(G*&9X|ic}V)jnq2( zg$Nl3K{+c}g|wfo#FUE6L8!p1BIO{pE`{+RGy^2lU{S@xYeA;_=rKY(M2cEmT#Bs; z_9|on2pZ6$F45Nj>`(oD;4uObqg-K2WWV~$@0@%({kAWChQe&nGvx47 zP6Mz}6zx9&+o zT6+1gU#v825MVoDj6ukds=n_}0@wVuwFTVQ6@&Y_{pgsq?mK_oSU)~P`$uQ6MZG?| z7XoI)aJ69t=Tr#~6dR$INxMkTR?A9)IK`6VmCRlt$+S)&v@z#Ya2J$oqh`MO^LgF# z+1szN2@S=qKqc50J$LE64b1!8rHLO6I*5LrQY5}{y^fc+{(JrN0b33lpzCVNqd3_|-x&rYe0}wAI@}>RL?pia-!Dg)x#w zw#+YQN7ZAh(k(_}L4<5kku{+B;wVk6t8k!AF_OYMh}?haXF9cF6gBnIZnBq!=5~XW z@=|THM8{!@v4|@Ki60C3j^uphHkSp0*d`?KG0+7DV}XhUKvvq0FxY@CrTdC9-BP&j z`8H*`x4B>6y|5a;TX{BUR{Nf?^f+$kk`?A0gDH36GhP()%4rQ;#Nu-i@<$68jtcg< zC|I=sb{&7_RRFAnH-ZhO;)C2yRyKnls-N3^PIJwNG z=jO241{a67@xmch;c`e_NCo6_O~f_C%D{P-%}9R$drzgBRz6alzz7;zP3`rZaVt}%(F zF>HU3eaCaAvPs_$c+-W=^2%ALx(0ELk44=r<(xu3dch5wtccd}3Ife5MEZ3>I|%11 z+;F1z%hLt;F~uJeEwoM_FA_h}HI+a6@MnI`X1$&^(78^m`v-=8)j?Bq?{T};S^7M- zw0^X{eZPezQasFU;1fNAt?x|=v<9hvj68oaF}F%FpYYQ$+yb1`y?0`+H9u~ndn7ON1D9hV-tTC zYsh0g6V<-7*`~*p-}85}pFW2^4<9sazjT$qW`tyeU(_*l9>0PN+E*u-Xbus{Iem2k z<5h4@0*2uf7_W}NkTaivKY4Y677Ioktu+$cHk$=A&@=61Mv!ogZ075lg68)KhQ!c+ z7YRu$kT#OmB%1{d61qOm^?3n(UY>s>Tyz~Y)P3N_7A>)Pv6Suu?{zW7FOK3~C+vDG zt@}Tl>>nCDSI3KfHb9Rv?eE^_Ha*f#99;#!W?Z~{a7i&Empvo!`F?tpfzmSxsZW~v zldHzGWZ+SZPN##*s9?m-FDS%UOdX*tVT2BN*-5fjJsV+sNs--N=`mtUjNMNa1X&}h z?fNANMA9e(0pEDJNh8R`v8kj?%)~b48zk7fCq09TCUKJ{Opb7P-U~!mPx*RXm*c#~ zpp5lf=(8cOnvF+_8Srt@I#B!4!m^jhjtDXqVJnteh3R4B)@b8qJ~B2es8WWgvuKwP zj|dq8ttyu*j|d%q@vDc86T}OW_gXgRC7fRaxgVLUr%LCrT8lN*o%tK+@hN~@ur7c< zUN;d95QAG^An7@;*{>O2Wj`~p{D)>h+t=w2?Ad94GCtn$)-OFsTxgEF_fL|>-}TNx zKXmN#i;BRmq8OFWuj#S!kRCh{UzcGZb=^AAz1RN6twRuh-KH?#ik1;>yk9LrEh|<> z3h~MfsdOcaUsSoYn{woZmr~DuLSSONO;eCok#-cL@Zq}c`Txl(iervMN^k)Ch@{k0 z7rIeu$qU+{g4LwF5UWY|Zu`30iwj*ARX+Zt(hbrLgNnI%r8xp`rPA9u9!cmFP6R9f zNzF@p*Ou;osL*vZu5}ncHm27`##^|4S6ac|Pf(K0mZRay7f$yTFK&w=su+$wCs zQtG)AQh6<3Bj~TQwYqFJ>~;3%ECH1{x7dC6vBsZ&J;K=(1{|vQ_~b+CK**X!%S*3- z9It~c3PsDqn6s9DFl=3Yy0^J{*x1tako40eO*6N92BJjg3dbVd<7u}5t%+Gwx~nL zxE`pA(ewJky8M!&)8V?r&!eTvoaJnh}37MG2?#pFzzIxeZ7Sb{Z~VO{@Zin=zA8*A2Uz7Z=W0LJ`_HQK1|(vX;IQl zmIrZi8skiXd$_AsAT!I-vvhYg8($7UaN2DmKF&tihH!#dy z!;YT2c9{P$ACGE$od7yXmI;IRpZ^5T3^Ik4KEhfT3Io6WGxJ%`+R5%`59N(I+w$y2 zh{qt}c9Ew`EEjl@*RND&2M_7KgLt$IQfttwq89cUisb?W%zw?-c4-OT?1o{mrj%j798{c z6FIG(8w1#bfj_v-1HI?-x0wS><8aN@ZGM-4ln5JtUfkj@KB_VBRrkRlmNKORP!)kH zb(|*qOTrfH=jcf+Jb-@g1G@(C3d!62nBuRwKq}k(^R>4}3)2d6sxi|8jY9F~ zbAiUHUc<()JYX9yuLE9OJWFf1^|t`F^$U&IPak#7@jDE2G5@uXy6=-2VqM=CG4#p| zvkLxy>W^Oumg$)TAn1bS^NAg^N#5s}zx(y6Lm=3)X6~TXEWXW=nriG@==mOc{(QeK zm-Lq%b+^9GHEOAJ+jQnjnE81>OIG7s&D9|##fz=FIpQL?R zd$P#Z_7Czvj3zOMt0pJ}w3rmn48{U6n#3f3h5q{QT7bqp-M!!5`#b2X^C*czO>4LZ z6wMEp5sf~FtG!d?$OY8CB(uRMY55s8m`O%p--}PHnDcY?)x_j}%i}y-$m2}0e;(ku zb}}Eb$Y+1(fBnww+Sqw_(ZK6;yDyrSSX&fL5BJ<)O9rw|jkhXlUw^?{T}`3_^z#0H zvDY718n&ZhMJlxzNY`16SMnrRo7!^w`%$lsN4;9fFisDI9X`*Kn9_NjJjY|#PG5Iv zZq$5tAlzF@O-!z-X3%!GM$Y73!<#R*&Eh9thSre&!)!?*HU@G~?i4 zuM0ve%G0q=21haiUog#=LT4^fGY#o~(c_0`-Ds&)wGY*s@0Awzr5NnSHq!~lC_bIt zZK30Doc$v`jStwbVu*|f@TTzYBfhExc0k<}#9l6lk!^l-R=VklHyo7~Hy|Nz`#MFl zzha#wZE7~ke|67o54x%6mN2~XC}Q6x=gfrzIDi;%mH1*dx)VfhoV!C064{$BeLwWx z6!y_MqsGsvwokM^;?HD;y@|CGg0lA$d|y@fU=l(2fmzf)If83 z8hI!bHcga(ET4++hZkWU98f8rsw7F^_=WxLnRS24OimrLdvf$!JS{+f5>qWeEzhwo zm%C;>ACgy9OmmYFVJfGwIX5@sN-GGkGsnKLD1#4>)@XC{qcPnLwd|Fs$5F{rDe-@L6Pt-VIhqWKXsb81}r~j2x zM)XmZ{M|vk$2oh#Jl-jP4+Yj=R+@XWXmIjk%Y*r$_jj+|^|fSMY58Y4r3=#w;A2G9 z=yW^2ADT40BKOaAbxwXTN9B$FP~GBfI^Or6+9Ja?hq-uh{Am2$XrN(#7O^{2htbzZ zm`n@vfxPRGn1e^*$X;~*NfoC)k)hCv6lQdJZWPSHcpxdL>XQtA51eZ4QJsgN($S>U z??l;7_bk@dIm13HrFs-0UzhRBc5Q9A&b@lvX&CG!Az_e3rE)@TDCi_&8Z|QWD7fcO zs5KMBT^_+Omh?T$o9CEEoUOdA<6&NqoSS{Odq!Oi{#M8Tg8#nNKSGVJyFY5-4d?b2 zzu4BI;Afw{)VBtInn)%r+AQXKTcwQg@NGhCnu_Hh7+4-C-wR58w4=OcSZl}o zi$_#yaeT)6QOVAMT=w6jB(>QQ#XY#Ik6@fBARa4(Gc^foFi0ogU8wtAJS4)_$JDOJ zr!cQ+x0?j!L zFsm@nKQ_KS7k=D1LCqpM`>xKufy;G#qyF|fzS&%_dw`^ZC)61tR(r0u{s2iAkN@C9 zf8rKfs61)L>N_zmNn$5*V;hXi)M%o@47J-_UgQUi zx&Hg}B|PKjZx!z2&%ko*yS+Kuq78rPs-=IOquqYi;3*cZCVKLB&#S}OWwvPNHJQY6 zD?+)JJC!n1%^2Zp51QKJN|f0*sN+f2N#sOsBGlM_WyXX~WEy>1yU1QWW52jDxUBNu z*E@x{c}0zxibNl%F@(kb_WyXlM5Q&bi>#u4}CH+h@Mb z{*AxVt5+tcKv&BcjuUDuPmzo^_3pboH6)3N^!B zKlu-TzDlao6+T(yTTgl&F5_Ob^_45NlD@;RwMa6H~xzo|QT#{M(0(VT&L=`f8|-1eWF zdGP{mf9g{AzwIS7T!LTP+?`fRo7eo(sUHNFHGPt=&75n`1gq(dZh!qh zogMuXY-D+GIEAwZ8R}m6BEq@gC)aC#&ZF%_=b>}VsFV4w-CpCYWhT{%0l z=Yui)n_A88zk7W-aqAJMvASpjYf+EM1N|Ef;}^Ymv~mNY0~s2ZonYY%l8vW-HM!~p z7k)PRtAEAY-|8LXeNj##BhQH6yy{)m*FB7O_10#7@+O{mHttz5GFFso&8J4O0@Q4x z74^@upxQ+J1K2#&42<;QnLAP+34rr7(vtL_#&posO@Li{l9#yTSADnjKWgXiODc%; zg*qs=Rz9brooMuRacOr!F9{)^svQ# zkmw(W_S`3zB+iWVJjB2&Yc9xWMpUkU2-5j#8HZ8g z(dLNW2YP|q9vz{x2lPw^dS+ZqM*!wb3~+&1w?*1noVMV z?U(B^t&@6jPS#Fs%}}2TtlDG5IwPk*l)eWcuRN^+4%Tr>{1tpy1eE@=&#)v)5n>V# zaekmxVyh%4z~4DBhLW}GYcpfY+=WIFQuCNVP)15WPtUH)!Reh?M(A0lLT8OO3WKq4 z8yQXutr+?S6^GA%S(#@{SAua~F|^`paEz!)E*-n5az~Hx`JM$yd>ZmOuk&hx@%4AV z*Y9<42DE%9^Dv+_;-j&@Bt)1!B$_*@#RCTQx}}^|1iX`fBDIH(UBR%bI#_%JP9 zA&8Dec-+!EJWnSmp6}Tdmf?=jv_6DEqy$qHwTn*y3iD5_QJIL;uye^##|+Omz9Ntd zDDo96eCZ{D=dV&>TjOd{e(KVE*2`kt#39ja`~{yGIV+-&-wiZr`SQPOqd$Icf9^$P zZr${@@2+}(2$y&~REwLs%#Cjh#wECxJI$~yZ~s2nz8fNbRPQ31C=T~>{c63s`qBuk1sCqH$jqPFCk2{KPMHLahiW$FQ0uM^hi)+*`QapZxWx`)o^l7 zzU>IMw)ngY%6ry7@_Pr4PNIB5V_Dcqj;GRlzYlB*o&(Y}#ogfC#ayJBpUrdWut=@W z2W>S#TUukxKACEBrq2ThIz{=%Qui|Vq^kg zf5w=~g=;jYsncz0b}b;kX(y6RQ`=pA15<}UN- zvIY^$m$n{X4eqa9+qdT${qyX*)yK?R{J_;0+6B(5#P`(!_Y)R_#o(Z)QXOY19P|gi%wb)Z#^8UM2}Ef;cSm2?F2%9{V#RYpge-=K^oB_U#$5f6aS@<#(Ni z(Utq$&67RUyw$F)c{_j9Q6iQvajX>QI~c`?u51$S9+HrwMu*4Bbve@q!ibzx;1gx@ zJ!Hmbb<*Gf*L+MV_#C~a5ImeXGDbYRP-CA(fy-tOW&a)ZX2c7mBR`6;mxj;V!RP&i z&l~Tf!LLNgZO`QN>poy47_Kok?vrgSP`J^O2yL%nIf`YR&j}o<3zv$e2tNY06PL@S z2s3}0kw+onJQWYN@)gxN7mI=7T8a4boB^M&vAL0o%pQvVyKJ5{gK|A;yLNSXapJ=+rtUN_J3dd1zPrh#9*DZ>?c6(Ye9tC7da*xz zG0n$N`|Wctm3mhqc`y887J^NSEFzEzK1_d8-g2ZGy>!^618x#Wgzb8<>;2Pw{!i;l z;{0pI#WlzcvZ^SBnBiGayU|OL*c7Bqke`5FjbZwbAm&t!;>60J?#6@f-Pr&_)|pR{M1HoYWRO$Z=~ioan~icywO^`=vZEAadjTHISmzW z$9qQ}ZO{{{G`AOTAy=r)dYrmfVpMjT`SEmRbf4d8|4OW)xzZSF*9nfv0wiL{iE1wu6HST@e&!m!NJmWzHFC@EAXFe_h)XzQ*&N9#)I@ zxy0$sx`AgK>zNRj|+h zc?{>FVvtK_M$?6pWzWUCT_skLhU#=8?W>iVkL8E5m{)cguf1bZnbYpI7V(I^M2CKY8ylV3tEbfXBrm)D# zzVM5n&Nwx)e**Kb_V`}*{Sc!c22mKRY4ToV^Gfl+Np$ZHcMeZzVCPaSNB8cY$6KDiB6LB zKu_7O7z#XVZ_Ykgj>x{O?t-!^VN>y#jWS+VkMPfDf6(uo{%eN^#n|h|TNG=amm0&~8cx8@|JV3xX_BwF2A}9b z6r6ZDi8Hb){UhlAX-sRER9e*XW2*h!Ha?x04wn-cJ_BhZzZQUZesisc-ApH!p82f@ zyF--Pe~WO@6Pr;C+9FNfXEL_64ozGv6y`R0>9O$yR!3qh6N#pmt>UJ(gEctgPIiO~ z(3zy=wOLy}{%p@$-dH$&^Ad*&5%5 zCO+jJ80Qnh%I8JyfYNsn%U2XRTE!%Zv}BF0e}u|8R|#$B35OxH>s`V%-oDgL8!m5$ zMR)<6L(RU%cW%Y!usV=;bLe{%zmH*DH{VBA29FX&nDTmJHcG|!F=D`Fz^kFH(Z?{0 zs>-4SB*PdF{UD&xBOuW4jrmORh7XPTvy3`#zCul*tEbEM{OraKWBqIp2=q2Mk_Xh` zf5jY8O=BeVF`cAD<&2!IK(m%cOpvaA9VM^iUyZH6Jmno$$I0wuN@9E;YAKl3 zGID*^)?O^yFXEmEGj?D67xN(Z|HX4Jf0H)XR`LaLUV=;$`*A)U8l`~POGZGUR`m$}yDs^klFD0ISUF@B@i zqi?+JpL@H_!IRB>TSzIR(HQaOR!?fA649_KUjg5Y#hP!@jLaHm9%ffZk*rt$6 zs#I2{7~jH6|2(D##HTkNMoH<`eBbJpU`-F}rd>Uz;7b1Bnl3o8TW{m;ENx<5{2OKK z$KuSmIZoUYXHbtS+Gw}rs`0;6e?E7&qXX5bEg+dwC&8N~Z3dQz#;`%J7NV`mgqd$p zQkpQQFUB0FShu5<-!ERVM=;?A6FsiM_*$YKHjF41%9*KT_K{54vCf%M+y&H>kco_* z8!jFtei!%lBf>^hws^VQ_0jdKyY+EX&+l@cwl{vej`4S6{5SgCH{91ff377(auO-w z(z4p1KPxdRY{odATdO)jFK6Z)>NmesE(J>noe5g<4?24o_CNB4FybBN$Eg=1)*pGV z)nK9?d%N$4IljkXOz3GmhYXknRT#_<)^S9U^t0p`)HC4z}=K-k0)^0SS zIAO(MgYyWVhx0w|!8>5Te{qI;>`b?OC?3N3I_BM2cW2I&Q27O?{HGUnt=HtIpL#*l z-wN^n-QGuz=&!A@zlZTeBAa?kWKI)pf~MwK_u`~TCZO;v&L*TellerXlh|o`H(9{w z;m%i_Qj!N0_XK^|OXV}CR1iv@Gj z^ocG$ppLxVbMZ|2(<88nT`uzp^=SCwI_FB+)#*keh*eV$!+flnU#hE_pZHy1#1T#s z{a#c6M=~_WIS&&_f575WSCZ}CzndDf@=4}qZ^Tq3zDGjzq}}$U(kZVGqPyOiESI{$ zxSC1TKAAnp&;F-Xy1L?S zi8$2kWVgPdFa7U?9>D7ZuP)F1>=#|H^e3Nr{awGvPd7NAAD_KTeA)T|gxws!!KO6x z|JCi{Esp17r=i4^$O!lx?*r045WpTp=83snu?EvIs%FGTe^#47Of!{7(EF>NEV|0uwVObIeRoER4Fg8 zj+r2~ltEPvseT_u@cUXQ!4;w0fYySC8AlD3qrN-4bj(Sq3)&w%(E2zex;hGTqy_RS z!Re2Y!TFIP+pKE$&8yedLdMs+phNUkTZ4h=^uy|We+VH2=>Nsa$=EA|_#>Ak! z-cThHu}#FCMv(Wyit>HLEBXpn2u3Yad%cN^DivVd{sIIxCIR&kz9orvujQff?S*l_ zH>Q>HY{bJv&AgB}Rt?Vd;?49+OZrd$ryu<4FPti%=F9c`!=8xje9Q5E#a9rU1t`>H z2^_`7e{T#D$zXhP>Rc1zRp96Op6AXWh&y~Vf$a!!gxhu;M?5(3e}Vkb+(uqbNOWG@XoesbC~Wowj5n71 zbb$Q#m+c;FbbK@V7PSsfD)ql(> z2d{^`k4QDRmmj_#V$eyabJg{WFvr9AglTob;|zcB+*b{r7Td1#iw1Sa&CQ0~_!o}u ze=yjpuJ^14xW-{Q@&z0y=5yqny-lrSJuUSH4EDpZN02iL2Zz>bLbY`G>9`T$c$Fov z_q1xfrM)`;m@vcwDgkBwl}gCnNDZhy@Rke0iF+E+z8(jp7(2`)9JKm=bzbG$$zPy; zE;SYxka>h~pEOD;s8(7bgKze^$Uk(i1G(2Zy&!#@6T3aOgiD7LyY^r-gUm z)GRQs1?8T$s7}K+{Rfz$zX9UL&@(o6w|OBjk8HF7^KDbVEAyeF{k>nv;Z(^9oR1UO z)$4DCntOOI&z}~qKlAM+1Lc%pCTnUkn?96>{`(PgQchG((v`OCL3#fIK6%xxe=kS2 z>Al0-2>s462yJj`{UZjit)R(<0#X4 zw;u^;v!1>3j5b^o-w`4Dv8h>MfAhDNr^x#6dh!C*@wOiIr*EEpU2EqHM!wxwZeBnA z_<}V$Dd>r8k$HV5h9XV%2>X=jBJXf>~dk&BBi!;KyhIXoyK$V>I6F87T( zM);k(Jkn&OZ@e#PQij%pe}pvqL8lXT&H!@XMUNU#_S=a_muvi45)Ix;U@>TOzv~y~ zix+ar)O6SKF8Y0oB^ycgvEP6|A(6%PO~3gG;1)gY5Morf6uo&Tu6EC@ifm{ zw%VYVK2IAfAx^D%7LdvmBI}W<$`PS8hj_ekhT0Z|Nr|o^FRoH3Mz4cGTZc$W=wCFb%;IsWVYV3nm} zp251Juz#x-dziuY__$i~JKld>y^7`hRb*{Fj~LR|&NH+Vp=^ug*evph!_JF&c~*ocC8;Ix1O3;{;XdlX;*8(eec$5 z#k%w<8_n~Jf1c8Prd3}hPc^b^gI40Y- z$lDVCxt2Dm#X1D#EY1Sx)@9SPbVZR*7F7}k#gUlNdTZ;0JykyCVzeM?DN;3JaLV_f zL*EHUGA<#?s%oW*0iCNOd5LnF#K-;|#uN(jB1~e3e^G)wJ<{)MJ_Cm%FL$O%-0Xk1 zo{zurRvrE{zs1XRbEo0q{XEP1%7@2ivipTsy?6>Xf9rYep5FH>0#15oZJnxsDC7im z59W8v$SV(u4A!5rzMsshDuihC4nr~x3YbjFb*NNM0yS_#JyyPDDY~&6`F}H?`SAxpZ z_6#0TV*G^+8-FbN2K}dF0hsUN`9!CXQKBkre~oZRR<7C%k>eB4LIuR+0s6fVl}M^5 z9vJL#V^0S4q=ed6AHAXBE;X|4Pf4dwH0{CHsIia`Cc~PWYICKR9xV?pXq^c4QK0_u zopGKCNtU>Ns?Au!s?j<1fs6piPi?N#Nj%SVa;ep{d-d3w2;xsbaVZMvz2REL+9Y61 ze4LsR4vsu#Upysm<%QlPcMzBiN>Wm>wRbGM%xrx9IO zPH%N|Uz5Mp(Ru4@UFNS!@zB;4q33xJYBixnjd4KCQ!~l`x{9qX!i$CHR-|b*x+JBDG>`dXtAs}{e}k9 zrq+Pk+NWkO`hvD++{CbCC=X37G<+L|JdZXWWS|tGrFK!z*r zn3iO5vhuXi+IQ((`Y5CB@ zu*(wQl-(JJS&uFuN&7vk)bVLDei|CaRhSWF#q6NdShjDmOB$-Y1}$TaRBRf4OF~l&o;Q=pf;g zZ=CY*Fi|{;h<{#qeI4j*kQCqrTbPLm4wLxs2B9nYImo|zgeeLmI437oe2R_vgQ$I=^q)V|YDu~S_IS(PaojR$f&esaq3r`lm%q4HK~cJ@oz zpM|3@uX0mzXFc=5Nl+zo!ICpCKC39yOQt^8TVj$R_9Oc)b+7CCVFZ2cIzW%jdiss+ z$**T?cLrTzm7DrU<8|?>f5;O8>$al=u8S_xF?QptjM&d5+YQ!q-wl^EqB@6 zopMOd^R3ZNF@B2-;_?kAWSP4sQ>k;Ec_#6LC=W%GCp5f`CpcT=Nkb0=|9PIRCLU__ zH2prxE`EO<&LO<@OddH+4WL$2!qm_|A6T$GQ2*5SSY?{IOy$#Mf0CoF#eD?H2O*=0 zZLB5M`xr|V!x|N6e-@2*6Kfff(}7EK*s#K+wy_2YVnTHy9fCahd^3qg!zt$)=J{NF zTc%eWBb(flIdI!UExd?>XJP8!OuN`jief2QW{({xqeWC{m zAxyb8t^av1VKe3Se{h}!3i}ywiZDO8ZoOzNK6L)Xve-m)a!`MNLd4_At+}7xv_nd=BRF?~E+TFn$iQ{(gVu`~24V_J8msKOU2JaLN{|fm+$|`ZqRbyIju_e@o23_D;uv713rO>!W}uf3eMI-Tzj`36BG1 zGB1b@Qx^Ss{bAT%pKUlCBU`n7Ho$!I`V)GQ4_WG@~ zY^v3>M|Xq#q&*t)DJ@>8F7#FwCDlbvMd)W#9beM0l9cRHM;~3LOw`mnw5HEQ2@-}! z{yp?~tzBB*?;Fget`=!A@@S}I{)&xil{^#K>TKZtuDq}jGlRB%@oT*_xQ*MIh;ksl z@u#`jFq_ihw2U%fy=SrhnGfGDd(koT}>s%jw_pDRIB(-_A91w?c*T(snCnnXU zPjij)L*+EsUX$RQB;DD2o@)PYs^wX>I0xp0JfO3O$NtBG(NOxZ_Yw2tb5&7ww;ONf zxt59t5RV1v4_3lh*sD*RD0%m{>d{fc5k)p+Wb(WR-(>`~D)u_Xa(X9?Tr=asmnpvpGk=E# z52#u#UT2R1(Upr@b=YfTp$ysV@zno15F7bvO6<9^T=G^f`bW#As&e5~axlv0HTV19 zUhhHk6@&*76*_`Jkh~+04lI{wf%`zzegs7`2VjxJT-Pwy&m)`SNPw0ONgt#&$dxiJxeS?^O<%{bG| z!yOnuWXedg{K{Y7GMk3J&$VLxkIbc)Kfnkbf4Wh2%j@Yudp6L%vSLy!7xBWxY&%@J zkVT9f1&s#V{ujSng@|FPq!@iqj^*)%n1n`%8!hTX7@G~&s-g!gr}`MnMBlARuIhSIvS8zPpQofBYXVxUDW8SZl7VXQJ|#$)h*f5*54S zfArM<&NC7oeXg^4Y7NG`hKDpBEQFTYCbF281Bk;T1!XBmb7pVfA7tozKldObC%PA_ zHh<9lha+>8>jAYe-o^dv0k+xcpL!%0T%=#O`-F@-Z1O7|7mBCyWJdPGi1T7~`UIWw zYWX3wW%I*sNuQ%rqi6iJoI8UFmnC~Fe?8+h{SB;>qrfMqD68bF7mo8}mPdHY|(nlREf}-`j?d z@ONSUD!2D#R4e~J`h8zqaL4f;r~{jHn(2E1>BVy{iV zO59??yIy#=PY?Z@{QRar)#@+&^njY*UBmo*)K4G#;^=|8pC>~<`$MVxl!+1GIidZih`Rj<@Q75urr8K>u_h>CMW*t8o#SA=b=>YBi5Kn=sbPZfVfL3NHRg+F;!Q~{yK|@&Q$kV zsl6FKV3`y1zYX|f*lQ35Ua-F485msq^{xNKRo~*L&XVnTf9pNyK$PvVkK3G^y7QDN zauS?)=$}IPUP0=cukaoF4x#;76|i3J<_B9zl7aiQLtU0dGD8fB1Rku|-ADg(Oz2zi z1aqJC(Y;G~5AS{zKsbG?;P~pQuh~Zkbdt^@u{{t8U-1Sq;wXvN+f`+K_cMz~wSKJB zh}e458=fvce;`j|^bdFeSRkGoePUW)s6RAdZ}Z-%B3D)>}duCoPhh~j2C-@UC=p2?H%z0dcdT|FIM1WbDG zTNnL4-HX zy-#}|e-Z-UQ3AHBhH67Jp}<}cefc0TufrIa^VPr?sZHuZ!?2kO^4 z-2xr|-{W3(J}P*=;aKX9=U8@rXBK@rkmOA7f9*H)Wa6v4h?BuCQm3#qDm*pRTp&M zf8gE_=e%q1pXa2&I+cu*M)L>uBj42r(l7GNR?Y#`;gykUDek8+EwxwO`*T~h{$haj zF~ma}0JlB++0|RCsW-^GsrJt|zFK6ctmuMCVMhO1C&u`E_ZC)5VR#KOM_z}=8%}!{ z=e5s^Aa3SUL)`Uwu6|vz)$K(dbI6VTf8lD=;$g5hqqU$4H5(^)_U2A^f72C82VgSd zD}K8AE8b_=cYFqb-@b}9|EUX|c1W% zeEwI})~p)*rq}P~)JHvgqA)JObFE#UUEJOAP#e6IICZ{z=WGa}wwYPngHas%fB5rJ z^px)q<2?|^t-N@>EqK5RkP|QodotRmW_w20igpw_SViosEL!Rb;j@D8rdf99mjzP!Ph6H`rKCCP;NF(^wCn9OYMe>ECeV38Q6 zTdFIJo4?^v32Khw6a9Mi!6W)87K*duVsEDd=oYQJYBll6YnSb?r&k1uusGVQ6GZqx&PN%Esy`Z z*U@j`&)V(Z=;{4cH}9PPe?>PBa{sTdJy(Bytk_%Qw#&BAT3|F2#{o*<)rlcPVx7m%#Qe{nQtTz*E}E9O$y z8pCPU!yJIZh=Z{rQR^4B$GXk(^LYwOVWtlZ{WwjYa^0Q>d^2_+Zv5*)jNO~@GJg06 zo}0|Nv*+113jt|=Q%F8SY+PQElm5YIH~EF{^(i=?h+APe4qLh4aaVgf`LAy6_^v)L zo4vi}0=`DezCG@0e?mzJkEl-xVIRHd@xB|VFLXJIf5Eth+;Gp;M)1ve!Sx~o<$c+k z+T%Lw{3M+T*T>NjP385V;h@ih)Y2|#^G%8yqGcisf*FxRte$C znz3PAxt7b^p=|>5bmTDhIoIzS(CcTSKOLhvRb$2Pt2NQte{_)?I~+Yh{RC3N1xMXt zQbfu19~vobZRoUQQ2Q%(FY&B3l^)F)@fj)JNBz%1r1D+g)*`2n1w6EiL&#&~5zzW; z#G)5&BM$@PdNsX=V+PzbEXQG2bB_2G2Xm<-xBM3uaWRN@ir3FV`{b05IVG~;PVF_P z;+yXmbv8$hf8Mu`$r%CWjQe;@eSzw=N0_Osi4qN}@;I4foI zzQlf36bz2^^rXqbs0g&v%d~2EDbT-vCCZ;d4P^7O>Af1c`l{B>aaWPC;S9+FM8{8S z953)`f%-${7)}l6lg(qEJHQ|;gYG%4ex4J~?WCQr8uqvND!<5CKz&YX#eMYZ?#@Jo zJU^9@e{WMU5@vFyuIJ{|hNjlZ+uC=AJbYqr9!KZg5Xh|#mpT3<%+T7vsyP>sw|wyu z|1-aBahHfQe7)vFcA<@e3QaqcC48?J$-=%Pe?9#7<`tel_wQc0sKu7259R3XG0aSd zb9(Ay{ChU=i2I#e=7Yv3@y1*1s%stj9Y!dB{l(|HYNE)HC2aAV2$?A(eahnqy8K! z5kTUGXJ5iwQbR#Fjr_TLf=3vUDf32H>~*Y{}@SE-11?sjfb zsub!VN@A1?Z{KqPyAx3uFQCT6f8=Qk2SKvCF)@7VzYWGj%^h4f64%WD*9~c~ska!a zZVq8RpjjXO_P2)r|FOnWa?zH%!MQ==MxZ}UlDdW(5!%T8t*brMz6fo9KdJnM+*k5% z)|yc9aVxg>^XQx_!Sp*Ul7|e&emEzaP$%<@9mvE;W`m~BsS81zheU-Qe}yMHtChnw z>iFr1)T*!aSWY5ENk|T1mF}5V`wmz34T~mjT4G-S_raTIff}zkFVv0qYK(m-XjZS%?W~9-q=Ioxx_BHg&h{mI(;beKI z=>X)6>bTbjKAal-8*xu>9su=F(ck1t2k#W5Pr%;6N^$re^>#(}80;@sb3f+I`7`T} zH?SDdfhKR@Bu$*)X_wN_2r++R^D{oT+~IN4;~FN2BS9`SdaM{r5{=I%p$sg}3$Uq4 zD21wI74}FaMs+qA=}qQ=)6brwuQ(?t>!1`IOy6hg zTA@#D+}&fv!G3}B@qg0>`?Sb>&o8=;$qUIj(-s%QBH@i!Tz{hzwMT8l|KZtPbZ;C6 zs7rD9d)=EGoT5}o(e9BQ2BvN_fUdYjeE+X{K;53%aaZA4OCDx|z+p2{2h={9KmjDx zEibblJO4oshbu_0n8CXL@!b0~`a`ONAVnsa(rzq|_tSdR=%$GImlY_v!3hW|3xm_R z!Khqn>KRe7cp{4(fepR6O z)GqLub=Yck{0?gApidzw$hr)%-hn2^*>)AnK8MVu1%Ds=O-_R6cR!*{j6!B$kN$Rk z{ZD#2|7M;=e1|_bF;AmE)BfHc^Y^csOm9Ak)ZxuBN963Ho>)Mk8hqNvwAEXB@K>>{ zsL}xmGn=SGF)`cTXCI&8znPDG3ZDsCnbGG|q1K0_cAUJi?}vFamVY<*i!p+X2`Tu{ zYcM?j#eX=cs-6*!y4R}Y)tX_9V6;m}=`J=8`QK>${B>Pib@$qHqs2Y7`iXv>pZs=i z{|{QmyhKgH77w#V43=vAKNZg*9RL5EDNh=G*Hq&1I+L(E%*7k-wf}#*iWeWw4^6~H z*QBj)Tyd*g#4)rQ7*}uaKi}{AtzDeiX!cpO#eWiEEw+g}_cTo2^7{VWCfuP~eS3z# zd8aYbxnk8FeL7nGMjN9TMC6vxVQPygzp-(>&T zw`$ED+3hEb#}knAcOKvn;Ll8R`$7PzbN9xxw0JCE;Ikc6=N0^O-h}>h&jyaZ-+!U~ z-`k(>;Qtsy4J0_=I*-!Fp%|l8)_F^puw3WZOm-Gh`DCyRu2Eoh zImOdwu#=pxqtNmzSIFjVIkw!cU!Mi2c!9RMf=hVsY`+TW#aFfC+RDoJp7Vt2&aakx zgJ#BKwP)LoC^)14fWam_>cA}`LYS?aZfCe~I)|_*1X0DqBdjlCC`Xs#X@8;2hbMa~ zMQTjWa2X_aOxI#xBs_>xuJRd>?$u}h9#SzElT zg+7Hl=Ng-S$#JuCQ2Z&;@8N8;3rSX~Z7?;pRe5Gg7G1fOVVY-#=Nih4WoD^*Xtv|= z$c>e`*QdCeGxI#2*?m>6%K1$0czO-P3$ru*b<#1ByWFqGGj{QyZ+~e%ICwl|-6ptr z^4;oumg4o?B37>8&E5tE@9+*`TbbbTK4nL?u++EKE^N`MquL%63GkU^Gls-?x$Llo zY}LiXQG9vvs*~faN?f?afY$}8AUT6*5i!8g2(AE%1{UBk!1wU60CZLb zIJt}g?#x~nFoEg}Jb=nIDle!k|EIF`DT8nMQ~yJcd*m?x(tkG&Km@F`K=@N`0Go4q z4mz--`o;#>l)V7Zfyn~OV#+Ape1Xb|1w>160LZz*0v#9-+~xtBe&Hk%tL~t3i1M5e zG*A`=L-l2Jj+yd+vKS43HUG3g2uKge7AouP8_CB#gZ#t)8D19XIqL5n!4-?g0Gq=bc3?n*@IPhlorC5JmodNsR6j#-C6{6V zak%E7Mfwmd1p}+N}FMnTK&_((|{liX3zwm(I`66RL zK>FoIR8Bwbg#ZBa1*X=EP|bQZXRkNu+dXXL-eqG3Sfk$yk^ z6n^E0;27ffisaANUw{AjBl3Tm`)%LS9Kii$KYz8QedvGe<&E&aWz+hj5B}pnA9?)o z-+x{H;pex%;Fmw~@9@8s-{(K$0V=boZ0D%`@ZYqD^uvGs^%u@_d;aYYyL`qQB##go zjck3&AN@mT1Ud3wU4|Hn3wfNug7PIn{%`uZ9suv5`USvfU?Y9AU-;kK`@@e6L?j>h zUVi|C{P&8+Q|Sy5e{jKp1=1%Ff$Sptz!z{`G81i|xH-V!Rs+j2KjYU&9wG{F|73;Y zChUX*p_kn^RbtF2bNM%0)PMSe0@}yF=jzmc<|+OI$7piPj2jl)a;V}Ec;A%}LoWwdwa$b(!hrNt`TXZ6G?hTMQMbd*MJ z$MU|ZoZP2nRjQHv^2!&hiSn42r%hYsWvt{a*p;Mi-p-9v?&mh#qPKFWyF>XntJV26 znjpup^LG*#dC&v40%{Hhw0W-OV1LcI8!*m*$vFzeY;l^aDSa*)Z2j?j~F3f_HdVw(~+_c|wjr@hrusoGit{O1hkZ z60kBW=UxdnS(6L7MEB3iB`D=hZsam8udD*(S}$!!k>wgMJyt2?)+*ytIe*H%To$Ej z%1^%ROSP9rtK3*^mfw8EvU;f`dNVu0MM?GMv_-kn;@y#b5DK=uuG{NPvmAzDScM%l zYw&DTUNnV)nOiNv!9UwtVQhy#z7)#ObM>!o7ioQMa>GGmh?g5Q6OTDH0>2hkQS-a21 zk)np=K4r(9dQtbmIKHS?cYj`vE%mzHo9yUNPwrk_jt13t_u|4(RCl|l8B>T%zC3#y0Y+&>Fh1F9z!IeIUD>8T+c^O1`Xjm2m|5(-Te*z*F) zVBvV98Ub_y4FKna>gL4=R!+p{MQT3v?+z)_Jb)E?AD(gnh)NU$b`c~1szejXCjdO5 zXb|6#1_C6@z<(hZ=q$PIunT1UB4XJE;)WGyK$3BFVOfI}H~vE-WXT1V%U7mAA7s&4 zA*ryk24sW_Yd;iEJmfq>2tWUu3uvJbo`9g1d_IDLljS}y2LRO*zd$EgP|~76gqBDO zPT=QN;0TedLEsVP0-Q0C69@w5S9jA0(s2Y0r=C9F$bYzym!}cr;|S`?Ds>bEU0JQT zfL3WoAXFm|@)4vjEdh$gf=@rW+Nfq=fv{vhbVnZMK@cw>FmhY__LD8QjkRcWr8ldV z;wM)8$G$X8`)6O^*d>>FFCPIvAURtnz$*dB?Xa&x2iCF%@~_T7GI9{fHy2M31b&V5 zY6YSnjekIIWK!V&@W46$p61Bfhz|JB`IS+-fX%By&*o9zwFHN1G1SsAHlh@ns(uNL;ctx%Pu_owI38v zH9}ZM1AyVi(h$I^EKz1Y0o~N9h6vX&Sa~BCzJFXbUno~7+I-+R3m16Roh283TQ$!w zG>`a!*<*p=Jo$wS?6v}^aK2xnQ)}FavQrJfR*7~2yt#nVXGiPIDxSLXKyIu7K;dH# zdgBS@3M|Nl8sHUARm+l9Wl~&twlCxpfT8nO8QCw=>G-9=D#7=CkbtX&-%1}pQy!j(uKc!j>ySbrrt z6(JOp{kgcv2E4+SExQUJA4Yi`#7#l83QgmI%#PE@%HwG9IqILhZ7V1N>R+vCJoklr z<7inQxKeH_bUyG-(|GXG=5m=&b6EqXMViPbOIbs{ zTrQRfqmQmgP=#)ueDuS~f3rc}gtgC(fDcBFe~l?;0wZktsiL2`*;2P!a?xo$Ck~}m8tPKu)UY;kc&|+`IUvzzklyPy8s@^ zi(HItnT36sMlK&^ZprQK-_H{xfE?15Me4Mc0Om7>!myE9I=4>%Am0V0uax~$lJOLc z*CAR_{Kd`We~H)%hEa22?b5%PbUS z{QluBbn|KC=n>iK2-C_M0H-8?RlKjRMgYBlrSmJRBbl*(hshcclmyLLe{jQQ1YYAL zpLvO4(T4)jsO0U(wts^Ex}R&Mp3B^(nymgAXRq5o@@;Kk{?UCNY9J5A_+QHKFB^ma zjr&?1BRdyIm~nvs60d1OnOh@m;1^(9gO)(}&vkzv;iz=jJJPu z|MkD{NAFoNf~B&`A3I5aW^H4I#uTm8>A%_!$IOaSKK&bCLH@C^YT2T7 zxXp#4RgJ*s!r-%pR&pJ z7C5Tgan%9uA%A%}Xbs1WgY0~4ae-*g-pel>@xC1w{ZqF_{^HC0{kv{O>ulTe`0cnD zWj~F4|KEV4bqs(4G#6t-7yiSAE9mzOO>J{N>Lrw}nOP2OdZEc-1r+ zZMULl>+|e=o^4H&;m>>L^Zb49e(_WE?0xY+&;A$x^M4$C@juTuO+%q`kV9<;?6!Y? zV{GFz3iXIi(*s^dc(JCT;~PJC8^Qzdi?@s;`mXJL@Ujmc5I%Ufa@$eN#ux8AjyV7} zTEmF|J;U!aKaM&7^Ik^JKkxMqzJqijS|9xHv-iROK8GLt?{kXy0D%AaasTO;1$o3r z>>xe>uzx>zJ?anUt@>#vF(d`f&kUI zf9x6`MxICb%ol%NtP`nFm_ObM!FsXe|k`5AAYZ zETktHeDP0^MYuUy)4P9gS@uKw7gtAo{NffLp3i@iBP)DpZ$EO8(f9IQSRYF{Hl#fOIQ&z0d2l=?cRoP+=EzcLUm|SlBB%6lwro;eypHlFu>f5r(Ev#Yu?0I64=DraqI$6f8&nP#*r2Tf8Qr`7DT5i2{(sT#isb#b zc4^eEgYZQjBss`YTA*-&1qzmrEPprwsYdVu2%mBYgfnn01g}AY_U*(oNKi6$J_N}E zXVk8pBYtGWM}z3=Ihavh5lB4*~_}Z@>fCG}J27rp{#TLX-JsCjfM?N3@Wk`=A;unfvJwhDuvzkC8PBRQry>`~Y6S^DwwGtd@< zuOqxCvdav>9og;IAC|w{#~biy|NnD7w)`?Z~B*iD7gN!4~?XT#&O{@d#D{AUB4j^bVkz=gm-_mOcmy0`0u{N-wa1K?1+14_Gw(yl`Rv2LUD zlY9-F9BPOu*Fq@^QJfXDfgl5KK}>pRe7hlL0}IAL7!lJM5PxX2pN{sCA;?()$TVV( z1Bwo)oJGMSptjJ#SEMYq07iaoK$pR5;Ui%8P+U-)HV&nn ze|#c5ExrVf4u2KEH^2#@vhY1{YN+P?fCeAU;%DHTp^oAJTp8*Mj|10(M$S`c5HKw^ z1a1$_g+HTw0@s{-1NRJ3fF1B;Xe|N{R1ew~>i{oAcZSKptD%z%8VVZfP&|=GLw6A@ zpr+8X1PAy!XaMU2--5n{^7N%J073-(9tMk$0w0IBTz`0=!6vkX66K4FfY5*-M0bVt zAgE!S3j+uim{`ILf-_8ku!5Tm(?!6+&4XDkP~eusyCodKZG;aXT)>Tnj|Huec$nuR zhn%snL=l83EP<$iu!rR$>LA2nm5Wyp0VxZZr_MpBgN#VpdeJMLf|NhUF4(NS{(&o; zW&OZwgMWoyRwMyY{NU}TYYDzsG?~bYt|&}lYZss?o}htG=o`<7Bnn9QOG-9lLeiWk z7EeIHJ^n)z=b0}pdC@uf+hzq8efhvQodft^aPJpxhrvq1kIe??B>rqn@fM{ z@Qa-nO4KPuOxZ;Gr@qVYER{8J;BP5qZ{f|@S${!k(^eQV$=_fgjL{ADY#?lDd!Z81 zZ!?e;X{Wq1%3s*_`#x=(E|x4MyeV5kWpzSj^Pf6ld%-rx)!*(5`Aj1FjBNCM{ zyMF=WZ}2S+vOWXrmw(bO6VZNWB>i_9WkT8gANn@uDX9Id^|zE1MwayH_-5cyz~FHp zjI8KSsbn)L>Eo=JZDC+P5e$<47zIR=be5b_eici0AKw1-MP7exqT-34wl>`j^$^j% zDbJLgrttR5$EN?vQC>}JUDEclB$R^3!GC2AgGC4frc1;>HYIO=(U%FfUEdMeRAI0d zNoy&Ic8~tnl534;+8Ri=L?YSGR9uiz2IFI%tqU-?sbq>Cq2uJpC{K^EAx&X?x!P=l}Kp{y!dl zH~sIAIKBU0KTmDhZ2zIg?)~T%iqm(S~Q+M4@+lvCW4>G#cYFY{vhj}wmnmvKn{ zV;-9SC5xx{KM)XG`p1L(FMmlq-jDSEmf;ss{}b=Xi~3K#Cy=8r0zAL}#25Zo|6mJ1 zUM+HvwEO!D+))#@RH z7LiBdR=rfdZej&{A3}@2iDW^>4}0|$o3`0*EfV(|=wxZQ-%CA`$9Qa2Qc&9fLh%x9UXRXKZY}kUOXhsT4_Q=CUPb zG6Tg-_7AOxU2zTrSj8nBvjBBp9>!DGUN2sFR4PZ!IBl`*a)4rP5@7=w~K`X)#;WA%F2MeHLr1b8s45B(rzB zU%*_`&01mi{8sevyb)N^nW`TI5ql|}_t9p`v3oC1`rba-5qfFngxRexu7}%R**EoG zv(LyUx2?&hA$lBkve^^Nq%QkmSJH@hgCI|!0A8ou8}17k#MoVg4pkBSbjMF^w&`JF zJa$F^l{=if=#+_w^FMoidi8m;Um>K6Q%V$2IVpWiCgE!A^=}J!Rv#pV?grS zmirtRRgA)wZJ^6lrlxgSWP~IUFDB0D;B;s`r~kV6Mx7e17<#jbJvJ+C3==Vc84QV zX`XgV(k?Mad#b=8lxy_qT?^m9^s#F=O1NYm1q+amZeNT&v@Viy1saoG4lH)XPivDd z!6Fm)BUjfODbegReWJbYbkY*>De#(kXRnu} zyhJU9HO2j|J1<;W8R~uO49j@>ZrC;a;OvEgwSN;)E244BDFI&h3--!{qmC7>ap0l{ zq3;93>-r%N(t_l@`|ZqPSbjOYqb`&-Ztcw2q2325y${J2y4-%vj4WnjhUbz_?E3Io+HSV-`=-(lji(ypqsuh4U?M$K`Np;H|`uNhdm2{npkmYCK)H zhJVR?t?uIfw5v|+5pD`yGAvSMTFT8@ECyDvl{ML%4Jgahs?*}YM3>ak*3K;E>Jl3D zR9%b*aG%Eg=%qK8TBA4!jVbQBqwe|X+28`)ue6ZVvg8Okoa3O-M<_GmFP zzd4*hN+URFWtvAi$mT~%6=XGK41OZ(1Am+8dTKuT=k0xf9%*_C*MQW0Kq&b(eP=mv zd(JkmySXafy?tM&mu+`apTZH;wq4P$AUMo(e44HuV2^WE3<<#;QhbcC>vmI9?XK?J zSh(JHUtgSUw-RIYV0jBgHb)f)V04n)$_#~5qQB1UR-L$d@9@b=!*I=FUQ@Gcn}2rt zL{Tk&r*EpZ;UqjO_MA;2tdAxZ=ayk&INWZL^(wNMk}FCXS?c!Elw753f#LQQjwqJ^ zXQm*V-t2HcJKoEq%Yw()r+vW=?<$o>msW^ZX=F#dvP>y3`@?xIO+LjP5oCvK;TPx3 zh)d3$`aP)6&4p~DlV)ahDpx`zRe!WRXrGnaycCLi5Sh5aHS;Uag?bd zWJX0^II%F)k?Sj8<-mDb*gCU-qe)gfJ;;&KU~tB&z%t4U_qZA~EYkNeDSwx_-#z?Q zH>Vxv>MKe1{%i9{;vUXMobgt_UA^?4$ue6!xnhfRkGWb;r|XHz#bc1*ahc$87RBOu zz--B8us!H~gFCH(Jkl6Rq=ZB=R4p;>5K;Igsn}bvjF{_jL|V_o&e3BSF15cMi_l8F zmpVn(7#>y5x5vJfwaMk>mw%OxoaD7^eLDvI^Wd8GD9HLVSXU8WI*PIDlU;&MsDx^_ zr({H7fajuhsH^qyEZ-3y{)+hN}6Yy?N zt_2o1fu+ocxScP;+1NC@wPnbQmgc_VeIaBP#WsYxoqQ-N6XdaI3_FL6P-K{Q| zDSg6sW+~IlZ-qxK1%IAFFIp1P_dsI|g*@Y%=k)Z{TdMtn=VB3iI*}>R8FN?#RpLrS z)b*`-XPr|}j`<6xx-tncc5NBbehQJYDmOuy;^b-=B4c-^RR%=R7$N(Tn$0p)vOS!r z;do?Joi>bm8bixCJorW~XT8CTs|l{BL{I5JPPUTwsz|(bqJI;$Vt@5UXW;4?C)$`^ zI_9Nd$C;FdNKS#$_ITjNB$&yrsGJioPm?LNwYrC0&L#q$dbeJU%&5`llZxN&Y@VPwN^4wZ$zCIVo0 zY#?O$1Or`cx$C0b)B5z<`I_YK4QrxFZfvAf}D6(A!c26 z5U(cDrGJBSr^}qPJD?mxuQw&FlJ6F~gOS^W$8bntv%SY-yoc9hk6P57EA z^jx`_c1&nnvDWU-i{Y*W>fRA6EZoh)?!amKKV)?5xU4>(YKeyxTpWEf^KE-?Oo6&8$guac>!fdG9 z_Od;?Hx;~YH1?^Tj!`(LTh+UwP}ag4xdl83f$SAz5{0DsN^6sdHQv)E@AVS#peMu? z8h`7CTWPb$!TQj7TrhJ4>4u*GS*pT zFX+!E8P(yuUI_Cnh z-j?szJHbzGUUIl=*iKj&&g^j0^72+$#Yk9ZMy;I}E2rpn?e}sfpUwrE$zh>Pta6Fc z!?I=STP0Njo#_rGtyNiXHEwc<4oODTQAGlEq}si zAU1Nz-Ui9=gw=L}=8$_A6JIXOyG$S4rG_!cBA+1+atirPIvLn;%kTGSQajP+0)>Cn zZ%@|jN+)D}FX!!gX5;Hzq=RMSqp`l@x``tB<*dstOdROWN6~Wbb{%z%YFb3mL|~;8 zYFI8xVm-Xw&^q^Wt^7$!nJ5=us(-~$!tMDybiruL<2jzNC@h6mec~{kYZ%PdO;L-g zG>$CShZnP+-wYMiIJ=}lcv(uLdMZR*!yvwT>VJB<^0S@WMCD4u zxGPP3dGo|1-{7JqcL^MxX0^3D;V9Dv)--5 zlpU(G5*Q50VNdrZ@j>p5r>yL@_ha{%HFhwiFbqvnFhytjMluhLeQUX3jN&n}{FQhX zmy{-6ub1OZ%r(9B?|)_HlgBtP7*`q&2eSL~5Wd}Y60N6FP7mDESDH!5dOD{XMlpN! ze8$J|QcS_NsC$Z84)H$dyCzBe}1&+*j=_my*gpF-F=u=^pzZChr*ZOP7CbJ>O z{MhK*n!29pDb(fCHPnC1sVt>Qq)(bJSExzw@>Yh~j&4h}myj7uJ@X$1De-6ZV2rfX z4f_4y`O__<3_bLpnppJ5W;L>KMBt{7y^P57&R6%z5(vL%1h<%O1__eDKbt-DQ+ z*P}2Jyta5in0VrFg%z4;9@3O8jigbNvWUswvj+EyRS1g8>gXX1)L3c{M9yWY1&0R2i z%Xy24z`)|NKRJKT1Dn$nn`Gwo5O*UD_ZPm7_wHFA>4%-m;-kK3gdtmVu8hj~q~Sy8 z`rhF@Dc7&jd&~0a@noBRZP$D`E<3fK?f#*iuBpsE{orA#qqU#fJiXfMLOGWp=6Wo{ zt2Y{Y&Pn~wxbTfT_HLFN7<|<~os)wSONOT`*Uq8ab1%UbMYqrSa{4;dL0EqACiSP zHm-lT6Zg&a=a)4+Z0y<-;WOMSsTAi~6|M(St3g%b7Q8F2W%zHe_f^K?orQdIkGrO~ zm-C|EH3i*3_;l-~x5*7_o1ylqDmU}|F%(aN)W*;uvsS1RZg1n`UX16*JiRlswiEX; z2qL}DN3va(Gyg6XdrujXhmO6c)w3tJO~`+4u5h&;1M3covG+B%2rug5=9lX>L^*%x zN5|dvl&Hq*J-~`77WeY3wP(SM6y<2A%l`6T{AOy@d(lLe+1Gb!6NPlVUv!tzD|hs- zyVWQ;UagcmAH2HhcS<}9S>d;aYeRIefY6PA4#|!=_ue?rc~m zC-EBJ-pSh+guY|qxUBdY=v=;UM4*LUvWrfda*O{Ib)h83etuV8ovohe>c)kSOWA5( z-dkEu^xFDb4dRif&m01CnGs%uDC>1`)O{R#YQa}izZT7`dd*5-Oc zw!H|H7C$+e8=jX#IyKkwYMx_3rJikW1t%tWQ^Rz*af)%1mU(Vo{vCT0Wok!P`ivb5 zf)pfaKOU3Y+YAm?(#uBD+}NZ!TucyiF`MY%{3>VSUHPPaosM)$vF?@}ZTDzRnM{>^ z$EWMFSh$yeJsmOp6oA7}P)UFH$`n?QtXdHuKZwc@-3SNQitRmJior4RX1d-gIAl$w z%(tu-HtfPZw&C32S~hf}+ZRz%y86Yrxx38a>&}^s5t4MHx!BeU21|yO(#cMo3Ar}D zm*+5&?Lu_*;|i5bb^Qk=52G|(>?7;ro9{v)+K_fO_sb zUGK`n0=>4X1vywtifdR0+>6r8N0rg3rdYN?lJYQmF17!5W#UDgC;XgdQ@g8an)pgb zuXUvCbBO|);J^(G7lXl-N3ws zXF0!)jAbb_wZ+j)9+uZmkuUK8tzILCw=#`K4z^JQ8?#X8&OM&#dl%*<`RMNQS$S33 z9Y5jAGahWg(q6<?iET&&KL)QYda`1Au%bR6Qr@w%0QgY9mNz9EEcLeG zwG>{n^jKeA_hx@R=x3x&?n959jUZmPQ@*K$x4#zmwVj4z9~{J!kUg@yzhF+0?sg{B z2blN9EmHX=Xm4)ZFK}{G1+kEISKQOvYyTWN%XC7V8L& zcSep`ootP1KEm}@%B7IaW;%s^%Tuk~`FKEtH**f6(0qTz`rxKyMAKJFWVHh~*wXv( zeiFu*B(n4flt-pU2Bp2~yZvnQUL;*`SMsWuETev&Oo;38qZZvpQX?^zmxJeBROX%- z;v`jTU|!uy!R_684lzgF0zQ3KGc(qujD7-z`yO?jDLSW#YKexr^!`fN&eOcjd724| zQcP0b^ACSR%&{gJtx)Fk{ozg%?6d9UA6nM9FeSH6#EH7#c|uih&8=o%xo`8WYhU}b zyZ8>snzieIJCa1eoSP-wR&w9%TM;r)Z}!I<%vjm3bPf~B8q>q#-i~(z%L;n#OmPcN z>O3Yd(369Nzauwh;trBleqathVOXA2J~(&}DdB$sg?F}J4X~c*$;PL!S=y6UlIz5{ zBJK9g_M#lRA*myrPSR_@o~;fN+MH@`e&`32NX|@jx|}P*SDPIZbyWqwpifGYhn}k6BRz6ngH=4c@?9)O^OkpA7p_X$V#^+{g7?_2 zFRM4y_ARykAhH*)iS^-LH24(SOZs$i@_~P6JMYM3SQ(EiDAS{c=`I&hi2{T33lleN zm9Gq~8S^w;ceXSn$EEu$>60mpXK^647pzro^};wkQ|2WKOOdFf_CD5LT}12c>`r$* znxml{PPiU072RU8cP3tyZM-Qct;ZwF4t&;BD$}U$<-ok#VZE!bDF_3bR0ymcrbB=J zI%Y%wVy_zo>$uVivlg$&!Bj^l^>`eV<6AZF(JAdF5c0F3d(?Tk@vjcU5AS9TkJ(L5 z4taK>8=q9tQ|Ir(sp^7oZ)S0oV~1HIQ}<#2tZ%$_o5|3c}Ie48(D?y^g# z?}u0-p3^nPv6UsA!-*je_k0{LUMhdl%D$-%Td0dx4+eQ?aM=!4?_l2T!$#hPt_TDJ zdSOH~w65!KX-$S&%lK7j*6MIE(>IZCex@`>tW?dzHj`YVk_30^F?qGCOI|Z~Ky+!^ zDWMsYb1V*o-S0;#2KVFp`Y=cFi4Sr=9R~l_JUZXjs;8OXt}SzY-}9i?9wvWQ0poef z4(Z8tw>D45L)(gNq7>3iq2jKMvL5(CG$>UTlXpflxNu6zHTJLGNod}bwdB3=j?~Mo z&2TXBRqN888B;|4oPle)yQrTZc1Iu27l$UVXSXIWpAjRBr{#LOg?J6cO~L7^R}l$(Fl`|X;OF3dGpc1e;#^Sx9)aq*0a#8TGDj{ac`=+c14 zXVUz-l%E&pp>@%UD&Md`4DR&r|2$>t0|#b5{m#*Z&}W=6_tM~zF$Xu^}`}D zbB+4i)iQw8o()kn)rd}-NTadbVV=r#@zseL!A(BUM26N9k}X4j=_)VaQ1MrHia4AC zgi0&1hTjIC&>^n=z1!yN{ku5QaG!Wo%Aen&4LO88fIMTZGs8oL+&OIK7>1foBIP8@ zl$=HKF!n`$TnMUeg(QEugat{DmsbyG==UAAU@2Mz`ZN0wZ{afvCKIT!qJ2JAm)%^H z=u-{ZNghqAUtwD2`-V5ISSs$hRv-S17g@>RQ4qvU?ILshxvR?U>J)Inafapf%U-Ef zJ{?DHX~}KvU>WjWs}ffgT`sI3wst}9L4;KvXh#^5I-0~IG1GrrQqb3I1jNbhh?(O2 zSr^#)`z>MSYC08qv6Z-jrxdS6J7!^?->fEm38c2H8fl*J!}Y#d_dy4=5m5SLy+zWq zjk!JaIP}w(*vBt%#fQXkLHK#)5J!cZxxFYUFHz4?H48jSoc(_Bvv~z#v8nA2meOzU z(FvQS*ZdTWQ!alFUTT^|<9tGYt(%8cKb;nZ22LKZeT&!i=NpjiJ5S2DPgy*pK*_6i z!>M}`OKH+caX+Ep*4dOEuL=v~n~OI88f$Kp%0-l+Vt1C;YUdEG5iHo2sB$&T%nwJO zd&R~UOVTR<5>WF5p+Y2+iW~VAz%oZX*_w6hyKkq&-8z3%*!zISvy{_i7Lkh|B zC-_Y1#fAkRUNqH*X1caGx(w4(COC2!4{8rQeaFjpESf9wn*W`ZpB7=|o*AxxlgI%) zN7?FwMPGm1_ZNR+n4egApbC}D(Cg4APPz&8t^w$&LPD$=v3EkNpd^6?tFz>ui|#en zbyWKb;f@+)rD`t&zz|P%y*fd1d&+v;YcF}5R5^83&6{+2_nz_Q13qpYiBs)1>Iu3P z@SfWIfMcM}z7N12;oU6D%?1GumO-zdEdiicYfgVJuD2k&HXU8TzP&m0e2gc{jHIOI z2;anhU6&p@Qw5N_oC7<3a^5F42`tV@+SI;F16E#xIucP+I%``` zY+78sWvj@hWq3|OU^w6!QIT#r2+$yO=+rq&Xah&>nE z4sI$l1e{tNi$s$exEspIVm(D;Rf+v9c91US3$B)vkVmtfqwdf4belvIxQoKbQU?pm znhM@NP{EVS)lo!JfkgqswV5!7aMp;XP+osOFjK%e+tTVtB3MtL!V~mb;?pklLr+Tt zT(D;^Gub#7az;rpn7h2=c%%;U(6VI)65+OVpiO@83Y5BwEEadoI!P_nu-3sH>N-F^NO`H)Ll%wG4v;QWt+r z!*o>1+j$fp%f7yB(5=PDvt&Gw{%!!Xuy#z;7uV5puub}nFc(}sH#c5;(&7%q)sdu? ztPG1tWc2_AUvJ9?Ow68d^(h;^Q|GC{0NkCQydFw#+82oW0MhOBo|{W~K&w`buC9}{B+HcP1t&W3=ufId$bfz#IR7s0jQfJlIG zSnpJMi+B(7bR0^-m^Bh6Tlb3og!6uOylABha6Q~&~;4$AG*JSeJ)2M&gKilal z>BkFCZ`^Mf4RIVW_>!>q=Tk9~>E0!Q)2VmoS*R#YN4`fQgt;EOsS->(bjwvdCUHSe zQkJElFfq>bQ&BhkBpN^$j5L0+{$*ouYfxA~=U<|^YgV+hpEYmvj+hrP0JbcWOw zgUYcfC?7Tkit7ydB9CtdAP|2oE*+Ogc__Ki@qm=8MK3D9sqRNy7h`?@NdSICNHER} z1+vhY;-jvm*dC>9)h?ky3uQyH)`v6T8JP}QdJi#X1av{6>S{L^U92{hs)c3TM{DNh z6&*t!VB>S;7+h<^a*GA&swCrmjuy<)%M@Y*w>fxbkQ}=+o+9g(EH-~2u=JYxXjS8K z+%=3ZnLG-y&};&83fq)LsgC=vGo-9?PbH*~?w<=2A_6+wi7+si$U{S<(1vdYIxxTa zr=KU!(s%);i2+~y(%PD}j!t%gzDZgs3+aj#E5IpxIy%i&>2)sYMGkaeqf$<~Anx)r z7tqRceRU=pGdB<1olt)ZJmjJ9^G=)L2iMKoD=^Px79QXmz#bkRUvWOQs>P>7wNbSn z4m6zg`_(5kanigY4ryC0h!Hawc*4ePbwlx%maKU2-2VhX{Xo`fA-#yeYb~9wqgy zl+aOui8yh-(OEM~a`hWHfl|&bS-PzA$l`_#1CP1%<_0Hf7fY4SWZNURr>0bpt`O3?pPSrcTd+)DS=F6R^B0d{N$^td{jNHau2*0O#;NX&-YOu9D z1gAbXJ+FWANEbC4pLnzj*=cHf16vAv6Qw3T)FFq2pl<@QX*&TEk@+hCiYstho;n0X z1iEt}g(Wi<5cSPq-IPDIWc3!`MezwhDej|x3!&I-5HRegCwMp)@Ta^KJZLJNiK18y zveR+rs=k{#kBvtry9;{`|HPogpwOtW)@3VG80%iAnKw+!(0E~*mmJjX71Gi69h zlgZFSshOQ8mJ=KC-F~uq!X(X~V&D-8SHbW{D1B!oe@+O}A&!td@X))<<5x&vSS+lOcuqvuD>H;R?U6&cTd5Lt)sjc*1`z@AMTWKyC^N0<5v+dyxum@HhMz>E9l? z2b2&2`|1sEq5yF6zZ9usn{(1TXjw0Q4KSIvwx(ZCNQMQkfb()BSQoQ!W}r{U%Eqb4(g$y|#e_#}x*l~PJL=%Vhk zqGeSU;FCchgB;PpKzA%kMY*p8h-X2!nrDVp1^X+h^)hF?Naw?9zn8J*DGYyQC095R z{4vadg?GzY@6V3R&0Ak?DXn+WROTSwH~g*dZ4lAIcIEuG@vT#&-CGWKBWhd`NFLZ* znEN0c-IrbU2KGhrI_9520hAG+33$iP9h}_{@pf9UM+qG@TE{Fo!a)pN7GBTd2GbA|DS#{d6Zd&wHb-pb@H7w==GhU6s041kc{H?JX<;g~-kLe5 zHr4;;LF%S^rBCzjSF2E*onmgFmhmIvReu$R8o4C=Jh4`u#@^$x2Q_~Kpg;Qx9iYD| zRdCD(;3$vZ2Hc->z(1?&nvN z3wtG87S138ThGB(M`8ig?cm+iHKxG&%nveDAHFJ|E^1K&IMY)3#j9A0`if*P@5?O% zgR%P1zDZ7!vXLOVRTF=2H-Sobv$q})$1$F!(TAX9-zaEhs?ZXGYqArT-&(MHZy@li z{Jnk=Ao!Qi7>eSZkUb`_5uRTaLHVMVT;86b>@8bz=Q6HubQ{5yYF3{t2k#x89ngUGCd z3Op|nGpl^tG2fW4+Qj^3LrtNK-O1S&<{bolPVy`AVmi>d&sP}AGOjn zUV0No@mf~LoAeK6SDK0_*Q`@c`T7+|*aqG6h6R2AT%vyg*BxSCWb}MFXktw))mLL? zzxqn9@dP{iEqau$GPY9+0X)3>(sZk(F-X`Zb{6vkpK0oNROsz{*VLrmXnJ{ux(=M6 zd^6AcaGBc>IS7Cq&FV zVupeGa%^JE>(6q%8muNr!?t^&wqLe3Or_aawnTsGgF2;FgJG^SIDgZN$zg^T>BbcX zY{8^2N3CGfe>fxJkEZnbBtIDe&z0x|$H?YxPyAe3XtT2k{I?T;MWRYf9_>HpW{<{v ztYg;H+-AtD)2EZK{r7m~iFjqS#UGhbffG@gD1`8t3h}hDZ9uKV@O1G+vv8Y|PZooB znS6iHN`APKvJf#T;@&hVz8+CFyqw&=AVNmC!y&FDL8rQco*{O&G$r<(t78{%w7fvT zqzhzlP7|t_VTlMD6KM(RXVnoJ8o5}&e!K}veLj-%Zca=yjcX0e%*@UF2kb9@UmJb( zJc=UH;C;YM^HbL~mffYd2A6I~P=^M?hFX6#$?*kaRMZCv&K6fy(H19y}uWv!V?aE5CCSqtaxD zhypkSIe*{xYeXOc!=>aQb#6PV82~x&c-=3E)yx)rVwHr~(u+v#te#P}UCLwnAn<=^ zsUfl?l=5-p2UrBKY8v44rEPfg5!bMuIl>Qmrc}labMt~7ir9W1h)!+?hE!HBPA;zz zNM~A~S@Gy{ol9aIU{m9&Im=jU9qv4yq{|(*7R;1t$bIkky}PC5oWZ4-72{x6Z%EXb zN=cA}hYdW>Zr4tl**`q#}Q{vp9j%TE8w^IxtQVUkqjY(Z06__r6U4CI+aI zQL~|@BCoQ2W)MWp?$MWqdF>oOfx!nj<$Gznh8$LzoBDh9MQ|d)V!oOIoMyC~bv9S#sdf zpz=~#KiEBffj5L+o#2kaa=U2v&1tB8;^~*n}16!|);@a=U1+ z2;lXiUg{$D8!iLvzK#$zmnzCX(Vhg=yTM5Dkigu1!4pW%n#0CB9qn5bTmmwPQc|0* zc^6qi3@F$6_LjgLL2X<5sQiL?o3&hNHIe+ou)=)%-^j|B-CK5_@?w7qZZFe>iF>Q) zjD>H)=Hh2rKCn#&Xd241fmsM?ycB-HGE788v1EF$TcKnnxo#E9>=L>xWU_a0jT`Ck zTZDK{jtMK}-H#paJX`!&;yKNhdzVv9ZYazl6=>#6k&X3dCb=4WD8(9&4H^ntt(m@> zM>49Q!{gtskYel2KsSFaWFfXdnO5%M-6xo1N;?QJEw4 zWa-WocsHI*9-_*gE||vX%rOxC@$sMxBrm+;!EsY|>^rK+MIe6$-NU3Au>)ovqt8@K z_a5oalr^M~Mm_VwIB7C2h_BY~6|@;8VtHF2hgy6oPl+H}R^fw2!$V1=t{1PM7JfBy z!`(Juh{jmagv3Y;P>YRK4%wYW0rHN>e;f2@hP`Dce#AI$e$@A>4+yT!9uqFH!jXt)5 z$)?~P*f*}asQ7pn3Rjed=g&Wj{}sgwY2B8a4>Gr68{d0hcFDH*L3)ITJOcp^`#fC0 zHEo1h3@tBvVbA5K3bgKdc?z-(gGj1vzw!kHVPi~PUkrco4W+9AhS#PBbV-6x2(bDN z@vk=t;*_>aMSiJN{ZeLHLaxW?9Yg~w|=8)L26eu13eSp++ z*`wfsfTsd?+QT-%WTlL@ky&Ls*oQ!IzjEJ2#2k)p9m%Tcv74GRf?m%9&sTBZiH*vZ zDw_49!&iTW`dy>WDMN-%La*h92${#mTOn6rom!M{kaWI9etl2d@dOOvL}}F44{$_9 zfVh`4bg5ZqIV(hF9NvfmDlr);d2JWBDIX**dhK>NiyzTe+>DSy(n9N}Corw+(3z0- zb&QMW54r4kR~iGTraz>C>BO(QUg_JYbHSeI3mkug4i!p50R7zoe^>U{NZ4N~i@io6 zFK|4=CO;uVAh+2b!Md4!sEnH9)DM+;^`;^t*LA`)4-RI@>+V$-`mGlzQ=Q7aY;FN^cd9X37Z z!P026RMWyD$>u1G)*EHE|v=iLh)ET6E6R9@B|P4`}sdKHCOziq&` z%-o8+3dMfZmtkezto_|Ka$VZ~Ez69Ml+&&Ubb4>}`VSkS^GwZbA&E!=m9tW+PxaDz zb~(aMvW9|1gEHS)wVk$szIGXfd9Bw_AdY{`ivUID+cmI+`TRyf9E1Ratt$@L<0KzP zXZoBHgj>`gPTR#`8+d)?U2hI6>ZpZMyg?;^1T-HEt{Pu7&e4Q7ae@O;;OrdX;k3TF zb;PbLhB(u@n3ryI2V6RLI5?2rYmaB91F;r@ot7K{Jya-<_!qKEPwD%(HHY?tJ1c*2 zfKCrw=gS7D6mX%KF5khS7giF98xtVwP58MNdnS>zP=@5U(mH+({#4Xx&l|op9jyV+ z_dTMgQrE`wrlt#$8nYGW0gUW%8Q`3LE6wpmsjqXXnj;dsM40aTdhUW#v+Qv)A$XZ| z%ko4t20VMWmiiTgx9CJLc_`T#mKT2}1h``&zd1}@5WxQUrn$LGziaoi!4gbC;EG6> z_ZuWH#dM`7Jafn*96DAw8!F&yHTgSw?Ig$9r1im>#x|N5^Y&iwHe(BrelkeExP#)f z@O|nC$fFwtQv^g04+DFDoWBT+l)Kfc0U64wLBfE&GB0rXr?dTS%*uIbnx21Nn`Ga3 z*v5NY`I!?TMl@NN@dP!>E1xi|3Q$|(l0rCYB%$|o;&5LQ@*^xqvntj!tEnC1*1~(V z<`xK>mNlBd&UR05Gc0JS8W~;aw4lfI8|fXXKE@P>U%z&~HTD-aAUJYTv(TFLB$kqA zMg<{CEn{N+<(`7ddHBq-D>{ETw-M!a`(?N}zw#?D^^TMcMTKTAkA&ab@?xyrhu(&! ztNnl~G$HZ7O^7Y!8_2}8FN#IFD6!4EVK!sm@SVVUr2T^N*QCdXcvW{+&M)t?p%U(3 zX-J30^mMQDmqntPbY-QErwAA&{_~Bx$EIdV{0T{!tchb_-^$I1f@*(sJfM-5r zVH4eyNy>MFBsk=F35*p*DC5g;8zgjn%paR1CgGAbC9=P!EG3sV%EvN&=>83I+VSDw zQFo%uxewSVjwhdQw<3R&M4luKKN)6`k8r~UF8V)k7*cS-MZ*4RErbV;CTRQL%EHciKV71xSZEJajg$$?1Ot5GU^*8;IyUFDKYS zzfsO}>e@$3hR)8J4hY1*#R#1~S}?$rNlW0qb@_a7U;9vkwrjqDJDG|gRVt*d^!A?E zODIZV7q3a$W_#wCr+_mFnB5Aar9z>YwMoB76p5ZG!=je9rW^4%9x#AP4AMui(d;%b z;L*p_zwpf3o}Pbe!m$R*@?}<3v}1%%P|x%_ac}psU?Q(wOQ6SRb~6bn66gL+(T#g( z$1c~ZPr@pc=YzfpV}8~p^#Gox*@???3xUD;JHXp4nUuUixj8HARFkZJc>vZopEyf3 z?OKFVRq)u!@41oU+VVP!^~+hl(i=|a)!?9lK1-Ctdk%2|JrvgKwUz=xx zgW9exy8YexFo%B#f9ga}y9kBi8#Pzxb`Q>B#dK7i>4=k--X@)yD?7*^l0?q+O^M!D zSyO|_JX1Uh72xR3zH#2&wa-BYz~IID9*x=2cl&>>^`(l?z~o)uS?!p#jFUlC?>D8v7%+J+x9uQS9;rWgyLTfDx$p#GkCA(!MuN-M=r= zVVjYLZJzOwwon&v{K|SlD4bU{sCtXkBN*v>c+mPUbo`J-{7#dN^F3myE&Scr4>-!i zXGed#t9D-gfPNRznPk3>7VfBWAupoW(O3aotY?jGjlEX6W5mMh-1=rtXQmgnYtpMc zVq#?uCL$xxgL=|+FpCJ=6}ky%_a9vn76lYaA!QQNoUW|pTH zDnPBx+7us>&TBVzjA% z8I7Kv*`KY10w_bTTz)hq^-O*iCU}3BYP~B9`)RBIE-qjZha6?o-^&3@pCD2Ym`MRa zkC+76Aw^i`LVXW*)~-NJ>)tyc(fMTKFTcB(Jfk{JfhFgZ$RgP(Td8CO6uZ3>rDYq$ z);ryS&$uiEOH?fZu~+`_u93;58Nd_3{E@0_KA~H>2WhRXlLCN@q`kHC=KFv7f>$)q z5!Q9`LNJWt=vhGtWoOg=X%c}FW1F# z!1oa3vT*QCx<-ABftKuRhIP5B%-*g$#^32`8^0|-8Aw(m|PliX?z^K^O z$ie54M-CBZL&2oa==sk zzyi3Jz%qEl-Jf&W@RSies3rxfBGlDFjpS$R=(~=`z7sic(bXhSBBzj@><$UrRGOlM zzdXj+yOpR`mz)QFvhN1C#=K3bcgl)3slUL;L}s-fDny}Auz&|ZC>#Q@P6YJqi=(Km9o*<^vgGe_)xwEJ!ue?$` zgG?hvvy+$c413SNf)#9d-!+&Dl$Z>C<{Rmj3|8kivyeDM1ChjPv;t1PCT!bWp<#Nw zhy)aN07Fxcu<~XB-{tg%P}1YMZHKp|*I zA{F`qa1T<^e9dupu3{n`5+f+YKRj`rj5@t;g&;e)C6J5-^103?!H=0rIdBINUq_Za zkj@Y{56ua1iE?SuQ>mq1vD$PSWZIuIC?VbXNxFX;oD5Dre4A(HPA9+bO-Zk+_)1BW zl=MMGDt_lRkw6D5+!UZ~tB}ptWoD`r@JDv@#V(H3nD|@bSN*0uVGOMI5Ks?#)P-Tu! zVETV9wnCWY*fr@$#UX*avMDTSL!Qdq$2j*&lY;LUZwf1_8nH9BRT%)2S!xOeiSIxm zw_G4e6};!}0-%ehoaJP&Ws7~3$8%6W9wvoL#k1CIuHW^bpuZ}JRdjRVSL_+VqJ5wC zB4Bsr1c-dMM)6aOfy9%qDJhZF$w)X8W-5Q(7yjfPJa5+wt|fYHT0*V3U;f@Y2UIUU z8VYb{5NZxh)sxNH*tHSqz*LY=?d&-U)`dcE3Vws)M*^7kYBv>LBOGt_x)pwJ(oMT9C) zDf@O|w;nQgjttkF_O20fIvV=*+gyJ~l-!ZH&pA)#i`5{cGn8r#G;oBI#osXH7Geo? zGs0A<7eIH%H{~=NIy2kVd#Z4j?$R3BmYz5@1o6UFX-D3qbc?M6^#Tkf`)TaRR){uF z-lW|a0qx1H_#mRxhU1zVy*>SOTtm_aCY@T$oKmIWL$h&<&I~Fnz>QN9YJ`8ORw5J- zBB!0#;D?BI@Y4>B8v5NEa%rIIM*lfD>&R|9`-Tl2k_V+X)%`T({WCeseyRz(JQzC& z9Q-8aktq+ZV)tRZ_-UcyWz^wGzPg%Uo@}M{D$LEU_H!9u9PEAdSD0Je%})iziIVmnON^PYt^ZVK-hJ}gXcGsRZ&`Ak|=_ZK={J_v;*m)#(1-|+yGXTVZ zRUjE0En0NPo+2mAJea__?ks>5Q^&(lsa0uES!eOr^8NyN^5`3)d7-R^f7eF zYAAvfnL~{y->8dOmY+tsnzgOPdGKcVtEmF9JD_;&UgW&+i<^yH{gjRy7(naB0fYfN znm&7~F%||@6>6#~@GLh*WDSI;3#SiFTAUPJ0%94spv##1KmYT8_`d(}yZ`6^{>T3h z<^6w|yv~0!c`yIJnY@2`{lB=&S<8Q^_YW)lPxvGJxg6NqIYfmJjU$f!+T;G*Kn(on zeDEGHX~Qx`6X-9~HU2e@{B6Qo{ls7ybFn!7)J1>k?Di+}!t}T$=}Ih`9Q`-Ge<)+_ ze?$^?f;@Qgx9i&s!`}nLutQ#35>hsE5(<9;Az=8+uMWc8Y4vQan^n_*o0uRZZ^Jiku-`4!FH8EQZb!>a#I`g0O7qVe}fNs!n#`FH(l z>Mu0@^QSAr{l|aGG`8Tf!#py|K!LvrJI%EmIvqA{Au7T?+af>SH~XWL&cFW z(R6+}mI$&*6DCYwPktU5Ij(RoS#Sn#Hv_0S4EZg^O%Q*C3sAz@ZI!LD@2az5#|KRQ za2tQ0d>zUW7~?A>P#9h~(j^fdn;c|+^o1@37F*!PAlTk~$6vWKn_IFm^UY869RBOm z`XaCv468a8UY8Sl4cAT-zqLWvGq({{D^YDZ;6quMbw5w^B+TK7Uj&VQz3Qzo>Vtf* zJmB?HLmYnqBvOFXWYX5Y9vW6x!1p>H2z{juIRYbYGl;z<66r?{WFFgSyqI6AE$2ac zJE8ya8-3aJzqIN@3?z2+I}SIZTu;(~SG8KW2dJGLo&0@?9bgA?WZ#SES_oyct}hZe|(_Hwvr&@eIrW^TTf zX`vQInQaVMx?`2NN~zfNQlGEH1bAJ)4c{`J%0(Vaa{i5v6sGJs*&DqC1;@5{Q=#T~ z#Akp1A)|p7bsPPyIoOflhUw1lny@1QSqtL<>rl;12J)`EW5~-l8o%*fG!~^r9N&0p ztInx|M%Ti(#+|9P(;1bI@wjcNs5nm&)Fw>=4JKCHaq0shc3`0=t%4^??gtQ+Tw;+j zb;xeql1W3mI?YW{Ufbv4aHLCPj15J0u+x8ZM5n%1&BNQ)ZjEEDrRIZk7tVuM^J1be zOdeuwR^)ul75$A1q(QN7!l?3B_uOy8q}B9^eh*sYt!y-;*X+Aik3O0Oi4u>#9e=v) z_k(JvIc;%s)<7Hcy)(88DIP9oxt+fosJF}{i|?!Bi77|pgOVR4hSu4}!Qpj4>V|)$ zR#tIAYZ^%-?S(m&{qt#!~Js4@lm!*%%y+6zk%#U7H6 z;+rd#{R*H{i%fuEQqtcq%1i6#>8pPWz3U!$ZS;v)G()~Wrf${;>KED;50oFG|K6Y8 z8&!692y~?|*B(D*ReQ+quOVE$c#%cuK{Lzhl|mKYe4`g(slyaZ4G z3{Aw^EClU!{1u*G14z+tIVL~4r+jkX`;b5Gs{7avob{`QH~PM`YA@L5aYPi;;7jb! z(Ouql`-?kDYF_S7uiDS;?LEC8?WhQZ$&PkKvg8XFf7l)7D~{@H>lG0yDUg*Z*6*OP z695Yu{X#RDgAvaidVcxSMNoe*xR7it6@W~#3Fs-Awbw0mU}ba-l|CpSqb5gGvepF* z%P?V_?sBm=HK8Luy<$A|`9gDLvt!#na7FXH+K=UpXcuo*$;0+(i6He>%Nhbm-YsrI zZTUJTe%8<|vFjTyIb1V(3FiGVMt!*7Mj%1iXoQZ3bBCh9eS#GbOP_z>|{m|^D4 zf<>N(`-R_c6d`_@TzP+{$zbuC7qVNe0lr!5uhWF?NTc-fNokZ?w^aF*4$6N#hDx?`0y88CUmZok z#h`V!@`-C294i!m?n=C;CxD-%BbJA?=O-?gYYC-4?QJqK}2cZsvmFi5!E%6C|z$kb{n_5 z^XV)M*!IH*szp)s0M=8}`tczrH|2H3>N$&m`pCMxhK^Rv&d();EVV&BD^OIsaG8L9X3x*Z^HNeEonZeytywWYL5<*IkE7 z9cvg9X91RfQ?YX>#u!yGWoNCOizev6IIVSYkdX4TZ8C~4r!OZjpXEL^x*j}9@mfPs zxX7CHK^A{=<#^eB=H58os5G@U%Lg^)=?xv)4@{!WBdh6 zgTIjdEpM8?b%;)y`-;DP?$h-5?AYZS&`qp}5HEk{`q|c-F@w@u45952L}z5>^)AFH)AbC zIgL*)(fOOQ>8s~)$>X5Kis6vV$`8$Y&{;|)iV3h<&&Utrw(sN$=*5J9q^&cjJop~l zg(DH-qZad)x~9(!>;Z~Q{W#?&7?oWk5jQ?M;1rj>Jqa9teJedQ-W6_slG=uW>X|_R z2L}c6c!2ez5aUljuID6Y*Va@igze9&q>)ydjWAp3wNDQoz+#?sZO~1#P-7cS@u!^*@_P8`Jg*bB`UVhc?|wP~PiA);@`(M8>C${WZ`?T? zB`T3jbOUSP0i6QHwEMT1T>By?0J#sg(22@o%$%5i!qxBF2bDYfg##5|EJy$pf3z#a z+kAq}EK}sBZxVgaZ-!$H@!k%_U6T)v&0`4Zp{V5?5>$u@Q%S#>O%F-cpZwI%Sf>J) zc23b@*e*N$)YG~)LhT)4sKjRlaWp@t`%S0-+5FGM6E$(2{@0T0nXRccfr?nB?;4wS zb`oZP%=E}4s^WMT4oQ{wE}M6bDzGA<(C)SRocDnZEDck(F3F)Fi zun3NHoh3k67RLAAc+{=u#9tCrkG|rzhxY(OK)k=Pm-Rmh4}Z?5qsb}3D6~bdpH$Dk zhK2Kx^x+6hpFaDZB`w^CPb2R6RpT(3@4&&ZLuV4PoPtt4xST_Wz099C0ND#fZvXo= z-<^)zOQ9gGPx7z!3)8Am9-~>~NJjUc5yUPdKpVa>7;%Sv?ZfvwX0OQjN$qKhv5EIPtCWcrP{OU0CEby3!6h8IPJ$aATlLBvtAdXYNUif5=^}=tZPpSyUpAw zZRJ8dnF;1r)A&d4Da~ec_)o@8=Qi%)GVvLp0BC zIeK#SBk)puQkMjm^V6JYL)BM0x+S0e%Ui#ik}cGaN$pi54}E|aF3_-%z(zghCZR0KqQ=mF-1tnY+4SO4xUJ-l>+MS&y zXMID25(@TGagS#j{|PaYS#Y2VO0q;h~FQ+HIp=LZ$=w#s4!g z-Gj!So_Hl>kTDclCP{+E(^T1YJ8kqh%9X`b}ATP_v?Ozk!^$4 zOiz=hpSzhjq;p|g|9;A2car#H714pBq6hS{nF%Hn_K$~>ptaVw64*T44(%&_9Oin4 zE`d& z@tZuc^D7%=cZ{x7EoWua+Y>H&$ z{GLM-CTDC;g8C*;ZlOZ>Myb8a1pCTn?0IXMYh?T()foD-UYRIxCGxkuMfAPY1*XZY zBn{IcS+^yc=8qpj(Nrz4$rpK1b$^XXSh7o@^F09MA)_R8&_~N1otlh z?AjED>q9pHXMcUocXz92Oiv*c8R!m;J02Aa^pLu6OX_FIE-fPvjg{aKh0dJrHq$!p z6k+KKffDL95SH{!@wI?@E+>7?p92)CbWv*32FP88 zVq-_c_2+EBl`%2pXU#3kpMT91S0}=Mc+x3W1vV&b`CbN5)Ar-c{}*|l46;C)h=xAe zX(NDQ1A26BdZNe!d7<)6M<547_F>t=Tu8OBb^JwTJF7^6Kt1YY=;nS8srvl%7SI6WTF%LSz>qMa~NBE(+X0Xl51Xq zRHB0uW{{&hT9*#IRR3N&`4XNZ3k_`4m&?!D*xqE_fq7mc-1kCN`n$<9W$9M*^tzTi z{z~;LwK}S;9=-rKnSZPeRCHZ$A$!X1mgL0Z#h&lj4g=ElJCt>$q8#1tHo-pC6J`!Q z&8B%3sWV=8aO{TX`HSX6{&xMw(>MculpNz$uVH=#2+<;)6*S2pB`Gg(UON$r3(^@jhRJ2`BW_%p?u@+$6kpn;>Gx6=R{;4GCAz=g)~9 zCum^r00`gfF@N7_e&UZmRYfI8?zM1Y2Lx55&;QDzi5gP0x|e6RV5e=^(?N3&fb{hHANSP5zSYBS0uyVO59w_{6>|Ca zmR_Jd4~v>5^~sk*sa*H7VDMgtJDTLP6{#l%L53KJcz@~NfoomSQ@CL1koen}3ZI1X z%iAQdI1#9qI#m&~+*0qg_*wpR5ao&kBIn5|-%nZ|-#U3yk+>WeJ5HLCvY13e7MTVB z#?kIVXryScNNAwWG&6t@<2>y2^F zL<&)l<}M|A+}nAk7VO!Xa}*=!Gd z@RMWW3x_miweBrD;Ke}{<65M$-1?vn6JQ@DAOsd-*8OH=98#H{?T#CgKVyX6%XmX3 z@4nl&p)gWM8hg_x^6>yrgKRAFoBbFB$lDiOF*Gx{Z$8XL-=VN<@(k@T| zM-2f4I77Oz+W=6|67 zU^3cp7uc`q&gXM=0sJt;U2YXX4z2y$4(|3Qez{E3gk54g)Q&9H8rT zOiA);y{5k(urz%0;5b3s4`W#+ZYweC2AdJv4M>|j%Fna?ypyd}&ZD53JanB(l9&sV zkS^}q|YY=WPRDZ5?n+PW^HVrf z3IQA`g1t`42M+UODrfHn(GyGgc0}wkp`^XH=DTFX`6zMt{Pas*_0lUpOIcW~0w?R- z1MkG=4V;CrCjh{;i?0;Qr@)9vZ<1y-G}|G0<|w`X&{(jyb&L=Ru1I2V-G4LMx?;iW zZym^vpdk(B_UvSMtwH>vZ3G5JKN00Ze}VhPkJzRwO_Y4GMj@1G>VLRejGW~fF0jHY zongXIFd$V{lOpuI!8~ zaD<|wjTu-eUQ1EW(GL+u^buc8MUD_E6dx$wL!CTyxa#Ad=pVa?0FMOI z*r;R9M__SXj1J=zg>M8H$*#L3hQ^X+FeI;t&p9z;S)gGap!A`LYp7sO&1qR$sI;~S zEfc~yjo0Wu4BYKWqEGl?D2w$7FLjX9lnIwYObID}(({pI!B~be_-!wJa+!`rHLk~d z2amT-SYR_%95>iy1J>=vcN7dzW9ds-vDK<_Sz?}Bi8A`W$`Ue&d0PU+4)?oD@ZFM2 z(l<0nIAa+DzO#;BHIE#}Q7xnG)T_LHRSk|rNx%2@I0AD>?j|_nyJrAoTPNSgf+Fzg zVZz0Kt~afAY@!cMCx4agDlMKKaPxN6l*OwPJ`Yq!B(Ycw>J&|K4!C? z8lIN{Edlz|ZzmpnK)w2t+x4bh+y&w7-m+x{ytS{9b-=43KhY z)pShED-axGB;;tm>_qYaxyVdEIk`}OX_pDKBxgegNu)F>y>^gK-7g#=d_GVD%IIg& zG=r7(nT|3Xc4(Ob>#(q1(!Ax~u0tIT|8c$$dc{7<3dpq{c)8x~A{wtxplMw58|%Hm z6ICmTxZJ(SJXGTQ{FKG{ne=;f3z2>EBYUB1qi^ayV(VO4jV=CUoIgtL47kdFGGx2= z9M2asmaRmtSl59V<`K7OzshmWKopz;7A$Frl(C!_-i0O}%GO@*Z0UQ|5yGx@rAY_u zU*KSf4)l$n8~VQ1UQ9x0=~>I@sl|CSp-W1~5T?_DpiwY%BO4U*Ns9x|b7JooCyBxih|OY5WGqI{Ux;Qg|rmj*8xczNb%Fq5_b(9`fELP~Wfy70AmRb|Km8jhGFeIYch zw)oc5ZGsGnY~fnHeZIKhy(|C@2Vvmt>FE0H<2SVLcE5b~8Fsw=`DO>D-KGyo+drk7 zyvZ=e=GILP8Xc?lMy>0A_X`$B6mi%AeP6a$LF5gWR5;&1{Jk!Dw`TQ&Wt#*({)wB>AZz5@?cDLvmM?<04U-k z79WaIDFv||8|j$Y#@r?cf#T~T6)t2HMsl)gyXg}#@dwv`AvcuoZGJB~QbslW zLlqKX+a5`4Z$lq*?6urZG{wk}4g79|T}lJ#$+|gZJuADVe$3UFWZKCjw2!X3dVDU!>eWXgQ zyShPDtZwAvs&G;&{*K4WDG09dR_@U9?=3C3@q#d<8?FDUgKQX zsFi29N7zH(l7{=PKY>hd<;e`ni>O~w@{FdAdrY|HW1M{P-{7RAw*u0+``k#Xsw!A(Tbr{?QVZ0j?juInjJrjP7?D8~b+Jr~VX8QMk$$rVW`%7FA(@*oVehO;6 zw`+Vmn$l~mo2v;Gk^KaQ*sha;yjQ$6Ej{$c(q^i^4;a}QdQSvWN<*U2ua{= zWCnQ|JzCCnii5_nJ!F@EcyL={cE>$tJizcp1`=brz?3Zl<@i@!$Z_=9^@?AOE1uz# zIi|FM0@8|qU`;l=EQEmgxu>dz0}X8<&q}~T2%1;}L{p7CE0NK)(XXS{*PA5U=b{}3 zxTCLMoe?bRKvo8(>}IB7*e0pgOVr#hdA`QCKTsz*ncDbOHA~8E4)8e!TlMNd>=yag zc~?m#MD@V#D0Z>^gP1v4E|^Zfu6`H?e5_nKiLIwr@% zsR>HXnpUOlFu`x%mSj`#+33)@oTfsSRky4F#N680gi19Iz4}+3eoQ!PHoB7#dBBO% z=EdARKi49n^==sF`k`&$K#~C?>g><1x`vB*Z$QHdHal5ZdV=ncxiR2Y8thPL;6#2h zH>d7@TmQ;Ww$`O{=TQeCPX~7!W(wrKVCzlFUSSYf>=L*cg=K6R>XQ*knSW2}2OTovpE;5gJ*jUV`_M{3L?y;H>Ib41_Bs5HL5$enbAJ$7#QvzuSySFe^q^n z^-By_JsWwVa8%`pv>m)kOmVXFi4m0;j4tCT-%Il=BJ%7#jwtmytBaAYm`NNv%u#Ihk!3yfdA zeZJh4&Qh_S4GKtytq$&&Vwzxk``E4LN-wud=sEA$e>RuOV2SI1MNaI25!pVnNU=bA z=sjO=n;lsbETvnUc`~BL9uVo$Mu?1mIAt;XG9y200d~L+@?Yh~FMsqaA*a7qK>ebB-QzT_ZXs!{1b@RCJCcpk15>pce)fsE)(cD5ZJg_SG0=UV5pLW+s`{pb zlT(qL^m{U0L21f@={DZ~bleTyRMyzrHogT&)~NQD)CIh!5ys;Ae!moP-rUy8U6~hW z`?%64-X$r-Y-sD=^qGNj;2oH{e-jP)ORIVPJ1xAweKbUti9h{aUlFp)1sQ6*p8+|ATluVUk6$u?84Yr zcD@XTn4}{Sd7S*bLF-3FIceS0DU@dA)1L9@LIXLUjP0VofR^H!nZc=9fA=2hl5mERAVoO$=Y5R*LG*K<9xux8+kT}{Zr((bC}q|3_PV3y zicQIal->XDXuNNy&D-yRPa`!gvN2yI0(bQiP2WE0g1c>|@-OWYFXJk5GXrHfJTUiMu5R1@0p-b&7k6%d3P)Mr z$Nyejhh|P4@{TZ;Dl)Gha65E5y3wrC!O(Co?oD|_d1Ms%BIlde2w$oVtE>yYKk!%~ zB#xkf6_e>*h{o%Je|*y)nP=riZ&^jlsetIO;4s6~a}-jnub^xQC?8&Z&f$iUI^Q|L zX8n;lZ6%48Ccp!8lS?_jz~pct{)9i*8Oj z=4Z1$h5E&N7MB(PJK^xjCwEWE2+uXf-Jl+FekhC?MXFc^nwnsWl14{*Kiy8IF5bm^ z=?6ifBCe=6ACZDb9nnF1WNvlOWjywM?>kQ$=+}~mxrOYF7bRQ2?wA(rT;KLfjFF+_ zkBBTpoz~`3e>3@0Ku;e2lKFA}z(wDgEOl)gsEl6#k+b=_yqN8<_({0g6As4x7`ASr z^L^&1AR4iB#dX0^?A@Wgw1Hh2gH4Bd`R9BL)%05OK_s7nAR;7;`1Xb+GJM11fLHScSX3-=p2NtokS$G{%tBM*> z0#GNO;Stw67R#U-&YqORnfk(bECD_6SDOb?T&_d6o~_%Uu{=z2+MD`4s2^ zFSP+&QUW1eu3K)dVC~Y@sfTz@7UOHaM=JxhEX%a&DzWitEBPs^^W$CV*CGB58Yurs z?A@CO2k)kYYhb=Ae1#DlDDfgiN_<0`Gf*LWcev8Ziv#+2}&4xoud zoa6{D<%iQT;%W8al_(?Z67_(k3)P!FDRj+;!g2IoD|%vaCVGgxR)wOopC|4kg1Cph z+ESn7T!IkSD~5f|O^=d{{+ zw4`Y}#?UX&B0o4ri{|~Hh?bIxtyC(N3J_PEBVhGm?&pRl;AbnmQCGC}zdbdNtC+1X z@-T$zMdD{=^=(93{o~RL<;^`gU=1F^fAMOOK6mMvp55YrExin>T)Hbl}k;xFFjTFKQ1K#N=z#*a#m@ z^wqyLJowH0ukcFYVzO4mqj>-WKkm?YbgAtf9GP=RA3%~bd0!EA}Wqtu%Cf1g)? zFa~To`g&8!$tL~~tmx>aRrFDy)M767QCxgUN1<3n=K#?L%Y8&_*;vxmmbIaqKtv(@9Uq zdiN86S+_APzT5#|y$llXnm}pdTYQg&d1Y1uw?bqLvN-jc$L8k~rRRDf&0r&GXtz%C zabm-|sA|rZv?;v057`FNe^PkXHFXknak-1}@H0~w@N*aZ&6w1o;>pLa)}QhA<(NV! zbS1QQ7&4vS=z8~G{CtI{?c+4dCDc)h`GeOwqZd`AhngYiRA&X0J|{C}t4aWou#rqd zO|K%>ui_##P1tN$scCcTeEKxz&9wWsl3o*=#O>#yaXRFvfSLrUe^r56RM!#=cY)V6 z>5!HuL*(+9q-!i0kPDxtoN5xWx%;%GIa*pYenDBSO*FfQI{7s<37|2|YSn z6jV{`iVrVWC@QJG9S$jrhLv6n`+r+Sp-Aow4%ibr~4f|>`S&SsC z;6tn3MEF@iN)Y4_tG$yUO8<^<&lN12U@pNyTPcY>f2?+HZcR4p4IPRUe~!4}bj{jg7T|5Q)8UnHe5Ew{bul}$gUjvEO;QU3V6Sp_rl6-@94>hru!XSC{ z-V`MBo00ufosku?#2rVGZ(%X82Vc44>K(<Ws*Ui{ z!M|!d%s~GpCnvAg?KEQ~RUq24Yqp&A^|qZ1e|#V*MwJ-U15el4um&Vl38+0Mce8%e z-LgBNmH;hSPN_Q0)=XxKuoXoa;ghhjl#i*~%ij^B-L8Ig3JYt{F~CY9+I-Mr_ZtII zUenaYMh$|tahgx4WbjNSqTg_=8qYrQQ~#JB5iGv0mYA9u@R!*=z-xSK?(7Gf1zSf6B4IK)+1|k$eq;}AI1@SP0_SV8+Tm2 zx+94=2ZnvVlcs+AqT-|Ay#1SYl*A?=whvHma8A`NO&4+EA#x1pb<`zjD04v#d$K-w z)KWJrhDXtwXddBF7MHO>xXelplZ?0ANQEx-(GE=#(qBL_Qy{zdYM%I@~7(xY0b{pmU8@VNnm`60RLE7)N5)cQSx zm0)^`n`y2CN0R9%C~gCq=qvhCe^hKV+rAh8fGi76ljM0 z<#z{^kIP>)B3Vn=Vt z(#(*h-wL;hnjC}E-0hnGIkS>XgMo-+_|D5Y1qHHQ<%M3Y75H^hI4_i_*@w-KLA_qTh8p_ve1ZU;-a|5>H9;XWum^8 zRYM+0EDz-fSu7B#vyWc@mji9m_aOO=$7TCKSAoIwFC!CeT@;n{7LmLuEe{ofDt7ETj*1##=?|co9+k8cp^NU~B{MxHR z%aI!#_~c@j7<4r=ZQ64f!jQ@6le#u?fJ%SW8S2M8iHzci9{Wi^iaX%tXJ9|x589Ss zp`$H4nk2iC+wZCpy7G140Cv4vgV4)r6%UrNIYCnc9MkS*!Eymd&5wSKq*W+ze}Fg& zZgv2a1`P(V4^rdg1eM>+Rc|>eCQqQ3+c+YU7`z>kikZL@0zi$0FO6|qZv5!|ZbniS zD5{rwQ$NUqUJc-8Y#3!`3QbkN#M=qO-X?N=Dgu+l4vjGh8&om8WIv|eu)!y?x+y`G zlm>YSy2h#LpL_#K(npvA>?GMQf7_IG4QC0OgF;utG+>C7h-d|@*)*y|!eD)GW^5j*0n@iRqPd|H-zpyatt*;NbEQ<4uYNnyq?K;tOL}9 zqSRaB=|~7A==D>dxJ7vY)!tDUf1Y7aMAsV^s$~?yyRSK9)b0Z#+weQQf1n_@f{b?W zM_9sH`-3&ot2K?c=||6N@nY~3<(DOY^UhD(bh^0d#O;_^nj%@qI!kK=COy6~3M;E7 zQJ#UK`<5UyHz0$e4=IyiA{uDfd_p6*ISFDPUX6Jj)2)sOmo=-MFYOSjaeRbv?CePq z7}ta8yR?*iX+Hr1-wW<&f98*X`7d(X1Hx-}z1^+O`^lhVA2)f0K*$=V`|N&?BU; zE+dF7Wtd7o8D6giesp@8jhejl%>$(bx_dc@AaR18z9}u8H;HFD;A!?5|^vi19aL5lC;^r zm{9-a;Cdm;y;>r}eU6%yyqNP& zPS|DBT+4~ffxdHh!*qIWCQx1~stHXCVX1E|-`#4&^UOGAf6wa`5eopf5L?iK>b0Q5 z1(6)F3vp4koC{f;OzJS1=n8DNA~dYiaP?mU4^qhE$=)*PvR)lI<5M1DXm|E^j{8tz zA7P-}s8076rsTknaBM#kAhhPQVRn*@St17XxZw!Rs_t1Myu`CbJQ2 zaukmrfE#LNU*6N;&h;;|P_$#L4XAWn3k_wl`6gQBx5B#IY^1CE=NU`Jv<83$w&>;8 z@0x-4Q2xTMIy7|0PjsS;2%d^it|czoeYhw&o#N9Qe;(z2dMK|l$cLJWU0w#1T)uIc&1gt3A`qxxa;w&ER%w$vSR7 zbO;%1a1=#1h889eo2h`sX@PQafviTTIaB-YIjw3AT!b^9^gtbcb>2W)eBawXzdl&O zTA(~Le-jGq^b9~Tntd_+n{OWea?b2J+dBe65k>|l`l8G##q)+8t1C z#(-06lICV06A1)njwVsTCpyxOI&|1!J%ndU|M{0bH2YyCyXPh-PEVt ze@@xVG^Jp&s4J$qCdR}4baEg|ipB~{VjHXdf4cf@X$m2TPEt3cf%40vrsRCqWmtL; z%#Gj)<(G*u$m4{7vQD8^ZR|uz{p+f^YGo&-%s)pe#*g1uRRu`Q)N7#KIT>9OL#FPc5C0pVGqwhV4drv^Ls z+PB_tLW;hO_k{yT3q zBXsiK=ineGizAC2|AeqTu#W0iGWKELrNvsEcl33|yjg8|tSVhc%c2n8ii9Cze=56x z%Z)xKJ;KL58^!&L>qIAV-~WEKZ@#RDMVN4s31|U)k}Ud1Y-zDknNt0% zgLs3o6tfc@;)ME+y!>$+6cm9Bq~7owu#WhDd%5HNfx-YIH8R8-WUzy2Md-D~ZA z&N%OLvBv6d5wTJqbIvNrWo{)rFKp1`qi?2`5B=~8mBQR8MJoXXoMbF`?uWT!7Y?fR*$mw8xo6^~Q zahT}V7U|Eu;XhXc#b0TewRnzqdok%x&B<)(+;^~;J@xHtzAJ5Wf7wSXW&K$6rd)d4 z(^zx2FKznF9!4-`HVzDO9H)cxOUHgMo<=l`H#`4kO!L!BFHg~Kv3p#C$4pPoy=cK- zim50wdK|6aJ%5oAin77TUzhJ{VLkGUzv%Ojg?_S}$usosZ1ilLLY*we`3ubFF&RDZ zVb~jn!&4a!z5Gb&47nR$CdJpRawk zm@RL;iIeYmF`TKP8#T|J^(csCKQE`t;AzulIAMdw26l`-Ry8trwul-q{9>Xlb^Yib zAFl(SJW_P4OtHn8+UM)>VL#7bV!d9HV{jR}DPGmbi(T`>e`9i~&8gR~xFjh~ALpe| z@A#w+V)rzyFQi(n2Skd^v6SfI9pn3IzddK1s!SP8r|0Qw`+NqP&KrFm^hjyYwS8uB zKAa7*H}m;apE?&?@@FK^-gEnW=39O8Ses%tzxI4E#}Mr&wfP)BVKko8mxmtxKwqv_ z`pP#ovRNjBe`Rnl*|M1;-g2R!#ei z@3!P5`2Fl z74MbwciGfSjYoV_`@?Crdmj3Sy|x|CHS7H<7bp37SY!9ZM1j|T9_rXVr7#8BJpys3 z3;FwGM_#m&2(Wk zfA#)UfJIHx-Z5L!HO>2>qf=QOijZ@gC}k7|Biqf&8D4^2lGCKvR(6k8^Wrddmi1*A zR*qA`H7%}=bIY#NqV^p{y&dq=cXd*4!uQl&u!s4Y>|9wra~&b(XU#4$7i_~D^-bsO zl)fIF>B)CJ^xo^KxJ#*NGY)g;iU0bPf6abx*+(Lpz_a`=8vS|xraCN!9Msp22Z1rw z8ZBNZx9e%Nxu1qpeNk<)-B5ZJyGFPB=`l~u^|-(4+kQ4@Z?DvD@$xO-q2?_8wesFx zw!Md*O;FL#(nNWiJwB0Qs)nM*^P{dtILqUQe>Bzc zbRND=&A}>sU%u~yakPwj-Zq@e?eMC&+aNV3_hwp!yT^sC*FCBz=9DwvX=azFiH_qb zxooO(@o?VvIx$b#b3H!ZF0<|9+&GmPcv+$cLyJbM=is7`CdnxsEsXQRUn|yq=4L}h zkJd(?jZUj`+}q}QW6Z-yS!ZGNf3~)F8%-l;W3A>B?Rl`q`Z}f2rdpg5Z{2vxsT@D+ z+*hpr{_M8O_;NYhY+$||IgL?kNwRLP)A`gKUFCDNdrt?K=cOk@V`dxUXLL80{+i78 z>N1rxi2N(LZG+x@Zw#tQ`<(u{r_HxJW1(E!{&HB=>MCQ;)bR50RPnXdf82x1`uW0v zx$y_8wqBd4Ap)txlP6Xt620eXe+W%ywVn1aeeJfeR1_o#QHh6hxnF^GSf@i&?Ac4n zRWlK(dBKW!vv~k(mRdSpvj`emQ4^^f1?2cKV%8W=r`i$H*F{C^TC@9DO{>dAW6y@C z=KOXzQFEfd{X5`GkeWb1f8VKOSws7ITPlwW2m+wNd8FK|QMAmb)w#zr|1zVtUOncI z=}FxlregYB7*BH@i0x#4M=yXY==FTC8ShHPMi=aRzK|;25*xEkFU%3zRL#LG>$DkI z561QLC6IP9CxsX`RGE@Ta(#N&`+a@guW$SHMf9%pJ{^vVX=dRJe>M>3rgOG~HC;qn zW~_|Cvk2|iWgBTK^se$g9**j1Hhm?}on~K_PoEpn^z8WQ_$}D5PNT$KWSkCf6XSFk zobH3;b5NJ=F1*giYoe9b^R-{?`%CM}d!c^k{Kk(1j=+>&dMrEp+>Fg~cfT_|A6_5H zE-{=noU+5sOLvYke^cCM%7>%V#;_SpsIpT=3-30&s{MInOJ7zRpY|W=Rmu82Wzk=|%Tg?kD+g%2IE|GY z8_Gn#R1S?~rLwUd4le#GUn=uSb{mAob!yg>zqhur4>m`4f4e)3Y4k7E7K* z!%FWDH5U&IZK`N%Iy+Xo^pQhkFHY?+L*fh=IxP5bu>~X0i(aOI* zq@S(!b-mj-(cUF{2?R~I$WRDuxP2IL9jUc*r8b_2Hrp?6Zu6dA(%VeAau$=r zB@G|t$-d(43=LID*5-P+Ico3yiC=q06P(-wx)cRIe=gbaQ4?jp+28VGaCjhBvKtuh z%`7{fNYR_c`MZwag_x#Ox>}4Lm&rh}*O+gra4ghWb$0}x+V>_SfRgv!so_?@T#zNy z`c{-9o{168HfY~!r4 z=dz`3fBX8Rbchica^YR`h>yocUhcE1c%AMkU7I^1kB`;Mnl28yTpGqrfO?aV z{vhgfw5q)EbhuXBz$s#*plq?qQrn7Y2Bpe3eC%Hj+v)yjR~yOa*)HJwvmsdQbQBkQ* z=O@p`kH`I;H9p-3w#P}nPTX0vb#|M3dUWF#amcd>Ctf-U3bS0UZ`YC+*?qD*jOQWZ zGwYJphqB!G`CgyCR}b@q&{5q~4>o}O)R5zGsc!ni$3|C+hj}vVp_uMsdPZ8YY2JHh ze-S=uy_q~p>Dh6(vX0hj5ih)P5(a&D_qttHqvPJ@tloslv~RMD?W9M~(^WLn7HAgR zX>UY(exFTjDWhCj^~Y?*d70I(z7ei*Z*_##SUGCOgXq^sjnoCZ;Gpm3YB+Bn4_2>a zvB&RzFwfH{XT9TDJMD3E#;EV+m2=#hf3SDr%aloeU#}nIOBgmXV$*9M2Zyv@%@5-R zkfD09>(}Wr*HX71%3-8Lqv{@&E7vNuYc(R7a_~23;uo zMkscTqUe-9&Z9LB&S`d>?AIE^Uhi^k?!B?*#t^-;p?Tfuce&)Hdd22qokp4Nn&o>i zT@qUCrtU1{>God4@%Ek_GsQOSf5{l{x90Xh#krh^@#vcELUxj2l-rkM4v(;)& zsduzu&0*orHyRG_lJK$#fiRvY1Yw0=a-!SH1tRb{#zJi=f)9t3_ee=#Og{+yt! zS-oyratljGUu@sRoZa^9oMCIc#=*h9CH`S|yYybuOTM{G-j94^EQ3X1;QlJXQ5L)R zsu^VIJGjDQyIz>y887JJWuDpeoUoOavw8Sl>u*6Wsi@Z3WIX9#S1#iB;j#3zmr^VJ zbyclmC8~}EpIyr-E1E=^e+*yO0Ke0Dvgz^L-58GT?eY<2Ykt`gzD%axWrV!G%!IdB z@Swj>&pkaDoD+6a%lqnSWRqyevU*qY(T1h3$;}!pp9ZM?+4NRDl%Yd}V9w}#MQ+Nj ztOhq5ZIWg^dkwPv-ng4X?jPg4?)Ojri0oq+8+iZ|BV!JPUcfm`e-kd!s&@yLv~)AcJEsNtHGr_2SdkB7qHrKbc^Pf(S_4lvtLrI4~Ox3kna5C z5jRbvnA7ETf+HuKf4wq(>iaj_RQ>I4WsaTW@}R%`$mQc{LC(kiC40ESJYOWdhw5OH zHjAygrCR@4?XtNt_pEco{PPv8_YCXCaJVTSiy3>mn!ddC^uwsH=;woqyomIzs($7dQW)4mRd}6oZQTT7 zJnhk0sMVML!`M%F;0|H|#qdrak#jV!hBjRZBL!6KjrlD&YR_B0ZU*Y1u}0oLz*o?{ ziT4}`r-?{)f4?=0;j|2neRi4OTxGd6&u{VA&I6$oEV;~~7=vSBXWZiNIp`r5rCAyv z{m4Q3v)|Wtf$qi7-oGx-(c`K0Pj1>+&vRd$YTJdSZeldWo}4$ZmxX~ocsV^Gp_;r( zPw14IQI1@?gvccCObB=cXhc1AjDKpVfZ1l9ulF7+n z>B+GlbD2hCllvms*!&O{G<_UUx|tPRvKD)|Bs(MfuD{&6_AX94cu7qo=%fs+_AJjvpg?`|U1Ix6=i}QAq zouhkre|5CbAO*i%&*OQ|c&n$p_H%b$+{ksxncds2UA)-Y_2jl9qs2Bkm*rI7-?sJO z_H?+Stq+4`?=XFr*HAmsVg@h9*0o{}q9jxgiE^xuscg>NGw69$xGG?4qe=J;K|4oG*J>U@ZN; z)6@F#NPkTGn!7kpSC8ogIb_gp2M{9n`8XGGc;7L{)>ebnb#a`Y@;IlNvaX};onHoO ze{4>TMtxfko6h!+{&iCt!Zj6gKCCy))T@}bIsx>itNXCm-`sD`I23~&9))LfohK-` zUMg~W8`shL?my#8@|Z8rqlq2}W#tt#Ud+vCC6v|n$`;G{$xW`q^uAbcgL=5xo`&gc zx7+i~w|JTPBkTHhOZPY#XOEF}dsm(ne+?$u?m2(RN%kC#rdsdGo;H?+Q=|_XmPY_g zK(oI$WH);<%WPRZa1JH_rlSXWoc3qbK5s^P+wxY9hUa|bZ*Vcr+7Wo31<_zUEEeZ_ zxZFZejuC$>dAVTc=%S3bJu5sep1b{AM4BIuHpXs!93F*g3b<55_6|*@el6l{6@}6D z{eLJnr*W~syLuhlYb7_=Wm%k*S+P0V4n2CrK6+rE?s9dwaK6Y@w7=R}4`yz#j>Q0POpH0PSDXPHsY+Sh!=~{qla5&I=NHQz{v!X^UPZI+&6GUdBf)(~F9e9Y2yhA!- z2Nkn-s(+42541=JUc^V9Fm5!=Fn?4k22n0dNlg!kVK*g0z;xgaSg&+uqx#|o5;rfp zL87wyXGKRqG2)p4DI-P^Tfhl|{b*x1_1O>E21R5`TC)hOK|-+rX9%bQueYfiyR zj#E{av8h9f^`qPxg2psgZaz?HEBBQbmNtQkAWktZ(He(z>%0YKc%kQ8U7!oJ8oZmxwE_+t=*$RxNd}!C?5G&q09eN?NbU{Ds&|d!` z{_}kf?CrfXg}&(dd;FWd+kay||Ja)@jDWZAPmDJkb?0l3X>G$ZA4FyuZ`K`yUd%!b zf7@?1v{j)FZ(3Jf8U|ry)Adgj3v~8=i(l zta<`ho-fj^4#GqrLy#*n=K~{F+H++TaT#EsG3DAUHXtz;M&7=EIe+jXr?^M)>w3`x zvcO>SZOBX_6pG|rgB2Q3nT9$S7W5?onZ*Xr^f)2EKS#~6*7Y<5Nx3dcQ>Ul_PSIh+ z4NLa({|0G&Y~PzI&~dJN8LFq;Q&bzn+C z{`h3}W7nl|V6WEBKYzAI0!;d%pY;*|9?#5fyIOUqlyO32n)_>7)a`$}@2@^sGktLgi5>m`C z=JZ5@DzzG!AgRnv%vU5YZI(mf0JZ%>?k&kMaJmG+*Uqqyf`Ty!sewXHY*8ES?=Yc*SUQ+}ol$P@ekEJ9WD{ zax8rd)W$4kccutxyyXCM9zr8Bgv8dw(9otf2&k?fYD{mm6y@N8pp1s_>OUzmcpQQh zF5%MlBu8EM$*WN;%o1*C3){?Ess=>#24NS=Lx0w!-}wH<^C$m~kRX#!4WXs~5|eKY zane9tK+X#*tW_wqb2x}64lg+toFc{B;I{s8T=ZpzeQ+OuR6sX22Top&jN^}AeSI`< zeIR?Y)-U0kD?Yx40#th-M3z9cy)9*gxyC_?U|uu|3-|;*Y9BbyH_j1x>k`pH#*n0j z4u9H^uji9x;ycT3=Qsk^T(e*kFg&a+RvUt!A|XW@8abUEA}fz zq$zz2my7>iZ4XlQiBJiMyV!?m;Yop2Sra^R#_6otT})~N=RV@cr8*v3Yju_C?Im!1 z4iUvgYY(H{@y@d>ATOt3#V20Uw=2H3KYwIHqfxOJrJp$ZjmhkLU0pp~cJ?aR4*p#K z*%dTFpPc(ASIf+tg~YIg2@sSM6BxDfwsx?qEvSwJHE0kAh9qw!#a@qU1MxaXfr zm7fb8BQ+vI>Q0ejkSRtXi%5_mHmXcW#nBcjaz(wul2JWRb0jkh!s-lxEas2}Mt{TR zNO(QO-{P{b!)kOyu`7o_lwf>=mq$SE>pVLC$rT@tC8Fd7Hl5^}uik1Ks4>^0!H z^`{V8`~Bv@EdTz0Kn=$qtdyR~n;(8^4SQ#u?VzkYQt%Q&o*1j^3gQ~$fVe~&w7ifd z5$p66rbmzhFLJ_A)ti56D#LEskT~y!yR1<>O~@w@%r+tU`$W=v^`-<47m*QE#MU6#+Qt|lxr9b4lb;$?&&pX~+nfPYH^Bqf+9 zZPG-*Icv|GwJ1I);nMP)_Iq$>&-?y^-C7?_6C~C{tB7@swVjUG6Ao%=iQ2Zpih$(I z3&p65zrJR{RkZOJ{&l`T_#J+7Pc{TS8$u;KBALT_=KUvB9Dg4H0>C9;H!h6Ufe!SBLiz`D{9>98v$VBn_*st*x18)( zdZ3@$YC`CbfGf{vS_SD*+c@MHG=+MwV>UO!YKM5dH$e^XK^#o1-N}Y!*t!hgIa(~= z_iI(o)*|FrR*>d$E=#kCH#Sc(4ps4(@-9E^yO9kA9|bhsI?5mW#D6geY8g6R{?z;K z+S)q*%@rM9Ynal$)~)k*eY)=>EY*l}3l^z?d1;_mwO+qg1<5lCf@2h=!wqOxhNyuA zCzxwDw^WBZI12)s-e^B9fm4j2iiwDI-~k0etg@K2Ea>38~Z=N)i6Hk$6tH|gvpq%Op$BoDiHzKi3wxCTJS)Az@Xllxs zpa2!Vj6GebXLhJ_!ZS|?Zks{c)_a5x;>1dx8H%#Qxn~oe3V$B(BL>P1oOwFe)`&Bo zG;|RNW9_`iBvj}|2eLZo*@*_45(Zge@d_iz7!HABK`g}8-m1lmq7*hKR1a$p0l|oK z>Lkk1eZ^dV$2}vFNg#>kwyasCo&mHl1I0y6Y2twD9w>=jNTv0=&Acz+%N_QTl1A~?P_shS6@3k*sTw3Htnz)Cq&oqx$W0%V7*PvrQ)M2hTEdARz_ zM!b4aDI@1CwnkoEF;E1Ohghlylhm3O#1Rq%AxNn$7)GKjDK4dPunjBM0uxkMhl(N; z-$@XAeitX8_TMU|!b#eaf^IDP)V2511^!BE9T(uE$-O){vKj>x#pE4@39FcOe4LP5Pof*VArCLRji*E{lJQ?Uo5A2AxAFfZk&my13KNz|+%__vUB2`0%opOaAizUw)bXn^z7$_#*z~ zWbw-%%?`UC*iY=Y$D#?+5)^1zSB8VMb7mCpkN9WSSh(A5a9B8`Nbp1L`&`Zf3sbKu}Z+b^0sbxfIjXA?0OR_cv z0!d8N#Fi;F+saD=b4IyfE{M3z9e<(PpGaENWt-6GbW@Y|G{9i<>_6dDwRq|PdIli} z=dQ$K_7W+QthLMHFTJ3x#UHKj_2Vm_@BdrpyL?7Z^bgNl7)|!-aCy*bN58!5WWTbm zhQl6&!?WbL@SLTT7(~N37blc^YVxl493+~f^k_vj)}@ylk{bxIE0esPiht-eNu|cT zLI~o7MFJDd8(2adhIMEi^Tu!U3^XXg#I_G|ZIqXzj>;b5_rMEya*av{xNQ)KsSbi$ z1uu2{>=gq#bvbtkFMaajXFa%obQ^2h*TAmf!hvx_THR(I@vFt@%Xw$N`ik+%KV6*s zL)$(42R8cQ3|&ozh_~_f7k{?-=-u8OQi@9x1TzUDMUbbdB}MI({LBg&^H<#XMQrT_ zac#VFlDyjcaoi9=R4d4KjMtu-itfAJ4w7HQ@pEdWUlR+zPea{q;s00v{zpYz}A zxwY401@wPikM$Ri`__x}Y#+Wkk*9|?2Kq>zoFH8QuXGEiTCJC7E~@2Rst?378$%kh zTv-!QTIP%V$Mp^iF@K4Js*Fp)9ZoP3wXK78W1(t6!9MysM|T*dsNmoLid&vD$C7tE zhkRl%rym{O=&r9IAAYRKrNKE0g<%`Pa>kl0L0~u#mx;j*4~G>6t^|fSh(A9I0HL=n zAiUzP1*C>W+s{Z0#-PIgz>9}Amj8|GyPBLBP29!zT=3T?Pk;VuPCr=lTQj=&|JLje zhb3Qp`O!4GbwPTQ2^?&-Kr)UF+QkXzk}XAhdmaTI$6am(hA=N(6S#$%b_eKzX9_I> zRY67rmfo<77m0w+~8bzRJ4=re>jRZpLO3hjIi58-JJuJX^%7gR&Jwh!fw?>G)=r zG2@S;7a)&eew}0Hc+qjb5Y;x-oB0aU*bvm2ONtOL&*p3QFd}}iUgT{dWx_;tg3Kt^ zAbb~;RbbNoe8wdbln~&)8Rq38C=2hMN0AX6FpxyNL0PV~+>q>m3KWGeT5jL+oJfX1 zRAx_V9)AX6hNp4q1zNsW;KUB{bP|BuTOa_~1?mjAKu((nLc(ud9(a-BQ|H-;wXxpy zmcBVa{BZHi{I&nLX969^?A9SPg7}XX^M}7j_J>csJ!$18uN((g2qZOzjkZTox@z$> zZB}j=z!LFHN5CEO($+{&v<2o;sP`a<=7c4|LVsY8;$#fTVcBRNEiSHWh?)qoLxSv0 zJykp&2T%Y7Nl?q*H)N+t5Z#`GPjD|C2wz@tE~cM2D=!*#EMr{ZD-U;f782qZcOz&<}R~3!t~eA>=)A^=f9j~F1r5KsekjWP~O`dk07-Wdvk2&BOOf|k(c-SjLsddZtbrV zq>8N-kd@^caeO$Gfzfi=PLm+E8U0sZqPtTqy(&}<86)8BKM@;kJRYF!3s(Jyz>p_B zI}wg8l(w%F;1^x;)^EBo6js8uYigMFkov9PcyAthZ~lj)ywlMK3v_<+=YQ{ibk5el zzSz6ddltq=L;uxeKgZI7zF2F+R!{r#8`WUH^}ee|d8hMy^u70kmdM45j%Amhur1Uz z$ZU?Alwh8&tYEyK10NE-y6ZA_3?b%Nco-z5MqZjo#FmBITt&C|Rl8gfbP+1T;T;s$ zF;vtL++K5r@qXlB$-6wzVt-NK^rKz!+GzE=mP?_#VzitQ%W8td?1FhU9|A@6T?a_S z4w_Gg@Ht0pA->Q#x!rIZ(}a0NvPV$2K8!)c4&GNrmq;b&-I@Ub-g3w$#@DZ&mI&pe z|F<(K_^-bAi~Q7s!^g(mdOT2Aq?f9%oO^!E~_U($wVx|AkuGzEBUp%SisDHHuYjw$jtnVz?p2k8;rS(0L% zN4>se1_){*P`MGKq_V8z*CnXVSpwv3TfA`=x%Q8)%YgR zq_uY_zkZ(6(0{{Ut@o1yI*!Y}IJ2|M&rD7`@AJ*ob3t2<_5N_JzmH|u(Eq!C|H<)e zL_V`V`v3RXX@-I_Nk4chwGd*jO*1H5A#jU(SVXo~ijfrzzQ=GK^Ez4M?36ZaiJ{f< z;)cbEby^`7ApXG?jJIJ%5bx?o)vcM6Uw^LliAO~I7*$~hUr zG8qD=Iu@MkLbPiawlUD^{{6?HH-W$N8$lqn?|It(3?=<%JLC3`xS4;kNwO> z@dCPabcFiPb^NQx^zr}JzCXO}TQ@s>hyToW{YP)Ah8Cl#pBlhpLf(O5EyR-WxU++& z2S)JH&Xu`HkqIP(KfZ}uTj}52`(umFj-b+36a3h*J+9N) z$E^-px44y%R@a+QN&e_DA8cT;uO{>D7loa6{8y}h{kff``N`im5MNFGcdlujSw)%= zNxOa-Mp@FA8cU@_VF@AqXeLmTj3@N_mKYf?*7EycNVjt9e()JNBS@LtjnG0>xUb)Ik@d1 zwSW1PzP_5+=ks*$Z~-h+ig;|`oHQ0x=jEt~peO~%$^l*#hH=Tvg3m$54k?)L9b$x< znA=ccSCG2Z>)Ie7L2`~v9{uAi6d|3d=x@{^sKwYvXv_#xaBcPd@JWK>E)({?+W;`WUw}m49J5 ze+2Q$SU~*8KIe3s1$FxM<^EhrEcdOR;+N)N;0ewvv;=PVMf%IR{)*!+r**jLn}5?< zKmNvXqba0J zjJ9t4@YVgY&5KOX_VJ`^b^wu5IDa3fAe3Lf3*xNv+qU-TO;FllupJ>S_aTu^2MiHIV<{B084tp&u*_;4lEe4d6uA ze|+xw? z&aU*<&nxz?dfxHks-1fo68sbp16EpIeC%}F&)myzU*X?;xWi52A3AOO`k!1N%xafv z3x_>o@{-PK#69!eq>I`yO&I9zo9s;?EM2IYotK+55-T$(17W3FsDHjDIuLI^tQBuD zJiDQS(TW*F&&I1n4(x(IY)nr$L?bqsmF@TF_|rRU^Oy!iZRg7I{H6TO@BeV9Km7iu zPW|+_KQ-*{di2VE;`q;dKiuC$?|<})&-U#HBUYs2Guo2ka@)>lU|rqj`Z0-3na&aL znn7&hcK)KpcEh%}$$tofRA<~;BgF%!k)r^WjTx{|mum1={v%j0pFr8xZe#uyrV&(j zJR%+l<7q~%z|AV-oLc#j`gyw4^ERj$RCBHW^#cWr6w#-iI-oyW{H8)U*CZDAAfD?Yxl%J+45VfyY=|x zTcVr${b~%^2%bQ(>(u5mw&UHrzgs-DBE;EAkRWuXlSJ5J_>@+E|gpT(qexZPqN5!kE{4UaP{{dhHt<4-!nEp_8R_|S*Z`c z{>))_I%%8t{*V6towNFDPXGOv-jxUgxBLWB{i#(6h<{vCuI}A|oZ~|lIZzoUs00bV zFaZnCcA_I)MR1>W|JqFs|0CA}I{?s|z4SBxsJoBAgRes$2Rh8Rn&~Bi=)T zq!6@NmOCKT54YzGK@jK`(%vp1dnD3wE=~mLG1epS5YRSXCw5W#a0bQ+FgIW(0(S)b zf5nf{vwuq9gj0%fR$928>p|wXqa|RR`%~%kp$1sC^+`$7wm#X>cSfnz2>v}Ebzq-0 z`r!lxJ%2T}&&Dha$#QQVz*2dHV7TGW^BrXKK`=A|@ zH*fT$k{HMgq*!__C(2obq|mqTpw(EeP_8U}kbiMjiM=5P2cMV7Y&1jXnFXA05Y(-< zrhn!L+994acawDLBZ{abEPkg)=R~B1OUgL)k=Kb{CJKoubotalG=!6oaB*7~@vFq# zav{S;Ubgp0nHeG%6A1>q)seNxAabM2ZocB#@{lz+2Qa$%it-1)eYNW@N3?l^w7FTT ziV&gW?-t;{TXwiO$+QNc)2wDyK z!`FxHJjP76x=YaFeDawSHNL&Q9{k_<)n9W1+3z-dOuj}A6iU1ukr04p!ecs zfuwKtw>JKlj?!{`b|3?vGrU2iUA$By<;2I0S|IT^jv6q%X>nAVbCH}O9~NX`WPgU# zym!{-0p|SlQNDOc2jMQq4Q2WuLk5*0q|nY>VkAXubUgU0k4S33b9_TXq_uV%bard` zQ9F;ZeiQy@zV@qgTP+TPF280U%}_9%{x!$)(LX--I(>OTVj!Bdvy67py{A3y)g34N zz5yk{puP;w3GByx#2GUiMaP062Y-vomWmCs;_iv9mM_)DJVS=4FJ74sBVfcurGZ+0 z`a-_hTrgbK@S~+>1p32m%Y)l`g*idKAS&?8CWzA3UL;^4Zf6Ic;|!f0ZP)sw$ZQgD zyYnTkC;gsAdg6yic_;mIe}?Xb?4KB``O6nRK4f*dtgTD0*4N_0|J3nXEPwVnKC=JP zrGI8bjVAqF1Jtc$YN0Q8gF~#q&+o#>y%#GHd~sO=j}+xQBhLupDRIKe#vp9x9qiis zH0g_fcor}V2lj@(>^*6^xo6w{9viM>bKbP)i>V!~SRj-HWK9q+`J7k!Z4*;n($wVC zMtG_H{><(7KQSD|1V~H-*MDKLJ*|dYd*P6qBY*wa$L&3oA3tu_4XuW&^J-KhQ>HUrUA@CV1Y;hflPrz?k=UQ(I$ftF#Cp~8VXC$3h`N0G)3^TA zr&en{*;2)ww$h|ac3}V3cFaF(-r`~UbN|@+uX}?2?F%M)Rk+P59e+0La&vt6iG{h~ zUtjo38}IO$OpJhgR@riV)1cqxknh->qhA}MVI;%pkLh~!m^ zRoa=ksnyn4Yt_~V-FZnJ%hE(S)T{h*G8(RT_p|W2)o8FU$dM9?P*E>GF~kw#!X$%Q zB#5kAP2D(Dd(1g@4u7R;prGgCy2C{{fN+W9G7Y}`jGJZvY|Zlg!;{;&?tq#sY!B@W{cnu-P=EBdCX|_u{Bo-AOg1gr zwfU*N?Dd_k{c0rLnigIEzS97^`NuE6up`41=T#$$)}~%W0$cY?7gz24A^Kpqin_yW zy~?u~b3v_w)i}>g#I48%?jT86r+ef^V3E={WUHN4p`WJ&3Ew}UY7I&C)l*{f$6kqb zxVSFJ*hXCHx__WnH_MPiOhn%A0O-7GHS%_k?Jw82) z{u1bx=^E~f9 zjV994!nkevP~BZ!Ro7LL?TFZ@2{4CB`@sTX2AbSNWwj-;T$w_+bKsQjH!oOkDmZx# z<=SW!P0VVIb^{8vZ;8Yb%6%o+-y*SXIeWPXK|!R`)Rc4FC#i}bUL>xQ9sVb4Tv3%cW#@0t%{x7@BN@`^HRuUrC zU`XFIXmxT{e(4k`qY`Z-n!`DR5RDRqDSf?ur?AFDrZkF~wgkI0@by#Ins8W+E zVK878anVOx8e2S*Du2e5#3M|S0Jk8YFe+1)NyZ2SBT%VBq#lC+0+u&W${7s?DTe4+ zwSOuNJinOuyS?Ap?(LSV!Vn300X-o5(-WxrWTBl(ss-)r(uheS#b6-m4r~()24NpR zKBUmakDGTfWlKNr&i~`!z;C!S%M8EcQ!n>1e^+Oo&-yENKN|&-@n~4LF&B(o zL_s>c7&5Q2BS;ol1moeDYYAafsYNzTZGWb*HyY<>BN7&CVbAh1GzIfrL!OANDr#>1 zxfu)%;MDHSvTSPQshvN9PsGGB&2oY34I`?wqtb&ySq>sNlt%1^KZJ9?U@|!dDyu5l z_0K!@4@aH{T7po(&#^jL;;;RKa+kjTUQ4f7m(JgRhc!@X{DWV*+L`hEco#1mNPp%> zLun8rRRkC}MCDWH*GiDng^_c^&L>}P`M4sj`4}9vu^kD^zWT^aDbc6Dv<*LCp-8B? za#=3Ykq_j@?u$M|IfJ+6$(*k|yLnhno0=>S^@@<1QCU@No}R2w3Pb2`Ry!B5Lit+` z9)K?)^TEfsl&0$AQaYU7oh@G8*?*jV*(a}9@^0SV*(X1t_Ff%gKw=pL{VI+9%7&F8 z65Cpl$|zIGseZ7?sSX2sTwJFh#-*=0D5`i;Ncb%eT_8n(W;%_|`1s zmZ(YM=M5KkaxaxTLVGnj>Cedvgxo0qhvtW$S;i!QT(ES(?&KjsoAZneMk5mPWD%yQOADKdtt)EkLHY&@ z|IH9=&km1f((;Hz$(Xc7(ZyR^8!OW;>m(jQ1%C9d9;LJCI**oRo!q$hKyY zK26EXb9~W#BTP&RjbR(4m>!P7^1zkR_%BwIcx7}X)u>y z)k7%z_4QiEJZa5qtR8gd>Esu7=7vN5o2>uDn{@WpU%0#5x&2#SI)C{ZSm9(vlwg*q zY+EviLAr)m>O_Q~cC%}hC|ml~61p%+#-*%L@(qSCO7O)E{fmNi8wQZCTgt;RI> zzhGS8Zq6({MIANie3t#3BXt7{abPPb!K9IhxvI^niNvxppi*%V3g`cP zrpbUwKj*?Xe19!8VkulNOgPIq@@Cj3WGUK-n3?FF#%41xr4?o`Tu3$$WoS0FOU30C z=IP(o8p_d?%&BvbPqdNx&X6-BKmcnBfHo)_|cYyvrQH+LV*Jmd1)^5_w zl^L}|DZNL)xC%}|ci^oa%$j6Mg>NpA(?beww7FoyZ(PM4j1%A6ikL!oK+SiCN+@>3{f z+l{~C7uZ>O7Ipego;*`wO|9>F$X{?Ct`+JrS?8xVdko5m^)~wgZOeCYQs3MhcNoat z{@Q+X?hJp{q3j2%(dl^UpXa4Y4?mneJwC>{>V~^8+5TZ(Qg}?KO!~?fJTz&d*=_O` z(H`<08o751Af#m#JE()9guiKgN(HkiNpVdxDG^qum%XhC7YokWUSA}cO^py<{g>aZ z2_JupOAy>3d1fk~2JOV+kQGMGNTSIV#TWtP*O(1-n0NELi&qOshgTn?@2ij9PFnx( z^Y&X_|M{5tRx8mM&jfV7Ct*;Tb@B+UX(P^iswRQeB%u(3P|#chBr%|i9W;(okKmD$ zkdzM7fYC07!tdwYvW%h8mL`$m`nWRsc&>lH^c(6`cz$E3C(Tx?tU=2HQnM6^eGvpY7R-YJTWjaz1jY2fj(wP#d&KT!J^K!vtjqwjQJ`0UGpVEI9 zFfK}%C2uUj$qc^_lgcD|&ZQrjLd?VW*5&XXI_+jQNavf zWN7oCE+}e?YR~d)J2fiLGIHIQ$_1fvi)Il9ZBh`<)XXl9npBmMt;~y-Q}8qcG|Iy& zA<}gU$!3g{i-kH)?1Yz)r&}u*7QBCdY(P%XPGsTvp;k5nv$F#{Gu825d~;Lq_HTCf zr7ii-nIil?AIxv^{d0Esv77e#QPSzdhT=c5h=&B3_~ZElN=aPUnBER%VWz30XoKiMF+H9?LHQlL^aLvnixIJXer5S($&K{_03Z zc96!`^8hzm?afmqfeS;MngxaQ;Y}=fe9ZrOU(1zwterg>QV00L?)!TEs`#uwKj692 zS>d_Tl<WZCVDsEeRRWMRi zmU5osq<*)<$_kWN(r_|nL|cCo$RoR>Aj+aXN)c_*M?jyF|Wtw%S{UoIxa3 zo~>k8Z(hmxdN??jVhqvLT)3J38Q_i@YJ`4RaZZn|$xJHK$8{#kbV`Ty6w z`0d8u_tR#?3}+;4>vxF6#)dvl$u6svP8xTo!v$s%~oC6{g zXe;zXd?atf{g*(o2^oL&yoZ2t@8{fbha0}|<(>Zfhj{Y7uW#{#82ih5j2^Cy^E<9O zqxIJN879)S_1AnuM-OdSpn};Q{ zREMYdUVl7cv#Xu@@-Nq)?re3#joopk8sGd5({G;DpS5LXgjIUCR)}fgyYx_^W8!N@ zhs==j(;;&26H!=1%uVIzj!0~mB+1>Hz3=cJxL3w!l*$cz%rCbF@Jm>QAK z@<)GWl4@;2N?(895%b^q!RCX3B##o?+r>8{t{v?P*Rc~F=Un&i#xHucOW)Set4`L9 zw|U1|aOFY#(PvND&Yi0jVw5cDtO(^N`#{cdkwk6*3oF!?woxY(D%+&~*o<}0cP5gz zdg~QpWlQmB#L6*c5>mxtkhSI%^;g)+*p8sK8b7l|_0E5w@~MUrp2Ui zV!pGAve$?910B{Cm5SIM?xpu@FR$a)VUDi)_%~MK7pGhbBO>Nbq(p3!YGLs5W( z3s>dRq7|2yMHmP@u$%1tI)7YKPUf+wh(jja$o*HsDJx&$P!qh{RKHkdi%>g z&(-am`}QAb`#+GYzjU0`h_*?eCmH_7pD6I+wnG0Ce1X9nuePT$Ga5Dq&%UN zuc^vR()f0FZp$%>Lq1W(r;e}L1$VZb+g^2B$F18s z_Q&(|)xLaZ*SWrDw>CjrGga@b&Yxd;R-PtP;xkG(?|3ALBd79lOxq3m4aDv3= zhgCqSvYbo#=^U~Q$yZ4Txe?7%*}#IuS!{{k?gLV9Bc(bU{4|EwAXKT>lzl&6Qo2Ar z!0hwBCV9aj_yl4BS&g7bg8^2m>E z&}CZr{<9wH1y!~3DCbY z;qS7V2n!;yr=ZXIQB_`Ey?F33uBj;?(7Qp@%8aHwrq^&y{jzt@n*ke{d>>8t^sx* zCy?DR!#7*+IHR+N%e5i}Uk z!ZJga0ae-7LXx9g!`P?oG&+(XtcHl~CnYNakuZaRNtGnDNd&CZ9jJd4W1(CNCLC1C zDM_R_%CmW429-Rt1p5C=ZpOI}GBwQ{<8gi05fX8}Ge0ihgF;*Sh-K@r@2Q3MPZsfI z*UzGcp>ioYS)9&~U3d03=Nr#(-Lso-uJT*l><^c5zZMV&@!V9c8R+OgT=7Uu?MMYg zWFGS3`z_m3nCy9@&#XRH3xr?;uCr9&hN|^*TbbPIh$tl5kWMMoMRid(FW|(2cJy1Tp zYwsY^oh>xSp;bj2kVIgtHLjD47@_7-&^ZfJTzB3E7oEHV#6VMAcM7A6N9ydHzje-U zeqMh*4my5M{_P*{=Z{$u;oDP%X+XB`uFzv>hF_M@EE>s`AyB6jUmnJdOFseO&wS#? zPQ!d6)Rq9-8cGYHVGLuXd(0j3zMu{*6ylI_pffy|AJP=#40@bIZ-4m%o<19~=C|(1 zBZe5*UmSzIO(whYnB3XkZtH~dG1lrj&#!-+N9xA$b3gb0!mseHQ~03|k z9@y5R(v+rTk8#MHZ2=F6kjB^~#PdTaBIH&;Ou7jy#@H=JFqV#+RK|Ny(jZE3BLT~a zxKGp0H#B7jE$Z`f93GyP_*5f8b?V-uTQgPsW`uz%mK);AL9>u3x&4;mvvCSI4H96UKx#XM!_ASb*~ndBi}W zS->$sG*+;Bkxn?|F=G~0lNmhuuVQ#4f(pq*>J-2tM;%{Rud6qwR4ISsj~0KC8>m9Q zyS&hod3NnQnOb>5naCVc@FR3FVh+h7aJHi+T1iZ1jaU%nP?4~8(Dd}AHAW4}+sZNKj99Q~TTkNm(8nqyobg*ZbV z28W2%2owSU!?fd6$e9^g!b4o(ES@mLsyiW@vMzVZ<5y(Bi$f-{RY1suLi@N$!53Uq z+#|^M$(R$?vsoxYLyFQDDzOUx!D`C4puo76m&6ULghk>3#F}-!g)V=VG%?JW@8YDw zxZ;xs$}yD|F3eI8kN7jfjYPFcYhhDyU(E^OKWXDxfQLp}3}KeZW) zStxfVA*L~Tdw6LdW7nuRw0+@zu5~!G@VDO1<7W5&VM4Fp6KaxI!c2Y)s}cIKc$n;r zG9X@=1VpOUZj`nlQsx!s*(?W_h0F_6LINcsCxjIFc234*;x^JZ@=w#ry%+sbS3xLe zp71|j$%Ip*8idK*31o$w*P+>HTb@s5EU(snn^M}c6^-hznbdznrV>s|QIK3kMmQm> z13y_rKM=txb07O|J-|I}6v|WAy#eds3G2XsZhz$yiNULd~h_P~NM)<2 zXfDWHFvmskfj%Rw7~yBG6q#3SZBw328*PbLNqAe&o*N!oq~3e9j+x+}1nrkVy$Kb6 zRS{a`uH77~gVd-Y1lg>4Xyn5rWF@zyB=8;-gEMmBO54b{QJlb$-TY?43Q#Yfp}yF{ zonC%jFSoEbBzAt`0Z?=y|puRN{GX&ug^VdvszkaFUrO3l?iK zFw!BDlP?bv2H_+%ghlMq4_OySt+mckV^sN8uS-_^1$m)*t{g|&Rb)2J$I?8$cX3LNESp?ZX6+B(C$7RzI&_*%VUT+*!-z~ zITEAc8;e!QiT`LH*@Ka@mjN| z9&I@x4Ir#$eeuGc4mQdMXP1pYRF**?a2t&f z%qIfUr6JnF*^C=-_Kj{?MXoYVg=YIg#H-81Hlq5;D|pIDW_YADR(R%GW>-77j#v=0 z!@)si+p(1PA+13rsOLu}g(AqmEE>;k3aoO%$;BoBRY0o0X=aNTlPF6Ve^UMbBW^v^ z$g^@Xs4}p-cpCw|-SwN_;l^EnSc%?ecjbq{ae$;^Ye08X;u67KBK4_D)f>jdkhd;I zgV-HD-?f8FWN2YiRqthS}3e`djUPRtM=wBrZN?S*mT9H0RDEIkVd0~hz-$lp-K`Z&7&?7sM0 zZZ?YRNcw{fzjEe7UcHHyaUOwTB;_Rved{ck0V*{qo>{3}qYuQAvNk}`x2Pl}*SCf! zOSh07qkxzhjJHAEBd|ry4Jku(g@LGPfTDRyf7#RE{{wp(e;g9b;j?xz_aQK}DZNus zPpL}2ELBshN#7{?aV1;AzN8Y<`4Eov%%)H&t|G&>2jGH%ev)~J!`+TL) zCS7ver^R!!;CWgwmA?_lY)hd0ng)Dy{D&!1d4_wtArPw;j7c-unT?F8sSq<3P?w4K#QE6U-_V70xyBI99W7z9--msHmW~X=wH2ZG5MHrxafG|zWM%$sI0L_oDK)qh<7W3nGG0Q zD9B)|?#gELhik6GOW+(cQouZ|-TIB=wl<{S=V|?3f4{Q@>&GwWv>Q9X^S*<*5mdJH zrE?-8nUV%E@+tj*iVmvyAs4MI zw->c2i9a8zOUGMGi^0Rh*dwn4Nip;69`&X0z^cxEdb!z8)8$|C8Qicb=I{9NzvV<6 z2eP*rfBF~;cSuI}F$O^%cvrEce?qnkP0ye@U}x-yb|xZdX*U{haTYo_?gck9;?T zDDGp7HW8IksRVPlg+%`0`_7+=0^&8uuN`)LPf54#@aPuj^b>>9Ve;Z$&O7ex25}HQ zKb;}{-~58TJ*b5u(>gu$J7z8=Q$mDQCR3~9@WFhM+#z6A$V+0A+J!vxXyNauKwBp2 ze<_l)p(^<+PfUTh#PS2enq;xm>4C2Z#vUThQl`X2ns#`0dAdF%-`(DMc@j$RC1uYTvrUe*B3C z<0tM~-u9O(UZn89<6OIb?Cl3h9|v;te^>QqK=X$auAiG%9Ko&sk!J<56+gK>6XXu0 zx-6No9Xm++3<~G1s0ox8ac#G$g|uEw*LLzMkWe~ZfO zF!hQ@q)39!Mhvz2f&6tH$9cGjLgYo*WT13>K48ch4w~_fJf5OLeqmyCu@MO;ZekF{ zN+zdI$PwJFkJJk1N#JKF4@@iZ@gVsWnf!5tsn z9k0(Fui|a5@jH%=f6bRV)eC`Ke~29qr9r_v{qQC;(%-vrm{sd_u`2KR<{&LQZqq-P z!!@WccjExQ<172-N+w{xjDO#|=O$%W?q&x2aR%Zaah$?89(#lQ2dwimY1jCaf98+5 zuJIVdoLzqGz}a1lz^{3L`HsiEPSKS!mQo`>b_SpNhbrG3=WZmq@wO_Be{5N1QBi-` zGBZh@_=b>luFxm)ydIoOMiqA<@r}iid4T>0@@+s41*OY}#um6fNZK5~~@#|bHa58G?JD$8@- zD~7|~pNz6Ld5A$H+NFMSpk^I!qEeZcFtoiyxnhNxJ9|HNAShTsx>y0>88V&DF7K}d z$u_w5XKuq7UeG>!!YOek4~9sl_ItiX zjy}Vv9)CEIlkvVFjTOpkjz}FbAU5)ykc&7hyBP6z(_`U*C^fx}3+CY2j?;|`^>qz+ zIYOxKKuk1WYKTicKH}O2jM#EAH4p655Se8dj}_t}Xsl``^tF9|X#y&k6;S<896-1> zXa=cmqA;dd--nP@q>h5;Sj`Jb#FG5;Gn=$#;bbhmC`7{;G zG-7{jlPobvA742_Ebya_<38;0COzL$ey@)Q9j-q6;m+&w7=Fin;n;nTOAx335k6KN zx64m#Saz>V-t+)}`L7@Sdxnrn zaAq0AqUrNs-eS!=o(^eEErh|r8B z)y|~LWg<+Xcogy`@U>)OH1C+75mIS9!dpr_K&o+nv~&4=eqtlL=A_7aK9pA;5MgVW z>h9sIGN@wSU$`g`&I}JCdWA^+;c*Z%xIpOZvcUY8dg z{m>2AH+$pt%=QaoaX0@)5UHI*)t*w|eq4V{LD@v4Q~JtwrnEJl>qUV6q1-XL{OC|w z=o9>Z$&YSqV9v=q@flxJne$P<0Wlp#hJuT*2qZ681MPudMVr%zC@Z@%>3 znD*ZH-skvB$s*TFk4!iR*$SHK24p6YIyH`eYHLb1wF^<(N=j-t;2b38ytWL0;2Mq-D?@J)r*BlIF-4qMN!B#tJAUYR%%YH;S3!}vB=>P$aD`c6{xiUxd!ViI#?UHbDQ0s*d^Qu|j@XJ-Dy~SJ znM1L9CFjJBnZugnm$g{$ZSxnI|3s+<{+- zNmvFEZCpGF#9v_urkBkc!fDB~y$96Kw|ZLB$e8N7;U z^Wlu-n_kEVcUHge+P9S}s}Din3%9C1&h6>bRr)C|K7Dth*M8KTy;*%uTYclb>tD2Z zU_N8&@nSyutF$>A_Q98vc6D$cyWSPo@t8fk199FPm&*`fVm!g0{tWalS!7nx2XP%V z`F-yR4BmfCwEh}LR8j+8HTZ&msRCOwYM?cYQ#c5-grFuPK?~THV1qLQe`0JUU14&* zIoi3+imZTKpqzlMoEcb(#)?evI{Cf^9j~|2nE@9xR@A}k4PM)N|9tnpU<9^CV`ZfH z`|l7Gz@*`${qs`ebq$72U-0^zA&t}kT;D*mGY0tt;2gjxjXmZMJ5?ZmKs3Ohzdy{N z0zU@EjJcu`hBXM`x?q-u4fG4f3`R02AV^sO#vDe-6OGqE8$ACl1T4TT2Lo&mi?IGF z*5BgP1o95KfR*^Zeub4Wcvd_CBf5P)-O{-FRaCrqCo9$C-eY83RQe z?9WQNtHB^Kriyg+k1$x9rEDwc5c@Zk7Qy;m+ed7y&kV245ng*Z{t=KB7?1FrP{3T+I`|Uz%M~8A zUp07Ebn-3#UvC2<$(lINgU9`gPG;40Ui{^cW4=TC04D_zD`yKuZO21fmS_uj->nJcV61!)Uk4lWWF$dE8YW+pmY%8nzFN4AOYp@wSp9b}R;GrHoEeEf&2YaB~gWcfe zV{q{X>`VU|tnHU-HcAGo*<}26*$jfW$#8I441#!~4K9&xy~))l9*h|Lul%b4YVHBd z=efCvKt|6JjHBt*u!3hK`1g@R(9e+o?GNDgTm6jsaf8px^!VsI^L zyZjgVO0GQLFi8TUoGPaI2-u>v;EB-O#u0&0gJaBX zDQi#%{47RhSwkpx9-f6ABW14QizMS4phBCrMxGFnD~V;f=C(|+80)~F0TMGmE;f7t z0-xjO8bn=#f2zscSWrLbt)3{qw`@}!B_ip?f?5ck-*6A~#FTk$<7JSa{ zo--gZW!KNeE0dVecwqs)Pj>HvcWJd$1xI`bj8bxzi}`V7Jp*+%;ENHiIZj#w@w#WJ zr3~hOZ=z?|bIiRIDA3^>G8N)S>~z+c3RBKLk&bMtodpYk+LQ$gKN&p z+koj&Vsa0dvoK5Ufh~q4tsy&z6L7Ba4Uh00rt~&odDlLLww5c8*=fmT4HC;D-}$B% zED+lK(Ea}mG-dNc4ao1=>GM>9=L*rao3wV=U@&r4Y|5-&U; zZd2zqBdhPRi$;pHNXqD+>Mqx4AVo1!l(veZC7`7WXh72q(DZe!U@=v&po+BBxrVal zeLJUOG}Q@^0_R0y7HGjlDgwQ@ZIpmmsMX8sc(4$l_%XzKAJaA7ku%<2_S&AirZAokcfT(EO|yJxi~-jA>M6VGG7%3!|mSj0*k zSCiI+ofaDalrtxB8$qr`b9AT4M&rZppSu|PQtWG&E=9^lv- zHD-H5e$=FwUo%or3!vdVOl!gqq9A4sI?kzpcXNo#)XP%bOqqZfX-q}c^=ra4SHZ$+ zOpvFV*C(s(#$#wyrw8G)R*1~NS4<*pO@#FLJb~&%-m-NP|lNu z2NBn}3^y>v{xEP(y%NeTihVP*h$I+KB1+dyJ(f`VsfxDV@FTv9)irv{+L7W#+du@^ zzJup6$|<%_)V1)t`%ct0gm(8|J&&3FeeB<~yYJGrA#Hd6-SZgl?_>X#-F>f6a^?X^ z=}394lBox2N(t^wBxTcoZtumpS5y51_fj8EmhGqVaQ!kEu3vUHFaL?0@e$K`i+j10 zVGFW`MN@~T7-4c zm2DN&Fgg84%Li1+VAHod=w$s`KAHc6c1#sSpZ}zt{e2*7t=DpY2%8nYUdMu%QI&fe z70Sj=fjPxSUwNc_43TBZ=uOM~} zfOD5R$*WtG*&%F$oSS)pYg!HLAWmEd!R98#ajohaU{RB9zh&K?nDo!oLV-AeC))F| zDD(rTFewiBd-dOcpNZfzYy+E*P`5}A;>3JluA-WM;vPXA1NYT%;rWb1BCThiIWk!< z5~tfYX^DGtU~?@yq)}j{9$4M}{U;g2y}7;VF`y&b`Sp|8hOkrLN-cB&!4^&0gG z`X`wobd~pd4k`IF82CTxPFCyZi(mE0aV?s79)Nj*C9W+v??{wN?>3$ng>v(9U#5oY zAPTgor-$$v>lWlwI#RQuMOjs58_*j3Bkn8xSXm0pF!c(UpstDE|J=jAO^&!{-O7?; zp-41$`hBT?!~{8zj{L(-w*K)v_dj_Ha>gqi_kC{zgWvUjH#mK~jy*wLMXpYN>O=oW z1{u`2#)qApua&67bTV}CD~pT*zH@qhNd1E-Hx*Ec6vieVmE zL$X>>m@J~F#s`+*`YPI1O^f=kWra0#{#G`mt(vxfpDwnin=cji&A)2Try`~QSsS>< z6Z6`K{rtO)`^ZuT4d_iyYB(Ei>&qF1Pkt$WejoQzDURZF_m2OeIgIPhKb$Mw{NVh< zO(UuZ3*?GU6-k18)#;F-k%pgmEj#`3kMjrH!jIuA?k_Om7rEgP$gl|P?%lZW%wK=n z*R~CR7}d!;F$JN({^F>UOxrr|WYGf)b;7CF-^aRpCs6oo6ybbHWcb|at*B=cGZe9R zf{|N{R8;(n`}o_KKjIn2cpc+dN7cDzTDNY7k&o-Ng?jYu_q`37dLFo_Kg6ilut{A@ zfUu~~wF&r{^q*N>OLqO9Kg49%*WqOJIe?@qxAQ#2fnLkCZ3{q5{G3w! zZ1$g{wgqwhzQE7d@B8u`Kd<`o{pY@X|G6*Ue{QiJ03ps}OJchSu&z(vyLtd&SZ-Z^ zV7u70T-~=n>B`l8`^~Q0rf)y#%jtdl`*L3>Lo-_ctQ$6<{x)%+3E;C?gmR}&HDg%5 z?dK>y^Qi=mlkKBE{9S&FI&u76et0cUe#)o#{uj6XSv$6!Y-^{Qb?boM)hYT^0_Dao z`|{6(V%Z()l<{}jXU}8gr>w%de3#vScXikA=gdR*?&4F4uB~`hg9rDS5G*fgU%qDi z;xF3Y<2ws-x=6MXmJ)-O5M+n8MYh1z}jYoBn5@BS`ZV7=q- zvTj$8@3M7W%ep^zdSF@4=ravWw9Vk2wMh7>hWuEAUnX|(qdP_#pslJ!n}=Y3m$k057b>d;#AwcA(aDdKN^5DuI9b@O@im1 z%Fd1%daQ&r4{`r2%;FK_RaPu&T(EdDFLVl&FT876v)t5CMheqRKP3W*7!y1{%&*S@ z3s-2vS!|LmC5(a8KR&@8F90sWy_Jb0K*9{T(4Xz(zhXK6DuX$G=;{`yx!CK3IFIgqeuXE(lG z6ExyhKldrd{hoiW4PX^qe#$eT6)}JAo4`0)h;P@{U+xF+Dv12Ko`NBNj7OvcwI+}O zc?6roT97w%E^l$fsDxC(JKBAH2@0Q_3OJ+R$49%jfg?>ASP5i`yn~g&MuG&>hqSv+ zU?a%^|M&2Htn9C;4)nk2ce}jbcfTv~cRehh3i65NguvfnumW-fOM{)j`wZrIZA!F< z2Y9{4@{Geqks2(4?{UC?7WkeTtP<;OVZAnzXc(WjQKZFotprH?eGltH0jAhaR^xLC zwr_#)*-XRhcd&HukZu*<1Ej=uZ-L|(hqcCf{`Wp-rmzyE1>48#FD%coKd27AfM=}N z7Kn!NBdH)mY}ZDS3ftAShXS}jM%bU81igC>`&Vz9dFBVMP99D%C7zq@Pr?B5R)urt^yGRFFBI=_Oz z-~Yhz+M#O!PfJksUT%1uYx@Osb1@@9~t&z z{`d0wx17u*%B@0w`5t4xZxNhE0TS8wPd|R$IF?<%$$lI=xo0}Q_l)oD<@+DVZ#RFy z#rViI`U`Cz%m2tN{+`13ruh3~d@jS^r-092@c#`yXLa@Yywe%^pACwe(-uIoX6g7f(w*AFDNW83wAiT88N z8?}MHU0Zx_CpUe$u3n5ma(fPtf4S=A#1@d@HJyN`$OV6=lOwxp?*;4A*HeLq<70DJ zDfq&6BfPR-r|>=9I`D<>$rL$U*D=+-ALYBB5B0r#c#6z$U5RggJpdT!KFa4m{^Q*~ zL2$M0(e?>{Zc!Zf6Do0O?c$53D6*`ZiX522LP;k&-G0`IN_}l&w{8^ZFLVHo$amu? zbmf)cN;skXdIIO? z!^*^hOtJqGAR*?_OcYiwLAh9BB^QFMJHI86Em#>t!yh$4q%cxO27v}6VeCO_zrOP#;u5a@X)+j0z)=77zy6p1ZC{;U z*XRHIpU$Vx-MM|={ipx(|NNi(>h1i0?AO`u|Gs{DEspKKx$Ahh{J;El@%HcC*Y&wQ z?*6Sfua8C6f1`GV%+LR(Yudlpr|jQ8Psjg%yj`Ey|AsdQs@m^I|Grt*yC?mB-)b+G zFR!6G%*l{4qjN~3Gn;Ypul%Xx6+o7PFXU@x5XNa`ixs!z7T5?1VdS7ZDJ2jJW?^$% zHkmJDzG+^MCK-9@6bz}4#*xda#d?$^&MMLt1AFkCQo|SFrJ8P@2@};zve2rB;Opam zX(?Xl(@Qw1pGMw9VzQIdgRd`-$#$zf*Q(i$YLA+^PVM1hxV=1oS{w4Xd#|l}F$`ZW zZ!am;9*TqI_*2a}ygi8Nu?k& zf8(xa&3Nk^pIZHGbo!W%r`wlFZMaV_^;E3So9WReuOD>-&+W-ypJylA<>u&k%(p%b zKXt-Wb+Ff4w@4nAex5A0)r_n@xMv=xYO}51wdNR|o(H4z`M8-q&FG-rf4;eYU&qa5 zE1kwT&S&q*N1LgK&#$*dIecCX9-;9_t^s{y{${cI)ZP7bDwo#h+x{X4$>Du?{(Myn z$eLm}nHhyIhM!dJ2gz~s2#H|t6DJ%9d(G{@i}+63@AI#{`IRhI$!hzuiw|!vgZ*wY zfaF!*483PbMtQl}&mM>GC;ei7j^omoWpX@`$K|p;4JP}|B3(`%$%1{l`J1zZ!DtYV zzt%6B*glGL73{(#9Gc~ruMd%P97X{>zK!!S*=a9_#b~m4ckJ`Y+DydSH`lxPrB1e< zl*#$2m?j|T`I#pNZ~Q_}6+gl%eQ)2w;XZ%;NDkpk@#cAX@CEzvv>5Dv275c5v&hNB zn)t8pubcA8TfN4c%5>W-gwL<9^T9f)#VD}#{afJihF1|hF27dxaQa}4-!G%j1T3*~ zIDI2uZxf#rnO$CWMY(71L&Zn0Pm|3X1gjC@&*SQ#@yY3BrhRRLNA~uXeArd9oG#pK zZ+>haKPMiU4xc;~k;#jHJ|pC~TJ9AOgEuB(#Fam;C)sjNQ=hktht4Jr+r??&Ps%2K zqf>KSEaO2D%^m`OXf*p5bGd(cn(fB+=%pTPwOVeU-~H_CWSYmJ^H%!W?CDg#7P-Nn zp8SnrM1u!1AB>jqpdP<#yXO=0(xF(Nmy_qQ@i-jEi~U0&?aSeR>kR4soIM8ldU82h zU)A90dGPY~^8ENPetz4GC#`vkRtq)EqT%KAFcR;C!Zvw$Zr{MA(|EdndwmF|Bs7jj z3=a?6u{{|-u9$kKhUN~>*5I)J8XtVO+!E?OJnHo0aVMTlb=-dqVt@QOdpT!HR38Jz zAN5ca`L@mN=J-y3y)>R^uUf9iOBM-OkDg>{cxDl`4rIDpkK1FGD;3K6OYQ3Y@!hrd z{%mhmzHsWs-&BYF!OV5vB27?hyOFzjm~WDcZhu(9d3rn^p{)31E*kQ3YTK{O)4WSb zL;EtRq+ZU>nbg+V^uwAD=MU$%^}IgpYqlQj4zD5IjvpU?FNfXxC@da#Yqm3eHFh2r z1NY>(jlLSu^J`L0Lunj}bz3vOj@6>%(t2x35?i7B8Obr5i|0iS)94_};S`))43hts zqPs|v5Qw5Ex*!I)4RNP%o8j)x>N|QF`7`c2=OFU^K^ztaL<{pYyh}YqARcq0_S_t@ zv~kxSDyBkzZ4wH*K8zjZRO4a_Apb)1SPoppU^bA5&|@D>Fr_FmIz{&d%*!S&L6YJ4 zS*m1!Z$28`CAq$*A2pq+Cozkpuazf88G?f2=4c|QrHmCyS%O}`U}pWpc{ zD&c>BQi*#HKxQ;?VoP>@drW4g(wk87(Hi~`9ND)+x4-O-M7Wd~JjchP!)cE!_$U;y zi>xEl>lqswiu$K8_9i9w7ew06Wz=+qjdxvCT&a_K#MJ@g1a~;=vYYfDYbTL>mIr@9 znrzCeW1jpea;FEeejXloH^~g5WCG|J`szG?2&I<>;pcKQwPJ%SiY@hn#Pxl{^~1pR zy~p)K#P$8{DEcES3Nw(<%d{pJmBCd~GpAz{Z^(Kwl}Ud9J55br(YePlRU`E(^|(t2 zRM{Vd3xn;dUePLF(cY|cBJpmjh|ZW2L`uzx#yK-1(|>|kd+Q|x2@uO#Gh=?}mQTWe zt?y?j1;%h+KMWJl)l)5qgXS%Hkl0uAW=s<^B}r34|eKMMzCPl|Ejx zb8?#(Sc0mZ3ZXv_W_`JHz8WHxmNq@P;5(Q37JN*GtNCoib<5bzMI*X}N3+F9&@~3e6z-IqAb_IWR~@@TUg+v# zGb*R76$T5eJ`Vv+%H1n$BI=16&z%xMlJXP00CjPgbG}x2pdDF88m%%+xo{}f(X+p> z$wKP~AO8LGXGd&11zDD;hCV|j0y=&q+wXpUL{RZB@f?}V$lMUp2{A)ncSLD_*>TQQ z+4#}uD^$f*QtCjY4wm>eS3?f#JF=37bnU(>o-aQuqVvMc^bu)W^XN~h&y1`g9e{RW zzj%W=-An|pMsosbiC+VkAIK8Mu&MKTCb)=Qi0V4c#qR z*b4~|ShGT{JN+oGsk18Zg8IULdxDf8JLz{KZo|4vK4!%+zBeuN=+RP0O%ovP+a%xQ z7=Y7EvBJDQQN{M=aL+wwKksE>%9YTwH5e7o1Wc|c<$O0}do#L{u*2f;^SVu5bxkcu zzm$J`3S)1#{oa`n0#;EY1%7%MfFS<0&~+nhVz68UzAI1Yc3v=kz$7Vus|otD(PI5R z*D|K%Z<4lXUnCIHP~&_Zb**U*#23!NeDpvoKfmO)pkbaGCqcR8Vx_)5MxCWz9)$>z zrLS(v-%tm+R6?);splgU0^>@BXLF0ezv0Ma{140SE|&39m2Jj|l-_ilm*M)tR#585 zL!&)RtT4}LbbE6@P6JGTKLsr^lEt`}qm-q$4qh!9ER3~EbqkxK$m2ZE)3$Q)qfB6^ zw0QBJLCfjn-4C~HQ9GpMk00_xh9@b(vu6kcN+-gKxuEdmJd!8MA4*a**;Y50_C+6Z z-{Y(s(O=zN<|@t#$wdvPziASqx`|Y}c6R(5=gg0f47T1M%T_smnk_Uk<^*N5QC3h2 zR7Y=?3j;Q+@zQ03Eq;Abkl+e+VHc)e*Yl!tDxaq;FQq+*sGdYyySj%2SZ7eJnYK^G zt|M_%t⪻d7LHJS3k{+=rHl2pA0o4VymgL2=St-ubY)^xO}ofiO`ol_~sqDOJy2x z9hjF)Y3i_O`fclfIGa3%!eY208LDee6#5rWOuK>+^D?488pp z`g+-E7vR?pUaGtmUGKqbchyF_?u)>to3imX{fe|c)cogv@_M72c=aS5pSg^^j-hL7 z|CpJ?7LCIQ?Gx|XkM|NP%^}6SVjCBgbx4TmASt99YW_fYlG%K&b8Xb7cuqh5yU*uFzbQCK8tEI{lA;P#Hp_?Okh}PRNH?aecWy5ZN6f3;r4OOUHH7{5l6-|w??5%x= zNBe#Np=l!Hv{FD1TH5FsTB~{wu`KK0nA%__y_T=*$>a~;R!grsbAyYH_9V8xu3OXu zqPz-ks`kgC7xP14$nq<6>NqxCHk*SQTOtysZ&~h+j&5Ss962dCgaG{GsYRmT)$Qe1 zqaSa722K}t&#MttiORTi1tK>UW#Ns4Dc<7Aj`p40sI|A`w~jG7X}MQqRssX9L9if8 zBcAyK!H?_j;@6+=W(so-HY@!W5F2_hG{A$wW?g@AzusSp4b=f(wTYc@JkN2>jU4V8j#*8no@ZHesN%JQ94^Z z(}2f0pkWJ$n#Q#1-un>pRXt+ydLz4h0fkCSQd;iS{>lBk`t8jEg)QgewTyXFXplU! z>{-}nO{SE#3Sn|x@SV=B-4SxyI3+sxu7y{yVPSNSf?Ss2!CB9XodB!Qd%h>E@YmW8Gr-eb%)GtJdciF$;eDGVL>HQ|@$?#xWW4q&;?vOi zuDydgvGcg$d}mt|t?-m6Od^$I#Y#@x#c^_?r8+BoZ*s1W;YxSlI4=KQ&+w#B!uR*C zaQM6*Bo;<2iIaBR>Fz_ySnK;bR6B`(@d(kmn|%b3mEMz8hDOH>p|4=)-yj+9O~C_h z*2*DTXu+xLqI(AGudQj!U1isRXmZjX6Pe0ZgK*%Cs`>`vsKck@Rc?~dD_&o~X#no-GhtPwZ=}gvfag`LNMu=dC5aK|0H}q3R(!tD z8J_!@nH>$LIWN|2J3%^2rmzDT-NxO=*W-CljfD1tkLy#kRL;@&}^IY!Nee^VD*W?GNh5racEuK9?UW z=L|npBV2rOf3h3R0;(n0tRA+1CO^#C;?c5r+o^VevgX$7-J_ZT`^S_q;voH8F5>(Z z;$>xb5m3Yk$3dAYSt!(A6UG;rJ}(#CfC6qm$q;w&7h0^tk*H9vRNj4=Tzr4a<-O<^ zWp7w}!Zm;}NF;K6-M*&t)-!ul!k~1i-WojZkTf9ofyBdUTF;L|B(JeBdLC7Mhkx3A*}8lF4i|!TC-}SHzQ5u12e>$~f*- zf&4&N@A^&V`SOIwW#njo#vgq@ANo91V2b6Cwn#u)B}F*F&rNxZXveparo)sv^ zPG1eB*oqi5O#1P!FCyGdcOICRN@@2R+J51KP-t$5YsVVlBE>MF8y|zF&u~n6aQhrS zYh6`Jv0moA7Y9a{5~toTxTtFdEz?wuJi+99%a!2;bUWGOpO2A$qo6;vGAdH}V8p_? z3rJsXzEPUJRUfSyVV(Zfell>Y{Bp{_F(hy%F z^PsvUIOkYNCE;h_&ih@GAB>QRfpG+hG&u!{hyMa znbvB!X5zPhs^>2lFw`ExF;41w(?9oeJL4m4Src*bOWIHAJ3J*KPH9_i?Zrj<;is-} zzJORtqn``#A_lkNuundsq3xj~!_6oNMuRjEkSYq4r5=Cu;F*tTTVz=Z5w4T`u52jM z39n2gLyz|Z zfAhsr)nf-oo!OoT5aUHLWydvNvS>)Yc+V)gqCKSi^ZBZn5s zW9waTyuu%Z9yRFrNmnWlCT{`8pS)~x0<4L_bdE>B$_2-e1bqt-W3!H9b)0n8A}O|F z<`fivgX-{pJlyl@-eHjbgXL9m`%{ zMP83}Y6}A2jvlM$vMquG6CO&>#^;0_GZ+^aNBpgk;_lBAh-As#TG_!{c~mgMC_gqw9^z)mq>3W!hpc zT02v?qol|;9&Gj|69ZRBd94T&ISTdTavB;)o%K|ZB=5RQIv34r=~wXixj2z>Eo^ar z9NlTirnP`*a+IY>k6QW`isO8>D>2JB88pu<$YD#jLP#y8r6BA;A;rOWW_XpGJgYMJK`clw)oG6&9@JE1~KpEw4LQJ>RQvMr61Z?lnx^Ngd)OFw$gqsatt zDPV-Ct@e1wpx&@+Iw$wKf1yoC*`)E#<>L^TLxJPNALJky2!=y2RBBZn9Sw453Rxuu%LW{%=suW%t z2_%SUojOcEu>*x_AZ0?Zn{Ewqc5+SA?L3Q|LmMO*eYpGhyH8#jdW&audI}rYEN3<;Og}K*k&78KIXeq7Lpq0!E*Tl$e*LLU zjS$3DKxa0%09`V=X?XH z(o4RK3&XBZ%xS(W6eeANBk>ps#_{R$<1e!!pw~G|c@i3#TpDS%bS;0u@fYdr6l|AA z#4uYRle9O?B+-Q`^y{5hmV%5H?TP*4_iMgQ%{Nb@LKrq4$CXjgB-}{cL9}6*n@4aO zfn^jbNf0ug@}w5J`5Xxq!6FM)$#tv-4O#J{>Hk8aOTP<;@qv|p2z$mA z!QHl^ef?v~$Q1(Ngd@!bp=Vq7u&M788Hv7Sjgd6P1SEBs;@F(#Yv{&A!Tjmcx~&pe zmJ17H){2E#d11eQR$_pz#1TqL!y24TVOlv3*rv8jHrR>n0d{@tuYC857WFp3OKouaiORUiTGZa&QH`#{df=+j3nlIu!=qgBQMnH@z?Cc5 zvy;tYR!IdT24!scFGdbY(bCWgudh}G)JQLE>%eHw!_3z0`p;DV_&VBoQwYe|NVh=Z6^wtcnkd`TjU3E3J3?=62|4v9@)Y>DQKe zZ**5YG_v#1sZT)a;1vl>)doPIISK?Qv3fEGMr09B@usa{UiRbfr&d!rEF$oOWV=#5t^OPS9Go$=26bu=3t8zHDx* zj2tWge3Z^i#LClclz~RLOeP4NxW-$n*p%zJRLN^&JMmZ;Y2PjPwzzF*1X^xV? z;Y%)m+Hc9Btr)!Q4}*Ux?I8UaCCYC{9RS(_RuXBUgo4EYs8D!GpF?Bq*(TqL^XG^5 zroC^OBEq1B`Jl!Z*`~7<>1P`YDg^?(b$TQ^uh8oHj7}b0*xZPQ?9W1K*M>=tM?{s1F{*p9(<`DnI#4fSP4y7k{*yMxcv?HEl||7zQ+1m z-V25NSlW~27XquZNz>_EZGRWo!W*PaR2kqI^U>hzWxL_IWTWdos=n=q9d^DF6}QQM zginn42!=!ztLNyTv&m?kkUfI^kv3mjB&|;{ki;@RPzri})2f<*tq_~P_WPK2!h#UY zyz^r3-jnS6CfnGqkh6?>e|4}BF8yFul{B^OT{nT)@6Sf05b1kS~vO@cwxzyo2|0dRSM?Uy?rwP+`Q*Ihq@ zv;>M)`ht~Jm#>{>wxaH3RF*qLaOI^reavM_+=;TZlvWQ0Ej$)%6Fr^8}!qS3kaF3y{+O>l5hlnSd+O|a&H5? z{+>J-U%C`Q7^5((KjWO=99_XyF%V2&V=75Y=rW*`jw&`Z4DD55q zx1~G6-;%)-sdSlTBf!rN zMmtu%J&@)xb=vU)1RYC%I3w?Pua!sHJm_c;d09RQ3vgc-w66zx(G5+38w&Ri5ebKz z@fcpI^_P~fMPR+$y0-piN&m67$5IKWGMtH5g3qnlI#Q>du2oFa;Lrsb=*dLZfsS!; z$aG4G0{|BJv2TZYVVaE;i#Sh|AT9#I^e>dV2azt2;-|606OkK#B8%R z&_J*_7;}`F+t9#;hfD9CpCNqW6eu~S)4lSeQMiJlNHvr3V}=)PXmDb7AZW!m4x0vu^36p1j60 zJ}m0hff{nQHre1!K)j)VMd4@yucp1Op~+DdHP>Tf&|tUkP`vVlRwdlwAS|mbD2bxf zE&0u%CJuoiruKzYihQ8pdM_^g{B)V`AWOwGqJAr_I|clI>23x)OPoulNA5Vu`5qi{ zq4wscu3ey}VC-!$pnwc@Dr^BhSHLOjIz7fh3B22}m%@a|n1|9dZf;T7@9;085-Z3Y z!T@YRghF0{PU)_TErS`)os1({aSJIrRyi{eT#yp=qgTrVb;?Wf`qiW4E*AQ%tCUS$ zA#~u-iVO~alnM!0EJM~vlvB_|M+PrLx>yVF_lx&ygA};DsuA9A!T;g|Uwh+%b19X6 zFBiy(;6T}&I--g@AN$m?F3Q-#zym&J5yIOV92ie@gxF7_19Ko79vJUHlpeylwFpE2 z*_g_#guYY^FglNZ03a5IWFmfa%Y=GQMgOlYl# z4IMHa$l~}cV!5~@OFj&wW_`feS5PV|BU;ktq_|x#Dj_hf%a$eYJ2<-<@W#{5X8Vp$}Mj4Qg zXo%Q)%dGmG*vxy%P#U|8~{r|w7(sWZJh%Tsxu#V3<8DGqZOL4B$3A~B}8th zP3Mb-Uo7BJJpgV(YfOJKz(5YAbrOXw8GIQAK^u!VF=@rxUl;7(xO*7o#%AwbZ#+Ap zfBJ$2+FIMK?w4%1!sFWya%O`w3G4#?7+Q@5(`d_qkh17SxJ?aiGJI&Ba#jXK5}pD1 zpEMA;gA2arF&yt)NHKJ(5J`Pbo7({6LMBQA=vPMEzn23;+dOI*qrBb9qB2Q3$Tnfh z8W>6MlrVM3lrT}~ap?CL;X9CFH2zBGe-I-;98#ZYg)|l^;{YC&PFraNQELRQ6a`1x~ z(Yp~DnARl)G-#toT#}T9fBjA1PqU|9!!Jn3NYP5DzSLmu+e`4muI}aQcDy=p z6QXZhWz1U}@;V7mMDyHr71+F2C@kQM$exCLVKH5b&WmLrMLO^o_J9)gPT={g1SuHw z;F<4|Kocnly=Db{ib@so&7728kpOT@FGmSS6f3AZz2h*S4^x7sXV^}Qe=^^rl7(N2 zj_&d8L~uXV4QE(&skoW2C4~CW%H|Yj8Z8R^WAa$w$Q=5>ht;u>IQdw@#qfrQ9U@2>_tB%=Jy`((3ff z=j(wM$Kc-Yw@4zL%4$(&e+auN>f8$qvNsy?)AzT#P0MMw3WN;}Z{&sRduK{K2WW4e zm3QH+1rKo~ij^Hy0LVOe9*SmgbYJihtB(MQ&T}uTjsdTyE^RD=K+mG~VbZ^uQN8)m zWOsakf=z6_BbjBi7)WFka)LY=AP-_?Uo|*{oK>%1JP*`z>T^0Sf2X4{n|yRDgts{7a8043Kd7s-2|cHPYHIg*-0cZl$R z`yuime{xrv($6+RE4kN`+bHudX^(<>PeY>M?)C#=FOdC=80(fQ`cbFx(7|BOvPA7 z3DWc9CzturBc8^}sH+L@?1uCpWCdM$-T-?&3{>y)W%EjdL4Ede*J@^J{aC@-wf}B( zVO??xv3o%XWy4`|WNG!n?1XGj=CALs}9z^lT5*p8Eos4dO zHGLwt8&uc9)`7H|3xz@e*6+ZiIf*Y-RP8xVe*C5rf1j-(USk*btHa}MRE%vneQJcz zn{icxRRhv?fr3t3qV(%xKl+)pxNRM9kdL1iaPJ1Xo_<*aT6qdE&9P2@gm<5QOQ47> zAaYuh-Mt*g*hJZ;5E^kZRx7HhGXJp89c`>~J+xTuhcPA**Y@wJS)dU~f7q%mpAILW zAt(x;e@-jtp74J6u^TDyNWZczt{$B%Jfo)+Z8LxsdEee_pHm|q4t`kpYh`#7Uze^~ zetZfD2D0_$UUt#0NJaL4p;0rxUkdF)Bpa@Tj+0tq!jy(p|U^fBzsV87yio9-s`yZ1FRPmASIpv`@sT zi+3xz%@`=AYYzj-kU|dCgGPL5mfujdqG(Ndburj2_x&lpwWouOK4q(ROEE14c%|~< z6DB-eSPz+S2V_hM4W(uvom`a45Sbg9sz{h-ruLYu$#^y6KQGGup?Tlp?;=2`P0 zf1g|{G5;vmWWv2j%2KA}yBU;N(xQt10v!ILPFPaeJa^BjVmTBFi;w?oj9H|p)o7Je zuJ%Ssp?8KM&P5FD+vXJr!-hwT$G=zWXQHhQV$y;U6D)xmQ2ml7VkL5lG2n|}B`bbq z6%tFuX4TzgQ=D)K4ny5)MQOO4wH;AUe~gADlr)H?K6Mx%v+F0zc?gp$b-_Ud0mc-c z8=fp98+Pc;FZUi5Y$ByH`Bc!a@h#=8va@!2&Ic}nWkk~I-?9cHDS6!!Rh!VDO_#E| zSx*o5I2j_S4QK(IiFnIZVR4oJksK+jE$TzU%E>1mWt6BdTJzjKRCM$sMihM&f0hYST{w5 z<ZyQ?|h1@qse{Ntzeidj*$rKW;(SR(9mde}fIFLRl2wl3XksI!( z&U|Mz$+F%`@yWBY`v`MWC>AE`B&s_0WEo@CgL%t%mCSa+Pd0f2P@_Y`%k=7s?!L|B ziZm}v!1L&SB&>+Tmo_x)=tWIHmbHGGp3xY9Tz({}iba1r-uJBK8(PnQe}((w0T(UQ zWF_t4+9?#geO~<(VAzO_Hsb}NuOv!=*~{E!aY8|<*?@Y6d6z$Tlf4gfW1g=xG*lb9 ztQl6X;VG{lp-(>7?h1)IyoPM^PNAaugmwETesNzX1|9Gp^4JGuY31nAwU6b89v4-^ z)eQK*bh>;%qo$t-p+8aze^422tKM0h>q4;ee8A9zCDU4g0%YVXCdFhTesftk-V$+< zw}7yt(zhw#4EQGD7hEdbpdh}G>da4=Jm{?@J~iO?^rltE2)zWA=r*+H>|d12r~nZ) zl1*uOdmTcz-)ybOf57rjJ!5-x38}k-VYjC|)63q;*Pl$~g)}at*nA=2=Snw)@<|zW zkh~vxoi)8Xz8!=J!@Ve@sEX08t$9zyPVK49QpE-+p;ay&2zrsZTUDV;F8R+yFHO2y;lx28hBmNac|unf*fXsPGlLEw5a_KXar= zBpf<2LB$Ru)sf?Vt^DEhiwx=YwXxq9GOUi;)D;j#e*xJlZF5a@f@aJYY&a7{&N91S zoGnAn74^_@{}LEs!J2eolB<)rN770PBiKZ&inaDrOlLaLfwL$0L=LL>Y1=jaSqI9S z?m_16@X#n!H%$Dr0lT03NUsm$Es)jbWo4N&9*1=IeD7*!Wtm zc@9+t2n446>HBL-jgO?c6v?i=h@1z|0N$e%I{JB^7J6ka7RtpIo;hG@w{!WKUmBW_ zGiAw}{*F>FIYSSk=!O3I@cq&hf3Vx@q?28Vxpjl1I6=V=l@ueKS>ABss#gEJ68PlC zp39?~OP=ijV`d1p?0aWDQBClDV$Lbx2*nHDWqC-dcX1awPArtW(iLy2N++LSZdXJ1 zCw`;Ul}-Ti6LWx;f(nG50LXNjQ@!MYSZYQXgVp?`)na0i4;K~XrH{ZYf2zS%E>uE# zs3&|UDEQhC&jHE>&u+#U;KwM@52}%#eI1D+p9n?-(S!Ot?Bh7opszXwXL;}KK^g_J z5xrf@u>%nhv+dpi?|lNUV5po|kNU?yRcqX29F5Td@KUb1(uD@(TFx=!>PRO@^A#_v z$y%&+2X-J@mmMZk(onLze?)9FLwlebL#(}5h`gcgqtgkP3p@w;4M%d=Y!*(7uBYTE z1KCB%SD0|tW~X1gPU981COFSOpHh~ zYP9U`jVKPcX^i%R?VeMw=yVWR@*K)%a5kE2dbi0~Z#D6+lt5=d;)?s?hT&YhP1aq4 z@nS?;qcle-8IGujf3`@i%Q8zL7xiqVOu?12S)?kMq}kSwx$9U0;4=164rX zMjUg1uU?d(9=z4I=Eh^O&+%jR;upeJ;Tg&}ln6p9XJ-gMe*^Q!@w=;WySQ2W2nSyH z?z!)y6?S`R=Xo4-7z8O9{p6lziw6~1DoIu(Sy3N3dji$X5KK!ddP5)Za7pFQw%cI^ z{Q(it6p@xK%1uHar|O$>4dPKa;zRA^-UQD+)eA|rYI)N41aU6ndlr~XKd}Lfq7u?f zv|X(-WY^z{f37g44=Pp^?QNt~A{eEJQ)PwHbP{T;T+L2FkqEbr(%jzwH1<&8Wgp7N z<{l+i4rPL@FLe^S+ZG~U1XOTX%Nr{7!FB=RTf&X zX8Gvi>7-5HrAYkGh&V{qZE|}MKqz>k4D(+xsB4Dwe|LS~HhQ4x`BM4=vn0~`aRE57 zPbbibhF2UOm?Cw=vchf5(vPe9D_a?$4+2_LCc-sC#{$B#nsWnnau@pbwIar{er(7K z@-J8(gup71@JWyzCIN<-Uw4)#vbq^}jb3q`dsit=00V<6QRYRNQ|R@WRiWoQE$|x* z;`6k|e@t2)(dW@dLwm)NLJC4dB=up6^@g>ZkyT5-7G7yptfFk>cnzw>azf?r{3)Q6 z5WT-^>jVmudKC2xJ0B?YVHU{Q=I;`*brZUu?Fxqz+yS@HUBay+ex-40X7piU(9W1*`XdZ zz`MxifF`)LZar_6`h9i1O6-6!n4tZ!xR-T45cl?SM5Lo?5MzW;+@cDx_RE9JD>6BC zf1ZbiHAaj-w$5W`TcAm-_u`$!NaVC6=kSnWN6tBO_161Y@;{Pi!=}~M1sgh4pY!$^ zD(ymw=Hua1w7`sS@$;~HXI#p@T3VlCfV6Y#W;m~KV#yVV_;|E1$L5VKQllys*H7S7 z7m;BNp|~mF;kYb$j-DXMerz9-Do=e)e}3B2jALdJ<78?Pwn)Zp?J`W9zdW#BhapBY z*EKwQb7g47$(d*NZ8A=7h)s)CBsZaH~)L7M>Q4$GHv_^>*nj2UWkmg`-o zEYr-7sSmW-jOdXvt=)csO(`2=X|@vYizAT8O!R#ilEbAtOy z3aRv*f~HZ>->Mwr^|_)8*Qmawd%>A|$QceaXEwe?et6~=^S=@b%Sq-RExc4ROczg4 zu-ZMCMD~*3ygz58qu1x_C`SD5f6Q1{U*HxRa`l|V6gTAiEpq*;!$mtV0Wxt^ zsJ(2$dH-toc(l1wf18)xS-Ur9 z+&p(y66oaam0~4@gUYSTWRXAHYuICe>{Ac{lrjuPu^nANrXA0J>jJ3AS}c?N@yCxp z-0cFYrSJjEb14xmTB4q$-2Rm=@Z4{jL_tr}X?Y}9NxbcrAF*b~uIG;+Y%t|-c>vB^ znj>VA{BTBV>2@feL&!%Yf5=^nIOh}_UO|BVc8o^G8#^J?XjG6-R`?BEWO{F%PoLfc z4)<(U2bwkeXvd5IE)5k*<-xnAYHGu=i;J1WEMcv#+>eH0f@_ z+pmybP3=JIu3;c+u0bE?z^5)KRMBQD9BtH>B`4zeU1`67zG@cqe~Od3Ijmb0InMH> zC$B6U&#WZT0LN?EShCN7Y<1i$1n9&DPXk1%J)EI`N{x5{vD?{7obu6ag1jfioIm68 z2Xcx-U`A)d7fB;YH^`8)PQ(yXJchr(i$ENA6s?X(3={3dD$2$ZU362Ymh`A>)AZon=FX=fhzs z=WL=(wirc7HgX;%z<1I2vkv;mj@M!A_Z;$=YBz3Rf%%W%T7pAB{NCFX4cni9zIgdE8zAq6w`_$)-#B0AzerlBWmeCBoR6&-?p(wzrsbZ`}_S`_QJINoiCmcZB2 zm3HR03Qhhh3@%8upA>}~yvCsvc{r7e#b%(mr?;_#lKx;86Lm&Zqn%v9U%OGmcQuci zh?>G`sSrDFy`Vz+TgGGWgp>m>TZv)a-kYs(>MQ=Xe~o19EyJY76*9Sac;oWgSH!5d zy)H*2tnA|yr8Q+cR+N0)Gl@yjvcwV{#32y03RJLmZr z@Q>yEf4jE}W7z~6f1fjD0S4qDUuWu2#tP%b#fT=hhgIRwT9?h!*geoOviRiAJ4-u} z{WAB^>@)|Ev45wZTUpD%F6+=r7C~S)Rk*j7!@WBzRa`>R2ozsrr$jm`%`RW={^5`i z9wELU>Yb$t4Ci@`X14z>`ccG>?!LdW*hm|~e^_1BkN1&D)`(j%9N!eJpd~z>JK)`_ zUvOXe`21v?7$X_C3E^zudk{Nvlo?lYSA*$2${{KZmyhZLOFFePjQ#ve@ECFbMC|^R zYZe`%5MdT%Kp$$+Bv=<6ew_68V`^xsTVaMh} zmbHMhJk|oeYzUdPpvKeL*&A%VS$o8Kmeq&X*^Qv^0WA+xuq&MoLv{Ku5e}MwPs^t3 zz6bbJ!9lk4lJa19hJAI{!cDfF;w+CRk3V$XkE0Cx$?TQm!G@4(@$ot>hGQ>e;$vfa*`eq?>os_v}kM8O$`cnhShMGz*49-l|QF3 zL4O)^XhG3t`frq_k@IzJ8HUn#^>Vp34O)CghBAY^PZ(DKFBIz6*OiBA#Y20Uwzq(VybJ`reL3$9c~OL|eRuMHe7nfCOfBs$C9<(G@Wbd)-b&J?#I{Z;0+(oHv*4sBj5M&*X+uhlp zmh(AZ6@)^$oiP3yF*ZBE)~sJwUv2WX zt!R3|=iFAJB)%8!&F&p4Cx1EO$NizY8sswAJd_9IFF%KCcqtuUNlci$A6Px-kF7x9 zO2wl^%ykEdzIzX;U#`b0_G7Aa6#V<(Zsm7`p$%VScNO=dgII?fT{P+ERe8M^li_(6 zV!2>K&CE*k3h*-oe{-6iC@5sV7!nq!XiN|T*5Ag6?@&Xm30lJJnDFj-y2p2X({JNe zd{(wMzp)gP+dPYy)rCp)Zip75OlAUd*OHRM%Qi1=9jBus+tszDd0UoAP?_6>>QH4X zh#!v2uk6cK9yZ}^WW%yMldv{Ot;`P*^~KV*+KvLXiM_XNe;A-h0PQ?Vn*bsh-$y>sE^`&CeD>t5Xn@P!( z#Qcq0P?g_V;Z5P128={YhDYv8K7W7W+WDe*EM;w#q+%_l-FFfQD`Rirkuj>)T+^hx z+dgp}_rK4df8LfSY!y@D#u{#lxb*xa32u2A&saoAK;$b|*b>*P-amox?6X&*X-YVQ z*102FpaeoHME zwWeP39Mo+E67UaS*({FU=Zh&cYlEXodGIlop2jZ9f7DmT&G`oLI5HYbdr_{=Z6f6q zrAc&V(co@*ZC7ob>%=`H2s+CEE+takRqzxIW%Zar&9KnFwRnDtD)t$)vG6af?nSfiR8irG=}Ov%4;layn=KM z)datBf1!###FmT$8k(`{C7#VIUvdd@>b}i^)uPwWeyv6G6o1CU=`IhYN(b5)pt-A~ zi?^QU6SL^Dzg}eEYz>+qqpagJPWN13DE{S(bSSqtLWJW%5Ym#Rr0Y$ z;kBymX=-MU>~Bv>91kSDG(TaWqU>>FStGHN&Xedo~JxAe_s98f-mFS5AIqzwK^8Weo7B@0OQE%A%Wln zOLkpeV;j*9NNABh-CnZ(T+kuOB!3k5Gfjk3la=m(PAPx0&d-!mPg5-Hn0|NV(>e`S zBdUChcp}a~)385G54k;r!TYj6Am#~GzECUH3hv(WaeCgDAMA<^>c*&0a)S+8f7@(= zskJ5y7aU;#%igS-is;y(Fp>WU!2WN{<9|rv|MkEB>;DH8|94KY^Zz)--+ws8@BKgf zeE&lzw*8m-^RKY~7OAY{+CLOBpebpa}o-F3WtAQHu;~I_a}F?j;9g~l?=n!^xw;i z)n5zGzL8bodxn^QsR6^kk^cr2l&(nUbcCBy{u=}T7f<=WNwUUG3fU+gf60$w+~zNy z^siGs08zy}cZQk}!?61CPfGKrLY5-BWgHJ>-{jx-`!^CZ{ylVcxNA)+&DALHFqWXv z-CE6U2Y(gg_6d;%IS4l#kIVRYrYxz9->KW!sn7WAmc~7JOwJ^$Ec(QpHiQ8r3fvV= zQgFii>^rkVU-Q+lK@wA5f9)F#R?u)!0r`aB66M$WKN9va7$F9NpkVX}wmGVhNt_<3 zoBF$t*$FGD2^D_B9KDUi)oLcd+zaN^{R=PnTda*fvKvjKLO+hO7z6lOZ}i@RewP@$@vo zQ9ebuNBFwpuv6J2fR3y-^8J%pBH!2cI~JdpS6q?Pk9BQ{5S%40t2nm9AR;`#qrYf` zS{s5yLP)~{{X1!Ne{0I!-!}Hw+VypYP1I`qaT3aTa^=O^YV@549D9N@HxF=fWX&K4%BT1%bL%Iw3PeZiFl`2@YC@B?HzP?<{gYDFci$KD^3q={!hD!X{1r{(jBW zM3!x*3h}1yDKNME7zSv{_X8Ymqsrg%!w?Cy>0_L-S>c$Ee~-zl^k{oMU2!O&k1)FG zzJZ@fQmV=4IkWu8$xzA36kRCGla?kdmZl*phcEV$KB~23zKvbemWY(3&FXt96Tz92oS`0B|^$(I7Pa&K&- zD>VU{UzSEaVrIoW_Fs5(ib7E5+GbDuZPt>kX+MZ)iPa(dH}P2mVo+Jq^RncPvD`Vw zxEO)hE2QV*w&2h+zDNO8<4)oHWgop@OrUzd9Itugf0oC6-o8cV2lNrLu#)k*C<{rQ z`bIiJZ$wtYH9~H==M)29n#AseJi#GHc68U^6t=T1ttuyr13jcp-6vv{D^Yof_bbF< z5;xOMJ10EAn$X)Q94xlDaMaa|<|b;O6NRnI$F^DdxsA}sxstPRegU0$Gbd&9T@Rp0 z=PTEXe+jmn8My`vrX81+?aCPROPdkIlFk{n$LFB}yG{@X6xXTrKy^q>_dg_>acO0v zfNSS!5ibYCTa=Vx>I^Hw*AA9I1>Bs1b00*LMNdI+Q;1B7!fhBkbUDe}s3+tLET1zR zM*#Ui!@^KQbez?$P!=wM6Bne?z(89hs01cke>w%0HQF{w&Gc7bsS!$qll6?~L8Q-5 zvj~uiB~YgYb&2~bTA4fn^G`3#FHHA`(6UPKEpIc3h|oLaTKnlIU6a(#8!n($a7C93 z5kN8GSPI2^M<;9L6~~ukRc}C?cKm)lSZgKQ1NNcdpuR_SAN{Ol)Xlt!H`Panr>AFl ze-Y>y8P2tJ{AIaQ%`y6DI!%Q#!7fZ30oB5prCh2FChD}3-%bhC3=$RfLQ|3IGyp-O zAX6$p;n>JEzXPhi?K?nA`am#zjL&4G$gcpavq5Mbi>qVB(bO!VTBSLU-fzGBbEdUh5P8+zAb$h{jo)j$y3X1@3R9*Gu zTVRCQ$i@HUB>!}8c$euXgywG7BQ7}4lu$Jgai~;GuF~)9Dg2q5v%~EK4U`uKRjq=W zs?=B_5C{)U;6}Xbf1}A6zR1d?CHd}p2-M#{h7MLTK*mp3!g~hY{>I)7q|qnP+3Sy7(o?uKtB!0V8DUg&giAZOoTb2ktFp_?Ur0 zr`6A*9eiTwuD>~e&LxVj{Q%`Mcl}3MZ^IZ}CD7w}3#fD=u@vFhTsDq6d%LCpIHEsw z(Tc9RUC#@`U4Wl*7Jkwuj6k~@xxg<(gSPEpBAgA7bF01rm*)tz|o zk~R$ofXNVk_}ud#z{k1Ohm4aB4xqTqI0p1N++G>Fft(~XY?9!`El|BnDA@NNG}%t{t;MQb6+P?G1ll-?#XxQwE-~b{GfAMo@BpEk)JwXe<+K)ji&p{8Q|7riiC|DFEJA~Dx4^&nE(Xxd@qEXn<(dvw5S8iX9;_EuP@c(pHaHNhJ&e* zc*A84*RFQffI~$|@HI)Ze|?1%jOP%VCgIVC;z$&(W0anfff|8?}fk7NQRqG;BIas>;eJT611HDx?H$-k1KX;%rx&6Kj?2 zz#FIxmezm=YGUyDs%dFGx`_-!N>(8z9SwFWk%VsRSe>s5(o=oXf5pliLEDh1N=*oLa07+Gc#J7@`0S;4$V39s`JlDZ>#-GQO^poO8O zLK0BbFBjdy^h%+fyMNLU@Oi3~;ew!cqIFy|em-|1@>$v5e@N~VH&5)a319>PPl+>cF(PIf4B<%B?7>J=(l0mskSC%^GUM9mSt0iV zg$Sh*fAzZo)o*{ZiWN)74?nkkrhIt8y=P+F^268~ERO<%oQJ zSl|2X4k*-+e=o1I*pp0PIkglcKTXb@T6=rOiTV17SJb2EU5|cRH%{w$tarL0PQN4Q zmxGo!deqFXI0{tV^HNlRMWuKW<@m9f3WA_mHp~tF{cXM{y6=)U{9WW z1N-ZRa>)(iUZ^8-6|5d6!7_G8$iYKMtT$?DGP;XDK2eZgLe0gz(LDdG#t3mWm9Zd5 zU@%u8iByDp`0!`MSUwjB2Hw2}e^*CGMno}IMOV=VKT7<8Xz*}rSCpj~)~_D5O*uuJRf({B*4Icc1&;6m4O^fb zV<%Rm8C44WOus9Fhx>q1S5HT|V2n~|+``yuTc2Zi7q*?jNzO(L@a6XSlPgrBbe}r<-vFM%7dTu=_{4RggSEm z$*nnrgmn}Hz`kMnx^mT^)T>RX=eA%TupzI-zf-ieWS?3Gx(pk>3@hQ_`BIk8af z56g)VyxK_i+^9>$o*45a|Ljgp|7@28(rC*%oCTbrLseZ4 zmII5&g9D~vyc9^{OdTJe577lL`(WlPZiOY8#EH@S1AK1z6}(fUE2`{8A1*!(ae12D zM1liRG@uCEOB!zudoos_q)NU4n&rVuDl(|NUMm-{bumlmP$INz{+%m?f9k=$f=dlb zf`gc*dNj#noi%4YlC18N!&U13bJ8a~Fmp=T1E69awGbj~*yVoHHf(MXu)^jKP>IT! zK^q=ZtRyslwbprN=nYzL4F{59Z@NiCX62*ptD z4IfIbIwn|QkE$>}FP-fSl%{ZkX?3~#37>m6@6do)aA@}g-Yt$)e@h+oe0+u;-T4~` zqs#F*&MIU5)(j>hVqkYAlm=_K0#C65=N&xjEyF3p7PxUU<@pzAp^u3t0+wU}o zlNY1!+qucLA&ZYt+$#0u%2u8YRfjCoYf~1V%IC?iiy~f&SHjs~Z@gAjch`<0mNn$! z5qddO0b^p+3Lp_Y!nsOfVe7TO?8w{m1ea(x5!X)L5%60If1(Z6D@~Wr&HFpek;zyH zGOyMXp3~+xBazzRhyj=Z>fz#20+Je20=Ii7T2=4wcWehndC6!(rau5aXumcZ*I4iM zhwnX@dhhK5x7vq&NFZpmrOb}`TN0!`MnuLdrtk(MZZfycf~@Z^Gw1jF5msx^xgjCh zk|VCkY}hX)f7ur4X4%t_$TFD65t3C_A&3qA@kvBCTT=VP@dC(@G)z;Vy@9^enk}(V@?qf4IMTfYXR*o#eH{9ljp}a=oKktDV5Xg=*41tGcb{~Lm zSg;uUf6`N1wo9dj++JW;)*tFbO?E!#cvWQNTHF1FYnrw&E5O$%A826VXPb8j!UH3J zu!afTe*9lNv z3AA3HVyiG9KpIfAx{s{*wvUchQ4;khwJ+Cm%AbavN<9xNikvO7j8gY??5RBZXzBacYRnQm#TXdMf ze>Jyc`9k(p913*BU!dr~M?rotAdBbr8DKyz*g>FPB>gG>j$V(dZGGiQYXKaJc<@Lm-~!Z$4?h>3M=8!$8%&{*v_<1gVN2P?i?uV?EorT& z!d>@n& z&typd8yXtkCO2_17OwQAO!gnP{^p+;1v--=p4WJvvAwWH@HlnI5)qEhS`TuxCcdlF z&IsYHcD(o7bPOX}B!8nz0TZ8kXB-@JBRe=_-t z7J)PAy&H5+eccN)J!N+HsuwU56k9QDbLbQB3yj&vO+BT|Owyr!k{x;gRVX;HT{7SF=&c{5!%8>asz=~1><~IzZ zx&nha3`Bg_^SK&%f2A1xhIdic=;gBn=3T)&XCWxmzP~+hKZ=>sy>4rt{T0@aV;PCh z052d1!4!wnlsur2-@LXleT{pYvEN^N`0tNG^+U(S?IH}H+-BPa{EijGnMHb2no)Kc z@!oY^rAnlkb0iTS*si3-FI7=J0rd7dv6f0C=6nZb!sob=3B;K_TF zN9}46X0)?%wxCh;@%%cnLFDjCxkC8j$0QkLK{7)6+a;)VB?{Q@VBc;DqMn}}qzco{ zc)hdNw_|9Z94BbXI!RNlsI$sxRfeEQim!1#7n5?gE@&m)7r?Q~dwR|1KN-%pz12Y4 zL@Vkq?ppRPe{s<7U4RVaU?CKTFQlL0MudT48+3rGIqqH+|u zgUP$?BT#%}mMxf66NSBG8yQ0$mR)j7UrR{5Z5cZDN}yMhpTWG#Ca<$2cEHYO)&l|n zuLLUxf6jC~U>C?{Ht%=kip1bXBKM3RbX!X?eIy3m z^&A?smNU`ds!Vs--+i4>^s$$IUy;b%wL_xJjV2Br+MdguBgz4|ljGmCQPxs^Esmfz z(mk2#uX6#dkD2l3LjxS^t;4Krm`xdps^FT$e^8>fIRi0N_9JclIq1D&LZOrpU>{xv zm=o?@ex3vc*oia~?jx@_$hFL2tyV3Q;IBR3?V ze^avh)ZV>}bMwgevMFqqb!+9|-KtdarZ%v`IPgt1cr)MPSEB51q^wYrSB&3C*N=F- zD(6DZGp-a*WaKu@3#(4^ZO}--=y%{@dTmmCO)e2+OZZ^vL4~Bg*83mtru#deJZ38N znUt@LeP(-l-xR{|$lO%WavYAl(RZxx z+RAJK6r~^qQ>VPs@{%hpYn9LQWGlcKXmDwg%&I_4%jhXbOvL15utX1lw-+KmofM(C;TuAPux%CqR&Nq+GocI#kxSB}!GkWKz*T7J=Agt5A9R&BCTN zFAf@kMPt~Zr9mlCbTH$%N1holmY#s;zfX0`vNY(IQrf%~B^8lH8iA7)0K-+`v8sf- zun~CiVTF(?RqHBBnhmFtqJ@4ve=jmC0YuE_$TO}_56(c<`LW%f*v4BZL{xeUltO9D zVbCf&;mF?LDdcHN<6?|dpj0y?C(d1qd|f7OD+Ynlpl%D7As@;VQsv`W_FF`|bJehg zIBKeaNjnO9bbt*-M?1g^+)PUQRVYrai7olOnRW~}8?KA#6hIk;zFh5sf6sX%!$9#| zh5TQWf)Zxb2dC$h?G;WV6XYVH zZFsfoV%ilcWW9XVYt6}gt*wb>u;`d!opOHAX~m7JG;L@$4SQuTgwh-7;vat%6WHc)b2_L&9f5*?#fr49-$J$3MsX=;u z4F&5-?Hxw*kqneq8P+!<&V|LUjolmHR_RSnInTk&Xpk%}sNK9#E40+`0BC8_!qHym zIu%=b-0He#z>u1%U5myg(^n@myy`qz2fh7xX$vEYnJ{zGS#6{nn* z{3>Q$6`^~dE%TT*{PH3LC?X`a&3EEPU&rPC!9FgXfAIVHjS#9DcJ*W(x>{=pTc_m8 zV6%pAWFvm_PH9&^w6Dt(J$vIM%|Y0eYCm^uf-+>z>%`+6*^|lM z`=5?NrSD3sclXSykSr5~tPSSnT3-dz%Lc*zSnj7daT!+7Q&smtg7&(KB-tyB!6{Sd zGPA5>EYhN6TW|@_X;{J7*E`Xaz~*pRT%;usXhk^mkAH7Ql`BtbN(94X+l-djv$^(e zRfD!`enkF2dj$$^hC?y4h&zqogOq5FP!Hn#aTsl3OGp`6jcey;M)?k%2Cf zfcs>)hY8S!`Q44X8Xoz(st|vCQGyBi#{nd5gVbakv}X8PYvN4%V8{g~ZF9r0nKHBU z;~8LnrGJjzt}-71B)pZtbst(-imv^6z`i?!TWT(cN-QLEQ7^IgZK7xPg&k(g?3&m! zoq7r79w8+VBtoTC4Co|E)5w{vbkNTFG8q}*iJDnst#U1Vt zXInpEx=R5o3}2fBq5$A1FZI-XR4Zjh!4mwl@DPwBfqTy&{Q_U#xlPwGB7AazMU-uZ z5$ZGzx`Fr&1}dY61-Ie^!H@t|9R3R7F@Jg_dW#QL%^kHtK=8#?rF9?n%R9EtGc+Y* z+qjF#%A~>7sk)R8C2Uit+e|GG6Rw%eL9yG+j5&;JaU6Tm!*o@0X#VUNsj45lNgcz5 zHyVr1E9qq4a&aFe$r%bZl7C6ME1+o2oD9ql1;5UV=(NZg1<>xfI7UkN!5P#S5P!v5 zXIb%(rUvgh?6;<64PSu@A1NzUQ9>joRoIg|O56`ADy(_84?x#^ol6)T<}~ z4y9Ndx8i}uPrNrrGuikCi=2Dac|biV0W0u~c_|CGVUk5)kJZz==@*)c;l~f!BB#gq z8Q|DC^ynCp7pWsz`$oSP%xT$2ln0(d#8nbH8zRRNeT-Dvb9k`wkjly!h<~&~P-vgt zCro4>W6wiWvPfs=1E|W?iv%|6fK=4@y9A_-z!IHBo07I`B>6qmgWHP@NK9(_A-k?( z6;n2<1F{pl4mBz0l?Gepcl^r1cBH3M`-4KHkTm$Z-egu3 zBv}3rWB;-BJgjRAq7S5ovMe{LoOA9eSUG3u>1%xdTKk+m>Q>|0!+#AW2;w9F86+`( z(un6nO!<(;5ZNUvf+uAtzFsQatcYjf5Tu1hHh+t}!nsOo&UvWU7J_9+c2DR6i3b;4 zf=fsFNh%a>ZTm0^O*fVy_%$uHDYG@dCx!*h^?v6?i^=o{Cbx zkep9--Em@B21!h!mnTU+`$3SLLJas>zPln5a)u(Yis02CLXGp<-_7tvXF^`RT_Z%6 zr=xhv0`7TdzVlB$^U0R-8S&dVeQvjTQj@Si8es+w$VWUvG=B$u@^I^$JIy%Tv#UiM zdop9a?A6?dPS3FX`FbH4F(6}~UUs3GNCj!v_3>oA<*MPiS%R^ZJXO}T`2F#uO`;#z z8edW6=M9kV5ygFfzX|+xz)e0#bt0s{UZ{GMW3`?i58nSqTV5Z0H81h3r;wlrrGBY!PxO2#bh5Q6wxGd(R3zjqn zo%9}I+$&MW=i{E4pVWY*8F78$TB?&(ENYhDepBI11Gw)pinuoX-jpG*?}yye2FR*jG4iqU^=`MH%7Z zh)$P~+{y0+?iaB!P)j15m0pL`_Z#@xI6bOXbc>cAqb%3W)94=IklN|^IO#<^MH$@Z z_v##hh<_`y^c#YQ^au$UMY6+c^^Mm+ajbkdH6Xuv=cXZKZxZS3zbcRswV~XYtd$53 zxdK|DdWT->wuyE+OXB!R+s=~wfvgH;Og;o6 zH3MqBOUgKFhYg>UV{NwKmEL$!_v%@#YoE|2eXz{)48K5OUDUX+`uXb1VX3X0`|JGb zIDcK^O<^ZZ;j)f6QL7oWGebZXomkSBoIdsGK+~|91|bIhGIPtfoZFW>*|Fc<)Ws#7 zDnDeEBfQdS5-LTCN*!5C#&g&*XCQwsnR@HHjlB1Er4UWkQA{<#!B5+_ktFwuP~?8* zl^i++Z+ZC+{-C4>>8^aZNkyf<4^56U?|)j6Q z8O{={GM6|hT=uW%ya(E5xAaIOD04C=Wa7oqCnITmj;^7X&NQk1)oJAW9RdhGBXssP z4PAb2Zi``&(w|Go1mYF?#g6<>GiKMd;WL7EK11aNPe%_;8>K&SySlvHIEWk~%4lHt&qLVc&$QDSt?pnl)E6!R$fjo|TQC`qKi3dyV4>25>t6-s1 zizNxiy3p2VAZvJeq9SqKmLwzo{ToVQ4=g&0@z+lsX?~12v6O^5IJBib(SL*x9!Wk! z{F#ajw4@}ZeKyw&E*$X;G{3M$)mB+?%BB?#Tw6fc5#~B{)-f5%(>B+bx6E2&)h2lU zav5>@NpDGtI(^JKKsE4N{$9ZXB_#&PzXO&htIzHtzDZrfubi6{F6sTVWCQjK?`nN; za(CtLw#Eg&%$t<;Tf6IR3V#r54FvK-tzL`I%CnI01*=TYN}0*O>I7hK|rL(RZyodXW(sM?S^Tt3~DLFweR zEF9&%hB%qJA$xz%LE8FRL$cNJl+KTR$X*P`J&55MD>z8~!HV*m5gsY$NBqLm*sj-lL*q}9N)KbMP= zpkOzAoNZ$XZViJ1Dt{f;&kzlBXN8UF$g0OvMvrPrHHcVxD*P0gl};KKIWQ{{imFw+ zZ{#DclKV>mt=Sm|tBm%OVm=EKGmo$XXvtI#Mn;0YL{{yf#oH2J&jTR3;+0 zgZmOp0-GldaJO4X*RpfCUxQZGiW+Tn^x)4p!#XAl~Bq^FncZ$K=XV;)#w{;6iuEhyM7>8VFnKhkW9j5A% z`JuEe!it!34n^5b?*#Ll2kirLHu>)qA&P8fDs9f8M1M?Hz)Y`5sX}pm8ljA}V0I6? z$D*0XU1464+OHSdx_=@v1>Y}DK*@qDcP3ypEV%cKku8so=hd4)!~puX$~VPlC;dR_ z(2&93D7_>~bf@YThx6ftKb8FP_a+!VbuSI!Yk&MzI@`GU_w=oz&bc4n{>#l>Gpne~ z<3oYrJa8UD7wqZ%73+jHi5eE$u13nB^TWWKBD;ktBvVFLfbSEk`$VRK`i|yLi%nT> zMUjlZ+1w5yIX(_}N-)a=N%m`7syMvGj<+uZ>6VnPR0XETqFP`i{`*ex-@@WGgD{D? zGk?Ap`cAJE5({Z4o8giq1TUI1xj9(Zhzlgv+7g!a>r3jXLjHmOLwgPf|2v+tHctw z`n%u?={bqv8F%=WZ64#4gdxR=i5;mXi=c?H8fwgjS#sVFY$Ou#rS?gqVxM?KIZ$v& z(Gi0!cAG((FKhheTDvLTSn@nAIC9H1-QLD#?V)(_xqMKX;Yv{T+&gOpWkPMiYkzC6 zELNSkwMc=4wSBQxI!G6;m-CizBV~744iNHGN1$0f-)v|Y*+aEph0xW3 zi7&=BVXY`u_~+!F2HQ-mE*0Od(x3X^0j0Nf=DLs*a^z|Y2Pq8aKFoJ6~bNmw8>x$n5fa=EJ6G7Ja87!(XLr6q7u zQ`XQYXF)I~riFyxZra+}Io;3CAm6%UWkFG0==AKz#I$MFckr}6E@qYNf`6&z`Ot;O zC6>Lip`fVdWc7n4Xlx1!zPQ$pZ~%_3O{f#B0utSUVW~%JIGyf#~oG^7Ol6!Ae?Hp-t4IM4Mv7n8bm) z&xh%JtKQ+q)V^U1%af=CjZ`J?_%82=dXqyS z4R(PtHK{>_KBXDRTYoBY8@wHZ-#KtT%<4iC_s=qh+Xo?795j;7vw7u8)NB&bo}Vkv zG}DToci)u*{w34VAs@*{cy}VJ;#xZ`GNV!YE&g$b-izM3)E3ADmD!?cJ~b%ALw%z6 zO|(Yffd8Q>-j0ON^@H~4CuY+`kaT^%$j>zy{d-0t1YM`Vt$#B#oat!guZ~25kI`QZ zeb0dwAueDOf(qJtvNpJzK;aLbAfAfYVTJ zCSkYOky3#^BH+|*Kk3Hy>C)6A5S!khfZP_%dFUYVQTJ!+Chud{qQy7p!` z?;cx|cp-J8>|jJXA=>!H*Tg_3N-|(j5nR4*i@+Uv2!EwxZ1U%1fBHTq(N~-me9zhu zp&Mj32|EerHu)sEdE&Ns&B^oa9rW}D=;+j_PzFv6X(v9&+afZl{PbdjU#)v1QM8(u^h?yxpw_|u>0Qn~3u+ZlIZavs;x`$?o-+r&eR6!fQU(0af18%;b zjqyeNw|}#6Pho4(*hR}AJA1=s)sWO-Y|th-qd$H0NEAvpCtqhCj4qAE@K!<3XSEj^ zBPCfs<;rH<(PcP3x%4mZJUvwjRC1}GO`dcF;V2vx@XEv6-+?KYe)aA$bnB*mH+_Y- z(ri)m48>2MuU*CNTrzrN=+o!&3Xf5m@XH1M_<#DH@r1=5?S$IJ^+2P8suaCd1(pl- z9Q^L3clQ=1{3O^r%1d<6NfXAbpWI&`$-MhrL#TY8+@o7FJwN*wrrqy}(bgCKD41o> zmI({rVH0sS$mJccoliza8PpPVF&WKDKJnvMO@2-%Oin0A^B+$le+ECbH+d9|td_K3 zD1Yn8&4vo!&V8H)XM%gR1xxO^U$6GG{N#9s(%HS)ziw!YLGk6ah0eyV5`&(hVj$dzu7?M zz_*1|1JSMB^4;~DDe41_rr>owd@Lwg>|&7bJRjD0EC9D|!`0YygzGKsi*ESc8ycySRPJce z>l2#Eik=pbU>~b15LWNh-(-lpV57Y}BEn=-#~*?xQmFm_3&?wgM5$PH-|{G_{ z8H*Ge1WAB3VNZ{Z0Lu}lOC_!Srhif4s?H}nwq%_(x$#zel3uN#8BxB1Vht%GCHc-N z5Dgeu`xU5V$GZ`2XKPm9%q;~}_KYyL+RF1}{InM4%DjDr8n)G5!EUmI*I2!!Y?dln zGm!{MX$o=M#fU{n+j*xCzpG8Q_Z&^Aja_vP!<$%pK|WqgQc4^-q((A%Fca%9Z8WEsI0H z&tZ8tb(~=oSWYIoL>g|8ld6wvMB%i+k@S8=u5A21nml;?pi1>A%Uo1sT|V=i{N%V; zQ~nTpbi>q&2rS0F6xql=u9gRjf_$R2n%ltNLtmkpZj5LyLgs68t0Nm+SIx{)G*sgv zpUs&MYM0LW^I2f~LYvc<*41Hw> zv%{t-86vKW=?xfxT5zEu>`6_8`m(U+F3;W}DfE_IeWnj)1}daYCP%PdBLu_(P4xNG z_iC&%&0Zh5qi<8!o`NtGt?wz70V3p#oCE=Tvux>CLv%k{bQn91kt)`>Wg`?Kq4%J zwrb0=D!Xs1Fp6`j+Kn_Bj@WmUp>j)noP8Za3o%0H@v)q&-NgxPd~Np)4$1?DAQbQ6 zM0i@^lz)c^iu8qIpzKW<>f%#q*(G@ff9H{;1pX zW}IFpOBVNOkNLzl-eB>`r1+K8AZXDuW;J<=aCD?56c49S1l@a8@oB3COxvO|Bd?%h z_W2{>_EdJ(ud0o@l-E+?Yby$L*bU(NG-!3ge}446(Gv}EN%>HR434I-;%JYJEq;yu$PcFzZuKnCU8wPjncvAV z0b4uV=(+QaJ&_$a@`k%*;Krs;cY%<^b88b2_4FFaf~wO;eh`?iG(57&j3-ynOMhO> zFbe(P&sd%zM3(|lmpg@L7Kt+}A*q4NuQs-0pPlHACn#4`}xDS)1a~&lISV-cyAYq z)q_{%i|ahggI+|dSIZQNptoh`_J8ygi+ks9j63#<>x)!#^DSh7*P6zmJS_qXg7_-3 zv4dxj1}DT=g85zj5z59r&g$dl2H#Tky0+N(M!UOUHAb+t(Kgr^?ft3VUbiPu&0V*V zqE@4>_5Dy!X0c9TmR@8RC3Qb2GL0#knLYpQFV}hvOR|vi=}L%}(Ry}3<9|@E9}-X` z1A^JZ0IqwMdCn{?)^Jf$pXnae!QNJ=dk991phUf;|2ddGY5~{pdM;-ZuBd;j%l8M+ z?PEPAwtpzc0jY*}AI;$t9arZ(#l4;gZiLEkIZ|tM0xE!i%ga-uO1lkv;7j@p|!>IaFf z>2iT5lsV|F^4HJ@|HJj)cjHM04*rdSrH`h!tW0X zDtU-Q&9eiZ_p}Eff=e~3nwybxtO5q3^oCTjsgV;Wd`Wy=_uW}u&3}GnLVK9{Yq6~N z>Ot04RC$gD#ZU{Xe<|nqj6UiXI@7(rRIyEa`%XFO`vZnMRS7SoMOD%2*s8j1TLR<- zKh4%sNvQaA;sbC9AAs@jP?bXdG_w&Z^X2NNNTAWI?BgeVaBt-$tL z5(-wvVIFQUq^wDtDJ^C%HT#k~m@u<0*3$*;S?~6aMb~+}{CZ#RA{~8BD@Gj&&kE1r zQMg*z6i!ho^(;>x{^H*ZgnyNpdHt7vzl+Nu%4s>o(kQ+IM}LF+zPc4XayLfpx2@F} z%@a(a2OG~F{d2PBtsMFvu>mJmO^)8P?~L(9B1^1n!QK5jL5r&IyB%hS^Fk6hPtffj z2$~G$p{2gR700K$@4Eh`maPv=ONJ_MIj*ahV!NtH{(4z@#XgcO!I}LA8t<6aHYqJH z(N_(Vjs3{6Xn#6xcmzm}@FiA+y&tZ0@%V|3)N7kX$oyBX@?S;Di~arIqM84yK*m7+R}%8aRoDNioXJ%iqU|YGwvlp zVB^^@&v zU7Zfcb@~jDFIU)RVABzY#}>-4E^pFKz|@5( zU-5Cbx9HeO>x`4wZt2BrnMmq6&peD|PKEE_gVfMJPz<)Zhq7mKMc(dYcebB^+vLa)|mX9iS)8!68t^FlymQLM>g=V)zeUUYN4nh6o z>$kaRO}RFtK~t^ZkM^M}jhHUp;|x9KALf-;y+fX#r*8Tszfo%)0Px^Hcrdv55vAy& z>8}ke09F5=RZL-sgPF={!9n~NH2Ht(n-W-q*?)k#f40L4FMWk}fmP1(rtPq1`-DyV zgjIVTwpq`9+His~xcDCB;FZi!;4!bF!sD(g#B115V?TiV5^JqTVpJaSF#)1-pz1qdkoOq~!0EBqdyNA{dD^E#vv* z;u)K>PJTX$!g8xev)-XhJSrcUzEV&M@si$7g9i2u{~ov%0xSIv6dbM;)~1?VHF> zI}*0FMDp>c7|$U(Xmq-rL7J9fEcmlD6oqKU zu>O3e$y-qATQLa3E>pg#(eG(#_j^NSHN1t7o}01nsR3(HalnQd;oQ0Dzt@8V7~`5C zeHq9i6zroPB88%o=dXg&^&9vZa~q3J6-z5V5=h;gV+mg+%%X!^x#~1A9oWiTYJdOm zAB-yQU)7XFrjJEtiT9bibk4pYJ{eyS>#cmtfEVj3#;ff*rA{NRUw5SOWpZFrE%{a9 z2an~t`EBLp5+XBskSV71NBA1c#K;2E#ekc0o30oF8$DeKY~hdC6cN88!Cda=ToB@* z91dV3Qf+1vQ=j9rqo*ky2y z*$vOTi>GlEQZZv5)~ccU#xeDen`sFz6J?O(!u<=<>vCvz@=TLGXr@W?iPvRe3-C$5Uy-h{sCk2PyjVC#v{#Csi zFmukf7arW%=VWZ=ET`7_cz;EVo!HvaUws50w!OI%BAeBD;g6Gjr~T(LY0yoecG7Hg zc{%bXbEu};V%zHNdM)h8K`3o_e$GG+*Dbwj&Oo~0D;)mzU0-wfdc%YfOE!(%Aft#`;afRTF_F2*T|W`G+E;K#vp0;8jr4M%`lj|zM#&{XX@b;3 zD$xKL>qSu42p`!tvwwrZDu6CE*D_U|{FS@=``b>*I{N0s-5K1rYxcEDx$!V5I3i^& zaHeEUj_5U@gk6b95aUA^2US*s{0XVcv%foZh=FJl{0W{7M8=6eVeD!j)RoNJGgWSH zx4O@M)VM}=KPk*nYXjF}4YDxYHsHc#10gbzK2px;Q_d2h+v)BRbUccdlo~5k7u&lJHIG)C|7sV z-&I%G^~L@%cYkpmwHWvg`9pm&;Cqt-1LZ5b;lumM@c5m>!8zx5QJ=}_@U`@wcr={F zX>J3%6y_4Rva(Oro7xoq`|*fqRwcwHp*8ok~%-@$y9davg)b86)-Vz`Bokor^C1-@YyJuMEc{MsvCXF0?W*pt< z3JKd>sDDeUpL#&nni5Yr!$L(cU zbY4C~uV@mZzsE`|qR~PXWc4xdvZfneHSOTLI=>p1SdR5;P}AwQpLbu-*bS<)M{h#t zK3SOh*zDi2>4~w=phzB$7SAE&o-j00?`U@+)_?kBp(4DGXSPpYmplX8r|Ligx1>4D z_UV4d`N$n+pCg5Q<|Ne%Q=dvq>o~4P>?@=aYO+1xgaE`d+%Y5MMb%+r@`4jaH5bV% zY7WKGMBKIcb>_e#XAhTPm`OvG#iWQu{Y3M3y%-y(hE>Erq#f?R#E|ig<>G^pZFE?tgIwYFR9#2}hYcM6I{uA*o2`81Q5?MeEMO zlkf_odfwr#+LMvsx}9M={S=p6_LCy&!|qcCb&MbF7=&Y7O-k5;LP%v_`}V$K-1dRB zQgFA1II$!;$1#>GabwA8{vL&Yj}L!+HDj_&;Pwq$&+j^}(A;eTi>INDznc4c-+!g4 zd(SbOP~ot}+a67byfss2UNfq1aFXnSL|NCPKhn5owkI5>h&F7@c^qaQfWo#GeZNgf z7*bgH&)K->KnL$^5chLa4p(eN;SVrlkT^M+l&ZEu#P`ZQ`n_ zJulq(USHH{wgyyG(j+kv<|)$TW9~dNhJ#y3@zmEb)1y@%3P1_dYzorXt~}cc(;SG@ zb?!@t1%sBvBi3h18W(0KehM}In!65-zaF%>@XagP$`WS{YJv<&Pm&}%+kb!Sr0BDT zD>s|b)YfGciIQE`Ex}K>WK!R`@sQnL1oaPbfb=&4&dSUd|lj8>c-T%!6k;upRNHPWv4o zn~99?-?fVp38iIhHf?1VZ0E|poQt)(qTIa-A~&8yi)zm^LPP-141WeLV6_x!NPY3s zDSGz-1Qie!+J|;AdY}_}rKqv^GDp9>IsqM@FgOIOpt}ur3R!kIVt+(G9kiWJfuyC$ zlE_!&T=@<=n$) z-p?D2%X+t^&4Zzxj&G2I&)Rw8-`_f^O;<$`)_Qk%KC%*+YG2*V2i?+NuFjfehZmu79)K3FGU zd#uEm;6uL}eTO79GheFHtzW3Z-lRX_V8;jr@Z_o(Svn)5+XX#$Ty4cSAy6ATcE4rB zBlF|9Bq{B}%aWbHe4uSqqiZV_rHp=hlYymwY9(=JH`s;dK@>=t+-gxy)U1-#Vo+(z)`B)Pk57 zW==wKPDyab%_+`>c{0srW$P(pBKXd#L;Z}C1?R3(7Q-VfX8R5-O-Yy(JRs&|yu2{E zpU(tkU6vM-=q^8MvrJXGkr27bn_4$|=MTC1yx6Gs9W>AuXWCf*P-U%THv zqY(XQ>B~u2L(lk0HPxqu387a?Sd&i(BC7as4oaq{-?4r?g_lig{VNV8j$#tP2)901 zw@0N?|iBU5PKF#N!>;6OjT zsdKYz5~J;Iq}o;aEdz1eJ!hX~*Y4)j=}FTL1SRx?zza|=*tWe3LkxRyoA`V(EtAG` zFTS9zZ}dhvNg5;0RnuRQ7cohqxLp{0-=pY_dQpT&oTp`Fk>}qp_r4Ft7?xF9Ir~6g zdY`iFgMYtxm6v6yB?9{W(qF`~VEn=0uS^-@GxHE!RJ!On+avMa8#H=A@=UpkL#va<-AX8QTxqet4vp#--MxQt4iaV!7-NsIM3O#fj8EK@1~DEbYpOqJZ{{) zuEQ}h*{H&rsG>-Q^76RqhK}m0p6arm;Z?7cR)5v50z!Xsxc#G|CUtV+DsS3&CsQ70 ziunxaTIP6vbTB*sB@6>V|G#)@{(op{_Wl2`)ciNAWByCdTukMm`l)x})Jzos;{A`> zCH}`c!~A2J0f5;K@Q-z-Ek?Tn{gfCL0HyYqg=5lWWX3F%4>-C25b;0i6_fiPmYIJk zynnv|u)j||W>P<-(zSg5JdOExewn9YP$!}>*n0k!SiRX8y^aIH&PC>mTe-De z%>~vRRwS723yGj+t!+7#|)_Ae+kJPrXlcN>|QZDkbJ;u!IO1=Yp~?icx_yNbIfq{ zMtT08E64O#$Bp;j^GO28O;F{M{^t1q!REFl=FVTCwk7$>MZx>qpN9g-MSrHzTjnAJ z=E_C+kq5)Gaf8bt(B`;&(tp2S!dA240iI=SL}x0soT}JjZIhSz_yG04ZH)yw3&=D8 z_T;l2sso$nnOYVZAeMaN|0E~28QT}xtKvZQf3N-jI2QI5FCBj}d{qDE;uHyffMNY* zl}($mfyjdb0K*3J6{EG|a!3Mf`%y0*(_c9h?)cT)9Q#lI`!7z{AAkP;3y11Yz`yNL zx&Iyi4;D}SpW|&D8EiiM+h66P;P}39$GG0c|B+kq|B+k$SH>TPZH@`A-O7J9@EXx)VhKQ4f*&(n*IB@pHP+Ty<|Ju&(%HXpZBWGm@N>qlc%FAQneV=4bjK{HX5L_c z=n{eF09pXoG|GY?NLAHwRmU(@u2}eG!)GrU-J&aZFG;Kb3V%0$%b;Qy_o}702eM1( zOch%WPyG2u7=|WJt~C9RI;{EgL$UBR8vUOi5!viKj2V~P3S{pG*7%KxypL;aQUe>!;f zs5J3^FuvmV-+%dy{}`?Rr+NfcuKY2VAm!iOOU1 zLWSmDCu6_nvUuRTpvo`p%Kz-O=A8_}ohlO;;wMx*%YOrY6Iq#6U42*TZjjo1WG_DC z>b#b#O^Xqqa}Yn{r>8R>dN&zjvENvi?>uJFpTb@Wqc`y-IJ_@aKFi!&tJOs7*jvWh zfBk71#3!a%Th9EKuUtO&v;^Fmdv)xMbn+vIqNCfoL;F%43&56n(8vJTTmYhq`Pu)8 zN6e}_jDOAl+4O(><9~1o{)6|Qt>*u=(;PGGKm7PjxVKHZ^8(Z|DKmu1+Oh69h|U0A z2Q88aHL=O}qzKh+IZx#=6{3L4S51CPS(|5>zt;hqwc#sf8CyI4tH<9AoJYO$;s3_8 z{4XAa|0~`F0ux7n{YHlQL%-#x?s}dBl?3htb$<}r1@%=ij7Mp6pL%`!E4=R~DH6FyR5dq(uG+ZH&Mc==Wp9e$+#*Zo2 z*ndEg|NO6C0dadL7kKi@zyEA7)bBVF{SFq99NorP@e1*Z@@5s7u04GnQIc5t1^IHu|g#$9Jqc` zOLI&Wt2`4ovdcI6tMiP_XIxc@KF|%e@_!|c;aT<~E-)e@b5AU&{Lu8{Sn_P1v6YFt zHwL)fBD>vJsB)PBJS%{}P*~p=*p2}=(1dEUc?MXZrhg3Z4eri)ubVzXmFt{6F+Ffx za9KF&`f3Z~me|Y=4uGE!g-5=xyQE^VZtm==X z6FSiO0K%O{?S!bPaoT}je?}#MIM;gjK6~sl&V^&GsR*XKtLmwzo~l-w~`v(E5+h=-v{Zgg(9~ zH10lP+i}cCtEigHMA|+DnRyE;D`B8P&Eu~j?4rJnEbByhvH-O%c@s++tT^* z+udZ^`@{a+pJK!jbAJTiYhE7hd%&mtm)^^2z?k%m;qI_c^!h6I)lR<*xTiSckCz^2 z*#76a+~~vpmi_C#KeGs~u(7ObZ&WgQ3BAP%ZR<_1+=t}w%%G^^rhF1EFQ>K@XxbI6 zh0k@Xlk!ES?TSMaGnh;f)Ixyu<#M6&J$!H|-a=^gRKij~Ie(3-F`~;EBuX;p7~hZo zzyfOD+Lc=98)e4X%G(zHetSLLPu9E^J7$IERw!KyjV9-Ok-I2?MPPHzz^geZpCPKJ zv~_b{#X)Dc+UanynSA(mGKDKEn3>F&F`Vy;lskK!IYZ~njxM%L+vpBtAHOfA(BlX% z-sH62hqXN5HGjf)T=)1b+?~a$Q(kub-U(v{c<|Lj{OWPYURNdgp_QyoxktYzA)VQs zLJ1{u^j=YV1vJflAaunmo%+^NE-RUtT&iws!FJr_#bx5girE{C+<3HE*nn@=oH=o; zT>j*Pkkh)joYs)@wDFZGQm|t*pi3}@ff1_)>>@R7VSmth0jijrcFNLXrZ#00RN$K| zcbaVrS(R1fZI@U-r_1X}2wC}sRkG~=7H2k5y`{1GciN2UeTmKaGun^wn%0-@k^YE% zIpCwKcyEqp_ca{+C#^$vTGJ{Y>|j6Gz+c#$_9E}?x*D()y_Rz7!8WG1%I3vvXK!Sp z1Lz?2-G3@w+Hu1xh1h*A->Y&K?Z+u}dBP~kbY3Q_0$JiwJF#LFY*i%;w@tt+^YFHA zCD>-gJK#_ioFOE5-{3uKGj4>0bggfrZQ+|zW}aCynpOwZ{n7}=jONTbi7uZ*ZmxZ6 zXMH^R8ok4&WUmG_#e>Kd6b~{wOM>#luGk&;gnz+zAM?jrA9E1;+3FD2>f<^5&R_K6 z?2Ps2_^{@_nwVf=gb+Lx2Alv}M8U#qCAfRvl>(fOFe$EQn+L{h6tApaVD{{rPA^qg za%A2!+Ht3<@lsUneX=M2=}<#1!O0w4qx@*43$u>$vJe$w{%{Cmx8kZ3D78a4Mb_%q+-3FGD>O^XHE@r}+d z*{s$dV7KY?H4IYImYAwabgB?l{uWeTz<-&(mEab1Yi^LLm9S1hHF+1tNU+|kewpmi z*l|+3cONhLa%a1+zNrFCLKETB%vxkDZz9yGZ1tnE%1`!feOEO$!luml&Ee+cDm!-3@sO&4)eE;TJ z#vz|e{48cYt`iD_d^|J4hh5HyNQJ~TA(U)obpTc&S}pijcClqD7PW~)yG85>2Wlur zqYtPX_F~}$_!cq?(Wlqk>Rp)vXn$IlBkm$UJ>(guGjq-jlrIBbDz4+Itnq+1B-vi~ z9cy98{lEQrUA`W2f86p*eTQ2OX{kah7{7EQm6~y8#sO#RKiqf@Y;NxOMDi?Npv(WT zr0~&bU++X7&3`||b@OZK95N`3sSBEarFMt*#@l*+6m!8MZe&!1W|9|=K`!|`m&mhSk zrZ3w?z}%xuyidrAQli)TUH&kg|Bn2we2A)c8Kf@yDx(!FC%{Ry4fUn8lL##NcgS%k>@inUsSq(QD}@7Ru2R7n zj4rO9em^2O4Nhzzo!>sX2>?JqzrQLcHVEw$=f59DpB}EOA_SYq911p0`v1S9RDkj( zM|oXcvb{kcq#~Ywo~Cw6d6PKi(*s{|CI80hRh}Qud+$fTxy8!^e}8}V%>^F)FJV8z zfPd9({nOWjC zayNcNliwjT-H%$&-DDxanEBE^24EO8`PfH0ir^VFl&b*CB~Lsbt&T)B*+$7^G6j(a zX6$?zzlI6u7$FTCrNMs|rC-l4D)ip`uDF|4ZNH8$^U)=AIdcXr1SPHGXR&GCXdOdS z{Cke+ik>t5=Hbf$)7bJn&lz&{{fRT$$?h*qv@x6bh_g=|;DqUYpZ5l^D^6UyOw8)! zSBAfZ-5Eg#IabMbjYcj~ghXfS=0)VcgS31IE-xQg)hwA6$jpD@D#qt#_m^IVT(G53^gpD@%`dqOTDz?473ac>ZAQ#Ey zWGy!{rW*aqtU-SS{k8OxKOZ}$n)q&{SJ0j9j2inl21%0QsGSuLCttnBxZO0DqG zusz@uWI_9wXI+6b!-m zJh@G!7^eah;RlXTN&{M6)=(qcYT`}6-G6|w3QU|VKE>3$JxYirBAMl7GtZ^CE znra`Ca7Vw^wH~lcXZD2gXm4<^fqcNYWJmP<+zUTjfgg8Q+;4KQ#k}C{lp#y=i{iVSe;scZQX7V;8{w2I0{c^g6R+CWQZ}-K+z@wT|&Ty=~9kAPw zKiGfseCxbMO@65NKjcFw|6E8=gU=^FwaKrb6~E_K4-YyO@63u%-MpEnuHk4W3$P;a z$6@BD>K>b$Hwg_BOV|Yd!(K3@lAYnuILSRl&r7vDh7f$Fm!E6xy=~UcE|(jzs}|&r z3+c<5l%_y-6~+-O7QTA(qGY|~AZ9aI$KZdAqo^#egR4G6yRh5rWJp% z9kUmpx_&L!tN70yZ?+r+q7|HXJwEiAF>DHSE|ibv>Gs{`N@S5=2(RJ!B4sd~r%_q< z{4v#g;ZMol0pBSvmji#iKb(QV?)F7`;A~!pH7O`hI@AL70WQ$e#d`>e)nGK`VN^R}=r#q_ArSJpV#-CPBrYg`5!I?M8HYlkQ&@j7)y6hQ za3)u%67Dt^x7H(wqV+DZzUj}ufQt!o;;Cp~3?O{Ar+raFec@U4HP%B8(Z_VP(?jlY zB{qb+O7e}gmX)l`p?-AWoU#Fn^zTtCYQpRHLmz5Xb4%x$c-Q<;U!lDn@;lYXuK`OO zYy}rpe6LbWxEnLDdZ?q>awC6TzENHwSX!2JHo=uRapeOTwcxHqF!@Jp)h`K?N?H+v z(^hf#C9u3>gt1MpxtQaA!f0R7wJX7`Qw(C2s*JgQooOFVev|4uKIPFA*Amd^N_9oP z%22&`DM+1YzBBW+}Gi5=Ik1C<9DI<-5Xvx^^>gnxTJf7ifCEVM+D) zDNH%YhWLiOuYbC{{-5~E_#eKQuwZdkL)V+5?dgHfI`ZHY&yKm}D_TFg8~DyzKq&H}0wrd`4eO)qCBqXPowH-`k2N z+`U)y-{rue_OE9$F^|yO(IFNNoCvM==7%5b@rT|F`!;5}D*k`%cW%*@BB;#@O*92l zTnl@^Yk7h?YZN0~=Zr5lg+|*H*nvP5vAGd`+$pL(3bdEPWa5UwcG#E1Yos&0c3Prs z6}R~lSc)Ttvm{yi3vbP6KXKehm!la+Or`YmGGfM)fjjt1*W`=p`N21(=xaHDV-7co z(JI1#hMOXQ?xuh88<=x9-HLyEjV0V~<>mb1 zaDhFh>HVE18++<;<899m+wD*acc<@1E~doZQnnCJjksosTw}GUvTXYj9YzJYRn$Z%+ZV{a%1I4`J0%+-9Y5 z^t=i5eye|G<{RY`E@ozadkH20s{;ftAAa8304F~+#A>RW{T}^t;TOEOFelu$)6oI9 z9k{t6=Jt6UJ>vn_l3(d@#y5UD>VJ+p1OL3HCagcnI}`T!19B$P%O_vtGv;!_|BNMS zi#GlRU0i?7U763C@9T#?3^xeRyztxa=% z#zmafMi1xbz=t1r4f3n98#9^ob=Bd0c0b}Q{qBEd3`6~qa7l0f-!;lj_%U8(9o4P> zBeoL=1zkkj&8!Wv&)4#Y5DN*O!|k=ZL{L;WL~x!-vD{0TknSYM&G-tf!9q^B+K=yf z+Gaz&zR*E#(+u_SxsXq%FPhKxW1N~?vm}=R48Df%$To}rfD_9(H^t#>lD*~%=fCIm zOY;GRmjj9lDt|JiSPEwF|6oskpFNP@=i~pO-&a=y*U@tzhZ?e4^!3U^O;~@|B7L?0 z#*csP$4p(pdWCs{R)OJ1>AL24PZh9>WTs@U+-(pTh*q7XKW+FRCU~ zrr@lapF!yS?YDRBqu8rJO&uRk(Hd_xXwIs1i zva?SN1aqL`&9YMI%j26u7^OjXd!2cnmw(@G(A+#FFEfzU<+Okn!H&nt_2MD3_Q*f5 zD>L}HCG|Pv9ZJ{NEB61ymnVL!*Ha$qli%Ffz$4~?RXe#+`@>l|XfEja#h|S?a$>H` zf2;v3U*_L9k=5=DbAI?8yDVyvn02xD)~n0nF(A8j+mM@o;!Su2-r`#3C2u(|@#k8qV15qCa~}i)?-S!kiEibI$6vmX^SB!&rT@ zLU@~k%lS*@wWNIj`?k{PA`~!2+hy6py>K?3Blr(~HCczLs$wYTbFZ2_J$Sb0+y`v?be81kbE3!*{(q5i0+TVHCN`7-}J|4N>^P1;Io!2Hch#H>F9cP}j21l5T3%&+Lc zPac)^)FBt9cqE{9g3kDfZznFp-pq9^Y~M-*xVCWz3F+?0wXHi?-m1RNDoFh-!uU1>SSgWBBbJ{6P;{ zy!JJ|o!H4PUrFrs1bMHAJ^qZH=^g(wt~=DFhWH`6o}c*Umw$dM2@Uyg|Lf5S3<-nQ zK)Ie^i7vz@|0IneY#m3h*dB6vCT)&6v;HnIkq5PL~n->!oI z>3mt?UqN3(;$TcD`F7%i_4^)%uj>>+k$&_$KXB%L(Bl;vk`je0In?fMRc*zftDla3 z*#U)d8n}te8O4o7@YFydaT+zJ_Qq(9$m?_r5Y9QI3Js)!A_m=uf%$3m{0S`ccBGgHWy zOXH&nU~PW-1j4=v#Ar*dqh@!XIj$1p&Gu%$0E3Tq{C~)7u7BolE)wu@QMm_*Ypo;e zf?-{+XmCxWGeOVg1ZO#HMsHpq6C+?#Ak=mydv3qyff=2%=|1@q$F;T{ zz)Q(T)qhyPMyvTkJ-6^miEcXvK^;dvJXY%ZdB%^H315pO4w!A9YLF<_Gi{kiKk%8Im9tdC9q4TmKY zlaih<-j`H=qkQ+T&(j0Wv)Yec8!{tyNmJ~>h<~+j++l4!o9sewtGW17nNPvo)9etx-%h;5?3KDA(XDfjK=6Rdgt?p7lI(&(D|O`n;Na z!C@_r9DUj6TW7VB6SqdbRCh<6koG0@3v7zN&OqVKZO<3~d}N-+_z5Ss8Le8yH!|k19aL)V|?zpJ%7ABZxHmKsnOxII@&_}KC|sRvu-1tikP9g zXB**8GXs$iuFSbuiT8CMAz>`Q=smoCdK?~=1sxI(B8=x#?hU3%Ir%=@1b^`H z=ZShfn4`VdF}Q|$PX^rY%0BP+ZF}?Vmi5+{h;n(J^t>|FjkuuxO5z($fj36%^-IIi zl?B+Xapeg*u@85ZR)5njWIMob2R0Fm-?(50c4z_*3_Ufb`IO;lp>O9|m;AF?# zso9}NWB-Fo+^Jw<`n>B|x?FKvtbb#kGseb`;3)2z*>qEN!qC3wnQynz!cId3F;&DD zTS5IgvBBcYN0?InBr^9Zg3BA2#fMYi9Y3T4ln%xdB&jgmn&& z217UXStm60!LcvbAy6%p`jZWV@dSH2qxy+a20koF8~wJkvjI<-gO;RxJ#Y{{>0!@w z#Y@Tup@MrfMnGC@=4Nb}u|8Tgr@0I-#21-xNt|;;wGI$&nog#d{FMqA8&OgVhTl>T zd8R&O-_xuJXD?5ha9WEO!>(V~moJtIAAc9{0y5IM=4L$Yn&Z5R98P+>laAphKeH#aZX5`&fyf|%nX07tzmE)%Yxnw}I!NkmJtBAjo=#Gz1r zmWsOBu}WrfZObsRp#Z&}b?_Q=lVy;IOu9A^oq6z`7s#r?w+fb{KjoZZ@c`DuQh&64 z-3l>W23=VR(;ynPM|)#t$@s%uP3}ae0%^8pp1MkUp~$gS(@upPMI)+sy@nzlbk+OO ztqC&tC?Ev*Mfz7=7dcRWLr1=Juo>*k`Fw(Jonxm5K49P#vIF0L{X=u=iaFT3N5~(1 z&eR6$@yE<7x-e_H=_Zlzvh`W#Eq{70G;w2Y%C`W`N?0`wT{Dl;(icg~-d*l9^G8)q z`RekMFQmE%lbc0?TLbq%BqyRZi5Ok8+^V^*M({stenI7w`SO27oIcje4wX$oOAXk;0L#6{!n}4Tx*F+2r z12?`}V)NufGd3ol7oMqoc)J%?RemK_%F2;&+LzDz*V5W7O$6hK=_nwhnT&+X8M|CF zzpJK?Ni(DO2+|@GX>Nx7T#asxx~Ba!fW4kMYt*|K`c;T8qrdxn&*&^)>&oy$U%@dR z?0?qL%a=IS{=cn({7?3Ah=0$p5DzL*OjCXJxX;F7(vDN$s=0pNmzwwyq*ug>(5Vte=M}XiFeCybSj3qEPv;`W3NXoey-uS zmU}n&qLb!wbI<~R^Osy-?Gv~}m@L>rT{3HPuzEIM7NMVwU$wQfWdk;_LW9$Rr z%|GuZPky3O(~Y&i*gqK-&INJ1jUr)<KxI)6Jw(x>A`9Qy9ir+DI9dws{ig}8Eq-*{p3Q~zHc_{E;9qWMAI zSsif~XNPmM|Es^z%BQf>lp_jWwVrl;jZ4Ox{62(np>Xnw32de{lp-mfh0SN&l@Yf( z2ct_rbuIS2Kjr7t3er2QWanKoTW&NH_aZ4+f#Vf+WoU|asDEKugZ1z2xY&p<4(df= z`yvzdB(0LNgV)sF-ZS_!t3VNduBpcit0H`2)*D{$Jxj7mOMQqMe_M)E4)Q)uwHNWu zP@GyF`|F64J=o0!Z1T-nJ%u!115ZwPm^kmDKXlpa$%b`4aZf*d@W2%w&s2afTjQhf z!P@7b66-0n$bTK#nPyQ2DcemvxV!{~4dXU<(v?U_0qgXynWfs&o1Ja*=k+GnENz}i zkFHH=B4c-!3hz#hIr5!m`VP72Ap^0 zRyS&`CSeTDyX02z4s<9pjSMHRcV2$z^B-M8t!OS%-+u=mIPa)SsZaeR_SEy54LZr; zjP-Uo^lbl(XM@i_#r1<<4}NmrRwbg+cfx7H#tHF4s%e#AMJpK1v5^lm5aA**_U>hD(Tx*s!Ug5+ecqO6 ziPiMt#(yb4+Lv9x#=rA)oNzFyzz#X&3{mcUP;FS{?@yZ|?Bjv>8r@pCgf~hAP*OaY zbraS1w{+V3X=8o8?!>cw+vTC2YKl`|)Tw^;H=a7;r|kY+b!8%V`BEfBjIPZ@Mg3C- zMFkR!DPqiXQJW9q7M4c%0(ItBnxkobL3PbEoPRQemnmpmnu0C5ypa?mt9zeAgQ9X1 z0UFWdvqhTEU^jjdF6XyKlh`8(1ob>AAy%q@dc5Ig%x{fY(7Mp{{}^O@SQkO-qAESC zOW%KezApN-|A*er1-dai>(x8=Z)d#bq3-;hH*LUdE7^&4*qKz`H@@Iu%&fhTw(MBv zVt+_5*VZo7u5Dx|R)Hy`-<3}Sk_{NW3Tu&b;@Bx~f3|suw8mmN`UK(@>>9=_PQ=V_ zi}(AzVOo?C)Z)pnbw=4KRQbI&4r{la@f7~}bvf&MrH^*?)Z5jT>?(1(&s&c8K>nLw zKInMDU2)!%bgGXM9?*L=^dSwHVy_8L{eO)k2R}u2TA%koiv5{>_HB%bSLn6TYmwT{ zuXPpR#0Ckh{LX}E^QEkc^)&HHbh%#w<+u~7rw7>g>QkOrQqN%?q;@V?v&a2da+3>4 z?1Gu04;$9{45v_DTA0yG%cb?&I1#I`n$%IJjoi^WE>*9~%tYlI$_K?=<|gx?D}Pmh z;#F4LMUT}%Cps4?ZtMq38#S$Rh9EZ!Yy)127@qTQ{7Umz-o38`Ww@d-3~Y;wo{0}O0!o}R?$3x@dO&;$MTu1+G3iTh$YAG zPe|JyvqbcJOZsr?|KaF)Xl|GQGo6J{f@#_`f#}XQTHrVVD@>et9E}_NTYq9Ze73 zd#6Cp<`B-ID-!NnbGGhAB;_jDOh`W($DZBKjWaoGTRQ(>fKfkU{vZhRP+#Y1uELMFxjO6pzdZF)9oPKxS!H>W+2^s5 z%|n7oaTBKIG1%LCa&Upw5!CuZbe78II9psst7w~}>>|IbTDRNoWtyy4nfl|SLEN|P z?0KttoB;QuIrW>}Z+{^*_(<_^(X?+k@BrWOk8~OPpZj^nEA@R{r##@Oe>via;knYG ze?iyZI74lQobeyLK|tp%ANIIV)#MYMy^c4ZzRnhfOq!J8cnMX$P_`7l>Gf(pt5Au? zWQW?GFPW0?T5@UHsYr72T`@xVoAbPkE1@MS#&%`Mcg-u>rhkT{IPE*sV$FDRZ!VK` znL+?|dxtV60UY+a0n(`rH9q2{rU=0r?IcoWyvUtmc=xc-yRytg^D1rGVgkQd8b$s2 zpm|kKmd?i|2%49);PDRVPWlh|qwkk$`;-&todorBjZYf)AG+ASM-N?*o$0R@XPm~s zU1eD_%L~C5_J4PT?pi?;iCS^Cbu~J(9%|1iO(fbmDM6Pjie`|9^^6_QmBl7xiEHuQ)-gG|gCJ zd7g1>D^Znivz@vVSXy+GCuWR`jIEV52cKjVCgu9*lc1lQ;9i!-! z_L*h+X}VCK^J=^bjPw;M$*lJwNXG#-V6Zi+_FhuVKjtuA1-o(~Qq%izmb994SW7#Z zQ~gqU;HLZWXOy&q9;U}Z@)BMZBz-(`FvO!?Pk(=y!*kML1l*d%H;;9QFUS1kXD)Zf zI~2o=x=lYT^`0jRMQ3oDj{RP9c_kX8nR6~jZk+MiI3dkvAH!Wc3wF&@P}_Z&YMiFt z(2rnL9PTEWfJSi}?u2nF6U5PzuW&(=6fPu8V2VruX++Ckk!uCb;P8{C^Aq7kaFuQP zS$~gaP8tSSAoCr~?m5P3HG%tP(6M;M`iCBap6iE?Jw0o`=YPfF%b4`b8=6t72A$^j z4AJZJ_&m%vBzqC@Y%_@k<}2RinV--sGm(;J7*W3kJN-VjBB2HooqCnXx14VrOP%0? z<|#Wb!Aejb9QYX+S(htrG_qYA%{R80W`FswdFrx&I~orp(cH+en|wq`@mbW~gY@!N zx`IPm27c$M(Pt6xPCbArXW`Bn;tdfW8sAy&!z5xmd+;9x{s#Xy@i6$IHQ3|4tsnU0 znA2Uyo1gjHp*B6>?c_{U+{*P}oUIGh(h_>V&ze^KW+>_45M6CZHSo0LcG(*gULZ%+8c zrT6Q@vVMK?(y#TUs0yblO?Bz#nSU4hIw|Hd(Q83rB2lKF5#xD*09!{!B%!g~n8pvN zm%|D^vpNpCF%vs6rFb1w+e*O}1MtvR<~i!T=f#zGnI^)QZmcoONAu0h|4-|*5C2W+mGHsd8n?^&_W+ps&Vv4LMyhXSfY$qDhJQVjE8zkg#(kBg5w z1B&PW7Ng3a`Oi7ea1Mt%71MyX3TuX+4gpd zx|}1>dPv^nufeW;ll5giO}tf-YWotJso!~Q^jyu>yRt&lJVuzO5os`Y`I|{-CWz*8 zxXs{-aD+!VLKd`M4u(0F2Og`(4~O+BenVA!_uCIMV$Nq0{N!wUuI8Ip?(^{>ZyWk! z?8&F}eq>j*PYmqRuYVfD3|&-E5I;N>Pz1t_Kkhee!=O#W+ALMpMBs`!v!xV?cO4|2 zW~QC{ok_Tv(116=I`DD#&K}sLl9heNUY|B8paXpCG{eLz`#JF`@mq!9-6WeW-LH^p z8pg#_q6?(aeYbl{e$1^ zh8g2>r(O@URDTY0w1kK%?;X?TLE^rSY;Jwn3W@C}JU8oT=N_VJdpAwC4O+F9$>aq< z;f(rGtYUjQdFUwLEd=#jYz}zsJYO8_b$N&{0}k1g*fR5nCG7|+mHF>^@A0pI0M;dAa}XRL2qGvLMjXl7)8FMmk*Q*S`$dwH6_Uh7R{)fZ5l z+n?sh5I*kb(p_2;`Ze_0omtnkwxV%G`JtbbgvH{QpSEAZP#;?!2pG?%sa{6CZb~Dw zj<~>(r|J=5%XEK_1=ZJqe^TFxn!u|8^|^k}ecSh)REJ(=KRoFYhrXt`)0=}%q#EWn z9p)09@_(B0Wzc<|YH|Bh9jWij>$Owtps{%Q<`(oJ#+=oQj=G1$_`Sb8`OrS*kD#CZ zSf0(y8FS#~4-99T$xL6zG&d)ktmei`sQ0wjo4I^8uSL2E?Lz>TNCQD+<$}0WRjcQDVlru|e}{0e4Y(^uV1H?5L4kz9c>^yBC$-9~S=4zMn)b)M zi0jxLba~?_g4_n%W-VwPqhz7&Z`M}A7W^_NT#tf~R#_7X@ zZ-0Nzm^k!q&z^$kw>z6Fn)!{8rwnueCj6v+Q_>!f{)`cdlKO1KXd-xIrk}tjU@X=9 z->*|ok91ll)MpbPd$EqSivthSbH!C&|Cklq`}K0rB^~NGqytj+)YE;)4TiaTU=Oq8 zH|KM;=qx@Z^J4v&3Qzgo_=*`J3=}@=K!2t*vv{8)d$0b?a-7X<(S)|wUh%{PdMdVp zLzpC+aihQ?Z$6BuYWF= zJ68eOJp`lQUlR`+Mf1hJ2uH80UN^EDYT83>WYDso*RzZBy3u#7u`5=ARg3RgZKocm zANqkqPt)d=~H0m<4m=d(+pw`*|OKrW7M8;i}hi0Y)b&6Fyoa zgG9_ZtUu;;dFgk>J)))jeZkE3Nq^crH3#hijM#qrn<>v*ya7as|D4uK7UD{fgGfP zwSnmK3YS?^>?FYf6GG)Us9oA_H;3a+GEPVXFmrOXb!5mn6b&08%=uf?|*wvdToO~_BWjF zsyO6C{XQJ-X7CR&5zl6j54^?xFpIA{-{bP?nN{6_iTiK|VQ%T(fo`Gt-bQy3EzCH= ziGn0w$Rzh!McP`6ab+7H7ev0@y>#!4fBJf=r^`f%z zO;9y!e5R)9;0uijMqd*ef5-Vw8{d>+J*N5vX)T{u0nK={hW~bA4m}oXB24_Kc-`L_ zuKM|e%nEVn50F<@NwhkfR_$DNV{@2i#vk|IgnCB8J%7ybYFq~;VX?r?q#;ZvH`P~w?Q57!?q#M@$%_ZyI0lKe0c>Ts8E3q@ z)JxkkDQ|eNyHpS8V#;%K>KU!P*Iu)i7uI)rI^F5dnbUD%wo9;vbJSOKcL&c)fmM-C zjV#06#6?%1t57(t4dKy{M`z7vFXIU~3XLZ1#XX>9i zn4kQK+@IOR_w4tB|A`Z4vqwVfd5ds-y@5r5fdx<80&c>-RD7tmjtG(}8RK=d#OxB+Eh zV_0Sm_rlozUYKl{J9p~4AM(I2V6V@~jCxae;>*{5?x|*3ueYVSdxEr+Gv3b;lLknA zYNH##eBj%M{*x2$%&j`w$O);nFCvq>GRY#vI-h=O((5FeYB;7q6+Ox_=D0 zZP*Jay7!8z@jJNB*&jo#ls|ax?|G8;(D$$_P$iR8m{yvq&qu|Y?z7vrX@DYZfO-vR z-bMsyivFAk>ZRRy?DNpqfp*xJqhC7Azd4&9cbakSLt(tR-E!6`K4rPf%tWBwW>Dcf z*f@xH)Z4R_(v~mN#G(k= zjxD+$^O5n2_?h?r6+grB|1Cc=FWJmwGH zCxT%|dU3VohU}!oTNBb8Vvs;8!TK_&@(@6s`ltG}`o2H(YM2?K|MSnR?|&;}#Efqd z?ZiYolQ3fu-c5yI#xpB?-oZzRBBc2y@X?6CGyxN#Kw|@Ri)fBCZ$|v5?|;t^Z3WG# zg#Q=wLuq!&k9lr~9@~B&_W#8k&_Db3I0qD+mVOFp#=Qbuqwhh9$j&;sXI^U-L6BbxaD`se%{)(rEPJ+W7v)qjV3AkOFA{)nYy z13xed7T;LYmfmp3q#k;^`~4luxl_mbKi%Q9Kj?Sq-!n85ns5CAmD7Q_DejW(=JiE= zO)tGssuqvgMW+&ZyUtS3F&CL!gnGp+Y24Y}vgvz?W#4zK`Op)r=`I2WFVAm%|7lmw zsb_7N8KXFN;}BY4ynnddrsD1OJq2@`ixOAK-PK$D%9}2^^b9s#+3t54*zJ_e0d2#L zbxXQ)XyEs0_R4?!8F{rE;=U_0)gb$OyOziKW&NzE?|$#TcT`mBjGIx5L%m~&v6Y!Y zt-kN_I`s+b-EbdAex7qKPc`yUc@b^K5z>qvx{Jn|ChT(Dl7BJUTnbbIx&j%DOG((8 zzMK}|E`7gyn;z!2Qs4MBb2b1QUftW4E2AR93PZnlZ(o@(0{w2nAoVaJUX4C;ezhe0 zg3vGyN)Zn^^z9d%C&CwxUmxIta-S4*zfRAM%D(SjLrQ+*OR@h$+nY7J$}?+%f2E_Y zs>0m?#@NQ^M1MuJB*Z+AhMNw+HnXw8HbY1M_YUE;_q)II4RtCiYTYbkrG%79Nr{yC zO0S541AG*V}Lw*fU^vtjqjk)ed2crB3B?-YM{^^qSN4s_&e)4TzMG^ z1@Hw(u_l~2Vj{&8L$lR3hF}E}EXPq--#Q5+LvW8H5#_`mHhK|SK(Xk)u4m+2yc7Y6 zO=Qbhn16NsdtUC-I45>x87!{^Cr!K%tOuOZ9Wfe65pBq2iU6J zUwA|HMFbIn{$T$F{a-j)f8rRgxS~X7{R4xVdJ}Aie~z;XZJYrohim-Gtj2#&OU`+S zGOnA%Ki($(@e3`ldXfEuC&TL`{>Tr8=pS?LYtL25g&*Qyy84v;fr0)rZ|vW3o(tJs z<9{Wcqo=v+Bw;vbM9|>hQayN^CEaF0bKoYN= zIv%OdphAEL;9(f#nBc9F^+<%Tj>L-Q@f`VvQ^zab*zI^F{zj0!YQVN7-l6ZoYWpNh z`l2j6!Q#y`qT}9(KC>nJISt6U`)a?Uet*A*ir;eegBS6$7I?pW3I4F_BWJqp-^SR^ z{=lAC&j|biQs&4-eCDknJcJLn z9NDGZIaT+?Oc-THz)b_~;icOn=JVAbro|6aC8dlI_6S}B`G(b1zog@^u^k~Nv0`&xb%g^O@g~YM9s+WPGZZSnX{p!9|6o8H zV9dW*4?qWZ^3C?j3GT{+#GB8_x%qC|U&0q8{*~n3_z%n#<~NSwuNc(7*8Z>E%K48o znXkuE`=O`8+dZT6nKE;W-Xg{^$B%{0DlNz-=$(^bfYs1+BKQB(5*P z`FGgGUGolgL}C{f91q7!rcC3a1FY#4fS!#-p@|9!C?i_^JP2O>iUINyw|^58-5Gnp zh6p}*abiQk%_|3v3doHBnflfTb7sXN6wUcxjkWV!aYyfX5M5svT>FMb z62m@^@dQjf8*&axANtleCc%I76a1TVz7_BDkGq@SchRN`S9g8=Bu~)|+8=lYkqpM^a&^de zU!!2eZA6&6ayiJz@qbkkL7pEkBFGKjCwETpqUnzRw3pH1`FhCgoM4_K!8bfeFj9(e zAm8}poEDCC8i?4_P|Obb2ti=z{M6a7frU^tisIS?pUcvm)!#>e3b8lvWuBgKFz-{h z+A1I)nzL?S@)^^-r(2N3eH&*T*lWDt7+5({b9@1CjeBVPWq)V2+Svp`9!bQWLjJJ> zjDdE&wDhVIfZ8u^7no-G5TQA-k4=om28eut9BuoY`u*aCCy;suq#N+no#-LDh)PIn{yWvgAA~ z0OuTDZI$_nTkpWHz2u*^jDN(7{v&?oYQu*IVHyT$$}XUg_cCNvWJkK=&^ieK4=Oqq z3U-jEP7(5#<;=MUb7Jr1t0o>2wAxe?dmMfN=K-BZb$+^q(4%qowoJWG+fCL}MBIqB%#@fY29;%?$P$83;g}?DKsuAmwE%b@4 zOFS6(%U!q0cRt9oqqAHP80$CAv5cm1;&WZ(9UJrHMt|%+iUYE%Z(G7LAij}}f|Ye~ z0)IN6OA>H=gbT7EMTUY_9D@}*1C6zmjdHmLdj~#JTm$j5E+bo(2;Q17a?7p4A)j|A zeH<{K*S$00uOA_qUL(4K6<{5#atKI#MR~n+xbpc3e*NJbxB$uk6Mud+gYGD8$ob`o z;B;;Wew*7d#3N2^SAc4bPj&#|g?zxK8h>1B>6zyil1G`c>pKc*<(_vj8ic6J0{aUn|Wd#WbU zvE$0QcVaapBOFq3uV)y}P%^uV@9bKset&F>Eun8oTyK8_0m0tH16<=NK5{~D1HOZMQa-MMLHKKB z-|_s_K93A0%)m|uX0A~{wvl+F-lV^QEm454p*}(loNCbJnhr!8dKQ)cF7J@>;cJri zN!lgjk6kieC!kZ{l9T-1N;kq@>it+Pww@g`W+ul2be|_k$Ww<~N5Bxnhb8yi4hi!H;)XA*NWSb&?6}8bQme?JfC~EhiQtH@K1GO2b!DdGyu7Y(pn~LjTpU(ilLBG0Kyai zdp7W}o(CGaT{0tZXR;f&S_+Vc^g^ETYXJHni|zs@I{;YW$qhG&Xm0ly3D`h`>+|*z zAbabuvIcaSJkv%IYXJLFIg@W&n}KLRLL(t@1hv5rIU4|9&SZZLAbU2@Q0NfTqz&c> z1Q^9ZI}15p%V{aVng)3F2m*VXSF)=Y1B`il{zY4-vS{an4($QhZP?2lu(!Mu8o~f# zAtdjdgV=9g5h*LOk4V-?bkDpPFpR|+jivx2IR$JO)cIgz!={7E359w93qVxjFm#GA1yJ2Kv3vx;7m9|oJJvu* zcp19P2;DJvTr@%y6fr_0qzyaNfF#rEh>(HA8LxFjh#`OAEMJ}iy_ZFIf{cP>4akHR z)2J#sxI-$Z5QOgUv3m_58wSwu>XC>j*{>qs!KCk63e10X{v!hoBdr5q2&5h2tQVo%6C*N$#aH+IJd zj;ThX&^D_a5^pj9^@@zGeFVVj$sM%tm0y484}j#?fgftXm1#M5Qecuf#k=bqLb8Z< zBFrIx7~l~iyH;1e3S?Y?Pz;VOt4E}%(@>Hz5lX@@zt>W5)-((ScX$mj9|oIX$TOqp z&J&o{{6y0*{^k$EVEb#ngysnrYyL&ku=LGmhr!Oj`SUy>XU*?5r2jSkM<7P{<%fSr z&$S-FvojPjnYU;h7P8$6DR@cU#)4F|K|I{VB2fHQqB`}*ZjMt zVey+!_?xZysyj~zt@%$)!^$_G9b|qUK}aICa)T_QQ6yemWN!)~|J{25j5P38OZ*ym zq6u;RfH<;#P3!tKgMTe&$S6#tuY0+SGjg(P+Kfr&WGQQ8E|;S%!gw7k22_8Mm#6Fa z@bVurh)q}rXaG1Ic>Ek}9}0dD=|Y{#4WZQspaDxg{%7g!LFU76zxh?xf9M6_pY?;R zdH<@P-+U1MQf3{VtxOHm!1155%Zx@j<`#m|zwSR80Ve!nMq?h|!ZN-Fo&?Jrb7%h7 z`@{esPsYk-bXH5jeqBRh)X0B`&7C6vWbT48P|EHh$#h26>xk%5WFM7jbv6Npf@70$ zJ&^rG=)rm2LlM~%gKwD>>j0}GqV+zGy0<)o+>$%;zT@AQ@pG-tDrYu`ZXwFFI`IIa z0lav1Mq@;D46?5V+&^eEppvy)Mo^UL>)N)+%U=V}4n$T5m{oQUcqIi$@xNm=0O*H^%`K!8o`0b~ z^JEPON=o*uU$jv(0Kak5bzj;e@*yS~l|28*R`}27xmJ>x$+K!AeO+g*`ET28&*1$t z_Iad{Q^VvR^6)Pign+F3TAdI%7YBIZLjk0I(?l{q#afTaVL-2?5dJ-#ochAw(V>4; zAK51yvLE}&D9}da7cV;u^4t>Y_kZ94D3J9P>%+iz{om2Tm;3j$_Au~6UzQ-ZzVn6f z=6~29B5PU?MzY2`KV$!$f6>plUy1>Ym0kYGNeVQ39xAe?Xr<2n_5NrW?2hOof7J26 zHiw<#znCjze#}TMTcU?Md?eb{00KS=R~-`D3{KayK=j9dJb$89Hsf`Fv44=xiZjk- z997Pd)J@h=A^xj>*)Z7Ppw5R$2iPb4a)}Pd554TJvN%GrXD{V9pR|2GjQU!(Ci5kb z@%5{>5s7^5oqinwRw(V;)|EP!GdM%WL7q4#K$vJxg^ZU34Kzfkswydp%~A0h*tCVrK-fJ1Ibv{h%Cexrp92@&9u0;O4Ltgy;=& zPzvP_trLq!e18W~;e=Wz=Kw&6ksNKEZ#e*Q7&LJhv|Gu}9SCGo?Q=-6;Rt1B0y)IP z7}PL_*!fQ$=ZWN=3g$orYcxx=Vt5Y{b;xCc9Sj2E%z}KN*lJHv5qjm~fudIY#1&ik z^`V}ikKp@5Z3$kD@YngH0wfuL5fJ;_^=Nth^;g;kK7UaE$j^ZewCJz&2occ#%7*}y z#C-L4+J*?Yf6>GIZ}WLusX>_kmCgYTxDnVR`~V4%5F;}XYY>mHBf&#t06HM*K(#>* zc$tjG2>6=d(awbPBzhMaX$QcDTwx?OyaSO1GJqZ_m)Ni&>4@BGkZ7#{D&I7CldAtG z-aC;vz<+-)twDI)xNlfl(;un>QP|F!X5#|@MIe%qCO+a>6ve-H9|n!}3x*NDX8 z`kO}Vw|;&Eml5uOlmRV=FVf!&F}4sp!Y7u{{o=Qk6F-~({c`aS+SL<2Mud;z-_vV3 zX@8Fl;t^~JKQ#bUQZ8A?FDLwQfB3VG--V2$NZR{{Tp>WN)CwTdU%C2Ma+H(Xm5#_w zNBWu}R_3z%R|6RRb`3TqklPg0qzkdil0zYzyzx@$xko>FmAwc@U{LGJG9S7nE z{_WR}(64f{(tX=yllWHOay0Q%Mfq1wF2uQw?|?Rf4|!iSe&r}z=Z{Xt^V?2xzbbO1 z-FJeY;*oU#kEEUJJj+Nt9K-=_B=kC=dl0$20Jspj{mzH+Px<%&%n?&>{d*P>39$^$L?FQ`#b#*e3Q8Iw1;3h!{F#?vX17;3ZoJ7ke zSBHS`8n(X&eSajux<9J(5oP5a*X|+Sa3v@d$UpJMqA*FPU@+_Ya*{c+>vLuYV2rE8nJN z!C{0~8sZ;1eQ$ukPsD(Z>{yS&Z5fLgg<_3-u@-!$xf9yp43{LFK51^)iCFW#WITTA zTx_rY)>Yxr`TAWza=*%t|NX!JxBuhT^=h7q5R~Y5L!XKKr+K-~880 z@=E@VWKv^yyD|TsCd2i>{+}26BdcmAh;umzxKY&p!^ zjE7i8opx4aRn|t#fYRnm$<_*<1>{@0lya%6eLeT&v`;cA7Fg}x4}TZNk~aEq+9+6y zOc_SvF|iz@#Gd<14;DpzRJ)80Y3v!gAvn;c(#A!}%QoEJ%pX^!^p!@^RHvx)`>{OM z9anW+J}rX7(Ob_Q>5}%6*Y22>n~(ZTOt@_JZ&AG_cgY)?&6)F~LJkz@2Uu0c*^`-N zkUw>Cm~PdH6Xi1Xs(+zoxQ?NzZV}_n^EAoF=U%6d!j0NjMPEjp90^IabbZTy{Ae(_ z^ix`QAilaC=laJh8#X3&+Fr8$&brPhV_SoEZo3c6ml()(ZSq1d$H7yOpT7F=C*u-q z?_oe)V(Joyl%8zg$$WXmo9*Vop7VEn+I@D~_9GUheByAy_!1F54&z#Lx_}%M}1!U#$@U%!yCg@f+^yU%-=C_(t#J`&R<+5;;e zhcQoF*Z1{P!hgiiHm!1^&CfodKF>Djso_jzZ)42$GCH@fP>=0*4o>uvK2I-iY|i)C zsV>9WlVq+{Ks)Y74HY2M5$EobZMLN2`4Dm8X+t$V!}C~SVne6LZOMJkTUv4GD##7` z%uNQRsGF{QyrPDEM#Z@*j!L@YH{-eX{M}~2w5B|v103(PV})G-d5k>>s}8*;V@r>2VR;{KcQ|R!S2|Ed8WxV9`UgLa z-RN}=WqA|8H?>{e+w>Kp_$#si|%rHiiiE#Ivpyl zK?5aJr{f%}SF9-0SPc!uMAO~dw83$6y1g$#>@c9NJBv#Wf`7W&A)DfQ;z!=psuP48 z-hbkAaV!n)@Dek5(XC>`qMP~V_xs>p-6n>@Lj5>(QAa)NK;MI99LM2y#UR&?DKVvf z3#!=3s$id*jv1Cs);Ef@cdROI8KxCKIlE_f(drdI(33Qjwb^ zW!xr=-?o=df1=?Gls+26)ICv|bH2RJUVkW=my;2{kC&@tJ~!zmcXP1`hq-Z^=dkgW zD5?h2(j_~4XG0}<)7Gd&oMcmxqw6P{g2Ovg!rKO)ji9E8Uy7!1ro@ICt9mI$*Zc7# zSO7f`>YDly&DrfJyw z=WxFt<6F-hmeZ@!582J4x;m!&W}M6pnT3pDbfg*1V-}WhRxB8J(AD&wp_n(+d@Syd4Ak(61H?nTIU4++Jl|9#;c}8DQ5`S&zc6kU^ z&(_1;|G3(%(NB-v4k{U*7wrk4BT9~n-l4rwWQFB?W45J_Tm`2a-9;@<^w2Q&of=%v zWm=v9G}H+t+eyD_eB(bW{lQ%?xAWs3)9j$R=gfX|4?14pqm(8tD}dGCWq4>Lpze=$ zjODFd^%ucqvyf7c#bRn!Uw_1Y)m|!8eBdyB4CBrTo^;+vk6M~`7uvQf@sXtL)WGN# zKH~bss?YoT(4(Qdg&SzzR4Z{uN6d#|%Q^DwnSRj=-L`q6S*h~M1RWN~&oiS=-o2X- zaeKPxaji7`SSdHX$6N8B3wSaE^a_r%5(id!N({8ZE;O;^|VU?{Q^)5e%y~sx;3W# z>G*nd!HsHZNlByk_Du!5x_UQ|0cyH^x_{04jPE@MGA`C`sXC)}J2=MT zGv?r1^{kJRzWBu_Qg$1s$P2birvs|rLp3tN%xhBJf39F)cUK9$KFYbl`8nP4uLc=z z#CDvciLV;J%r1Q;H#h3jt^-f^{%H|7D-Tt0l^Rc>qu#jglwM!C5?waN`_Vqn&XP}B zc=aH|kAG6Ldo74D81}4=?OtfS$l{6@ymr~Rmvl$qVd3WfP1m}yIEsxv3-u1k&4H?S z;$2dY$A_y7wS)HRB--vJ;ka{CXUD8O`njYt;IV%+QQz#~Je!K#Y|o+#6sDX@?`6(| z>lORyBr8ok6_YY5-Bu}RrxsI=d|&ylGt->Y-+w)GuQL87+-Y7}K9 z?LWm3S{qnPsb)<#5=$UdW}V97pxCujY=_(MFkT;LorIF73?ySk~Z z!llVycZ%=IaG~Y)BwezQ~%$bkp+dB`u z==9fV5Jr8dA4e%+FK)%}QVI+QvD4*Rn16$9jOhn5t4@!OyQh`gA`XngU?B~WvUq#dOEuGq4753WeS*ju-Cgn(^++?ZI*eS}r_I%05XmyFclt^X9l; zih2VazOP2%Fpo6F2G&02Umm*am%g!g`MtU?XM6u@TsPdlZg+L7@zDUWSyCiyZ(qg5 z2DQ$odERfW2e^p6@^2m2JU8-1CV%G7_*iW7X2Er6>(1oFXF)Twm!Uqj8}NM34P5_Y za&d4T!T_0eWn)dbD5h{tw{;{RsnV80!-B%?9sBzxOoAD^8vpE+tg=Py>eJ$@UO1Vp zy+5%|0`CuBIcc8{)83Atc$>}nxb;vgNH=R1k78FFyGzHk%1K|mVe=R!Q-9cK6>j!D z!{J@oR-w3_1YpUpnHwfuZ#O#j4Z4AWc2C;n`yr?xlaMR{=@%IM@>#mOBN7wi4J z+yj|+d#4(nv%&0ZTGNa{WPc?s-OT!<59!*bTzNhf1{bFc`&IrTXQA6%0*4Y zAm1)GS4rQ|aykP)h`ZWX8cX92D9GnOFT_|VTm4>raIw#>;R&a0dyU3Ug2%46+fjJ$whU?;B%DJhcr4$}E zhtgPX71Uf^%~;{{oPPy*W(6wDhh)0@s=$>ELL;m4C2xuX9)caq4jtSi9=1X9-jZ>vh=%lT@HipRP7#?NRl3G!A+N}n(cyK!e;woOn)0S>>l)nmI%FS=NnZmk3P!N+ zW_g(X^m%=#IrDCBfp`iAzmN>Kon-%g752h{4@p(3y3_{!ByNIdne>{sbj?F3%?>7y zH;Wf&%9LzBz<-iUoX*g4{9$O>#76D0@JsQWy zR6{1dDXg#am-)4u8(VHu0bRDOBi!3W-W}w2a4BK1-haK|m5;klH|BmuX5;rLP5jWl z18SGi&h%Davw}_M)9KEQjm8h>)t_<*t3Zmi%T@@i9eXo2wnqnj(7By2WppSEcHUx;tU-PC1&AT!}$^JdE>`l~~p@%za^calt*(2v@a+1%1+tA9He zMvZt=@Pvn>Z&QC#>S7j;wxaC${O;Z9LIA^(qJR6jG&pEJJsn4PYp$)&lfN9gtxPk{ zRe4d~U^||`;l+^T10O|7^z1u-vLD4@6p6SPhaB4HYEnPdC4uTI72C5s=^kbn@iK|= z*wKO;D&_;5f!d^YpgqE8S253ef_mm8C1Pe;V$D-@2OHIPqZc-s=3PrjpTq)1!N;i8 ztAC!%>dSzS`+V_^h|^D{cx>z)Kn7o6?W*5z17Fw=2$&!et0tTp7Hd8Rx3H?Y@G0H& zWSfcjTn~kc--_D~y7`CNF; zzEpb;2e6B%NL)1LEJyckqNVjbOc6Ev$EqblaKgGhBwal$~eviS2#1znZyvp{!nQvVl3;!~Lr5PLLX`{(tm%?i1lu z1+U(KmJ6fPG6~OjOODkVX1{a6!w1?ueKdY(oI@x3fE#=^JnzgS7GK;kNgt(oV5QsH zr>}c^cZnm45k=VFC`+B+VHIx_YdY>6oDRmD*?0gmlR&Z5&d$84z6>_r8&g_t1X+~m zs8)RMaVg`&q>b+;YyBBbJ%6ZqczZ;~^?CeU=($k4?ESf!vky#}joXs8?Cjk!zuoANrOJSC-pN>J`>cv%CDcJ}9spfC#Zr}F4k{myM zcJ(uKLyGhn_61Y4Oj^vBO3*i=cap$9>kr2Gj)mgkdz*1{Wl5^~S&uJYUPAGyjfk#k&d(*5sgo25 zBC}Q1$(rLLNb5TFl8uo#QxI)kUbx*aNRc@e@$C4viiAPR)4+r7;u)Y3pW~;gDpJ5z_1i-_)xVN8xX_$x}OKsY0_Dy2b zwb;l8qS}MW`&MRe(N3RzS)wiv**o;9J>Fe+cN89c($nVP2Pi)z_g!Bdg^|pg+Dh#wd>sN^f?dMJ_;BE$rObKaNT=fMqs@B)8_J@si+3w# zIK6J@W`tt?s268fU8<|}RF7lEM$5j$nz5Ag?e*5`<3oR4mQ=qDiWA|_uFv#O9o=*s z;oa8V25_R~qayDwD!=TvJj>9G?bG=d&rNL=oqE;)G=Heu7PQ4f*VBpZ#``teYrv|~ z4Vu-mzUABQ^1OChx}%d#BEjG}TK4BAv;Gr{Y#dRxk4x$^-8Kk{jd0O}8T1nH&1*e0 zvsf(9GGosip|90}f?yRVilPP-Qnm~4luRDgR05@w-|8FjSgSKsG~qLJPZXz@Hauo_ zO&7=3OMm0NDRKOC)$lMm#h@g*|KK#sut43n)~KB-^d^%|On}>yxev zX*&yCuB-g37ft@T9r7u-PyRj*3=5X;MvCmnzHS|%WX`Xa){~$QZPwkDTc&@(37=#? z+TJE-Q#7&h(uUiO3m7`JJw)DUnO?;*^UTaSv}adg zq%ArgyW2IB+ns6VHFq^O&Brq$c${i0ttBlzq~KlR=2F=inzCVM0ZX@MKZ3dUe7ru} zYJVz5m*V6yv@0~bQ^q<1r3(b@pH^;?q)P6 z!*%qU@y60(-_cZ-HTIpYx7j=O>{MHB+yy*+7gcZjFo)|oXWBm7-p8Gz&E|f8Z=8WGO$+RO1&znjzfqi4 zpC-LJCMW#5y@3-aWe+C))Wc`15mDs68o7N zg?M_E;2tdeqwX0{O>r3P^nPsAqt`Ak?>I3J$vP)PbDw0OcvyFM1&$B*Z0|JTJj`q~ zKfI>tp!RC-9j8H6&bY3w$7Q;Q!mu%$czme1k7XwRF|4#yh-0Myc}_- zrn47r^4HZluz}8Xmur1MQLfUoJ9*wQcI~#hTF*H@ogx)@f?|khn^n2%gnxV-v{!VP zLFVYWA>iP)JqORbc0JaYP%S>H(-%#VpDh`g%f>G{jzMwo43R`%{1M1}#W=Uif)-t! zqXToZaW!9v#nmz4PZyH>-S~?_KlMuuD35a{h|% zjr^de2j+J`Q^Z~3w_9KT6n`I2>DhGBa6;LVcr2SVesu53*6jV_LT*W#JnY{K9)b;Z z#9gtq`@8NHExNGRAqeuYK4UwN*aGjL3HZRQx4egL*Vgqd?9A(4tm_t;Z99Fii|HjQPonRE@zTAvhksL8oeX(=14q9V zd>zlFB}&YqqG^asmY%vYc}2xjWVB6#wD}f1I9qJx1>a7~+mNXOpRdvjd(`g9H9X~x zIG#(|#gZYh%o``0>f_(!bMcx(Zg=BB&hH;XpvgxUdz*)Dh)=;SeCa+KcebpGu~vvQ<# zTX(x;GfoPn8k>y-`ggjK;|ZL({*r9=m;IILWRWfyOv0}?qXPKzq;#j#^I3}WjRv15 zdrpLm&#b3h3^E%C(OCQbXhltv`KWH=WCFdTrY_Z#&$dqYB85dNHiz-1C`khCcl= z&@YFbF}xv3g2=x;^JjfgZ*|+uMXXdI$QQ|xlW_t84s!G zKZ90Q3MYNQBpk5F7Nzt_D>^wBrkm-_tA1Rtwkd-iH-AnQT$;!&?>Z6{HGgVb!?0fE z+kf1W?Ynwhw|GD2=cuz{eHRlHOPDO`G^jGj1ePl~zoR>2U(8}B@8bOV*y}wq%=^~K z8#h~2&b)r2E&EYRfw@K$J3}GAKXQGi8U;1WUcw3SF?Y&Du6LU%-S35`dBX3QJ-O0O z9YXq!w}00eC%wX5oi*Af&I?T6@YZNdiq8Mv=Q`>8~SASAVcW6rz@+FPmG6yF~H&u-QO@62c zIfC@mo-tVZW`tX*ba(`<)En>0*wYXHMnT;`!ad^b6D;(w5cBhh7n7w&#PT9H zPAOyigRF9H00{rb4j>5y=R|uBuNh22fn2191_R`B>RN3NLn7lSiPXb!|g? zJ}#ymzNe|PIxAN((z{MG&h;o!J=@*XHJA%_il%Iv9W6q?==G^hLyqx!2%1w<-yG)t zKCp$DU(gS=yD%hTa`?8jSpxAmkJuYLD}*Ere%NzeGI0LC*@0mnq?YwZ5P#mW zoePT05O(5=YXa(So=&W5GwO=r+@Q%LyQlbalMh}dr|N0D%HBuB2WVdXE=}xtx6Kk!{?V;YvC8dFqn_De? zG9|aHzsVrW)MJCg5dOb2 zyxd0#m-%q$%^~9Yf}~8Pi49|P-y8T@Dzm0{pjSukp!vm`g(!FjacmKZK#(#_LsSI& zxP<5ge2c@}GcKM)7=5vTD{87>GYJ76<1GNb6Tyf8Mc`5lVFXO!L4WP&#EJ;KQh%FC z6AcDdB_?z-eXn342rYw_`xt6v&XT3nv$SA%8XkpgoJ-L+(6vY=T)0~MdH^uHVG2JX5-}13ndY7DP9xp~nDc!+;ei>Shb!uWMfv-N z^MnP%J}92e?>fr|`G3j`*ie7Y6eN&J>MR+WErLL z6HHAO!a>o+T|4$;-$#0S(Q8hpQdB-D*IqZ<*gj)9fC8Z3_J3SP*`JADbeEKGdwbu3 zs#6#G84)8(C%ZD$$G#*v6|M0!j!F;GOIq+nFI}tvP^5gA!^@?bm=jec5JE*-Ev#1} z;%+ld%E)lTLc&*OjG)Rnrbl?Xl}Xq%-L6+iS%|jMN zi^-yWxSk|alxlAVNZ_+CL(j)N9=MnuON%(tHZr&}p#PSrWnu+}GtpolbTbMDOVHlx zXzA?;Uw=OT;ZLl3=*?%74_+#Jp@|W#|bl&46Dk5~OY9#xA@hFJ_vLA4{ zRm-*z8j_2uf$Wxqo}2_5G-?pQAfnRliv#2=OdQtIOe6jHU&(8E0uPCN!m$+Urcwz9 zYRs1$yvvTW6NDV@3Kuf{i*@TG@R$@3sXwO2L4V(D^bR!TClG7=+#oz8?x+QN&J8}- zBx0KdM{}802>+$!)BYEdldW<#CUyQgaH?q{dd!}0Sb_`lJJ~t!Irl-4N=0}fc5@0d zi~Hf6odOCK)z{8ds(Gy~fYCs^X3HE5$*rgB=1hh(Kpp5F2p@~2lGH7z?|9$ztA4CG zE`Lmurh&%8DbQK=TJef>49UQ{l=h$t5?_lf|R`N_iu!U;9OKMG3MtTY0Nn}0)Rf9?uJmFwL{A9&- zUZGzg31$4cLSvE^{HTO6m1lBdYroB$w14pTc9b(Inr7EOF5fOO$D^*$plrUYLFc)# z7|Mj$S!gx&$&KfYitZZDNS(<8FY#&tAs8uw56$5EuL}ygfXq;{`5{cZZmDVBG_w3M zG4=NW%xWqPbx;eDV*!`#i>gR(Gz4^^bYEX7OuSi;vOn>SQ zk_4yGa$pe9L0-3~`U+Ohj{t4*ertKjD#CT^vLtfg8Xlo_Pr#?4gNed+Q&jF5Q1w>c z-UAcKpu8_&VeY#AD)nhoqmVT5R6hxz>?fXwHqC^RZ$})zw_#I5!-^F+$NMI|PBuZC z{$hRaxsZ%agTp4Z+~>R>OY-?ygE5^Wa+nbF$AhMVCCd%@ z5u?l|?jV&V*BjK14l(aXOD)_vPc@#YT9+GHUFmu5!V7t7oKb&o;{^NfOy#XGU<@Dx zt-0gxY(;H9^h5&(`6Ze*(DnZ64l~zZH3wEIl5LDY{>YBY{V0B)6TQ)e;eQ~K}~0-IW zx-xDHzSW4Y@#A8H%5a$(JfI&8OFgi+-6@rDcNif5lgc?BW)HBS)jt);8O%qajvt8d zr`)n8$JBI1=P3kAo<@@Mm)i&fU{ke%M_uSS>En1AXa^=qB7dRyoqs=`ktqeh?VQTg z=N!sPbfDY;Xs+M$yk}TIQ+VH2DazCZTKyAb@iJ{bCEN|b)=eI(h zWGXcfjzAK^4xH(py^ncSdJv3!_DZjDzin-Dxv|s;eZ+qeFI6Yl|MGN+?WO)2?e=)` zxlW;L?y$t-mWkW1gFgKUd;G~H%y*vgv?^D;ABgHFR3CwT>3?H;iK%K+RSkgyeRM$q zScC3&O^}+BJj|-)ffQR28S-L4=LhG9C3k@!k{j|4aBz3;8(FOUCc4A-0qeNyg43bR z-YxUPDG^Y)f%?kzyQf()On*OnxbwVNyHWs(N=RJ3#!1)@P)2PJVS8ADZX~Rs{1uyk z0ly!CuTWIFjejLC162n;D>+yf1?_5+BNZxs0)F+CS4K%s*zh}@Y9o-6`xQmyCu$7) zF7->sD!MKNRSNP#s>QA#{kq6o0(`F&e{x9GXb=*=HjGuxkRfV1K|(K2BPuvbfd{21 zmM+C^Y?5Z&U$VWUw<3v-Imwzp<{()3!fQ#{SS6ohlH8!|H zxnjb1@CRx)4mUqY0;xD)2Oi_gBi)kiH;Rk@GfN=ofz8O~@ZSgI&$n@o+7|&I8 z*0v`3hDQ>ae@(~F1DCxS5aT#NfU9qkL@Y>mo`(e1BJ>H&=KP`y`aJFx0IWoxz=7U| z&-+s~S%3E%Fo!cZet%3wstzY%wmV0r!;qchMPbQ;GO*6D$X)X+#G}H`U)A`j9)zj1 zdzfr)!tQK9`(WkWa9uJO@~N;wIOmc}p9oh1XF}+mxrEvgbG?9u=};Jc=!hjZ)g;U` zt`d9HraHTH#F+)Q(lO-KIwtKpzQ!OKL!ufw2Y>7TqiFnp@DG0?IhsZr|*Z} zFQBECdl`d(qYFP)sx1p(G$+pmo`Yjd%9#t>>Y;JkF3UuW2UI9*(MTb*>?lGYrM+Gu zK7yj6$a3?)b%*W`5DPB-@zi4%Vn&q?sBp3_lnWuNPz;e-EKSsyC7-PRuun-hra=Nj(+?hu#SZu2xLRe`)tt7I# z?C!OAXR5j3jZ0c2rXGrr5Qm&a#xEJ`>aQ)h@LgZ(BDc2FgTjV++36;;CGXfsLViPZ zP=lU0{dbj~F#)l=fnpcWCNgI0+xpmW^naf?(f9&Qe(aKvQ`-o@s@7hOn;Zf!IGKxY-;M!fiW_GnlbJeKF7%t?bOQ=o` zSr^tC0YCz$ITv2(l6tTM6n$9NaLcuusF~QQ?$a_R%y`)S3gYV>Jbz=Pszx!sGk^3a z=8M&hEIn)W_GFh2R=<3|{R&Joyc&Zn(a}d+3ujGsulN;#pe1SdjgFdLR4z{OtTnMN z*gDsfT(SBNTpi2mP%|bdr?-x-7qNLr^T|V@-TI|c2 zHp9=Za~sMU(qx{huG%zPBp@_7X*|5Rsy1F*IoFMF3EjO27b+a{fqbH27l_Gv>R-$Q zgBo?xi~HCTc95gJ^8Fx#ZX>miWs>;j;UYIL0_11%imKH>#Mm}qANU?A)_*NIY@fPI z5TQ8N!Ak_6%nZNQDBao)s*jU<#(}gT`=*m06-9?US^aV_)aA3NbM9`nKY9sg3I+k! zR%rv?D3!A!x3b^%=*<3A)WXf+KaCkcBARko7vyY*A%s%`H*TuE@8_j+ClwWUwO5%L zb}j-~E)dQVizH!ATGqU!4u2Ao3M^^yFFFr?2haP*k$^z6wTq%oA3_NnJ(VQGda|44E*jH# z)-N+@5x!o3BqSnH1RVX6-|K?ZfIRxYkdL!i7s&Bj8ZX?GB@V=3P@f& zXNFRU)x(Q#HSKU2)_-Q}>hjj#vwMv>6Rt^#6la&MIKRr}nzjQ5YJLt9bl!+-^+I_Y zVrD4eQ$fYz^S!=}ox1cEx%gx@uC+$nLf9Lqaqc;9V^R%j>eB3_^DR2X%@bT z4>Sk&A>>G2+akD(5I4GF3C6*B00!hWMP=b|{_U!jcwh54pnp3yuYSD*nh*3In)2Oj z9rM;&NR_C%#S+VB#t=Y!L{>a6Y?w){Hlwa{sH_C&0HE;Xm=83^!$pU9D=AlRn2Fr; z6*=x#>MC?vrfm%mbYP$t6yZI9&ZtGL#)87t1#8MTFlyL08Wm)c4z%h%vFw4oA9@vU zg#$=ZJwL+v+JDk^tPFEc%1A#xniGg}h;V~Vx&d&K#;(z@FlOfOyxgsIT`5JefbM?# zR=((ngEX?{zlT*)D-2Aty{alBFs(Ik%l92TSJ0?(wg5ohLbQ^KZ;}v*kc+8`-?F0U zgY5yX7XI&Nfnb9r{W$umaT#!u!0Q%r8{CZ0kAoEOEPn$4e`r_C2h`0H$?KBlq)Q9_ zP>xf216@%!T!!{69!-H*1P?(@%-@eeI>2SL9t-wX0nX{O48RW3;IQyi6e$P$>~MGb zU)d`LV?Du;XktiQZgP{`A{i45Lxer?B~{qMtc{)gdkkGeYD-FC+O)}zN~b2LrD`9z z7=Ws>bbo}m07F2$zs|&bxtC#(Gto`e_IamuwhNT2qrU&OfQisM+NHA7{zx>1m*F5i z6lcbJ2P`#evperz1;2nmU68sox4V|0cz-Cq(ol*%nqR_8=ShqB$iF_sHUHNTJV0PMbks$T;;tB)rj{^1=|Aoy0yXn}uWZF&sFi$=qe-3vd%e417J zw3%M$;-r_Qlv6e}hBk;`B^JMY8uGa6!`n@?`icUFj0 z(x>79V>ExH?f7jWVf`HG#<+CEW*bOOLS5rOU6IE(;zRXOnj39*SRcw4e=W1N{-Ce0 zBxqLYffNvULTwD-Ltfc;G?GTL1IAu}`d+cXsrX_SQrFsV5LxizLicsCYa?sfQ(Yw8 zMxdTX39?&bGb8?W)Aw8uI$M$LrrkH}9cZ%Rcy)i5So=<8uH!GGfzc4MJE7RD=`2ZG z5^QLk$ZTZgE`H45>GNn+-vBlLbSh-+2}yKu>vep5<~ZV3991W%ZnJw`({|o_LInp zOf-KVI_lnw*6)W?rB>r97gbb?4i(AaS_#i4SLzz!^>};Y(lG-cyYKozaCYRrUJ7n$ zO^y8<(<=8o-2oC9xy`UZ)EFPH-bxpw&(foKhvKwjbyCTerOX)k;9u(>$T#heTQ9nU zw@VlZ*D zRJjjnO|)K!sw?F%+>-W;V8e|E&CiEsh#3erBb&zPYyohO$l#g0@!&_hPyprJ z$oily9wk9nUM88POXl;eJi;hDb~@DTB!Fnmn!}?8cB88;!K{WeTk3&8)5$NM07miV zU$^*AiiYsdKcD;XmB7<)Q-Fkh2&I4_W3PUKs1WVzJtvngqRg^+Vs7GDA9 zTH9DvIccBb^Z<(6-;A_< zTv-$R@0>}uz$1N-e)S##X?1@HMtRy*-#)GF6sU`J&?f185V!KPIs0xl?l$#DZ?czY z>{uPvd2zDJ4DS=PX7gxmDcg3j5RT2cV{UOIvZpaO{&wH16wt)y6v~ z(`c;+9qQK?p5Z(f3!JieL;D=G4%WRefSlyu0!P05MhgPXkgzSLBm^0Qnn>!?K^49 zCGY$jNqb$9s1_3|&Q6JS6hd>7eoEOaqi}i7QIpW$?iXk_2jHS;;;?jFSyrMu{8eDR z#R^3tuxLN!_zhxTtF(W158){r3=RJ*=WG)ZUveE`h3wCj0WU@SP&MxY%dxi1_|R?O>g|jr^c>W#Hmir1#u zN$?OcO<#_uW9tO^dsaec+U=1ZqB9`RE|FfJD>z~S8t!RVrfQjTm7`y{>5=w4xvgV3 zeOOfMVM%>`P!aMi!k%`kps;edi}L>Xuzd=JUnfhNr1r1Cw%KAkh09A4%)}%GW}zfF z;~qoaTC9JO+>+s_lNY^XaVBvCM~ZPS>|;MVCkwO`Qw|Wf$p+^xQQoeZvDyRqe`62g^ zH(K|z8@&~^Q-E?CCD7P;oes=)q4ds{Rh-*k`PzS6<%mnzLR2Y#_WV#NWMPr8`f!r2cCirN$0HZm-b-NB_!-ZY;0;>J$QI5yj z(3pQrTahdv@;RE*kgg|o@#TDf7o8W*X6(#56>qI7U?ql2!)-|OohGL*`oh7o;693d zq75O>ZI$ISCKn3#YSf~fHPJ!M{s9H9I@hrMWjOguk4 z07L|LMDRJe9$W{kN21(I0i48FuBxYU;E5p_>0|eV4@zLQNB2l+|DYqwn_;E)vPOSt zgX;5!&TvTmN}XDINv8ExRt9pS>sJZo<9K=yFGPrb*~!E(;_MXAdpcI&U}{I`JcT$zkc|YM8rX+BcqI=J5V*Sb!;Sr= z2Ap|XpM{VE6tT4}xa;=r1HxeA)YE_U8GMhh)aG)4;t6G#w5+Fpsd6^cQ4$__HWYRB zLmtD*Aa5m!avO6xluGw?I|#s%D}G+q-3Sm?&<>xA;O_AlX%Rc?kNpv~S7~wO2}r9F zwgH-nYPUILd`nK4{QF2P#qZ}~Qu?~P-138qmv%mV(IT8YXE$&9Zj;E2Wc7b*KlA7* z&ejB57lJv8k;}mkJ5m!SI$9(yk4Z68^XKxW71ebNAyO2B)PhOQF?8$Hg~hL-W_~XU zD2&We?09%Hww0b4Q$qF-E@>4XtJv^Kj+4~_%wg1er&A{E1Nm;Ed(dRbDOnbapCi`5 z>}oZlpcZGX;1^;OMQ)_b*P>^u?EthAK(m|lXC(JQg+d6=u_=jh zY4m)quBRx8h{q|>)|;^Trjuc)psDIs_%MWo)Yky!jw$z4z8XT2tAKw3;iY1 ztpu@tFm*QU))NW(mxzDvI`>Abk;GNKV^mInQCu@>pgLzBs!5;{8?i5=)9!?sSNFiS zW|*o4f5LU}=4vI&Ac&}3mRg06xjUEoYXWsBP!A0OL;tY;|!- zncFe^i@PR-NV2<}QVPp?nzo!nGCECU!zm16GLER-g3^>Oq_uxjQohZIR&4x{ld`$T z?X@mm^O*2&_y2lRXh&&h#uLd}!bT)1B z4MNswuP!;2Q z;yNHcNQJJ9Te~6ZK(4WPJV)2GDgfhPL_f^LRBOH*f$x7fUxv?V(vXv$I~NEJvsAJZ zf!P6r6`n*9M6oCl%RGNtdD#TEWHCgiU_psv!z^>M;z47<$GvSoXHA)#8h%-OA^!V8 z)Q_}qF;+|#0QeNNu5aWF(tMoCX{wOgf||E}(lE=*EHA=Tu#iFMVYJ28UKCLdl783X zRiHHu(iML|skcK@N7~2re!u*%$ zX~KQ}PQ{8iU+&F`c`dq`3ZpXFs1E3eY&Y{-R5qH29>^kCt%FxF+2`JlOA%p6l?Fdc06 zBRzXvw@75553!mYG7+>}tu2NHwCM>z<{JcEbYQ@(W)_uLen8JH-(*OWYEu3{}yRckXaro~k-#oj#$j zPSEf}DN@@)ML`7oVvI0QpvbmcqG+9D(L0x|Ewx2W+@`*hz#^#mqqNpk-xk0Q<{3->M2g#1ARB)OD#r;~#F*i{?k%CVJ2sbk+r&#W)yvaZ{D$Ln zIh^Fr``AQhhjQYkLH=ft-iXk^53O1VSt%7fUM-imR6;V;Ay2fQ8M%O(D&A>xYTkll zD8>B%QBg)94+y_;kOhH}j=}UX^H+U987$kxkt+0r^eYeYwECaPKBx*igeQL;>JXqG zgdLS%9VJ<-vW<9Pa|V6dHPJT5Em`CYPrQ~&3Wx&_r~EJA&tAfDTwTpsg%_>~jGsy? z+qLPI-ySM^kSwB?cB0-~4P4hivDj?%^RtJ1q>y^+d?w#5!`e05!fELoUh(CeA|qX` zWqUI4O@)O1kMob50L?f(Y zwEVjJu5;E!*^<1shI#tzlY!7uJ>(`LDMF3Mia(x{ReSRT;Cvu7dKGKxJvD(bGjj(_ zvv&n*#QtnQe7+*U zU;0YwFF%riw`BJiDXVnAK^{xaDms+?z%y5>KB1w+H%GR8Tu10WTeb#rB-n=iOE)?3 z4&(CKl}VJsAD`BmqTEwZ1Q?b)g101E9GT5W?hZ`5KrP*oO4TuGB~=La-$*kPpeaJj zI2q|OILY3|?JqK=-o<|fk~*?V=CW``7r$oqKx6_jKK$ftfG0W0z$`H@7N7oDN@gRB zH@%BgtJz`X-|YFo^!?y&TPqBN6_UtR(7ax%mur)PXdnk6iECiPtSZ=fnq2{ER7>A+ z)GnCZWCEkwuC3tUfY`YIT@U2hteeIQ5zYNmhP1FBAzA{YH*tTXcx_8$B8nYehQPhe z`(w6GYeh*CYy=&1F(cM-80h(hKe>bnynJx44SND}%I}h&Fw2W$p|^ive~G#e+(A(d#3EvG?TTUG zx6Y00n!#W?nuQ%wL;5?*EFx|+b>g3H(u^Tp2>Ga}JWo3csx`=lECvwkfS*PT4 zQ_7~Hn7StNn5};Z#hJ%(_Lk3v$}LUQ8AgS|SXzId4D5CP5a9D`7&{-~W^rHcxhAnO zK>V9#(Bv^aKOd5LLvSVKZK@Yr8`x>laI=B4^6n`QkkQ{4d()NwY$r&`wmZb1o*YVm zxu6X@Uo>wl$N3aVV{QxA9L3V5og-2W&LMXIR|||8-7ZoV*k2?!_*|q*Htf6$xAT15 zU^susg@2%8lU#h8)a?V5ONaeNHd~W66C9E~6$!N)6ukQ)-UN#RJ>!uq0}@9UVZTvO zbtdKbt@^z|CbtkVf3EgK1gcYpU_%J9X*eHmFI0A`wA3UdAzXx+=^oUuD_HkSMhWBt zL6paNs>&h_0N@LXTCTzRMaw{cSs;v0*VNWW48JO7m2$+-TJ5{D zGdXIx4Y%LPaUgT0^Oz;3%JbIxxn+NHHLkY)d0bN6+-|(jEtIA8C3l^&rD{m3x2;UG zQR(1>nMw-K=p*ieohiE0&qn3n*k5Ewcw^uMr$@dm%$9y2eKxL%Y~+}~SuB(BgMTY= z9qwGOjZQF7wUq^2*Bj?as6yB3qV7((uSRNC+lSx#N!8se^R=abzVCe#3cY_ZCuVg> z)o~U8XHqDi1D*hZ6#$rfae4;{;CgKcJ3XW)AB=wi+y?!q`d%A#^t2u~DpP*CQqBZ{ zy<~P(Z`S|HH6G&X8}^Zz2Q(271P(s*d5p|SZTAasEoUzK&LpioCg|YR3hhcRIqYo8 zx5>6R=>6{g^TQ6ewgV@10CIm?pRK9hF=L!i7i4Ulq3T(HTgew@<~cOt^S$`>EvGO? zEOmsi6~h<*H{WVVRhtq`6O8j1et>-djLFNzu=)Y{Mg96kO<)AX55>*-ZyPa9wCLJ} zY`~HkA8dSuv%Q+NW5|M17(`5PYF$46UcKMofUJNOB&~h_)e|BEP}qO6u5M5f-#`_@ zF)a?2=G^+e8hb)g0A%V9CaTP4la#HEz*dBi1LF3|l_Z!BzuqXks(F5NQ{?G=sRHNY z0{R{Czg%!Zwi!Vzbwig)n*KZ%9WxP?sJS)gQ@HZYx`opTirnHyM$VJveE2K zPU8xyTEg}oTHAGDe`J65+8<%GpUVx6-P*?_0TVa)f=~&8!-SUUfSdgjX@tn5m_AC` zq2`h#Qvm&nnT$3AcW?=vcEiy!e2)$nZHd|0o-~i0+$NTeGs#Q-*Z%mP1(5XmPZ>q z$C4@ZOG|&}RmUGax;?{h*v>ct8LojrCQBZusJHLm{PJh2-V~KUz~=^|ZVK5L5r})E z?^g|-1qLSY_2z%Z4Bg`vH%UiX>}?YnUGClz@)@1jRSsPu{Y3!p_1kTpsExjd z`aK;FMIuf+p{Ikex;|CIFW<4q%9DG(yuskyN~6_Een_$usxEWq8AB*nL`R*Yay!>i z46JZ45$%8LysYD6u$}VgT#55@YmpB+?wcxd&sMg^+~5W+5|n3b@17DDPFGs$ZYn9l zNcYxBLJ;%r=yPe^Y+F0kLC6R>`23#W5E`Qjv*Kcr!?p9KdHXSMT0xVRnh{Cy*T{^8 zr%eNQK_m5340idFJ`wi~j;e88AKY36#4$VsKcatYHHe*NI)QJ3Ge~}YSWW^#$f7#u zj3|xIY(L|YY?yjB+z6^xdLAzl8MN4NO)|E#<_xHPrNvu#wtrYlT=r!A!nwMf|!_g;(trXS6}16-YCmVs1jx3^xfP62))h(DnmJX(0r^7oM%d zc{6{KSI`Rb$!KB9Z#?P>e8a_`LCWBVM_Pr$bGOok;c-(F5@6qh5-<`y%Fk!Ara>73 zJ2&zUmhByFnvjE7aYa^FP7-u0fxSNUm8^Q828Hq`i;E;dN|(WkW_J<`78>auL|>YZ z;DXFlJrYqttX^l#Z8vFHaTb{eHbR-Lc|L!BwoDrXUJC@mdnqjFQD&`AB~o zvPRslHC2s*L_qbH98U&$!9G*@QFt(l3A7wV-AU$IdPoP(=aT29p9jU-ufe@eBVCxd z#MaPIU_?SaigWtyI+rJG{(#Y^CHiCH>^@i0`(H>*_^E4$pJZ#Z8*e@~p;W{l!VlSM z1IL~wGr9=?H#Ph!y4XH3*pl$%amIhQnh6caQ9$AmCl1F~{`fkNoeg>B44+DrRelkP z-V$X&@4d65^U!-c`sn>O*~JUXjLg^=WRMwOx7L$r7^+XOyFH{N#ECBgG#a_>GVDh9 z3}i}TJF$l(aqhyEQv_CB}FigxgT&QpaURp6b?XIq|Bnn!NyApq^eUFk8 z3|r@#Rr@`V)ARO_SDCjt-;v^#8S5^A{N-OOOh5RNzIFjFNIZQFXjo~}Q)V4`r zf~2W(A_szaN{hdf7gupqnrz}T*u!KF&J(e1BgbCUlJCq`E(b*pFIgcD=hMGy9uXgs zz_&OAc#$_kFyzMmi)>7P6+M5u#s*$_-0#5&r*?m9sWdU01R2O-^~|JVVzImA;%oIy5albGg>ANqeiM0R4CK_*NN#U_tmcSu(zT)Dgk;7C89hPbq;U z5!n!|`_Y|;HmnS#zc(M#gsU|D#go86?!yqQy~d%9ot@9(55$;9UfzE>RBZ&n;AMBK zAXChXESvP|xLV$3tdsQHDaZ!jvfE}#=xoGq=qNOjF67Y>hp$vkFqy(3Pfot^-_XF# zYR|wP4^hT^;gz$vza{Bh9g!GV#^k?!+C2^aaW<3L{Dc;pXwqg4GV^Xxc$zXk5r8Xdjb8NB<-tqm ziyg@&YLdQzRea(V8{BCV@CAA)ON8G%s*I;FxY|y8bYZik09Jp{hPO)5{$gVsUq0Rd z4%Slp1|Hm0v)NoYj~E&lzN3jWSadO1zDfr@fDP{Olt>Ri$jEUb&IK-p#oGyS4pdmN zjOB3cRor0mzO6lu*_ZltJFgo#SV$zCx;2fy`!kA4(I%_05d$?b`k+F4jb{I0Yn1%7{$p@ z(_AbmA0-f*fVaACFAZ~ro-fC#Xs#lgI@E1CZcb!k4bz+S=vCkQjE-Q--_?Tc>qQ6j zkSD{QW2}*;X1*S2`u985971SScwBAV5UYJtbH+2BuXBG_Z@TU`zvWZS^jyl|lYsFv zjh9RuUjd-|72uxNONleJL770sbpzc_OrFXxFSMnvp1@GFixmR!=W9nJOW|pFe8H-vKJ`&8+xTC^2zo9agr@$v2g1 zAswOvFGSKEx1}eTH0a3!bx+Ba$r5mkl4 z-kQ|T1EXXO1i@~qSVM3U0zqBD+%$71AfP0Y1LNko{$Y9HBfc#g>`TJJw92yJT027! zi7Q)@AzxfKYV}cxPt#-Yg<-&U$6qYqnO#_cm-TMHg`~6y+N5ebnV<4yECpTBRvv$& z2L0f9oUlL`PQVtb-X4=NkxfE4eK!XWa2(iZ+F-|Nx^^5QuX<&{KLvTSd zILdF%6pVdR>_r1oZ@U_y6)Yo&8pv2-L+Shysyd)(`cQr1`rOZlTVy>M5#~Om0x~}D zYwSEV!ZQQC%TUzq?1uncG)QMn-j{hzo6Mk=OsGC`D9qm8_RZ`OoZ`S@ySt?%n!i4of@b^!!3c!3;0Gpo0ulTVFmdnDgE4%X>fx+5Er3rq z;}34~9%<0a#f^S4F|W(D*FY}hyNqQfPU%8r5#3`8lHRzA4D52)9SiDw2bK?yXOpV& zLnm2N!keiW!8~hDghqG$#J7I{egSwp8Y+9_+lX^piNkOV;Nbm23RxygWZECt9AB95lfo9$E63Dle3#>M0y9$O2zYs+&uPyqIZABgG6HA>%(n7 zP^3>rx9vglGCa;O%V$1?Kd4#+t8aS9+lPb0Yw%2cE`rvk|cn-E%(}21YTOmodueX9;Yt_(JJCBoh#H;QUUM^ zLZGzTv&!xmkYnu3idjsnXOMAEDD;x^;>hS znnU&}@dpT#fP$DJKtRMw@8 zCkIk+)+VVzeCls8Xy@oodII7>^fS$AQ9G5CNFM#90fu{{d4TtMTHZ@4YgJS=cvk&^ zu5}(yNg*yBE&_k)H+MTkGlCh>)p9oIhEGI(Td2&$UFW?=&-mgIPS9W6gMoglS$-2_ z9-5aD^QXRXdJgq(n3vj9xM&SEaK>TZ1CNNNYLnuJc;cf~k^AH{_4<$(`vrrmRzP&ZphCeG>sq!_#oTOA` z^jB!#^h`DG8)7;tszO_CyFw~r{)`3q$P@V8{`h~vbM72kYEX7-TiLP6;@!jF$B>Pd z4S=D`MV{`Yc06AUZ__!+2Vp&(HYjy*epC@AQxfW;uUv6Mhz`p;I|w9W!~UgA$82i{t zdQTrJv&`!CNL~-%7Jpi&$87Q=c8Z z=fBb}SjgX*5~yc$Br-K0V4GV9>AtI0fP@;C5T*OXt5G=At=3$;)tF1st*`gMT^0<6S$r4`FXYMx)Z1X*j)x zHkk>ZdZxl_!K|92p-e1U+|*Rdob{}E0KQl8A2nlNzTu3a9JxG3QoD| zo^soKIb8_KfyP$W1t3ld<15+iXie-m#lHzX$g)}|&2QI4RA=8xxru)PFqE#gGiDkk zBs0mVy6N^9*-KRGC{fI^1WjGn^P9!DBU!?pB%Bi>Mn?%j? zCZlPZrM5;^MQEk6&l%#`$+6t`xtF5AsFkgYc422pLLN7`-$_KT*V<5qcv5|zyiYv5 zVWcJt_IOn|iY);|@l$_U&LJ!k4t#08-fdVR9WgHc!A4M;EQ6@p1pM34X`Au*<3Xtl z(V#*W5KFIUizooL;KY0n*gNjaemisTG-KQSo_g!dp2R+J$Qc+}Pr4h9HLOdGwww1o zbup<(WM4P|W!u?=UVz>!d3}o}@T{vD-|<{sr5+ciJ3k{03LbxqYC0%>wvhOq8}bgx zd73I%^n8fSOnnjCdkWA2{8T(i8S?X$+HRRHoQQJ=`AN5Vf)1mc;czo(=IO_xmx_6C ztLK{#HB|@#UuKax#i74lm*h#{GlM=e*?}geFW{K%0$~JzBVT-Tc+L`Id^7*O2F!b7 zC#%_R`V=AMig?C06)4UWOVl;t3t z6=+AGS6D*{wtEN3o!}MAC(X@ItU5K7CMLHS4^!4*{)1yThZ`yR z%-kf}Ez{=`9}(}3%ENpMGWOe95!!T4`5qkTFvcf=5B4-7?h{8^tdM(vg=aL^LL(qS zQ7UOvaUqNP^^7lu!=(R`J8ZdEw32d%Q^2ZyxTS zp;=u%t?n=bbED!IbDE>v%IZ?9VA1dSO)VIho}6KG>eB%$PYvSgVXAux)tLDPLWNv8 z5}}hVO%#Eerq!=5K=ciJ6PU0Q&X zso{^`57Dv*vn@j%vt#FKR@qD+4hIi?HiT!{y%>Xaj+l{B)X8Q+U)+=1w&Z^}NMimF zCBei5NZ!GoJ!+}?0V9df2fuR%pdUHPp0W(?f;5ee<8C-~I~sWb_fk2^Rko+Vg}>Wg zwQ{*=!ivs^s$=9`I_0MDaK7|@~{H>(UM*!w10^1DPut}dZy8`GYn?Ahub-e+O;de ztuDlKIq7V)&*W(to5OMdjXo#H;DT~k z)sjAuo;UFG6eP?9Fvj%`n}ss9;_~8@Nq>E@?XxI92((|7Z*bra@haGnGe=?UpJYAU znEdTNXwfmNX?rWEsvi<*^0-mW z8O-vfz9hFq24anZ>KG(f6og~_dwO4@<}N%~T~?lC`69Fy-u?TD;tX>D#+`@)SPwC4 zzalgpo$M%;ZHQUh@nApmw;oVW;1INusr&^kN?)K8gMvat7Sey1X8*{p??ivtQ^zXV zpGDnj>zMs1mZp&8@J!H6{`XtT=J2C_Dy6|6xJ5&MObg)-mE}I;(u4SWZ6+eJw^&5W zVsCi}cga6TffPWGI#J6-U8xGD7$}Hc_k*m?V?hrH(^!j;;@c{^5}b)wRrkuOwFp+C zGCP>z05wg`SrLEHXrGmX{chr&=TCIxu$t@ZUxhUKFm_bqvSz^C(;;Smg`lmp#X+D7 zMovsB3$L`CU9+M~g_0CTrGM(;(Z-jSNfjIU(LFKIwEtanFqlS_*&bA(pO}nS}B(%Rc6OkBjy2 zT#>{Nye@l$#Dayb{kU?ff)HN3{;`>Ld5A9Ed;A&<985d?u?NK#!SV9Sq0}vz5Jn7+ zvZbp*#9va`4R*@B5Htbf39jMv%jom1vbSO|uwIayKfyz!aJ@0gReGo&v)rc0E_l7g z=RQvj#yEdTU1N;ZbPPKj@PGIsbOT&e6MBo>)r%t6N1~|xLUnjWZwaLg&bn}cn{=42 zUxTVBumqwC!-VveJcF8!Y8Kn?Zk)&vcq#*lWwH`omhy3l8PoC;jE- zMXOvU4E-1e<-xQVO?`*T=^swNA4^%}M-aLBu%D41g9A|0i^pq0bQUpDmRJ>oKVb`N z*fMKd5W2}EcTdJ;%tvuaBe=_u`ogTP-^_F_FLTRyWaIIA4thc(x!sKlVE1XVjCe0YP`#@%Ft?M9pFFsvVXL@Cx&?S#qj1a8E)yhuikSf^y6e)Zs90S2 z@R!g|Hv3^Fd4v)HX?ylO^Mtk|K%`ZuzP5k74a*mEKwd+7rtkau@U#J|+XcFwd;CNQOmp0IX-z(@Wkb|xDQ*)sKMuO;5 z6e0vE&If^luaOek>B0~7Zk_1RT8d}Sl&41*Bc8cYc6!V*-{v4588;PNLL|wBw1Nh5Bv*ubd3mSBc$+2*n9xiM#LOoeqDuKy zSj3UxtmGd3i0F1_%i=8@^HFs(HI1(_&o&9de3R3Xt|xm2u*lSpJyyXd*V3d(g)}*e ze`}$Sm<1$AsBX}jGLkuBvHLSnENNlvoOPm$+%HdINReR(HyD4WU~!e1 zGhTJdqah;MnKQXU8TRRtAo;1(#&kwfzzfL$j45J4U^^{NUNQzU9J?7maA&ISxL5>z zgt*Hp<*R;-HVOCt{LjDst)90rJO1O}@42p#fB)Bip4;#rWp-r$RnOA_{#pM!&-RMI z{x`Ac#GF%0%k5ek29hH;qxC$V<$QZK`>GyZod8h^IN&1ubw%p-{zhLQR| zXPne93J4_|2%Cs!!VeU{7AZ%%CEHfWpZll%f6jMO?zGHlB=!u$j(`3bKKXykkyI@CSZe1q z5*SA6gg>M5k5fmLn8L~=7^)bCN$0=z6@+0Jp){G%vWGoK%$8x;Uiz!w{dGkFZt)B= z1a2$&?|N8HBP2bU+!go__JIvUa;L*+I7~2qOHkLlhNw9yyj2uCmq1M{XY!2Vg zar{qToE8)Hc>Bp^xpir1tlSmzLFWsxXScZGs$mxdOio|wU`6pmO3lu-Gf-9yG$_Le zA_5v@s@57r+^SoX)rq4TVL$|*yr=k^Lf4S(w6v#Hr&DM+MaNnGesy>}NSb06;JF!; za6g|L>%%cU1qgp>;)fh}4~bN8msIgfiud&VIPlPn{pL#^69c?9u4)5Y7esaza~ETP?o$9Fg%|K$uzg z^|4*@Y7)>>Ji)=4k8B32a}zAZ=`<9mi{^1qA0^sn$Y*~W!?2JrqqgeWv1L)mQk)v+ zp3G3Ba*Nl0i|dpnThcnp)L)EKNfT-*XX^!XfnM$ax`Skej03&U-i?~-b0s1dD-PMX}6fD$zNn~p0R6zMWDo+|Ul+!4J zb!=rjCq7*B855*>!BN93ow+4dyhryl)sMaRP^E-TW_^9g?#62qSL8{ggG=J>^}$UT z)^M27wAZNKcMj5VUzzU@;ia~Mge?mh-sx_V?xKH$sy2!)Ghw6C7@2KN{2YHbJ-UA1 ziA9t0mN|(ON}4*p2dux2+3B>ol3D)2l0S}#Z7j+CDk+RsCaA#@Yo*>!@BC234DkC6 z$s9>WL3><9r7xef;%@T#S-akKDW}RnBkY~P0b^R3e*47%d7S75n$PO9r0k~h>ZJ27 z9dm!eb6}J~1%)Ie#f@+L+`!hvs!5zO4tUm#mMOU9!y+~`pAXLl@RW>cw%rciDLsge z&Q=sHqlUyMqhCNS;vPadq}a#8M??WxIbi#2WEcammD7j~6UG~De_fd!KWr3T9J>qZ-h;MM7T3{KN4Zh&ty1HVz#9WoE4)S1o%eU@@vY~%c z6QRMhz`oiB18pB3YCxNxe_t8ofTHi$S+U-H-$@fZ5PVfS$E7gx&q$!v&llPv$O9c2 zAH8d#@082G7&?)-9$zvmD;7z|T#!7}oo0-T7rvuGWg!Y@6itX zN4&Nh?r0DF#_*>`OXY+=m;72ag_`($kyz8+PGWqE(hhiDDQ z9{*C$qm(Zym5S|utrTJCw~=6uS1MQ4!_3iQ$;31WU_Crt<$>GY$&e^I6IYEpGSg2O zqXHaPCdp?7yg|F-eZ6d5Tn{X3zE{{1`6X0?FW7bubL9zGhLBPAIEVRcx=k)|8VwPci1!&Se^29y*>Gv`}`8*-G9SH|UCKXP#R^ zqH>7w!U%0pc@qu-=s}GV4blY2fs$owwQCTpo}sWAYWz?Y(`o>2`Za&RJ{Xf;<4^P+ z-V(l|NfF~TRa_8$W93-u`&tUi%0HXYq8SUQ!OEFk;`?`I(){aO@J%XR(R`z3RTG~I z8M-PKi%F|8#>la|1|+FRLy96E)jeqw^8LmJCZmww-i8tkT#$LEL!vhxDmNK=D94g_ zvlGbxaFzvxyBq=P07pQ$zdjsTUh0aDnVb-)zu%}r ziG@$PF0qX(+8W%?Le%IMgdYPZeW*N16DC!wG8DP5SgD^AiW8;R?(xQd6SOMi&tj?- zGRI@CSvXS4Tig}Wf|@c}jjx6+EIF05Rp8nHXvEPCiE6$^cbFMOn2^>A-z4)tKU}Gm zfji;BHZZ>%e728hi#E!L`eu z0@ORz1=_wT_%);U@J(!gBNRd9m)?J=lj|)eNkI`LTW2{x!c>2BicFQ*bh>+$ql#tW zq^8*&OceyW9EYYwzigGwM}hkz2S;gwD>V%A>&|zzPkCKq{E-^n;`!c5Ztyo7lG~T3 z#y;8DEzX~)P?{fHcZ7?aa51FXhPDK+b7Vs*xt3KfA+XIet%94r8cZ_WBRYiO;^9(q?=dgE8CQ_na2ZBuNMrbN7!MaZJrAExf6F%oAq1L|QF$$c z5kSgx08~u6A91dKnuv}xZW_7hu@7to{ z^eTFDnNK1ZNB;qd2O935#7t92E@^xu4l`?x-SwaO4)?thx$ufh!MU?v0W?{(SPnCX zssk~145~i#+3P}tj-MIki|vG+*ZYHodk~`#!W>$DQ-~nTs_M?p0C$;)b*}P>_o)_k zLJd)fZaAfX)WC7tm|(e)%w_3$!6-KGka8>$HHC8d^l}Ixj3(hx6oZ#C;P5K<`lFPm z9A34feu{y|MApmatOGZC#%a5C4r)D(`?KgX-*X>d6frUZCSc{9zhBFxH$e-n&0am= zhZy0v_3*gY;II18U_Cta)>^t7n4liTSFSp>M}>NSB}=o!%qH)5NFQBy3N+E^oRE{c z$hF+>6t1vWz!wC+2AWUF`}%7==LObm7dS5c$TFHFgaAcgNm|o5Vqc)NQ^XN4VK*5ARA;lU@;1WckT0~mr7q=JLDBh6xy z{2PIPD$1NFlc-#hdbG9MI3!1J6Y!dBT6hu!A96LP*2FS?4ul$)L~o2Q4$c8&i0>o7 z%_VNSOQ-KjSn5Z$^1$eNh@+U6wSui*$^h579Di6mBZC!5Ey3xgxq;^h+?EC}QY5;5 zGIBY6JBd`j<;G0#Q5D%Em%^GzjkMLh^?xVm4r`oNPYCCfEI4wbtor)ld71lR|oAa|~>?$$u;{=w%T zjHv=_x-a|;=#$R}6@s!LG6L@2oBcJ{D&RW5JV-7G9>`$^n>`=30|lTZ%ewHDC@)02 z?k(r{y2Z(8)R!i~O_wYqb=r$xx^hB)mR<8Vb8T=|Vo|PO-IncV1V{jn-bU~IJu9Xf z1^0U6SxA2n`#zvS$A$#g9Y}_hvFPWd_j%f%x}{2W+fvnbSi1sHW!uKAXQDs2E*>E$ zcyq*0=N}yeeLJ^;X>{zuZvwu(me_x+-Zbp0$tiK@3_sx~LTmJ|a*PF6@md>yLFewy z;n5D-gFF~GYILF3f@fmqqu7r0X=JdP1Bw?x#CUSXF`l=D%QmTNr+8iL!vQrU^*{)n zD~3<%vY<#aGQC^ErBoKAS*daXw9Y}Qg?vGOV}J%t5|K$o3601_E#9f?;x~DAag}wX z0^lHC=ce@VuwoL*iRgKwD&;S;Wc4!t&FnZSlgNXz=|)7Q>hB<@ImFJQ+uxhv^B z-A)H;gs`54{l@-`!ec0M^10uD%6gR zq%$`n#`vWU1RrykvfG!^FjEWtL&?tc=b65+B$;mRtBbRHzf!|&d*bq$gK(GV=(OxF zELOT6AmTa?0>$ewqhrv2o`4t+sT81k^kFDh>e+U+;eOQ>bl$?)AniYGWYK%r0aR_R zcnP7b`Rh9`v^v`xsgM4gexpCOq#OmXA^;Q>k;1P0@NQ($fq4t&rXjw(x}Y{h26t>Z z27^PdK-T9d9J(lnUW8!;u@l50V%6h9s)e-C4|!3;(2@+Z8Wk9S7ranJW`L?-`MHre z9Nh2qDSp@mya0H6)UtN*CTFroE*yNjm2j1Rkp^HH13W#($0I5= z+~$Kz$>S!}PVz~A!7D_HzZ+3Bn(zWoG$x1_`r*a&G_^~#2@r?I#@ z$yIZw2WQ=&qm4NLMLlk8pS7MdE9XXHg81D19{I#J^#7WzM)Rx%yOcA zIAa&I+4Nk)3E_=>?D4ZeOI?9{)WP&&6&~Yh+`cKF7}85Dg_WSf@^D6J0qz+G2A@m= zy@#ul2d&S4ib=dJr*3Sv!RfiudgjTQDB4@GD7CH#nYNCd9Yl{J!pQU3Zx;z-%xF=* zA;fz!HVhh$-ueo>q2gg+ziiLT9>GPj0edHXXM)C~d+JPwl88una~s|QwNUSJ^+DArcL=e-Ea-?kxXy0KvSH3G+#Fvg%Pw?`fnf2rms5{xTEm{a6(mz&VBp zjD0&C9CX-V(VMcy@51&^^Cdax4Z^)} z)t&K9%A$wiYMku*pn2^S0KSsu$#?nhK@MCv$Y=M<0{Y!o8JGJqh*thmsZ(yV&f~^(kWI#obcafZSxSM$TG+mf;vFIWW=#_q=FldE02UOO zZDd#%#Z&6<`2)QuN|sK8_5}&Iqj>0lpNg`4xYDi^Y;-{Qg9z7>=O1@KWec4u_bw~P znEXs1-seBui$@seTQQ?0=Bs_iC?kJ=S*mbn!J}XN&A2s9+F@+$VnlXI2`~dAYphc>19>)laF_QgK(c| zLgf4fCAZM_hAf+DD}gK95XxfKQ$1%pE0|OQ>`w?>|1YdM8w4v5-@6pz>Kp8f?UZ#O zUe|MMMC>2;Fe9(+swe;#EDMb`lTb>9P1`r1n*#4qXR%%M#{zyo!bV&u0u_G1M|1&4 zm1pRY01jPFJ5_Kbg`b~)Hk`njq4%nE}c1Ml)3^)y$#6&F9y%S^&P8Ol# zmODE#Gpf4zVU-khy4cPf4iO`O0NrvkKPb}?YqH0Equ@_**yo(6ycqiz`r`zy!eh}% zd-e191}3~$)->mt`mtu9|G0W#x=c;dj{WjirxuTtBjLIfez>;bO5tyRK$!iE8PH`I zNI&isxHna$hk!xJd(_UV+U_pvSm5Fcpafc(qVn4IWb@(=9iw*rNo+ny9_KC<>6G!W zgldx1t<%^i>9y)BSi^)hkr~BJ<&P1Pl3m{X!8ibRYp2jRMBa@hx^aF>EGYvh6Z%M3 znC*`%SC<*~%Ps@BwQqfY2$#01YQ_x2-g^uJnQHKKu#ru18|%bII-1#Zb5V1*&T-&3 zce=82h(*d6cSv8@bMtQ{$?P1jOKfa*qEYU$q5R5<+%N zI*D9=NY={1&o{x!=ZRD)#5lnIb{G^VoEgRV^GoDuR|L?UqV+qz#8$f2X`^#U^z~vA zx18dCIXa76%b^&EE+~UA4a3ZwS@4CKSKs=*g=^{Ba=A`5Xl>pE4z!V>7~f4H?#==I z58ED;*V4RQwp#gr6%(86!P6C6%-&!e>_t_iSxfbbV57S7ysIStOB*%q`fjSuPo!Y^*3xz z3Dy^^vGOLoZSQeX%^7iwIR{Rj&TABp7^z$97I+Kvo z_ov!0<5kvwyF9MwvhUKb2Vil$B6c#jgBvHOm@AR^ox5^@WbSw$oF>Gw%Y{-nO_L;r z#9rL^v}>U>XA+_hr#6lPlCSwwI&tS6m<>tdv?JuGpXb_q@W)d&x2Jsj?d`aIqu#WR z#D2+FjE5Zv45Pf{Lbo{*y_sT8^Dqt8`y=&6yk``Dc_i+qaVJ1#gZKdT>tCnom*u;= zTj`atb=+WU!>b5EBlS*`y=+@}6=2f*pqs$^PoSYOAtX$yF7#J((RYGw{a(saFlToP zEcFW?XnX99G-i;6Of@<4gT%iSgevpP zH{L-xIU!-b46sq<$aCE@fv>-aUrw}H{YQ=R@-B!pxy`Dy!>O-%`aSz304YHgC#)PPUQmyTx194$>g=kLf?Gc#0E2hAX&D( zEg>k^K|Dm(7x-rDH(a$)E*v_&TR$y-qrqYB!TjSEIz2$B^LT3c=%=uQH9g1!BkH%^ z^+n+>R-fBHJ$PHO;S)gAN26Ke#?&hUgkNaJ6%IPOp3Youk=Z)tBljQ_f@L09jO^ena`g8VY=Dm0?8G_I553yjdT0?gGnjy5? z@fl&z3-xeumSTTNOm~w4`7$|w)gH5rP0>s7$5siSO_b9zag{u9VezBZBh5tS%u8jf z^qwGS&O9(m!b`DE3L{-t{C;v{JEl*T7{iqGqN=UJN(4A$`IIG zOqn?Cz2~ee+xu2G5Lgp<%m#^~gC4+(YydDJ!u<^;3*wArIa&Gyd5 z5AYA3@+wj@Cmrv?{7o<8q4h1(7S}x+1b%btJ%D6e)RUF?2X7R@qg7rXK^g4;fRR6+ ztigDRO_bo!slcIAIB8av3QbS1j8?O+hebKm51jCwRZaoBy$h~?Zp1uw^LKXRp3{^u zcJ;*OzT_8&yz7ulMOKQ!t*3Y5Y3EN^3R*T4OEj&Gx^v$=VBoQS9=J4t_XKbqt^z1V zYUz=T+;>ktPNzg|uIaMr5h`0Ip&cTRj|TO=9mrlDXYB5GtrdZoW&u)C zvcbrr5IWn;wvM%b1d1$M+i~n$wsOCKv}3FBR#(f9jj)P8H{DJ3R$qy%xO?!r%JXRR zTAK=Q>Cfv%RGTK|9g)45o8-DKqT_udB5;Gw&@PU(Toz|X6;(5nBn!bBv!6o2rS`kx zSH*^Ac z^#Z4Ln2A6%j?49n7STol_yhQfOMG;S&ymBE^>*x?0PB-0dE}6q^|rLr{D?2gCLNJN z971!?m}!T93$IgZc*T--JuR}5GT_Q0sfj;L(X?3=9Xzyp^1V4nA6kjgCw|_A^gwQ~ z!B~?X00QZ#bxaJ-L%F-c%JNFc08nFHO@D4!UZ0G(eyX3T*hffoDdekY{Kn?@K3-Xt zH;8I00IwNLb+9KXmc;qm4nc_;78u|M>KtZ2djh?G;s_Q8ulKPC9^-L_eAG_vg!jtc z_!iyOSxi~dZ=XHW9$D~ASS8lmZ$zhQqa=K?JEZ1!M2f|bCkJ|NyXGcK-Y%>>zoq3t``f&W+2B`AYKpXFv+i8?Rdzy^ z(c;*7cf}6+OvEFozUmMsvSFD&TVp+B#-+S}gt?YOrLKq;YPxC?#g8bW}>7C zvBxX|A@3}=w=l`!moNdmlZ5sSca$AbMeQHIoQOHm$mWJ|)%0DKf8kq)i2Y>8%3GMP zhZs9Ab!o^!$Jf@=Y7!>&&DZ=+59b~37ZN@N1ZvFBT?Q<`t}(9+!%G}Oz7VSQn@tLT z3cLF|fa7+7ICZMzj(0Nb@2)1tq?EyB3nfRBWL6CQ5mESBQKs`T8^)AkD_|Li$uXAa zoW(S} z&>D4f7N;>>y3-11ah(*1=+Ykm9~l0B)7^5(aB+<&Yu?{?@_j2Fr)di*>zA-?l%z|Z zk5e>-X}g8QhF9Yy!n518xx1H9UNbD`IhgXEMW}duz$C1#+LtHg>Vwusqyx+B*=U3{ zF@@$ucMypT>F6aa+EZ$YH3nI1KLJuzinS_A@pV^vz}=q zJXHG@;jf}wDwOXvl5HLE?RU6ntObF~6n^u{U1fscclXn;H<03pW~7~e{%GjbP

S zfU4q!?A8*aLkX;#gOcIFS1uj0f(oC6I_aSssB6vs>wfeYSH(aj`L?TW@jX4U~`7DDGw=bR>2d6`hb^`|zr^}JlSpsDv z74%p5-~Ih$YJy5+*-PbrGe?AlsVRnKsvKrhRmUv%Z*&!qP@#p@WLl_Ha&4W{XggzkewMz(DCVg!*?+@?IHou4$j?YQ;<1QTN> ztwCm9W9KB{RAsAw#BUNDk2_+%CNML-sO_LuPF67_WHYAv02``TY-#-#F+q47G0h{MAGJgJ*a>YKeZPf&(T2B+YGOG%Sg7l=6N>bMd6}iiJ9k zaleC=(Qp^PNW;VBGGa%r*+;~<`EGtcfcy~Gw_XLS>N4wfY4z11A}Ou9o3eT975Hh{ z0yq8;CZT12(L_iZzft)TF=eug)P`tBaM{=whiWdmKFd=hZ~%@b020$>D$-@h}(9=zk*yHErb?nj~3rj zGUfNvVz=NjFMixQ-%Z*yiIT&|Q;SJ<|Ggu-SJCNzd!@la(}67_One8<<-VS$gE@=L z6s*9p^_|oi^Fg2#uZI`SvOK#L>4`%RUde-8K5M+6^V9f~t0BbV`0oXzb=-|4W{hV$G-L6v>2wV2EXq^Qw~_H`JjOnE`vS`DF2bGuD5 zu$i#g*0xF!UGy*3v0sv$P%yD0aVPGmF)QRX-Sr zsPx~p6Wqf_If-1yF$Fg)JpJg@Varb>b8zWD`*=Fl2LoR8Id34jv`in zsMXM7R!qq(*&kC*|E5&~Q92Q7)95TUO6D&EA-1C|{if2zkHJ&-ZqI=O9zAMoHsprv z4lpOskM+BU@-D4B*)n!UhHPY4kBNS(@Cf-q0gRsXlWS~-?0#Rmun^~i)Eo*j6Lo6iXj z2vCLng%L)a}q;RmQSlD3!(A*)(HbDgk?{WXXfd!?W^h)1a3$phbT% zkGL358d?WQ%c7WD9>jKaktk`mPU-+!x!uEt)rqbzwH^BYs&z>I?T? z$12lpG^$6tQ5&|P{Xi+9CtS&YbDAqAao&uzSJ#5@pg)RdK}z8yX145BOa(-z5b{?U zd20weOKS^*!XUf*N~%}VuMqs3VPWWwg!y(Mv0o0;36B@TK7i)|?+nedUhqc{_H29# ztxFgGoo26MmLUb*WuxWZtL#eRY&8lIOMVmAFDueh7&a=0jzSVNx4qea;TU&YK%GdF ziIpL7ip9|(BeSfsS=_hkyL%f=(>&2YQ=iwJzwhreM{DKKVq}7oAP| zqcLL7-BjN6>q_UKS_U@oDXG5aFM?{{=VmP+&C*}JT=B@+TptyGt!3&R+-~;p;zMn_ zebQg-)iKN>1uofH3pwP8sN6YDAFy-o0evtxe1sL#p0zSxuVKWwJL-OF4Jtg!$u*0Q ziC*m`Bi8?Z_&Zs4`#a&3w>bFRina6M{IEf#trFoBUCfJdX_qrAsF7Gk?}-1yN3F>3 z>idW;NDP-*aKcA_rt1^ z7W&}l?mqRt)usg!x1u+d&a$3fR7%Ea1QgaFTEoL*eH!ziBnVCG*r`zU7Van(mK{5q z$;?BY$l)>xs!kT`!YqvL1@p9Jnf3wh!T=hz>W)40xvWa-dvM`XJHQU~$A^Tg!CESD%KSa}5gxVs z$I>r+T*pJ}QFFF_>N?bic5f3vSCw3fb>9^+TgNes?(s7})P`=o$Ns9K2j;Jir+FM8 zS}J~-Eks9uLY{mUN$0%ca~?g%#Dx4!R3E?V6;8k{_&zx?P``=~Ksm;e&CO#7>8+rv zz?oU35a8;ZTQN6&%;f#Gz~U*)Jj!+BIY32zj8)05x_fY`>lrNiWGep*K6tb-EOP#h z6@2v8&*O$_5m{{sc2V+dmp=8OFdI4c7ULH<>mq-D3cPhpr|B}Embu!wd!)+EW3LAR zyWm*1Gc?*h}v)6kX4K3v8(72ktA4Z8UA-Ta52PEWR=oq8EKgY>V0GI5~~C< zl}(=YWW|HB)hefvfRE1_XV&}V0qL#)gRnqxs3We(hrQ1E)4wz!oy16s zsLil{8}%!oTd65;1H9=KPjka0yfZ1W0>@#AUT`TzMMjYM;?Fv;a zL_A-0%w~UQOQ9{cxf)7n4{uX9fywj@ui+H^Nt{YKM3zC^-y3adzoKtvtgf4rAo^v9o3TG#yWhDL^H^L;hVxUQw19A zRgMYTL`1?QUSAd0_UI($e4}heXGji2wIje7K`H^Pnql2+366wID2=Dn$nY%O-+UAf zY9z0~$CLIT{5)7ki^Xifm23Ix&C;&T!=I*xvSKua{tj*!ep@zeOxEhzic#uII>fge zzUandS!O0)898uhIk44#I}0hf{k_TeR413(f?Hzonx&t%sPo0Gl&d(9s-K~J30WzT(AsJLRF^tASOdv zU1xF?Iu^T4mI&+4$?6Z)Y;I8RryBD-tfs!N&jsG0hfA#m4As1U_ib9^<+XhpzvU4=Z{6oPil`lZXi+T_U8W@i>Cyd@ex6RtoRF4NRNlZ}<`c znI|tC!Px^;N{mY4Rd{p$4Y4540NMXyPO|)sil#;#_tP+eGx=Yp^5(9UQnfA4I6t4hDjejKf1`Yt z)F`>)tdS~zuy*vrKIqy^HhyY%0rlF|{JIg!yG>DlJq3umxtqkL_o8<_==#^#31V&b|NZmHR>(#dJf%g znR0)BX?VluCtW#UG>TpF)HJ-hKENRw0szwUTUm{(P*sNl(`LGd(yR*J`_j;4ZegLgLI$L)J`c%6MUMCi#2#V8q1r^plKf_E8PDS&K!y7~3_g zQqJh!ILDhIOnwab!#g8&d9lG)gcIOj60*d9qXKe`xi}Rwrw(8l#Z!W(lTWrL(8Aa_ zJNqTGab5332}8WE|UKPLw6akKuTrOL)YpXg1ob zk#NbVz6YtZsmKekn23bcv$Bp?g*DUtn7gGYa*VaM#{7tM<}{G0*A@P$o54cJgMNnKxul*{yZ*QY}tQyDw-MB6(H4~uip? zjuM<~-GyMJu8}Ppe-<8DEQr$ZNf$|{@7{BDNsiI7hyd3=Drb`%x3 zS;C{K>gu17_tGTj`X!h;mmF6#aTIcl(b0Af1=KS4o6-ZiG%_@udeyvtN$6uSv~(`i z7S5XatEsp)unu*?P?r73UtHv^k z%c%RJ4img@t41t+pqp5Feaql|=N(H2{CcZ08GeW5-Ya!~jRse`p=$G+PVEO^ppZG5 zNQT$wdpx=F8!6FmYG`N}??Pv@CDcd)la}txF4h*0<=xbN6<%67U4Fb$v**eHcwZpA zd4y~Ozk_M#9ARF66?C}T%$^s}^G)wH!N4Z28L#F2%TJiYTw{W77*9WtgEo3L@R&=h2yEUo#{ef}n0Gn#v5Pktv-?j5?i- zpuNkiOO@;O2QUQD+Xdyg`Z#JeRK;tdzdqP}NfS)K$+%^Iu8+(Jj5G7+QPUo^xeecM zz$fctXeqD)B7ueqJ#cFb_eT0^d9I>9&vx=i@WA2K}OVv6!{ zSV7##WS`1avU_>Q*A5~t#`z;%fp)Vtn17eE*m0HxJ=5R_a=9 z0N}nDVW*XUd-yQ~{o+v6H~9cQlmsUUHgG`zRkWqYU+Wsh;QKGy|5X8T%qzmb(@e!y zdhO*nR*zh&zKFPA7=1{Z({%=2I(qDvWgC_eQH093Unpy1yu?W0&DAbM3Ja(CZ4n1Y zuZwJYj0sp=f-A$8P0cTmyAGDw1PIO9XAuSnB(=hSg^QlXvUnxkGjZ^Z4csx`fMzaE9Y+{=N$g0*{H4Qi-^TOVgT+0^j<0pgk9qnTN*)~E;jLp9gZmR;LA<+@ zqF$I}vsN?Ha!Nn}8GnIHo)N`s^Pn`DHUz4Fn&WiYpZ8J9)*VIqZp7i{Qt*0#F--bV zJkcj0SfJu*T~mfH_GxFD?EcJs2Uq;0+u7I(l45FM#Rlaj%`a{UTvxjo+&eX>74lju%w7vh$G_%(Qc zZ&&ic^%*}v|016Np<;scvTMWLF~UJFgLIAONLc?1O+_|no2 zIY>F@V)xVvQ<>Ye2UVBRc7GKwm2>TX|G1vL-UfX%&@P)EG*a*Ip^71_-~^q^@#3Ky-zUkx~20? zyIw>mKeLq*pI4gmX6)a>Pz9$1AXCc;ZRA8TDwexT_x@UyE8|W|I8H{KR@57(l=PW~ ztW;?7<)vDg?z;NqcHB5lgD>o|+Zqq@>1YcqnRpHMPnh(U~H0ga0GXO2k9xN^=bLY4_5u%YDhQCOy`k!*i6mGHB! zmWH15SBTOtmSqso@1R_NJx*$LJCU6ueq8)uP&uy`S@(l_%?Pm>VaR%$`JPU_aC?H` zgK}Oh6>HKpij$KoYf*%j%TOI*se?u;&!8+JyPTqd@$KRw3~>o^!OLtJZ7FE;TrJySuzsHkeY5qgAWwO0lEs(I5%v zM1iOqs85Y#Slz*YQ=`X~G<2<$gn`aW?^{_wo)6D;C%ijcaz*EDdW$Uh2?ru(JJr#> zHyTG=qYT`{Twaws?S;}NW)(@rD~la{t88sXNZYgQC!u*bdyg$siKQycf}Vb)-SWXx zypV1=JEhvqMUjvldgOJpv0CdI8ug>-QDI+{lllA9={`4qpNxYJHaq9>=X2_``KDPT zbrrlm+iDLNxj4b|7cRLA7EX9*CWK4D;#HcpKFp)GYWP-#E0lIwfu3^|KmI{eRDq=L zTj2FYK7gBf{WvJw)ZomdWZ8F%2G^>twZnJ1RxUFs)Qw4?)kjB;)yzZl?n;9@j&G%P zscfEJ9Ch@6i#q*#p}z@CMnAjcIxL0vQ2~l1LRYz_kc#dHUp`HX$#i2DJ`};Pb;(rW zuJFKYd8AVzOwOV$DT8=%am%jSx`3Oj+_)2-kN7R!FJ*DJUfHSA#~Ui8V1n^)*2%D! zO36TS!ZTMEZW*H`y*7`a<|Ny8YR}j&H_w&g!Y2cNo%&m?M4h0eJMXfnfHLFz>n)g_ zLd-}dZ-D6+G8sSIl}DSJsg)WY%kK!y@97+iZ;TB#663Kvw)D5+ zzw~v_By{f3U*Pmw)y5hU6hX&T7Q(^rwhS&)%W;69rDztDe$z^P9iGgZNLRQOx3Am^ zcq2f6${QNsCIpEUQ&>zAb0KqASUtRJtP`s#Fv$w`JysK3XKZZg<8%F5x)k@j2QJ5mJm)?In09c+l2=TZy?VKXDQ$$yYkJGiDX-&uxFL$zfig z>cAkP(dOV3_IKibYcFa4<${Tw2FG&iyIQn*wO@ch7%k~6W|m$STfIAxi`$HaHzeL( zcW+*byh6(J@IvTds|+qxT%HdNtc{iG7Uvp9DRf?KY6LhskP|CMPW2zXjR z2SL9ZJQZHuHG``C3EX_U%3fmn&!BjZJ%(57haIi-=xol{m&=8oQ~1#Y$w~KqA`gD^ zq1wu!;F({p5 z=*U+uIGSsE_s~zR4f76(;|2%Jhan7vP=qy38LSdN(g3+%#d=<~_g@0)+*s~^;kP{= zb+@0efSi5k0KoAY`Y*inX^hQZU1ii@<(FYv~_1+&T;^`K6D4Uedq%#oe*`7`~rNb7+9}wAniB_({W$z^8wU*M@!y z#AO;4zGb(E8^bDcZIsIQvQJTCBRpz+8Ym^h+!0*1I*l+C+uF1(^spxGL4lQ}A>4V2 zSqHP6?|4S6Bft_4K})#Vc;lwm{?xcMR!GMn5KxU9M+;eth2T&rV_TemaS#whw{nD2 zpyQl+XPacVj3^ao|Byh`%JZfY?Di&3*;2Zq=D*3YUP=;hyS6H2oq&oZKTrH1{JuQIa<`f@z? z2>}uqlM;qbgzz)PM?TSi+NXbxK#CuMz!Y$eZ(7q7IZ9YDK(6wQ%_7J6_x z%?Ylx1#u@xIWT;Tzk!^L@@RnE)_4@DJ2Q*Cyx?qTx8dzeVW*>YIVu%|w7=Qt5mMNq z#PcRTTTqLzn|#hc6K#cgIVD`9eMOt>EmP7*vMB}?*^->-8GjFJiuqQCO3@>gN*tYt z-LX*sR@vqQV98T|I>_)LKTL$u`!ljlgP57O3D-trZs#&RG6>XGDm!v>O85yVDpoPb0yVd zf7i>=`S|rA*~DD`)tp+Ma2#lT`*gFzf_CE;l~5VpPYug?YWtU+nx6!d3&wdpM0bi~ z%5N?3NeDcD%*#^|ajk)77-Rl3wPSn6C{a!B8lLr)=`Wp3l;eUDa zeP3S{-j)a8Lh(ml0$@%k+}RLO7;HoHEb+x5tpUJ)sXR+09lFUlC@`y^HFAXpQ6OFz z(8ub(<)vUJZ5}!MyUf2L1CCI-<|17-B1?}klPpo#oYlH*!yt6`>E@Hftj)bMm zb(5I!-#MHN>b3Ka#eY##D$)Ba=^9b%PgtRdgCd%m{$woYjw0bk;QKcUaqkF8vHHCW zFzTO@`$H_&)Z-*8km)$HyA&4ghn433m;Kv+<#KK?rTvt;)*ca2E>pbFd|8kOjO z7WCW)K)t6)L-(>C)q^g9k&)~fSXJKMtChd~A$s<^?>W@k>zc#lo>}oXGQ+ozreuCr zZ=qLLZ|?pAr3zXv;d^|;n>Xe%h#Mt;p(o*^`2NJa1=5-FD=|YX)bZhUs&M27z)I{$ zYM2wttpZiA6^NT)W~kI)DS~Y1Rs9XLIH5=;eH_u%>5I_B5@d#WxzInJ+*rh0UtP>w zqDymFcQiOne)qOl@%OBvl^}$W>}3TuNK?Z-{W{~I0@VDB5E*R`+6!LC<06##$zgY)Y7VG=;`v~u-d-5%3COLtMEg~zt>$rQK*1}7D;TB`Bq{hcl z8Ent73H)6nU#(XDCarSBQ#^ugSm`xd&HBa}L71YuAR~#QSkO{^=cmp<4@#{~0(@PK zx+GJhx%B={dIs1x71TrRm!*=x%0WvRe|LdAXkU)bDCNA1#GMHx<9Psq?0=u`6K2xw z{Ze`-vF-_;1%Zz)L~2=GzSSCA8NywUASEJ-EgS=o;8G_+Q9Z^}WLP9&ghs+Q4cuqB z@#{AaqUD*E9XV<10_0+JHubV{nF!=!`jC&L@T5UNf-hUBL^cJI^(YAQqc!-3f2r;e z2Z%jz?r$&f7u~-<+g9D+QB^Z8$NqW%2_Rm7MK9;+450(gUbl10i13eo2dV|cms(!W zAmdAoxu5T?=!mi=6(E0QpiT+8k$hTS0NbA6`)o*}Qd)*1&u`j9#u@FOd4Q8l?douX z?C-u%F=TgROHVjYE2S9TQZ`PMf6TN8F5zF-zHnSdZ^+G2^k5C;q6d;M`=(ag8zO|2 z-KX5s68{}L1T(uh2a_N%WZI^mSpfD4^82heEmUI~+3Ikk^rB zSO`9Jbn0OxhmwST=P3~!Ysx+B07hy=o{YtpkZwVveP>W9sbmZkmM6Ox6?v^Bb9gS) zW-LOB4W-PQ)NhA^i7JB$>trr`YqksHN&VvE&_g06g}gC0v)@fPgrzZi*2FY}NKMc} zORw0n38;|HgDIiRFLxBhf170m$U%VImK97B6dvQDUoPkx|A>A8M-vZzAT$t2cH`x_ z;=5%L%p?ucOI@HFHzAI7{5s~1qUkB(Ije%$%1QH5^9QtW{z4)OpM>2&yKhJCJVS$$ zOo{#Z*`Yf8JW**==WTY2XSh znDHuuGJKQ--6dw-Z_~N`;8_Y3S1vnMkY~Ox0L57#hSdJH?9;r0i`|DV^O~+kc)Xv$ zVS7NZXC%<%Z8)!F>&UZl3EGs&8eK0?&S|h`eN!NuYTpYG%lw7 z(un9$W5SMB?glS_h#eTW2tatq$@Yxs=mE`A)-5D?vyx_!}Q)DXX!^ zXgVyPE5PglvnOjX&s(KvzfS>?s7#}ybno|h`}8Bo>UZIJ=s?xVJr&wP6s(~XN*0Ah z1~Wls+tQ>)!fSj;#%4>>*@1L6b>e@JBG?PdQtW}FDohuf z1B-mIS>IKHHQh74tZ;tvyYB$NGiF{65cZXC?_A7+`OXv8e?|fc?Tud+seFT((7_8m z?_nzre*qOb-VZehAEe#)W7(#7832sI<789%ZEI*J9G|Y!k_+XeNMV)fF~=^%mDXs<#Mj4=P2 zKX8QcZFp|$yTXXHCImX&ao9N14{N5ndgMDof7GH)>==B)+UTteUUd9Bn0{`5KkvgM zo`@pNZ`|^0Q=n@FV)Z#a3CR8={h{+_()(B_W04! z_aaakZW++}#+0FYY6xX4;mBpTU=%Gxa0{U0;o*Of3p(0a?ZMiAUFlmh>5+#!mZjiaTcW%OFix> z`9>OjVMZgP%g$*)OGNs#Rhze#Y9eC>MXOVtEGTjd5KUeMKzcz@&xb63t{-eZ@GR5= zkWYAf(3lx!tI zE!3Y8c;^azd(X$4AvNQFZo3s<$7-_CFI8OE@kHtR^aw;<{U&bo?ZpFoU&mUVj>!{p zig-H_l-CjmAYkuJ!B%2mQ-K?l_qB8&4kYP&(jld>!hYm%0udRsAY-d0XC^>oEgpxT zL^)l7(P%<@BIg|Ra1lfv>+qf_e`eD5euZ>jZua|XAjB3GO$_BOCKolI)wh6{dA-hY z|LiKW5yVcsf&GDDkxYHvKx<`e)WB_UCg+G03S;h@r5LYRAKC2*VJH9_Ck4e=x?Bw{ zv=D6I37JuVnlcq+KkT71Z>Vk1?VVQz>_dS$^{kp5og@pr1f>a97C>xAeOKoX8FdVLNX&mA35$T@PO!cW15o{u6hB4uhYf&DGMzc>_x~52M{KojUn(;QvQ zQMmp&m?BywTQ_T!fqrsHe||-NdR`CV(p2b>c{!#)VhCbWirN4~UHu%$;d*&i!A~#R zrS!mNr(&gVT)7|_Esk&^40^!kJkxU?7t6?udvbx(a$5TKfc>%!am0fCBvwOMVH!|L z5$tLLbOOBh2AZCSFw=70KOp@Z)BP)iJ<0BIwQhn6lA4xIc28wcf65ET#Ia-3Jg?dd z<@^rEKrUIJ7TJpQtns3OWt^ z3nK>}=;$RKiw6OERf=)+Kw??#GAx6v7~#phH#YDybYi z=KYTUP2&(#stTsJ&9`(~VOW*eAFfbi5WCQRT8bA3`!y6R;byq2v1Ft9uR7VoH2h}W z=N8Fp%E$!7jGVI6Kk(xCnnt>t7nQ3AS;irK!7?E%dVhl#BjDy<{Ul~Z>^rx$0*2JXdORFVmZZ_YAffZNXn6-&jk@KgGI7d z*RtYcwqbm{KC75Roll3Rdoxb%K94NFo#FMx%h*B%ZU}!9R*n61AKl*8tv?HO#t

5Ni?!xg7s~jH**(`U9$|i4r(CUKM~Y)5e{DBQDxR&hU$|*O`jc(4;Y91i8nUb)?F4g5$K z@E}_g-SLo2&-bf!w*rn-$6~#9t7j*N<(uSex(yX>f6|zH>ZWm=jCHA8)e4U!+Fmua zw;%;*l*6yKK&qS*^kF9)!wp_c@M2pSo`wsv_saGHYu^r$vFipQ7TfM{35TGXAz{ z$)%5Hs&!<{96FoUo91;9jbCifH7!p>4vLG(dQpeDyt3bAVU_7?*{XWs!r=2`VZm-b zX%z(xh|z0Yb=4vz&}FL5V!s@QuG>MMq0|p|rXS;L{yGx6rYhA8-_W*p6#L$_Jew+9 zf7E1d>4IIu*x+nlJHqWocOBmg=Qr5jqX#ywSMINCDuj~Hwn<<&1%>0jq$nb|ir&0R ztggxc+2Ift8S@uqUzF-p=DMWbEVJ>3m<-Rexd0Us%TEgN;-_SfAE(%izhfT^YM2Lm zoJCJ=2VCbHaI3&`LYV2H*u5FjyUK4af3`6riy`?j-iH{MTv~JeZ5#>02F>e_8W~p@ zgi-*pS$HDnHZ4Ct{t1c>bv|j^iv=C+kN^;o#Xb2zCgaKSp^d{U{B{*7-Oai;>;s($ zoUN-Zs|z0Nk+BelVy%eBP%vh!B|&%~qTf@|Jo7#(Ks23tPAk!KMSI@cYz;%Ke}(n@ zvV>wInN>!y#RUQbTfYUe;st#4_n^GVGfwzGh{>RJ_szo0MYYsGx~w=_#9m#n-D2So zY&TaZ2@Bh>H}j;OB>kHDbTc>0YKGc8& zHAkqQ(8GLGDy4m?rc7v4XiiymfBt~TJ+xU#D;nlU00CB-lTRaf#I4@fzY9OJRGigW zKs2Q$RCun{ESE59=4Rn=9?EV}+1@b+nVqtzV}9!GrY4nfaD7@Ho_ZU_j2HrZgXSmi zgjIf>Yag3;fw&0QfkZo=vD;oT#Ak9lWAXj)OCq@CoVa@WBV$fcD)fg&e>j~Z?z>rS zJD#qAc;sDm{I+@DWdur9ftv_Zz+w%`{KHJ)2gY9%7kNVa{n7I+$V(<{_WCj%MF~yqB6w=MJp1z7L|vWNYS!dh>iPl5be)BzVZw5Q;Bvq89dV* z+G(MLAngf#eMH@@D3mK?@S@JGMp2C*QL31`xY&vS{$%`5!? zk9SvK@1r>&POUjh!`%v(UqDJliB_!FA{cW-4&rXk0FN(41;|v_IK4vQYJAgjKvgGO zN_~dKswRq$czjPhXskSd=FG;XCLXR^`l_7kKe9ny1Sw~U=fYvxes?UVyO`7Jn4t}z z)iVSAUgPUde_gtPwRBrbh<{6J8Z7Sxnt%!_@asc}-Y6;B-%u@dE)Hxa9oJq5e$^M_ zpcA|G0ko*$fqlTKAlUWJhrg7;Gyp3N*t#^Wb-ik*>!3q}z#>^PzhJ(Rv^$tMWTf9} zsPA-S*2G7qW;2H0!yK#zh!b-1SMNi?VlKR=#ae@Ie~o4Mla9!|=|V+;6I%pzEbWeG z3~n(;4Cgr6m!0p?`J`92ZZC(%=!d*KB?zy{OnszH5X{~*|EX*JP@^pD!BAdXlR7%M zvW~nmOxL~{QkdYI<4gsddKylLj^AybEb8jtk=PayoroO=wDs*c;4}Cvxnj}1MZa$Q z>ElaDe=o_&yTLXi2-q)bs7z$$ zVD{r+j`2u4gsX1LaeA3$v^Eq2UG-KMFDt@fW$U^1plo@K^?6aWwirTd>fq&i9X2gee+W3E zjCkrT3}z#%w49SSB?tN%RG`8Mi5?fpWuu zxtqlQ)lp*O;Lnquuo|zmP?(&#>)a3y>g&1Ko@7OJ4LP?&E@7S=Er^QL`$=A`u3ZWn zesjLW0ufo{nU}V5EI}yK@OfZ+f00?qU-RXwC-v+4vJMdXO_-0e?thM;iE&ZmYN&`- z83|=9@GUEu5u`m>mVLM0`BkfR#qpl@OUB=d$jybJ(21E6ZeXh9?-gfYq3MPDa98E1 zTtRjaLvsQdoQ6r6qWJHt1;8L^4RZtIvwJY`xA$~j+>T$*on!!*-_eoKf3;wMq&}#h z>4QcI?_R>Z~cH`nf)wLvun42-JX8z}zJA2n60(7x^b5`YLI0~l5zoB=*53L*{9an&n z?K4)M)5kA4fU5WvMV3rFn}~w$EEOm5gw#&=uNhq#;od-9Gzg)of9L?z84G|tu6jdB zo#CJ;C+W#j4Hs^x@VxK1^&Ymn1cqDkW_%#)X_prK2-Af#hjj6(&wV)UArxqFAv~$_ z`iVGXb*=Vu1BUv9=%TXgA2}R$(9%^MlS6>hdP155HdtJeuWd^oXUA zhP!LL$P|aNDqIPte?@uo@CDN7B#2tIITJyE`zf6tTaC-Ze&L|KORW!RWiZdVG?1c3 zDE+i#X!%t;yOb$>n8?CHJg0Ha@_d`Z%Y)(i))&r_&+9lh4DILM=2gGFf`bf5`kq%X zTR1uUndxg6$>`5ZH8y=yUU|zNgGiHl98J`^lgpQPtmKBtf4xrzj2|8>NoKY^s`bzW zkQdQz6alm*)(QkKagI}qS%gK)jNT1cu;h>x^vE&w&i z89xap4GFW;h)5ofL2&8Fu9>)MY+UNOKjV6u@ zWdWQ7V+amC1-onMMhU9PW2P#y3ykR;Pi)w<6T6yA@BCc$2PaNy3g40};z=4AMm6UY z6?-q|DuxbF2X$}mj>yHRm+zMY9LwqlkvA>}4rJsPe`7Jl@)H*_-eU^S6G80(qNX#I zs-Q(*7)x+96rYRr1uIwtc@zkHCM#8=o}E+j;kF-qSJ7)S^y>!^TcJBz9lq?Ai_Bt} zB_u=VV_v0~$MKBc^qMEYH%pryDvZnNDY4usKMJqu^%u_?i3hl);^r2dL5qQKJGP|{ z54VrgvRZTK*YAvB`g zE8`iNe_S(foNKjxDMqTJ%WfbgB8%+1^@?uoD#IAY*C`PZ#Q_%w_4)pU5_fJ?B5Id+ z!zW7+cnlxx^jjHNUO5U#ljWSR$TOD0%L5@Xf9j`Pg%>+Uy%knAo+Ji$#tc$SbpIhL zalS}wN-;wiG!oumU)9dR!K|os)?&4UIml@^m@nuQujU~68g#tM^~JI)mE$_<%sYD; zDxKP$fNk5CgI<|6;AZ{M<%xVr9@u+noiV>?N%4SCbd_yk-C>q_o)2hjZ6RGajU{hw ze~4$MGF0|+()q!(`F4l^xIqKR^NINu>GVn7E}2nk=EaY^cR9yhBjbqf4_0hc`B4@hO$ZM&)DqG zP&wr#cHrd~L3>G?@&z~ zHvf3=`Y166w|Mps}bsqeV!|SC^7Ql<==jdJl$=3lqg#3@4Kb@hCR#4-nal z^})6uN0=~52p5M%0ESr8ga0XKZhIx5_6GsqD^ouqLp=yDEB@&a`2lF1`=xL)e`WJ= z!~2d!+7)q{HK7!(2^!_@)MHSwhp7=mXtTQSLg^zwYB;_ctis)kezjzH5aAy&eX%Of z%dKBLp8>}_D&+*$h1TD4y^TU3uyCvc*?N^+p_xZfkF) z<&roRCtz5WB@HDw%Q12{gP-1_f1V6-nlCQ?b}0GQ22h)kTHhT_G7Iyx$@ex>=;+<7 zh3aLvLb&lAbcKDQjhNu2P=g0oz#S5G9{~^9#8naCC3SV+2j`iXd6ao!*6cSz9j-%B z4co%OWk}z+VWV(u>jQy%rBaiZo84x3Z<@uWOp=ZW>drN zJ}I^9*7e|5(eH}yNxaLmMp1joM97ZkVMd1Fy;%?Jp$aD8id?GA8_myG!?I?7KMTn` z_c8?(r~q1LqK$!*IHF6de@J3bltSHWD>_a+IE3hM?Gi+L17Q-%*m}dBErj25foA8= z4wKT|#8apN$_pdI#Ee{76y9?C$26*G0;A-Dh)Gux8DDpe|eL;n}UZ3r}-x`%D z)2EsS#v+*#SyLV^QdeEusgX)~iVc)$xl6U0VQhwB9E@3OMr8gkf4IL!VmRaPDc6|( z@2%XWkuLB8`!|5Q4+PDQ5!w(kIsWT5Gn-**%eiEpF<(YGSv+7iip_IY%cbZJm|2Jw z8COKVMqgf-C2=mx`jr|ll&4txu%B+niXA`>_OPhna=sTjv*;KB7eO2esg{F!Vx$M* z%^=S~3UFQ>k7hkxf5m67eslTxQ_7zt%#q(|uDNmMwW!RTxV@p;`r^M6sg~u;l?z97 zN7{6$L(etPUxXoquAWG0%#{frbW>Owu~oDY@|y|W$B}QkOas9`vR|skD-#;0EljJ1 z@2^V6tqweeH4V!4`+`^fx>V0`mxv!Vf?fwvH==py~@Zqex)~feh0A3ZZ~C)LB7S z`!Zi|ZaC@iwJAtnsLkH_9B1j0W-t?mO^d@>|Gup0mHg zOVDdd;9`Xle~yNK^e3Y%2FegkeNbcx6kTgHncd!Hp7ck)Nd#wAOj9Tbn-z3{vIQIr z58B$skQ5ij{D}P?U;FH>;vbn$D2u;ms!LQ}OxvE&(Ya0H&FWsQ-q5xN3=L~*|M0!@ zQTye0^n~fHxH9!U?SY~Vz-|jGHEcYz&O=b;cAu(rf86c8q-TCQn1As49hvIwclnX1 ziKE6#=?2w;L|v*fVyJy>7JcC(z12c3&M9;sWg`>pv)Fni`Qf5BqNfOt&o_`c&xAX-(h zrfBDPe`SfBRWP5ZMvClqRXBRFuZ_K2Lhy@+UReiCQBG)91C^Z^f73 zZubWiuh|9ty`=MNfij}2Ua@hPy`V`*x*-4f>o@f?92ACd z5#t_RyM#?zsR0*7H-2ZDqmO3SbwKh(05~g>9W5GOv|Ah1)5{_J9)kSQ(AA@4z+u@q zf3+_06!-?i73-SSre9Ti0_L^Y6!INsNlW2es;04*zoIgClfb|;0E=6K!JUHv;YDFc zN2sy?Fpr!Xik9WDpal7wjO)A9hUPh-Ez@3`dXTuHd7{*`zb&ieNq4ZTK9pyE5+SrR z1KcYK7i=JNd@@c1;iZcMRC|;vSHRJkf4Ckx=q!};a%`JpV|X7l;6T4J2quG#uNyZd zR+%q=6(FKq1eqkdWm05RtvN}n%n`IY!LKm(Rx8R*Ch3ZVyH)FdxxZ}7R)*NKu6Xp+ zR@+kzwwR-oG%yKvCudy!f%fJGjSKL)*B}@MY4&8P$s0#Zf2T?R zG#tEXZ7DDYCjtJn^n8<|O+{jdSYp@sSomA#?Hdr1>0@HOQyzs8 zq^6G1dcQBZ*RZ?n&osVGo|!F?f31V7xo(FSQ5~CSoo65xlzGFYTcn45rRSJeXLxG1 z%bLZI@w0r4Ep&&GFk|Rb`Z)BnY>yGiG&1=Awe3L4*BOx#`D~a4K@emFMyEjI_hk!B zMsyj;9*zjNqeLpi)Y3ivVs2355wt*2FU3q2e~Fh?-&lQR zbT<+8MpP0|U0kp6ya~!2C<@`-?eA-c`JM!-5%g`)waQY=NF5?>w_eVAFYRS(_U&TX z)tk7#e`zAtPD<#G9zN$U=Jum5)0cxY;dksTf-r!LxozIc#?7Xr)T4De0l!bhlOYVQ z9HAc)Wvcw4AIr)iYRX;*e_gARhp{OpC4Ib20q)~r)`#eG*AA(kA_vY5Ur`6<-Lwa8 z{iM*7LPjtRvKx-opTwz-%i%5u3e^|7UFcfqOBqs55A$eehB*E z6+9OFSa52075Y3jgdX}GACe?YTj&O0OHWv z$ky|>*3Wu8>0+nbv2G2wuGCsa^zY-;*h8R(Oug)wSp;dj%q`(VNW&i4QZ_LbqK6tc zb4+&OtF(@aR1aaLe@=ZKS#3C+g)y8m*7f3}zOv#1E}hSqtfVva%8Ly<^6(P3sz>3F zT2b63)#y6t&Ci<*WD3628HRh_`&s|3FY6P+=$3g5R;=3)4URk3zeyp!P;L*a;HVNZ z4ipBc_E@-Rs;+$fD3+E2eks6X=5Wp}!7&Rwj@NevzYUg~e{Ej8PD*4X8fXpqFyJl) z)6@E(H|KZN5=@XB)kC(xL%fg{#uaf1-I^1{C($n@mze zH|C*zKT0>de~`BvXjk*! znVdJDjfj)Z2$jcpltTjJmWd(yh_`M#KS>d($&c^ZU+fKnU62!$c#N4IE ztu1GFQ0vJs@lrWJgeq95Juk8f5enP#YtwqF?Bdi}sTq$EuXGge)vL+Tp|QoMSzIRZw%q*1ObP;ye#vDrk-R|*btEsuf=>s}&+Gd8VMbN@86 zSst+re-q@S`$t`b}3g5Br+)sR6uLc&bVSOhTyL9#;vvlG_Mpb2BwvdyWsnLJgm^T1C z-<0?`DaM#;Qc<@A%?#O?CXJ+gS<>UdLGC=Te{su;V))_5P+)X7) z;8D5vW&GXMugt9DDmrsW6A75ic~cw7D-YWGwK*&;k|v-T)5|~Dc&2VcWx`(du};_> zuqV$!yTW7=)BNzxwYP1{QQd20$n|_y2?rPSQFj{2@aMvM!!;V#pIvD7akGfXN-Tmh ze`)1310GcSmP3S^g8!s%j&6WLvSz96q?8D)Gi)W z6>y<-yjFL8W}E8*R!9}#NxtxtHT`=0f3d#C#Fon9SFu^c5ZlI-VyD`D^uK9L~AFSoeI6%Uz;4K8DR2p_eD zM(RF6r|7u2>nd8ZiaCbQ09FWoi5>rQ>OC;{a%u@ zco5|*`b7rkxaDb2s58Z{^22#(Fo;R+yWcMKn42cCt@U$j#(VEVwcCQBVvS`%2=+Pe zxQLr03A{G_^&!6-UdQxJHkr$-B&D*PVcB}zME7Wdt6u&jVX4`nr^j&ne<2{#Z;lRK zu3wmqqmrz@(8$)4bg{QdX_#n{^?eBxzBiOjc|sacM|zmApqN4nSHn~OuvdzHKa^>J zu5_YB6{=Z26Nf#i zQam2ok($C-HB?k-GSBiNe@6HAwPH#5Nu z*sG*doNg45Ju3BT)CeM6era)gf1FZ2qM4&Yksnw?MFM*&@YBNViOe4DWKCWK1NkKV zTpv0hAKSFRk|4ChXd6Z0xdSI^7va@TV>r}6;5@wYBa+c4bB>;0e*z`5K)gF18D>l; z_HDC?c&-*Y-xoMZ^c6|97JvS{n(0$r==o*+wVWy%Mhdz9JFjgIWPUs>Lr>cB4_>C7 zfNZHqlOI&HZ79}K6mmiAGefeRO|TgpYcO=O+)TD{0J3^X7#~AhrNKTd4Mf=40WBRw zKp#;MK4;|-{4zCte+<$Q%H5C+dY%{T2@_}Czx|a@!teUl*E#tq8lBT!0_>)`{)NmE zYkqh|L|Tin3z0KAeHtO62U97#mPHcs#MAue;Lyh}M9os5T|dP!;@WU|+6yu;0oE7| z(hQCPj8|TaXjQGrEfJC=`?k6p1%M`Mqd-dYQk<^nZ%Bi$e-&ZsoXQ41{@}SsIIx+Qb<+QoxZcqC!U%DBn zSAOim+B)CAwJn1m$o2O3W28o!^%O$Z8sRgEwr?E-P=-1#;JUS!2R{%vsQ6)WrU@|X zu>a*>Z7lJ1!ZmFyC3Rv7!rtVp9(%^iF&r+)vBx|Ve{bx6eu>LHF~U10jW5Z4t^E=y zx#J>bccXQlXpQ}+yXO%D*~aQ!hG2ueZ%;{in%&4Q(HMg<8Iv=+%oxdZvO0}-zP;Ui z9o(Z|cyL0;6}=srw;7AM!aYyUyjUmb4}wGCSTg8gE?Ww9Dm zDMKFze+ZUzI_`N$irNVRX7^h#y6PO{;B2y)Z&&c24{WRb!@Xo*ePqYrwVtd{ws9OC zN}p~pHIk06jS@b3=md*$6KAJxuFghVTvz6k2w$Z)q& z;w)3q|%Kd0AjM zj&qe*P?TT%+{#?{?bx&}7n&veQ+~?O`+Yrhk^R1$t7WpL{9>Bo+O}_{pQ@^%p113~ zD9f6z?a)ngnyT(E8J2m8VZ=P!zD00?e^5iKs^d6On#O6Gp_7Gilw36p(>RH7TpXHS z&5!u2mvEoAV3UM{fIExr;`HWIWXFLmGX=#2C*6+lyA|Oa=fbGe4PI_0B_y#V(U_(( znQlTmS%h>$r*B2UpciSHuJf#tWer0=df$i8PWNf-tIv{}KjSi0lL*r|O|k@Ef0}K& z`Y@DQ=Sh+{U8W%znq?@WFqF=M_OdwrS;lcR>j)tEp&RB2yhsCp2r`x+^Wsl0?Pgvy zjq^P4CkxYLy1NfyB=+9uBwHmLR5xAZwO6WwL!EhZ7p(c-2p z4WX*&0j)v~hGQp0CaZILm9U`C_m?n|JjtGd@KOcRMf;^H$J(ZQ!jPBEqoXNHksQ5h zVX^j(KZyZK3L(3mPHtUGx=&B63uI2j(jxG7G?Gw|sCdr$!_07Q8ju_Yf6~AJ-`9*+ z>;e?EC`4fFr(-2vjX&>lLP$Tg#QN26exWorSL3nk{Y;t-KrCuML}YK^MzpQy zz#nm5ffw&p;eTyGMUzh_f1UMKVV^C~8x9Di5Fw5l91kf8p^#1PcV+58@K^>aB_>r{ zxBsE-+nSU2z4g9Ir&ryR&R~d>kaqUP(#e)>8Dt~C+z}IGW57vFBEzTe$c7~E`}^;< zdpfjo!bN8})e`_vfxvxe;8oQ5J zcz*r4dNPA_$xl<4Bn#~xoh1qG?$<15=*3A+>EZ3;;}RTC(fE$Bw~<^%`-fV-lKE`v z1f#`U{8*|>vwLya_~YrLtiBrWGncE&>LEf~QrF>PG(9wzyYQvDJ9}n+uZl+r*VNcjLG85V>aR zEHBH{fAdtsqv48$`%2KT-aq?-XTc16#7oVgc|@bt32olDf5G^v9h`@stCtRfmw*O? zF%a|b|Nd|PWm|t7)~EmWUroK;{?~u|@1Gz0|F&75*8gK$A5VkRY5PB4*3Ijq``@n& zz@!864bror_!8A(knSpgD0J+DnHQh3B^>VWHr5g^9i28zcAKNraBxkA{{yA70e}HIJ+xI5krpWKF*9as@x*v<h&X_Tv;FZ5d=4N08357J3r+J2O~rYCkY}O!eDK+@57j^CcLe(Ak_#8nu4bFw5_yfoLQJq z%uP6Fe{w$?jt;JZ_&qlOu8AOI9d_Oy;AiqSUkq z0ZnLI?RZjGtc9E*XL^=$Mku9_(3zKt!Yk#He!RJ)*r({z@- z`Esm~Zt+5zh`05MpEDiJoKcuKQ+q#?>G%#zg-;sIfH{Dp`FcL%es@0?c`q*Ze(W0O z3JT@_{NAnk99_h1t#`=e>ib^K!bdv4-D4Wh@?Cd-*L9&gbQ zf60AD#3jDy$@pBqA&`RVAb}U>J(SYJI*S!}XJTq^xgRUGOwLHx)Bd?xi1bkY>C1)$OJVaw3`b=gX^Fphs*H6Bj5H#p@K-bOD-R{=6#cOxkA!nxHwss`5=3|mjM%!%5 z+H4ClpxF42V4BqTV3>^RIWEVae{@%WO2ip$^Ac1$LdJOtaXuhGF)V9Ra2cfw#5&rR z>;`8L96nu$4GMtO@S-7>@OE6X)kN;9J<;+U8v2+2$nLLpykt9mAMf%zG!tj0b3o>{ z7x{=@vJvlo&+WK}qg~r>y^(#LxCpzKGdcV9ew|<7XMPL{ao}fe_jb;Zf8FQoA};oS zDFn2-EDlC_jPab&bG|~dO4oZHBmF$_ zH08vsc%*X#CHJGUK1@K=NIipc9D$wZZ06N#H1xJ$@?B8m6f+Tm6oE6-jU+DjhmaFf zCw`*QVl>ayMZSUAe@CKqcX3t4Sq2jC*vRVL=-DN3c3A*4_T<}mqALU?^@5$MFjP;e z5zkoeVM%Ed=#zvcN7M8nO3XNzUviA4n9$k%9VX9lq!so@&6i*B#Gfg!pJETo28Ixh z09AlK#1w#@!`AxyZVer7e*BiO>uzmK=iWyDj%%{CagDUif4`1vh5I$VNE_EWlU{y} zJx~sQ(vShHFGN!$%7!s4c0rjRXup4>c$Z&eR_;-hFF-jdk@@_DoX@q)Q?R~Xf4&^d zSBqtqz5#V#4~tC(*%VAh5PVjH6|S%W1Y|ovnjWkl@eFv486%k4c$uGtjfZWPT;sim z&Rx8-F;iNfe;yMs>M!KZ^z5mtkyc*-6gG_oJ&3L24+ z;!Q{eNZsAHv;wUcVek9S-gk_Yz3uap~g1GO#h=!WYcscd*MxSKs5a+dlL@ zYspO3o%`UDPeAA13dVEX#!ewc$g~?FpCY2EBGOeLf8E8jbqO}^0@uYV(#?YChUSv` z$j}+l7F}qAWr>!^(II-13A3nT(1YA)Q6604~dFiKRQEka@5d zD0P~MCLs0&d@@5xr@$e*!H14gsB&Nuyr!j|-G_K5@A*t!eJm|lA$^`ukC%DEXLx~V! z>pl9(KfC?5=ld9RF&V+b;sFa%O`I zY#;YXW+4WHmJn4~Q-REKoX9%FMu+g^*u4KZxU10x!ZjcX!shX*To6h7JpLA6Z+Y-f zf9%}{WfvEH2G`iw`|JC7mv!+m*r`Ks05?s33iY{K*f0)x4O}fc~V>7O*)mA6e*-G;iitkyxa}9Pl}w~?s|lE4>`5h zb8oN5*T4NjcUFJaSm!?LukRZB@;RTWzCP)5m1|CtdyI&4_qv0db8jf-M;b#ZfAV>n zz{ELku$CTzy7s4!F=#e>NuEhEa~}iX6pUK=XNo>jxlX;!CT;n2vJ>xVGJc)_Wst~d zA_X}aF=HU`a#joq^@mVd!*jk$N z&I%?ES&B=w_OVK+WsH_R-4ExA!qEgJg9X}wb2^c{cY6{55rFYrKsLhJ0 ztdq&mQJ^V;f^8<3WZ_}qMy+!BS-!9{XHYsruB2L?!2?NgAHS40zYG!6f9?zDiXxyk zuUL54*3n?)hiGWRJzxQWvG%7b27v!OueZHdMLfIKdTpMFR_(5J!&0>BNTpap?oRT1 zJ;BNN18YrT`}Dkjs4x^FMf)}fsklgNZ?0p{E<(UL?BqH6+}0quH`vKfajqsv<|sM| zH2ZqISVRU$WcL}tTMdcze`gm{_I{7sSY>M^yRWx=`a93=^61_lIO39B*mZx!h0mGn z^712_U2_k+m%WeaU(>{BTbGAlgLlZawc`^|ZgB=TI*XO9%OTB<_FS?gzts_oE}mcWDRiqr zuJxn1t(!gHDDQ2$%Vo2hAAN?MJit;dLYjR=hVoElpZuJSF>po*|A-UFsCmxks-^L6 z{FtHWe^6k0@ywWG2v$ssvCJw;?Aq-0g`jcDEcbw&2!TjX*}SPs=P=!6 zE59(S=v7zj!cL2nzlLQtpX_o;N%d&`Sv(lL!>ht?7BaH_3?%B~MSdR>ap}(FPFUa7 z0Dpvuh zr-`N++P&P}vFssKBB80^sc{P|wV1Y&gBt5(SAF=YbDvzaM~jTh9a!5(f#&1IoLv^B zyf9OUSj$OsZpIjB#m?~INwp6&oucyVZnQhaylB>bf7%q7(16K|p|X1NG~jC-;PBR1 z1knR1hf~7ee9h0{C0E^A^)>goUn4dA+a_#I)cJuMHrL@&(#D3_cio=ItDkXk?`PI0 zmQYHxT=GKr0@EW(_n{(s<0a!mOf{v2;wp7Nra0ckyXQDg^36)tX;3tCA{7&9wh3Du zVNk~mf2kY{MOe}hv^h_B?PHp-fJ$dl)rS~zY<P{~wBB8QCMLZ#6k0Bskw~Iux!L!CE&2vDxty?3|^c)%+A9xj< z^UjCBEr0*9H67%P>2QSQ)7_}PlgMa1m9CFvj?~xF_6>N3THz7*jV4ls*eFEx*u#*a zz}-&!XB3QiDEedl97_yp@CCnW3cTOk<+CGZs4E%CFV z%rxsdibxJ1Jq?Y`R}t9rF`0=KmiBz`^5)ybuEW57^3`izclCdxs~z|GTW7OZdq!e$ zeSb}7^Y+Xa*xgQXeu9Eq_^T$D%_{?i5s1jvWFW_i6;6uF#!Rr9fst?1;x|XYv7c-> zZ)*l>yzzJAcR4g!P#6K4GAPYh;_96vu?@gE1<1@m(kM)}cMHk4-dRp@sy$Nl@qIEK z4B?bvBheZ>*W&K`C5t=U9TGW#J~y%c+JC#|Dc`ZITa#b8E^B}1LiwLK;y1RHU)b;5 z;JPkXY`=S6kY4lM-|uiyNB_L7US|xlWo;q)diDR+9}M$JFHO;()^C*=+oD zy!NCM0VzLJ2Uyg$HXpqab>R*e*TszqEqT`Wz(p}M z284lecV2gvSF`b^$8GsVUF_b_!hhyd;$qj6_VLB$%e!CoppC^xZt|b_(yE!oi+lcL zI-m!(B9Kp0B+=T-5ws96kjNlS5Hyajt7*>Z6zieqq1_uplAtnQpq1XRLY~EFegGpr z;hYngDI^T#ATu<`S3c4*w6!Cg-4v?ApIjObNg31BMT_AECJ*Z+^th|77k_YR!Vkc< zy~*-$@AoOCPT0CP-1QLjxayUg_^zvd;_6S`JBkf;f-*08nU$#Qe7naID>3NK*(ffA zef-GV?)?2==Cugvxhl?i6~`riWTs7}e)Cq+L7<0aBJUT&lD<(>qb*VIkW|M2=WEao zfnlt$78c(+dpq;BT1d+`cz??seWzt}frD#vzE6-mU^a(s&1Ied!PFh2f-f;SDXck) zG^JSkG;$;dNf9#1^VZf~l4DXW?;bCEiH#SOrmJ^7Qq|DoDT5DqtFe(Vu=y*4tiw}S ztGsVtR(@l3H*T%B$sTJyUvVtI=X3tb^=yvQ+x|VgNe?aBxu43tcYnkyswtwYH;YtZ z0wTjzH6n@of`EEptY6_C$o*u3#CAPu2eFl3k#yI5#KEA&( zYiHUS5}fbt`wTv{hx93ZMZ!<2yW^|Mo)T~Gdx_=$z{WYtwBr`S4?wjgTlgt$I^TQE zcaPR~I_|6c>i8|2qkrz=nXo<{@sc07&Pv|J04lF%<96SN;Jn7T5N|v`e1vFK7ShF_ zA15zxADq+vTwuwUJaG%Ac??D%&^4bsEDEUGOt8T(x~7o22&9_79b6R^$yc3lZ2yL! zwj+TN$ea#7cXtn31faR{m3Lk(orB-y5f&E&yo0a?YU655kbms(Z|iG!aZ2Z`wRMNO z=Hc0`9+@+HpWWUo%YhI3JPH|$ecKI1eVV%-OEC|k20YXEF}to9^q?r&(ms#b?||;M3GLM z4M+-Y@R~@=(|^$rj&BdPt`E~%4L-FB%ZelEc}0Kj5js1c!OW~ICk2D2r_K*a8FqLk zvZ{fgwC%;x;y0I^_4rNiM|h%^tW~=>by>GQH|!}|-q?M)2fmn%Fgm)D_6v94uE)rLpLa|DW(HpGW!-hb0fiPDUWb z=g)bH6udV*PIT6PoF~y{=h&VPo3oqFUV2-ciGRg652*9R`kg*T-{N?7Pdj_)%n?6k(!#i!t|Su1N+3wP$B;|6iEWh;H2kpREu2xN{BlS^E36BK(d& zS37wx_jz#xh$bQ4zTNd zZhr~)-I?qCP(p9KbE{B_$lV=+h?V&Z0fj*;;hpmZZo%5hKmOibixC_QT|$x(4EB2a zFAUSW$FHY1FxuKgXAjNdQnIWjklo}x&*cu-_~ zI^-D7+y=>d8^BMTaBV#FsI9-;KjOsC+1&kLe4VDq>O%T``^#r^IaVRlz8>gaTYmp3?16U=EcH)`T(=M#*6`eh&1w|;(~>w@BA@HyETNifR%&Ubce zC@Zu!c<_-2#PUd8e5NESY7Z62)qh4#JZVnLp-Zv((;hr)4^?LE;j8nvvgbj*^NsFo z*)!8~OVGmd4_=96qYht`8Ah$e!v9#KR0lF8sM+JzNNjO-DZHf0*OQBmzzD^FXYh6Z z^^bei6d}30dz?PiU?f2@OJ-HYyhLIor*{J*Sa0Xnzga0^d~f|*+VPw2pMU<%`u~>C zul#mz1J>8Ibn(yq&66qcVPG{1B|k!|Jci_){-Zw0?qgSWK5l)eDB7^ataT?yCU5JK zzA>3!eHd{mTz%MUMG*%2s3{_}O0XOr*5Gie+BN86L6_og`p6`1;Npj|se==JO;bRO&PvWid`zK!Dn~Ikj9| z4ys!NdlnILJt(F1f#~jj_zY`VL9fA}eDIVFPTJxRR-3~IUzfW7kqvf@m z?i?FiZw5t^fWv3E^oq^ zRy<&0qCuEubugPVtmj8D8!u2L9jVnE=8NSJWKu8bDy%o}0fIX$8K-HSZV*Ra!Bg*W zihOdMsj@<(LfPA;%6|{?rw+*Ne&NiL%ltHxZ~k9(zzvJ&aqqqk_}Bg_M?u-;h#oJ+ zEsyTw#Y`TwwmGr)WMC6-jDtb9dfKk`OD*{1{5vk!Xo2ecDXN8+a745qiQ{KX^=gtA(yL!Z(i7J??*VnmvDI>s#mh4E*f%tyaA8J(u7jhrurI zwM_lF=wbt8X=pT^$oB8^VC&oLeWINhCC(|>aRRbp$k1VEvRRl^Inm=VtoblV=rTwe zkD$VfXR9?bLw^((G?j-YLqN$qQ7>Mh3a@P4XmhhwdQV>}AVk zhCHp$!I{S>3H(gf*h}iCDU2~;^HthVvu*K!EdLVa)}N{_c`S!ZCeEnC9e?64`y0>D9e<|L|Lt2ZcD1SBd~*4nds{8o ztw!n3?fECSgfu&J=eFOM`E_4M@ht7@*!A5bH6o1CcuO>!Pqfweae}E%Adv{QDnZ~d zA{zxJj|u3Vm%w_Tk5V*WWDQXh@?V~BN zI0(I<8Gn+EJ4Y55r3j3urB22O%MxiGF{~qGR2YyZLVER%@6Fsh=Nz<0wCU>TTkV7H zcjjPAC&W`qk|qgDt-pD_t+e$}V&ij~b~=-L_Q$#G>fqlzrrm=+KHI#{{x0)gbJOFV zH*M?ko3FoNEIOY+p}L=Y95r5{W4sMo=PODl3xAlsiD~K9Asx4*6>}sT3g(KF6x{Nb z$sDUgOhxINHUDNjvtVmzd*3&l;kquOwCg|QX|m$ zil*6of|?~Z0%1g}xNu{(eSBond5ue%V4wuI`AcMMe+Tf6FMVmS3*19G(x5XBp``E= z6(K23D39qd)99|2|0-**_N=C^bE`+r_v5Kd^vZ$kVkt6uC}ILxEKtV5 z#AODY(S#FtGl*6018AwJl<2I5hmCunvU;B{vne*tCxthWUY-%tSr`?Dlf&BslXaXt zyiTBi2W)Y1duH2=BfY)5d_5`9$_MSgfg;WctZpkqaMU)hrIF@1(wZPW`+s=Kr!iEO zcdJ?GmwSX4u0YHDRv$CV)tzkpg0_Fw^S^aE*)RsXRt^4h-!s40C1=C zGd^{%!JYnj()qq_&AS=_5j}J=Z8)7*8U=S}X!(c~G9aVpG1CpBX^l6AqcvnPX~!JP z+w*w|&I(_o;6Bc`SU!Kfwtsnq3i_U6wFkpP(q>aDUFY3ib=-ybY&HBzMo4cj5t5Uq zTGEIl^oA|$V6;DijPC4Md8GUG#3m>Cu6h8&C+sYiD-Z}7cLAkq{b$tG-#YxTZCvO> zsDDv6Sgfo&gQ4MnuATKQmaCX`dD9>E*5_*ZkAA;Z7!??{1w?$-aDNzWYA~*0i43TZ z?3$$l;`zL4Cs4WpmZvgFrB8<91?JB=XfYw@agVLYhONE#(}xz5t8baY8=h(|5SVI% zu#2H|X(T;;I23OPu2_q-IzX%VvU#^_@Am~v7cZ=a{mL-leG zd9C5Enqrs81`X}`{(pKDP?(2>+@k~>51lolQ@;fx@Yl<`B&vD$T5P! z=+j~gc%~mbrwTAccXt*$lAICgQb-+(MDCL@D) ziASvn(V!*BR3X-qeD(2nVfp)`i9P{3y@16k?0ODGOYM0`SbeZ+W-pwYEQCpBuRjN# zLMCkAn6lSTf%ZBkTbJ56bnQ8?+7(ph$M~|oo0t+KoyUk|!`L`4R%bw@69y1GPh>F$ zopYkOiOO;0L4WQ+0T##YClpQx7`D^FgVi1Vtk>M~5U>?@L8|(Gwy(huUL7k97yunx z9y*k1-uHi%H~rL2H`@Dx@BC~g=I9zvrg=ep+pjI^YZJ zLQ(~wY0FVIp+)mLB{1P5R@#qLBHDZeIImG__Pnhqw2FqQsc0*{f;OM}CR^|Via_hB zpRCJ?ra_A2ikGh8R#&Y-4F(Eh9gYB3Fx6ch`@ycd4bfwAO8nI1w_3K-AKJbXV)%|G zaJ=R&4uAB#yWiCd-@JQYmoc3x(ALJqy(;)L8ICbo#`D=M_p# zjJ)@M#$=XypN`?N$dTz(~m$o7$|f!J^+IaOdXv@I+Ia=o+PV4+tuNT|kAEPs{Sm)4AK8x{>kq8ydZu-E@0|t}wdw+^ zu|N3CRKWCypU4PI7BjHJ1I)~u?bp<_&CJMfF?>$3qCVNlg2Bt=+3J*>`jD2OI5ZyC zGKi^Qc*q|EJkPhbUUh;KGXe<=550Z~*4DRgAVPM((1*zWj^Ee5xnUQ}XMMeB?SJyB zdF#)q?Kkasq0DDp47q9!dh9Quhha&ymiSzAP#fR&3aFifs8l?~`aS{M+f|pWH92ef0yL*#;;Pi)pLNJFw)%Lh-7~NmTsV4= zKC}GsLZWlu=hmtj0c7iQkHkF^67pBaYIG+tKT^)iVU|Q^_HG{iq5nsec8h=MIBlrZX zl&6pev&skUts*pDQzT!B?W61TNm3%oF`O$CrW@uXnR443WaCQ0wit;N?ZlJcx-HUU z+e^k#F&;)`-o)qn=%2IomSiqZ8kr#a%o8IA?|h0O3xYyLII(_QQjGH@l1}-}Hx#bz zqweFo?)rnS@qm8!c(cCe_a+*A?4w-1 zQVqP%Hw(Ze&pAgj?H2Y=f8B-7!)>uwsd z2{!n<)_X!6a$?tZPoP4~ZxXDRVgcp-^+qmu7Z%amxr zNMV*3PMbx`IfdpO1ErXV2;Lm*w@pP%RU_@@eBwrfT82pWwFA-NbgygM=V`g+!FfBr zKwRUa)zgwrho4c~YJX_+qkqg9$o%Vmd*69chh260d;dEY7C*7SM!z(M&X^rE?KE^` z$lXZE*Qgk0zRmwvDh)b9rm&&?K1iE? z0vecb1Z-^m=dss+cAVnnn{S-SAD(2#N8MxJctdv6t^MYWZhv{+cMUW#{1UZ1%$730F{H?b9Z69af&DZf^myM9FIMo(o>a++fxQr>hQ~@<^k3AK(XK@i? z?JOsx#{U;*8h=?m69Dz$BZIK(&oj8MFMjfd>l*w%yVh<;mW%88?3*S*=4d4_n5$i) zAh-8)ZL|%hIIFduOX%mSoriaeqmC=Rfb){xg1WJ&r=0p_z;W zl(=Yc`WVB8mtqL17kcHZ{cErw zM%wss|MyIVU)oCaHwPwms`FL8pClCaOGz_EST$g+^U9plDJ{`` zJWX7k5UXGPkN3@2xf81Vo$s={&`7(FYftA5=!eI7E2NyeOI#X%KF@#opnSzkbBYsP zrhmJ-_2tzEwL|pe=>5ct+1maOTW@X0cTnVZU1}g*f6Bh9_gdF!A4rAu$~v(>KC z1pb9z%13cxIX8abxBdOb{d2ULwef`2D^9avSZ1%t5pPISNwGp0*AWFBw?96X_?09C z_&7`)H#Av7%!=_D9Z#`w%Y#`npmA*UF@G35m4nakGd+QQgXVL~1q4K>6<*sokf2_d zWs{zTv3AWm9m*ehp&I?1+3Z$74})7DoBUAc#y?Yno6WFNGXD1e3=za zN{yYz(uDrSrTAV<72xAXMb0Pgg*)M z8keaeemAtJ_&{&i&6Rg8V&bMJZ_(cAT_Sq0kNbFrULu$X=+fawVx2sc9(! z>wiA6wx8c6RR1$$!qk3rk$=8EIBSgHmJiP?o;3jP>xz$0Iy}M9E63k!gpj_@7c=}A zY5orlQO8SJd&IrIR%!JSkSeo0I@KWoZMofr04b*g=?v+&<7akvq#_*$xpja)Z$kkWAtHce}?Ukb$>eh;yu69>p;r< zFJ3USTm|`u#@PJQAl^8Qe&&sx8FGKMyWfwamY_A1)w`caj?6GkAY(|=2+m`<(K4k* zB7q;~SgVwp6r&bin9d?KucsK?Lxc^&Ef2WK0x(s{APdvInIPjK3cCTo{QgyE2{rtx z@5?_1?_-XP$BR#WM1Mx3+3VXT5d(r9%xcie68zl^EANXE$cBXyL?z(=VbiS zk9WAp72o>4*M6U`@#VWd>UvfFg_f}Apz~i``?n6WdtLYVRp-ExWk7~Wh)p(0&Xh)H z_i)IU&mpqsaEfw!4i~@myx)Ahoj3J2XIUuQ^Z37?)pV_= z1+H@n4MmX0#DBQ20TM|{8$XiO6q(a}Sx}DC=Y3R~j9PA2nR`#GXo3WuGj|$XGDdMV zl!Y@VMNKUxhqQ6U`SbqMjQe}vjscdz87DC7xm&e^)A57tjDQ(E_Vtx@AXa%Zw)0G^ zK9gSgrBOGdrNiT{z1!b&Hgc!S{UGyMDvmb|ACUaYAAfd#f46>c4jG~eBb*B}E%XON z%D4UG6uhqwV|gczkmQfDr7KcsKeV;zTSG8pApx4Pq4{VGB^cGYLV!9h%yM>G1$OQV zEHVO8d8FM)+|7~kEnjxFd|B6@ah6@Zh%7#Q_1#^)dba#~m#aDPt+!xz(;@b_%?*qE z*00%^^?y^pESQbc+B>5n5HHffosL4bG?a~pbn|9~2c4hSb|6rNnaLE=0+XHIGsqao zx6c<-Atc{uX#0Kc@)+&J2)Xr~vnm2fw~@@&sC2djy^TtlYnIwMOPL^25NPdZk&k?> zR}+3-MW_yal9;ONTd{oOVC+NC@r_;Hf_naJVSi@`(LOg#4yg7))&Y^*2crE0yxQ48 zX4K7R+S)lf+`j&?^UK%x+3^K-9z&a@*BH9!@Z6ij^9Jd6YI8Z)=}uSMfwlXp(tOVFnXg@C`C1 zf4m3EbK94Vy>P3wJ#=7uFK8^qTnoOOEVwjF4j%pmE43xJTd8h1*Y|x zo-bvpw*FcF^LbQkqx1MkXr<6f6%iRH7>(ak8-q2){Bl}jT)%p4J@b%GeSfX7&?1y+ z{h1=MjLxd?%V*9YJ57;HC37U%)qC3Nj21439VfV=?@^^^{+WJE&)suBrzl-PCC(~s zB9Ji_bN$iz0pPGS{2f-(;@FYJEpeolRO%}pA%AAXoI74=bk$ehYO=%fOjjPJg_l&? z6wM5z#_TM1tj%_CkPChm3x9SN9!|6t4H4QQkvGnh-VZ_rp!Cy2ci_?d_kIL{@SO7j7NC0HGU^*>vLgAmO#c?@X8fq;`%&&#&1UHk-4QE*Rtmy?(c|5qU!G zy`#`@k3ePXo9kSDT$*`u%b~s!TVx?#s@qLv8JYoxd#MesCG% zq|+y4cD-jv-}FBDS%2)Zv-lOCEicy1#p}OYAL#t_B)@tKLlYm16RVpkHnQY)R-Tjj zp3X69jZvOEMc_}DoLgK;V3XrPG@cM0qwafiFF=~vqRJAxPTT4D1k0DfW5)-5&ud)T zx>o#}|He&jIfctlu05U3K`N5ENx-!sQV~APSGaT%;^eK>(0{Pfx}NV|e{m$5Tm0SXa?)&H31o`Elj%LGDl8IHW5iGLG7P#N-n0k>h&lveQme%^ z6;ChEfk?VOz<(Ob3=aw+@wwu#%J(=C3`z0j$wh_}+ha?ObZkebd$2uPv~(%FW(d)M z2(Y5A~Kx3p=$F)}+k-omip?F(U zM|Y0dE8f-%&HKKM0fQ684;oq^F3co^Cuff*7qLNI-v;eC^e%@pzveUjg+HTG9J~0{ zVO8>v9)Az?qmx-Mo0lLigL7V5UD|iQhv5b#LSxVgNIO3=+d#6K+IdLyVgJsOcBL*F zab$A5PzlJshhuSQVG3FbCoU!BYyaVK+s*afK2Z+NXFQ8HK7=vI{8bpmu+BVB)}N3h zY2J?Ou-LbCLa7+)Ii$`B&(8@~yzkeb`UT;-nt#o`xB0@H23wa5sxg{S6Ag5S+4$a! z?mPU`=3|@uyYD$jI^FjDpImN-VGTPh>9+UR_V!{((#ixS8IQk#8fN&s-DGK${{hX@(Mlk zOoKt@2&HE0mHWF%d7Uw?DM?5%V_JQ1v%R~YK5hCKZs&(B2hTFRx05*(pjWGe#2=f} zYW|jn(zWx$QvQ2>Seq@c+PyimKJ{DU^M5^qa>b^x8E2Jul}rXXd7B>V4!5*XXg@ z{cm1HSe+kqd6pAgl>Zy!i0kIl*IJ}Axn;EZ@Tk+wh7AHUyL*^BTOWgCaSdqgobXIc zQn~in2~hi{Cqs4eNb|gI^{eH@L*)3ZZ9J5KXR&NBRKnU>OwL)!A;+uH_U;iX1b@02 zWZI<>=1vMXEnu~^J?4H|H1mXG1m_@><}%HxRC06Tw))6VjX4=nDaR<)^JCd(#{=+2ftggpz zwW3hrUf1*MGXVNLmiF_XtS&46^?w-xSFQZaJ@`ABz&X<*;bibtPvB^#WH6$4ID3r| zB|!--)dD%ixE4}RIL-`?^EEO$!$NLDLOChnAfOu<2LAMJY3{` zfbRUAowsMPz`lpL_?35?yu(DhI;!W1f1g3?AD!>HMD|Qv@_Vox~h?&qKS`s+;hYY)D||4(|A_4t3IPvMZ* zIg!uZSn3z>Z9Y7&mx=D`UFdt4cYn-;JZHap4ZnLv`#Fk<^~3UTt(AWIfx6r$>w4F& zGdS#X%HS-1wC=r6xr3b=3PW%GZMS;u4^6AR-oRKWzKZxxtyja<6Mr=nkxFb?b|NS`T^BNn~%a62^7|N zd0q~32AohGJiRI5hAFrF6PZ*l)Ni7Vm->b11EZK@Gw!_$eE8*mIX|QpE{Q_%I(0-< zwa**YSH7(t^9Vdcj&S(Zb;I#7Gaf!~2g`c1<9n*JT-NK8et*{gP0x3|pKE!}Z*0zc z;4k+fRc?EaFFNd_G{Y~_dBO!UN#Y9E!xu2Q%O~*UH48O_IhnJ5H^b_Re%Pg^8I9sX zTCE?)X3pK+jAlO}GZAg%LL_?=Q1rVLjd>SP=WPO4`UQh5jxm#eCN@mI>cPW#TQ^^M4;X_Jra61MCFouh=F z_8w1lwD4Bf#FgFWuAVDx&HXaA{HovWk$Ful2UI;LBiiNus=xF|AJ1kVSc@T~Ub*1? z(9YA{JspgPYv(77i#NY1<}PH6IhBOD@mxU8Q=1?Ee}Aq2FSH<#b6P^n@n-7u`z$x6 zPbJ1xrtpK+;@~2Ka8*2T;GCI=#c_WNTUm~qtfVTNk~MGKvOVXbpGKKQajH?`iY4y=xD| z4WkF^L9v=@UBYlB9y#K7Zu*c%@3eEL?irZjODuf6Whb+WL8Z<@U7s(0||cm={(NlzhZi8s~SE-jkRD#xh)RVunGy zmtNg;xHvWD+McUJ;efGci6c=ivNm(qWDF+K-E{JP?Emxt<>Q~IohW!G)0C*-6@QH1 zEApQKoa5@dnmT%Db&e4(GVp=1)raLRA1z1ocfQ{JO&>XqS*s0O$tx?P{Dm!t|Cia^ z)X{1&x}wSciJx)jfSD37H11`JY8|EPN(&Yh{+hqKlh226ysyLYo=oP;j1aFZ;_9R9 z)xUBOG~a{a!^KAa#2MeE)BTLG>VJ^+CuPdMCY{~eQmn$IShYR`_E~?==uyE#s~^1h zHXP3NCSLZrEc3%}%(O$4lMB*!w4X{E_Z)<69|zXK0kFoC77SE z=Yd#fs^aT|D|sV)f*dC$6OWb!7>%5FV)@3<@_Q1DPmb~wH?*~s+x(3C%bIjK<694d zoBYaI1J<4;;q=_L%*ZR}Wt)Xe^#f3ga1^VoSYdr%%%j6I@teAoP$+IBi4y7fM}b~K z%7^{NGbZhP<>`NkXEmZmC3Br8;+Rovq$IKM$64R?KeI8YheLE2L_F(%Q=?o^8Sz*7 zD)I+O{m2Ee+^#d{YL$JiXpf^;66UWMPKa>c@~lXP{Ri}Ov6y7fQ3$~P;MJ{*vxnVp ztfS%>C4Kgn96Mc1++X>#sExgF?b=>;e&QV8)&^w^x}Se^zUt&U4uUX8-%U!ao4eC- zrN`)7L6jLYW5+FlB){Q+Jw$I!HD=A`DOLT`{`zxQ@fh>*q04mlIh(LCb0WaC?_hbO zVv4OqRG38PDcAE*QuI`F=R`>kzl>)&2mS8J*l1Din^@O$s!sdyWbP%pCK7c-U)3<- z3l^geb@_i3S>B3i>r)?wSf84->7RXJZtB!$?ocs9jr35<3ujNT3xmNs`8?iVH6NCD zJRz$y*WuI$|4Ur*>wRs8~*e3a{hn6O#kG>@Q51!nUu)76qF`)KH3d> zo~~H;xj?QbxyCo#rO|Rm554uM)E^Z2)@9{Eqv!ItuXU;EFmZmH^W}>_l1DFY^Fs(5 zfR79#I&F1P?YP@|K$ObaDSu*fFj3N{znvP-@5#=qw` zUxNS^}o zV#S!dP3RgysnSaUwLd@9AM11OWkuF;~s7G)?$yZqxU-M(W5v2c` z^c(@1b5P~}P0*}2KRU}GMm9h85e~kX&Rl=c!DNEE1f@bh74@vg@5Aytiby<>S$`F< zML9d@#@CMD>ihuiz&}Fl{OX&P;=b=6?~plhdEI#T*RLDzF0bQ2-VlBIsZV}yeR{K= z|1##6b*Dj%hdj9ZeB~wigoBX7xJG?W*m~VT=B)(zCRG(QgD-9#yHr^g7tkTuiE=54)(s;8kOkX`XM19TTrZuZ&=CG+0S}@oa$ehW6wl( z9OPvje2ttG8*_Czm+bWm9DimS`*ua>D*eSvnNz4j9#gJ`kIYLc;OM5 z43(Z>-GRYZ!k|D7_bMbh@nW5NRIf0OAjV9CGM5CIqu2X!x@XE~FeJmq+>c|dZ)KFl zc}g4biMD=o1hnZyjWhM)YU-!OfFna(i6JZN)YZ=EHBKjLj(79JV1F?>=oezD$UUeT zQiEBkyAaUbpTmp}{h zsRZ}P`wvllDyfU-#j_q*UH-m8j)6)7X zoaWoQeV!L<51bJ)%V~-Iv-?kho_okhG>TF%KOc|#h^gQyC^Nwz;|yyLW{-U`(@=5S zCqs3WhQP5lV&h4Pb^X;I``zD=?qz+vPrU}gs_ogaYhtX>JC}v`3m1Rx$+vn8`qn?Z zKo9j*aG1YvmkVB+3D(Yu(qwJ8^xURk4I>b%bex%crDZTI1);Dnq(VPZ2LsPW5IGNy zA{-ir$yf+=l)hMj%i@7_9dut373cw#rV4%Z{#M*0gs0Th!PE%Bpbm*kB&1F+BE?wV z_ZwID!*MEy1o}bf6fu8I1{+SAhZqL4krAUzqwd(eaxgG7svFRH#~a_;QD`4A4KlfSx0d%PZ~hk z^l7WvzSUaTSo7?=jK1WaI4&M0{9zca_loKLQldAg<^kq;NV9*@L#jVUOuf3XE?ina zQs57=f`F(jOnvlb3M!@wIy^u`D5T_&m(=8hC6fhBodGTLqe!b@#301}Gtv|xTjj)0 z^{YtsAAV9iP?K{?^F1}CFIL4d!9E4;KV>`R1lc~NrN}7qz;rHI3Yp8Z0YeY>OzufD z5yM06Q)B#Tmc@VXN)5dPz0qLqnyU1gTDkH5HEsZFPp@9$_#yTeG;Qs`Z}eLjJ0VR5 zn#W>HNwyM=j+ri95pyTRJ{8QD!m{%OUWE7A`j7)uc3(c}FvMAhf$JCc%wJ=u!>@6M zJ&WD=lp)JeUieC_M&^_!(e9iQh$m?`cac*=VzUE1Wun#1URx zkKUj5fvdzA{Sn<&RtBb6LTOf@HZ=Zt_6Tt0sV!T@=!->I=b&j}IVc0jH30HyEvyv1`{6zkl(lnDVoa+G+o`^Y?gO zNAiDLwsE_!<2m-2t=$vr=Cqz!&uV+GPqCC`8I;H~Ku@9k)C=9-zop3pF;1Cc{X*x= zJ(#zE!mAIcgEWPYx)>sc%=WblKS)FXYni7sD_tW;OvS+xWJ*voSUZ&D`wYE!1!ygd z^gNWtZ!q81?+KDGKxf{o;sH;;c)-aG=dODs9fCR|OK^X6;Q5r_9r z8!tmP8+Q4wc>*2kM#uw4@0u@P>z8UZ&{SoduIjOf^-LK38m@{3C9W?Tfk`07jN#VX zUHf;>mg7@_V7itIUAp}sBm;a=X4K#V-(Oq@7_p$YH7FOXJwuuLM&XI3|o6yMOUV>S=r zYlX4&IY+lT&6zd1<(A*nB{4>}8Y6S_vn${CI^QnZzpIYz+%IguM%@9$9{8F=GaxjU zV@@g;`@ciTJoLCHu?#Uc%uI%SH9~)_R*zF31m&z6vzQB_H4h*%CHld|=-ED4@|7t` zT6{43_zNY9k5+!`7x`tsNHJ)iocb^#H+j^v`JvxgCLEXJ6zSFfJyHawZ z6xOjQ#PLI>_KcVTA-bUHPNwJ&2HL(Rg99n)kF#RBPw(}+7nFtVkLZ7NGPVc5gL-|y ze*Ob*d+n&LMV))!@y|IvZh3;IxWIba|8)HT^-Zyo#0W;od>ca*#9lrnMp0tSgp|+w zjv9Y?y67fH_`&{tYO=s36ZR4p=uJO+$>fBiN1l<><;2!+;?A)medGnP41i|1pQ5$` z&&q8K(kQI4dDpptECYYFMMj1F{5XFat}y@LmPOpgBmVq4-d}re=Y`{e>Y7yjKHkD( z%J}5)FqUqf6394WI??m9aBOcVVWuE#;y$>jL)TbC%(x3u=c*>J#{EHFKl^c3w92=q zzVG|XxN}=W?OkF8UHBqtbE&62>3453?-FOM|LOU_Jc;xeIn96f_lHjb`GldjtI^YW zr2?A8TUTRhyO6HRm#^dQ3VHxoK&QWjCS`{Al0q45Vl!a8T5QnMZJ=t7_h-?SEc2Zy zn7vD5HBr54hH7=Uw_$E9L!$P~-F4O9JX>DNYxeI3;IF|b$gPWpX!$Vc%ep>!44-4B ze|VlQUKIafj-1(#?H{p!jcASxBCQjCBczgByUZIKNb61{PhD*R#9Kfs&$WiBmvKOun3soaO^0c-HT z&85$0XrJ4^oP$(L&YiBq%^!1+cCOF+qH}cE9)`-+!^k<`*yfafB6jOrEDyiLs0T51 z4czL4iJfa_@9(u@E>ytW&y{gUjam;NJ% z9rWq?fMa}k(>IBK>w!A6c(RL@DOLCF+6I?1K0Z6qI$vLOjymvM$VVfdjYaC*QJo`$ z;A_TIT5IZI2<6GBE9M=4C4?9E!|8J}!+E>UAp zyqfU4SZXf>BO;2m{PBa+srxQyoWlj)|20K@t45uFMn>t{&H?x>{&8&n<=VIR-|fjU z#-`}Og|Dv|2OWYMR`H>jQ{dU(um4T~s!p(9`$O$Rmvx6xwl_m;T~U9()+a|Nqq^R% zKK7UEA+(Rz({1Cqd@bVBd6~*{+kY?_(=lmuK74d_z!w@4by1TE913Pg-9OlM297DO-N%5a9y5YZ~uU0~QFdON?FhFLqm&wVB*4%4rko_kO^ z%gtci8B5nU3mHJppae;D)+zA}r2x-DIfd+%I1eRP$|LZYWtj7}eX*D8o8p>sb3ZU@ zrm}wK*z02VF&!RVFdj*po{K@5Sq^rpKLXP~W@X{RQVj{F1HcfXGG*iN!~8JT&P^Q6 zN63de#nIQgdAknTSjNW7J?Yc_en3<%?HHDeM0Spcr?~csDMrMdCQN0R>+KYY^=m)H zi{Y2<0}L8}gGGrtOX+t4XT-D|$*!1@Wsm>k*uVd$@wmTxHQKX%!SUWtw>(BMID3z= zZg=)0t>FnMYtQYQzdq-o_s*%G4)gPuyyMlI-tGe}r>*q0WP9j5?^JyhCd;TPcv25O zh8pX)@7rC{$LBmDi9AvrteJVb`yM~`>DuK4^7{yXYFZ(~#Ej%7P=cTJz;UJDJsXo@fW zg7}B^=}Cvx){)dXw_Y&b{yWBTIq6&218|H>ksme$WqM;gzvzO~?uvPB42b)(>-8fv zdPjeM)krZVK~<9JEC|Xzg|gmIrIkBML3TcM`#jufwg+LkwLm0354+1cUfyum&OY7OGY!t+ zuI*hvaYup!&TTz{tqkQsy66K4dZOMFD0u9D;=M(NPG;}zPjA6Wplek(F(@xOs<1PY z3MTuAn7S9Y=NCpEo?WI3mqYeay88f(=I49LOeTV+&!EKS{tT~Y**W&9GGV{f*Kzd3 zPOsi~Z7#^(*Yo)T>l5qC`7^aOLp)=|60f_>i#NOnb1tdkGU5mb!_l+6?#&C z4bd8yU~N-lpHbm+K%Hi_@wK4dDi4SWdu;hM>g)$YnE>V)CDHkQH_c-{eOU}4_ZT=n zC4&Psynz(uNy;TA#rglhQkM|O9;7mKebQfupUw^jIc1s%Ky<>d=Tod@cdvO?`38S@ z#`P(&=eSmXQtcJ8v2ZCh|W=uB>V$-*!^#RdH9_t z1C?Y1hzG7cr`*Bu(X_e9>_*>~uWmVn_PJ@-!MHuIwOWam_l)mj z|DVq>_G)W6_gwK6)?rcmK$=H4(}oh0#p4PIcF({S`U#omsHv97X6I1MWLYmpsgZ!s zO_d#qDUNZBXEhclXAs<&`tuH``-Fx+WAVR z5kW4JNSG47m-HuM?L6~RJdb1NW!UJ;>;0AE!bZEpxPN^;eV(eMcqE{D!t9=>y4sDj z;?YV5@L~5&K%M^3>X&M(Q|C9m{vx$KgQvX(tT$<(fkYH0yb+Q@?ZUx->>-6uS{4ye z{iO3_7>Sh&ou_A*-`@>H`2-&vxgl&Zi;v!MuR&Fm>-2eo9@R$C_s-KDDF1G!T+2fV zv2SvnBS+*O`K;|Z{CC~mrOtNon7jI#oa(a2;h}K~N;xO+9LT2f z8RcHqlheq3kMs4!^O_ug_lQ~I#FRJ1o6AKEO+5A*vsFaC2gO%R)ZQeq%G@%j7|f$# zBU7&Zfg6jErE~0kC9MCo8+F5OR_!-o0&5!YwdXDPSUa3F3r@STR~+OpAjTK*eskDX z3R2jL%|eLDfUwLd$7yxtnk*IetzrC@w0+4%Y|jP<8)L;HIsJ`)an%dQbJx&`_=suG z>(qM0wA=6OYPyMQpThF_1+oAX=1}>v<(3Rkf4A#A333F>H^TNAo+7S8ojd2q4!`0R&j`F*$!TK`aX^)SF7s;zO6Mls0tSfddHb!FfTsWEe& z=q+#hy*N=T2Fk^MIy2EaqGpdKEL*>=V(M(W+>DlqzW=)SyFZsd!kN`@d) zIKoQLU^3QkzBK$z1=YG!z>D8>UvpF3xhWNqLT|m{UBBgjCx3UI2MihFr#^H*9+rrI zdG1_%?%Yxw`Ks^2SM7E_97dA93!TR=sDJJ#3?E=w(|1q|^&_3E?a_hHa)f%>4(814 zIgMnUXH8=x3fmvV?70S6@@F3y|61$GPI2;;2YY(W`n;DIG!iRY+C+!)z=5t zIr)LXxKk;Ak8i&MulHOt^<2KjWwC06d$)UgW59i;>Ht3*@42oA4z*_??c}ki6nfWF zj#lt*hJ@?CV4@0gq zmp{B$q&F{_>DI4Jtp&+uah!XdO5yV|AZ758lwUu8FZekDKi9+Eq$xeO+oRN8%RWHO zIMaI-ZA5O@U|moamu}u*{hbH++)nV^^_k;24`bopg(omkBd9WD1w7$d=X@?4jh|oO zOOR)P<@Gtod%`dG=7w$7{pFe8Ib+s&nnF^yf2+*H#!2{jnqc2%*m;`3LbmLI@zW1J z^VNNSz9A|fl3+Tyu#4>HKyIFc+`O?pFJes>eGK8En**2^E<28z#3j9aV*am(sr#8J$bHWr?{S6b9}^q6w_OFL+}gNEN?xt%!#Flb@Ib6zCzOn z<>biPVzG=!YF<5)do0dEsKFa<>VWHkAP_k$mLt~u_L^kk`kDClph}q4dGoijgp?{y zIGA{8avnA^gGRqlEuNvw_f+-At`c-2a>dkO%wo4gwEA?3WFDzQi1~_@gDHb*#2UXK>8Ef-0aPgP)KO zAF>%Aj9d~I%=BekzexV9B&lDdOU9?ZhI@E5|; z`#^M5x1P5+AI^Pw-i@Ze<)og@szal$+KEP8`rFSn@e`xAKX-%81O_=deCck}Hx&u$Ep#-q9Y8EVDCTw$Nkvh%D) z27N0613mOZ0wQmErf%Y|TaRCV>#z2UcEmmTS)P0=+MZ9LfAO(9bB(td+2rHNd>pLQ zh&YoZJQStBewP^=y!zdF^!xW=KZkTfXmsRHQ8Un#Kix%{D@QA5>aLYDS{F+JrH71| zIE~3T9{h0gz7{bI-TS#rciDud&T`nuFh6ibn?)~WyX8D{!IKOMFctzq24o4dqH$Yvfv@??DxcSV*O=@7`jAGEHWKktf9wk-!`A4wwH%g zIsTpMlLzf@nd%H8Tqi_-+9Ajq`0k@a;rfzaZ*q0kz6Gm1=Im5!Z1O3O!H>@Ox1=tK zQTrz-(MAyDHx(Ij8C>l4!yY}NwZxzIp+6*J(<8W^t6+f4;=aBfqKNC8u-NcclXPu87YNJlpbQbI~Gri>Doys$`N%=LdDnTI@6;L)|aji z3>JSzCL1%T_|%tgYnQ~r>wGQm z?$NK?a3TzU|NV1>r~4hHs@twB`Heqhdq%c-H8|xbZh9D}mY1stjR)JetV~MEU&%Q> z)AiEt)sIW3SP{)9Y(H4a9N8Xx7ts1JX7YbMRIkxN4Wd}_EKUZcSS`XJ>AQ!4{_Xzh zQ+_DBdOV*Qt?l{XoHLr;aO^RD>p?XbQp+N)mcgv+rFFjYGI`aFXnM542+XXGyIsTlc1^m8Cy#%( zo+kNO_kM5vc~8Ay=bkXJ%I|%u(XRnDOvuF!S;He9>xs`RmnLS$VpIDhkFoB_Vo0c| zun8y;<-Q6I4Kh9omxvdvYg5fb3VAL*?!4k9Se9x)hOx8w<29=K6z|LD!KU7MMh@Ef zQb4JO2}}ndjaBE|F!AK->Ps%X^Ll@M@}N6iuhko0J?2u?gS{FN(A^;J7nGRkurfW5 z*1KUQ6;+Zl=o8BOPgL(yM(Um*{r2Znyb4p1*8@*n7K|}o;#~f)vT}m5(*A)DiSv%R zC`QWui+31|ytqo^L`kkr>%yZ0_d4)_X6N|vwjb}j54ZKSKl<;AZPYY0Q7V574^=>D zow?<{96JT4tNlAw>ce^|t@Su}s+=d!QJPdQY{EZK%fjGc7neF7Ru99ltfMTcGn7MN zJp!nrj`kJMJw)m}&UQR+HQ_B=x*g+|5p>QKu4T(@VmWE!eajr$;#jLS`W4hG|1ftI z6-!^MUTSchAF>$gRTw+dh{J!%Z~8o{`kaotj~s$W4whF+95h&n@!1}9rRvKh&EG^; zt|@{jscx8VM5OmwXRF6V6%hlC{T&$3O|efk#l9@oM-JKlnfa5R&$@*xl6->K>uc+! zQ3iU*nN}i7vNsfm`uD-VA@)H#~LbLKAKfAD`vzZu}f44B@3yQ-d3#pnJ} zEqraJLk?gWE9F|$=sfu{9)jMD;`Cj_n}-)>_KNu*J}Zkk6JPg?zz~$A!~`$!COo#q zzIGAkrbaHeOEC7!^wMpLWrFqN<&$vBe3^g~P%vSRy$ofrqR3P=8EO(p0xfR^1fEaA z5}-zZ`pv2D=Td(Q3`+D>Cu{E zUdA{>3f$eJw=Zg=jg3#qcBOK61uiygi&DJ^)A9uZygv)TxHQK8u~Xg4uGb>TvrAq> z6}R7SKUc$^%Mwocj2^=p)>{efXM43ZH_24t8q(%XKJ|aV#aa(VAg3m2=RT)jIvAiY zL}2T~r+!P!89_01&8f%mAUA>42cZrUeqDRtD-TRHB*slTUw2g@umGeO*ROZ|@zWdL z59HVm$M?WYz;M0&Hhuc0MZ_T|L4PxfGEWfX!|G71&{G|K0npqb7DIYjYk?x1b%C7d zox%HAUiyEHwBn~+mrD}a=Db(geURko?PtwQk5G`? z&-kM9Xq8vQ=I-mQ0E~h4R4jf5ekZAKKAV8i`0g3&)1y=WF(7E7UDql*cI_GG-d)Sn zUd+KY|K>G8hkW_FH8m3OVb1FYg`{_x{CeT7AJ&AM3UEn4JA+^Rp*r z(z*9o`PX&a_Oha%!7;Lbr1dtCr+=Tm{)eBP|2?3nR}0erx4$oHghzFsa;wuE-aYkK zJ~4kfCEBHW-EUc|*2jWh|NZ-)zx!GHhc-X=9c1mw`1jxUCjKw}J}MgJlFri-pGBfG zzHv6#7cR(Z;){QNpUawjz6SVS9#P3O$!gAx_JXWf{oD5f<$gn4Fk<<3 zX{U?2?s5--0w5P1uk}9Kp86@yXAxVYvoU{fbJ4ueuM!goboO>9b_9DN>4K$8G6{^P zRGEa*=iyJ+@!Z3_FQAA$vU0}^xy#7$5rj15Zg_OY4CEv|mPOELN*X?RJ@GTLP6zpW z)H7huqUU~EVJ>x__8j^PC*~TL*jmCmr?o~lz1nT2?7`KUazXzjPtOZd)~h$l1;>9} zfEFU?&YHwgp{x0bntF)(FP9~s9x2Bo*X%qH0Xu$u3q6>E$Xn(^@`G?fy6!i!9BF0l zMeJmO_*gP^H{Yqn26<$^)jTlYmc4&YKzL0p#ywszgWfr9%>&d2%;Mra&N+N$)Cc%F z_w%;7hO0j0)CYfmJ0~vvpcfrsvCYi&^N@J-GKW>MRfA~vhz#>W3$_>{e~?!Dd(oKF zH3+QV61sa{cs!rYa`Y2GZBK=aXF{Q03}Ro>Q0 zEWhy-S6~jyD=@_lt&ffPwI^<2CD-lQbN`5Zr1l==_m@7#b6>(Czt(mp^7@%omTtF7 z?1E~{=7SU7?TRn~C8kdmp2JZ6MjET+3@Sqb9&X*t=3?6^Fk>;m)ZqG~SJTv-J;PZLwQ{Mfr z@}f<`+<8S`1s-w;rWnt?n?8L7gT|nuqYY}+1n52Co&Ev)=J4zG zoJ!yt7lY+xbL)AM&$!CZ`D*+u>^8PkfQ;Efpk83F`wG_M=Y#2wVN)p9>Ft;fUR!UN z`zh5D^9(L5(`WSHxy0na!7|u44xMO3vB7`6?KNz*U}qfzeqGL0F+~0p#<2uy?01m# zr*D|)6ApXoIlbxdEof+?4R}w%kqD6 zdF#`TT$0V1oyQZDV(~LajzkHM+cW#iY~Bx}`4lMgAKoMj;)ItaMIN`|?mh7(WS!LA=xh>_ZiF{A2?__r{kz8w?nKE^?gC>^I!6-WNMvcQ-i*x!m-P z+_)m~IC{m6VaxG)lwX?JOvoWryXPLd;+e&^(*OC|miBst%lls&iVgJM_ZatR zw7UQOJ_-$cV67hYAFk)(ui#n@X6^=U|N6}8Gp&zg;al%^ogpjNYRuffyT-R>_)5%d zlKLcn_c)_JPotcQZ9+|VW?Ys~!;-TB1)a8;o6 zZrAGgg<0vm;HUxGlkm=0~t#`+YQHHKDuv@ zUt+fN$B@MCKU`KVmrO4HufEtjzfj6HlYURM(_lf7T{_6{mfX1?2 z7e~(4L!AWW@8LllTvd>VX|S62)s$^l6JHIgAYaVMvGy0DrX-%EX-{`XoV*~5&7DN! z7pkgcy$L$s=52_4d|H?2|IJQyv{N5=570W871q0jr)}@2h;a{fmzORb3@#KxhV^#) z-fp`;8h+1#k2+JkZjHXgglvyE@O zDj(G;zeaYJcaQmH$M3w`{Bj7WZ&3aR5NYen{CMhvJ)K`pYZbH-Gz)`*oVgO z2h^(pEB@dz_%XS#eaP~!-5*tLQ|9^r%w&H*Pd4FS&!Ao)T`IEcuFH#ZmkG8F=uL zq5e;u`qSDN$)34n8?Qh1IrEymJ?D5tx9k1zjbX26sXh2<+Pwt*$MF4>)f2|p{&@>Tj`~ODZU@G?}zKX-7henS)F~9D{28%QxkCRCQ_8lOD<7IY)M%c9Nf0C%M%9^~fNv z)Nk_?XRfvWh5z}hACcWdsIDYk-zT6xo^+%VrWt(%_`rFDF*csz?e9PQR;R~29m}?t+U8Nt z&h`B#!%47uh$z0V;cQx-b0CvUL6Lzeb8ZA#ZmR-x^RwV3O=(V%ZwZRu75Y&HN9PUH zyQ9q{T2QPrR1sl~bB>of#*n!hzJH_d7O+zfL2Rh2^QlWtj827Iy^A)q8?3VdtnwZ* zHcovB-4jRQ*!p|o`^GKY3{RH<*(!>3dXgp_OymGr2UNjr0e*0~3$Z-E$aVC2Ksa<0CJ|Ua}xiuyN{p84_m|1 zE`>=JQRtJ-+|W7R2lCmuGyWWw73L?fpJH?3NLx$|q>=?~G>5i+H+aYT-NC@(#DVBtbC!Re zg)Zz_&7B*as(-^9JJ;BeWw%%BZZ`H0TdqVNd@RiHs6T!8Z7v-#hPYTG=E(`FAWR1D ziw?-hZcG5i^2*#rZV+^2ToYHB5G+(g)+RUNp>S&M>GR{FnF7yZPht zn;Sm#Kg)qQR1b0*zF^-buP0hJs(pU%`_wVR+|GM?Vji^pU+W83w!P2oHMZaXMW=J> zz3=_OV%Nh|Wy*oAQK@cqU5Wb5!Ib0U4L;PT8Gr^57|aDweE+W2xOXWXl&ck1@?`< z!Iwnc+TJ#g5UV5VUGyZU{p8$_^UE54zD9b9DQ@|bvtN5C)K#ytIP?ZGR`MyDvdJP!8O^CaO8?-Gr1n@hJUO4eL^6PxA~YH@JxqXI5QQ>993Kg z<4qAj% zPJcSvzk7Q4O@5w2QhrgX^W4+hm$lh@FssR%5q@VAFWr)hupX_G?tXyJzgiAT$P{*R zwfbmcpK)*kgWT(r*F}JUC#C zr98TP+Jvjh>m*r324CrGR*?6{e8)|{9e<|-C#f2y;5v3w3sNz2)yZ=L+TL5N&7c>^ zxfeG$e-`U6(>^!2*0gzyGjpT3J@ZV^r@}JJus(R=hugLJ{JXHPQ@6d4Jk3dr+A1Ry zGit~%wwkC9^l)MdJq)-@^gvk8QBy@YFPYKEkwvaKZ+p<(EY#fYO~|)`Mq-_uS$|Jd zCDU=^tj67VNhXSd$-I~&YXg}O{HQ35SK9iR5-7Pd#`n;=Myqu6wtauhDd=2;R7lja zkm(#fustwho^FL^y>iDCryhS^5i`lqlazeCZAnv*RPQ6V^LP+#U!Ohyr1rd$VZ3@} z7-@lAW*o-;%egWlauTt&?nm8ZEn*G%o0AjMBV}H(ulgdav z-vsxP&|Shv)^!4q4Da*USEF@b)m4|fIvsF!x?YBv$nVPPc=@sA=Gj=j)ePdi&fNIY z`|4XQ`}GX-SLcB~0gepcAYD0IOym>Fzf;Uvfe_P}vzv1O?HNw#YCepe*a=cyKtqf+ zStoML7CB1{)N|2H18x2){(rx4dE94(97D@BJNp;^OV-fahGbYX|LFV;`wl&r5$ZJ| z<-F>n{EIw(pCbwGa96>NR>|0VRAD+;VFSPso@PVkBdo{eZ6Jn#5{IOdPHc z^*7bIN4GDFq0Z6Q!YLI;84{_^T5keBPt(q)4rQh|Sr^Wy;x3qi|F`@aOpBd)Cucqn zS@qACLn;g-Ywi)(fzEz>XG$265&su_m;I8xS7V~|&exKna0>gYnQ?r&p1V}5A+Gg1 zCmRuyDH4D1DaP5LWSr*VNxzPK8lP$rVO@06-q-s}&i%l5VkKRnE_$>7>N@{D%ZMi= zcK#fX&!4+*xbSOwm(eN=C4V(TTXQgt8|lk>>~UO=nL5`YZCvwTuPNv+v1<35k&+7g zUP(6jSB$i~&pn}~2tj%8)YREH+y6^G8X1m}8?$-|Kid5Ti)_?U;pd6h($+$4Pcc_E zKX9$HmV#dMyO=lNF|@rf=icDv+yi0Rncm6kJhq;R*7p{3gbr=bWqo~pv#VK89uxi2k=c{E_a@Y2C#W@9rLa7X|5 z4%vIJ{hd1Jo_i}QqMmBKERsw~sT?F3Ntx6BN#mn1NP^yg@OL10^d+DmSc3MtwxNhO zU7+o2OM`%GeDESDB!3=4-)=pd2O(-fXyEbjb0!#%jfG(e1XQ9n&5ml=+-D38+j?(@&p9m@K1)@HfF`JjM`cBEivDl!l;a z&enmh_%-CAV}G3VLyB|J!F@shKEDQm^4@Eye+X{#NS16u8S*|FA6W%g)9|j{)xr$sZGd%)_+3def~a~WeQMZ7}6FGe*WB# z72)$zLJC^EhZ)nzEA$nt1n=7B5u6sha9kkI3Blr=pnpM#VDST(5)yAmGn@ojF7gNg zLox00fHXrPKm$>rXkdBV(A4NwF*|5a8&~j7c@LOHK^1%#S|i{R{y>~x4u}MecI;B3 zh0=BtM=>dm%P9KFWY0WZJTb=lYilodp+akYGZBvWxTX%_cSD7dGq4c zsDOlJXX~aM z^ZL12o4~r4muc-&?#yq}SuMEj{eGV9^pw00**OVM#|vYghh=xXu)FiS*{A%jKJP>z z6MvAj4HfJNQMZ!`IgQrZv&sve>De^HAH9p`IHKq^LFi;B$!OxrSz6g@vWG*j@<0@p zD?XIPYh3s0s7=$YZhBH#p})D!NsGPb%UyikA!fzpCjICw$^}J+)2!P?iwveA$V(ua zT|+6CTV!3DV{_TWb3*JN9>T#u`QEchM}Kd{yO)R#ODco+FkQAad5_CH(RU5seE3@H z>)*IFzx>5y>=O`g{QFG;!0|`$&;IzGc4HT~jClY0>r}-L?)V*-0018Gg#mzK0WEN| zfl`z^CwpHyM{6SU?DXK*Ng-bl`;GQpm)w@_%<+ zpSNK66Tjt&VHx~AY+^hbzwoF0r#>DP@bazS(12*b1Lo%ga70g> zr@SYCWE&3P3%suyfuI36W(a@{k1%}4d5bL{K;n!4)TccF95J58Co!Z&8Nc@zL4$wi zOP}uv_-q7lF)ZTqFJMF=j_=`wAb00HJt@UT8_;4uOV)2qf;9)F$jjh`Yf z*p3P;uVyi{U;6cMd#e2dr&u0RaXtVc#N{(^(f1o}PsZ|q8*bmhuz~drKl;CZ?1w`v z-WUIEFLIAk+%wL93s{)^uG503x0 z9;O%VpYg!|(tq3g4GRFnC*1z|Yma|l|8MxCAGQ8_e$4vYAE5n=U%=o$=Mz5gNB<%y zkcjPJC?pO*9JdM07=IRvFTWr8L|EQZis9S_ES9HKm<-?-Tz~xS-#?KDTwsZRPlq0` z?eF+n+Hl6OxkxF{2zbZ#(KNtZq7I0*Y%gEf0uhcACEEp{VTa|7{bTvcPc(2a6MPzk zFL7K+v6t;V{n)d#j1&L&>GYo!zvX=zV161Sj zZUATjdI)B~OTbJ4h2wgkQ)t+t3-G=8LRZ05APQI)EUa#^a&QEEzgR6e76qt)sz9TV z5Wtrn3bg{8Vx}dJW2`wwDIpro99^UKclm=Axriw~`Q&bmMwUth&BBVZ*dtVimCMi8#HLg}E&!Sq9t|&8Iaju~# zn^KP`c&ka%zT7LX=3Ez|Xi|5@ovm)9x?i5>?fIe+zu)io^?s-Cm)mZ?XW#fI>|xL9 z`|-AG_RM8pzjn%=s`vJpEA};Y8uo{IAM2<7_8#}von!Xzc5hvOIQsQ2_ZDAN7LsV0 zE%F^?X!%y8JJ?g)BuORO_)YtjEdyG!D{<|Kv4M+nmo?tYYzev1rv>~ z$J;DGHtEoc4h#i96J{CdK=9;h!}*--14JVdTN&RzZA^YFXdie=s^|dOn~BEKfC7Pe zuofV~F!H-iEQC)vJpqEQ172f+Z<9r>xJEg`h_|=jv`wN&b}`~FKEUpRLFs`_@=joi zBZ$NRx4HR$k=ITkE-TTGP|Tn@{*f0T+2G*2uG{9gIir-g7VMn1xIEM^CiP;8^XnHNw63on@U zh(nU;0JtnJTa+VMd5M@8x&4&S-Gf960M__@$QB}hkklv$oH9rN)W|kcSO7e+Y%$-F z4gxI8z&$L`T?)rV3q*byA+$ikv;!T;3ZX3snMm4%PaPpTEU;R>G6lwI|3F#=HAowR^X?M5Kh zBM^%bq)$Bos*b>?pL}E1Gq6Eep-;W|gj*2A3kb}@(ZBsf)wUs<1~%a%t&+A+49c&4 z>AL=pzQCiUPz0|SfiPe>+bj^&02EHx*P#o48$}1jS7#s@d5Gm(NGt?_-(bDkf#gRc zFk0mx3cq>a-G5!@h<|760DS8Ckx{pR&8tJt5pdu21Xv&cMRA0*295i zY{-W?5L8dostRe!0cQ&6ON9k6bpODA*nY82$8Q}-4ZiP#1e|Rm$NiT>9iV@-9o!a! z{Z0UqQhao(LlBDX#|}gNbH=#Fy0uvV&I3LgCVuS~dLzL2#`gjpxC)^ZP690KQ@s1} zA*4u%!bh(0Sw>-j$ZoWcTt%!`AeN(JD;kS*Z5=DD6S2bb@&-KtS6vsN=#JBuXgLft zf9RhDQYcJbfQ&*P0K7V5sbIgw0h@6Ye(9HA!l-abCsZ~tk3d{MV~l%Pba$y09oS)q zWoj*I2X$X8Y6Tq^a8(k;6(cYbd z4_>Xd6}lh%UDpNai^uX!KX}dER)jwIr>+a?7f+80zkMQYjffi+2`v(ihKT2;0P^3t z7r;;lvsMYy$b)f2+%O^zk6-I&{Mx|3wlidw7BW6_xgw;6q65o5SlA~k1%G4_f2+lo zVD!<|0n{PR;f(deD}S*;!GevCMj!+uFFZ%vgQDk$hS(%Zi)oD!=-{le|5$2NGlwEPC;HzqiC_RhTM%IkQP|JQ28y%N?BJ^#$c5T2WVuVV!J02_q`qJ0n>EMF*?rSyT}XHFY|`cGsp)Cyi}ps;p2ftEIF zL5(S4AHckP`4F-FN_f5a&vbyt6X1^Z2qHYTLyZL@93=9fMV4%^My{R@o_G(2`I2ucc(e|QTo z#WeEth;4O*X>AXHR}+9#@2jg3KrcXaaYhD~8Tu``wxPnWkv<3uaO^=(ApA!EOc^va%S6-Q)GhU+Vau^I?bm7rui1V?$ci;dQvfhmu{7 zz~{r@vxXGxgOGe+5w9Qrx&MS8+l)T*%NZH8vN#{*dEWijzjc^_}TtE zd!J`V*A@8dz599ozQ26wQ~d0G=|9i@m;Un{eCa>WS6#<<8zGO|4$$`g{KnYEX%w4r zTm%3F1JlL2e~#}C{i5429e`iDWgPK$PVYmPedvJrp}SVw=RD1qjvL1U00*z(B!Hjc z_gNUnLiqJw!Oy?m8y|WX>q4?W^xtRiL;rmaKlI<{6!QUq@bTmR(=Qten2*@Sd;s8l z=z82Q_=|4%_Fw$aCExzTFCFpizxSb=zy1G3hu3BNe=_`K+*t7X?TqHd#${yz>z>nC z2r+$HeSGv&W)u`m|HkX#KlC=EcT(Qoxs< zg$UCYe|SxQ`Av(^r|utG1M~4iTYh-v{vt;te(K(S$wk5A6WITjOSygNzcN9}r|uux z2G{*VTVb9rKeW28;~5G6?yMGA&Km>@@AyTn;NN(Di!>G}Q37~Xg!Et5juXJ&s*KVN zbe!gjiAL}<=PjcRWP33BPfnph0Fcl~X(`SSe<<2-ShYhR$34O@@-!BHBSRnn#^-1Q z9gpVrXPZC)XnQjFo-q*IRRxmb`PE`8WjN!w_>v=xZUHM%3};Y)O00g-I+4T*cMz6N zp!fLd8-xNe%lklZ< ze^>wsA1VO=;VVI4l=sI%`DC^X3s~WN=ISTk3_w#My#V0%=Pw>jIRDZR!sWat_iy7{ z{`;o_S;6<;Aq355YXp4n!+&8LctHCL9&h#N;U9Q`0x;|!7y{4$yZ8g+REs#M@Nc;I zdpvKe4G8EzZ~+K#Qt)Y)7tW-T+i(L?e*>Zg_BbfJg3nV)2EGSg;Qf9Bm*g9q-Fd^3 zH*BzAO*;U#KsG=CH9*S0Y{NT{XrSQzQRxG>7!DWM;w=OPK=ljbUGn~aq+J}e_OI>I zxLp_1O9DvnzOEqPc1j$~A+q5F>|Ukb*zxE-UwTOXetrQ@%5|DAfl1VjbDzXRC=3BIB%EC0jveu3qwfOLTm zytV%6A1{2ir0_jF1t;^6Z4ech-UAtcFC2U-w!i#;6b6jz;b`!_elY3{9nhGD>Ppi(SP6i`2}PPq=1|Py?_(eTZWUSNj;W7jp>pF z^YzKc{P8HeqFz<+#v z>{#;f`v>+%1qX4Eny>#!^p~$MKO}}D@a6MkPg3#aM>K$IL*v8e0++8||Dj#uAL{+u zNAT%?0pB0{#`FmMvP0q9AJGQ!zv2%8+>Zm6e{5sCfXCbMSHCDs|FN47-Ip&MrSn%g z0`NUR#n(0^UEgyGnQjoz&k)3?)PV3IR52l>BsLm zpf8x-z;q96ml=R>Y`0&3SpI4sFCcJu95H_FL*uRJeX)S;UraktE`FWQDzFvX_&lS8 zkHjE6Z{Xe71)DlPY5~r8@E2@+p3%Vj+de*LF5*1Pu)(TKaMxuUz0?IX zIL^O{<5Hml`_B`{*Vl1eOhjWjEm%(m9xr$s-`Bx0hH;)e2FuY}0EgqkoeQwXY~pkm zm-uY^wowr9v4GA{@DsB zZ6pDO1`mLEpcLu+Ri(7a|Wl8$4`L#5?FtA>#Pv(6vPkcq6<3 zlmd@{mqI)PAD>SW9pEd_TdwZ7~Nvhm}x*K!M?cmEpmt z5bsziu`NFFmy`sE6MqP3m@HUH0!$0>jg^(z;sIP!tTg6(uTtc|m4LTG+<#V1wuJC; z?-EEN2t!ydk^;g8R)wTvC#-GB0wISj)d7(Ln?>><<&XF!f!a0`WLVAcHzrmKu)3ki|*YJ(0BUi>Xfiq%Op^n(0^9L*jDY>_AhaeVOl?x z%|{}gWQ5QTnSb+S zU4HjNV1CJdGcFDN2meb>wFu4P7v2+?-+0>-e&dn-yS%3gKlnHJjpvQuGVx5Fmwxl9 zj)~-V+3$WFoGOw1g%AGV)2_Y8vTgk0zX`jwNvLf8YY`n`cOv57dh%=%UHv(}Mp&ko z6-Ep(e^*~N3#A7?VTx2oK^I226v3^%Wi3It}-h11;im$$^&B zLp#O6TBg5sRolz&`c2+Y!^3HgD3!#0{CO|ff4|!$4KTIo)qiB2I7R_e|JZRI!5{kO zx9uvF{e#aCFT*XNAm)n(r>&||Mn?kv3{3-gWo>)HiSQT`*-_qW&6SZ*pZ2h-{lGAxBN)^2k)cb zW0(2IXG#10gBP8`f2Wez{I&zL4u98ke=uuB2oEb67Qbs{*(`?KltJY|I-KJ$3`iq`PGL;c>iwS_QYTHtI)2$3iU7N`0D`|GKzf74R1a6=* z)|+r%_m?Nr@O&ZOM%E~rgCxX;88xaDYFm-Q?AF17-B&Ijk!O4JuFP74z11y;Zq2ph zoir}xc?iHZRr+;(zOpMiy<)L$cSL?jPQ$5AUY3?ut90u5r@_LB5l$Ode=z6w>rSyV zpidSvP$2a1xZ z1`djFY7RPvx~|D_)z8a`WRx84Hoe<8LBK>hx`oPz`4 z;md6tnJ5_bb@Q%M-$vIuOAL;UlM9nA;M@zxnc=-8d=WUpDbnxKg>4(iJF(U{qq%z}Ptr%}8IMb28Uf+oO@woWG|U)7rwHfJ_Jsq;y%axx}+i+l-+ES@%7Ax7n8 z*q17-uKkN4e7#F z!eAy2=qF1#+6-|)@LG0~@j{)6)6k*d(Ke=4&h$O0j{T6>1)DpI;}#KW<9&K~gz)NH zPs^LGv$8ZKrHie^`fAm}C;vbhFjX&Ejz%x+f3`F46=*viA7XodluWoIpF{nuZgp-u zVj0>rC$V~L$~V3cVwTl;MJ8X@w2I%S^IM44i`+)mWfJtvI7~tjmsf(aeW4)s=m9-u zY{0?V2%%)AqUN}RzQKl5?n^4Xmz#H0q`KNb25RW6nGs(SX}8yWGN8vSAM4%Cnbk>zc9w_}otnq~Yc8B%iEK6L~{r3Sh*N%v9;CybSSsn!Ube|s(T+A)zPDEjIe|C78 zke2fSJInm)I7=@!cRPLfz3gZS?`U-GXu`V^)w}QrEMd4>O4(=1+8lGLKd68o8S*NRnVlx^2he|4D$i&@~iU_0LIL&?yVob-72E?($Eb#;0Hr7R-) zV(#4BAQgV;-^uOD2h=r4Z`~uB+Ejk=Y7yL-4uZRPzpxNUkND03FSyx$e4#FRq+O@S zwsV;kyaOq@S3>bx9_h<1=GI`vev6!xcMh*j)O2XSNkBSjX`4Ew=h@kIf7HURi#Pl3 zc01iGAIDuZNMa+{rQ2rAP+s0oF3Ux=5Z6O}w=8h==AE!LGd%m_L<(>35(@gX_K5M-&x_M2v=ibw#Om9D5lqM z8*fUkBo{QLGd`bmUQB3;f2sq?w3A2usM5D6I`>F238pVKWA0bW-R(SawX8Udqtd;R zcPSr)N19r}WZQfz7^L^Kk7QNB`LfSk5eSFaEj$FtwC1YF~ z|E1c!P2SYM#N< znk7#rWC0KoNhFD8SEy*53*fyM(3EzOHs7RBQe>k^Nx~BCijFl{=c$c+B^~)Lid%{& z`KO-p6@TRWmCJjlC`8^+S9e85G}&(?c`dSwWkoj@pnks6& zLlC@?n~aq@_7_17qULeW5^u_!=$tQ?SY6S)#f$TBf0O6@3UsHNqEPO-3!}QO+KHDj zb|7teKF=3dQ-qDLE_bbH#jY^3V;prd7X+@i`Pp+O^=vs78!zP(QB@=nW~$&MMXdne};b0>8D&NopM zT|;U~e>+--JYSmRB{m}@skT>ctx4o>Q@mNNJ+(=#sVCC0p7%C$q1-cTusG0MB1FRo zl5n*3&SkG$W>{&cyB+%iXrIvf317pK3>We@1kXTufJitE-_f=9bBuwFnuVsb1WT zQn||ApORg1B%m)XM?h=LWq(Rid(>QipBqYePTZ-a7y6Oc&+}fYP@Z^}MygV57+2;g zPmLvB`LkMqVWot%Zi~bHgyL<{E{h~C?V~H~;X*ubH!rZ7tW_!Gf7kJRsHp7Z?6=Nc;&U5l{ShShqIN4)H>JG` z>PxIvZ_9YyG`K1cP>>QT=YqT)xf>k@j#_Q`47YLskQmFbDdf>wCCuo;Rz#Qc9tSyp!a_f91t* z3v!wWMAhrlj-%yz9S7>{$kN&fk2@t8SzDh>)_NK0|ld6U|M z^O_monA*k2O-pO7-gDxyiK0nWwD-3_6DdSvbTNjaIQKgue%1hce1}#e}!KO)77%_ z#$mdMv+`cO)&zS1!SdL_$7#8z`}5_AD3FiiVqJG7s@Ip zQxTdzS7%;b7K-h~UT>7^k@ z@%q-Ieu^S04i4{Je+lK70aWSj=##x7;f5w&*)4`DWu*({IjL+a3y!?!wJOO@ zP(STt=Uqhi(YYV8f@Q^1eWEkS6lM{0fqta!{z2P6Ghv*mO>;zHzX#xCcXA)N7QSi&A*G|uULp-O70`gE+Rm&=)QrJb(H`3iT|*4pHb+0&<8rtWunP%a?u4uGInMwk#zSiYxx z+oF!0Y;Wzv%#1-YWqlUCButjNrB7uBxFphZ(ZuzO>=z=FATrE-#$o+JR4e) zXuVYH7`>g(ul~%naLUyHRA-*xgeTYg;-GJ%0vt{fe~a^B+P}K?(5wt`cXsntMj9zZI*mSGlT3X~FU|c(G>?bRjl^DOh%55H=|$qIym0!e^7OJ%}ZVZ-Xg!3x0iv4?V@;xkYV!o zXs=c)w=cdrxwa&~tJ;1r+S0`9oj%!~>M|^2>U5s8lYF(iB5YfbSr>xbwJwJsbFq6K zosji9kN!#021ng4gfnG?mp-y`(C#8#J`7j8n=OlMX=f1P-kDa2svGK0Fq+_UiSGAi ze@)}*82X&t6_m)osAYQ(iY5ZiGlxg@X7)Cf1WNAiDMO<8qtU_XrV&}ZE)VwwNLhT= z?*XnMc(E+sX#%fox~BpugVKwY`z(Xu8{Tp!1kzrg5zZo+G4JT75t%l8`<=~ERm z6-eHrw4ASIRy}s7JWNU%(WXM7B)Ypre?mERu-puDfF2nzjd2|asLQLkIBC5Ok!e2~ zzgt9i8X4(4-_N|X6QsGEuLGDO`65k&Wt!W5d6fI!X)O8B?h2LVBfb$Ib>t;%1V~ak z<|KD1+q6G0N=GskYo_l!7>DO7F?$zY3%R@-&lmmVBe>A#^42d$V36;Amr%h$e|qa( zG(CO2C-*)owPa< z4kGdD!VXPvzxV0GdC}`=Er#Q&e}~)UG8&vpK2no8+z_}UqRDldMNMP8860|o?V7!F zBWe&PuEouxFbc~9N*dxX8qYkGapU;lF}pIE3cg$muL|r@4bvT8*Fic*$7^r8v7WaR zqF#A2Jis{?wd`OT>D3M^zwWfxs`;U}dw@%)9CitCcbaW0FMXQwc%Gsxe-YeMte&^< z)n?Nq$Y!U!yQ`_tFHK+%>FQmJ*czCYvP4yGW2ta2leRNtv_!^GSvLsoDNJ%`A4N@7u|BAo0=?TLzm<+ zdGfE!NL}j0Fh;fWt~+;4f0L;nF7b5cswoa=-ZdR2BgUg0scp;q=967spc1p16y~-H z#X}F$myzAx%Q82d&xKD2?#3YP$e{?`5J+=Q_?Iq{Jt@0|0yVLzFEYV44a6l`gl5fT zH6t#u*KNCHdGGp%({lFieCU~d(XLefU{6N>(udX6)of=Nev+Iie=^m&{veValTc3t zdUM-Y_mB#lkDN)iPvF)EH+ih&tU5%YL8q_xE#Lyds7JWZ$&_VUJ5!JIUaO8Q@6NOz zEqtGR?0v#g`)u`$V4WYr>!rR?{K}Z^6jcXG6_p@dtb6ph*sK9-RIF^sBlm3v zM&g~Wd}szQ z`m%nV0F#;5e=wdN8`Ed`>%A+ARq-T{bsK0nn2`bB|E8?|Qo* z??aJR<6$|C$DkY3{AqV1xoO(6SJwj93vIHczg#aul|yDgX|fPHN3`J-BONP zL)v7`WIbK$@jmy~97hj#eq9BvZq?g8k634)a?Q@*`bDre07l<58)2hbT1M$gXnDgB z_91>1yWv(X&22ANB5g{fOPxARBPcexU9V(3`khz3H*<}-?XekdzA|T)KC`lhE?0RU ze+fGq*-od&*`iw4SVUC5HBho8=;#J_W$HGVQ%!${7fuzQ%t<9}9!a8hd(+}sZ)T3| z^j@1?<+@KG1DGcfDt4jiP3S%WF$R-*WG=~FEZU;oDj4~pbCx_)?!4Z)WV_s*MExb- zMkmR61|+ql-T(}$HM)0N>e8?Ckv%uFe;w?1gc!g0^4);=kz~uI?oOp1bccYF(v0Pu ze0Bs7zWjaXjF-zmJW|W3%fr4n-dyeMDQ}G|_o#IGIjA(JGlik`O_z9I(}6?7+YIvc z&PId=O4W(M=&s$7>W)$VtjJsXq~-SAQ`ftpg=b6A-zTb^qd}4Dc}L8z>W0p|e|h(~ z>~^-=_dwso6Bqji)Kc0dkIfVwX#K)j%jNb6%%n84F;fh3CONqx)mLUTRXekRtLabK z5gD#8&c|~rjAj@W{q^Z@`&Z{*a;8hJ@W#^Kfj$-QBzq^ubGrn`@YNgVEmPs4el^cq zxz6mM+$vAcbd#y?%~2q4Cp0?uf84$00NJ%zRq%X%d0|JXo&89sm)G;pJ7jH``^v#V z)Ho|iJ zi5OogU)*RsWm{nuIy24Tu7NUU6gbVTkv&2#SjFUS+2LT@pix@+LiOe*1AY)4#&97B z&LGiw+1;aZS<8J^G@jzQe@|IOMq|6xu}A5WRdl6vQgOcz+`_8<^A^*Yd!mAVghd=V4A~dFZrxrk*^~y*`Qk;qYtDeI|zS4O^J0 zwKLHDo*-B`vgaIfTgw-F!>`7vHX&78@~}U8Bvt z(J7Rhrt|_0#;z+qgtq9pv|*r+Dcn1}xxx9uGQLofi7i%TD)y5-uil9v>19z{QT9tP z4|F~dTF76mcN%OUx{DM>c{l{Wpg_zb1~l0zT<>2*JgP8)fAqGVC(hONO0vGOkvCY{ zkaI?$B*DAQJwxEu>XWg2Z-A>vTZ2w!GSSY{8O$aDj3i28u-0D$<>(En@+u0HDJVxB zT->1Wo>VA(R7n%Web@@a4TNVEKq>FO4-OZ1X4T%q&6;IgZAn;lb0}{gXLzigT zH%P3(RbA4_e*vfF*t@PfSaD|_&eQ$~qTz_6!lkOb?sz9zNEt|#@7 zVG+juieek6m%y8`OQNP2~9KIrA(Qz)N ziM`2XxMr0Nj_;z1%l@hBalFNiTKh%ll@W3DbE-SE!Mt&>Q`*OCr1STf?~=2m=S=L3 zA)9kve`(hGQ@Lz5+*iVa{iSX(7iywzsCjYdr_AMp_$t|v?mKqAYg|@cElr#&jV41? zvN|HtoU|uWrY7iVisPFCu$dxfQ}$}*aA5EtP&nON@aY8+L&7yxWje{c*LtUe%07EO=5+#k7;)2 zqqYZ64|9ybb;ozE=)7rf`xU`WnRYK(n!zfuv-H@0tp?z^wZ1_1Pl&d7O zf4*js9h~3Y_aVq_^8NmY0e1z|-^!I|H;z~H_cjEN@hbAvg(RuGil)72erPHM+^;Lh zR8pO#cbT)Zp}PHM01bWkZYCP`JmUj@7}{XR>|8MDi3J#9b$xjA(pz zk#iH@pvX4|Q)(15EEu21KrqQX0eP@rf4L)SmJnA>DbR)tvryi{FbG16VV+>T_W1J^ zo9g0S4yru`(0BovZ-`BC=N_ z;Ep*A=UI$aRlWuSmYfzz*(Q(+5_5YQsn1?RS8^LR)X+0FZj}xf(nMt1< z&gWNA4BN(sxv^wG4`;vBYhrZ?(ALAbCry$?HF3y%X(SFo8!{Gj-az`XA!WyJNY%29 z@j&~jJF|t6j{mk3dr9S40xK7;f2)W#H(%%LEsM5Q6?J~-q-%f`w6H;zvrc3;By6lX zi>=tvXPnGKR~rD*{^p>ET!7g$EY#b{kxkAnhOGr|W40c_HIS1A)fWldxmIiZhe#|R zr7thUbp=bL12Z|WdVVFB%5T@-m!NE|g+HoY zrn$`d5D`4Gw@JTRXtqA3kS&|$9Hk48Hd1^`SYD1quXWM$IShMLf0Qor;7Bo#z;M8~ zq8IpGjaHkfT?(%uvtW5{D3Zn8QIG}b$Wy$VlYgwu55?b1pL4V5UtyCB!C;i^h;v!i z-|mAA1#;rf2=eh`%}xMhR`~ju03Fj=^-Rv@E=;99Nhs}=#JN%gMTjfj0MUNEB3;no zDSsuwC|=*k@5EHBf6HU%Ho~miN#}``#v}c?=)yR5Zqq?WOO3*Iq&Mr(sL*&=I}_Cy zY;IE1tq3>l&{hA#WF4YF0n-?TgPl>#x&R3i3z4pyLpF@7DZ%)9;O(hY3Iy}u_n&8r zCNEQ+fM3C*@sv_~@G-BgeFGL;I=J_I`CzOd)?NLNssV ztN>t>A5=FOxD5ato%_Y44ZOrV^WePEO_z8Mhx#ktMf6G7!qE+!=EYC%QF+PiYB!8y zG?6ok{zw7le_-04hFJFLLttUZ@S6?MGh(}5E{qZvGY5CA@$T<~=x9;Vf4N6{vkPx$`rd*pm9 zJZkJQR>Fnr3bSFf4V8zmm){l#IH^>Xr9hMfjt9X z^iIZm4gxGU)qb(kXQaGZI!NwAWvMZzbJ`n?NoN6tLMPxoIPShJ`j zkX0hDe{`t@vwWeJt$qPT<8mrc&`irRZN;h9n-iBR8At8cMHP?SVE(#TdLkiz`W8keFm_I>+8O!i?)pm{vypZx5Cw8*c0$K;8D@#+dW6k>F zHz%j9?ZEug7JaeSgcy2X*U8RAvK~T@HR|8*e?rG24Z`fkkxxA~aK;ie-qm&%zZN67 zWFMO5H2i_JoClmIPUw*z84=+ws_g^I#0<@v+fMqdL`GE@H-eXDrL8zAgO)?ib?qpG zQf*HC)U*7I|e|VQ=CLwZ{XYfad4#D$3gu52uc=uA?sXv?| z2{scOCWh$Fiqj8CA-0)qP!pzNKtH=+ENO*$2O_=>-_8kYSL|;fy4zM|G71vwt1HKo zUPm#_6H6hIdpBs@=;RuJ4vRQ~mi~4?uAQ%AhR*L9Y#ziGOUZ`G9}k;Yps}Gdf9-vf z$_DYUEB74F>$d8*HG}RgkePnCypHdYT-lI|xx=!Qn~?LXY=5q5e7H2(%ww@d&vDmRC1+myHlA zk*KCjVtD zSE79q1_S7^c-%09V$K87hV>Cy2yhb8X}ID?@*=Tr`Glwz?`D~oM*a5tJ?rT z+$|fAAGsoXgq?I&SnGvF*3X()B(;{P%O8XSiuYv!7o~!IM|=wAne5 z>>z|h6uX$A3lNw5OU#4Ysr)J(*;ry~IeOCnGRqULd0y8mfG^`$^RG1zft&CL*(8G{ z`Bf>QeA*9(i9u=$#+J_lUJe&&sjtIA?&Jy0oPasI3z+kHmUL&b4 zn1Hdb?g7y@JsLl~g!crKle?S%Gr`(ds$(JjpwL1%a;egX9QF2QczM82hw$TY?MN|+%P)$n05e5-5 z$S$_O;O6&0ZJO!w?X-x!-?j)j^NQ;l7X!6pE+3&i8GIT9eG?ELa_T9+ZhtG*9zD?Z zgNYC>Uep>P`K8Wb7lV>t{M|m&Pdep>l%BGevWdF@b||jlWX=dD$*TIDX^vhsV1vd?n@#!esWXet$55^ZTEAAAHH=uMg)Znb68gZRnwf|Zl=c6@CCr=Mk z1$&dmZXRx?dh3qI9cIS$h8xV6G&cBGMNYxN-L zK=C_MQeL0m8l_bUyEE76d+P}t8{kI5r<8~Ze_y_gwe&vlurL7*#T^vElyBQV?L=~z z&(cElwsuj6g8_Xex)1@=A;0?69w7h`9p2nzb+prQ0KYtjnkT!qgbQ&Id6R@f ze_oKPe

vHqZAPX$@Fj1#XP!VdE^I+xnyfW;1L7uS%n$HJWXPM^`ESs~-Yj%(SG zXv%1M;iQ&x{Tg?;d(^l9Ok9HG2fU1xS8uB49Sy?>o+C1${S54TM{MS3euP#^_aU*S5ZV0A;By;le^a!zGJ#cLHCB+qR z0Zl5jqz+glQfh=hi_3$7I=;oqA;kPe#^^eb7a%J&c*EWXk~2%ixOLB(>3hGkOlKuG z0t@6!Rrme@P!E-YnbNBGuZ9lceJ4ak&%s_eL|$(yzJaWJ{bS zKSG5ISZR7>4U$A^cln@~>DX+m>Y4%l=e7Hbc9{;orUSxeTzn?nK%@fq3-F$v`O)eg#lwSY^z>?5wPVZ>?w%PMH!)Yg_iWK?Y<>^ecf5(CNj^Ey23W%4+{P^ z^j?Or)}MM70nI#zd64xOar#i>ht^`^o+&R=Vfum`OJ9@nHkQzOm<+=DCxKrqU0(gLH=Mm}v;3Gx}QhZH+d^wrb+xr~CY}c&ErCkEi*@SQF z`eGQU$2E{GY-Jh~L}QbC2Sc&)c|9e+*)8aTo0N2cBmD_)v8bbhH5l<-`&WO)da_DA0p@)$og;K4#|68R{L$bx6|a+6q? z6cq12V7FN4XKf88C65Cb!JV`VBhbnyAnbT#e?@RcEa)K$B}qDaXm-1I_6I8Buq~*n z$5_M;scDgc(R9$Pe~YnzG(0zaqX~WQXGD67Wt0$WLhxwVF`>bG1g*b25f)`OvPM}9 zABV7PoMSZzc628Pd{82~?E0DLR!-j~dcRind|8d7;S)w)D3w$F8Rg|#j#ly&Bg=yv zfAc&1S9xHZIps6y%Nqvi5_u%;J$3FKOmdolBaylbZiLcKqS4aj=8T`=uUL81v{9nA zmag=Adsb)8OnM;qcGBn1K}?VCYe7s%!`;5+SZVh%00D<})CCRGKI_0qY~Z7=@NTIM ziKaqg3eyjzLV1f7VHGe=VrJ-;J(>B@f9SY>9jA%KAS0Nv-=^@Y{L$Xan|@dX{FZw6 zxV&`jLePKWVoU~!CJqXJ7@!byd|F^&Q7(tx-P6FXXQ+0#h#Q>|=$Yd9b8x3_TXbE- zF9fR<{(x9U+ea2|zl{6fY8@tzF~*KA!9X*z)S8@?7ek+@E$9Jz3s3Zjv!Je$K8#f$diJ+gTa1g^+S%HJs|A18&Z!|l3kkvC0bk!v~vNG*DEvtze;Y)VSU(1?5GwOv=t6R}v5M3Z`}1d}fv7d?^gjAR z8FS$YV#!esx~P$hUa>bYc zJl61=l)}%{5+zZ;o>U@5FjLPd*p(?FM!8QrG%zB(2CjZJL616HnMX&Q@I-=Xl*)(H zk37GUz7cR=ErlAsY&cTQ=BI|6h#?a<(IOQrzY~CY<8`^CJD*jv+G&t%gTP%OVi(e~ zx6wCD`aqmRsZ*z22GIb0f9tl;US=@hE!T5kqHFmrrvjIGY&pkNEl$gTv;dS9c7J!o zPk9Yz3IpCbRyRISh)M{-+vloZS+n|CLnmCTPA^fy97}%5;2z^UH@L?z2!z=RX&C6? z6aJHtpiTn>AON>)kB+;zh~#;c&Hyr6M9TLkP)(}-1dEcX0tFq~EJxXo+!A+!`9F47 zrES3mBNQUTKp4$ue@H%ANAe*7L)yD5cou`AI-c4fzkqgfP+3xE+=Q$aiygo73bhTq z_KO`~F?eUg0xP`Og=?eTD^G}_xz>x*UQH}`5O=Q}s$HT^u=93iw6+E6fW!Jt^oU&# zLPwa6`T5hU`fb|*LelB5i^G^yR@EDbDMPdf1qAEAy8xj(e;}+JXe)MiW&}B%$&xSF zGdy?eRA%!)5HltqWYs>LH4(HKxjP91%IND2s_^dsqRyzDsRWF_~7LS zy}Qib)?E2@Tp#+QM1(V-Fk3rtcS6)hF~9PtN6V@$Kn;NahN$zb_%+kM(Xp*sq$(DX z-rp9&%S3Fpe<*d_NV|LcjSa#>_(tU1gff~xyRiq2A9`@Nab_?+b??jkOh?+z_oLnX zOX_%P$#=NzqW5ulrRBKb5Athkp&d_^ zA5n?{qH%`9%8rAPcS`1-26JXRByx$=$4mmBoOoeC*^E*D_1+^T-I25`^Cip&X!J4Z z#-7y&f00cfprE*H8;w)>qO^wP;s9AyoQxY*#IAz*5_f9VOdUC*78c;N2bcI<%5U7@ z2CEv5{U(PBpLhnlXbQW7tZ?72ivT!4$G_!x5f`G)wNv(zC{g!~w9Hi}@ScfCIk<{$ zLpU*b{FK}g(t68fQ%ASho1YTvI!R26D=mXbwp{kCn}7JwRMcP>zAH?nmc$vdHu9eF z^+Q=yB?6a9qm<49T(om{wbm35%*l6wL+bP^q|rpU_hLOp_EBKE@~aA@G;`Z{Q6B_~ z`w68pF6*($#-QbQrITuQII_H8f-{MYuGwuh-4f$|FzQ5Oq-}tku}gsB=T^O2aIA~w zzI#zm{D0#kG>$0azTaz)rc504@c^X^pb~36vO*EkMzleFwx{tMxTR=ZEjfUI?+NvA zp%E?5ohnJ2@CC1WXc4I|;8L%9hCAC``=?-)0tSt79%s;!a>N3R&QTM3CD8_gB?7#Y znK5k+?2c7-OkoH#07L_-1poLc8zRB6Su26Tseh;hk-53ZR(Pe|ew;nLz%R4#R!PfF zTlU)vmLY#rhFDa%j+wepK!?^IB&D|2PM31<<0#I}f_yEHhJX|^x#eJUQ?_ZhfiLYR z;8m0@K!kAtuz^~&SQfWE*o7N$8kK`h%FcRw+xJ=x`U40NAhjH0(#D}$Lp~XxPYXzK+*X% z=<&#zXtuTZddD^O!_8lN)ZOPK2Ks_t4PoSpucy66^dhNsC05ZO$;9?Yfc*(~3me!# zh+qtSgJ*J4AgN%CmA^fcH$_Z`fuP+duYbPn*bhho%VDRW@f94eXTgDQCW*Ph1Rek>&kws)q4WJAd9fNFL`k?Cr|&*Qw;5MTVFH7VgA${2?Sj;;79pR zD8gaaj*xcIW>Jpnu4s_Rd2t9FyChe!~i(j^RX>Oum|A$64kU4w`|b zI8@J>(UxtV{G*v%gqKcr$={HgK+sWdUKm|a6->>n;UA5v4Jx(4g`YU8%R{-G#HS1C zxJ7TLhx#`_4Z%Si;_ZBk;Q~arAbd%G+y36GtQoN{B1lb)k1~sf8Mf}f^M9!8a0$gp z!b0a%_dxXg`0-bD$S9rh;h1gd1)@GCHpbYu?PL&zQL$Dx%I8fDHL=#ztWQ6t3|NMW zhhM9#bE-A6uL`O=NQ=$%?rH1mJTtDsf4HbGCdWY0kL-FB9-I-zIi<&9#tE~AXqEMw{S{M$)V!uf2jYwFujJ3Iwt&hSvO<%*@B1r^E7`VI? zcLwImU;rCEpP5$8su;2O$aEE*fy;(r1l#h-(*;C(0y|4iNTKEZzZSX>z+@o^;K% zCMlg2ontv*2nAW@`N&TzHvj-WJ4{OiKC48>{J68Yv$#Q(cF3g!Y{&fL)r5 z%uyU7S3~mO{)-VDQGYiJZd-e|t|1s=9WY!LQ>ysDj$H=cCAM{K+&$%3*MkLn`rZ3#vUCy$kIj(^d#e7fKH`|TrQ25AKn zmSZRp-NCl3Lj)2XRUVQAsa5Vsd!bDpFlY-*KH0hU(9~JV!@l+Af(ibKUJ&vsuPD~em&BW&KJg4# z>Zp^G7SqN1G1FM+!@y2zdGJ)fY@{qcN0XcIvsovi1b-kP^lX&aB?nK%)MIo|EVg>a zDEM<1%2G{)-N5%$OsC(?A7_LsNi;IMgFl=&ZUCGk%g15_$7tRBPR13?p6vQa73tW~ z3XYNbN1!I{FgIED`k*$AhzloPS_q*h%v;+qi;$HINx}naW-Q#}1hOL}t(noXS2cQ0Bjw>< z&vDW%;}gV;lnOe5tB20AlCYAs=RhlyK$-1H5Qd&x0x9{}Bx8u^d2+=GH8tR3OBL34 z9#U#@Rw$`zol##3{eC4Z+4`mueV5D6a`3p^8S$5vL%tV8_} z@8qq#p!f(TnjjMK*B>{Wynh_P=);$@$a*+#3fjW%%QCda_4l1+LLdN!-;P`hC8za7 z=^I~+ceq+FUlk*&X>p|fm=wIKsbA#{)_(Frl5JB`A;}??ffB)^fpY`n8enuBcH&4^ z7k{)GLtn&UhP>1es4cFxP2_WqQRSwSi&=r(mt{9427@xk8GD!%bh9Fu23&Fyc5Q=Tv{0sOF4)Ioo!7!x)E3eVC)lDoBXy0yo?B27z7WuAWU$=- z(o^gq?ajaQm80B~l$=uvBHFy2+i9ow{(n4sw>+09OC=#uH8W`Y`(285`;VdezyBZO z^&f)ufBet?_20p)|CP(?{D;fh?f>Dj7WeGYqJ{?7}**3QQMJj6KS z=uZ)V{FATu^*=wopnx?TqY3mE>7@VU0N~$})_x_U@R&=*pPCWy|90B`Jf0x_8%#{C|BrDwnl7Efjk^H%=%D?@j_TODd!c|ejouLNwr%CAk;)DDz z6B@}p??l-*`KzDc|HJo=f6qZz$+srf(pZJk4k+oILU4H^2Vv`2Vw6YXY>u-9S7Q1i<9{E)Z>jw4E1Dt%~%ES`#txJJsKuCvkzeywNy}Cznac^Qs(-oz_4b{*)cA{9avMw6{rRny+G8*JBZpGrw1kp+d3{m~ z3wVfew#KmUoJ&BJ6h7@vi}rwrBm?za{bPyGcd_q@#Lpz@)=|lr$jO{v8uh?9*qBNh zw~DVd(FeJu!N>c+uf{hXi{0G+fx;y|(dAKdVl*nziJ`AKF@^e>dcFhaHSc+^j33bwnP440Nu8 zUhSC!b3VuJDxE)x`#{v`r_1SK*W6?`&##>(dH7%z>cj;O1y=vv7^d+xRX{ zDo-;y?aGaHjDNOEv7kc39<`bNuz~98Et`H%Y`QOLNC@EOM+E*FrO$&YmB`*}`X>x9 z{--fN&aXhbZ4-AbA;|7qlc8p#+8IacJXe)O9-A`9f5wsWezB&_I<{#1H7>oC)6}hd z?Xglpa$o8lu;@!EI3`-F-wk35n9XN69)G{*=Tm`qu7722dNt;gV($~0t!S@PMg1x; zi_BVfkZ{4sg*KB$#Y^W{ZcY#)2?BHySeJYJrCU{m_jG9uZ{=E*=5s2+{nX?^5dNLDW%S< z-goly6+{ z`$4U?9O55D$E3Bm{VmG9_c=t&icUP*56N?mEq~l+hZF>jOJJY6Q1&a`Hs+zq_lP2qr46UTs*TZziGZC(z~it*41Y5PGd+1(!$0+ps5#-pz9qnVf;PW_g}X9@ z?S#OTxbRLt2SMH+~m^U;S6G=L&qi zvFz_hbZdjQa+y;l;}z$#e4kabXMfp6uE?I)@;ydA4hBaPfb6A)1mN*tGGmp|S@Hw= zPjD?x@P%J93f?e}lJwAL#nzl5-7CjVPRppwMzFJw!?h#YpWUD?xM-Q|wvgimiKW;i zd20=f5^QzT#8kHUk5;HWPzIuVt{O#7a#v{E;$#A(srtqmYesDGycQLG+t zw-fiJD2v_19x7yRMV4pRU5EHpcwIepxRzHMSrx*&72O#AM&(P~7C#GvrpHRlCS($J z8)#(5lDjED$bvZJ{8^7kBw#064E4sOv^o4r@ax+VCN2#2PT8@?^R^_Duz$2>Ow^k(R+Te%W9Q~ME3xQXAWdEjP21ue24VVPhXWk% zfiq1U_l|X6Lc<>PK{y$)#9D=%fD%QxA1V>Xixh1`-1pib^lyGqHs0UPYWi!}?ASa( zzf%;f<}Xr@JuP>1FY3GeLGe5(6@D}XXvi(43=@8SEg@G9@Us3(*njLB-_SRj8Y4=r zW4U-`3mD3mnMGH!f|^OlGtq97f&JLd=(Zb=K>8=p7qX!j#vqmmPup`fsEU(@tm)Z_D}Qwc@=#)NMPn$^worXV`RVn>Y)SglQnr60Cu;9Joy>h5AB}*n zVj(0#>O(RXQHQ^R_!o58AQ8wera9Neo)YPkje_=X5vw-2J+zk3n^>$qj5E;?4TBb= z2z(SxO>edR?eh~Acl(@vtloL-Ub4${XSy_M;I~g-2A`y)-G6OAF46|k3RfrMfG|Nc z(}e$opbE^NnIXsNW4nSnMW8rfR~FB2ZMweSb0fjTu? z-0;9RiqRv21WyLyHG?}a3-__sNUU2}y6r=_T>io!zhT?&_TtX7AraL5yt6<<4J{q6 zvo^i0xjB_W0G)rEEey9TVC{P;rfS8>5<+Yz!@yIE+TU$DL76S!QXl&V3AdX=lVr)Z zBF&p4lz*4ymA0mVp#NhyG1Y(?4V$01_3-pGd0A^n_)sK4vXImbPIKJ_HZ*}lp91vk ze4+3e2{DTP`^nu<;7T)NI!=3Q$LwKo$p){`vOP7WHoM4)e6_^O!>{%-JuDtnW=B^L zSSTaTANCFgVGXx`Cv$WXnqxl_Tw6AkY@!ef<$qNd{l>jzw$DWMuimpVbLbSuHN`E{Ol7KC_8g42vT(T=`{F5KJk5AQNg3=*l z>JB1jzQL2%!4+`RT-gE&#@LcqG8EUckVTz_5jv&8%+ghOr!xKy_v@fvvRnUCkB7bn z)7$vf+?JX+JVpB!*@?IFqFW!06tNTi;Y;Q_VWL7m_t*Z4R$h#xhrho`2OyV}mVcm( zoK&nc@SMUfu^pS0ZA@L*os=YEvpCl(8&u%eKvkA!5*|J8s{SO$#P)!xMN&vaQkrd{ zJrJ?cWyJ#Z&mIrj(o_2~r?MD?PvN{ZoA9UlI9!%}W7MQHS(@EcOk=Mz%V&e9=wbkk z2t+9gXi*sioSW0N8z&$(?ELKdN68575%IGWQ64pT^qjftH{aU=Q3jF_TV4ESE7)zM{5*zE)V1Rbu8HLQ$4n)D)0g|$b$gS^C<7PDuc7*J4w2gG%zJEv&_ zKfj=n%2RuDOVSS;F4kFu1 z>fb?w8T27H2z4La0%QVU!Ql26Yncy#au7W(^Grksy`Yj9YvMuy4gMe`w3vQjaveKl$-d4eKK83I~F&!BcflKjlC|Yh^8BtMrnV8gq>O*&0(|@)QFA=Z1iFeO0az zX;LBHhdREl5fYIgD1U~+5qX@BwFq$cF(O3>qsOo5?%n-FH3{+iUK#RhWs_wpMAgw? z73QB3T>^k#Y%P{sTIX7|MPFg6WAsc_vfAB^_DL<0*e=|lcHtZ23oq3VXY|W=7*V<= zEphV`@96hAppx|r{JrG5Jc7id~ z#=rX5kN(cxqny2AMkrnJ8#E*$JDj7wI#C$h%X0Y zGzsxvdVfW5W$kY4n3M_ndx^~`e5YnaVW8hyMm(&Fl+ufQ0f!h++fw=arF&Hnavpt| zs)(E;aflwrMUe7b9FDoM-*9|-u_^PaE)|6I=(?CsVadIsv?wY~HTz-wO_c;LUY}AM z8uhK%8mh-x)~|Baf$a}|na^?8BC_^_2ZyH(+<$$E?3BefP_^@*K`iD-ghZFLc?#3g zUQ4IdW!=Y5pf6O~Y%qjqJWGgG?$IsuIjj9XFEWsYMpV zEUA)%W762BzX0^*`gPvR6xJF`1qeA#R+&W>1rQ0stMSnaZHVN$bYz* z({+RAB|WWnu;z48bd{A%37X>3>LYgPpKKObMxtu+eMM|1=r#ph`a0twQuCpZ!R8$D z&unRhNU$C(|0=2&e6YK5D}6PZv%Gv%r;;q+Jg5t~$riOo8z7q^u(MVYXloNfb{b1>?be4MSMt36mfczB9c? zMJyfG3w{oY^_-#4+r2G->&`*ViUuwZoAvtF`=M0(a9K)OiH0GvaxL^m-+$CYCvuZh z4%3lMoR>7(izaV`-~xnF?I(;1rwrLJzO`1-C{) zIXCBVZ3U04Y3ZH)M?VQL(Z6p^rM_*|4H(0VgK3?_Y(?TRh}rxpFdgi{4mB@c3C>JIVwSR(Jabu zAoNNJ7_Csrzmy+RcYh{8?yFQTDkLN3y3HL(M$0gs902mwk+elxS5lFT!m+X; z0^2A_C@B6|u1SBLUTixpupqWv0miWTX}L!~aNP`G4$G{HxC<|9%47lXf)qeyL9hJ>@J?<{%Ai9v< zI*EVXgjD}V%2Td;w_N7sGxVBm~$s^lF-Eo9!1`Bcyegb)po@0egrM|HGd+rN0rTwwy_zXNJ5_>W1CF6 zOv58VcO}5|Jb3JP^Alr;kPK#g3(K-nSfgs@tw&7~M+~hzW5}NHt9^uz$EO4W`%bUs zfvgU;>qKB7!7#w{L3q%M$;sJT|GYT@uz2^T=vfW*WoNqxG*LOy!3@l5c}mj~6Mr5&RL4|$r5+mv4*@!XX`=};hd6>I zDP~G1zTp<5K1JcDGXV$D;ff}b=vu>>)t0UuB#6en(WYIv_ndi{TwQyq=UE^=%(jXb zLBlX>XZOZa5|l(@XeJ!Q4<#Y&?`ggA=;*0gpoYBzlbZO*F{spEi#d}#o|g!?LDWg` z(|_Jaa8;KEgrg}Uz*D%(4 zkwFCw7#*A4)VfLbppo$wA@6aJA`OSN)=JI{ zy_KMbb>`m8spA&m*L7vV+Pb-R`#^Dg%zfr*Qa>x`-)P)zTXjR?8fNY{QNDgO zMZiRj?-Kd~6!Mw>Nd;xuN@bbJQW5U5Z&}iSHcD{cLL(&?eyY0u`D;QAnUkQyxPOj` zbQ|Ig>1I&ybNRX+pOM7jt&3F(=Dx5?_2fu#G@of{R^VKdnt@1X(Z+A?r#+IW+{^f3 zsnup^bZA#YDL$FDF#$MvkRND_<7`ru=Y7XTM!R^8DuFH1uWBN*aC!;)7GK-l_7jLb z)q?8JGR6poA|+H(hzGpc-5V~aQGeCmoWtt$-}=rJiOM4{MDnxqc7wQ;PIP(YVZT0n z@U~}QxKp$VwAzn|-SVlzm?g8B&rZ$9L7{RH-@&umANLiwTto#9NLIIBNaJ8r;&D@SAt7BA>zz!^RF%2k){)yuQ;OMhic5CE-a zMA0)8`QBnxFF?KKqKTwYVj+2*(a9ibk^X|uM z^I?2C$GJu9*mJ$f^3A)ds1AWln5@1DM9KwJ3|Myd1}OV|cDru+z}Z9#q+35kRCdq_ zDcvSHR&U?ReGj3<)`Nh2pWrGEzmgfO6e}8)XNwVvHb!T@jUO45{5 z-+Oa_x#Xr@9Zc8~X|2D5q26_;KlsMOWvk8N;L>L(?5d?$$A7TID$X#5fN$dCeJAwn zdCa-Ee-&?6)R8T^(2h!$!A0uF%tlPdLP98I2L@+n9ohn9;P14KFMWMDIi~bJEbWOd zjHbe|lk^(E2Hk2^#h_r|TZIdH;L3WzFezCO^uQ=K)>@mR8h^J%+0fslJ*eq9X7V){jWi5_AE)E@*`GH!Z@e0x)m!ZXnR#C1iD zt2F?50e^V42BV`_V8cArB0qOXR=IE=LTMD$i;NT6>*HMEIR^%M?V3rN%ew7(>4QE4 z?KO6xr1Eydu05=5pn@PH%-u33p)>q-rhB3g+Nfl^ZzfqBV z-}BM3(RXG#b%LDe6V<=0H1Vm#QHAO#{RqZipMSa7SX#`+V;U3@&>myQc#~6iT?eQ^ z!mKH?XG)0}Dsz~+?YnF8vmKrA1(WU@4a)IMMuakv%T&Sq)?}t(r7k*px4p5Jur!b% z9e+G|H8kqbqV>EHPMGST7h9raD;f|Y6w<$yU*GKb8KLOKZp|{sn!Qu%*8xCDKXqfP zR`c(l3q09l3G}zI;@N;qmi7)V@4wxO_>z&dZ zE2H&Stzb6_Iv@;r+!P4haP__8WP+~>Ox5;fOUOfa1d{G150Q^swIPl#$sRK-LVx?f zk{%{Tc?hvho0a5S?puhRfd>P$GOXFUrgL$`5-?(KnMvC*kClKtt$;rb zXF2qhpKFp~x}5*k$2CQ_%y@)cS>@+d7N}@~90PKm6@Le?cB%TT|+MF;QSx zGtsB_?~@LYD%83k#%mf0CtgP1<@MdbDcfQ&?~bDDK59S#^jlaRTlY&*n1v+Y6*1Q| zkAie_7+Qxee_BD!HgvmKqH_wm=GpyZZp9ciii)_DEH#~gultT9mJIRJZ-2N;AJg`< zeyQZlsfHM&z8IB_rpk^UoCV(}YLb?dC3|&H*@8f#2`4q`(JFO^Qaf+bHS|eRuAFQ> z>eE_u*&@@@N7m|Gp0X)FZE5V|i2Pwfy|6s)(t{|iI#K-8hTpxMTRp-dH|%aC^IkOe zfC)89eqGTxafq%5)hrLaBK%5PV)2}!!oVIfjAIiU$R1Z{GifJD>;=Am} zR)b*$!tJRC8n}!BLnD9}36tfa#1yEzS!fFR*=WeZ^wOA?eCCwEZRCU|gFh$!ZrLv7 z?Q}d?(E1XN0ZC@N^F_5Cb0E`YR;*sc>bh&cAKCT{8Pa;{jI`O|aevNr3Ffq%H^WB* z>d&#>0UG>ycYB$8h6z?GH4XVi9m>xsNg@Ic)c>hhtD<0)cz0Otd5vZkoo2H8MGx8m z4PW*LGVpRrHV67lhDYPA+dgPcp41kZZv-6Kiby7JE&;sgAXi|VhUD;X?vB^N`ufYe z`FNtoi?HYBRr7~GWq*0po1tISBv#|hntOQEdaqjkZ(hxXQQe<`Y}x=&-ZEqPyZuId z&pf}&%vtACpWzf!01?@a)Jo;2w2?y8X;xqD`g?)e4*7D_k4zpbb<7D5Md>(pN*T}I zJDnZ9*c~1=AyCQC79#P|Eu^U6RC){=NsCh0jx(N5QiuH+ZGVY;N=+Y=0# zXZW|r{8Qlh^%bl&f?9+y@@)!pCt2P-B*J7ppw+TJ|CVhjLwR3pB|Ut)BITb+cnrq% z8OG`6UzX7|`c+{jFr00?TeRdEhWO8!|kE1t~L@6V7Es;khfX562E zS`55P77gmTiMv((y2lZpJOZPTf$|+dV%(z{`hb#b7VKcob@8@5_|6obic!0707j{6 zu7Oqd)&rNTAlFO+jG@t+$C6Us zECVkZvg!S(V^AWBk4c)<3|S1t2-=-9CKp7)+$usjf5hjHLv$g#(yPj0O zg<7IUOum4qynbZLm-^K`Gs`bA-F^S5b6#0YU4L)hs215E3bMqpd1jgU!w; zY-N|G@{AlwZEN=DIwDsf;icHmmHuYdZxEErjGuhvJlO)1}eNS^G>5ra; zl0Cd4tHW*W;_J7M7vDLk*Ggf=453QCcH-($m!gEtWa3VoP;TF*Q^ z7k{ms_;Wv|`Qshu+c{6{Ei2P$GX&Xgi1M~!n19nNtA66quFU{99RDJ)veD}x1VMrhzf3b`s$D=h`yOv0rV4i1Zk+$sLC4P!>xEOX(tj`j zrXt_#Yz^Q8c^!GODxegpL>6IsV``2BQu06|mfM)(hljLi!gQ|$cC&w)!t4p`+AJ@O zrG$|^#(_l<31UZ0}Mns*BRTk_-i z46ju>iA^O&*FP;XLHG$9yQ6UYGnxn1z=>!UyOF$axHY$6_~b+m`0k@!LY_#cnCq_Y zDmw5;O=1KnB2;!Qm1UY7G1C;kNd%7HJFTfa!^*K?T$@Y!s-|TuYC5vQ!hhTq`G)V? z(gB@G)%q_DL63vU@@!6g(VbO1JC>rWSlXHiVv2n(%4Ks^VF8=Y<4mQw|KpiQUyW88OEUKw{2OO-p*ZYv5b{(et$)4j0t8(8REBR9H(T%dye;7Mf{<>&*l*r!D9p`KySEun z`P{bR-|&`>HuPh9_GBdP`1mwL4W{4SzIDZJO=W6fei?Ri^x(u;P%zmP&-CCX3Lv5o zVsim=bEs*6=^1@YrCZC<((d$~A%#9Cs?^V%U%2!evAzOkt7Nxn41ai#&*vG(PCG%s z5uph-Q^6yh5law7=rfHWjWj%$BUtj8PY=dv-wHVllDo6}HkTeiSXgN4R6SNc=zi=% zc;Bw@Cvr&!HRfaY+-f!3ao0MxADaA2T z&TSEr(x@zla8d)$S*KC@zP{#>_M>W!6BI7a{C)Q7CxN!o8CkT3qrqhrKJ;h3Ze-ye%U>J`FmZw*rjBx(QAHYuD18Wj&HgbhR zg(yosdghJO2?euGbET|Ilpkg54wMPHDOsiMW2ksVw}0+5XT9LNlKKxafb24Za4|V4EgNfY-)T$G1fMd)wHK)s|7NhQbwKnbYS-#^S1aDkEl&FZhj! zck>qS+8*FI&z$WL&}wQ=k|wS~p-ZS0DcIgDm?zEO7;IIWdCh26D(jDpKxKkZcje%R z@rh)`$bVTMO+NK+>`!ERC@EK*-EDw%geZ(#;|=Rv#1a#wL9hs}@E4_eN|mkHfailq zUd=5QM9&PC6(&RCnhS~fm>tD~W&zyO%$#2Uqt@hlGC)u?9pBBNlKhpd-7S@Pk#k-F=2f1@zcyijs_B}ggST?4W*Tn>^9#LwJX;B~wx8Df%Hkbt0_KnN36ZtirW4p>K^fz9EGbF<3N5T}5K!v&3%RV0a&r})-zndo zdVO_*#Hs;;BBGMhkkk?zCjrpqU(I;m)PJn0d^#TRaMSzQCy33;_<^kRS~ys)fzSE_ z52X(T9DVWLykJV%Hk0@Jo(Y*i!LA#CWg5vq4n`Ncmd;qRoQlGI0Qk5V(WqFR%ZZ?t zKMr(Lgt{J#r*t#V`^fS-m`%%nKpi9!uDsNHGHsX)l-~g<4qeZ%O=Ga{DfsyvU4I2< zbw6h*FQZ3~%zJ54HJA?=Hexcff#-7V=iokWGfeMWeCYl*a9FZGeezf2^Z!OhWny4W zm_rQTNQ^6JaVnK*@~ug8m-`pcxL_B?!Nk(9Kf2$}X?F_7J*)jf;qtrYANlikANMtBg^2jT12m9 z6}`;bT4;eRsk^UHxF zYlV|vB{mA(izNUDtq=Vt&hby5qJ*?QT%#(xgNwLZZnhn;W5blEK&z*lj_-}T)0_Ct z;uZe0Vh}Q@F&HOz_>l85nzw{evOsJ!PoIeK26VNes3)xJ{C@HDD(OwA%D3v>KR3a5FS)iwmWU+&cPr)>Q74X?}dnL&~^>|kRQvy z&ReegKu+Uo-V}|v?6;S@Qm;!yYnCw}KBy2Qt|n9J%RQ}dHJ}$pJ+YO{=e3^O^8%!Hf+O>3{p&$a`BqJ)E7* z;7St?^$Ji_S~yp9La`xAWUHZz2TfHJphO>eI;txN+Z|^!-24^<{zH^yIn^I({S&Z_ zYzBQ_6wuvfW89$iO!qj-p8DF0dwvZi#tyA{SA0!2r6U1HOCvN^PKVayF5UB_UddmgGq{8c=u{rQPO-}IqHW5rJL2BTe;_g!hj9RI8T5z zyl~yn1}Fsuq;bR^PyC~pBGa8Zc305KrCr(5(DY!+2YCIXj=>Z9dM;tnCJGa-5wp`M zZJ3}4NN{J$TYd6;h#RsciH8XGjlb9}*6N`~|sih$`-mI z*O|!5IDfX`I+a-4R2ntLPWTK~8*eeFPBP=3yDV_JTUmuK3v=3I8p4 zrdBNX{?Ug_lRN(?@2iYmrnHU%Tz5T6_W_@M2q;>dYn^Dc7;2!J_+6UU#fZ29RA#C> zy$U}eTqt7^b_y~Mwph{NQje9=qeTlv1%J0h+2=zDb#+G+0OXk;P0t<3T@QnzQ|Ss^ zQ04i6qQ|mA^~iR(V%WkAYxIl$lAT?f-&b5kE$4t(zyKh)ZAwfXGGmlqgc7~dDW=UO1#Me<{c?><48GkTH zW|1h%DDt=kQcc$}8x02fosFB+`-_RQUPV$wGmmtmF+7`m zxZS4t-5+q$;vHkz+YA>v!~)TUBe&nu|)&fsZGVnTW>)8nHM#4cmuL`-M0g&aYlgvdFwb!eIOQa?V+Fw9wv#wg z;ux>~-S2cZA7NYEhR3M6Ssq!?;mhk#U_hX`8T8jRdjdn=jyqhC@g#^O;;C~Hzl^Ge7ORc{vP(#P7XM;vwKKCB3iPRd5V~ji$^3y-922^MQ zPVpU};2*Cx-T7ZN*iYnhvgxHeKl6{58l1mB64`6N$aXF6a{vD>vYw; ze(E~Ed<#% zbd}u-UrU^Bj{X&7O`+r`;+vH{63?hA(*J#LHvIuyM0L^#k5%C&m-42KOE%pMz+f*L*fMfhK>_XLCk6lyDzY=;?Gm zT0%0(l*m0sLOY(iCQ=EDW3p+qmY&BmUI3BX;Jc86rH(+WEGCHisam7G+Cv29T(8)r0=NLAP!!x zVlPLbi!~r+5|Mx85#HB=Mpvf7vomTF=3;;*E3LH!-)+@0@u6Bw#%asiv;hQ9+@qOT z=wYAe+^BO!hR0{9=&Z6;Ag3NjzxKrHAR>g8x^;w7i z>`_h=-oH|`^yJy{8tD}`bW<7l#x)@om|fnyHi1WXNz#9ebbmj#@lj?u)nmx$OS?Md zwK^%@HF^6wgwGu_+^>UA)|YqNjwu>S0F+F?zg3#_Oz`Ez*c+`lgciQ0?W}qXDp6*{ zV_k(%@JGQdVbQ-^y~|Maim6wQ{5f`~d5&!1yX^YxelF#0o<#~K$Q9i3`~W3+GN@Kv zQr+g*wkUrwpG+KaN;hVK-omX_-G@~TswLYqhsmfaj1eT~{voBl3Ig40@zuXh$?gXh z>twj44>>wj3LHEHg@H*ABTe&%=ck4pZL(IjgY$opRWskeO9X#%4!@WcxO=e)GuYG(m(uk8k+S%lXAEW08;Tjs(qtNLd%m8TZ>a-3tbUz41s{v_4iF$=*kL*l zCux6JVkT;6W82@C*bKu%>P7iN|>0O-vN)tF*@7|Ht9}E_uT_ z3}gK3#r>nAC5!_7-3z|C|C8i|hVd9yVT6}V#0*n^|Bth`Yg1KMx*~4;5Rq1N{2)1nr(Z*Is+S=bVf6%=NU;EtRTKqefMNK+Az)(P&h!OyeNd zw!gOwjci0Mh-p@Fjf%4wmpkm=Zsr%?3#4AT#1c`r!t%?**&iF$eRkq$ucCM+CYi}- zT4fo3W(ggQ^N+wk+TQFO;I*(1v&$yiec5$#jT>=rj9FPX#n#!7+cUfNoy+WV5dVM-9yu9p-N(g?`Q%wIhNtk^ z7ZHo0)Eii*`gDmmJ;d_atv9pl{pRsCJd<>2E!%SUHRY(acsLIZy&y;i`f>YV?)#B> zm^kKj`iYa>Xm)&<&efaO5M8QLKTY+MpC#=;7L#UX1pV85kLG3U?kzt0D$2pv^SMud zY|4h$Q|;G5FKENZwJ&aydz(W8=bp20W=8Y4ngiEa4EUFOtI=$J&aGTrWN^4AkNrGv zL|=;aXId|BwmCGnllx*=qIv%joO)J1InF0vPVY3IonU44&$<602dCP&&5+T28OQbu znf3L2CzV@>C39!n``qhA$0u8owhy5)=C&aCjx#pKa5vd3^5LQS|vWz9-WSI)z{JulWhUNkV$NAH%$rk!mccU`hONRJzTH&n#O z479OiCfDK%%0e!gE;pr~wyP>$HoE7<8zdqtEy7DHuQNBzvdgEGBLj)J_gutcy|bC4 zfpa!Q3>@a}n5)APkKT=CpV1ZsL@D$Zz8M1(g+tC4y-q)w*gvP*M8IM8Md6Z8N8{6e zHad-90GI-Jn628+IgpYrho{ef>z5|!-lBOJhI2jF-G$+@s$$h`)wZ(AjVe4O_ryF6 zD&{N}#-P8a+llv_RK6E8TAKcgE)lP0>ET1x%_^8)jwY~fIr?tc9gZ?;|ntEHE z>n^4I@M1Q%0$H}+BOZj)tDM}|8=i#2QtuCEE*==OsW#iu`MErv^Y$dKaz0#M_Tg-D znS%G+Z;j7WiRmX#KTc)s2I3GWM7i;SV=1@PmpJ+Ef zre+)?YS{aQDfgdNJX@Kwcztx|&v^gEj`!oRms!0u6j#pVY?7z-Fh--L2c{ExymSPtGEe-{Rm|DADyWN5iosJ99VOpJ(mq!asXP?O)s+ z-LedyH|+eZh_>8+pYG||&!5Ou0c5oMd2+sxtT&I+wu;(JOyeotu1C+?$3U}pnD5JA zBF6Lb;RrspA9X?it=flE!99beARDOkgD6JYrL{W=-#a_(xol(G3;m;Th~XQ5a_xPlA)ic)v^XVY)?6Mj z-I+%s&(CdRP1m_DH->Q+FutApE$joKKgoDH+LqpAI^1cl?_`mYQMTSDv28^(fnv<} ze6sk=57X1xF87izlcUd1S4ZBxN!1(9hwNi@EA}7BM^=53t@km~W*57Sc12u;hYIuL z8jZ~3;ww3S-Ei6LP+_(ceDJKJr;SlDo-Qw*ji1j)o74+>@@Dt?fEmdy`e*_uXT2 zzimh7lg(MR4^~rknuhJfXV25eXg*t`d1S}E5$!Gd?8BBaOto!)!nT|jN!9d?V269# zGi)c?d1gF`{`suXc+GCu@4M+ZSZ=o?S#4!@!foGQrg50E-uXJaoN#@`sPCqwb3T}G za-!RRlu7>B?Vgic5Y#ec)6YKk^SECw^T`^>aD20?=5kxkVz(d2VW@?p@(~tW*D7Yo z@;0%vV!xUVkGiqggt&$tSwtk^V`O=0(Z23INto7y-sDUMN!sscm3>%FMB53zF4Nf zr=N#KvM5(&o}y@V9`ixt%){GtU!LX6Rp&jNYN6Q`3d2kMypDF*zsAY=HgJ|@b90PWdL73XNa|+KOqm!)h-2gf6JQls}WK{b2BWsII@v;A!+WKhW z#|t@FFdP`q_@xvCm^|dKf$de>xfXi`9pJ@iFCJA1JA}&3(4I2Zf`r4=pk0_Y=D&*qZFH zpWFAz_&MDC-k2T9!epYS~F)~0vGYnnIal})cJ zwwEEVq#smgkJmKZ_~L>Xr6?F>F44 z+{Vk*zQ^wmV>q!7n`fBp`0YseW;OM0BjojEBD|A^2mMog?dksDx?=Zn@z{PD$wzo( zNp&pvXwTy2<8BQ$Uj~f(^Xa{OYD0$z!JN_c6S*<_WEHr{Xuqm=^Jb8oPR7F=^2Iqy ztA78o7?D#16C?HE!$_C|f#-97PU969aoIZuxwio9mongsOL?KRjI_bsw?ilUbi&EQ z&(@c9_<-cPz8u@T58J`5xcWoKj@Pg~#P%xXcy+ma80$)-MAHYCE1+lPPIou&yv>Gd zdwyFt>x&iM&~p@Amf<~I-bOc0=k;kru|6C|yFq+hte#O_*P1!qe1716&B z-8RRI!*Od)obx8vn?>mI$uuL^bN`k+U145t658Q-u#fBYVSJ#o{#SWSmfF&@t|41o zKf!9}ST}~leeqn+*_S)hH}{^NkNTQ^&Bw_5m_an}(Xn$rZO@nHd2_zn>1=w?Pm{aG zw&~|RxvXv*um3qt+OMX6e;=mvJOsl!dbdn%%}>DQWy_P5XkZ-lhWp3*cDO&#f?zLc zZSX$w!`JzoO}}E^Kh;{|Cw{%)JsX$D<+8D`-Go=?!ylS+|4|wDCB6=OG(Bwds#;J@ zorImb+r02xSdaG9QLpm4%)(7?H7HMq;b6JM*=W1lERD}pKQv!|e*S!{mfn(@`MsIW zcKLqSxQD3-HtW7^?!V|sI~6Ag?2DcrH%sf8eI4P&M{^0ooj713WB)el&rTsrw)1}5 zpB;KE*ak?ful;`i0R5DRO?{&NN4iIYV5A526AgAFeJ2(TJ>5V;kT&?%@$U-~W0RNh z1zln@Op!}B5SpZaO@x4_kA_6DShRa;O7W4bB&u$g_<%BlE}FG}NlY{pdp$0a)yKtP z@x`&9OBsg~bB9*dIej|xmVSEsNp?@}tYvL7Iv$w5?4Od^xYe^>xj6baznVVJC~&r` zS`VIOVcCs`pI?)EzIjCVYL{HO`S|j0r`9p^6B@`9V|GD*9)q5{U5uGKs|JyE_AaB% zd9$I*#A_}`v3Fg+=OoASB<;S~^hY8;GQ@_o7$@w`t z^m_F(C?11*usf4&eyVhLP4;cQ_E^)CIHJ;(vqx>GYbZW*tu)Q&=4)q8NN|)3Mw#ZB zX_oHY?0o2d`{Q7GTdY3w@p$0PR@+B-Ge4Vp#6C{OaYddN)0mz=ypd^K@7u8%ejMWK z$5U+6FyBtkUUDwK#t%HXu6t#VgTcvqZ+?_odYMcH?&@pMWM3!`t2jJ*J&irAX?i^8 z-m~g^S;oEIv|pd%`Q3SbmD9wz>-xkSHHmHCKgxW6Z?V3%&9m!iAH@2N;Cqd>16%Ay z?Nze$`8byI&78RNaVrM>>Hhk_F}(dH1G?VR@$v&Nmi5OrJ!Uh152fbk@N!_wU2{iP<)d9`^uX2N6rYH@ zsW0}i4e98k7RKDks2=M6X4*oTS#f!}mgmpvV@AA@dC<)d=1>#qo+*|LPsYxKyX^J+5U#S+fDbkbiZX;SZ^=f_9!k&r@&O@lY4j`{Nt9D{j( zubjD7GrLyWg=+Kbr=iU!+eE(fms7tFF#E@Q7XITk3y;UoTNYlb4JC1esV@<*#GJPW@1kkb3cxybFMQ~4)LRcw_m%?y)Hv#Xmnzjty=4M_8lWqs zCL*cKLRu0eEeTk2zVL<#y6PXR2KGc<|Lpf>n1ICqX5a{*4}az(37BOSp;rf!z6eIp|)J23r5QKe0p3Jm=B@iS1YSbqnnTdo;14-Q4pk_uFOos(cZXHKhq5y^~CRKdx-w zbT&?-2r0YE*g_(I!>97pg@+S7>UqrfmsM5lC1fc-WZ(XvI0(R2t;P4GF))N z+>DQ`hWP~X1#lW;Ss~!#E#V19Tv!(wnII_!S^9?H#1A#0H>6mi*hZ3KJ!Lx;|7DutQr3-4IG}#z= z=NAJHCl_1!a(7)k^g3I?@UW?JT?5dz5&0&0?0Zal3z5ndS}qKJqXgNXP7C8gVRs*$RMZ@+oPio&?|FF#6?HER{7MVkGMci-2(>^C&(xMg`Vgrr=6 zpmA`~IBBAP03@+kL3?psozEL%zYNA#s!^peF2*)x&VyPqgwF(l7`D}wYHGmU? zXoo1&#tTTaoi3?i7V3BVBjk^zqaTdzhE16+QwKzkaFMFFl7JMt8O#o=HNp@KL9d2}R5-5-Cjwl;k}JQ6A66YJzd&|>#?+d=@S|0j(XIW;fFC>=czV)d z>ig%J@*C8|SE{h+l?T3UcCm%IGJr?8sR{989XQ>v<|Y>Hjd;Pt4D__-QD&2{I{BE- zY!EBk7i&F`CTbz)Vr&6VaKuaEWKiv3hOJcVFD_eei^{c4zK;1hz351-`na(D>Z+v- zW3E+y+kzHW6W8X(a~#!OBYNNE^a%$E3jt=;#&L(QHQ%tb^Y`WlH{=Un|HhCPF3^UV zzkSKh677Yr$|k#vNs%FLu2N1t5VjwM!a+bV3i!sv+yK6c%~s2h`CK3_Y{a=?Ae941 z0fIRMMF!%QgCtXV5nG62WDJhMn3l_sQUhs!2SX&AVq8^zrZ9m|OkfHJC|U@Dftesd z2VL(^C0)QR0vkXBk^sIRkQb7xDK%Z;XomycdUm7JqU|D;>Tcbs3pud@{<%Aa-K z80tQu?u)*0^kO$jhGe`l&@QJrIqImmFJMd{rRL@`;TVLrq-o^@11t2&9>SZxFl2y# z{25=8grjWRE~Dax3&9;;I;bwI5DeQGz+<69VE8i&gc11zDv_YNwHgoNwdRo389A70 zL!D@lX&$b%6>dN!N4_77^lZz|%69c7XwG8FFd*_s{_gz6%tbq&21jq)9W}KOz z(EmIBTz$4T zC1Aj+>+D%jC92iDQ+8ReA|I4pT9*CfUgi5O4ah47y8J+DwqbR=LNfl&6YAg2W-1Re z#mPdhRPPkYI5JxLlfPd$8=qYvG%t)YB$Ev*NQ613dR=8iM5vw>I+EiR_PGs#_H>$x zDMCqn`)CJ#M(V`IJVA!65-ewbs4wA6n;FMY*oY^oLCPH$)(P>#LV^+C1i{K*ZHFQt zKD9t_j071#Sm+qI3;n6yuNcxH;sz8LQW6rOIPB^ZaXRrN*FkW{K-B2g=8K~ngp1v#kHD2ig=!s9@7>_;nq3={*W^92;U%SwKl zHAxVVy`ILS4HT8rrh8pR)Dg^r`nZe(IoR68nKp7f;!Y3vOrse=AcTpQ(>};Z{XPRh z5vUPJ9gLGx&`T1O7AbgUaP3UeL*mJq0YVw*?4Qzt`QdD`G7kF#opnK6yF70 zK(7G1k#v#fLH_T5{`ac+I^Orfv(5+qd=4)gUa4f*&^uoJwp%hRg<<+DVIau}5(ya| z#`f+y_J9-KZ28HZ_77O{?lZ=nWuQfLWgydU{fkkQBRA)vE-L5W{V2V~ybuOI9*3wf zx?1YnceVrKLxtcpE&ES_L6o^Q@J%;Qv~Jo4S4u3Hz^8zJovOD6>Kw)qH;^xafw$h zZp(3jT&(F>3Jg-<2*w+b@!F6P0l_Ly9FA+whjz$(qb?l;Wk`5QxL}5nQV(Ptf*M#x2T*IqD`UhX8$-M@ z{b=k;fq?g8RkI^mT68L?d~kbvajm*;3i9AssL2>Jha@ZO6r5*j953 z{p8s1nxEr$9{P@VRRdqu&vZ$@uTl2zYU6I*zHzi&EvH;Air zA~u^dMOHn|-4rUqRSqBoM?}>!yjv@oM7mBdmQ0pP4_M@-j%8t>mK+cGRvL&HZ#=EJ zAlL?fqG~x8`I-)r8(3}*h!+&4(SoLiql*Y3@Ww|Vrd|jiNyXRqy`GFL5P}j^1*S|4 z*c5y2!xAJlaLNM2yA-+nvAL?3mVF6>7)wBVRWCV+|6MO-EdTNQEV;aFu5h!<{iB!v zCYx{m*y%#A8tL*~cYkXRZ_Q$5pl{BQe8-r7vh}MT7c4hqp=!pQwj3dz*gh0GDi%2V ztQ2O%{b{^*#hEi1J^mGM2W;o8o4tdDk2WY>1&OOm*;|Up#yqlEhQ1u5LI$Ef5h^z? z8HgyyoEQr$t+6ic`&@d!WrHwDfm4b^*z0$(LonhoL4HDxLCiL1o9~-wy*kq`y%o)W zUmQTdw_g*G&QrUjbX9banRHGjvEsQlqXU97$}lAl;t`RuKujl7f&lyCh! zR~=)+pC5^gDdLfVYx!8}GenGka^{BdCnYl&-soGtlS*<$4h zrvf1?Qb*Q&St3@%3muAZA)~<28HJ4REgc+?$}m@gb50!V3GCszW-gT8gN<*K6V{5;OeH>HkN6-2Dru@BDt42MJ*yPITbPXJZTHmYA6c zVuHB1n&0atnF}&TN(;k(U`}E0NZ^d3n&~>{CJ2gv$1aj8Co&UMj?#3S_|bVYn4$j~ zvx3CnQ~Zwib*Cl&#JjW11)cA7b|X^gSi~U#p>+0df55fEW4%)T(EJ~G+_FDh|EuXQJX?){)Drenkty|X1^?V+ADW-2HMVjz9i_oJlnQ->{og|H0-6mg0+l45)s zi61^3j!p0dBBImE-*`kZ5uHu&Pn@U6r=k9qj106KtNi(g2LtK&zM5*j3-qP(ydgp5 z`G&su=1}ohQg`wDH}CKI4RqDjigm|ZfArt~mUp^(Uvzl!!(|mV7yv&!!fs;S0E!Kt z|KeXAx5&bDY#@k#n3eOuk0RxtZ~ycACs868lGqrquSzmQzos7`w6{&^U%7^$)j2rN z0>Y!WEs2?)CMig{B7BU2N4X#Q$VaiM?1iMpBfV7oC_<7N;B!Q}JoDpYCdlQyCwWG% zXQDJ0#03I5?s5i*f99D4|KyM~#s)Cd-$l9(34^rv0qZe;?td+NeNg@r6Y#l@;l<0k z^@47_I$mao|F&Ll{jvS&%>jGi@RerCOSk;yW-pxnwNA`Lb;phGkkP9x=+%~VrTqIx z^L=yTKWoB5KlK6WS)3{<`lv5pb)tzsOQvg6JcPPZ6s1x9@4=pZfR zY-FF)1Accj29J6uTQ?<=6 zK8?B^0Rjd}FL0&P2gJcqI(R@fn0NgVgZ|O?UVWn0w#roMsk&v5)&S+pP0uuLDSnhRyNc^;f5S6#DmFb_?O?2Se@xCe; z>J^X3$#_2*ol&?d?u*j!LR_lvt%_DXeB)QBzIQ}=E{xUpRyjE6eBxDizO|z+pMBRj zCQ^AuegB&OYwh#a^_0(`uBH-6`+mM}-SS&Aa=N$?%l!lqjI`CtC5}PYTVcvr)jAb} z$W*$2os-_~_K-&gS3EJ+8Mnr0V0cbld#hA5MjiRO58*FPL$F{AqO>-lau;zzVxz?% zi4ixx%&=PMqSHZqB2-_2U;@H^T~8%&vrITKbR=WmCL0rGq3VNsr&$l08UB*$ERhn{ z0M27zxQk%6!@wG+i5lZi?eoGy{-f)TesunSKX%*v^oRu0UOeGj?@hk#`0CrXZyO4d zz4h~#U9;o@6;LdsW9SbaiWvc3e!Amq|Fy!!HuT;^seG7QGRMt)pDVa_588` z?QcK*gI(*N-uAn_VFOB7;0hqsh+x(>||ISIs!|}zD^d}a6 zeEjntxtraWbRZj>Gd~LG1G-*o+#_j!w9`5FEs?0+W?%@#ld7kY#rc<@B{(pcrO3a7k+C3XzI@dx{i+=f#2ynb-eL^gHrLP zzN!}*;euLEboaKl?0ED`=YVP52fL|+TPe3+>^i!H)PR*oTJY}YJ8 zJVsm^1aK!MH%(ozu13K;*{3yUR zu6VRmbU^h`(3Ow8j#EW{Ep4qkMr^#Cel52oQ_q2A;(*lDI1YZ-@BMxs z^u4MB75Tlh5LEpxqqjyo#{cBL|E95gWBbeAW77+HPMf%^*XaM^T3s*sjWxocha1BX zvPNkhf9WeGUmJ?w@QARSXr5&HX-39~^98ACpsSvfpbBS>YDNiviCF1hJn|1NhHtGw>89ql zeZKhzv~S+_M}yI!jecpT|Ef7QGDx+VjIh8^^n}G!T$i`|8N!SEMI+g_6O;XsL+nC2D!MEI8 z!^VV8*S(s5h-gf|-H?b28ia!2*w-(d({mbE{cCYJ3WbiDIBTwcOFzd90Mf{tN+2NW%4t> z*Wp8>*6@$^y%f=(^?Suhh8m{y@W{)Pk*(^uzt`>+Q|fPI?5A(p<=yrl^E&_J^lf~B zLxQ&j7&MRYwSfm%ydUcBSVgRjKVdElj>YJ-@89Anh!?DSnT8et?2-w6u6s~=MSURQNZ zr-Rb2*Z)v&@LOMh)r^=35)c_7;8xJp-)dHWYB*N)_pX~eLz?s867j~PAhyL0#H0t&2+AmD;e*8z0zpmQ~LT@ z%@$vKv;Op<+Hq=@7uBV3>gcoacf>D$G|LUPzRkHqQf{C^X{3=3Twx2LogOJD?th(n z^L8FstL@&BX3vG{0oUgbD7az7YL9&lAptV};SSPduO)0GJ& z0v=JqqZP=+wn@ZL<0DgjI31BxG zyM6&`e!=)ttB<N=^Dmy)agBWvt;QJIxJE#z&P#@Y zFok7CVc2>~bt|3OZZL>q!F|4p?!1{jbV#@tqIXpz^sXBaA$Q`q7Q*2k$k%koS9F&6S+!D&8euybA?C z&;K*G@jZvt{rvZg=l3)I_y*Cp)**^Y%54#s9#04eey6E8S&El7#f7K*Lf~)|5iBv% zSisZG+Dvy5ln6D^P8YL()J59b@nfYmC5vHnFtu4vmu#)H&)_wOdg=EC-hwj(U4sae zj#c!n*M5V|kN(!>#H4xkisK8-6j%9<|2Nl`?AzDhdL8>U$NB14qx5?gulw0|&uZxD zr{ikP9AlDxF3?QX+uQ%Pz6B>fL8?dh|K0CnN@8Ssd6$b`daRp)vPk=d1Fu<@&&T0Zu-E$T{6iv2i5eF2G%+mM-lwYMU+P&Uz3 z%>V@WayEi1;4h9gD({BrOy%8;0m9`fE0)>o2sw=Mg&olmGZoK(B9(v;+}b zM#z4+lfh%_XD`L~ypHmzN@N%+UzbsUGLkHH`I^Jw5m~l>V?^YHFQGc1Og!C&Y^}Pm z5RV>3a9M6j!x2 zn&Bf!EEhw@Qq@2$ELD%-x>`CJ&uk$2n-^i&3~P5Z8&N5-i$%uLPJe@b8)FMB1N0I? zrN2e-oI-(rJKf+XzLH@#d-=^x-+kR~kA(a^^Zmj}rFXtDQLTgWnd>`mboT!p2T zgH;Z-x!7^OTHv^wod7|)JYr&+=^#T^90o^B7H-D2mS7x(%NX2nm{>*bQ9J*+{VHNL653;27I6r_%H zhBbtI`(dH+7si*f!{bk*KHEr5UdxW(KNKzr;S9m(_7mk@pI^;W{=~>zKglDF)a<|G zQq5I=YxHM^KKX^Y|CX2kp0S5LiOev*)0*o`L2bK{!q)@=E<7wG1uj(|cFm(=L4-$e zm@?ac_i_-`lK?}yA4is$8K?%+;LIYaXV_fYwm%-|twfu&jBS6Gp;KZSX}LE*B8!vb ze_)i{_hypl7+1tt6#>z2L#I~#epOmZ2e$U8vQ$e`Fl@)&)e3K_LI+EJoHb8X>syQ z-uRNDSVRo_(zN9G$2PcL1s*Q@Sk;qjf)JOs1@TE2kwqX@SnnEShFLmWTe!3!)nbb0 zPlhGM!kvN79lZwA(t#sE2|;D+H_-|6nfiPVK4p6PkI$9Aef70|8>2?$L8ySsZa;>9 z$~)bDjPJO67N0yVcZW2>EL6MpYn=$Yv~oD8{Y%zYdvS1d zVzpdbC^0dL$4Hu(z|12cbyoA3fYR-MO3&6{hqcNi&^H>0hQ6%y7q1y9-izTL!&d1e z1N!l*6ZlAJ!#{bv{^@PKdIUOq&y~f)zqu~^qfs|MGcB+DfLQpyI3lEc^rz?0*<9U+ zJ|oQpS?(==K+GUAF)jjz;t2ah$6VxUI9bi~>X=T657g7f#vF6NeqSJ` z<7a9flf^H+l>Fjb$$#Zrb{-9Xh_ZVYXEcj{W`epobe{zJC2gyF%|vd(f?_WzxGW1{Ea_F%{r_99%)mm_DPJZ^e-p ztJJ8{qj9@CX~KYi3uflF>(oU^c)D8+2P=-40jqRnWkz&B)J)HIy^J)Ipd!X~(z91a zr+4&w*Zxx2zWTntcfh=$a?P~p;hNh>RKBTyS2mF|S8341haDENfoj^} z84i41ha%@b!v1-cBjg-dLk?~bIYvD@8ezHA+gH7akwj-sfAteOz1p5OD{qm_7P??d zE1)Mc{n1G7x=YC>m9fM>5+AHSE8ZcI?^v3^ujP%nUs)X6(xBr^*~fe#%_+(UeuC;U`s-@ zBuMG*)oYJjGZO(3#Un-Km!A=zq`YtCnkumAn#?wadC((FJuE5*@qDiRWi#?U{~ICg ztoFo3u;J$b+g=#ZzEyK$4h8wamlH>|I_yB{yYLr(zWfub^S?2ooB#MT1N$8Ubp z#Y>(>Lh&~6)gI|GL_7(QAL*zj#ga#^O`-)3>D+B)WvAj~3T>?m=@}{CT0wMyG@)i1 zIhh@EB%}a2ao%}vX`f35ezihP@!{fRWFe88imO3vU|C(x+gNv{>N{PPPLwE}h$}wT zCQ-M4&*i;ux$FOC$rIGfe*QiO;SaC+uA~1v+xOnjKRt-QYs6czTZHk+^|j36VvQyF zWa;>^Z-f;hMjc;UStz0=kGG*u2o|%3*V@p^kIQ2dyKcs}mf&OECu=8FGg)fC19QD5 zzPp7$Ixac!XLr+&_68Sf7W9dEscIryM=W1|WV8D!EJGvXjg^+Su77^&PatMkbpF3< z4gPDrL-lNbFru5c&wsSk@A;6wIZoGmc+I7F|KLF{Jz)=vqRMD9{z6bPl*cVesbOT} zjg?Y@ImH|3_9I}VvlcR{axm4i_-rEj!?#huo(n&*STAx5SUfA~r&QOyzR z@C0@JqEVMezxkb-XZgoj6yH4XXSVExJLcat+|L;o?={#zeZ0j~Y6N7Fnvj@(9wxa1 z#VXQ^+`R)rEp&kDv2@r0vP*|UmPI&FQJ6`0=|Ups_MXcURqCqi53pk~CAtnhf;lOtS6HRE04U_exrZK-#_Y-8Zf)F3tbc z)Z(w$|J&Bx?@RvhaJTZ0N1Lp=pT*W3aSIM1TA;Vc&hqbibjK@|3 zYlPGWtT@#L(?IanZs?7Fz|VaP)AS)~fjaFYHUxXvzBo~(HK&zhWQ;%~nM z>SzH@LDasJ(iUfBkEG?5)iO2%X@WZBT+BYvY~~R;D@-Jx-}mT#+3=>anWiy4NQM>? zTUY5?qC6Y&wA6j}_Vd{gic0x;r%x}_G5W{6@T-Q9|Ky(WKXS@{>&&D#*Hn8Pzdysj z_uKr&il3fE8xK0)_EWaz%woLKfolg>$R9mRm&r4b(q=`xWFkUdMo$LR4){@H1gJo0 zd*(V+TYqf30gJ7A#>_M}or3ym&VxeI=c!maG5yc@&jee6#HtwIsv1sHCO#Th*^q*{SMdq@6wE&Yqlf=llH3%;vxL zTD<2~-hB#-y=?K@Pk(Eu*WYyuYW{&OsyF!i3=ie+Uj@+-V@4(_6H1DGH47w`RxKkM zOp2(9sgAt9k;fGVLJ%T51PRfLN}6>QVm~hIRBaFxdtHuyJp|(zapszM{IxacS)F^a zX@&W7A1RQO>TH%`hd~^%;0!l(N1MTtW2Q84HHEnbq2`C8rK?GfIW>%05b&8}sD7LJ zOw0;~CyQmnEd2kF_O8vU@?5*%U+LPPde`hO@if(US6B5&2oMfxKt%k~rKkZ>QBebS z^?&ag=sc`{`@Wy&-G^DLYKe&G40rtEJeE z-oD`peA_^O?!EpsSC-4)TvE(@@*FX;I86lz z)MZ(>25RiVwTDo|5)20PntXb7xS|sDKjxGDx7N;Y-7oUuApFD=e$5TmSZ_2QzUJxf z7lzS)n7uR~{@|)*UIx)>Tqx$w8R=X}^3*-{&<|~6VI^Jgt7WLcU`kb+`$iHTd_K{_ zCHstZ4LnADESS|ApLrJ(CUjyhixPE2E0_3U|n*k1`nj5E7==IV12YOxO1 zb#C;w*6fDWX@~7nIg4~ZbAalq6o}KM18|ss@R0ZGFvQ+LX`T_Ehx;0e4s?$<`XurP zcgTa6E*1Iaa(&zA@XhCVzxRLG-&)^$peJv`^9xJS_>$gp49>k=1nEK7eevSHigL*@ zCITMWNp90Bo>Em%2siw-*A-LNR(YHj2VE)CsibhLV(1irIEsrYVx!>|zi^i?oF?FZ zK94a__8mm7gGe~EI^!1y`ER&G3M-E|M}ujID^LxFCj^Bufgb9A0dzcCH2p2c@St<+ zZsKXJZJF{Ky8$(PcpU&nK)JsoemHyKhjD!KKXniqyve)-&RZ~U59 zvHhDl#7wEna^f137AIsLYSQRm8Ft*g|LYaj}Q%pk|! zCWuAchse(+o@6zFA_K(rARm9%J6dWY3*?_D?})x0LwKemLk1#{gab~ny_Wu6R4TObfSh5q(&)t$WhU+ ze@a*{UYK)3pEbx3weTDeWq{Z^-SB4wVw0e+ObEmZ(?-u}1&k>7h+IlN7)~0^lZMOb z>!?KtGIYoq@n0VH2|HrF=!ZYX>E+Gqw^d`ObjO7M+Uqv!U}`j2m^Z%pHP8QN+==J6 zMutj|@Bg3kUq2@{1nLH=M86M*O8#h0e+aWld;5gGxkA5qdn6Oc5&zHf8K{kt|5_9D zUm6pCT`&H^@fL603qe{N^)Gp+O?-;sn3|Y~V{3F8pT+SVpL%N@yw=QS&VFf$Eq?6j zAN=4K-t9HtaGdMAuH-*!arHef>7HPop4$#Ht1N7ziLpIgEc4)M`Q7yki~86ne-(v! zNkQ#D)bz83b0q}rojqXs%EbN`noxLS@N+_ZIit!VBrOfsFEdklB#Tj`D6}@+k`Psv zj^<&k($o<>fbAedmqMKSX@`*2hdhJI*$z}`HTGO1I?Cxoy>OGB`ini6hVy^e zS3F=Jc7AE_Gxu^;aQkvoniBeklN;i)kz?TJpA+%Te>IU%Y^7@*qIk{q6-c6RK9d0 z9@Z%-lQa(Ax)H5HF{LIpxbQVL?QssmQW|ELF9e6Q40{YPGNmdGj4sQ{` zwl@w9X60|ZYe687(O+j;dVVCLTv?ggPXyYP;>BzEr@3P(2&ufbf1CBTmMTox34KS) zPeU<6nb)Eu&N%V}a4Q6eckjUl?V5IxH9CcjCefFV@;iRja3jzU|5tz852pNU40$h2 z$1i`(we{$+oo_{lJBsB>Y+d>W}Dzu3dE1kqM%Wf-OsLA)3q{HnXRp(!0 zlIpTR{>q-7#Tb%!PgRm2zMsyDjk%9E?#vrkL)rLDLG^I&gxIP#XEdQznK)M7TaIg3 zcu=4pU`$bOQkIc8^q*8Q%@e3W^RLEh*>)PNm$%PSyP5wMe;97=#yJ1w&HR_b%MXaS z?jQfxFKlD{drxb={bj%3&6gJCS6sarZ&S&CV#dvQ`;9$++iQ*O|Bi?HVo{`_X3Nqs zvzU`QZ$ApU2sLk|S2FSSA~wmeuPa#)A$&u{x`V!a;KtH`B@rPr@}#h`T!%TKyB@Q3 z^iM~=FdNNVf6j==eFnJ!3p{iu*_!+LgjdNz*Oo9lgdbizX`{-B9g3luYLtj^2#RBP z+gN1Utf#SC)Qu_(%*dU*%!8m|EV4P(vSM_H+=aMfABq|e`-25{+ld0*^k?96L^#<)+D8PmR^0BM|%M zP$P{5Gx$7=M^+UwI`ihdf|ssaZA5__hC%J=x>92zy%h&G8LOD{niE;YcT4wrqPop| zCD}nsf0})OH64DyNuv(|CkzV53i;3dXu4-h0PltGc(;Y|$KEJ8+OL?${)@N8-gpjA zVcf96-M7E@rMO4>gCF0twPx3EU9>OXqkP9(8~)sTO_A0(oDU7AD@XS`%MIbtbnm&Q zP?8B|QK>>uF+Hhpxyo5??!%LZypk&1TT+?6e;F@9;}mk<4Qe$rlr{yCQ<4=3#@ZVd z+k!0jOvZDN$ghnJ1Go>@w&7P&%yeNIRE?6jH->b`%^4Z_fSQn~du?ks2pRYogvnufofBmrZeK852Dqy;hwBm?E0K4_V@wWxkXjfA= z{22S}H;%tqul{1sMBm!Ojo$3*IbUt`C-2yrI8`(iN9@E~^Al}ARj1npXPP~K~xB0XyfAzn;+ zKAW`$%1Tj})^Bv~vvv^15kWk}k50i)jHv#jjTX(m?yUj;9k+e*`Y)V^Cie2?Y=2?s zPyEL>eX^yQQs|AR{Bv&tQ&faDf5;-86=P%U=Y*%;gpp)t_0ax9?V<274e}cmy&I{$ zwikqq=3?a1p{7H&bZH(E)MZruP#2|R7#l@IMLj4SmgYVU;(ZVZ!Kyf;YUp^f@DZ2D zD!yT$EvR6DRZEvt$pk9y>lR8PpudolFFoI}Uc_@fBazX^T+_@kc{sd-cj9~GhgaDVcb5+k@qwb4qgdq-()Je zi!^qLNE`;tqRQnj9&pIu9XED`9XI%#21np+&@TNRr~7Z*jrI5VT7(2r zu^eaR-T?0d((oHkp+o*x-VtJIez0v)%p*S$#&$oZv`h?{6Ah%pQuU3S_5k!34~JjS zpKAi@smAEft(`!uscQVWi{J4fZ~u3@`;9^R!Z6RhXH5RkyqGn<7PCFK*G|3BXKjTT zz>%ACO0}{Z%!lHce`;&KFJ`De$r2`84_I3P?5s@1q(f5hHJ-HF#m>Chp)85L-wX`j zKi;4HkO8NYEU71TTug-;!D;YuphefJ+PlavD2Po5?OPk;H8Drr%n9vV`UfuW*Sq~Y zF7nSkzf85B*#S{4r_L*G1XHl^T9nf%Sa(A*lEpe()z2mxke&R_!k=`fELXV}Otk z(dbtHV2!`xQh&y=zsAB#kMz`Nk*H;61aLFbnA?6Hap+BG#VjMJbh}n3Gu0k8k?5P+ z7+K+&6=My@e}5P^*fNWFj8epZs{O=gKH~hzo^5=Y6GyNYj&QwP%qx+YemtJ&0)52} z5n49(MP!kD=Mil|i8tXQ4|T-WMsn=Qd`Hu<@xo(E+iH7D@=O$|J;p2UG`{DRAl@t9 zd~UTox^M8n4ewckdY`lM?$1De^r(65onDxtWJ2JVe|}CTp5wQ+M}rH#FprVVz)*rv zC0sxa(`We9NnsFQw$1(c;3Ae(X;ULZay{FF;2#q}ZUe^16g|p(N+>s4E6*~d$6x+c zk5g52w$DOa(cx8B!m`plHstIG#gXU~btGbZt_~l2`vA+6IJgy0n-RXP~2vG?CxV5v~-~d<=T*n8mR~^Qbcqy?~qE&{5qUrY|xb6!%V|z>&{~<=;cHr z&$q$pZq}eti-hnV=O1tc9Onwhk4wCl{}{1xfAx2K5ci?=uXWP;)3@+zZ~L}GoUBKn z7IPvR%{86+OmJv(y%MD>N9$l-rUa6Olg3-9s_^hg4BeL&6+`{HmJIb2aDHoeQi3sE zMcPfo2fv*@nNWL+#I|m3!b7GJ0x{Np(RwE2Q@R$I=ue?K>aFiio9DVC4ydXU3o%JCG7!fXUV4xc@)eFo5|8*by}KG zT6#7*;@Le!#*SWvoZE>&{F*QFYzo7I&GQyj`DvWVBC2zr_%Q`FL|+)k^MX)Ufy&r- zAYoAVA`UU5KC{iXSJ5t+`#FisDrv(vWoHOu({1cm=R&bbHA6)EfJ7n86>mz zn53{Ypeu0G|YYJ`F_qG*uZH=)*XSXRx{4#zIDB z6|;cF6lTmFgH+QX?oC;Y*brjrGIkE~Oww>`kc4a8=J-7XOR79OYv9Rys^0Gn;^4a zxTd$>+SmI9VY68vZGUb@g4tq%cA#Js^(YJ-7S@rOwq#e^uMEfMKGey?%=!+m#EeNZ zb`*2wY%|LOaWI7-a9Adkf8yLLNt)V)UKP7ZrFwL!Q6hUQQIBVS*67iut$spk-1qCK z2kqV2>wSOO49EAGU7feC)9>?%PJe1_ea|Iddyubr1mHJ%q#CsZ{4hb_-(22zT>lAUO2Usf%RXlJX=477xcl`ofLwNmLXR*mAhhyOv?>!4D%)@!3q14G1G4x)*sfKrT&$nAT94 z1B_W36A$HL6V$Z_^$gH?+R(wlzcQY$|5 zgV3XakYgBLDA+qn)m>Bx)kTi}CljLKb^=;FN%KcQ&$a7^ZSm5r6L#+lqsC@dHf;En zl(5Wx7ADByf69}?Voh_w^5I={z#*V{RBDLNeCdGJl0FbeiF%ri4nguU&KjK1V%&_m zKl}tgT20^Qge2cKkCDFd-!IHr^VtyJ+6&|_+MzGZ$iZ>=xyc(#g| z?F-vTwKaPFUh#|k669=W5P0a4%W&_UbivqMD>vrB(OIL{Y?y&$EAAH&1v9s^@^00= zfd>g0KjYPlr>VEb%DJIGAVhzFCX=^6;LW@LrQ`Rd|1XpM-#7-k_r3q}esoH$H@EYv zf8kpFe)uOLeIw%gG2Br+#5Ao_WXc`CL~+JkQM4 z)1>ERKI#Eb@Ozqd!k7>fR7#6lw~zEWa+wlGe;qukL%DHq?%Z!mri~)A8am&&(wm|; zNirO-b1J{=?bjakAMxOSaN8D5j49<`I>5~tBxsNR=(c|2&;Pa;Z?wndZ)|V;#)&Mu zzWOIl+b}a!??%9;ChBV~dx-a%nh@$I$!sUsj6KRc>L)Q#@!9Z`8O!38VHCqaA9ACT ze~3W{K_zoTA>Wl6c@_KfEX6f1Gisg|?=}pm?7X#V}S(&<_i#FWl_lc;Rh{V-ou!q~4dN5$c2@_FOk;D4o4wP{|i& z6UQH>wm)3kxDSV$;XGqc*N?&=f6O*$Aq~asfEF&uo(EUYNg{TiAnvR2p(0)vbsG`0 zg?g^3one5##qE&{FIHW;WR~ZKK5F8AUTfHaYlu1weP9yiP26wpO!Z<1qKW%`e_;yVXUclN zHLr31@a4yOzk_7|tIv0)ihtl)pUr&phdtnFUpDx6j6qowsKSsy((unMao9~mL1^3G z9YMsPa@OFQUmSqQP{)=fxnZu)AB6N6MZd5(uSne&CY1uJd^^c|I;7{+$$CrunIpVd zYc$JXIGt3tiqxf=tDzCIf8~@K+SvBJkT!ZGg2Umw)O(3-LT_rAeve2)V_CU0q5IL@ zeeOVt!rGHqukwrO_e|s!3T+cRX+AZOUr8|TJSAx7jvHP;))up*qypm&>~+@g?=v+1 zqUZRE-QXNQq`$|*H`X%qjGr@ens#k-@t3o7zR#b;F)=SWeON!88OnKaT|Z|!pGRi5qyY;V(%qf5^)lNh3Cn=^)bd^D;;(+oE5qQklFdoG^!Mj@Oec;>T|nH4ObV||z!S$G>) zi3iFyg;Wzxxo^zlf2R(iPW(8)c_?TNKi>Pm0T`(BHMgc&FL>)5{fzzUSKRN1k9cf! z_+Ik!%^d1|+NpPQj)$kqNX{(QNfe%0& z{OW0a;ZPdCWMdOFx_RIB)UUBTetu!&!|!uY|AGn146(XzQ2!}o9j2tAgV3FdAH8<_ zlZmjlPiH(2e|qXE$q9*-r_NIg4#`6DnJE`Q<0=Ia?{o;61EFDvc;e@1g!KclXG6qh zQRv0A(Qa&W15lnY2~0eDEX{Er9iu+Q`faxZ`QdXLBK{};>tAAZ$-iEYFzs6YuWH4R zN=JjRpeZ~;Yjj=qjIr){Jgaic&3J58 z3DZS!HI$v9pK;@Z2er52uQLW6GwAF3OUK2oWZs+6-kABrr{yPWON29_Vv@>yiv1FR z+)=at&;vgz42dU>~=b9)7cZoS;vm*L5%H+zR@#iHI@ zj-Q{C;P~n6hL?4HJvbe99R$_F72z$TJ;yx_?`=A~xLs^KXpVJ8ZCV;_e0x5ePNyO0 ze`;F5KmAQx=TB&6=S+GP=!t$ooyNKVM>!SHQr{@(!6*htSuG$vx>0a}VF`|MHJ~Ni zVEzH7j}6d6sv&Qo6T(=G3!o*v324Dde*;5$67(TGOZXrxgsGT3fyT&lD3JRgl-uP6s?MS%5UV=c31^_()++Z1k3D%<35_Eyra|Nan9EhZ~fEe-|Y-KN7A33$~ z+{~1L348{6PEHEuaLMty3qjGaFjj4-#ZGjS9>}>KK}#xNgWTy(mMiGe|jL` z16(i#FuXn^>yO96(mbD*4D02*8|SKP+*4c7l&!dSQy^yG~s#86^< z+tHf#0C>W1`1(C7Ne|yoT!0B?f49{VO@D;jqU8Wt5o_K0+zl1+yw~@C{WE)j+T?Im zcoYjjQP2l|&lSz2g9nz99zX?x|K*3+b@c}>0E1uUhUU^(;;^84HddZ&Jtaiaox@m4 zJi=`uF`~&sJooc81F^5dUh@)GLcPY7(JZD-z6&(ud2sOmi6ARH&#!<wCwdTZ&m%4 z74Ifhuk(Jc|A{}UfGSx6f0;*YzT}j+cYsM4-W55FSCI>vIe;RH$dU8&JaU2Bk=dus z#&Nvo9lqv)slYfYVBX$$(eF+4nDJW)tSl*!%dDdyQ+pnGsk;WWIp zxwjWrJ_CP@b=a3cL}bpFWg3&_cUeafS}UmU$_U3Xui2ijArhMRo!6{MjuDg41!f;E zwF7J{wE5m&1D=tXmf#(txxJ1k+jIZ!M3mx5Trpo0i2PEPdMYAns)=z}a+eW392>DV zPnRwfp&`>!yvqxyfAX5&fogu|5fM>r=Zg~uRM7*8V^YeJlBPU*gf(}e)0CkvrP(3N zXr8Z2;D-__JstprhfD#xH^1d;@I-ZUOVV$CZ{@f2P5|Noc%n1u5kazOe#qnbQ9*E9 zM!`gZLE`27u;gZ-3~b9oek&m?ISXY8;t)qdmax%7BU7Xvf724=UZiEpv5W|ACB)^s zO?9Uxc_}uT$f%+e^Dd2u+$#`m-H#^C|0USck0y!)Mao^^qNWb#7X`t+@*q5vAdcZY zl_9yM;o%VTcAiE7Rl*p@C9tbmn@Sk@_q%h~V7V^XqQ2MIMMZ~DcMTlB;~L*{j7VrU z_M2>Z1riT{f5a|WPS#O0Q1bPheLWsX-e<(e}@eg}(^~O4Y?$1fg{c_Le z`Gp4^rPp{nAmZan zLFCCh8uYzlAFsg|9gx`(WJ&PEMDJD-UIuMHE`^`_MxK|@KJc!*mBcH9wh;&I@mJcK z&-1(_f8%$$C`$o>XwT!)=YhcY8aj#vw`B=L$1x(VYzo<&mOfBBbQJh8_6OableomA zTRIfmk7h+7vuvD}1TGxMM8tlseeAzPkKK0}+5GkW{HSzoY~l0!9jf5?_rAkjm^9-K zNQV6o-;FTIvgwB%T()7xwLn2QQJ{a;sLlEAf4_4VFx@uhMy6Ocrkq7e&u~m>dmKX| zBl1?s7zFr0JU}`ASG$V6!1C8Dr1Y+`x?J<-oy6E)bEobo1K+P7UfngsuFI$ql(&-P zWhuNS-OTEWkS2ZYy3BZ`tFjbvlWqldMXDx!>$;4;(uZX!@+RG>)&A%GPZT*4cW%f% zf2W@c=GvME29Dd(rJgJa`5Ko|%x+;sJ&778iH~zkek-X~@|r4U+D)65bSbgF@ER=` zqc&@nMnOk$ibl%tS_+{4+>cAb_aY-k2|a9&F&_Yp8J97Rfpr+0h(^Hms5r~g7u*?8 z*Qv^^DJ!Tepn%Vpxh^A|mba36WhuDCf9JDlT|wQXA6%EQS30(D_)4$qieycC;kwLv zrDOZbCf&*EiUv*k-KA(58`53F!=*%)B$b@u+GI#EXx3f@5mjzh;99C&7aq<49yot_ z&HR+=z6tG;164K3z0)>-&Fie$A!Oem{2`@AH-4kN(AT41hR;fAgHLbTF=O zW08pu{>-W|u(^}5TdBt-*Om0^fB*bAcf>P!EA^yw(`4wbp@;7X+xg$F6E1-0m~WopOc(wwk(iSkGOHU5rY?>%0F$2I=$C#IsBcjYwtKvFVegfGS0wlD zrH<<)v77M{5bI^PqMgm=2Ep|B;;xDFpQO8ZZ}7#e)Dyh+e_R*(IM>bsoHv&S+7q0u z(2nl?)UQ{TQ0&E(jIlq?>beFXCVmbnel}j8gSrOs{yxLc@9$svE&M!x<$ryC<$ryC z<$ryy>u2NctIJzy$g)(;xjOj#OV{-X0MRjDl+{n(sf0b|N;$DTJ4M$}2Icz?suFJ{$`>gr=`hNe)FY$B!%HMoGyz)1nkFWgA z=TlP;APnoVqWD}4d~8!Q$9mxUjQK9jbBTAp*YlHCKJ|M3u*r9OJ-^Z9Tfd(Foe%9! z7&`UOypf=NrmJhon!Z!vyPwj#5@@Gd{53}LHv>YOfBa|<&%X0(X!HH#e~)RwzVmM~ zEML3+nmfA9J%YOuSSPFL*CS^@2sGN76>0QPdX1C*CL>Nx>ne`O#@hI1^VJaTgYMRMO|1iD}cfCiTV zf8u01@AQvXy3@h)U~kf5t=1m-cJrSb=8#ByR>mln?=*hfuWN$-)#4|eqTa#n=UT^e znV&!T8PI|lKj{V_UG68{{7d>6j)KUa>j~&W@1u;oIfMjAA1sa`wG)^UHuz?#L>}}R z-zc(L@Rt*e7w8;3^gHY5H6jem5E4bse_$rCmLS3KAa1ThSWB|U|6MGP<*#enLs$I& zzO&arZbr1wIN}l$QZ9TKnmm)uN5i56j+W8 zmcVjKFmtT8iS=4bq9N{ltw@c}wG<%n{w~%B{oMGRti<1i_2~l*re6T)}t#J%9VU!S}TVxHtU-LpOR4 z&!cz#hw3Xp`(57&yx!ZYY1gKn1nU_Kk~jTTkp4f_Yt^(P!*)#np1=3qPo@(4t-}7@ z#ddG{8^tC6)jqHH*SyEPX*cOxqV5z*F1&g zH1og%%Sja3zvnU4l#l)UH6F^}{lisciu20sSA4nE^Sz$V1jM9F5jP8cXr zO%mkQE}K`RAOR}C#&cL{g8lal6ywU+{u;LP<2@c~eX*gt0&??z^%Br~kU9C_qMf|XfQluA=I(mSeFpb3L@i=XaS;G0TlOs?^G4EIwj6^HUtPZ?Tx?%c#;~ zu{k8x_`EhpcVe9n886K(NDk2&po<6g>-A? z*1FR^$6XuiQLoIQnY`*(k;VAD?6gzp`sqe&+F$pBE-6-GyvI7@cO)9!meD39r`Nqb z;j5LISLFEm-V>Yl`j{(Ei>t5u`}?qID}TnGuk?fA?Y-{XAqcR;IPLKN`mg`nuDBjo zxBvaW+^+ZSf2}@m|I2^<|Nie?ak~AVyH&FNf8U>u*`fXqXEoa{{vU63OaEK*b$P1~ z+yBUJt3#H&zERsurnmp-Y1)4)ugQPhuZRDAv%0PR0~1@S*zG$1ZM`bDpS}NMqm4{Y zzA+sPv0KfUfNi7V+S>eQFTPM?Dm{u)?>_158E;n z$81SL|E+iPzH)@nsows%yJ7y_mbsC%+o~r@3H}`#D;+_PdWE zPl#FD+;OEQ*D77_4(G zEyv|m+z;1CW2sSZt6e*qZ_ak=NbQ`qDs8%V4)+<1&rXon+Et&au#Cz{87Fba+RAMU z*2#6^2`w{w{YPPGt)f*B{d9Upx829^GT7Al;Xdwk>{$#8CpJclXkca6@?78Q2b}D<}!l9q3MXNtu zwmv$e;lQyf`8k78a=z?m=3>J5$r_QIW%a~`{ z-nflZ6<<$b_X9poTBki<9PZI)otKna)^wUI>pHye_Zb_SStN!fFE)>p23f1!f9ij9 zF2+WFJTlL8k@Z~1tyX6=vyxrkTs-{kbJ+u0KiUKEjQ-SmC^02ZOKr&H^Yde}+Z&d@ z58P*IeVhksu-I;oLtnp>R^8k6h5GECKaCTfTD=)5$)x`=qROwP2Q?8|dem0Xw;prl ziVrJ1X4~tDZ`_kL8rUCtGMXoof6;C_TTHKAojso3c=^;@&(BV)Yjv(c11wPw0_3MVPExkt&b0T zZyt_Sa;KL^y1RTxtJm&Y(|&3#A6Mm!c0I>@^xcj(51&20+_kwBBP*ZIe~vIP2aA5+ z_!tDO6z0L`d{C7+PLj!ST@O~(;Q)hTMKn+%H;u{w%Wj9hX+|!avz5N{XX}Y^=^bYA zOnTR+e?O{?-7!9*o}Jl4)fwf%7>lWf*3~X6^cKbJFC0*NWUk5#YVl98PtT%bv3}>^09L-o~ zckx#p`q-D#y4dgcW_HU?l~9%gJyYqA%M?zrE|SZfZ(SijoX=p|t4uKTNwCjb;u3YS znHTMFo~}Me(~OyFf61Bu@wq+CdeS^slT(^q$<j!3DC$~=WcxdNU z+p$f5+y6Lha-jlEcO1==%YYv5lR@tq`81!oT04G3+iQ^OX_+`5!)TmIKhrPN8}yR% z;1QJ81&v&}4Fye~e($I(A?a`A7QQg(Ba9flv1fB5-vo1XQs_>7|7eG!dr zA7Zp?_slAZS066-yl~s@9zN|2@jCvKoR_rNxuw*CZAD5N?^$u;!BCs&=cuc?88zqE z^GRR!2+UO;U$@1fH2+F}ouaf9XU&ljO-E=_Ph_TpqpacJVRVwu@9PWs&q4C-@_JLMh*jQgog~mV4dz8g#KkmtBf4ve%)(MT19~{i~akW?!Cp#LX zy?d$CXENDOH);~^Y5yF4IID;IG1c2+6CQee>8zsJIr=#jE)YsrV$ImYfb+wc#t zy{t!1BRvU=wqBm4XcK!sTXl+AzU}Mx&%?lpvOzFQ*1Lo6NEQ3WVGd%ulgq=3e{|cT z{2Vdae{-wrvuo$kM0CT&O|PeQ6fNq-E!T!;-}vZspW5JlxI4ylGqtAEWp;yr4GiN# zDt{4t^nyA97M7d7JCng`ewlBAC^X0SB8n$fqHj(gYA`tthUfxf%=4&+B3LIv?(PX3>trc5%(8Q-3X4bX#u*BQ-Uj_XR(mHqQC<%$}og>x*M@ ze-hVS(p#F-^W)rx4RM3;H2$pRsef5+_x@_x-`o$jy%@Qz>dBtFtsv>z-99EVjh1d} z!`D#T!_QUlC`H&lPIcXiHb%bLpX*DTJ?eQqHS)d&o%^s{8M+&t$&eU%kF5r$)_D-O zs?{vYmdBB4b*i{in)9+fY_Dx`68pJfe>lA3wWsB1eVpBspg6>!18qL1`tcQ%+< z2T|=#yEL25I18lV78?eK!w7oaIT@Z!S?#msvE!%gbJ`L!hqptL?8pApmc+i&xb}E{ICDMj%X6FCf1kwB z&Eh0vbxsK_)gGu8t(Q|>{rGE%9ZiqYX!^5iWwQL%6*^N!;74S8hl1Hubuop zTG^yM?0P#vrE{dYE6rlWXY1&2OXJVO%7*TG0=t67G==@`p7x@^D68;3zsgT~jcH6r zU_Gv4xSpAHb$+b6Dk0I33~4DpeV;FfebS*b<+bj|I8M{v=x!c-LQ*a6U)G5nq@SI`aAddX z3|65waUflZ8)`$>+M&G|MX@5UUqAO3 zhYf4bCX?>N{HPw6Xk49FJ$bUHCzI#*C=;`1rmpS&`8Hn7^TLZBd6+xe{*aF9R431@ z7WwX24X<`}>F3$Qa5B@Le@(XG&8e))H#3(`4Yh2YSV$70%~XGPztX)Xw%V?Z>ek#n z#mBZj+DzRJ9j?6ab~@84HoI-_xjqF$S_a-p%SpSIbz0tceK$H?t{=uTz8rSp zby&=k%cbLg%uo{ex!hW7oLm!E&+N?E25CTyC~W zIoqA2m_Ej;Z>D{ZTmHkeW&Ppb?S9AcseR68sWx2M3!@{tV^iLyM>e^!);`!@qD1f| zd#onrer@fNRy`VfbwyR~q+ZW)&Cl!{9SY?+lAGrCJl2nKfA93!o$JXa7}JmQqK-uC z#8=U*d(BsT)wc9g)Cx)wSke74+|Q@yB02@&GCF3e%O}-ol4sp+@qDpcgMx%V^NMq=Hb{bG`}2d;59Y9;~0M zQSafXmHC&JIt?Cw(|V6Qe=-GQjAxU}w7Jh^O;(nH!wromvkN35$sdLW#p|_Ps#(-Z zoS5>;Qzl_lv3&|GQY8qwguc9R_M}a6rWd!goQj+W$~ZM=8NF00eR@Y0yxc6~BdUb% zimYU4H-dK4&1J4WwM~7KT}%^>J&hF+tmKReL*=XWa&h?*g?{&*=YDH?SrGW98DDZC zY(KZOIt{chfA$|+=drUZtOeG0$t)rg^`i{wy?18x-aAhp{YTNgk&=NCU}2150A}yC z!qqafB_uc7o#WhwU5#k*vKtK8BQGdV^qd@j@MEX!y_ZwhTKN!{bI5A#wY!#$Lqf6qQ8s--R7QcR_qQo`4LQ{g&x zCMzt9ax|e@@Acx_xC54(H{d#) zPEe0H7UIlvQHMiNi10gu2@-UmMDaqN71DjXEg>|{eBo9JyK_}mYQ?!U@f2rb36L3jF^!M1Z9&1%p|Lu9h?8yy9EGs1yre4#m0vDV-d%p$1hXxkzLi`)x zWzl{CXxj5{%>RfCpkDc{iE6sV0{z)!RQZYYeUYD78ohk}Rxj?;=50q}eSXnHF8639 zsiiH*yan|vgHg7jX^1*i0UJU+oa^z7=)$1T-`rI5#);r*!nY|CAbuhi|*Q&w+>`G!#m1vS8r4Rzj6GPlX` zLd~&ofS~LhtHtMTM-=dc&ileGs|oBdl$hXpL2-PTbD=-UQ+{;Adz$u!F<$JT6ahxJ ze>$kUqxSF5UHo;B4~4y3bxq7jgTyYGAs1>?H^1cc5LYkT&B;j;>5oKC`(#b?z%BDM zEAw(I^SmqbI?oDRV`H9i$@4tlD?YYu5d;vF=eD+|aXe!Ke6%i)9&i5?61nBmgLT?3 zs-Afh$T9{OJL=u*T$6VkoU5CfkkCzhtM5>5x-YQlPk+{$#hgWe<72j zJBk8$A9z>c2#~xrBYm4eA_roquPiCa_OXVlh9?!tE8eDOjIfNk%P?k^V}9$?0++(1 zV^t0uv0$wY&p?`QW`ma4zfV=}YD1*S><;+g2NdV1H`xJRx-;GjwDzZ4M;`j1#-vbC zkbYE%IB!EG34m)+1l&P`UO;S$e^6Yq|5*0_<--2Mw*HU*`M>^OnCt(|a&`X0a_!0g zuw485|L;NlFU8gJU-G4Cn)>HK75;fLY9}7FB{YsW`eVUj|61t(c*^?C%U})1XafC_ z_MPhfj}|zsome`lF&B%2rfFdLtJ(hQF+FYxsuGJPNB{K?`mY4_AEen%e~{;w{H^+b zwEfAXX{IXX)X0}%PC}ulX_^%Nk^SSpB@#aci-^sfggi}i$e+NPmi|t~+LBbOV=BT> z`KJ};|91W}4=|_YNL+jD8Dji-cnRb0AmeYGxYA(t%n#wFl>Rn%|NrW(fr*D+hI=yqOfUMRuj#ZZNiD+yh7*jAv_f3 ziL4suVx-BHElt<4fAu~$$T7FU(G~U?bA`w1SEQLukegm0sMag!m;fh0*uPzDQ5!HT#kJL$d&^s`zGH6;6!>ZFC0+kr4QKjj#M%G4H$To1z9|9IIoH zh}mHH-I(9#5&Xq`m$Ly;sLe31@#RRrQNs?60N1vt>1@mmFf~=VH$A?}G7S zSt#RG(-jIA_>5_2Lgtz{2G`tP6t!UCP2D3fVLlq5hp9=nTbEjX0v`-tU5q`xU1me27yp{2Y*Wp%1atSQx5vAa;#&a_L_E%x|(F*sUnw=v3M z&jm3e#(zOJtgXgjS#DyW8abk}&T*j}{oeaDTpD5XZ^%pr4I(L?8ZV9u;6te9@2I6o zMN#E3aD3|YBE{vOq8S%7J@R)QTMK4sX(%?7XptTk!tOqnBZ6Wkqc(;TWMY` z;eS%~&EZfr-uNpSoUtVeAsk0vv)V3Ctj+)`e=@BoI}mgn>IfH`<}jPz4u(ft`b}~| z$>1AGNVsW+g;6fyCzr(0CQ1zI_2bsNWJDYRiAJPI>?zQ04Rb|{f=-I(?YOG z%rx%kQ6(4G?umvncMY_p1QKL~!o!hZu7By0i|Q@zMO1)WHkt(32Jk2Kk*6GiQz(L@ z92d~9#Db+%@xi}Rs_HnK)j^rXlf!b5*pmi?PiIf+sc}CA#>^CM|BD?KtRqz z)|xS^soh|%P9}^l{jEUZ1#1uX6w5e4_-oTrFlP`4Ot*PUVSj%J znmNS6q`Ke)Qi36SdqZp!lzM#lE$IsM&MI@pO9uC&miqdZEYK*VoYBbsW=$yIFWsq! zL-Ft8FVbG}{sf5w0nKMZENOA_)qI5Ky12i^JJ8O=lHen)4Sa&4bK!)o3$}Mwt+er} zSBG&B^Y>+wgPz@c*7~dT)UBY3IDhj|5JBW5Wnq!ksC`GB{%3ZE@fxpC_m17inAFOv zA4(Mb2g$c>fr}&IX;wF( zb_y1M{Ia0!^J@dhGS2zKb-Y_2$5O2BeWzywW_;;tL87q3PNJ?o_#t`qj2EFoTau${ z;}OQn0`-u4A;|%N1;1`9>^6wFB`21rU5&i6pZ6APk^Fuh!CF%w*MB3i%$je9fsBI* zUR%-*gc1Rdm==aCJm-#8_?574cy%wLC=aoJ`a&Aa!&@QpM4&%aBt|r?3NLUg!sApZ ziSukETLeaRE20E8woyGs?2}Z^-ewws12Gy!!)-C}-`)xd_MvD5@x8z_K(OV7#o0~Cynn&qjFgPWx7tMNmX+vWdeD%p|h~0;B3$*JOT^zLu9uomsg6LC%u1h^I4o`Sop77JE`RcUCg6+&uDOH!QXI<-bmGc* z2rQO;;ynV5N}}q!kI+P$@PaNVMmS#HTf=!`-0R04_b{K^Dr`0dkQjX2UKo%!kfbt1 ze#arruj_1AfY@bCOz>=qe|qXsJcG7}*wR?LK}b*50t%lZv)qtfHBU+lAUx^VF{Pb{ zB=9^{qJPuCX=!?MmMV^GTCC*xefv7)f9z67i6AX{K@Oxh9`qEg?g6awPpWlQRTD8EC3@my{Ob$(Ii_<}*zX@Bs2Yrs?Kg_Qo2A@*qED3r0+;;ZS4V3`20 zj@}l=b&x?x+Jcox1~1x(l>s8@cFTvRZzhj4@VbD?K3vVzMJ-oRO1mKS5 z?W^=Qxe3Ip0Yw1JrD2B0{jJ(VA~$R_gV-K$sV@yy7IGJnnx)n)uD^bh1UZT${X1mP z!TM?{l@eg^thc$eM}!^b&@WBj%BXXVi~mY+L|Z&kV>&F*Ob_Q-rwn{jXnz6A96hJ4 z%C{-~CHrFdNh8`dL2hTQu;BZHLanx+KXfkJGVRf9JmbOu7Lx*24naR|49Q*FEP*M7 zmsO8BBWzldHPN2t-`M^+y*ACS=A15Qv2SKJpMGcX)98FvLBk_T{3zg9XRPT>eF&2` z=h9xmla3FlkcKN2HpwH70)G)Z3#Fco0R8~hJoVn(6S&%+`a{2;MKG+NvIupdXc6Si zQ*of!?g6HIjd(pP$(J?Lb(BCYJ21uD3ClAY2;RV41b4=i7wAcwPWv*Z)d@Io`~mzG z;S+)=%gWI$V8I|_CesTEo2e;L6C zdlCh=5~owqHL}&4HJYq2Z;y4w)=zM#OXdpPtOb45pdx2XlVcJvR*H=ztE{*ROrpAb zTxYfg+YAQmkGFwX=~@Cq_f8WiJR4WKT_^;e<^pzz3Y8@;&aJh_j+YBAUB!bm8-FT9 zCbV+4FQDG=G#M&GOEB8Vg~_hA*?_ ze)okN(M=hIAv08&u~Vt-Yl4?J)wP3Tc~2?;yCWYD1#v#!%Ox0Z2)QgiXdE$bv8xV2 zrxN&2vwh0WaMW`}MusSE-!p*Qk%vlHd-w>Vw<4`O)_C)aR-^beBVK$)%AkBM7D1>n)#D|Xx%Q@pWV}uuV zp4Ep|2UW^#tx|&WIo>m^_$QRS&b}58wlHp4Bg_+o1b>L{LW0c(?n&0R@1$6U@QUc& zM`**o3F*4bYw5fwAb_t8=TlWX>F6i`ag0(|04I|}e!Boz0qR!*6}v12OAXk@&Cxl#S`T*Dol7+1fwhOIywa-5V!3!5 zS!iqUcYoOXIq`K>L2<OnZ3z!v`8 z7dGcii6=je+GE2r@(gQ< z-fO%Rk2+;pI4FFQi+GqTL*1I{Qqxn18YeQcr^YpXiCV+(QN&BDU@yt=mHbK?%g&!l zmQ#)yYS6ZQ&T}z&aqOU}GF(hO$~{bp2MWHBR- zb@E-Df=bDYD#Y-=>Zn}hS4?KZsle!i=Ibk3-1npCAd_|cz$Axrn`e&f8Gle9JAViR ziotg{M+&lTvB2$GeSS$Q5<0=;;OB)8qJ8tY4E#+ymR8`+X*sbu58#r@YqjVLcHY(! zG!8z+giCQJzGcz@uXXQKDmroLno-k4K&k_3vfa<7f8sWY9HQ8n9k zE=Wv+jCPxGRnP4D7-yRtb#%CEM(XF&Z3Fln|fR(L#I?fWk1L<>63m#^FW? zBtGdQCJTZ_zm315&m2=$+c>>%a5q?0ykCnLf;RsN$Es5(0VgM8_ge zwe!N>-o-S)uYqq-jQRfUJAWI;5BPiH8nZ4F$yrAWdL3Y(km7jZ~Ud&Oy0T=I!-px#8Qru?Z8Pe32Sl>-_ z1~v;*klN$LaLM>J$Vxt7rGYv!ny+7&4SP;Sv!B1}jrE`B=t#!%7=P~VsRs~Eyk#Ga z4jrjINxBdhaY2Q^lNr?qC+IZ01cds9*g7ho5=qN8A{Pqzzv4_{p zuh>oN5PMYSF@nR+VY5yk=mepTafS8qTW-8mj&gPVT2{a_T32eqZLIjTiYoyh=q<_3 zujeIUt2inhAtE$)meFs?< z=aP1?F7$1hP1_6n{SJ?-!C%y?-s_P5+Ul^eOVvi|ZvX z8TO4wKU-Nh4K&$R=7v(gFy9+nd5dXtPYQQl7u!aucj>s%S-jDuY3hrJY}nE;rBauT z7~R26sPBA2^AT{2eB$*%HkyX7--|?`MF&92w5&!jQe7b_a&P43uRv)T-K1)!?1r1d z8i*fX=cp82PJjFm^89>eo?LAXWW0M;j#d1U`U21FC(>sXWv;@bSyNgOR5NzXHgIXW zDc{GK2ZA}IUSMt2FIdVBNlYVazpSUKyLE!Puf;e2&^Y&jA>e)v(K~djz5Q<-=z%SV zfLrk{Z#b4YB#+lpZ#+4WG{CMPqpz9FW(%A9?;hY6M}HxGV4BxSQ~>#GiZ~J1GItwq zynVuq(F;rDd~w`^sxE&np{;6ltdah}RW8FZZ2kRUZnD)|izW4Ud|GDKXLePA`}Hrd zLbVwO6|us}nFO_1LTCX%$jjmn4AFkTSwpNOU2O7Yr|Rdhk7)E1n^$DIRDes77;OMM zrSS8?{C^ixl=I8v*gaN)Qqda*PJE#bCsa)#g`qGH9Y*k7Y!p7>WEiy(c1^KW9T!9U z$3kP8gv3tFV?6*8H#2xUWNGR zZ-1$A&7$KdDEQ3xm-q5o=12tgMR)}P;Cv*igMSOh7?Oi_CaV>dbc!oAzu2`haCZ-OxAFG(>m`3Vu*kINI5% zly$Z<$IVff#02Neee*h1Ewr5J#X`Y4)_)?ffjK-MHJ9$nr(QGwn9vW5bHtQ-)c`vp z$)d}x=^~3mAmATdV^+E{dP^A632=S095D4$1Sv+s5RX>WS{fIfgrmrl?#0wW1WQj# z(l%lYcx+I};MMkfDq^nG_miULy>sLeKDzGrInl#okh+LP3)X>j837pVI9Av8y?;WF zScQCWyAr*_R1JHs1|a;=LAvE>+Nt;FU(&X}?G0;wPFZJ6mTYg_IYJ%l;f)9~oN%ZF zm*L{-(M2<2lbP-hY^3fNJlPV6CY4t-y;&{Wq!~L+&)hkbz%sz%Hi-a?KWWqD3Rfv8 zPf~3lArDnL?jeo~t^cf&v zg&z>Ua3r1L)13vNlc4sACahINMIz_X0;eZyTLRhM7*Cc7`dIKY%qjI-AAi(ST6&%w zBF+@;LZJrD9a@!?_hCTlTAfwULp64);$~V&#*ASDRDI30j8%O6l)gG(9jyGJ5qJ4Q zc53^f>(F;ij7`z5ap+04kAJR)%Eyi9w{1A6r5CHs{#o8i#8YM}S)4&n%gWqI8bKJO%}JHN|_~J7yi)UEA0G zh8IDEx`WEybB7R1`{!*Rt|wFdZco{)U5M|J>O5c`nCeSTd7f?aRDWte&xY9#g$EXx z)~GOrgsHUz+>G~5y)6LKwGxdVjPiJ|fhjg7jGDLNw;G=%=8RN-&?Fs$rgjjrql5Fkb9;;mCvN z#GeYBD1Y_GG1C4fh}PZ{**`br_Gyh_k{hyt67U!Awr$_1-lKn)dX9+yDD8A65H(ui z2EghOu0pDM+v4twe|TDFh!t6T{jIpcCL(==cx$T|hq)k$IDd#Myw8cR1Wx}9EGL6L zz7Ma?aE|$MFX&3$^}O6Smx={vLT+p}c{${^*Tc<-M*2Cl2Sv+fn@w^;rI zAuJ6VQ1(bA*ME0(ps;c$=q#ya+YNV=NGpXdhq;9Nun4^&PHt+&lLIUAYfRe;3faz( z?%>So#t-uOSNm$ZVVN%fDE-vFF}%RG1th(qVJx`DX9Rw+WUV)hTgYO3HWfV}sZYR` zn*ubM*AT_2mv(0!l??>0L_Oxi8Tje>0D)9^YxRg>Eq@F|Hb1+rg22UNW@AyKXNHj_Q@GFpku!)p>8<9g00aws;1}r;iENJD6s|gTx z%=hLGcz-_j-pB$Av&-PFj#};~!_vMyC-c4Wk2A~qm(G}DPWfX^rjPche z*5J}1q&+gX0loqD<9}7fh)G#~!%#+7qad_S`SJ{B%7B1*anOos-%}4r8`K+)hV_|A zMmY2Um`v-4HK{vFaEJT_RYT}mO=#17l}vtJ4S!sO#fw*{{}~;65e=Gv=Vw@cllz|` zp_O}HSnn|>IA-z<5)yBkpN|1jhH~J~mO7{+@pUoWDwq2VR1EhPC5^sJi0FeUk{s!P zXUsLudwYA=w&NeDlW_9r>ICXNdUC7sL!qm@0A*-upU!kP;TKJ#S7^|a%9N|TcAn3Z z*neHT>>m?8xqDt8kR^q44NY<&gy=CfL$-S~x@`)pnIANUml@aD(Ki?u%0?Tvs!lV1Ggoq9=Zw!8#6s57>Gjv=Vn9VCq#7#6eMJ zD*B6+aze8rx*$R3()&UJ+ZQsQCp9d!$EkVQIAA8urByEe>E0G~PTOS3<3ou`Ho~yS zK=HM;XO-jgEXOjv5|7u`3XkPlx+V29&(ePhIo~kXq*5Gpo-Ixv?jhu*HT3ro&wrsC zK}%fb{I^vn_S565{<}E_+(e}DU5$E-|Gg8^B-pK)? zj!Xm><)T}n$h6GL&-kV4Z}1sfW`C7l0Zzlo5LUsram^B6udA(C@t@r52oRo2gx|m^ zhRE~bQEL>9OPQmVQp|lu$=7BOKS*(G@zktJ$YYvw8kY+lOGjk@gsOi$s>!Rms&D(@ z(MlLC(4E3Cdh=7{AroQYGCi#%p$ zac|%}WVAu?m5J#f1mad}tAA4*TYLmV73hSc6{b+&#mW{6yC4!G>|xHy$t}A;0bUj1 zoC^}(515L;+2=Q5$+TQ4r~1JR4W!(6E;X@C1UdQefy2>U9KRJ@^wX4+{{h&KFk+}# zN5g5L=^5o=She9ezES9y#yGU|JUr|$HaoKl{|cbD3{gS=?=P0P`+wPW3_XBXasJX@ zH~9P%Apzfy*^2yIJiyzt!G}RTiG7MRKPbwUU|!3pJnP?4wL9ryFetSBhu6ZfW<-pk?i5vctI3*lmLz+~Ce|qxK~lh;lS|NNH81}eX(e}koEgK9Hh-BCI$%dP>>{~Af}MjE zP~d)3voXYB+YxL!TL(f@xpCEBHkqY@wjo=Z;OcIkz<=R|Jz;V=CYX3?MeV(Q0=H)r z)hWkfTz3*KN`N`DY{Lj4v5~wC^GQpkE$&a4(Lg zdb-}+@oI07qkr+uF625R5NmkE_RMc63xt|}qXb|Vg~3xo#>%O2EaC+wwCHKL*$sPn z1!!x(LAJPb@Mv4+*w$wly+l%~$)+HYl8PX|a@c2`;y)qJ6V``PC!f~kQ;nJ$k_QXe z&)}>>78dZaRf|(6y74K%+F|pFV(xmBHvb4dsZ$J%*ndQs-2mt2g>L}>G&w7JxV$IL zH?40UgiE-e9||KSV8S>46&oB%N_hzVSbf$26~5~_tRJM78wve3`%!1S)jC+A*Ptbi zH`U6`QM8$I_roFaJ_r@_fMK#lTK=Xs0APqd-Dbx15JBa9O$jC82BJV=p`_>CgrTFa zZENPi8h-$C)PYx~+$@_(?dPr)UViv}YAJtowFs0JIiG63L-}gdf%6yR1eYS@uz*bM z41ES}+#`BiTeLaG-%n53&d!p#wK8S>n{f%zzN9CPo0_tzfKGiod-S+0eR_6}uA0=n z*fU5yypEY$U*o3@#9yp##lxas##RA;@DLigDu3!-BB7R&L0|1v*as!h<}|R#oCX8} zS+XzBSRouyMnUTG2UJ$>(Y?aotlAAdSFaQ)rr>20+tpZ*!!xnx?6<;)Fas53&eJ;Q z!vPE!L@J4yZ=)r*XOj26jRT}n;va0;8Wd197S*z^Q7Wy?l&_#nu&DI9`vDA; zU4QT;%(2XKf}jVK;2KNGcD$q&9K>#@qVsg06fQ9F_q)<(h|~9Hd|2Z$ToFF%1zsJA zeJTwY-4B4VX&^CRW#vBlHViQ=1bzj9Ibd4kM5MZprp!TWgb(&HNTW@rxuqn_2}0Mh zBSmvkgEvLjyOD0+0fYvrFp@X`CT+A341Z$VB)~366&_qbTy)gE>r*Fz7SmSRPTw&N zUIqoyB8MRL6=sB!`GzsdkvC^TWOV)(in@?FhbHNnun3y?!++%?t{ba{Q~*@2L5wtz zW9Q$Ey(gDPkadHAYmYxk2Atq>C!Zqyug2pPy+`w%Oh!N!5~W26A`;Q?G1#2(jemqf zssAPs!f+)x^{SuE-A~3%APb_iYIIK~EXVh`;|T8YV35JB0Y$94@JZ%pH4hVNbl!nL z0DG={V~r0FTD#QcHkZ%R@oi-g6A#HjPxy4`%;R7_z%Z)%wd8?pu0NV-5OoPG>r!g= zGlDtU)UvK`^nx-<4rSh^_saf_oqwh}vPiE~CsrmSLDzvsG{a&!lQeftA#z2>a@lBl z9BEKdg-fiGysp4euG_2i^0bG0m92E?Uwfy4J}haYF*Y`E-Y5M}ABpauHT z?8}^LD}zZxegdR~O^ikD1qY8{Z(WK>3P^jOnfyfrN}V7?r1x&PW3)#$>uVLb5DWRz z?NJGAuyfob$1bnctEiHS@qaqsoxHqFmsMfuzDgTf`4!eK^ZH?+2>&q^rn4l&)AKys z9N(^7B959?IkJPwS^pPWIHFpY>*>^~GelLc@HqrOgL*#ed5Q4RkxX)itQ?+{eNK0J z0$r(t<5=$}aCVJ5c-nWPtvUSt(zzR`Q%9H|)Iuk9V{liJxD_o1XnzRw%;ZzH_x{-Q zQegYH0ylf6FR?Gs^q8Gs8CZZ(*t`at0JX1on^xm~_YAu(^hRwla9t;%_W@-0-L24q zqcEvfP|p`GKq;+h6hdL*iIVx_xr@^R-1+{(vY*#=npI9JHqqKhj;~lFY?+yoWHk41_nYwOV!=S z7iM^qf#lrz0<;S2HvDs-n4xToViG#}>((Q@15xgq_~ttHfIk+fl#!i9Z?D?@tH7<^eG4^phMyLWl-zs1 zFFufLsqRinT<&CDCt@sxeH}QJavvRss zli0?_M}Pe=G8Z)f(yIX8KLkm(DAjxZ>5Q@9b7=eS2wwXsK=QGp=|5;4LaFvHF5YqZ zz)4R1^#G+%BORc}s_;y#s2Ika(Y2&4v!8SpX$X@n!8|Sgx>e1!$%UOIp_;IkD+t#l zP5_J_l|Rt3*-)a)f4P%4>|O+NA4_`!X9s+i0e>nILD>oo>H!6ei~cQax2Lxwgy5;0 z>#4!$(xR)D%5vR0!R9B#7TiyF*l34xN&RE9t4Ty4-+Zs_2)73`faaWEdxg}}=1wz2 zRBT6)9L%JKL^XzuTJ&MLerMmIUwo!=ok3;xoZfl3EVB-SIEVd8qHi)*&YT%aC1;i? zDSxs;t@UL)r-QRNt6$?IKiCT`(rG&upBFF-K0cM3FEh|1a+4hd^H$r@Sah3Sera~u?U!jXPaGxj6QcFDq5zo(Q!aPjZmI*e2rhz z8n3!`#Ow-?x>HfoGjDkzZ=Gk@ls z!6rR1ys&oDt(5ovBQ-&MG`iJ|Npg?b_RKrk?mwq%J_*@te9*xwuG-|?_|ZA5{p*18 zPf>=x;GFe!C*BN*;Hs(1r&UK|_;TNtolE!f6`x!qKQ@67Ntai?Iqwf`g}5E-pKiCi zao!~7gLu0axH#h#Xx-1Hou`fKyML_~&*w|k)<)|HyWm4mv1KxJ3wD}l_wcGd(zP3X z%%SXs_R)2cb|ufl)I$8)D8=4$sIwa@2$4U*H;(EIwcebtxe$*mJl?SIWr_Eyf_ z1iaz3l67_ure|WD+Z*OhsJh3H@j^n3d_t*!{d;clD zY5Tl8QL=7P@i|>nk8&$DImyp-+)vg1ZQn}QJXr_hn3y)-yxw*hxqo9^xu0RvYOj^j zj=^(q()haBU-aE=rzQn!^~w6&p%3y85qWHRJKk(=r+fP_Pt$E%J~nqO_v*2`ht*xs zow#tNQ+s`RQ@2q!t2>CS3fv3czcFq6ICHK6E}vV@8HY{y`l!k2nQa2@d&4n%1-f*Lc;eSv2{rweZ*TXt0n6^{;?P13XGG|*;arJuWY*Y7L z?q93p5$xBnyXNer+{YxmOk4JV*ZVPOJCKY3W_)cJ^5qvw!Vn^{m>eSbv_WMss#` zw4k=>K2R^}OLj9O?QOnp*1&ng>ixOBw`{67nIl|#mPa5#z3;V=WH@6{SUd!!>tY`n>aDA#&-OHCRf_B0DuHS5rVW?et#1pB>}XR{=lp`ytj3Rv2bnr{C$>lP}@xrFz`w&h0m~yVqK5r#IoC8@a4K z%n!qOnX@^sWxz20S%00_e1BQLN%+!&!^ef77gtPc@_$^XY~2T)B=chh>8Cq?d zJE5u@4oO=3zRDu*jndwy{nITjC#_+hjiCxzw&Ox1)lb&Lk$-BmP}Hu647?xBb8Mi+ zAHf~D1@^X81r--q^tEh_W zsz*RFxzF`*2TT=lTzc^1O+k0^dliIdzcbTL5m(tD36b-W)T1Vlh8CCMxCS@=Py~a% z-4UjB{qrz5*+I239COFxGun)*7YsuS$0=GJZ-0J4w*^CqVY9DB+Zy7UPi|HsSF`wM z=+oC%VOwe^>$MfbySKvE{%qVcBiPd`<9W5$r&mYFItk)^G^l+6`i)T@D*SX5F6$_@ zY(%^cWr=h=9J6XOT=7-HY!0khjg#_{|F&luue9y@Gf6mOWf zNq?m9p$;yc84P<3asGNZ#EJy#e2^O{Z-0A0z)V@de+JV|!SoiI_2)AAQ zjPR~bU|@9WY4v2+F7Nhc@q8aH;vMAvZwR>2} zyASt`m_<$YJbQj2PII{>wm=T zx~cW9iugR%>~mic@58>YIa(?E{nboYLC4FJudtgfDD;<8`HdQ!pZ$q&?(vb>dnhOq zD+P4~w)Mxp=rv@|=b&vp{kU5>O>C}$jVP_$$MiAw%Hv3{(rvirTZ^zx%DmLH^8>b* zWoDqjj~reXd?ekgqhQ3TH9hSNywWn8DmnS#){nLi1t;tKMFZZx1UQ zcdxt3o{6}6HM$P5Uu^PuUM{C@%^tVcy()1NTM8w5Zk_GaJQJxf8O9bMXMgb;HWQ9N%WlecF0F z#Osve@H8G`c^60BJ8A*ZOMh;4;G_42*M@{4amnu1LHXFSoO~&s(A|*B{E$KD&s(+K z!+ZZ+8K-1liOjtm-Iv#GgV1#&UifhNrx8wcZ43Io34%#{F^w-o<<3k!_quy`G1OK! zdw8uycjx?Stm-;uOjIllGSxLZYS&Z+_6;L!|s4KaeGGHA*q$u@#^_r^X zbXje<{`RbnTQ)M^{^ulcr;mSR;qCn9*YC}9Ctg0+N}nIzkg8>C0)5=ud*dMFP<&OV zd#H9UF9j(Z5kY08$Lqaj%JNpI0F^ftZS|KOyV6nVx3@`6s&-dDo`cIAUhn&%UI{yP zM`ca@khgdU!hg#qXLvkLrUp9Y0;6rjlV~WHBrx3(4mof$fqii1O5b*% z$u1ZFJ(cYwtS5%2R&8kQ{&|4Re&<-AI;Q1ElcL*!SASzU+1%c{Pkwt0?0b`=n^suy z=j`*gZ?ui0?To&OAN1I7_s_|4q-HiIciVe2aoN*SH+~jYHKo#-^_bd5yHhWOFP0xK z=~ysIo~>7p+UBPf{}zOsDaenY-}zRiKX#W0W`PT9#=;rIa@`Y0)b_>at+zqCr#;r0 z;?R)hbALw3LpAJF>ipPw#0MgtY`ShY0t4Td_c&GzZ@(zNp1^r;c)PxP^bN(R#2}No zYnUBYn`V=2H|yOo4$o(5Tq#{0H;ld3nbGf88$^=i9evJade|Nt;g~=9LEh}&chc^e z(Mm_YYk7lfFQ(dLf#~u#SYY~cq?B-WeCZ@kI)B6}&lIh7yt?Y`C7Il-xbn8Ko7m?i zJgyYin|7CucgX26Zf!?xlOw~HCwG!|j9v6zc>WyvYgX<|CKnP*wfZEgKXl6{g~)Wf zwTW!^#hK|^%H3CX){fDY7w(7cWH6e!f_u=w;nGR^)b+2s?SH_PTW_Wz26yQz`Ek*ny*4}VrMGxb~M<)+HxvU&(FbjoY7^KCY}7I+;kMoH<00QXo@N6UHh@b1Xw}0=z znmzBg<0)rrk)6U*!znMGdu`9V&%|@O!+q9uES1$`yesC*9$(Mq@!}tkMz2wUdUHL7 zrM`JT*WvAL%Np{yG88P!`>3A@XM!hxvMZKY z+HS+P8zqfat4Q6v)MtExsqDN&`+pMV;o9Sl>(z8M81J=5mT8(mcxR6&-|`22wJ#c& zyeJ|@9o3DyQpuV;4Yk9|6NM$2jK@e{3q!4$%}0>2NE zOQ~1@VE#OxZg~n@4Dj;ExJ;t`pC{KL3S0m&Kok<75pbaL2V{(B5AG>UTTPyFzLiqK`DGu_pYoD;|q zmD0=25rDcAs8fg^?4g_+b&N++HU%M?!c?5=AGEZ&DPq!VPycR+=YN-WFdMZfHo6_R zp?GWbEB|oDhpB(aGjTt$A@?0qY;MK{y<>5IG=}bT8dv)zP|MT%5CFhE_2_VwrfCC! zAnpF!=K)G<_A1;U?8J%uJfDyLyrq^W1M^Y|bApg}-9HX5cAvahK7W{O90aABhDPvm zjOca0Z6>LzZoBP9wSOBEq$1x8lhGOl>#eHd&`PP>n62=(ytG!k@$Rvz^x26dM5cOW z*83|%_P$weE>@fF&&@Vv62&<^uG)IYCXWwie_lMVy8^iNh^Zc;=`>17Qn|_WhcNXL8RkL|*(&^nsO5}~FCRBvW z-H)|2X0Mf&?bynHJSA}8Uw6f156UxosWpc-=l0E*SGUrxtLH5QE08{(&()`WRyDJ^ zPFcZU9ryToynlF>{k$6X_1Y+RnhMTvua2v_bq>y_wRd*Ax_uh&5|ina?DZhcn_ksM zu)lHqy7z(HSdJEzrSjWQg35 zPht@;)>@S;a$t71jSp&(jwTSN9ZJFceP;3S6F`BVgVoVdVqnrrNA*n_@`p*&_nz5O za27ro-10rgUsHj*a$o~Q41gt?!Spx))L@&vKJFi;$Ek`-&+`saDH%uEFuTs|+F+Xc zu9Q3PoPRwt<+|xi{dkl&>l7zj>%H%U?tD2PuJSp`U(wcbYyRQ4BHWICt1vlsp8K+T zblG8OPexSHuugP<{qVMBa#3b>Qrqa*>l@zOYVM{zCV9lzyX?7f z<;Py>FqzJpe09>=uusC&e*6{aw>UiB+A_YpJZ(ES9=TD^yCJl?Pf86(yMNm zo_{Su!?62)XT|1Wo<~jAE8ckfd%t~#5y?EZk=CqWh#OhhGr|cU>gH0H)3v|%2iCr& zp0K^`f%Q%k`BA3BP}b*(5U6b{Ch(Hu=_c)-)) zk}Gixv8<&>rB5GfECh-WhZp1Uj?<5Md4FEmRiUnq#o+Mk;*F_jwGPGc8OjU-DbY%f zNRaRJ2i8Q})y!-3G=^#4#?J%uAf}JXHg@0DO?!yv17!Igos<)s*1~kXk;h6NoZI{a zlcEso$gJNTV(j$#%xkB7L!&pJK%UY=R^L#uI{5k2`BOpn@g653_ilF^MmvclZGYS? z<-EFPO)=hHF+M35 z)YtE1RQF*i^^q(;Dd5|BJ%QAv(tjKAdoF3L?Vdd`hNBn0TwpUyvRG4noPmlbDcK$$ z<#~UtwI=O}on0Jqz3ctc+IB1Uz?!?DtzDKu*M@&T zA{~7ScW1>rr%fkxwTzL4r&{BWmToe{3`9I_ZJ`Ep-`m5bk#%mgS&M1u6j%J-9)X`B=kdT8@SHexQx#>;p(eK z%r13<{aP_wO^<8Kl6ZI!LU{1Cc*TF=)p2d^w7&OpCRuOIgpexh#xw)$E!tE*!cM-0QywU5k0?#-UO zPN(;_b={%&%>836I%T|W>~b3d#PY4yjkneoVq@+0tjBm<^YW_1P%9y`+pT|ARB0fv z7fC)wUa9fVYr?k|Z5yuZQF}HA&MvZ(^$Nx=*!v$^uGmfCWI{DHLK{Z|xH86AU30R# z-R-^&pINAC{l07?OLYzIcxj!J7Uq4-_>uZ2c5H&C%%LCD zJlTx;!E5hnQ}==)NVuWZ-n9@)h; zZ@uf~T3hGf?G|`jjE><#-8jT_m2BtRM0dqhMb$3->@|ryI`OgJ98G`GmEYO!j`Nx? zZrfz1nfjIW&iAUdCb>@d7fj5d3=KOKME8Xz_nP47IF zBPT3mah(LBm-)u488&}eJUI{WKA%Fxx{Dz$N9Atr?}3>JxoNx8Nt!5XW7?du-FCNr zBnBiewyrE;WIASjYMR7nyrwS5;Zg0)VhETWJ3bt7itQj>k>^Whx7s;+d)bM!7eu09 zZ?zn>)_q2;DL&WgfL|Z>&*sgr zI-=Cnx%&eevH37NU}Rhx45!1m$*x!VrLg2!`+2us-;Nx*`d};P4%o?=sRP$?2*Ghc zXxyx7x3rExbei5J$6Is5xbd0I)6qP=crZpZY{s_KyJsHn51>v5QIc6Ut=@m(!TcD46g^i`$*mdbUh>e| z3BW(BFt69wq`r;sR{uFtt}Cv%1)S!rT(vF%Y6wb8rl zJBL*c&Z~cupSE~sS2#Z>W!D`s1C#>fMp9Om0YWatx_pH4P4J>suxYMmc4OOO!6m22 zmLIuv9zRa`eg(E=W2JnKd*M{3r_ng?sT+lBO-Voa%3P}}aJ8G1Um{z57oW@W+;pog zd%zS_7G)AgZEvqW9>|i-dc9uNE6Gh<2(o+)FW!GA|6pfV$I>R;lpLu1SATr6>vSzA z;#w3a2R{f-R(1{4@)X}7fTy4-qZGUy$fDk~!IWa9r@PGQMYFxd z0W1=%jG=SO6f2~u6jxcWyUAv3>e}1xV{a?zFz6~cSfmgIe3raYZG?3>ouiC9(_s4mu~{@QHf$So%!=Ne3C@p2V&vj18CQp;0IjwEFZ?SGSTY+A!(T@WL#Fv?h%xZ zyX$a_EuVMaPO|O2R$panXr=Oa&Np>CkUPcJ?tGCY-m#BL;JwA`MK9U>-42r>-tKO8 z;2frsSIqUm-q$B-Rj)Z!MT~$AcPjt^Oe&yL4$#uV1ny zT*C1B*{=j5+>5b4AGc~!Na>MDIr!Eh;f`;qyziggD*gCg zAMXU-eTwA7jwnB06zAc)`U7y6?S*-B#)kMxtLO2aNiOR1earRp#cuftJ)u>X8Yb3yLa^BFZcU?Wk0uGe}cZ~hOQEfd7#&uTfSMnf|#GYCWL=bVosiL zS1$7D@ERV~^kKI7^&>=Y&-xg2-C%yLwl{fa^o)W|&r`zmho|15%Yftujd<`O@Q@4S zbyebeZ<*!wsep)iU}~9`eEpbWfzL&8x)kAKdiI3N62?p$>Co8*g0dd%D>oF=*j8Hg zzK8w!bRf{{S5$nQT~>30=zf0|i690K^?pc3d6HXq|9p6D@46zqVl9hJP26s@t-XGe#xV*`$h0oFt6ZOo5Z zg_^tb0Z^803qY(aJ2WgiG5`Qec7D+u%JZF2DAUWn2OwjY%?}>Q9o~P_5Wm+^4}jv% zcH2?<-H~DOh~Umy{><;GTHo?!GcG$89D4xd1A9_}JW~4 z_Xo?42~Lwn4u3$|QSSkWlV!&QK2;87?2%I0AOI9`xj62&?6|P(urTjBku1lbz_))2Y{N-$HR3r@2~)u9Tfn8V>f@)z_VjD?`Q!=fP>jd zg#8>AG$A*ekZX?hjf^J6!TQpTAVu@%@9R|qC!it#DxmcMzZ}7ZJr>QP<18tj%sdWc z(JX*R-e5EftuPBnGX?f>AUh17;?81^tz)*A=##jU%exix6%WoXwKnB?ng6s(9c;;lXCUgFc-}n1il|eTD{<|spH_IcO^?MCs zTw;GZKZATeM_g#-`RUfqAWYY^nX2Gkn# zzJD1L;8co5ao|10Y3AyBtVasRIlp8A;M0GyEP_UT$VNH$eMmD^hH)g9=J>s??52%z ziN|@(ve7ED`E}Ov$b7xwa!i4LVi9vzX^uZ}RA8S)avm=^AUi~bG*)3@7TR*Iq)}XS zLex*iqWP!}-5}V!VNu;6jS(THBIbM@dd;ZJk6;pkB>Mh@$icHD-cgxMpVt}O;}CyP zY(C(A0%rh@1grokHpi(Ml^)M=odDqp3g0Do8rkFGRJ=$;z*wJe(-&HSmHg&c^1IYyx|e6c+5CNVigKV zQ31QM6v{E1tBmI2AK$G`T!n8fkv0QH$Q(u12O=I z9x8L)+oC_$eI$eVT9#V>t%v)9OZ{B$^YxSYzsdUI0cR~a;m??G$GIQ;!HhYd`gnO= zD>7!ur&c)2B$M?3j5f(+X&@)O0e9F7&RjE)`l^F!(g?%0bxsQ>b?E|Ub z7*a<~IXh!Vm~r|sm6(3(?W-CS4>O5nx%8@Y8ZYjQW2=7Wf0Q%6u(4 zeJl^mQKW*59FifY64aqX41~T|Fp~tT4R)(cCOk~UxKW(kHEDlEdFCG{@)CO@p=T(k zI(}(kf5Ogu`#!HjfFzN2Dw+iL#K-|;12rhfRUewbuIi;10_nQefm(x102TmBHwfB{ zv(|`j+C*4untQ~Y@5_I6P2XBr`j-m7>;IE8@fTmcxWGU0gMDI1z?U#Z2ohhGKvrwp zds1WeX70;dfEXY?&+>n<1dRf)w5j zD*rnNmbL!k-`qE{`+^N4R(^VAWKHum`;0cmH!#kg&kOQjr zP8cXh61(~sR~mnzno{6G8#5C@83eKhM3?}h$TCP8NAf(`96PGe>ZO4!Hyo5AN^jcS=FC!8-Vg35fANo>m?qp@6B1`>6w&D(WbE&-c$ov{&$egX74*)?r4fw`ni(DVc-g2r7NO@byTj-3Lc^sFJzYQ0Ejzhp4A9LnUOK*s8 zOP+kW3Hb68Gn5_EUE-E!;cu?}$LGha{H-&2!rwlfo1)f#`?dM$95=UA8Bhff69Gz& zP5~4U2qGkvi%6WQ0LwP?B|uCGlFs#q`l6El_w&9*8*^Xa3J~%}y(z(u5J0CR#-_a| zJr*JLi9}2ZjDiAx>P!-ese$Y6uzoU<*k?#0!pz|m=l~1!rYLRWoeg{phYE?**Ca|I z1gM2>m--roGp}A|^B*EUmb&CYOo0B{Y&>TY?ueSB-Xq4KW`T>i_^H7KTg(yso1YBv z&)+{-U1rR;Ce2^I>Ozmq$KMw&`^Ba2du7IgiG2AI&HiwIspUP2D4+z1Al6~uBLy@f zpr#Cg0s%3Pj+-@TNOFs`qVH?giC)bSm?|VlhSbzy-sKYYrGm;W9vC%nNGB2I2mxOE z0Wx*J@?|gnXdto96qU%qu@h7(34QSlkOT;xd%p~N!37kFViDiW(ad#(4A9rQ#n81& zD+nlY0Wk@GAV>mzg+SrRxCD5?LJ~`BfK_uOxAnW~j?{;M=trlD(abF%8_)c5T(25& z1q=vK2H?}&Vt|(T|J3CY+kbS*H*QNW{F@_wIm6$aGEym?%u*1|C2!{*x$x3Cx5a{~ zk>zbW+slq1Xnoi-H^X~TM2RdpzNl#Mgh%UMLQ{2rM;7@~H)tRhbb?KhAW0D`8`7;f z=bbbFdErSH*^7pGUkODLVUWaq1(B1#31N^3Fb-or5ThVdU5C&7;GyIgiUk!B=ubKk zRMFn=VDC$im_(j=<03W95!46Ip^Jib6@`a%co--%(0pL+m9w$ zau`v6pV5kJITjYUoZ}1T`u=iw&ZZcuJw^~vMWmStMLgH841poHBlW!vZ>n-#Q?eJ+ z&71=!f&)qM%%}JUHfXDWoR(@~#(*#~X!wW(NS`-VF$(%uer1xBo8poSjxNR??z3QM zWP%|iGj0MI^ri*@t}4`qu%B~^qIE_k2oRKi7*xd?zwpdu2gOe>J^*~pJTqqK%rl`S z{o$5hzChzZUTSlx=N0tKQKe?IyjT3hap|FExqNo1$M`3&;zg@0weHT`XOaLgL@qHfFg4H<2xiD3KFqN%g5H#5r}0!mv3wQ8Ip&Zw#Fx8Sdn35! zXMKU@`bWT@>t6;1MX)z#%swJ&st`nf(X5RZo(JciZoI&Qg44|N*f{;+dCH$0{>ecr z#~VvDZ`x?yme)u0Pv2hn(5xwc^yZ(n%~W%We>FC1NxW$O@J6uU^Ut|{*SC*<`Q+UH zR6L%xs$BUJF#liiO#g`|f>7jxqnr>3s0ds{t*KWq5rSFAFL+IT6HbD-e4b)|1c%Bc zWkxZGO=qaS=gd(HD3ZYd2qdN?5?|B-Q9+LtNO%|Y8G`3bgnf(v^>yelXBW&HM1>?fs*qe@pRBK-7a@&h1Q-R(JFZH`dPfZ&bOesJbrl>?C1K7?-p3<6O#Ms^uNVi z*C!kM=5UB&-{5-1sv8FkG&KtUH?~azTP*dwxL{6CFr?KT&Sbpz710r72uPm7Q*e#) z!3moaqDa>PCK7B~sn78G)2NV`^ZNLTY+V)iDHRS5j&7U6k*`EqkVQa$Af&{R$ZSDT zNm}Y@?sq(#mcH$mB1IAygxepy;wtgci}`|A5P$JX|%7!jcK+n5D1w91#Z*9$bZmx}y!SgwnI$gY&xlV;TcIrTeGoJmM6OmSNu!|)) zf$Q97Ac$=(L`mQf2lW4yKWPjgCS3Ro^(ATe_AW?G8_lniScD{hdd<%V0;|{pl+53! z%cPR$JO#7o&?6`UEWs=1&6o>lCLuD{Ai{t&=r8^x&zuRw7X^|Sf*z}2Ko+EUCie#D zk%X3-|MtW=kCfSFxi;dXhiJ|tQkZidaevnFi-)tv#Qot;1Kwz*fBS=nGjI5=^CW*Y zWBH>UMc8MH-}D!MMovo5`QK|d6^X_NX~sd)KP8YTfHnsi#NzA+&9`E?i(csMW z2Xe*pbu+ZUG}8?ieAg-m^&AUG8W941<^>QGkB{LrPyw^{j%C<0_WT4lKr15kWbREW zwD-3XcX(bO*qh(lTl|FkdPlYG%$v;Fw;dQf;~JRr_dl?I=vVtJSS0@HpgEV9x|uO4 zCtrVa;ZDjraw=u!<}+@eoYxV7?#qFCK{G%sUWgf7k#O+bsxXM3E0n5AiEgd~(7D$J z3HYkiFYEt<`MkW&JH-y@!F0{ZfJ_4kmf*0FOwdE_JLR}8+dTvQ32-<2un{8Yf98S9 z@BQV-netPA^O8xykG94CcuL=%`>UmuFD5T|{*NE~A2>1B{f{P^@WukB4HmwXh4P{; z$qz68V#8NI{qgW!;4tD$dLV6736NsWZAi!;Oh60YB$-7i0viNDC?F}X2<(w0<b?a}wmp|J7+Yf%$dRcE@&KJL){ozkDFY*_CHRq2w_oDwLwi6;r zQld#EF{QDKK>yYH&`oCl?Px<#iKKGPs#|LX9TbouNO}tv)qB?OV9kr5&tyP+1@w)g zjOYl-Q3v;h8_)PELSouz;jjz$k(W3TipFN_>2L7G93I~WmHTKY{v z(Ig_p^9W3d=WU{T87pG7IhM^vC%rjye0YO&t`8_8Rs=)>eE02%G?T?a_Axv-QqM>; zwu#XE>bojeLgguo4wu!*4IzJchYi6{^JOmzHa>BP_pJ{+*FiAz*}10RmGeQL%{@>9 zIXJ>a6aLW|2sjK>fMPs8dsQTv3`#t%FGR+~$5IZ&d2;>L8x}YBfq(*~WBlwdy)m!n z#-`@DXomdnwak9!d#*fWzj*nxHe>}#Jg@a%TsEFHw6x&dY}Mp`_*XoQ-Ri-S5fG%M zPf@BlkjOd38*;7$&&fn^021)x4S_R$%s4amJ0eL{l`55KNi-8>l({v*o&WkpGpC~#rUYYeeooA!k>A2=uQA9HAg_(maeysUfKfs&J zlhFuB0EmYFQ0I|uIrrB^EBqHf><tEl&qN;)1SdZ^XbPVM7bbF~D zKyIA(H0{yT@t7lWd}5bbgvBj5dge)dKF$B?>k^W!3 zN$PI6rwbg2;B?ICg4ep1zZ8AGC!+`Bdg8?`Fu*(q^8w5NcyK{2o5VvNnD7%WI>P^J z_M832CIqkN>~GY=4*{;HL{25F`Nnzc?|B}PFQO%&$#eU*9{ zX%p$qy5*%Fu?h%OIPx7J_@n|q(`8_ovu**ef90vzqhqD)l$}TL;^X$_T`dGEf6+>j zxvCVW4&svZq8ADm(Bx+iUlez8DD5sM$qz(v0}992@rdq9@vT>UT|dFcocr1b;CP-7 z9I`TVzx6ub4}uG=z{@=f|9FU5_|Hr}E`R@?|3P2+ z|LK#v@wX3tIjAT*WcuUfc;w%nf8;-0LwxG}F&u>&!n0t2SS7tLOF2D4F(#12vk7z2 z0S!)2&=!A-Qc)T^Tb4F57SX?Tpe)2e{@>|9S(SaVmSE#8o~EzKUfx1Hfbc(f30?Z2 zWx8|_b0@I|Bx@BlEeTxlz=+aBj*#}QHueT61)_k?LV%vcvLa?q)%gVuPyTC-<3%U^S;su%tfe-8wI^XgQOm?T*XmG7IYG8O^_kre zQ@)v;nD-f^nRTStm1eF^e|;1%oh}szL!_924Ur7V-|CWuA;^rA*zD!9QY2@NH^vTX zPj)VJ;=I=u1kb>$*vrt1R^MIKo?x(|%Df(m(?SBU;gu$q84v;26eYi7%v*OhKnDo| ziUh7Tq?H2af5ZirTU`L?fY=gP%zV)3|HTJ?wa;(vPT#m>Utah-e~j z9?kFUEtY1!2*#t~e~2AKi%(sh)VdGFS?kxWtUKoSyc8+y+((ug@F5%st2o&O&HgAc z6>#M|2(sBro@?be`yp`ozFD_Vvmd#d`*29+`>h&(;cEl=-X}85o^bZtOBtKuapq}1 z-q@E*|MXJ$y>IM?fAHe$`Th&ze)aM2^{{4K`mPx-!u}N7e_rlhox1oBF2*#eQjwX9 zNr=f@Ffu)ayGtC%Z*j(e(j1P?|H|WnR+)*Q={Y-W4G?F#LVXqVj5Gtud_164JkNtP zQVcyfBt<>wFh>!hcWahaQhH9BK^C;sO9U&k!^aXv8id z4?bcU(wM~nYjX~-TcAJ(yR#;D304f!IX_(1(8~iWljfW2jvtDUk&aZ>+yslzWjxTE z3V3uPSo~}gFSCn*nRT@SZT1Oz^Q$R{#zWBkaM&NMe@%UH_L|m|f#)cCL(s@_@9U6M zM{)6w?DdY4)0~HN)(wLxfBmB+*SNpt<#Vn6;^qyM{v5j_`00rjSnl2VPyhQrHTV)k zRDSR8(EjmpqQ86`Px-;n08(UViUvR;f3>d{2r`7cN zzr4L&bECS_2Ks|?VUjo#=d$$y2oScw;NZKMeXHV--|`y8Ne7X>IK$=$)IEP)Kjef{!uX4gwGIE`JB+`bmh~~u zVtvp9{tZ;N%t$hKb%Qd_NkvvtePpn5T)HgE!~D+0u3Y6rg+!N>4aFR4N5|=GJl$#Y0dy z?gg=X2~l&zf=Tu928yqU>d0YlvHOAtrAxq&%}(9Qepb<*`=IsG?mgx(?}HAkP2}Eb zSYI>*8bYDV3FeQLRLtgEN&#Qr3(z{`rXW<8AyXOZ!6yJtFEqx>W~c{WDeA!?f6L?< zjwq;4v4JtxW7na@w4SfaKj*P_6P3FA@&fkdpL*~RD=4oHe#=46&M49RH6c78TF&_{ zLCYf{no}{9Yog?Xm+n%U$gKcr*CA+Q7WeJ)ur58uss{BVC+&;pbv{iY!FU2SH=f7j z17bNriDgQiKu`IDw{IdaWlohHf2VPcs3Ih*$JfUvjrl|$b1WFI8OPl5n7Y51V*G~4 z$0+IVsCEJlG6ZcFnCAs0JVNYTe$w^({e#!peEb~kr%&`dV1nx@G#2ko3W`#w*JT72CiMh|s_7Afd1e_UL)V(Auz z#bsD50cQeqLQz&Po?nUD+|7WqRX!7Go|VKjf|!PQy5p1Y7VQ_tFvkFb=HorWtZD7q z!hMXsn1?x;5}YrhB=r_opVm%LJDw(p5pIwE*A+asd}?#4P>pAubpi2kv;pBQ!(0b*viPQn|6`FBLhxqeSLA2h<8vrTM}h) z8O&AM-Sc?V!Qzu@ejRb8NJB0I>hylaE-onwrS2#!=M>B;bX2#zf6N%SahUDoPI)iz zQ9!6MpH*KjQj>v3ob+iQ#4#IVbjMi0&)7H^cg6mnVSzXfBczUL$-#? zL>G0a%>!fV;x#vqUNiONR#J|-fn?*{o5in%5`p!dwPeBw&m&NXRIL4;r|{lIRBbPQ z;hL!o+JQ3X#B|2lf0l_;Lb;b{JCa1sK`$;|c0lOf=0eC;wa?xX3Qpl(IS3u+AigJ; z@Gv7^8m738V%pes(k@rQ$0dY0mQf$jl zuLec;Gr`qN-6OHQQ31x#yzR}iTe%-$C6|5?heSJDf2&E+f6rdsL?%deMNIeUH5H5c zUT`BQYqF3kU>H5`Gz@7&(6C)xzi%Tc~_d`IPXhGZS1@oewugmS1^4-%l^+S_c7yAJk}nvf&}E zTS7@9UCdX2k~xKYshkmaUppZz%Pw&*H%lVD5aaBzbkWEMRdAjxE?%5F9`U;zdf)-= z1s&1^^-eH3$;~@3#D!)~1dWZ;y2Eeyj^FUdM3*(`f7KYf8D;30BlSroPgrx*TgQCz zKI-N&>1Rr^+xw-qN`$F2s=a9(0*V%xf8vXdxIvDxlOFAhi}vu=vfRK0f7$a~B@3JR|P8GIu`k z?+Et|o70%SN+Ot>wrx15J|JBuOp`L=1eAz49vrOGT0~oYAE>6Dr#S*$HW8iIq;4z7 z=J(d#A!1?uZxv{|o9%=Wtw@|Uth0ou;>CZ-e=R14_L&R!?~^bnuLRYVUpMbT!tFjp zzh`@R0+fdVcyYi{cja$5>JevhjPaadqz~(@I{qzwFhJuvyMlB|#mR>}$;Wlvw5dmw z54!7vs<;n9KL*rDmbsCi-mId!83H9Jt4!?rg7NFeuBHWI7R5O`mG&JEA(sGdxxpS?5oSnsO5Z5OTHrtr5i-|nfC8t~v=JtNZ=zzZ; zb43SR@PJv>-czs_VPqU86Cu=I`l1Gxe;Vlp?m*?#E-T&2j(LrE|bg_%2MTf54dX zc`j{?75k$OKgN&`xTN|NBfE;157VG(QZLm7uXMzfDBK$B)6mXR9@E_9#kDknP^&M2)!?-!xC6mc=e-eYYJdA^j;}8u(9QLQfIFx7OaL6<00&Cq);}Bz< zs<*l}KVr8|eLs%n=u3{aFYvdZY7^Ip#_L5vt2Sb!+jGS^)-W-xJ%0pYb-KmG5&k}Da^PzG$UAFIlxf-qGlolJJe>hjBNVt3! zlzuX%idrP*#jRw+4gVFuGP*L3Z|k}ilS_Cm>8Bt zpIk$cyX>Df%MpuntZNaNR~W~3ydSZ9_KO%=`eXAwAZ(B@Pi@><&OE9czm~P%%eAOV zpMj~PO1?%_fu4?{s@rfqe_OjCQfBK2x_2+pU$H4Xma2*&{;+QtN!kS?k3o=TXcIET zdbtYYEQ*1#dwMW=nQ=};y@1&!q`ZleBK1Wkn?{IP|vj`&SG-lSZe`yN(??Yuk$(Xt>KW3(VW0*y*E=#5b1zA(Ve^vn}#=)|^_o%a^N<_-V^9xNz_eY%%_dhy+_Za8~i$|BieRjwC@)q=6 z#>4}oRhLoq#RbnkK+o>yMl5m|JZ;*|X`UK|^R%#WGj%>s<wNwt}mEvBb zQZFqw9)e!5!d_RNRAnVp7E?2OfYih!5wAg#y}Q6Yq9y0|HeNQ4C$r;t^SFh3uhHKW zronw$1RgQZe8?{s>d&!4KPQ^pDZhkGgQ55r+(!f{e?kDekNo7eF}7U))UQE*mUp$W zrDI(Te`Ym^g`=N?;zj1VC15U8I?P^p6$Ak(=8%htq&#=Qy8-q2knFqL)ZHni3guPg zVLQ1E!oBo7x46nwu`ZTvyW|bP_hdcUPsvPqzc7FJfc2`YU@f7v^{T(kFn$ipe-eT;3M@TVgOz;E($T-SSRGaPDvj`f}DCtuS$9!undM=m2*wVx73 z|DE@{dR`R|aWZuw#c@HrFy@Zc822zm$a#|{l{~~b1$U`%;X0#`G+4k;9Bq#8q}vcv ztbzfqcqNv6>rko?7a|xWg$G8z&a1Xi(7aRhe|Oy&qWvQ+df|mCBOkUt14FCa$3kgx zUeku*kVi2ms(B-QTw432?mr$(QP83J3}`Se%(~u(HLzex* zn6#`r_bk8ugCpCP&cDZy>|blQx_fa}3vsB6{F{HQiY!TB0Y>?p&LUD38K`m_!tZgU ze|aj`WDt_JrY2?__YtS$tm0JPapFiwW>4|56*OG}C{3xBis+K(i1}2i{3C|scuEy~ zaXm-7<&+!O>qSEQMXsbGRCOtIO=Iz2_ZUz5f=vqx^auOa);C!Hmx$9kIf_{q2S3k-O^R>n}b`hoJ6*KKyq^I8;>yeh8|Iiyh zcz_kJxTb);WTv4C!RIkBS9rf?a|KRKWju$%CVOIrWJo6u{+YgQiHf!ps!joHdsSMT? zmaneP0+N)uAcy=h6mtq$7S?hXZbcd@O*{(kqEar@>WP6)xS1}Oz)QD>h#8S`BcJA= z!#QMLw1rQyhd8{_x2~B+e{M?P=o2QX*3pn<(be>l!t5oaDr1mlPqY!t%;u9$xO8Zl z=*wRg;O!SnTSsO6WPb^p4#|kT&S*I1i_b7pKb#FG8>Y;d7PYW;(Qh{D$OUkUQ6JVR zt~2dDdsUn9-kIDxBm&*Ma443A;28=dh9ufP<0NI7X3f8o}Cq=YPjKI;O^PYwB9oODUp!`qISG@??nzIZvw&4(ag zX2i(^L?7}z2YqwoY&h92^FP9KgKG{OIx?JwOCTZDs>B5 zJn-t#535o@s#K65fA&PY2bCN$UBpqQ=5FmY>XK7Zmn)(g*X=W`vHclK)ds<`^?2GC zUK<|N>O&~B6VQ2f$cLHD*;A3wUJu;#zZOimbSs`%B3A+MXfX__^o@dKR{F^eEcz}8 zz2+bC=T`yB>vurZ_v4nwA$9I(f-zRFZ6-WE9Cy2W+6f3x`jQ<8%{_OqUh-7?Yk>}PIi)|ff1^J>*9G^6JD=%tJ2y@YLuSII-1>7haoo&LVjG$H(x~Eu=z?-4#NIdy*6T7EI?VYKx}AOn z%h@M0;*BTeQ&_hd%d_j|Ip@;k2Yq-WZto0J{q zrqE}HULc?-q3N1)-El&*Pw9vdl<v)v^gKQsj`I{YsY~g8V?)ODN622iuGHM);CaR29F6Ma_ zZY}qG&=ZPdNfc$y>=L=8eA47p>v=?_i}in`e}X1_$z#tLm(ixTBe;;39T;1ZsNks?1#gVjjDva8JslGK!jR$x!AY z3xd5q)RA%OsIWc6yK5M65WnM1*eS>M-)r0J1I`7<^7k5-(T*OmOC;+P9nkom!_49g ze~Bs>)y0TUZ#zz@^L)UmkclZDK#7zA;zzRQu>V{q25X`{Np$adh&vcDw^)lg|E-4d zRD*8oagR0VE~)s3yGJ4oj5kRiCf7^AoSHt?tYqk4VRVflL>q_#ACOP4D;QXwrXA5` zC1`OO1B`W*M|-&vuYnd+Xe6VP8K})ge>3@&Sq!C8j6&&grCfMePhjcH;QJScXDzIb z(~^Ga1BO^jI?1YdeI0P@fH!9TI{2hInJZ#nuc2Mq8} z&ZoR0h(+D2W1h>dksqH3?~VB|c~&EHv~Oe$WtRl{t$y5kvEdQ>PP}de;z!U+I6!(#4=2A()*TUXF19~uQ2y4$C{8sJ&+iF z!x#L7A7lTBOE%p1<^4O+f6e?*ujMl+TYA7vyU1f?oS2x5zpPuAs3PJ}wxC49FyF85 zFZLF*7EVb&C5L#A*?B0(LA z*iAkcwZu86J5ykM>W4?=x@)cqJOR9D2T84Jt$F4o-&5@%|HAAR`1|pM=U_xBDZ;-+vvcL zbg08X8)j?kPF0Z$J#|mmjQzppvHE0;{fL3T#H@sPspt_0AySTd{HX8g)Tqv_%g3z{ zZl$^BI`AuHa=ZE{R`*mnfi66%Wu9wJy}{p2zB1&U`t^G7f0@AW5)#CEbcDc6QcMfg zomf_7i0X-c-|oO5rcT5z?WU_O@=%s#n0F~L9l06tyjTNeLw~8sI3uW!rAO`>N=7_q ziTd~(d81schnLsHVEzmB@hNUK;#zv#(~7g)P+|ATqdXt=5d829{w||O48$>>x}UPM znzBQkha-rVe-HCZXYcI|?&E>SNhVUcq&0shCQDt#JAbb~lVKeZ-TR&zahyEe?fsR$ zrxwTC#@55_B24`{~QVw2Z2bx`-=F)Mr8T_Y=jjR-Rj58leoO%Uw!MNvIM? zi0~X`>2mVvgchTIhP8E7!_n@cWcIweyi=@h-r}UWe+kJfBnlW16ugkyIt)YJ!B?o) zKOZjd5Zum2o)Wn|s|gsnBMy8ZLOUOnmAx^j{4 zt3>Q;fBwgu^uCw#|}9^FiwVbUPKGw?;A?Ad0QG2P;HzQ z69RUx7fa2FRNQx2aig|!jl9aS|9%VFT+N-Ce~D~-3UzyUiSzl0d>Bp}+q#ywxo9x!M< zuj+2@{ppHAuma>3IdLx`HZ()-0S|Pwg1&wT_WYjLGY$!2^SM~4ZXIl)%@Z@neHkq+ z=mo_J9vf#H>(TZ>UH;^A?D*NS8*F}ie}CE^Ud9@;-*n(H7jE-wV+~btnqzw}RUY}a zUe4zJhwtftqdnC?*YUm0_hVgBpK-+HGsPFaYI_33v#$_qk_Gd64e7R0H`3$kTlfrq~4_*($f()sa_;?~t_njK@*3n=S1g0htTc8tZ=|f5Oc# zX6w%6hg+fDj_b-4LGH>>H&24JpDQwT++Kcom%^lO)~tBj`nKjxaH?ik)be&IFXPCs zoe4JReb8Jo`9kKZarViZlp(!wvs&drtwSsxv^a0X>F&A_r|T-j*Ujg{l#X?_;>ag( z?=Js{xAkK#CjKKnVe1Bm^Vs`ee{#Xw@;cb7mpDZ(t%IGF%osBX&3o`*$sY)0v8&W$ zsyYH@#)zI4MA`g(djK~}6Gy)%A&6(h6WILb&{@3>$=TXI;-N?R znZWkMnO7fbQI6NH;{AcYp+C>hlvmP-HK~FmR%kPbHR7Q;aO657e~^fIvXmxE0N4H8 zg=|%9Tj`m?EUID-D)WP-%t47cJj}3;>*e*PNm!PP2WXfNt+!~WORn2IXEU>4**jHO zr{I#6)`VWJiv?*o?aA}HE(s{aQ7k8S=$3U3#G3e)7jxhA-pJRGo%nx`vAqKy_D?Rx zUO(tWd=+GAKb!L^ma;PIb=Ez*N+raZUJ}*4#ipf@6r3)ui)gQN?|-Ga6x;4K0HvQZ zW&JsCGI@swLwX>@`g3V_RzL9j_>3{0DIIH}#49l;Oy2%Cp7o;-IQ-NSj2v@b=YbQj zR1Nu(S6ns7uQ%{Y-qR{e?rRY$kq<)2h-tMdI7y>aX_ho0C1vitz?3=JmutL$^+n(f zJo9^*P$d}C++(U+QGeaUWaw_vrSU+|d`iteO2rTkCUI36c*(5xYH3I^TVVgA#ms@n z68FvA=1a6X@Pbb1v3|hD36{nFkfX%DJVBL>oF3!6|Lk@B8CL$c9Eq)^zdG~DelYe! zK|;*=LX#km{>Z6cQc`K3_r(KKBH&y7>>3lyy==vsHh5klGJkpYAd!5(FRq!=E~)gh zr>E;cWw$DEpIP-3rn>Q}5En@-G6z-efF>|V0c$#1E zx;)3liD2J8oN#d?j!RDJ&hwCu%lI#U;NNWgRZJ$BXTkmG0cZoUhVXMQJ_4*g82O@y z_;)UagY9&-UaBKD*zzgt$GB{Hed1IcZDX=PJnyo23V)1<=QP+vWOh#m^BAjkS8gva z;*@V?Ei<_!i2s{QxMz^e3fhY{qBD$%x)J+)@p|e$^2c{qPoL+0XX3J&sHyuB?(Z%a zrX!&8Abl(oa&7Js(n^(O2RwX1mYC4j$e;>HS!_7A3Ur|58TKHobh=M`3%={tnC>0!F!uW zI^#t?*k^ymC6BxT>m{p_CjzEsiL8WbpJduSMt`aL3`n>1lnDK0=Eufyp-S=KZkOn8 z!3nInK`g(dZklZZI(3LP?ZzB`;#bmGPE$N2dP_xC$avp!&_EskFN_*Fn zWx3`_xn{s0ML+%W^rh>ohO)F_2IQMadFNl+_~SO{Rp2Iqekqq?Je#KNV`v%%O8HQ) z9};aCyjBADb=TUQin+?xeI!VY$uFnI?=zoj9)d3BwC5=S%>t(@xSk48DSxt5JeRcF zio_reDnra1>ISL;0m)9>%?(E-n`i;# zKJlrIeo>TAIHIVbMGv9>R+=QXEmB~{?8*Psn|wYgOOTj9bq5<{`_`UZ6*xu>EM zPLZ=pClm`~YW3ni>VGGyy^L&2C)ZN~reCT2<;^u&;awBt$(_S<9!A^l%fE`b7@9^n z<2h3PZ#yFm@cKgs->`L)dl_ zb2!gCo_h=`S5GA|ofcWOr<#Aj8wT*oy)b3srn8usLby$N&wtV1&89>&8FJOe>6M-n z!Xu`;1*q~q-wjW!e6P-8bH_FKFimo`_I8EiktwOCZRVTeb^%$MFRQ@4xx1ea&8w^QN3btG z!=#AljU2p&n#hU#L@0x?r@s&iH13up%|;Z(M_4H6Li-`J|rqWLdsozLi18=u&2)pL*>( zxGs?~`B20kfRdz(g?g_8S7$DAqp?DO$pm+J;vsLe&VN=cvHT9Ceb+AQ#SssD1-e5V zqmR!`$LA*UJWl$jZTmlBwtjH1C+@TZmeVi1q+PN;yI3VWWp9`_lp-L1$7Fd+wGnA! z5R(-Rvky1PK9(dCzMgVI75N&B4zlhxyq-OgSHW3Q%4h}uz!@R|NK|8Psn|Ua(@ug; zHoV#Jx_`q&rcaY%XX{m1GzoO@j&{Wy4(E^TJ9Vg68P00PQCH@p&OG2C#`Sm{s~?+6 zm&fz#ey~LtqXR_fK zm+!m#XkUCi@1d1>Q8GOZm|oxJbS6Q2zn0tSl7D;oJSE6SR3@$#3DZ~a)@M!QgHnZ|(C!_$jL(S_x{JL8d0!UKL>G)}L7!fm zK!2oV?ds_R`oFG1zO1a~JuK*@c=ppKAo0}2eFgcOo?aV2sMVS)ZRZ2|KJHD+`L=s! z{p{+#`nUok60Aq83Z~6it25426S`OK zoaotp0bE3&m%-3>Ov#37(?!S`r=`nj@f`8ly_?RSIAD!0>OL{nHEuI%6iL9#FrV|B zQ^)eA#{C(-VklqExWUiu;r$_weD7+EBbKj6f9q`j_)j&gxPN$noT&Qb-+!PFb2)xD zbyV+^ZjXzACtYL3i%nBfQ7+2)A#E% zQ#Wy?3GxNQ(vI~Q+xe*{KicX)V@}5!oFQSy7Mlr&tzwQ5MHD zTO@t#!YY5LOhW89%GjlN4$0JAfrr(n@pMKzC_K>jU!b};Xr`&>zN}rnel8G0YSL?q_j5{zCv7j6N$Mt?!^Pz8 zX1@lq8SbDWsh_VO<#sCI<7269^Wdo_Q)*a*;d3rfu z|8h!<`v$7xE-5092#?aB5NRtgcM2xqg>J}=dcK%LcE?RQ3F_=CO@M)!&WSyD?sBfS z`WJoGUvXA(s=7oN#MEW!1J;+TGWQT)l|t!P7a}EX2u*oQfq(m(USjM-`}#3nCT5*g zMY-X~MTd2U0LdXKBeT7r){jRpx7cua^{Ye|j1tzJTgB zO=Ve&H1=H8CV!lkI!KEb_hI@zr`LD};hB&2%P`}HRJEZGW$RX5$dt}#pGkYz0M=-! zj0{fuH6NzM+~G~g#OS*zXpyJjS)<(9xa>1WoYwx|d8&TSygmAIa*XvH&jUQ*LdCQV zqrR+|add$AC{Qk#;x(+hmw?HOXmH;aTC{*D(zX4UR|a9EhUZuQ#=qe zTYzb8-Pme*S1hQxC5rp37l_A<3NQh!q&U?(EiI1@9?OE?KxOEf3m1K~#8Q+R$0&>B0QTju-_JGex zY8+Te$~|){_Ci2gZi1vpMTKYml!MTk)OzZ1s(&NTL6@aqoOX_<@LUDqM1);DL3Xq3 z&Dz4(to;M9(X__=&G^rr`dNT*@Ndbqf#W}}Aw`@NM;vpY;7+^WP&!nqa06e4t<@qS zyx>crZK5ah9bsVqT3u~eCFZYs_QYnTYCNcflrF8Aj{BQs(jbqU%2gTHLaeAI}wHKx6)gh^{#I{@5MY!`%mJ{x^k85 zU;UMOT$5|%YyI?+%)tLO(@9Og#gbRoLny?cwr#Cc;5p58n%xDP_Qj)X6MST^5qufO9tIAMh?N8q+%*tMUKbMwbORFjJJ5n{G~}?; z;3|IyRFJ#LWDT1s2<0;%8M#z&34bf(j}SBb{td=uJ2SqglKj%JghxEj#*QalWB(lv z5~5q|&!>h4z6ywy9S_C;eqn!n2sc19T>~H=Gh`3~K7Xv?9&`cJcr&1iI2tyvDIfyB zKRnMCRstjh0F2Ybz&GqVaE<>3)FGePpQj9j7QcYtegyZh>cBVR7U$zrfmVVFpFf=A z^OYLb(00I0@C7iVOM~N~AT)9|exEQ{7Z8Gr$aJD{{H#1i@nk4qr5EAk?sbwFwHZJE z^B;FZy4JUfF zLOF~zM)vs--DgM2u2Vb@hb!aJp@%B_h&Hf}BF@I^ZPzI^ zULS^@B=&!L8AaS1uRC3*N_*X}hn^C9{W*$wV6W4z!*M>&e+O%wUM7VY;Hv}o5%&*V zw-Gg4l$qnYbYDo6yLv0=F9Y}7>X>K)pAnV1Bc!%tdJ8WF$_te``T$Z^T_y77v!l>K z1dH&N12lQIiXat^(8&hoI35hw)C-})qc}E~9?^dU2nW@KVG;3+(l1jF-A3&2UcBoR zXzZV;haOVE{%xa(Q$ewRs_T@r*MoHE3ANW>qKLcWbsXQqUJvQelfhnpiy|(@>o~rR zy&ly@FCriXy)w~YbP+*OP=b3CDfry(y}0%|I(^5zRMNpj!%8kNmtvpgMkiaAOCCnm>L~##(u|E)(`Eo{9pPpH;{h)hklONfjk_HMZy=goOVk^ z+_bHt4O~SOW`dXhx&IM>y5gu`t|L0VU(kOmGp9x`aGd|+K7la680U%=d;Qt526ZU7 zb*7`5_;_NU`aqrZ64p(*O@_Dws_7hnRxmxl3_g+pYsxb;Rzs-0q3l_?=D} zP#+Mf;d58l)2*N|tGD=j-9NvR!dEiHzPLi$qS&erE`f)JI`TtAShT8fUrkn#|L>+d=jGB5w{R3=fOa|GPF57#|q_l&Hk!yuV6bq$J~$xbvys) zAVYnPb_M;L%n-V+MmvWz;uaYGMcaSL8)UWm?RhHHMYAXZa6jN4bqlULk`_8T&gb5s z+}xeF>7d=J8Y7v-Ho3)Ti>o=kGK+dZSvBqu(+=bg_my$3JOgf0L^aIN*2K?$uHo2b zJKVF5ZArZ{B+Y|yUurBMrMQyYqip^AI+tH!0KE|H6!-mT1C!sM`;7^DbC`d7g0_lY zfBbC><&O+<=upQeR?Z)c>%>JyKGRjM^}=nwx+M@7^&jW|Y*&4%f3_3<>3&;5Z#+9T zA8@ZOtJJ$B>lKCBD*e?<;2G*yH4J86cYn53-evHQwh0}~eE15nKX$#eI5z*SKVOD4 z@~dO0!07py;Q56`Z&AC|ED#Kdgou) zid`Q9EI`g|dI`+OEU1SH^Z5+YVgY?w$^N?EBcx!FB+dZTA#KSf_AkHw*&+ zaq)FQ@wGd?PKN>1;qw|_A3h)33w*sE+mElu_T%fZ{rGyo=Kx4>9eW!4MS$BeeY4L2 zgk!sh8vDf$?Ygo5nQhmN{daA<&Dei3wwsRqAKQJS3@vE)qc?wiK>KYL-V(s?mMO}e z`RF*u_QSYF@jG8e;XL^g?cr1VN3@BTr}meJ_T;C1f%m@)hacVXXl2{^=(yDbQ~ONm zmr*D;elxbeB^28}p-s6wwY`lZPJY@Ne3nz&XZ!5a^EGo}?_GZxW&4W08eD|8gkXD3 z$Mzi;AJK*qd+&c^TOsh?KW*`um#4Pe-gh_~N0H&ZC&zKA@VVLSFfP;|+aJb6XuS8S zZH>>pJhct&bDY|CU8lRz?~K6nZUqM&yBM3nJ!_STj}EF`2eQfh`p0%m9l%)CfH4on z$#(Z*;JD{q(;ncFc6%s6!+WQ|S0C9HO+L`=@%<;M8t;GbK2RTBFa`nz(BZuY_(W|! zhA}ln;YWk{a7ZHFKv5;Qf7Wh&gHgUUPx}x&k>&N20u`(1(AI4>U0Rabb&D@u<6`DY zRNMUUJ>bb2V>qi#wxfh&{`&n3{7JO|zTI#NJl&I(@_;A&e~adk0$h#J;08K2xl<8= zhsosbdDeeIh4I!1@LW=(yxYkH>xo2;-&*1Kr~*721?xCIwReNp7usH@pU8joQIN0i z$tB_e?D?GIzW{uJeI;b1=&51E4UF$?DEsWX#!z_i^WG4OP>|z$0#GA(`1D>83h7UK zqJO{6RDi+RpZ6LZm>E!b-mVYuq{skX18YNCF{?8gU7SaV55nHyd@ih z&r^TG`tbXa9{V}BztiFOGQ`=efII_t@Vx_`hDiP6z7alOj^pEMxHlxm_m9X|Lmf!% z_c&~TykUQHW*zP#q3k`4of5^pOp6dh0vA}Wk{@Fg#_&fsE1}<`m@h9KF=k-U&HvER-XPJy56g2VlCJb`YULjW@M0ay7Pi_JLmXi^)p+56x7`u~!v|G(z_*FM63*8kW4K5&!YK|9^ZO^S2Jyas7AvXI<#u_<#N8Z`{AQ?7w)oe}4xt z{LFCmzjjhE?EUv$`{Vb1|Jdn2c9j3)KlTliBUp#F3pMIAA z@r@kCX!qZE{cqgzXO{)Tet&lRuRq-XPy4tSfnuDR)&FnajA0lH;Qt4I{QrOb96JA# zul|Px6YjrO^Zq|u#@{9`{iC}5Pjt_4fA!0l>5ptvZDwV-zgKt6@rOsd|NK{A^gqAn z?Z5a4gyD0>WElKUALbt)%tL?swfLt$=l%;K=oz zFGYV~LYqzh>K8=}Tl|x+dkcTUKlw$q@cXA$)IWVRca~@V`s!gUe+xF_2&_;G9^=@o zXbU0ZBE%P+HzfUk$dBQFbmjYx{{4I1@#N3A|F8Iy-yaWa{kQ*r zshYPjJ^t%|IkvMr-lhC6|Ly9o7$)>i-h zpKJT^4&{GLf0GRVp1I4mZjS#TA?UyFTmB#CHvHEjJ<|X9y9nNMRU!X9OZSq%{^tUs zCH?;*XE^|4n1A?XCgy5};ppoBn2EnB>aP53`Cu&wNRY z<4bx}HUv~rwMssG+K5h%@f9*!#-8vsx{ZJ{>9CK1ZCS{91eW~e8Q1v{u7z78net~u zol(UX?q4SEqy9QZoF^Fn8X*N<2Q7_lyzbMSfxmvYb`uWA0e>q_Wv!cjGj$x#ne6uw z8_l$E1;4;aU3yJiW2T8ON#eNJ=w!cfk=R@gY&a{ZF4#Jb2qWH?G;pg02g51?w_W6~ z0(BzOwU5?k%(AA6xXKhgtg&=Ucds%3g0J(Uz-vOc`39>KZSDXCEy@DYXroD@h1Kc0 z&hs?G5g{QYWq*vd`{ipS29XIlJLh%D1qSPlneNYKhte6AS1x~ z>=M0~>VC;ggvXhS>t4z`t6?N~e7doJWs^lHEesG}_iFq5Qxu zi)o6Veqzh^qq{J>V&`^w_EL!Hf-TLtzy)hh`t@k2#A(-biMniB)qiofcoq(I0)KCN zbl}BZT4zykdL?eifk@yJKFTlYqwzaKXpI8~(*uH8cyu$8Uh53tJeUw&y{&Pgs?(M3I(?{+*Cnf2(3c=R7`%(X7K zDj8}yG|U@QC5&9+?a218usJk^MjMjG^0La(5d5R_fyTT5{rdbT$D|$mdTGFazL$p? z4t;;B%(8dCxtfyrs*&%P6X_jsxp(+eXg&Tig_%K@s&lqy0)EhN#(C}p74=AqCul@)o zwy8Z4k|lb^afs#K&Hc(fBMg^r zA*v{wNk!B>FS+o|it~IF?#4Pb+z(a0r+Bh(_S0a>-)m{UZeBztKi9U!cHeEU?w)_u z>qAV;vHbQ1+^!@3blpbadbt%~E*xZBi7S4Q&2A8@=3<#%FOVM>d!kgIHd;5Tdg!Lf z#~7;}xLsHL;(j%wRNcyEX0D(ttyju8ptRym29AcQ2U=@ogW)rH!o|g!`prf>jW1_#-|$O*1x#2&yZV}QQ`4k(O}oZzr2+8* zo3mR(^4_;w@SOMFF2m9nEo5(EaW~Z?dN1IVt36&Rf7bTG@)U|BeG7%;MCos${Ek}A5-+T=El<*-^{oE#TB+0sSzu6(sMx%c{g6IWh#DERA zou`igsUE-GfomRIbG5obUqWiu~I$-5Bo9FPP7$kbM9-P9J#G?aAQ@0m-OOgkPy&kuXiwiGg zBF7!L+~lg--nPB#=2ZI~Z$EH+)pnc}1A@(om9d(%rg#t)Fr9z)ZZ))63&Sv8bOYmR z9z`<+gb0^Pj$f~CX2CNOMF))5?msV^AG>)>okjSCf3Oh(A(fxS`D~T4R?PlHN@^uv zIqjcu;}x}T9~fn!lj1#&PsMc++DVeW7?jC5fH%!q=ARSQkLz5W(V za%UvRAF@%w^e}%|UT*YqxW#`=eK=L}UQrtiFqPm&$_31V)EmsHX;mJER-fCQ3uCIU*VSax->JZ2;>vfRTS65*#fl=RS z#qB4z={*OiyB2jAY{)s70uIDCfu(h&yMbC~>C9-AbhKZORb&d1IMJ+B`q49#P1ABv z6VRCEo~b%xEX$;-nAtn?*mF#9)ujaZ9o%Seej#9kuGiy%26!kD#N@p93bC_W((4?J zTwGDm4=;ao!`Ud^ZAaeUWs?&1JD*k^&3xuz*k|Wz0T>sj^6_{LpdkjD$r|*n zZSvu^ZOip$IYmB)>2%^;{byrq^87+EV{;?rZc=}PzTvO~Ryv-T$iwQWnLC=wA6L{v ziY+R}uUmzz^Vs>gkqK2sRk6<(W&{qXj*L$1w6o}ugn2@$VG@xPe$8*tG$jm@qYWGD zz@MHI7CcuN{LyP&i-S$KPlPGUG)qD$WZYGfsTM$Xes(E@oP+Z8KRwT?^H%R#&63~L ze;9v9?(JOt#9t{0D%6s|>C$X@vKb$X%df0W&-IVUgf@lA5h_ecM_Hhr8-E+n^CD?m zm3%yBkM~`0XUL%cXfpzF3lFc+qrg7h2(aeZFY)XiJ4K&1;slONx3qYHMCaYhdpc_N z)grBzKWC%7;+FdkXulV+bvQyxab5wC=!SoL!S`FN^lL8$)xq2gUGMJn*A;c>R(12$j76@QRpzCs>mCM#Tdj z3X+ff?BX!>GI0f3I+X4dKa<;ac4DbkYPM?V7~tE}LngL3Na#FAavmgp-+EyzEIevl-4MI6zRt}M32tbJf%!aBY8H|QF{2T ztgla^OvUuNl}_B`&#w@dMa z@YBS9l?VwmmE+H}mogL?jZYt}yyw`Fz(C%T@1vYzbGLsIfEFAE zUZe`XqRk=?p^=~VMjP|#Emp_cAdDx1Mr@zG6*(8tuRw5}1QzPoXHH@i`Pprr9mmm}=2kwhoaE zVJ3RxHmWMaR`t@t3U4fK&}4sceaY3t0ubIe@|Kj3L8$ocV-I=l6mwL~)0Csm*~F~K zYyl>v^4l1@hGYC%*tKxhT1oMI9XF>DdjUUVBFlAF5LZMnszH7^KQlV9%+^MxPKUdi z0Oyj`*<3+oJu(BFTRAdB9ATcer{mD0zyVzla{(+IJ1!pq1{I9~2(Nz^&B1doh3jzk z(-wL>RcP4Qv_?sw5PezLtjo_{*&0LbSbOmq4l%{N;i4>Sr7C&QCnOW|Tr&2$alY&# zfk9VHd?0edR66ddVrvrI;9I4wP*IwvY%+^8bd+=R+_~7FB&LxjNwah*0IsKo=}Xqu|9@%4jf)`Cwn3Ozu2p@Z>1Pcj}?SN8$tQ1yrd^cX!# zg~8_JcGisXp}2NM8sjlDe9tZR8`{r)#cbJpn%0mvpte7GwQstqc3qF-sKHPj+--lK z$l5uG!ySSIk$Qp1V;J&Z9QCrEFnEfmz)4nB02?pyJa;DySn+>4zrGv%A>WC{M9-XB zTo0n`gcDkQ{0_9FF_eTM%YM;0w$++dWzRKOCyq*LbUuAiePY(Bu`z%O&+M69s79op zvnl1!l>pmzHJMv0KltrxGp#PW6HYLJ(FM6%8f@KHg?7ReO@0|wYCIh&diHE}t$b$2 z7guW?^2e*Fd}Xml1*EW1=qJVGx%X-fQzCf$MZfO#~@^Xjf+-+HXqS( z?s0p$R=TU&3YnjyRvm(K-c?wG{eJE;4U<*%+BNIqRG@!I|7j0t*WVO#%aV875*@R; z$*^JNAHU%Mxr@!q>F`?2I-)40@{s84)yIR$F z*UujE7o?y-(e04lO_t))0w4}EIio+@Bd!2jJv**nM<2O?cG<72kutsQ)fdlFK@$Rj znDeTnvm1Z10(M&owh{#nrr&G-@^qi{$+lmOMUgT0mc9VliJTs4mu($)gYH?E*0hxt zO#+-K2qd_J6{3*LkAoF_;oMaMe!au^^#_lpW|mHqZp<|bt+*9Uj*0;$IYj}1(+XDp zb19BI_bNkm$&UoZ$)bf5u-${PP<5Gl`eBmUuBCryJ>+OoCV;_79>U={>$PoP&Sbz~ zZbE&-LyyA?3GXH|9ajPwXz|w%GALV>lQ!0T`KK()N9O|h!X>BvbrNp^ZCbnd?SchzlFjUn{) z9WsC*j)ER#i(xmUbW6{Zjo4%rq~LjW!H<95sr#_Wq}Ah_SaY+xpB;#P$6<~Wo=H9! z9^JUop6tu?&yFAG&1e`Skv=|jyy`BU`Vsh^?zn!RONjtTKOh*;)Qi?yS5=gl=Un-S zprj}n^^SJzjrmkiY*eEIt66wDctPO|E=^?xJaB6MdJL(W$s7pUyk96Us620>BjSH& zw0xO6YrL!Qj7_{p*M#e0XP=eU14wS};yk?e&#&9I>dY%nZxug=56Aw>4Q^bp>2 znf?B_Cp9*{EhymBkvA_?1hMJr+pmI%_~QVHN> z6|OTJES4^y!e6pePh-Xw$GNuiKr{Il6YnT;_X^$EK-yb5oRnPo`;c3JLfU_-1uPE+ zz0#ssJ$qITg7Vxwz1c%8;$z<=Wo@i0G|bLq2BwOD&ASc{dg3K#&%9(_0qiDxFu80> zgS?+cX?^eDJUiKNwoO(bWSNYt@gR(2;Puoh*ir2d_J|K-V1f;}qS1j}u+F36xNWWb z8^aLJi%>?bXK=9hJ7S#isvdtpT*sv&+}i^XUe%FjKv;C-)>Zp6HTegTaH#MRn`Bs%ZTmZ2&~*SmorWE@3I;7Pnop__CrthL zNHb;ROp;P1d4k=r8r}o^EImN;*3>|F7^OhAcEmJ)#4o*e0)k#%Lv8zFkpNn3NAJ&0PItCR8ri z=2V&Ub_>a(MRVo^{tz0<@H1XZ(Y|q^SytFz1D(ba2oX~Ti~cdUe#(eUMrnTImx;r9 z(iYyi>F+J@lH-yPxFOlSY<_h)Z=Bdhy`eV;2k`N0NZn`?oa_@0ac`0`MNgr~4QjiV zZh=x%r(Zpwr3!yB8@0LK@mB1GAW=F0q9+a@jUjyUf8#E54Pz8Fq^frq~p&%){)JVFzO3A^F zwhGmGJ9-S?FPJw4c-`Cbah5(xQy z5v5^B!t2rifFoXzHEtsKTMTZ$m+U~bgsHgNmecoA5pqIw?SKtg@FgY-JMpzmU6bzU3bfLcQ zFSv8jx_SP1+9a;1Hvk4g3_x_IxHYo!qh=y0m@R)3fLro9c3I@5th>bJQG0c#H6HLl z4diM1h7yR2`Z(@Mj=$O*!8UOk!0*+cZcmG9&Hs*agbYWMiB zT(`9>52QSbu`E~35xqyzV#+GMmS6Xm|Ai8gpFYZy?2lg1r4mzVF|+))cCY#DJcz!- zwr_tC^j5H{>e~lo__OPBxN^`}O^WI(5ui7-U|-!#-0R4o{TNild!1D^B-SSt#@ls* zQa$0e6Y$n3n1B1g&xf)}X*?T}$!~ys69O2R#B&S!+^DbsFR18%N zcz%Lf?N8?vv|?}x zfD&_`q@O23D`A0!Ky(o$fYPY`yPzIg-FZ@1AusO~NfpWHuv@xT2`Fc`2 z5gev}_kIT}gHFXBntu1%^;}X)EhLp$zh~MUi`;_fC7m85-nUzK@52aY4M~4m=owRC zo0sD?KSjF7p5}W=GMVJE?a4=)6lr`lgeoMU*liC1NyolE4^mhOjTLgZB)x_7cAX53 zoX}Ug1EC^XyPi8(i{>WKg7%w-aR)Dsbk7D^JMPJp31M$xDA0k$OOHG_Bs-E5r8FoE z4hhdM*?K=m^L^sg;^))(FOGkj=TONkx2+%Dfd1rx=-A=(JNvg}hh~%rN|N*m1XvjA zSu6^EnNI>MLKaJ=64qjmyQ_srnB81foRBTXepFY-X`|2j_e_xA(TsWo3PFYdHw7M%O(FlK%bX-Q4hlfI{ z1Tx_q^(d{z!>NPBW!x z4){KId;TPP28KC4vwnXtcysenzpfY{B=Qi`CVo~IwS+~AD1CxHST^MAFDYZ$dI0Fz zz7b3UJAXIWG)3j(%_Xp`q45V)B4(2JYGxQ14=nNyfJ!YT&|KUfyUO1I1T*JJp>RFQ zsW7UU^T-z_!7XOKwPkswW-Uqyfkk^0W{=NK`Cd}U&-P(7b zV*u3|lIENpLZfM=%&vPTjQSgj;E8|Cd1ZicJ6Yw~~@hUPy%m=G7@0P>O%w%M)pE^2p*RD8Z0bpf^=M zM$%I?^+e&KLgwpr){%5UZNqu=%$PtyQP2v?pOSkR%7A0~r>4`y!6vL(uzIiXu#WjQ zP!keO=x4%m@nHehIo-ZU>H7o4^4s%d{BGjz{F5-i4sRJY0h&%ML{>a&9J)a$i{3zG zX#)MxpbLLH8BVx2>SpJZ!dd(SH6>tFB`>&H+k`jhCNf?VJXIb3L^SaHV%5_sx|FuB zmXihRC>TG;*sgwB3$~2FN<5U4-Vl~$1Oy|M0+}wjFoGn%Ok^{b0)kl@?Z+s%KptK^ zE21e-;xSfp+r%mTzJmgo!{P@~!*vnT?^-NgSX6)J2d;SDPeI%XZUgFeL!LRLJ6h^m zGy(v|liHGfY4>)6K>G_oukevAxKt%3k|P*Wif(^fCQ`rCxSUEE1a#3YqIoXO8D?_imWa-2A_;ot2V^Ly|EkNx>In=U zoPLsrpZ3z2P!QVd=8T;ZQcWq)4qR# ze~@8f<**Sl6+TH3&H!1p5X&P%vido{FlHXG(XXnX)a#wQEft!$KQIfe1s!pHlGPq5 z%fu`zxeaP_Jb3a+q3^B60%;4%Z%65d_qCd~Xt>A#H1UCz8#hKAb0_bBX?Cgu+`%zv za8q?hfbm2331&Xr5)7?0e@r&nFSma@(@{+EIrei*-?Y3BEZQu5%LpBPJgg6N{5nzz zKiU&In}#hF?Qt9c?Z>YzpjgT18vAM0rZFD^;1I$h1_WTMdY>@LYM}FHOpa)B-rjJK zr97UmAjw3J#o#iihi1-O;f?>AcZj5(?q)cVVQQk~Lsu z50mx54ZgHwHHxuLc||aIV?H#>m%=&S9fdIlNojwS4}}3Xf0)&QrAR^m9#%pqzw4q) z?7yXLo!SFEP4FicdZ$t-gRl&RU`l)p$;iSn*+jG4&z|)@xEDO19KRrBYC9xPJa>yd zq-+NyQ6zKb$=R=nqcB(dCbWORJtbf^i4x_AyvM1tX|g(irN-#mfA>OX_@(bJ5r2XM z5XGfTbI5yvaZCI~kxD_yF$HXLvV_7(b=Z z*a!4rkyHIG{IN6X=|AJA<0*7!6#-!xVVi|w3tk>#RJo(xeKl#}Sr&iQ1bH6MbexX} z!kG8^JP)RbwEZK;o$xnQ=_VP)Q>S0(ZHYD%1?5?qz9kBjU=BiWG|s&OyxWIrg!Q8G zKnJH78si5ZmEVANdDrF+S^}BIOBWz}da!DMhKQ8s z1~=x3es3)H2+ru94h?^+H5nvzEbg3*|MtGNJFB+bt1WP;VJdjYM!%!iWNVGKa3@u@ z232TzK#uq&jdQ?Bl&B937KC#mb8HfApXXvaS5b$*$1x1E(4=CK}=zaSK%so(vWM>6lX^L|WU;6AeDG-_*G@}G}}o<3(j zl@s$kC{WIpK1EAS>lTUxgW5g5O$S#=@<9F$Od>Sb(3gG5J_352D<-(-BX!6K>YGxt zn#H^TML@d0q8Hg#34|qoVP!msIY%v77UfTAFkC&YoK?^!{$$R5XVorjCWM<9a&Pn78w{;-Mms@2SFnVc{ z6;fS2$An@Eup8G_#})ZG0)$2xgCcjd~?e>fKC`Sey+O5JVR2-)>aj5rIw=jh*ov2C?&|G zfE>q4X0W0lm@T{d;5oIG1NZH4ey-|F%6o0krWvx%ZY6(VZ&vqR)2omeP*MWKN6YZG z9gBL~(^(tJb3Mle#d4`@9zQOQY>AAn!SicZG+OvzYN7;CEKB~REBzDdk1>hsQ#wB? zU8o4x`sY%3_`FKf5Y%%mMMmhfW`tMIG*9c8Cj>9mpkSI4x)`@Q8LnG$?a8cj+jqg~ zw8Zo6o>YIPqP7GRz#cnt+UYYhGQ^j=eCnoz0~aXt?8hc>!53@pRDdJOLX~; z73;-`yKpuoa?Ml^Yn``snNzRx$O7<8Woo#qzJCmcb60Qbv0{?JtJGw^C|NIaWE7;F zjF$^DKZPhvkDoe!)!`R?K8M&DW{P?8Mb8tKa5I0QP8d;FM$M_~gA@we}dUt8_32_?CGg4flJ z-j@8(DSsW^@3~w1t}BdG!nf_gGPH3EA|_C^^fKuXPu!|NN>&t($126jX3XMJ!ObhY zSjvA)lC$;vmv9T}E9Zn^_yaToUacZ4ZdXIM@98%KEg%yWXCyKHe&DoEiL7~cLlBjaUJnR$u zRO8#@@w|lnEfZa?V76fBF3|PGej+xr<^VlDe!MT)-qrJ_LUOGf;S|}K28w@eOU#tC zL+yjUs&Gyx`D!1*0Y;5K;LJP~Wpk75v^aWJ8s~5>s$dq&E#V@oE@wn>zd%JyOQ_SP8( zoT&?(WTn8s2)G&)?3yxrlt)M`A58^kn(!bD`N+$*ZwxU`TnLA)qjw*sWto>R@d@?A zkQN48?<9Y@tZp%BL6K>L0L3=w4nyiGx%=RXPlWP+()AuY(}H1Q?7f%1K@K5*86GcC zoUmWi1;95q-Ly%==mI8Sv2qC__n4)(!ozq%YB|c@D^Dmo!6^HkPE@yrLT!wea5bY}Je$%M zNNj)ZM+wPjb93lV*)+M2E1E=qj|Su$o>RC=a5zNeWu?(6j{#JM`+fe_Z~X6b0E4$3 zOeUkwD)P;nz~+Tiw7{2mN^Enkuq)Z4nDbfzyG9%p1iB&$thzx)u*(2s>6rmZ& zQdIerJNKp5`sYOzH#gu-ssqgi0{KlT1<+Nxwe{I3nf{5Imp6y)J!6{Wkz}nlW%N_n;Z`38(VBL zK~v~Yv0;Y)tQ-Ft4^d<2QrS+0R33ka{|&?b6X;_`(vfs6m2H9ko9{oA1OIL9a8l&K z)8?O0jbXUK_>-=oeE$)4Ij52MXK&iFe{1`jk3o{ROLhB$HyK(zw;LKiw40SU6E%j`KYP@5|yv6DhOg#v}o0g#ec=@ zzT0RN*HP5AP}%Esy2@bP8xY)>xfa2q2w2yFWLR^#VIekuq%^Xu$$r%S8@OT*G+Y@8 zvs#bzrr=ds2qnUwCv3f5^kq!N7NeW3ym{Ksy<~&qC-&i5P8Vhwm>{XsbkdTp*kYmv z=Stgs5rh9xy5cUQ1y6<|vOQQs`_eRvcQFpATFyKesTgX3(PdH4yW1De$8Y0G&p9ns zgNZ#<7cdWhu6OB~kGUtTQOBZf#e@(X0GuB75M{~bsrN`Uhk06)_mqZ(h-&CH^EVFM zNqPH%Y%!M(C)iWX;LJhH-dVBWXgzKa(YG;~WzO!dD4X6!K^({-@Lc>eJ49jWMcu;j zdn2h}q&?tL>Rpd;275UPV6GV>GKH4%r!H`hJZ`0bcOMXYV{5yqA|_u7Vyzl)|zvKPg#ZwyXsd+~B`R*xjHA~>s-?EClW=bQ)v20lC{qX3P93bg1nEb_-vFGX1YFa;r7 zz;6AXmCsJw(oQWET6Tq8{%)uQx<;UXRWt<-R6`JZ_lJF+vR|z48x|Buw2mJRm!_EOw8!JT?a?p`~h3=iN zH#aCHa-=U|7VUDF!O0E_ACACEI@X65GAvd|a0y@CrZN8V%jk6Ze4d-#JkTWu>#sx) z?EXfj;4;I&eAeCw5Pe$!VTTXxsIolg8gY!nm#C#seJrw_l%A;MESWntbdFu+-^;RW z#6_p42iguS=mT8Q!u5~mYYTyYofC1U%%m3r;wLV?ae0tma^WNHyQNy}*AQfgilPTo z3}(*yrSzrU6IU#CnEyF1V4?kwc<~*})JEb_e4^$L!D%cC1$MR+)4!}jzSW4p=g1Un z9o1Vc0%3$du?xVkvt);i%?Qez*~%gMk9q>55I9M34~$Dlz9U`PcxpR;c9^LWDX`X5 zvTgwXBQWei*zB8Vb+ei!OvF25xie}DJfX%t-ouPWBChbf@a>H!oYloSg};{{pR78$ z3W70}vq%}d5x%wXu7YAoAXj6E>6O-2koYYsF$UvTUQQM$?I?W|ls#rf-ohEZ447>& zra%$pO*D*M4EU@8(;z>8%{i4sqW~pR)?%*;T7L6T?YYK`hP`ko1GFG7g==%5Y3Caq zVASbLw}1!U(esb`#YH!N);53FMcT`!T`vKX=00vHP-Zwt_W-N;@&2l3fa3uve$h^s zz?kGkt-%?WbhCYXtuQKmz*yDNU*1V0xs^~*aMoX-E3{Bq)iTn5c2GY7#!T7-bqV{h z!7^nEHq5bT;7#{?C4oA8S*Z1VVi_vH=!O+F$hrrhZsbEeo!C<>( z--UX@GP`R}#33@Y0iY(Xhu?;BAj|{^kh0K?5GPjWOjxt2`xse$lJ?YISrxHKu z(~Gt-{Dp{1j{XI&yihVMGrSbQSv~DUZHqcE06Ys+R5&doJdd^$aVJ@l8-`oj_G0gq1N1l)Z+|%eAW>m!^IG z3j}y7fjq3?8I9FY;gg{R6U(XA$g$cm)3z|EaA2)MV%Sx84y8ztio~3g{N{ohUu#l3%5rl=y&;r#Sbi{mrT;i06P48)}K((QpSW|->_Pwes z&14|+A2R&Gu#HpuWLrHLvUIT!GT~)6wir6s@j6YEc8t?|!!gGQ0?nL-@5DW$31{!9 zBeDswxpx#Xr@O*Qm;wy)OvBpWeQPUHiY^al4#8@t)-3a|q7r&xEUme>lTAoV_%Q2u z0?&Sb@4kS6z}Ri(;1&ECTxArHMDVsT^;nFinLhdIIAA|D3ELHo#m+3OUBnXe!9i`n z!S2n!)x8RiSkylWT}lQPq5Z-7Aq^pM6^>OqZjdms|zxzQ^M zZS`3;`6gEBVfu=Y`r+WYU)XF=G=hf-J-?@aorzp^n=H9Y(1HyOo02wU*9_I~?>Fu# z=BQQYvy?w`gb{ZBr4X@4FG$I| zSNlJaIYRixUvzA5B5TQ?tB^pS1*?Q)nd@agW|v7v>B!o;qD@B*0(4nc`%xO>_EnL8 zBF)C``!8RHT9bW5Jf*r}GNjkYGj8Jc(f{~~wEWdbIoKnxgrLOk)ep!gP7nu$IB^gS zcBp0zUzw~{gQ>5gb%w^Fq%F^P|2{hwKef||A9+vD>a<%=9LVdmsGB@FxcM=&m+x}= z7V~J@FkliL0?-9_s&@1|IS%7`-^+S`dwF(~sMx*274~~hdIeg-oeMc`44vGR|CBR# zqQ54{E!)kZn*+7HA3LMyh7FP+gqULfXc?q&dpB-aVWD+VtZ0{qKrnw9RFDE!jkK_q z46O25;ae7a?LB~;1midk=a7db<2VBDj?;1kkdg(~z0koX@rNhbLxHA_TKyh>l4Kk& zC66`q?_$~e)xxYtx3-UN1H#r%!dZs4P}tPE@I0FS+9z@Pkf`-Sv{&OE>EaFFvGe^2 zr`I8&`Ew!Fb?04D7Z>HBZ_<^Fr47s!n*)xW*kOXg(;%qgSohd(QgJ1EKSFx-`xjj4 zbYb<5Kz4?*@wP-LTfP?=W@z+(i#k;(+sVZW@&F!F{Y*7}3S@$qYjB4HbDLp~a>3Gw zrac{Os#J740#|rmK>ty6U(6j&xr%@FZ*uoemurdp%zZ@Sz^Cnwn|r9$r~DndsxxynD8R@t)u)X@C&bhB*&e|y;E00 zb2olYUw`ctqHbfqP~bA-#0Njqa091mI1mYu_SBOvs!PjtG3f_un=BfiC?9X)IpS*< zVmwywO|Rl&J#cb#&$|YW8Lvp66ahD~K(gTbFy#K|4lp)2k|!@Uf8Sf@4$I zpkcVaO08Cpzl%0UEke7mLy$tP4Baa}a|?6ZiME|Qi0T6Y#=6Z>fWPy)48d}0>(23V z-l*ZV#nQ)ku&j3!dn=u^tbX=nf!kP3$Fng49nK757wu^jGLB<^cA^p4xBkA}=hwFr^Q4 zi+bquVd<9@^jOD#;B*@SVE~MIvz$?q7*VJoQyL#>y+AXY6wx++huy(I5N_(*H3nPa z(>^X9K_y#UOi}Va2!b0SJM9=o#&ah)yVmJ7qNS{dg$v5*mC)i97h+AKU*K4_8T`xD zTe-IIqLhzvUveUlbla&P>z=TN92IJ`ru?}fMSZWs>hrUIHmZ|=we;2l^R-}RnTjE7 zPKt@LL4fE;{*oxm<+GHnk-r#|b#D`h&sGVHt{<7yCAyy&kfEBmg)hbPKy8g`uE)|@ zG%thq2G#|OUk6-Uu*f#kvb3uL8RbUweVuS!6%*soR!xqin~o!z2{g~$vJ4Kh!a1jykR$P}@bzf(HfS%>v7madMbc=lBXF+oDYVn@pLZv@oi$!aSLh=O2q;p zq2Io2#+}x*v=qfw6J9BUH6W1l6G82z<7cbS4NzTn85dmq!DZY|YJ>#)?5klzn~{fq z6x`ekcCNkW4qP$2Tc$i1BI1maZ+z$59^b9D9e-$SY*ctEhMeNYA6jZ5+*v1Sq}ls5 ztg-xV)M!v!BhMM5`-^6zsYwJi}a6OWAR91 z#|Ru);adE9rBYvL7L3QlB6*++kHQt#;=kZM2QRxY*fO3n67pfLPS`Ns zj_~vWT8nu4K;};`Oyb&r*H~}0RX*36b}hCQuOLFuEV>Ti~6idSL7Kx++q< zz5?hTcNF*UNGm^bAU)3g@yvdI)gwn{quz~`x9#0XYIu2SQL4EuBPU-;7gRfsajriP zk-xK0V+2(fuoc{3%8-bOZn4WX0>%^sgc%5mKg;K!09M+7)_)LU(=hlhBoT9INr*50 z!*k=Dl>!#AHV{q>%-we)O=!6*<-(0< z!iZMIr->y(t0Kvv&nf}0Pjgp7r7sC9RNtrH)m>Y*oMTpRi4}27D zman8-#zj^pF2C8hVLrQml1CON#3Lj!+<{6~UiT_o@k|ucWRAlBdNUM;m-Y1hW=pA1XsbQd{LrTu3O&Yo32CKQb&b$AD^ak`Bs*3v!z7t0~HeBt3}a8r_H z=rJmt0D|6YB7Df@5_x3^6Q&L}B}o0^reg6|@=GyHEyyUzaNx|X2pR3jUZ!e7=NI+% zm=NA3w}85TvmjKp3*~R`4ZK~^0{-*nt8XSGqtMYrs4b3!|t_S!U}Xv)z-1AS{v7H(2=Q{`KA(;`D8ISUNlD7AFQSf z`cL;CfbBls_AYV7!d*hVA%K%#MYY&YK3QQ0o`hI`YJeHR2ea?l0Cw@^AT0tmcObt_ zYJh1_V5ZwEV0dmqV>0D!8_M(Q9rt9U1-Fcsmre%pnWLs84^(;unQy=6ni3OD#*bqiD4mw|zdigXKU_Y)4Y1xkojoKr*~#z^}920RXydT z84kxiMe{jgCLyj)AX&Cd zxhv}x0^Eg3^9*>(97HJ$xD43PrD{hR-xZE?q1ljUoeNmKA>jV&!}IU&^y6uxMkRCF zLOS-C8}#jB-J1-+Va8;CdBW*%USr78@og-*4Snc{z5#L1KP;D)$=kcgu2m9XM9Kp^ z0t__dIx@xR`Dkt&G zCxi!!W7`9Z3}UOyvoh^D&Z2J(<(Vh2=5OXl-b_Lzb5GWqWgno~8}zY%bJsuL7R`48I&~OW52$ z(lR2!$imk4r7(g6x<(*&EH4EpW4v!f1XI3+0Mglfyjzd-4~-#QQI%S!YJv@S@XXaW z)xho~bBAXSqE6o*v*@Q@Brd$=-}=~icXuAmI9|igv><$ce~y7UGDlzXJv)r5$76Aw z{?h2rSoMM7i+Io>1#6ZDyFmISU0{BOSD!90bIVQoh`}G4Gl-Z9xS4{`;l`}?s zQ#%iQQw(0@U2zmEFHSJy(4O+BXYfMe^i^DZv|krmn(R7{z_8_GTOCUGMo3_LqZH^^ zSH;7ej$EOC?}PLkacK;X=QW^Undr;fDyjFN7*$&MxtYe#NLm!wrVRr~#@kAu(m+NX zs*pr&NhGA4*59VKp~@yXB7<=DoFQP$h-J&&uv|~_vhD}$Fkd;=;0tFa{fJs9>hh5z zHwX@=4z|O(hA$oy%mF>^`uSp^3xy;G3+p6KMaRT{8Zm*P&#{*!pjwlJ_yTk)UMr%S z;2<~e$1>!1t^^U+*%$vT%6U6(?y;F-t~7{O!di981oT)vzB%VlHxr$b0Rkh>hO`fk zf;(V~-pT8j4N47;c(Q=9`N>Bt|d)=u15GtCIQtUSt8*kqF{jd^n+gM zHF4{-C$}%lGl#Yfnq~raM%+U^m6WUrrw=24K;-dE;Wb#-?FrZ{%0gw#`~D$=OTykH zQZdSC%@kmi8A(N7wkuq4tp{$noXfxdNC(#Pa@r5twk0G6GfB4Fnbg?<>lhcIKsvm- z>Sd1{Rm>uyJP{8B*rkaotN_-2K7}c9U5JV!rPWfM3ro!0u2C$uxokq*eU$O1gp1iEOcZbz?gm$Nns2z%u_ zm7@XWDAqWIV{>rY9u~lq#rHdtH59sbR+`WBc(NXRY>fHHm`21Ptqn9t&V#Vjys=3y zV$=+%)#HB>jLgOy=}SI&j+vQ+%c94Bf*poZtPZl=C2^qWjw6xjKLI~G1nE|QJd5pD zdr4(qI+=l(Mf#}BV~&K03k85bZA0l8>m&vz;5)?HgshzzeX!FdR4``8vBKA>#Gowy zVkHi!W1S6k%7~>e(BR?@sH}o-ek+Oa8TykM4ZRky*`vI)TMez5<9RKJEB<_ckU`}* zk_YWn_xiR5_1Z5y0JQ6YEgfET7-blD$r3|tTDz1azSvmX!3iKc3I0=Qpy)5xMLYit z&#Vo4mJH4nf@uZx8WFQeKlcyn(>ZQ46_cCq$jqW#x8OG#lN*t3_C|vpG|1^I*0~8T zBs*zhO?&2g`~2~oE)GO-tDGQz3~HPpB^qnaAr7M2iNJ3g!kIV&&Tz{lzNsmje`chv z(~x9n-*R_@uz+S!Ki##jg612MxCaQs4@!SwZ4DSrGQ+SI3?{Y|H@wtwLdd zFD2FQN(C{_7EdU%Y5#4osbDfxoRpSBzjWqvdm4#Ncz+Y5@%@JNSqhqegk%iIxNX0z z5*d^IdFsg5yB)VBI#u98f$K8NmX_B=cG;16#@>Sj6^?m1O$F3Gu8|^OQYL^`{q-g! zt@37OhH`0wK%~9utPh`zP%AkZ>o?a?{~cl_br0 zB924Liv8^o@p0Q6tvBBQhg+jrnI$k*OENg}ERZ9nN zh!+JU_I%$1E-sdw*9_rfc1h;LCGVOLb$)Rl=tI*4gN{Z4ecAKwfI-Fmimq#QgQ+-{ix1G>A9!JW*pbH!4y1um0?4Lx zT`LP|bZ$O3cT1`gkE=K@<~y!8$)Gw-f=ln$r<$@o^x0-_!$^>8)QybYCD@SMn@@BZ z#LbsxxTXSs8XCL`iUHus`QLgswEgh; z!AjTX@g3ZsbNlPyI5$wo?;*IkE@G7s0f2U9VSfyBS?|q1T78hx;ZJ-k5Ti7O2;4o> zb*ZqKF|5Lp%W3%r*H|C*SB)Z;*{Wq^P`Ov<&S+YEPe5>gm3;b?oQF6@utidPP!hV^ zV>jO2SNyFQ&~8N~dVd6zfQ~3cEgv&svMu-cm-ic{dD~3$9V)7El35}hnFXrH1eS#L33^%)(Tgdp=Bk~F- zd}barUU4^0$?QzP5;QkSct#d=GM#eLF>kTzXDvQzoWtmfP<)BBrcrIDn0G=3k`uJ4DJb8KHdn$ z`zAWFxwAAX_^fwatF1rAPzwkROYH92vkn}8%vw!GVb7#V_(7{>$Z08ogj=d~XiF=k z?k^Pb_lqUVQ;3b1FFz~*-x3TZ{1WC>IIb3Me!_cnYciNXf8J^>%Bc}VC*>r?6=2(d zWUhf)?j6HS&4Pd(KLHS%+;r_K#9K`o{O+8mDNr~a2n59UVL&ds863;$Ma2uJ#V(3} zKv{In$)s^wmU&TN7;%D}qvtsxS{lSj`V&+!jAck|I9TBvYbWV3qe!u*zx~ zLG5pL1)VOmK8d!8xDN6X|3yk7GHQf>LWLr5$rV2hRevHHED`?Gs@wymQM1TaS7g4g zurA<(doW51+%yg-8+21Kx@%`f+Q9un^(T3DXO!>!ET~WAAsmZtRIu+I3PvTpDz?n! zx6^U)cH_)m;fl`_9qK$m|$!(g=4L4pufUcstRb&Ypd7#+Cenj3m+ zXo4JCj|!k(Z#Ip3ly^n>?(z~R+=5h1b2ZqNmZX2Rw-o(y5@z@cSn*d=gS*9(N^syO zKE>)4R(eH`gT|=!sH*ZT+2$F4**!Tg3qmXtZr2}5gX5%uJ%P5gu+c+SMYv>`)h9G` zlJrMS?Rcc~frZ9Equ?oUu@aHA(cSXHP})TIcrwM8KsyxbWf9YEikdHZa^++W-N_}i zF_k8hpT_+-6$j!}Ax&M9`?**kSzr~MWdQVd^iQgvKF--|0;$ook4AHUtO7hR)E3#B zSM0)Av!ov*_g9e8ujB=!`ZR(@#P;ea@T1F68|$Kg<=MD)ef_YS$6S+lS>-4Wrn+P* z`cCx%(@+Mut6q6ct73+<8~>@l7=xDvOP(4auY>#sxtv3Q-j`@LHZAq2w@k}qG(_|J zMNdR2w#b{u&bkNqG;LykD;5bp=URM1tn5D+z$0on`#HC(*VK9kM)lmuLLwLJ)gGmr zUhC`w+w5i&LAt=pM!5Wg&lsRcbfVC%6LDjeNzG!4s)OHhqWtlM*}d{6GiS1X*s~;0 zn2!Wt8wVxA*!1SBil%QBudXpgw-h8MONCfry5CSn@?zyC?M!)p0bopm>lf7UHh(XM z<=9Sdif7Rd)sSj%?_G++`)soOe1gg^4ets?tLYPX2b5PuQK@%FYvw9sUs>n3kOn~R zyHW75sI%>|rRl*JITcBnz0cmDa~px2(GJYj%O- zFG>4E-A^cmh$%eXLnURmSDLYM%i*2-bFKtC$6{yR1Q5x8()@)@z`j#>)oD-}_4Amy z`SThQr3F8ieV)0A{kLHIC{W_B+*1d5PZsmUW6O@m;^OEU;~i7ouRB6rf70rOlNH^b zcm{sweo>yhz3?W*zl&yn0u>tG9vF=jSKCsj_ptCkYIlsJ4@I?WAz%7d*F4K%s2GvTNBNG3uw8l0Y&H84FjGGdh^?V+h~O`ZQEU3lKYarmgE~rx0_y* zR+*%DVGm^=Hz+>^_J3L9;3sm7CCMK!PP0n=RDMUHBOb%fuSK2`LEvPh$~Psz^ryW! zh!WcrfCqEFnG6vZm@)xC5%obXW_N-v7bOvJBQA)8=Z-j0iJ#>{v-lo-O|RMX0=P6p zB6t;l@Se&|!R6n-n*@<~XRtZyYO!PxC$TLOnELA~%rRYbYd0*Nj+6L~1wcHps+cE< z;ds*Iw=(B^m{IK6Q>YDGXnIu60KIE5)`-$2tjFv~4b-CkLMZFBqFu#72+^(IHh@43 zh8PL*Z>EEQ^B+8+yfc6K)Bl7B|Dk*m!UyEW>bfS2k^YQxxqmHUl*OSE>^ve<0hqI&)(G zi8B6I5S_)4;dRu|6CIZmN#N_qlRWumL`$p^POxN_S|Pf?0dWX~Ixe3FO>R&@tO0{8 zOf@^cT?IKDQVQmfwjiMn(8-sB%s`%|nxQp+M`}|w zxuJ1AFH3=6Dq~M{B>AUqi9#6@Yz2|sul6yF2luOiAJ^I91a`-vryYW0E)wi#VI!fX z#pY`&SVlgQAxZPp`J()j2cz~psd#UH5_l_8=JgDU$qWr(;C$NCoNKgcjj2i#__2m% z&BCf{-zqvJ=uH`oN8bZJJ>EipU2{=d6?nxN7)BUOfkZ;QdI?6L)rQf2s~PWACLn*p zZyVb|n&!#lXOApJ^%6{BXJ{S|ENgabuUdU&SCC$(Pay4hE|q*hOora4z}{%pJSDHq zOz^`k6z#JFVTVLw2^t4XCD}wA1T7FR(@PP$RbwLd2Qi0~A?v1VthR!^Y;}y1=JdG4lbNlHV=GzGg<1dUY>26q2-so@H zXqIbuE@O}`L!2)oOD^LpADd%B95XTqjT^Y8-!TAWWDYwftba3E+Uyp^^ii{_3gVAY z0F;WMu_f%!*BeZc0&d?O{(Sg&=w$_JcK!ach+mQEUheke&^Ql&4rdQy(lRiNRFFaD zV#xYLFuLVFgO%$x#-r;^jrvoFFOs{%E;*`#%mal1syh}g{?%8peiTbfA-|O1F>^TQ zRp69|KF1q7gWo30%?>YKCnYix4Roe_m~fwg*=c{!TkyMP3l>OD+9BKEAznxa#-n)cW(x$2)wETv zD^v82}K6jhh?Xf*fkL#7yIZumEH?_juZ9H?bq-R~M z2_0-8x1F9>oe!S_`(0f5Nq??T*%9Z^9~`z*SWTzTw?iQG-@hC1Ftd#yj1XHKgKjhk z&Zm!s6ZCkB)5$Qu9~RR*PA_Kv`hKD8JlUO#PH4zz=D#l{?9H*B#Zq-f{Y`q2Vzsm0 zCkIckRp~pcdB^IGKToQ|KGRG3=@rw#5j$$nPNQT{AM?+8Vs$<*oPPv!dh*}={`_bj zcJnCT*MsuxfJ&nx_oq2FYzORQndgw2O6&RLZ! zCHM7kjW0`I+V#=hJ%2f!EKZ;Ib(EJ0eNNvWDv$fKcQWkECGzfWu;G_$*tKxiD7}14 z$M2Kq%?hfwnN8kzL42`#d+#i(@MsL{ojd67+-0_#Ki-mT{XW}1hiB`snJ1s-t~VL7 z?(G)m%bCO9XYRrr344r37kd~@FPG=YonHrLS|`)t`~2w}*?;(XzDA2|UL=-hn(UGF zAHz?1IXxNf4#`l^>H9f6ldHLALoqj3JL)CbBYI!;-<@QJtH8Y3qu9e6h>W+>w6U9ZCIe6PDvZrPjo zXxf{lv-GeZ?0@d!Jn+wP&73!49KH9mogbW%=EGpUip3-^vT0oAfgyveyI`}^Zu?Hf zy(r(zx9RDlQ;YF4Uy#xZ50Iyu^Yl`))s9{6i(MAt^Ee-@-rO*$R*T(4EJ7alvS+^w z`KQl~M-XkNsqcQE=rZnq<{#s2nXPu-bggwVa$6 z$MmpL$Bd2A%X9kH3#~m1iBb0DXo%bebFK6AzI(i%0u@cRmlfIb+r*poudX%s=(`i= zv-M^AWPjy+JH3_HT$Fv%TXb{x!{6S6<@_TbSh>4QsoC2-*IO0tM>ZqNG2HfpzSEV) zcGU~rjx$+os^v5v9PEjGp}nX}ET`*yfhb`Pe09EoYVB!5`aLljYLnGv(x*(EBTp@e+RX-+Oz zC&RAkd21tx>f)N^olO)UFUR~t*iP?qF5eAdZPzQu*k;+nl-6>A=Wztb)#aYfKRt{* z7_Tl0HlMiDzlN(td^=|Q^}IY@QsZ50ZpP(cyc^H=?R$i0$+B029N&)CZDAjdn zEq}WA-tRK6^G9-~&f|Hl{X01?PyRi)%!g`p$ZKP|8>Y_N{(U*ghUoI103YAIMW4^l zJ3E-oKHlv!8Iz~~W<1H|U7}G}$ZC5#y-kjfa8h@n_uiGmad!-F!w!VI2bi;cHpQ1^ zf3+$H{j?vW1weeR!gph}pS*WJ_;$xe{`k2x#D7)7 zb9dN`V_Zg+J6VtBxBDAi?VoP=UXRUze;ExY=g^2fb}-kWVt0X4mEDEL%LxC znO`lh@u>eH;?mk|4MX)-Br?*SEsZZCu6d&(rGI}C& z5m)0`_0hw#`cv#z{oe0CK*^nb(SEFZoz(jA!A@_C)6(Pu|Irf;J@^%kR9;SNsME1t~` zYxeoEUaw}3?;YpkY3ZKQ;qsg>mt(ZE*7sFE`dp~^V&b*sX3=oCxZE!3gPKl%ew}x?JGJ0_Wwk<-&rPs>FFo816(>GWwSTczH?p?2llfy$2VO-v z@5V+UdJdlU9LM&YCmp!K!gm8VbORiEp?unFDF8n}z`t)=c3UUi!!wucjlUH{hDB-n zRN{XBc|Eqg1_ju6y8xsA^XJ^Ge~it#-M*)0y;zlLLyjmU!ybR|e6!Ct3V`N?{$|`v z!9vZ{D*)tV?G&uT_AGxSsB+cUldtCbsL^_LM(53f-MdTu`m3HwH6>qDuNvMA?aSuF z^t`c;r|E1~jP=y2ss8yeSzGgTW%0AP_K$p$)cdTZrRWrp30GW03dUTH$y<>lD+&ZZOSZwqcgK1)o zpIgr$oreN2-x~AX4D=K$)J&}cKymX+Crl6SYJOD>Y!1KyDF9y4Ck>J(4S0P#iJD6` z@9xSPHWWajd&^+CeLxf(0qo`*A(DW3R)%JpV5DM;W%1d`l9LyTfp_4P04jj2=lF_)VzpcGK7Ynf zUd2mOw9m%Ap8F{dPMzrI@AD^Lm{nwJ#)Z}F>>MJ)dnSJdtCO=MIfckXpoIPy+h{}} zWg$@`>qGqa_ux9RKnNnA$q)pyKyp0E3=CpTk)#xcd@u!ykz@q2;SD6Un)Q%DcP721 zUe6lPQ?DnniVQ%nC&}7pi~FzVd##0c;SlM4?|CE{@MqHZ^t#h{-kga{Y?JV(_l)&= zYcq`^eU5)fB@ut}W&3$@Kq|J)@Xyat#{YUx-s}^E(Sd1(Diu%fxld^-GmMyt=y=Bb z(pvOXWG<1?A9u3o2&!1qG|y1QDTXai9m7J_LT=qH$)ZJuj^akaQ5{&&}Sy59HeRK zd&T!%20MvAB(lI{FjwH-dfPCfQW4m2p#5By0((VT_p3;lUbpK;H_`*YA6J?$*t5yeeb zk4&Ga07T9(aEsuf0vTh-U}{&Xc}ij3?6qR{!r%gYhCoRZA6AqA#UK)o`h1GOTsm2d z7>u9ciT7+}xHyZ?7^vfj(nKND7>Xkn6rF!WVNtXqtBJ7%mG3anz=T3fprpu3Fp~L$ z0z>C;=+OXnRDE*wuaUQ)Ja+_t$#(p$McoZ~qfYOmg5c4fj(C&_zSeeOLP%YId{R})S zsNi;IT*$;kVv7kx*x_gQjsVpumn=0XHOD&&V#hKv&ePn-DyMJ*o1i2h%CNd2H!(jA zDbVd?IgTe)ENnqJ$J7^;`YeQiYcGGP;L+_A1jK{`jbQ=k2yh+5^V`g?bATuNgpjrN zrhNrU7G|Y=NYdv|?c3w!Yd@aaF8PxUyfm4A`^zjjYhTgCg_oVrBpbB;DAqrlp)`t` zeLXb3spsKX`^=N0;<W{O49Kg&3HZ$$J0c_7z0&~8X`hajun4JcFOnD zOic*Hlj&2$l%_WF(Q+A$ODf_XsfA4;rzjmC0w0(i$N))dPpUXH#mKS@INE6>FysfW z6nO8X?3x=94(M2 zERa1B&gV52n#4%@UIhqLoHBoZByBuo&>c?2Q!S>|@PP!If&v64pt~g5D}Gs}QUpTw zNuTjk`wCO<^Oqlf`F9WVv(uvlfr*6R)Tv_vsoH~L0ac?JlJ4WbV+@6`JF;Oh)c0xw zuznKB5YI}}grtq#2OZawuh{u@7v>Zt3s?;8K%5dLC1V%mfS@^3$z*?7#cVGgcLa%@ zPKQ-~u$7bv!h)ORyd<*)=~Sy?s^5RMA6YP0KkqN7e*dspZ{riS9ggrXd-5sPvCbz@ zivwP%41>(hkRNPO5|F)*oigwTF!p-ydkP5xDJaOuKLLG*FxybUZTS%#k!&1LS zO=u96Aearxz>kz~Z6r)s3Ph_oBZ8i6H1ZsvN8*nXM^cYvIof{$wQOZzTN=*)^0PKZ zhNqD%2pD&VC9$M)0n7k>@)c6FcAtsi)l}Q!%RfYO=D*@q%^Q1p;i#U*D6@bf2N<)f z;RGbjkzdOE!Z5Ck9hPp8XZE|5Vv&n@XUnyp!z=!-u#eK=4qYq(LIDUQx^FQIB@O^d z{X87n=lNf7-qwEyC6I_HK{z>qVu`IdrYWBhwVwPBcnoRhd6%>Tp8^&7ej3gF^q%4s zKVIvW?Ryeb(5-3gP(^)I^|o7x)^8cJK~hRku^}K3$fU`Jnvu9u2D#LE$NH(4#Itgs z?>tc~YcY1aw!Y#NfrmUq3CQ;D=TeXqH~~RTFieCbh3|j+)!-2b_3y=^`Mpb)HU6Uy z?YX_48*Frn15;59^919x2rNO(?zN<3R;;Or36j$gA&s9RLT6Ql5Nit7Znt}deSvEQ zD;LCXjA>$k#+jmx*^;u^QTA+6 zAcXlKi{*cSDS<3ZmkV1&tj;5>1O~yTCgLj#@kskJolDepG+tRNMhP`{k$+w1{u>IZ zSPzrh!BuVJ;nJ_s$utC;=QN@fkHkD?KH47HF>3ZP)aSp=@e|;n_t8>~pE#P3q>kRp zFdpz{WUw!MMw;06@}2r#P@h*Z>idt*9f5-?Zv%g+>X<3RrivEvlg+RY_5@-)N9Cuj zN}Hr{K*gaR$uclAs_zTsVu--ct?q5v>G{pQ)F~?ZkP{B1(D&d$CDjwh% zBSn7|M(N1<&gnLtF;x;Fr=m7eXb`3p#BD)+<>&@Qq~1mpU3!5b#`aU@$PyF;GpUSs zwrX=cltx2p!=5Eb>`IX~T&E5RMD8|pZ zXq*=rPBtPMi+hLWADX3kZi@JW$&y=Dfv|tkQ>#-iBevh!T2*99Is+k01Y>JK;bbRrRBcx@)fO1zFCr894&U@>{iH}i`ek99iNR%LRA#8jQ#0!U`sgfAP%0ZG^ z$Y=&_@0-kDx_ux3o&t6YdBtRbTf2)7tolBy_Iw*3%f9n*+TbgFwyb`wRiFD@-*l*RU=2c_<5HjFUE`gp#yi!>YVb}S zpQ%2(41RK-24C{INqGO;hJEql+gpG8)p+y=(Ka<1AE0tr;d$pF5@yUM)#mQ-WP!r4A zoKxsKm>Bx|jekm`9U{GRTpN>X!~-Ij36hsq5aw@-!U9F_o}e2mOEx4GQ!5zuuv}T( zml!E=w4S-E?GeLh19Bx-g3e1*p0U(;f+Fe<@4;W6vgkg|3P}#L z&O8`)0~Cd3md^uJZWw3C3+tYpL-Kten_Tda4A^%r z_`>XmxaY~}YwbGsTmIzYtLMyGGE?~v-|SHiqX5b1fHlSuNj}kksvKehLR|dXIIx3o zF;IjhCDc#e2qYc}_?!^Ec7>1OzSAY~Xn0JXwkSQMQj8V188&}*ekSc{3Vsl%JW%

;R8+5J?i{0 zDS&8y0$I;WsRMtq;WsT*bIrB3=TXH=_}}uz4kL&O>kBi23jPOd9HW_H6y>x>fnx-- zP7N_aFeTnoqJ}?G>^>0;A=$*QF_=SQNo%Qbv(@Y$_p~HZ?_ZNwW;C@W(>a3K+DX!9 z0d4PpZXGj4++zQNSRg*$OkpTf?0nU(TNR%6G6tzl>CdoYy9s_ zpJI|@)%f2++jH$AHEVI{3oo?$@)JjXuu;@v`S=G1J>&W(FV%LgeORLJKjNuKqhRD= zh$v*R3Fh8;bMSpC^_~pRE@+>c%md%K{TJ^&sm0Rs8HJ-H7=~xR=W&<3Ik=88EGb5r ztgWLL(C&Y`2U6ptRfJMbK~hHR@)M=JUH87*gUbjjfCo^n*FECGu=TnfKc4^OMsbhg zsP%0>`0vZ7>8~#}o?tceU;bb%(9zxbo0b`tnAsh!l=APG8-4V*0T8=uvjwdONo`!~m_ z*Tt%+ja&LmC4ch6ka$ng><6pn1O2lmOqYM>%n>nFBXY^FlLg4nGuQzYPaowsFZ2c| zD6UgqE#MFjk?}SGA(n6+1=@cjaf?IJn-IZeN|o~DkU~C0urUaFiccpCJGNvFt;NPW zu*m29F6UDx;zP4SfwcVDB^q!i7i5I45FcrFd1bu&q zhI5gZPfOAX0d9hd9FB*vyXfuX({LnegCHp|NHGCH9q`#OgnO?PW=Lh}W0Bai2q-9d z#cKOLvdcLWOk+fiIVtpGibS!eO8aqrPLo8RlL?SDe%w!3Q`dPWC#Rx+;C6&s8)Uk6 zP;;NZ@=7lhr)?!{lnM(~N#gkK*aUyNJFuZRpGPb(R(6zDL#mbKKYNd@?)&(>mNO8} z6olO|fm-HJ6qB>p=2!apzzN}#YyWALl@uD6J0WYh0)Ei3?#E`Ff5+qCzkQn zrdl8K!>3mfGR4xMQcjTO!*y*hW#(W!3^BwJD64Gq5N1(HO#-ZPq;pTr1H^MUt7g3#x7+?uV?+dkqC#oE8Lcdc0wmm?wkutig@eGQ()slI zX;+E1E9BIC{+&Dh&O`q9wPTnK&F%8nb1vt-#@~nHNtLur;OGodi)B-WkWhFA(?^p( z`FX6L8wwY{{JUieXGEorwfcYi#!lnfCXXV6?pU}XloYr=uX=n7riX*8_H7b=rnPQujYOI zeU8!M)-T_4w3>)Jh6qv2d;x-xTFCwMZG?$Q=qHT@r<_or)YSeuQOjzAHKQBrUrjV`exPo zK`*63V-`n>inn`#@WS`w+?ay?+YqYcjPjk^CVbJD*-+Wp7Jc?yVCI7rfKj*yko0% z!C!Thw_l9bxS+*(*t)Z$-xrb^;dv$lWOratM66Se@wL$x2K6r}ZY z2XwC7@PnG;wy<&NTqA|_&mgI%qu$@SnsmF5-*@uM9*5cEm`PefDlK%G9*GhTe|(gwOV>o%?{sw|b5Jg+qS6hc zi=VPH0*dC+Oii$sSvBhU^~WGN8N+h&HksA8BtC!mwvEcn4v#k!MBIZH9s(CNcu2uf z<00G6!!O;A&O?61*27O6)_#PZT*c-s_n^73RdXK4i;xxj`ZKEsuT7)o6=dnr4uwX38iDLg_#QNm+~zMVjGKGcXM*6V#`F)>%8nv z=H51z&&`z-76r}Q<_M>PIiM@9#;|Lf%Nc)jY$r+uR^AGPu!u?!D9R9wOj|HDwdbEY z|7`AC(cCu$L^V8;mk!Mrk2KIcEkCfoFk!NT>ZgYG?RTm_Y7)@s1+wH|hT1lo^R`eG ztyyZ_%&Rz)lUz_Kt|Bc1B(zVZ_bC)M^w4zK#`q`qIfJ;&Cu+q5y5*=IzWfgp>pFir zC-sOSX->AmNMb2$0WrMI?5Y%qsKe4;-k{z|3_?p*#(%^-k z8?Lrp*B`pMwsu$vOV_$l#t$#9u)ULC9Pm#aM{{ajIJ~$BN!1#t=gMNtGqs(I5>3OZ zJ3X=|)z2(+ZB*APhM6DEi<$Hd6m5UkdpjyvZ|hO@^)nH}hibz5s|6L}LS;0?Y7|V5 zBa@tBV_J!oj=$fSE)%=p$>-oJY@RP{GvF2pl*aOcr6wTY&eZ3Q_P=i9e+4d~#n%?R z>iij%fA9`jgI5kMcKN#F+;8e?Kb&#Td#*G;RzQ%vVvWq8F6SW{CciK?veADVH-nOx zqSSsFX+1<6KclTJqZNorXY}}}e$JB$@Il+Yg;l(>zwpegdxa}#d%o)Dcm5@Qv|6nf z=lc|czrHtb^34|qx77K~LDUU~mlR=Ac9N9o+E;^RxRNFkO}>(LAMqKUnj&y%}6SduFJ4B_>^#08;W_c7D26XNNsc)9Sw#A!ku>Gp97cK0539@S9eHw(!jQ<8dI2^7rg z4z7gR*rJN@R_INl2>E}4-De*QCW~;Us5~m8eXSU=>hWQ4SUq7*yTsS8ZT3$qs`0Ja zzew-jjXw>zJbdwP$|##(-bg8k>lH#jM6ZO{6$Gs?o8u{8!y*3CoQ& z#2`l$iJ@7hckb(Vq9Y%Eb@ZV!bak5 zks44WtXZzO1#)DApcH*L`3yz`3yQJ+ePZorYSJl%hL>srB4S%V&a8@ZkZeW{#yZjU zE+{9Bj;bO2CRE!!*ZEg1zU|)p;W?MyA?mjpgT@}dwSGh~t!J};w9V0%uRg! z>orpqTVxY_?9WDoq^|8G4qDzuk57YwN<wcCH8;yq zB_nR7B}ad)&VXEH95nlj4qdGc)KeI2_|f;!#+Uz{4}8aNt)JI>8Cx=_UwoCu{HvwN zeJbS1u0|Hy!i)AFimLC2gA8*S5bPsR|f#kaPpTz_kur0(;GP#^4#)-lW) z9fQ$xQr`0JxUD5H^;Ihr9r#|~kF8E5>v^Ji?=RiZfUBfG6RqCkOYaO3-8enb#frP2 z>z03tnUIgON6JyFaZoE0C6;AIpR+lPCe*tS?Y|bI5HewrFhX81JkMl_v8jJYgc}Yw zSX+3tTfFizW+LV@Tx~>9evmY_3M95>O)(Bjh7S;c=6T7cJC`2s|=>&bs4M8W9Ra)|8as;H?C;IJ27XCKI}ldjSJm*LqZg+IY&$3{cRHxH{^(-&9G~H2t?kdz{Cbk3zH0gPNo#+DUwY`J zS-t$qi%TSuf9ZB#*r%;6yfnebr(#QFGUm zBxT?`lg0g63=u;n;wF;u)sb4;X>6z(E!{xwW;vT;to7c^g`T8HNZl9FKL5Vd_a_5F zb>4Ai6>sX-j)?!#izHuM$X}dXqgOohL(==!P-%@miqVSntVJ*~qO2VIVeU3q)0Fg^ z@4&_#p-V8q@y^Agn5f8NwS0Qd+vtCMkck7^;Z`)5?Jq7d*|>PqGb<^~YDKM_cJzAe zN4F0YyRk>(LnV4}aL`+`sGa{qGuw_VrnG`dW(_@?Ec@pV&>-D89Ou z&l0!i*hoft|84M*Se*L=B5pd_aBHE$E${oBD7t)PvF=~Vg&0Pj5CmjFBE^4ARL|dT zA-mBk9--EsY_(6_Yy3$XeWt>VKjGScJ=-(THpLb%yMOuk`26cRfBXLVpFKs$J{dNpJqJ>PFHx*@^z-O~W>_Juqo+@y zW3jfiFU;}M_ar|WiWl$ls>gk6U%q>f4oUleot(@=qit&IS-v19ieG2l)LxsO5xqZ# zDf@(ok49@CV~hoN&F3%?#NYde2s?7MS;=$~f?;h;9`L14f{gEwH1vPSveP_GR+Bnj zxV0L%rren_=2O@%BN@dI)E5Tv8no6NQwu3Zu*^@w%s*L`aD;QO$!G9L(R7Q1O0bvtnNyz)%Qtg!K?Q5{I;ydsp0=N^?mr_@DsSA=>(?h?iE78%X=el$n#-XzNg! z@!7_8%L<=K7q9y3Z+;H4%~?eO3B?J;X6IL}ru)s^7rBmux}OdVa0h@2nCO1GAm5Q! zKbi5U(I0<5o+Hy2Y+CLI?fGdvGqZeOr1B9tX$Ce@p8nn1{<_-!MveU?+Wu1b z@?XDf$$Tci*3JJ|_YtARi5o$Pq4X}{}XM86k*z%~NE;UTE?i-%bLsh$7U z)M(rJ(S`r^{l8)a`ctp0an$$ORN?Ke|0vwFpE6(9ew$iDgPSz3;J?#J5v%+MeWBO%&MaZDfB^X&BC*>(qWvcDoTq(q3!o&(`?g zFMK2#zh|;G#(ilk&mJCmLtH<>oqYlUt^(08ZhW1}4=1ca{res(W1nFY}Wr7Gusv27fe=kk{wU)JrS9Bba$jN zv;Ae+t0*rU61wO9l}CS-Yg3FCmMSO_Z;>|`El`j%B>Xc-ontpsL!)jt zMqA-{FZ^D;@R#ms>$knj$)BN{|W5swpy5K z3R3eXLoTFAV05x&K{4bGDDHq!t@smc1m@H9^Cdk9;~QrkSdD~QYr9)B*iwH`#1aXD zEZQR(VWj``3yJp9rh0vb<|VpdyFi+LHi2~COigvKRdX+1`sKktIQ|yPbm8zP$Nv>4 zVI|k5pv*h*t}iwBrDNvs_Tu^}#>E^xh0xp@N?CiKzVk;FSYqVNfL$k5a66~pT7=`L z$~nm4J1%Mm>u%AOOX_Uf`fz^}M=TxBw>he1TW4WXeDyQ__yPUzm$ricGZhb%D8ykI(I~?Ss>ec2JJ=^M5q`YS8HWkiJ8I*8+cD=k(vTfHVo) zeuHM+dJn!hZ+x#fT1)PlA|5xmOU1FR<6bq?Kw*ip{EE`Zg?4&|FuTF zHme~vU-KYfBYou1!ZkG?Z@46g9~knP6DSNacZo<&7!p_K!$eUOMMD(iB?dVp9E~E1+m`98ys6 z3`sfxG3ql*5Y&`|DA+pBXzjScLsU-t4Cwb55M~HlxW(h7WadMSe7)xYRs9^`t?X^OpL&8U5G~2{)1ng7FrK z+|)fC1oNclC5(Rv&9gF_`e_vhi3LLSu61J{sEr>ZJ%9OK>-e2k=jOCL{x5#&4?Q89 z|I(%Z;;@oFi26YM#G-ylv@Ra0gd=I^r8G7sD$LLKp{e;lViafx%)+UWXdSseH_MM~ zczmouu+Ykc=BAnBM-HT3+UnQ}sHE+{Tmox!Yyz6+)INU@zVF)$qbJ#`zwp~1zIZ64 zWQ=A{HYMrkXv1)cfLg3FCZGgGhw^5jcxfXwHI+a}nlXmBA4<+_B(Au@?Sh%OM=<0I z6BkZ1zFMLU9673$&Zi@AHLeVjwVqG$-9Mh{T&(qL-?bc)^v$mxi9d04i}}vn^ld$O%6S1SoOj4_b**t$F zFyad&rQPVbd;uWw8>9vL)cQTdz;5R2l=END|J4umyNB8^58wRHf7#7= zM}F>@-Z$ODAJhx(PrYJc(n zmdAgtNB_Zn0Mb*sxgte#Z=3$_(O>oS*4O;5^S8R%CU5xG)O*j>{ol9?t#3v$vTT2J zz_Q^uPz_COAEP{qSUlVp%v^vkCdu0b$tK9_f4?OBw6Febn}8Go|~qkMKtyJ$m)gAAa}I!&cv8K^}jn z-cr{jb??(dxe)?SYY(;GA#PS!ZJiwn$Rz6h=seArPNDvV;d>r=-Ps@i*YpNm>znTw zH_w^Vv#IAU_z;G)3@K2DfZKPXWQkR|^n=8~BBJfQ3SnZ=@&f@l5ljosyRDo3Rn59W zTfTf;Tc1Dw%odu>Kf!E{Ii9s2Zr)3cdnR#eb+>s&e zYM8Kq<2@-5NqISwBmMsk*IW@fT3RsD`@V)upjv31bA!|AFMY`G9uNE5&o_Vf?0Y6s zg9{*G7KEdk$l>1cU$G|R+osPD;*W_h7iOgrk|r9jDsrW^0j3@MT}bJe zskmYxoBi2Hwt__J{Rsg#d}wZ^V72?BXBYg|Sj4(NP(PpX(jI)zRB2{B)T=wuvnC>+ zpe(t7WCsVhc~e6j7pYM3*%f~bvsw@&SWaw(5paY-Sh{7Xsnx5P@1f~CVMq_n5ZX7t z{BWbQWDDKbVK=y=_|c{Rfj9m`ht*R}&%*vor>#C@U;S_mpTGlhA+j(qsx!&n;su z_od7+isZW~%bA`HWJ(qgC@)^_v&eW011kEiD7o&(Gu3#5^4pzZP%yi#FQ~#+TWrzwE zz>yzbbB=!TmZs!={vUr_o{k$cX@0_zbn@UbLQQ+w{q)CNF;V z6%3}5zv@)1ecm5EB6g~V!~YXAgd)(s59wZlbeNfvuCScGVG)1(?lU456m&ADE;{%Y zR&0tysT@6^ZIcr$-Jb4CHN}3Mt8$9;Iz==`ZzVE&i8KBD9Gfzl>b_GXk||cs4OLMy zqD4Rdr*svdA$Sr+`dorTQZkLcpG`bGHCn>&SqV*UkTUe^EVRAJsQ=I2A9i|iX9G{R z7_G%>?D@yf1<`+3U+BP-&ePl?cvi7TLC;CjcR&#E6|&Ut{-0;Z60=25d~NEeJdU?ox^$5J` zJKa0|-TM;%^uBzU-7@~^xsfyif7bcff0Vy8@GpH-+gI1}a=e)f;sm-58`t~~{S!U5 zv2utt-}zN<`@4@z+lkZ8GqR8}F+|Y{S71TI^%1$GfQ#29f;}vsBm*uz;P^p zooz~a_kw?Q^O3cOSFakTFMOFKSj6Tb&M{@j6R145|Ma1H&E%=_8Ln<&(+8>VYL(+! zwLHN@H2rF!^P21P!E9LLHi>@&{hv+A_>&Y3elWHM`D@vmSl?p?} z3NtYBRjAHbjZ;hNS!~gY2K-CI+V)Xt-J8gVpo)JdD7w}hCP{hEd5Q_hvBM5rbCg#z zbpe<_Ez`*3UYQt!IiDw5#dj2@qn zG6li^=!Y`>rayG?+YVU6cm7jbpn2LKeYVyqBn{sQDZFM~wsQ>3G|@e^y0=H~vjOu0 z^I(6}+P0z;sYzf1d!H?HJ-b2ck@VhFywM;CjHUiQebe8^4OY`MlnC5_hh{cnupfFM zB;`csk)P4)?#3q&bX6jlFAWoj#?4v$ovS#SJn~N!MpZV;b^jhMTr}xeKuc61JPSt$ zM<@)57g06o^uTybG~(afOnD7O8>K`sX+~sp9mid%pIqYc^b+hzUjoA~33^QXQ~Nbj+Sk z5Vf0H7O-ma(ng_!nl-azm3km>YEl^~7IbPFMVDRkD}v#v;a;PERy3#)d{CWeJ0vUX^&*U>rAo7VNJ@+&5bT>pQ5R@Y){ z1+@=W%Gy{{!chN-w*~+MlpX~6zlpad_o zzp=v1x*O~&;QZpZA9G7JJadPXeGpgVF^Ezn%jO@;tO7mA2KN1>zk=tamBkBk9vIwzMsDVR6ia{Qi2rd6&K3P1 ziqBd9v-m3i|E+%iAHLNOz#_*_N;H4v0f-nlpRfWh;Clz$KobMQaYVqQyuE=B@D*tH zGY}Yd0zYtPGh@J`tO5J$hyieo7KngTs~@G$$en?rvVH{F926@2>7W_+8i6O6?}jH&BK$eJj#X} zYl9GecPmdg@2P%cyr4jau>|;-r67I_o6;DX=HU2AMoET zt2n?7VE({ieHAQkR>}_J$`=rx#z3y50I&{cH~<9?2S}AZ0JdUp0K_maJ97RbUp56c zjJ~ftfjD!i!(HL~u}XdeW&U~J&-;JT&%Pks|Kfk;uRwko*nZ*w;Vb9@a0gZh_>GSM zxWJQW+E-pM&Jur{!}hQps1M|ie824nyL`=SB1uEGwD&(v{ROu_QhYA4BzE{wd{j5`bPdb19*x^sLkR?dO_A_1(fB!fF z6vSWwNa6lm+hH>xGN7#33p|WC!pRNYfx+H((B|B}=_*(Tj&Q61Tr2*$StXzVKWx?$ zP+cW*AW>KXL4HmG!*MbKL=?o%Kd7&Q^;H}JEdSrt*8{48Ru(Jli698lA{QMYA4rAV z5QMNVG(vy-$n?032=zd8gdCCNygd=zMBMht>E?NcWuy}DnZiVEQW z$satcsptTNVJ$`;s64s?d@t%VNc^a8Hj`a9P3)_r;j}PpHqeCi>QsOOY#SeDBOO9L z12&lf%PXk&J?G=#ET?CH)pfYjBwl8q%*r9Oiidx1;Ml4}H+Yc^fYVuX`ftR6dleY6e>i(+)0nGjN?!S2$3qd@N|25(T-E0nS0KgRcYDskaK^DyVh{ zo8Utl6dHg)totZaBH+Ms$}$6Io`EsVY{*BPQ7ebEiJi&+j_b}BkTrc(%_8eWoU}P&mnk|R1F@=SzD^>K+ku1eZe)sa zPrEysgEtiEz?onP&1Mf;YxXtWg9RQEnrHaBn zk!ZlF8895&+JLCWGhp>^Sx(w`!N@!sl;3~y>{XAm$^nwK@q*c|AOLaSCy68`GZ3aI z?AjTyP9%j%%>s35N7hE+B@g6W`P`~9Z@7YkuKgJ4uVZsP=_Y8p<1^vpgG)eL_K zYA?pI2(ma-lZZ1DCG4tMVEsr3thUefEU?M%ZRg<|dmI}V7}E?@R4^;2!8eXOHqJ9S z<={5JnOvL!E`FvPh^!4n+{PZmGJ|ebe)zN;+#*@PHqb5tvDqAnO_B+)F%G@umrqUC z6_u}8-VKyd$rSi)@Jqku`EP8&&^dolxgNIxZ-#Q_XW;ZP$BIp3){5j*GxwtlfL-B9 zi+JWh26>A-*sg4xgwG8eK^q6|Z#0@TzCluHbDT0?Q1@ul*~i zcJeUhHX1q+CuGVYCR!sbGhpV~Q8+hayEt%`nS~KF#Y*A0jYm!Gt1~ctm|TC90mnhd zuO2EoaDM5Jt8?5YNa}M2Vyozt03=IOaA^|)69$!sr>9&6Ra!j*o-QmwqzghKxQVI* zGx1DP&w#HBiyH}x{~Js?LfVknlZ8{5V6-7`F&1!aFuWiD%Hl$Rsp2rgS#w_=NpmNUem204&+6E$N7eRkD&B{DU$G1%) zC@;~QmzmAy*%3nRz&D9Ic2EbCo`FQy1^x#<%(LVCz>i~R-7h@XbwT=pqx0H zJI8+E4_z13FPxiaDCghWe+EhAVQ#D(V4Z^=7MV9k4Ns@N#Tb ztb;h_SlBnQv0#1sUTAwWM~<*BHMnEbk9E_b|KbJczvQo7{u4b@aDh|~*q2w8dw^;N zS3ahpO+r7-I+4zTSnR)T6?0~7vGccmjOI@|=#Y{dXd7_T&G5#Zb%f9jt1WWp=8Jp4 z0Bg+jpT#%1tulYJAHL16uK&Uh!hgVrwE=JaCw%_E0h^+~K8^*uG053>SkW75Jp;JT z0Y(w>f6l)SfP)LrzYsb{-fa%wEN-i8DCd9LPjDL$OxRX@d{)cMchH9du?Z@@VdWV{ zp_`yjdTLFVx9(xqidZ+$r_DfUok<~qBZMLc+jW>{0HS}H{r0!Qw@(ay%FNCNoC)a8 zvGh$twh9NBriJgD|60Zat9zKTg2S;z+&YtB072V0)R+{Vg<}lZueyvNZQ)s}$e>irM4x^6#4LvpVMU1Wk09S9JZ-I8ldU2)y)}H4E z_2%|xoEZ$Q(|}45;?|`hZDwt8_+~c;)YVNqOm2S$Mi2XypSF@VAiA;M00xdV;qyOW zP_{u2`>fw^*(TapFI0c}rS5wnTnr=d{L-y|Z0E+R9H6q5frS0Oo&n_F(*5QY?)t0k zSvXdylIibcX#Amr0+^wXcfWd`ZX#;N0lRonSXH*zt+NkU5&Azt_$yw$IDf@c{#kvk z!d-u*uWTl8cd*)%FgA%ez(gLk8$bx4e|3&yvzqze<2n+i{sq_Nj?Fq&4f6A~mjTM= zKjLTI*v?pI9{PAz{3XL5`}?ikacSc@llx6iX0Y(uVN^B;R^@E|ct7k*E2np+Kh^O+ zWy8?#{6ky$+J~ZyTI_KfE0|TnSsNReb@G25L%MwIt}tJH$B%z&Kadxtcb~9-0gQ51 znc{)UZA|%FH|%%9=O6qX#}4>m(kEd9t^?(zz%hw+%=K3nbs)hpp{4)8A?^AEHdxm4 zpuGf@OMl0$aGcGGuH)+yz_kY7yyicE!*>HA5NJTaybs&j8zkbv84CtpNIF5h* z?frVjTDlH2IDYWqxJF>+xdOm#=4BQEOtU{}VV*1D*K-A?e?2#T@DG@Fe(=BP;0OPk zj(_mK>3p8QQ#l5FcdWxLC=Sa*{v+==;hc^0#T(`s?z#TK%f5I({NmlYbk)K=NH~_9Pl1Wn2tG^7Jq-z-f;lu*YgIZe?9-fS718%!T(AB;Qyq5@PE<= zqyqpK+ObqYzE~i9W%~Zo0ayXzr48gu_=PwB@E?5f<{$pWFJAV;Kl_76{_y{U_klWO zbMqg5Lk4jC#%MGPVAn_wl8j z{IktmzI8Y2=6rdD-)i8Z(JTw`EA9t>7L-ZPIQCojZ@7vA>;3~5((M0^D}3uc{s2ut zvcDupC4hA!zsaSBbo0zFxp49i{x6w-C>5;xciaZj?f;Ieed+iecb?~J{`v0Ifh5sz zzoIF^xf$4JMS(KSpzdd2bb;LbF?RG&0OzU>IOhSknD@^O{MP3Y+zvp&%}4c(2|mLq z+bPI6zzCoke#`eT{@<^>>P)lzGZCr)?ihCf7U<1?rrRo@Yg6}~hjewJ199zt!v3=n zn=9O(kO1UG?mI8hc9-}_4{ za$JFo)EN-kOn}!Xo^0euKyd%FlYTK=_mbfM^oBQO0sxCx#lTmDhM1$c+20Luf30Xbl=Kvv+DB`

%&DiCkr4Jk3c@d*LEM`(fPzzjHV&BK;v1t0|=-`F>ha*)CG0MP({@i!lUtYTaE zufzI;?kCTYkhK4f_X;qM)1Tg3!T0JAKX%v^;yDg{j{p*QPoso_(t&URgfK5w;J&dw z%x4vQLi!SiJ%H4Kq+&TpM+Vpq-t(Ak>bR`?x|Dt34;rD00+}qe1hz_hL zhV$hC%u6Jcga4IB1^I`6>%V`le}?o15FP>c@s$&#Up8R*{+Aq{@Vu=9-!H*S!r16n zeJ7;%OCO{MPNDMxykfb6^cH}9LjDSeMdABW0N+pkF3+L@JCwTx*K>FSXwLs!yZ^N3 zm;Wza$uGKs4YorEpRYiIat8BPUcd5s^;5qMSOUm7DBwOs>AOCEfWr5u6_@8{` z_J^J+fVIAQ>HO%~e_F5L{jOku`aM8-=i6WE^&x#p#iEd(_^a;$eEx>|rohelQU~p{ z|5bkvq;EM4=D&P@$_mVX+a+;+?Ami!1Jdbl?Dk{de|fL*mwLGkKL4kB{N+bPE2M*%a?_4F5=;7{$$F_3Qw9?nU>cn-+F{(u4c2Jwxb_L1R#A}V`Ad;9T=+n@FM1MlN? zzxQbVtkQjd_JxP~`j3As{cHV^``%0bUBZKR@Isda?E{+R29LN~a5s)c z`+_d;+pm2^JpUq*goPWx89;^iU~n-|Vf-zwgR95W?@M(Y^UIDDU~B+|aVp?lz|gKN z2d>u5z=rF8>^upFlq`UusQ@cN`MVn=q61#V>W~=UVJ*mi0I#JH6{z0>Uyr z!E+4a42dm(1Y-em7Elb(I(!LC7(;hOqjb9^8d(e-VA@XBd!1(rR#&FG9P8x(aGVf8P( z)AAiLlTz~dM z$Z%cM{%`-wW0;=l@?ZbUGOguuyvzUc-~O-v{V}|j|Lu|H<^TCB^=$g%U$oRO@BfFF zzGkm~oSMhKeqr-+^yR-aOWHU2&y%fes(Ses5=H-Yp7MWLr~bbd>5~2n1R~q;c;Nq< zrE^J<|7(HXB%}IS1O^<<0Px*(0m9pN8(#MxEsnBJ0IJGSqUxFRte|k8J|HT#Ev8yh?GxLf zySH?9K|%X%Yt>!Nnmd&r9pzjbY{Tzvek7Z)DF@ zms=S#=K^LxXcI4~$~DPE>zOCK$haQ+!_04NGibUZh%?xK?o%Zw z8n6pSWRTmq(Dd>lmN0sz=qlWVs8b3uc3Q)%3q|$3V1GHxl%ljsfJDsH8!e~Hj$9Y# z$d)*N6K49pUXvUd>YCNwYmB5$2pB+-d;-7kKhDG9fS+Gpe9M4jXu+AgK9EnFU2Y7A zQ+cFb2=;7%&t+_4U+z_yq4uMH*4del_-X?sd!g8hbHIIjZxvt5+qPP(T(J~t92SQk zTN)1Rh*oZ2DPg7Y1Mo?pks!whk;m0ncmE7hKZ`{CTIx?$?*!nVwmhrG@o9Eh{k#x_ zpdFq_2XXTFQy`Hlxk?!&X1GD_Ev~#oeHlq#Dv`Tv*dMLt>GRXSmLe~I;=TS{qJ*Yt zJwot)DR0y>_>{SPzMh}Cx?Fhf;ETh?u-4u>g`XY7w!@s{R%58Em{L5IAl1`)yh(8J zy6|)9?8W|TL^^kGIN*kvAI)&!Y&`W z@ffh>nGiefo10Op+3Q(f^Qvccbsx0dJ7SWTV}<1SIuxsd_Zd}5O^fA`N?(y^)LkW{ zx1Cbo5vNK8ag&pz@uRETTG&M)sbapIW;H3N>q#}Hb3&A2ocgjL|?&K$B z-bJA;lFsxVwwhjlabcARZ>kb^r1(}5-3FmH@mfG zbDa6qr)bVQevFRXoL+)9QQJm6xKN|H>)j1ZeR6dI(N0WJ*$}17Zf${SE#gb?3-_)z zA#lefKREo9bj^LagWrVDm#h)vjo?Yvs4I{WWQ4!gnQ zsD0mWMTPlDGL-HV3FWeJ`d9C0BV^ zJi4o_jE+jq#Dl{Z@)BD+$_UwW*uW|y4XG;K##jz1C#%|Wml@#RT`4!MQJ=inVrk>9 z50h$F2j8cEq#$U&O;Dgew?Z91i7i%xwRTE^vB@ z%J~$^EqX3T!3v+QWR8`x1JYi*$Dow4 zDB3Kxjmn5I3k#f^#;A5n1(@2h61&sn$(iff7-mLK6ALTe{M;IX3dn2++U^JgY zSjl{Un(yq2IZffe3dU6IjWf}wPT<}J7hL)3+akX>@u&&jY6;?1Y&5Eum&lBzpmet> z#cLUaE=KWOC4j`t;;GCnP3~&W;kM(t6En znn^P@&rESTzrsme07oBW!gviskJOx(_qdtTMKCS$t`B$VQ(~`O88oF-sxD3xqk7%( zwpIvsHDXXpwo<24ez{V8-3yh5^_Js?<@4d5v<74P1G7hgp6#f;$5!G&Q|O`9Cv|##fFjF zqy_j{N`WX8dygLYL6AC;c!+9ixYA%v zTQU^C{zyV}Z#hn-Zun!=2V?ZkLLv}nwaCaICBj1Q*jbGOn&EFgQX9p{4+||r)6tV% zo$&M$p837AG_futU5_V6?27n*PAGkfwWH~sr!=Nay@q|<%*~<(F4mWQ!r{{=iLv{< zq!S(%&!VL`3FB*a&TdJ0Xv=}z4_@dAPAJ1Q$R~m6@A_NsSxU#b5x0-|5iolN_zQ9} z1|d07b#or1DxW~+@1WsF_RLSX5LKLh2ifW0Bd`w0 zyWs>UtPWHu%VuBWCVcyUl&_spJWvg*Y`icCZ3P^5w>G|$1)&tlNC~QXy6bhvT|m?L zr2CYvlyF;kE4A$HLXI&5xY_Hw^q#XD?Q z)bZfnqOB|Mkn{B9nixY~G0pW9ko_>=-Qlt4hXz43B5z<}_ZZ86-0O`ljQZ_9zEb^K zD`_{pUxapDxc=SdF%zMDFn3tr8J|6Hub?8uJ?VogO41f}W3@w_RbuuDJ*JwcI)fnv zWB4BMW{!m(oys?d?APeh$phSKov!syWQdcV!;F>edYX41WyOrs)3{o&1mIy2c6 z+EZ>k6#$+ig-!YP;+~I=$%!_0qRlBR^HZT^yrfNv#EtxaX$^4*#2J`R@oB4C!1So| zybfMzR9>u6&UY8H#wTrtbitA^x z9%D+dUPg))ALheNcS7pwjEQ6&cf#h@43&mf<6^j-En&ij!r7%0)xh(%kHlSU;xk`g zV~@3|o`kG_tedFUTSDbyd?(3>VHqp-xLi$)Ys=svM12y^*i z`18?IIT#fjv(<=t)g1BEYEWbP$%mMUzJrcOwuU*Y&VC9_{IJ6`dHKA3+xJhjPy%1L z`M|!^kLL23{;(8U5pAj^4RT(L+>H31) zxASs;tdag)cpA?hp+LLW;ih%h@tuCGnwu`gJ50>*o~G{}Ji1e5KWP3i>5oBEBe+}( z7bq>)Jnz?eod%xv$)gQlgW&;U6UA(`LEV52V8TGxf;UK2xK-$|=&N8@DOS3-0<&G3 zOcbS$KcS|J&7n?l(OrGs{$8amvf zgir2Fia(1H^{H;CF8%P%mLttbOC)=zsSld)N0bvUO+E}!G@fPn2zjl$gd3BW6!*GB z=Aq$vL@QArZEwEXh&+60x5qiMk!PQIUkX{ zY9}Y^d`?_?1hMWr`h4w$8n))Jh)i|F(c(HD!|1BxmnLQQuQG1|F>^7#kNZ<-BAmOh zmzys43u}UltuYa>URSHQM9NewJGr%X5Op$Ma}P6RJT&6^;guQ8h<8T*;jhrc4Yjv~ z`%azvhu7mc>2-p+TG9-Bdn1Iq;*~{zW@eVN=yzE&wO1BNmwtP=Kuc0i2lJrgaI>Q=sI^WGSPNUq}FdTT!#x^pN7cx@%9!NzRR)EJcNDO7W#IK;?b2utvZ%O z-7Z2WghQ^n?z)GN3xj|DTcn( zk$T*QMr4KF+RO?bfShB0j4h=wPcHL*lR=NY8nLeOq!ERwGwh~|^mqBzLz!8k#K&{@ zO5O>U2HH+Aqx%kNC4NNoW8aKN*Ve3#)}CvndbfmaB;(~Py(OfH_RN6LFU$WJmU?u^ z=(Se1yx*um0!0r)akq~vF{-aGs;hl! zNpD_HbiT>%xm)nH;o|2lM^}4g#r^R-RzoTwi z(4j=WG|#l1@Tw<&&G25syNR4V`%)>_^AP7B5hxSlz5$hl2vIwq#ZF6`m)y%7$dTsnBGUF)?>-<{8s>8UKrEaZO zc`|q=X_e=5`TbKogUUHnExCt0H{C;B%yI0tXbz^kg5$M+jgbTzV%(@~ZR(SI!3zLpXbWUL5lV-k!?!Pxj!)Sd2sJu)AOLZ=se@z z7x2XLmd9g6ZKd1ic&9?MWOO-Y9!Wfmn!l#2hECDy9a?j+cYSdgJ_!iGW^&iia8X16 z&n>l1yf-9&RlGTK-^Ak5n6;!UHSxlD!g(6%jxKL(eh&HFE-K=U-{Lnuy^5?+m?!HB zQ|6aTgWj!sA>NEGzXr7BE22jP&;B6|Xe-O$ewptX0;&P@uT!bTSva_+zf`<{W(Ys? zRpG00R$(f00UAwdSLZIG4@bN|gGF5fjb|^m*l@Cc_iCr<^L{y$dViNk9D2q>^!>Am z>PJp;IM~^8eAoJs)!e+eQ}TYRw72`K6oY$_7=`=33hIpBC0TdMC$&{VcTwoiVL8-i za@+p$KA)50WwpE(ls_15RuS`uOJJn@ZY9P!Y_(^|pACENQ73PVeSg-9Bx8$cVVEg} ziRPDop|HO0hjpQEF?!ggCMinDfGO?O%V;e1^zBtUMvl0FKr@!a-wl3r3wX9dq6n2;g3{ONKg-#jL? zq=}BNN=MyBe=;{oypV~CAzXQPnV>UfM3yxwOCmyiB|;2dW)R~_4$>x$_rT6(Jrj$6 ziEX2pvmBxu7kU_V0w?>9dfxcYYtS!3Y@4=-R?e2k>5HJR(bbK^3Z8cQwA>+HpS@t* zJKS+=l6fg5CyLXa;AOJl5AfE`dMma3)y$*Hr12ZEq&l74*XL8O{E=EU9daM#bS+Yk zqb{-qOHmOCaLgjUh@wzu(@o8ElL}gYJkI)q0et?57yEfRl>>1$iW>wh3p3Dw(9ER`6OfuA&k>nue8ZyVvDy+@t7RF|pByz6vPeRt;W%LKN*| zW-lCj^`*em*8a26b()sLvqM4Gs7vW7{(8ZuL3=Vc(WHoGBTjnmpV_CE$-YUll1x=( zRUbTd$?qTQV6VGap0wQqq%DW_7L`f$N$rPD>xGd?mMwoq!sv%NVob#(&)d^rwxq++ z8KKHe0F#7>?@}7Ux=T0tp15IucB2^NcqnnB&orCS`H^?*T_^6DfZA&6BR(Jt+QIMA z(~6fXP7TaV^%tamq}7{xY^c`~LE!o~t*A<5m7`53=!gFCnv4xjUrM&iTN$l7W;wH+ zpC4mV*XFpzto6ogB-uPM(_>7p-6{lh^s4Zi9Qaj+>C|-_qB48nxt}V3=ffQfYO*$g z;*KG_Jszuj8I{YRq)+C4`7m0VPcku`d7Z1Z#@js>#M8%m_Em9Y38;5Jy%tlf`I5eq zvk=r`H+LZ`PBi+^C3kTZF*d z0-Cq4V@{HxwM#v6f2aU|ZGl-}xKH8(;Xi84YqEzoJ4Zip3(-Ak&!|5{O+_awn|A+r zclhIe)hR;9K9sKBt|?72pR!;@My|WX&$0G$^}J!?+NbldU#-Vw4^zG~NI#xT(b31~ z5{x_DwK_HDAp9Vjp*<%(TU`{ppr1Z6GxV*U=3S(AYEd4SIhrMZh9O5iIqp;^1)Cqe zKX=7M*X-5PLGZd&7Bvht<(TdCT>R7?>nIQUs-z5MAwI+H&25E#oP$X@V(%UMowoZ1 zpY-w3^=lV$dD1yPvug-mWKnAK%Sh+5g~`%@vjK0(=nY;D$n4aj4;S-BUsH=GEWee7 zlJ1bneK(-2npcc}MCu{H+qMt^lW2P)-~DKQ*501-k7@^{%^HZDP!qReb}MdW7u=Hj z^+{i=g}deaaI751yrxx_c9OEvR~qYdYG;wKlT^Wp>%%Tri`!DwqOa}FK7-HY#h(qS zq0H1giaZEMY}PNgmXYa>EIs7aPi?Nxlk9%<&s*AYZeNjqp7r%2mZg}|LittpoZsmq zsy~S`UBt~T1QiwC9n<`M)M)1u4)#00l7`|`N0Xr!zG0WA$T_}0-PyPxgEetkU+g{Z5MxSqn3IVT?xF(OKAJdT4fU+J^O zKlBZxna-WLHdOv+8=Eccm)E^Z^5R3D13PwqA(1nf5ItK%mfzP})9Gv~Ow^7+Pe^zt zU%*PwGx~_B!j5*rX2;LdJg#p#ZueGntCLugLZgx!U14=f@7Je$OPurLEM-{>)`k9X zgudJ-6wOBw9q-R(dvY?Cl;um^5wa+q*aC?PDr6riZvA-ppcCzyV>qS*DOc`yP)fId z{eCygcQ!nO`$yfliS#f15~P%nsuE#|dQTxAcg!$e42bN-26wMGIh%IE;m%|HbwU=i z!g1?nUl8A>h&_9Zx@j?b&XAstQ+{mTxV!gCgwJ7yx6iE&KNCi0?^)lKijp;X#Aeiw zUwNa+gQ!1DDjjsXoiSO|k>W*eD(04dj45CjR##JUL<8o!5)y+uI5MOW;tJ_ymC1mR z*X{kpA9_knW{()_lUk-gJ?)a`m}U0G?BZRVtWRFWWuBZlzIN(Iq4bq_J=;a9N8@g8 z?owZfb#X7(>!2Tp(29?km_6p^BmSII2zj}oCXW^w<`~gYzVaF;{hNl4FU-w<)wrJZ z_;rfw;2#Nv_sTVm#F0pKr!{noyN>Jn`0zuM?ufa{Pg!s?+PmKiWNrwFws5BahnOcoPJLG=I#kV^n5%Q7L zhF83CMbzv#nBx^SCSH}-e6VSMLIwp!HP_9$9cOUIt(Vh0u_wcZWGY@tF&qXxWao<$ z+IKr;8==;1=G^1Ef|cWGNEAh&$$(p0F96NgK6H0ed@ zVk3!TN2J=Q*0aTXy%v>!BHI?Yn$0PWOVEq*5b8%y91_jl`q>pTJ_tyD(i&mcH8ngr zk9Ka(t9Guq7~DoO7yK;0o5YJacRG`7zVU<@y?iueyLm~6X*SMfF)@G2JPxiNw_dKlZQWH8^ zWk-nWI&69C=gt1W%$>bpef20UjVCYuT5w_vkoB3J(aA=EKB}eFZRv}!EYsK`b4VShS$;=X#gBcVgWh5q1-I4q>nEYhYuHFwR}1l;D0_ zR3}8SpwQlywW8O5W)lu+@Rs~QB?{mxj!B9}BoVUcj4tT{#nf1~b^YRhDJzm2HOkxz(<8Up%;L*6 zDferQ(c~70m+gYVC?X@C=>(Ab_tYFkOue4y`MBQWvl#X(+r93Uri=w+RLCv+Q0V9z zxu{*jNqAtvgWYU;-jK%btKjGj<5C1`?8&+mqme39rcn?{R36c6_^%)=W9i&hU= zy&{kMl9^F|13uHow?FMeCTEwHKK+tWEqWqkr`Ec#AJxV3;)f~9dx>R>U%Vf4Hm=!Y zd1Nc-jWw#5bDkSL7c^EF9^DjNph&uh+G2RPrUO)w&W+yYvev zvBfbvm6WZUpI=c-I}7HLUB|)I5K7CbUGrpJ(o(GppG<%*+*qI6y0+8=X?I&Z`_GOnA0oE-e(B~Nir zBY$dtaNKk&)1de3jkKQu;ZU;Jb*rFFQ{{}xp(B$^UZrDAI#2Q`GpCT&jU?uCL^W32 zPYIV2OnpV4Wjo|C6glB>Ol8pAc}2$jAWUzG-?y}}R|k!Y^&`<|xW6GWyIn)s5DUUR zxIFwSNs4+i+iJL7qAjPVZBOx$xpD4;Ts{kb5%TqIF7Mg(^s!gAxmm-mAMzMSG>JSl zIY_IF)6P1sTd zCy%#AJbm2PrSj&7{i)oGe`M}?yNyrZ#Z1{#>dDP+J~}^zAVD94NfC9+SocP{wYRZ< zdmiN?ba?6@om@D5N4m?~-#hw1xg&IN$K-0&<)$b#Rb9NNEwvBF@~_u_aTVHj zT+QS1;H8IaVd+DU-=B$6WBb~u%9iig+9SEbZVu=^{o20U=ZfWdLiCw7Mxt`7*qNNq z*If{A@Alz_?{Is^v7ol%9eT$OHr4%_DpM=G*|l(!I5xNBrQPwCAoykZ32x?gqbRjb-9 zyCqa#&msL`0s$!2WqI7{MB~VFvsSyA`h~a(w!^)Kt$w#Pz3wOKPjtyHRc=X`e(6B2 zS?Pgy@5n9RQTo{*mWQ8)7xOEQEO)@`vUkw5ItJZw8;KM{Md4)zPg5L!RT86GUk_<) zL(M0J9v|;1zB`v$kJWx0sXZ5>FQ>OBa(M*Q;bmv;JZi=+pZ_;o=dH6n3}okfac9VY zaw@h^&N-`74zF^Su72_T4r{<}C6SzAK@!xQ=je*B?zJ#OEg&f&mP*F)V=paHTmYUU zf2>b~QmYbj_>7lX8((36W%CA*%fzfS{O5g_snJ__7^>0OOFB?mDQoT(2h1-wP| zKZP_v+uCH;G8da_*Z8pq^pqPKJZ){Tm+cy!3&>9LB@U*4rD{iiFd<@%l#h$6oS2H7 zA#7+z;k@by3bg_wDAtMd)dAhZ&*&7z z!A5&)owmreh7nZjN9xcVh0Wr|Gg2K?+n94+x^5VPXQZ$O=rZvlrRZFypeQ9{bz48N zDcPnFW#5ja7EzNem&}VkhZc^`;44jU!>jpbZ(Hhd-fdWjA|&cOTL9^=w!+&!2L!8Q z;yA$DwGioO)cnDJ=FHSm62p~7d`OVK`&C)N^c%`0zcC)Ni|3Gwdr<60P-N|$*YQJu z_Q@6_d=(h=LfE=?2rOPS^)DQv(Ch|!2J1SPFc*{;10B}%XAb&5$Hv}tLDCu_jO?mc zNQD~d2>3ycU?HSm(+T=e2P@c>Gr(`#gW4ufa{dM^h1XDja=J)&*(WKhjUleAK@zSPG(zpEm^kH8=#?hgk!6v zYhiN?TCpX6xIu->r{29{QjXUer&drD9c7$}Qrh z$AaC*0MdEX?10m}MALC}j8ZBKUu!^mZObEhPD@RH*QV@QQvPbaP*ev}>%6O5j6%?o z7}#3FWmRO;j7I+X-4m=0JrI-DQ1IT;>WZ^*QAc`%=l=86W|Z5&mdM9+N;EAl2k>FI zOis4g9SB4E3ZO;3&e|~pK)d;kbMnprcYw9qf-s9bJ_~&o2OQv}qK+eQlK4I}@Z!0$ zsTBx+U07#;*8{_IjFO7A5UgSsqQI7%HS<<{GNZ5MYl#g5M2y)USr2)=txEqu47!Fb zAH5yI*cT4ocPx2IkMj>-JFLQU2m|uD@g4+}fKF0ffKo}i((x0dSu4SJ1=@62pEh&M z8X^OZdV-&{iThm9Wh=w9KrVDf@hN~0HJaUjBNR$w4$cv+*M9)6Kdt3bwr4+jF^OBf zVf2i#PZAI^D%PuxlK4hc4D7qj)zh}p$9JQHZnSadb=N3}obwn@OoggM!8_C25R9~C zy!_K%-PV~oL-7d(?EN0}x#ND``p`cC?upiJR(oOn}}<{lCk9 zzpi!XKuz(v+Wt=LC>V$^>kQ2=%6a6TMKoXL`HaJ4DxhP9a|uR`4F{=M1F7$b_3?7u zGR$FYfr&}re{xThG%Mzw4HLW>^lmdK!cTBmQ37v)Lj^ee>+@85J2E%=J--YQcy zw>%s)AjFY z_q9bs!@{yo4}^Q@EedAVYeJhR*9eTzR8(;C>U%t&{SL$$rbU~#h;ct zL};5~UjeS!D&d7fzQCeAX}1J_vg*|1-(BX1#jpE0*e&U;m0I=+vCM>BtU+3Ks_V#T z`N=-T?Do3H=Y?YcGbWd(k*(V7Z106B^2<)Odwm)O_W94aZ?UEW zbS-VTp9|cN?*gHQD^NYKoX=s(UU7j1bcih<76Q-y;Fv-PQ#iGq$>wu^11i6hxDqQH zARbVD82?xX(3k)>(UQwF;;0_v!k z4;L+D_;kx{j2u^1W$Gq}jBt2}jS|zP7vox3WwtDlL6^{*q(xnS_#on);AA^YB7++% zRh$D|t7#?cyt8QO85_PqaV+=Bs6SIGFJ*eNRAn_nZ-(#Zdgm~YkjV|tl3m&3?p!EI zE|ZMj(};x8xH~GPHZDbG?xK8yJJ9;XQU80-Rb+01h7rk&lYaaTI5%g%Ic)2K=invw zB)vkw_jr!r+r=b*xo7xA#w-{?-u!q45%*2*J7`u~l}z72N-tG~RA!!&B~Wb5Sq-C< zl`=^M)ta>o{Rsnm&MaRj^Y15MOt~E4KC?cW<3`IUjcKAf?glefs;1#)qM zjNe*tYU6FO87jaJTUudMs^5DiRMf*sR+et7^TaxeF@INoF7^dtq7m$A?eLVt zxcU}#LUCM5ETf1yB#fgQvr2@|^`g`Vw7M|?jdAST3>${EelD0cCeQ7^a>n*h_r_*) zK@Zs=M%O`qKIC`CJeO2(07SAFA=UQ?V0WZJN0DaaLzSKVVv@_(BCSzXQNTSRi)N!_ zjKv;1DP`dAeS(8C_(X)9R7aS!iOc-BeLtah&GIp>BN#CU=BsIj^Urt4?a^5x9B&L5 z2ra+|L^!l%5;CzJKj<2N!DOP>4Xr2VAV3!f%$t*cW;rRRYlx+|Lgf?hj#+C`SP~X~ zdBFJc(hs*V$c&R(xe(e7AVJaZ_SvC;J%Yd3mK1tp4SiZ-WWA|9rpL-uKzFi{y9#P3 zdcVcP;Y3`LDIt_O(ulXJCK@;EnPd$70$G3Y&s*IhZBO`SUmj?XL=mmuh^4F8WxwG)&d!~>)PIP+KAoR~<$;pX zi@G~kp>dEad^0mahHkUQpiikgAcX0>RmZ%4@2$eZ_!`I3wN21VT6LNh7^rTqNqnM@ z27QU@s0Fl4(9QlfAl3#yUK~ZMGI7fCpr^AEG9A;M&D3;^eZ?Wt;yX<9Wa`_IN2nOn zx>pWU-+Ld{C&U1*V*7c!+YG-iV)yr#>g4`h0la2%nv;PFV6`Hp8xXwi^xu3DmDuUay(gtO+&I6&&c0H@sti$Q%MXK8+6&-wY6-|o6K{1=% z>riP~yR`o3(T_(q;|S&lBvT~gfxk<&Cuz{WEc@Czt~WTYw#a}LDj@}(VgBPk{!1kP zPrm-&{`>#&)b?xWVX}-c1|PlZ~RsJm)pOlNOF}N zGPZLX2@J!X|ALVJ0oNUWSz)v@lVGU&;|q=dlCu8MBd$l*r*_Ygf2PHY{tr6w2j1ri zRN>8UgxgB~>*HtqFBgWtAX)t;@_ZDJ)L|G?`kPGYue~sizF$-6oulU8)FJ=!`DZpI zu`$XT56Zsj-~92`9~FuD7mBV9ccV#_xtZr36|z}{CcI6krJw76p04DAkJ8yLa+j*c zGgn{I4L|RVa$_{4&lbOBZZWgTsc$VLLo09x`t;icCAhsUG2hVeDs7kaD*)_21Xe>o zEb#{eK-4~Ehrorsd}^$a+^!g z#kZQq$Jow}1oTNP9GM>zv;^02YW=%dMA{e)$L~Sg%M{9g6>cINgtE$6e9I#)nK^t1 zTS;^8*L5nFcSN+AZ`h_Mam51ugVO9XD$N1A^gxeTqy1q#Dv0bxSz0c%K2U$mNr@dS6#GrCeV0=2r9wNhu)|!UIoL; z%`(w{^ChJKJ7VbRBB@Zbss%=NPXe5z{sM2MMVZl=orT86klb(_KQLQHhGibdO)I&_ zjZUEWONE?r^WJPn=$O+DMV_0A5b*wtGQfaCbNOYZpey z>O6XqSJ}Yt@xE5%AgBQuzGs43wy}w|L6!-Bv0vo?9N_q#$P0uT{DTG;!~|GQK)gHs zr4=Kqy{w1Ksc$OJb;XYqj-R^{e$I54rafzY!paXaJHEKQo9Ogib6&e2mD34l#sc|4 zraXBmqQ(Y~4O;hM^SyUDiolz2%qK!bVSj!qxc32YO8ey?p(U#2c;4iCdddhg0Tsf3 zb86TFm1jsM*Y5y>VKdz}MHl*4LpF@YH9Bgb>+xDxdhxQ8lmudhO_gTBpLjrfaeqD~ z@<(GDTswer)p=0Xdys~>qF}rMYMC}|9z6}EUA8?($!fn)HBD*DU^Bqs>Y(cTJbrQ3 zdF7jXb+vE1m@NsQb4|!3MHe1LB6Q_{&pTsSay-y7Nnma+iK9#}abjGXG*tP>+a!C> z6~qVw$m>2_SS(k1T&)mPFclFBkje1aVSGQk=(PgGC)NwT8&%HN$v}VfOaN0FD%QYQG47Mx-GUJ&{ z3@HVGpWx&MhSiY8X#Vw2j7ivcp+A*l94Gryc?# zXD<0F6a4D!y!IK9rtxE)y*_;?00}{GmN(@OA9$8k?b zQeHVd|2dsK7Qy)(jcbj6KjJ#fvt=RGZKhO}uq^n>9D8SGCUkWGRHEMPl_{)~7MFe1 zFsPoZ*Ox$Gk38t*AAu{4*fqZLj)BA~`iz)=deZGM;lojyNWpzAdk8@9x|$PbwmYJM z=C?h zuW%MUD^4-t*OJlk=R$r>8WIU;p8I|v3BJu3uME=OXcg$iZ3wr_-GYsNaNElfrm*ib zr&PZGS{vvu3msg|PXt^dt5Lgc5k}p^H_M^xIE>GGr{!?`gHGL*6eXCsrb<2(7_S*e zPw|pi(XDTglJ!h~j|A61{d9M9dQxXd`_>q8qV<~}C;;&xpBD)Pq}HcZ*0POEKBW;l z5A5oQP-xy{a07b^x= zv;uA~TfTCC@`7wuYp5t1B!jHsUaqClYI@XSx4-li&=B{1T)d&X_tPw8eFN&w!OpDU z?t={UVNo7R=)QWu?W+oHy?_}B(vWc(`lXLgVFE=ayH`4OMt%d8{a#M6B>Ke8&~+mT z4|G=lJq=EHoTN;`IVhT=yL=Qn&0_Av-t|2-_pQr+NoYix=ZO&d-j2%8>NBI;R_?|G zRJprXG)wCWS*astOoWR@IehuPW!dpk@H^7!7pt0SDcmYoq{mcO`yeT=jS^nR2L7lpm9GmPrcbZLcz3sPf5`{r-+s0aisB^H-gXj-$Pa$FLWVCMD4^xfAq)MP3v-*!|zN zYDk=+#Rd#`vL4xcS`lOI8v;=Y6fvjAzPHEByZtr?muuYX7gG)!f zRB`g-kPD9H#&8$c5PX5J7v^jaWGvz*b&01b!e4e%5d*XcJCygb-{s0;C{-de|7%a591+9SnM!Ag|s6pQ+VM%ubf3c}w2myd8uXe#-WGXAkn#{tijA)M=)a@A$ zKU%L?@AQ15_hUKU9fQv0vz>`;)6c2`7)WDFjJj{7L*{r0-AHbK)HV@XVoYvaA;DKT zx-L0=#!3+j;6)&?UajM{JEg{UIHF*~D1Hswl`3GuDAVF#_Z$B7w@AoWTk@Wsf9G++ z@P6`EEe~So+yse}gZW7=^gs)k1127z1qPkM)>*$` z9rB7N6CVH$*GJY>ZluGOht!@XqV*#)p2+b4+Z5#882eIh+H(^1wVX};6^~}ab2F$e}Q#sfWlg$-PjQ$V__WBF1N~0Ux&yRp_BS-;om66nAFxd)OO82`*Xhn=GM#z?Z9Mt7VX0YTcwM}TXxc?`Z|YDs zWRsCosNNibZ)mvKdxbD*f`H1Q*Z zUqs)Sg@#|AS-N{w^+5qgxqUnJy(@}X`Fdyg-LN`vfo*GEQonf7e@zOlIzO@Kc%}#O z(lF@MI3O+(VWi2xhUA$%(2a)SCRIhdYe^RcaQ7;)QxxVHK$cE%K{Zyv)rVg;lyOMK z-0!#F6DCZpD}3;7m!5x-a)`~ZhS~D4-HL=%6a`Y1(8ixxS@n&>`KrI4P$W@1Ug}1~ z+0`sN!G`0*<@iD5f8Iqv=|rM$f7xH0Qxt0fg^mUq%9WFdCjn=Bwua#LKx)Nb zp&Md)-{W#LEk96MF1_J+7qDWSR2qqSrqOqCgzYL^Y!43!P_k+%`i`=-ts^Q42K5Ydk~AH4)RS2R(^7M)L-oAgpVm90I4R(2FbDwt zL#MsoBfgJlFjr83)DJ2kH1rt@3mPj`;j1P5r&zB z1ee(gS$SDdf0;I-xc+SvT(8HNuu}XbwKrLlqA-0VeFy{E!Ru%pTm-Cr>bL%0!F8bMi_!*H&;RQ1gbMV`KGQ>A=X;H^=&O4H~X2r6+Xkv71A4@9H`UB0%1 zu8#CpmF4dM(!-zNIZv34ZAe~?_}b2}e7sX{KOaV6c(%ZPR{;iZ=#ZNHGmM5phML zn>X=EvVqZtfo>pseDOGVZM+9ESbY)24hS@fg&U(|I$~fOA~&9R1C$87D9@L0%RePT zj|jIh!+XZ?OX)A2nz>B#sz?&eYTOdQyam{te~cfjxw=N*Ky5dtg?=g;>0RB%FUg$_ z&*4gitNBikmsXZzGpqR(Cguw1@n=2UY%QI`1l8}x;|9#wR^dXbIB1Gldj;zhQ$^+# z*nW4tSuBpZfPHPZS{NW2J@b&_P>y9=xVVk4I5*{THe2?^L}R*Y-XiSlA>22hmd?|Y ze*s=r6Oa#|-I&K??p4PTOA?i?mI3Ewel{gLh>ec4Jb};;fekpHw60P$re$xHJw=jr zx<_!+AS)5l`Yz&lHF^mbMO%6ky7{OF5SB@N3|_m4PwV=+*%y0;JT+lQ81rUepmmDzavzVYGS?a$ZZIM1;8P+5Li}Q)^ZLSBJLOu(+jgr zm%7zT_XjA?$~ddis;-N#;vH<^pjUWt!f2^A$7Q!%H;IqROsmfyLe{H_#s}wABY!Hx>Yswft*=DDn^VuBH!#wk*{8>SO z57!1gw0|VSKxAcA`4iUI0CkC4>XlI5;eX2@IUVnw)5GkNrd{GbDj#V$Oa%X>ayHQ@ z&oQTE`jis)J$b`Zo-|0*DQ7hhIgwxpIL1sD16&G8ppHm>uCK6E+X4%te?iqNOjUNi~i*hvk$b1Il{V+ zJi}7V;fGPDssCB{xhTUBAk%DQ*R%D z{3EO?8{f`$_|rosRI>Q7R0nIxjtPG33PbX8Z-guAN?^>m$uBnCTgp}DWxg*fX& zwO<-`IG?q)(B_X|Q(|Pr#)nT?%p^fH7t%F30@p3(i`8aiZxZJnz4POxz2MhY1|v8p ztf}k!vT4t%LNnugf98!J^j~t~;J(1=C-fvS7D7p)BpT%VtlU~NupNF)TEogr0J>l% z3(itK*dBAfA|uZ(=iG7v{OA$#lUjxT<0|@(_Kj6pb8(~%AUOs*8yR!f5am2ZI333Q?pTrzbncY z%Gj5nYSK5r8ARgw-Xg^c4YM`Y;YhU7NW}UmV4%T?t^}Hf@cj&`z~0_)yPz?f_agQW z21c{|7#ZCnp`ATX$dwt-!P?MVL=XZwr&KH>Va#exlbnZ|q+fhZV94u1Y8SrxnEyHT z!re?C8$T>fe^ACy^8A9hA3Q96+kHn;rD^gzI=c0X{z}weMVU_uyM_iclyyLuOiv<_cdE zdKZjT8kJn3;%&MfJ3rLrGjX)OzOz0MvH}?!44oA)f6)XEnOHR3K*jCLxCSd>y~F-n zVg2Rh;BpNsJv18R`LN;SnO|;+B|W09N7y)jxz3he4@`FoUES>AJQrha2WCe5vXBna z@(r>v=t9Z{4HF-i+zLKRZM5La;1vYT@Vn}z9kKV7WG<-_X=M9Zi)CwuR9ZYYnFdYc zLS^Def9k$`{4=}>q?#KA0HzS?J`)3sHH;;5TMDGa4KrhaCB9@T6a5FLU=>BPnRNrd z>d5&LKsZjlDKq2mQy&&JzRSUpOl_B~ky1k3WB`rXbZs7N`L$kf7e0hUIQt6SDOU#9 zFbhxuz*6(hH)rBX_}k1E@+4@LRH-QS1(T36e|)KlwO3EooiIPG-T;~~n2Kr6*~>_& z0>3sFe1&QntHV*%2zIxoIpgKo8CY=Z`I3ti;Yc!RTN?vz>2l^EjuxIDeDsaNTmiC+Ixj2#D{8jyz`EGQZe^~Bk zR`xT<+evXlbQe;@3c2UK5cf!2d#EV(rqH1%YG+Af!@wjK4hU@3j^!dQl?SleNjqo8 zM@^&qA>7v4YtHo{9_C?%Br~n2*CULF;T`1^9qo}Q**QpM3#R!tB|_OUm@+j8s4x3C zgxyiEnceO8*0Y6}bqS{wE4nG2e}12_vV{VjUtw=`fmjj6Dd}%+_|zK@GWRUqU(axN z_t`A74cy1i0B2Zt%V99i^b;Q$Z@f1oI2;G#^KR`U1%|U=ddj|R*XvzhaJ{6zPYl^g z2Y2Tob=Xty-9c#wrz9G!o7BDCz<8Jti%wiD8tL{dmRE$(DEuB(l^fBM6R0@vuU zqatNS(XE>N5w@^yqc6QJyqAk!!@F1B&(f1+*1@0}e#DCIWQ z&6xUl3d4Npy-u!TR{ZvnY~@CkFCR4@VT1nqDD|i5>J&hWm$~RAmg2e&!Y9yxK4#O= zkl@QM;oGWf@2EOYiqhhvnd*M_S@8SlbGEM(JYEG&T6Z?l`o4fTR#%3GXd(eJrW$BF zVaS{QB9BSy^g*_`e^X>-y%%-mrE~!FNaxFiKVIX1r(M`W`&sFJOkdU7TTTO8)>!m! zb5@z36EC5nOJ%KGJ+3kx4Rup9uX@4;A@dd~3qDNW<*JwM`^a-7By}Z^JPa`$z}pc~ zHwk*Y{P&Bp;9a%)^~#e$);`RV9Rt$(Nb`%{kl#>O&sw%Me_Ty0(=wG!ck4mrLn8~* zg*%4@rFlW8B|Obe!r2Ml#%`7C4|{${W-@}n&j&Ycc90$w9qRekAv$H(g$nP)n1W_J z8HG`UIZnv!aTZsXl5rZw7&Z{pn`X<4-c^CO-$wG}P?t*V>4yn}Z#-`33l;qyTocM` z<^j$$f5zGze>pWhPH?m4)qO?^w)gXMATQN#ERx%`TMHcsue8KaVT%vsIuGwL^CW1WQXQ-ub zc&~;Z&RVK*nFd(E*}h+IS{+;6E~(H4MV@8r6dMYM(yXI(vWlhjw!)TY z-RM+uC?8zDbZkX47_t+!O&ca*^{xrwpMNlTM)-Yv{uTsc_q7k6bqZ8%3JFJig(zpT zyhBwee^JJ|$&Du?S2exgdh+~LW>(-I)miTdxar8~3Q?AtEvZ9eP8@L0=vv38gHe<^&BN* z;(YD$6kJT~Wnj5s;B)}P zGLXo7dg+7Io=Q2A2&)6&Of`ETwUq0~?vWcICscv}z7I2J0KU_Ri z-kHYEw&x(ym6By0?O@#;Nxx*yVe9$ZGenYaEu#s{ zpADY4g|MbV#Ob4wT(s@HFC~gcaDblC29dCEo#A?gHb=WBgcEo>u$la=f8%)UlquQW z^{F}h08%2L>MhXdo=ps2Xq`-6R!K1h&ucYx(9(3Y-FY^H5RWM_Vy<=gTl8pXzs#yT zU(Qe@q{xql0uA-YwhySBS97GikOrDMJG-W{f zwOxY*`$LxrbnhddO?MH3QZZWOokO$f06QY=G{PFV7 zh3scFQNu4Z#sr(QoLEyXEFiTw)w3Rk@8XP)1m`TZypFty(Cq+<6cxfMF% z<@cu}(?GX$y}=u$avqS-mPS#uC^SC9lMKhQjqlr)k&|+2d~3H_nKl8B= zfuT{kMm{;UM~z?n@i9UNv) z@bbE7Qc{-5n?ouge@6hHM5TMLfSKz#T2D-hiXoI6DGA(-Pkq0>d&RhHySTo0v+cye zJ;~vwUzFnq&Rn0dl@6bWmALVX0X+}9c?$ld1G&Hx0^e<({D(C>&MfS+GhW38%# zDqqV*U)@L?5kx$dOQjTdJIUXX)$m`j;>{%KfRX6B0yO6254sN|zxqj&DoCxPML^P54Vn`s08Ox!w1>%b#D+!jKIXRQ81U?b>kKB`78UY4;o!QLs z`IZU=O56ouWYVXo7SmdGk=S09won^zCD8REjcTCxfBfe^8T+zsRe7e{ALIqt>t!Vb z2(6(N?-VsO_FaHqe@1Yvy`SfOzwey0$5>;pMh&S{X3d&a1anremut}W#9}jbJk`zJ zA<|3pvpl|byDBv|U2`~17AY>D+vZq|%FlaNH+y%!Z#=v{xJ5WbBPD;W3-emJ8*{eM z=R42ue-_i(E7~aRc{H3)vN?Z_oAq>RK2FvCJuY4j*l%0it_M8#Keqa)qI~w)Oz*Av zxt|~N_x7?7gXU;nRD!5oa{_7YAL=SeQrV5w!sWc9Jvf5#^CtYKhv!|5FV zos{~XOzx{wozY}N^ho#d-daa>8q8tD;nplWwG8z6qjAfnEpNhJz7&s_ zB0n;89eJ+_)~2JOVR_TN>9tE@&-c5Rb9ZBVYqjAsvrWaM`^WQW7Uj_xZ5HP3k#YK_2U_Ke@#=NRU(?ll zc$wl5j@^_F#`?}WdDYVKVkPs2#93#bjgjxBFL77wvbvn)({!l%@to;oH2pyP+?{s% z#J9q#{j9`eg|p$!Q?m1*Ri^C1c8}Lye+O{_b-F4}7cpK(r}Wt7WvZ3a=OaGtvqgs@ z`Z~_z?Zdrn)F8XEUPK*_HZ*7}+s2z9Sb+*s;^;REm?dLYJd?(xQ7q`1JYt=Ycgy2(fs#bjaV+1Q-hFx($6aR`C3 z(62bF;XKgn`r|$KU5rBqqKd8aVJ~Mpgdnc?s|04eOSHU zT1KnM;nlR)#UQ^Vrw$x8z{NzHh1Y05wRSsu-&WJrF0$9zyR-)v|7C^M&T}WC)xA=E zq`F->)@VD9TYmJSWxdeCp*_zA*5C{e10$*pPxV>-q@f*`=r9_kxzl+xe?R2%A$pJ* z*0gz_)b4qHq)JvCH1m-kF3aU4AXL*znxh!gmF0gnlT|_JptA4wslK(xXrnyA2$hx; zTzR?5bCP|=&U?Kt#Uajv8%3i)-F+^%K?m#eJ{XeZgjP+SPv+ImS~}~_kFs64a}slt zj?&#ZROiFtboB7%;(eS9joY%FGiM!``->)&bGCkmr_}>_fhv~c`(!ibon9S_wzOcEvXQvArstazQ?Yg`D}3MB9o(p{H>0i`UtguIpqsK!&O2pz z${S_+8LU^U(zcZ4e^@!qUy4|@s~27VJkYnld225p;QsyB-%4cw_P+OFP29h?t5Nw3 z_)z}uGfW^@peM6*ct16rxeAOX+K90bX@rCj>=9)|6|s#HRd0Bvq0_<;d^bWPFyH)G zU_>c{(#9)lIpSR4!_{{At;KiOUa;pKtGRRJ?*$%!>p?Tre_Y~WHvDM2g|CZ=qFup% z!GdaPwmQJzsx9ei5I{~e&MruMkSGhvz+90+4V%WbNC#1l{3}_+u@eMBx9fNrBuHQL zL=Cd7eq!mN*hRV!>$EvgeLh)#isQ{d*USqF*D#;ir6?7{yJm}`c8rK^qTnf$KBqgPak%9)WZI=@m9OhwRk<3TE5p7DSGIW**G%FH-b^8^37Zc4)uGR310)b!CG5s#~Cfk zf5TG|;lx1KXy6;$Vy-RS(G^i0)fwQC$`m=?sd1QU8xWwSqKZpv%u-|Xb`OKlC-H#o z_Mfx=`ixNYfFl`a#fmJXXKJq2<@5XC)qD4_2kRn=R`k5N9>q zy;P>`^KLQDo7*_=*6O0gtI}Ri!VPSof2Nr`wKaYj``YV$w&#!PeH!~vEE4UA*R0d$ zuVyoUwZZ(=y*AHrxp}I#a}-lCx-HY&9zWFZsSFdU3`1C|YvpiQ-F6G=>&W!9*9ItX z`{vgSO-A=79gZJ6c?|shaax~(@XW0Xopq^MzE*p@IH=xhJRB^_*A1^P$4`Fme->zI zB-(ASysmhP!-I0(B@x!#CrpKh@Re!$=e^LuSPn|he0jrat8Cw^PHl7TX|IT{%+L1A zTq!U8Gb)Eus^8L(V_mMvCJTP&`VX#)y zY1oZE?rb+)an}#^>UPgY<|A$of9rNhnk^b1o=LScw(53XjQDNM=9{hOde>9gb_>_9 zQf(EePuuNwsTU8X3x7L2sU}(8L0NvVcm+p)xiZStW^3NE#du*YR{LstRuzBq7TWGS z3bxnLBG0DlTqO`5Hl7}jqWj~iU5xeIF)!ggnlNKbi0@RVV#d&mkJ}kfe?RMMa5f-B zVcHc>V^)k-dhjvyOR4P6#=JA{=76$&Xy{OgM;rS zW|dk-=5W?7^1&>%mAzeSI-;|bJZ@IG<(DJdzg=V$=hu09bjQ=${uFtj`D0rk~da4u;qS%O{5FNB8Ua zFrJ26V?ngBS;ywIecIh&w|af{m4Di9jKRgfKJ3DX4zK0PD)XUnGaNGBL2X(?uenr@ zu{mRR_slo)q~l8ze~hcW4;$a_rd7xuk4HXsqtaBvC`L3rg-SdQFXwq`g+sT%g)^z& zRqEGSanncl*^SVPbGTdwk;>y$FiRaewT$k%7K6oTSKK(;9E_P$d5mtJfwf5nhO&H| z#ehDFTk-I&{`iC~air%(K5kF-qjikIx;_eQd|i`z+m0*KMph zhI%?EO=XAS*@_}Ifa6i;#{{RVSG%e{onPiIne2_@ATnZNnT~v5;+cQW8)h8#))DOr zm^~hF&?}qO$98fc)hOLB93!U}@l@1kl9EZZx@{EeBAy$;kL_fOZ|i`Y?a4lLE^*TH z50x>LppuoOo)4&2(jIvqPBOjN#F{ z@FqH`llEYbM+bXd9+J;AFSoPRzMEW(YM0~>{V;SURC&F!dcCsT*~(7!1v8g;aWf~e zf4g-HJGqbbqRsuuEa?KZTy~dxoR-NoH#}XZpK|n>e?0i@+)=Ddv=IqHyS}9p zmgt)U@$&K$&o-l(m9KCU>MwJVd3Xo0Iz2nL<%rjtk!c++qYvoX@xFcTj_)HL0$Gf` z;(p=tTvsjM=EdoJCSz!uo4Y&=8%!_nR*7rF++`=-+^wy(hlB9&W|whr$y#bnHTC4p zE@oX>e@{|R!tEwwPJJ!?>{*8@4@)JbtMg$$BkX;BPW7pOo)22K?CMl|dQk;+vAg)U zmTnI8_D&CFgfGi_yqVfj-fYr?!_vsjhGjQWpne3d`U&H`Uj+6X#kb|?QO}qAXSq3e z{&>Hu>%(9lN2fz^c&pBUuiZ4iEBaQwVLrU%e+~L<$DK8XVvzDWYq!JbI=;`2&-W}# zXNTwfnl_1=TnqLYC9EFnc)oS_Yr|Ey+_|KuqS}a4Nf*Vf^QXheIMPT{pCi7QAUmbD zaq-`W&+HtE*xBDU+R=SE-fDEWjl?4}E2~%Ao8QrAviD!ld(ECBZI~*xJrDNpaGlKS zf3Z>9>w@URL$}?F=(sScwqC;PI38IFSNEG?WDhJS-8E~!9#^Jg-iSBq4EKF+mdU(f z{z^H_X3oni3raQon$CDQ((K9sJoZMzJpJUawK;Bf!K>D83Ei!fY4vK%A^1TSGrw5N z2R2u6EYARpsbQ~f@wJOM)LWO-+U;(Yf7sSNZ>xrJd%c(SU>9en+o_&2(;D6`&zZ`M z{dQxi`?5@RZmd(BWHFpJw|9{|Jd8Y8Zf*j04?Gy%qfMHvQl9Z^=X~f2z7w z{llcNrpMXZDJSSQ%fVf~y>#RzmxCFuR`Z#8(Uw{lOw~@i%{W>pj5qt&dFq@#k<%(L zoGE;=RF*KE3?MpuKwlk8AK%ua&F1;IhmXZT@yBdD*e*3A+uq04X`6J%+1qsIi_dI% zJ!Yyv!MNIHAMGI4fMeTLO^vhSe?`W_;=XRu^TtEw66I{tH6hc^c6F-sIaagdWqMok zneD?=wF6qPLTt(X6R(VCocRZ(T*V*9KF*8vpeErmp+H}c(5c&7a4(MjC_^bgc4?xS zx;Z*CW$x||bP+7OwaV@sxFX80L5wzFL%ulJN}j-4IN+NC}YZi|U?e~Hx0qm{mm z1bs7Hi1N)OpBA9j)Iv8`xLVzEc2O=0u36WBT{si>qjp<9(sss*30G;yuy}n2AK|o} z(kb{&G_VpxG;1ADdcJQur@GJLXYSfTd{_7JD6&ZEaWX6S*4Z?UfvaW*N<)vIq9U2qEDo>of3dX&-D_YSc5{5J{XChD#)Ef`ExWYtWV*en&Ai%O=kaVgn-1|g zOPcj;(|Rv|zuzwyoTuG%{aOw80vctZAIGcDpgldtX;$2fk=gD_bvf%sSrn*geWbf= z{Mc;fi(5osI=gS5b9k4#x3>&tXJ_}(=B+ZAKIg;X#eFU0e;I8uKkJY>A~hfe!aO}D zqi*+-XH*>2!QydLX5sp68QCdh{-$QwKNmOOy}ns(h2iODB=vC565UGUB#HKm+if## zhTWxJug%EXB>Q}ytKMtbjGEziJYF4vO%{vgGl;sb8;knsJ?7o0Iek8#)e!~L1BOQ{PnIH&X$LIoOzgKp=VS=e+})vXu ztlYvV#0!#fv$`*S^b=E21g!pjR?~kS)%1G(IjISN$anhv@j?ReXy|_2I_=8r2|#dv z2vcVCColCUEde0Z%VI33_7}$Co^K8uAok>ONoc#dpvx|$Z)Gh22$jbp`P%PEOTD&( z)=B?ue{tEFr*p2WxBfsVUjS$g-x^7=F`O}bT>-Rdul#u5la>0Dl;kA)$~^NxgW zdpfh$@0huPcy1z|nY5+j|NGx%mZlcA)9)tdbEd%;W zQSS3v&f}yPag18mL_9ZWOV3`EgzasF6-PPlb>zs9bNZhiOlH7YjYn8*8Su2G$WA51ic&( zv5i4s&W8JS{O4yBs70CxbtQ02br$QnNi+?v#$nyKp`bAW6~k2^NQV1BePSe3e@g?R z1#yukklBc_Fdo6hy4A=kH~V$?{hYOZwXmq2%SP3ZP$>rD8O+#Um}_c`>!(eOFuQt4 zodmYH+Q6xyv8Mst$S;iQ2wYtg$HABp4L1HB3aKcU!3hXPTv&)P1PNkSu>6QyJ_(;T zmOch9_$!4%72`Qa7U(f#0QPHmf7`_BbE<>e+Fd??Dn46nChh;VX+?XRxgAXq&ttvG z*1EK-$mQ!QOXQ|o2E9G{oi8R(xF#nLl3)WtII}Slc##)j^&!@0Za8J8+t{q zT4ip!RVK$8O@Z!o;S#q-`!EnDFoHl(Vug$X6LUvA5f-seTb^MNXL>_Rf5xf5Biu$n zG+^Xp0kKfB7^5QAac3Dvpf>GbM?{R*g-9JiYz^GPsx$8}QV}rXMcqw5HcuO|tD_5Y zML;l?V-~7!Vb{f=zjj6ZTBD~EnRNg~u4{~Gz$mgA!CI7io|xD!Jh90$qIK%c2ss5# z@vu-oXgSf5ph*CrgS}Q-e{etWo&XV0fmTBd=m@X`O>e@Wd%mq;l3+n}w|pY6a$CL0 z5*>c`$ail@+tHTxdr8{U?Rp$Id6oOK7GR>%Ev=4yUti?_{T}&UtG}OT$)_)O*;?1! z&aLDgiB+qWdnrSDn{gV{joT$ygRC&|l^XVl`#FsE+t*iMVTOLgLo3&T2sXIK?g&S07F8NyIA+sHw$QVW8>p zs^z|+i^gY9SqNUrfBg_B${pTM+yTO%bJ#JkU!x?CRdxa9wzeQ6x1+!IGW8-`>qU<> zL%qqm@0v)z&U(AKA~Sz%`n7$VT_k3CT{iZ5OzZ6|e_l&W>+wlsiH`DEFXFZSeeDR^ z>|%)?!{yqp^~&u(L#`$4Y8^*xMnazwK@!wH4@Vtr3o~TIfBMb`H#1t=t{~SC#5Cvv zH!Ol;ZSWd#s%z~e_CtYzXMWse%nNIRi~5G)DfdF)2iJrclYt@fy%hq5C^kt2H%m|i z!f_zZ$95Q4Yo=f1Dal3pXFR7Kp0`f_ZkKuC3!_5$iP2QGKN%4j8Bigz6$72+26Z@Ic1P zNPOcZdbkPZq_rDYVwFl;SH_AjW___r+OndL$v@*A0xsj7+G@f7E#ArT&{Yu`nhEC( zD3jxPz=-q@IY#`AL;X6ge`A+dZ9jRBcrdAHKrCcWP<)p3jlcy+3l+z6fh>S+ zX8|CIe?@N5k7@qI+Mn_BZ>%j{8NV-<{1SZ}_{MWB_u)^x@9i@F#_}GE4p6zRSH&Zb z8<`eljHIfGVG?U%x^^|Sz%6l8f189cVq&tuff{Iy_0&tSfsa#4+g9U*S_6MM@B$Hrt%7eKk)HzTA#j0)q5%U0nxb%_1z=Fn#s{Qc_d)c_?|$OiH)eq+=nQdJ zaxbjxCFWhJ#wdca#_$$cgPJPZLPJ7O#9R#)1oy>fy;5eJp5UH=AwU3>LmR09F#3c0 zZma)0R{mo8@CVb^R@q~?iHbOuuXQC4m+>=V$gbcGZDW!k)3lIYV-3byp=)-#e@M(K ze+rZxSoy5XfIB3}W`4u&7E^Im@VkYFw$Z%C>v02AT(R9QFDnBFN$lQ<>duR^I^yheC!B7a!p3LJJCC+e<>zM5bSbX<~zf$f{0={vXVsAT(EqZ%9(5s0{QbfO?W^@-eTHOGh(8nnWf z{WdEpE$|xG09(KmuoLf%hGc5CVz_>?stHZTK$(f|CKCX$gYjkm;)!vu7k?U5ENag&rW? zR7^p7zf=ZSYzv!7utX1c4`2Qed79hFHR@yPmp7O{>-U@E{NlhjN2>DiFaP`5`xi^b zLg~Y;ugBZ>9R$Nje_2Ebt{QNr+g+f=AuV<)VI;{WxRE?I+d_*99eB&&X_`VX)MSwz zS^)$uI`;X^GsZaB>E*=5f@;RdvREY+k$7Sso;d_OKt@S@!dT!n!_Y?eeT)ckXT~mHeg}t=jkppto(%I95L60(yf3oX0%OZ3XcGbD_TCEMzDXekj*hL#PkWm=XhWQ zQ^yc{Vv)&Rf9~5#QHF40(mxM!-)iC~uKm$Y7Wj)nGRF0BM($ygR`J-YwlC~yD`12G z=iB1sfhiR}HpTY}#&iha5RRPlr^`OTh=Gb&>UUo6hul5` z=HM}jBlj_jcxx97p!?^#^q6;htQJ-J@%?(GWV{#o0`jdb6 z`Qm5etP6L)YTOz`hEV*>ofEIEXc}qYGE1*5C<-I!5I*ZdWn3T^UuR)A5jJ;ii1yS8 zUxpn*x+eA0>eQ%eWxKcYN4SjPia#lsh6R6nAt&=8sZtTx;=(v%U*RuL{4>r@j-#{x=Qy$43tw%{e&dQXh{gT5Q6o8Ae6b?pX^pzl ze}v(>KvJUvqum8$zn`Vb_%-u08H(IOMbUjoWdu(b3kD+~46u_RKafXH(}xw&vHPPP{LBCU9m9Tgq~AIAS|0u7U2=c$e=kS- z)$80opAC{#LJT@mCpfr)8}HMw9MCA_&*bE8^~uRzx7^*=YHGCPs71uk8<%cQ$RuKO zgW!3lO}(`uatcb1u;8lGj!_8WLy-*UyZUs};dM%fr+s>93L2iuE8>P$kri+fq`F?4 zEjsLl?%JIi8Ru+bcaw@qzm{S?e^>)}WS}$2f6JlKfB%~cC?69|ric%-xv3tb!elU{#KB`q{@{*Di9KZ2b>@EaO1+qirN#?eB+sa3a?}cKetmzj`fCzP^NGyiA4bgb}oiowp}` zwX8z7%WRSFLa>Z*`LtN!g}uJ%YV1_kEW932aTzp%c93kgSS!|q)kj(_yhvT3tvLB! z5xdS1GJ{{GhV*jmWh8Bbe}cWI2l?5m&(GKI3uZ}0^>sq zxrx;Xj-}QF1dhF*FQlKpWSpkoIQ^rE=Bb>gjEDctJ+iS^#D*rQ@h`5{WoR?J9TW0g zLC7#-Y>^a;O?+iBIgIUUlCYl7{&jcP!$f55<)=7=mkkKCh0 z7lH&iN1Q}@d3B9~U~TG&?XF%J{liI+Bwl#d>r^h-5^n-E(b%_sL^`+U`~T#X{d#k9 zrmd^>ziU|c6Z?PGa$APCG{ySz6}Gc&3Pt;n>&W#Koh;wylLHb(lCWkLmWve>{050Y zDJO_z(k@d`HN+0C2b2u@(IpaXw>u-^-pV&y$yh$V0V(Apoe=w zKxCa@L}ab2j}?9W>(c8B|KLxwhz)t|GT+oiX6m7y!VS>WcQFR0J?#b3i$i&3J&&i? zEp1?z?GiB=5pV_$Pz-m0q=s%G76Wtf_RPPW3Nr+U9QU~yf6H)RN!6vP?E)Syn9%Do z!Pm|#6%o!i%jh}q{K!wlCz&>JWP5nk^=GJWdDAf8xxaZV$q%IkOl z!N6&)BZiQvKYWCz>nj9~M`Z}FtMu1S`m6cPzfWIHl72PGCN}+6_aoXp_f*?w)j~>7gwj62PHI)5?tWMN{g0G}A zJIuvM;=!rb!mIjM$7y=IpXK~?!Ft@&QMT4w_XA~S^#0t$)i)=}#zgMDfMH#7lJEBh z5@wKg@bS969|YBI z#15?7GaQYK4+NO5dVNoJi%=r%*6VZ)vA%ZeuR-pwzw035m(W9AZ{jnbXx(!n*CPne zIerRrdEo>Ftp3M%^7G{)`-S{`m&tWe_5Snan(&|c;;-KY*_sr2;&JlV@49$I3Oe?< zAXG8(+XNe&e4L;7R|w)U;pPyy$I_x`&4{v+F^JPZ`AjAbeiC2}g0pc~ zo~NaZSATs}kCpSuAvHs47i(}FMpAzOM(z+!G{jtla5o4}3q-^c!v>jOypo&C>G_J) z&rgs)^ZWO@u62}$dXtZPPTJ3x^{?sYQ|8DMmVW}_;*EKICh1_xb zSA$k>k+2Flj;jB-{)?!ohkFiL}MOE#NbJXuBVX7svm}`P)BQ&M(JkCRlIsO4bss zj&*L6ul2vW_1`?;SJRf5$|Lh=1at z-sH&PjZ45XR**$HY)%(70aU~(;+$&L-?hSwP<^96XmC2cD@sri--vpj!IdBg#S2Q| zudl9asmq531Zl64AB~=}{`RBe_v<<9@teV?*YQKjesuh(=lXpQi2EO0T7LcHbzRY$#mdgoUBnr6caeLz*ErA)vl8eZ(p(bfi2M zs5@`BXYrR41AAc)_#LVBga)P&As53s&KRGpfHM{rTku5Tm$@kZb#x7J!K-Caa+chF#aABDiElfxO z;rM!>mC!W;TM%l9n!xFC(3TIkb5pq!^)Jr%bt2O1-=xq4tw2yy@B5pw(2HX877*mh z?g_@JLJ)xDXBis683u$A&40$xKt0Z7EeC_J)*yRgfB^rg_rEw?dVa?K=zcwK_*eeW zbMCy;88!dO*F5~)TW`&~Y#6~&!!moAuT7z1x6<O7-Ta z$4Rbh@kh7OWL%W_&RBbnJ)W7a5FW6sj9!pB?R1~#laa@BYH~f7{%GNDFf^>xRA#cCED6WjVzgpza&rAbZhwp0`Uz`2t z3mTHY2YRtOcTG4sA%7su*=AGeM})hCx^$k*Mz2i)28KSCV$`|=UIm5A3EFVFc@w?^ zo?mxD_j3``moklgzOYkf7<+CGZlBo+ff+}4m%A0Gh{fB89tVSzYryS20d`ROE=(e~ z7{Di!^Q-##!Iy7YoL>5RfZSVs-^RChP2#C3Xl1oujkxC_LVxGj&e2L!Z!S|4>lUB# z$f*pb7kcNq8q=qN%ICqvY7sx&Bc^`{5EYmeX2JDlZ`sZJ*H3>x7gPoz8D_rJzh!vlg9wL%zb4~;@E2s>s2I7E;t6ZKnUugtIW1Y z(7jpF4gz(*7Js%1Tp3J?1%`qni#-t>rw=4jT5^IRyIURayFq1FDE@Gn&-Qhb4HkVq z2wvD@k*o>-@=p^9S980*PbQ&JJYvGEF}l)qSPIOJ-9p6rLb0ctgar!EU5``QpNOE$ zPQcyNSk2GMJ4aY$7^=RWc$0k*clM_*BJTSzx;*;r^?&%mi$vBnb(SSPPqczta>@`e zhB3b0)6?~C7bs0W0xhJz8!&2tIY>X#Jz^9ykQx909F;=p@lB5B%JG6e{}m(#S-b4m z*o$153s=_HKUzrj)0U@CqQ{2=n%r$c9T;v>c&k_`F9Qu-Z3eavRq6p#TMd5tS@;6$ zOxjgG^?%gW|MZv0zPZrt`zmA)OWz+9#a&p@CPE9};5xHZQQ~TX3(bmRK^h&`I4rXg zN8fzH{S17l*2?JL`O^AZhsz|k{=~+oIr!8oI6kJp2?^U62Yr8)tfBP}TU{zjhDOZv zLF=1?=_`7679(I+`Iu_43$NT#sR#V(I0jvt zV7kC9a|O(YMX&4C2B$G_u3AMM8|}g|M7%KqXb7Wk5V{XsHHk8N8-aQ?d3^2bjqJ(3 z^?!+vvSf58_ahb1NZ#R;*`g9_J zFu%ER6Ps2j5FT12RC6Kwjhwd6p^(?n&kEP3)=YH0(%tNf>sWcmQmfJ}LG7ffSD0Gs zpnRGTOpGtTT-dJ0Z>TPP95!?5C!ws19e-Waf#gx!&K9op6Pb66zqO`Z#>j7v&;O*m zNv>G1pB~W6?DdWMoo)h{_+@MmSoA%{R_Pvews1Xib)bqv9d3TegpOFOYutm7ERUEO z^=>9St>C)V%33$kAXsP!sJo6C)9QMQ!`y(9UqRMc0-?n!mo3^59+eT);zwtd`hR%* zqOw1wl6@rkpZv*-vf*Ey{I{<5&1F1@;~(B6dk*?I_GiA_Ow7|c1le=MRZy z^%WbZ8?bDp3u4A~w)^_sQCTuGgMVBA<=_0pF5>`8oxAO|rN4EtKebEur(Tv9|K#!E215OA5$f@>T*clKL2lQ;vGO9C6UuVnFf=%QT1~xi+SO(!PtIVjZZfWI zPS!3SUQ^iQl?{sm!pQ(L`gxc6M=P?0^slvyA&1EyeatO>{0q^a{&m^2mw*50)_>~I z>n}$2y%2rhgv5G{Q;F>f;xy4i?+|fPQpQbMNXsp5SF7S7 z2u|FNq3CqQ!1UKo-x9)npCGMCkst&yibBB<2!UMwT&@HOX+bpL=Af3C2rg)=h3u0< zjX(kD3)stk>d~uM>um7TPk;TNHT9uY<`=o2eMsx@$#_0y3hCQub|0KR%??W=qZqe1fS>Y1Pc%o@Fm3RXoDSyo-PmPk*X1SQh zP#`yOEt5TU#BYWjp$!(yB@gwc7wW+#aY)dj@1+J#eFQZ@m<5DUqvw5`)D%>gom~$C ziSN|+K;nVR(k?zlz#X>*qH&e1LN4E96Sf$=nn{U8gKRV@LuD?2$=+8Q;)Z23$SmAY zSC_ns$h%W0k~nVnJb#=)bf!trgQ;C5tTJ8+4qnB`2XklJEZS>%?=o zb6LMl`Z_UJZ3KPXk@^16ZQ>G@ZbC(-y2UTnZI=)*v;`7F%72ser1oR6GsAFZxZo5k zwWxYi`jyt_wLkgWKlHnwev&wp zu_RPqy;jugLM~DgSNn<6vJN9}VbXMNnn3z~=ZndI>whq^mXZ6=u;knO@PnDMr@Z;o zC-QGUt>QoT4oD5_fBao*S&%HAw!Ch+27+y(HlFH@N@f3wQ`I>&COSk5v}Szv*YBG> z)@}^*>xY7RF!ACo-~VBBkKk%ZtN`JzDO)fqdzxe)SNL4vlw<~54~8PfPy*7~bp~7= znAu3yTz@zOvF&r}t%{Myf9fq^f6oy?O&OydH&Sq zrFPo)RxQ?dK*HibQ5ey|HANd|5;t?BJO!TD&oJr5dZAEk+rOXRkVwx&1EmdpPtws z9cAd@49R$l5^txm>&e0KC&un6(kOEA;*G_N+R5#s73AX;aRyDSvh(!}yXP4g)f`S~ z9Dfb-V7NGwifV9hF?6(rq#Qt;W5;N2s9cVt^1A%uW2LM(-9nyA`+oJn($#+Yh76F` zEBnp6xbJPC?VtMa-#Fdh;ynE$xzRBK+h!e$Gu! z2`Z_xDksmliXUD%aW!F@=rdG+n|sV$oqse}Q#;W~q(0lQA-0KBkDzuRl|;#^1RUOw z-oGVZ+Dd-$t-t9Iqu=`hd(B<$`y!ToUViV_5{v)g`wKM9`uvmgzMq#FB7}yop;TXx zEFL!JeXka*>w_&A(wJMAOa`}&x|21GuaDP=sU~JrCVZ8m#OMX42ioV2ZXX0lk?LDht)-j5l5O>tE!kER2S|`{ zGeomUu_=Lrt3ddXipq8}ODZumN~j=T}e^lt9@ zqLM)_^nHtl(1bjE8jK8>Hq~sz{ zzXC;sUN9|OfiZc3kP^uuMAq_7=d;p6Bvl|Y8Zq+>I{qQG=SB^-=KCGh7tVUGwNX%6 z%=_iX_-oC6{Z6J|{)owBWWQ|U9Hw`@8TXrc-ofc#2sqBjh;Pv~2EtuUQ-7H{>S6FC zEpJS|GVICtpe3w49U;kfWszF+o$~SIMpT6{`}x`9oio%kDhDIF{oaq)W?k~W23kG z1`)h}pNM$IK(ce+WEyZ1uYX$&m64l1C3xg=1j3WrFMec;5{TkU)V?QC1&fkU9GW=W z>kT~H>-$-Lp}I}qoWfr|wnxaV!S)$R?G?Alyq8WSNL|99RHC-|e8CtI=2Ij(A%e>~ zfv?{V^(V%QRN4dI(f+qT_=Oq@@BSB^rH6iDXsFKji#PKOH(9{+%YXNAkCkcmjif$Z z>)(Di8-mnSj3HGB^-qGdWaJVY-wt_5yH;WnNyL3;Oo!2!3AN(}BQu|;_ z3;|N**@;T3fTH5u{2cFGffnph9te-J!`0*%UG2s4dSMW9#f3hlG!!A0Jh5-mem)Q9 zK4&HRz!(27kXetp)_+swPVGFi+|wsH$_TY1Q+sG(Lik6AIo)!B=ROd4o$e@&QyVH= zGy^7M)JG19dFp$$VeutImk)^|p~Mc;<(k*26VX!y;wbp*#vn`0ekKXyESr_*g9S6AG4ST(5^GYO*hR;KJ#Ccc)5mYVfVp^eS z_vvD41XJLUIP5X>p{9d^5D3MQ+Nnqw&dWFD3n)V@gMZcllvHw8UUq0vo&DFY;!P9} zqzojVu@aLvp@DqyK?}nV(?)|p=Y?L(sU87RieXiiQ>tKk#v+Q>APNtJiEz%K`Fytp z%=hvLtw%X4neQdGApM;0zjPjdV~)qfGL5^fO6jYQ16NS2vc$~cTiFZ-Sg2LQWWZh-VuyviM5>b@uoYiTY3~JOrpgftgWfllzxc(WU+tne!%8p{#9mw%I0AQ3 zM67D@*>^wO(DD&O%WHp`cbD43#EfEFjJx{gl#|1>kcYMIaQI1B$4eDtGt@rJhj8oh zaUTEhyAOQr5um!|ymHKX%h_GF`)`b{`t^0{6`?{Q@S1GD51BF#7^YnDR^5 zYwR5JWR9cZ*(jawJ?PiH=Wq1nIX$PmAOEkAH9^i)w+Yo^A0wN&^MeY==4yiw2Y>Pu z^P`u^S^5ML7BUKb$sP{U&|D$yDuv>ej0DTQ(o=r|dG?PCXL$o0YRB-y~* z_r+NKvR{lLvWC6t0xK9<87qTlV6xZLoZ6Mjr!;gS2I9*YaNz!@dnX3d!#u^*wjqU9 z{^)Uf`3JY~N5Ar0mh^t}7RKdU&wtXZS@t4ajEA0cpJ&q&b+b)9C!PrMp`o@}P(Y(| zJk0h}iQ37q?wOQFW_`PDOhV>mDiLav>g!38&H78~qkCoB_O{iQO@sCn2YUyT`*8m9wa@;>mrMM+7%wQ5Yod+*(iO;3C1>S`fk>Pkxg1w$Bgu&y z#)8l}VwJ8vHMwq?|;`EE=u3Xe4ZVW z<&K>>35@Uje;C{WXn;JL)?Q))iDZgB^Kdr%O<8UOzdO=LNQEod9KxhnaGza;`viSJIl^rt?+|}%Qr%? zc+`r`=#VOm(F=S81_Hu5)*!^HgUn0V!Uo(6XxVm1TnQDG z1DF&Km= z{l-KfNki!IxoX;yCqw3;FjY{L&UnjfH#Tbxb$=Hb2tM`=hO}c;fz{QzztO!UX{H18 zqS;?lLq4g=38xmyLJ6hR-`%*$#lm*IgyIfqoik3I0^F#6Z32qn<0_N#h{n+HY`{wpR2Upq=tHzGO&yWJweoT>R%~OXTq30k z_GW!I~DM#BjvjphYf#= z-~Z3>C}0~El*!j8(px8???2k}ft`sMP zGHM&CJgS0F2)Cc(b?6*od+|8?tIcslJnaYfxSC_4O@v!QWVB`6ulY0e|)u z?n}ClU_=y+cNJ5zQ?x?OLZSNHk^2BiB#Cq%(I;QKp{*dcSF8Db01ql}Dk!1*!oTld zmi(K(OX|Psdr$ovQ-Q{iI2&1cf}R{v)*ty^S=0MH>ccb6_YWTg1!KWFf`I?U@BiP4 zkN5w7spS9lUn+rP1G8=&0L>vN=YJ}MB}g1P02YCv{z)Gm&gnV z%(whppP|w6E5I*!bZ`YN2GrxaM`yV2;Rd<`Vv2n#{}v_qsTPCG-~muT%YXa590I2H zbQD6yf!~`gh&{~@;SF#})g>*@0a`&5LI6q!e2Aa4zb!!8YcXg7-vQ3(`*~Wwn&7a9 zH8=ujM97v4fwuowe=^`1j0n=HKFgs#V*=GZ0X?jKPXr_QUs`r@dlNoy7 zms512{4vK)kmvD8==f-irC-u*TETFEkzZPX&hXyDf_khdhwF@`jb}XGcO{WumB9>u73cr6 zX3?XWb7S~fxh7DYuYc9UfyNU(^=Jz+2Myqc3RiCs1}G9d(71H32Cs?b!6xPF-nzmT z_@=jN*p{h?AyUwqM%taS~V%iaoM)A9Gz zN?~j84`MY6GU%)(%jI&(EtmOnAuqY(^0quo%lC4@EVt|BF)}*8y~j4&eKl7&wqRyA=3J)H{gWpvrp_ zy?afp(kRE8@8><_e)j%4&==uo=F%u+9iXTK#4C(5(kUP-g!LGZu2W=Z`07CElu;-Y zXVt^lr;%>(B7gFLlvXb=21S43jwl{sv_T~%-+{{ea+-F~7xbB|#IggMTy<(=R{2oS zdL{4@B?e3wYveNzk*rTCt-@%d&+OpDX$Jl*MZS?$mauWWXdS@ zVrlZfY3(kJFz$O1_XtPlE>98fnPSKUJu3ZBiGf%B%6|#iG*d<)&Z{)-L_`8C^+YUV zMF-9k)2!pOC~)uqgyrqP6vM#jH4%kMEd12xr5*n*XUyez@WgZ;=e!ou4jv?sfYz7J z^}!>RM%&J0?~}+ygdo7y}OLa}R;lLx6iUHm?rKPWkrJA}fnSKv5aA-n=IgilP=! zxI)5DKQ&!fR6a$#?I}+so^?Y%{MB{;WGxsvhSYX&4^BtNneTv9Ax2`Jo0%e!w8LLs z0DtuE4H1PMhM4v(aOk|EF!JjjMz07x`&S#xe_yB5tF#{Y*7IjA0Ixb`W(!Ua0lYfk z=Jl^I+QgF{fbmIyg;*KINXvy+2m7ug3btcZh8}r!2s-+H1>>G)Ol{^F`aGIAh=CC1 zIrE{S1Nq?}TgSK;B(?59EEGKzK(aJtC4cpTXQqnsNEIDyg;5GU21gfOUZ4v?B-oLv zgBdv{r^n#x!pjbXm-{PE8bzA#{^WbQE`U&)exM7EP||r7afgI9hyf^{sh&3Qy;6}@ zAdvgG10k6CVf0C$3yAUx9Y4}MHAQKPFb1OKLeYVk{UuHL7sv|t-#V1)`#a_T1AlMt zX#X#CANoh$8F9~-blnjMb3D&!C!jP4C?A?B^%4+9lg=&Q0W3PIXYjuKD>^-N1hDPf zVTY80DM4ozk_^IL}Ey$4hj& zSBH|WBZ9Aut6rl1TbbADF+{p9@PFU@rt1j#=8xlm>^VQtbwT>(GhIiLIbY7l0i!wp zrt5+_=i{!UJBU(;wL@iucL(Byo*#7G>bNf`&-_}KVvTfPObZ2XdpHU69;3(b z(y@_t2<7vb?w}4SzhEQPTR=(HWr|p_5+P^~A1*~A&Khkx?nDbR< z91x!KYh4$_Z$2I0aL(88IG}OPf9SfPeDmq}mUF(I_vw*ZCh)4)$36{p2t$nM*@QyD zn$KRk_BzZx=~-&vvDe3UuYc41ix=qrCC}^f-|(5T5;$d~XL*|1BcvT{_;5$pB;BVG z33LRZSj}@4Vl%T)H_wle|Ahw$C)u9XgMID|=Q$uE__m++f!*aZ?bt&H`}V&|Z{j{R z<8Qysnb&{lJ^x?y!|K6#|C@e(^8uZiADL4)<_6cf1c^?Q=`pNy41aq?NdA5QbpWyw z=>D?SF?TxRRd3-wJ<@Uhx90@zK``lD@!^!dI=-ZPD2Tb2>N%1-0J?Uu>eX6aCWSqk znIdL6-P1Y{3eqYZ5WyEQo!6%80D=z9=dCcGiQczNM|vPF=wy6VdCnCIhJ9Yp_jUhR z#(}pznzFK@vPD>sR(~)+(0a@mt)e3;W6*Qe4s=6Mz)Pi_A*iW$4m@=}H*rbNW>Z#} zq@Z8U2z8L=^ZTFi7(JWEUviAihY~3|@cA&XNuYy3Rzf_dMS7+MJ=FhX>>VSWWscmG zLN|?-9cY9(_mBrYE`Qn)Q2vehf4bkUp!l}K>QPDecA@RSqksD~l`HT+;~9jb>?`M> zQrQ7d|5MpX8jURYoAzXMFWT!mfY`!`?k#lPAty}rU-LQjly44yrRkt4NDiqYgoVAM zZ8I|quli#?QeN%F(Zn4X6+KtJb0zg4+M%Aqp30i^_dj`bY`uz}S-)se9`wlZ)$h4f z&L6M{S4Q!Rw}1ZiIyc_b`hHXzj-LB^49mZ5_k&mP=4b8^l~vNH{byrn{NX_XI=aW( zGoPP(kugILy_~>L)1z3vI~R~h_dh}SN4o4q{*g}kcl8wt-bnM9^GHbIQCZF{XH#_SoI%hZ9MkY_||BD=JnFhv48o``t$BlX}mk0?(s0xc$A8xvru)vnTr2bXP?V<@I=DxI znMT;^k!NPnEJNCSXuoMXo#n@Wx_)S1l*+E@`2u@oN@K-=)Xq(LuA81ae*LF^<2V4{ zw0bM_V1E-jUJ8{-tg*{xUerONGNGk^^J&}l7J6FNap-#SQhPVctyIoNqD@@Ag_V}C zdME!+^2gav-4k}OrfHYXwbQ&hftmMB+cD@qEm1l8*ZcJtS-K7yl^ zdvzkvXY_p*x{ij~o}U%^{PVlVeh$C%^K;U5GfdNedS9R&vYQpbSFGcwC`U!hSC9eGu>6!sa@}fZX8DdB+};)(`WJf z?2IFjpWk!({PX*lUZKy?m;U?wrT;#E>A%k-Z3lo&*Rhn+eo+^G+K$KE4nP8>JLj}t z!ha84_wBzo({Kt%=-X0x5;_;gz}AC ze(84zQ`#$)Dc#?+J6#vhAKIR_(1XjnfprbYOv6D2vPbJf9X3x zY4sSRx$a-IibCuDLrdH2{-zb?ddI(vqvFxJm%qj(qwVJRKjVVsFa6J$C@HP`H*HSa z?f$0K=63w1?Yb`QzV}WYBx$DzkeVXZ&CoL|@RYWLtm?ohJvsj)J30(dUDZf+9)CnY zMOF9k>&#o@0pNnWF{R08M?fa67h~89kT$>470NvPlU7OF$zR2q0(g85057_mf7OgB z=vrIS*k)ksLJ47J)AKVI^9{ufM1nJDASs@oa~QA^=s&b#8Fx;Ca#0N5DOGL?0#UNU z|0xH89Z?-l5PKJlXnbJq`38BJ#eX0av4BBvCKM0}1Y7tos`Ems|CH$emhJ<}Y|EQy z+;JK|7|@04FQav@&DSb%Lo6-?N-y=W-r~-+BE46D#0{Ap7 zrc{sZ(sZ1niMB5yXauQ*h<|>UNhY+Oj;2F8WYV6}F(8Bx_mnPyh(Jo;uhaHm0E_lh z=x8~Q_Aj8{6Qcd5?U4=o35V*ZUo~I`9DGWPsD7jJkS81clU(*`sNRqvPn# zpucogPg#nhc}(SaBg=> zU9mE18qrhqq3w!#SO6~n2k_57Ygb3dah|8}AL-w7ds1`=>OoqtD}&~{lfpAb~d~?qb)MIU03o&Se*3ki^cOlFGu^jcgnuCWryN85>2QjgpY_ty zwBEyqzAx;*`$IzeD^ouGX(#+;p7EacTcZ2&d8V74RJQbYy{GiXcYb7`^-pxYefP=! zul)Q0VGnkSzBq*Rw>_Mv{Q9r|(Ei81KF38svq>+cy-?^mgHyUBd+G;m0qp4!Y|r=x z8s{8&^OiWDbAR&?cRx+!XyKgVBvpV4a7g38H*~#e)Zdqy?CAX9{m(j5!03UY-x)Ef z9hFb%zObc!U&s05Y8(|Y5eN!>xk$&~?r9@Ra8jhx#<&DowErH^cn%8XZ;yT_&!=`< zF@=D-&r>@jJ$EH)AH^0Z2CP&51jK+9@EM&{L<9-Uj(;}d0%i&8UXX#L2opgA(jguM z69|X+ULf@AMO}&~m`#qD0o{`?!RBG`2pMo;C9h zIvHW}oqt>EzqMQf?Elw4UnzaH@9S%PT>q#4`TzXi=od==m)kkH{vSV2bzY7Cp`G39 z>3?_5_s+St`R%{WdEINQuK#hTl~mtf`fHwMum9i}=D&4q@*jWu*;D#0)YY#KpI(@Q zWTZa^ZJad#-2CX$NXs;!BK&)q3e^BD%B8g3`!z-rS~(=C^GZt?w{KOBa;M8Rv49Nj4n&K_S6?bMKi z9)I)3;~_b`9`oJdvtKR~%=T|B%!MKh@~6Uw-a$+CjifnM%3hqEYhR<~bDOk>t+x7v z5Aj`HcDKz+J|8OM^3W>X5Qg0lgs0mlWz8VE?au6@B)4#T*zNk04SjPOqww-%2BuIp zlNM?p;kxGVi+*_O_Svl$kX*8Qar;z)d4DuAb3DbOHRMKp&<`f>9aMky*&~}TmiqR# zT(^?jzedh7^)Nh5pvd^TS{xVUpug5t^jw`x%irmxaa^2GpWCJ)V|$GeD+$3m;Y4&I@eOyMIvUUa`WxhZ$IkFQLXcoE9t^)WB`j(=BQ z?}~Zp*E`c5?d-Vn4mc|}`wTz6ACwx|7vGvYb9ZX*?)}7+7XB<&FZJ;D7uPa5=$D(9 z+Ez%?-D&Nq8zv#8F4=Xs3f(j4xpkrp7`@U4^_bwB-3DZ8?*sG(DK@|DLS_7bToX;*GhI>+wo48 zmA#ifKl`0`SqGc;6KJQ`vAwe9XM0^8p3CbyI}Xb2-OEjVh_PGVZd++uK7V72O@i&J zdmrEWaovKwmy2cnSsmZ*bT!|W^z^pd&^*WNXUX3duj^r}cyF9f+pS`**Y}5;XpTWo zndd%^SMR0)yl@Xq7dA#nlu=h=aVw={QR&Cyo#R7p|5>N+XkvVB@YP6gWKDEA8V3~b zm!3E+aW#a|EgAHOaFh4x{(l}k^D(}PJM?I=y~V++u@$pEt}_{(t8!~JslVN((C@hM zvc8*UKPr;UW(mi9T7T>8FP&Biv81M}GC!P@>%9q&!RJzMqFi3AH^!k`cbAV)Fw(hl zI|s6Obx{V_T?IlcG<{g&THQ%b5T6jpJLYj5+MN~+m*#DS#U+fA4S%~V-s$>EPMc&c z#=A|SRQ<-S4jW&!D=l4jL4Fdi1#zV?+na{qT>bbpSa=Y`U~4EFCU?fjI|Us)DmN*m z9yQ8>bHk&=eDvv~4%S_)dS}@5@1Wg2uqSDKjEY{b{wKa#MF7e){rmD8QwFHopm{O7^mcI}f>1aBSVY-}+Y z5^#6a*JM)_#((W3zx$I{=xN?u(L&oseR){dwSSJ2$0Jx~#`YZAa_w@m+?ske@y9}B zpMx)dy4PWkIZb12q-wWGsZ=Cr)|`GYRz-JzYI3Jj_=)m*D4J=HD}A?3ZJu?cfNLb>4P)+Ay`k!F-bdb_2dbJ z-dj7`(2&JhA{P7FXzI~bUT4ec%uiHKViUIarxZ*DhJ(r6K@0mxd^q;UdB)e97EX19 zx12j%c7Gv%3$3@t1gHGG+bcZlrfw4p=;CjYz-AwUeeE%3n8=|M zFIjbn)_Nx#wj^N|8`8Dg`zJb+vn{QV`*;Y!D5idQPV}Not6UaH@;sv@P729(hHBOM za@pZ1GCF2wXKA-iD`PyRisRe$DCl~v9A!;gi{{hxu1O`m;O@L9rg>f6gcG-Ij)1$7 z@qc)9D*0Kg@80AzhJR1hAct-861<~{9mUAsRtS6l+)DMX+`!}1YNle}_q$d6>d9** zol}kDoXvaTzN$T0T%gE@cUQf=RU{rJv{exj@8|1YC-ttBSn=uW25VZ~?RLDalLLHi z+97)HVSPo1Yu-IWnUmXco2~itaGIQgi+_>xs?T>NMBb)3XBBI(@-^+&(qo4Wm+>lT ztGV`=OU1pLhUI#LCqWNQA}@BRWm`3iwUh~}z?Rj$Y=?q!+~32`Mb^Z^dz_1Ha(<6t z!A<1d8K+0E(Wl59tAdr`nw1HvTyeoaNxoU5&2S;1|A;GFvCq47BH`z9=vcc_@PCD9 zyi$l2C@*OxP2{PVI@dKIH|xtVamXPY#@;=I5j#q)D9B?QQ#;uOj>hYmC-AzK6$nqM zk|uIaOuZ~ySlbP&Z8Syb4sVmlf!)Hx5fiDCD#IVoy>v0Xpy03Xri&teeJ@HStmVFk zR@m!`2I3}eJ&~}h{(~iDs_h(gM}M#Hjz+(_bD~m|kRFqS+;Bp$dmj@=OPjZxo!@d6 zz2z)^JC41%mRfRJf`742+0Hs%FQcyEj?Pz4;QSeV8QeQWch(m!Zwam3vUuy{v~5z#4BuTrZDD;C~QbkdF&| zZHaG&xnt(_2(OZQdxc|l%Nx9X=qpi7St4FN?-Gxkuy+oxW0ID+TWM44UqHX%p4>a6eb9US3Yj>%_LfOW)o~r`itFWF$+__|r=a6*%`*X(}2~({z%a%z0D9;(j5z zW_dw=Tg%Gx6*~Jtv(jtOuF4>MMLGXG4JWzY=o!g|RVr1V;V|iiP=7t6$7!u?t2YW+ zk@an|UK^_|v7>&^o}Z$k@u6it2})RJJ*;v8Y+p5mw7GH*fi*YJ@g>~sD7d_9;X~kf zaXr2_lAAEZ4m_51?RHE!_GGaf4(oFeU18ffW5HcTZ{3l%Y-MCoP50%~tIyZv;hhbi zMRgzC{_Y>#&3d!4t$*j@J$l>MkRIF9DtHy=N3894mrf@;1h+hX+voS<0k{14xYonO zI*&Vw`8(UK1jbu&CpKKaQS^9BhMoo7?HWIt_db%cQqAKKiRtmYQySNDraoAjYC8tY zaP?-4b7632jZehSOKvr088W!^)*tC4JTxsjcG3cBNOZMc4}WV zC*EqC>u2Hui)q=&TNQ(JeIQMZlCsPB;=DO4VpVQq72?NE*zYGPDlk69%Y&t_#Pj_; z9KdAs&L3kluDgZ1-5y@N7oWP-mfdK5oC~jMeKg3Jrj9f{JKV|Au|~45EL%x>wf=b4 z1g>`Lo6KJs(|=f6hxQsD_T1~0FQP6LD*Tp8S9PQ9<#KD=VqdQ{E7I+wS@=iclPqub zUgbqYKD#Zya;=ej5_`KaeZ{;u<9&?{`pM*ng2S#@KXYmMG^BH;C79ta7r$+H&Q3w% z*(H*Qr}DLV73}(h_RS%|=Z!n4pZiHI&0DTnoA;d)yMHxTcV40w+07J-a$D_Q2lu@& znB67m2Zx=c=$f2D@p54AhgG;SZ>MQ`Ig2b)c{RVufKywIRAy;$RjXE$vp*fCK+-wO ziEmz(-flX#**-sM!*^c6*+&KrS}ESlq{Fp#-))bf^(s?oO00^@SFiTsA00K?ocyUT zHiQk{8GqyX-O7zFBYv!P&0Dv}f&=XnIeWt+URr0i!^St+&joh**ke&xe~NtZI<0kb zd4A#*yGfnfLU5-$hLks%N6=OOi@;L?%G`i)$59QGTZ|t$y`noo~q{YD&~_FB4s;yNB-XB zEPosicnCJ>4OUMIKLPbgG-X(0RkhQHc4=0~2}b8s%dx-TJX>d(6y#>RlVNLmm~Bn5 zw6~GDwX^loC({1yvmfu|+ojg;2XEVp2ee4d_wmH;7b|s}WzQpLrMk9T3H?*M+^tn& zkC~&o>)Y{iS6W40pJHAuT zrJS5Oc4(aE)7{@Q<{a)$LMffAu-TAxnwz!y5IX%*l+w6AaIdas^+w@L(R=3J!rSOi zEi+mEEyxZ&ikcIwWAD6?j+(b=bHViqGzS;E;B*D8>V}Jg3 zjpe0p7YfpH&USaNJd1Y0hm@La>b93|xz#dIU@A}^RPgRun zJHL7IEJ63%)pn%z{dt*bjgh>*q&4u9vh64hnK^*&ko3-4*1{0~x@$*udM^ba1gUCFUF zpAUD5m~-}K+u+moledZ1R(x4(Pb!n#&EpZrFOr+544ZfFfs(XzrqN&BkFuwg!smYA zKFSL|g~eMsi}8sWV|Z*fRqZvr|GpegpT@i@DECAy>{_?MicX-~F)Ke^Z-0!vUb1C& zc&v+6WF6F!6;Cl+p5j)&owp;lgkw?lk)v1keKU1UBQJp|jy{sxXM-p2(%g4bq&>2A zEUS+wO?W%nyybWky_RzD-0y6jKU?8bR)x(zb&sOy6_Gfp$m0{6kY&NcyXX|ZVXIH=4^?7onGYOt&{rZ0lO+@a2 z61|)Im@&Obc2gZ3M8i{++w-v%gNtA*m#fF~ zZpp)KVwViw$5%1W$9>|3uE<(yzY5Nar!Vi{D_>zcuijajcr2UU<<+C@!YW!`*D^%i|t~$6+yi-7N;i4 z6@xc|UGOgKgg=nGH>rR3_pMnRTrvsrVx35f=Z-vX;k#s}5bhTnWTOtNR+=>7x-`Ne zOjr3;U6uCLyuM7$@ypYmKi4ORBZq0@ZOYXiH{05COrso%{UF%2dl}X9nz)C>_U>QE zZa20Xsvpcc`fO`9?UX4qv+O;vMKuP^`ub8d{Vr|N)%$&l{B?ixE_S@P0&uTRVj*#Mk?>ZWq0!0=eZUvzAQN=%O}&lRq-wji$i}7V!a-vve~na&`kY%BvP1< zx?Jg(Wp=&0_oq|FXm{(T{IhVJ`ndFu>f=@_x2x7JQmHmEI{Vqaw)pl_4Z=dq{SkJj zIz50)oX9RCvl4&yysuURw(Co9aO&aWfyF2rj@WvORnGJ_dCjrS7A1wuFZvzcJz}SE z;>s)h@gq%_cC`*Z3iq^E>(lzVe3%ti7dPCQH4Vh*Vj{8Ve@-T`M$3HsHeOtyDnAz#@M$%_TjmA3uhm0}*>dL&lM3Zj4dblVKiol!E1uv?Cg@x62P z{5WZu;XtV!_;=L>;jM&j*y^{Y^VV!%3KM+p*;2JS-?v`p_j^Yy{k45AZ)&IC1dnI< zn`0jzwuN5JeTsLtHRs%f)%LR50dTu(CA9T>QQYnQQf~EH&3uwO3HgYn(5UVCy}EtW z+`Q*O@eF^eP<#&RcwOxVzse4;>sAVu%T-4}FJCWJc^)|RW>&m_XZ(g^r;j(HPp=MS^`x$nZMH{LO3LgHpx+BrOT@CY* z?oZe8blfd=j@_#IBYU4dt4C4jui+9}u#5DME;E16A-HdD134^|$dfMjJzTENOV#(; zZH-%Wwf9HCsKn~sn(@|I2F0YVxG6ie&yG>2oZGej&Q}zVjdsx^Ri0bh)ruQu`p?q7 zpj=#C_`zRD6YkiHJ!$_%q~}g;z(%t#KXpgm-KP$fTU7 z%Q|3o%NIJyW%w$dlUFQsCtSppGhnweq3Uq;WD=l2P%^=f}Q9`n|Ba8;ap)|Ko{6)L^v9s#57GA?@V ztnk9gVDp`NeB_7Mf;B!XBi29W=xycX_6#S@(H{K%&B}L!J)_EgDKBNm9K`oNm=^1G zw)5|gExKB-qVx{Ucxl^Pp9rPsFV8`Bc?^ITi}cZkSL0r~>JA@{%8q>{yOVg_o&A5$ za^llt5KABZVkMhbp1RMbd|}L*!}Yz!SLcK`&N37{JDZGtz=vDV#JkisbnW`weA3tX zalg5YbLDl0=@!hWun}|i1MWS3`nRiZs#*}3kZi4Z;CGvij>2cFI>!34iO*hrJsh^3 zA#GOLVDsZ^9b>P!lJ{+~4@4{Rd^>+Gox^2o$BF<_BW>R?;rrG-ZGHXK_BX?H!M!ls zR!Nj4x0c5g)ceME9=UR;9sCg@p2Zht_r9#J=jmo*EHfFapQo|H>^lgj7!^Bjxjf$S z>Tm`9861I&*Qy}Yw%jg~U{qElY@XV7dz-FD@gcgIqN&r<&=;S5skHks*qDEV6Y0_w zo;#nBT}#_`-BXs+bQKR_R9LETk*uTWYewoGD*yQ8MgC=AP9k!YK0Pv6Z|k&nO6TMo zjmsPzRrr+UHK}Swb){yz6P{V(q~jjhi`7Nh8U7nCdX35Un zr0aP4@WT|M}irTh0p+Y^B zyQcHY`#~Rks}_>)Memi*lH5*J^Pt*-OMqQ{_q~JBb<*na%Dl zF2~O{u=no=uMSNtI*<4rbT!Y*+*Oph^0X>+ewps;V}D-k&UJXXRm*>bi03IJ^)6bR zTepAJLz${RXe>W2_51N^viI}#Uf1p(#M-iBumpqg7#Iiy#7-VUh%XgtyZNi8B z>s(zh)w;F(uxdkF_ua7l+*>EB+fjUQP~BF?lj*YC)#k=)n-~df?>CAvG1u6L`d>6fzHS5Jf zXohyRYKBKtcb8MNX3oh&s}Jnr+RNh0-W*_9?J?#W{@m>72jucwew3SJwKL+S%03;{ z`3j5*Dnp`J@X4tc{xo%cL3!fwp6w~YSLdU$d+?2 zeLn8wW-eK#Hq8?C@edEOj+;T4oO_J?eLf_c_;?af2o}|G0hzN2^DnH>C0i)y%eSY5 z4;nCk_aE&2NwexY&@GDoAR3t3G)^NW%e4%01*QiEGV4Vk&-UVDGviQ9M) zFXBbK#)<_@mr^QouYwlKG-m_z@PDSGt0ue8gz?AbAAr1?pTs@M9ye6NMf2V7PY?B9M64! zpzGo=nz4u226HyF6#vww8$TU%>TsByVW`IMu6^j=jbM7o?-|eozLMrYs^J8zeqUa$ zaq+9PCLDr(lNY(UlU{?$^$?CVPKD#agJU7+1r$Zwac=}us@>VTM8HYFf-CY8-GYsQ z_QF77_eK>n?(7hthMGB?F~-E&LC&#%TU&}KLmVjf8ZH-{Ozn!_a?DdF7LPUl2$!+< z881>-W#X)vgg#hz)L7~nLiuQMWL@?1MwJl=&I9h?}rQ zM($*j_?d6UgeL*b?3mokpv!1Xx+qK2TcbZ`6B_58Kb~WSFa_ zey5-PR(I0ozE85lbv5d4^G`T`@Jn+xksHI2_0$$=4!b5t%RO~FE3BRu>UU%{4qUrt z>{bbLUk#Gov{@EvIV+soe5kO;bfaN?;M|JSvP!)Db6c;dzRjHVp|xBkSdFN@b#u?X z=bm=Ty!qh1wh~jh?wiTQFxThOxy4pBY+U!YI^W0kRlTfI+c^#}HA{MbJMESg-173e zsIy@5nT9B3i^*NLn9=duqIEZ#vDyxT^9&C$+708yIVkGHW9m7(23L_4w>@3*o9|o~ zPSf1y^pU&8Z4#f1t<`Pyvp1Xj)kZWOYDUrd5jdmkq0=U>{e8Ya9I6Xv{I)B_-haH$ zr}Jsw2I^#p6pH52;XKlR=c^!lJ?!+B%%W76!WUptmn`@^4zfJ%@DafF|zf4bh>+(^;+rnobX7_ zvv4;^8_4@@$xCBZf`~a8hWKn9?#b>tZ5A_Y>W4G*8V{2Bc$ny`8mZwiCu-R507b>x z>)y}##8^(jCf2|m4N40}yOvPDePJ(&nqut7CO-J~d*W-$QMc}nopC#M-Z|_nYhc~a znO&k1vy69TwZi9rbh;YPljr-rns3u7Hk-&(?W0Dw?}>hUPi*r&S&gpU$-Z{1dJVOk zw!e+`7sdJbPB)CBVi;T6#6K@*)4}B<_%tG^vKp1ZF+9q-FW>j6AGW90i)z{U@s3Zi z?(E^37EdoZIm%LTn*|$B&f7)nj7$4;94y=CTA0c%T3sf8gE4nY+g=QIub5i271|9H z;$%QKgN2CTFqvD*NttZC=khTfu0va_M5VID@SQxAbU08p!@avL`R1~7z=}54nUY~$ znQQmTu|iQz>C6sB_th@jD(iXr90mGgVbm7uj<$aG4;Od6SG)Ts8i1Hto7MGRB&Szs z&(5wA>XmtaoHrY<-aY;2+uY;JQF~n~MU3-}>#l{EdkI~dfv7s~m5jp0P0XH~Elb03 zr47fbi$}V;P}IG(yi})4(VfLjEXV8XAyjAA1$ghnPJa}KrYSDRjxl7@mC~#02WO8x zmu_u)rOiu+=)NvC-XjayY^G=BIj!>c`bw;Sy>TMq^4_)PVpC{htLsk=)B9!E!6D$< zn@kszUFFRd6XUe{q~|-X+C3_bZibJZdGeGgDyNHo z_0_ZS`~B+DW@Vjy+hZi#$L=ybI;X=cxwuhF9P-J7xtGlS+$gvE=c8nK`kJ4L**ZXM zY2K5%D9ghtJ8O$>_cpEwU8aZXO-GPj8*;g9r-$MAebA=)+qfF_m@iI|bwg@?Xu7kr z2|iYRn7>Qm*-@}FFXnC&ZM<0=_(S)9)IRUK$>nS_S|5UVF*NAic9M(dX;ZXRH)t8z z$zWm)Rzo_sg$OfcH=NNOV`W;mLp|8z!R`XPnQ~F}H_;yN8cjF!j{TvVO@sArHxczt zh+A98%9GntDMWxfU^_b7gVsH0pi^YRd@}7LVg9T~%>aWGIR+Q#NXyWq7|Gs*6xv-P$PU zhAVcB!tk2BZ<9UtZ%KNYpZ6+6-r)XdUW1wHMi6%C*m#_@m)Np$`oP9!pMAbh7Va`&$?=s((eagDQpMKo`3#?r#<8&Yty~AumF1&9AfF+@{WX93F4w-Rtzr zZu&wy+7ugieNb`mB3-bwePynbXRQaf$^IRFM&s7LOqIoP^bY*RY~-)vxjtX8&m5)A zu6?TUGbkNxbL@z*e4golEyd<+kNv`a#;fA=ybs#NJv-dz-8(zzTYr=5c({vkm`3ia zYDQ_&`44y>_Z!2z;f+>o~b5yxOPp*?joexrn{Sm(o*PrB;Uf zs@g?LSY2|qe3T2CH?cAww+|n8$vQp^*z={2XZCUX4%0omp9tH3#tZL0LEccL+&e3H zG(0D_f##2HF?~+U*Y2aI^YBE|`c$&XfhO(zX^yrZ9j3$O;#s|wu|qhg&gAw$ZbBcl z1~;7?;%2{WN9p;jzl<8ibfC8t&V zj+&-XjK%gb$DtD}+mv00tEX*DSI5)Nm^qhip|z{fWwS+pPHvasJ$<{}xZMSGb~*$ zY%#Ewfm+{(Z~Z)HzB`IIj|LztgoN`Mg2U7bu*e4jXCkoK7N4a z&ArcvJ55Nc*R$D-7o~q0();@9D%+!R>-hV)_PLUO)A+uIdruWQf(#!I8@ zK&(Os;WIx1O(U&p2b%kWi8WF8(`;&HpJtQ{-`X%%glVKhbvQhn4a!U)P-s{B&7ZC| zPgbaO44dz+sGPE()dguXI1sJ2!&Glhk-yP7PS1*pE1yBo@vl_AEq zsh{qDMQ50KekfFf4Ez(14|Z}Wu$3IN)hT%_FT3mRdbm!)EE{5c{7lAb>aJ+LF%6|I z&x6q&i4 z%j?tI%#ZGb4o1y=@i_0a<#V#+-Fd^8tEef;X9d+0?T?OT2WWaYA0O!QEW=e%`NLK6 zVWy%W^D%!OjQSiu@UxmtxmO%c%gyV|*2?0wOJnugY+tiQTdkeS>YQ#gOS@hDx$L5U z#u?0&#bzEi{x&}iZ>#;(dNc=h@ln~E7#DEk!)98&{8G2-`L4LcvAz?+-<*#2X)-FK zxvh&zUrx8}oYlJh(NNr(gX+239(UvXaWmTGWum|7ZFKTiqx|xo(@W>m%ai8*2HT_T zGFoGpG+VKP)9d-F!}nWT&Nsu;8djozaW7|?b)UR@EskDhOX7s0*?!>F#X z<4lHw^0LVNs~wfvbw{AMSNpJV+|tHTdEfh&owtp2KZq}S98J#7esfBX>1?Kt8l~OD z+pW3vw8J?)*2m*=*-Zplt=H=ViBwfxsvtf!mpH_Hzrr|PLxG_UEe{f_wLHh=nnPf6P~ZE z(1&H^Yq}F9GbeWF=E&;JGtNgl?XviIjv4~hd_l!w_jy$wp zQ8yYTth0llad&5Acjd?mu7TBmUYXTy651(K;xPVp=w#ihGp^CX*}=5xEWb)!rXSUC z{`uL27!8LiXyX6;pMS3@@IRImc)fmC6!>9SRy|Q75D!Q6L(5HIH*9g165OM8s35r+J7~pl&8aQ#x0Ty!Lq1AI>J&qNKgH{4 z>-pBiD`=}{X#_ltXlIQ`k&bC+ZFHX*NXXylMu*g}T52MmntEq#^(=}|OLLuBCdz4U z7m?x>>BPhT>u*JW|GDemWn#7I6@}^8E7Agq#6(Rx)o%BBBq-4!rE*X9?PVE)NF2nK zN=JUOVXA{Jqch^^TATPC;???eyZ?R6X*;4C;+3HVJks@Sr)7A7IA3WKU(vYHV8%Dd zRSYE5^_J@=jim^}gkV;gXuSx5k)Q9SiS4@;Fk%@HX@;MFPCY?1#A}Md9JHL-n5EXW z%xqwPeUC0Su+-B;v@^PB0&H_Nvn3-Zr5n9TV$`skou`HFC*MA4qQVPgl#C{NlWg@ay4iVhv$J%BL=x*L+1jlY z>wOzXid`IkcYaiAdRuW*R4ZK2C5rt0ru6Mi6uFwH)IgN3rzkG6r_LH5Tg-@Q1{R0& zh@>DIS4=&fr8s27bl`T-CLRrMWK9Frp*+PAaG`-a0lxITh722sPU{v%CHS9%V5?OS z6)Wjc7)*yM*D$zmxgv_ipaa4n7O-Z}cz#hUPD{6cL~hL>KH$;i{vvEC+y z#ZyD~*@kw~e$%lR$~KC6yG257>z))={yiR$wC^ZFT-va8{eQhqO{Y=FFD~IvLt~dU z{_#qq9Cu5o9hBouYXt<+R(vf`Gr^6Ay~=`0i_#jH?=oDz+3jJSmdpV|C) z@vMDAjCIGZbOTG5OtFcU*w-qIi!!wHC|ohn06&Lw?hNEIF-&d~Z!o~mV-_nj!{kJx zd9DAn>&+I4@ols8YUk35(xFEB3OSaOjuvUF z15xap=Dj^;>DKGM$3vUkEZ*O%w^O~KttT9ncIt*`%1C4B0~6mcGV=5h@jNyy97r}^ zkdD`iiiuhIMkuBTQ|jlIEXJS^v9hQ!U>CQm4J}j5K0k2@x4uPDSQUD z^6-g!|BU8$Ui#PN7;lS{M%do%i98t>agq|jMFi*F3mwatVq(AW{ihk^6EA2`8HLJC5c9U*@`oBrL z%+tx=y?v4qPPw`Uw)`6#NOb3dTOpkht;a)8iU3IKbm_?QBNy0X9z>;owAa1FK_5W* zzmJpl8t;>Lh95wJN}FJB2UBe=J(zD<+(i%+cE7iT1sU*CiJ z7%F|zHx{5g2=Y_10aR`ZY zw7n^F)EpPX$9)Nmc&=7?aAk(a56eoq*vmCV2&M@7et?I(zF^LOMT+Inlb$r$58}tq z^3=3jIkwU-Zut7@uTSRbgf>y;g}uL?cw9gGUG-V}gcw<=yW4aX7XAtN$i_i+4MOVP z>~4@oQSF8Gfilxt>r`b3GK-H?E!W}9)fFov+r~(zP@}k95;wF)R#Er<$@vT)ke2w) zVIb{c6(rIgCVD1+`yv^6IJwy^9eEo4KGXNNJ|3esy?Q~?|4xgf>;1-0AO8=3(WMj1 zux7Lqt<$O>V5;vRD|0${&^$UBO|*fWcwQ7?jWcaMrasCepW>1PP^0rG`$SNhMV>Z+ zR*ih9kew$8xMgXDa(8gOC#}LwVPPAp2L>H^h;CK|O{|Q6X*3e1cG*oMs4lXC-RC>U z6qf)Vpc1eftu>w^D6A)hKX%aLH!||NBqS{P_LGdO8bzAW>&@tO8h&Q86G{=VtA}K{ zZ^UrH-N8m_&o`xG%6WE%Ez{BE&>^AfsLu@vSm)srQX=m!WV_p_ABU4ZUgPv?wN`&W z-Dia*eNjMvV_l;^zA4%Mg_Go1k{DfL%*~|B07`0V84s+C>P}Fi_&h~8XN?!kkPgRS zjiY)OB^&UWgBja|-EPIUY684u4&xdy1H#d3k222POIqm4r7>V;>6{@e1BL?D^y|Cz zxW%;L5rHF+Ci9k6Fy$cg7C~CQi6X0|@?8wnFcTGjY3p^pZU18C-d4WYM*5uoZ_rvl z@Bbga&`H}%uMj9>j}hc{bPx-r#}sgPw8h{>1Qzom%{?!G4VRM+bF`& z$@{VKzs90SJ3afcnLqhn*N@wqYz&Ii;@JOS@!{2Gu)YItv?W-;Ajv@BFH=KQYAEh%ROQ~K~B|@I<@+#ik zd}WhGj$4)?FbnCCGVltbtin>Ed+Qv8_q#3}o&CqoTtN88&fK4TT)yicU-O%PP&pod zBxe1-=h*9_KY#nlE0T$qrC-|=iG0`I&b;6FvtLmyT{=w_Fe5{el8iAU0qIChxOlFa zf)H5Fdc=H9Lna0*F|+-^@RepI^Hmm=h7XAW)69X?g~}?-%-U}cc(y0BLd?pnAky&2 z8X&|y1mzO=OJP1cuA<~-;%dt}Y9~;CURO{Vz|wWtF>WcW7vXFqHgE;{$TcA$mHzAN zTZ-P^I-UI$_v3q>eCF5WD)|+(8%`;Fz)&Sl`0^#C4h+6=I;1i5=O?Dsk5yJ|u6F(p zK2eeUK78!{jZ?Tx5-qH7y#=U1m)G?p4}V%ZS2ki-o=dR_y|2IgpWtD%Cu?T9|?E z0Ar!ah#e}iMahe!oSf{A-;)S`FlJ<91)^|2<5}PKV8LoV9Kn@g0Wb%m0Cr=2EgP_~ zo+z+>^0{R4Ytzzi^5oZcd%OL{fRUa5#NnUZ_{VqTB#vuwkwfFI@{^n%S=aEbk#+)_8K)b%hyd~`e4iIgoSJ!!8A61lE^ zw0yeP8*8TdrK`;7aAgDt2a(+7`?*4~k%$@rn+=~gbp=dd@TVop{K1A*=X9sIz-WM& z8j}LhkXtLqV8U$KP0O>Aq9y#s>)RB+xU^J|_I}od)wdh%eEvRQJx5nvj~k-HRS~wi#itWAh~JuYW|dhX!8IIe>75*ms) zYs5O*t<_TeGUH3VrG#0KTLC;1gi!3*ZIT!yEpU5`b%}iPdVocQfY~o#CI^=Kw22~` z!+KaVAD;z6tWqpqnNDB@eLgW9j-afuU^41S9Mk8M=+`#?h3%7x*XND@=83${`Wc@Q zcH5WbAu!{jR;}QFXgHxY^@FJg%+g$K89lDHez>3p8fJ!ydVr!%4MFU%oHenwTw6Zd zn7j;0TUvTHaFgxhbkM;CzjUDY7f7y9i?4lu5wQ9^B?*80pTvEepIGq6S4bQd+Bq#q z_#02YxcHLC7dHCjLdC$Bv45F*ms{mV(QFm*biDD!)$i1Qt88z`R@E_MTf@gVF4-D5 z!3X9p$3XN|hqTS5H7#Vx{A44a&*4rdS?#~_IN$Q4!fv8W4@gYAG>*AxJ{N%4aK3y! z6I_n~jg+p2IQhMf)?cUkzg@?VxiNh>9NELg4xS!^lTU|rzw+^7IE-*Cq+jLo{0K|_ zqm5dxiwZA)r2a1t^^?ne{Zc>nWQ_gxzr+z)x`BiXb~}W4v@G3VdCNPs2}@hwl(Zp0 z2#W5xCXUJRGP2E*v0Ui}0}<7lKTywG{~QEmX80-Da-tz^itxP=$;?XRqk%5F)PDW& z^+qiSNZj?U8xWs1q>%r|SC0H~Rt=veeti}@2+qrYfa}`usmS))_W84re9yn~XP-9u zb`Q>Y7*(=edi<41UI<0s{PEi^@;*%ZnxA|}=99_~c17kmLg+GcgJ>FnkOX#m4@ObI znKc@|Z-W^->S0c7$-(KIn6!`c1Y{>dRZG7Y%kd>EQa^SY>wSQ1 zf+Ev@&@5v4#4q03b@n*IojJ@IOMBT)%gpz!z*)}^`U4AX#2!;7+ff#5%uco=`fodi zY3DWZc5giPRJO&gL=gA+v^+`Ei14iUl)v93%Ya*r&+YJ`%@5!m!%mExd($7yv(zu zu~-J2G!Q5rz20|C$tM9b=4<#2l=WiW^FF=w+ zgz@nuTpr7Y4Bm(ncqRoIPX=8gaPk>BmM{Ree2G8fql{DDaQKjl^j1JBkr{TQr^Qr% z^u=XAdC%Xz{|~SG%jG7h_buPp{TtW&{O9dqAC(c?!fnMFE5}nt8paZm7Di~T$jCyM zi<|{oI!NE$wj*ZgS!6{U3*T{yak+%;q|A)%{z;gHH6<-v{qpgO%sj3a*WJAeKoF-g z?#%;P7&4>k^Sp1nnSHxS(LUrO-Ix7;Z2k0e@(;G^b5iPm;~Du^Jlk0FwHEI_h|S)C zxbNe6WJcw39r0tW14%4tL(KAyWgX~>eZhg8d;kkAI5BiWr3S|7$0_b)zNg4~$XfEVUApV}+3p8N^fjQbP1)anzWpr4>yHPkgdO9Kq1iSb}q%a}< zk7$TC0wX+X++e8@XAE^VUo})mTr3?;EhI;*36X3%!3{-U*`h*;sNCRf(n%UHW@}yZ84D*C0RcJH*MCpASfEwW)74#I(WY=i970bDKvxpy5u% z3H$O&vGhL|F5^WZ&o|-cd>JpYzi^oRCx1gdK8}BVQQt1R?H-8aj2^6bg&HsfvCMbL zD%Q#ujh3p(RSTGjm+S2qM^L&^IVHh@Ezhi6-KRbXGDxy0hzj+8UcQD{e{@7>UjO}vcC+$TodGEqx!Hn&z@ktDU4B4A2cAn|d9J4DQ@L2QTcWO#j^ z)`6h9yQS!$)EGV^j-hfeIQ%=)MM@S{nH&ccz^GgL!^^OaHdCxOR;+hl|3JF5(;I86 zck*-6$8efO5eQm;*xz_%B2G<9P^=YJ2U&7NS3tn{ZijZ6xlL_gmVIK$3W1mG4BF9V z4;To>Tb?ExuY_lWaP%tm&-~`G2q9j+PoN*eYcWWOl*3!--b4Zmfo0wDKAc}6s*s3; zi%0HYJC4wX>pR)snta1b$o}SEKL3jkdOX&qZ^+Z`TYFA_yRWZHI=!zsr8f@}6NwpO zk&RB-bYOu#4=*Xgk5>cCYduXxd2*TUuIbpl+UaK0q*ZVX9d5r*%<`7p(yHj5z~MA5 z?-mM`Ao#YobkNRLwzt{WSJI#M{&^NH{lSYeSDg|hdb{px<46-rx+jv$aP6`3!kZFl zO`W-D@9dI)`W$2gznE^TVyYuSupYJ-#->Pb(}v~dYWS{^UaRjzQ}yG)lbF8%**;s@ zJ{tRHJVBpZS!1uDogR=8X{=H2GjdOd(|-JFg|pJjQ*oY}uV-o+{CIS@eGkL+U^ZOb zq@Rt#2pNp*zCM@0R@VDiU#Bzw!fV?vpG-PEp4V}Iyq&`!qeB<YyVtP7zva24H|98dHg9NN%{tOx2%Vp=&Z?7Zjyq^@IJ zmbS}E(11~5wmUFI@^XXZ0>Un%i_Jg~6y%BFg)#@E-_|Khx>-KJkV@ag84pZvvpr)xN>cRCg`4VK{e$<7Z;jDX;6 zTxlfdoUmlH(x}j2+S#l`m(X(bTjs4WZ38~F96>axXM*RR4KJ`aIB~-%3NW`W_^TFw z%ZI}R-MC2stI}@DAG`VGj8Vg`fAz&**e8{7bc_Bs4Z<}VL`6Aqc)bEaa}Q7VIGA=$ zp2Q}(U`_S~voP>d^Gk6+FoJt74+4^Q)O8T5$0+ zKJ{#WT6$A!3(RtDD8D?`T8Q^X> zV%kK7%=@gaud9CFixWdGC~x_Hisdxi1|H%4^|)sWMQB9kh`9)lETW;*MXZWEZD|iH z1*J+UJV+L?tkD1kbFH>;iclk@`>kNhzZdBVRmNnY_&FOmL<)Aq~F{VQ+V>e(*PGiIQF?2`Lk`9N%m znCvS-HLI0Iars%5;9&=kETrbnst8yh6Z-}RT+>Y{&mpD)Mm z(>awB?ji*G5g1toEsac(plw{CylXD{{8Zj{Q+<`O^Gcium+=fNVA8&v8bAK;^$p3( zXnRB7Ly4;WdIE6h20j3mGnS=?ga?=Pvc2>R8bsf`=d0h*h~%$-zT9g+=6c`o$Ip$j zYICU&u*6%pN=327fK$!PSxcA=d!tzHxfKjQMMg8L@(}Sbk@3PJM)1}Bgg^D=7l-6O zyp1nw|A?pG9JHl3ms?+5R}<5C?5z&5?IXUrfXIrqP^C`O*0AEj^Dqd;$0*jhjnfAH2{8iAbt;*Njv=l&VZdzlqFD;1{48p=Ft z#kd@wnx&OK3YOp(NoVsCT{J373hTJC)(}+29PELqvzIgERTQ`vY_lr4yvTYQ(CQIfHzwfuK8iZNxdn7_@ zuHNmYOzfbsJ*~W;S98puM7U+v>O7jvMe+oZFn(7q4u~)emS|I0*6%opQrUiA9hCUV z3;JAu_B@u0u#5m$U@z`a06ejL%lYQ-E41{;n#q#_5W7lR9GGF8XCq-MxAy5d_F=UN2G-R7lbGS$JXcJy5A zOzk3btKOdwiR&W;uLPX>IM4cVcj@0rVyHiV&nfd1>6gE{neY1*bPQn?T3WTxU`lPl zNf@IjtE6s+ZxK_TNtt3Ti%g?roBWp9=gHDO-cr|v68H5QhHTlcZ(*nsXQL+#|I%S@ zzdEJ9WG%hgf4mQE(yGjwK`k1l^}0dSV00M+M?zD4#upF}-|KmhxX+C{`n8krCs*#Mp$V(7g^U&Hm$#Jsh?pH< z#4KPj)hcUa^vlAxjq;3H9b*Z55hpyM3Zu?xm>2uAk$UFR z=N!Km@nx-vgQZmsWh-jwN$h3{!q*sSIkT{@6zg%Ym>rL9&|3FE5aSUXuSS@Ec5C{z;K_gjzXX%9tMj-oN>+}8UUpgTq{^$Uc32G~}yUwmUG>g-4 z?i?*$fvxS(WMCK>pfFtR@BByCNC~PyHv#o}Wyu4}{u%$~5WPR`wH?1>MB;Qk@IvT# zZR9K`f|&;MD0J!5Xrtfno#vZHQxXUiio1V4_x|2UvCj?qgSSlS?-65vf8(uQ@46e? zS;B2YYEt@nHafwYAbs7A{NOahk0|i3uIFdI%by%t>Wh2bsNS){%A$x~sIzLL+Zt*N zyjt1;pJVQnYYo+KOAj`tDF)%8Mk#D0OpOW83Cc>^PySf~_`~5Ge*FKJXWjPkTlN80 zu~kSd@cv`tolG(b30k=0fr=Z4fh{i&422L&W0Q6uw($| zp+4-W+~QmPsUNO!uMh2QQF0;v|G=hUT9|E;*et@EUhAd|-GB>=H)pene8eTzKMw{g z(vqAjgek<9euk)66UODIeT`cDhlk^7F7Uu6p@;j1O;C;>S8p=63NWHp& zVwehCZ7I9!@9R)tWaP2r8c05EgycQ@@$NILg8dX9#lLWE&*S+_zeAp_!~S_RQ}SX} zuiNkal-2twL8LB!Ci-$q^d@;1V9{X9tia|=-OeKmV}f_Og~5+rBqj)py8hM;skGBt zwxcHbOu@hH*!J!Ct(i!!^%CK2IQ1;XEj!rhTAKutpN=xC?xT%QUb-Crq(nn79zVQ= z9RH-hp48#V@!8Mk`n7jCem08ECZLFs-Ih;pWW_`g8kFjP#1^IQ=@MgBF9#<&4$^NO zK-d7T`jbEX)m|Zu<1xp~$SR2=C&F68p|3B8Qon-TXs1h!&KF1iTSxd)AMir9?dOmA ztevuIVF#&-h15bS7e}iccrcY30w0^Yv>IV)pqOSrOA}I01Lf{W@Fo3K*7wupU;gUP zxc~pqUqN1fDSlJ$5RjJ!^orhE0&X)CU z4ru5z1X~LuMzE$v?({@XH>Rf$KL_{E{Myf)>1~>;`gweuTasJ`RIBw-DyB3Hzmw1xu3qbMWCD z_j`Ev{c8(}T)y$LmhEd}hzKyye(MvmjTn*hJU&o#0|bUTJgeB}cCyin!Sr(64(nag zYeatZJ_jc4Ed7t2{peq~wcW4hGkC6-A+vA8U0;`*{G*3TzjcJ~d1F%ab>Pw0PJj4+ z3aKrW@gq!)DR|0}*~Q{LwTVv@W3$^wygQ6Vh1P=N!Wyk3Ag+$0p?Q0wqfbyQ{hyY8 z_dl(XNS17Z$ z7J>te6n#r5(@IwAvKEos0~G0J6f2m2Hq#eIEwTok(|+nYXN>7G29#(EeB?M~jfk6= zu_7Tl{nnB0m9&!s;8K2fUhVYd_)3B~mHMCSpLw+O>$Cr8{`#@e&m{=iNczn$H}NMA zl=}G8|63;k;qi9ep-dBc4lO;N0yWmd>j%abCRqSUK(@b=NbLrR1v5=%BgSf?RBOn2 zf3+6_l;|xY%o=-}+Q31UIiu=vYGzNb*osOhdCYl@^`t~BL5x#0d{!>Tv4_zPoR+`MSU;3t>IUs#b(#iRRlH2Y1Q=XV0H3R`2{b;QFIbfE|=eyrIvOju*6vc*B zksxJQkTXF{GeoU`AO>Vg2)tNf!&=4He~JQ6vF587aiV!zq?u4ljG*JNe7}Xc81l&1 zS~7H^0kzmv&M~ys#dDyx2|5@86SLvkHgYNB@^7YtcO}b|X(-e_PRa>k@Q85N>&o|? zF03COAo**4PV!@ueJ!egXNk@K>9geTTEb7i_8Pgi*OTBs^Wh@J_s7S*?WH~te`4jL z#(avguwHs>st7ZNTh-M}w2KWzlAx|DYGy;IMt<}HuB>q0Xjnwph(_0~vfF7aeQl!I z+6d(QM17-``p!n#&BfI-GQ6G7laUpQ!bygVhF$-x#JP~Vudv7O zzq%-|_djO0APxQJxb^LSKW=^de?3mG&`98AkC%FUQ)^$`DeLL|+;lltJ+?-bZ86?_ z=T0tjD0m#BXt`ztC%vw(l-E)J(e*{Yb4ts#^}o+JpV}6bzt1Q*&#^}Odud|}uTUK6;m@3YfB&D)UmooL ztLq5-|5sG~|M?YFL7vinKk44jcYsiZgVaM|&=`X$oaLlY4}r$Hu5QgxCw;OtQ z1$e&xx@Z2L-q~2`+?*;_e^w$ZO=Y4?;H6WSD;IuxyXwX7I`f4e zf9gN|zxcKe7(fgGn=tJLaL`Xc1WF9}j(Kl@1hWKuAbWrh8RLpe`Ux1D$^kyS`UfD* zfpXa+pEKfBM<)<~t379M05I^OfPdX=ViTsg&TE$&P=V3|<~kQee+L&nHouaa;Menw zt4;&C0&GG{CY=HMLM6h0qrSHXKz7|nm3Y8S*LqP2au-PE_5Qul+U3vRCJ@2ZUIX&F z$7Zh3Zi@Z&(oAl&0`?Br=C}RAmt6hrP2vH=(Oym~AfSK<_#V(#yiLm>fdQK%#fNU)llBe=GhIVIU;*d!wyu9jMIN^^zY0LHtFJbIFIpFZgeL|1B?CzU1v~ zN(^-B2jANSktt<>PcHo!e&i!pfZi_t3}7;Wcinep6PCH|%ghqczxt6d@Ri@XOgvN1sF(DEKeUy!2aLpe+P`{_HOOMYiYXm)`$}KbizUf1;aBFn{JDMFHbA?=TwZ zI_8^JCj-~_&YAz%^HOzk2Y@u-KY!%u_R51}g6lK~%t}uRzx0gE+51IXh2ZS<(4Twy zUrVBYBfCy}eTmS1)-UOA@VWgY=Ke;6?!aJBmv2=&oR21z-W(4yuIN zb5#a&&S1cB0FkrpW@O+F*lsf#aL;q@0HH*m5ClOLbW%Mt}J#fCUnCzE8K3)O)@P7}*yH=32j z%(u*jf4;rZJdIDe#DH4Q%@GulwG|VPS(B6u-sBmPVKz`}l{rcdu=Ik^q_S6L0Zwj)Ljjy_!d_*?K(M^Ln1FVD zFF5uYu9B=p@fqgILJ~?b4SZ|LC$=zsM%WC#e~BcOqghwyL)a@K6?8727nAu5pUrIA zw;$NP{WRM2nTyYZ657nhG;|o!Q$8{D7a!AtSW*G{#lc3ofYOog)$s(RW&%4Kk^s0( zxdCqZ_LX9wnft6xH1X{()^oAY0E8saOw_<>lP-O)sY9>bfIUaPZES$2!^mqQNw7Em zf8u3R&l7MbiF5M_m`&)L`2BQ6y>DfW7P*aGMF3t?L!JOV5SdG@*pl zg>N&P$?M?+xKTKS&E)IfvQuLqsw$Ape`7hmXAV%BQDY!oze%uENh$ymEMIz4MkAoZ z>kF+a5S0?$7<jWsaK80>D}x9ZJ?zNnSlj zHoCFLv4dRZC&4N#2Hm*JeteKiDtMak@YR{CEWvU#(KKt1mmcQ%M1vpwp}9#?ff=^1 z4ftBV0273GSA{XKOcdm03-XEPe=4)o=82Xj8?nO4v}%j5>j@x@gyr)@Ym<%ep^fN& zw8fHF*AuP(YB#6J1h$EI;bcj+dCApPHqM@-3DE3iBj(6GLFqA*1sK^?HeiL9KCrS! zhm*+x6J;*`F=f}~WvLev4F>7537`A3Ze99Iv!VJ6N9q0i?zI~W>*6(je+y+c(Eh9L zNGdSCb_eM4(!)zOra(tuJw%!7{=Fl?_6A&I1MK~{*7JVpucv~mUFJQ$+_+L_B+lR1 z^0Fzox@^cS>3rVv5SYNa*6ht%W`6i;R_292=3ItZw(^)_=_`});+Ll!=*D02wcJsS zL8z)6!`st5(e*T$+P7UOe~+D}zwNQAa{N#GHce*uZ5Qohr}uArrmCFu(@ss3S%2G= zW2^p@zp>FRt$KW3Nz8o%-gOR>zwCBW87-e^`VYJGLvD6`M$6R^(1~@;G4(t#@FX3~ z#tHJ5rxO$9m%c~`SSG+9!od~3jcyXNfeBFKWCF(WtZ<^G$}DMqe**Z$4_zVw6LL}> zUN#WBp>XYsU;e1Ovg??VLNcndPvsYvee8e%*Z4|R*Rc?H7ODpZ4R}>GIp&sVXP_v|s$qzwJtS?2P?wUsMKA6X0_-2CK@@`9v4F#9h9L z&rR~{dpF=Sn_704e=prs6|4#g)@%OKzVla-|FfQfDs=Md>&qEhlrA4z>($pLOXh3K z6G!L1f5}SEC6vR3<13%zFFLrhdBk6O#gAZDMve!T$NMZD35R^ao+@BA_E`R_@UY4N zp6eGTmFAC4ieA``48j#X$Smh{(3n!Z_-)rI^aB|f8^$g4JV1+a_ji#@n3UC z=;ZUnCX-qulo|v4lF`NIfAkY7046GIz||{*K{w;lp&a&#T;G_YLvlB-c38=zks|Mj=##yHbzP=ew zD(lb-36Am!M3?{mI~~g}-~2}$Lvwl_F~&x_dJ$dx1r{4x%T>?6J}m~!-|)VWjth_3 zN^n#d^i)ltXRQ%1atsvn7abwPY;?R*?dx5!brM7>| z!{bjKG?}fE7(%%8+!{e(B84d+a^y3B^6mSs0nWYVKhypfI9ovIe}yam3ExHE>l(K( z(JAZmQta|gD6xB_j@$+;b5vuRRaJ0Tj{gV?f8uX!`R`#lRRyGich`8n_VVS;|7<*$ zt{wMHyXJV#{o&z{{k4C{J&b`S!O|~15jF-0B%{Cm3>zC8W<7j~-*dMYG_79=>k ze{f%Zr|{$7;}cbt14cP5)0G?W%dh;3Nl4{-fB5>r@QMlXpY~;=Yi*g>%TLns*IsN| z{E1tyIQu$Ff3=5$8q>=Eq+Pw94J^}{*yO5fstSl}u668d-i-Jwo{+fL6-VQL(uYqI z=qhuh^-CX}<2V4Ig{$l`SDF7Q+s6USe_y|sSNYfPKk(vJ_I}`h%0KWw;>nS#|f_Z^$Ry$Z~(YpxI|U2brSvy7ud)5%)k#E zK26_qOkcRSdcCV~R}4qz-?IBH7pf}JzkV;j<)7cHANc$#*FW&za`OZKEw?}Lf8X-w zO9!C2i;fp_jf(@;#Z#f4zIzVbS0{$=xXfR0KjWu=;I^OfyI;8ZXZ-vZF8&$+FSsl2 z1e9|8Id3Fae8z6$#C>&VzhwVpmd(84R8jh|(W?(hGhg^C9(I1i!&l7rC;Ts)Ho4#M zX?fv?$G^-S*!LOsvY8i6;#aR9e+o&{7kp;_z)zfX!QQ|4f5T4GWOBb@-xux9Z`j8d z?_X_3#4p@8g`~k3{-T}5$BE-cHDewgQ|`bN3_k|+A3iTpIY7h8seJcrh8=*EnXN12 zGa07*RkvN?$|d?!Z>#MgPm-*p`5;CTm}2I7t0fLMd%%Cjvb z;B_5lWdKkBU0wV7(v^Drn`sjefPsOQU*})AtDax^k0bLs0$YPWP>R-7uzwBMU_5~$CKd+Np zU%UU8@%$@Z^-Iq`>hX6x9{ADQ|BOEAYfoQ2?|<1xdHw&6OH3|!_~OSG?uRZK@Z7I{ zCNQq~NXUQeMFdxUz3u@g*F5$!Piz-la_Q+0ozq|CtZwuZaN*Bk(TlIwVRp8=QXH=r ze7OC{e<%6kzvjQ6`7-{iJ~I?>Hd?s&1pe3}f8JQYO8?}ns9#^ZV7}+8!F6=@o2N(u zpuWy=s^I6SqH(D(_ccxFdW3tOv%Xa6>+imzy!NXv(!cyKzj*@z_~z@Cw@az&YYT9^ z^jW>+!cdpJ?-zaA24KLtK)?1;*&G<=wR<;Re}~8)mz|oIU2)R&@GGSau(pec8rV6l zUc@-pd1>ytFF;$@Jn3Hf-gy^z*FHF(1OLk7%wPNAbP9wkUpiv|Nds8U2G^Z@6tD#_ zO`0}20qE03L<;m8VBhF1UX0OobQi}9kr-|ha3ezup&Q%;AqM5DQu+evIYDQ}_W6Hp)Ec`WsYt2_cn#-`RCe=xe@ z8Av+r;&fM>?z2Sg4Np)oPC2CQyQpQ)VIcX$BKMBhvn|0GtqT#9eL%2t%Qv1;QV5o8 z|0TXO^5utX7#E6-vjS7td2O1KB@Of&UB7`ECR@6 z#oQcG$j#cKu4S{}AXi`!QIOx`u0T*GL<@D(h?AzvYO|_OS?tXnt&BkOxut7*I=z)J zx6?#on~Z`|7MdTtAM+FU5@KoZFy@iu75G#;D?o6-;c;|gSYNVEbHn;Qj5{Q@`JDw!8ge^J~KBhXeSeK8&f#(RjuyfKDTCwh|B;WvuoO#f-vbdKQ|A9Wsf zhlO~Qwx9s3*pY7jNpD%hu5OL&3+-86nb;2lk3PLmaBwJ~5&pt^)?qY+e9{RhjCy$I zOiJ^$;=jxX%bmH#sY8K{lC{@Ky>$d=y-#O)ai&LhH8#48e>++m$2(iM=d0H}Np@%R z32%1>;Gzl+*&s?uROkEXcQ_q5-bq1B0(aePPGoF?Q z$F0yaW#v)s^bUPT#X*_r<0boMvdrv50pZcYLod0t7A*`VW^_B)L`{H~if}S}yf|?u zda!*QQ-_^Ve2~j*EN}QMFM5c@=y@%e zsnBPntH^J;tg@c+X0aE+=SeUu?jTLBHr-a~i{c>-tOh|vQWc|QM!ad6Cg8FaikQV6^;1rkLh}$H+$B%`?-Ac%r+j*bvzVKaQ#|4^7%@2kd%mVV34+S4}%NoSGok}|(7nfypn zy0uA(f12dB)^P5Izy)(Ge1a2##6Zu@N{NZ8dvYZ!(yjsBDt97urB1U-I@c)kplK;$ zq)E%mOfXNTG`FENk2c35*I*{5+UIfn&{jZa6uG@QsQGXjzb`j0*$7V%A8SkJe1U#u zgX6Je5OtQBt-Mv-Zs0UF^SV}g2~1^a7Nu#rf3|bmalmyxbod^-gG*Z`Oj+n#oBEgR zN{nH!i`(Lm}tUc?~gwK=)fNnOwu*{DZ;U_!&p{ zI6MRvGn6_RHL`WCp}5bp zcv+{@2i$=U6Ah&KR>5o`&zPAhfEG<_WDe?OXRotz1IA7di+K5c&ln!7h4SdqXBMbP z+$#^OWIcF#fn=|siZjVEpz@(TiS!I-Txs8qw4#~(%%572c zh>vD&tO^;%Q5SUeBYStu!%jVZpqrz$H;P9ofeF~S2Vgo&D`n6^*ZLyCc~vQ+{75Xf z1n%$#0Y2BuM+1A-9(gf%C)dae!S>vZ&sJrsZdkH!l^j{EgP#1@R$QQ$yf<$F?07oE zTc5mMyFD}_TG#f24$5S?rLcc|f1YIf8Nx6q=C|S55hy9%&$Z5iv^UH~ZOPImPZzCV zrjdXX_&9>8&+dTlB%us7EeD>pu{6hPQ8by>a5r^J!a?46+WkK5`JE|wl54@8MQsLz z=c}ATG)%qe=AOO)bcgY(NZvPj*rp=C`;P82K2liVF`nn0*=(t ztg}Y2?x83~O_mDI9hSx3qTU=}W%q_~x>E#4SGuu@mfm*9u^BB->(&_|duZJj;&rP> zz|gCjuVJSs+K;e=NPOtGge8z%^CQ4bCs6hmdxHf!iTAkT6fKgwy$t+f4x8NHo?zve z&)hLD9JW7<=Z4ByS#lDhe@Gr%G4SY@+xOl_$kuoVM|P7`hRC8ZqUCB%U;fk8=N1<0 zove|J;1UJ^uM{*~n!RH8q5_QKQ+i6WkQ0@$Oc^cDK+0GZusaO~M79h{UUMRo7zXw} z+G0~qyxY1^df_eHFbA%qbaNWM0Kxox;t`sc;_7Te)$Tc)I5Q!te>=V0spEC;r`u}O zrOAeO+G917;@hEN+Gniy=c&lCVQGaTm9o-%UK=KyC1VXon#FF%y2VQzaMQH3IFU?M zlaqyVdkSP=-vhB#Cgz4~Q?+aI9i@7AJ!hiX7+iR(xzesZ*vhQWJXLA48)8apR~mMW zMNxJ7YhB=!y2=x*e@5QjZWukQo181U91|o<4Fm?1n-6E)%~G@tSe)!egRf*MS0~J@ z@UD?c4_wRN$>BU6a%J3aisUPq{pLYzH~whY%cIq4<<|85j^sW6yagUr;sw_;5L0pN z*wYWVg-iq6XWd*%tMu-K@55ZgG#t0vMo2C0oANlb3<}ICf4kc*@_gCLH@X`90$ntB zyUFP~ImJH6iya`y+T+SwN_Yx%%_;c6$-9X`o_#yrU3F9@o|P)8zEM3o>NTWK_(bh> z32~2+l|0NIrNcrJJUD_SvmnhK4zC|nKEKk@3d9$=Le(gJ>I=)M~@pM8a2ewza&3W_)3^8WTP z^B~PqHsO(K3o;m+4U6o}49i8YbxG;X9X;nnLXJ*4A-LrDV$L?upRI!o_H(Sl8QqQ! zlsK_WqT8M24Rx0s33#6P%K(&+9`7rJIfAG%RHMt8WDH z_NZmSw9c(hN!3)HF)wm4mo=6zciA3`_leB&My!Nywr>kYqc!EQq08BX-#ZVfRMr!! zf+{NIe_3#PPf+rwyqBB}p*!cktX)Us)3RIklTU0%CV5<8iEbFyn*U$?87wK ztPj=N)7SJ~S~6)SRO{fOXW_FU7Lvcd@fQ(CE4ntcaVvMKoy(UK>Wh(|$=n-KJ(E`G z$fvA5KV>OPjUXDOB|wEEXzt+l=18x%P1N23e~uyZaC{7#4ohiPoh9DN7wHFEH4jcJ z39T-_dYC9-uI9L4;q4EjW@gIyHCJbO22pzb6yJ+PthwQ>Dony@^`wsY~nLT{2DPFncj$w@{nP!FleY0o2fA9#cIw?bZ@w zOu^3Pqk%s5XFflEGVes=;HHyb{Ca$N!Q zCg(tmUlu}UNCKDWdV!K1%Df1Z9}H7Ke;iY{h((mCBrrEda`3I3I9Qrr9dtJXAv9r) zd%iexoy+W~Qz7?|7UPFQ~<89eP}0e+Lnjw!N_wyq)eVq!YCjpQPTj_#hlf8<%| zb?l>}tTa7G9Z?gpV$Qua-_q_Jq}M1Syw%hD=(Y}dm)2VOgrqmUPSR@Q^#UtH@JJlC zV$VJ9`exf{%|w36VNG(h#%WftT(ncmccCuKqo#!2s$YSEJ-vPr>Dg5IJ7R4!cw!^{ z6cUUn%YY;7%@P|-_XGP?j*tZze=Zugf}7@6%6F|5I{e|7+p|YqtIcD639{H5$FUz} zCx-){ZBLdN-cvQaarI?D;;R^x5xskN*Ac4vZQ@**%f76J8{;;2ew*OwAeV-ff}!9r zeP}&`b0lg+U1pJDA%K+SMXNpC-b-1w*K|&DM5P)(!g8i%kv-vd%1cNQe+yr%fE*RH zJBNwfi#gT1UV9|A z`t(7@9*g~1Gt7RRtun6#;VFe4?RM72WZ^=oEDkc7(B68$tR%+mRGEQ+gxu&QwdEi# zY}z3r^6xnABl%J9-Xsq5f0+_gTV}7%VCg-IRkc$vV&?U;bfBz57}Y2%%VHXPankF3qI3Zs?0CMqNEyS8Gr3~+i#3Q(eU)RQ;m)d zgm=#p5M_YbwCvs^YY>NXXmn3yTs)UdvBqn3A=~58o+jid@3IY}f2R__UE3+Cv%imF z2k=^zoyNMgLa@Lf?P4ZL%_$kLHDaSd$_$doHz7#+rg3_+`FH9x=~F8$RTH9TTlNk* z^a<=R>2BtPa@qMxe zdr6Fys`YHs1T77dAh!#|zeWHWzG$+b)&|Xv!Y+7LWvngkeQoNPM>My3+~wqT92;JQ z3GFNozA<6hc~U0fI|tb$$u!J0+lw$tI@?_c=%(E7^*PjKe{OAK&+!h|5zVvyyk@e0 zJV#G0-1s!5r3X%B`S^E7>0PCuVb zV!Ss&hAak5w*Fw-$Y|OFv-{f}M!=3%@2Q&_cpuN&{=9guilKWbFq1Gf?`(xixD;~2 zlbil9z*IC$f9B*5VtE0BzJK_QyvP07-OO_O(0PXtJR3=dZB44@uLITc%WAy~1k)%f zOUtlX?mg9<8{Ce3ocdW9dk^t;bP+Pj9PGhJPzg~)2B1OnT^@<$`sANdhV>q1lIkFD zLI{j1W##mpaZqr>T->>bsgZF*Vp>n!cr>l<*?QR4e?$s5((W3gjhQq#xy=2R4@!e<2CbsdRppsP5{=l0e}^uaO5ru3iri2>`}D3NV(2(Jb{?;f zz-Dm1d1yoc-tv7{EpjQ|wq%&=8DbCeWzQzFY(w<^d*MLZeFwTLVy&XE zfBmV{idPZJ3dNEz*0TwzcbrP{-r0J+K>Wk?4yIE?Ukv=}rB0d+JozyM@@yfN|UE89~wTQ3r@s&?ei#V4A(n z5|$Y4j&B{Q#NNo)6S|?=)8)DClcs63b-4hez^5mo>v~3&*ic$?4NeXHx(F=eLjjW* z`sg>Il}7hNS@n=}R8YXi%_5WMln5fqy@yB>m-9w(;Xt_r&sqlA-t?QBeS+Qde^(_7 zP?R_;SZd-lwV7?)C935R2-@LbvHUz(ccWzuxKr_N;Rq9c$bu`<8s&Y)#=4XGJ?78l z(`4bz)Nln0A&>Bb(dy0DonD#N?$zgVJ}ftQ8wmDF`}lL~n1rm{6{-oH4EUj?UP8l8C@hm-ZFN#%k69+{! zw{|5#ZS0>K{=u-KyA@@2Ggj=wL_$MUoYooYnF!8OkO$SF$1X#BOY0uDe{1}mc(+p? zBVR}qu!5_f9$T` z#{J=dxVISIX9*0QdF@5gf4x-*0%xJ0=j>)wm5xu8lx=}{`fruHg%6PiR3Q?Qs>7Ul zINm8EtsApU#v+`JoaZfQ-uG$LlhHu+J6coTO`aw*IiJXyl}bYQIvY+{NaP#(Snq+C zk^6FGdun_;z=r+_%e~Y;=jzegX9eL{0+7~jdzNWfbGy-S*LLODg}#DYVZagI%HC_gtK_3MVNs{I41b>zbh^Cp;ny-1e_8l3be~l%c6{s+Dbr*H zc|eLqt&GlW760Kx&(H4z3#IEaa;XFm`Kd%@wpTGlMXRgjS7N3(4%{;EJ>lct zu!jh$q>xNmuF2F42;^-wgv7qMlzQAdQN69H)U#XoCaAa-L(EZ-m-vBk!8p7RCDbUI zW+_^)sME$of1aCUU4gXbXyj8B+st5PgueNVf+I4vBZ(1>T4BA-OhVy?Q9J@h>GO^3x6Myu-fmD54{I>I!UmdE)UJ zbR$8#M1yy$9i*%LERQT1&1{FENr@=nbY%9Kc?Cg{e^N=3uU=ClQJ&kCr%L{oN=@WD zhXHGB{)X4{^ETr6_3?y6nDx6WoB6yQX#hQ_2&r?;1r8ZdkBS>_?vA?^a_S#PT_H16 zck0xifkuq4!Zh!_P~odbV7-L8NaGd800-%$D4H87vI4J8hNtZ5)p7Kw<;*{G&6~nx z#E@3hf5co+OBTb2@>3_v)3mf*xzafMcNX75UR;%iA+|})eI1hsFAYD9JCn@gwYCL! z1njCY&Pe38(NNTk6&~*g-P3n0JKf48f>e8M^isqLzkJRmUv!?uxmv247;HXUzv z^CqPF5m`p7J&WmyJJr#>U!n9)qVX<3$jnK*e}JPej{@({49dqd)JyBemaoqPTFMHGfuojGQgQmQ@r4?vP5nkr!L1xHgxwM5hJ9LBYYLahA1`q(ZEr0BKB-71j>VZ z!AuY6=C540>#>Uk-+Y@%LwFgS^42~)?oW~l^Fo(3>#Zg?l&Uz1Glh;UMbw%i$A&^m ze}4lM*Ral(-m}iJ@|p{->3cDhue!P;=Qn13Kc55v<4^Uv<(I;^u&EF-lSuc?Q)} zAG+fuP03C7-Ypo)z|S!1C6I*3f3L*OPWp*GP+y>zyP$k1#;P@{eRB&9ndZl+Cr6uF zJIEdxJ2v;vggEzJv6wu0RTE9zG!{0sIt)ncwmu#Ns@Qcy9ig}8LjgY8*xgg(+(}## zS7HlDLd}J$x3k<@qn|^WvLgXyDV}XQ!f4m~Z=M$C7 z4cn_F?(>9vDZ*k0TOWAuT>j{UwFDJ~NcCXf##vo&3!ww1s2?#>v}r&Jkj}xcKhGPqDF zklB$kQcXKgkFWwCwS(&mM;!gtDV#x95lKFow}d;~lv{6JT@j7Yq3l5&8e$P`3d`|J zL&66Z7?SMJHONt(h*-QiwL)pY>hquwv2wYPJ&=L_6OKMl(@{Rb&iS=|B4f_nL zB3d=&C%e4PZ^gN56g7v74)Tr9Q-qja_{zWpsVLgOg6>MV;Rox55n>I`|y;> z_k)b@*gKjvno^f%fBIUgK)fwAZd!dG5J}2pH&Rg#Nc9h3-9Nql(ZaZ{i$kRKv`Gu( zO+88UNL5-4QFXC6W{pDf&cGJ&NibaH5aX%;I)Qe+)(M&!|PQEcfL}=*qp) zhg6nO&SM$fqV@5k3fLaJ#u9xM#BMbd9!zC7D7IkJOU-9S_HtKDN?FQeNog7rs8P&! zBxgh8yr;7!rVoj?6{PZFJdlHJMVm~cpqE--x3Yi=#D_dfo|2nIm`dci1fqerzUSN1 z`qrX~t{Z!Mf2Qw`%$TE}J&&DKpn^P**XNN^o~xJ=TI?zkMKpv4TCD`Uv3hE6OGhQ` zvNR2>wjOp%TOjw<7t`xn^pUy6eBaF5?cvAAJ3qBxI%cpmil=2LpSaPSfVY-pVy1s` z5Qcs029K0-DcXHPM_!YkJ%N_Rz@D=2?6Kp$(pfj3e@M`hgqUrHqOWcCbm)i1`dG;x zfo6!(UipDKqGz0{gN0EBMLy6jWrnwvZ105h^f;e|iD`RZ2sS@_1^0~q+_hHVJ0}?A zlg=K`J5gj?-X==3@G=%uLNjXC?M9p*54+x}%k|cgb0&oFOPTMl&mB-Pw>?RVG15v3 z-n!#be}l}lpLi%OY`kNHvaG&`FmR7w;0)4LLGp*FEb&P0xWdfKriL`h1b%IX`IffCnADP0$(}F?xA&O ze;CGChiZRyFOT#-w8*`iyvDPK5eA<%f8I2Y&&+#FDlz7?!QEl%G)KTt!;fx?5D&eZ zD@|fANJm15V}zTunYi9l`1?5VUd}!Ygb2BnF6%1yAvHIE!KfEzhoX-`N-?~j@80lh zxIG>$FTX!AK%>)QXU<+jnS<}yMo%wZSn9*cf>}&oZbRy=Wn!~a3*}&=@TA4Be>|>Q zJoG0hf7Vkj(G(<-Vw!o+=|Q~EN|MmhbP7#iT)7X_x9?3SF$m(&h_?v8*XHvEKfAI3 zMtH8GkqvL)zFWL7t4urUL`p^kg^17u)`1_eyQ8hy&WQ)^G;> z7k@DlhLW_eDWU(caqy6`nQLHX2j|1DNB}dS;X%9^SC{pC`#iN!6aMd!s+>% z4JbUzt`{5(I>p)u=(d2a4FjN!uW>IQ-lU1P$yp)@1 z4zQaVB9x%z-qZ;AnFag1>4W^%#af2<&rXICrp?5a@RUSn5;kW+7+*If4f{{O*+)2WHTCQH&I=JF5h^}UwmZ5kPrOvh%~JcL?osT z@Wc#l!&`qQ_sM^W-OWjin|0^l`SiR^NQqmqoXtlvu-hk^fx=bU+#2rPwp&H+s`W$f zcXAHo%$1o;_T)oWMIpDEEleY)&>7cy9nF%aud^(oTFf)6cwk8NeTBXTySi&>?3=%ycU-;^`6Za`}vK zNVa~RFhjE>&){?1Wqjiv-0T!DUAJR88E!m9pN+8G9V%o&OH0h!Y}C+W-f9>RV;J?otfC<+CzY$ zT&12%bwviBf4BA=SFpTd8N_U5)CnEUo1>>%s_49~_WS7kJBb?^lnQ)ScKqBv|eNbxzCAMJR`FKW$=7#F^=ZbXjLL1apZi5g{-|HY7NH=dc=pB zr%@=($xf9dZJaP`2&_G10X9RiUHtrR9=nwYo{HZ!f0;R8kKAE+e-PSeToMXYG&khI z_4RCj5u5PqIJP*8qhI6qwy#nzLCcg-n@{rfwb)J*{%H5-jv$JJW`H?_!~jR)z~ zM<$WJ8(~OY5zlKK66RH@7fsJrwHR34jA-ztw~E6-nq2-!Pu`m38tk)!rLgC$efCfB)q2;zPJwOw zOL*5e`hf-{dLmm5D#VEFh15aDbw+Jau!EKlZ~CuICH9F7o{aHq4ykTD%vPw@Zb@bs zf1BIgeSKacqJVD9-=SnUI%XvPqbr+xaTbCJTxWmHHZqrmas{IGY=D$mI#K5N`aBIk z$6`U(aqdJFrIXMQZA>Uh!u~%{D}Nv*|HuFNfBo-3lmCe`asI`b4Eet}6TAM8%F6#A z!o>1#>cq_zA+!C&4sbngNwUO>CjY|&J^X?6vj3%2v=i)lCx6tF?4KSC`QuMc z056|xGAE(%lQ}GZRFZ&;f6rJ9FKjzDa}x3ZF!Z1ONq-0-ZJ9r%Pb#8F`D^D27J%n>Uv?Bei4A~){ZUz|ewM8$ zX%+6A*~|D*OD^1B3S0n~+056+dNFtM`~BGTk2AVI>T}iTs90GV3tH9zE+QlLDVMMa z{AVrvk2CKd_exVN#;=Ftlw47nf5$ve3f<#KOvdCx($b?p`eVYM$5UR|y7jLXssj2w z^PeD{;OQk<-!0HhTLlRV9Ej^>MiN=Z)<3hDhhZNc&^t~>9+AH=rhAq524EYs7bR77 zKq(TKa$c%&N>sq;m{nCi$4F4*r?H18+^nn%$X7^QK6@e9h8`=_+4fZU$UgE?&)K|3 zF_FbZQ_&A$Tt=d`C0BRsbx}@k@-DsxZnm7YB$9i`wHQ`bQWC)C+BV>Hqs%<^tYYa~c0yxfKKL%pZDmepy;>ydbL?n67%-e z*|V({DZP6AL$EORf6dj3BxuBtOS&NU`Xxn03sGP4CfCn)7{^Vj`9XXGm|y`|OJ^4Z z8c+3A!MA-I0^yES|3qDHVdctNIsfzqC~iN|O@E%_6*6zy?UF1cb!~sSn|m#d-ao?v z!8P)DIxM~Cj7pSg&ZvP1^m4TmgYR8yOs`9hN{e+HuJkLf%^R&rvyaBYDv^$M zAuXtp9Okw(#VoWANF?J>q^hKi-p&iE9-ZUS;m_i?r@0$k?Cn`jS3FLz2Z~el?~K^j z?R}VOAw020|@w>s@m zk`U>eeVyN2i0bu};CCN=%!?8YM&AeX{e4P}r{ZlhKnNSzeqdF&#>#_LflPJ7HB_B^ zmG2EACBH8hTnAW90MZWV?>1p{Plj*WE>isggQA2=e-Vy^VEFGwkoPl(pmy!^bj(*a zdcoth4Qg8?8j*X5ok1}n&nr$ej}mxgT(@}p0IgfhOC6A zsECVfjV=StU1D0##-8&n8j2blZgdvp9DxnC&PlzLocN8T_2jYLl8W2%@tN?`RVwlh z6mLiSe>I?ZKD(&^vzI&p=oC((i<#`JImY$Gre^E$61^g4hcnhx7BzUbuyE!0>G~%Z zcm=0*F06Rd$zwN#tvm@v9%e~JCVw3>aE$hWik2g7J2u_n~Fbro=s8>&D=A2%J z2nYhxH7bos6s7R70qh=XAyza}X~B5Vm^mB4(3v&&8Gk>)1U(RJ(e!xI$g@yeml18m zJAxY&J=2=8>sVfMzwy9rRBC!Vy_PDGm>2m+w)`jY41& zP`xfINTRBnT@+7%^PqlrC-$oIZ~LuqOhT#HI0|})L3}Flh~@V{z}@32s_t=4PED%B z5N-Duep75%;&%x5wl1GMq!_bZ?h`dvgJPlXn|S_)E!?T zMSqKm@C~-E`MPP<>2c&K$2`w~aZ_m8*(v&`Vu{QQDJG^?$RO76YesWc4RRkCXLgthOzq7tdJMS1iaL zTv9tgMzbL*!87pNU3Tkbkl%Ro3)$jl8hT6qw=(U_gtn;AuDN;)?v{+TncNJ$U7V=t;kcC@(KJ5cWO5|ck$bYOn*=x zCBIC!bYI}dgyE|-!3kP0gzLEF^{UJXq3*KS7_u8@BYUKr15t6p7sng78g(0XnW*&D z260oLnyI%)uJGSx9%m&n1dVlTh3cao*UVARX*|~b%1djt`!JNQ8s&l0IJT%a;7{px*f6|La>&hd2Ikf zG{o$UZ{9JK7NYQ*?{Alwg5>oEaFktQ!c>`AU>{|m-z)9wo;$m5=l!L^?|&kCIz~*7 zOy>K2$UipsH*`EKXT}Ub>FqIjNr&*asE1Q!)C?bl$S(d{EMY#VZL;Pj%fbtGzFh*Yx%&1z&yuFRvE7 zoOojE{ykSNjgq@~b{)gcjFzSzqW{ zsbN1{6owvAY^EotrSI*&V{I~|;{!aN6T+bICYf8Jgw$znQ_z9*b$|Q_$jkgqe~ZmE zgS*Rvk&M!l5(nW@7UOMYH}~##I(Y`TUM9yvs)g0EPE#zW~F>lb2H6O!; zmx)D|5l5hPdX(vJuIUJG#QN-McGDjT>}GAZGi0@MTs)xJGS5iGHE)P1xQ))^Nh#-C zNx!|&N`q+%Km06=Qh!w?)|P%VN)&|}Gaq0eDBm{M$;z8xlj@L?N#`!k^!e0j zfB_O2#t`Bvi}m_4>MTLOv-y_0ur8-~-M|A=ko{G+2{oE<$;lqhV-{e=!(1z>P2Tt; zxKM-p!{$u!()*yfpWxPIx9Vyx6BGWKVJ&ahF&)V|=k|VAZGULyITYU<#m7Or`=s17 z@|5eotrF{*{BDeh@ip*cRy3@}tSaNcN)GeZ0}NJ_qt}y7jxJ401oP1=>|*KZHfJ}J z`hF>0oKO?#CzNh60v3FIVAJ*&OBY0}(g6BcA5kEk=fhmleOKvNWt#K4aE>UOjeNDO zCR%D7cf}F(Gk@l;FgDlh>9wpGpKl|2q(;{!39X4^ERe}Bi`m1!>`vQ;Av#6$vQFPV zyFEd_Ce++f8`=HDf_<_&Cs(UCPR&Q{bp$aq^<@tDfD&>R-cih-Uo&Z2Owi+&8a>;k z%A_3pt{_k|2$We~{5}eS1~MV)NZq_rkkU4ehRdQw%YQPIpR;j^+%XRP5NhFDc>_s_ zk(#yiZPeAyJ>-OvX)lrF@)oLS8_z1WIe$Gkn9dD~d5zH?fF^-c?iG}=`9&mzducTG z2`p8OR=#5@Df{OYZA9;+_17z{HUS1TE%o({x*OP-DMb}`)UxJ5>B!jT z=}5UFo5gAV`RIMGC$ld;qbZ(rE~EH4@8Nxu{C~O*MqEBt?=z}n28onaj)o`q<2U_^ z4BL9}bsuPFX}zT9!D|9CWNv}pZPKIYLSVO3PTvnljbSuV6#xBfR*q8my9y;NB1pU_ z^(mQ@>>jDNnM$+}CaPEsUk-!1moyya_j7PhLU7@$D69-#pT#O#1e563bR8DterN)a z1b^%i{YaRcL6$_$=~|q0#W8UOx#P#7sF^FmvA%u{?5iDy6JNX1Sy!kM8z{K1srUj1 z_BZQIS$`5*VgC<0()(tSVzD`V9Ql!){uZD}R zZlCs@HL-&*#^>?2B8oXav;Dx641eF@hx3NE^-?(zDp`)%K3#+R|}}(*4AK9>xrie8_LXAAeO=DYA2) zj?Cy%5~_XYh9L#7o`WdHLn&Z@_N@Y|gaq2!wlsHpvsEq7uZMaeEs-N{<uuO4?)_z-?=yqopcLs9kxRg5zFSiHs^ouqd@S=sxYviIgiD)5H)jowSMa+5lB$Zk!CDc?PJ)m2q8bRsXEsG4vK;!^z;8;#%*30} zavwEe2-=n!Kxc7aR9rgR<&a7Bc{ZB*M3|jF^loNm@<=ru$A?z|Zn90Ygum~>M-uz@6b~Z z6VB<6+SSI{^7V;_0ZnQ=3qm(@QK!^4a9nW(#sh=Ow+s7fJK1hxAvLNcsyU2|P=AA!qd*_xn^Y9?NSWki zil%<76MenJ+NqLUSZSS+Idd4Dt?bZmq)&|aeON^{Usbt41x_+#>!w`g z#?z{J%Ida^HS4xRR+`t7(Qtt&Hoi)^hu4nY}79)1*I;T@AaTMNGvDdv^v zQE$;)rmyxqp?|ra-SGMsY4@aWY`XM=gTZp$s8Z7{F>&6zt@2U`u zP+C(X-n()pE%2aaXmVmeyHOMKCthA%ny)KQ5a8hKrnv0?bT ze;QPOXHQ9q>yr{WN5psmf2APFr0K93lSY<~m~_-s(0`l^O6gO-Hh3N`2*Wsf{YD;j znJaYAZ1!$|#@4K3d$ywKR#BHZ;gumymB-6RgE7dBc2b}~rS&ns83u45c6``I9&@0# zy>jV?d{yMw|K>dYgl=gISZjvKA)q-k;k5{60EUW`y`# zspTb|Jt3K1L(vQ?nl>~+3t8a0L8~;r;-@YU9Xy@hXrl1aOn>8wM)l}SNE@AJb~sOA zK&woLgeh&;3#yLThiGXPKSY#X6|Q}cwPZ^AQGd(qJ1GYJ#35W-*Oz#uM+AvJbAJ@eM=_FKXq-@BT(3!a)ZyQIOU$IR1+p?bNTGQQ}q ze_$!)Z^QP<*Nz_ByJxo&W{XS|bsuCnt;k%icQYnpa=2G&5`JQ5$r{n1VFDASf|#5b z-$`_sx@$m;_4S@5-v@Qr)V0mNi$W6NhkuG_Ef%e#%HI~9!gcix4HCv%8=`PBB%!W2 z;YCtE4=(P}9TeZ|9X)+%J$rWVQt*nBBYp^h9Dl@MZNQQpbUgP^3h?yJlVIrKMhJAV z^a_ETh{>#4dRHs64jGb*a+r96Wa!Ycof|rj(J0z=WO+=9roMOtz=!L7=(M+x_bLbz`F-s|@|AOc zR@#syJniOutcf*_)?Dx?+i`!+m&9StVOhYSzG4#vO)2zb=lnUEKV)HXOExL zbBX7NJ1%NsSpnA>M_{FzT$>z*jelrUx>@C&dn($L9#T#n)x}bwK)oFsr1tN2ZV`00 z@1W@%eqS(*`!V!jD$#N+BfuK>8Safcssg^tT%6~TPQ!8p*JkO(><>AT9bdlJfh`}x zHmzIA???I>LhrmK@4jsi1lNGHq4Je7l!EhoC-j!TX|ZKqNMQqs>N#?AzX`8X6G%)$G{X>0&V%p(D}Mk zuSZ7Rl$a1`a3QkLSLS1qfrD8-y_dHo3I6g5)%IvEr#s+>TlD5BxHgN>@kvFa$|MNp zpXE*1@D-7LNmuBh(C!5-s9nAQLBn5p#nCB3m~wSI9N3uVlj9fTDc&XQ6UXCBA8HOTI$uV z$u}~AICiK0uJ#+PqM#b%#KK_CW4=UZ^+WG+rP$m))?9*#Bv*iAj%j0h(Szp(d*nJSne19+q82k!(xH_lz4y*Yyd{{bSx7$O1bVD!vmg#q^(k~<;Hu;O3i7FKuwXUp9 zQanmG(Q#&oi6a=)ZYge&pGZVQf18eg^0u=>t=Kzfv^VXX*UeaF-`YIJOHF2aD}$_a z^DG&$vBh5ZgcGf(xr`Df@IpHDx2Pl5&o#>v(vYA7wSPKZ>B25@nl6|?sVh@+Xxvh( zjZBU`1v+reGMD`U#2M=ITEjD9uPVX%xnuc+v^;u0N>HhU%F-W6YWlcyI4v3OL~M`h zFx2IJRi>haWzw@qo>gUDwYpw;ii+cT7JeZ|iXoa!PwZ;GBa$H4BClcV3g#|-C2ti?Q%ENGeax(7=tt6ck$LESldJ`oz?bg|9Uwa9 zV%P9Big|Jm%U#=r8nRprbYMHD0zTOhnVLgHGS(+OIUq}(-q0Xq&wHMMoytqFcmB%n zpLH`{vF&%F>@zB5&1mky65kYk-8$DIzD>$*EILj;4EL0gPiv}p@H1iF6H_SS*MCd} zF;zr*CG%6kfF&dlo+W*r=mkkKw`&3U=xOR*GtBss5zP(QbHz0oBF%Dvfv zN)Rp8e`fm8mK5rxyN4b48zajL{cMa?zOc}Rr-BNOM7U@M>Nb)Ir2Ca^>wakP)J&Ug zF#&}@24NLGm-x}*;VfF$=||fQYJXxI{B^og$i%1JoWlJ4soS2;99e*In9W!Ss~2-g~mrFdzVt7y6a zxCo7@PeLQ$Zl4P#wXZ}-8PYRfD1%U9?6BC?A!)yU|aq|CRvlSyfVKGYs(V?l4tg(lfwGWrTy%nv#( z@^jL!%8g9xr1e6E#vD_!%1q6s5=)G!^E@A$fYewHh5f^2&zDrQCLVnkK44?ca;3Y8 z-VPVUp-g&5#!ABYC4Ze1K?hDxRa0t7&72}^3G%i9h$r@_VK^@D$!Mf;`p_T>;hdr0 zLQ1a<0_O<~$CS}9y+xN5?M#km07ukUVD16@RXvtY&j0Ts!v976z5M@Hf9Ez($?Rqu zABIB&qmC8k0@7wTG#kyTd+sWWQ@VK~5m)FYaY7c)?KZ`T3V+{Ip5558;K{+iyxBGY zFSxI^vsf7PI^`KMa&XR)96uQ0XV0aoX_pTPYx z1F_ac!1JvBQvrBUKsl8bfRnp_g!Fe0%+t<+aB}^R=pLH^SKByXaIX1%K4cIB3kAZ= zzpZ>aWD#uTfMESA5lK>>ACIYVK4EwKox74LE#`Lks5->!? z_T_3_@_bC?Gpfr{)7UeKW9&qX#I1Nwu$R$&!ykvJe6mv|UhzbBT zqH`uChz3D570)5V<%!H03rrpA?3emrtQ>Efm8uaq4S!Hw=j|rmtSF2fi$HS2nqK}m z`p@y08jQ*)GcnW8S3S-V1Ni0vtRQyYkSnC zn~+82=c98CwjMzl2xf>lKBB zl24$^`hR_L6F_*i0eT3Lrw_6hAtNR73XP3kaoF;d(>!O&CgCGqksOAQ5KgtIufrix zf$WFl8);YpA-H8$W$&DE{jIp@VkS+5SVX>|uCS8gF56F`2$ho!nw3(|^7SEbzU9yy zmo6?pp7O3V#1m$zAg<8Zlg-%!b8@z^$_yn7_@5_3IH zGf%30FAH_C8j$6-jUQ$xEkMM$N?VzfE*mk98Cg&=Q#`X(im5=P;;_`{BhJ2hxs@2y zN`H{qUJeeohg`G9;?#0nFapAZ%vQRW%`y0{uCl^fuA*1brd?k-l^|OZ^SR0U}dpQSol)*ni`hQ5gg5ASd09QRkY&8ROLKCsv9}UN6+2 zV!@d`^mTP}II51iam}Lj_0r3yj4AHkFIkc;bx--QrMQ^ zgF@ik)$iHL!4u)N>ie>}tmtg5o&Y1SmWmc=-)XQ&*oQIJ4!a-zws3YJWs*#a4cv!g3#aehYE?Xq(M=}ZKn>u&K1%eZ;=Avc)<+PlmR?sZq4v;`d54qKGfy02O3noL*MGpU&i!aq zpv>BP^qFaLqcg;9b0toxiSJ&Ks6%RGdYCHqnplO>ESiUe#^fI&tnP%;#9WrBL#)>T zi5Y5fr~P4cWJpDPal_|)bbPHuuan8u>aGCrH@PhHGb$V$Z?Jue73M|r;US48@px$^ zZ?*NSh{9Y(fIPQ*BWsnMYk#@CeK;Vr4S&C^HcpIBaPI=?f9o%W1&5PU`#tNQIJ!WN zXhdH%F*!=oy@*{IO3F<>jc>eM?Nx@Wvl?Loe8lfZSX-awxYtxaIjXikteuUOsjG7k zPYvS-176doT14xV?cYb*Mx>=m45jt~0y{M=ULhI)1!d|KZxK6+Cii6Ikjwkoisk-;=rxBpuA z)YeDb^&Dj$8u@Tt9dB<(buUq-=bcWJXLVYldH84|$nLFlFJ0hT5JcCC80hY7ddqJXoj5hI?G$c zG2eD;nq3i0B1=m>mQgi4Py>k1pz_z7^PuEWN0~lDZB7d2ZGW@=*X|{&pq?Vof5cRm zcZWzyKz?E9p{cS4<}Q2{w9$CgBT;xsC^Oktx(9mxU`@8~hcj;bgc?VXqufMY{O>=j_p zM@I;2?I4YjFn^9GJHAVcae+Fiqldj-=S0>&tW0g&Yte?RP%G>r165Z+s3TFlTw=#C z;7*oPaD0vxku^<%xL*XP5*GcQucwfean+xvFcaZu^p?0En-qIq<8*TuReJ}kmlbVi zr+u0Of-9Q3N`>N+_AYW|n~q`Ir8%5}wb8RVQbMnjet*4YJ$+l=!8To_8Fy<;Ev&{h zMsz`g6^=A2BWlf0(xZG=P3B~9`^VOt?kzFjIUi{5QBl;Cgp1=aqGre{HQG!&Fsp~W z7Xkx;fY;`^cHW~+kd$Ax%>wiO3WYdrH1p6<^S&q|xh~4CA!r6w<)@PS9j`|TLZN!W zP;oR?|9{}3Pax=#DF`|K*578YBzTbkalId=qtQLfGioHz4c0%NzsB{*~l%g zyyJ0AO^i4+&fFJ(p`$8V{~7-YGNd-x)Q%UX@kN0sHkd>1mID`nI!tD2##}*){+%zj zvB=d4+qAKm)QQRSdy_MI>=`!;m^UxQ9=Iq@+v|+wmNNqEJthqz0>9RjL`=?DOPSqh zoqsD@WBqF9dBkC+v3h3~!uwX_jFq)Hf_DGo>96rUCV5#z=-eh&n!nzh{a6|AFM8MN zcE2o;gPU6GtNOdkYeDEJ;wLQ=z16qIP`;;ujk($yiXo-pL!-@|NpQ*G8;6%qc5yf_S2Ccn`m!EjB(R>WYAK4bkDcCAfpDSjhtk`ZxtJn4>k!;{(^w3xy zQ(7Dz4FYW}xWseqOdhWn?YCQA`-w+XTAzuLyUYApbSiBx%m3-x6j*Ob^W(L@n`15Bj}mz{zH(osGxHkz_BPrRLk z7Lqyb{8D~5W%E@IWZt7ocHrTA*kRM(vts%o8PSSCP4evjsvR zmCfMT_%Mtu{?V7*HSASNImc}U!hgs)n<*JEK{rYodwwJZ&Ee{#Ee)fp=n?RKs0Vut z?j}fQG+a<>FK9!h9@*fS(*VLNX01QqDSr#)1A6TM zu*k2!$%%gvaer!D=AUX8G*c!GBd|N5{3=%S1b|)oR1BI#v)iMN(r_NfC~MCfXhU>i<2as`5plF^T*y6|3SR{5Nw~D zLOKwbUwi!e=Q#1pxN<}}|7`nf6Fdm2lrUJC%jAM z7^e;x;8W6L%S_F4{KH805O4;l)fUfXuErT)%MyUe8Q$xFb$|2^Ys)@+)-=!991CI= z)V#o)xXIML$ACo_PAcLZx#j9Q;2k|>fII;A>JnR4^&uCpJwuiAR9Gh_4EqdZ=k$5d zbHL6+9%E$g{f;zekC8liEOHw`sc~% z?as*m#LGW^>VNc?-s$%rpZk_PaP0r|`D6cQe9qtT z)VZj5a`g9m{B!J|jTb*glaKN(x6G~o_VfJNzZ)|^pnn+F-?whfO~#&8=4Hs7jkJs% z3*d|`_S`3yX*|xQSOHx!&(%MQlK`&)3JZKQWP8)-W!PtCo_p$BxO84X`oT_Qh`GQZ zBA^#p;0pI1NWcJU-J2%Te{A8BBJ|W{&+t`OF?vqVwYp~Qr@w@odKg=deqw~|9f!)` zGvUR5Ie*6gvKMd|&X8@}78sTn*fZ9cGiO=Ao4g&Dzqkz@S<*$SD;${xE-$i7Zt=iY zS+I|K6Gx7(>oRu+EK18RE7xKlyGAXG;6!v#^SsBLjuJ4VdB-+5>w;m0LGc-zsd+`< z{9R${P;T~L|6BsH>9%xIqt4d>^KV_G!1d=#8h`kbW%!ro1fu8aCa7hZXZVooPF!|y zxCFT_@F0*GsuXJdi3FPiHa`NJXSteZfqgQ6F09rdg`t?!Pc8oGTh=F#KM{&X*UA3k zRsgZY+;aCfK76L}ef^O!{hwU;i=*4<^zh%9{q_5sqXfqe^+*0senJiQZ>;^zUCbH( z$$xXofBzf5d>a0ZH}Rjij?w?b>c4%v_|a?s_xW!w{2Omi%$N+%G4)TUZ>|gwGaHC? z7Vs?l(BATr8SeBe`J63j;3WOwd9L$I#q9^lnNP*@Rp87K5BgqqcZ&l)E7rQg0V)=I zYGj=l7V5m0qnPFyg>`CP61ZC9ahs5a4}WDeJcGEo!97=E`Mbs1wuuLP3s##Gfy{vG z3|xm|S=&f}P5bUUwty|yk`t#Y>$K*HD+0g2T~F?na}RYS-WX7OLapn{V;4L)CFYcM z4p5QQ%9Ca9oOa?4b0cpZIc3lfoXmXGpaG28;HwexsOa(nALK;R_zZXiLO%fqcz+4T zZ6p5WtN5pjE6af03aX5RNj~RazViP3?%aQJ#h)DY{F4jpKlz1e{Qu;uzkd4fxlFnL zWAne{n(@E>ilx7C^fJ2tasK+}{P%C3Q@j{8?7#i%-2aW?Z*F3QPJfi&IBvaV;2p7U zs?pJ8(L8ZA?U_-|-Dd4w0@uy~`+p?Oq39zw#@O>5bOzC!a~BxL*|Y{xrOQXXp%E-D z=hOxF`B4cZ8z;G1VAxY{-D`jG=TE!z3!kWw?+U6 zoXM(u?Y2<+5(|6|{6D2h383#T8ruuc#1PHHfYJat@-cT^mRI)b zG`@vLH$|K~z}*mYQ4zs6`k{Z{m3(SSYYGjr4HVdcNVzYm;TIMO1lN&d2)JuPzVb^n z@@k$%MAYqm0E3tS#ykssyMMX$t_&lfs0&WuNw2eaY8P^e7XU@SWVaax*4mQ3Y>{pb zA8!7awMd?8{+BNL%Uni(>J*tSU;Gm*J*4>K$p_E-zUNf^FZf3{r_bjpn!`(P{@6xc zJ(JTob39G#~hZU_?uj98)^=pa5#!VX9>hqby(xukkhzdMaK!h&J?kP%Yb*n*28 zs3>Y^WVXCO4I2bfAZXV2sN5bXWTtbuY=EZ@f+@y9g+UlT&Fcu${=31O55l85LX4St zmGPV<2u3#M%J!;bmVYx_ms7_~Hk4 z(@iAi0SS1BfLcad?&q{V9-qV4-z_PZr9)kHrS^lt8XzN*dZQGUn}>E%?Z=AAiW*NcHh;u{DISdYSN1Gv%bI zMA*(K7OAbHnjzsDq$zJoE*UL~y`X+KMMSYEDGIps$+ke8Q4rtc z^f5$BLwyT6!52ehXJ-LNowK7Jln6n+Kv1SC5=`d34cr-ia7@5rfStt47nutOcBM)d zJ$2VF{eQ)ObWi7xr9Zarmu%s$vL8An^FF?8@Go1M6FC}qq~$R^%bT$ zjF{2P1~D!Z;I8u3G90Gw@G)IL&KU?kH!;g;7)^QWHawVaNX{Y`v{WzH1_@>$$sLsQ z6n{jz)E|vw39h~ezDJVXEGejqskGWg_bFotYM%tR{rmwTS-2?9`NKq`W0g~cwYRF< zokzg7HN$#;IpLQB4V0hgzoe&*4Y+rNg;k z!)|_?Z~i%0zjT)uGLy+){P&wLRNeX0=YMD4axZ<_v|l!p$o|lUum9nX4E<#f7w#`w zrNf_c(fZ-D@r1>qwu^)#oE6C+>wJqku6^c69_I4TbAC$4LZI7{H@758Q{p zuyhb9QF?*~)WE?s^B2^jOE7T=o`=}le3T3h4Ko%6_4E3$LJ>h+-WXy_yNO&1(SO6p zD}sd;Y#%q96xPKDZbD8I)b}S;P-@8tBTli^ZexNtCr!GfqBbyO2|lEEDl}$>DaX=2 zSfUC-bEJVNEko`W^M{MxpkCrUh~}vFG0)L}Q$*O#C4$W;$i6>}0^qbXVrQr%AowTZt^tC%wj3?YhWdJ5ZirO6>E`I7+!%SZp+oJ6g0|qiv+Izd>s{uH z-|KzC#KjMeLNEkiCl)br9VPFqu;ZgQIAq^G)Q=3yhTpc;;fvS4u!dA&Q-5E!-Am5> z@S-K2^2vSttc`C>>c@V2$ymip=10Prldz){Vd;WYK0e!Ikv!Qb^)VIF5R8I!S0Ez- zGV%=*-U%1ST@<8?hi+q_ee1~|9)=7Vxz;BL_@d9Zw2{aUfJDoBVv^K=LO38xYDF-% zicM}FGmnzOh_Ve#ePe(55G368M)?IU%VN@piMWzf_1CX)G(M}JW_#*bfeNOpSc zOW(X?Nmq};Bm12#_ALk0{A9y#J@e8%-XnVhL7zVTs~zk|<`_ulHt>u>V<%A{1jtJ1 z9<}Zb^nkM?lwM{C^2$IE8K}`mTud2SWH+DIA>vwy#AxZAss%LGEbqHh+<0>k;%1WZ z!yQyhG8_?vqZ>hW&wnxPOAJSfOpxqKavoMTEQK zogPFzN8Ev@P2Ce)zUM z{(`G?y62BI-0{(O9T98Ok1yVM;mf~#w(fc-JARc(uPf{Pw&555D4o>tuSh>wp=_8h zdntWl+<)MQ*?*Unrz>;+4!|kgT^zX2s$Vn;;_B37?oAK|sA$Mzp9~ zCyM=$XDiBG+X~%{(SS>l8)MLxw$LyQIMSOj(uK>+HVZM5V^3sDR4{`a(hCSmkjw@j zf+=}Cw{k!jj+-?`(&fyCMsNfv#e%uedK;5RTl7HhzJII;YUzbS)yFdJb5RAl6oILO z#G-!qsPY`aF~J$ZO}QjcBAW;4Mmr-LmkZL;&(5xCvYpAZxm%3ICeADqMHGQ_C}=Kj zM23ZAbWw)DU5Wzj=!poKL75uLHwYOuKw-Sofh{PaQjpMd6{d$w!3}h{%pI!qbB1)0 zAy~ix6@P-w4Y5a}E?hGDG=p*k`8FjYrMSs95!7V7rax;9eG_6Bnu&=i(jnRLA&cQ7#b*MivUBqcCPB zO2dJXfdr)pvpGQ`FlBv3ZS`ihk)`W+j%1`{CW4|AW|{5GrrcpciM?>GB}sb*3PQaB z1DfmGR7S*#M$@@i&NM+IEF$F>xLu|$nt${jVu9gCK?pGFzBY#iyL!!E;|=P!!*E`5 zO3gX($^`A` z=`<*|$8qY&N}r?Zr1U3<6r%-Q%zyLDinKFb3}PUtZm7O|n>HDNqFKlHRg5@8Chr?= ze%viuk(n(sA)neLA8E9qks-V2apzavxw8bl1ax&Dgi|FSqNjZLnSV9!I;1g@UpC}x zUJCZKO&1+@sChqShrh^zf7XE#IzOlL*@QEtpnQg8wJujK0oMS@l#RlTV1K(v#^W0n zE@pyJ94c8+JH(nka$~yI#qFSTS0`9|`OA`{MRn2PY$e6fgHY2U2aZJDfS{Q`_1P37 z6VLNoP`Ek>6;rAxNX`g?(S9~r>MC|B(je^9rB#Y;av06&9i|nItT``TlC#`t(!!A0 z(#$2MNA0@d#tkGh5YmGVuYWa2?+x6e!fQ>5G-WS1A&uZ^FZ{A8RowR*uT^W~^xFs3 zAK(6;qWyb3u)|+F{O%b$e8rI6;_q?7SM26nr+?!t&$hhmO>0k1)4L&=;kT8mqw~QV z95*-!+*nd4UhZnX^;0uUQySg;q|Zh!pStP`3-dTiF|T}vH48-TgMS=_dJ{3_Z<2@R z;ZR@NhipWEV+fT#PfdL>i#FI5m{0F>UJ-=Si;DfocC@h|b*(6k5l@AuC9bPiK5#Ew zATZbF+dXd4Wr19m5bL}fkEYHeBe;cYJj`8#ml!y;sSrx!IfSP+2BCu#4al48MXUP) zMF_;m08#B${r)5jGJgw-`U6Wf|FzN!ogbHc_BIGI`~d8MRVmzRaTuH}<;Q`-9c8Lw zZ9WQ~IKbaxCx4Htso$^l5r6vzf3dkbzd`BDZ=L$G*S;{*j^6#k5B`iFcX+6JZmOR9 z!ei5Kf6{Ba^}KYyFa@yRDQ6fpZm2K< zwi;1ow+OZYQ7Sy?vpkGA#@Yk5XmGG;;>p?PoM(qvtgd z)UkovrWiR$@*N099nzo>j|m1Acn1`whf#+kL@1i;L4{G^ZjACwJPN7aR6a!wZ269q zoT9}-)2v1)JZsQYaPNsgz-+w=__g^;H@GrQ4Nd#;4;5#;> zY&nG^MSsZUKk@jd9BQ(EiuamGbTPv(O#cm@?Nafc6GUg0XtctT`wBN!d18c*T#R?I z)AmDym$9GbLBvH64n$0Nz0+8wBhHaaD~}Z0xd~YS!gWzNO#-6DRP?PRU>Yq~;PpM- zU9hv&)_)^YTsp)Rv_7!lJdBC3;aYpN8D$JjkGOR==HmypLZt*v~xwV5E=i71ztm zKVmOk4omjJ(|>U8rw{Ow8=b6p#rrzB^W`Ug@#Tvr1=XxJd+D~U{jvi+;SRrjgQu(; zx__$OA{)9QE6@lkL+Jou$PcRx>!XoKU>NWzHb}4z=uVFs?TUns^*l`-C=XVyVsiAL z@m~JmnQcGa22P+yF6N}UyRZ<#k=Eez}`KsQ57$gmr{=ltAA2?tVO@oEf;? z>ZHrrYP?jiue=mxSE{@`3c3axCEt>x2p=i#a`FDOU4|ZZPrvf=)V0S;$C<2PFRY^D z_0Dhnr+fpVY_8w7=XZQLeagO0W`8o|;k0;Scgo*=>EY6y*f34kr|FU5EJkuR4oGT4 z=-#&db4X=`kW8t{O9KrfXTj%)!rL0;%50O>j!VOqsl&rOTZ>UZ4`PLasXSONGh=Oy z|HMRe7-^;s)7Z{=Y(0bze0=9Ydi5K*M`+vHSnC@Kn=7<~%_Gmj>BDM91b^|&Q9hp) z)VafJ*J(j8EVMpi*lFm}iM1juBC6*rhso$WfwSg9xTeKX!f8WZqXU8g4+v&@E=b&gfewR_4T9&^IDe6N*50MYXeou_Z9& zy2|qkVrn-v25$L~y8<`vWs!^F0)dfC_#C*)7!nNgOgB+QfiqnR0DnCe8hhL}ZoHik zaQB9?lL>eRQa(kBk;o_&<$++|bdvtTUW{5S839S>ri$l^vEZ162A2j3bi*k!74uVm z-B8&VQrQ>g6wuYHUFiHe?T;LdzsBhgnfw*!{K*UWW4^z2MYo1tw#qM@Kw&{0FBrdc zkjf?a(orhT_Eo={|9}2JT^MQI~&u?Z=|C?@Pi;M9 zJwkzF#4Wq8^5zn>j@-jsz2Qs_y@~Wfg+s4F;WoTdxD70L=bsb73#E^LV(DWHhnM=+kbiW0=q#5v9W* zRr#eU5mO9;eOixfO@LK|Ph&@_c%6$daNR1o1%>2#pm~T>ig|y%MxwrLkVcVoZSGrw zwbhYAIetm3}V9z%OgK#Dk$M1!6&X|L&UU}BCZ&)6DHl}p4$+s*^=+F$JU!Wbl&&;7b5h8Wdc{oh3r~oZ-iXviGSCIr z^=boUNPK=5e+OFV>vX`me4sRP_P1_)bkgabO+_x@#T*x@ zR@#4&h)gq?s`n=xT#P>HK{DX()N7@RM=;PvpA2fma+(?k>Jl8;2m`?|bqvV1D-!rj zmI~uQ7_($WrgxW&27CcJ!6S^CQY6bO9kQoA*U6Y4S^qoN>d$6Xk_1mua;;CL?42Tdmmied3D}f1 z^37fphsA>3`H|-;-ws4pQYlQ3y5(@GFF!ILB*#SLwhpd`$V$igIHL#!6^X7^K|_Bd z&>B}r+{N;M=|ts5@OrEVGamwRvm2K!lc>*ScBa8J(?c~b_Z<>b(?sbON!Mb0sOio0 zY~s>Koa-RnjYBT(_7OXKR0UQaa+ODi1^0AtO(PeGuu;&ZJb+voJoi%MJ{XahB+Z=T za&6`i2T?XdO_f3pYYJ6DL`=ex;Y^Qt~9`G?*4^`4f> z&pRkQOvRCW$-eQBzv;5S^6Y~TpFYeBL;JB|kcT_}L&>opnJ4Je-}s5M4^@9|2Q2v6 z5!AA9*xD=sE zkfKh>LR;C0ZCRtGOKl`<$&r6AHA_cYVoMj9oKYgUfko<(1_l8a_=x$zN27%yXkZpv z={8jFMRMmKranY&>u^di&k)4Z4@;;#Qs`TyW@ z3TuZj8=2rzkQ5=H1Vp5RlnBy(hDEe+ki;q_th1%svC+wIOdX8V&NhG4!`eeoY1u+y z3Q-6$#nA8>%c)1zdmc!f2?9g!QxW7PMXjVLul92S9&j2GDU{E*Cn$<3iF}0(8N%JM zM!NyeERshjvL70_8L4*z5>R=o$`*gVzqtpGzqS#2eQXd3I4EobjePR6chGC+A_&2+ z(Nx$s7-)3PqX817Nnw9?!=`5K#M!~Jat=~XNRWA&oLtFh%Ke5&nwH_B@vsgecJrwH z{wQW9-Fuw_v@S5|yMU(n zBw!6P1(sX4+I_y!g{s1z-7Dk(u3Bq<`BRUYH0otj?dPJ7PC>#IWF^1` z!sgQv=oKDH`mTRQ@z5W=rrNPygNuGXKB>azsuAvUlPlEmi+t@M^)(Ly^a`qZulKwk zA+SlW*XteBE=POu!(sqf0+yl%6el!ZF^rr$fFDJ7iH_bnaX>RBP$daEF?Nm<2e6eG zUA zu;8tF`^s)04+y`~BWS*a(u;N&OM<&&k|v=z-Y$`%lKstt!45KoMFRp%+{?kffo;Z{O>mwM!-tfdFGfrM-BH?7g4k~st-2dH(t zv^G$WYw>>}*nknlBCo+cCb6|iFg>u~ZYLPaNJlB#AV`8*)FkH#Hl(i~SInV^M4p~% zalUe(igc0J{8e9wm4}+L4an|37x4tEalY7Th$TLvG)1+jRb(0)5}#nh92j(Ukd-h7 zXqCj}rUrJbp&{u6AUYNj=)Jp_ZNTNt&8gA2yWf8-Z}}Yoq!V!Yz#k!wiX=XkX?&6p z-d0KIOVCleijOO1gi=>yc`R==L^Y$4sDU5Tgo_%sB+?6sS`XJC4m_`Ors_!GRzq69 z#+2ACDr>$ic!4Fxigu+Z=-vSDlOXZC-x_ogB)&vpl`{tzZ?VnTMM8LQT!@Y}@KY4$ zB0_(+JUSjz)-UrU#FFb?S_8crts%Kye$Sn=0n>57;_bavEXoH6I~!o?8q?T;IRH$v z6yFr9E1+}&D0RTJi-E+U6wiK+Njeb}3?{mI9R#UAwlYn3j%(-^yct#yF&05t6k?^YcP>34AlYbDcnFBmMttEdX zu1au(bW|PE4Fr4uqBw!55k2t2z14`-VLwP~9+ZJzmNoQ`Y$$Fua_g|Kr(xgwD%-HQ zENkSw%6VOL2zftF>wtw^&1+;!8s1hlAfna?ztTArF|Gr^R%A;EQpFF%icwl~3VeXo z_~D|eL{WHojo`x6nn>u+d7$_gk<@>A)ltOH>+wkv%`;X1g8`Os`aAD12mEf`0V)(9 zs=46?T6KIlzzqN_m=;QideRd(Bpe`5z!xuwT?A!*@ZMiWU2wWhR#T0^`*;?{^1 zI0S%P1i|@m&^ey6=keTh!UiFI*c_+3J* zZCI$Qbz3{w6*WR%>oQ5{ElRK}oGYL#P%@^x)mW=pizPLzZcJ;u5{h3~3-=oI)@W&- zgagU2gc_#Tngb5vAXRVE^8$bAHH_4FcuesJWg=B;U=yVVP@qa5RcBELoY@oX*iBt@ zb%kvM7*ziX$DxSf`c`ANt~nE`e->^VXuI;G<52o6SK}K!%iFd=MOR)q4mF?UYJ5dk zZWnEX!>;`9V3?};bT;tmU?ggk3r5}N98WskD?B1zO-d4dh-$Y{IyX@BuxL&Qo~e5GHZv9DBz zAl4ItRz_zrPmw`{a3&H>N#KZLGmy9F#%*x{!Krh<)B|*JAqRE#vVoE z0gdf~-fEK8j7Sl*|Gt0!N_M0RajQvIJ9##BHZW3iq{jK*>Inyc4-5&BckuB*`ih4H zBvuG_Mt2fAeaMbsda;AnGnKqf47Ta?Xz1bkAP@vmq~_HxS_x<^^-kY%r4x_OdyIwv z!ZqB8JxFY)a0Mf+-94}U`#lVgO{QfBWm_O6JGcaX1&6n_s11K*V<^3Rfg zb!S&t|HL!YI?;cpon!EW(vztX@C4HhYxUJ1?a|9yO=MjIBsIzoyy*0yZWVBU&F5iG z6yIpSXt2aec4H7h;hA0A26W_X;LULqufmdPwuClOdgVD+8-Q3BcSfYLH6;|k%GB6~ ziPBkLw1%|--HJ|~Ub=IkUK}{2t^4AwHT$kYjmZcx)&>53aD}{hS+pYn5P_CgFSI0*#dX)95*d&-Hiun)h@AF4NeHPYj7Q_sUwBGCVr?&iC^T$Dy{eM}F1u-&@0U z_ixsUS|7D2sU*-N1grn&+Fdk#_!p=L{o*X**T7 z(mQ#w`R9ykI`Av< z6hAK7wgo_jI*zb9)}O~=+X8=GFVykt`jg&N$MKW?dHke*9zW@y$8G!Q_PuKBTdj+_ zmdby}a8>_Tj^iQ#L=;_8v=31BYe!crI)JF7%N$4H1bRoe3>6)qC*7#7)&3Muy1k=( z6{>7FB)a3MJI)=4q1W}IJAPh2Jn4lxE}!(>@$pID9iN``-SJ1)4nS13V~th6C1RcE>H5WAbkF{yC!KuuKkDdi&;Ebw9o^>H{}-LIJ7H?KKXM~e_8F1ans&U? zsJWlxyX4AFHQ95Gs(U5~cl630UVPJQ%I5o_f30awzv;Jys$aYQk{!eD8R1=W)h4s! z>zO@4NYU@YCw)zsqJ8h`|E8_$n$vIEy=wd7n|9XK`+LopBUSgCJwedXtG1KTwWfdc z>TEDP>F<&k^{r-9)%{H?RJ#}7w4`gtH*M!n^?r|I-nD!B8W(JIb+@l^VcC=ZH6}&Z z?r&PFtNWX_QtkMmZQB;hr{7sax~Wy{UW*krqjc7a7k7z`L_!@c=kom>1`bsOBN z^E(F>PNj9oQK952#Vm13_3maFIJAN^4-!W(V0rpQn*{g9k_L<7;vFkEjt5W}Ui7>V zAkdGiBtEX{1}N~+q-vB*qC}~AbEw+??+AtlJFdzp#OhoZu#{*4h6|ZMXD|V_px(odU?ou>45{jAfu^vL=nf`_ zK%h_6u9ZZ`S3B;{{=&OD4<*V$=dj`^1$_#UM2A;Dq`KeX|5Sgiej|T%Jpr_TjZ+Io zsm4JAtU)AH`!m>bRIB!90wvw}3iMEPUHepf)RzDVRa-m?^}H>hlcL{pq^WUTgBt(Q z_jqmvG-}*^Q1`iD^Z(wwKj*XSf7hFgT-ATRg+%hXuK)I2_iy!L zsjmN9{_Xms^C!RltM*|~{TQfmTEEssD%5Y+uc4yfs`2RPp6%is1+V)wAeG{u>a|X! zI(O6-BIaM#pQ-rs)lT>|zj&(pZ2*<3otmmA>*T-`9I5F4kShwY%;AvtMVs|Mzut zDvs@c*z3h^`M<*AvFrbj?s|P|kGuc)P!$}XSLCh``R#u{Mw<3N>TC8Ne{Pqn_Rt-C zZJ4{&X2$m?TTq-S#n*-0dP?<2^j`!5zyHt)KnTu=IAsJ;hA(4UJzV4oR>FwB;Q>=` zit7ajQ+$kE5y%ZL$^+f3$&FcN#6K5SzL@s8J`du}8$o3_x}2|P<2+*XCLg|KBXU@I zkIm<}-^_o9tPNMa)EK<&dRvq4%#4LN;#rDW0kgaf51uX!N6;ZQCe`Yp{RzZ#%m6H89EP<4o;PZ2iwob$u_k z$87et%|mT_Ja3Zg?YzwkHVn-7b~qW-v+Z0r z;?3o_O^tP}J6PV|@-knokB8pdWVxG=!RJzLS5`4tbbZ&} zbuP{bRkkLlm*u;sEd)mXNK6C-=+ICN29DKpha)zw0N64+N|=Rj^K|k?KMsRH17k3@ z%h{vc(v9a^H5rii4Khq>gfm@FY-n{Y>KT8ze)r4IgFCm%lb=|tbj%lx^-15t58duZ zlXx>ruNQ9kKQ&7u;}l1r1h(~X50APx$Qr$QXibN(ebkB8<`W{ zLSS!n`@XWJ?hE`eJD%Zj7dAD~cu5glw-u&GdtcVxhQ5y-568xFbj_dN;Wn>*LquZ7?{*}F<;5y5aA}*K6jhUtCbADK9 z$CXB!HJdEul*G~7ExuH`D`}E$?Y(nZ0_OtR4$E6Z@=+APf;73ZL_k<<0)ES z!z1&P)FtH!hSkVAdFSS6JGq%^CQJ&9lS04+8`PP51-LZYLM^wKyD=EA(ldVx*SqW- z-p&S?#O_bzII;Tpf(|U8_PC+Pei~u(!apNxbrIS0M*7n&d@PNDASXCnKj)UST;@jD@@b9+>8eznfHR;BhE zL-|=I#Srt6Rj#-Af*QPI5?z0Lh|J$K!F2i=IF1%X@t#>xb6%6p!5rx4dQqJsW8}A! z{$!18|B{ehVrj;e_xXG7c#aVq{qk|G=0zHx^W#mleF^Kx_#D4o-wMC)TCL{YT`jJY zb5X}$L#9Q4J^f5BF!e4 z?XTCz)zJHxhGBF)uS&1^EvM((cc+e^MI z4Ow1ho5y82eP@5gc(W@(cPy7_kkQG$N-caecNcN776v(rnUOeUdJ=oQIy&4~7w_x$ zA(LJl^!xNF7{(P`K_C%5Rv?I6!Rani-I zdVGv-n1|)@b{3CaTIxyNA1CGL#;RZiebbK@$wz{Xakz`D)w{pfk2CwcU8j9n(`9+Q zRBX6Qdu4xr;k>7}*+)NLSL4CwdFYI86|O%A_nh7j@7ddB8}z+P5v?3&mlcoM@m5;P z%bBl^zWbhX<5s>+)5+#O80pPj4)-_YM|a;JVeQf6w_9|54E^(6M29k}-Ee6wd2sv~ zUd~YJ_~x%3mH)nsFQ>9k%3iuSwXWe$hKKv?wtj!J(#7uD_xNZxOpe)fGk8RDGrA=9 zTTDWh?e1si;oY3`n{Evqw63Pj{^K*6XudwF;>{+zXwbK|o2B8xB>h06vuS&4{=OK6 z%VY!hcrBaLczQ6r<0I@1ZzP=%YhCaS*`}l|$~EeH+<;8OAIXu&OeD>^=o4ufo>axZgycUTkAh;FD3YO*9NrG!Lr5thpN1Cghv*Oxx9b&<-c*M@$Bp ztVdTc%zTEDa)bTB1^XW1oAbpwBy^$|*5iM22;Q-1Qj)GbC$+ZDXpN5b)Ub=UWmx3P zusAQn;sd zmdPYIC)N8sm+#oe_kgz7rCEl{dOJullr88yzc1o^w6S46%{g(ioUE}8TiEaL`?!C; za!+0|dAyeoYc!z}KVCU%mF4(!ocTWLj^$8j^b>0ZIN7C!~U&kwoH7MAT{<8FT9;%cTJ zhc@@xsbx-)Hg#8ITWzoYV9>fLT*ZGzWW6$+lI?X<2XC5u?0T+gd75Om!AE4bdnbP1 z_S;!-@zGS1yX)Qb*luTj-g|BOF@8HuhxLBZ&lc%b<~cJ;U$f@~2it7^rcmr>3r@E-p`M>X&PpyP41t1=}sF(&aP-?`Ebp`9?%#50 ze;k_k>v^}Wi=nj}pGE`Sse8fx?q$<#+q+CJ%cxiP-=@uOlkNBO`#dqj0jmf5kHH|x zYIl7U{ewMNX$RQdiiV#Vy`;V4=7i(aJj_LqvFn|j8=H-( zzdqXb5!om!;zKpeJ{-86=c#|c+L6I!k*2Vow0tl-ZQZ%kcQ@L`okh#J7S82)Fa_wbBg4<3d<%vj@4a|5xvj?pdYky#Je=b()&``v8n1u+M^!df__255 z`TnDoY@cm>X*+7C}lKF-C?C7RkKNiO7NnwEnk+Mk7gc|RB>vGbT! z&KTgaNIg3_Jr??hbCwwrY4*0*#V02dd9)A1wcZ ze;i6<8D>+=hDls+`$IfzHjiR=JfC{{^uA~7t9hI4M|1xz-0**ru5&F`-@aNrf;=7e z_X~fQyAyL@*Bfhp9DVdg)0ITYeR#a&1H9^c^PWx~3-o?1FGF{ETD@O_P0@szKG?4| zE6t+$X!@|*!S!IEu_>m@_ue^*PFhqDYq7H9!Mm|@*wwr~R&R^!;miI?Yi`&S=_45R z=VwwufSIJj&3aXxo8j(avnL!lr2Ls0eW$LcyK?&)@E@qzm9vpRaW zSv8K5^LlRwY<#ue2I2Tvj!ZbZSkcc-`xXt2@eqs4a96azUQ&gy#oF)wF<9aa1L&TyJ3>i=){ z-elWRWoduVd>{t2LNIjtzVBxOfzt>fiJqs=g?2=obI*S>|C^PSYt^74279xGz%Vzw zF*CQ%QErBczkS4x1-kO|mHS8(Yjy*)bybz8pi%s zvvkfbL+mY-)@S!)ADuvWLM}}wl)HHC-tOY+9cO=@UF;j6df~)y#}}ks(Z!3+XqkfS zLx&zxH0CURV0cT=%gjxLM1%LLj-}U)n(V3^YWdEQ= zshqk#mm(r>9_>3yCj^TE=U8>IVjZOV*g|+QEzxFrRdk#>%I%u-6CpO>O@r_1q zsj2ALE1Dk_;L%@ViF5I%@QLSA`D|!X>@7Y&e0*`(4WG#C;z{LoMs%e1tSeh6Z2Mf3 zi6|Z~5@qO>JoJ@E`0_2?8S9Q*r{|0(WXozRzbwy!lJ!l+76%zkjNW-|42kh~j_6h_LKV zY8$@n!P^g?&(G2Mk8Ac2nqq1Qay{;gPR0y&As2dscx=S%I#IL`x9E@Jg7)r=*_KlY3(0=yEmp$}jc!e{EhqdJv(FvUw5 zItC$8?y?fq_K-L6wbUv5s_n~kH_@%0tG&uqy=s(lYcA}qP4L@#Ssh6v$N9D61KNrC z_V5VkA4KG@G*$(V;!dRULynUNJ{axpS=HM?Wiu_&W!gxBzv#0b)+2vU?fPDbI_#AW&bOv}Z#H4!h>ykownI&xh7 zNzSMT62hIF)Lri{HeV`<=Tpxj>f%=$XL8KWpEpTt10~O?b09pntJ5od&oA_@KiMH+ zKdT|-2(@0Px-rc?D=~jYewN&uRBBqklq#ubN&ocCe0$L4=u1(7kFHYQv|tY3tRrN>ir31DPR@KjK22LaADF_sf4-hC?CO+{ z={6vXmy-48MZT0~G!e<4)_HxecOo%9$7T1G%>Q8fB3&;SXYYSXx*J`59T^iZMm`}X z`rwhZ3=Ax49^-NGUdxUBv|9dnDH=U4b91E~>$5yRYvq>@<~7=cMmJQm4oXQgMsAC% zc7LuIn0?@q7tIIdyIe}*z}_PzSF zymo&qn~s)I98SuMAT&Xq3mTb5qd9lXs z>YMBZdH}NVOJw|_dKjja*mmv`G*dggwbH8*W1TKozCc`GoVNyMX2;xZQuZCw z4^!`>bn5(GuPj9?FI;!H?jcS1t{qfbO5j%NNx^@rG%vwNbC=%g({J_*!K^QHB1wOtjT-3&HCA<^bUK!&i+8gKWaGTF@t#dKJlWB6iAyTNw0d(j=i~ z3hK~e*j@x@Qa_Ma-=60_^f1l#)=bk#2Yz~p zHg%R*I7F>GL@7jj!HKZGV+jl;`FSsNai(-`u23$+B@eZP_x(eWl0Dp&d23@wNsxbW zW~g;Wrp_n5B+&{hYOEPgu0C-oKi3(TNT0l4Uf4*`Ua?HietcuYgwLdp-LsySu5V+q zjrsnvGz%GHCuS8c5)Pqb$cbXwe4bUxTBJ9Q%CKZoj^#I)#>)a3Ftn{;br9f~dCU9l z-LjCF#_^>Z-f`U$V8eJ$5slIY^lpFPQ1i{`i9{pzFoeE{Ih;j|PGy6*C^X$cMcy*% z7y$4OtS9kbxSr=<{~o9G{=_LC@K2nQaep=d{?8+nPPj8?px1tVpd=ffoOO zKmoXZcO`xk23_|lhFz4B8Fq$>rTKdi{TnidKM~JDzMtSu=Y1Z1H$YNm+FXAxW&aBd z%01Fw8xeo+R&+CByJY9D;VJBL;&v0syyIPp4_PdACl~Y^Zx+5^B~qC2MEnJX0sv_7 zPe7V?qtpd_0XgVVw+6>-%W(`4n7p$6j0A|A0dQz`nax z757KTb-mv5UYauZeBSdWa&v!b3QDL~eYiCzty4c~_uY4S-cLe$M+x5K2@*XopM5_n z&yTbXncA5stKS|yxby1nE-E9Sl zDtX128*`Z7|B|#sSL4{nijwgen_1Mf4%^^$^X<;foe)EDciZTry`q2jqnGH(v?F~< zld86*i2i20wTy;#CaHSwCL13kevna@y(6aZnNVPHB1ms9gWT)AH4@8~JkyX9@%Cn& zWJ?jVCT}l)tw~-4U71n?x0Br-?^pPd!ko?C&l%-5nM`E$#^YsyCQPiS z?)E?zlgw8J9kojDmMnjsxyM}I#$LbqZgsHAjf#{p90=7j>zj3Z8=bi~JMb#;=_PJW zhD6#4Du-(Oh_~p`Uk7h~t3lZ&ZGI;$Lm9(~ijN=~v59ezD@iu0NxO^gpnah|lbexy z-F2r|QZl^AeHU$v)K@`Iz0SI?&hQ?)0aX_2HYKizdh;;9Zd89;dR)UWO?xfzNr1=f zp;roD9MPBeE1E@m>Rj4zrh<%Vr+d26i{U~!2|pxfU}3$5H9g%*PDSG4ajf^T= zBDL1qsNJ|eYaw9*e1i$2|1LzLrkAhn}0Ya)LYx# z{Twd(0UH}goi<{P90uuOD87f;S4%|Ntdn2kq1h8T$_3l?_p4WR*+Cuhbc58oMybE% zQblq4Oc6}O?TU(F`ioFs{6&&0Z0{nD;hxlufnCA9N-|kvpqBmt^zaEy2c2YM%_8Hz zkQ;_Pr$&F|W-sNyXN?5JbcVwqac-ZZ9KT5xcbo2^uU2uU=yBDFTKRZB`iIV=WAkzZ zVO`C@9JKfrzk^o|ROyPokHOW{DLVc5P{2@jv8F;X9My2=NvJ&JDP7KL%)J<)B2Up7BE zEY(8=wTT)b(JD20RDd^Kd82a88UtCFGl9~|8iO~g@}!s$5@`%*aiYyS&FzFGP?Z*{i>IlMNd}T!70D*1*yUqL8lVAlRFqje!1L~Xyi#vz2Eq{-&{gNny@k) zoJtg<6~i{whMHaU)L*h35S+sC5BaC3+Xha zQ)Vk#2XX2b@EwEc>k+$+y19x&#WLxn3obt~fOY$geR@4YM0=*rVl+Cn88(wuye^sY z(yRF9Jj^#XWHLgj_aiNDPgSaDaV8h92nJ;?sN-hHe0T{E7GifV08a^s43g7HcA3cMi<%{fc)ss;$c8PepHuk;q{}K zx9O>AIpr+9EETuChxZu9$ulw#>!UCrH5N9yM{Yj!p175zzA3blQ1c`PmhOKE9ks5P zww-%TycI`7HQR*Gv!&tUM|VY+*j0dtV1N5J-tQ-xxbHh1r4L_+&k6%7Pi&Mo_$<4W za6&H-&)8+bTY@M_YCTx7_j*J@m=+Q+b00MBq^KUW2X75b@NC@DAo9yrt}dYIK}pB; z$=xTpxL`2UHSb{|#N7Iyo1cHtZ{_~Jz7L!4ts{<`w9|t}TQ#(ryQiu+v~{bT^wUg zNIZWn(N|TQ6-TmeQs6ul1TlFCBKGocIkQ~W+$32VaKIBYo+fIEI#_?^!YvG6lq>e~ zy&3sbFF)&157pf*PzLwuvi7r)N%<*>c*%?!Di{@o-q!6xk+5Ai)-ueoUwj18!ArRx z56ayI>YiV3g>F$Zz9kIet*?tF4lo^=Eq-Lw98P=#-lOmW7b%u1glZmjTRx1br&^wA ztw&}*DOCm!Bv&7(`+|RuMjfMgFBBbn^NHXatTr;Q=oV_dk4J!^LM(Ba$SxGMOI~&zWH)8lRi!|_LD%Ll; z?V!yJKOA;Q$l5Hx9`6h3hUqazw~45rx{V~&T#X>jDj`R$?t_2MqgQ*oWUDvUUfF?I z-fZv=UQSJt7dz-GsumI}SKq=2SHza=ULq0}W$wLq?0v5>iQOI-A3EV2 zU#0mHxN)(|A>Nv#_6drqzf8A-8ByFPJVUiITPm~aN^!D0vTrgkhrwF}3)PWjSRit* z@_sML*T+Z{z7%uHR?0HrJMA?V^oD(|$zEkK8DG{H(~DasOI=L)UmJhY3&nK->F z`%h-}TbC`>sta>ZPUxa@SRE^~BJSvi?r*d5S*Iem2?l4Z{ACpRgK-W6`IuuRFO;`D z<7YfyBO`e(G!jtJo@V!_%@?;EaV|IdwZX+16NPFVgP^rwLqQ|XEw)w$f`u3R`G&%Q zJU&5t)y04NRFRf}+a^vI(DY+N%~&z0SH&BNL3QzEFu4N0J2m7t zd~ulg+TGjMD~Ih$eA0Vs+v$rn#Se3TCFu)}a&UiBRY9ABY$%rI!kCke*vVQPno@5E zRxn-dnZdj7vkT`eo+S-YjP%-|^!MUc-fuI`z1Jtc6<(9}yFTp1UKoOHpewG76+~Vn zsqHJ%F!A4LV{K5E~uR3&&G|~&k7CP(d$%K?2zNoY$DLPE9?oa?xDOV=v8Es z2i-Vi!WzB$l(>;8}3#FJ&@PRdz*da{#J^WB&BJ$6A zBB6JKMe=!S(iE#awdqlJX}Wht=p)-g)npHgqQ2~vSJJ+RAa5NFI5EBgr*#U zL7k8A9*ncV@9XyAZKd7b=~S`Dz)6ZzO!)0;a=aVu7m*4L8C42`_(bv&qBqVLB_VDa{{E&q$8tHpCs|?%LV{FA- zJi|xD>-aOa9+0Y^lrn4!UY~1!rM;yD*QGL8Q=t`Ba{!>*;8@>9#*u*!0L*_^dHn5} z`4;Z_Dd2USg__FKkD+&9hT;okDffUf0xF8vp!NjMfggZzLT7|vwKc%Fq5{2uQ$-Pn z;f1RPMFv&OgH-iF4^gMUbmOVvSD~+OB0liPcdkf` z<2WG@R%iIPt|W&v(6=hd!18~8`FH*lx+<+#v^S!mf`)HJdE{tx6<8u zjF$Xd;s^$h`H;xT&bVaWB|?NqPp_jRv?aP-SbhZ>kXb7+@q)$ti+CeG@7=n#FusHp zd!@}1mZ;!J*fiASbDiRmBkz1z5bAZrHSvwp@$;E2pMZ7ic0Mw1A_9M}oib~1vO2t` zeamzU1=j7Me|=~iMf4WTWDW|;n{0Slf0gJ$SM1}%X+Vs8D6D*0fNc8JzD+zr3Z`-d zQ~A{podZ@&5Gb#&Ob+g5D?;GjWUejwgJ(*2X*t@zyjnn+d_9bT_(6?3Y-H6=hb>5D zVQ2|kzTwihY71St_UV5jE3H?1mbV1G)~j5~b;AbxA*sQICsGN3vGKu+zX?ufj_wf& zMq%==-ZJl(CX)&`)b^JhefFW09$F|qbU%7%QTTA^9PAUSN^Ew&1=29a{ehx~xdJ76 z10|AGWx#3rTzPZLdXi@<9Y4Btdl-7HUraMhw ztF7GLm*^6PuYu2Yd&Fz%6OitrOhSHVt400X+{cBylKO~(%6qtUQeAIgcDv_t>G5!d zhXL$DaNV~Ew)}s(a4xr0L+f3GdrB058r zG;Tg0%;K3bypta(CANN&XX-xa5oQx{8wfWo9Y<6U1{{AL*xle;lfc`^nJb~6vOBjq zH|U`GIvQbeEWtLkL|oIcW1)N{kSUT)&e7*1ATCoEzpX`)?TEBEAU^xRi>!`*U%_R2 zAp3T~-W?)x)5Zx6bxF$(~^F{_~n!^cSF+&tXdEzGw%qZZ;)&&) ziZ&#rMRb3v0V?s3L0w~F;=0@fdgl&IrCHwt>{M3mHntoW#hD+?Li#VTxxW3Vb=pt+ zK_f9GZ-F;tpKMMJi~6`XLfW=QZ|k~l_E%y`^CDtKTIH{&7tDgOf&t)U#*VM2=+jKd zp<^iY%~d6}tVgGpj$@jxyLnn(p}Gx|4ufk}uSI`o3Q=3@BWxjXppvWucV7nrBRVCL zJs1&B>-o_gPLA{^M|70U#H&_zS_$jY;H}}#bg|YJ;#&B)%IL!s7`I*0&4kl!VSnhM zC4trl#PzAE>(^+JhT zXn23R?<&n8##s^Sil~-A#QYpyocEE@m1GV18k&Wj`IW-&AB$a(p2f{u^zID0qMPm` zzN{697bK8STp#tACD0?)hf~~ zS8Q&Pf&`OZ=J&8O#9h(5rS)|wrR!eq$lHJL?$V~1kg9zt0k`7HiyCTC%m=snuor9t z!O(04PK2`FyT|Yj{2Z~+yfQf~3fopn^3@yPX4ICNammPsrak9qdU#&iH^XcE>}^*Y z#=L)CDfw`;3LCHV)ImL-GL=OQH4!J(mE{G61foN`mq*2WqED~Ci_Gn%A{J^k<^X?V zhj;~0!>z*9XOn3Prl~{@cYKa9_@qwJ-enAV6c0KMDc`wdjH)oxYaOfoIFH?skr@dA)o4dUOggjC9}wqDUNX!qE@HF zddgO)oB0mk{LZ*BE!iJDm+@N0lWe?|nYwM461kHEmU7gN~H9hvTciBOTy}>x9Ki>6T z>7>u*Vng3pVkb5A%F;t^+;4cLI;3E`GmV!buH6LQ=*e0@@TCuCK?o;K%?3ihmIAGp zvppw?PCux$#A7k~)G)b3tBc5u5?PZYn`Io zd^GIFb4If9E&E59xSxyRJ9kvKPlu*d%NGrMJMYcdQa7*cSN9R>r$T!Q!hs5oIJD zFQTWo$O}zkuEJ^+e1>E|a%ZM8p)J%c!rPvWDNwFiKYDTu4~~IS?1fk#m%rZn4HCFHh1E2m$S?KFnQANy?dSw zQSI|m-9KYaQe!@PJ?71Wj&I5Qo$L2?nfRG__}VR9@IKF?bb8m~)iW}G_T8EY8T9TI zlda;Zp@Qpu9{nYg4=;I|ZhrUP)Wse?hj{o;>@u#mCYz3D-qmqANa}@Y0ZG_C(q0}u z^%UN8%^j5s@oADT&Vvk;9px@G3ISWO?u9~tRxZGFlGdA=M@BYdd$0(+AgIf%$lY8i zWN_~lraH@8;$F%1cIn=KAAUJl)JFZM?;*nbe!C@7N3+*EZ9C3&d|z4MCpQpE6wx~? zIZCVA)Q!4ZZGyP+_QDE*Tg}}KbNSxftyW!cqBIz_M{BDofTQqldm)&sE$mntZNn8b z0J9ZOgw^mdJczl~S||Vq;KOr%`8H`^hKaYDf=W$-N2+Q7RMBdGjrCUI5Va5*-Xc=n zY+)zzSQ1iM+6wUC);G8Q%tzhtURdh8UMQ_slga|j__|5u3}&_{j&;GAX<+Rywc-AKvkyCO#*X|Ko+Fq&`VSugO$YyH@k zUys_JsrTnLt^7h<;Oo=93Nyv`k?^QQmX?QKV*QqsnS2$Gx(-=_m!j0%f&@`|;gB+NTrn@3&l0m5Y(%rB6Z!B{U*wr;gCs z!zoLVdDntD8Ke>CTC`ssPI)<>`((TlS$g~Ys{#^$N$LM$*2my)d#{kF>WOrUZ+CAb@_SFDY*|)rVJpD{!S^HK;3k^x24)njbbNG`K&n%wmwP0xgsANhY}Xq*`t=iyI@8*WZx z{E@H{a=K3ReEn8FFJ$((Ywd>7<5>ph%;?~%=b4egeKRls0I#o5+Il9Td`lhd-_kgL z=2?gVn45TresPGtxj%aMELazz<+y`f1iKfZZ%job_`PCK9N&&zH?Rj0(J+FJNe!~!aF;ppj zq5?z@Jin-%FOv9#O-k3o#Vj7E`sYo5=>S4Y@VnxLE;2f@^%phDxLgUO%LYb?2WG2| z)DME|eU|#Syq`=Vk@!KffZsSq0sy7v!5V?;l5-%nco*ZqEfhRB#sHlG@2(aPP4Opx zN6=5Xn^PZW0SeCa%q?uiz>M~hPkV=MIh`30S{zP0wHCSsa1g??bUXmuFVxz9Sw5ow zg_{8Y4(B~(&bj0#s)UD>%mMj?`C5b>ZY9-j5pR$V!$fCDGJ2Ra%- z`QXIdf|M6vsv6>IaIPpT`-z=qbG24a+NTlltBSrmncuKkY~)(z?{6Nei|SG$2U1gT zQ;`EyZLDkA&}u0OV#z^K7HX9jc&ZTIe9kqn(oRG{17>+!<(>w%!c!)HsMUYy>3omP zFTUpo_gW+p7ydkBo&FuMaX$ZV{PsUE&VQlPf9v-b8hQrJiRWO>= z?xq`O)l6|P|L8|nI|Za1*g{s>T=}HciH(*E>sDyoS2tk-&$bX1fQ#H$Kfc`j=tty~ z16p7ceXwGb^3{!4iw$*u=hxI0Tvg<1x0JX}?q8#z>^kqUh9@p)AcT(vWU@x*Ul-E}a*nsUGwBez{oT-0FiuEe*~`Hl%3 z6#1gY8r2nLX&XckxS9Z61wgeF4pZVb^~M1a<4?O=RRAi|08IgSU<@aE006P!c=N=w z{=7q5DoUq~obow;*AKT`FJF{VZTh}1TVNGq4t6pJL|BAOnB&aw2^cZBKg*B_oxePVA}$ z8h5kzP~w9>&(5h{#5qw!>V;uS@PyU$ykxUVM1U^LcbIK-h;K^z_& zzw(B%{{5ccQbG1<3X{l3XGtsoIO|s%Nm~(p@x4v)6PkEXV#66dW?fgg1j3w7OpH7#ZDE6^1#xAtwkL4)&0PISY@ zavNfTawPD>g@f>|uLp8lrI8oXn)5_XUqeHd#@}*tr`$34%KbRo>+A!6VFk~zF8Mt^ z{OIe%52nAz6a1%tEVzHj$nl*d^GzoR9`kzQK;?kvKKHH?T=T_uk#Ggli1wqYx1G%M z!$$yrax{HX)$&zIbj1(nj%iGr|9rOE`XaR|l!F!pX6h{c2RJWH%q^^~!2HUrdSW2e z#Uc43Zl1Dl;wf)y`C_Dg`*712#MMSe)%I#2pgqR@{TOVVEI^AB8u;Qa$il=a`sW-xz~MMS=` ztRmN~rt3k9b09V7ZkhjkS(Z~y2N0^Fe?M0;R`~^P@Wt1K4r(`)dJ(mVbCZ&P ztb$b%ft!lx%X{SlH~Y|gJ0!U95F3O8I_RTC;B3OWS0RCLJm_PU~&kdz| zmATrDB=!7LrNZOr=xW_>1yQqDGm70Pwu6zSe6KH3NMgW4JxoCJ&TZ$hHaQGcRtYGD}b``DG z#m+wI|B;M$N)%m20x954&1F3fes`g54G$)$R2EooC_UzrDt<_wH(g zUKO;n#3BIk-!O>EiA?*ih~qDRDvvPFIfduSe~@dxkYj(a$N$7OO6FVP;E6XC_|q-C_$vCJ&uTRDk6ga?mQgia@FkPrY$k%uz*B4cRyRUd|DV&%t zSLJx9n!HQ(rimbM9HZ`B;6PDucqUdjEL7mifmP)=7n&A_KkCmed;}kV>JURDZ;a0p zcV9hRu^&CC=XxdG|C+xR;h%X!`>%*ovpwfpr_R#mFYIuE{hCYs3vTc?B2E+3zYubp z8~+1o{QKvL&HO1OAMLgF6$GS-MdY8^;aZMh$;W^2@XX%ZB^okAqU^#V|OXoan z1tdCBl6<1SL=)7jVEkWwA9(lwx3B+K?Q7-k`_DZ&U>Y0;5)o2=slbVjP=g!CJyhAF zcwr3}r(XZuSJ=eZ;9OI12V3M^U5Ub*(!o;D)Zcb2&0ekpUidA+0oBr=--S`*E zvOqvvoOsOt@O?Oc-=$x^kS{*^#BCPmI_Yn=<_6YzeB!_B;_K_OwgNlX1Csk2)p_18 z`}Z~p7=d$L3xsCE19%J9&=F4zuREx~`(N5Zz(bh0TcPsYE8twl+cVee^Ao@ z(hm6D%b=>MU6-o9nufc`pY`r}cuCZQ`A7nl{<1aRa#cYPG0f=PfP-$4k`GqL3YP<^ zC}WK!xuGI-!(a36wL9nC`MJaHcY6WhHzrk0e%k5VE*t=boO)_{K9>trL-tY&yfrrj zYASl6gU$heLc`QS>cQinse+3uNS-R9t@FIVK@`0%0Q-j_-6HsPf92Qxzw5tW{jI`5 z^)I|CG-vQ*6JMDW=q6UJo;F8-iScAtb3x=#qN3z3IPJsYSCu2YP68VxpLKp+&>?&_>`TV!>v;<<sbt3@)J?4}yU#p>I&f75%#^{y1z9Q@5uyhzxLRaC zOe+WSeA_|EEmsj8fl@vmh71N>em@RwmCjm!&hohy`R!L;PyZtb|MDvrAM!wd`IO1- z^k>KCdBR`koqpd|^m0-M=393rk2~ji|Mq&~Ar-^$b3OE0`taXf?>w)y%5Y%OtHG(& zHT<6Uvt2e_fBZRs7r%ZSn;-x3-^dCwtg?QtuYUYD-*r_8Rx=RmpY?!MAH?o| zcP&*>Jg8RD7u=MGPZovHlKD+k&{;t!=L*qTR77`FfNydNgq#A!_O#caqFa!pLI}Tc zwBS4DZ>cZV?z#5+S-Ws%09MTcwP|v$&Q~PD0~p%pc2=lzD8PUZ+)+U%5}{y=P|Nks zt)s`SV)ZK@wfdp~zUySSJniaRdj1=K3HZhl;$Qgfu=()IDb4z9{ANsrc$!KV5eJs;3VS zT;RAT{Qz*^_wDDMi+}o%KR)yy-|e3d+m_h*h@7e8~D6QAw>6}S0?g8xr#jq}{@BQ^CSSZSeuR((Nud=O>u z1xUG?cgD?JTmmnssRz?Q$`<)BJTcHVy7;0aHiGbY9g+aT0w3)ENWT!{I8S8wpL^&o zioYP`i*UuWyZaLPXOw{|G=OiS>R|uv?*THT=G9kS-2)g8ATIJ>zce7xwGepMm;pb6 z@c(F>NV#AAP9z29@3v}x)$OOv)Ls4Q1N>iWBcUM8Bg9g$di*(;g$PJ$5Z&>R5}?Wp z-Uv@W6934Ibgw&&Akq_2&aL8AsJo1Z(S@sF%y)nUu9*rem4Hh$gPpfC6m z>JW^nz^|4As7}~_fLn5sDMB}hxuBB}Sfm`AsvN9UUQ<=VZ3o^dG!AwV@2i>*9|vEm zdgy*2j}J!k2d#bXFyQq4H5UIZO1xN*0fy-r-~EqJ;>7@Zpy+@%c^3i_+(fWLjR!)? z4K9d5SAd(VDJN3ldwz4E?5d7mxg62}Kwxp#)K9-$f1)LSULz=gsRN-CcnB`wdX}U( z5v+x%o_u1PBe+D{_~6Hu)l~v7_Wc3ZYXC`b~Ymq-W)U`dcYI8 z?t_Se0Ps!%&3};2#lMPwtUqXSu>R~ckE`Ku49Yw)k}we>YL*S`OK&RzmP1(%H=>ALFMwN zUGpR`d6i4+&X)uAF4owtI%+*M>aIT2W^7j8R87WTpA83mr zHcA|7FFwqH&GbIjdW*Ah-h_q6WuXI1@XD=GBYRWmkoTD03jLK-GbaKl~X|p6a7j;;FKE=uGOzW&9(1kT|4A1Gz?0D?W(OR&L zNh+mD_cWy6#5w1Q)kzf+bJN8$!?z+Mdt;zyjpm?xAAYmr-N_dm z^NT#b`9)SCDBpsMFC9PA5o?{lW1CnKtS^~<(~7|)^*^k*&VQNldz+^pdF>M3%L>?k z5K8L(1VT~H+|AB1yYa(qV}aike@OO#Z)WDpw;#XPC#g5beUs}y%kB4acRv46iHgUU z#XbKQ->rPfb~gHw>v~7U;mv*hqB~hmzZq^PP$|kG?qzRI{$K@Y-NhfWJ#!EN<0!iR zA>9j}667cd?DFQ#6!>o(xZm8wcW~!_<;@h+&R*Q4Jk9v!&56^HZUrhO-lXWwuYYOe z_=o(aZ%XsdQ%aj8!8vF|W^R7{j-?wv6o_AJ{rA(@DS_rgg5s6x=21&llKP^WZeo}F zMUX~zAkdb8@J`?tb>Q%kq|%fJS2z6_OEFFT#mnEMd@|2e%PPcOM1K?M#XM7g%}qZ3 zaO9n^-)}*EnP-YGX6mLIDOtT4P3J$ey}xO5raDi?!YB*!8Kn}w&szK{U+PUn{&QVm zncoEbmyM;Yn}iLh!Vdd7bm&G>sp99HQSr>)geFx81Wg70f8f=f|GdtbvG4Uz-}U^- zqi&~8uMRz%`^6#~-}-;E!8@dXQIyW#`yrfK88}}G+Pvv*stNW;;5jv>EcLtno2(^& zNOry`X6jj6ZMSoru6)ZO!|WXMQ+_yS|BK%~nVVIvf7o-L4H-tciBQhb8R1Fz0*8vZ;~&o5OE{*`9W<(7sgoxjNs&F-8B z^FzOns>;bf>DYXez*ImjGxMBp4p|VW;(KihX!hsYOVOc9tv}Q_HP6g*nwjU_ejVTT z>q|-hhjPZ~rNL-_-)s5*kaUlK*kpQ^JeP* z;e3|Dc?AMzSH>LjG9`Ucscz-e5DkVse z@)z-bGJAOUG4AWCUtPB@CeiCfGa)auE%S5T9PVp#{hJ`SJLdmm^7*Ek?>oetbe=*wH^DZ~0^S2n4Qtr0$|TL z_I*9}-a8NJp;s0e^$#rfA*Yo6J8sw%25f9lJfH~;PN++|Ij zsQqq#BDlaGtIH?vMO!9slp@uuc4j zx^mZ#lToVf?uXlKC|LDO`D_?``;eT4aPjv$fA=5%eftn?)u{2?EJUidiS$! z&*RR=u2=msbNzple)nT2nx~CjKkx7RJhRQswd>z@v+?S0JKgtn=vIIB&#lg#x%&?s zbeBi-hbj;6Y3qk7|4!RKRQY$>`Ju|c)9z1va!-3d@!x6xC;mGf{KS8!U%zPcABz2d z7j6DStpBFXzr_0c7j6C%_lq|FiTh2Pf8)|$wD}L&{);yMiF?j-K~eVIhZCrKn*N#Q z=DFa0&6n=!U-PwZyz^zzt#AC#wD*nwnGV14Khwz%9sfhI_dj&}FN*z#j{ilr_dj&} zPu%o#{J$vnpX2{Uu|q${|3kK~Kga)nEAIRu+5gsV?0;DHFKf=tzex5kgPv61I{J%o z|5E7JAC~P|a$nbwy_WKy6@Hfl;&6o20O}O8A!bpEu_HS(Sn`QsTR@HC9{Tr*Q z>U~AJ|L?4Jx?E41l33d>YU%zT&uzx5p#>i(@tE1f{SUtPhYnfi0h z+h>ast%;*3<`>IuQOYl_{h+9SFV8Mo(|3?v_1Ap-`y=By;@|tSoO8(3ms;Pg-vqm0 zbN5)~H`l&W1y%9YuUPwj&-3#`w)av&Q8djlQV83B(e3_Cvv&tY{cl+I|BhM@|CL&& z{!Xoj)KUDMS`Y7sIsbuL*NVSW>;I6hQ(Z`>)IU<|$p5dX^@hrB%ADnY{vBy9-1`&X z&qe(Z(&wH2rKVZwri}M{JI0ro{#P>nZPAbWyLRW#53&8BM30JZs#}!)9icr?Vt=py z_`k&Z|0UM{bNRUI%fFE1f0fst{NE@cyvqantNeV|`@Z*sq3&`x{G6A2``^#zo}Yft z&v(1$_xqP{_kPHBuA$t2?|y&t|4X0$^LE{TS?|wz_lqC0{ij`c{;|?{WP@ehb{sesSDAi2ToT_j4&!|9^%?|CYCVJ>UAj-Rnv2 z^8QmE_wV}eawBR#6g-3dX+PARHt+N9{%u4w5Yv7Ji~q0wes0!((%<`>`(5Ss`+hAz zvCsPj@r)8EnksHby}6$ait{Nur1_l-DZ8PCf{Q78r$vj)DEld<0X}FfO>0yN$6a&Z%{-ZxJ;Z|Dh6uTY z#drRxu^d7p{F!urB$~b`Ief`yjHXSDrYqC^%#Q?lPG6MvJ1y*vZ~71UE}fc7{cNw6 zPHq3S-dckHq&M21besB}F7v6gZFf6}er z#|2sYuXNOQi(ly}Z~R)nZ&k6(e zW}#fxh&y_Ju*SxKUP9#Lc+e+!(3_YjNC~;Y#|E*J=F7tSgqkXi(P86{0)DoL3((e&N@()>HXmvP%lm&v&2lc}DZ?&|pSp+QRtZ4XC502*N52 zmh>ZkUA>pwL!wkssbE=4yh-IE*`F}w+i4ce5F@LH@jhQ~T7h){tBeS&l~Qgb4%Iuk zpq%54>E`$1VAI%x>Oc~=QJPQLojaAGA8D;dS z9eJI)vy>}=2|`;V`KB$PS92o{D&tx754T~i*nK(y+#%G{pmU6!BV(%g^@7)V#5TDH zn&&K_hhG;&w$YT;9Hc_(!;wKlLN7-6*u3fUkSl|eY0q$)hYOMNV{H5Zd(vSAkho)i z%u_Z~`K9pnUWqeURQ}p^@0y(616((`Zz~z)e;+$8K?Oc!NajZqqN> z?_5Sh{4&}SE1YbIqGn9gB$UdFaO~!XJojEVxauZ>vvnLKD40?ZXleK;ctJ~e{;9OQ zm5bIPxhGY@yP_Jq5M*S%*6~Y7R=kjZTc~pigZE#Pjd|SXa#$>~*yerk0)k=B)Eii^ zNnk$iQAt(PMvOGHU+5k~81m$rIIRzA>T7ymc0hUaQ@6VrQW@FtqH-&oE{OGK8xCVD z5v@-{SOo!czM^!8nwdf$c%Sr4;S>6>e0dQ7f?+AP9=Pemj|qSxnet?=X&<$J^01A} zegscKvXZjhkV4%dCLl~3vWmz{fc8TXGcKK)p}eS47|f*~R1AKv`t&%MGUQ>AR63Qd z#Xu-|?xmV;Ntx)JBqPe{Ny?QJnz~;50ytknR!fr_H%@HBi{3sq!3E4nYq{pqTKB7W zo@a-LC=7mT9$Ju4K6$wwMRRa}5W4g-=6)H#QeTjDRca!4twz^4T)LgK*k;)kpr7$- zwsgbnJE2+Cweom$C3jGK1?g^FN7vGLR~GJq{kBssR8oFi)|VR!Hr2u+y$eGIG-WD* zRFNJkFfMOFe#wnJ*XU=?9P?3FUSPAQg$%3rf!;h2xPQ9bc}$Ppr1w_P{4jj6Pws7gNAIp0TC z;pKTUYaMSvwDyO{o5MG$_yda=s*kBMTN+LJ6T;uAw z!{U@6h!qTNJ4>#abgHMKbr8hpwCq+|HG*4Nu7mSAu`Wr~=~s1G`8bJoOof2_er2qi zQZw%<`_-*=bgaSYxKl=;uBFY-N#o=X^2BX>HBOd($)p%OoX(Fdq|Ibr;5m9)2V__5 z(vcY~Wr$BkFd;;LLO9EuR4LGn;;d_1niMr=Y46ZbA z2U27Q7?`^orspFD>a$ZT`wwkjA(;U4vx#Ak6~;&cj2Ll0pH!Q|art=mBzsW7#W7LZ zL!>4;;XQD?L|UuzpgxA;FUVIEI;s1^n7d-34vK~BKuqsbKbc*KG z&izmY$Ei?C_cUE0kS5vAew@#)6#TgBTS62u-K@udSMqt9vr9NOQ}?A|%+)(7X+7Q_ z_*9FpYxbT7nQJtdQFQvC`FfQ2x|P)UA>fE0f5NOy%SV8`+K?!OC83!mmFYJg=AV!V z()#idbBW)8Uw^1KjB*0Jdtz7P_OyWk)SS)vv^y38hXw8$TX(Q67ljvUqdZB(2SIjl zNLj*vyYm_@EbZ1-Afls!@P6=7aA;u;IE~&`9xrFTV50lEMYrn~`-PUq1tsx&Uwi@FkV%G88kAgg4Xm*EsO z19AvO(`Z?u%M7sqv{xE-s#0}Qm~c|99s%(hs4SW~krA$pN>y*lc}f#jh5Kuf_fdyv{ZS z%;Od%(a9*&;&w0$12dKf^%h?UuK`|JhJCfEoJVjrp6Q!QePE4>1?#<;g{&3HRav2b zPt<>Ynh_KeP**qV1E!aWgK8Hp*#p)=<)h9MLk7Y2?BUhf0V!jvK27veZr?4WOXY`9n^Q8OM#kw-4Q-gxAJ@jTvd z_e~twR-mPUZ6Q zQSlM?b3%}UHcAtVE=OR3mvhgjjXAk2<2ay5f$gHNz?Xrc>C_|2PH@M99wk45y6)iP z?UtsOj&POZy)yN$&}BW6*fYdwsu$MDq?b4bV2bMbF_cyEadr$(GLIUhLE4R)$xt5C zHj7DJa~(NvA#^L(ak6M;nerun;UbE&{qnhX6F+Rr1FRPCG|?3y3G>5g<7w*D0)LWc z@mfQiY$m{j;FJ&m)ONfQb{pkQ=`g-=36jeZM*{RYaLHpyVKI$k91LJd7-f}83a_<_ z_N9q6-VA=hs%cWuGp1XPhvQlor#%NZHJn5%sw7+4R_F~(*K3=`02(~;-0TC5faS!=HN&8g= z33<>9{9Ljwbr4y618jDGxTt+*X-UrxnF1T`vB(R8yXG}U8}%V9>4jaxsG-2sE8z8f zUna#4o1wo7yXu^++t}#elFa2|X2@U(N)ax@c@8e+9 z8|i$IK;)JM`l7-QTCRntVON?wb)#1re1>^p+aP$~fcyvu>k9T;nW})!JU~`;iGrhd z&>yn1aP=a~%K>wLoX|t-LNvqx503E33~zg1-H^)3sNE$xWuJ48T*Su>ucNe>+N#kM zc&)p|&pBH^q9-Ek{2-hPpwti1O-!7LOetpH9|2&*L%H8!F|KP9NlPwVFXBo^rTYg! zzJN7o%a{b{zzux**#}PhsX%K24=^HGH4b=y3=Dfw-2lvg@u}WNj&ej1Bd#^kbB|bA zw!@lZ1ak%NNprf9#N^dYt|fkYBk$N{d@qRcfG`sjr5Dgiem$Vn>}QG;)ldArzf4Rh zZ6qgsi$&PI^-meT@?*n;^IX;{9v~N#vPld*b@DjZS!UmjfH;4Onf(#U3_Ct-s-PK3 zr6}N-=fDbopIR6R!oE^8dF_0fuV~s8q-eD%s-C6xFfDw3_*EYn0Kr8eNMb^VkSKtA zAh1y)(UtE2)nBIZTvw?!U7v&9fUt`(!vYO~G$8*xQfgHSetXH;+~bkFbma&)aw!9-}-RKBzuXyofmIdVq#$468SdFylP|1w+6$$l>o03@T^x!(NCY7;a+| z(eU`)W^>)ZxKhiD?Y{>InUMGNu(W4HHVWBR89U%BFiFk0`lGC8 zAG3&m)(z8TtXBa#-`RadJ%*FkYH-g73Y44gE?F(^uN^Fi`IvPFgC|veSc@&ID_R~N zI(?c)h(38Q65nBkjBkQ{n8$SnLmTl#m>@z%lX&XoY5|EV=^@qdc?bZ2kBCgiFdf9K zf`kvpNquERK6+D1yau+J_4Z*!eBk5)^G_vzfA(7hs0tR75HaI0iAD<@p?Tg01Pw(%PG zVYlY#!)8SG7hPF2zAB4RBal_s!D}UEgTb4zUqCCB5pr-z6Wc6uH}o0Ea^DMl@!1G} zBAce6gkwCB-wO)FgXsVoKp zCY=7jDjx#{!G6{gKAzVDu}T`*Se%75VOhP++aUv+;|u?Yq8!DD+p4VYcKV6 zUJH6e1LD5GK3d9oc$LmW(SgN&&4(ob0@Ps|VG!|NjY}1uY+d6-IRVq#p`2J61o7Z0 zlRMVe^6)$rUX3t0`3iD;Kf-Okj;EpMqtXnfipBS-ivvn%?^9F^Z z&&V4L6PIFIig*xwJS*%ukknpRvA&ZM$C=lEruAX$qu#c_@z=J%B~IBJbaAR z?ZMavnsqFMWD8rU@p68peL^=-<#CMRa2OJtPCwaj<@rjTmlF4XR=diga%iNLIT%<8 zOWY_SNiBs|-sZi@#f}k}xily2=lQ|{>S#UWK}to;37|)yK*EtOy?LRJT-?mHa{Ws6 z?M&+)g_(I^401LeD6{TLRYi&Oo|}}V>DEI0$Pl{`_Pmozv=`HqorXeWH4S~HSGb55 z&+&|eY%#JBAk$fYdsv^cpvvL%%mqk)HZ{R>E8wBJl@Sw8@Y9Bfe0!2iq+PN~c17#K zth*77;V1IgzUv1^`kb;Ze7NeWLK1s>!5(!4Pl(iHtV)8>>S&tiI4U2SJ!d@A(Kcw^ zYsK&5moRF=iW&d`70~_abl23;V+8>6L)tYmEW7WavmTd!W>bD#jL7`~!tGDw@WI30 zp|O0CQwm#SiFvK#LTS-Q)EnQ{DTbXgc>6VF3D@jU&a4BMF`&VX$tW-P?XHuXt6{ zkJRm$PJ;P=GipfBA4r|v;qp`Xde39f($k2D5q9Fi2y{GKD3>Gf-MsDNIVO)$9}a-S zKDkXKkjZn`5uwMyzPUx^O=w;d-Dde35F|>5=bpr2h}!u@xko8h)GO#wAHIBXTf49& zt!~6U47vDjgSm$$L~nZ~KPp9w`(&Y-Xhi>Xa(3#NosQ*2? ze!q|O>t34qKW5kU?QiV59SA=8hh4|MZ%)?t!*-gJRT?k-Vb@dZzhc*o&wt6T7tMcW z*YA;`DWC1V>YDxE?7H^+JG;Kg42=9{*OBCJ?7Ey&Jn)BIzX!*l=ruIKkRz;wc3kc?7Evz@^5zi^*s{h-XE?>ly?$~ zzu9#^{5R}+>+k*(c~%#^?nr!>g*z>k|*6&IY^;JKF5yY9OL_uDJbh z>(prjZ4^MwY6vfo8KQREhEGl~CqJ?W@5M=fStgQa!-5cTWn?|4riQEVI!!*SHIs*V zDvn=>t)bAn8W_)^FTqF6NB1syrjN!-zEmk|B&F)R5G4VagUdwaXvCN~?g>znTujMp zZW+K_O$Co0Pa!;3_3aA&j04vmPe$HaO;Ve|=R;MEzsO$3GmuMr{!445>fypOTvd^O z!gNZlWRz37u~Z~0B?1xOpd7eiBbOq{6HEx7B(L~QopJ+N_lt{&n+i56;RPtE5-$yF zF?a({pC~`Sb$?+&q3Ncjs)eWrr_jCx(>IArG-7#O6KQBS+2|j0v+u2i6Q9=tD}q5` z7^va&(Au%A%|>@LP}`;FI`Gm17Sr?RWT_DC5|YDEHJ+f{-~eR>f-G9*(!P&W2h%|x2ot1vYS6Tc^PY%Yc%#XX zFA^mMT4YB9^Oa8Ki-e<97vORNK!}~IcHN?RySnl`q&N!f18~h^sLU9I79IjSC3+5K zb~Z^+@%Y_fEA+(o;%*m9+pL9U~haUwmG_N>i>VDYqxFv_HkgPHshTXyAT$ z8al9-pyifLN8J{?Wz0P(Q$pdzgMj!Xbjcyv>O<8hVrkKki2@bsKKC9b0yXNa_vx$@ z;(I(90S8fcCG*@1-vM_{Oa)`hFi6lvi*@n=fV`5@Vg0m{%6xou)946)yIf8}NS=Dd z8)S8PvAMofK+xo{CJfB#BD@K`s&*(X?2A47FrmDSvN1^CpGV$T4HTJzqY%(Vb(vJ` zrNZ+o{es-Z<8Z#jb??-nI7?jYc#$4dS1;y8_vbrII_&c#JJjneJ{cC+K=LknZ5RHC z4Pfm!PttH=RQyHI-IsWO>8<{WH<4UN6WwB*I-9_E8v6jAS$xqRe9}9X;HlwvWzPnh zD@}mAEHj7!`{V`;-(5Ml>GO=mKsrS>n(Y9op%^#z>i+iL`!M?EvKiM>GMo>keG4Q7>*QmSHuvMfOwz^g%LEz&aUo(dOZLidJePmO@ix%c$7d^*}?Gf<@s(^?p0s_MYNx!NGG^ zA!!$<&2ozgOhJ}^#gXD+h7JxE$g3Kz33E{*N5|G{u}B$~8#`}mu_HZUC-&9cGr)2p zBq0u})KLY0t*8Yam_h60J?$kvhYvqS9DS)WqQO!FkrkC~tkI$9N7B-_+fHzYX?>Pk zDO|q;hZj{#BQqa*gO4(=BJ&wxym^sXxIJI$N3ONS%#;(ycJgt9G1&y4-{9A#;?=ImJm8 z<+UIjnUEev)YuPS&V^%j<e0q(j)o4R)N|!unwkpI)%DPOrndi;U?}?s=`0$79v{ zGkk250Dc}p>t$0RQoOF^}A?y#C4ehB=(HjVw+5d7qDe60$8De7cHgAfrOO%UyXx^S180{qTvr zD*5&vcLu}~k?Lrr@|4qf$R%I;l-`kA#^?ZtKzP5H=d}29UYHLQj{PYb$w59{#qb4L zz}%%rybx~sD7M`NHMk;iPnm8psR@^fvGmCfENsSkP6d1Y#SYJpRxnfuG(DRzU}}u} zo@^kzf9sS@YKKdO75zYYV7sECVONi1L-NP!eqLqAApvc5$TC-MvDz-txC{Tn2{i;*)Tba>^Al6kL={-2 z15eThH6llxJOf#uh6vM=+e-9>+J?LY_*A&!s5&aaJWF9V15gE7n zG+(F-EH4p(E`vA}A63hgch}%2d$ga;86SB99X+&!NUS(bO*?$qYUD_8W*!JnOooDb zGp{Qf&FkGIP!xx6ax()YJ6K$)KNF7+MA&dzi_@OtT_Q7x^gAtvr=^!i0AG}!Ww~&8 ze*sA`3KO98;m1KkLw3H~AxJusrG{#j;uLElf!S@aH6x=r&(Xd~#*|noOPjsjYoW|eFE8bHP{Fgs*`Ef}&b5=7JBKjJg_Mp;j?dQ<#5#q`KQ=Q~ z^GC%&lz7H3czB>0gcAP6d$C=|KF)E?7%Cx%JwI%{EDP?x2z4!+j;qLnDD^tjf9ZCL zqof-&Aj@Wx1O}{-o))YRCz;LJ97z(CgdM61PL*F-aQV)`Yc?`ZjNm_QdV~rx5{B>` zs01&^h`JD`^>qnKBQ!a<%kgNt8jXG~A!(H3W^Rm)s71_#v-!20VaRAEEkz@*sV#*F zAZ=`m4g zIDqvU4|W8P=B>iZlE3`L#R*c3hA*f2vc;^Deyk67r=4R%E8A4bM&}E^NJu!e+04X3299sXd8@ zkbHb|;UjAanY!uaA~g91F|)H7w%C*_OmaA0^SSpkC!v=cCa=!&RwPkap*gxTXv=^X zDAQTY&e+5Y%A2_%pTqJzJ zPvgz{0EA~)G0RuY*o1w_;0sJI2=QLO*>TniZ_n>v+3`%G1pnW#_x~( zv&>W7#?()x-2Y(^e=yIXV(z~k1dKMHeUUxspS}ItAYeDsZDL2jaunrgTd`{v8?xS6 zvwTGd(GqpDz&!Vyq8xdq-%o0HCdBk8+gB)qlBJL?xL>Tdv_`{4?b<}`y2~$8Mi*~Rv}z3qS-Ob?nW73v zV~r;-=#OWab1c*z?mZG8uexDJzkuMPx!DT$<7{@#bVV&OV1=c2s$ip|z`Eb8d3e7b z)`bOM2u>|9fA2Ej*hY^Ek773BN$_roll11aBw&Gl5Iq ztH6^SrWGWn$3sBd7b(T9p81orBwi6<84bi5~)|V0eC5Q^_h*qjX*8St zIu_p0n<$ZDcv(dB)-}CvtMhtlMjf!G4b6&n#ewbt_|tg9EhKW}NP*VY$7yCi!i!MFk2AWMC7>K< zsC8RDEP5%>^HBubtG7FWa%d;G%2{}nh{@ZVz<(+*?gZizM|9cx60G~#%S>#lB&vRU zAtQ*Eozj#5F(C<2d!{mD#aJ~|xgz{Hp9$~?6fjj9mZzmhz)gc(y^Bw7rK?5@Ww&&A zf3Bg`LQmmK6r|A*1VL5WWDg!s*IG()?NZ*rw2fj4_LY8+vrBAsIxG=5EaR=tb<%b* zfHc#~YWjg*N@AsIp@K%*T)KPY{9O+WZaf)-q5vw*nSw`|I(FYy>O}(mdM8LW_fIbJ z#zcI(1#!ew`>@&v!(+3Al;~-PoxZ#Df1!HyK31@{Op@0h3D;w$;tdwe`E-BwJSnzK zU|8hY$%=wkrDWr-;}D~Y- zK=6WFjs7XS=lvkyt1Lvz^2=dr0vhlkoRYDIjPWR8$LIR z{OlI~8j|NlC!l){&k4$3$F-*1{)pb{@>Y>Dr}JZ;k_yM8)`}kcR#Eu)f+do%QO(#| zM;D;kxLWj6_0iho^KP{gb_0J>}s@;I_OIfR3jfK9y-Vt;4=i5fl2Eb>Rap+CSe;ax8L$8nYy=kh; zRvFuCnh56`#%rtDCT#q%yPu@wVYQP?5k@}`S6nSgY|$a1giiHfXvJyOUiAIqve?_I zF8;_4vFge-1k^dS;tW`D*bI@}4G)K{;gOEwaS<2zIi)H*JL)i@v6?jXhK?1tz?1(h zz(m6tV)8N9YkN&vf8&fZi2#TUOw%2RE4wC22ll|zWEx*(N%)~1tOA|4FPg>P zQtBblGu++LYt!*f=^rJL>(gkTZuipctC&%1f3S~=lY?4XGXn3Ze8DP6ttolemc z#`7K}>BAlaO+`MWzjDv&m1}*ILOT-9{ej2C;hDPm0C8fFssmD}s(e27?m;WD#xNU& z`cS+)qf|iC+|_4c^fh1GmTfCdl*)X|r>fGsgqdS0hj$#x3j<1v`t@)Ny;CTe@r7M>LkMk2oyKKE7fxKDSjq^ zVqp+wKQ%_Rk0-U0nJyrT|H+ioG|>n9AxQROap+>}M@hq=RRBQm32nYioiok~XrfPo zR8m>Qi~$~oBycBmL3`{8uIIEtw4C;|b@%5%HrroRr3g|jA$pEevGB2XO{6ZFw&w_u zAw;X`f8Gj;l^6RDwQHHVVYY0WyNJRBKRkAdDVn00+6Q8Se+CIYW8*AIoAhqw#8`0#c?kF`4K|?Q zjcJ!1Uv=6zE)@QVzc2)#<+F~(Oh2yg)P zn+1ap|NZ~4cQ4(l^7x|gKadSjP_gkge-a3HkZ=`yiV`l~Q3HJXGXl;z*LrTdPVK$N zdm2?^JT-L@AqgRb0RP{#+CNUs=Aut^b#=MTJU32~*>uv(d*@FZa7;cI6HwxlVF{gJiax?H3chIBQz5Szeab?zzvNwMgghSMtubs|)|Mmv?)k zK3DN#zU~jRcc?!5pnh&IxBB=HWdnbI1-JX+sz3G5Sa2az70SC_IrkBbAnc=ui)#hY znrNrY1~>|%K%6L;UqgafrSK8BvbuHbvbou`F|(2n<$(i3P(A^kRU`!dU7?Yx7LpM7 zd%C~}{8V+tlzfvn>K02%}j<#(#Ib}8PB)K+>1Ifh(86hInz3J^4~ z&O=5;Wt#^sNu*)`Iq1kP3uQ&|s0a;& zI%5irnWpAv9pg>~C zfJTO?9`!JgT3v~cHvMxQMoJ68i?{mJzZKY3V@iH1OTT>HiPNtK_bE|E{?ZVX!1mYj^CvcUh?!kHnlzJeAjR0+QVr-1>{w69^3N|Xq zpsF~;{gra2zfjJstx#}r<@9V)ba54`fYVh@s;g@NNwGk7OE!w*QUhd?46weE zTnF9*%M5OAsQQ2S3VOQWcKPZ?91WCCS=0#1M3w9CGFcI%Jb|U_JS`!f2QI=cpvdOZ zOamlb-57n-8g*JUPk0aZ02?x-1~R*H8({PXMBTQ$2=`cu*?DfyO=p0kY+R9jQ%Wrc zLq9xnd#sG-v3+NO+FoU_%8&vLg*zGnY>@*U{$cQs)eEY1|K%p)?@IEMyjdP6WrkD*4oRfO=6G+3EDF zUt~Pc=X~(ZUulpU$}i`V%P>|db{7{L2(?#Nshp`UKX*luZtTj*4PWX#;vBU$BQACn znK&LIk!*j+sIZ{8O$|WQGHUDyJUwa*={Uc6D8x}zfLw>W%M)M(pg2X*HTKn_kR6|v z1|$!KU_}Oo1@LK~G_T;y341P|b65ZikH=J(S&zoZ;jV-kP$M?Gtn>d1D!A|rodY-80@BR@@Xg)*ZAsYOKuD3N7?%w+no z>)U%Zj0AQWY{`(~@7Vaz-5bm_^JzldQ`hW#)Op1J)W+hU_C#COd=d>PtQk;#84 zu}Z!-dql$3H6S2#4QRGQ)IbU>lnv(PKlNv@-KcW9#8cPzD;2XClE@?bBMYL;D-o417cj*9KqK3>|tP@f0ng(*`Mp1k* z(%EHN=Ew?^GKCnh#RlZgQm7Y=0_s!_h`roXyw}+^BdArC;H?I{#T;kKhqQm4cm(HQ znX+h%0FCWp!y>$bFIsQ7-UMIlC@{NJ(Ap?hSnN-n!4pNE?ney3nXIYd>X7%qTU-H( zlma@rMyh4eM(6~LonAOPp!s5som3-^s^aS(@)5C+mH>ob*TKVo;lLx719#{nnf1fp zZPx#7*?8>8GE8~JRB3QVYlwf@{D6}gMS}oF*4*+<|GcnefFOiTsRMR>fYRCk$`d0T zhsR^$PebmiOx76tj247L%uUIv57ExDeJa1_kgG95e!5{ic1v|bmRw2N)v5N?w}ZkT8Kg%E!}f__|QKDQO;Yd#9vzo#qQoc7#HU){+*nt1gP-W}*HpN7lw zsbK)zJ_^yNzl3e14A-Eo6RLjt;euV-+~{7$lB-aww^S)nAL|}q(N<`)zj%1fp42*^ zC{pNE=jv5CSJD|*3zGH z*c=jYz}gczxD%Bpi(afyFuX7}(#XFOuVdfO+LZjw&(C1+%Q=IQ@#8${FW2+w)CH`b z5I<0)Y+d76=L~;t^k`R&O=gd5A6?Jb3=W;_Q)B5(7Ex=NwH#llx{-V5tv)m@SDItU zSNx?l4m$K-X|g+7Wp~Oi`DETlS5t9%vqNtguybPsmCC%#Zo%oG%ZsDv>>!TPe!h!t z8=t-|FV!aqse<{qsV%vPQ|U@Y8IbGKUTp7sVSNF<>iB=$Y-%Zv?3CHnDYFBchK9p+ zwxmADddi~ES{Td$%GlZXU#>UQqKf@H80X^(5TupaqQX$3sadAeVO&R$Zv-G#eKvnL z&L@Eo7b7b?9{;Sbx{;p_J(_20AuZmVE{qc1~cq~GJ@QphH^>YxAT?rTQ(OR{oX!h z33yCj*NiXX`fHAJ9H)E}xcD-Rxrh0*h|O=5pEiHW=9dJw$E<$OATYk>L`QD8PoY^3 z*Wn2Wjj^6XmX^))8$k8(XI}5Kj98goic48^?}9dEZ8o}Oq|W4Z&%I90us^mEjA_#& zN{yQs-QwAsp8l3b^JilK^=*@6z%7 z?*Leh)cNN+r9kZ5@!^V7!5vBB*=Lyyf3AO}e&(5pgF*O}CW=3N`LJ)_Ct|o``XkKtiZAI-8{YtqJRS0Dh}D}KI=W8zJ-a8e*J`C|*?Acut`2`E z7~lP=f4x3+V1c~^-vIk+kztsNbFLzGQg>buI;x^N-0qqkBB}w9<&!HM*1^^XLEHE` z<#mwU5*^e#jnr9uzCuX(9Qlm`l2h}?@t4cat3UP2=JtN-+o!TxPy`Dcss3ll9m$=*blRIb`g+(I+}Mn7duAFGm@Q*cSyI3Phj zj{NCd$hVqSZ>GYg&LR~`IvZPP?Vp!@--Tsy!EOHkl1diA9>@kTYw)Cs0YVWzsP+a2 zBqvjzNtQ?N z`uE*emOs@uVB_aX-%kOMl*M1i2$v9>S6Wgv`YOct-CkwELg0@^FF$??ALx)?S$N#?>uGF`-LLg%3Oz;%gcG0;^#*`2(#J-_ zsACTR0alDU9>2#F2#hXvDWiXj$M2wEcl->7UC4-Sf4C!ORzL@GN?{C-j5^+2*!t%b zh~w{nsACu_(m)+om4vvt1ef%0sN)O(;`aZ$)Nzc#^xve8Z7{Y&`4i{;@2TSw1;)-O z#gcZ8ZBFVtD6hyOdlpK3*n$Pv@0vgS&M1|R{ca?T%?Y^!`SUxDtGItv&rP{W!-*VM zy;ltv$rjkomOq662W`|1MSB0MIZLCXNd=2>=O}1SlufZooE7 zP9=5Wi(2Z5Y)jdS?#c~l~`xgDXrLN#@RK%$48stZ>EwWiyBFk z8GA++uREoPATB6jX&LEwB2gzmJyD>^0x^iZQ)ii_<`kAZ5c8` z8JUU7_*@-NVcLIyfzUGyexyJ@#I0t3>J)Sh+1f}?YsQxmmR9p)vj1Y5hrdezQWRGW zU{s_`W#kY>!VaWl_#yXBYMQEGpT_1kk=?DqsAmMjgfVJuGf>dq`;HOHT}S1}c{Hqv zen?NOhp(?t6twVSZ|8Yy8$sG*S}W)#^-4R0B>J1%Ez8XlK-T?Ng!JVn#W4!}6PLBV&JD&g!=wmfMWV{gC5!M;ZT_rZQ9p8hfO* zi-A#>#evp!p3MX+UYtV$&)8YU@)+RMG~im{68wLGYMoK9 z{2_oxrr9`+`%0rg^;5e95*urdB=grYR=3fKowL5u!ZM*Sh|Tf5R2%FvC!IOfSKIpg zHi$s;ukpXA`PWEcmpi)dUKT6QaJ*bu-TYVOc~Mw9uSOY& zQ1w%Xk-)9)ECV1KQ*}0V96Mp=%j{nv%%=(?bJywR}!!YjF zz;YvZV7X!B7TaMA=_{%KV&BOye*DYhhn2-RF|ym}fMoP!Zv=q&rK^7r>t^Sj`2178 zfib=Q%EvOw6s)_Z7~0~%E*p%J{}cBd3^Z)?#k;d{HY40a`vGQFIio@OUo2x1^KlJce{trC-w`spV<{7IjV1o1Ca@S*!+h=+ba{AP$* zyV2_}vUuQskL4FbJfvNIGsHt$`b8EGY4czth<`a|Cf^M4P*&-SAs+Z&B7c#^1OE%Z z8RCHoHoh3*F>QbMMHUZf(MS;gQjbRT#SjnmQj}i|F{>9@|0auBofzbcAs*6}!52e3 zq_w`u;vsF*G%PqLyY94srY#Kwh^w->8FtP}BID73;?w}Aj`Z?p>=^KX%~f@5&I2P@ zoz4_|o%00wiy{Vpric-c%aJO64fBW1ABf_5m=E-ARF8kmF#P6+wkt!BtARCGO zNvqcTCdfvvn3U|GMc62+_!qW#V2Sq#DCJ!Q`9y6%;Ic8HZ#(K1q`*giACgpN^e0;n z#^x|7IZXL}eMdkga{2S!01A=CpYIOviK6{{$G{}W`S~6JOwcduPyeu-m$yXaf4+;r z1>O!IhY^386zKIxb!yp2c@FcPj1aEbD^VSLUcmui&IsKv0@nHWiCXLbYP%TlzixL8 zMjH5wjx_*;Y`rO?RvCqi;rt)zU;=}IiluPMqOYWEzx+bR`rkC^NT51|k-kG5l*NBR z$aa524F3a>YycboMSj$Dq+&zT1o%)(0}Z8|rbOhFO2~$>*=_PnKA@HGt^<3~l@!MM>cZ9p9Jy zySH2w5xE(EJ@U4Zq$nVhWnYc=1q)e-AOtAdwAWfDwiMx1v-GL0h7CkmmF0Q?O|~6E zEEzb+G$3Xy1MrcgZcj0!sfU0eLuf=nEJkZ@`R3k0QlnV3h@r6%c5-lA1u&AhH==>W z`+%Cqz{dduhCK9+SXO@Olhwb$yHz89LYO9~b$;i6bH2iyQb+zGTUba!4;#G8LL9PC zlf@@29?UZPPU%iKY`)cTY?0g^A=XfpXa^);H`CnCE5D6A_Q z?f?f^fS84K%}P^dG){JAX?`y|HMp?(o3iVR4KYiO+UJ))d^E|w+=+%%B3-&rwUy&d5#H` z4(E>|Omr_8@H_Mkw0WcXCH-s1-+Ej?zNC9Jr z)hWvdD7lGEk)=XlgV*D+m^4r=hR$)Zt`>jOL11ue7Y$~As zyqY#VbU;{*$-l&CWS6@#5ubnl4{!Ie?x?aQiu!@nz=LSq2Htxo=wXCMAUr(%{g86z z%2nIcK6UEenT-U+06`ETAp-uf{XFIM_dkB47;2U1<|9;6KTIZ7Q;I@;?mQ6-isk6P z_V3r<=j{A$QN}s{aVLxwHGXqdqr~I`b+CXz(J>wO?0h}JE|Jd z8^QkiUGd3*^}QixyIx_h=i(*mdd&4sJuI5IM$mdn6)u~Vy4cHYdf}gBo1ZoIS6s0D zm$>SRD_*z>0Km-z5EFO+_zBd&%>dw&v;pt{EF>dIP=Oirzq#}0_l|57b5W7BA8>vo zF|@i+l;L^HMz?w#s1$!F5L6!obSFuJAE*=~2Li@in)F0l`FC&%eKlJ1;^6{bywa;?Y_E}E5=)kLETrM<4g~W%)L}`nz z0>5p-i&AvvKWN1^y0}FCs>_Rfqr6GtyblK$239dq8GCU#^oM6EYU{(yb-(z;aQaoJ zta8xJWM!3AF&o2NvDaf#%P#`d`20Uzcbi?*l+$0-lw^M$j3PEb<4zpRj9$t)&aD152-^Wf(C{a}@**ccvYNEfmN$Gm+;&Z;VqkOfSA2N>) zRZ6_r8^+m+?|%Lp0_`^w*-ul^7!;@TR@IjmU-O&SJdS_T7q%Zx^Fwe^+{J78g~SEQ z#TQ+~s`P(t3zR@m+f~03y=>OihW;g03I6+as#J5Whx)ANuR6NguLSd){fpLPtE>NC z1e>O_6vf&;9ZL38OMdfV_1KF-DP(>U?9yi7jl23!KwbQo`Q$rN##yNBJr9eN_rTJkh?GJysQFeH> z2Uhm!@Q1Oa8K0^8{-WK=iKgDt)h7b(uX+1wzcd@Fzt(XPgxBkSck!R%wg2|T-Tf27 z?Z1B^^7hYff6&<|&)6syKd#?8zuK>_Qdham`+T)=mCi_9u(8$Er@-sD7FIWi>MMtyk+J@8~f3FN`DoVR}AHC4U{?FJl>AjN_NFyjULX%Uo8+PJbE8hewoXQdE7tc&x>*VmS6Nv^C?%3zf*sf^q-Wa5B^Y=(|@5XgTGOh_77zV{+-Wc z{>En>KYXUd{1}zRb}X6qRW3m z;;!!|zWA`Mf$$-E)#}4GZmLSH3PG*c=a=^R{7Q>|-_EEybc_1)UH%{Pn8KI9JN{^c zJ6lBj)voxF+Le*xDck1*mX3aLs;5d_1n%;m#)B$DLBIAJy58p}-VXl7eq3B2@o)C? zTTb!guah9ox?P-{PI3N+@nyqFVz+<7{_hxCie^LiA|%sEEz`T2Q0RY7WeOYR>Gee# z8dn=CuGDVSB5}_TyL^2nIpJIRA?yM{Ne7-l8 z&v!HUi*p2{;{R(tPx^4IRwsec#je&DyZGS-lZ(FmZbx`AlNWE?{Zq#AyB!22e28La zxj4x3%S6imtj%!phv>B0FVU&Bpjc+#)=#V;U!14c z{TG&A`_;DEf8EcI-&|__`J83%f8Xv#`-}1vnCr@D_s?19zuKU65yt-kEonL1f5uf- z|89NP_FWvRFHCgC2E3Gbd>cya9{s_?v8qbp7ccquSStCPTmC(kQ&oS8P|*I`pHF}J z)XjglKUZ5j@J;&=o!++&zx}WKIqqRXX%Z+cKTPR|tGtLv2Ce_#6$Mr0liv_{2^cYE7ic~T%XfR;WJN1Uy}8o^?Tt{r=RZ^cB?NcSz>5G z0-qm}J#>pQNPh3%hsb|3sy|3Q?qc#NLb-lF{4k>*ruZKyQTfwfoF6*)I?v{Op1#V% zx0=w(e^ahr?@ig3JF)3Cuc<19edx;%r5W+p`Gmy9*LgJh*Y=^)hpZiG{c4Zymwgnj zY46KEey6|e<9GVYK7Oaa@!~b@f8)Q?-}vwJH~u^Qi;4Vll|O$>{_+myuiq-%bq+@tpK0$iU8<@?|9ZXpOn+amzwyO2 z-F)Le)9p9@Gu?gTKhv)-!}*J~{4$)sSj*VO2S1rIKg8vy?d)F?^M{m_zJ2sdVt!c3^!OnzKk?v)XZ#)it51JP z%ujq)eR<12K0A=##N~&fEPjZ~4=wq0emKmFmZb6D#N{V;`XMequ{ra@VSZxCFK_uL z=ZXFhmmgX(KYobI4=qXk<}g3BB=h%ulTKt-of8!%Q(-X_=wH`nuJ5eK zs+Nrrf{i+^lqY|kJF3=ot%{Yrw%RF6FoJ}lf4=il^@q8n{(-riEK2#sU}B#uV8`G2 z%f-5`*U^96p^OC$put8<@Jx*>q#h_j3cxbBmT2SeeC5C6 zH#dK3{%70c7hi_(%jn&|`}ySpcj~$l@{8e%E|yXBuI*m0hZav=`T!im4A-!`44<1|6xJ95AV2r+s|jcKRl+eUCY(K zj?ejNpVxBYXTJIJl-K-+p=^J#werP1N)&Bg_5a~(|AFoN$Mt@W`@iQ}mw(_p|H*&$ zNuPf3;arDb{iwSB|LoV_>%085T{OjWzm~Ho<6*H{)HT0e<1y*Q@ru&5{Ptli zuloIN=k#a$Q#abY7)_V@-(fR!t|KKCvl+_~Mmybkwje!e_W+=Y0D6u$t zt+2O5uZ*){qtWkE$4=;-Hj>&*eV|Pl_ZBau=*B8bONVuH2QHqt2&SRf*Rg+F0K9^_ zSOy%mIpXe*{b5CV1Z-?enBdEWbR*!a#$g1W(x6DwD{F%$VbZ#t(V|s z*Wn`T3J~?MHCBA}ar5z*e&T=W3P#4xqf8-m6KWF*tGC(SOU!(c1eV83Za!%f z7eGqEY_{~&DY@Uz5HvQC>kGN4ABw3$1LNcTJCeMOcU3iIOot#khedzvtXq}&cs3pw zFp?Cadr!jadjg*HUcjrXDPqb_(SY)km~DyK+i>bqt_s>W4wwi8*tAi56?oh$ijzZ{ z>1@8M?xNnEjc0#Ls~H5%2}VlL0EYuq9U47$8EW`MZPr23o3EQlVFXfr`>Kkx1VZjv9l^aEga|pF=Q0jWg zBqU#0bHM42lK2KoxBe`YtltFZ;l-JP4{xImxRBLcKU=1W?xTMsXjomFo0;ulOo-N* zI@ws`jwLq~an_SJKD(I}F!C5d&8%-~#dizx$d|s8owL%(OlcR(QFjeVlXfX7|CUNv z4o@EVs?IQE@Abn_;ls;0^#z@dAG|$7Tw$I_tCHE`q-x*ysVc(B1P@^^v={tjLGtMe z0yDFdvKxhfM4Er~dR=!EP>i_lknZ_H?>7mz@gih)Xz=nqoi@(u;qNYJ5yWT{#%N;* zXq)#!ajkqK27Yq!4T6*+EQY%!K*D_1uQBolW(McC8x#mOa$1EnP=vrqS@e?uG3plB z%KY$!$2+^63O~!YnheT3z{N@RanBmydmuVYHA8e}Qp_5spyB(r z5Hn6)W(zqE>=Yj$;6bs``K)|Lq5F-{(U$8GZ^K^{e`^-rMbpcc{7H*tZ&qE^j!Pa6VF{l!Lsq&Ol6V1e!?95`Q8it}ydYb@ z*3-wN_xhxBTDn`Rs(JgqV*!`%C$BO1(b=>&R2wcoTIYRlk#p=0@1=0B6FKF5fm-Nz zxP*W0fUoGCV7o)=UO(*}lut(09l5n{l~*(tC@Hd5Sq;_8Oyx9IDsc{1i6ym5hvb0m zJE-Dzi3g<+_tB~NS-28tPZ6KI(t&)Z$(bEb=OyzKZ@>swJm?L) zhxU?UFze0UR)c}+&j(Y#_#g<+T{q%c$M9oNXr+8$cw`HTnI{&H!;&k8w~0T9-9LX3 zCVYv}!;^;$)-^0qaN4#ol^v*^RpZ!hLMneI-8$6z7IYP7R3Q91-YH-DP*4}3@F+#k zqL0{BG!GnlNQxSslU>oJ3v}yH?G>^fX$_qF7=%GrJ9HTqX75Bi)wwrumR+FKR6s4Y zH9(o8Qjt~*t!RLPK8?bI=P5h$8f_PN}WE$^MDJY)H$#&3d9dMPM( z_!vf&6V5Cf6V>Z*OE+#{;bENE$#wvanGjbBx}3HIBBVO-9W102jQGCQN6vpBn}es@ z6SjK<{4jF?s7PV853aOsC&|6L*1%)tiA3H4R?c|FIBU&F6@v+VuRW}zRuX{llAP~u zvKptUDN{WiraNDcOAj;`410`^boIUL3D*lHSC%^>b50`G%-OaH zfh~cRN1{e##y7+qbQ%CpK(N1VcmRg(oZjVW;5n`zc+YDQY!NGUg##@07pg;91H>j- zAZgU&*2wP_iwT9cF7<|ktn21LiIitqZZ|cq!lz9qa>Jh%fCNZxS6MfIRE~m-ZsB>z zSoB(hnO$oiseZO`B<_i>-CaW;j=%)Y!t~vGrgApua`-?O$F)QaE)CP`5JaHn=UF&+ z44WFUm5)^OgLIQzI%*#D;2=}b)MvRscgD%o^`slQs zq?1bM)WEz;$rH}{Lw6RkVZ0hSf28-$OtnFvyMca_%ASf3-2xW2qm6BLsU+Imt!Z$QR7^9@E1@KwA4J~o$jC!vf7 zoQG`!TgOF<@5-k=oBD~l)c9!6I2M}>DHysGC!EWy*pm*m<^f@0Y)iV+1btQ%|8Obw zpgD~83K4@K+`*cEt(efH1P}6HOL-J6we!yX-W-$#ceMWAK#s~^mB7)7%fmFZnc`U| zA`>xpy>~f3rC^-N2AsK9lTb3Ze!9cf!=bzfi)D~jAC9{cD2bw$e7a}owPdps+s?<$ zy_50s_SQ7BSD8^A^`fVs1a!}4$DGq}E%v_XLt@W((PBV<-wx}+_qKf$m1~p^!sF<% zIn7An9*Jo#u>6Hu!Wo*F0qhOqM$1%8E~{>pNa7;bxC|UOQoB z*=;K>0b7=KDICRj+UHm~;W<_^cmuqI!XG<*t=>=`+)m6g{emBr<$U)va9YJXWPIrD zw9|P zodR2KbFcNjcCrHi0)v=vgNH8>Ha2ic$i13|l{e#mi4+2MF<<2KFlO3bsw5Rvts9j% z8Gsxy)TkzfrV6H%Oul@1u%ew*!$Nrcx5$znnw>r@@xc#RjEJvjPRO;!bl;!@)~2? z=~5mhLUs}zOHV!3li?8Z%{DD*rXz4`G1coHW~|&Y{YZ&`fX=L38XsVB(!4#BqZOS; z7mLi+$u-gHt%CU@{>GPeS2NYk#>U~;d~R~Q;OouwRYOCUZaKv8w)%UX5rCXXsGHD# zs8yW^1+C5j0MUu)4>_HlkE8N;ZB(Kh({IBm+RI5?FEcj_SB20FW8IwN^WJll+&(9L zY@f1@7}x-9i4KnbYvx~IipwwDd+OeJ+)(0Km}0p=#%R<-4v#k)e2K6or&I|gVWK1> zbMse>1d9spN7{mb6F}c)B}exzn`1+N%X!9HtOv8v5g_(U@rh`bGraC zBZW`fJhvwkcZuXbkzI8PY)2(85oNV;;i&4(G*xD&pmH zRSO7dw>U_m)?d#Kc$KIJ?C6%(c3Fk6q)WS6-}1M0q#-cnx?FY~Zv|9@O)ufS5Xj98 zI*Yh(o=8SKmaGedg&7ES0h^n5!D29Pz`8JtjDD}>7sA`ojnL=qS(bWo-{l+%o?Fr` z3rPx+lwwU?2U=hNq|mE7E+lq;GLkh+IFWFF3%7tMUtp$*Qrp~jf(BF17*HoTap1Io zyc?cxVTUCtK)rmbb}|BXE6AT(oh>iK;qVPaiERPTdp$1G7`ko1<;gMFY`xe}hU*gx zT4)MCQNcC&(98PA%pgx7ctA7o@Yk-I@l~yF%tofkX)HTNu=Pz$U+e3CIRT+9bQF2o znmI|3NYA%0HD`GTSe=W?lTE-<-+w>k+9@2WkxIVU~Fcbv1d zAMOhctt_>^!rQ{OH^sUsJ_)E&F9_RVruURQjKbaDkJH}~ExK)nz~|4kl+shnBy(7$ z$W-!TyB=-Kcj8dLdB0mJ%iv$zoovw=hQk~WeLx+lo6-a9&f)c(ybY#@0!9gAJ(?*?|V%11}rqLWnlIu==gbrlqB1!6{wrpC_e zF~oFcw_p(UiA`-f3w9tzuHIyo(b2@>9q?782#$qirl$clq0P7&sW7G*?_Ap`Vnp>v z{Bp3FGD(q+`?Oj1c$rg(fB4Qt7&QqQn!4e(db+$8rD>4KRCyRiITwxS%Z=Bi4yY+#Hh$v9RK?u}II(ZTgF zH?;=)k{w=sNp8k@7QuP;jw{1vD+A*=irhMm5_#V0s^x1S*JjL79mdx3WWgDy>j?u= zyPjHqR<~$gn%V??_fP@8qcAql00FB!YAOS3svPqwhW9F$_o~+K*Vh|(kHuT1I*V3> z7}+1>eP~-axq$T~tz0BRy8gM#D_xFam4`^3aVsA@&6?A5-NG8Ho9kPvQLZ>?DeVPs z8Z(CoI)y2HXLS@i;WHBJ`w&X=;zar@*Q6VNnK&DlIuTDi#YYpJw-+C3*sZA!14*zg zifF*ja;6`P@c;RrQ^;BRdtB%8f&ViKS%d$ikeA5FyB`V}{7{AEH6W*2O9Hvk4~5Jd z#lNDE_32+y$eH)=Ddhfoz|Tb93vcD*4~49f|B^yJ&_5_-Q7e?>heBqV&x@b!J7uGP zVGVFy&jM`=OBM?N17M+pzM7Lgr2DPYU^wkI@f>9JHUzvDc;uZ!6at z^oK&G?dhLT$fREcmTb19Fr{K%7Zmxo>X$Sk@lU3XRWem0a6%`bV7J!?1jgE zP&y$ZB8Lg~>-tMnI=Kke%*8 zqGSr>qLj>HrMz#)(N9fyynAj?o2o+QXuj^f4;fgBhcM0ND=8!5iXq;4j9^somfJk_ zt~usqf!5;=yjooeZmqLf0|h>iHbRK^p?kG`>q#b}PFcxLEPC&y#7bCyJ9?8+;qzkS zATDc@uLLVN^4jy^KaakVZLl07m(^z9mu2-iaVPNVOZ8M8GS#wMXgQ4)Mo^L&-p&}s;-oItG!Vbz=)F*=Es!=|Szcz21^m>49-%;-)$WLJ z8wE8uIZEpej)ta3jYI2yY|xsaKl+(`&?r2QS%5{g?(6D?W_{AAi1&!mxpC7Pj6>`d zRRDr4$z+@?-DT;F_V0YNxP6hj`-n}g5cKyuddtPPH<;{Gc)(ar!IjNBSoR!RTLmV( z=sH4}J1lCJ(4#EH&b_#xIJd|&>(*G;gIg#ux)RQh9)^XFbgbWhlwJo<1$Yepz2*nF zr@}cxi+tZb)N$SP(QkO)=t(L^rU=Adm9 zPEtIR5L|Aq9F7$ncIg|GgEiLPUBsMr$W9BGJHWb~6(kz%Cja#7G+iYq$P@HQuemmK{xz+?!`t+3_>>Jd&ZmhR=?uEs_0!J#HsR5g@8D zURo7kS~MgIB>NDk>mv;j_c5;{%j?`x6A}H!JFu}IQXX=DL)D|=7%bpY`LvY)uN;9> zhgw#1H@F6%^Mk-nRKLXl@KV#{W(xU10QI^9#)~{0F0F~kHAXC5=T3i^hO|p#`R>@R zc^KBa4efbDpQ~9LDm}?ec^i{f3w7v4K%v~n+8%YMQKY97AG8^&c*T|ebSs5M(piNv z&&3#+`%T1u>xUnMqYCjPUAk9}A;LkxX@y5?PKnV&V)JS!7fRP5a zrIc8%FlpsrIu;5X0|C@K@<8RML_(+0B|Uv#4ZC0tC^h?7jXc6r3Vpy(S79|z443t& z-b@{Pv1^8UaU%iI@ubOs374XD7NzS_pO^=$NDJ|Q@-wq-aVKFdnpyysxDl3M&W==# z^sf4ce8VVD3OTLCW<###5r(99X9sP<6Sx!*JjyE@ewT$;9PYG~BsjobiC(E*OVR-6 zkjSE5i~h_|mVYXAm&21EKAZAz=Qm91O81By7!;)FxrlHQDI=7-n(FY2VN)Jaw}j5> z#jr7d6qua2`p5(_c74tZF`D&JMkhn+2J!*?dipG=WtbA{qMK+K%uP_^#Fp$O9ej{d z&6RGW=qQ^!bMZZ%xAPr!M~GN5e#1f{$&EbWi+;~e$elM?BG4r(0*Rv5q&-vvfc(dU zAy$ZcfNhy6e7pA6^k8_n@$DP@dME>8*=R3fHeTeG zLcL|MW?a=k?XdN6y1`5fbe~!LQx+c)p~vN%okCNieRj#FHBGX$}MOogHT*9KqL^0TgkBR8g|wY%j}y zT@*;*fVoGqmhIxCPZ)vN2Z7uQ(Bbz;ZT(w)fMsx2&(FE7{k092TOcZ===x-lBx&N?N3F$Eml zY*c5k+SFwpL?Sb{D4n6$$gE#Nr=tj9yfg|o#uJ~)QeXeE z-o-v%>=IQf(pWDj4@?ICrtP3pmFf&Fbn0}-gC2ob@OYQk){N^do<4Q)b`B*pUum)E z8rttTbh^Q!?$;rtYuCM}MRe9Jk%tMum zbvsmyO7*}Y6zMAI$@?$ zMjD%&jii6WC{!7J&*qeu%bQZZhz6^)GB2mVL|*x1m#kI8VpqNt4OMKV--WwEYue*+ zQ*JnhJzgO=rh$hlD{taKE$l!`FeB-B>%7_#nnmoQriVP)K>5nWQGs7xSaXym5kiK9 z>1%9zwJw`8)geVV0rrG{yRgWQVtl@*Q8~N!8;)_m@JuZ4X>bG?pd$t@!EJnSmBCOj zhHS6gMecA^DD#ZY3Ewu*;RSSRGX6mPB0PUM+*oxZ**C+l?lsU3!LGydIPbpWWR=u^A-hYVdALPd?IoEE z`U=Wd?BTBv=2hILg}FW^v;td8?8qF(y`LZ;y^AFil<@Vkk(UhD8yc+~UE#H^QZ@yW zvGIIY!02jr9%V1-m<4tgs0`|?f-GB`Q3da*tZiQ8PQKy&Z1p!TQuZO>^#Re-v8^=Q zE^hr$^Zi{;tLYto@FsytK1&aURP5uJrmnr)b8d7!kRnORVs2xm&3}1OuyS~{ zk6A3P;S%*TXfd@1FG zW)7v{*5Q!cj58tkc!u3iADJvGvFlN>6`h1=kUP4zQZX@qMsCJHt$_4yB-ybOXUf=A zL(=8x093MAB&7XC-LrJZ2vHFh6r<>kDy=1IF&4!uJ*{njhimNXhGY3oO&0s0jD`*i zF{iRSLsN22^%jF{nnxWTt6MrxK^|Kc#zK*Ijw>vj7A7vI9GR9I?oBxmt$7?^`3@ag z{8&6Hx=w|EWQ%ipLWSFw+Vs(jLGk2iLKD#~Uw_@FdHvpJ_s8&lZ1+29QRW5C{IkEC zED~aoE%Y4zvQ~a>sHIW3c?g$4;n6k)=IgD%#9p2P3=E!!;32TfO5wq{lLk{TY#>pn znut4cVaFH}fdT`Isc@HT&&8%rXssw$N}2_D0HDayZmNlT$^+-Z;cw zo4Nq$kSeEYr<7t@kVWO4)>eL$EoWJfm8m^19tZ(xa_^kg|RPp_@r<=mq1T0Du7PmAD|jrfvZ7 z0>lRp1I~lI5y%4V$a}ea$b)u3Ss-Xa$v=;#J@3ogx>7y7Cx6ax7~Bjoeq%o7c|*@c zhcQ7VI=YCpiZh#JWWGvcY_%9dAj$+#x!qd$BPU{Nv6|P4b&$6er%D+qYguoO+7n`b zr7e2R``RYlEQD8htnZ}uq6ntm+4Y81qkRH|osj+fbm-V`daAVJ^QYobBc)qjxhdOd#1N)q9+)yZ1F5 zlz3q30F$#wc#CH9?2?IsvAGG}`YH;BkLWbTy*k(S^#*?v6t=gwW5>XEbK~fFr`Za{ z!e>Jzp?+U_euQu?Dhsag+6hhQrO)c!OZGBCR~Rdo`;N~6ttp$B)Ao* zvIMz@bvT$=y$R$x<+jv;q`T73t{NY_qCIHEV2@Vubep$g?M>%47V&js zrbFdaOV~Ty(*v6YgE<0z(c44jMoJg0OsNJxwQccbDtUNs{(=mgT-TeyEsaR`ytlyh(*@VP0J zV2-3~{OiHZk|hW$P{22(Fgjrp?HBN}yNtWXgLKmYt+) zp3FvQ)TO>Tsymk<;=a8LCdxn$jLxP6CLei+wTa2&{UOKNtFBU0)WDbh7ROs6GgU5+ zc8WhtZF`vD{hcd+qzK(p;$>?_7gR)0k^N(P+!FHI#R~ko0%1WV~ zRrE%l%64@leD4`BGD-g!`-y?~x1zzFHQ`# zFU!Kyj;K}+NA3wepA&~6lASqN#qMN=-I|?f7#hp)2SpDRq-W}4Ibhodv(W=t37qKtYzmZ&iy**9o5*qnama+L@R)=TM3x8=?@Kj{poYQ+)Bc?iL1}H|? zu{>W2m<+zn_pa`tmE^|Ij9f88HRjyFfUsux>GS6RcPQ=Ko(3?C+w+R zWy!|AuD!K5sQR@6g(wK4rQyy@h_I#A7{ux9EQxx*bQr$uUIEQZ0pkztvfG8}=mL|` z-Lb7Rv%sw*$KeWfN#uv~yW|L4jmUSc-4M5bvj|Yo)0YgemOZu|773^QcxhIW*>4mx z`tjsW=LKTZ7FW@HzLKU4o;@#N_nJar!@1R1>!2_8jhFP9TLm=F0%o>|D;@xi;Xpx= zIcZ_&jgp@$rJ0Y~o~t%Os;NH7TY=pG>%QGGZ9{LpmF1vhu5X?m9z={a?~z$g5-$Z*3niRn#{l=Fxwk4NFo+>nXiq^hi>L$4n}z3t;c^%ws6H+m?xWjEk z_5?A%wReKI!`7?!KsaCb$BA~2K#uYcPkOwBOl#~G$}+?$79}Uq=)3qj$=fc_?n<^c8Rw*b0mBQTm`KlUxr7OF*@)DrrO7s4=Wr0I@ls}cL5O)L?nW(7`*TIc zlep{65@5qfZB8XnEURZTN3<=n7Wu-iMx6r>{(kPWZI2ipDDDo#Tc8o%CshLQ z{9CF7n)m)lmGBwyzTQ9hBUM6!ht@UEh1c?|_DPixh;(%2HNIp(ilSoSJK}wfaQ{x` z`#V)acAE=|lA7XwuT%+~c*+JxQA~2x3jKclEmcDK`k$vtm~Z=?A&VrS=+CBIsS-ZC zH$`*SCr!eYqM*E(dX3?1%H_(GA&b!B3Qmv#Os5hp;x_63rR~nv71b3+VPB93prYVm z0-^6hk7tO|Dj=wUSN}gjM^&vg=bGR8zrA-I)IbvU25azMBd$BUr_J>Wjjj6U@O{icXA_q7rEXmT%bqz~^BHHo41i_F&!)55fpP?)s7P}@Is*9}KhEfU#0DaZ4FV+dTbe8nWRiTx_!KVG06>o7 z_!aVu0AL!L)~jDloc^F2dP6TiVP;knhb9sqE=yy7)88effjHqMZOW`PY7QEcj~N7L z4~}GUQ3O#wVjk;kPtw)rIvPiGPz)Jh4EaehAb1?$h?b>W8^5_ouEVqACXGSmupI(;X|Ih~>6243F zSLxP&n9@KbON;06LLndrd`N`v671p>I|zQ4qA%ir98Ymn&Z&?Ia>Ro%_d*)+%Q=i4 z3@1YYqXI#mV(m5*`V<#1N2(zjM>&rnz>mbGIaY&t7Lr9nai{4Oqk?R>!n{5ts7nPP zS>TAUTuf%pw>-pzMM*7~=l2}ZsazmfW62bM1k{~s?kM3wLJmR@J`z(nD15f#sD@T^ z>4DW5UqoK*bI`0Zdw1ZY;$+PVf{?U79+P@-CFLqA-*R2ZgWu==VIU$oH&T$t<(E2; zU~b@LN&1CBNPp_&4|m}Fk;gv_#SJ;2QA@I&U@+D?f_P9{RFJk!xCr5%UjPLb?RRe@{r z$RpyTM<b_N-ng_|vV{02U*2(TOfeFt zREC95+KMoYCoag5!UID9tdAhDCXmNG1d0N%0bbLn!Q9TknSnf}Es*D*(c*^5VkJAU zbmeGX+To|H-bR&fRF4sicS_o3@F^Qih=hM76LQ}IOPQrNbLnH#8gzyr<%ODm2)m;~ z7qR(n5b$!ukGTwTMJ8zz#+^^S_@`IJv|a~#=$c9>q`S#x6$l0ejtUP)MFKfz1wcH4 zcMhe3dkQd>#f<_9wE=K!$T-${=jw@htMGIDpTfI^Tu>2@F)#tmE$g0xg;`K;rk8G1 z!3ZHtBRIwZIY&xVBub1S4{`y2jMf?gEDV^W2lG5Y0?@gX&tqXb$`^oZQ^*ws0GVk< zwWcUeurMGq2WV8w^8CLEhSFbwx3OJF3cS>L^y!Cx>b6mmo5ezI$V)$1`Agkb?O_H} ztGHpl`juAzF@MP`n;rX_*_ixJbk}v|iuu+`z10k`&V!T(dCJ3Fv4tyth9Nc>;Ix^Z zj}e^CCBT>`Y>xnGi685uKgAjkhTDG%PZsI`zE%?>P=NI33lHR?N!fnul27QJWvt^< z()MKaoo>ZKalyGUC#e-6re%JV5AY83!%ox?aXNaW!!;UB<&@ z3|{C56QkD6_Wo*<>!%kPcK`&geMqsXLq#F0tbC}FX`xAYkQ>r}Lc{#j4N?y#1)8~$ z14nUT-s|JY38X%1v_blHK@0#a@)YG5Nnpel^`H*OW~7Ek`G?U_Tw_H=V9MbJ*UNb+ z6tw{f;^Q0;s!LSzn9NN|!jG74yfz)MU5JTFA@w&v4u%azt~AnyG}bBvq^+rlpL`Qw zUtRc_abkLmo?yg(5X9B0Ye|2DDk>TUcXZ9q{0tH9V;KiUn7W9x2M2%Z{D-#^Od*CE zP?0t&2oxxrv{8cv@&L|ds&g(1zz`~g89|0(doFdW4&@755Ev4`xe87#Puo7U1mjcQ zkUFGTl}UcfOL^n>Ck-h;Q|v8c_osimnI3LH{vlH23F$(AjubeGLsS9;u+QUpApNT& zYZphJfdJ)HCf)4|BH)$AEA$Vr}$Id{PD3{++R#yuqEo;K7%INX%wIZ)^-l0xQ$ zr9s;J&GdY1z84N8a zLe?1s@@vh1fjUJMUZ}1g5@zXpK1A22etn4{&#ZhJqzrEf>9?!TmXte1MFW4!UH+#%D2`tsj3^MiR;z5y z!tv~{7B&Mxo3w+Y!H4-mLlmT)0q@|0Yk%m-WyG?7zme=u__KY97*PR0G8K(7q^@SL zv~S^X8O0MR@1=cvpY{!chY$Ikzp`s-SsD?Od8>E=*LGQQP9X#fMsfhugi7KM5n``7 z5DX|E|7ic758>Kdb=elf-ey(-1^`QdqGJA?TLR$8_hTsf&}_?GXQ6jQ==-1de)apy zwZKw;2BWS(QZDqGuxbl)h4_{Rxk0EV-q_nWX>zS`Q%0Q3*cd(r+5*%K_y-^V0M?2RCFbH>zb!s(e}HZU)F$0NiPNq???N=|;{aV`*A3h6l^k&H0Rm+)fz)ZNXh zR-5SHV}M0pG3yjG-5`)TEfNH?zY74#C9Zie8p>x(R#eRGD8cHhg%=_DF4O#*EJ3v} zd|$%slm4If^d~>|IajP(ur%}rjXfmS6~Lgi0xT3655bh-R07#7!2IanuE#Uq=TOFf z3VB8ZftS1%5E`%R7{MHQr2ghMwEn1? z)dj+30lcKcT-tYQ7g5}TpY3DKf68?^o{*2rWANS&!}muylJR#rmRkOvKbxNW0tIq- z=4jhEh2$CvROI{Cgr4GGh{3i#A^%3pZuz03D?RJ zjvqOeQi)eo(IoSK?8=`p>7DHJ_i7c_D##6i2e|@9gXewQ%EtG-fB)3ASH~ZJ*Hl0< zBH)Fi473MEF(AK;(0VSzGDDV9Q^{?L-?Z2dCnhiV9Dbcgy#2Imyk4#w^M7e8vhD9> zegfX7z3;BVNZO0?9YePl7GGb9BC6zG10vULLf9Sw;^ml;_;-8dFka?glrr$U+-k_G zX#gM#@=JaK^~+|Nlj5NXc}rb?9Q7N(@xsgra70v{$^njL!C#y4wn_kQRRL7ojm#l( zgAV{Bnp1cuT-I&(UahlzNQDE><@zjOj$HQ{s~PaGk<9;E0EBe?ng6BadLk4HKOh7n zDh&A-f5^Nyhfz_;mn_p?3gzw8Zg3@n0?KTwJb9t?T2*;&5YAXmv7>t}v4M>*q zl=tw6;MB@gI#>iBrdAcDjj}Wf;AHHx2=E9jRS9gx!(Df!G`1oIAe)NGmr$B1s{Hq z8-`qe{WtWQAOYb4rW`2PTLB^G zoe`PPW`G5M2%Y8&YR+RY$7E4n5?_upYK9y1ebSB0_sZ-`uTcQgd`9Md4F?6RQsnc= zh$^rdpfZ|0a19|DK}KHB*z5sh_}hpm&<)TYHA1)YH&O)M0O7dYXBRHyHqW*O_uSlp z`0rlxcgO>98jTxJMnTPC19`x|a-4&nf#Zv6L8?=Khg{~VRZd=Sz)_$MpI`7v>(_7r zVk^g4=}@a&KIMN;!5L&h;W_`mPr+HD|BHfq#EAJ{QgF-92Ki;_*~PW}A1Ju0cdf0u zYI1N9ee#XXQd#(%EG>Eh@&Gc-=U|BWEiibcXe*w#P70egdkX+25GabGD(dj2*3@bs zPsCM!ufst3*}-_x<~GaD{9|IbvL53S4D#c@rQ-hhrvso9GC&MsC#Z7;JsALYj>`hc zUw82!;M2et$5@K<@*2d1Oo&g!N6bOIF{+r5VJ85>6ItHgM1S&Gz?~OfLO%gs=Z!*{ z(PIF{ph)f;OgI2Y6$WSuiUf1qBVI%%Ke)bsKzJOyb{ItmhC;S!laOo3m&+OC3Qs7d zffuhbFD<0pkTU`Bl~mCIj|r1f7!c7U4L=OpZ5DIHe+eeSK?oWP0=&)_%5H#10na~i z>yskkg>WO(L)a_rhdDFFg*_w3oP(o(eZ=Im64S+s4)H>{A;RGBqxIpxm@ugGUqoDg zDAohNaB|bn{uG_?V1$7Cd~PJ4Bl-NH8#G@ma`OI{6Vnz#PB<me?<& zxBRt6AVD!^fvA;)YbgXpY4frtk2kv0G@MUoC_Hj2<)XeygfVxSJk zqa-5z1hBpwpAb5S4-qHk`2e_o3SVy{LZ-IVL!KWI|EWL79e;CRGf$YO>cg;!Ld^3T zAxn^J^2)p!Q90>^nSzpoLf0LI(626ps+%X>4+`C1f5V)iAmM292^R@d097Ff$_W*! zFaWW@J#e)3ZQ=A5E)H{qdYjNf$N?xW^})FMV);YCA=THkvg9?wxgfrOEfm7fxxZhN z`b*06!_a*gFY0b>N3O za1~m~ofEJ~n-bim4e^S^ZzY&RvNAgwA&+Zq$yagVsl?*nDKwFg5g!UJBBaBIdLz7r z@}c4g5%Hnm5<;PU!U<`A;eV(&ex6{lgr5n8_6aA1Mg9q&=ZXJNbp5|^aMGU~T=a{B z8~>Gqga5+8O+Or*?*1!5r~XOMMamCBm;W1E_8+M|k~a_M%k3dT?eUWc@_tZ;{!5w_ zgva<$Zwlcq7V1qQJjaKEt92a-pKyN|F#b?+gxdR1aE-1b=@U+W@;CiZaqc|vAByg- z>qz~CGXo)t77~)@2EsE*R6KfvvIsZG1c^Lijmv zVX5oi_@elq&V%%SdH-~tpKuWVOa>icA(?34_zQ(;mSb)cHt(M)#}OcsUz^dK$EWxn zAA^wp_uSkGK)f*s3$|POP#9kop2;1STna7Zq*}J!mw|#YnM;vzRH3!m0tku@Cgr;S zlN5vepG+7s0MZtA>KM0A!kXDiOOD6>^)vo&_DctnLTkT&$+?*)KL?4IaWyF4WVLez zKHrd!Md^CwKjUUc#^&mWM00NZ|6_uR&{1U05m(NQ(1q5>*QWF#ay#LZ4w2MfiOd)O zo}1AuS2 zxbc;cc<$-nXy^G`x5EG2&VQ4G#?sH)^S`DW&%Zc4LT%=;l>9x7`l}8?K>Gdvf@gz& zjrX_Y8~;!HBjdzE3i`C{swOs#2q>Z3$u|Db zY0I|rq0^Ra_d};G+ukRBO1Aw^{AD}%#9y|5!%zHWJNi;<9~$mUt$oP1A8KtO-;yu2 z_KEkU);{rmsI?{D`Ae;R2)Qq{_KC+0Lk@sL=5P`q+wilEh9O7a_Z71JecxE(y9?8X zm-wG;e~JIuj+gkK?es(0eQ3D$L)m?4xDRFbCFI%Z-{|LlI_d%xmu zK1AG~lw114#4SAAh5upV2+sz0zeqX4v&rob6ZeTb|1fb&T<#Ysx5TACMBE>3CM$oK zxP`c*elc+i&jx--xrJxT-5(}yq1^a?UrgK**Z3jjmbeA~A>w}LBQO49;>bBF@edQX z}z z0(=aG-{B%a3N(I)OaB&bf?EuK&%a+=K)}K#hr&V}0l528O3jB_`rO})M{-{j03#=a z7yuRqkKaTW_+JxZ{Qs}xB4C0)kDGt#wlD3)1M)RA+IEw5M%#9yJ6rjlvfJQ zgo=_7A)bDdUMT)WM;*T;nVx@$JS+ca;%iCg-w<&BK-kfMr~fWJ;fL~n`chQm=R7|I z93qe2=U>X|w$N(}p~VsMtNTTI@i{o>AF9hDl$Ah;wg0p7{*>c#9`7&V_WTlK)$)3w z)(mo9!$P)wU+ej2|2Fv0Y-d7sNi@mFh5kww`Y!(q$@c%gp8Q?929l4(LLB}z{%;aY zU*2zeUtQj(mDQF|6b(0og5nfcj&qAqLSnnGmJs%>wo?y z7~k(-7Xnl#|DhbLfWVO-3|)W(AjpLnm^JymB?6ic$@BLBr;rCo1-OO$hz#;O2_Z)| zaXk@6E^rJe0u%s$Cx+sPV&K$J%FzipBa|_^0P@u#-DKP z{g8GhG=7J_e~05=;U@kSzWE)V{tEv^=F#XEnTP$~`^Ue3_Ad&*!+-6cH^;vv^_FnQ z1LG&(uA(7;X$klJ$^24&j)&F;{)T)0{5#zE6>fs>_t_am0l@t)cs_6LE@NLlF8%*y zqfhXE8GR@6gtp+`{wt#|7?tqH=xe3wY|{+zVqOInFMuZxbAS{Osv5WmDwv^ByskkW z4IA)y!o8b+jN4r=3PE3A?yB<08KBCeK3A*P?2JlnY35pJUOw;(RWmH%UP;DOt{ z%i)q;76 zp+3Be#OtZ;C77RCaehBFGnglJI-4#1PS%~2bX|~t2ca@V`uoULMY`z@MP5BiZH}{@ zJed{qC>ZKBoSN))OKAEcs!l=6MNt@1qFZ-fw}tJUjV=f34iZuIhK<%#S$B{|z&&`W zP;w~{w1yV#YZD zV9u1Rfvf#G-9P5oJ>x@<`A|_$*347qF6+Fov9I2Z$5af_o3gu=A87tdJKEDOtJnKk zOSY`NTL;N83<0lON}P|ga1wpHy@m;MpTn1b&N}*(9239349ZJ;de17YwfEp&jP`sk#DhGnobbvxYD@yP*wjsC@gM(q+rzO+M948_W0&_F)woF{_s0HCAa%em!{&l}Q9(mw&O{c0QxE@sJS*%5PZ-GzD`je;Q=d z%ICH#!tKC%l1vCl7Ci8p@pnD&lI`tYJg(d9F*&^-f)3Ui=a@AcUk5QSEQu43AU?kn zVI|AC_8PW6P?*hF>AP)Zz}|RhxqSApJe*!SR$iTMZXD$#yK<}@1Je@7LgC`6qF1?j z;?zBL+jtIFzY*8q5eLPrZelrwh=^{RvkW!Ptx&%Mfp8DNBr});N+d z#}D|(XD?s3!E2JEdKlKZ5InkYymVSQ9<5<*3Vj|5lT<%mW4b$TH&M&$sZv!Pk4nB8 zX|%@`v>(G((MDHj2JL#2FN!ip=h`fbdC%oSsZ5ksO79C_4(y&Or=z3JfAbOFYx8>X z?Gp{o-5!Z3bvYZ}x30h?2nfdp@b1e~lmwc{x$MK^YrNX}I7%>1Fz$u7qF;^tfyL36 zTQR;K4HcYyAm3L!mjhYg^0axr>7Z>HY20EfJ{(+Ol5dKAv5b!DG&@{XxzWG37BAd! zZYSC`v_cp(2QGv@4(PK4e|p|mXWoysGPZowsy-?%o$5H93R%}JO09n7CYQGL0d(}h zDyJLjfVXCj7q>Gd!4Qqrt-rcQD?T>#A->({QG4tq`rTR^+dKhIXVGQP0%&dCNa&v9 zxH%02@L5@*&((meX`{&u3w5=;21)#MhuN;BRXfh_yHg~QQdjyme+tb&MQPoQ%tm!u z+`0QEjP{f`Q?dX~N`K#_SI2$7tU-TV-JNWEyh6!=3$8AA)YU+Xn#1GLB&oDfOoMz8 zsXvPY4KhP-F<>B2>XB zRNBGr{p)V`&N;?+f69bz2Ir-5?>Uu%M{R9DC1e}^#usono{~X1p|E)S(_ELAds-#~ zt{L;1@IoHlUrJsT<5dqXu0fajtMwG47WY{>Z#MnN=cje}emaLP4Ou)~tM@uy8{%YX z@p$rSwGDMuF@|gZOnXHv%Zk&UT|IToTlxll3waZZ$25sVMte>4n z-jvo|qdY;Lbh>tZZv;P-lM^>}*EfG&Y4$`348LA?FOvz!@N6(vYZ)qK1Ow5z4cZ!ZfkirjB4ofkHQS)Oq9V< zyE%-+HKi1rIi)65Gf5HBGkcuA0?NIFfOU=iEX06Yj zoupHGOO9&~nvKu}y^spW3fv`)C%=Q!Z`vG8)qQX)trfxT9`kIJ%n*I;EGH> zzeyZt^yFdrwQ;)1xA5&(%p1(7+LqX~n&qRyymvHH?f3PKSG3`=4dZm<)zxI`tp&8W z+i3MLe|n;U4@U=LjViN_yU)s|Vx80+-%STj)tv7|J7{3jv8i73p2_iGO*3{|MiPb$ zT{Q8zXu@+dTreEC4cs+3erI96)#t;c$h%F8#7&DFHii7SDIItaOMz=2`BCL`Gui=h zf8$s&-EC~&5c>k3JU8DRBlR*yS;Ml-#cGp^f8*2bw08Qn7OkmQRZ#MCh<9Dv7sBDT zRh1YjBDD8U{PJ^py_F5%8#GAxRbv(&uu?Q^?Ig>YK_~|$B@=tiZ8X4p$6NGxvt?H+ z3T+iN11;|Wc(dBE>EUf%tu;eU>O-H*j@MjP4gE5A>q1pkz!s>BsXIN_6Y?WHZdBpi ze`EUUcM3weCmlsa6|>oiJ0?E$hT_vM+SG}kDy1uu^j(YWv(n~$MiBdOpZtg<&_F%JUrOrI=tQbMVjbr8J zCM)dUu~d(RS$K(f72Wt{qy6=q6?YYCe_?m0weonJp^yY!I;x_Q7 z#G+2!=vAIco}BJF|wOTOM+fAReGAIz`;n|k6khIaV$by*j{LfgTcl@qu#(zKyn zzdB7Yd*HQju%G?&dL7DYexu-##(Y9;6Lz!RJ^E^Tb@TQn4jJ>B9JjW>z>SosmTKZX zka`qtV0=I}1_@^$Y*yc_+pf4tm;jf|q5 zes_agm@pCu&zEeRK$_Ot++l``tcy3U+;=hoiFQrfJ=SK8RP6t8~^NH&(mJ91KNw9t5d?7vL^AmBQtKS$^2O^Q_T5^ z%HG~;D*~}_UfyoL=TXxo$L?XmrbX`4(NZ^VHc@WvG8b|6t?-QJB|oy`YFld!y-PQD)HeJR z!P9DMX*jpnE7D5i!D`1XZ*WC6ZFCGK?NI4ao`lZuzF*hWr2E{Z5B(U0yE>{>T+4pE zVK=zSM;Aa!#x~mrfBRys-YjYL=$(Rn6|63Z_t%yMiGA$fm+bV??g^{9Y-`fTeSCP| zc;S#s1!(tn#25PRVw@Q_Ii9Y$&X!$bI<{qU*GV&@_HfMSHa81~t2fjRmxJ}(dP9=Q zvErcpyxrDYV;Za0W;CAQ0vx8s3*y^I%pgqqA8B1WVx#oRt9r3$9Qiq+P#&<~<# zaKR(XUOV1MkCxYFr?$TMKBc#9GdR(Ng1UwDHopWgKN@diK9)CTW*q|5S#)&@{$gnD$yLiIFWpz~n-8~O#zPbAiD$hRan6A6quMWOq>W*koGU_e?A`?_}b`-pt#e%FoLtvOw41WrnSG9 zpX8z6V}6Z99QpKY0jq$wv|yf4^!pJQw_J<>CCJFvA=F*xYR#7UG-wf@pQSUbfv?g` z;#1U9^*8(470=xH@?LvQR%v>Y=6CAizwgo*Mgnu$^vGr!VNKUM>pc8CEC&yz6#J5C zf7IUAai864$xC#uJ##DNu@Yy#YO|xI?Y57yDYw4F81%h=3F`s5?x}g#!9-of%ycXi zbn;8OJ@V8hm;J{UWdZl7K`jWnfrXFKkbGbBF?XyQ}I3DwQ34S)zvvk*!y>r9x zNn7{G$&8UH)x7RD#pCT>_Z=|bTWhTIg4yo4k@jF`JdN8*LHy?ARNVbCjE~0OG+A=m zasHkSPGh(kMNk+-A=RJS>b0Y>eZ!lB|4sUVq^162uP)OXdEJRa3-T-(B^$ z2b8v*vggw|?1p(KG?laErnTDPCJtfm*k_x0F^XAN=<)2Kt3tD5S7!kzV{|=n?XJGeD2M{yuwEEHmHB`rf17zbRlM#W>G}S+%L<?) z=$m)1x;5>KyK@y?kptU4s&I2E@HA=-eTk_L6x`6~jAD?VQx3qduBjMmzp?wWgvQT7@~sDrHO6 z)VNLf%(_!+pBW#~e|}193%AB2LO|Tx^{5{<>$FU&1R0FzNoAJ4)!}w250R?y$~*H` zvi6L&-09w^RkzRTwZgxYS0@(YqdqIsW&4hwTREhmWS8p~TDQhEp@o}fI`wMD8Hpqc zEwZ$}mE7-#WXErLd1z zUOmONaNAqYzFK=ilp*SP1eJa>xu=)$aefI3#yzq zGCrw6$iKqIe^|$K-X~}3od!=q6`I0hHv%-oQ_l{7tvUjve)8`Jz7;c}a?ABwyz})n^$w+hpWMW9Y`JOo+dFehF0Oa+ z0(4QIU8g^@emCeF^PU{Qp#35=6Q#A`dO*M$(-D~Z(P}{-0ueYq&{(T;k9n1?C-CDmMOh8BVIUi z_8I4=pfWPWcBu+IHod>*<8^?bc&`&_b=C2S-y~o^zOI{QzE4}^vxsfj%uTj?Big3< zJ!y>a%++xtCAPC^%?oCCP+#9!wF#P&8qb{+e~Zs*6pR_t&uo`Ouig22e4e_O)6GzO zoUB_P=N=7lOidnU8kej;HI#EO?$LXHJ86u z&`6M1qLZ|*NqL=51$=_ ze`*8jgLJN%!fdF1e5`IswaV#vutuw}Yh9=$A6^e|D=NDbA1K(hp8!mA-jmQgjKMTilM^xRchB zortKr_(q&s=PNecSyNT9Ka0jLvmCXOTJ3EI`+@Hr=jG?uNiJ89>j%SJb&8$e&?eqFQmbWcwkYuGbx++Wx8vpRJ0bjuA+L8QWX<4)|8k~>9= zo?P-Fm&*R#uA4bY>W!Fg9$=?le|GepId}5|@1rfwkF^}sCs1QnMt5XbI^)_ z)dSF(+ME-cWV3pY%=2zd?lCJb^0`sA=a=D9_kNd!q7u`|3f&3{t-7an#_mZx3eS7> zv<{td?xj;)?BX^ri``yfyd-BLdE$;Iq0-LSNu5!G&KS$1pBJ_iyk*T5f52{Yl(oZJ z(ou1`x_sn4Wq2=pxCZlPb3b%NaOv|}(UZ0^jlP&OAlwzLp_9q?eZ1uiH&)=jbe=({ zdsK52w4^B;W6B+QxOnyLVP1A>zNUBgsI%wOTlc%n%UvNBV7pz5=2V=0m3jcDqszv) zk8#@vXZ-RlrEd@YwjGP?f98*ugQV+iZ|xFpwLT|Fs9u9(|1@p3?d#(;x>uo)Z1MeF zzK?n@g{Omc8E*+SM8Fj0>2;OH{m!|(1idf}PtUIF+(rA%RdV>vyJj;Gt&XZq9qv$h zQl4uIw4mIaNQj%PPr9x<57#KKBjzEHno*}B{Sv&cZnO*fA8FGJb0H|8mP`| zC72!BAH$^9tk?sZHNgsUle%D$x9ay4}qy(zAv9ger_`PS&Vm=_>% z9Qf{U$D6Y&jqZkV(`1k4&8{uc$SH8PjSgi#DmuKT=$+}RS7zGzM~XAizB&bYn{Cb) z?U>_jvq=hC4!TvOf9(vbl9^4D7;Qg2p*mgd>2iE*!-_kzRBlTw<;}PKin~`U`uOCSJ9*91>gML#Rlf~a=5)Kkf3UeWaclNR>A^X{bUqas z3u!j!O+1~R`_FY>DRP6G*O~D)K$u=dNT^@X%$(zQ$ zcR<=b&wJYof1XoPjQu58N$b(?go1J^^C;SOaidNrCUe^N?$P`AZRliym_+Y(pbJ|J z`9buYSGO}O1NY1cDCtgas=f3Iv`GuZWXSZ@bh%uHZL@}nAEeXKvG>8MF&~sSv#4Y5 ze$b>Rej|a+mTk0^KOOF&IWdJlnXh}v%86IltZ~kxf8AOfH|If>Ho|Gm9c&M`Q?fY% zMmrj6m-v}A_8ptN9M`8*eXiVB8tu1IVa*|*vmiJ`*L!(9utQL?zUasrZ#`N^Gv2;W zZ;N{WtS_R!8KP*DGiz6wj)!1pPi5wxyM_xQ?|E5`*Q4JBQVO}7a4l~|`~uA?^g+M6B1*VBD~$mie4Cd;gs+q0 zC1)Y^uci^ixPIMrsIIqxMRk!8MzL@Lb|M?Bf1NbPU^qLc=S08u>a_CbU}A@=I6by} z!fq9(foybZs|LGSQ6FnZzN_S!q}vN0AJXu&)=kGXA){W`9M|n1T^Q)^=iz8eb}Kto zaw=@@jkdkQ#$*2m*SbqzSsg!zr*nMr%spl5hsTbNwoiS9dn#0SQZserjs?Hx-Kc+U ze_yxDI}p)R+@{0r?6?Q{{tnLWTqz#2iqEK|Yax{s&84+IOTu`vIZJ-<^-2@=DQ62Z z{kST$rMcB9RG{Yan61+Lp-a`)Df7qWHDw3ee5Kn1biBD0xJ{B8jB(vHdn(q$RX3u7 zdK@At^B{W|E%p39x?|4`^N?+>w^v#Te?z4|hs{0$_#9@IU^>Uc{>&dUw|`~FwpKS* z%RlaN*h4j{hkUoIAJ)6<*Qy}t0toqC#_tcWs(6gILRv@huG?)_)k7HQPTTICE)#c0 z#z;kn!a;r5t4D72=3Z%x)1g21&%2>U?SK)E4_EiZ@9nTx8B_1Dc)4sS0o!%je^R5e zv)tGDvDyi~{uVd|(XU2T&OySfP1mK5axPKB&i70|-bR5=wRil8x~_Bbvmi!iPriyP zrahcZ;~*bR-z}pTGUeB#vs>YIzjW`JW%bkH=0)~hzNCSBm-^&vpAz4Hu#$AvW!XuO z`)DVZFmFPt^cp$ySuHuRy6IZGf7d47?2Js7#$*T@!Q(E$^#2id-@4L5Yl4O^hyd?G zy!W0A@4YGD>PP$UIjzxj&`AkZ0Hq=U*2nu4v96XoUX!-*`C+zT~+Z^jq2SuH|S_p=Z?U}OnVU4vdbntl8+12@%PI0F%Wb_!0W~vDeZjD_3eo8Lu zIYM`9Bk0;Dws{p!W;iSeX^J#|i1b~Z1b}b~^E=_{S(U9w>v+QZFnI!}P#Q20CY9U_ zF3Jd~MO}>rVu|YZ95k3me>C=xDY-+W^cU0saU3bcoh8`vnP!4RPDKkA4> z$xT*RDRVrwn9Ikbbq`cG3Mn^1f}`WU&|r)77$s#{7VNF%^rs$}SB&GL3*0GRzxTbA zPX+Pg`4xL=uGKb5e;ZNcZPcLE$=|};)kCgyHgvGcfi4J;2QX-Bd{m>RHEzEvS4CYs zKdR9iXV%A7)B!KRLpS`1#xG0P8%?A7Ublk)YLpa@3y+-JLg&Dq-4-XBlclLuF$)qe zk9{L-`4C=2@66BNl>&7K;y5>m0HMumz|+#{J1>@Vu%Vcrf3cMjqP!gj&6zVDwhSQ!H=R>Q;aBX)e}$S zt9bMQe{RDrJP)40TD;pzQVugdv^_j8-GyE$3hRlAJ zx*X68Z2CqrAYZ295n3c(U+q0nI6zZNCI)k4;RDp zwzDkKf5io^gOdBg3x_i}L1|yO&jXZOkUjv=N>Zk%Mc}YaFh~qAal1TF0Dr3JHV$)W zkuL^LoGx9c%GM2tu5bMg9uc~XePW!Lw1im8WGWI7+XNIVQff0V@LIh9rTg3WS2_~euwqa>jGQ{%COY64(6jicZC2^250LCpJ(O5>BSjfFrO ze>MjOupgQ7Z2tKbF|F#;?I%=QyL%r#4RR1T4S|V!yOCK4+ih)3H=J)8fuucmA+7ILNiYOiyWcJd5f43xV z!^vI8^1X7txv>S#Tc1 zMb=Q}dSj@GL>r^)2>z z{nnr1Z3qyK7q>2B^qdpEn}C*#J~e-a$M{qlg=5y6AyzU~@^)C*@>1l8UH~ju1Ftko zD;)d96LDJ4IuUl-C3qCR_uNS_IXm!Rf)fO>K=Qk%x^C`XP=k#SLmVr9e@82lzVDkI zPL^40*ZQda7&KfBtSvkMr*Cv~u0q61U7Ok7mJtErtX+&)QbK9V<61@tX;$h%A^~}{ z-)8^S6@T9`0Fb8<4&FuS8)s*_hp58^ZZeh2YPni=B=CNnzgoWuNWR$>qR*kN_k3vZ zV>dtF_8kJMnz}{T1tdD`f0G=1$v->jX~Nz8@)Bjx>HAf5Rr1$_pAy{czpq|GHfpD` zz8|bg@4(B|HL`A7!Ebu*tEwQRz640c>Jxbvh*Bj?AobnK!ow7dz}T|#{`O&A5blzo znlSs!k>?@AWK46V7Mk{BoZV$^55`9k%%H>LqDbDPXEVt6^*Ju$e<^3&N*LB9Lts^3 zj1B3H$Bu_d3nM&aRzLs){E^PJJ+NZ+KZ~&7G8)OGSmFZcJk3quls?fMOT5|z=SsR> zUcIl?pz(q)uG4!3DR0ad$V3g+N-4v#CXM9xQ))zqGJLcoaG9rlu@H(L_M)Cfq#jQv zDm0{4PQP4^QD*z3f3vnbjO&x^BOwnV(XRP)v_TB~O>MqgLqzvcO}y1uL8pPiXQ|a> z2CJ02)A%4_62Bqy!w?{vMF|RkS3oe~Ah-GMNTMs|==_Up_9XWYL(RA9BOOF7;n1FC z$#!*OBR9}!JWN{T&SVqWbpkr{>~)Y=x5Brc%UPWaGR3BPOa#Xs-+c+UwgZ#=_XeXLFfHED+R;S6%hTQ z^D6su=gYeY2o}T(P?@72P3_0wD&XHU`!wY^kB?juEOUq*__JC_`lH~kBZC%onh6qa~ph>u5KRyWgYJK?A|f79s-iSyfKN0gMGN6GpylMYPQ zM8Ty-5%m5alz7!s z*4c$g1PiP^^2@v0Bw-!Bonc9V;1?kmiehtM5KFc!y-E2VXS)&DyZp{gZ^qgoa8aLm z?H|XlG-5%^L7~fOoRfP@FAVqmfJC4-s|W6le^`h+tJk)_GRpDh8CUSw3|pkXb;~=v z@ub@YB&@?v89^=;|5d=Gep|l=Cydn8CQIu0VIP2&Codj4ego-cL71-?_wr%fRA_19 z72t-AUOi896}L#aa3A_v(TAVY)&(N0VDDhA&?2JtL%N5ko|lr=0qkjU{NqVVqmACM ze;p*KeE0@PDRDs;H}gH`6DmHd-H?*%Ibt}N;F+9`Y~QL z!~4v}0f1&6$SV~Jq+f5-Z$rotRL=%Fu+|GEneVTKd z)wFZj^?^)DF28uujH_c3f$XGcY9-2>nS=B8#=O{4Rdi%~5yb=c>hv!C-8d*h_YYRC zSZ|2hv9BN@@D3NECrVQI?esM6fS4a|r6C1(?t8cqdzHhP*{F98KDl&dxw@N~e`fjD zJZJ~oFS086et0MG>7*}g?l?t-Oj^Wvr# zF9J8gdz@7W1hou(Zd85y;46ZCPUkR7qNayQUBh{OLC!6CS#~GTgXZfW=hWFqk|-Kl z>tJo$CLpdAVlHdy)cwlM_+l{r&Y1ls6{*8@a2s(1zG~Xroa%_H}=RiQ9_HPfD-+9$?pLd&>&sOw<^OAq!twEfKC3?YuyeU$o)s z_SgTda9>lX!!aRd*14)t#x_osZa}Yf$bRH41bWrv3DeEYuH@U5W<-aA!mRK*-Ig&% zb+lbMuYJI6<=ND^6f&6HHI)5H5NVN?;em5d~xL8`y-=a0K^&LGg*P6OZM?QF0 zusK_IpFj|j)YXb!1g`=%1ktzpb}n(dFUS)6Eh|Of65vNa@y)Aje=;=K>|ecnPbg_A z)S=wT19WIM8%dhJVtXyc?{{OADI}+QAFRPGH=g^VGWFo#IKW95qGS?;CO6g(E{)$1 za55u8t#Cfz2Jrc4PG5ZnNU2BaR#l!x*LqwxD!Rh}N@1gPyP^dgrsy>_h&wC7yUrc3 z%t2OAC?72cR+({-e^n zsmRO#lPWC}3>TO^@(K7gPLM&h0aEofQCH;*i4@B1aarySf5}|j?bg)sT8+D}W zE(y2&wMrrSe?)p5(t-SbhcHsFaD!qcQ5n`~WxrN-9ZaHG51{$L|I*Cr07#5p=Mn0| zny-b}Ggwx+>Hy1yr~D0w^)X%bm1|#zrfg8Vz77YwtC$ ztqL||&9+U|-m}1=<$Y=%2&;9}?7MNv)*@Bv%xYZRe_&ePyLG5dBwabQh=~{o(QID_mVddBd>dx@}j)KSQRTK7y4fSzS&Hyx8!On7QDaxdI z5bE%be=@cPTV6ASLl+)%;MDol*ij1LQIVEKCf^YM3qS$>R5bLnDq5kt%CXKG456KPMNi z4v`rq3Mo&=;;g2Ie`SLm6_x}hjSGRS%PK>1f8HWxk-^FfSJj~g?c!%NsHDG;oQ8lI zH2tKodn+th#wQ|+WozWcWWQitG`e%IzJgm=7FaJy+L4>`8r0^D?<533!gb^(yAp8E z{pV~?vy5|MvQ@o9m!8Kb@$gBq1k>;dJi>U03>?d*@4e@^ss}k6>{siP?gpsoM~rr+ ze@^$&Piy1)4c_enbTDon#k&a{I6EcGcAm>x)=?MWLT-sXbw~oK{Z@QX1PlV4468~v zrAU)jLdWuOVwXuF1{SzY!};>jXx&OduZ(IFGUAO0wWwP>Q-)2QM{m1(z*$_t{q|4s;%XvuBy*7e;ICW zMGdqcxRrfzHIgzZmdvvUy$QXaG?D8H4!_zo2#mX2j>->?>ABx#9No4_6@`3TFb@l> z9}OJbtVes6j}~4XAb1^x5a~tZvEfRoVAYuPWi`x?K4!TbM{!*0mOEx| zq|JGp(e&0(I3>Oy)ppFSqIJe)wb*NneZ_;_p7rtMs_;w`g^eP?bz?#?f25|Rv-!XZ z^^_TzpBcz=I9HcXujCE9rd`;K#9#LnEHw3ir5KfiPyJyxOfDJ$yEOAyO>)NZ)WJsRL_=lvRpFc; zGAvTrmuYD3Lc>f}<>GKamGDL?g>MBfsw%pBhzs;&)!roHE*qthpJf1X9K-3K5r~Yy zzw$o7jLDg0>EyLyU=vMkr)els#}L+YHo{Tl7Pl7pz$VzUVW>7Kf6HQ+*u15DLajr& z2tC|7S^SgauN{o{RcWKvfBXIG)od<1^k;Tq-FTX;J72G1&9Zh`p9zzl5o1QaUPCrB->P zyDkQ$_^HT^x)h_gTG-TO$Tpa>q7Mlt2U}8=2^HMcafE=afAl=JM8gA1NKRg7RxriF ze}EtOj<<+C-*0~L6|6MsF}7T4sGN|og@!6Ff%3Ox@e^QC0D-V9b`}a3%;){-#!*B? zmLLYtzW;&PQ5zpiF@w@2t?F5dVswGpAC?o0q92&L*AZ5eO6rtEfu{@JX@N!|&g%o# z`w?uVHq>L-{F@$1{8M(s@O4JsEytjD_%DQYq*)w9&TmT1gSac z;U=C=IPn9Wk)nkTM-A4hz;*A}Di26L5>l_lPuqz1`I;=xb=l<7HXW2-fig3Y*w$@&4>I^|7rOseOobrh>*NsuX=e;i ze5>M4EHWl zmF{xpf7tTuJ1-l{{Z^)zcZGClcdx$Q+H zXbSyDY7}==8atF74Tc>uCi#eR6UloM=)QDsAa}Pilcw49uM4po0Qns`8{Jsgm z1SuiXF8$H@wgX!`R&H;PM^=V)TK$8TX=e)sWR^uav`@9Y?mVp?I-Eg2^dna(0>L0vEQk;+_+1VMZ6qj}j(4lm;YP zsb$?1Nfq!pUG&A&zSi2HhgaPXgf55z0 zk`mh!;>`Z3@n(j}SB^ZvAI}+TYq+j44MKs09X>WJFNw|XtF<`^PO|oQPvbT-e^sll z8Ut0^(^(Z1P}qipf}#q=WXn|PL4%lvFWRYozhM^GwPYe8v)gI<_yzO62+-@q` z%ubWx28eTMX3yWfN5TstY6Tp;e_ZUFbtun=?tVVBJNk**pihnB4VE}!93~7>c1kvYpz>AW z=efpVx|89ZX@!ciqRNc1wSIw4z7X}r=wVW~$3QI~x9TBh%L)8SfBL2(Uht>3m-`dA z%G`%9jI8$6+2t6`KNb}cl{u9-0rDPTg*M4R+5gIC550X9IKS`?h`_DJ%O-}9TGV>y z=8}A9Vg_k?ZZi493zg8MJN7w2wOjAKA=IKv63JT~yRvNC6a+8?(mETAU@=Y-!T`3j$rC_lMqL26&8Vw8`S5fy(_D`7$osi0CAhG@(WZ9uZDo9c3$-DRj_s)j$)q=owipyT87mia8d@CwUK#OuT{lm6}>x9Bzu>? zOWLp94`&Vu=S%@zYjD3mk7gcl(Hd%za%+<~*5x?}5?mw*H8~hR9>MTWzbuZlE~}Px zjA<<0z`=)ff9R8HPHEG-{_@CSvv4m%G5vsfKEL5EgKk5gNPTfW8R~HKF?He8DO4Qn zTc~%R9c^32J8Q3uOrT_C^fPx2u(ok$l(hHf^_&GCQQ`UnbQCRqBnNz!xie%!(giVu zod|%PQVp5G>@Dhsy0NOV-xB+$Iq;h;%^QzvtLwc;e>{^oi6EYqS4~I!0U3ZhPU>cE zYYcL}i{|wO1tDUZR}ui^qY_a9(Vr@D{kMX=Sxu(Rf~tFeg*w}_^?S!`88iIoRJxqY zxka$F!0fr8Yvzz$dsV3Im4BtkJ7 zxekZ0y-duqJ)=UUahOa}qhOiJL-RgEJ7zs+x(JtHB-a%k$|5-=L+|_b@k!}#N~B-0 zSxYi>syYFKdoCL@tVy>KB$Rk>k~H1Ax34)Je|SB|WrK&ArpsjXyc!${62RGmR}WJf z7WSRP%3%iHwQP*O*ng_gn`Gp4*w`ch(c*xzoZrr5I1nO;P4@tI%LTmC;@H{>Ozn+b^ zO-dKG!9L_IJLjjmQ(xjOn<&eB&mY6}e|;_K_gmHtT`F~B;I~dfm~71)@WarYL7}Rk ze?P$!S&?E>R_73EJS7!2hx*cmIH^@h8tpl3$soQ-$n3c0s?hyRO@>AsCPE2VpeTUB za%{0an|K!eYoVl|TSx{6!Y1b_{smue(>BFp;To>Pr{X9tktSfIs-DuX<^Hb4f13~% zCe17Ks7a4iExo<&2)~pqyfn=HN|PiUFk@3a8m}{d?HT@l&(Ufx-z1LnJIFD|8fc)E zpq=|0M1tX&>`-7h;1OAH@L{l%vkFBADuwORQl3`@?WNevq86HJGg&Fjg zzl_88K{tObgK&IWO1M>*$Ywy3 z1_Z6f)ZLHRUx){S-pvgvjg?$*rRXJ@%7B#|umTjHe-u;4Y%t|5 zP3uvjSTw(?GTws$`G|<{CPa=OIyno6Q%VU@l@%y> zdC>5i(A5o-*9CPFX}i@FetAvjQX2ZB2Tw}S|Sszx}MwBffA zI_6N}3?CFr%DCvg6(i|QDs22;-YuID%3Iz1m|O0V-oiNc+9B?z^Vgq^s1MBr2u~1S z5zYq1cNgt_CU4vZ8W}9@#KCq(#Hv0nEw1E`!Y2V#BZlllp1{pG)ycd4D2b88Q)5ER>uo) zH51@LIaBXInITIse>#ajQ|>}Wl6e6o_O-@+I4@t|N*|3d1@G;y(BAVy%fuVG(3D&? zqW5Luor{H}?Qgw(Ih;js`OshRU@@y~dZ+ z*T7L7FdlEOs;KU9#b z7iVz5-agnJe!mgHGqGiMRVwR%Nlxr*DnZ#x&~*Z<|7LfpUXxUGK;+?J^%=SD*U5uw zT_Ew9c&2I6e-T#F2Z;eShb^<6SK@R!2TUgc`x-itLlsOee~h!4_ui?dR>zK3Zf}W@ zDA3x`{9T}ooI3GN^_aL1xDOD+KMoz}tVE->b>e5PN1!0fbG zlVs%cN7%)JUot@2>3fD(wPytcg}J>1()?TPa^t{xgKm#4&j-Tqe}929=9mb{GIvZ!z~yVIkSCjpR+Z*4}WgeSziNNhov3;x;^Q!=+hoFk;mZ7 z$MOpqlGwMWTUNH$=e1pu7FcZK3%887AKw~x)hW+%aiaHHADtspb(`DMhpRE$!&{Wd z3yFIq0P-*u5IuHomx5TZci(Jeo0ioto_oQ-t4;;BZePUE=v0y0U{LoOvJ9&i2U%_9ssBtp1>frz;^YkO!R<0({7xS#_o%B|Et%1U6ep z!^XIHs@XK{-_c7MVHlKqLkF>ieXDDExRgiLf^P!XOazblNvT8AHCmT!PC}VlOn;CL zlztOzWC-Ar5#l4OdQEozHp}UUP6P!%m!v1$BgKqe`m|WV`X`j)E^Nq)PHd<8YOD*; zb)_9$rHI)_z{c^zf)-qG?ILRWQjbALVmHa5k{d5np^)ToO>w{bkh;-UmM05<-)$UT zNtxHj)}&{U4H#T+D{yal)gb8j=YRfq;U|AaZv{fC9`-xz@lKYr`F^%;5Lea7Tv!n? zcsI=t06nM(oi>c=ntL#~o4GUg#wu3h03cHY1`yocBTFXo;*QSo-hv8k|{|es~iS7qE{-;D(+e7nH#tx8PcdpeuM!F_LUo6}5S)NNB ziSX${eld#X$VX2+4U8+MIQQmD;h^5zpuC!3CS%%fdR;GDUMQ1Py{HzDq|_gclLOj5 z_)rT4UEm46;1s99CU&VIG*}mL#G~KgWuWyp@i6 zM_7KntlDr@`+Fp4Pp`Z!{geH$+0MuOq#uBC_dteWmlG|!kF`+H))2GR z5Fo-Wtk+Q)4~YV&wbYKc2eEu+g53tIzHnL(d?cY3VGJFGlSz&W}hpMW#~xa z!-Vas2=;37wHkt)HGix#(Oc<6T64=oD>h$d`E2Xf8TV6-Ql@FFRS-+!OR6zp5es>E zopB%Kw0FE)9kxIu@nC~10M}}oQ3Z{Fa-@%wP7MFqiwzu#Z0dX^~#ZlIm>NxOW#}w}AV;c>Y$l+|<`c z;I%*8XIRAFkV7s*M{U+On8iU&tTr5MTy-`fVpo100N{v1Ll0hC@UQd>GA&E6%^*OK zC1Rn$lrV`{) z4c~$$u!XKrS%W&Ga?#7R$Ih5~tgm0W!B7qU&UFMlsegv;>&`94cHbyP*xOAtEjRJo z3_gx=E#s|!15PTzPam-ie9iLbz?1EOrdx7=Aq22Ac%78$TpPHoSGt*AV1oge1CE)* z&*xWx@ig^yvLvvFJ!Q%)&8^=ptnFcba{3I-{XDNvLgdfYLpIxwJQYa8tPg$lQ-5(+ zA7;pR;D0#mTuh%sp!&p9S{Xx$sZy^03vmZyVUgYe$=R5f$5=NnWCz}!cr~rZEE1k)LL7!JTzL}R z_(w2U6d{(8Jw6{d1RU=tK_uAT07&Kd+Ys#oRGV^fdIxN#aYCn0*?H4KAXZO2hLHJQ z>3@4pW(F}YX;6H|{HufXW}G~n*xC*D`b_|ebo+~tl~*rzW3DGpNkvaf zuk85)dPeyZal>H#hs^Y!;QQzqgZ{`84saozFQ zqqECm#xU6cra3k$(6Qd}Z3$GX8TfF}`kfQTYt#qOI8aREYrN5trfZXkk|7AFTc|eF5FeB6i#I<1?QP8hc=Y({@}0v+LN9e*RVDdV%N zE0&`Qfs*9IqFa{rDS>cmKGkBmU3<6O1&@^nS1J{fd*4jHZt?9=>3HcXLmc`0?i|2H>`xjZ0x8Wy>GRCWc z$Jm4g6b|K5P5_a=a6vQthJTN0C4De(_LJ7<_jtk{#}TEREmu)5p*3fXkiM?K@0D7i zvb?{%9cvef+~92w=o^05{`DU0KKMe<<0E#AhF2+0ug1Fvm5(uOM93u>q!%&WszOrL zp)!VrP7=aRVVa0MIv{vEmv^F^pB?CUISG~>%}{G zA9a1s*zE9Lcx}NR7-7-Zq40vS11(vj0SzdOb2b9s?y?cReU(UC#gl=(3C>)-p(Fby zOWWn*!8ay)`G#^W2>9{*SBUm>^U!EnUivVoiWi@;i;f_%rGK~vnj-{m>OwM;Ys$>c z`W5im@92KRZ!tcE@v0|}VAMS-Pi^2u=`=(ydS8-{y?ub%G`EK5*cK*Bv5~G?LyTx` zSY|!zmPq<^s&>g7JpZms`T)=nqOXEdn8~i+FpN>O&H*&lK1NIR`JmtK=WsjwfB1Tj ztp|}MXz+owz<)}>vhd#f9o{Fe(Mk2$jv0^!m`MkOL@j(6?lD=VAgx40IL zC~{&lhJGKSlmfYDBSWa?J=b_9=P(nvafS1DsY~jFdqW*WZ>DWDVuQ@{&fYVOxY|=! zELX(dry06lfuOm!d0A!5mL?wcp%yE3W2wxAyms!hS$`i`mf{PF)ue`p?{ySY*)vjmk$z2(z{j=JuB;%h=_JHXs<{4i1o6FxTCt=msrI1vS|}_3~gki zj}u~#=k1||G7Mx-5=KL~z@uA1q_#U@r8O?r-YQt?CF-07jw}jSlL!Y)s}D{M&boed zX&qLV(M-!N6S^wsyKK0obxG_eTsa{kc&g$Z1Al5M+(oWd3dJO-MSoQ{rNWmNA(lg^ zcVTphWscA#z``sFRssC z+Z$IbQ{SRI+vjz*NC%uDELX&bPvzvc3p)Cmtbp)x7mOYGoo!Z3Bf}F2WB<$|UYcP! zwpM`f1&3^BhURcJS>hC^8CWPMYp)OxAAcqls7E8ZG4Z6^1OgGaKBHiqkLjs7vy5Di z)|a1Qy(Jxq^r`4R2q^*8>OIgvp*>#K&g|i)4xVC6DPJu{#F5OjMVC|B%WbAqdx!q( zoqIFvpBk2n%Uyt8#gQKFbp{sU%?zl^>7U+|oUXiouN=FJz@LEaGYZjl!*9tG&c&SSowz9_LQx z%A7S>@W;Mu(yn20(0kV4G9-}+_e0Y5$JGMCc;!w;0hiN=j~}*<;*KK-(*6M@I(~6O zN*tr8%IOrUFn=8uHCF&TdR*+3x`*4paZj6okm=@`< zV)EDuGi2^ypU2m}q$E~b<|k#I+p%$7xMOq#;p%5p7RSGo^JAn%Pt?+zWPfaPbX3z2 z_33p-x@8tjDz_$We`kZt0>2_L75H`t>856W`oV*wTYUMbNFZ>c$b0Wmsrb%Qb{Vk^ z3KKDXhUxka)KBbO?|~qm{hXvQtCY%u3i#SAPiNTPTiz!5ZW@mXuRSXsgD`_l)kGY) zDE$k3Uh}A<9xZ5#RvErJeSc-o%d%wDbeHA(g*>OJ5>M#`ReKrDQOX6ghODCE_^3YF$OXjd4HuDoZ(!rLj^k} zxcFzu9LE<1wvvyo_?U$1yF1H~)aWbe-T2|~$iCvgc#kB5cjc-=53s+$xVr_|u*Z)N zdRjq&=pS76_4CT)W{3T(P=@wOhsC(@QGOrOB{^uV6k}(NL$LR)47*R_7k?H%#*scS^7X=W$Zx47y_!;~!I+!LD5%?@ByTTL&Bb4D?%BAH zcc~f>reoWTYlz1_74sium^wXjn=$%nAsHi^E?YqJ7OWlS#Du$T0AWr zZC5QJrFueM6wQ4Z=#8D#3%H_X2*)$fJ{iJ%g3Z}fN%m&udHdGIUy`PB^N#aq z{86_k(i-gZo_`lv4RA$cL$)XONwvI!I*@UfYIy$mk9&V$+@KZRfbhmxux7tz@_oq^ z;16rVdSS8ka>JxO&;1LtT0uA`E``Qv=HgiOQWY09AAhCE7FqS^-fI)|FJ&wUM2Z(9 z`*hW9WElQ#q>3T<;A}nxf{AxfWpBI3m01UE%=htBUw@3l1QVly?s6n!6O&|2&iVZ? z2+9A1?8ixfV93_u&i2DxN9nYaOlQRpRx8|;Ea@mi)~J1iqd;RQ&4i(o#$LESol20f zc*?+E^##gZ(bAr%-oGC&LGskKieit-@5FOi?JV!DMFB`Pl9E&UW2GjFP(QJxVy1vL%}M zdtE=K&o`WpPh_ig2_=&a+Bl4~zBBe&jH}~~?7*!Q{EYS6GZXEJ9ekvF`xP?RdcKeB zN|1fuAf~RIX66Rdd@t}}I-7=}(DAW0ZA94@lz#%F`=Iw4=Lj4w;5G>Olf8S%O7DX_ z{q$kB9zzzdFcR?EeMLtT-#ry~_*)~l;$da92h@U39>8l`W58`nAmlX1>mX(7{XITy zzS?8Uy^#QvVkbi6@zGiXtK*o-vXk@;ot+t$KR$O8$}dws1fBymftzK`X{;`zSYhfdjpF5D6)faI_q_4@v!2cOeVQB-z14`tOB>pjDud&A4Ak*< z4MZXMJ+=8fg!HJj2}Lrj0}sA4doM(0l0bPQ51nsW_K2DU7Y*iIo;>3%l-Sfa+aEQ) zv1x>vQ?I`Z;iwrE8Ez8Mal9SF-N!#c`hEnFy-O&Q94Pmlzd{VPnXrpIYx_31m-61ZIZh3}9DrL#AKeZS?OF>ouYcRr zb>512L$*$_VI>lfTqR=6J5|Pa>UH1`kkj??L{zt%T6RdAD`6Nn(h6!0}=*uybNtn}6eA7Waey zMAc%?uu6z%xGP9akhQ~+Z~OfTtJ!?M8hDznYv2=R!E#J$`vct1ZF}~*5PfEicr)xO z`Y-DBoCVLeN0g=(S<+%oX}2ttVAs#p{fKY;aV+H7`Tz>!H*W|j8tC2!W3yn`W;yL` zB&L#bNupE64Y8lsU6b-LeSZXa2}4N;$IStACX2f->eXapL=cf&ZrK_V{oAIH(TX36 zFq9a9A0ZPR28?8$blsMKmCwa(;P7P=wow4lu02cV%Zap87Jh|1UQ^VuUwtoj=!RGi zevQT$%}{LX9uNqQ$ICa;DK9+25n{hYQ6sFDaD`2Bfl!hdqJam&8GlRK^TSZDMz9U& zK-XJ%wucnH2rhFhpNM<>Obg|>F27`*E~ZaSm@z*^t_)hT`YCq^fn3|Q_AT-fAUcW# z>23%9XbIvH%!BO+QPvLRJljGeY-jxb_1jmT=r%a}U6z87#&rEDu%l)C32~1op^15HB8^Z^ly%iRFS` z0qZJ+{za!NY=SFoR7-qpSdnQ~aE<=^b3-ps4SA94f-zu*=VMOOn+?axLW)g|Td9%# zzRq=d=fIfh?IW=~>adk5Nzv6csyV0A+F2HHQy_}8_Kn|I;D6GQ9hNbZH(F0i4ChKZ zd^ADOC;Wj`JwCMBz%3(lA5pYvO5HO}IxVh?Yf&vYT;smar)}IY?(aLhvTv-ZXPa|9 zCw{6PGb^aK3rb`(Sm@rb1K`3M_+*a~t(LrJrOk1Hc^LM{`^?T~g>(x&BLuBJgatS7 zD`4jhYlz6qQGf9%P`QZ+@qAwvA{L`a->!5sMDOj0BfTDFDbh=!5291jPxn6VB4rwG z(YNGw>O8vP*Bs0uI~vQmpJ%#e2B+;>gXqS6x z%q0_g$Lk2^!KQOD5Bo(EI2x0hGlmMnp<>iTW}wSKktO}% zfcLHFkAGrWQ0>u@??MO&jG^|`UEKC?lc;T6V;-QJ@HN7+!# zl-R6Q0zkE-Lj%39O5UEiat1L_;v^zAzN|Z8@D2^qNW)$Hs&y-&$F&LMZ-MJS;sy!IMK@2RPne18i&&ydd3ejw#uyVnqms~y{nK&ktQ z@2y~e$WxGz_BGeOqj7v}JJdWj1p{>}Nv(@24eq*G`XZ?kH8J3NWQ!-s;yxB1&bgbl zs+PJzNNzC>(7V^OFuEu0d66fF&}quO63I%`YgDeVAcrQp_u(RDtG8Q~@!LI*M+xC{ zd4KV`*TgVo%--uG;bg3=5PF7B1YIEnKm|GH-3qZ=`4U!X#AV}l1!*J!_1q!k!4&~=fYes7w5tvv%vPTGta3VCZ0K!jLgi4+ zHGEv@<%NBf_%&dNtyT*py`Ebg|Ab$=d$o(kYk%NSvISuqn0j5@0DJlkc&)+LQ8 zQApr93Tjs^=f^vq{5`tCg9#+kcsi|Uz0DI+9pyTEb{*o?LS!AhlnqCX`N3bIF1{Wt zBPCQ8Z}9CPiSmg|{27sNjtsBAvi={@PkL)2f!MwYANwotP#ALw>`>3L7LnFT(8JVd)5HuG;~JaNZTj2AguQ;e?H+jpyU>tjR|uq9LMxwzbKbcd$&lOi9-c zmYptSgh@aPRUA4xG|O*uT0btbj9Bks;6X(0TneU}*zFiX;bs+|MI+pe(=aV4K z>@4r0dEW!32K;+05n(;8h+_KZ{yG3QouBo-vw3x;&*G{FbmUDFB$mVMQktsv$!z_H zlf9wguIHFbSQJ?5UVj_F=^RHLq0AdR5Tw2prt02ZH<&)*gkA3`_DS#LFr?j0#!6Fl z4ZMYTtd%(udh{Zj79u^6Lsu*7*QVx#_xSjqBcXcORz5N2B(JsUAguW)y^y=o+(7q% zBf_5ove+bts2 zNMC!jrNuqo0Q1j}H|%K0=0y}H*)ay}DsQ$D=61XKdn$1Yf;Z{Jep+3ux27-|Og*Ok zY*U@ir9|M2kl)aweyF_shI2k&Kx_3rR5(5u2;Z?JAThdRC9YF{N;Izk&*z>}_hxVL zG)}Ph=uEWszJF^`z(F(SlphJY|Kw+~D9?6(7Pup&@6#lP*$^aCNz{!q%M`+f@o@)* zD}115AE&(Ao&!d*LlPDnZ_T|3EF?}Kap4~h;hh^9%r>tws?* zpOU0hmI_FGWnQel5GSjpc-P+ohvSM;Btxt-<-s`-(THvJ^zwyb^e8*8!d<_(KYx+qQbu&v{>83WdT~ zxq;g<(#lecV*DKw(d)`$JSJIjkkF5K#DDdh_U{ws$=o&W(UGL_Ev1lId+(=v230v7 zyf=nf`65no;g(oAX095onKeX1VX^haCQ6#oTyrM8Z}97WcLv-@KD#N)Rqj|`j$H!3? zMS8~K+qAu|a9{gE9PpEwP`1#>qPv)s^kN@%mA61*`LZ;VkQeBV1b@1oIM_lgS-xe{VW)*CAT^s4ryfRECZAz@g zsG>qmd~{l(js56qCS_w`CfkxXBCfJB_xbUkwY;pcdWfwWT7NTSzLU$| zQvZp!?}CurvWrk1LNgn1pi!h`a2S^1YhZh|q#PdxNQijfX@OrJ8?L~TcrLt{PKPKi z-;XWtZgn`S#ggZ%CCle}vF-Pre(oy_-$r@!TwKF!2z@cxC%Ic z(?E4_6G%J80%{75iZu`YrhmVpm%#36apij85VX1S#u2!5)d}H|O7yLX`BAnE*GbvY z?2L}fbk)I3RLSHq_srI*^06%pb!b*~?W}|A$${Q%qZye+yjcV`rvoB5VX~-`hq)|s zR9msRQ*t}oeDQqcHK%j&`s8z`H}U@DpLCS-gB9#`rt+v15@&4*GJh96>%I_TEn&}j z5|$`|>MayrS|-%zE@~j>YP1j=uNZ3r5>c)!Y^axLymBwvlq68S8TOPMu8tj{kg}((D{u; zL}*ZM9)?1h@%`DOpN_L}1I|bQ%)r5ho@%pKSt;$~MSn08>{!`R>0|}^?5v|A0+u^y znKR~q4|6Ryb8vrhal}`(q4q^#09e~SWCcxI`Mt+wlTt+db7ScN%M5s|*ZhV_@A4QA3N?q+C7%NU^gWIFrA^PX?ep8RX!+T}z8f3Mei&I?u4|Divsr^Qvn+f! z0)KG^0?L-^LMK#F3sFQ%yqp@skuG9H`T`jRg-Y#F`q7Gafj%1XVKk?WL(MK#j6U0y z6y-Dzb0D?k7PD9yUzJ6Y8Pk!qGJLD&P0Z1of)`8kxOR@5y&L3($tfDe4=xcBH0{Z| z9IWNRpZ1`(^xQN|A8P%+aW+N8sM@N6>X*#q;iF35JU6&el$r5+G3j1*!}UG|(JkZsnHTWgPgB!`kYM}~&fdQ$pa0Hxy1gmze4lI+jf zan8flhy`NoHi9OzeFlI-@qbxfuVFUHNQImObzv8qHn)pau_H5ce#%Xk`XpMgyb-#s zjK9kmp9}dJK>a1ZCkiv0EYUarT7AO%RP6&r8apGqdm`s3JV6PoS69~7YZ%RA4>H z$_LTTp)NWqW)!oX$U%KXu;(=oHG!I)8B(oMp~|d*MJ&0I8IwP?HGl2-J#HbFLb>yp zPW?fuycEFYS;6v3yY3L}BbG4s7-jvdOi1AAk9j$_$CFe&;NBT3ECstAq1E9FBLqq0 zW~?f7H$sG*)D(nDIaZW!jpyZC)@QKH%daO>=0e^|)!3Pihb>v*Xv5*SwLJGV*qugt zHkYkBw4zc`^@yL?WPdZS=I+Aom3|1B#SA+#4#dzHhu^c;DAhgQ4A1x_Yf6c_`Pj|+ z&G#|B(Xn>6foi5C<80jRP_hlbc<#OO&WI(u`$5o4?s1sNk=?P0Ct@4^i+>T;=S62{ ze7#Bk?Q{wo@#lWsoQiS zS#;YA*z+V1^pOdq_7X;o7=0}!Yt3}-guN?nj;bliUe6b_#42rVXy57N1+OraOjGQ^ z?$kFHwRD8moIdig3BLaULsXe&@NEcMTX zx{$0a`AtAAqxb3+f(8TQN)J5w+6FH^(-h$7wq;3?Z&lIs=$%$K0Ao*wV;RFVNYdoN z$lI1(rCAjBx!FxH1nvg1F&K*bZG#w@yU|7!ISN3WM}M+kTZ}&pFFsgdiGx{W7y-Sn zDv`{3A2(ZGL#q=MZE-x3d7&M^E_dJ2$vV*Dh_pJ z)&LoAS-_3Fh=r%phb?;phvl~zqZME{0 zP3d=z<*;eyU!@lcY6%*BPZkW{OkQ&XGJg?M(#(D0^Eh94N8Gy`cjjs+V{5Yjl4NdbC4X*_9>Wx^+yoKHfl4UKLu}+_b691&Rr+>k| zkawkM!M3{x->6UN63Vf$VH?V58r_t)6`j=HnWn8GT-&B86zHX6ZrJ)<^K+2P^{HNo zsZ_$9i0kvYobR%VNDXrn1V<3o`vP}a^U&C>mzKQXE~|s8+D>5f`4I{Spo|u zNouv-`@XTG+dm&O6@tfuq)1*Iu^Y8>J;bmZJKzTdnW-2;5Q)6vyoR6o{s|`}Da(rE z*?NK_JxH^f>)m-pBRwdKs`J%;L82{4^Lj-jTUXG&Hi#}&M^jhqCOKIfCpH5|vZtb1 zr&IHWGuy5I_NE7ap=X0?$A5G;Zzg;WULwd1VFk4aZhDJ_!?8#HO;=0plkvd4JHAi)bIM^VKg0 z2fX`JiHwA=>L&A3*#v<0gYQcHZ1d5oz+i=C1wd?mTOf22kMx)Z(hh|O0DSaSp)}Zk zRW^Tp68b&<7V9w;`yp6`QU6`R_xC)R2!0(49B#cM0C?lqSJD4eHGe9a0MH)tQ^B;S zG_-M^$G@VUz$3pc6n{=j1*(nQ@{}GD0KD?OE1LdO(fp}q0zf;-Z-07^0;G@g2m3uq z?Eji%QzlS%uHZGpO#lev_x|Zw{HkWsKb1@X=s`TyJd{s)p1tcE;4bTSw+R5FV zlii%7UHxEPS|#~Vv0fnM0H|2wdprxs`&SKBsDQNDmdR8SqklE4qqEV?*O5mbnLszv zAYYlw*OAYIsFj)s(b?3H>?srvPoQgV=f0k)t5Tw;SiCq!WTgt&|pyZA+x9NTeIBk*{cwuL#rG z=;CY7qmM|SkALs=WP{vfLs8?4cw_=NDv75xi2<5G6I`Lxq)1nq#L$+@fQqH7j4@9u z6HZT$`ZcDHN}|s`jV#;~>D6ZgITfr-Vn7xLDEHGkmA}R(a z)^F5kwzq3kNw}1Bg{@o@8|E;UDzx%v2pROXG zXSG|bGM@*TAYGczR?^@7HyNzoRx6#J0jlpdX^?LV(-~=$8xNNLkNv;bA_GYT$3fc_ zXRFK+^GEbi4~t_eNmfC}vA1>8z*0ap`K)a*?73!iT_w^b(VVlcz{(i9(l}6Y5#5kG zurdW4e}BK_SPlNaf3F1o;7aYbXwKDOV2c7>O(Hld8XSfUEQx06D4`Fjl)Z@OPEBJ` zm3Lg#)o>MTsrICu4R^5tT`?#A1(+6k!Su^g&~MjY0ssbF z1bhpa7C3FElb%Y$WqgJ6v@5;2+d}nQLbWpre>s0%{bTz!8>H($`(b+P!Qhex-L){i z0+`-v|6&55j~_<-t^4`$Km5RS=6f7Tq-!hWTkX>CsFg2^W)3ao9j_OwN)v0Z4=alX zm1MAo#IQ9mc#ETf9WhL8ncrn-pI9cDMj2?5NPCma0Fk8+PZ2T$eBW6-B!)gR8dxx+ zF_eGKSRBJQxv0XG0?us8LB)VmYgB9ceTpBXBmBk!~=l(5+lvzbi?eYp^alYJ5 zy8{k}XJSPGI4xA&#Ml$feT%>WG6?eMS0(R&n?O*}695Yq`$*un6$9=++iJHI{^9s9 zwgdhTkG^(k{%|Y8{(fV9ZPwC9B+_RVXt#g##kODE|M?^v|FAb2f4TEL&Xo=NZH@9J zwNjORu`T?que3qFq%T(cJe>vKbrE}DTpi_7Bl*$^wC@|`Vx=j-;y6%MhQUSxS4)Cm zPnK4%BEwh>a~vHHNM5gjpIES}-N$GGT;Z**6M+_XXUBp=%2-R%7zdYtJq>~-h2VdH z6Q_6&6%&maJiVKquI1bjs{cvJmL^?73E^%b83y<-7wHaBw21v+rL=E81Eg z%1sTT)zKnY9oD89<-qChdFL4Y+T;AHH$)7kmHMV|&Pg{QiPkY~bdj0(IKa3IRGb=!=ee3oQpVUffrP}j(+6s8u z3wSH~UwvjRRA)UzXSC)&F7uZ&sD5QTXtb}-f4M1E(yg@~qO;bZv)-V)DI(Ds1HSmv z948GXfuk#v7C2h_K6W}dlRtsXfa8=7jRs$wIY^7Iq~Eg2avW1dmH>Z9HUiqo(wzw` zGUPxfu`JhugQI~ZF$}F4q76%&1*_oDXudaE7L;Ludo z<|NUEP42Y*zxS;sTDPSCzkPok6fhC+EO7+2_I-dXtfxeO{UBREZa3=AFIImwRlB`^ zaNYaMn?H_3E4F0w7EFJ8JP`OZ6FeT!tnJxcPg`_dMRZSF$vGun|D9{r5HYY#@_|rLNK9%0@9S?y^-|tC=r4cf?td`wzgjK*c-A*3_#^t&k$-CSw+4M} z-!50T303q|`Sasd(L%R>*&cFEmG8>i6T9zj9ZC-6alET{OAAgXwecQFyP-ItFT^{_`|P1UWESTi}XJ(`&W~{ zIRyX4mw5vclz-#L|M8T6^Z1Nz?VG><#FO+FZ+~_7Z~gdZgCFPo*`c(voaQ}wJIe7|;69-w6n8O{Oip)@|#>*~dda;V-`w z#6x&U^aLBAqJibB-$5(aX)8llyuwxjS67nFZ>#|hfq(z<_ha&8E^`HjBx#Fd=o9uB zD`G)qDPnsBoWnW4T475vZ{y#-K>m$q;*bx(azHVINBv%fWsf2?2s3yc*Fie#`H@EBl~B@JK=)RCe9QdF7g z$LW;jn1AFdlIcnd`b%J?K%!|2cR1k5w54ajigeoU66WA*+FEJ)fF8r4NqSYWA<%fn z$_x>VH*h2PZnYoJ(M<>idG#HiYv1@V&f^;&nAS#!&P4zD@;8?F&)3;)R1&JS(yg7i z`0p58`x~GCFF%@p>d*h`zxRzT%B=&<@idM*CV%=k9Io@o;4hbW-RSwDvzO$i5q8_V=Ipa zmdDWFo`zL%LbcLw;Q8!n_f~}q4jLDt3ks2r9OkeX4p{ydch&OWya6}hvM=tEIQ)#Z%-p@zy5M7i!FyW3BZbzPa)Z>1jmf$7V78bBorHfaH|Aya(KOhZ7A zuipG30nI~oh$$#VDVWzt3!d%*ITuy@$et6TKD_AgJf&Go4xCh=KJKS|#~9k7T2kKd zJ*ni1^7MY$C@DJWg^iqBnWm)A50ffjst;=2$6vp-s@<4pxk;(J9{S;ROO80&?&Rx!V+6xI9{2iq@XH2_$6_?wk{xBp;1^X#pZG9f3Q*|Z zMcukSlQE?FK`qm409Qb$zseoT^U2qgm@21b>8EAkLlFgYS!b#2Wu|vt(*`ta<|1w1 zl$l6SG6ofQUfTA`leqTF$z&IN-A>M*@dq{iLOVKF!|zG^d!_mpzc>G(laKpsn3wV@$$o$Q{&mvl z4?$nv1YH|3?S`!8DKU!Nrze|s^XtR3zx}?PM2>qM7SHU$rC>kkP zTR+@bKm7S50|Q!G8!q@^_vS)qKNs5ZC~SVc85L2ZNN#dT!Cf+leq8~7V`4uO5VNsS zk*`}eHZd14nKB3`U)l^fvSxn_9!s)UAQ2p+Q9h-rpo2VC*D@`v9VxysWcv(3<&dg! z??*53y`MS-i8sQlT=G=y(H{uty$rmn2|!J9PwfZ4f|X@D{`7TH z=g;S{?+PwD#QF2F%zwR^!cRK&NDY7Xshx2WGq@`` zBPJ$E$-n>whM@|73cChE4P(Uhas5B`oGuq98||oj~Ry#Ip;{Po$QCxn!d0-`=c#_GPXL&mLKDBjAtkZ<k7_|<78^GVtVmX8}TW-R#1QKO0Sx1J);Uj42fSp z4K>`8qr0mat)@xF_w(z0C?LG-dj{X7%Xy z)aS_=6SVRmy|7`gj&TMf&SPeMpYOPtiFX(m8sbjY7M=&SU4zz+KgFYxx;)wM82?xh zy%(UH1d7juL0L~>lE(Euik&88PW(Z;#!tEYHPdAs(c%`iP?7vuq^6lA+9A+6TTp{f zM^(IG|$KIVuImk5;7C~ss6VB?G5x*$D0|2e+&9jBTf z{hTeI$2iwJQGw4pm2<6?&|>tYpIPLr{h0G`lbai|jZMPxNv62(S2_aYM_}3l<4vIC zY&L)H!+KFnn3g0GJCADrSEBgz(c-wcwK=)Nbt^cK_kvbELLL((B{9yEd{;_z zk`ZOZ_IRA8rRw|rJ01fQ3kFL`l;ct2D29G{*|Gumh9!>IZyYaf@x|N!i{teOgDLUGbOk*rA<1fgxUncq>@EH9z62=Xk2^x7CxycdT5W zeX^5pk9!&S!wJ_xEW_H8@lh_yE+2e(E!w~l-a}f(jI5s(0XfYk9>z17`mxU?=y>WK zxFqu~nI3p9>h@k#&I40T!0z`;PU3%<7Qy zr6irK{B1+1J6_CPzNXvwQz}k~a&`>L#7mWex5<=3i{qnky^H|&m%;IwAMLl@Io`I7 zk=s#U@C}A_F!(WcAN|x7KhN_^90%9VA6St1dH#i|h7#3C-sKB!bShwx=LUb*31TvO zE=sG11EOTW?wgS4f}PqA$74K>dIm9^{JVV|FvSH+F&5N_K@RD)Z;*eWW=u^y)FJI{ zjQ8U_#W629GtP|YM!?7&gUJJ;CmGZE_kAXZ_~N+L8yU*kQlXsP+j!W3cb{>vKV@v( zxx%iu@A0y0X=R*{>*8*9nB0GnN-jwFxC7lV#N;lHW4}CB@ePHT^d^>Ghxm-0yZz|DZh=XQKM!(3{*V*CmS=v*;N$-rj1y%@{}c+X7$dL{RhyKi z@kzy{s5Bv|c$_usH8rvnUH(WY-_I%|U@m*8CKF<~NAx7}pk;^O`ig&L?(e9_I8^iC zr)9vj`%33@chDG*z#+DH;}!%mtYURqw}TDyk2SkIjju>mJ~MK6G4}Ly4zXz8oU!cV zS}m?4BRz781*OUl(=sJpoC^~BJT1wbw+jwB6Cl=$j!(H>d+E@3Uyy}J>-cS6$vH{+ zQiBt)G_GgFTzQ?!D1meG#FNcdc=AOZ?*jzmOaT5Kb}_^lf9#Bnd!6GU!~CZ4;k4f`m{%Zt_*R?4!u8VE(&{EV-!JxA!(YI=I*uN8<`(S@!pJuT3G6;x?DKS9> zdcayo+x#sw#c+)>UTtxD?3eN>1&Q`_Z%X_u@RihP%z~1D{aGlCN9?^J9&xf?aa|s5 z7E<;XzTRp6Jc9i_*{|PN%r%ZdF!+kzN$!&>U;MRg$9qvhPUgj(fPx2gLtw6#MCB1Z zxEOm;LQiI*f{cIgfDvN96Vmf%qRoUs@hp+&(iMv9lq&QSNHN2BJL~cXVq}a(X|_ah zP-^p;>zHcGJKG2~p`&e%?KY@KDDufnR6DwS+qgXNhz|5}@<#5W8uVo0Va!es*FkCJ zJ_;)G3*|nya{ro}{~-5|_$;r=)$#aI4wPp(VeN&Edl!FL<_py2%O5)6{V3yo8~271 zWxPw$B82MWmYNvlKFW8=Ud>6qV_QjeaqnwYP*pA;wP7NSCCY579Mo@ft`y4oK?bH1 zAGG8iJV&Ef=ZhMjSvgHC|H9e~3}3*hX1}1~0t^>kngT3&M@R1DhfLStA|5)1-~;(Z zA>w@*UowB-1KzZa5pVjo|Bo_~WW^6o$_1V`Mm%p_FP>TR;Cr?E>G4OJcKQ?MaU9mq zaRddWrrh|cE)pgKYH|UMsc2Vx+&;P=LYY`(*PpkkJ7?(rR_XS!b{>5wxKH)R<4asc z_TA|*#_u%lmq^1c;t0uB<$yTC;oHB9JARH&#Rq@cCm64&ivA+rb)4UCImizdgMYEM zqxJ8aCDE-XI$a1;q(W=y_3+B8F%6A~J}B$B{=&0Qc%`27B~p2&nx8{hR6R?h0|Yje;8OUyMV@ zPFa84i=@hz){ZM*j(BDya8&rmzWe19)CF&N87mcMOqwVHs#SKF0L+gNPd!+f#p12tevBM!!Os) zDW+qb0CXNu5-)In^HhoZYkeO%#gueK>KTnbdO>x8{v+p*b1Wc0bg*ut5sc$x#t2|CGNpsqNvU+17Kc3t zr8!kb3au%aBJffz>3y~1-A&``mP6$d_FUX8n0k^59OHO0C-;_X8&Fk-EKO8UO(gB~ zlIb?&6=!_>L6-^^5+4F}Z#tgj+?05W!PeCti-UZ6AK~~8l z{{XpvXM5%K=ax_EExGJd7 zBAEmFtnyBniAbFN{4Q%llpEof%CbWvSXZJQ!U;4nRTIwO>DOpVtCkPmWRi94_28F_I%Ub#|sMiM=tBFzN@&1hB-$cg}rj!Bv?Ku&K{%lAntD~9aN`J2@tm1FPPGxNiy0_;9C?N-&uE5ruQan?qlbJ=R)Sc0 zf_{bnh2y$Jd4jn3{Dg}R_OR`Iz@4A~D~pFM#bG}thL4k~Tu{Pul8+O+{wha$#+_2*u4x7VYsN9DWPj#$WgpM) zKy;SZ%;$}}^mRjmDxUL7ZV40_L%dz-2M&%0*B^$dWPcAQ-KS8N2e?m?bbs#==L-5b zWl9g@cvbC*|2hF>eK$%v*K#}Oefbjf4>RO~a-veE3#;$YHHRmg&k~}(6Z$6(a>Rge z3@*4)XWXF{NB%=RjFofbEq|k}a1h@y9^{-Y&bTG|cenBz|4G+tv^PixOUQ2$lyMRo zP8v;9Z33>wzk10*?V~-(8Fcsep0@o@80L}~ zH%0jum)yLm3)=uV#1@C=h!2e{&+LzJ0ajlBC;jDXW&00M`v36Vt$)0G@R@Fq)B8r$ z;SqHC%@t7^^~~JM zgT0$o9PKa6!HFmVL7r|Xsbm{j{T5psJIxEhQ~{va5_#FKL0*o-hX}BoFOF{D4vmLD_K0OZA6G7_eQh?CYVozc@B)|9D`>*jIL&2 z^wkj;{*Cz}j^&5P_#E-ThdsxQ;b^1UbqTcGO{-W5)%uX{DO74I%wCS(3j#WvTQ2I? zc#51UmKFoJUmUBYfNKp4A2Dr<&q|r?$BRqy3D)3pYHZ zstK8H$&_ibF+5^=A5gVtwMOiQHSQ0IG)PM%=ydc@=@hrJ>?@CW|r%V@9s zeDteC5$m<4uz&nvu|4w;ZhR%QU)}!Ni^8F5-&5>Q1Z1tmBfK@>qdYc_y$+6C_)8gb z!8!89Tv|>A;QucK@_+TalWejvhFxHW&&gb&ZF1!9 z9%Bk9zf7@8viOJ-SU>7Id2yG1l|L=!jhv2430dR$x(v8hO++=mf{M`Xc&5ht_bhkX zP)|gZ#euA2P7s5Acch*-i=-|-Fi#%lm}}+azTsiI#WjUGSBJdAn6t*cK}emhW;7oA zo#bHhb$I=&5l z*cUcu?c~>d^Jz8moQ{4C%3}13G=EkH#fZV2JrxthpZu;|!Q8zaVr$KH#^SFjDSS}- zCtoC9I&bQ!b~&e$w}el;h!iNVJT_p*gmf-qtbbNEt;Q3an|7YIJ0DUA>++u z)_?5HIJbB?p`+2BINEc8TDcVt_IO@j;AL0eJm_QoYF5^laijPj@g=@p?#uC1l$;ou zO*!NuwLHxb(l!vrJt!}LxRW01@;M=K80qz|LJ*>KOF)^qNS&qMxeFWb)*sX9_AX0c z5`Ta$bK0dP`uqL}>&qi=6?VXE-1iV7Uuk>h@uN-m=NzT+4}L^`&W}yHb6>747vk7Q z|C+33TN(J>WF-v$H8n*@*<>lU?uZZ>Bn^L&^EF-Y{E$+`DDZTqOiE2|)I+tdD0nqs zoY;||V!@wW%F?E!N=jiGKdfNA;rjQ&(SIH{i5pj_1FE6+A7i*46(-q)n4%2zILeww zP?j%q9pk<{*$1ZFqOW}9++D?g<#3l*V!(QA44;g2NcDPAsFG0K-|tjBTW-y*fI(Ms z-7c1gDtfH;Q?=FGcW9miRTObn-c#dUg9`QwQ^s7OEx-So&2@zOk&-wR*&eerYkw?Z z;uG&R?9ulv?zjuP8Tt|E07{81!*WJrmmdww%0phY3_&2|Ze+;-^ zXn4SuuE)miuP7~2Kg}~=$uLnaQ4Y79YL&;gQ(a_KUH|$rZoqZ%dwM9ZNF_<#@rV-l zh1abMot}#>FGU5MWP9#c&XDgmx_^B|JDzLf6YXTyu1`xw0T__$k!Son*z7@Rl7Pp7 zzCRRm=R?Fsvu|Esxk~=v6iZWV$%(^-y5T5CldA0F>Zl)n(quB%R}`Ad8o6#d8SHb%#;Gu0_DgMK>6Zersj7xA*QU2n0{fm=4AVm`#+WM6P` zaQ~3ua;mf&_?}u3#YdvL|7BwYy72@l2z2Ht>GOmbNoHNKbkcI?nU?hCTUIbwRI9$A z@Z8Mv%9%fEXKmwRl~hDE0yi;~-Z`i-!*JT9>W?qJJYVW+yOavOV*h z&v{O8IO0qF*@heT-bLJ2W}8q}dMSIk@-mN7brq?GFNGG>s^bi{&cw!TpMLLzmS3WC zpF~8Z7?(XgD?WF=I?1^q zMtj2e_cYnc{`~wy4jF>2aS$BT6{x)SNU@bwv6gRqL3yw5z+7W+8Hf zq~%p&_}t!hltWyj4^F)G3n4NiHB>SK zk83HZQ8$;HAre0a-`2lGoah0U;UN+le}ZkiwE2@&oO{r{aGmAoH;XS@wAQDuNtmU@ zk*CwBk(L(k^P!RcfZ5^Rw6#b^BP(AD*r+3}vKLmq9Am2TJD$1?zT;0g7j{m5~#`5Hozka=89N#licf9F|L zCF1_6=wR_+oWKXWhq zo=!eV)~v-ra(z9atSTNTXzqF0bgrrhSP;u!W z%4deTdGpCqeDKv#u9G@#kz2DM+3T8*yaFkjMOUUeMRM$dNqr zuj6`Hv%{LUYk$zW;-7xdX-=B!TDTc8Jb&~L-PY?|5$Wcn7gw`t@+6G6_`?S}&PkaQ zRcx0H-vpK6ioPzWmQTyE8Rn;RH`MrJP>=a^Tt0i|;WlE6`M^hyM$*U%HaYrxLmatx z3ZiTr8SUl%$cH`UwXFEJ{dt*N?Z}J#=E2`BhnRSVT=du@b?`Aaa!PnUrbHG(O@BNq zODoB*v_ig#mP*QLlg>7nk3EJlGkStpr#~)&qd~Du@J)~haYZ3{ zTIu3_le^N}-poNecRY1gvB&-SfDaYOFS;?_7?{O}c65k0JQC#prTHaJURyn*Pr0w( z@o|}5>?xF26O0!Od3veGM;;WVNPpvd0{mTMWlxlVnJF^ybSp*oP{(t=62|)iq|ht0 z=^KwX>YZt`@T+1DCXcwIKS;xWp^e}x?fca}wtJ{^?=4@3Z*d=mxQ}uhCu^0>d!I|o zK{)d`4h7~h9*(i@Kl#j;9LQ76hLa!v&3Ue^-%jw0u%ibcHT$cv15mrH6Uu>dFFNX|>$5hYyGN`1y%D~Ku<+~jvjDt)R7 z&N=#ButvY%fP4eCA9!*De}A+;&wc+R_GWdc#cIE?jm2f&Cyw389)oqc=3c_4b-pE< zcag1k(e+7B~D6>?qiIhBTshD zyFc?e?{|(+`6MI8;*)$dYAMCMEoSr3S9Itf$|KviAMw$?{N`9;-+#LNB4_@LAAHC7 z=kXG>Ue2}?&Qu=6d4RfvUf=`PCZg;n^3?J8!JXKCZzs#3G6Co!CF!eV{llC7#s<*d z+3YH}quqFYZ~Xw2Z+VRNvb7({b~FOtf%^$07C$sF;D=TQ`_dd^V7;tmf7}Os{P+0j zF|KpYU)`4Ex+<1lwSRXKEdv4jkZH_iX=t)l_g-4&WRc8|@zlA{-S*eKXc?^<8&^O(fX_w~%>Q^d#M*bMDab$Z z$idz{#!wcgIuhr&)Dd?Z^7Qi~C+Tb-pM04u>6G^)A9skspnokj9Is=`sX-ef*cMAp za=s7)ULYywB%KgYG463Lx;P`oNCHIxiZdU|N&@l+?{`POT=^}@5iq`3sK&|)9k*|_X6N?Ep-VY| z`{kZM>w%FzPk*0l4@w`5dRS{`N`VJ-yjZ>>u4dzd&JZ8GpFm-Ghs}V)bXeCYzKC6& zV;49^wWucaoAYvx4GuX(rt~ddd}-5ek%x$RiFQ3+^6b9-o<2(B1NeR=y?ha0$Ok*- zCRY5>+m^M2A^-iYDiUtmmg(}zH@VM^yLi)1Ky(qRZGSNqUE9kVJj?~BR-a1idL zqDj%e*jlXSLYDYYGL>^0(!l!PL%w_?edL#GK?Uqh4BT=Vt*xt_8G&_>DgGF6d6}m( z_&iz6hH-u$`Nw^0`JE0w;Sn}Y_%C?qzDSSUqz-K@j1``6p7@cR@~e?Qd0xvG`L`L) zRWb08K7Y+Yb+GZOas`WYj(iTw1?(i|76ydp9+(fyJ>=wm|05Pv>3dMi5b8>bwAjYP zS&G!nK*v}QW=%*#ptv{T8MnCLmt(A77c2VS)I_y6z2?Z_E$N?eEF;zExe)8Lk#S$F z{Ip$wBL(v*BfGZKrgsj23JHe2@#>tVd z+KY}`9?T6m3!X|&GZVwBx%a-DcT&aH-;AHIZcmGK4Sl>mfb;dsV=kcOo`1zvapBvz zRN8tb%0K4aUep0VTcJGqqG#K4{G&EXlXIDs57Vt1fZ}lu@q?!&=KX|F?H|B#r?&o- z*OnnM2{CpoTf|{;ERhUqd6ieD#BnA%bU<@XGiNXNWgVbO(A@J=@F~hnErWoDzSOUR z1Fo?$5UG+hZN=J?GD@`vI)5%Az22^s+EAr)p&AdzXJLo~o19?%L7V6CV|)j{CO!x0AD0TfdzWZJ=(Uc86ZJ|QqM91N$%O&~4rK*cmn<9813 zl<9V*RQ$^ORx#~PX;T_7>c~C_1-6CPO|(#=<>SnN(Ht)i7EW8_C@8;Bt<)- ztp(YC^0Z;rRv^Wvhky7%l#_`OE7-M&Cl=_ha^nX7%nv_uC5Ji`18zX>$8ilW9^t*D zdc_wUji4zf`+KO}Ql>iPm8raxM;zQ3EeTsySkcX>H>AZnxwN(Z!ZF@|;u>GnJ}N)q z%-gWf@&wgIEVQ2X$oEIR8Zh@M$ntxtTS+v>x;JxXw5>BWB|f=?YW?N2mz=14#!@8_ zCkItZo&x5qGh$37O*}(AMXMqrM80#~FWj+YcI5+o|_4dSeNsPk|sA6 zu1dWR$ibb=nDLn^Ia8BBccD+zB!+DndZb(lqnWSwOQO_Aq+=m;^ElKwY{(t>3>uCh zX5@LmDKE#?Cp`1I9yEhJ5wP67s>UN-LGI8#%sAR+f4uaL&mzsqmT|1JA-J-13NKju;golfH#_M$^iOBbwydr!+xpYnpIvXp6)(hdiv z7zFXEVLz7(FA^FYN6RlUI6mI@y!}g_{e?~aZ}JOQsq-ht{-b@Cmq9NQFi3i>i5l`^ zq#5e3ZwwSzmyt&K)=z;Ab`$adP=>`TQ62MQshumV7qS}g5ANH(9)qbPlB& zxk=L(4$-fbmg?2S_%@fhFA^PpcLxF!#DHh*j0dnktZdzJTaV7>J7s8Z%ngJe4t)BZ z!un3fILR>|@~5m9K`niT@|f5fv;9oVW9{2wN3fnncgejS@cd)F*?~uXQFrFAd2DG@ zK9F+Cu)ZXbp9w8LJf)e;j{KW;yrfOh3{&f(X*+K=L{hy!X`uephR<{n-V_f*vc9uBi|=ET1}<1BKVRMOUgiLG5~_j$&veED66Gyc{?bBj&j_m^Q@m%%U+ z6bz$KicYth@CqbP?=_d`{KTcaJ(+Nsk~s+sb?YCY!&<}YyJjlx-}ImL>MratHpCo-f9Q#@61)SYFOT&i_&?^mJeCr})NJqA+&@>*&7x`ce1Jlvvk)bK&**F6th9 z(HAMP@dUmT?tT>@KN;6tR;=n5wc?7`e2aO=Hb?naVO%U7=-FHwT~MNs_uPg)Ajeq; z_Tm!`k#SA5%i!sE&rK^CEJ~t`q+KjX8b=h+;mBhO?VQ}lgjIQD<4+3fS4iYQ3x;*2 zKqYi|JIHta1s?y8>M#6%+xiL}|MUD`>MN|CuJuKI1v|HhM>WZt8glVSrxvFAK5la} zI?hiucPMF|EM|R?Q=43H-hbrT{j8%f)Y-9ocv~;w4)vgPrkx&?R@ipvv}a$ z>LUk!TmGzHGVmHsHHmg1$OTbF?$h`ssy5!AbxQ7f&{q%P)OI&bn?E6mmL(oZq2ctYBy3i&#gWS%2q+p6aw% ze7;!yQ44(FCDunSBG!UDb2s8McO%J)v90Gdw81>&-HbXxl1)NTIT22CNGmRQ*UUgm zC7`jn5HK$SVv6N|SBH5E`!Q(UkV*=nV#1kJ@aT4_uJ}x;ob1v#At|l&7V|sx|k%OWwJUn6lD@r z?!(l``BXIl>9|X^qHEaO9LH%TeXiuw3`+9N3ljLw06EKaz&eoO95G|@_7MBjR) zIibwFQ35l6nl%#&oUF9Jn^sLojR|SB58+y}>9$=9aPYg`cY&Rc66fQ=#*xy&|FOEB z6pLeSbAJ)ryO2K?KR(v}D2Eu*HxKnx-~Uu2<}YjY^lkwAK9rrc-5aMtZatR%M7-DQ z_Q;vjD~)-F#Rj(*uQcNDp3KyWLX$9~2y{x%Q8sOV|6Ip;Fz+D;#6wbi4fupyb#4t+ z8%A@f=>9kQa}VG7h$evI_mo!9MS8!1T0Vl(fgs2soaD25%>^X-)Rq#*#^ef!^OP39 zJQe#Sgu<7hLhyLBhOFx1&*0Sx|kXC9Q5I*Qa)$%NFrbJ7H{u$NJ5G#`;xemcJ zp--gZaBinCUx=p7R**QPRoh_<^I7Z&QFn(t)vLslzx*0~`nEv~OHyTJB~ga+^v;j! zP6WMrOq@1=?ZmK@KL+$cMA-+IDIeNa@EquWJP)9yq~?#a7k)S3MbyADCo3JkBED(s zI$3<5=Vy-kiPv{ozo5$pj*1o_J3XUat5ag`(RgCUSxAcbdPzkelb34$L{vKp$7Ak8 zOF>Phw2}$u@||g3{eSts4abNn%&niYu)nMSSXZaId`Ej9|8qLb697Kc(-ABZqHf8_L&Z5Uzuy*}Tnl9)V&gwN zpi0dpo$qccSqNw))t!%RI(}h=%QuDKKTE zq5Y)|-@~S%3Zf0#oHLUg2o!t@=`-FfZyp!`Loms+V-DSN<_6y$Sd$HIrI#N7{omzfYWhAcT>b9$t7x3bydVciQ@jdHa z_ugA^KdL@f-tV~ds_xT|;el6IkA9s$zErm(|B637rlV&`Ts`l}3>U;ZWu440lg@jT z<-Yn&Wg@q{F6!V~zQy%5O{BbkiMQh{4TI2mp4CeoJhi(;zxfER`OBo&&!Ld}$G-k` zdi$N7kKpa*ZWcVT!(j|CVZY&T|JdkmHrYVUFwL;w%=uYwkF<_Iac;RNe{2kbDvYo?*a z`)~z&BwKO=OMH(6$3?;#TqkRcX9#?m9f@IzJ!A^JuhAWTXBK2r5CdD<-&I88b?s2J z2YVIygX11L1XS4F`CkQx4e?uQ~PZ0})wifQzFE4*Kb<3k(N{~8D7-LN_G7wvq#Z$_RW z_n@ywce7CoH9yYdpZ^q~ZG$6X_t}0=zwod$wALXiK;i;)wpP=Rf9UzJFAl7|y|{1g zt;HH2jzS|%tVah8OU#>`<{E&`Ryqd2_`ohcE)IUWnH@#@5H>%59$-kawTtHyY_SNK zf;$d-QnyXtW(oW}~m!ukq;j?I`(jyV3pZZvS<)9IaOqcXoIfC0~=P zr$aGHHk0wwA$Prhjh=sWkH>!gkN>p*t&9K`i%Kjr(Alj9_AH_n*7N0aZ4&ZnLh-EE zH4PVw+CaUiK9$D8LEEBFYOm*23uN5_VbaNZd>(a0XtV(mMOb03_-H}4vgccwT^Z<& zUPmz#BnQ<@4R>N)c!ah}=E@1XKqah&fGvEjTQFPvtgb|V(?Y5q@y!%EULruuwe0=Eyuc&J>S&NS|dAfNj;-PvylT zxse&(L+EG&UUVE&=v%#xxL-6(GioT0bzRdC zpQoLJ2sXfjXHgUaHpL=XFKM!ti$=Gc%$E_6uxqQ9ll6J-vMWRS-q1XLZ*ht8M63GB zP}&=Rnk7{B)BsIru>o4_?hP!b2A0&2e*e)z)5@WrQ*||U36L81MJ{TiJhEQD11YWu zKlU?>A{j7^uXmDfa%11wb6j6-`RM!+9meda*Om=i4GrPtTD2`+H`8Ccyi^aG&qJ04&IoD&-k!ua6d7yZ@yV z9vI@>&O$9I-^gC9!bQQ=h+<{$VAq1!%1CqYrlGv@49^oS<*Swvq|%YwXaiZqKk{Y7`I@KeAcAC_C2q8T zfqBF~(k1_xpU!!`!1MFt`8_@b~2g0M}WNhaEHWS-o=w^@6s* zmv%k$3$=_m!L@?pr}l55l>2S50TtGK+k&VrzT^4@bw$l^5BWSC$Mt{5(p&8R1InR) zuAQD5*5JNVP}J1UY0HUTE^!?uy0sO5d=i{n)&l%$5y#Dk^(!(1tpb`Z(xN4Dt7;-z zaB)v1((WOti>NF-w1JO^#v0$CVaN0q zb_K2%8tdpCNLjU&Tr?jo7O#z97T$7zW|1!=$fP56x`8Q<2g5zJBNWj)SbAfRXaaPJo zj&EVlhqUiVZ_mF*5tqYx9N)&Ck7}c55s-pjnW#6qh@dDaLES_OKDD|R_g+igH`Jvu z?M>8wnh50=<0!v;yD$IEI+Iu2l|AZmt`Z=5ja_6_g|KF%NWpzsv)xH)(^qIq{`a;u{cuydnb6>hHg;G1Sfd z$&N``;i+v43sx*swTytPE55Pc1?se=f!$xmK8v-rIWW6wt~c@)Lgm~WsF#K|$LCn$ z`dzcX%G)d04$m<)q(Rxv-&@F0UZY(>|F&ibT~~vhLmGJt4F96-$%A!dW z0f={4p=`l@N3ueHN2mEb7+g1Zmu*_uZ&i%}qhg!h;aU8>8|DERj$}@~6^t4V|_+Vil>luYld8ogC zA9bl#&+7dy;vWC2J&bbaU+xvVKcsvM@y%_7Cwfh%hU7`Lwl-wSiu4n|YimFLb^l;r znoGh|KU9THt?L^!Zm)M<6LL^kC$;l5+Q$D0r;5=pLyGAempMm z^YM5*wD0k=8rs{>`Ow~eE{68@bJ@0*t{>xX7f}>j9|QZp%=&{Z%V68>+ZMUCL%WS_ z2g0^n(f)vaAKE$C4j{Mf9-|1~(|c^!_dNg+__?6?Sqz`Ez6W(YUgPKE@u9uM&+DQ6 z@OfxId>+~lpL>6N4uBN*v8SZ@J+E+HaHamH=K`X1MOmd&@bt?}t5#*L)g<^W?{951-pVpiMkK zw;vzdlkfHg-v2J_zh%dRUE9uk%dH%6?K5ScM&Y{go1uUGEuq-<5pBx(x$SKfaq`{P z;Io|DKG|n?FZawZd+++wDBD-`)!sDes0U{ zefx`XlsVpebQ+fmpPNmN<3jzR{c%jB#(SUJ*7)4>bKB59$GL6Wwz?hs&Imm3R&vmZ zKx_te)-r#W?=4i%EyPXk*Wb2d<^W<qpx#@505T=e{;dP}Kxsa}YZ{{P-BQ&djd%k^m7@NvMSX*D>ow21 z5ImVLYL^0)%jnovG@CXnNi9V2scYQKT*+#?I9`7TJY6G(v)tqllpqhqeftD|RBeE7 zH=F{`4rHl3;OT!AuYpn{I*q&z00=E^d-_f+{`q~LA_Q)}zcU0~o$vM>EBeUq?*h0a z`S*W!?~n5dyZ~vw?>7a8JXXTMn;@uu1HOcNMP9(OFPqg^{G>4Oicb=2FnHxu!3V|( zV#J*hXGf>c%ki~Nui%xK0vuWg_7}ga@pmI^ zUnpYnXNB*3_BgD8JcH-Jqr&qX7WiFgSQ&qkgWurqJ%e9DsmUvX{%b{g5EB2sfSJ^W&b!`$0p8{hA{5&X7m)zgri>eA|EL zw$HUbK38sVo*F#<30{nRxE7CR$M2CI?mq)^js1Tx@QL$T*yq-8uW^1bPD;M;xz+}L zVVvdv`StX-=W1~rD;&q-pY1=->xQ|=%V{yX@nDAeyvOGn*4^Pae^>Z-ZO5$`=Fzt= zmz?5#H;O#lal(9LVaK(=y7CL#U%-Fw*T;T4!~NjXMpHD&?YQCZP`Fsv!#>>@va{>M z4C{W4{q!0raou+%y?S|(f_}+iB;W``V z42N|?9DM$%hA*r~SmL@J#@8lWl{o*ybGG<=8$){Bml5)=c!Bz1f82KlgA zo!#FKe*Zu1eOq(nwvy(1{|cOnIY-i3VDWw_UFK9vZMTn)+U{1{eSEx*4it&1qQ#p@ zluBw%zs=jk#y;#mZp8kL{XKv42WEf5MuL|jcqta!?Twj;k=+st1Tqte%nx7!$@**Q zxAU64T-o!t+vi__o~r%X@4mRM_GdePuC9tL!z-SW#<%Ex1nytMymQ5OA-AxV`t5vf z!<77n@5^uDRKWKfdicD@hcbh%d$7Cs67+n<^Id;|ec->qKJJwZ`iJu+`yXCEe+T=|@8-|{KL0YFzuP`{=U>Y6zpb7x z`J@eJ6<*)We_35!&tHG(b7lMD#r)N3uETc>@UO|dUVu>A>+8SIU)j$g2YvoJetxxu z?|zq`U$0(#2by{D`o-=1uRqS&_Ql)zo7XSBcSdq^J=fl>JY^ByZQ;wppKCvC$lLkj z%?G!*|Cj4;Ul;f9UOg}Gzm~uL@{Rf>TfBQw)VncW&42iweD!~W{NwAt-~9Ma@&EqS zi>r5E-CceE)!moht3d>HfB4(;59Hm${bKd%HTml9`umr!l7i-k*KhB>TD%K3-rwE? zdD++U%`ZG3<@t1K&#$)^7bj>Lry*g-6P9i$drau$<#&EIZW`Vc4CM;PNZu>01ow3J&&U{7! z-xVsPVP0WLLO*z%?W{_ul`T7J-I6G~1yd8MGS*S`?PEf>sn6)nGGLS=F!i8Q0Jc8s z7aZF9tVi&Hc4de23b_PAg~f6&;SNPF;=s!u!$p65cvpI9kuXccQ$e1^sW9M+4kq)P zTcHnbr&g~>;IC4WtQ#s>rsPiG0R*&1=g9H zcA2W1`Uw*oE>Azf!pkl@r9p7k z#@l*i243&}&#OSz@Z zFxw>AX7R+=GkL!Qx~#eSG9_+pG;8%-8VP6uHIW3rF`xen!_U=N_(&E9zIWDT5@%gX z@JP0QhANdN#7|9@)eW#ADA-+z_tQ=sUTwc4n%zFUlv0Eb5bOy_m{f6xZB&j$A z^$%%4lCA|%V_<;5rQ}BuO$9p5vUFGW)LN-Es4e}62CaHYT9)kZSxtzi-1qhiT^A5c ztGjw`U4={YYS@tfcqaI`Q8Nt~!KZQ^i}sj*RXEXn(UsHA| zrE$yzbxNvL{Ma-U^@=5<{4wcn5&)tvcm@ zaoVvMrxF+c%T}K}D9l1tdmlS=J^vbYrDgu4SI#7Of$o18h>r1v) z{*2+F4)IXO!;3lu0cr;XC}#0vjbWk>F%hvq`y?G1Lrg?aFvid}hiIE3n0=C#tRW`W zc;r!s&_V4$2Yn1}XNb1*NywuPVuRX$#s-c(pP+eV*oTz{1dU^(4IzcrMhZ*ORok!> zT^qwg9pa%1Xk?UJ#_-UFcxdC%NE-qQtpg}Z4XMr;CdLpG5h&D3Nn@CpLrg@(C_7@C zW0+V&Oe}%Pr&*Le#6$!S`IAh_8Db)$h;Dw0y7r-o)smzSfglbL$oykqQ()|hluOru}L3-4ZQ<4lyM3-hp3BqF^;%3 zMBMt6rI9`a4|)q84t^HXhfqQ9Kn2AdDY#p%3xU1;{CXoH0C%As)tfJVuWF~*SAhDeJ*F^050L|Vj$F{F(l(sfvnoH3-$A=2h}#4(3|z`*PP19c2- zdx*A(0pl#&86qx%fTfNa%Y7(dcA6V)RjNzdU@lbyY z7TH69VYdN8x6M)OW_t)Q>^5MGeGIdQ0K;wr#=)T;dk8V?He#60xCN&@gcx=UF-D`3 zJ%k;08#~5Oc7`ZBpMpyEAaK}Sz@h81I_3e`9s~}%4IF?^K>$gCZGXx(_3qD3elrsb*P8&!@Ii5V|c_NIcvaD)~ZY#zp zHp31s!icUpHgGwA_{fGlc$`Y^01{o1ZCTb1-3}^fsDoV823)C*DeH&&U_nD0Nqa?AQur_RK?IWUAFa6T#P|30=Q_VVrizP>EpPVgIq*#krhXER8uxaaj^!u zSPcl%6hk?Gc*|HBKJ?i>^~u*>7FfV2Ce9!er-5LGrdg({jB`qP*em4*hMBUW+U7X7 zR1Wa|zN6Y64g9cZRf7pJiweqnTc|xP)ZP|aPYbQLh2GOb?`#on0#=mdI>?g`#&oPn zLf=PXCq=Xg0^SJn@SgUZ=$90=IS6^W%`RUd!9~u0WPgT3RY%G~5(GF-20OWH5|DI7 z`PF0+yueNQ!}N8Cw7p> zp2XykXNe)H$^y%1LQ=xwo`KrsjtRLz1r?H}Mr zpFPf$qEV^W@*}9tCCO~DVXR*EOYBE8?n`AU|G=$ork7=qKf{NEr{C90 z?d0gpdi0|;O?}p%5MN8f$?jJuA#D9rr~z1im-qZ|RVMnBnWGUsgDky*QS~{en}6Kp zqRBPe+gW$UQp((Q!@S#h{XLmyVh&M3D}Up_X{&famvKsWZz9;ef?(}?kjm-@PoKni zO@bv!(|F61mWq6_{ysDYz>p=iix#)%r(r5ZI$I~TMM_-yNm8kXq%`T|-keiXjX#cm z=wXx6r}a&g=>g|@N{IJVz4rkL`;@NNQzsAWBN_1ovwFlM1fh1hnmlXnw83%@!Y$&5 z1}|X&U(b(S@;7Kes%w!ED9Jhvi>$y!{DAXWdsb!F?eN+qkxPSSuj-R;Q3J9zh4e(I z)Xbx+x69%U+mjn8-YI5%xvoDFt%8z&K+Zstb*I)Zeuu?Lv%I6fqM(wjiRZk4RYL{M zD)iR~N1L$a`o|OG#v2&9V_x&cuhFac1WisRXGJ;oBR8cX)ZW$DSxFe+189d~u$OXq zzc$yUcol5uQ`>P|58kN43JV;WHR^geAPX=EfP(#v8_wsFz&$qPKcX+VEuWl!_07{2 zkKDOCnQ$X)QEapBnbvVZv@u)cW&%$DNIvWH&9d!f@m?K~M5|TDyIr~oUDjSB(;(V6 z9(yQUN{pfPk&OQGg`QF8ZB*_H10m-t9NwI#x^5hbo{{drE z6`-vj1ZY7~N&%KV6i0|82@1Ik9>N%i7Gw)*@MZzTUoVixTjZr=OH&lAX89G46BV&VpIMoTf8 z!WYjIlgslduU@0xsYW*r&|-z*Z4p?NWyF_s{8nM4^by@cf3x5Gcv7A8g zjwyfnh!>P?q5t6m>nLP@4PpqLuhoiL%%7hE4}}5LyNV-t+Y|wx$9b;Zo&-imqP>*Ho-vZLqZxwt zy`8oiVB*4baWq3A%`&p0nH11y#n$NJ;sVL_1|KKr&u-wmcNde0ZtITF82&H#yW;A- zX!pI*qkGK#0NNlw5;ZXSg#IG#xA_}~4(L1ZlYEc&8LZoXk5C4>sqmlVpQU0+hd#ob1}~Et)L2OtX3hfM`1M0p&1$*~ z;0CiX3Ljb^Cm$PX1 za@T~Fb5b#Xca#cQ0f>uJL^XYF%Zu7O)m6FSQothcedxf46KRcMG>G)b8;+ZkOEQ z)Q12Gs|0m&83ZIGlA9zwNKg37s0z!;&@qb>#MjP$CtYw~H`Tg@Hccf5Qh#|dX=3>$ zl52~zIPu~wcv^meY2>ws8^oMQk~HaCzaVs8++kz^@BIYxnV>WVbCezfXZKm5*y&(4gOXo zP^KY&Ih3*SIqS989-XTXD9i_=dxSk2P(~+}F`$q}o363m1mZY1tf>NQ-o9MLE3%Bz z7$RF9L|M=W{FCxC+G0aR306_ICSJVl8vMYo=31a0#Khw>tuQ&COfJvK<#loYd`jR! z!|$e6pDt;df;m(SxC(m7^Sgb-&!?N9i6o7G*}5@b$8o^?1Qmaa_|^n@gb~dA2tT8Pt_JA>ljvKOMi9klOSH93%=$V<#7CgvS8lt^uNDnt|&Z8j&?;G>5^sW zM3YqAMEUQ00+5j@>5huTJN!S)@7x&B64^xM_tdJbf^^8nk1V>d1(Nl086r?OPb;k$+k53S8#&NZbLHE086T)Vo5PwtVpJg zCEHQ421lwSM^>>a89IVrN+2nQ1C^VkBtu1j`S%4n5^`FejuskooO$ zVQfdsg~6aDE(^}J#HGOzNvxz0co5N{_PuW4HP&H&rA zFqbKpFUZYHaY3d+IBsQ_ixsX9=XUe>POK*;DAX{>@ni08t5OFoKxX@iL;m#;K=z(1#UD7Oz zyCyDt6GY2p1kucVKyx93%G|nbr0DF2-?-#xuZ4?AhplNy5q}qa90W8)eumsQ@u`QF zX&fS$WU|?G>JdMC-1w3khf_BWlQ^Og%ci_J-Zh`LLv5ob;I}}pWBL_;BC8#1=-r2c zW{0+b#^KB5&Y9(Ro_!M%P{ycw@C;m^>|n!lEmyV`1cLcqEI6@!$DAgi=xAn%y zdBc+)*FXqvcPpXixvYpj_&pjeL!~)r`caOaS)Xh2eWxDL@~x+ocz)d1?TCc5;Dt-m znd~38CLE*BGLZF6C(fa_<~<2Ebf{9Xp9bUEw1mA4d(D8iI+>v9;ltT}cI{DghT@@^3V?A(pxQLcTiUY``pSTiS4j%s(L3Pel-;?i{-Ky;SH zC|mnW2FfqRhC=w5Po}Lo{pqQD#{&H$I=Y3~nA8d7n;>Y0j*igBkB4qYgxLx_tmHGP z1^+flvrk1Wyx1%j0e#jqIR0?XcQ}^*s%c)cFk5kUts8fL^pW3?e6MxS&+w{xqHjFk zH|tW*XLys*6o=UglgPsdH4AyTfs)JsD%SI zy%HM1Spak#N2}P6RwsyLnV-S;|5O9m5fcRqE}DWQzhP+dh-pFVreg>W8M-~q`3Uar z|Gooxe1a~2F45D+!GWtP9x`UbP?H~T__R$HyJM3D;&ddXD_wI`Q=*#ARcxtr$VS-+qXXA z1^h^V5Uf5M!P<|Lu1(Rr37iS4iM-Z?NOrKQZ#{7@D#OR;)3w&^jcfc9KNsi7_iL=E zDGg74-apAF%>z`hvn+5!3mU}R+Agvm_3R>_PZNQ58F9vWf3tO*vfN5bll7^lX}IlWv3_! zD9PYVB45?;CllmHEJmKcTvCYOph@|4Sre4yUvbnrF$64ci?2UI-?BALbC&S8bl>I* zi$aP*R-Ml%c%bqkKN2P$)>~k8ha&fXe&#O%+8V`8$Y+WMUCewI&>fUbF1y7%pB4oA z9jzi+#5cM(wr_L78TxfYH?(WJbC10hPoW}>Ndv74(X-ErS0&NIt+3tWj=l~!u=f}d zsPaA~(Te+URYD)~AT0Oi#{@Rfb60*~?)xN-7i8fFK6@;SVADMVtZF& z=l0yK`n~fz8j*ncH2e4KnQak2i&qZVU`i88*!zHF?0p2*R1_p!8g5?+oD+{+S}$9s z^w$lI+{b#Mmp;CS{8T>)e43UepswB9+7k30 zJnGu97^B1v>2nwB$)(s8v?kPbynV}8oZr#(mb!7|b%t<-le~E%!0j4;4gm7DDB)`q zXx%=&+u!3+4-=JjTx~K4heGOgNEyuN?bu8269~#vq$ZaImA~A6`>OQR<1gbhUyuUQ zoT8L2^PtFi^)OO>vEKx(Q@YGIAYo^A<+Jr>!8c}CE1IrI6yHy)Ly*%&5HF@7$rw$i z^ntHLvS~)sdzwz)yqI5q-@K4QZ-N+P)U%7p`$a&ay9r8Rq@oy}nrVunm`jvGG&ygJ zyx849c3WX?o7F#4A32pJ!V z6=~$vt6emzHNsMV?IetxO3|B;q<0XN5Vu~ z)V(S;yMYzFW8||0o9l;Mc{GlHD)-g5yC~Zh+dRe=tY%y6m_1QtQPkc5%p#p0VE6gf zYRIsP!n7V5Z78Hv#}!ZFKG3m%tVr=DTfvIpcC!e5*5yQh?FD9iq#&}}T2jK?jNo7| zj5VCo=7V2u7xion@p-y91D~y`w3>@@+^gBJpj-Uq=fJBEp~u(H%`_YkDqloZ+`*{} zyKhQzzoEFgR|KzTS-eSID#!+`>UH~AGC$iHh=XVU`~u;UFKz zTv;XcKm7@R?O7UiERu&0Gr5~Ii&LD$K3~D*uTi3*&tulyZ?dpTSu$ZKAfeQo!3J+3 z6>NlfKJ2x>PiXo+Uuj7!{t~|a{a86cA3q{C%?3_h_nv$K|NVd!8RZG4)nS*e%uw)$ z%kR=SOu)=9o8AaN80Bhf^Z=T?k08OTyNw^xt`=c`2E7Zj?p9r@;?4Hzse{z{;&QiP zGO}mo*o^92*=T(K4l;!nwc%3~O^9kt_T}YnMU?{*CGNU~M&g8d#I2m?_w>xS=%~s> zKboQXX4}Wgk06DwK=k_K0T&3UbU>oGiPyn+x0OdcSeUQ~hkoR9?8R|`8ha*>I*uKX zYVfar|Lec~+yD9R=!NIAXWfUOa`o;x0I;u1RlCFb)tDKDqYTGkwmLbaOaJ$O|IdF$ zy8{z_-TK|6cH`tRpM4J!_J8~fx{8y>PBAMhulvH@HSOYY7pKg(NW0dw{eh@*eJeZ( z7jH^{-@COIyvYhfB%qcMDg*{rBQs|se zCyhs`;n}bjih6n~SpL&LpxO0=iKn`yDTecrwy80A z8QBqdV{s0Vi}M3qPtcz^*Ne$N(9mbcN1MbWjLvp7j!w}(@FO2m9tx2-joqojT3kTz a(er8k+7oyW3cQl#+5ZDA>ng)7_#yyFWSTet delta 911840 zcmV($K;yr<(MjUWNq~d_gaWh!?Z|&Vmew7uj1VBkEbF!d2oORe2uY~BZ5uO)Q4B&- zSnTKXd_VDi$u%^D&=jG_%qdoCoOeF8Z?X1;&>YWdEM z`qr!E+12m4+}|SKSL4*&KDK4Xe!6x-`h z`M$Un$~dlJnCC*7Br5hNKftO!fPcFzwv`T71cCQmFpdq~zahST|Lxih@p|^|a(K); z+=8xhf^Aw76%~H-y*0l45aE9vW*T_CYf!`}p~7t5~ol@5-R~QEEQE+@j3g zous-_&Ox{OAXjJiO#$TVCo${@;HW=xSL1e_|J%iMpAE-*1>PB;cfo(I0=zdO|MMf* zPDkf|-lgyTj#Z`Ii}#I6UM2bU*+|sFOg`G;j?(ADX|;R`*(2GWS}Xh30qHw?2z0TB zRNh3kN6I~l)HTd&uX4YA%zYhwBXhk05Z_OgGslyx;F)k_tvC(U%!r5h!0Pa=nN001nV%7?LZP6qmKhmR7M zuda_&k!G&$t@p#ZyUwJhZ+%O%#LRT({4{a#q*Il5{A!Zw-&DGMI*@<4lW(5D?DmAy zDR_LjHe5#u#c$W;MtM~6{U_Z~L)$!WQu%9_RxX4;Kb1@Av3h?Wxwq$Mh<11mMYG7Z zt53eWy`aLr-!_zAe%3kTq~_!H{LXf@-5sy&)|Dl^yY_#K`ZFU19EZsajlZ2@|DTVy zXz(8JZGW$A@29(%85;Lu`#dpR64g(v;^~y&E;0Y4d5U5qD#CZz@Se48Wf(_*eMNhH zfXCg@=4CIL`&NI-2-C=Sjfd*~dyw1hWPxR&Bff{XUvHFg@u+_+T?&}C_3!l(9v`p3 zU!KF?R?~5-c#0C&cEfL36z_$erC#jQ>Yy3>ma^WZTpP%vsN#_Hw5o_LkE!d&$A=`! zPGEN5kB<+jXBNlg^6P1}Pgc&-OFY|FO)D9@b{}$-^PGQDa`}fk+aKy|Kj*-)UuxTD zy^Do+awIlG<8ZFt7b|a`SD^M1yDihYz_2|K?+$?%OLu#3t>E8&f2M*x+j*yovdNVi zc9=e>qC~aBd1bGuAj_ioHm0{m?NFX$+Lbz;kPipq>y6^?&kOL@jAM1j!sXTXy_ybO zX`jE-l-qwXjd#IzwhD!tId1s=7XNm*+3)e-nR4F4K|AlV@fGd8&(`s*Wx-RZF8}tI z=YuN84$;?R&Qa%MuMv3dR(bUq5%{^H_^Oou`DdvUoyzyihtE<0q@w!xNtMyYY5k3# zt}^;w1@?(@XPU=xlw|3{pLduZc7{fQyZW;mK16?>uWDbnFY~+qKR&)x`hL-lE5P5W zgY@+Qe0+Q$fG_)cPY3je>p2+tBQ)*!c_kU?6U^qoOtZ_;`&*-iO$! zc0Sv_nx=2$xBWU&2ZVh2<&bzmmdKZ#zLxDoX?AG)4b{mjv~gtsPEmbCmzN9g`K?<| z`=fvJ_II4PTOHibsu#znHN@{KHh&-Ozthfp*28&&PushdEPaOTKZox1R9)}5jfdwO z_^ImNdwbolxq(B!)&~B)``!)3z_)ky>fl{d5IigRA3-snJxrWlF~PG7CiowHy?oPG z-5U+;&U_UUgSR8n$!&W1WOOb_UW`${nXPUhSCF1Q)c%q&G{@AN=)pq4BdGCMG z4g)+l!hp_<(~uAC$nMs=sIH@5)A(yY!aH6Q=7CmvPko~@Hw?>Zjot7`%d;#BzwU}Z z1;)J)FM*oa?)EDG?P;&Cy-nEPolnv#uNvF**XMcUh7X4&TYhXw>hW;;+-bsAHQ#P& z#NF1P{pAGLY2Lm8$C|e*{+s0{Y4(4lJFdUiUHRs#ZwX&_Fn_tDXa(_a*P}`cZ};WB zLO;}tLwk;o-9NQuSJ2*U<3Yv!dQ11Yw@C8cb{tx}hq`+>-{|~ej?db9Wu{NtTWRRA zs!Ixe8`bv;T^Y;oqQT2Ce6QAvF}$;hk3Try$0N&*y+z5Nw^}oQshn2!gI0egS1Nw1 z`3&REK=lsBd~W}EpKzYqkdGF#t(lV+wMNCHr@=a-)8D>yGb~G*FH&lN}Z^v%+HA_-@bc~ z@5oz^OXW>h_pXIgozuN`%tf^id#@OWQ|eZgW-B8PB184>(RbQONOosO;Am#GJzfI) z1@#R$Rcbma{(RY5vhBaO=qn0OMK8>U#Xh=QW;-e_4Pk*Rj(eN9+2`pewRMl z-p&wwM$11~bzU0Ko5^5BPJSUK|Ie0^A1u7Lq`kW(-Rq-h4^u9NdqJ`&{&e~K+VrdX z(|B`{wBs3nXcqjTS@3@o9OYEp2W*Z10RS!?0YFH+dO(19_3#8j@#f7JnGBvTyZ2xtpO;y{a?lX5v-*@ZP9mT>s)arH+P7lGAr-J?3Ie#+R^7WD_AN?Vg z?_@urBae?S=O!b&Vbj#)$U#VcK6S?Rh|e zFJJXMDSy*jyCZ)D`11Jm91d)30nZDbV>)TU(FMZ(MEm98=>aO86h7=y;?+ZKS6#IP zh(3c?%2Y9LClQWU4{*Hty8pMDf9|j0K=OYNazArp-~8ucFW6~DmAqO_O!ehG7gFFg z4w9J1beM(Ft*G=Ooh_>tEA&j1O zS!Sr4J9+%en+xD?J1NCe9L0M!a^f@i>(*!R2*t!By-)#nAA0 z2i#t*D(XiS2gF|=o(b$k-{b?OqJNn>QB{TIrBeEW6UY0qp_yli>c3kyJkg)}oNCS9 z<=&6$l{J6aj2YXVS5<>CAg!Li9%bu7?5+y&_4>LKb)=YnXv>zWL+_U-}L*74Q9IkjzX}0#w zZ(Eh8H#Mid@3`^}=87@bDa-s7^itn<{djG2zEe~89Y%i#e-97JqsKD8g6F&UJAZM| z1^0)Tol5k}neWfNcge~7N#qO8;??QKoi>ht??dVeRRsA_*l<#;m{M_C7y$Zh9fB8H2 z@e5y?HxAvO*}cy<-A`9`F-bq&n&G}v`TcdLr*)^Fs{8UJeSgL3X~pX2I(>Qi{Y01Z zvp(mas{8!BdVj_48HaSkPgQ*7gn576#o2#&7oQvOi!;-jm7Kx0Jtc9mDUF|C%Ma`OlYkgfqV!-=w!NrQOKI zgZGv?%O~&3i}$ULZKd6J$ikj3g<&Z2Qa)H;^z5esj6>7!H-+A7_a_qwh44*dTyH(SjHfpDiAAc*haA?FSnWY7Fh%l}55e^QeqD*krf{ar+o%jglOWOE21QH>yRjGa+YT>bX2_1g7I zMP;=N*KL^g@6T_P>ifT;t{{k{w{^6)s&!>lf-8Q6cCk?_0X{B#>XZR&G2~12VGi1b zJ^JNZke(w+#eSatYzq7L8~K06Zh-F`-`an@KWJy(XiF`{$NT-=e*G~#`wePqSoNVV z;dfA;&H0%D`Rn`se{bVGP26j6-m2`(Rr5acf+4=y61?j-`xsQ}w*TzMmxE~qc;N^9 zCstC?tvrd!dqp|#LcMPv=BMH>`(4-A7rvg)+xKSobnf%{j)L#`e!hPP{5`&_YnMuyY3&sdvlhmoaoAt1vx*l zK+u2uk>xh|+~(3Zspl?`ve1vv&KCaXlg?de@1mSHp8o&+pa1v2?(!LY?$rC@?p}&3 z(Q{NM_l0wJ!4F%7$wPnQ-uuG$D=d76Bf}r|S@^ql0km_S@v!q~@+-c2zi+TmX~cIs zCf+R%-%YcM=Z~_VrQGl-c6HB8$^($At67x1oW-r(!|z1_$EVoO$;>M~_*(tg6W94W z^e^90$uIx1`*^iWm8XI?A=Tr-@5)h#wJUt~UC|T2W*h$R7^Q#wy$Vjq`tGrWKN!nz zyEq)%fC*0IZ=<-Kj_%D8-##o+@rQR&=&yZinCR>%Ubu z|Mu}>IKUqtD@zukhDP}SU_LEXKQ~`0LAg)7sr>ED2g#+ElC3#n>B!>cngflkxy^a7 z?8k?{JUoGquT_6%GTWWmO(TEzw6ObSzCQbrTHY3}nt@#NO8<+R#-?5xL}zaF4Sc9S zfTxy-m%5lIdRrGEUiaPzPCox7!Cim(R6IPo5@HU5k1^)T-#g3Tzy9e9)BphBPY+Ai zEU0Mp*FSCTY2k;m%sqkVn6jFZHGF9>&!x+&&`kb&j0b<<7mT`iI@@>RC_OmaTwSp+7z7>)pS}U;jirBY)ZlCM?(Ff!Lnjq8;uKe1X{gd>%gSw_n@v zgv`CM6#pRfKM4IloY3F6+LIAw>B~>s=YJ}>FS>tt;|oSzk{{k0au47$fTUMRI+_lW zD%_J{mOT0yOsl14qmFg06jYY_-wzLqNw%DwKW)hrKB3ZW z(AR(W!5*qJ^`~p3{uw0YMDCD4z;z_`pSQpNLEwK7_`flMANTdZc&T1GF1%rzL7c50 zU;Z^}b>P7hTgAh`;dQEf-MVy`c!=23~zYp1OHU_{ii$J|4D=S1C@RI6)OA0kiL8r+4sI^1D`RR`-C~l=jBQJ@jDV- zbr9bOa61l%LAf8~`5feYDo$)5KgEzgW%cv3O`b2j@1FJR@lHkDkFe~e=I^@t-OGQc z_k}N9=$>ixRU-oI9--gY@vA<}Y&Awd@*I1t;I^LL=ef<#RE)en=zHC&?-}}QewO#M zaWRHF9hS%HJOHIzM%~Nfr`U-?^Wk|vV=wPJ&4bJ1ZFM?JZwm2liId1q%rt#ENc?XZ zjIF*sfu$dpy<6g2X1JBi-^kc%*Mc`g*6rstE7pINW#jJUediSSZB=hu^$DBC<1hnT zFEgc|)Q_Exl%A%xkA+{q3+gbI?*#&>SP@sK>aBZQ4y^o>$)Z zuDU-7wY97KZRYI6SM*^C9`9(W?`}gFIC-GGJRMB)D;|@82lSKe+=G>;%RSuDX?BH@ zt~ILET-z1?jT&@5E=eASC9RiiNcUc@To|{SNZ;$z`_Ax;Z;h*r&%Z@`t`X7Yz3Lr@ z^!GOO5K60L7!}{czmk8a?H}?He>go?+fbpm8Tk?%4SPTu>} zJtus4Mt4JZb0@nX?}Hb@%3|8CioJ90eeSXDooe9Q!Q!O;$H#4LmoQh!^!Gv%AI<8$ zQ-}O}PTdKw&dL{p3okBJYJCQwGI-IM+m>r3PTaRt3IL@KmwkUUfvZQy{{+qXUHdEA z@%s*)C;Xuwf6$LV8?kHp>}O+myU4$R&ih4o-tWl=o%qe#e~6s$^&*tQTS~c2{PX!O z{73fZZOUK&3;eHtKE7PP-CPDWe*L@QCVx#b|DO+sI3+hjN&sITYTG0JjeruneDq3w zbu&vn(>F_lwCjJj*l!>Le7WKD;_xdr{davZxH}d&70zGjY^y0{QcEO!I4bQZA)}A5^mkI!3A}YbzxXTe#gJ zy8Zxs*`_^S9{UTr1>wPwLFw$})Lp@uqRH)o zC$16hjVSqcJWMlJ_ts|`0lN;|jDWd|NOot7_DX+7XD%BaU#=^4SI^eJP6h9vz3Ob@ z&Jn$icZ=XGsOYPc1^&)w-qqS4UoHYQ-wvi(8oll;UzcW*xq2xNtr)@{ZEJVNw!Zpi zYy0&Y>|fpmsh~dG9?UY-U8zfaAaUTAN>2IrJbhRC1m+MM`=mq#_QZVnc5ja@y|gU8 z(c6FbK(6%mO51^(tv(rv_xgJ_QdfHGM2Wj8!_I!!>{g(^YkqIf;`j9S_WXa`<9lcL zq{p9}zMgp zsNL=KLgI&$Q1;~u`-z9QW1#ut8~dqZ7F2)H5Bi;|e*DJq=YgAn=HDhVa1FyZZ_gzG zKBMIytU99&=(=Lpr6$iaaW@EDVb=rTs zy$)`kK;Y9=9Aw9gt;ObB@ajFlO@|Jt)ln=nc0;y1G@QkVe|;{WO5PI7 zkJsnjE$Zw>?K}a8TRTwd-8)YZd`LjIH9JrETM-D@C;j8^<@w|ve+&n{(F86_OrZEns}swbuy9?F0KuykIZw=W$}R_^=NnBKcLisA`@4x4{oV_R|Y zgu;gube~dbO7Vp68?q~NXh-qz^nh<$QS7R!zJwHqhP@>&i`VDv23yv4+-dhU+DTNr zY+s_vUt9OQ+hgd{{)&9zpLkpL4d|$&pG3tzQ;+=fOg-}RAbfo;59R68&LPUn)@pTo zPEYqfQeL0m3}wnwvLSPi<5t6Oz`p%bGI?Z z>q^l#E^x&K@=kJfpy!HR_konRNn6y_+-<@>6Iz{whuBZ}4*Q9D7zAsuW+P)#6u5Z;Nh=XpwL13KzRP3;_07VUz@_f07BmWu|7}UcjPd z_O=CE0y*)c`cn-%57d9LP1^=<`&7iXi&(qwR`M3G^5vs=mV3Y_prS?pmIxnHpW$ON zT1iG2)Wt+bGYBMdpqIV%U%X|+zfyHJb65}OxtcYp_!(Y z#BzxxYMND&>_lB}FE*7lCpQn*sbiuWmakWpgjCH+qtt4d)QnxV^1T%nzBRm5-7gJ1 z9~|&&Ms=w2N14jDTX=M-Nm`lB~i``5i#u`Kfh-M<^0;Jm#doS^D-v2Xnf+ z#eG;#&9Z-kder{{Km;YA*Ly%*t$ZNb!{I=qf#Dy*g=%1a*q$<6!H0#yJkZSK{;6<3 z9@a%3Wp}jB4C3v>W2V}N-fI|w0O0H0joJDo`{au3M%e!RqX?p!_| zlFUq0|C3au<-TubUq9aRjJMT_G!PX9UO%xl?WpgQGhmde(ry$Uz;pmIAoJtR#W36| z7-XRq41Wk_2C<{2?%@KvRbh5B)pzxK)>ahS7nj*9V1}A_Kb3#(5Pph%?m$ksr)60u ztP_9KK@HSC06yiMmSufZ{)}r91o08XI~?t#As;ZzYT5hvWfkH!yF6{fO>S8&R^5nj zTs3v`^$v&=Ua2F5rh^}W-C~v1GPgM837(sb^6Ct@9}eeDHhI#rD_qh7a0C93WUe#G z`-b{9kF1uu%_2{5-Q zug-w`;c(t$o+mB))D`n;OYXd3p1|H@p4C#fndb?vo6NIX>OEd~bv*6`xWzoL4%K~E z%kx!qH~M=omQ~>QnCHpB++v=mgK>Z7t@5-5x0vVYwsy~*^2P>q@0IdoH@f>qd9sPz zeW9#EeU5*gt)y%G^8*H2a^U|Y{(1kM^Ct7Wap!!8bKbaf{@=qkZ=b&3Wt+F}oWF0j zdHbsHG26UvsQ+M_?_GX=65G6QDt=A2c^@Hto^9T^bKb=>?;G~}@0{=P%-er=&R>^j z?pRKR&s5B5{vS7pqa;(qtRk5=gJbeFAv_2`@Ky@%+yD4>x6SAKg?}r%&rh`K`)xme zy-Gen{bM$uw{U-82YLo}{{46kdT9^(`KkFKi?Xt!t4h_|jO+T&WmfS-pf42%kS@JvmzhBPu|9(jmeH$Qb8y~F9tQV1G zLBEu$7AXAH00^#YlN4G0;h>Zx;qA}bto#N?B*~EP{hsWP)F%5TD69~HtOhrOw)g&4?=4uo}^t& zv{iaO?2|x?$W~Up9wLUk%E%xUhLPJ(^m>aJuG>tOx)3*};|*(x&TP?9w5Bl1Lco`b zbX35@Ce{nXp@+hadFsuY4m!;+S?M;SRj|?6Kpi!}DYl$}eakTj$K$OUHS|z25BvvHY{)&I9kEcP$YvPUOf`M6PHR&x346cja85MA<&>iYCKjZ5t;^m7B zcKrpYN2B&+J*vt|OjLi+Q2nJg3G!B5BfxrQ z>XXI19xqhA1}b!9^>|Ml^3)VV<7|QFp1v_Qh25p)$s~%5uEz~$4J$*#wXi67OfcEp z1nW{u&eVRfD6o;uB3Wp(=}4h%vR*@>MYmxq5pOGfH6S1y@rkLm@>+E_Z{V#&6Ir1x zMcwwCYR_q#k!pXndVkdFp-qa)%`w9(BZ!-sBVGs>zE>LxOS)u{76-0-5AA zq_&>$>O}1~+D0m49KY6pf!2s4DTxJFp*9vq3WVg3SIs(3)(q7*=wOiO?Tw1B@TA^} zIon+YqjtjDQ-rJI4rz2SddcE_q*iw)OTI}r5DaSrff|1~ngUE%k))$Fz8DsyS>Ww( zQ}dc~Qo~mk$YK*z4M#y{2S9IVqb8bJ^+n?3E3Y#d_js0eyCcEJWqUrddU_AD$I>L5 zt$kQ_{0^=wRIXw~Dy^1-x`pD#C~+4)pg2%1ONb5%*fyiXrdMBPg9bJkM(q_i17ioH za0i-FP6K}`y0&WNo5m9MLTQ*!85edqKR01UV4+MK)*Vdkj`j9@wuCfb?lCsIOn{6r z9F!$^qfrm75m?7?n{C&{7EwA9uC&LW&+4(E#C4lR40 z&uEXdnuZ6q^F@P~G++dd3Z|o`nuZO)NoLqhEdYP}AiGihC`pwZ!U}3llbk}6)PnCd z>0svYZB`x#bEOw|Y)qX-w7t;MMY|!;qfU@ChPKmVCS7Auhf$4SjmZjSve~BB4vh`e z*87V(jz!V}jofsY`3rsMxQwlsW_PTI1g1NF!vThow;9bGUe!ebNG&0zU3pQ&HOFr`>k?E=*@SI~rgb(XUN#9O{HdPy-U&0vENqhRu65!a{-BNDqG# zoE&j8IUy!>#YOQL%i44}n&GI~N93-BZt}iLF>yR)*Y=uVM~jx6nZ5>Li7Pcn4%-3a z8iJ$Y*qF457(=w$#Hr5~={V0Ct%O*B$Qm3>#sfF3uO_O7)B~W$s6?6@y(O9|mSCAo zVb=Lt-P9o{F|!tkf<`*stN@OxPwRinHqf>ff{X?y0K}7!-9;vBAipssHrZR%wm9I1r7B?dCD!J6$FcN zNCq_3#IT|ZZDy&_uWyK;BljpVC%e<(z?_nueob6Todjpq*ouu6(OP$5jE{fOyxZkG z#dTVnFsBDR&!F0nl)R42$?c)+DI1Zd%sROeqoz;IYKQ^A5zc!`wr1A8Xs|tgSpbwD zv}s`}do3Q)VlMA^s-LwmcdGK)tgfMm)7SY0pN&x6P#9U7@w2%KPgP^=@_fW|)Ue=L zmvN#MHi?GJ5{wBU^tnGhJTK2My18&q1 zCk`-o7xQ5sj!=M`cX~qsVn@$gV|C@Z63s=JKQ#bPB~Y z>zv)q2CmYc&Kf{{sIPy3dZVpGvY>R`LgM3=CUvkeH&;V)(9lvwu;w9ZEc@*|tuI=f zy}~*TK-S=eEqN`M9P}pnf*DAi$Z#N>foh{Lk9E0C=h|3BrA^mMsgy6G$&x_(c!Y{v z;c7E(#(-j9P_r&5<$}nImKsY<-u2;_f?_gV>l1B+vctxF3E6-7N?k>D3V`)Nf#d^V zgN4Lqf;07=NVZL@IYDQMv`3dP--*j#8(l!YJ~xzMSwQ zW!z$D!emjPBeZ|U?3fdq^>p1&=@7SbTAH+>H8*sIHr?d-ygk9^yoB5H6~bEKy5)>` z#*TeXo~-SlJrk9nOC=L!$k>xSZciFvA=rLMBaJSnVf`lR(xYx`15j(juMO5J)iYr+ zX=6nmr$OG$B~|1-t_3Syd&N?Y&}8xMpjg%CB%LA)W~hI3mR6KeO3Y=HAxSCfJGvrv z6&dW3(&*0j86t(5HjnczB)Uai0`Yog5O9rOQ5D6K_lfY9~ zG?S&>cAcj-)3i%LGlo&-!VGe}Cfr10W(Hal7o3Gm17Y}x%82%$phce1puQNiprSsC zr+HYn8MuEoURY7YG7>ZQy$RLC!cEUjmMy!1NVe1J_=(nNNaKZ$nhkwDieS#RD2*71 zVx7w4jnE^-bVfA7mOcWMjT%YF!o)?^MrRX$8sg^2bsz{VW{8n>;>^?i%S&RzYr>0f92Zc+R84b#G|ogfxF?^;m2<2_sI@D9KkRQ!oQ^uD>y| zI^9`y62&E%anKZkR8Hx|!gOb2kdo%`1d<^D6wq)aGCdO$R(5PFh-%N4!FtKnRUL_0 zsvAaB(}s{uR}rOlPtG;A<}ZrEg&M2QyzeWVF+!J9aoS#PP}}D)J&Gj|>u@qa3|4u) zJr93}?0i^55-~Lebv_+|i@uxsomiQ44KY{9me%t#xj!HH=5*Oy6@^$`gR4bb8i@SF+?Q1sY8y*Q4Q%#n?ov&2=+Gq?Zwxxo%IAVzo!EI)StH zu%a(EDGzi-KA9BMf?ShGntJ&tBW=$Wm!lb7?*KyqT#(LWs381;T+xE)HD@vB2R46V zufiTVBLz-^+Kg*VNj7Q@c?rdB1rs=6qC>btL5+FKlx(%mn0iJwz=9F-00W{4$Z2Fc z$GaSi4O147F_d37g&yDO@u`E)orNk&^N!5coJohv$BEG9dnw;Ak#GXMx)9k& z;jCSQGKNT7^IG{(M5vIO`mEP7nTCI?$MvRQX%N9rAUK{{(>mWnkS+qaV`pG2x@H5T z@Xab>J9xJycYP;VcSaq+%TTSu*MP1^K$|6FEI50p=Uq2xbNrA3S54H(mp#!Pt!W+` z4GIOYu~kt^ovI^&t{W%61Ro8yg6(FPr-YogtZ=Ng80$5z%IQZ2nVp9V@W==NoT zCE}2m21uaR$)4I#tc_du0|Y?=qMuEBv^nush`L$y+oN;@tTA_v(+GiedtIOPh5p=g zTb8Q;bG8Ek18YKzWV$os{*GGUBhNERZ#PVhf>Vb|1BAd~yu>hu4YqLgE z0+eGTkf4UK+|(5-Z*69VGUnl+05p=&8iT2>*Ck{WVWXvDL4=g_<@tu#gfKH9Mur@5 zo6S<$j5#+-`%VlrX)^N>YpkW#SR0r_WD)iQ(ijWtWSq_vYT_){t5kpGMOs-KjP2wZ zFeLl6#8`_FfB?GQB<#${f$`J;hgrC8P1Vhq=m`pt+QR`hcBr9J5F?Dhc|#7lb=TU!SL#!D`>Bd3Gr`cFm z=tx|&0=%#b(^|vJ6+IEji6KX6Gw&pEmMwKBhWUjOiRg-O*0${cfJ~0kL9?h2L7v0M zVOLg$V=0-BEqx*iPJqbiS|Jm=Pa*+UNDL=UkZ~|U#?hojp?#`7pYbkAT2U`XQUK3H zamtPXicAA~GNONa88^;TyaVL&%nDN$aP$D;5eO1U+s0M{+RBxDj5&3GjS{m?t?my8 z03FWeW6vcdY!ribkKsWP5FHI%_2d?*D3H@x)Hg|cVNYyX>6=Q6;kbovPu#|4I5iq{ zvkN591Se;1u~s`BveT7E3_g>^oT_#DCQdCRdQuc*F0g;f&{Nk0ISQsSpFwsm05m7eTkwf8jR?YzqbYSieN z%AyF`^g@3xwC0G+JDs@UF`7@(4JoR(nnSUZn%Jn5PP~AiiA|V!Tmz)W9N!R|g1Yj< zLAMPr`808G1MKB9oDp=MO0y=l7_>1=?=U3Uh=88fr0W2+MtNywgsp}a)Bg} zPO;YXe7u22R#R$7BJA>Fs|&gHesk#c;F^L#(5!#2fW@Nk#6g{t4Z63o=6OHgjNp9X zG?*nXF~cFftUT7OhDEgdkt-6kH%Ur$R?eXrQhOE1?B4TYj4T_dqTTjIe zSE~(NPPO}*&@$~|N>6Hr57#hU?mIO`G@XBDz%LPQX{L;;jA|CZvufBB=N(My(>>Uc zgBCdEIFn~ZapJ>qot@KY3tKnWe2=V8oe8HoogRV1^PG+0zKcLJG%!tA=@-b#5)=Yy zuEr?9x%A9IB??!(z;PpVD&mY6I^$quXw4b2Y2rA~g3czGQiCx%ak@C}SRFXogiL?d z9H5LM6GTDjHEF?%GBvTp5w%LyeiRLCRa{EGuyNy8@0^9FAg z8)w*>I7AOxa8er^I=La`I;X9uWHo;*RCkkDlsE4U{9YI0M} z)XHg@sMQat@h0!td`}2kGU>Uax`P0qp_`Z~rb~uv_NTVKnl8LZXhk_P)$@Oz4E36H zCetI*oG-j+7-UvIPl#j|7iI$pJzH8eAOwfZs0ahXj_a-(c$tAdMY?b6b%;yIt1hvHVtI91LK|Hn6A}oVNdKb%NfWC%A&U5Ky98atggf5 zE28h?+J>z6B5$T~*oYE9mG4lsK9zYY3G+0u5#xdEMRl2uM6S6|6SjY0CC$E*qlvem z7MbZu^Hj}5V!Rw>e$CMeT*uj3d*I_6A6ZP~wa9RRHKWl<({Pa)O=b`fa1<4w0?e;E zg+|F6!mGF2jivzCj5a=G#w{V>qB%0Fk6Bm_x->OWouV&kNvA*RP1nE>wnTKG*n+=d zwNwCx(`8#Sz0nFr9XfwZX>P*s%7j>VV9srIBWeP#>f%~WV0YXd%rys310GHn9GaTl zj^(JF)X4f`Q+CL7I8&zc73`^fsE$iLqD8^1V=STd1a9c9`8u4?;5wD_K;|ZT3Yf!? zASY`n-Sj60UDTWKM&g-?Q1rMZt##jPEOK-~$4Y;g$~7Y#v`_||4T``e z5F6@@M*c!`rgcITM;?bQh=t8|5{!{XHCZm4mhOg*vGkh&TJ+%oAugF#G93uE!XyS~ zQB7nTMy8f9;k3pn>;@YSN3|wl4~vc8XtF6DszW>*u9F2wcB!7H(lD#Yydf_Eam2PX zvK8dbq)7p`Xi$HUwPy3ACYnon;3yGen|*DJjQU8j%BcD>*}$YGXY)i{I-C*J)uk7~ zLI<_UrC&_Y@lpf_I=|6LDjIkcyQUMhy++YI@~|kOS$61dRwH5{w{%JZjRGF?4BSix zraGN9dhCc%S`@Kh$tp~X<3HWH%w*n8ABR$la=1}KIDJL$PvN;) zBgHkLLvX9fD2UtwW~m*?1=p^(5m!-tV1&-5g6TucKHrF)*uFa92Gsu5qk4HzEC2b1aTLN@yEo>QpEKI^| z9a?MxXp`yn_OvMk!&!nZ<`OGd{jsS_Z4d|xAMVZVe(c$Wm$W->0w3)unh+ghjhpbt$slRkNk!n);?`cREfA#CbR%x&+sC z*%5!?*8yZe&{3nVf$Ef8i%Wg2r&D>%Ecwo+FJo-bv;0|225K#$%SG~Bs4cB7f#FO% z3hJZIOl6co$8R7oy;?_^z^&83l=5L*Yv)U1(MG*OC@2&M!g;+uMr09<8GD5cM3A>J zJ|SphCW$0Z3NETlJgOTEmWG_Kz@|nRfHZ%D0m^p!{gEF^NX-fz+6r$n(;9A&N-Lu@eGa(%!T<%09-+9;ibDi1hI3yLy&2IFtRNiZ z%s`HYIoNBY1r>EH2U#?Qd?5^*jEysjTeq1koXf**2uf~in9k!ka~5!08`2>buoHh| z3L-+9q?6nmu4@H695MxR}qIUirY(O%!hB$^yYnvHD4JB8T_`f(^G&pi@Sx3(_aK-SmSzTvrtJ@SC3V?Y%7KnPk{PGT zfWC$#_$GtecAZXK0f^RSeJQfspxF$7)U`2dAgpPa$;*M?h+hmQzsK}ks6G!o-p!(pgrbW&}iy)y3HO(`Bcq;S9ze1Y7w3VJf{THc+g=j z0dDCH6$~4lu0Dvyijj5Nt5#z^?SZRWeHkxubG-?JW-_6Ad0V0dmo0zhVN^pf41yC0 zTUr|nZuw9v7}TwmJfVPmHqht&CBPC~+UpMb%9>apgs8DXhlmpJlnU;xi-&5x(zCUmUUs0S*Yg3t7j3;iuJ~H zgLc>BXtZV|4Aixin#zLNoK1?k#E!{+G-!;-j#7xG(wyNfYB?FLq_Ny=Dluze?r1b$ z_PDv)S|ld2m?Ln<=F;(;1VTIu*uK!nq4h{>M9O$0#N7s)&kKKS3dK@;poxmEx&ZI9F^>`V#0uY{3s~!WTY~R2j6B+}EV}lCT9E3AH%*3XjS>1HXO>MN1 zh+^(H(45)m!-P>4IRLc<*Xty7q9km8=@>w+YXMHc9U2YPO(T#QW;nIY)g(qPLsiNLR*Crg!_| z4C+IW&#ixa+Rt!+GiG2{NLGVaX=uwQ9{~C)L1OTpJIlQ7g~pV@rSr9jrx-hIKEQruf|Dav@CudlLs>6p4}) zlC-fJ>QLQYm7P9{T7SB$xyp)x)b1f74|)S$*}raKNA!e}Bj?M2F97D>|U z-liST7EQn$O?qp+kF2|3SMAChVLtBUo>t+ zyvBbvgyG}0JVt86Wv9(Ca!crh_FA&%?G{U56a&UUwyp)kCeRT&PTS|2eY@z`BV~lm zYmK6=P^jYSBE2Dvs2{Z!G3~)*47XT3uA$^~z|UfE5KCZfDXvs=6!<35jhH2;0iCAP z^G1r}0nHu)@UwQ7jYm8*W0BQL-86;G3Il%?THxe9sxUYoP1}e7EI`x0fN1L}&d*K1 zj|k3mm1=8q97mJFk6?n_%oPUjXybLv=j7az-7GVWZ0HQYj+e*kgm0zefifP(LEN9q zdfJ7o%pE|?D4(*M=3v10H&Mfq(9O6rUg!;%nD^_z8eu$@)d*I?`?9=lMB`RA@0uKI zY5~=MBjK6akT(l|3acR#2#n<{Iy1huAbp@d?tqBh>o=y-n$t43HX0%#L^LgIAm`H1 z4H3}EW;HG43vV(DMhVy7r0IA=)u%PBCsM>j(h1kcJUE&zI5?Sh2B^Ho7b4eLPUsCE zj8}$X=V{yz{2tRYlAa!<^-;^C*Mz4H=mv*>C+mhqBWX9;guSpBLt(z$5VL4dBgtq2 z0xhiFpeG6dT8mytIek^M1b`rQ_*l9S#zI zzZPrQK%3YkY86oqNBCHorh0$a?dC4pl2&d%!x2~X<5Xt{OU3Tiz|nNj6jRF;Ef5WY z?qJ2PX8rD{Ua52UsGU&X|S4dyN^ONh@XPk26FD zT1AnrcvHo5pC;>oG+#69*o2mwNOqHdz>~u91gZ^MlOfbsjfD`TpgsVp&B#XlPCN9w zIwYrD9qi8MfJW1Fvo;*ZlWyD+(v9Z!Tb+$q3rv19Ws;(i^k;}DGt)^E7!*>lM0$zV z2kR)uH`=wRxta|Y`aDFNM8I?AtS)s#ZfIqW$;g4Pu~RA$yk^?cylaM5x`_1lYN7R1Rv}j_wzE)|Jv1f&csvhLVUah?h(Mv%dc3qreVz5- zpbZM0S?CWqXfvLH(NYY&Wk^7OA-*i+)Uq_93$Bz+E67HDXsRevdE`lyuNxx`9}=7z zXx(X6B-}t>HiS@-WM1}rlRgqPAard3tX`n&yg`n!L0^g*7f zcIMU~@+W+;$`Z2g)u!WSed10QE6!1rC9q;Q9OL)G)Q1NuQd5?bOiPWNUOPjDVBL%k zNj;h?%|*%(K146-v0&DJWFZs_Nu0o~;VO&yMi(DPvj&vX3M6xJgw6yeaV%7rvr(c6 zLSvD!P>w`qDY_0c*^4w;?Q{Lu5|#CQvT`+$=)tXg!jiBEtGLbsOb&GyN)MfClLpKW zmvBwCJz=&4HK;>v+Dp?@Et0V%CZg1At!K<+DZ{?Avhbv-vtVz383bHG_2S72953es zste2Wnl?{>B^X45a7>8B#%fx$+E@!Uv!O4Us5k43aI7YlL~+-HUBQ{_-L74T z&4kDYs{|taQk?>sQ3@Zoa0$mXyDJi`zL}BaI7SGrmivIvK@qSU+6LoveAgnR7M5#T zrh8<}$v84%QPK^6L3P$7M0^OK9*5 z&og_RWY$tQHeF8hW5;DRWes%ljzJDk5t&&`V{JryjTzhia6}4r!xM<+AdFV^Heu25 zf^L{Fs5}I1W=ZehUjYH88D*p=+1ZPas%}#cbOLjU91( z?e@-U1jdy^@poLVA0_rRVmP*8T$6CV7>4yZWMXE2Tuvj(fX%*B8qhR$<3k_^I`7&a zh@ZUALh-iG@2=14%AEX?{MvNMG8xVac95s1iR)=*y8`6&zMJOe!|{wKxCa{2bh>6*&2rA2I7nJY@SnoaLI$+-`9Y;y29RTPP1{b-?4X)U0JvVqTCzsHoFE>M?Niqhu;>dOSu`tr!3(j}~dM5pQa4aa*^?`yyG zA<;{4iGOO6=Vx~Kay89_#4n%}a25u$P6p7n5jM|_fsm5bZPEy+48rRpHO zZ}sjuDvBa)-Kv*}nDn=oE$A40)4Bjc>g>U;d%=QyvJ+utq&%oEVkSI) zDrlGJV6My!$MY+EG@a~tPIwj&&}Zg|@ZhX-hWkjgInTRB5uBimw5yJ^55{j!F&D$I z6~_Qu2Z+(K5lQs8?egU7_8dWgrg|K9Lq4fxwg!w_kn#iu)?^-=4t}!eK^f$JwDgdz zi^HVN_rgN}fQE}9pb72-8Y&`kRoc&gZ2*-Dk_vcg^hOLE8i+ka8#X>|2F0N2$BHKV zH29%5CViPkSgmlv#hcH1mfb@s-Zp_j^1_JsSR;T4D?o36#iDTbN^a*?!X26@a*(F) zFMp4FEiB<={KN|WK5lbvOi8rPJ-@p&#gW6bdk+?yzKfkLKs0SgD$w{encxn8YP2Mh zWS$Y*ZUd;w&$PIZjhjMY;+v7YFsd6M(E4+bwS3Ext%W*{V`|ZwdtE@1_1#;fhHqRL z5nh6`AlGY+HwPl=!7!IMDCvi#xcvpoy2;76IhPmfoMgaZ8Q4nB#8Uoc>;v0&@o<)HaQ2U(5U&xav&Gz zbyUkTaKA=kVGWP!0A*49!!Vv-WP6sDn_J%xc}pcE@Sq%TB{Ktm6TrGDSOl*@!s(19 z=NW~|fU!{Q2oZlq$AZ7%m!Xhq*3QJlbTY?yQUtCw=-UtQkeq3MBpsM%qkB-%!g=OHrMk(_Bk9_d{%I=)n1H zajiXF2|n^>ym`0h6xCyKMWoAx$iy&$4ba*sZ9tb~a*>#;#C?V0Sa)3bxSLZEI?6#YBq;KYJa}mk zFZG#$@-SAyR+1r;_M;&~>m^rAio>9Pcd@Sla-)Prclhkc+vzgTcg37=Z_UjkIE{Ri z6q*|J`v3NUwV1evx!2mJ-t>4CRTzV!jg6TecS@v|Wv;t_A5xhle(IR*d8V%QJuHj? zIGr;q+o*;i)*s2sxT{cf26uZd5X80Fq8b*T?dKgihR%LC<#;&JV2~q$(bNrfDFVZE%8+^4t)xEOBNFFJKyY zp-FT91(jl*hfC2MDa$IGdk8#;liOtoN(C=T^3q!nq)>3ajUy2B4PB`{^+w|a{S zCJ}Oo#1RrWm$p)1c-T-c0KSk~ai78~8Njb7NAsc-QOj4AW688i6Zm%uxQ7wdAe2W> z*4!6=?i$-9n1#kCV2U93v)ck7R*VW43{vrN{KB|i`IK?nQFUZ057_OJ4AD`7rdf*y zP6S0~cRi9!3W59*YEs+~S@?%Ws%Z44;D6orRpX8V>#Fu4Cb6B74VXRtmL!dIZ!9Y0 z$*cYSMaEX7@_KkMgTEVz`qEVW*xEdsRdMTo-wfr1ejsxOQ%z%>rpsnMr64$(B+x?d zu#cJoV6E@DNJ|;bpj46{y59#$cWY3=b}&XMB>#Mw>V53xDwCkZdUF!-l|%|WGQcoo z5n}zMKwp&&4;aT@1aqAWd5uV_`K%3NrKv`t!F?wtC6IB302{98sJimSH*FC;Kc*y_}HOBnWA@$e~HM~E4 zvpCl;jA#QXnqa8N6cX}&`zctiO=Y!zYG>*cE#4{y+^0NjegAr-Q=`B!kD9tbUqyU; z?u_ldb}27*8M-=r)F{tGar|pBo=->9GeJc$I4+mo^KyR%m}eezrEZlSz(&S~d%wAc zt`$NMravl(dRuE1A(RPtK_ygaO3xY5`!;G+uk#iZjb7O|$HLQH zUC|VZ%)?wxme4C~DhK7TRj(;H&Be(MMZv~9P(_H+sjlUpehuSsPEHsoXL_5^w>eBp zX%e#gIVr{Q+vvNXgF$i@A}?xx^G8cThbQ$8Tn`|*-{l)urx{jb{#cc77L6;XY&F2W zwo(Eef{-IpX{vo6fh@MKp7XBADS$r+2x!tFce+4+2X%!% z>#P8{>@>n&$f+L}Dq43LEb%6Qzg3K|BJad+s>0rg$wS$gY6WbQ!N6;OtHG@IMABGf z+6aDZTMOY)KiP_aXjSoAjo>SWwlhP_b^r?MW#PkB%;-S5>Ip>%i!VtK6**?iY6jdri19t29oIC4#X7x>RM#oTaEg-S@+Fh zWy|FUoN-7gd$B)%HUkcDprhN%$zk=`9~8o9K`S*-sha`byf2A2wyoY*?2}c+O|Jg7 zb$EMc#OGLYR*=B9b@hBW#aku{TirtYZB|Igo#N z+Ha}b2BE!LyE4*5Ldj2P*NPJ?n||E+E9Lq*r+wh4@~;nnf3w_Gfu%PvOnsPXa?>Kl z(_=lCi(fy(e>nSkfLKb@UsV6nlWu__B0eD>^DHPLWNo!;JYCaDh7#$}blC06o!H{X z#AuJxy%XSX3gWZVxClq-7qBDoxv+7IjIQ z_vO6CFVjhXT`@)db>(XrJ(1@3B1hy@)`H0ELj?;3}9}+ix{aMOlcsprCSv#LDw9Y(Ayy+xZ z%bv1%jMq!%ktRc{z8lE04S*h@qZ!r%97CsK6AFZX?wygL6dSMm;-(3jDxwR5nu?G- zsfOna)2a^IVPenZoDo>G9qNsNP_c!q`^eNcc?hT8mIYupC5Q>lUYvo!swKbBviz%t zx>{6X1lw$|?aly5)|^GX4nQaw08LcMg(LVfPznCwJ!dnmAbqvT?ja-oL%D}SMw1p zx~p0m0yk;{k==zgk*OdMHXCNn3wgg;wiYHX%9ve&4F60$7)aPZEhqr!DR2Jy(dUXyfCnrRq$$&XKYHnfiNCG5_6tdX9X@4|wf_9>YN}0| zIos;m)s}&nX8y_A#~@FFR7yVMkcs@p4QKJZ-1!MI>I9TO3Xy3D>s#rO&c?+vFuaP9 z#^pFD}b-FclJmTkPx&U93gTCI8q0HT3|)A_2k2WrIb5pPR!@*XVkjzqF6oSmfqyVNiDYoF2H?kUb^rr(ZZTRd zuUyji3f(1bwfpoI4<>bbN92Wnf%OK1;vG^4&|o=H|NA;Y0~^sM&{cNmjp~^0s_d>D zdL{I360t%bL1DTTO#jN<#z@n*6|dzYK3erb=0H-R-4c--&K=VEsV8-R$>6s-FXLc( zCMsz%=9`~>hdGAX9~uu2|*=69pvT*1-bfCpO8ipWL#zu&Pf=;O=%#EwuvFR_We zwlc+y;Gdgm@MjiGIm^40+@MZ_X>>}S{2wJhwS%+Ea zZqoHHhrwZwv*#y3Va+Umsn)iIg76H1E#+Nq>xKFem9j-bRoSFB+32Nulcc34W^LTz z7{e9m(MOVwdco?4{i3*YAEr-jq?}7B>@gZB)J&(sVZyCd`%~eiB_<`tj9zbk6<@(B z(vgm-d{0o7p_wI1nBv4F5^%HV4m{vbnzZwF^PM$`WDq1yn*HW~-OsC1IJ;yccHOAX z-o2^sCM>3HqsH@)*l}JunbMOO@z0=RDF&1+SLrG=71}vHQVAUkC5K*Hb2>B$;3~Ye z=rEFvn)O9Wg@ziKEQxiIstNx^l@zYNfRWZTA1{NVU7AV=8CbnS(fv>5ZT?+?dawPK zsukr+J1sc;MZQ#j{<12LF+wRegf;?MsaJN~>#crcWEoV~S{|I=70jz#sIw)ap98#8 z$$NBWlm)2HyIfz8iP$*SQ)?M#5YSoQT1D^aSTtQzsRQI$ZjgPyy+Yp=v0*(Mb4Fg$ z2C2o0%a;+1zC5aqZcn6pH1`r71`V_U8-%A(6cFh+3AV7zeMThVx$my%6;#0K7JQaR8*(erJm-}Li zpAEWA4L7AxMJ_72UUaB2)YXZ+>Je4k&W}W(un(B5Vu4@jjYW-~kOMwTEi{}SgF?-PKHwNhQo_}Y zbS&IUL(l$1;Iej^#ub+h(_pt6uN;SAoodq$f{_WLwJE8&71N3GnIXRU%fNsLH>Ddy zk|R@{UzpmuTYFO>H*eF)Vs*u9Wo+q9;%@!>7J<~L`b zcd@b}vY#oN=LZ*7|CmlUo75?Sb_|ub$Bra$hZQkzW4F-< z)_I%>Wmj-F_@Y2P8AD6VVc*b6m`1u^+?>A7=Lm@UZGPUge!AM@z@`vO=@}YWkmR<1 z0cd*`dmsoVfDz+`MMH`^wL6vM?!70!fk2H1B?hyDxEmXs+znPdM;HJ-K1x6PJx|&q zlWGS_OY=9uPox5Y%^e;9a>-)BRUhmQ6HhSd$D`8wk~nS@Wn=7YxK>M@w+K6oU&xni zv|v%GtMyXYAqyn&b2OS^?ffL1EiTJ{Lz#2R>qkYo)JEOXs{q{zUDXTBzxjC0gY6wb z=kcYVKDIxwxR0)%8Rlo*(Y*8P@0T|(3DylAYLQZ42Un1fyEOC9Le0Ze7XJ zFVMLJrqiJF$e`@uc<{EIK=b}GTMRG%el7l3c(YwO5P`c4n+_UA$UGTk+6Bpf1kAvn zk`#33_X;HJD&xD2wmeAqWGIM<>JbPD;_KjEfj42Byy+n`72J!!6}lx z452}_dWTLq%Hx3RCfhktX7KEPIKIKQ_ZHr;LH1Bs`!-QjNpW21DMCHWB$9iDs9MGUz)Qh-YOKXx#L1u7&T*ShQ-Bdz z39&45%%Yp*jQQQWm0m?8!m?S4;E1#H&~tXjbI?qJK%KE}$F;Xz12x}~57y0o((XyWIqVVP z(aEuVcj{7d4sVj7*r9<0P$1Y5H0_h(i(QjE)XI9OJ_`Ji;fWDqC3baB8N-Nlx z8-5o-(;JR!GkYu3U1l|ZoAt5;8~Z(wR}@kR#B!lMWu^E8;)ic8Xo~GJK8tU?PF~c8 zh0}D>5t_yoDX&Y=*U-fJ@Rii#O@XWiR6GKwH`6u4mT9UeDR*RTU_vf6ZTvKjpK(m~ zIZDKYVvvX*e-epZSaaCDYUlIHg(CI)DwOtJ+BITmZkn;n^orMiN{RxTMf9Uk^!C#F zemMDOV_MUPaMs;!AM-mXF^x@A-zb@fH$()W76Hc#T5nt%dE+RFQ$X&QJrS{Xn`_mb zwAobth^KnJ`RB%kp}}Z(fqUnY!dqgL1ko*&MOuqZoPJKtm_EOM*8H=^d`vLFvWk8l z)GEwE=A5{!q4(Q=y0MGLzJBvNSK<)=EiH_8VKm~eue}kQm$fm&^C^&S6nwg@w3s_W zG+~9G2@46W$iYH&6h%*T2$E?KwzG>gEpcfD_%O#%4rR!S8Pl(sr!ss2(+-co^nNrH8#4Coz=~F@PzRoln zeJP)BMT=>*PH#RS_QwpetICOXa&MK{p7exkCeBEKpvEszD~7P$(~#8$j7qMa1?LTs zH3BaR*85+EEEiLIeYRb~gN{CEe9;hLaZK;pkJ2lo+U{ne>|37HPH1zk=k}(>RF+ya z>0{&VB^yhBeor>RkY`zbMycA<%AI%JoH)AN(QIQP_nF!Jt*aMRBLPJ9(%Mv$2fZq* zd}d^x_}<8EK)rOaNt&VSooHfA!P((K^CaWCgBsH8^_ND7w*Rao)^?JOH@N0jB7Z^O z@R}{UBP1F1r_z%vHH@_a|1MFRDiP$W-SHG=qr3`#v{IKsEL6?Aj9Xlh(0`xsxGF&P zoI{5ribb79x4EV?l^Yz#RGIeNq7(>pZ2*q}1mcd9-50l72^8L|JwPUKa8urepEEBu z`lARohrNN_|9Lh_5~i1{GP7npdp$r~-H(DbfFB~kfr*jhqw!J*T6jX?vRmVa`B`wk z2h5p&%&6MQ(sj>oxp_RM`Ik^-0k{44$`JnUE=k( ze;cong6ooOiW&$#p-kBCubS4iv<}a?7dmTyBw9+s4ls=R;p;^B;T}G!GVz4CeE4Kf zPY?R_Hu!?ttU5A0$&0|rm-m`NbzPz*S#)^TN}+@8IrMn7`UkQ2zYJY}5q4Of55Vkg z{rC28ude%<=-gXWv?NA_t++vMFEG2TNe9lugN@O5^=fZfFk*^&;Ycv-ZNQJJYH)F|^x#ON{LwMY4X^W@ z%PC0`k(RSAkrI7#Fo2JJEAqKfR4xSt_!5&e#laNb$F;!~pN9nCpVVWu}CO`q9) zou$IORkSA7jq|mbGqA5ql)YyH!o=@*6?u@BSLo-r>iz-bZ^HOr9lVo2flUTnC7&T# zxiR}I>8lg8@F*_aY0qs9y6el*jq>vT|v2 zH{63%iZ?`<6qB| z>C(3X?20h0TslT#(FIBZL`b@z{=CF}3Ce(7_Kk>btA#tg{_!JB(ywiHl*zxvhNIT0D+TgJiA zSDc-~hCt#(Sdxs12No&}Nv;k;WTPeU6Zajvi)oKu9ehk-E zxtz_y#w5iLh{|@xbJZs9a%mx5V?L_50oBphl-TMUquNwP>DYdM@PSxubgu&(6{)>4 z?0Ck=&ba#rqB9qNcMDZ%HT159sVzHz86oC{IJM}+oyNt+rcVoe9tJzPBbsS9z^40d z`!m*toJRvwlrCc0M4ZS`;sBiQl-1^`=P^c38Ssm?TZtX%AjWx1?eMC__7uLS*@u-$ zqko}ux`Xi~kMT)}pQu!09&)j}Fg0^FR>vtkAn`DH_$Uv53J*v=hJ$LPyZU8uKB!Hc zrA3S>uFfaa?WgTrF6KjOWiN$rKs8Z@zd;rfwJ5Qg0H*`ukU#DBvX(cVL{(*7!qOQc z(SkIf+{Z4dAtR3esWC2@G|BB|kYk!FmD)$VWlWtf4ki$3TpnM&S==WDz!cE)1BaU_ z;FWh@FQJNmTSrv5xVfq#VeKT}tF6L(;-L?#L?9*i^AP_rs^Q;;!^yNnxv!cLSv0dW zLH$)*I#CmcWGJnKKBQ{_+iyCrVn#|Hs+q+s;jo;xw0>y)`t7R zKp=&;C=-`$-D|AkSSb6Qzhx)u)Mnvc^H*Ipr8woUa-j=+{7mfibn; z24qjx+C)>yji^1n(r8|N^S#~$DrxfcGmR6GKZmsiB<5KHwL*;6qqtP!4899P!*)}D zvKdqtK|UP=B{CSaE#7{ru$7iNH^ldij(0WrJ6E|i29(Y;8{iBqe)OnDUL-&F1Y&_6 zT7P|it?eBd^1qU<7NN$hp4&3bM%20jkS1W4cbj=o}4z8;sZ_31_`+ViG-#M z=}IzoYG#H3zzfg(RxT9&p@~8UN2gjsDmnm4(^2``5QQ6i`FOPpoz7aKCteglRcs)N zQm~(sDr%dG&UQmkay!&#^_cu}awZeYcI82T>vHf4qMI`e+5?ZqEYfvgMSQ0HA`vHM zrm*~zXexkn_D{!2eVb%jw4h0B* zCmvM-jD(Q1$m93>Y~@0`(oq=>d4VWAbjGVVQ8)IK>EBV6!sZ@tyC55#gIKx9<&cBdil>W8B=mIjfYWDD(B+)M2TFxh)%J(Wk_;>smakKPOSCmWufpS% zfr>13a11}+)^v`5wavLrgn?2+Oh@YdJ?|Brb<2A42uoh|iQ`r~vF~EJfVTyARu3o~ zijs0fJbcd1!tmTLTY@Oj?2&$yd<=BL0a>~)uhax%j{ zVxd1k5>c++E>G5fsvyIE=%zb}5C;8q@F_iTp-Z?k3ei>*NgdOn*_D=ffht@1<7>0a z^I|5D{}4E&W)zlMiJtf|o_JBoj)AcOh&h%5Ay1EONi=|0@@yXFwNLGT9WdMn8c))4eaZ#L zr2CT1IHr>`Pe497&a6LhEm|Rtw0Ym?aU3`g?C}w<`#EJc<^Nu~*TH71hv_Gfb=H24 z`$dgW_Pbjh)~p%ZMS|Cu@1GtyHf*rFj!+@jq9VZUI7cNgyl4YX`PEZPg>5zl+K@mj5<}}* z8o3e{K*kgvEnUC|Se|`RFju0H=inxOw7C&4`f@aC>@M|Mff+4q zmza9srujsF_Q~IoIZ^vYo)l@YM@PkHXra1k>92l*j=Uh>D6RVV#+^H(l8Y=h2G}V( z*fmbck#kzqc$CAOmNTP(G7;n1BtIH=dDz=EIhKdtpx}dNkua<)_ofkZS!B?1{N%W_ z#>AvTxs{DnTS6rYg9ecG|9Vh~pTv|$zu?3U3U#!9;X?~p-itlUyxYEJk|hbEplX~a z`2#hqcZf&EnpW_(qHktF&|k`-{P+M0xOnusC<2YW<+X&m-ssJzpnJ9#eaVxZ+n(O; z5I#eH6FFqZ3~5jcr2?nxIS5DBt_yBdtc`Ys?c^x|Eknsep0%+&(O}<|wd1l$Ug^}C zd3zauLfnZ|;Gat6nI9s$_+?{g$)j#;{?dR3!Q*4nigZEBVqpH#`AjEO`|sM&Q0FB>&EOG9a3WBMR_q5wPh{vIJ)Ic zql?^Ht%rmuZJ6+wA3qL}b6wHi*(EL7{U0rVfqm?D#m{`7bZB zSzo*PGx$_Y!mX|*psfjhSSTHBk)0Flv$lfh@8{xx3$6jOxTbH(*%TPTqKf`L8!j$? zP;)5ANagaA?4g`%7MPJ7o5oI7*bQ6tjAuP>YB61m6*pBew#~3o(Wz(*6%*;ujKl*Y zDLoqPZhZv{;zlqAJKf?Ta@VLt>m`-gYdTS!D_&7+EX*uqH9sDvB z2(uD%ATHLigG3OWM80D}Pw6?%Z;UzWnhC6mvdccy45@OhA)Z^L0yzLgV0;a$yd7W! z0Bgbx7hI#Nkw|@yQa|170mSrz(CYiheQpSy>Ln6b_?gEd1o73Y>Umkn4(F#l@YqBW zu|}Gpv^;cn)F)8`{!yA%#Bins z`;IGtC_-8YPyy%aM*Y~Ih3y2B3thv zZB;+{MP6AX=aD4yl2b@v+_Wr6Y3QYf-;B+|Wi-9!TgK=l5 z2jnD~?4=2OQd&=nkdDLy^@)Uq3kvgHQrdA9?)hDbDVSd5?&9zeWG3gYw6_ChujEAj z){o+K?VS+D7r)c+Q$C9mNllx*=K;rmYD27gs}Nf9l&#ZbiU@otxnS@Tm5fxJa} z+<>1$mb390S?YbCo#kbuQeFR zac5oWGi~GW&>E#SH*9;#4ie5~Lg`dK_Jb&9%B%&r#s`-Bw`6!=z0KA`5*$Twaymsl z>E<|F@s^TbLm)qFBiC8URsZZV2g{TG>^DzeHZwsrmd?I^dxasKU+0^l_vtVQtRiQ( zm?6M|a=IZ@rBaXN7Bw9psxzq4Ek%zCyObhne*`# z-jePk#7R|ugZN?NKzR5eop6n;`r(@`*z~!A&$zaTKaA4qbW^Q_{>PH}jM7~?Q*v)n~E@(H6)y6YTu znE2m7`j|JC1{ASxbRhJ`5>B|@SAuJ?&T}wJzU}OP)D7`>L1W}`OAyEbRqeW5vAaod zABj)9nM)m1=`KH}@HHmxxK73!%xkq-lSIC!wulb4`MktY^S#1G0vjQnQmslsa%BWt z6JEj9XErmUc6omVY>sfF`vGJa)xsv!`n!E3TvTLKDG`}d<;_;^SP0>}xY)#9fLt7$ z2t!AI64uK&lZxd^Iz+B@JsVSEk3z1Zkw0ce>b&pMXe^aV zhN%MR31%tw$xfhL{dvE&dOm?`>jtjWa4~^@1r!qpHHuCpLaF9gH+DoKNW8c>p)gOe zOfK&IS5GRK&mEmS#*~ZeSXtk(=s{^g-ND2{G2DVcX2cjgSswX^n$HUX~3{C*UF zY_T~=5+A;T^Q9V)_OxX}npGD<(pSZO{}p;`12_OGpD{|e&hjC`+Oc5;NdFN98laX( z^bZVw){C$AgvIDps|I6nA##=Awa_f}wiS|a?~B%BhV0NWpiT0+v(ed0 zRj%Jg@$K)H#3f41QDCQZFndLJF#Bg-TK0IJne~LdHeJldwVS=y8(|xE7gZt-K^rCv{Rlu3Kkgp10mgkSRYLz`Z5N;NE-p=HC0` zdiBBkWseL9f=nG#8U&dl55ER~8*4M+ve20}nuKjvh8RCS`vnl3)CfwGqx9K{D86?s zvpph)1N6GfitMEi9yyL&&n@cML6e!ttiwg%3!}nRjBoU*-R7b~6KLbe{J40~HxUuH zu!L8?PacB)5>}G>N02IiY^7&aAITXa z^NM0WzP;xdp<{qmQHrrzC34^)TrA!vbFJhAkDT$pzz3BRnxu62&}WbAB2^(FEL`U0 z&P3}8v^YGboP~JeEWJLcJ8XYSOa;l zkg%#|%V98%H>||gVIg(-5rDG3$e*EP=F4kdR}vzE5`ex z{x-liLXx#ZfgR6xHbV7C`CaxIzt@(^M(;qd}A{FhexdtTR|1<)$7f zgR3e}vpoAYFJ^>5eALP*d;o~aTN8EZtFkc_X_NoJ|+Sn2{ZS78VVOq=i##0?7NNSf2-}=54P+ zvaJy1tQhB6H(^g+|BVUi@+Tn@P?urKQd;*cRbhflK+wZ3Cmu?yenCahq9H zWgf4vY3UDUOn~P|y0#*QCF{>#)Vt$m`i@%(#BP4~0 zhska!YuW7&9Ur*cp^N=Z{X!GC;I6=b;<;RW>Qf4Xv6flnKz)>QY`?b*kD{{JC;Il^ zO!1*J5wa<<42E{kGK`<;TkU}d_^PObg%A2t=x4KrUN!JXr-$G{$<61 zcSEcTaF%H8Y(%**v?Niq`_v)J4s7&FptBvkNICU1yXWV<6bx?#?MiQDq_tM?oVa0p zV2r~9JYQul!f{LTMQpLPNq4?~k^C~CzMka7!11mv2Vk5mWMx#i)BDAgO=*%bBN0Un z7BrCxEu{^)Y|kv2XnR)>h!ru$$G(r#B$AIe!xQLg7#B+U@IBEWZM~no#xjrjO+2G zDIRKwpOTFubW(x3-2@4L&=14-=oA;uYNB9F*he7$>us#B%h@1vk|Cm=2kK=Q$#`!w zk%KN22zDF7n1=%I%ToNwp~rr~PC3RRuf#c4)*sM+ZF~GGL6=LA7q0D@ znZA!o6`1^LXa4-{f^pw!b8I||${C1++c^c&K!;h{j`4?BlOYb0*wV>IJhOqR-)kc& z=Hyd#9~ysCQVsB_fY%DDx1IN>!5uLfh8FsP55x}^p5F`HV{SZOQZLYGymC*F7 zn4uf9;!to11+|NR){M#V=vh$Dihd;E%7zP!H;{i7`|RaKVuW!d0h_DvPgJ#wB9+zd z`z;(z)0r~rXUR6kTC-??DeRVj?}@<2dQahP!|?g1{j*WYo(>Q91Q6twJZvE_L7;{rGvMBxX`>U|~dQb+s&h>?cDKP;7UmsinYh(?FeAUCFy zJH)P7V88N39?PTAbO*&lh2MI5Vw9{Pe;nK!{R`VKRUi1Am9l&_ z$Zj|Ce2>%#xpW-mX^>xY4+#5ZkfJ(k@nnpOvlR8P!XY&C)U~|knE(h zab|J5f%4qNnkgX_pa1>qSw2X+`mdDlJXRfZw@}PqNt+(}0w0j=mM20;m0-&wv`03A z?(=*qAzOSZfA|d_fp=;-CL1%`q^Z1h+V)fDT=)Q}#?u!R&+rUoC z?$HpHeIgVf-Veg`z4}3&&+(*AAcn~^w&SHUYls{%-#Xu zmw!h!G+5R{1S`Y-uPTWjLY7zUo5Iid6i+Ww3u-50uL{)m;$ro8o3$5c=`<*1`ed@F5hPyNUqJIFf0#1|co=~tBD^|nCa2)T&~;yq%vcA29ZAoD8tq#Ga*_D2+4Dechy%eY21OS*%79W}d4S4CaiVqjl0$-y-5ZP3U|D3edg2neozf3b*q?m3!`N)(L9!^N@R&Hsdf!05|nr8Ji~ z+eq8o(RzLvK952=q^f#8+qame1zL@#oV?7ZYH;yZZo)v8>EFG9jbZZ*!@>an__kR?|D&;4Ym;SeE*kiMv4Be@jzX zaJaV4tqNBwqeYD{5*+N3l}JL zhP)fL5$9#ft0fxQ$Ae-B%X+*Pf14#;4dO2F8ew;ySgvSs)NNjso~1>F31?T6Htt?p z`G!uyoL~TB7|z}G6>j}Dsrt}S2p`BJxW=EUg4{8%?3_d)6o3Zvev@u%%Y#ikHqF8c z=<{2>ANXB2KQ{2@Vye;EqNIa%+H z^z@Y>9Y!RzUbL443SMi=&6UB%{0$dL@xRm@i(oj&RXT=4$lA#ZQyQXpL$?yp+ki~# zszkReN%XOr@7mR!iXh92mkc8oiHXtT6xmPI?}n1luJ8NZ$qWYZt0vN}(;9H^cUgC9 zJZxyG0yBwYEh?UDxS2A_e`olQ*q9ihNs+YZeD-E6YSJHsV*I9`!DaH8NV$%N)to2@ zrBGgXn^iyFhUB>y(4;Nfk3J}X7`&Q3!9+x$J8PEJag(XcrrGpD%`14}<5?>u99mVr zAa|`hq1&}@S#xoist?n(kt9z;{#>wtM?yXhfM`XB?N@@F)%>YZe?ACUsg1+$V?C>; zdz{cq)*|@K?(||!NQG_kP5Cwu7-c^@Nkad?a&T6etAd2k{|I7KFwPCXI zOG^UoP{>+F9^T=bHh2`DOlAvncf(C{7rK!&`HM`-0Os+J)n&MJp1RYkyXUbwRUA$6 zwHT5sSe)Z8b^!o~e7eVFz3^`#$fAIInEXM5R0=5SuyhA-lV(UC#91m;T&%hA zA-+$~?sh*173oQYUh#Y$Cn?NWjsYk|n-QHC2d#bR^y(Au*AlYScEV8g+b$6z1H$L= zn&PD$r?|PtC)niPB5n{@L^KB?8`=aS{KI?zKMM)|`FZvC7i!N1eL zBNjIW9>ozdfqd9hY+KcpPI20-9Pg+&D1Me9vE-P%CnM;{_|cOi!}d$SBo8;wsQSTo z6yGYe#-7{&e_ehxwbpVN8y`LG9gZpT#m znS74GW=vU$R}w9J?85p{DWg_aHI8_S(duew036mQ3XnBNVv$ESs|uGXHD`-wSdMPt z>SsY|bB_R}5c#vfo;t)xhJ>&?E}7!U$EER9lWApxfAk$lzDxK?$>= zkAk^SDHBPZ;OwMr+34e=_+A?^6J6eDS>_e0Is~SS2yrF*8gg9PD^bSQyzJ0IO zSgqq}e|l0U00CWoEePS!J*$jt2EIt~xX&O?YNShjDeBDjb%lf{v=E;t;habgROGfC z=+l^OF%U?ZGXHIcVVJ+do=E`bejMQGj)#~G0uVgsmfnv?NJZepu5wvfjE}uUA)5No z$TN-jn_y$<#?@B;v#f9`Mytg9|^I=`pcAxBM&T3DeyG+DFj zRShOLcvh4|Sf}xY#!E-i_zl7IWxbaCj0_AJ5lCTZ^$85w+sX{ApQD)(Ia%KvR#87@ zoTkl~K4}w#I4Sb-kicIu&URKmq`qjQ+<$UkVnSG?~>_!T%L>2?%2nxw< z@ig}n$oTHXwwV*qi`vl}zMk9(!kUuyJ@nX6VFW?}_y&+O>_L{wc)Sx=tC2G-)o6!UVpgL^3xG)*wbh!t0eV z0iuOwR3XD|4MvLtq5ZHl%=r_&FIp5U%y*)V$(#cl-NL#9x0ox-(K>`o{(iq!e;5Iv z3Uv6X>|d<+`!jGf{z|nW6GW9cA5+?!q0H;y`Y<}+A+XPu7a**8$`I5oU&1iZG(o+& zGkpaW$!CrG@p%Te=(3#$78`P$a|H#QoKe}36}paHi5~nk{_bPuNZk?grw$w`J&=!k z56P6Y&e>B5Dk5_1s7%KV$XsB1e`?vLB5<+{+}cumeKP4Z8m%DLq$(czT^?&+aP}aw zLZl|0z=>Z9gWQX4BKyPIHKxg0Jgwm=%aF*VAB}fxtXF4kW}(WUvPe!#%DAnnLa)lH zf`aslg_vU!`0%9nP|Ny3)ZWOZake^uUfcW8lb`7RI0C?%fQwG{RMPoufAu1-EwT3I zRW`8Pd_c5a1#DB(lUtSRvNm%f2-6kwast77-`Y!=U9qk<`|#aI*do7c>&_6uF8nuk zF1?B?7ybCuo_XreUubmz0&5Y2(267u+cFGjFsj@2%JV^VU*aHZo_%=B z*UeK6ufVNzIW`jy3@17K%^mrg#J1yY+SfI_rVNA3uoS@N4ntQVrU(_YdAV}mMU*j4 zXFR#UI8>bU!f{YCqcIl~Y8^X>w1o!h{StuPvme|}&^3O2*=SV8Sz zggflACLF9Xa>O|{&In3$@Y}$k7y32v=HQ)bDrRko`N9D7LQAotYrsAigX+{v&T9wb zlY!gv-@fXmOJGvjg=E74s|03C^7 z?kYaanLGF~B=}MXK-Bf34~0R43ut9~SjeCX6Y0 z=(|^v6l1xn+-J=8X)V!v3yXW=q6pLDCdjW|SuNRGeoR%1Ks%Xpt@vV=2sQG_`3CH3 zx|z`;hUjP4VH@qTlXB*QHGB*h&cATLYxZ}`h7Z^7%VBi;Xqr|3-Re00WW8-?A-+Es z)q`-=uHz*?fBE6asr=|eLKE;6#}#AAx?Dd);G-MwlQVsQ!G#e->e>x5eaby{4~P9GifXuo?nfdB;MWA#Rl(xU|@bBEi*T9#O0hKo_eWu|kTv4c*ZPErbJ-nfG z%|IO04r|!VD%jL1`8MaHYI7>dh&V=|OrDh>S`aMH{dNi!u>_aSS=Mgm zBBp-4f2Bll+cIEu)=F0~iTR>7=cJho0DDJ167F?6q#`7w)OcfQa00BgZfq)RdeJ0l z-s*m=wg4Rgp-5yHZ3Jl`&99KvhXVPRat=j2zH4tq*N6qx^Tt&Q2fRuw?D(afedk+1 zRaf{mo7w}skX${THF2ST#zOD5%s zB_0mfyXAzm#b~NCS+~XC?Vg0fIEzM^biCPJW8NGBEssa}4UYj;m6_0DyjmBl$SnGpf9C_#L(HU>GDG`sGppQ9>M=pg!f z+)HxnTkbuDKds5EwZ3f#C)a{Gl-+x!e`hFQvdjE=;7#3?tmWM6$uQCS?kgIX7}Mx# zHJ8`kIBi(gFSnMo5Y0^Yo-^cIZqofRmfAdhGKX>3;m#$zum3Q+1_$le=#m2@Em9zTjw0<-7Zy?P|3HM7?g-g9o{-swOhle zjr9VOqt}&FcYPdf%>hJ6aUa|rRaEnV$W4O1C1tnlC@Z;o>RysIL3W+Y^#i*CJlOSdp@-+%o3z0@`G@BjMGb07bs%zoK_)yw&_80L^4Hh63#;jo`AcGkVPr30 z7>45;hGAfzeDwEA&=mTUtm}V3s=vx$s*WYysgTMa!?4KuhqNCI!(c|zkz|FGZGrxy z-|#2T{WJ6rlyOqz!PCEHf8a2TL~0DfJH$E;Nd8;WNE{f3eeFL-NP)Bg@#?B-d`ctZy|7Ioqslo%leNaC2$Pu$;81^>)`TX*ykXFZz`B2B%Xj!;vt)(k1IB~L>>W&YUEML%l#1%?ot`WOo z1`z~nFXjSFj&@%0+>XP)Jzpaf1#+-!r!1;%8h?#8wG^MSe?H*g6%q2sdi4?Rgd{O( zM7f!998gO)a!Sxsh0HLUkH20f$@AaJ=$3eYkI&SM!4%z#BCp}_R}^!Nhk2|j3@yWX zyz1TsDwG*d!WB+7D4s{UNwG9{vu$qJC}lv`;j%MLfZHN7A%?z{__G6})$b(Tr?D_z zcM<`!&V*x3f4T}H$dLRQ1)LKz3F$D3&UxqtwT+P2$&)wmA`}X(69Mco-!P}^1tr=H zm(eob53as|01)Y(p!V06-LD*vFgxL53yXcshrI<6Wi9GlbobDsRv(T%!edI76Zf}` z)a7nQEk&kt0f58eSO4;1&}tby+>f96Glq_@LrOKQe~tw~EP;#!h=co-^sK_4!MpVz z>;w7$K z9bH3;R2DZL1H3l>5jOeaeT0kQ@R#=8(YQ1YjdaePMx^}&>Q%zg>8}eKwu^4nftp^V zFQt^wf8rn?r~O$Tk)Sb%PxdMwz$EsPuXawOg@EEl$fbU06h&o+CHX!g1g&xG0dM&4 zhWG)`s}~KZ1GO3t5-OXNCjJC-C!j0gSz(kOE;z(NQogY88pV8Fk$%q36h!?K*OCx) zhE59>glLskt^Q(z!+2DiKHC%+2#fVB;q7;Zf2PYB*LE8ZlkBSB|DvCyp++6MPDU~x z`cb|=(ohwQ-d7aLk({S=ea8vK>eZcdT#>x0Qsk;Xj#rLn>ad2jWS)waUnY8Hk075Z z@4VoH8jV{Pr;aRgw5X*{v*IspY!T$G(%{aIQZZ-y{5|xJgiusiGjMdcp$&U_26&!d ze=jd?g>*&p^2{qoX4bX*@Cl_MzK)q&8*%j8`W`Yvi1kL@SxFH1!L}?S6Pg+HQfnsw z;=frY$qzWLkQrkv{=|UGdv9vF6Jxm^klmk`abU2QyW*)+|X-C)Ne_DhIs#xnF-$s4&#mJI?#g2NEgYk(nJfY8>a4@6; zmn&BzFpI~PKdvsS4n33m=ls!N)VY=%UC^DOKjv|i@W#5?COS_3@#pL=@!UBF`Y)NO@UasJju z&2S$bmw7QR$cI6S9?sXWe|^zZ=K)BT?wF1@Sh-iX0M=b8ty=xy2Rjl06=$0O_L^s) zA4710InRljUr4I2p6yn!Yi@0X^4!F}0E%x*G>pJ}67`|vnHC>to>oQcVRutz>VK9I zIimhDppUNHluGJ$J)ifi!9hRGM|`>+RE)5v6UPB$q_V6gM3u$(f7?zT5p1+f(w6l$D`Q|GA2YceB3qExMBs;U_QQlCWhJhOvikGX_r;9p(b}DQmp@DX_}vpHJiP)V{&a zXapvBLEkVAQdJ%wy7yh~idkXFeL^Uz%=V(fMe= zBA5HhC5>ONtD8~3d=A08Iawzqu16@JlCDa!6hOPM>%yX58sNyQoATfxz>bnVG#`dk zrMU?VCa~5gPz+=LQY84zwv#y%AqwybBOqScfBlB%#2uaUfyW|I8c7%%p$|w_mYjpy zsq(;TWCDStOepH)CuE5Hw)a)NSSLm&h*Wvy*heylV-Rmg-9T?*K!Rh(PlaJ_a%v#{z%nWm-KF;|b(BAOH5XZegIO+%0jcLO@(ze)pRpgi{;Zl#O=lr3D^YT zH=S>vZSYp_>|;tbC7;i)!s_vD?|PQ*e?K$TQl*HA!kxsuhtj+wFbs0v=8w@)TKe$_ zzr>VO93!H6Xm5rjlZ81Ezxvk!DQyXk=h1((79GH&8w}L>W7bBNZ{Zx%>%Ih52c5V5`ZsGN5F}lnS z<48~rOxG#%svJR{-|7_EZ%hoF-6sW#Px-m@=)gH7m~e9KQGJUB`dk|%OubN^PXxk9 zZTIF%z&fllm&D54K>%3AW(X!K>Nt$2QLr(pX5Mxj**r_mc5kRbdS;v`mLjw zh+Te^b|0}>SSXFwRQh1P8KceCGK}!{(e6FHYriB!0vnOQU<#Hujrf?c*w;v|C2lO- zp<@h>`z{Eq(R6WEv!q!hU=ixwd7RO|vHiWkmP&z?D?LKI3U|+E6oxwyWqY{58~~cI zzv_>|bdNGjzR&wsyVGoFe|=cpkvR(e7}_pJU4O9fruj&5n(A2Ab^Z+?xEatKyb`Q1 zqt4#+KK|X!TwCQGTE*okNxrKQ0aWlu#OWvt!xB@(b6CR}B}2D3uz{p7rJgU1=~2XJ zT_Pw{h(eR%oFx@1N%%ey9h==_F42H_ZJ#Uot3%!5kL`snY`h>wfBi-3aTO_(#XGXt zVQN?q-#==B8(*^c2P|&0^xOG#m=(A;w>GeJIPhbQCk~Abr zoh28_e#dG#&k1+%Q}R%Y2MUS3RAKouf$OgB{H0BR55U=#07NODka6Vbe?x-;Vn|S8 zTG@R~ez-%369`@$e~6M6crc!9&fkD|s9qPizTo z?wva$0#AyRWYC7n3Zm#wwpnu!oad9VfFm~1q{q~Uc7cS^5Ok_{16p}$-2%j2hYV87 zH6k+>8PiRjfBE($7sAx&^8=6@7ZA}M-$b4&)nLBLf~$!|oloUz9Te8X=+ju~GIW)= z3FOchDBu8@aXn~u@e3iaMQqL3<0GC^Py2J|Y_lu6x;88?IyQ4@+`b$VO01%XPr5Pn ziVr7Ep+VXWjo0guY8ns#ylI7T;^SIe$F^TJ%d34oeETF9*>i33dGEjD8$~Sdn6C?8%RzDYTQGP5Mm_#3*)#I6+0#* zJI;Csx5NYk|6x+637t2(hbqmsrjX_%C3mVvS zI7?<1e=Na}7)80MvJDrP)k2M*we6=VCuJX8IC|mD!$HYC7%nIJiLix1dd9Fc<4L&P z=|DYcmRUMU7$z-akrNQF(~UBSiZOL91l6A#HG?<|WqUphW0zk0IYH)i!b!J%E&X_ z1-jCW@M@2^v8CkBIQcGsmxod0jI8uPajjPQZnenD+fHl?-R>4f(Y{IarTb5+ ze^XKMH9U*VrYyK0NmFv_;Ng^5{cG%B`lWwHrCU{33b6Wx-2t*c2bg>zqfZ17`9uWP z$DbMq>mdt3us=k3ksb)#3CTT^eZL?Y8$46^sDJlW*Sv0Fj@WbfMr3J`I*<2VLj@87 zYkl@|3n)3MdUZVO1;x~XxbJvRWuMz6e^id?Y`4Tbv7<}W=faS$K>VJbTr5V<+yUPu zSRcJ74#W{yBl?4{YN@NC(Z;lEoiIV<{)n_YPD@gK&tQz}DZIf|fbEZ~4FqptF!=-* zkPfy=3`)q;LYUH#k?IT=AHkoMh-nW$@1YDi9!Y})3TFbvZyGZ<_B~;kTZVpue?sXg z#!B7SxI7!l-1$75kiOh&?_eD>?ML5+ShQ11siT-~z_s;kH<*YYa+RUNi9vs6s>let zLWp9zkgZazQM@hNcu-Q@mK}K$QQcH%p+5?$At=<(dfw5^zVEIT5t$@ILE8>j_lAAT zORcuk1x7mby~%D|jd&)nfz6RGe-Nv~tVDH#22RS#Coj?TIRo z6zd+?F{+0vARu@`wOLCFry(O1(0@W7RFy58wCb)ZEIX0!U$Q#4NMf@M*Radsl)(a} z?ln1RZam(2E@i^-k{*_IC6*rAFpkI=8;N4hqXbN`j2zx-{q4vN+HpcZO;<<=`0+*0}vGP-B-~iJu6}K2)zr&OY zc?_B|qZ3=ZlA7bhLKdzpe5LKh5(2(5$(mQ@W-a6K)Uo2Winc1?#(_}MKwr!r;PsK} zP0&4hzBY#ObgZU*WLp&bf8$SB`gPmWNd**TfV87kcF7C^2g(tLm!fxehB)i0banzz z$Iq$u)$BEw*(L4g2j_=W>fJs8{qWu_i;+@KIq)PiErQ2QV;p!!T55#_fDa%r@ws~9 zot{0jGo|W-6nVLOT}R@pN!ze-U>O$xUT>wKviA%WI&kelHFdige+=M57{W9>n^olM zHi!x>DyuH)GgaWykXDm@ShcugXGizw*`-DTs9Y)2xfCNRe?kSNczMtAj`qOtvvB5y zJ%rQ%t!1VmW!oVZ0(0_4t=a|H`DVA(E3cEFAsj0n^gVAF7UG}Y zRqIIFxqtoXpT;Wwf81Mwu013Y^zXdQqMVq6Di$pG1Oty}q}|HuBp?)cOV#*MvP2WV&;ZBN${=V;sbr+?VvmRst_}c?q!$z&cJy>T9?Vm<}=TBp@Mf zQ8_9jRm&i@249}?ox(2s-l1HZ+&33S=Q_qBzJ$PyHy+r9f6#${sRO_hA)AtMNx#uU z5R>`RYy|OtDLQK;i(w#$9>~ErLuh7ZXJ%&h>9_U*BrHR)FyK67D1T4 zy={n{M4MElYcl3(mv>rC%@; zt?~4Q1Me5V>T*m zqb_Hb#Uloy$ehtrpm{u6iDpYQ(T(B4PDD8$vS>SkxdRHmo^{1&*#GJ^ciR&DL1vc6 z`42-Y>SvD^>elkMaP{aS<)Ob*f7UsLE1gQ1o5UDqZ1yirXgY?s0Dc!=5ETS9vyfiX zs7Qa7e@arczQ}~pw|0Zqs2pVZ^b-L43cZ~!4l!)ro;4#_B}a~EC=|HJpG(-$uY{ z`(_G8_K%!>ca53q+x#Gz()HIZJzosFD5GvWf4T`ld2iumq+}KSFx2RqCXD|zLSRWo z%`p9bVF_M^=Z;eeJtvuVg+f2Ht2X&rSs^ZJDhc25D}eR~mmB3(O5E^KeBi}BGJG?} zA8kvuNSv2K>44(sGurw!pLm$pX{T+rchdsUphvsSO_D!(>{-v60^UUtmje8)%s-ap zf5=!!&GuI@0W3QuT4j}tZdxg)^GlkO8IqJrTfGzUz!E_>q)vtgF-7uuz=wt>$N4u%S zeaiLT;_mk+L$YH(URoKSUF3VBet^0ze|XXvl?Q|ds@+TM)v2FW@XRG#-*I7=Wd9PY zqMrGd;MxX<7%2Q7m`o^7*P^P(es^CrwXKS*2^|CYylrnA#ageB!re+(w=UAY5WODcpasFfWzT}O;XQet zBVUJyT#W+dmO?rTtdLEp)M!y5>-Zd=buo{d<5AT7^KuZz%Xdsif=` z@3N*y)4=}`1l!yH`$iH*8rmekG?5NqN>5}zmexV7Wq?N##NQ0w09w~5eC`J4Re?e;iao4cV zSa0sS8$pEy?_nsx273V*mL8Y)Dhk=ISIL4#S0W~J*uO*yryBDvPyQ19NU#mjM>W`p zd9`U(D%(AGl!?b23b|)n4<$?O6J;iyr)<|OuWz#F<#<*WKI~^ua_HhIdvms-t|*!W zwvyl{IEHdd-;S$qVotGXe}35NGFvVmNs*9J;ka~)56#;jh&Y~{a23$ zRo9g>JCbA6)PVtIuG6&5g&EGR`H$vn6PSKdxK7A$ib(GxcpM}~e}vXnl47wMSx9DJ z%G2_LH+F)ZQ|RU4L-clZ4ifRhTQwTwQ!!d8HMiIyuY62GLsXM`d}HeZKsDMPeMY`f zLIHySIOgI1UQcP2=@}sb)S!a@F*aVU%w54)e{UTwy-ClTHf13QHBl5<#`qVD2z5<) zI#Yz1Was5|8i-8ze-*hz5&++YLvUrA@nKQ~l>z%tD-h@mi`N<93;-}q0$VGqF%nX8 zU8ltA2oQ~%XWbDpi-~)Z$_GrfmAR{wkj1;<^?!M|42Fm#A7U$WR-O3Oz1KkL z$#s>8qQP_#vykVdhQ0z&pj*-Y%HB>KNt&>-7|95ct}@=Ie>4~?t%QktMNpu8pDZhj zYe{X{X3l#ySTj6j@)UuJlN6T(`K1)l6!c#sT;Ak#M$y*jXyvP&Px3~Bh#&&+l&hT*s;IgDnfgcd;KmAO6>M zCfX@pOhgN0f3Rsgv4+0k(qSr?zUTzTCr*moNROqOzlV|Z zNTa(aD@)&A)*k54^f}!3C=T(Tzm^_G6dW*l`wCt$%~|4o!eDRMS3MyjbG!-bW7jp| zOJK?)f}kEiRK~48wP;TTYxr${%nnYJxLM*AHdg_?e`?g4e*ArgNmP$Qy#Sgz$f$*R zQq20vZR=a^8@K2gq8-$r;ljlS*qyGkl8&IHNWY4}PkTMlX5^;@ntx0|UtxBN%<6&L~B3xoZBImGx$Y- z_1Cf9`TNQ3w8t-JIhHt=Gd%}PVACv6v3dUvmbhc8P2Pzc%2xYR>I4z%w z!Ot^fFv-7Z5P5P5Jw3Sf*|@35ATCS=wA86>se#nj=y_lYBgYNsIfgqp*k9Y& zcca(1@q4mCl%a8GTwi7S*>{EFD4q&xfDuyDRrI>eh2PFSF&3j3AX+E#oG!DIZrM4U ze;<&mupNpE+CYN29Z`x2pi9#cf^HFjcUv_H9w>`c^a`C*vFtfc^O4#|NI>;lo-4eh z#&*YUG(v*pjT$o|YIi|9a9A5%OcOvIU+NJhTToa`FXRA$bPpSPz;@r9t5#~rh(1Kg zy>ojEb5c9O+#`Mwy1~CT(!mp|sd6o-e_vHnfuMK@;7(;;dGm#Hjf+c**v$8i#^R*7 zBD}mELCR6CUHTL5%Ie>!#*j#@K?PETfDEOh2>DBV0$)3SSQsE1R4ayGRo?{tkDv2N0(1CXd!`19p6}?6CHu;e-TA3fykO|CW*2>HpqO(#dz6l5ClrOCxRFB>q^9vV(fYmMzB zYI~;eA;7p<8yq!ZgJ1Tujjradu%Ag5ZW3Y083x6O!;NHNn%&@d?%rt1?PQaf~`VtCp2H-+oH26&*M*W8;^XlE>@D-J4s!hA_fJYxy>G6subb zjj&jh16F6wIh$?qhWb(oOaJ@Dq3B&FNVqbgF+DY$A`x%3rZ{lp2}JB?G6PY?0B;hz zi#l>Y9hj=%$*Zuh-Sp{xe;tBoda~N0nqV*62KAVPhIxJ!q|li3i^QDC3Y#@CHF zOSsmgappuAw=-?YKGX7iVJVk*qqxRK@;_6FjZq2kCG94&DS4Ylg~~M>my>ePc)<7S zHA?e}(3OdG_#XEq8DH&F!tz!Q!TY=CB@tKqZ)j}0C z#>wU>Pb|xt35pLnEgqTR!uMkO)d~hiXvP6FaCqpPQg$O>voOIOTae@8ABjkQsz82K z?kmk0ps2;=xZi=rQB8-0>$6THe_98Qf9qiz=DQJj_nv|D%HpezHPH#9aT{u@g{5;e>raJ5wK=u;) zSLZeaea~nc1xw5{2HkVK-=ZGf;h0NCbyXDtTHVj+re=Gb?TVkc3Cy^9oqKC?gUy0Vz4#{}Pk8U&ps-uT==aIn=ylZZL&uNSRa9OxV(W zS}hvj_OL+G6Ip_WuCp}!C^g&7_eY1|-u9HgWs6P#!%O`eUKZg*a|T6L{_YFEzEO~cD z-hM2T1^!QE*_8)2$00{_0poUr{6OgP4u4@KShaQ8-;vfZu~}U1J7Gj7AMdRF~WJ3)E>WsZ8cR)_qXt>Kp*`H}3xp=YRySSqJcU5cBr!4Ti#KF~i3c%?F9^~B}2ubw6ak%xj@ZuL! zQnj^`hKN8Z=<~&@mOIhDzWooh));RaxGkQ8w zfbZ@z_uC0_IqB!qD}ffZry3h0m&BXdG4_K^?}fTvgY37$kg@IelfKiynnen-Wbr1BKhktZO=!0@9hHiupvu8ND`#PjuD72H>Qvl z_hiFK?>nBFCbX!!*NSh4ADF*B@0SlQ1*B0%jDs0&nJr>7j%q&7Eg^b{448N_hCE~^ zFG;YdK<4`U;Ah&(1f5XH?_>5b*uoD$iE$mG_@D}kELiGAADiJM(SJE}Djk6I`Vg53 zyJKHP8b5xD-vDbJc+ zf$YjIN!5l%4&9{)zQgoltE_*yd*fbrMA0Isx+dTq+uF`nhEwlJ7SI~FZRbf{DoA2b zwA^s7t(nNd=%~4*+73Jv`U4)43O{1;RsF}jz^vY_encrh>jmj<;8~q7)&|M_E4XN{ zN)st0XIM5TL$I>u=+xa)()Tn;61C&G&C;GEHa#1|@I5`Rm!my_%liJd)Zk8=qgl2o9T(@C86VQBzffGSJfzO;a4`t??_$cdG> z(Bb&f=p1t)iz%rP79FJdK5bvMsYkZ)9triC1{`J1o9{Ykrdl*MCH$!}oSVeR8GpB?i(KGHRX&KE0neaR52 zNPnpkyKElpvhcE#Trn3|FsX> z2$9&bT^2nQiQbY!Sy==Xu}QG_r<(q0Lw^E=MGfgzAM3y!+S})Fbl1754(_HGSMp{o zk1(BTEcQKW4e>90JiW+Fo0XL?fbuXb{CcuLY(AO~31Z$u;ur#Jx(|lz5kc3gMb{C_ zcr?}_x^iK0Jx+(-KLckDvS3tiF2Drns zGgRf5n)r$cp;tUBxa0@$!yNFbs;>&iW_6-7z~84Le#7Ymg1B2lE{Ebk*>(|DnsY5mD>2Q24Qx|Kg&W)*-8Eee9aC$;O z?mye3=&$D=-x`+vF9!p5gDEQx3;hlU_T}$>#)D*V&)E`Wk3C)2ac{y8=YJH^Y_#Vg zLSUbZjW8GEUNBtnEExTFDsKcVEJk0r8TvKAPP|{o>@`&2d@@IbseJTc@5X`~EP#-S z=O>#Kbs)-w==hY3lx}+R0q+u%N-ey5OliQ41(%A}gQv!b>&ve&2b-Pc9F%(?6ipLU z6s~{OStie8rXj{EJG(A|@PASY;>0m3us1aj5A9?V%A;|1yjxr4IUr8wO;T|5L zlogp6nmRN;s|Q-bjgcgDu_=-qgh2q^FG?dmH}wu_l`? zV;Fa_6$m;WH-s}3aggoV8gL0HY?6Avp1p@Cm*|huMH=f!mqBS=$A2508ra@&o67kl zIl{!2{^l^iv8|qVO7+8Lc1!o)>?yNahwh}?NY*mQ> zXmhaLu%v_Q>g$IU{MYG4n>_0YY&n9yB0CSr4dI_?5^AT4jbp_ZKTLl7zwQ+wTWMOJ z;mpx%GGYin08K!$zv}_}HqQN&%W8JJ=oCh`JfAM2sO74re=$s1>)L<6k@`|{R@GG- z7Dh#3hyzAe280%IfTKOam3dBaxbVB;ZXBbj81PtKThvcizpwtaGEjejk??h_u=tQ= z;Gpjmg7`xu^5C<+3KOU!1@1|e{Bm9^daz*G3VuzXN+L=;lfiyHic^yFX7$OCd#39_)$shW>kK@RNdNqv#Ebb9* zpk_~7wQL^wJGHAzX^9939{*N0n)l=A4f4g#vTTe!JyiZcMvC!*7|C1vtDzv5Jc;nM ze9}#fjyH+eLK23b#6zg0Mi7&=p~w6RTrhu%P|HuZ8F@H<`|TM= zkT=ryw;M@m%@4wakI2WL{ucF2%czXUN1(pvYe7_b*cyC}Bcs$fj;8IX+<1O#QxTXw zK)9{uK)4>8AL?gGn~1fz)QM;{hEq;?=dlyqb`)6zq4F??B>XH+rAfPrV87ctNYgp! z2G04lYQwB9r31Q?jZ`R`(@W2UBl$_*KS{*2;@gn85ouH}FM> zh}g|2R+`1*X0MZ;cNI{L{a6ycivIT z9KTO+>edo6*^?qcfu!E?OwsW03fT*<^lf1r@7YtbXCs^6X55HAPfwD*G6eT=O&F}BK2po9f@nEcex>EGtQS#ac)jW= zfX=dA9o;ti$WH&kD}uA5Ed(*>R4g}e~0mLT|7e~KTS05M%9 zitgMcvf?dAEJsSHSttwy zBnp2BFOp16Ib?V;h3;M9i2&l|nb~6)CL7+f;}JnUmR}Wip_Q4_8qnGWP*&n#!9H6U zjvq^(C0g4O14v2OZLctVDr_t{N+e*XmPvi5V!bBzs*P^hSWx!z@0sR5;Z}yoA2zD; zu?eUi<5SEg$LU$h1Asm6xk$b>-MxClT%>>gEq1n%&n#;x4dMzo`URwX{}SJehT&$G z)o-Bs(5c1~@;gnV5K{RPHB1?x`x43{$l19YFNyRyWqLv3xE`zuY9j(d1LJ0GaZCsf!j(LBZ znev^d$xaw9Q=QD*-p_M@vTZ$IIYO8lL~{Gd&_1MQ-xTv&Jt?X}SMC7C)Cl@B!Bqo* zP?FZ}2l$j)P9glXhEZT;>)fsv0T8kgJ1o$0$_Ygq5tIUI8*mQW>agc?kQAh$iT^~) zqLskGkAk`pw=15Z61Zoz3aKOjH06KBk~W}v@hm%v?`oFk8P`ntoT4H{+A~XDXdr(Nr*nxL7ttqP>9c-CQe);cuDkF>1BnxW7w^U@7ILkB;# z*L#xNMn!po5y`aZxHKz@hA_V61m^}8?jtCD#-NQKuDN%BwQj4sSi^sgp{86?#BaN$ zO+DsSUG;}uz6)U+*8e!Zp!@7s7)BmFUiGwxs=jk3w-C1b-1p6lq*ep^ldA%Q`TUSZcAvvM9hzr~$MIds1k8Qf&=G4Oa}qXE*g1&-`R2#CLzPD?epx@~oR{8q4;I ztK(^^+xDJb{gdcb%a%0*?lX?L!Ex?dI^z-HH3s}f4!X(Xo$lgSSw2ygmGw4{G~-!N zY6b(sgk?meykfPyH_&VN-CS4YXDi$|wIJys`SN~c0ax?H*>v0Fv8{V`tEOUzvNex6EpyStMNm6NoZq%n6E zM~|#H!*rKB=UB7ShDR3OKiHCnj zV&K@N#Pgyg+R~}Q75hNop{>$|1=v!4Da!MshrTTgPHx%mFfbNDB4tfn2v+)uxMp%7 zALUtpF~y(!3}X_5#mukY)WJFk;v^{m!+udeWu<=wWepnJ0=u+irR5$%0?TTPEbx|9 zMPgwn+hCChl}Imvg^?a>r1m9W8!B9GMus511_@-j4jlBh;*eLnlHUc?Ua!xS`zQ-> zxgQn|DR^k$5#6!6?uHN*{=Hj@V7(Q$MSY8ITiP#P5u1Y=cyFPCmxw(1tEn{@j}xRL z=%asoKw8lV9#h-J`rz4H>WAy$whlq? zox`QV&lwqctlM#mW&CSBRk+p(7XEPi^q}pEq=9B$brm7C!6YVF62DI-EwP%?3BIzw z0%H^liFyqf!plL?ROLq26LnfY&G3C|RL znMNuRA@X4Z!{Fu+cND8&ctKf_I3gb8&9>t2S>o~13L2|?s6}*1%g8~wnc%|F(g;1b zB$WCG^B_^tU$c`N{`VgWtv3|5=yb|VvL^#n9nXY6?uN&ELvuNXU1}>bU~~^b9_4>R z!|82RPs~Y9m;t3jxXAq$bh_Mq$)wA(P<+@Sqg9g?n!;9@Yk9KkV|?Ptj}r0^^GbO| zxJ-eJK)hG#Flmvz3*3zu8}#do>_S(8j!ikur87%>{F|c%^~QjEryzCi;SO(Be8yuH zpJQ$6cohfLgujI46#~9zFTcx;*%E)n6@g}DL08^Wd&-pB5&JtXfsF$LX8MipFnm+d zxxZQ}7QyeAOBM1RvFqbx=xb4+Ub@a!?Ih|J+I5-cLBlk#h6w9yGfr83{5Id2q3Oh1_&DP{A_rXM@Y8XjZjwZ7Ql3;fwBhos^QS7*dP3}H&T z3NbO;4DlZ5$#QsHp)3ktypQU;@rKc(WnL z%8N6jW{$PrR$tg0oC=@`|F(Y#?14_0ZMou?tshEERHs?+64K;WPlrkmZDmCj4>VvzVXnDqt~jvx>7O z-n+#QTP6SdjY7uf=Q&Ps?_R}6xP1=)p3ra&0&joK5wSFdgi{)31l)h4+5UdNq_L2( zoIl6~D%r}O@pr@Xhj*c1>Z}EbW}%b;R5*d=uS7LK2){W+x<5-dXZEr~MX9z!AMNmL z{wq81O6s1W)!x&zEPATU`YmgX2FsymmR}p{I?an}Qs|4FjZL4{>ha+vR-=(YP@Uz2lzm- ze;-$oFUDutwlag1zz}o$Q`EQ`1E0(xn_hWelF~A>hQ~pR~#!vNVU$6BLI& z0XN_!Qk(-N4GHDMQ$JOLBcI%WlU~!}rm>{?_+Y7$GLk{7;|B|r$dPm>qIv&zn*r6tsV7N5PjY?JC@*}YU@_a= zooqnsxPN~W(upWQ-4L+o$n0tbI(k99!!_S)4#!Mb`J?F;En7EOpZKW_{)$`WBp2iv z;_w_+x>7tWmdRwdjnl@XJLq?gr2KBRnEIKJfxL%G+WT!vuoVTahX~3AASPJ6KuM4j z;qOledrrZi!LQu-QV+svLtY7$i$Ighx*Ty;?dg9*Wa`y$nO4J}_V77ta%4P8U04L5 zyv{$=L0neiWMs=f^GbwA*yAh%l`+I#c4XR{Q+OZU%u@WqBEEVz-#&Ygf4y1-;wHbt z&x2Ut+*TPU{VV~SLTx#>q1pWXjn(dP4X;5z@FU%w-Wq9j%&#gK_ESkU&UE(^&P!@% zSZRNW$axkuyxt!OcT_0`V4)Eq><3MDDOE^0%|`z$r!pH#hvJ6BIab}zc0;SFK`nDw z{#{ljWb!O_reI+fcIB;(zge__{#G2h3ednr?8E^@KY$1$oycq}1^^sQQw=y{SYs`w z45ukfUpo#fK5`zsllU3NlFKXP!R^*TWHf(Alt`9V1ogvaiN97CVf4h>xC&R5TLpFR0PCq31eHcDk%*F*Mz6<4Klgrbma zA{gM&S!Zu!@k6NHqlIpodS$Im=&9b~a~;<-Gxm$eSHo*bZ?lmi&xJFYwOHI)CS`vE zzyLK3hVN~Nzw{gs9MS^6VOy08`ED-BPg7Y>ohEG#t`X(XTeKa_v0uMis-XxILO_Kb6TGhRvuQ{;jVf^Bt;!NGqhvAHCO z*i}p=pg`{*E25gESF}MKt4dUdX;fegbH3^@3g_tjtcyS$yoqA*Yoew|ITHXDF9GO% z)c#e|GYrW1#VrC^;3dOG;vOvrTMho5IpB=Nw>emo{synUmU0=&65mSM?G=SW7V=ru z$)1EyF~04;CL7h-TN^sUMRb9; z`qVn6l)tGcNRwppmZPpm!wQ^Iv2;7H@Lyz`RQ7J8%$>FD>_bQ-(L3rU8q&ikaN(>2 zi(*KNl#ldwEO7(uVg-aQ#kuXTsRx=GWoA1Fs5E}^urbHI&Iw5`;diy*i&Zd9FMwMb z!>z?g#qd{sO4Gt$K(T+^M<^Rz_JbB!9Nkx=>Ek=c4dseRyGJn>({|f-`y*UVs(+19 zD$VD7EhfDW&R+?7$?{v#;!i`)7fXL(kB4uQJfc#q+@Me6Pha(CJ)pRxAm|3NV0TPv zNFr)U`mtZwu7c%Y$vm0yXqVLY zE@6m=rA0@x4cULNBANM>@&y3I{kB-Mv<7V+QG~i1J#4H+D8X+*ZM(XyDHNV>qGp6@ z(3iLlvq;zov)$(Vf*Qlw_j>(9Ag#h-w_mjyNhyiEZ?Vx za^+GkT)%NlVgI=+#0G~e$Fq~A3|0NG@=*&AdUbS%$ryi6-*6b&MKe(K{(B8XmKeOp z^a%KErXctUW|uWwe-uidh#(UUo&LCxkCq@BiYm1@R}wekr=UC5Xo-Ejzj^1Vccd8N z%x_ZePG+o03OFeUbmOFu=64M0?L|6;(Z-y}4)GCiWil!cbqgsAz(doLAJNW(G6phu z0>_4q0y}?WV4EJV#|wNKMoA_QiY`mXjQH@~;vh1NKBafLXQBs}6OLTpGm_!4QdIyz zJc;-Vv8#uPePR|Su)fGHMHKO|Tesniptyh`Ga!#pV9zbz)tdpfC2`eqUd%zgvVdsL zJwFvt$U8*uh}Mcdm9t!Tji~4D=kY_vu7ZVkKPZ1;Z)68|_yuF^s~=Yor<-2OhR}6Y zRzlTS$PH7}NS@R^Rj@<_PM2k2ke)n=qbK7>P%0f2PAL+O(L@9PgMVuK)~X1!+q$r= zux_owAwF-1)O1Yu@{=$zTvLNHem|bf^lnazp}C?T(GcF_RNx4WTJu3Wf+2SQnZ^qo zpiO_bTSN0~K)`6u8S{AH2N0@0PnnWiD_R9$+!OYAJf7D#PHNnGz?-fc%-+@TJCViW zXG=ueUcvNTqUy+KQ~=|h znlztK=AZ0AV^I+@gP}7Ia)^_3#rJ^MzWaXw>F|njYVWsMt)Dm=HnhPAgVuU4p_KN8 z0pH``#hAfmIQRbLX~xu?GF%2lBi&I%mpL1@ubr$(cn?*f^-bf$3FR)~bpb)o&xsH; ze!Kcf1J3#V_96@xQVkgz!}=qAQMy68pt~finqw97s-!`2+C-YbRf`(O$KXwPTx@?` zkx%aLRAJHaq1s%>134%@kj6?WpUwQR0L2U<#M?uwwB6NyRJUlXziRBD=; zck#MMh*GIS6eN(GnIS%B7P$7<)7XE`>XNFdr7BM6#n2|7tjhR;#5h#p- zXIL#Wd@hW!H$VD(TVG^{6xlj^>6RYgNPh8irdl_ zpcc@+&Lz8|tW2{Be23*PjX#8GVWupXX|0gP8ei>mUp*AIe1zTtgzgy}yFd31uVNN_ z94$jFnBk&25m!t`Sq1i;=OuQRfQ1DeL>p}b1aCuW&p=@mFVyaq*s0UU%1ZxPbUB5u zLz<_zvn(+GDA0y@51gA9wQPTz3*Kzg${Wge2tw3pbOUuwdk66=4hTXs%kr+GX||f` z=maZp&Anb&61G4+@|Tj&mj)3BBjlcY*O_mGfK!Ggf~Z1eBN8G;U16PXw#ZO~t5STv1ULi)T#*1e*s@_(Xs63YJD?t;A#^ z1S(;}k|KrH^myNV@GDsZL>K-R9s{XzR%$;PjCTxajpN7hLl3-Ab@8y7J8=l(tgY%g zQ6OYTCT$OV>!N^67jbX@iB zsHhD%mSqE+vI~yvqk(?{8m_1>-itrXHq|9sZNitRZmG^MK_69x^2bVTg2MlrG9;o< zsqGA+Zg$4fNvuvEaU-Bo=s9H~pc1^;58FCHXep^3LR<~~t1K+32X(vP5^K!5K9WV( z3Ap|+g)4-bLuc>4)6%i6(yTXrvc>E=Gk{#KjA$ffFYFNpDVcv*>RR+Z*8*o*C$MmM z8f#mxGnjtcMCmYkDtE1x{MEa9!D{bjTQJ4ZKiWY0NEqG)L<2#l-+|hWw%WVy0i&%< z!TUu$v^HJEa51H?N}rje_Hnf|z8yoT1O#s3@@tSiwK>DC1rXPxVNS!%i3oY!37+~y zK-x3|(HG8NLr{NCm$*vqtVu&#Y&L!5NvFY18F3+LtK8@-g1*M(NV&2K& zL%{=j)_HZiWKM(c4(eyh`&|s`XLcozYW)OZcIJNQmU!j&#$(+304QBuzC>pZA<+|a z`{tTD&r9eUkn3z3yg_okqiWxqy7Ozp`H7A^L+e5k6}f+Q0fIIr{H_(adU5}v@?2XC z_dsRG;XP@Z2ReTUPo|-mr4x&nq73+iyxBPlcWbqbCvqeRE#S-tC*}&rBsIkWsx-hX zlGGDi>R~f}q-l<7L}RRD;zm8b&`yrvu1R$!wCk zid#LQ63k7t4iJxP_XzK0h~FayJmeXFI`3Y^5|z(t=8?xRU5dQIIcb=i0r{3`$3E~U zJaS)~iPmVoS2%9eSx@My+S5ZX0re6y5zpi-3H*Pfdljeb+O>pLruPEfo#>v6`S_lG z)MHe+j)3BtKS;=K70jr|@Y2gWy0a)@;L*_OSr)#$cEB)Fazlwz5#kJ?Wz%~7RJmw4 z%t||pxfL#AzWQY1oavgV@_uwhyN5itsbBYFw#l)R#(|@Wy(;l({rS#8~uGXyE zpyU+&oOUJkB}xGTR^c)2H^r`bY~3$pR?M7T1l>sVw0iE+gQk;9Ee-Z8k1Xu!L3H-s zz(-%h8Kgcy-FDjsOp_yeJvsJ7V}-wE-+X_Ga*MkP-WIfTa>!%Ye)GJcgL%i1E)ME~ z8X#w@eK&_HNj;5eVU3WrI`Fa{UHMZlbt~I^dQK64Dmq~dYS9mnYWR<-fLP9nd28W- zt>NG&FF9(EzK8XPr1l_g&)_?^Kz`XX;BYKT0VNawHNYKy(Wn!MtboiEb(M0 zT>_<~@!O88g={#ST34P$Vi};$mjS{XlkO3g+ixqxfMmqpL0aA~C4~=p4`_jy7YN|DoFa1tu z2q@FdYm)6|x+FQu)6lk-QhqvfV}q&k8Ng-({L?YtWVl;IGoIn}!Bno{g?@j?Vnmn5 z&ND%Bmwkqk>T}wfVcwIGIE9nsBL1%T{BhJC?pi5pPGRzqXq)lf5tcKNKjpJcwzXG1 z8^@Kx>YyS7$pr{OZvsm$=lC3>B@CnB^}5R_rybMk(aYng6))A)$zUy zz~F5*b)sbcE-`e&{@7PG=;MEUVk9t0*8x@w6YfMdtvt~JKixxFSA|%jbxh(=6YDwp zgr$K~|3$y;iY@)w6&6`kHNVSNUH8RZ`zC#p2wKDiC#@=1;o0S!cUtv&Hi|782dc5| zC&xLXfD#5L0G_Q8-ec3iGh0~tBXINCS0q8IVwuX7jQ~MjRBTwygr8LV0iSxA;0z96?eAo}b^MyR_NN|rT zfAs-fN^&EsEcYVguTQ*yFAocQ|7S#B?|$03RP7ZJm}v^s&#=6WnUa{MBnv3f71Gwi zJQXHgQ54yAkU^?&gOmarWdQ<7BC}8L@+G)uxwRS++|Bk##M6JLC#ji8+c3kEB5-1H zg#_Wf?{@YFTk=9AW7})3$>#TD#0D3B?7nUQ<=U#NxQ(kn*^NM_iVCY{+|W-(*_8DD z%lTPLN{B2U@9Y#=r*Ds?l!s^Zr$09k?^rSPfzr6z2vbjcjx~@3GS*T$m*kD^g>}vU z5r3ti2Lx6A^BjL1>xdVamBm)$A$SSLh(|FVqn`>c`!iIgBz}5;*CM3etmA`Jtx$p$ zQPKu)f#0AYDBVxa>@?ZSB_iag?vsE8i)1{TWu+^dt`Tor-&p7rj^Wa55}NNj(B~G0 z8gt5%AOYHW#=8?6njnJaZPRwcH;lp#!y16n6D3JXzx{tfq7^u}vi!{Yk}3FF-0T}XjHp+}ds_U)Ms(xcX8k4Z-LZAVE_*6R{DM{~c2hxqyG_pZKy&8nfk5h4A zpX3J;czjYa%cstn{QAF46G`Bz=?m8rMHBLk5|d3Ao)P;Cc|+$6sw3)jR$ix9CZqD zRO#;=d-@S&-~SjUa@?3;p4Wz0fTj_FpcJ;YOqwR4zt^-BP}Rl{Qf?CVG%yEmd6dxP z?OQ7SF-Z_FJX)huoN_fNZM>nkunfX-$W$Y`LqgJ_SR^v)At)5zdP^9c$U5G%wm)Zv zRh)mF2LybhdV%XA@u_Xs#V9#D+_9;BRqqiQf18JC1j#eU$!Qp?n+Ia^0W5tUqij#m2Yzq=WN>r4%Y|Ub1 zWq8_tXhR=xDnMs+q3J%!+fuN4V(Hu>n|l}7B)RlV7Kh-zb7m`S+q#ZmER@zTqy%w2 zctWbYx=^GaEg?v-BO3!W=N?{tUVJzEkH?0J>B&d`DziB_gdLAs)thmFR0XFv`ec6( zrudsb|27il_J6G;szOKmH2zyR=XHwRKLJFF8VvSa6t8&ESkl^JE;Pm}>Nqggiht1% z*0oo~E#l)oI$@x2iQBCy-8?MXD?s&rMtF zROa1KRl+!wS8t<^aK)srT>EnAawtrXuG&}wx}4?7TJzEdAm2a$gk9jbb_&9ubhS$b6*L9;AxY+$!!aNwC;j#UmTeF-QI;#Lu8@wm+Y_kX$ z5qt^uVVFge*9Nhv;J-mr^y$64D2m`bdOj4C$`l~Z0#*+SULchw-Z`$T;MC!AS&#C2 z{S{WmJH2Wxjl%5u#TRs4TGxN=)^ZecXq(5GYuyXZ+pnVY8Xm&(8KV$e52uVa{{=?R zYjSGf4=6Y1ZtV=i3v}^DAdLn0#fzW1kkmFUPjI0;1BP(IIqej0k8I5N5C)?)K2CqZx3mvi^BN<; z-gB^}2GzecCzw|m4KNZC)@yg0)Nm!DNEZEIczPW$x>ekZUM8UmMxH;Jgc^gmrHPyk zR%XKX^TS~Rj)#ovf@e?bgK8(#lh z|E{$3^$Wx8hsr3wqC|fHGOikoWHuhB%%+Pw>bFjVDUvC~`fZc$u`YSj<1q!MZ*uX? z8c14?(L$=}V6fODU&uor$r|`Fi!sq#@D#W}8M8Ktt~^I{Aeg6p=gl*^^rR3Y*xrqP zSk6gfF$A&6;=|hFHcw}gxvU+=Vn%^JSG}=NWB4vN%aP5yMWmwUv)hFi0 z2><(f^SO#|7+Q%rewc^N!d~Ew# z9E6YM#WdVfoH2iFd12K63KjXE!Z_jrlSw~ZnFnF9X~Z~4&LznyrT8(zukl>Z>k?o| z3cO+F2|+b5{?XLc#)JP*Nj1P=@o*TqsU+DHQ9AdSzTHONV>_+4r>v7K+(a_9g=pFg zvEyviHt);+%y#X=t0DKiMCJvY=mFRL4ERjWhpN>lTvLBpwhS={t)5MZ6rNS|*a&1D zFgI&qNpvfE9%U&9K>G3}0*9K29NPb*HRNPiwQL{p@ z>Ewu-ule4O(Lt8=l_Wa!KxHV`+KRP{66fzwjT(P%GC2vBImFQ)e!8HS%_CS%86%Vw zU|qLwOKEB&uk1qAr;%>1&$YIKTF@b$ovkb-iWo(O0vnc(VyOM&S^4%OfIZK7SlFx& zsBwo7ZqOVyNcf;+XrgsMNnRhXRO>G>-CyPOVoPfy7(VC2Ba zzK4JL9VV`M%TKs&Ol3;*JJrFJU%DdteU??4ejp&JA{B<*XoCcJ&naqD2IeSdU{r=7 z7hB?|Z=fyOF-8F+_w&MbgG;e@piL2%Q7LyFb($7l9bc*O#2{qN7JB;A2`B;jyt?)} zfOF(wl4Al^ehJ8Pna?ex1NT*2K`>GO6VrdNPeq?bal?NjrV+q`VWGej>G>B@(+`Ne zwuuFJwSK;9>W&wz_JZSHtKC|nob}MuQg*Atx+xv>VA~5W5DOL27K2PZlepLiCiLDv zN8TrJ8HA2zv++3XF8cX%{|>)>8mIdb>gHLqDa4oosz-$YvC~O6#7ET{-@IZRRE2-c zzROQfB7pJu%4#;fSR(Cz(;K1&7nV`V48rX)zy(xBLduUZRk#Vn_(2|Z8&kO!sdrl6 z^vYbhl+{!xBO1`gD1D-qzXl#CvUWV*VLTD!12iiXUox{H$JReq~S zMAYO7qI%bkQ7f7pf{GN|fz%AKP%S0beC^lm8y^~%9h6gy&k8SJ{&1C&Lf?LChYxjr ziklb{k8ty~9wh6;?1nimz1+CK=}YkFNhe?g*doq_PyFiZW(FwDXDPW&356R9!(5*=a8%v0P@+@$SR4af3 zHcov)=3R)&dM>BVc8$3bisOxjT!+i@CoUT6=I%)*Oum7#@t5+zNt9%CnaN09+HQceawb;PZ-|`*cY1FdH^V+s_r8K z5W4W|+_XaZS%pG>2xagRi`kS18Rl9sit$|q+-pcf<@qMFJfc#>Ng80^j&82hjNRJ8 z(g&apiM5AxN2}jYPh|NlZ084mi@~*TO$ut9mw;Olw5KN=tBb}ysM&ul*48USsxU)g z%oi9rwg8=mMr24IFmH11tZn;f%f9eE9^?>D8(YO;=An`8TQSnTQB3%52X~&_v&H6m zRf|VQQ=MP?Bz`cq>J3|l0~o*uw)n8l*FB6hEusuzNc0N%W&u$kQ!6r`vMyYGL!;;; zHPAd_e(`oqOo8?NZY6&};8CB1co>dvRd^1IeuccX-E;-cD%thcJsduyQ7mqgcCF|U zrAM&~Gil#nc(vDcbyO<5kNRv6oR{3dIV!X% z^VW=T4uElP8RgL6d_swRNAgCTmLy0`fsnn;T_u8UkK2A*TcLUXLB|%{zia)BeRp8z z%)(=LwEpWoOjjt`aV*nPNO#N1K4*afA)zq@ht>c6pwNHaII?0Z3K#@$$KDs#$n6^& zbeiM<-p!K&<9@8>?A=22ZnM($_+mPKFv4#!%s>W(tD<#V23r6Tyb5(380Q0lS`bTQ z`5OZrr&&G6adoAJN~3yQ8o&a0?7GZh+@#2f>Ue>*wb0Fy=v=OPR62|XEfP8d+Qvzq zSxA1j^Jag#=6M#cG8kcC{h1nFUXd<8>+Hi7c*cAo={Wew8QKNcDdqY_K6+eIKWeQi zIS*yo|NX|KGKbeGW5<&P&mp9WAZC?erffiin)UYe9IEZeb=}?fYH# zYF5zo!r|o^>v8*z?wD4a#%DWn&23pg19X6W!2^E~Pk<&mGId2&7aEuHLl_Jp`^ug! z7c=9|8q;)y!YCIjCqaxuWB`qb?Zp6UyDd}c9KqgUhqG=)zQgiQ9cK^*d3BG=bJ{H=`6ADYM zw10p0c@2Dm2Bchu=E?N8O5<8C4_CL|3ytm5(-`;GWif@CGR3_GPSa{QfXYBo@yD76 zA~RxLX@|Gv==GzW-?2&d&vX^A0Q@f%XvY629K2=qJ)Zi-?3W7W=Uq$6F53>G;E;<= z(ma$SE~SbCBB7_Hh{ja_{c+|n(a9d*Q3QXzk@UJl5;lq5yf`pszn?eMu11G$zH}7G zM*Y(A*1KH|Q2);KYN!S{OnPAfRr!E>`~bPdCKRAS-G^oA;?HY%RbwxZ$)FHxNmKKO z+p7M=A1tNKTA<9Ap6YOBhXf9#ysPf>_kGZ)fLWCzTf4Bn078G%r1YB zw@>#o9@Xg2Mxy8!$?kz0Uu{x0UHTm)*=MNy#qo7ob>GkA_l`ew&{2>kV9M$6S#QGf z4YJfX*6j29m##Z%lwezFFSHOfb*`(O&zR^u-8I2tZtvW~YmdiHqgu}yM8Ddo1~){h zc9D$qwZ__uB8vjF@<$+M-KcYhQ|f=Zx>1n!NFn;U17>~ZpMtMn>UZcj12o+u8xi**;3uII>vGQ^p(_m zNZGsYBo$*XXg=UT_f%cI$Yn2yr@ogy~?oLW`fJYc5$pw>+^xQ%e@r~LFc zSGQtGMf;j{x%_I8Bz)-iJMMqjqXDO^_*dA^dZuk#=_2UyZYi9ozv_JDk7z(ajAJnz zC&sNy^Bt@#=e)IZF3U=}8iW;hslPU5XC!jiGLXhRlwD0N&FgH_*4-Wm0LhlgLfo_$(C6Xw3iw<3Z}7{x%Spuo4cTJr9>va2Az>kUcpvHf8m z;bj6#hPhKKPm{{1uosZa@^G$DzP4%%mFAswTMH?gN6#!Qp8AhT z#+$qAE=%V8L^qOahB|-diVo|?ic+BU+iwS)B3iufVrRau1yx6s(FzwksNd_YBWc$J z5|yvdO{R~I(tGJTX&N%W*R^+xtPtMAt=DH$6jd;{K1AOI<#|`7P?BQjIWzxuK%M%S z`xz!o@GRH1uueO=WOCWpvhL8}i>7`^GkF%aV_d0oQBrm)j^Td+iA~4-@y85Gc+*WG zQQ5u&<%_R`OH?N5UbYjSMBZ*eXrOYt?OFKqOp6mlPy{27Z3eb(E2=AZ@wa_(B;PJI zkt<$YIx?IqjI4jW(S1sP>gE$Mrv>+Z%)V@8S4iOXcm++?=lb7Vc@ubmH)mqRt)k$4 zIkt2pbY)oZdrB#YVLZS$B(S~+Eq5?+$N^kmeo9l}R&%15ryELy!|CMGg@!A{!lDmq zUbkQta>Oi|cSZ|$a11^^>}L3`L|6t!3e;@`?hY_}Y_5Mp2OXsWHI)6_a2XGNnkNQa z#$he%N8)nRO@S1#4jCeTYjc?1Ms+AU9TS95PhfRdzM?Bl#L1GS6m7uc2Icx7sl{wt z*FmJe0u90$B5_ntZpr$x==StxoVkeU#e71y*PXxhH*oh}lIIObPcx!dd@?bQ)!wRJ z1-%hBI#GXYhhF5BD|3EP*9DoG`NdOf3u@A?1m!x+zI(x%85K^Kcd%5cwT7Dcg_kQM z??;!0 zR+uZLefUVTy1-9oGjK9txd&)c@PW|qal6BkaPWV@7iK`lFcE_BXT+QInQgFkh{Zdj zzA~oJTh0}}S784GTrj%ZBv-%;q_}H3l=lOSSkKDQ&zr)IN4D&6Yxg$@(gxSJs~)VE z?7(a5m(23RmpKejo=U^Yx|I{M9Gco0NdSUkP?^2{fxl0bnLI>i^f*D~16EU*F1;Bqm|Fy}){m61ZC@OdgR|yigM+qY;l?i!LB|j4@VjjvfwRK93eR zT)owdo3iFLPlA8#)w~@?3o>cD>?l&{HkEws9C&ErPE5 zDkIBB3QkC{FUnD}4w->C5_lkwKklyIpEG}>6WS?ti^9&^)`9gqh~}pRhED+_lS?Xi zNn4@2-^leOd{JYVkqpP!ov3HTE51}C*N*~^=5~KO?umZ}HsjNEF zsdaSx&`^$Qu_`k(n>!s^&Ya~Md}0?}u*nfdQBQJGLK*B!48${-x=cSBoGr$RH`RaE zo^5X`XCUzhPw`73)P)e_Q!+Q)1qUhyJPEUI@U?7jEsaMGT)?cz4-|UiAp(JdIXiHO zct0~2pKKkEZ8g(jelWgDw@dj$uDcdKP=Qo88+ZI1+X;t4lZj=!Id7L=M_n;Mi`YQ6 z&klx4IMd4YcHKz%eil%!`}TKICX$A7Yf_Z z4*VK<~9xWj%(J%(u*iJq*N3$N0Zci4YmP5U*uT=BK# zUuu$mae{bB2kB66y-SdCeu((-nADcuESZv{p!&^mso>Ep?&A0`RmFLZzW2(VDIE@B zZ+E$#?Z$k9%3up9f@Ij+L^Fro^mLN!@LLz9?&<7oC3{eLp*}!W8R!XJonM`QX18g+ zQl>j`H(bH-x%bVkUhsd?vc=nMaPu~!9p_cfNgi~|KA*K=xZZ(`jFfTrh;pyoU9>F> za>#9*_ccIIoWFLesw* z%jP3rm)cpUaua;RqvN8u^I1;`4fMC{o7=O6_fOu!kr-!m5`BNM#Z>EO@RRefFTl6> zLBz~Xq`|s7X zfaL)Z9%{iy)yk&p%NJ=q>4NrW{bq8Ym{1g!O8(1W!8!CpAN`)~3RY8Xx%u5>E0Sch zSuL4f;#mE#sZD>ANKX-9q0ADx?xVgfc&)p;U7St^K;1NKa0+n|ft-W846v$p;yaJ* zU{j6B1eiZsm#cdBXWKx*N+?)&hGQ^T@v`kB(DllTIU7ujFtDN~*d0&5N&{7~?9x^) z?Ofmk6b1AuApf=f)JJd&^JvWHoY*Ga@QZKpg*uWtsMUWg1wk~f7^lPtqt`>&W`dq{ z4w1@}>qv#G|FPh4LZ<1`LLyCgT+0R+2RPMEZ|BC`k=@_ zuQ+!TvJdGxov7A!2TOX;3t~hZ+jJ8V3(3I{JsO2Zv93z@jx9rDm2OeXwe1Bq>T`3= zD}Fi{R3%ZirpzKw=C*?2a8jgg%jyf}WK+lSA>GOJfe+nQUUM1u<p_0HGstss?`U! z^+163q4%`l5&TM z6l~DMtf#L6qM5RA72&H|jFYE2(HTk;hI=j;Y-aOOU#)nM0_^d9dVCDsV8BCcn^}LS z^|g&9*I>hkl@THjS*Oipmg-!ruHj3c`%0drBm;O(U3oF%im3L~Sx1enF0@m6yApRF zU%y90<;(+66taXyFGJJUF0LTNE8F6|y8&|rE|OGvS5RhfOE{lyq8`UIFdu4pU&=zg zk%nZOJ$^O4$K>e6pDRk0d9Z(sP@#X8n8ts=AilZ1uChdmHQ$TL)Ed$hW$|mv>W<&M zNWcC!AbBK%a>&XnyS6k%wJpJ+sdWt^|67@ElsFWuIRO#v@q0qKQ`tFI=o~+(l*o+_ z!VTghEV=MWWk(CP4HjNYOP2+Zdc>&=A^qPF@l(5|0Yaduc`w*Egz@Sl^Lxu+k zX%;Q~mg+MC*j(PZN%lF0VP-A%`ZSE^n?c(|FJLzN%c@GE^m<-w9WYY}6oXB^!~jN? ztMbUtZ%1-rJg-g`{GN|+9!^!ZolX`P?Zc~_?%uXX)Ji zI17fyR8I8%9QciN`=J`HY%^JZrGRynuD=71hhN5k#4%1a5crI}X6PEOrtUx)q`7>G zk#&8e1#OkiW+ZPb6|$w7Nh3$UO>9dop^L1Nno7!#F5Nt0UF}PE8{5J^3$$cHpb_h1gFNO9kaM-2|a}&5+8~e;YnP^})$8^stDQM|=D)0^g=bW>6|WQ4JtN$00lA$G=Z2;U-$fRa8E3 zSna5Bs#BQia9P6GOXWCptzCm(rJV4Q<|mf(M4nh2p6v;!9%d-l>=W0f`XO!>xsHz2jC2h6t3up~{2FiMi zSu~%qzgZ%9*Y{&F5c6lwpF7O%FcR~haetGBs&sc>K8piuuH%1bemf^m} zx>$J-4JIkr&5`Z9c{t-%H|KY9YobTYZSsy{-VN^Y4`a^{XA05JlU;2VkloK7XBFKc zJllj8Cnnn5+!Y^xERNmU6Tu|>y!NL7#^-!*@0_NZz%m~VW?LXlpXcVgqn%%0mds$( zH?CH>6EVMS7SfR5cQ>}cQ0E7OcEtcvkIVK_0B=ve_CtX&59|(WGKIhojvKP>AW2S9 za^j$X&Wv+^m!KyOyzmobH!cecao}C9wTL-5#GRr!drBqNPw5}GQ6>V9upzkz~$`f-Gi1^m-6F)OOcy)?gSy_m4f20{EKf!_&0k0V%sc( zBEBh#b{Lxh8s`5QedK`Q3=0G7|6YNA$A1?9LuSM`oiBITPDF83|BTN-?XA#URR2uq zP2p;P_Rrxgi!Zp!8$N08In^kg3X%T6UpWVwW?!(eCvY7x1(SCrBiESpT#eL@%4y6< zupw$-G_U@OZT#Fm$|lz0!@_*RP)eSuF_{t$E+% zx6wPrrYHO!=#~UwKdv2c%cthu?L$S0+pkFwCs-@!B_VGj;#xB~c?M02B*4;9&&Y#+ z^1!k8Ro*L!MsII}$_yzdi`IF%Cz;e(zD+%cE@y=r_1pg3fBa3C3c?rU<*xo{VY7K5l$d%6@v))c}YwlBD)A4#oexe zao37;OoU#pVl@I}T~|==n2&7N5O}(O*L%{EeI18-m&O9#cG5BQ%EY?DT8lTCW&P&@+}b}OaSE%C~0 zcnr(_63N7LLw|4%Fyuv_CWCKojeniLMv9ygoTq*>D!(}fGC`R)m2n!5L1deMgmD;~ z3tatX{`MO_*eYg0a9Ep7pDjZP1V3whPL2uf^*lWg^3RjUB#|eNDSU@z7-UNhQS3^n zou#3b2pVvi$T&b#673+G)7G?w;JBz8!)^!LhN2f6xs8(ll}~Lm!H@HYC4Yj+*ql&Z zEDmWNJqyA^oe;>y2uG-YstC1zev(uWXD67TbgEEli;zsL#xUN>?Aofl>#9V=D!9{X zl-8=qz{Di##H`}Pr0`_ubj-bMiHBb0i9hmNMaac7Yc)x8oC0})0#GulmeZ9l;C;B9fx`$ z5F!x%S}*m_m!u%>ofr#Te7OAp4V=Pg{AzFfsD_?_GalDBo>$wSAl-Rd-+5XpZUnmL zYiPSQ%VTauS!A%DgE4~D4s<=#=TM2|t*KS{b&yTlA39MptbiYOY3Q7P!Rz`CAg;9; zq(Rsx1IkC?w03bouR+Cs04Z6mp3B7b9c&T*6;;s0I;3Hm3EasRo%PU6JypN-uA;8&UGtM0HP zM8(t0Rn`G0w9w+9;+>)vh>D6@;HRG`bpHY5JMr=<-2_7b)I{VRI*jOn@KcMr?KN^IJz@#rxaCUd9%9uJrE zPV4q^7lY|5U60;}U^6~0I_WBzY%6~c-h9V_fobHE1a$n~%Eepe zUe~j-lbZLiCtl8fgKT%>!;9TF;?(aAdy}#a2md4ntHW>uPmy0<{c|T$@;!xFLWHmb_K`n6!od%005@r-;iww_TrzQ1RG=W%EflBe-tbN13fhhI*k zm(Q1S*SlMBw>m%Ur@1R83uD$@9z(Kmg+1+Nb_Bc+Zx07*Xo+=X$z-j;}`CeQn7!oNw?-t(V})?yW!f zuAVtqud>)}V_0^2KJMR>_xN!QN|dg|@r^nof8tqqi(XYp5n8+-6Had#%q8u?gLv}P zquJeA1UtE~)A{OP&(d4MR*ON-Ht$GP+x$9^X^U9AaJ#2&l_ohCp{0hbs5X|BeJl?1| zd~b_g$L!uluD^Lk7KYjKD%NgKutZ2xY(;-Ix$KAYlUr;Jf6fD6v@fpOx`VRSZFiGy zbj??T76`S1+qLaEG?6}jiI%Xx!;UVL0 za4sM3_&qaM!W~C6o%rrJ+`0SVb$uENMK`R>f|eKG7Dts)x}D+M%`_Cm14cO2h*b`*t+!H^`d3B$M>^pv3q(=?_M$Mov^D8>^w?(^(nu9 z*HgQ7zj6{R=-Kc4-Nnf|?iW#hsC(sYu-x@mbwsANaoV@5<0J^qZWxLd91fN|Ebwyv zsOQJmdzK}fnmhZ@3hjux&Xm8Z=qz97wXpX)_kgx34DNT>TgQpW54E|c$L?}Bn%W7w zsIqN@=8~G>{JL(t7ges6Jk;g}Ust|=bn4S&|K{`^EQ)P*a<_BB-`RDvp7G1$?xiQbOA zcKyxOE;<{ELO%$$qvw5Q4=2~+4f7gWhKru-k*RL!BHL5TU#>FJgKKAh+dbf$u^2_XMPj3$Q_lNs)o}cH5ie}T-(gcq!ufuCn<7Kwlo-VAcuZIQi&Vo}f z*(@%>$jZaXJRVg~<>GGW5#!{n?h(%3i_PJ1>)Wl>o#m@=_3SPCu5);QEvUP_ukKgW zHr?9l*{|w+or)O6y-~dFK1TIUvHhuV9d4(qtvW2iPTF}7`r+aglI~4rkA&;3st+}ku$@Ik=;YqKf8n?I*hBF%JsHjk++d7FGVZ;-PE;=^+0dH1TmOFV z`-V})X2E*vN$!uA?tW{3!i$1*||F*~Ve z;HX~H#Ri_kvb!z1L+?3gou{!E-ICjSzC9f9;R&N#dP4|byKc;XmXJ?!356<9{PuU* zg>E>d#64eNVF}K*!S#AtWI?{LM$7y?IXa#p&zs^s_l$nN9MF1tpL(R+2vKr1Q1KNj zeDxmPiHC0fRguE$+Gt!Xx}BRp4XtPS-fmv>LHT9l9%c`^yL4~K+u6(No{wkd#P*W1 zH(}lVbac(g_U0{ropp!qsW2H1 zBpQ$XVmE1R9e!HGi}s=2?+$1yoXd51510M&5h3db5cBj>$B)-d$ulv#C$}L3? zbA&B(mE<6JL4xQ(vo!R*b!UNLay3jl2C;ETqB1q=>37V0*XADKQ5A zV1~tI+5Dc=}s^GVCIV#9zCD^V=4EMfO0u>9kX-28)-itpK~Ym-_VQJ z-AT0Qp!M>9F-+QR|9w3yI-)0cW^Xa>pWwA!a9F)jzg$fo@u#0Y&jDQnh};u8FY^be zdAQ8hKAm6MPAn`Sy$mhcr++#lY1*QQ&*&W6VTz_~1))vfiIiyOqb^na0M(Y+N)8fV zVSYWw3zV&yT`&D}VtY%uC2^iC24|PYeBwUmDh~U9)&woeJezr9ot579WqE@!C;ZJ@ z<3Nq7QL9&UMU-?0 z{%WwFNPjMEyF9q#mksTEZ*rKaDjmev_i^h6yR36wS;C<)9)~YkwqY^6pD#Vb2>St) z8+*im?&wumUesGX7b7$5quzUK@3;5X%Ult6d|BZmsxQ0kq(_dhkINf1^Llw!r@lBB zy<_hhbuM9OIh{ktyR@IJ;oa}|AAU_{DE6I|S+=Uj;ZcUu&2cz<-)nKCm-X8|^pDkI zn9qjS`2Di>Rry|--Q}K7NM-QZ3DZFtY*%}K&!fvj_c`B`gEWZHbd^j7_ZNRnOTK#A zmus{!r&&jKTVXO^cE=AnU#zyzsI?ips_M4a-4&k`W1Wo+S-UrW^jbm3TE4GClWv`? zEC$}7e?!^W6zc=XXffyW;CUx$U#izijwUd>?b^d2F}xeGANNl`TdHV*KmiEPX+K(j z&U1M2dQNM&&-@bID4a3|J;7Te--Pz+okXoTNd+*9eVzt|GVywn2QE(o(;T7r7EP>K z8WhHWtZ&A~o2Q0Ez24j{b4d1NcRD@Zy>9osZ-X)i{V5!e$NTw-yxHuzH2BCI^oMFX zz2EaPdp@7f&A>J{o=q0#9`oI3u=UP=WO3;gt`+n*kL7f6?uq!|oqOxYb9P>K2Sfb| z!^=w+QL^8+*md0R_lL!DwYpr!p${XQ(nhC!ns4)Q?rm?=^^;}%?hab=HVMk&cGLfC z@_OBLkGH|@729dPwcXqL+)p>l#Ut)MF2fcn`m=X7cy6EO{&E`b=l6a+5?L941SgD4 z&Qv3qb#B$6JKQ|><7ao>zR)6DO&7OnQ+9{sn)SB3=X+DO+E4^@C%mdb(i-Ax@LFvj z^&n}rQBfb}t9vyVwT9$6Xjy8S9A53=2%7=*qtL2wlwUXgX+2xzmLDum)*|ZcM8}&h zjom8jjKSc?xbi~YJ5LkSI%a%-NEu1f_rp9-r@O6>w?(=lmkcKXb~>Y(zG&XiUss32 zYtfyt)1@^V)JvmXRdG7r9|s3-cQMBIcNm=SH!<&AD|1l$>GDPfSv4T&xKN=gudT}< z+1;P3w0-IH!o=yAopZ>RL%Uc$W|@ll(q08!jE;u=wlZZJlZTZWvjMSxWW~f9ch+Us zrQ_*xj;@zAnsr~6>EQN!H{QQGgYI1gn~mHW@8RLPzdvU8(c!*cT?U8VWqlcLHl>Uo zv$RtFysh?+bk-erI>lq=?drBOYTL$TE_=HW6~=OIALXOt*+;)f_OLiadob2~Ii<(F zIguhRXLGSByHhxh1{0}&V(Ic~lTX#@JoPfg+KaVe6Plv@?mge^Vo!ZDhNE9raq0CR z_gnkY-#<|My^yL<*5th{pBCO0)SeP~Fl>S{{rA~$a>3R8*Zs+Gy;3FOb^DtLRfFSl^lZD2 z&3d<;&d`%bgPu)ChfBG6rtB~>i*gUt_s&!6xbPT z-aCKp(PPrJsyLu&qK|p=G`?7ny!kOQ+}fnB;`;e3D@~TJVn_CQy*H}kj)(aSRDxe> zuskU?MeAsDHohajRzg$zML5k#I!UlX7R#UpI`gaSs0;+Ebclb5eJm4%eC~pv$QIFhi6{X-Dx8|`K~Wo3 zKtM7=RN{c(g47&`lFx{6TK)NuTWG|QD(_Ukrp%`Qsj2|sOSz0hNUR6x+InICRvAkks7``enh{tp358fp(j{tSBiV6%l20@AC z7dEJ9iWKEiaSN7)g9O7!>>eN@0)AY$Ld{*J5HiNLT;&`IW=$|hpr{MdvRnk0kQh@W z1e?|dnJ?z8mNRzjh{qrbd{Yb|U_#XVH8IKsQo;}3+_lrlp3~6`0Y#vc{tSN@sl zy5i*V$Y4DNbb*zA+?clS&caFypMaQxDg>5v4n9mM6a?$bS^C9tdfd5Kvi-q%KxCH@ zsOY}v&?|`M^M;;(Y<1ohCz2zKJI7@hBa)}-S^tNyc<(q02;gmdC?+o`^>7+3z*2zk zyJV4fh@(b?XJtqUuf2ENZE}A)jU62ytZs7m8zVZ0xsH>%$=hSx@km@ZG35&zsW@wn zZGL!$RILBj^FFs17D|>@`iUDa6g@AC?9bXCmdelBd<7|1(j|gB)AN~yxfN{W$X;{; zm&gcFi1n_v6Y<%UNMV7X76Q`+DZC{S1@q-{no|!GW`Rf!mog?uGK_yArM63i75 zBgX-2(Jpg@NpZy$bt&Gh>>69kj6cq2HzATC+m8eRIYAJuqZ3~vAzUQO`_fN*?7Q~} z_pZpdgt1R2AapGFi*CGLb7eZ)tiBD5fUvrOwLAFT zVm~Wx;7VZ=kPb>szjl!oxDAKF`VS5^5ONmfz5z9_;jlc1r~rV?QUD+ zbpn_HGvTE04q$%_#I4skYfZ9Mzn;q2`qb07shcc-3p>)zUzmJM43ESkgk!@C;EL`8pD;_b_9Q8@ERW5j}ZY8a}db{1rU7J znMouH2ZWk}Vg<;Ad~T}3r(IV7{xe00=`Ltq(BHL#b6Q;Y)TcC zp5ca}}`qll^!gWw)kk^b$#X<=CqQLJ(cXMKW&z^Lk*JGFM40G3z;+p`qzl+YPt zGmO*70V$j*QmKm=gM?#HCZY3T)qa7{w?W2BET{)UxmG3>Qf1hF>>`5b93*q;YdCOl z>>;7aa1&RKj;mOqF)#r~37&wPovy~MWEl-C5cPkgjxsM4I^RFITg5A`@kQgxpO|uF zx)yi6w=le;qVDkrQ`nhlzD1HQ`#gVPLBr+FpZfd@O5jBM&VmPN{ujD*mdFVx>FZMCzS>WCDy#s7U~||= zGzRDZzNC-M*_R80QAF4sLiW$P-EjHem=P~1N|yZ1G;Zj>>-E;LplkjBq4>>pJYb%o zs#)t#Onq>ti5c(BYmEueVAN0jk5|@PTwFG{r+&ncy~5~mAW|SmVMs(d!!kd(A>5{Qe83|2tjwH&V5`Jj&&0{S z5w%_|ud&Z@{A91eY#}3rs@8sG(FuwqdMa~8d>$jCK%kgsWpb+HkBDGyfwbVt5)1`J z{wav6+(HP6c_10n{|InCTsc5@$S8kX=&gB4GWo;^pV8wO3CUdimK(+v8&-EW@tIDs z&*trWd$JahA0_?AS?`vXix~EX<_~-PGR+xc7D|~VC`>G$*%m72x{srpRVa$@0%LF` z+$BsNgJ69u_z(oICl%LusB=lLi~Z@xqW4eUeskOS^1PL$>yGAXmBJ_$i%);=)!@Za zb2wJBpLMPI@xwv9qt4wZLTMB=ar9;@{!?3i@x@>DYXrx51X*l&Bs#lU@~fY){-wr< zvlqlZC5ho>(LeFSaF5Za{*X9DIctl1qLkz=#QKYXb9Z@6|LaMO7&qqk`(5`<3CZfa_r#vC}ceU#+w@s~CKaivdNqh14{SOZzS}2fZ zh@8sYGd9IC(U=fxK7)k85j6Obdbt<6H_O1P(Dj0o29E^A!uVoL0Ed;an#=q|q(0_< z1BWU_h~%REa#|w<`nLXI;7ep_Kjr8CfATnBKsxBGzOFlwLT`Yqvp#>t1qk-fc*{`6 zW=JG9^Pp%DCv4NUs{!jtW;4Qo8P`u z$C8dI!$Tud#Bf4j%#fcGqM`|cgdpG_?x_SpN=%R#5lb)+t4VsO5@ zewn0{>hF#qckN0nk+^iccTAcdS{TR{$$&I9devbmUD}B1-f#WK8em*;tNj|b+7Lgc z(TP!C%rJ_#?$Lh?uuP^*U9FL%3QOsL)DxEiffSU`w`xBqB~|1?RoNIC9)>B*$Ct{q zEEsJpVdy@@Wtx8>E!cBc=fNKE4BQdiRKvfr!hMR=$=kMQ_?N+(o+{CJr!n}a|NXbQ z@ZnIRI|p(8Cr*0*x{eF$owPvnD2?lX9+O50co%5N3d=-CVt))O}zExh4)&(S+ zTGb-34#R&Ngav}KB(!2asF)IUD3OSKWS`z2to`5RbDwxas^B^EFE&Yb;ET-^B22;f z^f4mwCu#pQw-(EXG-enY5lI##R9#Cb^8@h zETMnvH055P`j)VZf92)Owq%Z|paD{^@ipRzr>v2D8h$p2@LixYI1-M4!-j_!El*yq z-E9^j&BN)t;ovJb)fk#+JpGlwO&!%-JZ&_ZPi>XIxz${X&l;xG2gN@8Tk*4nzQ_OF z=d}=NcvzSEpf#SX!5yGqZl7G(XMLA6O}c-cYSz{V{r;7BbA3!o16>y&NX|{aUyZQY zXmE}lziEYFZLEAnkti9hg`;^pC4BbbTHatEXiQ)PDL#QTmI&POK~f8g(r!U>?1q4w zg(&~WaSRhIeWYUa5G6*ZfUVzlSJw|_O zQhsW*)0ztBx^n~trSmjqI2AJ-zC`;qdV%=GyKJO{1xo8hY)xgXUx&0G+g1mtL!_(! z&5GB4m3k``Z!k#rmJksgYmnvvl5_#SKu9r)WHN0_Zh<;3kvf(bq(_{v)r*VR5iEkk zFl#@VO_4x0;tmM?elfM%-2;}?ZghWMU|_x44=7_DR}-)p;OQa=LoVRR?%rE`wDy$x z&6@s|$FTHBq`Cj^na$t%O&U(n)CPuG9Y>A!u6f%ZapVft#0jnV+D4-U6m^`+SYhRs zi%R#^bPSR@cE$oEqz|3W#8OTD7dDFYx?p|IkzDOMO+Ak{tjecwcDXHt4aa}*wsZ0+ z+8-qKS!C;Z3s8hHBOpi9<~)meD;BJ`yO|p*3xU)x9_>+eXuan{4?&{&7_t>zz+$-^ zRAoSAsqvShX^i+a=jb)uw4a}mA8Yz%iC+pLz&XsM_QH1n=keIfOle8uCtu$@O@7Z$ z^qBYC@47+!GpA~5E&t$TQ~!VA!?Auper$9QJwvO1j?ulhe%|G54LCe*;ia;y%#!Vw zD-SeULJ-faqb$N8PiIIBEX&+tF-qIF9RscVqi1wNBMYR*U{iU>Z-t92eiEP<42gG9 zfk;?oFLv5H>2(sEC!_3S?B|LV!FaX}4HhC9rZ!FK^8u*yGl2k#Mg`s>gy{yu-7K2d7`{~0HG|J&ZN zZZIvJO3hnDql0_jITTAHy*?j~tFbHP=Z{YHqk}fDtu@r2nbSvq_~@ov7z9_WO{u}v zSk@(il*=!#*?cfeF{FRuJ#F-F<~uTWFub>4JzDEX&MeHeUWq}9t~X5FDJ~QNdf`4C z?UXk20w?1HiB3udjUCbSiuST|CDXddV6`w2YU^At+rchukO5T1BTSKL$STO7c{NR|k35eJWj^=-R2@JszZR0l}6j(Eq za)Sj@w|rUKFz^5-ZNLT06>9c0?gY#QRF}9@g&`H)DKa<;*&X@1pKE5je`-P(Hyh!v ze)OF!{gp!w)c^nF4g~S1j%h7BXzJS8t&^kv9I!KcJIyzBO_S7JSjomYFR-N)qsvF% z_v18P6$*E+Go62LEUc^m%;gNH-Xx%=MT%&ZmM*EC*42<06lHclh#buc4T!4rJ}G<8ttjN-jR%K%qvfrL9-$0gOdCQ70k`euI*pI+aa0ZXOzBnky!)yyd- z&+K%aOwt&Yj-u9QPQ}0d#FwWi6MxsO{?78hx&7rHdL4c==>MYk`PaGDj}E|}pmibK zU_ghGMi<;+P-iLpBpZeST%$gY) zZgd2Z^k+J)KXWuo_i=BfhF+a>@E>~ru!C?Si8On>n^Ozu&H_$s-ymoxDh_4AYonP% zYkh&_<^ZGJbmGf3Qq+!JQ)AME$jAbr=b;px*E@f6uSAFyL$tvYO8;wq{2Ct`T|Krj zlrzgu{FkCWYUX19`1|f;Ypos9;>OZ}lPb2Kpt9s$`d%%x6IF;g@prCbcKap{TJvZ> zbA;>bWUiWd>8{-tn93;tHDRGp9~f)qrH4y(TUv|xREf{LlxWSEHCXb|shgR!j=gW* zf9HP_KlKzS}NF3`1PFwcI!)rdUj-?^QNsZqnu%yV%e;}7FY1T>1&y885f#Q==EQ8)_xGJ2@~IWc}C?gUyF+E zU+YzKpZ>4tEGhNB+_#yt|L{0HL-%TqXuy37MK)1P0hNBNEJTWOC8vmBov@KK?eWi^p;IPxR?Se)Qqb%;=LVzx&xNzWm_+XTJRpPs1m7Bg+?yPY%Z` ztBx-o5jy`2?HN?PAJ-;;|tcJV^ehOY@}OKKyCFam~< z6mejv7(o;`xbc{{(pld!1lE7Y%fK_hyH%1#$G2nH3mkudTQsd`}@x~(Kb7K)aMERoiGGT~yOJ*2|B zt`VPLQF?J2*}BK3(nMGp6e$;69Q#N`-}Ntc_%O8EGZh^u~sbHAV*!@}zOY zQa5rGgBm&76NqJmF+|?v?6k%rrVxMwOK`d12&rTaUtL3UGfNiTuA?aUX32fOE#!I& zqFA>dht}u~uer3W2x1p2NgI5U0srv3 zDh@rYdxM4pEUhZ)&GrAm$iBViqR^N3(>ngb38{aB>T(ZK9gzCs#|tn~k@lNS{V17= zOuiHn6D*Snh{%HLZH=H<&pReZa@6xw>{}^yvFu+rQcSQ)wsZe_m{(cFq{gLUg4|#M z^0|aU%;)iwI-(0W#Kp=t3cnhqpSmI_5j>*v{s8OwVVck3TKZTri{Q8lRN>BWPAnv^ zNKt>6Dy>sSAr_QIs#g!nUlbUmc4bR+7)!HRlG$elw3_6lA_c(jY|j#ut=qkVw{~ao0om z@sEG`gFr*gANH%=b(IN6^9KmnpEJkbTK9j)N2F&KKi-3X^(wvN_greOpLD)h_1gz` z{P;~iwI+$Iihnf(%_U;1iKm)}RvOvgp7rB5Z}>;US3hFH++ZpcspM9-jt& zr_%k|Kj$0VA+MqAf}9dRIUzw`jw~#A4H$I290TK6dyMpa6d2ns6~W3{LnaPd=d(0B zzbLKqz4ZP7U!kDU`A*i<&!NWyaJ7HV_xJ1x8f*ZPH1+=n1HL@5@s=SK15(Lnwxo{@ z8yzn~mG-A4UHWDLJtjx3Q$v;1ZWwpHfJM#gy*b`O^fO-?cR~0VINA%|_tOe3At6L5 z8|%8PpMPMw*Fo(<5Lm32B)UGZTD62MwhlzvbLLN!UBBRZw#2e`BmAugY#4uNid4Dv zUz{Ov@IUh<=1>rLt#xWyx!f1?sKY7J=TO8Rw7WsW>Et8#Q;BjBBg+@M2NLa#ftaKn zQZrZ26^43l5Skb=YU31s>#Av?_5M9LT>JgmXO3iz_ACDtV{`R6-+C6*__Q>h{?zL) z?k4u{eHuw`4qW%Izp?a3iz9#RCtvhqzxhS_^>b;H-I+`+QM7ZBQ~hZBu? zDrZ>6)TXCB$a3j+c^xfsMrFx}imBaMJU~8L6Ra^?W7c(pVB53On1X*ayt!P2j%Ve)T15K~y^9irzW#8=&LrH4_NmG+SNz(DW3-a>?uIPk* z^3=x4u%MGaV7Y!_Kbn9L-j~bUumr>ui!Eu*56k{2RCH-;zThWlt(aqp(rp*i-k&1` zJ`WDQjHr;hE1qgxHMM`Ih(&Nzz#j3y@dRL=m1(1M)V%Zc`F2=ckf{_Mx6!`%`_of> zarDFOI$zw={?HEBvGQg;UPVogS}55=I>KD<$7nDzIeH;q{&@^a#fMAE*wKBU74pyh zO3V<}{()b<(>JfQKjd4JtE3B(i^HlExTZr4b4(D`{F*fSxt@Q!_4;EH#$JjU2gxZC zo);+V0ZPv7nnyV3uV%S$amp?jc!acXujXxm%@;x{#6kvfC+0|v=eF{8XW$lsSOlkd z>s>l6(0&CSTar6im_FgU=Q8KhYR#va!Lcg&L~$~ebc&)Y|M9yc?E^_i%cZaDXXG`W zouSTWeJsOMNhg1x#1SL&ir+a3bHzcK0y2|8ZCb7f)>?lhLi56E<0ySQ`pylkNuUG+ z#h;So%!n$+ z=5=nNY7~DHFsB3wsri%*a&5tAix4NPiZwowsn9-iN^wPPss@PFigKm&Y+7T4DUeuG zpW~N`5i$LAPRy-?qNj7cL`iqKa58gYSJit-`k^JM!|-jls^pf|e3q4Z{?5E0mFcp_ zXRP8|+g|wc)iFWwqQM$z&yi|yr$7*29lw+m;?#eG5k)9^c@Y;hGbTtzt{s#v(mFBa zY$eN^_L(d@#YAJ=1gstp68GbmEAYxH5E8LZ6=1;k_CUw^L&8$%9faEVSLRqXzCYum zyEfj1&%PjCGei?}7f&=A(a)?v{22p3vG}Pm$?7N8nEHbw{KT^_w5ROy@u+a^M>~K0 zO+SD83^jiJni)kZ;_EJiu2m2ak`len!t^W|qe)O85GNdo1f6iMyj&jVP~z!3YFm{V z<4E3-uY6XO_7rz^ND7b2f>U4EAZ#D!R)j!%uD2_a`izO(f|PEZw`A;{%q^*QaF;Wb zjV2ZzY22{ZO1dndOTEU^;>0mJv_?@8v4DS#BLu6(acmJ4V`2ygwM2wghkWWkW_03wM%%7P?OU~mWd2uo_S%|?W+IfbERMZ z2v@z6d`1E5IOi)6&FlZ*NrQh03`aj4_M=DOpLukHAN02ewyAk}E2V;2eKzl+BnZa= zhyt~7iLvHxRV1>5*&fnz6xp4femtM|gQ_;r_i;A>$JLS)t-#7dDk90?U}|P zHpxljz2@YKp#poW3DAlDocz!p!9$=@B41x>hJWWPNF}|w=DuB02NV@v|W#zGcu6t5o2Tw9r#fZ+kxe$iCaj`nF* z5Fv!NULE@u(%QIfu1?0E+QLNax@pl2CLkjWU7DSd65vH9#PVtyz}Nt zxdh+}bAdT%ePr|2di{5nt~qn?(K(Vc{Hlwi3Bu6~Ek_$01ilN3nJn;BGKpVi3n}3u zB`|D3&!dkN;^uw$yxoDU#1DzCdbZDgdoJ|8d9AU`;Sc<#$0$R;^pg?x^qxSy*ZW^< z#NxBZpM80r#`=GzC;aLz-yTu-fzA9>^FChx>@kP6`EM~iQ5ImI1vZKcEXql<*RT4k zR_tK;xPnSJR$e!$fNBp5dOsY`U<3zd076b3LT8|2PcIY?AjH||L{ZiEVYHelHj8A} zky33bs2L;)vIGmXu#cpXPnB{&e(Q$Vy&+>#Z^S}^otS^Y=1fT!r2bA~Mc!VE`|%7P zjWh9tX z?*4;WzxML|%sv0uyY;iyKQ;LmM?z{11R}z;c6U0h_1*}qiv2`Lv^gb(+hZ8xJjWtI zP%YR}#E5?hZV~NgR(f8=1R>=f%TY%lm|(P=uTogaj_Ok>CzR^TTNHbYOvjbl-~)4Aa|=vmDD~?Np%(`VtyyY*wbs{m0Lk}GB}lVxH;r%6 zQv2&*XB*=SR_=@&Z0iJY^3FSdlIj^4)z?pdjigDnFA8MX>Q`Eujx3BvZxGXESBM!A)OKnbOjB4QB|V&qiS}hW z8FiaI5{hfxO7||f;We-J9M%PHDbQCzK^A;{dEY(gZf)xQ0AF6-;Ki@+OM_oE)%e5P z)$C!1bvAh)2a4iN zq3B*G)?WDRiB%q;EYF;PS$dx~O8LdpGnK@&2?pQGN1I&;_bQP8#aH) zQs8DU=5fh@itW-xp3Ol@lhCOBK#46G2-bl#WuGnZnY0-9^#tKPMmlerdM~ z?8_+vlIseq_fanOwN{#$_wPO0f9>)9a_-(AbGu*H_h5C?tDn6fA^08(>XzZWDsDkg ziM2Ekh|W2}MmM6PP-84eu~=BA?3{n)H}LUvjn^&@qaoHhMWDTo`4aD0h=BR5c=it$u`P5TVn2=tY*X`?eX zb?}!T5PkQKHuv>)K>qaZWWPC_UZ=N6Tq?{JlBH$UxdX~Qgq3XghfT)^t?^9!#KMYy zCyw9@1&@fxegqpjLz3B8G8t-a&=bE$DO(X%I{cA);SNa7OWHhxGM%l(?Nt{H$r+h2k#cjTS3*@XlcD+kEfmQSnC=2_>0hPRaf5|SIjDD6ABJ#Ha z3PylA^e}!WBA>WWuEVoEXs^X1)$5`;>TO?P9F&#+!?`sAR#LFKpsEpQ^c#dT~Kc+Nb3{)*eL_#V+& zuVs&?u;_9P)TZdyL>g&a?JW-B7u|<_h_9~<&c%X}_Jcf{en#3ac5Y8pup^``1A%vS zHW+S4E1H(C>|(uUaCjk?_7yQ(aG9Mz`bSvmeYcM@;N<$5VC^wbHGUp>6^y3skgIhv zQ6-P;e2?qCxLOqQ+xjMd89a|kBVefSv#L8+e&ro|{r77>hbf$V&4<=;>uv7w4qNnl zbr{=`&C~b;r2o!I`Cs30J6ai^dH;@aXbE=whj%2+d<`HST1ZCuHCGhMQ7+1TjP*HS zE^dopArqNIvb%*)_5Z;U9n(h%K0=dJoZZ(Xgum~ni6mtT7svGYFO-L0^l zsub>}_285LnydQco}(|&$7sK<#rH= z^20sD!jaJdzRi~;2a6~{{Mk$4B#US9Aa3)p-|uH(K0AbeIVcadBLm}D5I^%UGz(+0 zAMd7nHup%2{iMC|QraJi8$eA2^8+NsqQAR-?~{skwcH`USazI=ThD&>YfcVa_Sz0z zuc_-9T-bi4ICaG^bd%d$S#Ly{Q>z?Us5&6bto=0F5s^4a_t}M`@^-L)dZTqTpj4g!vEbP- zqxsxEWkc;Jo9@eCdMU>%^=40{q_B(jema{Vza5sxC15SYv<`k6 z?Ll&XKCPnw#CiK?TtQZN?shstC2CE1zt&6NAF}M*zR^9zKhJ?arp3AvfBdzdZoQ+v z_FO#v+K>2my69tv5Q%b%lc>T?Q5J}U59Z(!5QK=gZ6I^UuB7LsV_|$m53#~Nql_R` zp$jcFT1$jy({sV}2z}ey>O}@Zcy=N^&jta1xrz5X`Go(&eO%u^_rjFGUU9gzQ7B7Z z3ao>O+F^T3F~>ncsqXv-Am0$CdL4K9!KKgBJm=afry5hN9k9Od&k}aKr}ur_{2*r5 z$vKz8ON(F+e`tBpU%FlNFC8!X&)Q!8O7~Jxlc|y}e4AlF>Lir;da`2)0e+qwOFXN8 ztU$DgRUsf^*PMW$p+*IZB)WLw@7==Zqqks9!Hp z^7F3n*u|rzK7)*Nx$qxKM$+cpBjAJ;TL$p`0_Ai z#r@INE04ODqW45!E4}l85obNIy10|EH&3j6T5MKtRty!pPNj0F$fGc~<`kq$E-!mq zS8GgT}Uwwuk8=qXL#78@~kyh=HwdY_nQ?BA~rrEEoXu3bkJSo_S9iz)1X+4zIV zGG;=0-Zc;D!`ng(IcFsQ{CF5wy3U0-pqG+d1-#(Bc*CQ{T^`r<^R&KWagUkrI)Xc0 z6X|oN@>lPo*Q3O_t)m&pYj{Xt=bWn5YZVcCbvz~;X3rxcZ;6tLElM{Sj$=lL9#gqPRk0vdeP!^u zN+0VSs=C@c%~zB(EmIgcXWQ&7%#gs?BOauDF^Ng#1#R++5EG$?V z6E(J>iiW%O6x`x%a_s`WtSuT)`eu5}z4Wif~eRO5yKXY5jjb~>I@5Sw~7o`mpZ z=9i-IVZ1)fr8kl5qPhqVWRAHDqv!E^K3_h}X2-Y9#k`a@)|P8O+>LMNfk%h=XP@v} z#kZZg}umqH_m-6x{u|3tZ8?(=v>#J zie&Vz>PeRI9Aq>lYG^MI!%HUCwo;4IZzfnje4T-Br|nyy3OCqUfxUy!Bi6(LwY4Fd z6XeUOGIUjLlk?}hU(v;@DBs@~ZSC7{dsnpx{GSlPJ*)a;q_&B+s=0xyrL(7cD8%R;V zIkH(eN-q6t8)1Fq6VGRQGN#h5;iEvw(|f%J@5TN;#F(UiJGahEd`qmK=G1}mJw4}h z5WJe~ln?J%C(Z_`@f6vtJ=%aR>}DQ_nrx+GAU{9G8r}%#QNaduLGK^)9NEV&)vuU{ zboq0E`P?m@k++&ypLZ`D`VTp%@ia0{t!;y{p~y)kZn3oaSxDh5XuWXGunc5FDsUZ; zRzRlsnZ^%)5_96H;B{Fu3c0_##n=7)*yq;X%W|$pprWUx;`^9dAs~$p3QBj@6UX>k z>bkMrLhy=Sr$UeOAGwn3&&BGm+~|bsh}hlT-skBUWA9?S*P`sktJ5J&*~#B+e`*X0 zEh}?|q6^09d$5I{oN1{zT}nT3&gE{JqPDL1OcS<$a8$veGEa2-^lY3ON)Zc=;|9&U zD}ouh=ZM{&x#fi97!6PcZUlZFC9MmJzs@pF?J8pNr42ylv5hg_#WK4()7JP969skI z^&^r$=5hm7KL2Z;9iDV&An$Sr9Si6+dhKEY(&dnL)6F41zaJ4SeMOX3<~J3t3q`@G z91S*q-%le>EY}70#mwS^s<+oba6fSJfL=abcCo0ViVX*MNM#!k#e*nk4C}lkS&~RJ zsBse$#bhKJ6Rv*l%EFzvIb*}dB(hk|Y;MQUoH@R)Gc~3%{KS10qxP*p9zdDg>WXoe zKlJB!&+pdQ{QQk2naj%sD0WEIeVYb80$Mw3`8L-OW>5%@^u+bPid$ZY8LYTANL zpb(@I_1Tq;9#?nI{C}EHhIPZVbFJf`ULthuJI4zHV zu^QWS2OCmJ5>~Mrs5V$oNoiWmsBAxyD*A$|biJKD*ceYETQv-df>g3r*gM1L^lb2r z)5l;VMF1;^XbNY#7j|?ID(Ru-XRX^A&Q!bj0GS| zT}R7}@c_O|?4#kD?z!LWR?~jy&rJM(_lMiR>w1(Zyl(zF4XQuR*g+9eDMPv!1JsBk zNj`|7Aw+8vH1CtM<-oihlXUp!98(wtsJkspVsEbxG-E4}m|r!kNYn(O$F^XxF%^D5 zOyQi%W~ttS;0z)#n_zj7cn)rSRqON0Fu2_!Kab=ZT>CH2t1N$vrJlQ4e;@sS)PAvU zxX(?cqJ^W{nZ65@W9WNt`V<*;8!eER^qP}TabEN8`{n_h26FpM2q-eZ`MWSHgqo71 zai9`jJZbU%xfZB%o{C5PHRJ;6y42Pf%ZWm$vJ3F7^K3i1-Z{?ayjxSh)*EDbr8QjP zR}J@83*41-d@dJS_xv)O*wd$f(5P?s36QO--yHG%l7KseSJ9od-F@3QvRybTN)p;q z03Je>J6Wnc2RI%?l%~q!+vn1`ou#RI^np>jClhUrEx|Hg#yAf%)UjIN%V2Nu68${m z?#|~v>R$RCi}pM3zu`^_XvoZ$9 zlvMqm0!cv~)1=BHKit>jSo1Zrlp{DIb= zF-Vm`gPf%@#y-AqkZ+iOqpo30%$Es)JW`k9kOAj@3N}=DcxgV)4#)Q_`5b*5NBLu|arw6~4&@y~*4I^Z{p(g&Kz!g&Z!rB8zBiAC={y1s{}Yd*J_ryuLS zzcggnClQ(-rpG?MfFB!40QVwz>u>n-2ET^i?dC?9CATOnGJbO;%SuO}fF@K$ zkjum4S~)3QtRF$NvFp<*P5BVNMjK1>Z6+*=2%v^w;UDTEBN7V(MV9&CBW{685s93W zjMk^_UW&+G>qH5EODKhXD@x>Xo>Y;mMUJXCs-bp~%Shl%gmmIe2u_v6+W)hgMm3Rz z^c3<)B;}Vw2tsZ-6fW=U)eqR$s&bT-oayu0?)q~*{|u!*@fMRVnUD$l7Zob(Nv@XV6soB4&j zcn9)*q&@~$GL|6alnFk9BA~W>4tD3feSdBzP>gST=bXTO?~u7Nz05QNs-&HmlJxt? zC(!L9)KzhtVwtn`FAd&IJ>=yhz_IrWm zF3~-de$4uR*j4vYu0764SvEQ674p>kv!?5vb!Sl@_m&m*MiEzHYMpAnV3pW$nJmum zJ)J4B9bWZ4hiBua_XFU&UcYL z8;kHdENy<_Ijb-)MqS>~yYba@cTy^dugBvw*ndAy>}iAC%$E!?MA8h2)?Jd!-^nY; z@6ASktjCsYX>^fC#I$z2k0Pe3ptK%86UDq+UpQBjZ&)#H^>gos{J!roR#m0jpL>7e zps{|@#eqMu(e-D)-TEOb)a%!MpY?YM@$5s@oE(z*4DU-YDO|=^D(W|S9f#3=Edo(< z*jcS9yW9p=&Pkxe;gR3^KQw(qoR%MF!U;fsD$L)U;9@!?je?12+K`!hB65O_=)PZy z8m8KiiA*BI0eoY-l_hsUdi{UVPbvon%u?8L@7vvoZEH6axc!o zh-FD)$%f?8j;PFuGaGwJ9&KDD4Iy{{JGOAUxW_VvEqQ_X6(=>p{&SB5jQ06|Kg5eY zF0jh`Tov(c7$0Li6b(S*DWjv-r0e<%`1i_|95sHfuHgIb4#ikWPO3sAOfbd>-@v}K zkW)*om4jO34zXywFDLe?AiNspxX#;Z>@g{rw?A^{Y|%&oYI&RfrJU%W zbLWPY4+VT3%EwtwA&OmST7v5o8d}(&l+W8IlGH1AHUs>64!CU*N0XF)6U9h-JtVKJ z9&2rA)h?)X#@Vj+#iQ>{5^ILIZF?t&NmYpalur-?@cMR$5g$m;x$h$%3bl0(%hlX{ z6Tu#FCt@;DlG+S;c_#8f-Q0OwlJoq*SBW=u1fPNw!L=ZhiaTHRn%VYeJ>8>DsB}rf z7oOtr`Va|yueg{xdp5j(;y7beChoXg-s>jZag#qjF1lvu@AHSAx2w;VyDnl@kkwvX zY-vgC2~a?-(xUNqg`|umRIrcWx@OK*yffe_?4l!ap(0LI*wuVU1(Wf9Y?G?-Y^*(0 zECum76Hp{#Rvtq!c0m+^Nz@qllr}KOP9hGKrRxGSu#d1}#FpTH{c&E-!~`;!+QvxS z&L^ykPPT(R@*4~ZK9Di~3{&$0LF1-C$EjI&*08iYZ<#1|{+U%Ui`D@dZ`e8C7+=K% z(uPo3V_oVU?}DK$SHu|2p*3?au^O*>9K==Q9iLQe8>0}P!DvxGnrSQ$I9KCY2H&t6 z1Jn1N=yg=!u0`N~UkdOq%aWqDlPYNljM-A!1g0-0;!T$=$2P5Ti_+`ibON<^7*1~iE%$@oK>4{N+fR%Tm!7xhBH`AF;fA;6wg zhXVk3Yw4$eHoKz1=CEfY zms|#@aUD`99I|}|B3N8_<^zy2&uW7-`G$>ctVccQ_XQrwMMj%4w3U#InK-;3HnLj z^g5V-8^#ClhuxAC-pt6M!9gN9_n{l@-3m%nhpKQXbs3!`qm z|KYO}XX1sR9uWEup`I+BvzxJtew144qx%|p7ez?oEF;jmBy&%1dlt~Wdwjd%_q1#n zV@c36M$@s2duJrIn9q$@l(0;Da^AEN=T4IUuH)z&mHm1ymY|2_FI@aSAO5P_XpM4z zmWhX*M9sm1Mi27-{8Jn8$GJWi(_Hh1|LJ>>(}6;oee8H$ihM?>Q~H&@}qi+<1a z=)PUm7$l1*aF>|t(D<4j1mv#9&?pwg?r!icL`Y=*R=t6IUQx z<3k2EtV^H*Fo5Xq9b6{@>wtF0kGkuB6CHm2dFP`7^|>kdCA#pDdgT|fbZI1EsdnzU z-r>rDWxL|L=)y14K}e%lH)WNZH1(uv_q=MBT)VGE;2481Xd*IHc>hJ<{oFfq{kwMg zzk9C5{W+9p{_`{4o+DbiJHC&0(cajpy?d@Jp42NJIy_B<_=WixFf!3Si0dDJdI4RW zk%=GOk3Vo5J?Hx8j(nUkU{uPDcdf4>QQtw{yg34MmMBPT;7(F-@7kDQa{HRZuZ8dC z(^J?5qgoXMJF37VN z?V2~wW`<8A^7`nfF8bOC^%-R2seP4CEaAd=5C(_HjaB|NJ=bY)trOPabsv#IZ%Qvm zy>o@K#+2p)ti9wiJh88 z=TCt;woc4b@fJ`vo>XSTr8x=r=-20dcQu6lU`t^%M-HbLUqXo-ngfps_h}U|MoNFV zD^iEB^Uv3MUeZHEO&KKMgmmBaKhKS+zP>GR8^8cnatoIHFspY*Kj&B9LWQ&wA3 zd-l1q_iAE%7XpNTMferYq;5XKRC?^nPoXS`yr7y@71*-Boqo?s>vB_fV8iQd2}Sne@qtRteF31Xe7Dq;#VPvBgOI)voy;7Y`JtY4f0H5H5Y`e*<6wLknhe`^?b zXBt(Bllr@HVqcT_V$VD~J^Nb!OPL1;@kLX&Dj{e#{Sg2`L*}U zpZ>-VxB1WWL$RZO)#nUq71A?UgXrA8@-*KjJJjy%)?*|vX0{m3>JNu7YJ>QQr2i7q z+U?gXr$jN^S;PPSv#&2Z+%ph(4A=x}v7OAApu!4&!zvG4Q4=>An_gl?&h8Ahki-Qi z?*83;pw;vp(YzDZOz6d$(W>?#skbpPH#R9+0jBmE%94gdQm^ChXVf*rJ^=DyLFeoE zf1e!1`?jbz1fz0QXgtPoo4GPc^^-y zTh8-m!`!44f<+<*Sv<0;9eNe^{D=EO7J>ad4wK5PveZn-YCM5i8l5|jBOC7|TRN|{D6J%~O`^<*h{$C%l$6RPKQoWKMpJ@%@v3CDC_TJPCeuNV^_4GNwwBzn^tz_uqrT92mw?h0k!P zfuNoW8Fx60y?D?b0z1Cw>PooQ^6KxgC7Jleb;WpntmEED7Tx{+J)%a7=rn)a_vBxs zvS_;FuTC;{A;ox_GyR#}#Kq^cMaw~d%&)dWEKKB~#v&QzuRLi|0&2-Gkq+wY@4*Vb z8#@0P@9*1q-|~llJKlIl|KwEIE1E;YcWzbh`89m0l|{E=JTcmEpsK&tM?J3Hs@}O( zG-V`OffN^h5rv!n+jm%DuHaN-2ZhGG-|Kun2HwTf+0S^RFM_T+c=tQ$($!FZu&2?* z5Cw~AxhIuaV9guHoXVxO5oO`R`e_42pr5z06*U4$m76b2kFM3(r>SebW+4* zUZPiroMB$Oc~)5;q-snmy}?l{zYh1~)XsPrUZ0?>#!W0+5{P2%57@ul1A4#e zJSMUi0@uC7@z)VWUn4`Gt!vzAVe4GMSJ4hCA^+`~Y9Rr&deNSSAnCU#Y(_U*SXyNI zZRNJ~2YMfG`+Y2K`Bk-WnxLp^k)YJdA8H`denxhH zo-3U0IO3^PsJ*`p^O>rD8he7?rSFkbX?mEq=f+C{9|au?SYZ7ikoUZukseP)M1J$k zJH(g#$gcN1<=AP|_iJw$`9w+L3$_7V2j8QO@dYlE5rfpWjE(d_jP-LHuUN0f7{Ip~ zjOn8+;$2ZAquGJx$f4p`Z{zFlv1iOM{zZL?t>53{fi1dwJU2#vAN})};N(B~r>-Xo ztS7SDz3ivr)b|^(NAajV!I9Uy@A=UhABW#Nrazdf2=O}IoT45=f2K_p$6m$%eEwd? z;yXE1Zg70F6CmrI^^JNg*bnCyKg#+$=X*J6-i0+an&u0&GAJngdYpktM~cS`3gTOb zNu0Z!h{9nq;5)&89UIp&FwgL0NRz|KmCSdbIY+lz->%%jeUEOIAoLiXBN^!$lz)kc zjc1J{neo;3>%7!FkpFf){C^qCWkv<+z~#h~%p)osJbsTPsW^J1%E9!pA!qvH&ROxk zs$&MuL%2g;1QtE{p4aE7y8{O=_4Dq{md0SWvo55o*X?ai;xt_9+~o`zIo6ky4_Wi7O=s$jc z?h`jDA0mfJ=FQcZPAaj1&n6>8ec90N+zLgd+rUZ0D;2`Fi?2sQlyFsQ?Uae0xUUY$ z5$hB=q&WQkjGMsiGZrDO@F0D>eTI=bFyhJbd*o(+L@G)jj}_awez|K;dOn<~5sBad zT8#H~pK1}4&c9RZ^xyKN?tHlCU4QC7{hkl8>+tF5p@e)&>|cm^{WO|^EIm-p1%z(^ z>M0&kKduz1CW`;5^{rA_#g{&Y;&4|qH z56P&1f&H0^+e1Jf+H%K)auF{xJAZCzI*c;x$3QVXzP*=-{~=J2*V*TQUoEus5ZyUB zKI1v5Bv`;AtrG*Ck6f;Chdn;7@4w!9;5XeY_QPR{x?1Cv>pR?$+Cv%h1N1?l>$iR^ zjOR%|773BnI~C1|F{$r@b*)-aZKhu!Y$jKKdw+)G>&~f=0h8h~yD$m|bGpw<`XOHj z7wMcp0-KT=dHw*OUbnzLU}ocDtD=Tojs8DgrBEi(aAo4(7gi^ zU8dQ1ECw9L$QfC-@@~V@_oB z?fGdAxz9ir!e;RkoQ8vpwcUO4iE&f3N4@-o?hA1C*LgUBYt-qFRDWo%{pjawtn*!m zkT_5_K@C$`job?CPy zdbX>vN2CAAJme3LGWRvN4Yh-2r@@D+CsgrSjdA9>q1)w{A68=sh zM)el%5;lI>rzBt%`snJ?tiI&*>z{e{~`wBlbLM>^c+R*zL4I&DEv!Pu(Y)Cld0Ao-;Tw(<=KN7fQ($P zYFO%_f=2A!xaAJN z`MPA~!I#nhVXXcKF^)3-(=qb@U2HS|UyluTb*=`ea0Bl(cpWeQ(BFeMY)Ad8&x&W= zoOF1OZVYenBK?IS-Q`E07AIeZ8^k3Ui^8K)PN2b@OocF?!Q%7$bQ0`IN){Tq6;g=k zPGUWK`?8kWCy6XK4WY|_$HCd+gns0ny%{qCsGI%zWs4GVkw@neCt>z^jexnaD&vt7 zt|JRJ!dkK_h<{k_CXbNV?s2aV3~L%V&yI60Sbx6n zeEKssb6L))#jaj=6tCKWJvv8;(}dGyTb=7c;W)OGZ}x_kfSg!g^| z(12}eoOf4C$dA7;I#?U7dn_FG&?lhEA3s4==Dok%sq?J7;p;n$&Zm?UTZy^xbvVg$ zTGz4h*~!Oa_g?hwV4Fq@{;4w7aZBHuwp zF)QQmAt}%Y3H85X?_mig=rQ;T>n~x`g9^0+wqz4&a#G)IEBMG)cpOAQ!cmg@{oCv&^Sg_hJeOsxHhjebKDZ>(R zIN0Cu|KuKjb@Qt6uy=iY9UFK4Sohq7Xl}IKpLI80%#Uv0^&fg1ckKiCD01X`P`aLt z+0gbCYCDRPFdx+{Ejh?KVp4vmqn9bMLF{sm zhxiq1c6-)V(fmi-jbko-^E~bF2=AqD3%-n#;(daUu;JU~wpQ2H?6^cfBE0j2zgdW4 ze@vK7L`@0EkSE2ZfX1{{&K9$oa8!}mh<}oQ#Uu_zygcTNjIAjZ?7T|PNs09X^hS{- zh3eJ6fbfv_19JZSDcNLVv4ny^9Pcixnt?Bb09E_Uu15=s_dQ~fr0rAk9>}TwJoJ3! zNOt*e_UF1p&(B*d-DxD>{PK3U^KWeT=Y5aUxnpP++Dq>zcE>zY`^rZ9pEbC5#)*D^ zPt^Z>HK}d!PvkRzlEQ}K@CajvkVz`eMWsF6kk_EfX3C=_jS5Pj@oedN(|jG%svx#% z(RYl(ncJ6AGNP1#1tTQ=TyiQxDv<|}S54M13N`a*(JI?<)MaFpuyJVSJ0i!VLVsgD zw&7rdczh)Zbak)1&r@n(wcf|h{#Mt2DsDJLTt^1n_Tn9nfW87)Yq|H%_UH$$xtDjY zX8Cs>xremJYg9LHlJdfiaSWkxqCrBmeRKse~z{c7HyWm%SgiJEPg zS|nAKZza~e9a!Lb2)h_G?s#vZrUrTh63r`eCK+H2!wjf!$O5=$%VP{O+<>S~S@p); z{d(n~PWdMP$^QiAe*%`G!@2Zl-@_kRy(<4(_Zi3a-;SZb?=c=7m!ZRsYi+Ze=5(|t zLf2`o{hikR`$0^Ot7sV!i=^6rI}CwRg+mTjVW*y?d|t^UjbZTa-# zoyTNxu%`CBIu+;g;>uMEm==-Zjep^pgHr&o`|acGD95qn26TzOVWN zOWA2xo%};um5OEFrM$R)JGGaCh6(27b1MeHf(&~+iKusPSw` zH3n7G*n=aqA#2BY%4|`7hYB$Y(gl@oJ;i+NmxmX`=qau}Vp1SfboE3>1VcC}SSlLd z`sVku@vcYQ)y*c?25|v*&$r%_{pJsJ@+;jJ`ipn0YmV2xA9?eC558lXay3|<#QJbd zB8McC)e;5u#W+5xf2DE8|2uuNfA}}&!x8BA2ahT8^W|F@U!u~XX>d~J6q06-wGluM zAi>w($+_6V7z{4u-p0C0%FE58is6VFxGdNX%_6DRYboW8m+DZk%3^N`Rz+fZWTmYN z?LADlW^0S`6ibbNy+b^=%k}uTF}bvjEnA=LN(sFleQp>FT;HzJr+9`~FMS>-tmn5C zCm2S|2A|o=co_o!U|_s_DdZOM&<3HkC~d(05)UEKwNVgLV*zb*`?+b?N!l{XnGW+XDp)$13xuZiVRR&U46U^QA-=OAcbdk{oRv)@gb3qR3NG33+oNJxZ&ad znRoX~lQ@g^eQp0wjzo6HKL7BXcmB_vwTNladBkIkF4-!~%7(hj$*js5A7?cAqk zy=Y`YKqSe2^SoJ|2_xrZd3tX}0%ew38@GxkLDr&$oIj&xO}3N-~e+Ds1oc}mNV z3Gg8kY%1!1&mrnV73N{el<=J=Uv_`5pN-eAb~9h5PO>H+-r<;6`9cWfJL1o6B3CN> zxwu!yfl@1*Ew{l{jsn_RQ^Y<0J&wG%_5FVHz5hXJ^9WI2;PH2VPT65wyE%oV`HU};FYBkdBhcV< z8LiGm9j3`7*+u)Q;(-H}4=O@qUK_o)A&E++Ib{Wb5As$x!MBUmz~RGL!P- zBB4Z|oT>Uh?|>PNkItIPi9)PT`Gj; zpe_~SY|j8DFysLcU9W@aQk+;4J!tH~nYl#eHcMH8XCo6)_urHy3CfaVzy351u)k-t zY=(G<)EIpS)6o~y--D475#$X?B?@V6UtR^CpUx*F*TEC8()h{-Uy7}buZVct(O&z1 zt!sVDFoh2Wwm`{-9Keo_zSh&ZpQjG!=fUT4P<9l(q3KpNHpuUR609~}bDA0tL)4N< zr4vwr=BC3>B5DG2lX%lnZG5!1-|PJGdjWrMiToAB|6KiDnP_c}q*yRfcyM70$O_c& zNn!;=b8r-Lwza0u3B43}w*Gl8QF01@<#cp`d(004sskt;qlp6W0R>=AyMv&EtxQCLB5IRL)o^{ zIM6xbK!*ykDetT9d$FH^uNbUQZ;N(3J3LR3Gl^&Of?dyl4^KAOfZD_J*q4Z=7rhe zuvili-j7jmWbimJmJwMdxQ<60a`f45eF}}YM}KLI;m_b1pW_LLmzV9FpO?61$aw2t zO6+smu+Oje^{wBV_?@2WZ5^Qgj9v#by$3Jl5H;uZpRc*ee>fnw_3+1dIoTgNPhRga z*sl+Z>blHZBS!Rpki)#z=fXjjp)E_Y+zY)oAe9Z1ErqNYQouThyVU#nVfaw0qUVgT zi%&kc#RHFxrnGrwN-o_OTgWD-T<@nL>h?VjUy8kR!aeVu$0uLuHA3SoqTU%|LNu*GDGAhMjw837w5??~I(}Ek)KcGH;xuY@L>Afe_bCXNTC}-L~ zp(^Po`as`%wWb3*{R}a=NNUK>@pjr$S6ZaQ6gDw>>7h59Z9E#VzJ5a1j@F9xc)WW) zHU+c@Bjr(_lUELRy%pq1pnhA*g|4qXbNTA5#Uo}J0_14lT5Ck zkFfoH_JfV{wZ{7FNuh+t3NIV5{|Dps2Zv*QpbB|^zJkV3Pi{SBklVrYNAZSU%!9}4 z;1zCIOE#Gx=VF%&iUFn`iDjm;tX~-z2YAqTipjf&{_2C2xjMnFt|t@p0$@$lQ<&Z3$lskKvYJkAc2uJN{t zW548cjT-%^@g{S7?OKQlA>uebtZS8n*bXKbF|PBet-^?kQ8Sjcz_^}n?GsTFfqV*~22lc$~`*T8#IA^%Umpy*@&cVEC;=muhQ;qF@M2_1~^Wvu!1JDp2>It0!l$KHsP-bvSgdL5k?i<*)z0LCc2we0jJ zwb%3fH&;#TKBc{u{r(rO^LI?u+}7dJM4ak6>! zwt}9MUO#QrRo4QKUNTM{B2s-H`4LdzKfuNWAK>(ZAL(^Le;G1hlbLJ zRuhRN%>Zh;mfi=@0s`!hi@%?L^&VH;G|ztXk@cTJ=h-D)OBs{Yk5}&D9qTPx_v~Ta ztX%A!>a)RK9S=>Ga_l`c{*}yJ82=tuO62mia;p=O{pW9UVmw`7CQsq8z>(Xn5f1PEe_f9t*c7c4PqP+Fi&jj4gGA+g}ZmL(^`8$1f`9(?qFsFaA6qlc$m zaZtgUN-Kcvb8CEH%_$-j1AWP!K+RF%a0{46maFSOnTWPkP!el$ON zKDg}se(xO9tRMivJ8~F)o`Vxxl$^*(et>tub9Ep>dN%fIvz$z1@IjT1mAvLXtsCsE zVvHdJ*}$>%9gdjN2s!$HAPSNRBoAm_V2!*$6W~MVRlxEeeSkX;?iU~DUVHuF-uLxb z_~Eitz6?_v&!PSJlM*qbHenet--2_&C#E^Efz#>5X z39fJK1KsPT*PABhFU?u0eO$ZR7wxeIhiE}mY6;Z95!Pe9V>7opKMJjjfnJ~7wui;@ z?5t>5MZ}w!?@V$(YE5O6D2202cFj9{Ec%34iu~)>os2GzRH8YT~ReJ-Ly8CpDfn0rMsVNRWcptYA7g zZ8kRaeMqER36{UkfV=00Pap368lA&~c!x!@PB7=U>*702^BE|{ZNOfEH6nx^B7wM& z9+&kZ>8Ic&`f}x>(;5HKu(<25-Sk-dJ@v18X!?6_tRH{lxo%o^{hsV%P5haMXnI_l z-f~%I=m&FuJPLmD>-*UKi=J0}ph@VQWh2gP5?{(J{kEp|JFUo73fMK0JiF3Xoa^4= z>1FM*VzDSV*Kv!gwJy=W?0GE#dtxmo);2VYm`L4=LXEcA&(Np7yy@}KLiuyleM-=m zcgIh0CWhU(+_km(^R&bH#nAB`?q^q(&-%|j{LKkL2-uCpqi%}L{-7Z6Rx z$4y?P6=kpFwkBk=^&Xn%ty%7fbTb!quEZwShbzNI>;$Osj zv{%<4t=(%`PwhM>4$HrJ%1`Mo|03l#x8?KaZU9|CqQ7Uw{9-)?u>1TF*5uu4TY51+ z>E#l>EBI?Xe!b|htk^|W+1*7u-g#AYxnaN0H1=v3 z3xA69h~AstW%iQP&n~Z;ZVzBiF8HJx&Wz;3T@afU>KPJqRwe>#_KlD0;x-;|D5lr* z_*_ZUN+|2#%3h*f@kPd0-xiP4t9-Mwyg7t&5nfLWQ? zs9lQdIiLdL7_Im0z<&7dB~3^=A8E%v5yb1Ph-#mg6>3s&LX%?pIg&wqiJmDiQp|a5 zk2MG3e}gCO$q6qPTRxxnSD1vE+2u4fBRsm>&ArNu+aa6CqMcZadNJAoj@}_ z2%pH3_-}NIaisA%8dy-IpakL5O6{g2NqlP)?8>dTs~eDPVi#{L(fk<9<2x?Q&MP!d z{+r4{Kpv4#cA+t ze~bp&+d%P}2w?;I$YMPSwLWr;%W|J?M<2$wMYYG=PB;W2+m?lGY*Ewg|FQS3&50{Z zo9JJem`^j|Gl96+#`}#Eak5G!QMy4%2rRyw0AZJez+eO}#Kipfi7bJ@-MxCRwf27J zd^sH*VTmkBb;-JAWj+rhA=*<#;k1rle-ACGx#*|^F$3ifi5$JO6gMG~f+}IHdx)jp zV??mM)>+ZxM2;RFThD96f&BNRsn2Oz&jxD=-orTIK$7s{w`c^F=z8*MgSw9~5R5C< z^`&4;{1vP#TjqUAljOE-sZ0=9xRnpN)crihH$-8!+* z_;fnvK#(Rz8F3ZxtpAj-#=eGieQtcd3w=Jn)plIuSbd;^J#4})GYNNEOT=#UalPw< z{CMd>pL!V2oT5$J-~{*wLBI7@H-gNkRQL~HBbB$lR){5GE`j?LgLz8pe|adklt4df z3(gqE_^4OjN{}N2IS}OFAPdBx!q_2~h|z?}o&{mX6VYDBz^poc`|$xjxw(`Wzs)qy zDZ7(X_<=R|ec*!g~665rQf9lufUy^5>-&Y|HL?Fx`@nI4mti9?|b;61*Ui&_w<9_y<@hhpOFYP!uB*{CISpe+*wmX|>kHUxh(0vi3jw*B!2DxaN}pZ`R<>a9F?rCPksp*yw>g#EWU}xmfAr;Wz+4J)`bD|TRL=sz zZ8Oh4e8k|OSqSO#$W-0r$M{4%u_lrgknlM{JkY`?7W(Y!_y>PM`~ljad>9g>?Bj*@Ss)GK^6?S1}iHOeOm4`2}aaPs=9n3UNJu@&O(!k&}{YKEx*O zGY1>-l{C({fA)d;+1LDAtj#x_)qKktzxrFpfWFeJZ}GXez4b|4_sWBxVny3x_#XGi z9*IqJA&fg;^uV#~M(n?{1{h~ zUipd}Udo#1s^K{^zGE@sxy4<4MIAGLjR*SE@u8l22}h7bHpq^Ee#a9%#}cfEfs$S& zCO%=ie*kfM)uXse*ER|I_7>bUWjYY$mqTK8iC>`|`6nIQCUQHSWsqd(?~NFWlFe6sMWfUwDhbB4zpCR(pH@v=snz=vai#1( zu?_xA+^M53u|5ekXJDR_gDHy%j4_zMsnaf=eRF@)|| z(ti?@`V!`KOcltoXHJ8$CD?>ai^`Y9hl#QBae3xp$;VUU+BUg$=5r7j6CHl~=cauy zh6sMB_bu3cjCZ_?UBr6j7hlkOyOKY>_yOGJ`|sG3R~~>o7~i;;`HkbaeX*xpIe&4S ze{276mT|QubOY&jEDXxgaabe})n#5Vf}n539~8J_63#v2*m@PotMefxQTx>i zRLZpCe(#m02}mhMwvnjlKefF+VSo#Ie|9}fTMn{e;@O z&TF2MYpmSrdkmvKd%~EJ#t;8`E^)rP^ zHRD#>pFkK}^D%G6ER40YRTeD7wQc#UpKXc8w6?eBTl=anj(!X5tBEUf0iJ3+kvMlK zx4&b9rbg|1jaVx<{$YJ9OIW|pYn4e(N49~7k=4>p)h>W^O%yfpOF%0Arq1iUFWM5EOP{{ggh}6fl z+J3?keR3{-Cuff9aE&`Bq{d$u52WArS9%`QdJQKrZPqRt=i*XdxZckFe_D4lhVje~ zfaBCTv7B6duR`u;qGFzpVI141P9wK!4< z@qnJlJ~+<;JY#1wC;DHV~Lw}yDlx{w)&yMpVK zHP6NBI!^lJ9(x7X2g$9D^!oZ6w?zB0&d_Ug#Ow}MeR{(W{z2DN!!z&^=(xvJ$JuIK zI~K$yhH)w}9x-u9`8BlB*9e$JRKZPBSf=!mR1|U9<6#jG$jJx-e`&R(LM)a6`KpQv z_omFKh78x3GR~3}Yn!ATkk#7SStZ#xa%!!5E$h5tf;|lN`HDne%Y=#y)|TQ!3ThZa ze?!s7`HBG|*_avq)mkz#JolKX3_5M^&N*<4!G0uUBJT9|X!E{|PJT3?BzO@PkI$$bv;5meS&*X|Mf59$cd5aIFSKdUTkncRXj)ls);eyuK-m<}4sriC_#ACtuhkE`D zgRwG45;@fHtiNZXJsPpAKx2KU;Yf1ao4sm$VLdO$8z@rSd=-aa6BF#Cn;qEXWO#wU zKs$>~#lkyhg^l>I7#m4J&CrL3aA3&CXPG;TkYsN{x6pBNe~tccr{Qi^3_|0HB+K;U zj5eM4eRw(@ok|uE!N@$bm-(YKxhe+q{h30R%zWU1sRAb?EE;D|f1%yV=VcxjbM&(h z?WI-m1rvr%bgY#$Y3|sbvWDZw9-*zL$cwi#j8Emt9}d7U7E>dfQdL?f;|y_)6CWt@ zgy+<65)iASe|x`Yzu^X>z3b-~{IMtk78OsEjda*5ITx%qU=n@7q;w}Qj}CRLWfZ$_ zXoERDa<51o2-#_^dKN=FYsQ}3N#9{p$6>POm0RJy<8@o({$Fr2?r;HcJ#@Z{>%3ss zm!>63$F7__n@+}~Dz%UdkICLXD~vZ@iMM#i$y7>Duk&_RrHlb3YJ;}D&?A=tc z#q1v1U3sg`%y*oYNeP}p5ON^5bHyLNeaC5rd`0?v4EK^XX4=(94eccw^TDruJT7$` z5A=8xeTy5p_`&J-e_Yog&ARZU&u!0j%*l=K73NaJg|%MikZ?LAllU>7#HXo@d|9qC zcLamCf5r2<#K7g%G8w}0VO=cacXQNv9-j+piqQ5%5+Tl&E}!fTNtPX5Q=Ck5+ojZa zn8uTd4>Dp(#}g6)d}xZ4kPot6F0ILEa(J@b zlZ;!%QXMyv(Ec$Tz{BU$B53(8JjIClsN#u{hFHg=k#6zmYFg{O65+$}x zd_vX}W=T$nOj_Ys?Cjj|F(#0}4C1wI_Ex9~ZpVZ+r~`+b)FRxs<}AD*D(- z#)yV3jJ zwt5DgkwE{=XtPtgJXNVV`M*Df5+sL2+pn?k{Zf*)PI1s!gd+o>az6q>Uq;Yt!dtJ1p zZTyjpEg;fx2ySguzkm;1LG@|xe~rgf;CblA&6Tp?{dw0qqV2obLCZV$Td1Q@+W}wD z$DA+Fzw+1q;FDkaFuUSce#)2n5zY&GcAKK%&6rr|_oH=j8JlRQ=;6^A#z5Z`!Ma>U(l#)0Uv52@HnP_8%`$#MkUilAMJY^^>hF(eT5rAQco z?zuB?Iv@tSCo$MTiS|g(f2@>r2*Q8%&&cIVp&a={X&=K0ymlWF8J!P`kp*zF_+C8%Q zkUQ<)HF)en6lMnQ<#xv#VWIdbWFs2d@!XBAgCDVILq3zr;`VT*YM-l($avpibc)gvgTNFqR%fh+qy5y2pozGY=9C9;>hI@aGOsbb+kc>oga zm}NX2btZzPp$Wa1e?riI(|mqvWRc_vF*~RbWOxwsx*b}daKi23jYk{x-ju#>zbmhS zbxK2WS*z$ZYcRPHzgOy*ns&oKqx+4dk)OZtpPQvtFtK`kC)q1P-N_Jso#t0o_Ee@~^)h*X}RO!7g7n5V_M z0FE)vR7v!W7up|yfOR;Zn8SBvOa+n%6c&g>lqiF-ak5&i5ksC^pniUatsetZCZt60 zwT&MT!a5FrM89=PL_P1`^*fw*FPKf&7;*HU)Lce-Ze)@@P}MizIm4I^TfQn2eF^G* zCz$!}(-Ydsf6I}F7&b%{gEK-9OHNf^4fNji6qEO>pTT3Fy2?MG%PLb)-S7|68k>83 z@3&w36R_Iv>9RlI*Lf8&Tx(rjrNop29oN#zmH6hzPkh^a6O@RAQLJO(6{biWqAa?o zCY>?Cxz1lub%`C>Cyh^kYr-8o`|T*ipTa)4wOe`Ye`>#*WWO7arqY#~r<&f!#SB2> z#{XFyOMUK5NLr5_m+=*Lg{^&ml@+xP4@Jt6L=LfMoA@5YR-)dh(}K*m5wV`B*aDdl z+-F?JV))qp39R*A&ZNyH`kn}35+SZQm6!Oam(B%`ltF*g5niFo!-M-okub6f6px(uhH)3d$`kb(7L zd#3*)R;TR**K4vwY#!TP`Sg8U8g3y46^uRzjHxNit{g0IZOo*`W3(yS7T%s;*5q;U z8cONqqVI?PfMz~7<4KC#H4ZH@Mn=J)ST1b}N5@nrrc#$F8H5v+Kx`Cl#01mv$L|kq zf80-hZE-!{MciEN*FP5Lx}GmSLsvbC1pKGfb9*~LEQuk`3u^Gtdw*V#ka-RaK3ngM zPRYNwj@#-Nc$XqUyvvuv>EBYnR~(!Yf8YMh`DVyH8uhOC2* z-v$&)i#NOq4Q|wH!;kd4aVqowM&10Gb4m7>>*TlNTeSUab%B1yTh@$w@-oj`*k{7V zhwK~fD_h<7uk`zw$T<1NqxRk3LF1VHrvHA!OG6*$`ixi4>7r_4Xz&b$-1>kzX2^-g zu*LW(F*vr|?OWJ3kV`Cr&%~s7e-1P^(5zrt=Wl(SQj*wGC8iUl-^K?5#UsnN`w8NR z3F++7?keL~JszA4e7_`H--aswxQr3^?f!(gB~tt|JYcB4$1vT;`^B9r-q0`F^%Z&D zelre##~#XJyFQoHXV=DU+}C^dzcOy;VJL)eB@2X~C9)itaot|-D!jETe@jM;78}z{ z=_n?5V|T!=`oc1{0x4Z0PYwv*^(NtU%pU|RS?h`^J|hAS2=A4sVhj&`Q}@r;-4ld7 zAo3h_-_Lsd*6k(T4XCX{VRJx?G7^4oQ;>3cZL^Wd#zbw$6x!A+2xl1bPq&Tu1$Vkq z;r!q)@%H2<{9UTH{^K=uY7HbatMW_ zL`aVc@8w1D0mw3*+AsYGKYAo!_{$0%97*8kmpMm77fe=e?<7{IBv7bNG4FZzvJ zDT}ZBujv&Vokacx`>g0X(f9fOE}xXC1dr{;L~Kn0zIEHPAcwIOR$iQA>%khoYhR_^ z#jj{U$tAzfGJ+&G0kO6b6aBh>5vbp($4+wNjr;4CihtydI)I)0&`#g*uJ6~%YbU?) zf1r)Tsn6WVe}zA>=AAXu>=KKv$81yaSO3&Ab=!W!%52sJEy#Tw7iI)w`-Sm7D~KXW zemGCPPg5A23Ey#T@8NN!jus#eaU6SNMeGgVqOPCFzs!wv{3Wh<7=QE0rFBdtzOBVt zr#@B1YiqDi>(-Af1Zs?m#&_9-bbR%9_D0jLMh^?h{&8fAC?Hu1HXRB-!8PzfI_i@;!25+xHxK z*agA$jK7gvCZuZ@yd0k}ju88s!4O}L75!3VqnGuKBl>8GZOz|&7R!NDQ;z}qs^DJt z!t1;UzVqa2KHNX%$!+XBy?WhC6&k7h@BRgv!`Zm<;@Se^zeUPCl(b|Y3L4mN5Dfj)Q4%Y zW!UcvUJfGzISHoP4?J^59^>;l{ZG{eisQrquHyvFemM8l=Sn;i=U>c|9~WHy_78j# z(FI?iIrrvze80p2R6n-sW#4nYVQ&1~f9~J-^{zg+7z5n+o&+Yp$0z!@VBh`Q#*Qh2 z#ICYvAo&ZCu`nsiXx`5oeJRR5&655Q4~=A;j!plYPMG?jzWj(!{gD|8?H)=9qgJk5 zFA|kg_3Qx%HyjJs8kbKb^PGv}8I~VzBf5gz1 zjW}i)YblawT%9OuRfmgCaC(j>?~qtj7M3WO#ZI+u2rI^UsyC>F%T|bw+~`a%lF|#p zcxmTJY62%rCHpDFW8~-_M5vpVNflqwT)Ynnuc*3k%>##^^Q^1<-u&u+;$Lt4xUaYadR##a zy$0u_8JFrcgt}pw)MsbQEd|4OKb6f+cwQ7&!K+%Cpax9&o_pRGyD_t=ELpH#()I-P zNzjoVKeRq<zo(1B&) zwEcHe88Q0SA`c&ANl9~~xQ&qEnU7}z&9#or!_TAw}iuDj^Oq)#wDPfVx~ zs1L~H{sxJ-5N?J1+ls93{Q)yWfNSJ!}?Y zD(eu86Gk-88KZ8ri(+MrPTOd+lg6mE6JI05z*1-n=R0@pj7`z9t+kx)K9d06Ub-I= z_Z%3T$29JUWw!+-f90H-FHN0Z_&i)`8LzF(56HRKLcSlgKM@mD&69w82+fz!2^FVQ zNgPL%uQ4x-!G$aLvX6V>Xg_0mKb`)odKfl)=wIU1e%r(Sh|RTfnr|e(=S=vm{`SdY zj9ib2NueZ=7oYLk56u3YVLh9zr`S99%%9pRnJs2J;z>cD!AXAe8Zx;_xoJMZ5?}i!_30T*rNU6Boy5=81+obOQckA}^6_+W^FKvrs0kNgO0cjjC-p8dA+j>ric9;}Y2Bt9m zHv&oQXMGQ(FDwvZdQLYi6e-9pIa-fPJ4`oJ%!oMkCj%L0k&q_K63euK?8nBK7N>x! z*s@RQ(d-sBJ#Bp|{L(D~!-N9IM$;7W0DV==e>~}qC~FT*W)W%iMy3(intxp0G|%~1N9gEszp ze%}o!hX@#)?)+8|rzbzZDCi@Gu8$Zhpjg15_0X5szrKy<#qjlf(wMg5`Jpd*e~xp0 ze~f3U>HsJ1f{JZ7R|8wV%{5PYrS=3$7b>IFn9)0&yT94}UJ;gT}V(GlkZ={|9ahY#_?`YFp#kCkV?(A@wL? z3TOTcGoATBvXLpm)?3s&?vms~<8yviUv{>)x3p&wxwX$r_@JT2tql;XAd*&}K#4Zu zei*?dvHCi0$6h;YH`;!!CqC1D6XPu%f&F|$d>F;Ux3j&6FZ&DPbfkGP&uO<@f5FU^ zJ!*?OqjCgbb*r^g8}(87@4lnW{%j`u|9*gV!1I1 z*lX#YVS;0O%uV(B&|Mu}SYPq3F^($1II1Zgl3^@8AwTjE`e#eq|MfZu+wIOaonE>4 zuKVA1ZIc$4IJUTneRE77W2P>+e=G@ckiX_U+Yn`q>ACHILSH=)ip3Td5_RTj zZaUSxJ#Cjv&v`FZ$1^1*83b;Xb;-pyE~d%Hzk|>Ebz+c7RPIe72ShsC$>wP5SON$( z39v`3Q>fz2>txI3LJ5Lc-G_vH$iy~98ng~=dk+@+`a{%uj&%J>GST&Gf2Nh@jmv+G zE&ay(h%sax|9r)s>YEn1tV8Rwa@-5NHazS(asxK_9R^#8CO*g+U-;QOTXGaJ_2u8= zU0(@aO|4wvJ^Omv07BO4XL~eTw=+=j2t?TqT>1|4(-c(j2!D@zU5C55`8wA3?(}^N z%r&K?5Hsdc!z4lq!V3ESfBNW0su_1WIPRp9JYW1s@8kI}_Mm$gM}l=*uiNv>Sxglh z-~1F#!nQO%c=AopzqP|j*yQ}a3F$Q;?<*6R4_HKV}f0uLVNzDNt7&LV> zR<|&?`Sl`Uyn5(%yASCLVxejbUz`gGOt!5{++Xe6AT1Uu%!eFQ(we_PuO741un&?c z#EimqDkl$eEBMn;`8I=-0dMF(dgX&Z;hquW^E{%Q%06#H2E8GC0fkUd2rE8RH4e@Z z+wrVmuE4BwQPIBc-~vtR)zM8DObo1$OXxY83?9aMy-rDrCr~0;U{_*;oJ>!C%+MH*PFC3ede~AN_ z#oJe(uv7oT&+>(-I`5x^5z@Af8vmzIT^<^X`4n=lghet=f+*e)C`Yqtu$~DcK%~a) zlL*X(t$5BYBI1DwtbLHZp%GeyIiCwtbhf**uO^f2e|*Z*kch$PpL0AWMu4#z8Mk1K z+}UM79t*U0d~26r98<)-Cg8-_KK4|i4EbqDWb_CTFAtM2t?_F2664kTU>ETI4X-wR zz2ZY$>N>u0j_J0!*^D$s6pn5F^cOstxAUwQ+OC za^<#Df3vZ+E4AT)O8#n#)CQT3hcaz38Cz_d&1_P{;o|sgiY_}*oMnR#ChiVqfJSleVeat@ln?I1zil;N+6NoGmAcxoNRg=$C1l3^}(i zc0c(+RqTXnXW^O^Okbp%I%csczRs>I^I|y z#_aOKm0CKmY`uX@LIxK*%0&5wFR)*QK~ZQsOEivMjDExU?kMYhS_*%RKiss}5B=qb zvF?X)4ta-eHmH%pv(ig{`oW%Mnf~?6s>axE;%t7jS-H;iv@LAXe^)*mnd#mgJ$!s3 zf9K}lB*xegD5!{yWsM^fp}%G!NOrJ7VIWkH9CQw)DWA9O6k8f6J!As1xD}j#W&84$ z_qMA^*BFr)=b-e$6I9!w{4rRF0ZXFt^NJrsl|OORBfa(|?f-Dv4!m=06M^Gj@d=Wd zc6iE18AR*^4>xYs+J1#`c|_Iz*{^l`e;|%+6|DF_9s8`a|C0}L({8_*ufFlr*0`K? zf4kU(C$4pd(0_N%JlUwL&2SkmB#zlTKd9K!IvXdC8dhx!x|JON`=F#4 z)Qgl&Fg`=FoX0!Y6c2`AFz2U3nxD7@lifeJ+e+5H@S7%6$8zH;_rd!(OC3?Yvqe^&2cu*eg`L$PFW&Z9-yRt8`Kyj$ADkK~;A1@%z_s0KNWs{W)BgPp{^FKkUfo)>+9Be}n$<+-{BD;K5!! z4o185p;|pxqf@{2YCe>L1duYb5aEP`g>8#;Nv(pn#K$B3VqPNJJ9A@w->SC$rOJ8&5x%3%~vK@aj9uo%IB-hsXEPx)mk%Qf_Sq z$@nEZ*t|%#rhne^~fn8Kf(Hq| z`uB2JJouv6p&zj}n&AJ-zXz$~7BR{_xZGo$J}__>$j4$x*xc@WG*K`FwFO7|E)YgC z0WmJhh{AhGHz@Qd zB-FzmjUw=le*mRfDvt)UEf`-?*QZ7%oMtkEFfI8 zp20>QSMq|m1v~UV$M#6^`EFeveExPHsOJ83lgp<3Cd%QyKCW^y#`_rMmS5k$3y9?1 zz1l+5f5`8ir|K8a-^q1(YxxA4_r*Va4{CkE{1i?7+?D52K4%4HTGv;%(D$u=L6cU? zfdld-dQq!i{IWd6N-ynEVE^f2e+U$tUf!?sTVQ%e=f8dXzN8O8#_%OC@4J*|R|3Xg zVtzKNe*W@3D-h5liXI(n6trO&5PGtJfWh+ZGnQQiXo>cATdg*ut(<8(nJ@iFC8l0AYB?gTv| z!=$4;8xF{6K3YNxWDv0tkJ8M~S&zpoe=DdR@mZOD9v9{@?a^`GZe^%3u zvoO|*zPgT#KUkGY_pSVV&36y(R(-DX&nLG`KF>pNdSoE&R;yJ{ri37V+ZN~=+#d9S zeL$JeE}rSW7-pYBn~Q$tRIK9TBomP>_>5 zU(fy4JOK#cK39CC(GyO?$_$AA3WvV&%xW~@RW3S?{DkF)2O?Be#;L} zyt`ezWx(E9ZnyisSoVYNSNE;Ee0u4AcJb+DS9X`JWp~+~ zb?2`sERPSJ=dZ;vIJP_O?!pgB**QHg>}C1dQP1;s@GV^c}GF6Ry^`EZvm!9Nl_w} zydYFO!&p@43E-X}3r(?SYcPfiiXeWYxR}t_W-^Twu}H5(Or0W-Vh`CYR9Oj*DW3Vz z5*9^9Ln!=}*z+-je*xxURRR%HdonBWs1l#cJBrXsL3LDC93;GAE53lKF#IO1U6F!& zCd3{vXZ=~Zzye`RR^bBda3Zr3{PlF=ET)vLczATD6ul=%!WRUlVJS;96H{udp|zfI zj}bEtti+gPGY_`0C9_go-WO5j>*qk}=X@fj6zloZO(Lr3f0`r-DP>8CejnYyf_u=> z?=WZ5?vUAZQY=c~>k>|8@(d8Ka|P_EpXCei#p&Rcf;jwcXpGn@|f-x4etPKSy_TVyunqM|P6p9-Sm z$%lAdg4o29OvdDp#p`v#+eJ2wkaZ?VN??1tvX!ukrtQ213_K4UQ5`E(QPCmPTL8!3 zxWUm&7P#S{-sH1AkmMOi;=#bpJBmh1*5!OOlV{1mf96uZ*CUWv>rDyPDq+#3oZGUL z#SmCviF})t=p)2x{i0R%i@u;rPKm`mAZ|5Oz=n#@iR5hR@(z=;nvE0N4D4rQr9r~! zBeLtvIJT>mt)~kxohZn{CJ)B%-|0bt?*aXuTOFbP@{Ywq4|j7NW#-ubVI}r{U;>!o zgd{vHe^-1`eDbKH%&Izpbga@2vygTa!~Qcx&qx>v<5TMlUUS2lm4ukl9`#Vll0dCL zgq6rzn{4I^roKFafTXuwB?|~e2V}m%2v)Wdxqm1LFQaytlp-ubt0+onuX$G;b ze+v7M$ca3hWCDo2M|=>4L+g4KD}nD{u$a=L%Byn0b(~Js zCYD@r`lMiDEO=yLzb!qA9`NMaFQ*ARf5cPjOK%S(4RMSq4ka~89W`M)x=u?vlc+De zf@29*WmhblMja&1ffD-rQUDDnVNw!Yp%JTu9@a;|Q9l@>USr+Fi46=G&PUswEJMNF zXX?49GS}~lsuNJa^5ve#2&d(tq<&cnF0p(LtxiyB`P%astL0d~aV@W^6UnuFf9rY3 zr-*W_U#aD8UY%&9>{JM>w_1vpqN|l#yaV}M!2Vd?peBt=z z>+vgvFg8JJue>+d0k*s4Yh=bLT zcGM7cNW#>K;KRc;YYT#7hfpB);EAe4v^;_97-3mS zse~jEEJ`IdB;3tJq32Ck&&@`<3acRF#!QwlGjSgMg^o#_o3HB_!U8#?Lq*~sn_>Mz zXeQwi@1>s87C?Vd_ZhG7e?4NujafD%<*J088Sa5P3vk6H9|@(36vJPRw+W6Xn2%?$ zacqa^s7N%)O1-Xdex)@S+j5Xv5G@!se;wc_;AMm-Pr@G9Dwu-ndTN})fEo=!fn7{@;m{gf`U+vMkT z`S0F``I}?8?UF3bf2qY6v9(UPd7629rXUo1Z$V7YgD1E8F)b|g+VXo^=6Ue!_O@6a z-CuOw{Il|)u2oOU$1$G8bsg6IZ*y(;WC1=m_(e}_MGb$?E9i6DTMfPD`?lvX7uO?q z@A%jHFnjw=U%~!S%6z-SbvP3pQt)T6M90!=NWy~5*7roOe;VDdVVq0YDmhA zQ(|-D4d0FPPBvXM-60$PM3O~4Jr`OjXIv+q7=3`E= zTns|2Q@g-?;QWl~K6SaoC0$*9Qm0dO`D2~#P?z7*=@xbQFX?dI34OP^jTe~bG}2&R9F>hvY!nD$A(ze!t`rC?3kcg*{^N&BMTd*5dUJ$(0r`%FNm$Gn@p zeaYC~TTrOeAC<_-3j8 z$#uS)w2ppvlXi>wxTUSCimH0wSwgZXk#pXdf1qs!=d78?kBZt;1yv~CyxZE*6KJb4 zdA!a~Anf6E0pvR8r5*!fxE1P|yVuue6JkR1&lA9smM7l?vHdIsaIxPaWHtdP@!k>p zEnM%9hz$aSm60Ghj;oCFsaIf(Z4r8G3rQAEJ;{M*$u;dXJf0k7^LUa!Q9LFPA>?)t zf7j0egw}e7_{pW_FYJ9DVbIDJz*Y-JfkkIB6*jQ&_l8Q!flF8c^{HOsei~Hc+<$gk z&(b|W7GDw;kV?!KW14H;)#n!1dxCxz+nb!?(dFSLH-T|bH#q~!3AxEFV4OHNx&4DY zfDI?}+x-wsVO9vYWcMKj@(2SCb3s0ke|&j~=O0u;qF{$P@g*SgCk!!u(&XUdxr?do z`wZ+DQbl$cSGkZN@w_L|_c1IaS>azqfd*F8_p}G}FMXFGDE{4d@sN#pm_8BYi0OpD z?=a-oT!U@En?O0kbEh>}5?&R;XB&7P6=8m6NbPDXe7-^$iiILa7zl)z_`>|me-v3? zzJILC2~T(*DU#v2)=ZEDtUi7>!18+HSMM$HZ;ZcB(VvMi7~}ahH6WIl&lHw|l$g&{ zlBupoNzUlUsq=&17x;Y!Lp;|X2v`EyVEUyX2G-vku%3HOy)E&4nf71^_?!=}_}ALE zZZ}_s{8Fv-}QfbFID*aZ}nD}#}c)k{!#uYU^zNi zKl4j}NF@H%noqcE zSK7yN+XzxV*XacM-9F*(f1bkhKBn6t*!L318T(f~-l=>2{Q}m<@x#CQ51B2guKq*6 z{6`2}{FQLrra0lj2572}kP)xnMbjBIupYm#Kl%bH?AHMxo`-frFm~h*Pf^M?P#SnFj*nL)j2x3mLEwLc~a6e=&qHu_Sxdyy}9@ zAuP!N*b=e42MRp`8R3$xP-z^&HV~!61-64|OI*cDhz#OM_61`gzF-xMeG&j`!#E*< zVm%la@T}Mqj7u^C_5$M`87T`OW<*G&SQi+wXhWQm2rLKUE$o!#X=egq4I%E67_1N` zoW#nCVG@&xv|eE%f5}u>bC{Im6|5ypdgN7E5~hqK(pte(keNWQ!<5Xy+QD>-R+$o} z23bh!3toYw!m8laCrhx}@S2dNvU>1ZkT+=nNkF#0J>w+uF02Q5?Gp*;F3X8jsH`!` zq&+~T&4ukkQsN*IK++>CWeb=wQb>D@oqa9rDa=wF#Ah(ul8v%cn131ML)!1Cd|P4X zF!xcp)-X@VPT5hK@7{7wB_IxmXdVD zU=GU?2hAlcdxPclaewEVg!m9{lZ{o8?<h!`U~vv$WUc%$}?kopyfxJ!InYOkn%4 zY%exN<(q!Hmh+R(l3KoERiG~Nv0pXvb0a_B$$M9MU@|9pb*e>oS@8>`) z&+}<}x3I6e%`4w%@@Cx1AsTsLCXKw&vsGv{@#-fr4)6?YqX+{imO zd2*AlWFv3dc{MW|dC`=w)3oC%RgL_7)kEI(Z}Rg^-hbr3+w{TJ^7j5){dAjtuv*$z z`Ot)`ymu?V?A_!=Bfr&OMjQF9{vMltx>}P)9yjg&7~bU8O@4jex%KmcHF_h)vXM9S zS6I#aMN|I{Y2II0)=l2LpEmVZ@TUI6N;LAIc|UK;e;Ws!+c;1-Ch3G=mg!<)Rx zU&xyLv0MIb{UdDJ*RgKx6F2P>j&Jf?`QzSAUNrJXe~L+?KjE$Ytfu|W{93-Vzq-mp za|qcuTmf1plwV(;@2;!6Yoord-xrM2Yku3onSVR8&AMFJlOox?^hhCb-0S*kX{4KV zHtSBVeXlK=Z%=TJsO6pT*vN}(+fB^oTPwKEpIyHhJJRYZAI8_RJ0!TyaRuvM%Pz)C z>nb0bjl6dqTf|s)Z*tkluYE&|Gt$Vfb3nm**EvLti*_Twwq0R0x-Vkhxyr>VZRA(o z5q~j%Y~c197md8uO_W;{uIvAI^q1`a-~au;yl?lL`Qg9)*Teq!cBpo5 z|MkEBum9(L`*HXm@ALHSe_pOO`MUatJAWU)z5c&+zRu--zRU9TKeV(wRO`2YeBf)= z^)Kozm)YST&xY|ICO!_+&M-1;9XcK_Ukw|@xJ+I;o5ne`Vu9rq=R)Ozja+3#HhXATYK zWw->-Y7ja(qmO000g%A)=kk|!KPNeY7IC*eim_IUu_!AI-ngg$?u7Zhd@PO z&nfBGK+>86pIfzVd(V&EDN0{VTYrX=xY~8EY3edIbN0`RA$JXhn%;~}e+nQU@k;|W zf3FB1RKXH72Ma6`BYZQNPxTyNmh@nDg*e{yLTAg(M(oF zOK(`-1FNZrpbs$!p)!)h4;&y(V&k5dzhHCRsKlQ-E-4UF%sQ3rV2%@H27g6tBPe+s zR-7WZ>xIJO0wo!yFjaT3agKvl_-esbMFtecMO{OyH#=~q&{IT)v}KtueVP;Jd@FSV zlvIHr5TYAEE`~;&^(fi0^RgXg=i^R6stKHc%R{fzogJGQ;Ln*qCOG$$I+H<{5O4H& z4J>X$dmUwV~w-S}mmMCx)=Ld)EGtjK_RO{o^tkpqp zN93i>cKWL=Cmk+qR=1)B%{VVX&N@r!Fs$5sR<6P&*t*y~yYk@_i!qgZ&>$`}VKn<> z{dldaE6An?zex<)Nk!D)mU6~8a*gx{Wg$cfff>{F7$YR;y|BF0uz%4HuhvTfdRSWy zq9)Ipcu+XDXq)F-kgVrxW7L_dpn9KXf#u5$lSPvwGcxU1$fHSLaX8D3oB2wptd33|T7W>W4b?Gv*D4B|sDVFe0@ z=0wMkFfRXuWXpr>hkuk-!`AC~32CZllP#~HB+Ns;w(Y0J8ItF*yS8bJb6I2n3*si&s-N=X*H95sEKWxxmXE>R;yL_5QH0ak?OxP^23`4se?Hp(C z9FU}rS$H>J?YJ02rF_Pb{sxLVRT#Q#OtZ$c^PW>nC8#dujfjCkba##O%nU7&(N)hHvH})Gw;N(<)1D>J;tJ*F5}ECvcAE*0c-2_>xvAi zbd0}rGV4DJKGper|Fricr!n}QO_xet29Jy1CnKnlPQaU46^HtLdt0s5%8Elk56`BO zj*eYpOBxt%GdEbkr_JpAd{12#N4H^kc3?MfiGLn{wO2i0zfh`7P`5ZFtIqy)KVT1a zZnCl zcg>npr>+DhTZ%_=erf+h9u;CS%|CQS5z>KQH16C2x7lTEaD$@s~awq|nCakHbnXTYsseIb=gTgK%A##81q~(|x)|Tg#r9=W8R+ zv?ti|Sn(|C-{9|0qp1h8euK4SjP^j7uGjvg7l)RaZ!6BuhrLs9EsnZH6Ycr~pcS5d z!)TseJ?hDp(RnE?8~;^2X*g>`cYSo|{MTrbBD;ibq+`}3br35PUz;l| z1%cWQu-0Qhr+>?t9f(U;$cAX~$G8T3Op=O2-!UR$me-Md8B8?ix#&c=HGXi{b4PRg z;aO+^4evK$s$!Hw<&|h{;zlwfZGZ49jIJoLpnZ-?t-hDPuQlY!Jv6<`-l4zaA#f#t ze~m4!Y$}PqHXqiQ+RR~ckf=e(n)jK{ON?=6vSsUo2wE{C@{N6mdSK_`O=|>`xE!VG zbEIUTde>~~3vnLVMTEyQ^-HxPn|D2C())d$y4gBS*c7}EdvsmtkT)h(tbg&d%M`%X zgl9x{?qfs6X9&87&`Id{ku>tCjCx%!>-?r>ktzG!Sd62;rXKAc&ndmqGW6IXxq^#i z7Qlbgo8C#DRLaG6gjI*PM!7?WWrT@eDDwrV6#VX46G+lthiGl;6vW-lp<{mj))YLL zDrOjOO)(n@)@}XmMa`IUiGOKd90)&as_mQgb~1T`Ss~f6s(hX`YklKqlwVh3pD-+3 zKiFv$`0;Uqh&XL~(=E%Kx0jCx3asp^HbM3rcN+3@`Imjnw0{WtutHxVy&sAs zQyIDxiL{N*;K<={b*dIt)#^V?G%5Ar>nAKv9fEXayEwfWVV>N+23j7=lf zt!ux1}*{`U8<&u-^$wKx{_HW@Ov<7s2P zAF#odqy_C~pTi{*t}fKiIa?S4jQi%3$?h~xV*!B*<#!BIuABP@@-d{>!3#Ip4r*kP zX3lbkZM@9}4P7L7i^}}7o{OO8?TJ-l+kbqzG9CEBM$%?7d4G6W&AjhX)VW~{h^fr| z^jgf~oI{443FhO^K<{}^5UmKvBqFtgOUMvg-XUu(_(n>&zpuTKLF!k2mwga@36;EJ zD10%bHh0g+B|vv~MsRmK3u53m2h+v0)=NN7YqUaJ%gp@SZd$AOv5EC*`op@9nZ6a_ z(nVs>eY2WGN`LDebe$RKd4@QkxyPN?OrAP|K^vdr=x{gEa{zSS&*elPRDxDw-1?%I ze@N88!q58vXvt{V*&)DpKJQnx2(Z~y+LTjUrWZYl@$)zb^u9?>+VZq<$ojjPV7m*u z+t)tN_$fTYKnDELV{!L|J`)c@Q!Q>0|0Tsql53uNiGM3z9TiXEf-Wv>Ky_sWNPn(o zst-celQ9c(#$0Un_t+QLy3S6j!LiBBy+!ElpvbxqHrV)h)rRq~muEB@H>KN~XXPPn zt0r=C`NL!|<4}qW>u1>CIN*4orrQTQ;kVLj2B850Hi?!`9#c7?2@+=z^rh~4D>^Ge zK2I;kxPNC&#@^_AQVFWx~V42)EX8n8W@t0?QaS4#VQbhL9O!-8QG`c?Kokh%WXL;?l<=+U%?3Cp z{FHg^{mSlw7xSoFz}6LjNb-V1LC^N&>t9>*(>>08k0BhK;wr6D|Q&l-=~ zr283FNwIUQ09;tI=e)S$JNdDxZF z1Q4BgN!3U3&K7AP$s`pCC8=EgT=5jsL7|(kui62_({=qV4XadN$jZiiyA5+k+JE)n zx$sUq@RyzK)7Al&<>J*gTQwwX!m~p#Dv}Mp56it@FOo}vLi4zy*};&5_Q3)SA2J*cp3_cZ z(leM?-z&8ikB0L!>#J>{CzG=P%@J`Z=5{0w#)uf2TQ|big2`rTD%)|91d?JHsJ4q| zy(9Y*wJ-WWs4vhMLfCv&z<=~h6#FhLaGSJXq1M7DdQkPo6w!!A3>fn?uS>4Eza<6% zKaNd5llf9m+NZJUE{g`7hz{u7#d}pbKf=e;6MV1^{4;4E-9zkl+%xW^0cfXS~TU=oDGuVs?}H2i3<2RT-0TRldf#ZGtic6r03AJp*VV- z)dc1u#mwBxqt4{UfPYn8Qo;q1wln99YZc#gg-E2j>kG%~K8ADtZ~|cYiNt*SNpINh zvS6`JqbX_XgMs#U1}KU$M)G}CM9nXBsZqOz5|_O1rum^9@ft9TI2y))FnBTeglMcy zkhITVG^!X!+$EYu_x@EN$w|o6Vx_K30PdwbTXaWotv~w+mVXn7u00)#3k!@><`#gN z(m03sR$JaxNR%NhR72jcby-iRg>83smUGL-Zn3SYpdmKuq%IIE_5+~{-sRL!LOs() ziB2LLf#@`v*k&MKn#ERVJ_w}-phQZBY}YXWYtx0wW60}(s+6>SS=^pEie1D2`9DuCD;8DL-p2Q1Wjuj-IK~zKd8s2FAn{fCeA>g zhtVSkQXB*@FJ?1;L@Fqxf_rHYc776>mBTSS_*`|X^-}3lPS^y!!rsea?TRP>od_Uy z<+&QFD?|(p$b-t1NT`|FcVUH>8(+1gny+o!525-qc7GDqH{9m)FQQ0M&d65hohu&u z*)+J?`Ody@Py(lOKpzbZejv+W&}8}V3ZS@RC(sC`W5BH*9A7kymJX>=aN2|ZpoQq- zMFdnSt_oveoY$AQe3=*)X+y;tXr~^?keZ$Z@%Cw@2x#*M9Elvp9v+llJDG=@Q3Jq5 zJe7gLe1G)HQJTZ%WL}Pl_+|*Vy)+fHQR8Lc*+hkc49$U4fFOX1o>%tXorAaz8bDbY zevgB-V`xc$tX+m@5OP}5k4P+7aUA);Dl>Gc6UGs^2uYc9Y#I??+(i8zO%J*-X>(Coy$Yu#|BjC#Y z(0dp;VL2Le333$*S4h}4OZ+pci_IDKC(?6dAMg`^W`11SK)qpwm?jS4c!yNSN9|Ti z+OtgcE1YWN8MOvYM{go(xo&<23ZvEv_^!%Qg_bE5VH0Tpprw8eTzVtcAdh7^Uw@T! z>>mkRspGV~K>}TB^d3^`4mEgH+lRr5P%r!rDosG!5x>0$1oFCd-VJ!L3+waNTtOJmyI1k#SNDs zl}K{3YSfY@9gPA#svj#lDHz_!s(*B9uQXV0A`~R_I7f!zaMKi1Dk`qFx}d(w&44*Y+Tan&+@YYQ1BgavAR5Jx zJ1HJE(G?A-XRN;j`$F*g?M(&Z&|?jQZ&zLmX5qUY8P}YuHvxmg?y9`knt%FI(j_ab z02K4WyIvjBoDn{SrZXTyGnX`~x-w|t3sW@cBx#w?B^v0t`PQM*WC6P^k{Ja=)`yjHrAy6rWbc? zt0TE(2Hb*l>{%=A~gfPYQ*-nd?JIFvxqr)~_MWYmQ2Qy(TYa_C-eW}gUQad?sE zymw`L)4Nm$Z?uil@bd#4gRShqi5W=&y)_7c-8eMB_=$V9KQ`1Qc6NToR28q2#Cq~h z1q`{%z{BJ%aVY1NN(|##S6#S7yR5?lNeQm6D-)%^LnW^nEo*bGsejvxI7$pfN^838 zJ3Lnqt#5$Y&(|VG!B1jaK#FG?pvN`eUuXf!Lo=Sa5Cnxlw9s!7MJVIAr_{`Etpp|! zLhB-O7Yq|#k`cAerFuq&}%R1 zzN9{kHIa>AF)A5?2p=Q=2w9tFhzfbH)?@Uf&uX#ok3;EHRDZhQ%E`%(O%;XEEikru zO@~Vc@+TU4wXMdK14wZHj=HRi?u`%}6svc-{GClDbi3IK4888AdR-pMT9JeY)=4lg zpvQo)-o?DP0ti@ntXu_5GXV(BvZ5L0{Yasbv^G6lek$30{``T1Zc`w zRqt@vt$$Wq)>xI6CzF;^0dh`CEgd7ty%Tvwr8m=8Vu28sS?Y5urPPSuXOzn`!;$x)IM6G%XS>AtSnBe_*uJ`k9<<*9~w(LwP2V;?3I1R$BFm ze18bwA{wlorO13^2;nU6B|?mm=h)j_X?d~iNXsj<(}IY5P2z_9fUH&tG*LY%IK`$A zvY&M?vF^@7l5G%SMW&o6_10UFbMM7w(ym(?3FE+GWI8w21eeG~p5(dd*|rc=ei-Cp z&)a)B5-5RGU=9q%b7F=(JT{K5tq-umTz}cH7x|U&DERb(YM>wM-#*+GRo;2lQ|q?3 z>2v|o=UW&l93|bn+9o60QhG7={=6wZsoTe^y*op1aA>(9Q@l29 zs;MH8Um;W_Wd|BaBdT6x#O`bEL91-DS!LS zPinvYo*_h-sXH8;)9qI>$~tuP0$V{|qUQ(l)Rz>7>VD2|h{z!Y!c#ZV(9$0?R^I$< za0bxtG8X(ABQa8zkET-+v6r3-rPEPGd$?Hd7c#95YZ_dBzbUC-!8 z*VhBIcEk$%dpkYI*z!+8gAUw2V4X@Js)6ETy~2qZQtuyYt)tEc;xevd2!BJUey!m- zCzgzL>)y3aX4@h9GZCvO!)nj5fip4YKk}o0vY`L#|Nejezha^PT`T1Lk5&ll|3@n% zF#l(q-+xp>mj68fCYCx+OhsNwP$WCP)9(m;Z<5_YVVWC&+^*^?z5v zN`E-yWLU|Lu;4ifg+E@hm;G~b{6)etNFoS)8#Z$i{{B#a{~D40?7f^VL$`UQA`F#( z_92)<{C@fT0*Y5yCYHWZT*8sVn=3s+)w{Xgw7%ra{eVKxd!Vl#}y z{{u94e{W8sU(_6YXQ=T9A1vU1?fsR)&St)T7RtWKzwtZ&_0s*bm-DFwGJm@>I&^Pe z3?KmXMNH?K46~~89H3@4FpYrQ;F)I4*>RM`m1$@_9jVL8YBD>fL_uKQMS#@`pGHBk zaxzM!-IW+=!75=i3v#YYL{#P7bc;#SC;1|IXDFUPBp%YwCo*mGyK`K!<~*@CGi7k5 zYq1w$e;viZf9BeX^GpcW&wpQGr{cF1I+mzRf?*v9jeT)EN37iW|OpWjo55EOFv zbzPbWnfstB*O{vJN+@WL(RM@8Etb7Ey_WK0Z610IuNpO=cXR#Te*y??0oW_Dh!RhC ztJcN+f&E6R$iqlxAEbDb{>RQy@^OBrVyJI*lC>{M@hCwCqj=4&#eW8%6a&^4brpfG zR!AVTGUPxiKSh}RynFNfKK@r!>zx0ZCvf{FN*iaU!@XgI!eiUkc5<%n1f^cFsbZpM zyoV|C_^Vx*hEVaz(9CecP!)Q$DjMk+@s9HArmS=BA3Zi&q$~iHpwcNy?VuSd6^E>) ziCG$uTGCqHmZTrFHGft92rsc=q?#eB0q~%1uUV~2b6v0OIMB0^&%&qFqz<3jH5n@7 zjESsz=}a^b2+pE$xT?Lo&Y>{NaDMLq03ZNA>O@ivENu-TiYo=0;EzlTVUu#ty6RR;!X=mGw{uVt=|AHy9h9hcIL`B8%u# zfT)F=qqWnO#Q0JH8KOtD@LBRPXv$I9h^`$84DbbM`!I6*U_?a02TN_yG{@AzLsV4+ zTJE8sKeGe}pieu-w1w7eX9#w&jJG>E z#wdnwkL(yP=0smVf4eWEm)i%hLa>}S${<@GAA5j!C67&=H}z&573D7 zhXektS>^MDw4xsM9!AM}V@DHjo(}Zglt+vS--GgqZya;cxUeGvZ~DN3t#9>=_4^9A zX#jDm_z=|Lt-qBqDcc>S9JqMD0Cj$hilfo74VG+c40Mn;w)--JX(92+k;ih&UT44g z2V<~H(SKd6V{+-lZZY6EC(0TR||bGbBF(XGk@FAa)5{@qvM!(nCi=me~j*f%@~Q= z^uaH_v>g7-fIu*e$koayiDH;LFUkG-m@FDQ;3?#I5-&o~n*nXfpoUf{C2V#Ks%S36 zA*_CPkS8fhnfxHrFZl%G`B8S~1(ZlBqP3-Sz zi+{)plv!zt?o-C0-?33uq->Z#L#15_ZGjcC+FC)Cvv}aOrg3r*yPA)E=GI%>LYv-@ zGCI=+)arb(x;}%XsY8&NR^djWF!Xy*q)ieiY^f^Kd7rHVpd!}UaoAj~kRBY#=cEx> zG%WD2at34)Nv!{*@ydB?d9%s=oVgC10DlVk+SwsIKr$kVVnlrukWgLsjjbUvrn(fs zG7E|{7-2kV%Q;YOp-QRx_fPW`P zZYa}5P5@9yD#p6T$8f8WG?OLGv3sIQB5Djjywt(ZEfyHT4OY{wF$;z+kS*7odr4yO zGZpnnpma{J3~br0m|!b3+F|QKnN4YUHjLG98V_D)?#DUPuk)u}r@L+w_Qwwt(ap}3 zZdvDr4{5p+B#mqYctWA4PrUO@B7YVOF#xA5I|E66Ks7+hymvs1fs$8$gD!~{WZ^Zj z!*BA5!HKUJ%iMJw9$cW6dFEI2DyKIif)-%vQ%uD|NvrO8+w5q?*Ckp zU96Q&Qh=bPiBRXG9aq3!1UUPNPQj3h8>8ivyDY*;xol^FuAN(*IchFTF_}ao%zKk=|(qa(|_nZhj6x6ESz(vgKbSDMHfPw_~;3%M*6Fb>*US&6N{*H zt8ciFTG3|3C|>MppMeoG3?D;@`c>V^e|%BWn$!y1+%=FdpXC>d6lvI)evI_ATw=q( ziKFs261ARy!-4_ppdH9vf{kkBF#*n7qKskSd~D#ia`|4=&pK6R_J18P>?BqRMo8>c z7xqhpki4q)VRH=G129LBUH;R_5l~~Z{ypsMFATU}b(Q$qHS>AYGQaq|{sZsK9FN^+ z{a8(B&UmFx)>hA1%B@<;t&xWgYFURju;Tm$T=eVie%IyF525H67M8J4*qd}bL^t5% zCAKqGucEZu;ZOolPJh#fheIXdZEeCZ^1WF8F3+1oW#C1e_sb!7DbgQ67??yO)GI%fd8=Km+M@WVV&j<=x91|)Lf_6NQpAeDX-%KTrK?X;63~>vjxG)9xgi z2`XIUXy`WIYJZ z`4=l^%HP<$-#JF%3}19_sKHBfM`~=}@@1`mPx4HzsZwmrK9Zn&RrF|+jnW2V4uXg= zKDh%kj6_}r4ybT_q-OEezhIA$@+&OR_~x5s7{l~9W!Op#q?eVY0p7R{oIP+))|$q# z80hllhxma+wbg%)m*uks;D0fA>$zNt5adcQ6-p=XlbjuMe^g1OkYauaPIt!n}JF61{z<@8yYpJ-83I2E)zzp4L_*Zz?I?hCU22IA7&a-?X!w+ z>Ix>UqKa3TtIl|hzFjcoTlsJl6c5tZ%&;?)-h<&saB=BD6Mu)A-iii$$m<=TcXIpq zylle-DyMH5>BvG(6yvR|us;>yc6k#SnXUO}2Wn}0Jt?$pYert=Y<+L9 zaz1%DdS*T0*?){<%uC{o>9JN4Bp8VgNBJnN;GmskL7!?6uSFQ->lCr<+>Xx5E5Cr0 z`XI_5GUzbcpOWu%x515~)gg_p;=3QWkW2S73q5F}XTA44*lLqV$5c1HA(3zZ*$>w~ z-aY}dZ{mySnhTnZu$>r&cTeNuw0Z>TEvNvB8roUDSARIjPS}h^_|x6W^kBf7#?)xy zoT9w)ahgk1a?9^2XP43ll#^tcVV;yoC0IE}`D_KIlR~Bj-#YRu6{MW!mixkzLqO~c z>N~XAmf4$2MW;=3tY#PsWIWU$#G4$3MA8Vj+c0xs2RApV(9v%)ionYPCe9F5^A|WD z%c*zY*nhn70I?N48n}^9N7iV^I0{E2(rbTU$$c&XJv*suUF_!;?Kq_YzK6b@1yUvl z*RJ6+qOSZs`#!tdlc69M1vwi&U_VhqTlvd&^rK_IL)fltc!DrufT-gu8i>`wKyWqzEoZmgurR1iP`^Yxqh_MU#;yMF}5GxF!osFBi3fs6-H^Fa| zXiJQQXFE&pH2~?4X*Q-pLQft&j18|IMt`ZTN->MTYOfWq63hvvPVNw^nVH|X4Md&} zgCkT)VKe__^%}Lt4Ay-x7zjBb=t^Hd>Z^MP1ZSrlLGY@;j%ht%Vx14ux$zJU)biAr zExlmGH5}Km{bi#$PJO2UIxQ;_BHrNJ`i`^z76dGe&&9|UT<@Y{6Y0Qis?3Wmn19mg zConXgNb98Msn&;H@2JK#70%6OYnh9r1jiOqlp$YdI_J%1!)o8$4*l7u^PKZf&rl}T zF)kSgljx7qJ7&tkn;$U6Kjy}3)WN}CoeY-_eP8U%-!&`w8%vESWB2$z4emoeI91^b zAq6Z@{lgAIs&tXiXo{LtbhH86TgQ}PuS3f#ot_3BLtGKjJd+6uSirVLMBLn zd+m{*l4ug1u2pfzYDg%S)=z3_bO;E=P%CZ7B4bA%_^9Owhv^Wp)ZLgb)8pbH!4tH|WRext38`Z5H zAo=0LZ~bIeVnZy!YVs%0-~CqN$>fW7cS!5v+mzVVuz68eW965o7*nTWbw5+FZ)t!{ z6+jchXV8~IS=uW;TC0?#a2Z#=v z*7Hyx_O&2okP+I&-k)89n}613uAnB}twpjx14dq>bb9YVTltstEqUv1Vo$yN;H253 zxR@2u-E4gGI|bqEFH8Jgf(NefO{&naxHegViUvKhXXZ_)Z7R_V^iD*ggR7su|5moy z4lDizy+{mNLp8p{{O>ERCFQ{fEIfnhk!!Stka&Q<7zuJIBv5Vh=T!>#B9H4)@&J#oRBi@A~`S zaYujWIY4^PPe56g<$p8${V#Lw{}9;(4j_CBu5HNwecrMg28*>}Bu&{kxwhV;b zx@bf3$hLeP%N)bjsYu_lp3&HA{{&_nceTDvp4oLSnIllBALnB+-F@s>w6<>iiLYm# zf3{j)zWM55=zsXX`eSQmR7RPN*?xZNagJE#*L(~o$lm;G+EHRbkgkW;l<5k5K*|CM zj9q7&VF*yy_NYrYA&bi6(D?>g&!7whHD$)U+l;tWAsW&i(nioKB3~8hm9hZnce8E@ zN_=xb0i@P?MInLY2y|J0Z+>E+pV}~b2#~kWWGEp+C4Yp5#zwCgXnD$Q-ZN#B;2Ez_ z4gydJrdrh3;gG0|?1v)`HJktu{4%Such8uPD=xg4NfSX9k#DdotfaWh_E#uEw7DjgNAu|sml*YjVMM6|6)5Ba2-B;byO(WKrc@#d9jB#d(uy)z=bVKpk zHml%06nKfbp0}AN)xMX7x>ya9<+hETW+*L;i13xRvMF6QVjQ!wpk$_aXR8!bfk?$+ zsnKVgU3$5d2+&G^***>iwx?Wk#^Tg+T(AQ4$A1k}I@IP_Y*$xV;Vf6tt7y}nk6hx7 zhE`kmf87$hXFn<{SI{hi-M>^!k!+y8|Ha#z_NwnQ3xeNe-i?k%1PI%0jWlJ)Nt_Lw z4Z@od1aLM85Woqo`R*f!bAHSHKlkZrwKPXMB96fHtF!{Zej&Cqq%6Z=@x6K_Ks zAFhWc*;pDOZMy7icbz|rB6#Z^-eg}E&VLb!54slIq7t8cb*qnWZgaf{*zoVUyWNy& zHXYSMG23=MfhSH*jbbX4PP655KAM_`qj!7ZXnn5`<63o#jNqVPlh7qGlx3J-$Itg9 z*cl5FzDXIaCS7WD$NTE>T3o!!-s)c$`*xpZBwSt>tI0XNqOIYSvjn1*Jvb%24!%H#=+(}U2l(yi;Uc%<*QiR zRnlh0=r+2M`a+MJ<|{1g%?fp~Q%`8p-1PbC<(<`CLX1uJ%++^-?aX7Vb|&WtT`s{!tW~zW zO@)~yU1^!~r0e<`OqAPcD{jZ@BpSq1=dMo1t326ykGN~}S@hzs`6zz+>AdM`_mON`NEK&Vp0Z?i zHqGv86vwq`b+dOCdE>wtAAi?3|K_bTh?J;251iXOdv>>-E3`RZJuus!PfORX2@JkXnt)I-YzZQ4zER?p>lXM~p|I!{;Y0hHGKx?8-yCW1!d)TU>A zKWbIG0Y16qMzo{hL35Vp_wXH&IzP?s!;mHMBafoP!_5xL)}zx2K7SoX+l<1cT6NL* z*6f&jR)f&D>MEnpAaK>Z^X;HD=%Q{GTkC!E^J(@bo6B@oOsrz;>RLV6G8`V~Mp)Ki((aPO7m_5ojTUh>ZI)u6zt}o;1(|=Xd&eE0P z>@j=>{&+U@@p?L(PJh|y3#Zwkz0bF|KrvwL4Gd#(Ke?9_o7Wxo->8m9b8{Fil0*9S zjD~wfEN(TuZ#4AE?^)JZ`f|G89o>Wu?e%l$f$^?v(oVGSYx}m(bRpJGy*;1_!_kYC z=IStiA7Z!4>t`EwYh^e6iVmK;UfS3j1y^#4^Ub;C=jkEbu74@ozs#;$)Q?fNZo&$m-+7(MdNrkcZ%vZLi)!!uizZ`FvV%J8|jl-YSOOqcud zHXhGF34cPl#^>nO!){MJQ2@)1`Z#AqvmtsoKCL4q^bawU3p1AJ48+LU*rmvaRPzB;f>2QQTChb?OdH zU4J7B$ELqo><{CEqHm7(=is%Saql)I569KJZJoScHo5K2r)HldvtxU$HeZ9I z{dFlG<G4SCA<53?>)qv~*~WVh^I0*k%~Qx=qAtAC)* zcTN7Rhi9Yahi4cc6?Hg|+BDiUL-m{g{X6Pa~on3~$rn(PNsgA8*T63!}JQ>+wQY!eVm0EyCohy;z7x zN`Be9M2E!hkDq+)59&d<%g30JOn+ES)qskZN!mDANiXqxG0>FxYu_$T?j{e`IL6zJ zt;e?_-d#3bJ64qY+vSRDf&!ZZr}KX6eseGou=;y#;XjrZJ|4eT7Je_0TlV`X3&g{r z`*O=2O7ljpZ0lFnCB|o^pj;UU0IAuBzkYGJFpjTtPy~S8_A3RO?FC)-Yk&Iu8q4;r zj6s*0@x3zevsO{`A_m!IZ&v5~b5#%I-KE_1?7ZF?ake#lpUFjz=w9|K6VLrhz<%W) z0PN3wxa-&Zjjyh~QrmL?qwP-(p6`uCk;K_~BM)*%x4u0yJ}dv^N);@w1A_X?WYhs6EXbl2 zVZy|WB@QYbf|Y|G&}_+6%YdCtRF~oDSG^L64w&em3SU{I4LMk z3R*YX(bd-9o`{s7)#p)Qn^5If}aX43e6f8|qzJnIN7<&Nm?3 zwe8m%7di|36u(+}@&zG?dwWe#q_yh83u$GFgb+oU1e&_jE4Bo`@sMyet&JM!eBqPC zAbft-cmOv*$iM2?ZE1fS9Vzu--K9pGm@TA`QL2++eY-V@hN8eks_BE$kdeuiEcp?u&iDKD|blHOY)vj0v3OA6j->SF(abcIC-?d z%)oo3Mu*XHpfie5OP{25!+@(B>NArB^DW}@(jOkCI`ZzzfDiku8JD67+JV_mxiLLs zKp$ZKm7Afdj9#yYVt;?pL@?kR1n1q=$UqDEX#WMkzeoJ ztU%N%1!k?8f~>BxWUl8f~l4{xt+!UK9l!lKbW@7!L!g3cl2RyTw+`jVhC4X7UGdd%)mgBRU2JF zP>W%1;FdcC)t9ATCc5t}?JOZUBs;C0cn&5M;$&in(b1VTK0%5SvIe1cu_Zpb$KVDa zXt`A2%Ta%b35-|-mmbEhiUeIEe=beAsr&q5A~}9)U_=YdB8)V8b{Mmq5OyY>ORL8Q zUsS8P{gkKKqr2N-PC8qV-0IwqZu#KlVo5yqc%w&z(zX8E1{6Jiz=`nmcUBp3*r)VcrEe5K%8LQMK<>N2sl9? zs0n_fBRt$c9LlsTvN1G)k?|Y^K?o?*AR&6L>LBFF1S!$T;*NM75Sjj!dPj2xg|JU> zpF@8v^}fRRNAI&I2_dhmv<9={!{GqcxvitsJXFv4G-Ta5Y zfzKJu(l{z~YKSTh0(w|w!U+PhH=*Q_cGW}HTzrMbNYnv&r-_ExWR(+x228g{%5$?; zHEmqw+Y5Ct&(@~Tsg6awZ25*{iV2K%T1I~j$cfnJE)NanTd{g@IQ7xGPWLovTpH}TPEt^BG*6U50@$KdOMc~uBl*%$ zK70wJv)iA!_B_^o^O1>K_IBSr?~VSHRo$2OlVA6Hd`VvJZD}O&oA-L+hu42>#QT3+ zUxcin-EZH=3;SQ$R-qm3ZHApCOK;aUkfm3bQ6071+^<#5_iHaeO@h0JrHT0!^SOb1 zT{E&oTUKa7+58SHm3D$^nI9$~xFHl>L-4fPNF$i+^>;bZ-c5=9W(S-tksys%Bzr}%wp(#EI_YMcgNgKupEbU7WJ9 z_=gW7^+e~%&G+B#Z#~etKl;vGiqYHEL7OhZ#3 zJz&i;>)smYo|g>`-zT>2%QJkd%a)go+tyfH!$i+tnkty9w@pdp#?^Uqc362|>&$X8 zHGq@rBj9UYBS*?yggG}gpXEqMKWE!CG1$P$E3{O@c&mSWNlG*Xu^}2WynnG36Sf|3 z3&}TS+GbGjF^JA!s-yF?uyJ}LKK(jY zht(46p3YL6;Q3o0&vl)%ZYh!#^MeE(;|%k3WxVG;&eEK)D2TMoFol0gfYlTUC7_H; z-Z9PL@Nn1Wr`=oJv`m@ksNS0}fAlW}@ntX*9+rBOa+Cw)!&O&qFwlR>`QLuBl;Pia zVls~SmiZr?eBzAXcK`iHd;*Et%XFSxzWK`3eYWZS@^3%you}-+P4-uHOrWIK?h5x0+v$2)H3aHP=AdC%bLwFoARs2>g0V zz@^JrxWJSUIN$gF0Ki4ogI|u7^S7g>ivp9jDUYoKcGv@*jEs^7)El-fMg2X zr8-MiCZt4@8&><}p0rGL{A5i;8KT@Zah9NInK%s8tHw%-SR#9PIS?5Z*&3{MLsN?= zLhKx|bd7)*y0tr9QBCwtwa5ye{VK^%bD{#H*g_;^tJxtalAFoI8hq;MK1+3xn84NC z4Ii@`V`P8)RxJ+%$BbZT`B$(WEs4J~E<4CEkOS#IurvqL)nJ8mq6PAxG1$FB%z8Tf z_Nji^{v)(KU3xlrZ#Mi*A6MF%o(KQXrKa^}=09{tm;M!#dm-!N1*fA|P1HUOn*dn3 zTDEf&LxK^|>AK@PgR2O_%CtK2h;UgOm<5Orb!mS~HEe;$1d@8u&s}-c@6yZbLCly& zBQoK%9ZE_7|bu+6I&5sJ;iLSBW*~RI@$q16Zt9oc>uEI_PM}^Nz4F#ox z{I+>7NCOg|jBZ_UHKKUzun;z}?R%9mP50Tscq@VS$$AY2Uvn+x_COxYi0Z=cz{{g=(;@_qIfGXuQz6{gc`0jA|ijy%v*QM6b&IXd+NT!b1?@#dBwiUy<&U1 zh0K=+x#_E~1t==M%L4~V3fPHeZYGa1rg!}_?|{7fJk7t1x4k$0)(wB?LGkVXb8@Dg z8|(0C`v*Vwap)~e+0joi1lxe{gdkXGW}9NIij?)HsckeFAHwJhqq}h+OwoY*uHb*Z z55|O<#FM;#*+R`fqWxSDENe13Q3G%w=(#A z>y8yy&l9{4fB@qz+SobDD8^Vhc_Q&Ok=~@fJgyc`PA#G}|H#3--6HAq z|7_#`$RGCc(jPg*zvc9Ij_ViRLiT@&-T8Oim8EZ&Q`N}QV96_^;bpw^HP?phSG;tBK3+Nf(hK3P?doLf zFph9W1Jfr5oi$T8a<4MpUt!esE>a{5?Mm}zUkQP z*o$P8KpR3=iB!*1NvG{#09ax60CWN9?lFt^ac!r!-#Gv1*+2L{QvSg4@bCEd51sNy z?#k+tPh9cS*N28u$NU{P$o?A{Th=RsryB6ITE_0wk$yz$@|}`Y7Bm=%iM!OZqerNU z1Tk@$WF{~V`K)!avPFLxthBaXJ7Zp(am%)-uWITW7OZc%w5r@_isEUKkT?AJ2EcMk7jicslbv7knqW-=FU zvAN8n)P~=Lx;DgAp&Ih9A-Z}K?ael;mW57Nu22gr9cX_|GLcKU^}2&;knGC~=*i3K zK!1l-3gws4HXExYn*ioRzGaJ53yy{&M7R>`NX;PRkzT}kB-U7Gu4@j(a}^#Q8$6Fm z#_Z4gh0CB3t|8-kPrhI1Y8iLl$w%&fa-N@<=V$Ig@@=E~dZ}OkvSVJ@%PY3Jq%%U1 zV-)+YlEi;~{`(q-Hd8ftv)n*D(4e8qh*|DwaS1bO63&QjBngyCsB2PBMuWH^Uz?CF zQ=@)cEHRDF)o82C34gE^Qpf>V?RFeCkLnh*p2tE_fs0Q__aB1ORFYy7yJ z4W54l*(gnQ#%%)wwyv_cwI_Yq{L#X5lksE6*s`V zx7R8F(eggOIq&^$^qqJAi8b89{n6Xf9*_RO`%kR-8>elw*0X5+hmHPEzS8%9?-%uX z(I30<9e@6ryH76PHl(-RNF)gRTd=!ihM|9P4a5#ukYx~;J}O*QAt>1=>2=$aIgXQAmu)ia{mVX zx7_#o^S6D~-0rvU{_{@cSFZZc9M*?6-?8OSAK$q_`L6g!FIN8%rZ#TX7)1SMP+~0~(UgB)kDd@KFX9fuX{7PUF?}V&;O;z#WDJgQMBXv0 z&yY6(K?fW82W7Kegp~8MHku4gQ68*}FO7F?L#V#6pPrq`^Tf|O*0}rSWXz<6Ds^fA zEIA@XSBOO+0RsFSxu~ssz2Zn-A4XEI+@)TLvRdjD^vMXY}!D{#UNBbN_n(_jzZK;(d*5y)AtN^_co3Fu?U!Yt$P4 z7>t0$1l+YT^edga1dAxcOu^{w9+9A?<8wVu=7(i=3TemJNL=@3n`-(M1zI@2&U|HL zg8URfcywZGMlgR77Evo>CM$o*c2y)k6snGWwzqj>f)57P(k^6;j)3|aS{yfRB7Fc; z14?!5%7eT~xPc8YqCxH+Of6-IJ(edn$A~dZSi`jJD)y~B>{~H#273fFS!q1{wb}*E z?~S_XuP;h+LH4jAXvnP%Sewp$F*h0;~;*K*IBv@szMHT^r zxh-fMF9ekErr_}JDarXN#b5(yht|$ua+>)lD*yEkX>!vr?rz{NppWh98q38+Kn2dZ zJ@5B118C&|@4t65`?=Cy1hi)MN57G=)wgm17M7paxP0*Du;zar1zRT9@M^=qNqtkl zkxPDe+b>;W^0)s2NSBp;;z+K_`lbU0SaZw=gZZaDAyBUdgTdEoAO?%CzfOyE033`3 z=yBr5C8y-60!;j121YczCXL4S4bHZSk?_2VlLly&?92XH$;d5|tjXveV7&)moktqE z^BnOw%SQr&PEdaXot-^k2fG>V$fE(bp+@kAmc>9UtwEkf4aC`pa|KE+UE?!%~2bl5KqM zWV|WcIeQ?E1DZX9m<@SB12=2XR+Dd2KCtDU8jLqCVhw+=^ZHI}U_;GcdwK&~eJ!8a z#oMTQB(klcUK8pd7U?`|2-+GQ7E2u^bPwt}E_uU<$CSA6*LvCDjIFWJ{`4nTPWA1y zPdg#oNj%rrj5=UPddB5_`svS3P{SK-EBg@M8TN@@ZP)%&K)x0l@rnjeuyz1~nQY6m z23pNfp2B~u`u4|QWT*sCA@3(Mu7hE1*K0pp?*ZAvYD;GTk)Bm@+*=3M5|`^>Bv2r@ zA7FuSoNX}{Nb%U(0h21Wz}Uk^NBTYzJQ64uADn`Fr$*4Ck%g%PRKlV%PK+5rTIA_T z|GN~hMtU*>-OdSbAk!$Vd430E&xk<5S|Ggj*9L!L#z+s=F)!AEzNHR=XqXEKnmyek zSO>po_h=w(8mKgTCRr3|389Y$c+)^xu-xS`L&(mCkY8&xpcf73yn*!Q?7?y{^z+n8 zuXqB3B*%Tf4MBXp-U(QH+;=|n#Cg|YrUFx9Z0ANNW$K&19LM>mENED8N+R9>xl25= z84!PK4ct%aA_-7Q9pE3D!M@Ru#4k5y2-a~a>B`ndylud(=tw6&W6*+s-)Dn;*$?#n z{KC;$Ko=EB{;`J}p|}8^F2`YXZnh5U%-O@raR`QTEw+YEoINn7e?IUA z7FuwZJi~bd^MX}pL5OGfe2Ar&_u(HB2;P4nlyC2Vk`Ai~P?6_e)*xL=Ft!s38+e#@ zBSG@kiS@iihELW(AhdN<9Pvnyu63d~bE4sQo!?}#o{T>AI__FPmKMi(OEODqK+o76 ze0z8nK$ZgX7&mN=t;lW((2Csw={-NVRix)FFr+085G%6nJjXU28G_(CqccEB@VkF3 z`B&1k`SgLU_2>S24=bh|<^0XW0sK$ikvvB~A2P;w!p4d)PP*L3+?U8pMB_hSAvWM#2cX7TUgk=QxCX>YJuvQD1N3u0>g2 ze{dXPe(F{Evj0=xbuIS$`n!YO-}Ik>k&s8U#x8g6fHr=UQ7N}rG_=El7~gVpnz1*`LafzFU&l6za1F>*^#;p$g-q7Z5be8NZa~uK`H;)F>reTE3`O&Ux zAqP3X(s79PsgIh5vA*8QyA}m~{l#&J@u`>iP5XK~=vs{X`kUhr>r<~bRO#0FQCMMr=5S3N25X0;Q775bE&`DUVnA)@A(XS*%G_GEvF>q)PW@% zEu~E6)RD5B3ud5gPpc6WNyzs-eKh(950WJ+W845)HyXJ$EJAt|C$3)ZX(eIZ1{_$~ zv;VK~tfAWaGjFZu^>27x|F7mDb>6?5=cgXlk8d}}*VJO^5C#0`MuUHm@N&Ud7@7Zm z{#XE!_!@KZT7?kVhgOe_ia)?X$O2oYx%s{ zKifpyDbvF5Jn35)RIz_s4G?bNq1hc4iS#j~U5)cM!k|<1d?TFbMpfaD-MmWMEZduO zL<;|m1uB;U&i{(Xu(ZuTehgtFRMHvfY^0YGzleC5(U(zg(;Be;1@1{frLTdL!aoz$ z=#~PPo*`vGj^#5BUGlFX{l$Mp1F26SXo?25Qnw>}2D;SO(yxE`SL2rcUh?sC_!cjv zpU1ZnFMdkBX-MQqORqzLl1Gd!N6eEKw$9`-{ z$RCZSHj-Bxlosp`nzaXPMITxVR(TrOo<*O{xZ>D%kgSX@X@; zV8|6;-@LW{ujPN-$z4vh&R$}-*R!kc?{TvZp5L_Ri32<_cK;HGzj+XW)ca1klYDM8 z=%B52*hf{BP_5lMcOt-NcV~~#KjG*;(*FU6|6PC5PWaLuM?0Y_UJ_*wwvIVp>E*Kt z0qIh|61Qoz`u-1C^a%b5%bG@O?s*P;^^q5eoB!c_B(HxtcMwv?``@(x$Y1hJ_e0iz zC|Lf@Cwp*M*A!BxIJv9-etyz4td{=BAAS6<$*@2BH?kt-qr-N7mVUUeB?c=Rh_z(h z`;df<$i6S@(trHd@{{zux$W)C3cIS&5l%@`_%qqJ)T8?L_x#ePVKBA3Y3l-sB)$Ua z!)eW(20ec-3M6gzJO9+nv3FD9_H9ckFEJ&@kKZc&Y(I)fHx*u9KN?d159*~K14u_p z;dQbsnno*c&K`Na-Ylb{*VA=fBw_w*d|}=REd2+fNPb*74h0~Iyk`A%)L;9KL(%vB zpuhgy4?pm!ypBHb{dN3-@2}S%`2Kq1I5H1_H2Ht*HSXShjL|iZMlYL25CGyzxSiv; zNcyimTqofGec(jX$UH&R!yS4!(0jN|-o1g5Z@78WZ~$NWa0bY0^tmQY!^!u3DzCrq zyFGj$X<@J){&OAm@Sp3XhyPrsavXBUcsY)`pBFi{MH?2&)W-ohCsGC_I$0}8$u-faMCveF1{2SKa8Z&Z%B=gr!O*sHU1k+^bf7*fEnzh=li2=mjRg zk)aNZ61D=>U^1D!tif=>X250s*p)4{QhsmQNvydHgv@LDfGRLu*fDegLj`{dD3tAX z3{|q62vh+U^1k&c!&7$sKk?iaH1_9nRr%aN!W#tk{F6^j!DYFS-*fH-YGiw#qn8}#L9X*P=5lA#i=-=+pNe8tj)#L* z%lSC+`LUe8&S4{v){h6!6^wsWw#(%Fc753eN7Bcb%lZ;XBcC71e%F9bKwMdFzVXn> zeo~P0Db&kn=zE z^RFddR0+55@sh%d!}b3U{`nWEaR($SFP{qWhIBSnrYlyox`*vj$$&+$nwSC~uqp5E_XK7aA^haP)@&Js@tIes}_ z`5dE!RLEcPk%tex-o1Z5lkLpDT=wIa_%fN~>q_A#Zn*h>8T=jW1t_S^G; zo=EuaTYtPh`UBct&e&%@@%Q}xtMO5g^GGCKuChL1D3`zey#B_2_B}2HI{9wLV9CLj z^@m<=K$c(fyU|pjLgHs9^@Pdf^Pk`2Z2sg&N1zpu1VZ@m-$8$VNv_r42zW{OM11%& z?fZ8npS)#x5Agh@U+^44feeYKKVMf3RQG2mxc}mSp!w&B9O&&v2{;0a!v!#x{C@*1 z(?0N!<8_3TTO#{=OL_1Zj6h4}cfb=l-lc#!XbJ*;81B!daETlYk?W%d0Q3Lz_pStw z`}6qduE+oFfB%1f{x7*B!T)jErN{sNeRa;O?mz5Zc-;Kocvt28zPo05`rrE6=FwHh z|G3C*s?Qy9%z5_s4^>hATXRqU4d}FThpqrZ)m$PNL+%7tr zSLXudy5XSdnjd!4VON~M+ikR{MoOul$7JHfU4U&n$3;Y(#rT}oUTw4ccFU~hv3LfT)z&kV$#7~} z)#iPN1qWLU8Co1)hjrQ%;v~%F)!n4WS-jt+XdJTR8m;fhb~B@yZO3DS+)&|eH&%3B zOun3A_cc_;5%^iNxK{VHSe7{p6my+;^<~7?c<+DEki2F$b(TykW>=ScfSzBk(K%Ut zy*KH8e0#M?mnWlUkB0i*HfJqeKh0o$>PAWz7(}TyMS57x7skZT7j;Z7_;ikz8#+i^ zJRNJNgLj;!t^JygeSbQ!4x{Du1vKDfMdgV$+iEumOxvGTt+JTCmVPi@)cd!Hu8&!= zNnn3(aYFzkZ8>BSXl1k4BQ%{fJo8th(ec%sqQTKRyD85XkUFz6PJy}JX)&#~*bisc zTuX=J)Evz0TRD4PLgAEXIvI>-7kCr%aI22?1n_7uSse>h+>9>D9Cv;l?#RiqPBW~I zTBa9=Lo&S`(_L~%@1FN{pAUtkIJbz5i0LE?Qa^P&V z*EC$OlE8HGYR}6-H_o^9&GD_ObNT+Mh8Vu+cAcZkaxyN>m7+hUo*@#}sndgIj8lI~ zP&pdt!)6t%VcSht$BImIp*0y#>5dhRxaCR{?1u*yyl->d9VQBT4$tG=7upx4wsec> z`hAR*nOya8w)k#I8uB`izP?C#%T2xBuB_b!RU~SR3w-%vX|qXmRF}f5V@9eeXPBNB zohz;~7M+2s9t128=eLsA6Z^RfJ8FNm?RB?Mt75r}$!13Z7XhaH(2~5lMy{tLj}fcJ zo8Y;8-~~GF9$uM1RV?{2Oo+nIH( zv*d>S;$D0u*>kS1t66d&Vj}E=o_yV&uwCu^mS!Xzrz_HE_IdC82|*T}9*0_(AP3YOjao;oucXHLG`TC?NCX)Y6fxo;1TU``h} zK+9BTNxss|wE8LcLaMZ{Qm1_gh|-hTNzlipjlomqnj z_H=MiYhjThVA^E}t4w#_Vv~)Nc_-}2<}h41b7%j)O?MmgUJ+1Xx;?6>%vAlhJ1vpX zL6BS)H^(CF#-Cl)g5IpT-g>N_wy_n5qpFGy+b@+hp&8QVWweby8sqIU&+gtCJx+@z zpX!IfFn@fdFTQ`w_HC(Tm*hPTP)#0#_HoPi$lg`uC`>bQ@q||betzL*>P^?YakeGQ z?b~971yR&t#Lr=Rb8%~%n<%W*Wk7uzSQpoDe7DsnZn2MvpE|Lv&dX_eC=d78rZ=9e zu0MHCH}p1IW>+*d#q)|S@bA(1xc$oTIe#A3xicZ>%j9NFtcN5)ukIMj#q4E-7_AXo}OcR=ks8P65s5`<(ME+JmTfmTrZW= zj3&`?Cmdp>$o8x0IVEqH^Ovh2cN0&pwbflNLD=T2%pTFf_+X}k>ZRn@DyYY^(X^(Q zM{wR7(H?(iNiY$FlC1UOW~&KK)k-)g$KCgu(v-XxD`wH&$AI;Ps1*&e>*`_m>VenDdB$-^|^jx)Ek zI(rE%J71^eHIAnFGa_!LyyNFBitZzHRbw}c+jxIG3|xHS`?=kXre-#pnQVV5>c^{9 zmSc6TarC?@40i{-n9>ACQNHuNE9jSlj>lG0%@YFK&a#E=v&X2*5_cGgXzRCkPt<#F zetnHQ9kMpBMvGN!3Wkf}VegU2({m?J4^N(+J$ZV@G@pSRf%+)Jf zozGW$ch=asEWXI(Fi^wiG=wibY%+d}y+wcV&U`jsF3MMJk<05Dpo5Z~{V2UC32V#jbGJw8nKbKLx1Tq<1e#H#t6M ztTg@Tw&L+#e*7wi%n1v11npIYd-09`RxGRM+08R<85^gIDlgx8rd!QOg^myW^MQBs$^C1vE-!kC?fcj2{MC5& z)sBlGI&bapwN|%QmAoHn6X~HgJFilIip-?Ast!H0TDe&6viU}g3)Rx?=>uh5Q_i(~ zupPod$@Mojj$RNAgND>MpWje5jv#+|cH`%$EaMe6k(M+YPJCa#&6X==n2nY6bh}Jk z1)mx;Q;);-d9>cRtL|>hlQb!#w}IDRly+L8->OQT)a1S>u8;U&1nQh#%km!7mpW`q%NLu3)@8}&d2_7wsdG^SzEEe6?0D_a2Cbc474O2z zc}L3x(p+dK#mztExd{;(_YbDPJCsV+$tiS&Zskey{Cr zrdeL{{}T2dI}a*hyX6HjAg6yp56C%ZX5?UUhO2LVd;hV|ky@T$n{pMDE%BOF&q}Jz zGHVV_XY^>4buvW+W$w1E5uPmad25C+$<_nKFEWWIYRj=U^xjO5vH{0PryfND;wI^b zrIyNv1pLM?1u(d6lY7fY1|rmz&v&`Vi{*NQ?+?XoKiGabF2rxw~=Yu+Ge^I;g^Fmmx5 z>QcU0?3j3GW7eDsPVRq9)JuN$uWv-rI)MW~yOUm*HO9TVHjE!WNpDbl-B{LAvnnKZ zA$cXPrllPZN$o+f2L_BDM4cKs0bL!cKCd<)y|1!Q0ztihsGz;hz}x)j^vWM2z~V6r z;DAgAyYzuI>$}boJcc{Iqbtsaq(6`|S$PY}F=$`dz=zju z9|LaWd2QD)j>th1zzz05VLlOvl+vB2m>({0rM zEd43Pp$!N^NL8WEH}myj_#r0d(_fQ3!*MaAu&xsNd^Kt94460WgUc9uQw!Cmr1Ef3Hj}8A49}?3i*;yYoA917Lk%1Wk@=9BX$b@?*sAPgagnC`UgD>k> zUWstb1BMy9q{D70$F^F=1RROjM?h|VKf#>*bEc-F z+3O;dgq~zjPp_NOJbHg2K19A}m8FRc=sg%UAFnWblivtu))PU`Tp@060Nd5x_H&OuMJkIl z7UEG9d01X1w|{%uQ9cgTQ^}>;0pj-zTKNy6bOu9^L0^9Q5FDVpj0Aa;B{Ip{ZMSLi z*DAy>vr=JZV0PO>D_U)?`v)WT<10e!gg1YemX(f`(@<6>CYQ<@r1Qn&rIn$lhsEHq z?^P&gd8P?u0u6(@TG4XxW%t6)0~XNglFMT#d9z&RHOai=**?uFmD<8*seOh&rNG!4 zn1WOJ8scvB81Q|TqQMST!2E)K{iXDM#l3+7tpV9MLM1>+@?wDa&&-Gx2tAVQn+&XoZG7siE%4%u6E>ng~wZpKbvBm21^oqwNu$`^94vdP!Q~V;+u@VU5+{4HPRR6NE}3 znXHcq?9(jy&AusUJO}Sl0aP|R6zYElKg@9^0r9+XYUX7_0|U!rB|=&0LRx{ey)x7q zYb3f+AuiJ6aC&Q#UzKJ9ZbUdXq6Dl{>vxozDCb@*rJk{Yk&k7Nan(Wv(xTSJfIOE2 zs^9>p{p7Xd?qv9ls}6B&ky&U$jRpDbv2~+hI9puc>>csV?=vbrutNnl6ABs1w z_l0Rj3%1Hh_PvS<+mNTKj$4ga7#oky)B$ZL%qa8OV>UyPm*LKSnNMXJRZ?|j@4?Z*^bjKg*Bb-Z9VnRjKa2th zo`NwaTXBQQO+Tb`e3gOyX}y2<(gqmgDL8@KpXLLkuG<2za^X`L17mg(Mqvk z)d?2D6V;pn88JYh;YUfE;Rrr_&UaqtO2?nkSN0yl%Ck7SCT$ccSuSF>HD;z=sCJ}5 zYx#8r;cw@%<{06e?0DH>RXo75fU!UK^_r4_Ipdk`jCOO9CB<;|mMVWF-$qe8)YtyEQ6#NGD74AN7(9v$VAaMXACZvDklnGIC`g@8o@gi+?eX~}S* zH>GBu0<}e8{q}8v8}iu9xah9BaiThZU^;L=Q%JGr z1P><{zn9=n=?R(KwpJRy8gyKh2Hic@cEWpO%sg4@b%(I~XaC%xLqW3_Ypf>dxx#;| zaf55^Xrn(qhZLuUx_NCChR9>Z$%f@6?o|0zs~V5Q(6~t1yRv_+|NeN14!ndP2LJ`t zF51-Y0%k4(NSM9$atXqQ%H$V@sT5iLhiZtkgXJpJoBI&kLb*uhI?()h2lm~Q7Kz#( zJx4nkBlH;9puS}K9fakLr2D$T0B`oUk1v{gdzrjm%4)u>hy3)NI$^W1h_cKVlEcX? zCySE(>xT(4IRSr>r`TSwh5d5fO$~W?rg(|3gRwBPmQQusW6CGjQOKRFbX2_*wsZ8g zfya#VZs9I8ABUN+l(8vaWShJD$#ozyr(Nwq((t)3EZ007otao?gA*7&)(aYi%!^1H zJbL+V7RwoOL3DK$!-JA)lPsYsuPm{64mFr`$@%saSD=47qi-r>XXWm`wGs74NE@0a zG`ZWXcPqL8PRsj~57ROti1vw_zYU-jM?Tl-kGi{^fJsvMbrbe$c}%}{=Ie*i%Q}2P`JuLA7`n{5JJoT})*yObuh6<6YXA`#FHEl0;@}SLLh;)7zBGOQZ%TB zeTR``yEMNR>*plQ6~&UQFWN)1Xj-Z^+f8J^@yLIex!x9{?$oB!&Vvp&IN+nOeuQ86 zJNjJh2B8qkeFvOw3ZO|GWQ^>dqd4gqnza7DD8XYrs>H_}IPBkokx zw`&nNa|}xUQq};!WYV`aB}L$YD;Q~&urH{Nt6%cAEM2Y##+5sG&iJO|gy}4)+Zwpy z3Cn*U{^Y^_m{*&NhHEH!p|`sJrH!J3-xfGcU!N zAo2aaA%C#|Za3);hnrSs|He7Ca7lD#F{dL?mFi1#0JbF44Hs7lhp7SLBfS% zRTO6m^TNVFzs|(zceO&c)P)w>ZJ@jpLPLLx5Hnu1^KA0HwOD*()Q$E&Uty)q9?8RO z_&R89cajfh_dN3HaEimsRbaPEFbihkX2Tb`AMf0G`l-vr8s2fPxa8TUUka>cIuDpI zbRKwERzFQe@)Vy)_MY*40cPrU4cui&#S3%_x3mEGD)wm(Mw^%JWerD=uPXOA%CCQ) zFz7AT7TJutOd@Cb*IffUxdsVjtDMDz5ps`huq2?!LkW2)79N&_50SKe%XFUzbW?5F zn&&sRvUAmi-R_5}mxc7pv}C@0$H*>)6#EEAW8$k3{j+vE^G%F=%YJ5yOd^3oJvpNY zDrX3JodJViv}eW*fRy1bh`{Kj>sNn)&hMEZP-|}Ef|rz89GX<*as$7tl#&G?Mm*oY=$*CJpHqPK^<@prDOcv($`=qmbz?T>#^X#?DS zbWnE=$A%i~1OHX^D*f>;KcI^ETCg;}=FngocSeISO%T^$55AqIR-BoVN)+YHynn;1 z?)}g^GSiSRlp=&?=v@l>`V{|;{OQX)jvjfzsVGE-FMuA(QO=VfpAWGy!V#RM0yB>{mXPkam@;OIZUi@K#ne z?1^o|G)88UesoBT58oU0jH=$|b05X-O6($r2X0DH+BM>f54Cwfhx7dl*|BhYL3xxC z!9xYX8+>9X(!|vm!?valp#uKk1IxZXBP`EuZ{e-@n$9-`anZ=z#rrbL3RUFeQmFtTBLS-h9z9 z0pSh^yTjH!0elAz^zl7E^d$|&Y%H&<0mbjAQ6vUFdnWi4M_g+HQJz`60@w1@msgQrvW%u-ckbKV9Dul0f{b|lE$3@Mzb)Cy+b3!@>4Zi2n!}sz1kt1&AaZf%x@?NrN&Yewjw9G&&~RtoVR?Nli`(jKN@NzI_{Un)Uuz z3u{E}H6{@65k_b=68~e48mu9zA4_^vSNk*7eq+S?p@o_&_FsP(6b{#Mj`>5455*t4C>a}Oj+3ZhkyG7yB4d;=!@cH6x&m8G{R19;! ziTNN?m*{RNm^04iL4`eC{`a5hFUDr6JK)ni!s>Xs2)i~A$G ze2Q91^vH-KOMdbA!`}0*dVMdqV#3!G;a+60x4+D<$3K6qys_{SZx9TI4oCa}xC2g8 zGB)wISRun&pBi_x4;qs0y{+Z3Kg44}rz>s1$#h)E_u&ai`^^{fz#H*)vi9q9vNVrj2oBIrrxou4F~FKl{Zs{PtL zdtLR{uB(6KFOWG^cSX!CZ*~%Mi6#?~CFu0xOo92+>f2^&uXaa1?RV!>tX}G)9G1HH z&)>Nb9*^jYLR1yaLra=!SQ!xP@-*qE>Mo31y z)rlj5IM(?^%Ho!z%(%)gC|wIUqz*-6_z2$S#IAo6vl@%1sOF+Z6zT1TcR&EksA>N| zXl@i8OWk~Sb9-dv$Toa7lJG{GYR*V79^c9`Kg$j5uaL3KVsY#QV7v|2<0JzX>5bc&R@nNbv7E{y(0GZCoE-e$l@Q6^3<_f0QQwBr`bwC{+?P zh5kYYB>G?5$iFN2mza^^v5?9?uqf#NL6-hvp)n)rNU}oyL#e`|&42S=|7keE5kJx9zxn~F#ON7BR}Nfrf1{Tu&Pt}v_~{P*YmqhKlWQ9M$I zVWfY&FVf$k3A+?cEu-E!YX0R*l;eLGRvCt!tz!Lb^iRR^_w@KDkD>o-s+IpJSt@^X zDI+FKBPBr;xnmMQ(7St_#lNB~YC7<)sE50wB--@$kT55*`h!qh!lI?y4=C z+Fv2b=9HTQnz1mK%eg*)hJ%>1a2kIUi6RdNkN}v^3JQ&dx8gLf*PRK;&zxf|Nl>I1 zxdoz;Fmli#BxiL<%JD+zQHBNt2@`)o9F@l_r$uGQODB*CXiQj(K0xV9;yvx4D}YPQ zmh%BjbEi4GGK_bHqX_^&S(&`@FIovU71Br&Z0IlZV0As1FOJyID2HQ7D&fa|I@{sg zIHSw^AztsEfhH{B;>fI^1lC9rZfN?IRLes~yDr7Aq-dk#K39~tQ=47vQR#nmWEplx z(Bc3slyrf7cR_?RC6qC;R$!VTL@9B)1yIuG!!AizG|X3hX{PosH`_IuB`FHA02T&0F*yTMcBgcwouuMZ5;W- z&&V)4Cn6H@hr*U3{C*ZEhnIiO%#ERG>(BG2h{e=EG};a!@n;oBA_uwF?6Apj9xBz27oWH;ZVDS#2o)j@_7%nPf7zM)53gVC)anU3Ts0`sK zObPZTK}tk-Z<9QYr=&Ld_*M#DA+R{NFm~~BSWk*cF;SD}O?)kjb3AkdK!!GdQ9B2q z`1uhmD|7Q|D+DYD$kE>5BOm8-MU1& z`CD;SE=MuiDWxmF)esmjKd7ByNI~+E!3KOf{i-J_rmCvKwO~qcJH!wxz^!-zU?Y{fm%@6}(&ZT!Hgr>M(uuKtuxNR{z9Nc&?5C>EY z{-!^Rp=pp|xJtvcN-eK#l(M+V8lT19FrkS1JfV?A6|1eq^!U*daEp& zr~McqS>&w#7*jDca0^n2N<7?6C~@r1STqf_3r9|=L9QepT@QlB4dJ5Z9FXTR#|jG1 zFITQG)9(lryh#;*t>J49($t;>9lSMfw(JNEXq_LKO2}*?QoZYMKocdU^NK&U$^t*- z)wwQ`o7yajgEfg{7xPGNw(VHubgPTOgxAeRuAMoQLpL=7ZQ)^_?|14mpTo)TOV@KNS}#|F_#!@%UI?A7sD+{$jf z(kZ45+Q$8OgMZdp%d7%h=o7UB5NazQUJs~ z{tgdpTV0`1PEh?&)Tl}#3e@CzyoMJ`Y&{4u5+WQBcAc?MVtkOLUE_KMJ4|Q{(dn3( zV$8L75FU0vkCrz#67lm;q@9l#8)|UH>=${;st;j*S5xM^D8;fFiES;Tl_|QpekZs@ z4_A_B1Xl7a$51VNi;^g=o$e`mcOZL)hCrj#a8VwCh-k zcC*2OG=MQwh668+ZYN;gN-aF+nRx2>TUGTb%R4GzRaScIenqJxf)?ytrTwz!wH2~! zF{xF5LzBC|z_+oWB0`%2Dp z!7avA9i9=CO8L#waxW`VUzX}nLrZH!)2Sy0qRSi+%ZN0Vl#^Xz?=Dd8o)=Qtcjoz# ze-aYqmxE@VQvEj(aOOAk@PaQ z-H>`OEf}-pB5&-PvA#N!J!0iWiA|yx{i~-;aS2Ei5v^MEGf>_KF2FVdIN70pOTV{^ z4AAFm{>}hupEe=HgLO+8f2~_CX5I8l-8@nz23xL>!ai`E;53LEb7I@LM_$T-W91XR z#4i+7B*}b@#~AiL;?<7ra zq%{^E0rC28vg@}MJb`~uMcy-Ia7(1mk;ViiWBG7m+bjMw)(3W+BWsj|e$A^EP0f&g zE4cV)vJnPplUd~8EU}|RCvGsmjP$bV8V1`_ZrEB+zA;!w%kvF1{n3zGP>*zdj>xQ>i>2HSqVmNJIyL{T z4sE2)mfmC9Nb<|suE*DZr2ywDb!7-Sz-D_6F^M#$q;5u>^WznGMtvMYhRFM)P3sdk z_^!{3zkiWcfmoW0Z8Bx3oj0bN)RP}PKUc2h5A0n9gUuU(QV1aFEidV{iht9a^y*O* zQ-q%T0T>eb0d#U!(y`=>f6>4r%JhSRkWDnVBLxa-bpZKlMP=ZBfc{tjxo_-EGpV`} zPm;8^CYtq+a}3McIr|1c?=q>77K|4$yPlndwJpr`@!X+6a4~~|I<22f0@{Kf4ZF9(|j%5`&g*y|0tBK=IsMPPK?q=+h zD*|`85zyFOrXfRrzaro1?(QUKznN4Lh~dF?AL~E>+y_6||K`DGyVODA+hBNh$KEN5 z;QH#IcSU1KE0ev>_)?+ch^;QPq}$L$2euwsczn`eK}>qvM!1H%+m6u>0FZK`$(83dk4Gy021qz6{Ur@zB2IM$qGDZ3YCLEWXY+sp z*RE}$6#&^Hi{^JFJFfh!D-QC>wUOjLIo zKF&lGn@g2@%Z<4=8*g<7ba1H1uPLxx49j+{Gl?eCR#m^K|D8=;*<%@NFCqt?VJJJl$9Bv#&1+_OT^5eW*?&`YPdrciQd$%N z26tS4vp&Q)x+e^TuiAc(00wxJ4rdF-2SC{A7|mkme|;zP0jhIo8dF%7b2&m1?)u0+ zrmQK@zpSWNAkbnE|4}(H$c5VQ+Y}o0nV^wjC+%k_jGNQBbi~T8_RU8n-MOabL*F`% z5GroGmZlvAJgffi>0ipk&{tbUM4g8MnV~9wI&6>+siUB_;r#jR&g;}$qBsNX#=Ze* zg~v9aH9Rl+CObrat4c4BZ4uoxP?&-jIQxK*W5k86aFi3@NkHurkwtCLxtK0(Mx|)D zfmoj>zpdg|xRd~Rz9ch4{SjSVGd6l|`RywKf>wgN7~anj`HWmtz{~O&RwlIC{HQN~ z%5#3c1}?0qbQ z!lb>M+3n6&mOEe$GqUgtK0Zh4cCT802+taZNxBUFQ1;%U0L$8BovMeTyb^~ff)zR^ zBq~o0(GU}nEdHLmDkw3YatK1eN*7j@yZl{kz{eR*!>(J{#mHTZxI|Oq_=`WH5a0&r zZCYdV*=In1j7S2-CnP;2hA_q0_P2tsUetWuFVP8@=aSvk%?GDl%hhML*XA0Pb?-@Q9ovqdhy`WB_$T?aQeD+fKrH+X85OR*$+(e(ibcCJ}J zP`9q_VP7CUWAM4fZCnMPrs3>=)Pp-0nJrG{mRW-aIk}B+i$JV+BB7Z8MGL%P&6w09 zO#%rN>*N(rKBJq=pZ`Z$ZY+cuwpm@H?0dVE^Nk2W&6FP6sE`cRFkB#?9&6ZZ$=`*U z#}L)o{ABP7o1}XB3#`tX#%_g1-PQMj5y|V>aY-dgzMg=8pNZvoWp-nc z;DjI>7ovz0(DCx0*{hnG`jM$>MAUNyqWdfyNU{#ldv~*}X`EAZ9p& z9h@r3k+CE(-LQ!N1jQGB-LDpk0FHcRyw{iuyAOTN1-3Oww~YFGR;3O|Pg_XSP|O)8 z)kLyyH%t$H)ejg2+Wa$MoFrM1%3)|qzSf-6JQ;$c27zlU#k*;s>2o)~(OHHCI2aAT z5jenU%^!yWBGG(jmo4JjWVEW8qub!=Pp8dHj)@BZV_fUENphlp+wxD_1;GsH;g$6x z;BqzXH6^k>hx|-$&N$Rs_gwVod0*DiG$P)?=9Ua0lWjC^Bk_aKoHrPS=g3Yf9LEvisw_8v*}ga*h>pvUQq=X*= zR?JcGp<}PCR0z-=wrpugrIVv9Ov$j*=A50VMKdYfuh1rcX0JFhFcjuRdA<1WNNXLj zG2HzBjkLxt`e9{(|0hc2e@0r%ea0vA#LNOK8YPGx=Khw1XQ#@C=-bE~v{{P3=}eSg zmaJh~A0$B1806$PZ!q`R3B5-u-o4UVXy_6Trc)?~jBD#)M?A}GT#)WhSG%|fh?XXl z8f6fnEn)V5Z$a-A8_NI>{~Y)+AEdNGG5q1dnTUFO7b4mGGrk02e~H9IUbCz6Tj zhW_9jV91L;jt1Y{8qe&Bm}5C5IFI*fkOnfh$<)t({d#FQn=0!h5+j2aL;sU4@Na)? z^B1?PunGH{pJtq6mJ!;I;Y7usg|-`I76j>L=q!bU~@(!3BB^0QF(8%2A6$5Qwy{&tu91=W}6)Hdgioi^de@!%`8lwc! zh(j$WLIl6en&RIxX5gv^FIL*dkWCaD?1?KW?TYi2s#v@EpxYVsEmr_AH?Bn1xb#VN z_$prOkT1@1Nj#Ccr<=Q-a>flMe+)BpBVD9_>-K#UW@6B?ch8@BB(P6TH@-!&Q~1ct zg9xQ_ut<>*l_~TvS402R_jJ>UJ?1`zk0fKBc`B}b{yhCqevZSc_zw#G)LPHmD$;u2 zt5RF+mdOjp!A>iZ7e+$(T3^|Wu39NcSVdHGOSA(Pc`az)1QxAl8uH|#|VUeNxbtnsw-F0 zEP~xaCS^!IP+yH#9<+Z$`-54Q_w~_3BCV9n4Y^Z1!7Ean>!-#OLnw7eFe2puYa!+EF7lfyqO~jvA>^_$*NY2(qHSQE zh9A~I@=A!41;r91$nv`dJwM#bBwFx4;TPEWTS#xU*~djs=Afk_Flp;Xw1RIfw2hC& z!lsh`BNHaDHMoR-y$(u*bDCr0a{C!fJ`1*GWh9n`Vm%iuoBpTJI7Nu7Lss#AULBxb zd=gPwK?W;zy7IgFNE?!;m5>#GS%rC0s(1(d-yGZQ8|pKzB>O5-_GOM(mIzYZo*niD zoRkY*6xQtpX83M*8kQy5m+=SgbcOt?muyl?s!`o@t%T1&gRB0kpd3;+L5EP4se{zc zVX8uEVBY6Yq;Ul8@O(|U}6q!1uW(}c)r5116 zKh}VTOu`l~`tHxb)>`s=gFtLzllx}c6J z#A8Lk_j(*!>-1i$1rNY~kkB^_N9`_2%_DqwkqoZpqLA=(3u=F7<4EF%DIlHr)ontK zvivSmUq!O|l3yE`sHdY6gK)qa z`qs*Lo$`IW3t3bUtbCkX*`L@SvGIpyl0PNENZ1j`{8Vbm=nKGqZzZ_vrx>}d;)f?@ z8hq`fAXEI3*RAn`zd)F~n%rRDNy4KGg7Prh5=u>lz`43a(pF)n!F~gD-807+G0%6^ zd1#f>`&$tIx|;V$3O(<1s(!265$z|$6G8U4+M|ZZcO-l=yeF;|5gW}gk0YMGy0ONv zZ}z{XM5Q5xE;bc^-X5`yeCM3w_c^rc!UW9BI~+|Yak|$)ys8Mn~OX@2_W9a7d@!tr_2p}j; zBeFEkM7%|)!!915W+X~KjTAP&e5g&@=p!h4Ph?|jbJjS{T6Pg9HY&F$jAm#|WI%DUJW|iH%$qK^=mtC| zL2H(V$!`(eT3n2Ge!fys$JO}0(n`g%H9F$`98&rTI&WCJq&YjY`gz&)PWHWVr2rpIqC=y7_kI)PY*kAVeRwNa@O> zI>Pq+r9W8c>&co-em@-D?QxmC=3=1vPlI7g79XBRh=wKW%2nqY%i51h}@} zvkN|LF==&iT^?HRC6>~((dl{tWXw+x+zTT6=_ z8A0kC)KGTY#IPyhE+!H`_^Rofle@BK8QS_)efvv6q4+ZOZsBI2mT2^j4yPfB+NIWt zW9mz~3Ubt4`c!#_w@nzx#2hEtcV9*>awH3X0@{s!(s9-xMnl)225=%*CQ+Zk%+smb zae!l`-Me}JdHr`Ysn?iXc&G7#*>vn{iox?t#)u-t9>0IDUq`mVBxg=Wc1$F{WpyQH zd`b95bg6kPNHI`Q%rG#Hj2^8t_To+&_JP z7Ngfwu%;WO8J;W+ouhP2fA~o|w?R`t{{VW;NyV-@p%To5+t2fM@$kPPaa4h_Y`s31mC#Z2tS*v{_zJ`nvJJE_rMe6!$ zR;0(zB)+?&ue3-fLDN$M>kuw1m)~-bA!gMyv?>nU)K-1m<{qb!XHVX(}uHQd) zb=U|y#HFMb(EDT`GjB~&-P*VrKe;e%qm>9!?wxP$>We~+yf&%rO8&ktxgm&5;{JhR z_m!7ec^lS&IzoG^`>-<@XC=jdu5(Z^g&z&_5Ow7bT^n)dMO?_Gu_lIVA7)ehJ}3lj zpV2^u%s@A7V!T-0gvOqNWlL<+N{59H0EX-A2u%I7tmSyN6MZ5 zkZpNbKzGh# z7xUAC!*7O>u76EbxkUF#IOs91Fbuo=)83K(CH4Nuyg&Ld{--^ACJ{WOSta~c%-Zu; zHhXs=(O=0=+Wt`t!-4;Qka(H=N9M8rka&N3qrdrMwBoPQ{v#oom-!1XHU1*)7~>H> z!7Dz&Gu~lzUZYV$u-acH7ykCIKnmlHTTlMN6a$7)j^GjPw&4Dk4F11mB=xq;n>?z{#4?9rj>pVVccCv0)q)0%fx(>ND`BDR!8|v zW3a;Kc(orRUXjp$Vv;G`s|1YW2>;46L6B&)gxLkAbp9lId;KTTW4!+%XZuHR_^<5t ze~HEaOE#RK|3gv;o1gPPBy_!A&c>OYdxf8^8u zN>Ba&lSmd5u!VIJ%XgbTP6O12e>VRVGJ7jgH2Un>6gJB^Jl8-h z&nf}y3mkKQD+W6@3Ue=$(1;e-3L4i1wb|Vg>#DxxQmrQ|Ci5v4#|l5bn@_E$41u8_ zsh#S!z4uk*6{6r`DUvlq%&*y)NU;kr1{#)GrU-Iw&k{bV=Nl-=*)mtS(Ve z@xO0&5(0#y;V9g632Hc~sHg$E`n&Inj`5B$*PLtZ-nFZG+zcQOA$iVug8X_XHfLp* z#djWm`gAWoHu-+J_bH>R0ENR1Kz0l~*g6929S`K>z62O&Bmg;I{{-wvKA?^>w^oo_ zI+7cvwS$}*{sBBpHB*2Segho7oNv(7iHjmrfPaH>%eTF>*G}5Q)g6DnO;oLJ6ha- zJBAKAj(uPt&D|0p-7Hl})n4w*vZp4PyXzQFQB5KY!Ry>iw6u)6f@xO^)4^7E9_09< zQaLxNoFQIVDwpp*WQ7BzE#TZzOIZa}I>ZA>?fT+jDBB2TIL3u+27TjyqGK=~0d*~a zM>R))xymYigSq|&SzmxU9h?t9aP$R#{ww@`4ZnfE-@YZ?XzUy0()j~0{oA&_LE68- zLZ|LaMlN!o8Zd@IM59{NO}dF+85@LJbqyN|XzOov0uglfDlnIO0V2v<+wmO-+En$2 zcH<*Q*CK^M&>c+HY0xK?${P$}bc_ZCA(rXjB;XvVV+>3Hxk3cntwj|9Cnj`%kcCI- z1SruVXngEI0!e|1_|U{} zkky|4nrHe~ufX6Y`|z@n_2Eq*+5h&b~9#ZkwehHL(Ci58VXyuZIiMZvMx9M z$kt|>NXuJPMJi5K57>8qjRsQJE(z5TJ;6b4R7#NnX=s!To4`b5C~hii_JB7A_LXrI z5Kr|rSi}_S__|VovIVqLb<^9`IyE)QRTD%Z>N^cE+0;-gffC%%83lk-sB3rT@`CD= z&o*ae0~m$^fEAUUg(!xYbmAk!BL78`%b zzjI3gP7f>RCqMcHaO>2<=Q=``pIG!ehvJ&GzA_D`eJ8LWpA-k6LJES zIjEOZ6-0#80^SmTLqMs4xRgOoQd_NzBlgqu3~=2vQbV*9cuHF(Gg;E)~}?M&C8H)S`guj55Kl6O^ z#eIDPFblwc>C_-UAj#T0?+)qd+do2uGwM2{mr8?KW5DEtr|;U9_#P;K@gC6#8pPWkPNole;{nP84MaU) z?gY_htx^S|X~Q)bIVRpUZBH~Z0AJ$@L1F0vA39#q%mHV@mq?{A`=C?`y_7{Bz{aB% z)!c^SW9RVZj7{*)1Ein$@<+D}?F0J#ZQK6>Ui}5Y&Obc;iBsQp{_V4)2!7=Y;80+< z=bk-(G+V>RzQ`l=dY5mI;;EDoeTXuZ5xTAUTI=PMOq!`p6$`5*PxDJaH@j0-^3kzj-Hs*C)bXe)069yI-K|=8&Gu;V*Xn$2WX~vHw|s?!*Gz0N$J{5c=I0)puTCUjNE z^Y!g#zjgTIhyN0%$|y3$+P;kRKk|CZtoW&$)DrU`aS$jlETfakikQSW6T!`b&KmHj z_rp|zK0TO${M2540!pB&loiM)QxkR=`lJ=I1r!@*pIQj;hh4FU$ryqGUO1J1k~tMg zaV0sBr!iz$Q&0AJHUOfQ%m97zxMzNbqSd0@23S|A325tCy=;P>c!)ML=Gwk0Vjm+A zFr|Q{X{AXo@5=~+essOe!Ce>i5%o4+Fvk@25g!1(;|=)aYve00`%^3W%4fgxfnU1+ zy{7tFKZ)GJMd?qz_v6>!_k-hqE){S0b%uuhu(_|L6sl-Ns=kY%D0lK^7oB7W#OdJI zZ0b$vMvIP)<}=^w6%+MJsR4FyCa}|h4fiMC4o=gFcx7n zklr<*`YY!tzt>j3VjC-}FF!E-;YV>I-|MQ6UwHEqm(B>((dkeA#Z%p+jRh!1+6~@7 zDc{Vh+k;615S^ zZ4Dk%_ej6i{jKVMQ|l@~R^ZKfBK~*%6nW`q&HhU_zhc{`?wJ10|NmMO7PZhi8iBeP z-ZuYPPrd7t8vT0?hf8PjPdbb@$G|z7Q9NZwn>y~g!Z)pU5a@Ak`8YL7a=?Hy+b4TJ zmU4r3*en&F_)3O-NI^p^hzOvxwiprlWhT}MSmePiL0o_af$b|s#e1Fb9dmy9 z!?$1fsz-i*)$0E$ufNt`-q+Vagu^H&T z5XWp!SHy6Wc5aMm+MOhLOdC5Wpk^LDXU%8LF@7&1W-T{#rYdPwnnQ6KQ4f@sae)rr-Tg{VzIIrN8z+ z{?x=X965`%8s+NXtW@6CCM>GJcE%c$b`Z3G4-GUJ@|0lJS!s-71lqmY18w8eKr5Rk z;90+wqB#b#j}Seh-Wl6WF%ef14NAxdT!5WRL`vI;vrz0Zt=h z*ezC?tEs%ekjkDhq`D3><4Q4xb5pZ;ivVlKc7?C>0xxziM;LUj_Nj|zW2HllsC%e? zZ~u4!?Cl?=uUgzkcWUQXe17{o>DQXk_`O&5r86IU9YhVi;rgq)CA39=;v7#OVv&6( zkR&Rnj$Nl%yJ$|PNeN`w0jybjrEb9pmU6>LpqEGL<#m) zY8%9RBw9%kBN=4!?N-n1c#te8;w(IWk9$fjQD=+abvBOPb;}q1seQkS5%}c2?{ReF|nWIAUR%=Y`88R1BY425kYz@v$ zKn#}_Ctm>!3tcjs?EXZ+(FHoAK~p0b83Sgt3b*D#^})^z$sJLm>lzKP6^_OiXN{Pw z1zx@hSgMcj=aJHPP~%FV$E*3KF88ted6WA6C77WQ_IMHY=yCS;-@o?!K74xn>NM8A z>o_6((jl=o^T%e5ulxEEKp&A7bL^yb zyT^TMK~Yy{7}^>d<*s?B=Wbi-|3uxx&(+KdsFJY zoN6Us#z4_Pe4pc*5rY#01yN+uf(A`zLDq~c%{4VjT@$1#WUa3cI5(?it5>T;=ep{? zWKnw+_Iw!6FdBqC zp@`TJWepu(H@_6_rg`TZrg)X%sr=DbivppquRpdrIA1wL@%lP{|M)QWyN>%+cmJc? zf*<6g!##a{?KS?X51T{r_gd0l>Lh>i$@iX{!{zNI?NSFRwh78P(W6u?n+uhs!VDY> z_R?PO9Ulc2STh{LVWRmkiftf=T6}MGCO=g`8cbV+s@B(y)c^xW$tLur1=xa>vW1L$ zpd@TP=`2$3PFtjZ?DaCTRJeCbfl$cUM&-2J_KMh-vbA1`aUUVXi=&EG2Bg>)wR4T) zKu;!T17M`FRK;R{oklvO4%B9sN&xI4;B=Pnl#mk>FfLnIyo=D_Nj6*~^7V3yCX1?pw{PB~NU!nyOWFLhA>p6R%5Cdzufj)2P7?LYn~WIe9NGmH!B z#9v>|LO*ToH&`MJ>E|2(XAhu^8VU}8DL@5|3LrrkfIa9GfSDNr#MO`jA5_LzX4VNH z1&tWkgBt;V1Oz>RTy@vuBj`Esm?Z;9R7ZeTP6R*9z37h!@ZPgMUUKsYV580e$UxTs zSG-IE6*Lib=FgPGd{ynR>Vekc?l7*#4}c5$#fN|SGX%u$O(4bs?Tr}BtoM6AHLxsj z6LK*G_$vhe3KA(q=qVI4Mu=A9>l88!HZ&Fw7fT_j? zV;$hNIHpsr>3~o7HbH=yYKkpRlBgg#KvePuwTvJwITN%*vIfj?D8@jR07P1OOmdO{ zD()lq4$!Ux&aH#7njohngU+?aRsYcX9m^1Z;#@-_F25>6~g!PHZ4Y8Y?@s%#r~0fKF6DakVrFxTV3A%fI3IuQM_cYol*^Z{}ADEa`?6yBKt%+{9xnK^Li)Z;)i(+)uUeJ|2~ zxr&`USc3igemJ+&oKc{AT*Yqv01-%!$1KyetOMdhl1<$KNE1EIk^nXx%v2(onVkd8 zMo=R*z-FQ~#0CbPz_bHJO>ck@iR2^@eQ3kdR$5+IJS!>S5dVmw`c2sASX*gR@!s)0O>%`8D~M&gEb7>0=rpw)L} zhhg?dcEOaxVc;loZk@r$SIYI+Rfs4q1I6y8+bAZy*mV?COAVC}s?Y#p{M?UlP z=+u)TfR(Td0lrRkm}NHr(<#JX^R#T6m@0#+ACW7`Ma}`g^sDRs!4@1F0pG5O1aknRXbmBoTV(vW|)!3RlQh?En~c zqLvhW^j8Zw=qNyb?57gaDg}wr{>hM6x(Y^j-t( z){*X}4{w6Z}wxO(hT!9SPn{bjYB~;lETY{Fm{z%m1KfWQt`nh*-{;KLFN& zxtcD8O$t8^X<|c=YR9*%QskB%?B4Qms$V)tW!;Y~4*V^5f@=>cj{TVN#P6y%?Z80? zl-vJTdKYo#=AU`HZ(aYQkK%tb508W3@gL^-X$NpCedSz#cwISi7gDGNZE5FVY9pW! zT>R(x+W^oh7XD>sBYCq%Q|=+o)`HLfF-{;3uq$MxrZ0By)JFJFpj8pGD`<4$hV)VJEqa4s1u<39=R+U^6 z1eB^4&+Gnweop`%{@@y9DsqdorZ?;WY;hzxH)2DPV~DuwC$@tLh?u@~u*srr5}EI~ zi5d}`U4uwPod&L++CUe6^&fRi#OC2UkMY%1r*#KnHBEe)*Z|Wh&fn*nh-nE2r+?sm z;IMbjQMq~=vYae|MWrnVRl-s8HI78+Z>IjEzny}A`ZErX!$$aaeCt3Y{I$p{&_C%6 zL|OUCb6`W3Ko$Q*?xYV+nfzrurSL_HZ3Cd)z()8MVRtY{nf)!#7Z-Z7`c2b;wx$(e z1gEvX6k~Hs4^8>H8icNLZE#TsoTG>%=)HINT*Kyty}-z&YcGt09%t*7IELs!T7J^zNiGvzhn&14OY620pM5N zz0-g=hM!~Hbqs%9C*t_)y7QqQ#4-JU(7%tP5B>W%{m{RUi>?#vckGB~r?&09*BD~{ zNie*`8lJxC+OFdwe|^*CZ#tm8={9J1iMfC1BtZv2^rqX}w%8;7q8o+*0H}%M6p3T) za~uoLq1 z^TJSFMx20Q$-+n-Tk12R?D)h!^ zKJ-h71npkrl;I!RrEO#QL(9c~SpLu+-eZ@4+sx?QclF@(Jy)?-10!n*p`d5zL*HR? zvvUOBeSgyuBKrQJ6=M$n(BgN$!!JIPNc27a=1VEYttwx9LB@yvizkGMzW>lxV%*^$ zTI)TIKeS!fvF@|(w1KWK2?os7L~TaItRy104wT0ZI89_!Ke=Nd0a2@e8br;5Vptzf z9DK*T8R`Mpq3->@{`GUrIR>=7b`%)&<^+IY%erBJu(bfvyT)d}&m(K`F0H-K8z5?N zzKt7@mcEFYl~&EgZ}Bo5^cK)b6jw3=h$QwGt=8mSP{2yl(gzc-Q)5g?S;t@Z0qkd@ zZjc-%k+u@C+uNRSif-?G| zX+Xi=bwZ5ZU;6*9AHL5aKm@cu?MNUr^grz?&C*i2-s417Vo3%-o;A>QVuR+Bn|^y-04w&a0e<8TH9naIfJ$MZ;SqL^EpQ{_+NYOdq3~jbD4OqE$CAUAD?=vcn$^` zfJdN9U{6Im2C+ET7}#*}JPdO18E3&@2@C;CAzl3b`*Aku3%6X^9A$IZa@Ek6cq2A^VF83!YPn*Sg4>Yv8d2|kvBkLlm( zAMf!ncm}ErEHO{ef}OArg9=y__N5Q6FZoTy|Lo1T^dm=uig9^w{iX2u=F|U@z6g5* zmY{#j+i*mGe?R*Dk;fD0snCzvmp;9pvlZh);LC2DkGzcc|3+Njz2)^D$M8OX>v<&l z4~4yl?|HoCaTNUd*XM?R>-VDvv3R~gzWj>v@hd78deMLRkLYWD|7LuEz4ct^iLi&a ze2Dn}p4VUauj9|S#P74e?*m{ViIf61_D#2%cdW3%9 zUJEFbkQ%#(T-9&aC#w_6vtJ=nGRGwExp+W=8OuD@IHTBVYu=C*742a^_M9^&meT~C< zxQ?ssZKW9Z>4qb>8f$H|wcOB&39%Bm$pY0L53|*(nB6zGYMtG(ELEZ*RV$3EX*wN& z*X|H5d!ogY$FZJz!QmPo9H;Hv*1%=!-%t7Vf2@xCeH=-<%iW^N#jrC~8ZLRmmr-d3 zd9k$1gm1z0RhH*`g^k%579`Tk{Jt81?a9yfl+Q<_h)T+UXXdVpmO2^oZk^9AZE(9- zZ843Kaj_{+=G9Lx=xHmx(fV|nR?X---pA=Bn@fF0?w5(t?GkCmsA-dZ*{f6@j}H^m ze;%#p)-2R+mnR?r!ya+X^m)4It9w767t5^_?v_KoK5g%=tW4wGeYr|dbS|~Y66V2G z>6VjH1}6Ye1m`56puFoCvK`%_A)cKaIb57>TxG+3u}K9}Ym*qaA)wy>Hc0bvt z@t#5{Fb?K*>hY%c`d8Xm^l=M-e@c#Jlj@SwG(mYE-6$JI=5c-ANb1QgYMw_^$p}aK zbQIndWM7p;Juh$0+_2}CdhS#2Sr?JKP4?&DBC3f7*c zNL1$?>|F0Y)^Bzw%R{QmgSyaGq=8L#zyX9r$z4~pw8~2m;R2=GQ z)IP(EnWejd~(|E?h&VN;I7i29y4XEmkJ>7sfA~DMGuJ)d>)|0WGewcz^c1bk@Mt-7vBd2CFkM)+9m#|UG>%h{oI_{LoCSFGI z-JXok>tMfAZFSTeg_bJiMLL4ng87|&(v+T=MJLa3`k>EC9TTPNf3+B2ReIKZD-ED*XX!zYqELfDois{;O)|s&BZQSU6S}Vdqkb-iw);EpW~;0?-p`6S_bwp$qjjXoSz6YUmBadfqQi*Zpv?FPqVJwb>t3K)0bWBlxkdc8j_qZZJEwSGAgT z&FePDDuZ zt6=4y_nX^QJJYrsHuw=E^L3_^V&x`9$aaRarN<~vPLIv_8MvKS+b3w2C#^fd=5Xvw zJ`DH$b~0~^ZM`XHlSvDbrcHwC{><0ve56Dq!)x6f$jSC3!!Ktwj_S^buj9msmzzty zl6*DtfA!nCnHTBgK0U$EPmZhkC3^(&JY&oJcAJE$a$k>jK|N~=PYooymi7ir=NrFX z?`mglmkXA5vps#O(C=(NL)kdpn&(A5x25|D!CTaw#%zC<>uP1QAc${ca?PCVyy$O_ z7pw!me9fCOpTJV8Djn9EQZ#N}-d@frDkh$Xf1=Db9s}3incbgObkfb{i%Xd_ZHup4 zwyJWK&F5Tx8FWeRSJg0=^imepHt;$r+b1A_ClGWP`2bp4gi_+u`M9kK)Z3VvoVo_I zFZe}cuC|CC@6fXemP?EH0~k^2pNi;2g262C!?S#_SYHg*@WGj%k1^mwdyKCq9<-BK zf4{zh^b&d3ahRIavG$QJ^(~jKTi**vmnlzdI!E`VIW;6KJL=TyvT7Fv$=Iw6J!0lSVg=x3z%56qx zZGD`dYhLY^lPOci7o}FX_|#0-PV!{1&aP|alS!~^@bq#G`e*3htsL0uf#YWT8b^jF z&vRSdu2>yz(e!fZrRuo#!2)g%jjj~Y#R#{x--Zx-Mjgt@Jok!3wYI68Y%f!Hf3FUB zYcGv<70oMvJL$6G6cO<38?Rp@8-Ryb&S@ZO3gsR-2Y8A>gW}3dPc9o88nMU)U19B2 ze_CvgyYT(76#eXE`09J|eMBGor#GIz=Ix>8r{)65-W(cfQ<`wQvxlg<_K(qda^Gru zEmlZ&N#PPQbaXNUjO_AtIhWq%uHzjGbxH$bP@A%luuJieJ zvGGUeS-wqf-b@|EOLHzACi!@g9+z6S+K;VQb?b=cBHA?OIxzhb75mL$f4v;baI=>; zJ7bxRn`b=s$CKqP+bfPbnd>LVat@l3YO!_L$NSFC>9JV2E;%&q8bLFCK5gwXyOH6x z)4FgollF%*SJzPv7R=(w5KOj(c}$GgV|~vyx@14>)|onf8;pAYSw;of&-`d5t+H1; ziWXN%QYWwIXV;J)d0zs@Lc`x*Oz7W_;y# z*XgRBXNkX_tsxJZF^Tkb<37DilJ;@CKENH%wo3Cl&&YCI2Df`3peMJ$%+BNj!n1Ov zyFNLN+tF!nglpe#+ALXKt;15A&Bpr`h)46rGt=PUCEY^u4Y2J3e^mjYbqD7189yfn zdFuOSGM%)G!d{lKUYds2M+Gvrv4zg_BN%RMvFXm?mFx2@eo$w+eU3b70dgt1?M9h? zJs;28(t*$Yt-qw`QsQuQm_Bw&kXbKhBSmSuH&^BSIP{J*9=0=RvUzrh%AMB5%8=4sH$=#UxEw5nY_U9Z>cf3wt8NsLF2PMwU+OQ570 zb0#x*x+aNpx~)&C){if%a8w@EWL?R++V`cjrNX~s82watB=2O~0*#|L>8r)n`@XmWU7n|Sk71AMj) z`3{iteSW0G#pP}a@%f1vYm-L5+@>Nf|YaB%H!UcK}* z!s#h!9K(sGY>De^Z&mI+jmH6TH|cIHEzSGlbj}v7d8NrIduk7xZ%)I*1&iip_)qq9 zgGm|EC{i}V^|d|?LEMGQ>;lXDl(E-t@zU>`o9@;UU0f7_eA>lYa?X|LQDnnNo%8Jm zos)Wcf0^qz$xCxzOh@a^X34O$y>YK+>+4#_-Q#?s<#XFMjXycAHt{MyT=$b949!gu z9^2__zlqPYyOEuqyvqun^n`>gkV}H?7 z*}0bEW_Q}avG#P9cCowlJyRy9GVF&vuGVhef93bla|ZX@moihOSf$uh+*fl??w9kk zdf?|PyD{rrj=SO9N!y1!c&}q_LF%f9-DtX-9GuY|Jln2~yJqRVc7|d!Gr3#c?}sWV zqCw|lXPR8j)nrvDZlaw=yEWVI?YLfus#mSgf~k7(kGjXwu;_ZpHei_?Cdb8jgQ>(; zf6wv#tTeh*PSfx-8u6D8M$h57o|{k81cNf$SCQ_=U2?yjv91;K_6bMl`_1yam*$@j zcpn|2SGw!-#daduYrawD<_0y|`LbK~)1>f^6WjO4i>p1Y%-o1~)6?XfbUc5VeC~v2 zx4#}-FO}i3NwV%>9Ji~dQ}DD$&(mvkf6n}SU>@fFP(sM)Y@UVfu{OfQDf97l?z42Z zI__(C+62;ypB_$mVEsz>Q{7UgFkE!=5;;T}KS~4xPn&HI)9tR4N;S24zi-PDU*RR) z+dh7>`Fx(I^$d;YhI_9fWj1llX|TN9EzEDHSvDS4)(CQ|GG0NcF056W$-OjpPF1D#^r`hf%_!PYI2e=e6fLGXvm4i$N@3u5rF`++`6kHrf6-{HE|fT~ zW(~}7xgQNdFTrDq+tcKh*o%csCO2z#mo(X5o^r2>F|H@Fg6LGaT`sbZG_N{h-(u@( z^{DLh&fUzi$9MT@e-2}kJm%WWv@b#T^ooh!?X(F? z?$miC117}H%+1NH7GOO$qRp7g{wQ3Li?s9D$Ud5-as~+3qf|DgD`agG7=a@O-f8V@ zuYF)YSCJcf#}qb-;U}e`4pVEncTnw*9y=_joAt@cUpvdI*X{k5e_Z?R0Squ$M^rW)Q6wZ!kQ=(blvw#2b_?N;H4UijD_w(PN%;^ks&Y@fTTT7@Go zA*;=Kw`>g(Nkv?Qe=nJBkpJ*ATMzmR#dbKU_*6e`aDk1uHu__`AGh^QYfjC^iLUab z#5xJ$D8<{0S2@!{+W4of#&g}zlD?6 zB69ZlPG5`2b=wK{d_CShw%{ml);H^F1dYXz@mPeNIqdF7e^xK2(@Zl~WjONHYH}^_ zE?l|cmA>j}6^vG8tmAF+oF=>7HZr!9o7-81*ZpOW(+))aVgJ}u+b^?(#V|Tpw}zi~ z=kBH9*a*Byyl^i{7~e)(r130OedVz|#X3G^qkiNVyIQ?0vWI)Bu9M<4vK6>gSLN`W zHF!U&?@6Mif4cv89XUEp3#y;J&B;3KTTMNl7&|U*R(p}Q>g5z2?b&m`ALtk@?SOUY zHQF>fm@lN;WOLnzkxGWb-Gq`WSz|kS^|o=Eo#AdAI5V0S+g>rHQGm@p&At6>8ef|H zbXLzsHD9gboD8dL(oLqNr^I__c=~C7SF_nUoob69f4W!p%zvqNx*Akxb0Q1;xaR5T zM%$ZP&RfOm|i{WcmE6d%JQz3Hkl1$dhfE?c}o7W{=x$@XSzx zG&Q9C0+#G%N$Rj%?@v$IyXtbbSd96iiYs2UV?hhxKXJFV-E`USmb# zskdoEuX=>%d467LlhEqs;~A0`;|lE_SAhD%X#Oucz2e^r=X%G@~3v&)T7jGSEIh`Q5!v%- za2cwDUZ(bWg4%g6zao2a_xAQ?!S9X--EFs8?-C1l;eJ*(h3-EV+h(wfU2^B3J+IVu zxq1+|drf_to!SsgRQh~Y#<(i@Gpc9(fBL*$%c?{xWqz2Pmi0QkKD7d@l9l^d>n2_2 z*ZF>)u9nrT7|kB5l|s!jJe91;&l6|UPA>Yqb+-A$YpHt*PJYCj$f6F0ruf1CBVinh)0(q=#{T$P<;S3i#0b#k}Q=JF9WhX&Wm z$$Z?dhw4S*ywdZ}q_t`NH}K7DVPBzmHLFhodZiSp$Z{HE4`o?onCtA2KxXLYnZg&8(6Z$CFdcHu2gZ>Ve0t<{*Sn$V6)oPfbFkf% zx(agon&)tXG`#}O{A{FXe_H}#lF z9Y+?*Q?(5(W*=myV$b9HjQFey`lz;>FHf{a(WMVv)|to<41OZ=e=~FY!2t3!s@W0O zkU&B{Hvtp_7@|Q<2R!(|PcAZx98r5&+9)L;x9v~esnNZfO--%2ahC(nYGMYYqBu$` zkR<;}eux+sQ3O;y?3$?E(dZIpC6fe7(<+P2oKlf-^r^*&n879b8@DV(*{IcZ)E%+g zNCL%e^`qkmQ;joue-f(;SmbdX@x@4#F11O&J)76ejxuyACfRBhu6?gmdhuqVE_nPn z1M>`p;|WcLZ;VH6A$H6_#Su8X$rXkn{hD)yEc^oF( z=6u6PEDsCZ#S!V$Be-onOATS!e}}v%*FkdHWe>Fo5+Z*=k4S=VYk?43za$Uhs%K@+Ao6DUj>`s9QwOmx zy+|2JgH#enqOm0n(rY9+Y-Mg)TW3G4C2g4tiARmDDcHZz8O~^kn?&pYNGTSn09Ei) zmCnBAR3F^Js&`l1wL}M*Kd8>zjDAT3aQ;NlKKNSjf44lZX?#+pd&`(0kWmS}z0t~O zYzqDigleTS!lGrCtAPMc*o^~enV`a%buCu-#bz#xc|P~?`7$fk<7=uUh|Nc}oWRLS zen^VI*XQO5#{HAATr0|8o;Erjt)n;r3Q~1jXL7Y5UX!l)^eE7?8$83zPG&Z_&>@2P zFT%Ite}olal#5dk3*473I?r3?H-UABSG0q8jo0rc-KV#g*M z*V$^xCnSQ~?;6SnQqE{?5th&-#>dO2S-B1$FF5o#+-FaM4@}q z$#849Pxg+~D&*wuz5V$qt8#WLz)lyq#2JT7e`}%~FcqsBF zT(=$hF?##u?R!m50Zn*)6l9Cei&2#Q9AYG!bCQeYH=?XVx(GlUzdCEYS+<4sY3o6L zl9R;VFH3Oo)(T0KKESovIz8-*h%7eV(mbsq~-h^(fR@_l+WBN ztO617B4S2edRC7zWp2qNA-laKY|b5r=+}%I=g$JB?ZM&T0hb8@cS7!IjC&87+o3dF zO;lx0R2f_~C1J#4F1`kNL@odG3cJdUe>acns`XO`6!&y0;B|7h46r(+c=>mxQ30G@ zaLnQquil7nEgjUrujmuhHXkCOejv4>Ee@vJ_<-bQ7=l>c18C4G_5ShnN=)4mBOg+( zk&jcu35qx$d{k`fi7L1sB>JP!xYJ?_44rK&XeSQP81i8tp{_vdo{_hNU@>8bf7kpP zMduwy3_rh*UtS(X_I!^qk2e4Utf*dmD=OYwy-dbja15aE4FvE|A-Kfk93DSPa&$LH(!+tAY5t}*SGMJ)FQ>L0KBV&|7rcDahN+_5Go zat69~GO~Lx9HcrE6l8zflsOIa)Hl?qSik)~wIiw^nW{8eN79QcNI3wIf1K!2G*s!# zlFPs`H;SU35y(W-Tf@)fotiY0d=-ojfyE-hn#caCTCIMVdOoTu1rP81C|w58zDI-Qs7)ysDni?88fH3b?{ZvROJ&ET06j`2yBXQqd98wZIeVr#SA#jgvy( zMkdMS=ck;2m%G5wVA1pLf7Z_BggQE5bo4MLbKiUDu^!^L%Lj244yjO0yD3lx4o4u0 z4*ribbQ1ws?hVms^T|ONz;DI0mH~GGUiZv%)!M{bP7yfEKMa`a)>=88-XZ4|InB&b zB}^ox3j9`mgY{~W_(8pdUmvAl34SfE&R+4&^+fpR%)To4N`-7CdXl>E*wIabKxmOR|lKT%{Ds=h5ZJpA=evNkhhW-Dl*(fwU;R>Dz$OEV*xmDDb%xG~ zI5<+)W?0QEJu*tr?3(eHHeSu!4NL387YSlPH11^qT@^$`3T6-2%KBgAI{A<4%#c}0 zoRT|~Os9EJr(SpCeg!L>$`?=eocQt(z?a}sv*0ILe;46&lJOvKF^5F1eocln@ef5m zM5Pcnt2c*quJ>0io4&lZRxX(uy!4y_IwhY)$M@yQ-Z)uL35g6nJ#-HB>?CS=3p9RH zZNL)T_O&3BM%=B}!1dj^@ZP*__M7$4j6j5;vZM*znbMh~1V5CgBrWy%xev-!8y2!B zWHcm$f4Yqan(Tj}o0Z_n+t#`OP!5!^Dp=D8-Y$MCn1=>^UuMi>B_}=?ewaw1sxqX9 zal$jtD=8zCfNbLhqFz^VinW4rGOI&UkfUl2m}KH_B)uOKhowm_hAw>2MM!qliRPf0MZY`jrW6oW4_#nf+#82rLOv!rIQT zw5cN~p~3L2B=MxpK>$pbh6gi;ngPXPFVDL7+?$9ZHVq$BHiJZWNL@{UrzeEqkM+2x ziOeJvv`t%FfSq^@gAVEDaIF8n{S}?_exGcpfy9CD;zf ze>=Xj@Hu`OV-GkZm|Y|58sA$d8wEExQmjQ?`A~3JTp2)?8e2gpO1j_no)sh`hpqu% zu=X?9+p~w5AzU8ch7YIc%||A+uqq2ZF=JQ*ZqcHgGSw~mu)Z{f@-JR~iv#ARD-n$Z z@l>YH2QYzk2nE-ZT!7AvM3?8Y`^F5Ae}n7K`!%0xK|hwgSwy}qaNPI##l(*R%aGi- z$MfiZHC zGAZAWe(}sv6kMIyy86`52d$i5ulQPWA(U^>?yUKjWasmUDFAZeJR4jCdjklde@GVe zfI0AAWiwTE?-p*|uc?{o9{z;NlGvTBVvMoGCJIVMGar4U<16H!u($J?;z_-ae-oy+ z`m!4!7O?<>*+yg{htAPjiDoVC#V8Lh6!4K)^^~3j<^FFMAL;Xa3QVfXh$=3_0OqW7 zJ*$)Q0BK6%%I4RmBzRf7~t9*(b)hR|j;XFa*VCQFApts+OgtEo^mQDDHOjM;AR6fBrry!*PR942Ah_;of2|soIXIOu#sYhS!#+S2RN^A{@2OIzLyNMUy+k-)CQIuKK_AzXSt&7G)sOLNM5SzKT8;)`iBo|R8~$(N7x_ka(U%FqTb7J>Ije@ zd1L2w>c#uCe`qvZrcQrRl3)%qfR<{n;*4 z`~XvnTu>{g^?c*VDu*z?w`dr3&#vsXTv{+*w)4}@Zh7*FOnN_}s0D3@=*awDg2Nz~ zP5ENrlgK~MERe9MSvJ?Uc$z#iw7loqg*d%{7{;(uI18AxB241Eh!%?J8n2Lc^#o8% zE#Nm_e;YQQ$ltJVw0*!(T2vfcXkJpOahm={I!<1=rS18AxKdq3=~M~3RBZFjN5H5! z*)}^>a##6Hn&e>EIJ$fZ{zN?OEo^iA=lD>tWNY#pNAt6%piL-Xlukbc3m9_IZNs<- zX5?=G@zbuNl(*Lbi-CFD>Aj;r9&qPFZ~%CNe_4EtCK8Z=oZx?D>dT~pmeIM3ncd)# z|Kg^Nz_+qZ!nvfcm@Vf~e&9RsEumub!;=ZH-VmCCu7-Pvnk)(?@0(>?Ci^Rz z<7L?_Mv7C-PBj5B+`B}o{Xu1#D4j|ae-IH!b{lkGat*zz=I-D8#Lk4yH}DzQo3xra zFYCgtwavAB=LDNOA_@{l9)<1G;=*`-&r&Bl5xGb=^u22BRzU5uF>&NLu+d}(ahV8P zqGmrjz!#l--DX$54ChYS^I>uM%0Zi0S1(8>T_4|*tu?os3$L%IfBhV~;!p~@pcio|4)7-$umF#%%|*}AdzU=|VI*)j zzO*MnEduHd7<(BUadiPVnk8~nh`pB7pOadr{gvF^?xkaQgiS<(mETClZGJ5W?Y}(bfBC(>3h_xb z$8#PT#p9tT&ZCrySx?AJvvSi?dV}=>mJR^nsST--R+MZFG*uY<3?RA<51<>VD}{7V zFcc&j9-NA_dxkAgZPV_PYQw(9>Jy9{uiYlB5pxeeiF5}94JXqjf*+ENhMW{gnc&mc z>IiF4A$$X=U;OI4cd>7fe~KxAkYV1HBGYeh)Sa#T#pgq)?xT{ihyIBQQ8i~R7w%aws`lTLd2DFe)Y#DN764D0diZ(R|m~Cok9wRbP9}% zbV4zQm5UXUpp5USD6+h1!NXOdhdWGYH9pwZ#Yu?$bxX@p)T)n9fBNk`FJQ)OLxI$W z+er_7@`Rm{c%B^3lu9-_e`mF1&ZbZD>dq?rSY<1XYN<_r;VvOWCbm{6**XoU$|5<; z0mHB5P(rE-{PI$xBExtG;n~zJH_ZdeqHYGM}F)i=!yqHiD4^KZU7q`wC zVt18|HNB)*j6QCHSI*|Z)a4yc*Vpj#e5SP5o9|HV9~`$)e_6$Ur25Pt%3{!N#5|6M zzY1U#z+CXRSV(UQ6O`t|o49C97Jq$9p$&D_-2AI^rcpGhjzhk|SmD<{9(XP3nr zsD6Ck2%ezABwpn8qKWRhtkN(Cg|sYVQ74i(#6VT!mOf%Zq(3iJYus;F+IiGlVNp}) z?6~SynbA#Df1&l_WUoq&`k~9|Ss=L+ua={5TZJ|qUvS4D8b2YBg0eKGI*ldJ5LGE1 zFsG>bgwy<`xi*&vf@S9Ps(sa`jkbgR;Oy7nY;4eK1zAT~C)1aQ$F{gYNfj-(NI=ts z`MX3tyd*zbWVJ|ij;b|N14)w`p3&3t=9tvX+Uda}e}cIpj^Q#&=@wiyS$EvR|N5UY zaOD57f&Z=k-#J?6|Kw|1jX$LfXrD2aCT&G+h(F3zv&@-)5;Vo4S8Ya;;k=TP&{C$v-6F)p ze^&K+sF%Y2(nW_vHSl~DzVjp`qmx9^0+^tbj?R3ud6mKS%F7#%e~SK$cP+!2J=y$p z)q4r^T6|9MwfYlM71RhI#+(Za>n%J2NWE){QM|Qeg2Q49#02XGE=*znK*Vo-=4T$L32ne=`BPQP!&Lv-TlAjWdheANC!8G8Kz5`R~r<0Rh&p51ji0cw+ zkD_IDAAs?wkFaO>IetY5cipLiV22$wJ-BvBrc5$_eM$k8Y+(b+CC*C4f5aHZ<&Oz( z=3>>ZF7$%ts%XpVvPk`?Uy}YNsI}BPbO5xGIASituMhRC=h6_CmUoM7#w_=|lHG=- zmPONn*FX;XYG`mF6;jj?Rz&s(&sx@cQw=!b#l{%%V`!g`cTO+i(!lv$|i z#~!z+8jgmzYu(Q&D@3mTf8?*`ReY;cD;ILgn;=EPc6#y`+V05z9xn^69B0{)Q#lIZ zV=Fxr9}Q#2kSdUK4|N=m$hnkar0x=^!|$ z_OWLm{LwWSe?^1q$%Wv;AX>!vjCz0w&;z(M6p>!Q`}WN~Elajmf4wog?XaQ(3GkWt zzW`uj@j?QEp0}_a06l&hfd(luF+@-?(L*zpo!$IA^*QHaVDOsl*Xn;W3xxde(LiO( z0z(Xbe$cgPP>x86Aff?@eRajx)6@HivmXTISS=!^MU*Z(B5%IQW8|_$WLtB zRme8=;0U(j(2*?XzsIxw#2x5~)xxxbTr zJ;j(8Bjg|m&ryU`>qfn!;k2Rv3g~W9hkQBQ=)(sZ1gT!0n&f9l_gi)1y9$lMH zyebMCN-Dzye?E0Vx-t?aJ78S^no6MvHFyudjK80#I~~xaFaLgX1w51c6x&+g9>$kh z(X|Bw)W-h4JLZ&G>Zfbh-r;JCdu5)RRzLZAl=2Y1;C|E}Ocr}Cse!)k{vHH(W3S=zZlB#c zV9#dW@-_Kgi=wH51AU~qImP~=&GRxjxTsyOGw|t`Uc0(w1_Ke*Mj*;@9mysAh^wf3lMLYs; zO*MTv2H2tGGX1r4+lPr3C=N4Pe-6PO5X?24GnJIz6RZlrkI5A@Ske1?Nhejlb#0_c zy8*h@lvY=GivNnt%9juk1;DGK8{$o>44(rEYzTE`MRcNMvt6F^{Ec{n#JnuF2^q|` zO&FVChS5xecBwFzZEHZsN4rkIe-$BK<~P^G zlG|X847pDoP#18>u|+C*uy3)=apK0{E*m7+>@-YlGJ|Cx{waQ?vc3t1{_OHd}p%( zPK+B>OL}vFh6a>MELf;sPA6EBpEz(&wQBJ5lY2>Jk-h&V0uS^7r|oi#``pVMC=bVH zUkoWQfV!ZS2v3IqkV`P1nP3G1MIHrjVxtQLfTCONfH&&p)XkA6f9AI<(NUlI$d0Mx z3HOtDYt=*`+1R{whOcXs{2mL9z7;kbX|~iGIY(MdDn12NyJUi??2GKeIIxD`+U~nN z=&fkLHO99-WmSi0hY0~_GkEpWspD){2gNHBK<+llJb!#6^~c<}mfc%%w^;cYsH(HE zLtrg7_yg%_#`Ouqf0wI}GDE)Zadnw1k{z8f$&F(7ULs=$*#s`hSDdO0&kPwPDqkGD zPCbVN@Tx$YjRH7GKltezbRNJ4KMvRqd&ml9l+ zthdRAgO-YVkyN|yT9<1T z2n&GQ%6lpbl}ZHu8vppAu?1Od@B?PLYU8OX;zJ@f^hnW%`kwJZkSs4Eu?B6{{4m862|iYz zw#^O7<=J)91-dtMsTE$)@fQj06zC+`77!ECD_DYrf2Qya51fFI{<0d<<=7Ok+Q__+ zq0VHF=*`0NCr`=_KZSpJ+`-$df}Q;mJpZQZ5|r{e(dsW_gfKW}MH8eD_Vea;n_3)% zMK^r52z+C_-=r2HeY3))q|Y{4m&z|8|0zha*O>b^I4lNM?>Kqj~$iE39(~LI9W{#NWJr4fa%UaC=OOF$( zPy57t_r#cHIi+Ail2*l&ek)Nne_4;j_|PCnM2rBRfE?L-Zbl}Xq04u@t$d&;S?s@h zqv5=j-9Z<{XB&m)rtZD-5htPXvwB{Xt$><6ctD2dB+CuxJ?Wc5i~y};i!w+^S~vD< zlG*YMm~K@cB*i*ba8Ed2R>nIE%8rvu1cQO%Uu~uHlTbrXOPE{wUF5A~e;elJ{(BDj z_~A1JW2@+HgSM*#)jakzToGW+RcNlK>~*lLWgQV+H7`HuX$C0#O0i!EEdN zrchKLNEI$Ah^=Xw7>jt>X^sfuUUs!-dtgSEkmHD%ZI zdi~n2FUc>QZO3+ON{+dLe}zn_@}lPP>quv&(t{0!XW38-PBH##?$8UD5zZu#dfW3S zCKe31d!}_w6dwn~f22j;i-V8uQ2Bj`)Wz>qD)E>>MYy)LlSy?sM;G2D0xePA8m?M$ z=s0u9--y%KbcBEfm^xe9d=QBgLen^{P&uBFH?E8hcAq19j^C=-e=VL+Q`#5viV%Hb z9$~4^d(N`^M88O(_YrhZvu7IwzRx`fN+_{@~&d8cJJR zp^s9k*fRBK_TP}3v~F1Pz^{xlQYK)ER=jI3{5rTs{{Tzj(Dk#YLS<1)?i~_pkbB($F`UECxWH1M&t8Yxn_>u&7U(m zY`2t-u=dlMn_-mBBH`)s&MbmQ}3^_AR;q2}y z!QxN6x9YnNO?KTO7@vvR(Hx)LsJgXwd0>C0uzNs+Z^Q<;*w*=qH5{)OLhi2Quvh|< zba~Y@G|DFJ2M)Y=_?zraF*@M1G{ZJ|`rR*~WBlOfMz9VX|J^X{if0?(uo!oCOPk)A zbQyA>JmM&7f3v3I%1ym@X>fVx8;sA;nK=y9rW=anRs@`;6&~ah$?=W`az_PX36SXC zhp6mp#harNgo*}L4IUt!zYhqA4U)BvQ%K(!LJ$W^eZzd+<~N2Ie%TbvRm!3C1+J7@ zaZQLq<1rclK|sF0CjPx3PZN<1zP%*`D#sbsT>>n83GnFm8-ME@)cOHQ_LR+opc*Kh zorERV04d6GLdx@s%sZSSrIe8*20%O$yK0;+t)TugO@Id>xu(38B9Vy)<;KfsMWy(8 zG~yQ2ta&4O-1kYm#S5<5+v9HmKhp|bqoH&OtTTR=7pqTS;$eK#+k_ovWuDIKy=?WclYgYRh18g9qAxOoi_O^sg7tSUlOEG&$x4a#16!+jxPgzhfs&6z2M)Jv z#9GtAbiXxhiKBOnw~ns<6uFD$Sh3KOUQDFWPp5|5^{u1iH0Y#1;AWK*KV;zdv55fQ z(1QSlq3@=RWE1Yw8%wJ@f1r0(1RS--q}!o6`m>shAAdR*2S}x9=MdC|PM(k>`r9=~WeUsPfs~H6{A9+$FX5PPD$Z{KvoXS>A60r~wga0A*4lg? z-zA}dQUW}&H=R3EnIg5rt8okL^|$02 zJ}#eEI2;jk4(0S7=E!f>Yyg0QVxg25^Un{UZwvq|r0(X-kz)dwjN%aumB&j~c0qHk zlXv&tD8|#`H@tov^wnG5qWeaao{fu%gSBg0?SEC+W!I^Lw>?c6312(D9}>DOYv~(6 zEaK-mi~#^$y`&^l8OJ{>HVaTjRR2iz=|0}%LV0wQz9e`q_gw9+88vk91{jZ<$1=)% zm(aMTv&b|E@$+LO70`e`@ps1Ccg8zHITx%UC0^6AP#r!k&`yu!=VyM}3I$}7Khcn? zsDFYe;>OFDwOEtWaigr56>TwcYeZiaTsDu`tUc^aWMmgFq}mfrtb{tDi0TlsQ%61C zrPKWR5=2cog(;Ukb=31zTtO2VCc_L8Gg`etscMjN{!BL?A zVKB|17yMn_PpgtxAW|;I55XZq-7n|dm#d@ z2eFs)zL1=`aT!#63vwDVe(ne;`AI@lZ{IaIba;~7j$}@hlpy>xtsyHZT>E2(`Xd8W zclD=(m`O34`x^TB)|QcBncL|~EmGL4O6+1K)el1Ei$i^pZqdS+?Q8nA$9)Am! z#tj6c^<_2XvoVNMNig3b6*dk+p&xb2dT5V7fjjm! zakc6`7f5I~#TBKeXIynW0#>4jzI}N-n$9|QaJL^KM zl3-D?QwH1NG>D6}VGI-_z$>Mrn16K8i;-ts^!bP-E30h^oqvH2h0Q9D$})rKX7iE zFGlal&>M_WYK10gmwlH{x&i;%EVBlQYeK~KF{5zojVk<}&qtZ0Zb?i=pRJmtYRruF z7MtP~pg-|qM;kx2K;#(Ye!z%&(clcWbYnk)tannE+5(&=JjtVA0)Ik%Z0sX24bc<| z3Y7_&uT(6ei^F0d6rv^VD?fe>#H?`Opha|;Lv z+Bg^&h)i>jA#q|-<4$RG$YZ%t%tkIco7jg)!D6;;mSLy-`yWluRFtu?5o{6U5-3%u!di3vIAiS4rvJ^^muxwzYf*lX z2Q1DkqVPumkDnG<$Es(IwGedEJ~6=Uz#= zpj+koB4|^2W4Q=pP0dXVyF&glDRMloR_8IoRliz49X`ki1K}2 z1z9y0M`*_MRd#vhEEKIj+sStb-Uugr9i^h0C+h7>%YQ|?>RU*SdUx5@yoo(^ijDN) zSbfYtZ*el^Cn!uCUk8;hE%#);Hsm#*l~=%rWckD%u0GVR8auSUoh-jMg=o}o5e9QXQoG_rQ!jcZM6!G4p8EJS z1apE4&3{^YdGAqtJXq@7Zk0Q9`$BwAKRR_>=4PD z7Bu^Hx|oFdp69#K-lP_B!9Ghb!B|!G?BR)Olbs%$ct347sqA`Y{sf$sjT!hno5+w; zf||zNwc1vQmwWQ4Q3wr(9%Yg(I4Kd849nx`qJKZHm1OD=UVtOu(KhmCM%^M z=>rn)v)#PQ{vxjvQ--p;i!)EtQXll!{`N-PbFIziQ`d9W!V(u?UF%#7w!dmNboT*V z9}9e5BVWEWqwCsg8Q$`oOKup|k#*V+3K`Dp9+AR8`0BPaP01)NE@~ zi+{?Esdv$DB4!@CITaSnbGTHKxVI=v!iKS@nW>`GBb&R?2;{E43?FTm)JtB;JxRwpu85a;NmgOj z%07POT*3^Q$?f)fAD;em&jJl6!s~0gGJmMKckJ}Mw2WAM-lwF^r`<|F+Kh%8ux7_4 z{G(lOi_ERm@QAflHZvIej13aI9<3-vMX9L>yvH~(o1*c`zRAV2k3U1v^6qb;3n8vf zJ_s=O5O zC7MwSrB90C$_IX40}CZFuuH(np-_z zJRZ+$*A01EJVc}M!+WaVG@Z6hs(fm5umWjbIW>5KR+B4YTfNY z;M^P**Zj!(igQUgU#eKrU*_K#SHeiA|nyux%tMQNpxi7XTe6jtkJfocvStO z`qGjEAkx)EO&s}QW+M9%UqdLQiGwKJO|=)nO7yn28sKVFkLd;RODOi~cpF|H;#`Z5 z%JM_;dU1+QJYV?Uwts})X3+LT1<$js&P$2b%FRoqb5W}(%fPmTzjHPvo{YB8mPNHK zVtB8e2d~&7?&=^tqi3I+cKx^-m)X!cFc{$8n7B0s!D34yv*6h?tW#9vhq);de+luWxV&G?1x&fasM|Q$!JDwVT&+@#7>G9pG+Ox3 z$>u0eUM|nEHh-w^c?gEPxLX(817lRq&+7*fCCcse!oK#apuzv*>X2dY%vHG0O@%-T%b--lL z0uIFYk~)VCr?TSoj6I^O7%oewJdjkiP>PCvjGus*d<^)7BemR~8d;yH3^z$;udSAz z<8CCZn-vZ>LJ! zPNn2C6`^shRRPVuTAN;nIJ;n6W<*>+*J3bTYJV7$1#~YT)nq(zv6%Q~^<8FNhFa*c z3OsK6)Nr%_QdC^yD=4P(RCR=8mm3frHRI!5A1-WWKq+|Mo=x0YZjmaVqJ6kC1So7z zqmj6GtDMYfn3-uMQ#VLzF8It=JAdpb$mX#51T*Hz3V!QeVyix$tH?U z9DmN-C&$0Z)Fq7{6!8|_An_}MhMj*(3fq6*Vn%B1`9eYVvJ-(9@afQn&c~3Edg7&v zyW_O=3PO!PFPe8-FEZb`TD=ScGNj60R^`x(ye$7Q+>INc7LeQKG!4*%& z`2pfxPnDVUasC;m-qLvwgf3Qk^R?QJ&wqsc?8ga6Vzds5{CrgA`grOJ*6Vdq>UaN| z4FQ^n6HkhvjFtt*-Gq4+bz)2vP#0l)1{W&|mRB@yGxBkBdfZRN{loRc`x%?3UT*Sv zCV`h*=XgYhhEHK>FclWR4DC5<7R}wp`r_X}W_#^TTqxehUGiP*UI^mc6AF!;_J4U~ z9vKONCr{1u%JgziPfVmcTq1AB0|kpoow+N0YhHL7qF_SV@VdIPm5NF7d0o^!Tyy!A z6Uxn4%`U!BczYg;_nzTY6+I^+NJkd7r!$df*!+H9JynB@X02~W$zRk> zc)xk@da=g~Dfy!>`vl2adSyJ=WOgBtmWSE#pxjLVg$e38sBiKPyA!<0G91d93{Opc zCAig}vtA=F4J+v0`{gbDvagnUy=Jeq)o&eM;8SQ(*8#i9IJwz4|EW#mU4K*Z*prhy zn<*)zg0r`h>;v(*J3Rla*$mmu#h_o8hrtu@|nGOP-XBi?N~UId$4PuGtL^x{O` zpU5lybpCvXr%ww~I}FOyvF_sR?q!skGICeFh)=G1@h@ln3@hEO%75r$ilZ~1Cq2Y( z;tP93b0D0JIH0%gqhn`yb$6bq=77u#GF*mpu!R$Pd|I}R5GyVC_UWT@x#5v{*Zg35 zbb|Y$)3%RGaw1k-mjXwh#!I_SYW}Gfy@$=j5v&?F7)PC#JyPBl#GD69neeJIIR98X zOVl*0b*%SAP>aGIj|4nG@4YW@rfs|9 z*40?8yykPBzJDm5$K82GNjGvhwal}QRGhiqlutGk!_$dVd@_PJhHkBV6&g_usx$dn zI0IbqpEGHXS6l=d)Zb%c!QWSFM8F!~_hJC0|MR+!4EPc_$4~P(R?OvE?=45t@QA$ zZ*f5@`i)`S(CG``P9_f@@C?Z$Lq<^VBbI?BnA1fOXz1iW6pOzAj_?OW< zbRpMh6R;%gblz*F&4dkU=~!&=r$#Dm(qCAdLs`OW_u1}r(r(DAG9r3?-%h%J)1^K< zZQ9w(_I_511YkxAVefj`)0l zi+}foS9@ckGe7vq4u%e5f>9=|Y!Ss6s%;>Uv;8?eLyK@qRs}Tngo$m*vwX^h^Bh=y zH{Ccufv^Id-N^=Tpy*6E%w9i0orf_E+4+h`rw`UECMqFC_C^_tuq-~$d$Wn^FaBaOI@_Y|(n@YAyk)N1tvK9%CJ+nKX$Wm(+ z<#vi+zAo$A!VLiD)e7jr>9%+vJ%3+iyuidHvs`5{myt#{>>WMv3_IZgmS|yH z23@QI*$es$UaGB;Rs9ajy zcVqMUUd$}M&vTKh532%ch7k2O1L*-olDlh9m@q{pMiG<+B{jt*eM%ASi*yte8uO0Q zk6ug#d8(z8S+6#Q)o;0Ejemz-%25sl{5UUIY0ij))T};bg|!?-zlW2y?zcxYG58bd z9^X77=iT^1WPLmCYad9EA$U^J`{qsvgwurg3&* zW5epCur=iUT;QCi*Rpq;iiPu2pC6OMiuP3NaoA?nl^hpbTG!wL-fVQAaS4~@Yh{`= zd6n|Qcz#$fm^z3`u77)aqLbsS#LFfx;-*_a&hP!=BqGDRJKuWNWy{2^n5@@_8H`Si zh0~xur)$N#=7X^=efEH{sI|8Uc$P}!MB8YXctvlcFKu>ydxHzHDlhcNG|yDSdXGuV zNPT|`ID7Y@?6l6^J3Ha~8pH*zL$TK6_R|Dv7p;{@KFKFP4u5hH&L_7WLkn)cHeTdt zehRepF?uSAcrmQrHMM)JnDS2G->VOnTxP3QsU%AFgnM03ok^SWkqr6~t8~47o>v#( zovpNmmv^xpojYHV=+{kscSGUR9oW{F5`O4tYuiWd;kZ>w=QWZD)HS!Pp>Fg(4jeQk zd%)VYgRH&n8h`C8qju>%I1Em>1Qar#*F~zLtyLuYEmbnp4R>}OScTRshCd0F$?wqzZr-Q- z(4_yiGkV@e{S}2P$Z+l2-mB==OVaibn<9{u?Y38*adxzG5j^du5mdm#y|%}+=U%|rTyL1)zOcJ3VMD=mCT~%;d4mme#s<3CaHWyi#)HP+f#jePDFQzy#8th z7-$vxk$*6s(`Avp<-TS;I~82FhM>GWYtc(&H|I37--+Gh$NGMo2p2{EtMFTw*yn8GmBpBju^!zt^^oo zA{frsxBcm^n3L=gTl@5UU|r?Z2>6J3 zy{Jj*8~RQ&L~BPk0odqdL~af`y|;y+)PLSLqrV#OJ$-*3HOcwvp!P0#J}<+qwA3*v z5qjozzzU_pK4hTkDhPBuj2^4lKnS+@u?V)uu_FFVVn1phf_)br`Zlh6E-U?0-}l^% zg?FXb*KJ2PvE?;RH)l~rGkCgbUNn^VX-+{;#W%)5*U;^M>Bsc&u{As2^HA6873u0N&s_+Tu>D)Sk)%Eu_!bG`ur5 zOE!g_-+ofZyM;E9#eF)MiyoJ&Onmh)wy*TUPp#sV6Gs<5CD<|}1B1_*B!9oE)+fp9 z{5qR>tb1%S71Rr+SBOu_N5f9ikiBDi*4)c}ZMh2p&YG*qmMlyWc%GIo-)I&Psih6% zo=dS0{l@!`)ZZx!gY8y3(rU1f2D}Rlru^Y2k;s)XTh);mm$Aw7bCp3oa*d4z!iAS2 z7c3bk&1K4Rk7WS-*2k46N`GysDX|zYQJ%orsO>d;M)TS%V^3kQvRZKzg3S@BvL2-| zz-Dh==3U>7J+IqcXXukgZ`r91rbo~dmPJZJ^gJGSBHUQ^3?yIcTmtdkb&?z+`Q|2Mrs;Q*|8z=_Vzyo6TG2d~1?HkNabOkhmVbGpDr=4NZ1!$~ z`g{dp>dw_;qw~;$!>fCHPb$40+K))klfj*z7N74;w?cK>gN*_r%EP> z!M(7lcuHD}OdcC5Xu2W4bl(oN8GI;~h15PIktj-4t&KDS@@4r>_b@AIiRZ zoL&ADf*~CBUdx~Om0=&wEh|3v2^V)bZW=hA2-o`^KDG(P^C{JX8*It=L5O?&BGb4b zE*AzaRBnc8O@AoeE^L?l9i|QOINt(elXjx4yK(EHcNZy-#IWPxe6$N5i=a9)xl~ap zC!Zvn7Anf(7Jv9q;v6h3_SFL<3|oNppS@Q8tJTWu^|#Xs0Q?~UnBVpw3oKni|LAQ> z`IZa-mwkU3iAU!Y?%OE^0C0}K?L_P*A5h7qh_w&^_J8A`WPpwbIHZC6KAmR(xci|* zn4I|ADdnqGK)v|>w;EEZ>v?mg-ro@{0B}Z;h<+7|R)bz80GxNUTLH&Q4{^YMJE0ui zQog-lSbI!Eo1$FaOiKWa;72w}MW%_|SVnjY0B3){zlh&XDBsQ}007+mejmQ0fas!x zV1#4*$AA5Reab{;&xKv!-!lOG>)Sp?mX6LR@Z0GG04OlW_torZ_HuNZ0UMD0qxA}0 z0MJ){=bdcl9ewM1wCSbnW5v9Iln0=M3x)x31L_7q2KWWYFGQ=blv%x=NrQ_vp+cfu z=^R}2-f$7A?{XkWhlqk&gZG~N26$=31EE5=!ha%M0WyP)I156$7uYBjx9Av`3Q_o79l=>C zpVAhoY~x^yGeQxJiU??VIn1_zCx!ve0qO;C89)eX06fojD`RHu`CykP;AVlo%V)Sn zjek&i9$cecZz$GSGg02;0oHnbjKgTMW8P<@`Hs)yN?8)BC^Q-mS8BcHw`hQ6 zdD}>WWEp^Jv=!jpLFz&gjz~sj`9AE8SP{b`7%W;1AOdjGRe< z1sjDzDWn~{_`>Q3-qzlvRptdpow+L1BI+bYLQDyj?zp~x4%|LU)aX0T{kVKVg2pp>B5$dl}pk!0hEY||BD%cog; ziDKivRF$nzM~46kOzZRlxX1b>(7y9tykk62Mp?z9%$D7(;@u3AKzQ-iwSUn+SBFPJ zu9tkNe{K%H2v9HikvlqAi5#XZ90O$fh<9;bbp^QcB<#^)v*E|*^-Wl#K(LVrmz=c0 zDzYqX3kz*44Y&Z>Pc^%=yk&HSm2w52ZWQ0(z>)E}2wWC}1$KMHp*Sz{(I#tB9R4gL zeZvrP!DkrsuiwB5!+1a=7JnZ32Tq4H$ZzWYn|JC6ulMDtHp{bgMp~^$`ml$3r*EJ} zEP;Ud_19AV+Cy0QL0R}SsBj5uzyN_{aHVE(*m`4anY(}6QOQ8PV+5!?$Q<^BEnqdz z<_?!?N`H2;^PfBhx9kLda~xbE5iXf1*hfyxLQoASp)_Gt?J zY1=CEz1>w~cDl;8^URa(X*MIlQ^Z0A z&zpoKo?A!(dw(sw)%7lLD9&N#!yXQX^DHc-3PAd>hxPUUg_mSU_67Sp-|%-${?j%; z_OL;T3T!G3w&}#&9T`XH`P?6E8yCui_Z4GDDJX=Tf5?Kc^SCckjO%)dDjBxbWe>LG zvG3*!$6*YRTQGk+y}ge4tDBeb-}&>>aX+rqC1HTKeaImflcxHF8`^4 ztZ28R&e^`nmJV1RjY}Ahxt}S&0b!O4H&E#bYh4w9GGBJH)@m-Py|-+J}@fK=H2tlS3+M^u;>u$y~5%nT*gP->gaD=Vf)Csi*!Fa^TA2izII%*Mp^h{JdIp> zgR${Gcz8X;`UE&APqJ;|Kr04p82VJKi9HfA3kErr9&UxEL^GN?&VH{5*M1IkOFae zJ9_WBu>pMWi1w~S{liOk+qDFO4Dc7_3XGk$rGH>Gm(i?jQE?catrgik{1=`v3&|ZQ zN+6V=Io40M7sMXxw|Rf^+1Y5aBex%866M_^jXgRm-`xLi93zT;*QpsVC*AOZ!N4$t zd||_KG{yF%Ticr3jWJr`Dn-p0+Iyu}^lMIRS}{F_kYRaqDUgUQb+!8Wz<=ihXKp|$ zzJE9q9r$mNfAOEASCI#6xi_HxllPCYa&cv`vi$INX7Nt6 zm`I1`?dM)t6#uWf#{w9E2arF&$B()v9e?(RC1L^i#!qEzymZuQP=^eSW|k|b-=F-X zPwjTpB?ZGqkYw!p@Hcij>Qc@h6s3_TU8!8%X2Y#*Id%;;Y&TZ1SJBui%66%T!XdBtYf~}TZ2qnBUXAt~nItM?dw&U8 zhmLf%E&N^Sl(uYSyY-IxBHghH>@zz9a60OX@`KAdxKXUU$&&JajYlfwW6Z-(OnhU7 zH2&6}Y!mkA%)40#m&g`i79N=Vz+Irdj}n_hi6tUou9pOFKL)}jgH19X_Xwk=#%}lH zlG`Ej+dfe#Q(sEnbUlF`ypof)RDY_yPOU_cUMr9|W6M>#s%?wyc&(+fo^9+@rm}N~ z0znVOXW@Ykf`oW*uD)XL3vUFTFO)gr7yVdg3On)KvZ$|vwz?(E4xZt+jA}b0d+-be zefdQH*A9F1iHO z*kq)!MT1t#jeOux8*%k>=}lakEW*#Y=q2fFnc5Y{gu3Wj`1|Jt0*QKJf zf)J+y&i3w-y;QqcMOZ`HG4*JTmcWCJULh*tX!8VsmQv=I6C~}Az?YlI4(Ar&+=PCv310)fPX-csx6;qv<(c3Eo(lntD=|*oZyQlVUM7u>!9;%c3rOW zj(iaq$n5n7RrrAuCF}%P5~&^F+9SU&VENX`&}e>moPfTu(JRY0haLW9f-hG8=C;G8 zCiuq47t@@D<%j3}#7jg1w~gzu49t=aqI&c+C_8FhR)Uw!+KPIsK7S5^Zk;$tcy%>! zs74=Us_xlFyCqO;(a(YeMX}0exm30vevmWEUL3gs7FUiZnKfB)p=8K0s(N_@e%X>D znWVU?-g`f%*d67a#T#`kCi9zJFpp_?g2WKhkTsM3S=qa@6144v)DV zzbm7!Z+*F~i;fxq2Y)*{8E#UU^Zb^b@hSvLyI&R0MqZqV(nv{h>h@PnBa=6>XHv2= zTO*fN+s7h7a>wp^Bya>P;?#|GGsEu6S1%Zzy*~y>T_TV4jp|%|LJzOaYD`6Fz+geT zw3cC7F~Xr?0Di!zMa#jH2G+q7HmW{&02J zzrJu>|MK@O`_^XxDZR8Sv5NGT1)0h@<|5P?%~h>v@fxT|zv%ac8hc?=z00nWbtbt0 zNu>!vi6q{RZVDU0-7PDQ%OX&62sQ9$&5(>pbvg^%_`0s0PunOm1JI36JI|*t`C26 z<>3=Y9{8!>y|Pi#7msa!KKHE;zy9*&s6X)mM6#>p)&`3y+8|RlG-*DGoVHTAYK{eV z;Zwd$6uX_&eOZ+ntJ8ud|5j zG;i~k>_n@x&ZiU?B&+gm33{clW{eD@qNp1u72M61V`!FSlH!g=W7zaF5Qvye%~LtXB@)T>Z7d z_NV3_K7XQ>@;Be|+fR);e2F{i6O4j?VOi-%`^lie>X>Iol?E9$t0{()0MfQGR)MjE zB5V109ax*-HU7Bi!oy~9{mD5qBl_k&BH}jgS~m` zNLK0!sZQpK{Hg>zBNnXhuh92Cz;Le0X*`9!mqsuC3m0eg5$itCSBcbj<#cm$7JDwX=KAAhy_r*<5^_FHql`G}3n?i2PGR^EMHMwul# z9);@iF~iZ-)`6iwc8dXS%WM=z70TqD!SW;lM8m>9~m zTeYcbn+f;e3weF{78ohhAr+ad@7gi14@O~UnT;3qap-Vn3FMbU_1rT5%nL=*p?}AH z*Xi%NA9(+0(uKrd{ozl&BoEy6&7VJd$e`5O8`KAmiP)IDtV->R+DffL#XI6IggQJ$ zYOS5RHiF)c77O@BYp{AmXHSm0IcTgwo|HK9S0rQ z{udkk)`_1r{o=mfBgKr zzQ2BOV5}ov+9OYXb-(`V+F=6|#kx1bWPiz*^I6wvb2p^ofwOjQp}3KubE)6yDLloV#K1{c_* z>IRQ5$x>W$t{gK3wt>h~y@Gh2UHL*_^~4p4&&{=gvLJ)y$H{VV>T`Z}nhi{D$#Qm$ z6dpthSoDWH2sYXvaB9lrDxOr8)4kulV_XeuFR}n473{OY;eRZxw3TLTdLqG+G1oS? z92D@5Jct3E3y@~)oj$vG{NzAOYoK)070>)CDikkz>2|67S!Y0(>=VJ@SZB0QU!53M z-od;0AKs7+UtiwjfAc5lkKU%QUmWnopMQM%zxnGoZ-mxS7yk0KOa#Qz0faQ(raXCl z?jDFfMkU*VjeqBo7keVR8$y|#P*=JvCzlbVeje|cq=8Ib?PX56YJ#b20*k)Hmv%n!L<~LnW z6)1~@pr?MtnJ@ITxY7^owo+Dfvw)DBh@zxQ5RkL-6Mhk7$44@!m<|U5rvimsDkZ;W zj$D<9D}Ox+0-Xhjhp>Te=&>-v5uvs`DfE=cRwx(X#S2)z3@h4Hwn#@XzDt{JO747g zrDTwg<5Jo3BKqP87u$GlKKL3xA18RouRyWXg+J=8aO6zG`GduO^XGxF{^rIbcTgY9%#G#9@ zB3s7MORs0jgbOXRGO}!{k)H7r1zFM#(a4vh$=YoD(z)QwA9eZiWDUDg zT!*yamp`#^`R*h9(E<*f^Kaed1om&N4u|jl;s5*mO1-_v zI*e5wQZ)y&iChz2U?SIq+o*-l3j(i?n%f>Ho_wXIEI|T7f zr6^T%$d&OWaOVat<)#8U1Ig&wNEaQq4HZD+mqvGa=P<~;b1;h7YGnCUX9U|$LCF*B zGR`(1ae$0S(9aL4l5f@;MH;(!{C}$VDxzD-!9$Vvc@4*}g1y3BH zL;u*z_gDXLe)0KVt?Ea&`I95Rv3}^GmFFGy{>AV=dj7xkEMxty)t3h!dw((BsMC^L zIr13}UA+cw181jEDSlVD&;y20#}V>o%cvL6As$`DkZICCPDUg=q@lViIH*Ls&>g#F zfu|P^DKC%i67jW;sVDsqo%Z|t^k^^;Hu$4e7%Sc*t;5x9gYme61R)4iweD}xs*z<+} zau|t>0Zcrw(6R3N{kuPojyU@2dOtewmskGfUw^P+2)7^I;y0$wzx$;9PhR_K$>9&? zex-6%kQ$3lpv(*}7;Vv^Aw)bU0M;R5g)M7?C zq3?{vxsX(L6tjq~?V5h+O-V`IQpy!zq*^ajJf2C(>*|P}g>?~x1?d7_K1&vC&LXoD zVFPK)HnAKlbxj=9r;$S$)0byuld zOM&=HSYHBi&VMJLD51H}kx-3AX?qWjTTF=;qxPQ3DoDokK=`9kNt_e zxYoW)MaTXG|G#bchYx>sRiXNQKHwWqzgqgwe$5|kzy$yJC29OVFQJvP3x9GEY3$#> z`vymBuU}qtV8LH}{pFPBAK(7P+Q(;%dC|H5w6(}qn1An8lPE$*tmjhQg-~{4TCqU{ zoaLc!S^9FaTIyi$tWYv4w2rJ!*a+Wq?qMqxa1^|FR4a6RN4K37)N<+Zd`)+<)AF=4 zSLzG&`i7G>kOQ96O>Q5N#mOe<+g@7#N3GaA+V#*CZM3O#pi{OB%fYsk`LY!++2Gp5 zjkDW|MSs~K;PXvrpr;WKX`8Ru_2(3Ov!L44@PF8Qx29KhW?k&R~8uQ zPX=0$@s&rP{NUEmJj=Q@DjrFBXAmzF_Km>&p?{p)@gnW@`_FoQE9K-TZ$If08?%c# z&|kTvA75R-S|5My$>$p@VX<~KpKu2J6n|%9$*@3E+-tHyA{8t!JfW*A1p2)94apQ# zzIz2FIqL+T3hZtn<~>y^9Q&jC5}~iF=O8_7INPN+Qad+ap=uln7Qr?1H!-atqIef4 zK!4XTK0RL~CmB5f7rbEMpfpiE_dQ=%TP=f6j^~&(5=mSvY3PXSqvER52k`)loSs{6 zW?#dc;6Kk@Y6^8Q`N|MG1X?lc)@ zdkiHJ`I9=VA8Aa-;sU)t6t0&!ZQ^Z5aes7PZrH8WzQ$-!GZXDZ1LBQa$5t`=l}8iL zcP|e^oLgi;w`8D(io3npkOy3ZmxBIY7HL~r2e)17`Ym|hZ(dpj1zhMZjGZSf&6n$u zUiPw76B5rVTV#1{kl;>sc+*R8OQ6UsNc6E!mA$0DDLB7)mSkSUX7BmjEV6I@3pU&tL<%qS=Xsxe^MD4ngZ#N=p4dWb>ML6BUw^^Pk&2fK z?j5J_4;bRe z%%$N~sXRE2n|+W?^W1?C)wDcxl(EK89S?F(dBsU+h8vLZ@pHnCM0!3$ye{v>mXnb7 zHcDRr3XvW`PAlq9cqA$11bpgSLc?0@;-hv*}Z}J zLd-w(vZ~x=FxX`ps4tQ7z8t}li2VKTsZZ6-kYD)?#ONT6{$MVPGIX<*aTV+O!mD1L z#iIM9d%3&Gw3n5RL{5Ly&v-leu>r?j;EZ-CSJ$yJSXLH z>*eiYkBGd-%{?ap(41O5-=aqdy*MB|udJ^VBek;NU(9LLQ} z#Pym0c*0HQKlVQQ#twhAoBoPvU4QJ`FD%)gan1>n3i`yozwty>vdD8Za6O3Cinp7h z_G#AX`uykuRVXM1&bLo1P`(kbT7`3!nc_h&hT|cBC6U>bu zV#plq)TYvBx3+0}+v|fAAzLr!(q2Si*mSz<(tFuwmdIio2o%!$O8I{gTJ?j9?|cz) zVlIntuR)#gu-Sx%B_A_DqdE1}kA0sL*PL4)lgZmOXRz`8<%4o@OizwU#4GW2@U@uk zG~`e(sPns3*Hp)GqRa)<(CoUAx1nqSiCuN&fD~XH)0lUTW~0vJAuA*H$y6kACa6a^ z)(o6j0OkQdvZpri4&;9zPVwqLH~@)#nEv1Nw)hp3%M|c@o?%3O=`KVYxhyl64B{-i zH%C+?ydUIkkn^>kTlt}&MykRE0$y^1+Ci1y_(EVHlt;Q;d?LTU#&b>5%39WXN4>)nd8sIp=CetnvkGvmizGXH<#k6ix3!7yHb>CRU^ zCV94oZ(a&Ldu&Pl&ZlPNF8w%kOji{7!ODF}a!K!?z9rClvfTBgq79s1Y;*EisS|0C zZ&5&nokO&zdt=NZWQqM<#Wb@qe_O1M>ojjn>ePFMef%~D%*|rt^!kh8g_Q|kCZu9c zC;6TD`Nr_BTK|8eYrk@W-+cj7USG>eXn?;w@U@QI_)41XO|EhY2EoEvkZNv2jVAU; zDN(<%jyB%~g!Y{3&^q#a&vQ&m?^Vxr^Rqin`MxLjA+X6=9`g@hm`?FW?{E9{ulbqJ zWYd>Yp<_)Xk{nudB5re|SCNFaNXu@HjS2_l(kyYhbL4-I#aWgdQf9fa>ci0S4ZW{N zvrsm!+aFwrCw#Sf=B|(ezV}cA78s6=;lGV9jxDRKkI7H?dGE(wv;W_<#E0qJG~ayJ zx@^t=YYiu(B6y;J*ba1o{2317$1lB4w^AA{Cx=)G^COYzRn95FrxIebtZ%ZiN7~yl zFzJxt)>MB9)Rm1^M0N60fRpE*r^hKEEe`NMwttTTaR&$Zxn_@1+dG2H#H zJW)LVuF)sV$uFBbt<$BCx&-ePQRq}MIGPWgExTKcop!G_$;$()zQ#(1NIq(Pt;_Dr zlSuCtQhX#o@d1I_1dcsJ{>j}ZdrtGZ_e-X-pV)u-kN^2Ame&8+{hrx@-@i@tXpFzP ziZN5Be(&SpM@A91vUDdG*p?m@(Y6%*4MOpMPxA)Ml&txBl_x)x0+y zUPnXzAg#KTd&%%bt-w8blai>`#SHDdCu-f!a^E^wPIeBxQI`tej@;NJ7;d)H)v;ne zr4NtIVxrM0D-Tp)Jgsi9Cw1;L_&WKdq5qN|B$dRaaZa?8sqD4+`um%=Et^u z@xe5b-m7pABs?q}Bk4(U$g>QIb6@F?x>}J@!pHF})|^qav3W>br*v)^>K7QB{9)`l zo7pRF!{!eKN&!>~59U8tomue%$*EFP(x73$@H4@Il% zeRAa<7Pf{q27tflNN%Z#j*xEM6xK z{_-cU8~osFkl_kC2FjKewurWNc;}x!@I-Oc4hzrV(3>tE2*tEZlscDDL6Cn}W-`W3 zv`4&*X=dOeGn65?8)Ey8Kix*5ohdfmOqq8??gfu+*Hf@S*pAHgo;$)xMFPMu;einO z@mK#~eg5iS|H7{R&i_wqH$g-!?&$f;pZp0D+U+ZbIjSXTTRP4vvKe11NioMBbM7X9@^5lPmYSq*Tx`7BJCoS z3wKd?UbS5K@!vRZMdb)uf#JS??`I;D?;n5dz;_HP%ga8N_hYX;GeDd#!N-h=*OIOx zE7Yiax{ggX=k3D)C|L!%gkuQ`-cLW|;=YKg-JC`&y1n=?6>Zw#dgXtXRLhv3rZIax zwEG`>9sR-QO>>$2Z?f>y$5N3kEs(|&6&$5dRV1qNl}CWfivQ~mCyky~dmKKfd@wQO&1$#Q%v;Pj&!~(ax+h4VAY>V&}NubOU zWxSJyJOqADQ_*~AAwNW=5UBH-NVuOVLA^6QmETJ}lAMnt=04W&$=$o%L!a~>mfUVD z8ok1? zTv44yhe)fHVI8`2^J%h}M0uyg2U{#@s+UiQyd8B4b3BZzlh*eE>K#Mv)j%$?wMKWk z<_HHV?`HdwkF@e1KkkH=`TErpmf%NkhJP8~Y0b0G)xXSLF$OSAGMHWz<|i-bGYte0 zebkrT=Uw#SOHxTGQG@2A?dljB5P#KW9gFfvK#9X|7;vDQ)$qhMRbj1sFxAR zQ~M<778QSvL2il&R9SZX#=y-{%YX29V~tK!iP?TGo#6r8IG5npe5FqFEQ_Q3V0Qnk z&A~2Q{aBvp>7EqlF_%?lXU~zWWmAkhJgyLt+*UJxuMpf69!0 z=V5;DYl;`Qe7hnz*3|+s+ifq<`F7xvSn`Oa6sxm_j^a8YJLv_-Tj#JMVq5=rE-izJueYC#|JK!ebwiZ&bGQc+PPlhXDn3TS>w-99jCRTrW6 ze)AWbY(LK#0KC8Obp68*)4h24ldn_MG7F{mOC?I}qlgb|S>5|%AUYmB%8qXYmC1hr z?@#fMt;`>ny{(?;6#K9V)RQ=;M<-7Mqg~M+a;3V6-F|`A>h595?4Vf5$@@F8lUo1=(mi`CVat30gA0M1*_K z;vMiSp2X&xZ@l0yKFOch<<~y<6VBF#B08J~^nj7?_PsS)RlgPac5nCjUa98}k)8{= zpfl79mg2;FMAfN(UzjgtMBV)uFVxR|fpKbEa5K%3s>U@@)v^I7R9ni0qcne*OivST zG{pnM4Nm|OGGUPZ5h0nwm&sIuk@|wi0x%h=?3GKBnsWt6nohTco|{)W4Lrs3jckW z3%o(IQ2hw4DgGxcNl~P5Z|r}4l?1wI=+4eHUuEv2i-0X+u7YcG+iTC%aQwh>`u-#V zQQS_qy*TuUiTx4^Nj&-nPUfnw+4h*-73aUo1%_-1gE;QZrOe~H$FOH|FlNgij5V`G zKRQ0`pPlk3lK0pBz7sZ%(R?u;=&$>Je*AK-H;FW0uR2=h*t(7{tgU~K|1-PhBQmM)ZPv8}?=1pFW#9Xj6x$pG8LkCd3{il9eV_iz0?E(+#ik-fUY0O*m zneH{)Cl$9(f{w)E>5h`)!NS|S4*V6 zv2#}z8R)sV1_{vEV;O(44h~dO0aNizK>(ixx2zqAF>(PF&=An6<^Zm-&(14o*!cnM zp0az3-IH8%K!iW#j2`j8^{|Bf7OY&80mbM6a0Pw~3Z*`XIp%w$z%B+QH}^nl%;(_1 zY71^|v0Xj5V&@aIA8rO_0EmhthZC|gkn=su8;CrB0XqVuXD5IBbRy5T{_Irnb29`a zh&~IM?P8Fr=KD$XO3feF&|_C z*vC_SmkdIX3;-2*d+y0j1W~EvfK0^;1peSudvOb7_dFG9Ba0yaIoIi1Et_H( z6F+(|9a5o8CGqr24=P}l9l^8%aSLIOPN1~VVn~EOsxTX;DbfZP5P8`52!X^yOmVKF zs)CcLavaAA9G7!Dj$6LDBe(l-i}!3Uh{7=#=D`m?|M15)VE1_kCTqm3xpbJrJ{U43+t*VocO$2c+vT z%9C=@L)llVYZgWdqJdG}^`E^bq6T zmr$&x2tudJJF%c4mU~-j64f9`^erjogCa4Y^`*S%=udqv?#<~r=%?pQL_^H_`L4$< z?s0#E;uv*#htX6YzCfxPu$bx)lU!b)Bn;}b1GVX35aSKAvt$ah@vtKnmP_HJ-E*#bnPA_6emHGsIuEjtS_7T*@54w#LqjRO&rp^ zgQMt6WsgP+caT@K=n9q95yG>Bxcli-Q6GO%-O*!7OWb43XcH+!uL0;>bV7Uj-+`1n zq1_|5EmQ+kI$~W_?>)G$!34kQfL{Ed$bIxI{HBT3d0m7q?x|$O9gv4S$~~3p-(zQ_ zpcn>dbbL)`4bTO`MhYtXP36@}WfqRtnB0^J+ZDF~6dR!E<)X(f+waZVxf0_|PC$QF z^Qm3~K`b?w4x}n3?oT`0uFGp1h$YdoEjpIdAYb{yi6hzIn3gkj6Py3C&)GCOxJ#qQktRhFtjU3bX(DW_dW!KL#c?i|&nY+QeRy(jK7 z=>UmLN9cp>p$u^60M>lH2PbCoAZh`MJ4%2qFzY{DwryUPR?#82t4t?C^5@=R@{7dS z_^G1|zP|^TUgRx>wclCb9xnebJE{Tpq<4TyCWmZnsEm?H4i|V}YfzPUd+_ZZ#P!Jb zZ~Vw_U;r!Eq(CZrQd(FgVK0A{OsDvY=@2bZ{v7g{VVD4!Kx++u`;}Id)t?d{<5ypf zU8+uF60`ma^l{NkYWF(HYg3RX4g?Q zXYJGQIKViSH-kk-3PpdE$fL$+KL+4l9AE!W z>3IXg{HNV>_MTt#PWWH6V*^Ov{6jmxZcqmAc0RNxet8xU6@3V2W%drP^=%s1CrEE@YzR$>m2ncuZ6!k@@8>N}6;;2vW z8~^B9lN}fnUBHdRVnV+{z(_ouBR8|+!9((^QJz{&3sll zqgAMUl6R1ZY##j^8Ozz+{L7C)#L5$F_sF-3g!L~tV(fps*nK|DX(`~p>H9(*Sv~Zk z@=0N%uIk_>IuF2F3OfDaBgVK#wf(pDHfHvO@v#A!*>-SWJ#vGhoBRsx?_`E(7xMLU zWLmL#GL{`0Vv?$(l>7@nQvK}2zUu(O4rLWBr}hDx*?mhe$mQW3X+_3=N`LOS`D|4K^?12($Qvi3~+GWE@WuRToG#&vHp8&CXi zAAa@k@@w4V6ik&@f8<2&F>gN95w zAYhl>tMdR!Q-9^mC)wz1{V>ggG4lz9Z~3c7wfX8qHcujL(iaygU%!?4*(8t3*g&t-Nqk(hr!p^^jhqlG{G30((mMIXpw^5~7@2!LdE z?PGQ=eXred1p4!P$*zBX|I#b$I{4CmufO!)>o5KHdK^Df_-yQ+os@SSiz5(|wLdWo zlVz|(Mi-XjhsArobRDAu@F%*=Fj(AMn&={z(E<9>30>C{^h0+x*jj}!9}eZywLgDd z7lxstpWn;r`ultJrO(-Q{iUC-n=k!zefiQ)*Vl;;K!ovez^q*a9M-3D&G=yRGo!mt z?V>+)-}a*~UH@&rKhZtE?UyFH^xOU)I_7u6Rv-V^jg0wcV*lvKBs&8e`#pa3l=-QO z`qfeP%m(3!p83PxZ+f2je1GVFbeeyX-}J|l)z3fvVmp@8JKR@K8BfY2*E@ZK5Tif2 zU;2)yjCMWM|C_e!I!b=io*D1nZ`$2d?_YIB5m? zZOi!hLpzQm?!W8K4&t<9=Lzxwi_Ne(E1~79$8y-i=+Kk#$9FU+i&crL-;@JT*$z`c zzvjHmAb@y%lqmuIFE~?^h5z$l4cU~pAOtfyeTswrxX;J#8ej7w8F4z(A@|K;9yaGjq14UPmr$ec8hKG!MxG_%59D4So zP2tJ#GA4`TiY&^52*^HUJ-UIML7GDjk_qv2??alSiv4H7Xdw0Xp5b-#|B?3+u;8!n zW$|`oF#4FH7o(#rzaxJWNB}kLsPPj!k6k7ZuzM+os7Eq;PfXDR6beU*Mz)VF?NJ9Z z1t&*ukOepbk|(}$#*fe9za!(392Ls=vRS<; zMa|StJt{z&`Za%M`4cBcBP&1jhzEt$126U-oFjkgFIGQ>zsT!X-_jmkCb?ksv3_Lj zlWWMByvRL!uKLl(&u_kz>AwE*`<1H$eEITwq)i`d0JLX$ljNr#|K+{p-|L;^M2Z>x z2#U$dCwdjW@;J#cv`in`f8tMNqlVsZ@^n-r>Y-$>KR?e#{J7@$M{Qo*8dq8J=(Kz^BA9Bee0S28w?MpO!jS@ z_@C-$a{i;Q@E1A3Tkx2^lm1`QU7TS4%BTMQAIqanbD+xRECMj?|I-0FrivCz@rf*N zh*+MKu}FWJ2!#xHDFYMPmUY#awMqX}h8t}F#|$OVGX2tXc(C-7!Nvn`fAmIW=hX2a z`e5a-WO`&%ScBNX9$21?ZJ|Sm*#U>Lc4aKNuCe`^ZjVeLJUgFu&ysCEMJ|)e08qf> zA?TTW*AV6?W%!SX5)4KR&z^`(IT0l?&tDEDQy-APr^wLP? z2ga$~_dlj+{w}xRclrEJdH%cHrhoUPnCIhldXBf#|MI{7U;jJ%O3eRuK4ho=^XKYP zHsgQj2k#X9AJXGcKVOgkHkEYGu{r(6l~vJvf9X$UUOfLp;Q9a7J+lA!*ALIIuhA@i zeR_Z9$nWIoh=y3@?>$andtnp1vAH_n4CflvK=nFmhi7 zgh47DQ1Y@hb2m#lf&yo`k8M*li)*Y8=j~Z~mackGuG<&7Cqt8tLv1eY$Zyo~c7rQf z@9<&Ky82!?y5s$FGqpi{oqXB9AJTfZJVk%Y?7f2cd=7*caPPgpSS&Z@LEL7oQ+K`1 zi}7ZCFWp9Nr5g-p4L1#(y>z!PytG(9*GuPkzBpa9Ts=7_G(>K$T+fG@Myy&W@R}Zu zs&{>;;o@=-H>YO3SQfKTwayXu)+*JJo=9!VN;E3fi+B00)eZr2^!xMuacr$I(T&3k!(nF z;y+80?9aK^?=1J2q0j96nsM^5OcH-kyDh(+Kl;yd|K=XZUhEIF8;$2W<+IPL8N&24 zOBap45@xkNdmen{(cF$I;SnC~Ww+jJ$L{k?RmXd7yewEPu0gtSmUIjv>6wBb!FxH@ z!Q#Er_yFCk5c3fD)N%pLc34n+0ca^NUtZzdfLA#beN>KByRvrk%c0`!#tMIIw=RYj z5Sf(&cZe;B(dGC_Fz9Hxqbz#$63*LvIr13Ip1J|)jBagfAKmWvZls2u7qdf!kiDLEr!V*5Kx-6Kw(xUsMPGhA7o|G8h$yaoeICA?$9yH$!*#xilAx2Cd^{fJ z1_-&iBpn0G}DuT_?GAV`4B4 zD}Sq^=b@uZM2}q^Il1Q18jYW8x$s48#^Vi|8IoqmDDTmO(@*rg)ykBy#x+<-!=7K37b(jJN-i6(KsK~=*|y!e5AYk zBFmatyL7qw&aGF%d3VX1vFI1tz|F8%-{Y7q= zxh(VZ#yuTJ6U}fdmG`MRmFT!6i>a}tKBRKAA{@HX}moK#)-Qw z+${-~peg%@{%W829y;`0Y`fU?XAZHBw*bSU&2%2NpVWOj{>Oha(X4fxj)OCQiRnUr zsJMTP^Dynab-JRp(RXHL6yxi)a~(H0C9W18qahbQ?n%_Fz=@@!&=pHbc6aOB`mv$JihHGx zyhW|VpG0i!Uax-xNsHrFlQ-12Px>{A=DNSBoy{1vf#PtNCw_+VOJDDMfeXv^Z2Xwk z{iUxfRe4^-&o&eId*mykOwek!PWHui)TnhvSXAspp3Slz^I&)~nr+ux!tpeH>zsXk z>q=Ok(ih)14l3oR`L<(4T>63Z2+ne{3xsV8Cv4=VdPRSy+7efG+|iT#`LsgtE(#+~ zE|A<&F+OLF4x1s@MN#He5x0F3DE!LaVlG^b4iVK|YxuM@b>f>Vr>O3?xJ9?nM~lZ@ zCG)vS*N2y+uX^_FAl$CbxJu$1r2JEkj)$zeT+Dtqi>{t;_n+-P96EICjDw*UCxvf< zxD5Cf=0|_uE)gxZKDtBCkZ`)~<{kKYyc10Yd5RG{1jW*@=Fj6XHEZd3$zExVL(?;4 zo4*>h%!i*#eei{)ZR{dr8~Z3MH>*Tk950;pn&0B;(U*Rxp3C*}Qr+ykwEc8>v&lER zLiF|dRh*X?8l~wyuhe&rbpM>TLXvl ziHTFVpWtcR5%nZ>Nc{OOJ#d0_+lGPksgHl+P=3bw<^w!`c$??b(ch(gP|r5a0_%6l zNx0ZuwD()7C#fn8`s?JbxZ`s-o9cx7Ew`?Z}r+h-Nd@xVx2v z&G;PRjCPi{-)-l1oD>5HPcQG&+TYTH%Js?Vm>(|TGjzCk8LvEjsF3?vY){+Pa~^-m zr9tItH5aa>CBJW(zd0tC$C=y;P3fWgxYv~vYjsL@iwk#@f>Y9L5S`tJNP_$E3_RVf zyYWnLU=e$HdHZF0QtzAUB<;16kl`1(_R7_C_=18GXG#7B^RjkulWVy!qnj6YK+cFo z&bkcevgioJq!$K6vN*q(W=!k;p%8!lcpH&&I_8Ps=X*12UJ6J?Gm7Qs+{P%l_fC3` zk$=cB&m-yCA|s`;JQnzvhu%ri?hdzl;Lh%1@2Xl?ygRpgV@TaFVl^maGA~FzUShjF z+zNSp=<(e8EPGW~LNzV~!D%Ivti@3=^`OU|EfmEy>+{cxr|nLy(GKmYRylveDW;pZ zsUG(i(_Dsk>5T0BOvCueap=ui!t8vn9z|FU&oF#G)gsAvYuL=a`(`$RDHpJG#(7qB ze0utn+9r=E(+#{Upy@PxiH4$T4)rI5ViMx!;*b~jcH4hkkx!b}V}5^)JnM3bisqjB^W~!694==^)nn_T(oDzz}1Cx@Njyc_QQ zJln6k@!rJMc-Sc0)owoyo42Ig-fgtAwQaE2ZQp)33|96s9^Ci|@G*bP=ONyI8o(>@ z4PI`PM$#cGIka6*d8qAMFda0Z9 z`gFL?t)mw|Yq7?tuvPE6ST!*p;G%la<=CMy+ohlFL6EFkHa{S^i6WFwkMlOhFt50= zakoRgD(cls5DnRpGmC$nsv9oanXjzQ_UZ6=`S5-&=I6x=-lytlzm~?e=#^9Ye%paR z=XT!fxqL5g;W5bt{IvS9p+}N(+vxRPegg5hR#AWB`fI^yk;{wk4bS%(>6c!9Rit9N@l!{{Knx_Ey{xkwe-V^db%BMM&) ztu#_5=f^W_7SCCW(Xm+G!u9j@@mwz!)w_22RDIsYF`NM`D|~;hl67*irD(rLpX=_lN(=6#@_8aeZ{gH=abxYH_)@LUtCz*?-c7M> z@6~H@C@!|Po^|PLwp@mf?Y>R&#j*01%|k!ErFxmQ&3piUrR-l>JYS}3{=Me=Ijn3$ zviG}tsmLo;2@BtNHrMEODhgjNF6M48t?Y)gmih(jHB+{5gEoos@&dPvwQSZzK6@_Cs4_% zFAVkA-IIUR|HudaIcVC_9)@ScZ9{(f^cObKw`rpVOZSyU_e-qoH;<$5r2VNeD=dY| zVSYz~rpPauf*cKJv;45Uif>N<$|dT_hpXxngCh!lG|x9J%06`6%wk zOI4ODXI&K^c8*^Dpm$vOv-Qf5Ms1cy_9x`4v*3U7FQY1qyVZY0_4V;~#JF#(^?UIg z``5;*)-%cl+ebMb?CNY@;@NRF^heyR+p}`pU8VckBF8~?b*Sp1qR+O8B)E8|M{u^^ zj^Mrh8D4&*+ohe0htjY6#(0Aj=&Axiwhs_|@ z+c$q!I8WPXgR+0U<8ydgp^LI{JiW?%O}WpmmT?Ya=mv4 zz1;ir3Mh9Gy;wadw#6@+`}r!;Bbv88O-4^v$9=jw4B`5*UF6+$RW9e-_0@a3KDNZ2 zJHMz${?emk>urxK#l9^TyjX=LS{%LiIl_Ob3z@p!uZo=|9S?EyUWxWQ7~fB8?%84R zfc3uHqQmgB%?rBeV}MV$GmWZd zzg-F2&radVa~~wOv)r^R>pt`4va6!qW>ammjn=NL!)T|ojgs(!Zy@EFDtCV0`?7z% zwFYh<&eg@|P#x>}!YxgIF0aqW;B;&19bdit1zvAETnEdimls@_*za(W`)<~+qR&El zJwhqa@0k$a$J4bLs>OZDJNr}$7nnc9JJjAo{k~TBhYROjwaX^AmQ{wLwK1NqWas*) zDp{T7T}(|c4#?^m-rPnwUGM&SrS5+V`q|3(P|!@f3La^%t#ZtRk6$j9-8HZd_v8I_ zHy5JI&9xL&mk-uH^$^X=Q#woUhsHR4aF>mjgP|T)4`d~;?aD2ycrb}^cpu3b?W(F! z?{stE{XHbI@w!EGX!7E2Nuj#yof?tXd3{`OxnNW?w?C{lal4P6elD%VvT=X+^TG_} z^tEcG`k@tD3#**;;;~d}XJf=$uN4-tJYJ4(JKElIk6PC%3zdDs-4>VarAe%4mf(IP zuJv&L%p48MrFmJbB8j)vcX&E@yV|uAW#fG;fquK}xz2#%xnww=?hkna|Rvx*DC& zAyiMFV;VkohhpA+mP_TGJ}$sz{#!7V=a#OQk#U;$*1b>F{heg~bNjYQD;&w?iPmrb zTs@6(_dYB+a~D4o%PGX`_AI|s;f(a=xY*o4a%X*O3)ZhuZ@5BjGXi4wLM(# z$&u}tGc1da?S3mGu1xpgX56Nlk=eyXS%d_(37&A8CHro|12ucMwJTb5^qbnn{BJj zW%t03xU3J(WqF#N zIWtoiEYlKp>3nzJaL@H2(%z=|B#YPGdzrBvhk4lRHBc$-A#i|kn{c(n@l z`_<|m(fKh`XhbAM>BH30^yTu9j3lIcL*ZjDxZM2ZUAoTRpYe5%kYH3B_pX*#_hWZa zmj$8v;RB(IciuAPUi=J9#RZmXSn&N^&3je05%S1yV3fXsBnonPwP9J$9$O=xvn z%|l*Wqr>Cml(|wf+qcd3G#l&I?6Om0>|-aIA0Jz7$%}suZp9O79sCF+ujL0E#XYsHR>im3kaX5T6li}CZ`o%Nzfn;x z70H>2pWulU;ok1+_jObHv-44zfozN1x!SGM!sse5Dxb4n^Qk?%7WX~Rx%F5Tp`T!y z1ZjJAI!S*$@s^q0iZI*QWVW8Iq@B=_eLAjApH&v2MX1Q{*UcE?EPsv;Pn6T*$$iYC zjLa_Yci}#f5}(|!VYzv@;=@GvYB;WN(3fg@iFe%0@gH`xviC``!goCmjY>eI7UeoG0NJtLUiTvib5-RQm37j=Xn2?grKR99F_OU%sMGOAA|c z$!dQqUTy;{!aQB+Z*COrq`BI9#SL?wz?H8qGD+M=>Tj#)e36{!`Z1&Qjkr zttnNjI-J?>-Qlz*H}$`#^gx4$gmm)xPJc+AO@_ezV@|{Mf~nwBuF2itAxK zN{{NLJ^3wd;_LPuwR0hX>Wp6x>uF!TXu)YOGqT*pvM^;_fWIoO#8y`7@nYSAba$7p zUHtw?22Gts^HQy?gg)NweZJ~ASAtiuHM`v9!%byAm5R#Gqwq1{6Zkl>6J59@Ua@~L zmDcFhYeJR_<+B~mR9yj%ZgQo2aFjqMpW2-bgF*XSl-cg#<)F;l=_hTy?#GK?M(9bU+|EM%Se)cA znxEs%>x$7msWyTp%`=kq>+zs0qGf+Nd^p#5jalzMUf1hxTQ0+U?5-98znss4_wuUE zpW|kuYu0C<3G;Z}zU%oe7obLJy;F5r-QiPwimR3iVMJ`BnSTQ9-p!0-7CaV(f($!# z<(CFWt9ifzzmw1QX3G~+e9#a&Xx$%FVdMfgjCR%dzT|)XHCv>+=5#)k%6nhE_h21qd7G2XRpZXLj!)s*%>wCNo=%_T<|Hl5 zhre+beb(?7K0b>p<#Sl3RNGai5&qw7oyV&6K(}4r#XTc~a%w8)xRra#IcGh+IUlzF z3)ek?#8OoW3bCj$=JU`PrXGJ+nY8XxRe@*Av*Yr>j8nkLea>MWxv@BDN5?yAk67@b zRpdpsIpr(DrU21)_tJ^;L04oZxu2(0N{4OKXba{NUOECGH~g!(I!r$BIBKnc!d z4=)UcpySae*=>IB<%#3gNrA|?M<+~(qD(|XPXzb~DSzC|;JvY7Z%X|3)*z7@p{sF! zsn4N!dn^uF)=r}o_d|b3Q*GT*Z_OZki?sd;0uxTl18yaJrXBNFDLjyxSxYS4G7d6f z_!)_-_l{Mdi4+z}Hbn~x_-V`W*xfI%Y{25Vq_9VwL^Nnde3Dlk<{BL{j*4#-@KQwU zPzx3!xCQ3PZTp+@@`(kgp^tJIXUbarV%?xf#2L|~3x~di_wRov{S4$-n@DUf{b*Io zL$1=I!5lRq`U|!t_Yk0ja-Y{AeGWbk2FeKe7UdN=l)&#cI;LN9Xm+;T%?MWQH(kz} zxQJDiv(62sr?g6H$E!#!IN z>N2zf8&_8uOyYlbAxwon$}17`p67~m4H#h z9F@Cp>&B8&;wu{7vfr_GE4|ufDy14uhGlNtBzlT!K`w#0Sie$?NHGYS9Rz7Yp>tFp zVH7QTpNMeWm;h(7NV9}jc9{0L$!TFesuBr-6!inBE{K0iHeLpbR;+p2d8vjmD5#7Q zICe_)b87SgUYrSNX=-&1^^|i z)~1q!w+}J+>G*kT(EFR%#U2YxZDUmSacezuAoP{@jy*|54!9X!4O>5%GCr?kb7dUA z=j@i=M@)Yk2=bLIahZRz4*|KQhW)JXWKEG)t?6CcLfF`P0CqFcr5*_s=hIjm(@$|J zk)C}cYJ-i9D4?sWCWfti)iQ4S0$Gv*T_a!HDrh2S#urI!J&Q0LVGDE(-JcC^bckC6 z>T{Yo<*3qElnv}aXf#rDIN{jXUAM`@>7IT#^%Z}KlPtb-)4SRr9a!UjpkcCUxewN4 zqIoyg?r00)3krqB7D4jOR2$q40xdnueb|TROFJh*CyFAgpNY(Q^1~Y#hu< z9jSk5G9H@@8{&?RgyH^2 zV`hJsp0V(^JZUp|5oeW5^e{)rZEoR42IZ3kUm?w7?g5ei>^p@WQ#p_aoi@bgqY;`G zz<}(_n<|~$iby<;Xmyl7?F@~4zB06()G>e5cT^Uz9=e1y1VknbN~qrYBaZeAE_VDR z*xG06Il^O#pzUB#!v`@$R zU>Sr(5ly#@vAhmh=?Lr-4yioAhkRvy1AijI~FD`nkGgcMeLqqBb` z%jMRp)^4Vx>%|hXR1zG)j0npxW5#tk??PJzqb@Pw#PYQDQKb9t>(-r}M7L z`m#o-@{vYd=%&;+2Lh*$pD_nB4o6ib%JPPn7FoKZ2`oA@gRsF5!&mcgU$#fHZk0+V zX@>e)CeVC8lGOSZEoB7GS$F5NMxiZ?b!ymaLe~ zPV{ZF3Q(y}o%=+!3;R=`y^5Bu2>@=h!cCaS7-^_8;fF^1J^9Jg25wN4z7*|&xqi4! zmk0$*AEITr6FMnM+dcSYKW;BkW*a0>;@frgMKvE3=3vGV1kk0$g&jbVROWxmBj@+{ z8w#x3T-O5kDO(@Fs&wkj^xAB>oYHSudClLuGyt|(T?I7BSZ{FP;Fu~#!9sKe{gnNR z96{tL4+MEa*wJ%U5UeI}2w<;Ij{a>~*g>nt`Sb0R9Zr{m4v6IiBz0Li-8ERyhJN^k zJf*RjWQC$3OqKoi7ni(it@(eQend}b!%D3zoVf*Om(?d`rvCKh2F*dPCa$3l%UcwF z5m+ru;5aSvaMz8CONbT^Ivqd2EPyWgGC9^gh(7Hj=E+!PYH}-7q8duSOM6ZC$QO*z zg~M+*-4h$qKslD6q6{6>V@Bv%hbc>zPD9(W-bT&TwNb^%qI;(-EBAl3H&Kl>pAjk( zIvuod#BqXii6=8F5Lo*O1S}Syi#HE7p2TVLpdWuCv?j9|)qpUYFX22|LHlv)0@nU` z1!CviJ>zp=UB#hv%Z$4h0Ick)7FU|^#l7Tlf>Nva+QO`^@J~)1pi^$j9`%Q2r3!@e z4n{7q^L8;3{bvUixDtPy0ifE}R(NX+b7XWZZd=Y0nCfi;A}4T9ct8g6R}#vTjxe^& zI}8e!$bb~l@pHSnpGYS$9xXco@1W4#;oh-Wv`V-px%7|NQ@=~1iN~G!ow#qC7p3@2xvp%!54NX(#Nx?L$pd&gr*isi~`XbaJ*vm+ns>AnL|wc~fY~w?q)VdT=7_ zCU1#tvC?{_+)YlVF5=e5Lu+cDRh7omb33vMVDkE9`u+}qIOKW_$JKQWaLXsqqLnQYKVf(aZkTQ8 zpq1>w4by+iB!CsKKD1{RcA739NynT@J~qkltf|Mz5~(y`7a4Q0i>c&j=*O+*!fcgl zNA;mA8Sqs@_Zp!-r~+IYxs(oY=6=P2l!V2_&4|?$Odf_fv#1lM=j=;jOnb^y?yYS$ z#!AxCXHmhUdSW$})2%-6Yg$LYjK}d;}T3z+(h5PAT7XbQ}~R5j&NJI_))VzIY7_PZ14zvhWUF&xDR-Vr<)hW zF4}+Uld`$F`f)7%^8;4ID<}6XwuQyzp|ZBAr^>DJiB7ouVxDS6=$~4A-G@zUu4eTD z2{eIwJ6ItS(u@e4dQ@0W6x}&_m8S_3M0-#)ngg(1czU}(L@jny2q3j2nsmw@t)y_T zBzi!P#pBlF>z*d+Ga=*~Pb zm-;Sz!nLI(^mrA>@@owI9*E5^i3o;ExHgZ;D2I)u{^@mz@?dKR^uzi8Z%AvpEo6)OE z;LPeDb1G}>CS$F++XQYF>h+s~Cf_nr^9(RfKUVfRnxr$5q!-goM+5!>P`>`l#$BxQ zvhDZDHfQY@3rGMiK+(U3J?Zs%zmc^c(nv{veg~KoVfyJIDMRGNI9I{-OmS|Z(Y&IW zqeydk^dkh2O6>GH7hbOm!4%zuPdp4V{7K-IyNt+iZ+uaX34vs_+&P;!T~Gb%I)&ecJt0+{W3`2zaSpi% zIg8y$fHzn2fFqYvk%6QvRoS#k(j1|GXDeNrR&TSOcwT(8P8%fAB)}qD^1YoFwm8xo z4&*j<0F@pGIa{Vrj?f=vH^`7dWu9p^Phf42EUppXEwekTcrCTLftub#QGPR%y>=G@ zwueYMhgDru?S{tfbxZ!pcV}2%W$Io(KZvW9M?Np&gN2FHEDdFS+c<&4NT6^j*HMf-9?93z>h z)#tBgF~IKls))GtdN-=_-(3fPs64GE{8u7kgOhcL!RHWgvLtUek&iX`$lJo*`r_mt z3?J(SFGGRcW|EPF_HEdYGq0~6(Sgi(Wjjs#Bl^Li_^lx!3P~UJ(xvBt&hORET&E7wzDVxaDgiq(xn8 z@vX|9`3M=@GW3cQx|i~Q%2K!*N8;2>%F6V&O7P!d`dqS>qliXUraTMD%if`icToa<1)dNJb`!<8Man=Rzkv>v~X7gQRkyFFrqwz)_A{6L)A}fD*k2sG_GeLZrH1@_e^QW2Wgi9 zT+c18D)o0ViZ0dFUCh;qQ65MO#i;|Y`{67jXf4;_y3leb6TC@~Da(%}e^<6wL^rwQ z?BM~oNm;MeHhPG*vga}-e?CumTML5@=t<8i{%%}G0lFK1J_9~*eCw=~bWNbee0`Qs zQJ+)W;Mxpm$c9DPCSdJ0N<~Drkepst95Kj(zG;3Y*YyJ5TzddkmFZP`qeq#KIfq?pJL;}BSj{RNR)fLM1-p4%S!_cNG%4R*$rlu#*$$L> zHrfU@gCU21(G$sY($MCi`M&9+G~F6$gjub0s7|v7AYO3srHmRw9-*zVnm9lz6-7c& zzo?j$ljlrJnKLy-C!HWb42oNshCKkD>4i4pa)QM?`Ns6CYS-rG{oY0=e$+#uF>$x| zP-wbR0NZ0E#T@w|7H-C&4k!dCz*+Ts7TBYyS1pTwvV!vg&nTL5)PQWoWFX-Bu`bO+ zIZfIGQ+?G@(O(Z|T2M9aGsZ6s!HO~+4k?>fZQ6>X2;`%H#NJ=*iwM~7yrEgBctDZ} z&eDovofy<~e+{4#LSWlwZ(iehfK+0~zixXWY&NKrxB}&)I&(!m9>%z@>7z|Z<`9Og zutL~>HT(g;>I}Sot}b1{P=)XAe&kgN_(&(@nvjclQdyJHO*EqGc}k(t`gwh44lvEh ze&;#Xr41J^-D4t0a^e^GtjTxmeKg2gF}w*v7jmG-Dsw-I>hHcQj!&BiZ*jr1b_BTJ zH|S(F8SKHYm`X{`Z?aPWm>;azj_q8mwWgG4gB%$0hq?cSS zTtmku2*Q6+05&nbG38vKxOul+-2hm z4P>~R2K|`)H#^udm}N3ry$3ZCDg_)LJ1*Onem$0hA{0K>(q0R??WXg}*~eMHxTa?Y zO6)TPZf3O`JC5F#*eg{QIX2^5Jd1#pf2!2wMT3=Ze3Rp|<(9rUn18c_nQ? zzstsj<~u^~6eo6nOt)~2LpgO}Jb#f(Q$s`?WqS4vs!2vc*mzric-WH>CWi2T-$7#N zYk^sfMdiin%MU0C}EodLBQCvk`eG>s8_ z2QTEP1pEeo5+v+Q@L7t`{KSsonbeu!?vpasY;U@}Dj4#Qg2YD2*@oHYpYTnw0yA^N zO3aLyS-|Jk+y^83aMP_|IWs$dj;rPO$f6syc7O}oV7DQ2Yab$b(~P_z!Va#s92Cz- zyW`|E?O*vNAQNrKR@eNqS-d6_&yLMCuipUHI0+U1I03H8kLgTaYQv+LFPOg(UAMnEL0L{YchT zOr{nUWjfa=nd2H7$$C4o|Hf$ZEjk*!)=bH_8bt0|QnNY4?sq2$Vx7GcZfHu=buL}) zt#NRF5Fu;SZKh3yJ(&IAHiYHQ6cnBi;pX|w2XfVSx|XXU-$kK+x-C&TQ((Y9>+*--AbjC zGXjFHI+tHo!&|GTi6MCOl!=QO%fS}EeHy0V48rR#A7-jbQ=Ep|47BNvQiKgVjsmxY zQ7RH0(mPhl5baWb@a)aU*@8w62y7uI%kPg%JHj0cvK$c;gzc_ye2Z})p1VuCR$8G` zl$E=of_V%*{9;RQFjjE-Me=%4P5k^U9O#f9at8WDQm0;nxw#d`M_3~*(7Ah$%sZdt zkFxcLyW3T0lu&SeFLF;lX1^Hm(@0}fs75n+NR(_&OFownpiD+S(@a7S#WRAM@SK2jGY z<7k=Stp=E3GB2?tVcLCEYf(>Ar0--C)noE8x_LR!W(TGvi$c$9wI%5Chl+;u@7YBq z?MkTp1~BV?&e&qBUpM?8 zD%SR2>Tl;<{yD+Lf4ZC6EpTHD%_ENfSli(ra2@@T*z!AHB7ntc0{sa%4Cf!7{ZD&s zGswtf?3hc%@mKGkMR?m8hQW-uCCL&g{(=$y#^?Nhhw1)T+DVWHPyQ}_hhezjuij@V z^&vXJoP_`K=%@bQ-2Ul`k@!uxH*Dr46#m52`#-bD-wQcMb{(@%B^W9hhEb`%qC)p? zQaUK}KX}g&^Dmlc{O2e#{${~*frTs12sh=Q=9o+O|D!Md8zjp+#GH-dkvI%v{Bw9q zfAxfaUQ*E7TJ8)r|8gT<V-l#a}-pery(17re3 ziVF(kqv%1Ae)5G*wr!UsAU=$*{TPMnLf`#=IKfr9_6nP*ol=}KKG&(`Ap!xNnQArK zY!~=deF$`V%k-{bnOmn00_1D2zs^2)tMaRAnwuX3>6N8I3g-J8C1=$=!VvSVu1m4A zeuF9joJRHrueaZ?#mTs8WFM{F($xurYLg4G%zb5U{(OI%o7Y#>mzjo9uPOJ9CjXv) zw6x!?ZkJxve)#Eb-UZbv_dc^ z=o_gCMOKs+V{l`cO}p}3%{3xVNUa-B_-$kFN}(F)!S<%@ez|PGsRVx^MgVXN)yEuI zcl4I1DQ9zo#NwXIM9)euUn+?(nvuVMAX$>EP`*m6p^xwxfAeqzi9ylB$MTX7tg1la zGet=`Cy1TvOuDM34^t48syTS}&B4wfxjTjc>U;Hxkwb$5bw*m0+k`EGfPc|D}wS4h> zba!&@vC<2|~yyz==TBrn_|PqL*# zb&Or60in3!1+o~!WPbZz(i;7LfR_1pG&lR|oa(Asqgs2a(Ge%qNEB3mmS8_{S)Cii zdVS}4h>MWf8q^cIm+`PvB-K(=ff&0X*GA4jn9b9J9#Ote_0 z{Xl1{1?B~0Ix1yM+~$QQ!QZRe7ZDjHqK!FODSv!JtMr$mGIfDR;c%!1v;=;-SY*6& z8*C6Yz>$QVJMozl3ZsgD%x}Zk4X1w(!xu0=o5TcGO6fi=(Qwv#msVcgX2nF5l$R+y z=~)-XKJPe7Jm}H;{18ydV&0x6x~U@a+oyu4&L1HF-1X?k$fF?g9{Na8q@kB^hHlrS zHYm?ubqMgn58`>>sO6L|SrCm0_to{#dS*=8hJJClUs7N7^Hz+1_d6#3%q_obSZwWu zHWok3sHDrGl9o)qP>p*CWA4f>wD_=FhAZ+Xfig4Txc zSKr)gh*SbN8g9tZBcq&EjqIy7vm_whmpI~U`nm;!tQZTPwPO~*qnJw*iW3Myt=CuL zh}^zvkw;SL zq)>DdCES7EN49Db9i72e4NftjxV=^%D82Ffu$`U|IHmjR<4*=_Rg6VI>@O-&uKm)MIQKM`ZUFDTI(AnBM zAMvXZv{t6z!!%W)tc^y>aX1V)LcZeMP$GjsXhYewTVzYL8-59OECENE%|Di3t)-j3htJaImpLjxwX*N-_4C92w&y3mEP>eh^x4JLh@S34QIv;{ z=t{u~qd8#OLmBKwN#af!KeJC-g)_^G4<~L|yiSl(E0i4M&ID&up|)rM@dt2uOuC$b zp0IR5UihHo0;Si2`HFNxqk=W%;$14*_SK_*Sd2tGGGj^`qSemoBzD)U%E3 zUmsX`Ga~HUr8TZ{!GyuL%`B%pbj=Q<7p**Z%^FWI4h;$Y_3F`j+z+KDAx!~ z4Ln(xH}S(TC!15#OS=YAx$nlC6*9WH@n1DxH)@jjOsET*hq?B^0N($Ma8G%&1vg=x z);FKLA!|%7%@RaylqN93kql!>XI-e2Ryo12xu=^7a|b9W9VY1}`)DcxyAHR1?=~I& z)?{#Tn8C?H4DuthJuiPoPZc#A#pX z&LGeSI=Oal=-SYI@PMVFu0c#=@DAhBTPjuvA`0=FQl1;k<8ge$s5EGNQ-3lWn0;lz zBy|kndX0W7aXIaGBWQonV@?@=^bFar$RQN*=@j~@fmE4O`%e5S3e<6QTvwZ-G?Ef- zF2uQY8S6Qzn@IhXCD6QRxT)J*g{n=a_u!tuYdWA2Aet~|V>Wc-Yh;+vrx9xyGlCUu zyk>ImqIW?WMM15wd|^|nenncDMXHlXzOd|PYs|!cRORinz)cKZ)s>sW{?&7 z5uU9fvv+sNzKLV&2Lpe9=@4b#k483qeH0kxB!ZUhJ$a7531!_#+b@MzX5ieXSXBr-ANxQCjgrD5(iG4Gq&fs&doZwnO?1+&vwyN;VbLJH zN6R*s8WhIQ%I#sAO zNYKISu+R?8iIK>|>3J$-n<44=4e946j5-qWFOYKma*}a(-cQn#HO%?D@I>s!)tYSv zzZ#8G$<~BWkUkNA8oDGbaNUrdgJ#r?AU+4k@#Le0sy95Sh|UF=yGn!MT;)MLgdYytO`n`}4x`s;^?Q>Hi8U>z4by*FH2Jd^8N~rW()OdxWRh3?} zXIu?0);9}NOH(yNo--4P02&!~9Hxg@=3`_+eJ2M2fFq%QdBkBT5&08FsN1j6*POUM z6=)*N#p-5$*eb<>L|7|za-}puPCPq2De$YoVr@eh0`K1EA&BI9J&oC93YVNrry)S%Q4?NXn>dP(lzQw5<=MK$Uhz5at{tpLV7&y z5&ezRrU*PxMK8My^Q1TlWMx^c7e6RPmHd}v33>YngfJCrf^^;0d2O8sXsx(xpKlXn z&*bjZenA9qV{rY8NO6UO-U3=9N~ksZ*?I=!73SW5;|uvA@zKS?U_lGOJpL8O>s_pB zP7Pta=9uieP6H-8Tj3gbgEmMiya&gp0&=A_4>&&`uRZK=B+!Pt=bI9KhGXVK@;Dh? zW~`erC)?J~;nv46e9=RLG5FX9x#@J|@5pk=Wit!*)P4&wMOn}Af!Bkkfx6%qf@wtN zQ!*ESZqoYjPEx^Q#)6oN)ZWYDkDuK3;nkj?cVZygnnVJr$6O|iCwX-bYfz_%L_*AM zlx&&Mrfe<0q|{9)<6oizX&EpCOqPK|^Hzjb){MmQ>R8L&*Pr0u;ICjh)Pnmj1ny|7 z<}Opb)GPd}q5?#;f2)Cz(Usa=GOJh3n&^efyro z)V~So(}^CK1kt0XJ(qb}Za{2<(HCn#e%y+hb(t5C9V0W_KxdZjF2Qczc)mxQ0?rhX z@uew~7DXCt^;F4}o;3A6@0S=nI2`dDj=g7ESp_=qV3)EEu+tjzd`H>a(LiE9Yoa#H!K2{ac5#PyZG@}@?LC-^G{3R-jX7fGVs{{_T>p6-d~D zpfITD!sa!h`oHJ_%vp9Y87nQ$APL0Tv~Te-2B|A3-D}Uy*&a6at?)%jmky&Qz$f+) z;S1%eg%;fqAQCiFdZv3yGp~)YQoGN8^fTYb#9#r5Eyy=m~d;VY%c@qnAhG zlGhaDm!yfiFTdWI(obb9<}P+FE@rJkoK;c?Kq6J-DIUr1!T3FQd7UmRFapzudaDt3 zUcdAvd4+N45N`c{jct{-D9LrO8lFer-&28?z-Z<~5wBwArn&{vMu~E&x ziNKF~pd-1-U;1dx32_I*XrsrfHiKMbQ`C%cx&2rpC}!c^F0EQkb2@@O*%bhp$4Hjj zTMmy;eKH%!QjyIWg1*o@HWB3$@q*`t1^hTQOG^HiJlBPPir1;TdyJD;Q216{sjiy6 z$j?hBH0zc@kv_L#%D;qkT?Z^GqcKGGYOj3e0-hWIY|XxwzqpaVR5ERpLZS5%T|#8E zt%w6@>WKAY(bPvI-$nxeGUkuTI`LFrq{KQ{jIDol_Y2hv! zpTBWz)(NCOT8X2vgx0FDzm>8is8^*5T3kLhA0{{5C?F9Ug=k=@ zA6nm693fm#gNhi1vh9$o1!GZT2A}&f({iWw2g)OV3SgCaaj-cLW588(o~oFHJn3&ioy8@≧M`(%ooq_3Sv7vD)* z_5^%??V;MBM4MlR8?3lOI8qk08EBP6lE`<6n#@t(rtE* z#QV!_RNRbX+?pw~5BZ#6pDhXCg zVAxMVkf$&*M@d_Wv};z-XBeDznT?5A5(i0?P_pNUAKafG%s+BlhY6j?2ybHxhKkMQ z%(yHawUSXldx(9zjzNk7Hs_Y4tEw;%q6UXY;eO}@t~QlWSf(F$5%%z`bwkHYI+2``U*`skFNmVm451zso~7T3T*Eu-2v%R0A`Bff|tF|vwBYk z4co(Tk$`@S)KRv$k3xnIL1VSEM3L{$+i~%P2Xbw+{~DV7o#bYMot9l7S)*fggF0b9 zg%li@9)rPq<*BD0!=|MWj265u^s39bc}Nxz-Grlf%%QHE*<-F@`61ERn0GduqXijUa9me zw{(j6J(jo{jZC)#aD7qTB(5Id{KBae{3AXgSKGv``*}M=n+tfis|hcd2A$I5Q~24V z!9HJZr;~)m#`ZB%!4_Lz(f_J!7+0=?&-@%n!^5w=q!$TB1vCl4pthC81^xoN&S+;--~v`pY{)Ud@28=e*uqGlfUb+;w^w^U=#H>ju7->`kFH^y1H#_mD4 zSH|Rr8su*@l_!?=^yY~W+$GsUAkz8!qb&+=&&Zl`PN<;nVVq9A2fE)<{Sv&i`6-SL zV^kC<=NteZcvjU-^tOdPn{|*2s*KbE<`$~)!@4;7+Cl4oK>ND*Qp-{p(f_9GP1+S@ zny%3wvc463y}<+!~paJU+ZjvhBtXhQw{07tgN?~4Vqh-y*1 zRA(ptJk-yjz*F)#t*#}`_an15w0ETw?Q`3f^Kp7uHeDTQrg`-@27Iu6y*8|s8kW?6 z_~E&Vb(ub{m>+pEaPL)?P_8kEO12VFVG|vktVeIbZ~NB zanTZgJ?X>S)jQ_pn}PN5Y2GsL^*Vnnhk>Wyco=JBG|liyvCI&|DDg|R^rE}H6rVic zd&pDbY-)}WF6}GyPp|P@pSvcL*r^M%55>#^CzqdBqj-_r5qWy*h8t_GNGEB+?|M@j ztgt44jRnnrDsEaUCDhq!;;iAiu#=P%Bq?wY6R70< zd2$%N6V?X!_t4mUBB(x&|7tz;tqN>N=1U{LI|99l{Bg12QlE)s7DQ5JDt=s}#R-m0 z^bpk3(?+djX*t7|f){8DU%9MJgbUk%X*w1sFF9Tf>Loh*?5>t}V8zRNDrV_!obDHY z!s@PW{&AYOxH3gVCiQ_72C6yUM{cNi?i6v4h!Fz1*Hz6jo&LeJ18L;0*|}VEXvUUFfu5fB92b@a(=2N$^BH^iWV*2)qD9Q{-Q?txJi3|LWfxb< zi<5F%7xO89B)4%1cR{wYN!N57k=BZTWUSkVE$3{)Y;7a^4E~j?)$C?IQt!^=F1xz3 z{lUdz>67aMnL=d~+*R!%-^w;qk7aDMQ&5zm0fpw|v*ISDSY9k$CI2gpJbF`PL*&)p~g80z41uYhuO=WAGBr^J-n>Uz=a z3wm3Zxg>}~X)PE3%Cx~*FYEDYNVMs=0`Dpy4{YWhYUy3pFUNJMAHpp*n0y2(e^2_8 zEkBssi+>meS?DZBc*&!Gn|!CX@K}&t&YI#=%9lpDlCsOTtKhhRVmnG(O(f;_+FYHWzKT>+)GbdHHyWbCC`&M!7@d)wW?y zem?0sT#J{I-J=vk!Q9^wMJ#Kwp;zOsLMh;UtB4ij{MCuiE!($uMC0D$Lf=V zXp%hxKk{Hi%)sRqqfym=<&}@IABzoWZS_ceZkLk1yo1T}2CgTFu=S(2`AREswv+bi zrFt1YI9No_v%VH|9q`_|EM1>|{2cIYkD56;k-hQ;-hAOZ*VChLp)MCf^@|M=iSr)C z=x{*U+|5-DYh`@hjPjtoozttJgGYLXdan09ebvkK9kkV&9my+y`x&C+WNeQ^sXR)K zpOWIlbdctQcggz#n{7P1cu(c@5YpoUb5mUKTORH@IE!L-jGB~9eP)=PC!=P@^ZHVo^^sS}a(PNtdRgbZz(mxW60Mwy z+l_n4g_Jf~!MaX=9k=%R(jvod_(Wrd+mP3@jlhzf%1(m!2^LNr#joqoICq~u9In?Z zY02#*bTo~OcmJ^Ut2J-SaDS_GAkyX2cMASml%IKwwJBB9>@g~r!R#v!k@lG~y(DQ< z_?A2OjNg?oZxwwqTs3=kVr@Z@Bt<|~jbwG?q_h)#kZ*5)Ta)|Vb!J!kD3sP{^F_O( z6Qhxdk+6aSmS=|MF6}w~`Ut!EeWOkmp_5CGxr15fUap>&B_B=T*O$Y{&o~FyRZjWA zv9h2PtN9uXl_~4TX>i}YXU{Yn^ONM{NgzCMXm(y*{ z933u`GO(?G)PMSWe>AehBwB~Z;qk&}HmP22KV7K`jw zf=#s2B~toS4bEEBi^^KjVflpgf%h3~bzC((;`adX^rElHOC8P|H4*%iY>v=`b8>zq zvm$o2^kGl0QZjw1zcbQCYdLlw^AZ-y>scQQojwEK_sN zr)o1~6sOOOP}JGC@loJpK9YKng_DtOQYH&@@;U=qoCg+#*#PZqU#w9na<1a4tJ`OX8;vsAZ0m;VM1_e}G)AYfDe+ z{58+S^R@1@rD}%_wrJk!xH#9&at!WIMwD#-gT%@d?J*uGhpDT!wg;+ko;sPmJu*4V z@ES8mP+0qfE2g|2yz%YKm!||hYLG5hSuF{FzbFC~pBd`qT9yB}9j@99lb=m|WS(r_ zZZ9;{*9se=6>BP+zv7{NSk9exh`ghr{K|``9oCo0wLfsCW4cFVbLz;TLi9==93sOR zhpIvC%XXd0fCz}{#KHb*s_C5inv!+2{IZP0D`KcizM&g&_!x`gq?@G9C}Rh2d2kPZ zqS9!QUW=OFSi`IQhe_m5ts#?}=3ik!mh{V>Z`y6hZ-egX_tcHm<>aUDfmT+O?6{$jRhCnU;OoNs3K(mF@80Z14~px{Y3ivi+VhFMN5x~VU#d}62UIq_ zsd0_H#jG4QJ$lG<4OFs(-LuvezcfaQH=iDQ9`gg^C^R}L*%4xs>uSGsV&*LgviFqF z+1rVGBlM~4klYPPcgmouD@KMJ0CBP z#53>6G+6y6cquW-6uu7G1M%C+=a#nW$4b$?{$hL%uSK}s>M48x#iuQQdoiY-S*zfF zp2p9Jtkj;d_%c@t4QGU^(X*c9Drw$o(MPku_{BYr zelo6ctmZ`7D8TeOPI{aEZBw(tT9fh>jQ3-0NSywB^$XJqnW7(-*KDU7y7W_Yuh_9P zqma;CPCScO!TBgtJ04|!iU;O0MLl;6uM^c-jUQL1##csaMen(1&c-=>kxz~8j9TiC zTcsge4r8KJE}&>?R{y-FGfmu^j}tPtDUyh6SzZ>C;radvr-WgLx>yfS8b5|}9t(Hd z4uFz-x{*+>*j*m4Gp?d72$kw%uQ$Irc4p6Z#`EOon$N+5emgvW&fPR4pZ3gEHdjG8 zea!sA&*tvYESaOg*J zV>-gt&KocarK1}4d1qj=<@!x1ph{as)RvHMIfa#Op?~qxJ`bQ~Isp%DLp*6Wj!fS< zRCvpksYWdh=OU_qZ<~CU%GEczmndHN<0W|~i74H@fPmgdi}#8Q>dee;c6q;wm26kJ7Mz%ci=L-Qqc!7HCYp3C9_^_rd-1Rz zC9h^_ImvG(M#<-Tjp$>Nk+|}LGZr}pq}_0lww5@Mf~JgrT551)%)ZO&pu>-q-O6kD zaje^|WJ=fIIQ--RAM}cOB@t8cjSWtO$k^TOA`tF-+(PzJekA#`PmEDPJ@C@4RP^i| zHH(4Rkv@ND|C|_z4z_m>3y09?t3sznV3!9!KQc=HxSYC6d=ffU^R=m*O zTA~2uGij55dVOW-r>?_{xE%`h%q7&ZxtN9_A6cvvvF}v$K&*Ve#V)efyt$qP?_xg# zDt}eioQ~`KdLp`qhNYXn!RnrT4&%!7`V^m6YDLKe#LI>DweIy+lX%V^%{Z=$jS?d8 zb2VN(r@j)i;H6%~{hX`ezei4h{Cy1}_vs!@7iwv$HTksmj zjK!aSdEd1ji35{QFB8AHmm3rQUBLo-}6g1`R)h7%rG|$OmO($-}OiwoCC27*U9}o zsw4k7&qN=qqlu1cky=nXG3)8wos#$KlxXpPL_O=`j0Lu@0>kcEp%ZVP&b%IqOhoH! zyC9pVb+mM5h+^+FLgCq$Rh-%?ca3$^x$;KVOBL~a*8xm>==6(7Esi?dXU2!P((=Iv+NFO`B2p^kZ$Rcb*5kABBgPReD=K~-0 z8wgP?FEZf8vItMbIDe!@qvx!d;&2b5!4#w3`;?#A;)I)GTW%=A!8sUWTwjfUyRHvV z7GM(s9y0haL;6^5a)EsoV<3`37J#BufSr++qIEWuFvp*qKMRQn-!51|cuaR0mv^JM#B9Ygs=*ny`;MJ1^lsdtS8r|^Ps@wt(aE-d0@6R*+wp@g z31t?@u!Z{Z43E;wU9(`l@kW*H zyN5Gl#8gX4{Ot*d+lQUsR&Lx(u`;7rdW6i0Q5f@HpF5H;qG{%m?r6oskx9dIkdi#G zvq%@*qw{=?TrUP!P3rA`r$b}$#Pfz$x8()HcX3)Th5rc#s}q3@ZGS6A@lqa5!TN|o zUlB)Ug|~qJ*tG1R}|XHxkU5#y%?+*JR~L)MaJT!^KyL zyV`I9lrKL2dMH&~^f|sy2#RI*?#?+n9fIbqp>r7N7-MohH}AH8>a9Ayjp&VTgObBG z1Q{2<8s^xX>PMR?MbF4E3GTesy*KlCY}Hv5?yR|=AK?emRYs8zXUq$MK@PM1l)(Ww zDNCXw8>F^6-aX|A-I%}KneZY$=JFmqarx36PMGZe$x1YG*P@SuL=;xMKooFeRSUz; zP2-XlEuB|*C@dj=urT>-70{EJXx%Thrp9DZHZfg`-dU2f_Y>)mNxcbcMJHW#cw<>( zaO#gfm8YPNUvn!w&)Aj0Ic{Oo-$A*sTnXP?LqG4=@!)pl$Kshq5x7Iz!?AyE*Mx|+ z$nLECu|A)N!Q!`P?i`|C#lBUYjUy1a^&Q+Jc_o&q(duY_;PFdq6>`q$AgD#Iejnty zRpa%f*XUZ_K~G_ufABq}lkIhCyWq_MYp7O5qp9lQqtJRk-_KH2R;j(3%~=(;Ic@EW zVT+nt=G=9QCb5ICrITSo6ezoeZ`*)Qh~t!SbTx^cI39~@=E^-}s$|)6)we(Lm|Ui2 z))m)|q*jW5eAm~{o^26Yw!R+Gsm&FP#>DongoqxB8n(L5(eTFj@a8Jv;Upgn0$r4O zc1kiL&M3mTNyn8C?3g`7Er`x!%Lo(CJ*N@UK6!=*elmz+g#d4-bjjd&M}3V`Dd?RhhA7`gVG0!f8ks;e43rgB{*B zi0SE;6o4$_erh*aiIvCXr`4vmH+KuqYH(j3{#~1W3k|n=6R)7rB1D?Dd(Ylj)pYN1 zJG`3YoQ;)r*R$(AX`1`t=HhmGXHz2)MK1?wOo(ywhJ{e)OZkaUn{oo>#V?w4j)nVw z2i`o>mjd(E>i!Lx@sm}lc~0mj9VV>hOx>}sMOL@l*W1BxJliG8FiCXj`l!rQs4ilF zGZ(ZPsle@&qm$<-;Zqx3rhSXkrsH>vBH&08V!uC9d%GA3@uLAycVIFy`-3R}w zY1MhYIw}7eo@^ZClR?9GmP|mKCjX;<-9%!@3}lyWPJW*6xo_<2xfJR{C2n3Rnn{wW z%XuIq!n{YkZ!DO1&D!TrSaFob@&@)gZP8RGE5BAfw_FEUFZ3VYPrD@JLrkhlg&oY8Zjq-r?Zm5 zk*9GajpYH^dE;KNYBZoW$5S|v;zR6#QJlh1I+j7=KlnHDFPr{;GB%0V`_M8M(mLkx zV?*}~Cr%D|EryUoA zGwb{AluL@|+cbdebgaW}y}s*zv)SBk<_xpXWk{I>@`O`}!D}8m${AlvU_CAbPKkWT z(%k6MH0NdW^90g~5=-)R9@EwH%HT#2eKI&_bbzh#){}0%8|Tk0;JVwknPJk*F-*LN2_d$D5S{_=*bO%{~aKXVx9mnULBHy0&Qp&XRJ!&#L-*?882L zac|KPK=(XN8ukyxU0suvAjvD}H59#3n4tK1t6@&UDPGrb?Xczf1WSanfX?$EWd5;N?)mr zN{%qQpUQ!DOuX%#l4YJUZ?eK%3n?v5-+;u`DarEw7^>7(^Cb%3uI6A;+OXn{5}|T z(TSc^tQ)0&eND4Z{vyQ<4yy*Nfr-&duAwdlaV@mlxuU}N{^{CJ-rbgr{>S_jhE$jH zHO*dV!c*{S*Zo_J56;Z~sf82G4Aos8EUvYZ2M+S^;56ZM(^(8tJfPa76A!@%*A%WkiBLG0*JM~0O_iofa^#!az z_5eTt&H$i=-*`LScpI&4hgVd{9+8bJNO%AW7!!mGe>R}505m`-fc&3}5^=*?23`X&0EB=i0sFd3xMr?m z(Qv35TcJj)zN7|NQKhV@D6ev_)cYF0_i{v=?C)!ZMJNendej}b^zD9%<2*p% z$S-KQol<@6deZCEgC*}u1%jG;cX%kfYH*Oj0unt&AsnnAq9-;&v@fpOA#rYL-s3bape6O>jqdtd{vV|w5N>4(WlFEie zxP>QS{^CSZs?_i8LIyUS3wF3DE1f7MEvq2qm6DD$c0S40kf=b0GY1HT#Wlamf2j!P zM%b8A-sVh(j3=!HmMl6qX8-|vCEkT5fMx*fyx-3#?Dqp~UXm?d;%)vUTl@)xGc<5; z*x3587Ae%$0t#$>AaM%ys(=WD0AR=wD-EcERrmfZd?6J8epQ#W*X!MG|ABvTKM>Bj zp`SGFQF*t9p;bZ6_HAOYOwongcMzC}>* zt2*-mu+ttmDO`)-kgb3Np{#*%)Cd{KT%pt7PJIC$1cBfRq&EQLj-Q?He-OlA=Q}9u z+ylZ|uD*B={>1bi&j5SIJ$gj0_dZDcVp)++HaFD^04!1-0$yf+vTR3?1AG-y4A^4#TMc)=ZE1l1+B`jde{KC^gIX@aT6bvf zzwXTpImfSpu(``TFE0)?bc$aH0(LW_E-Jkmb5ugx+7yv2?!J9wfohF)%s+Ug!cZ<` zQ(T!`b<&n4=`-@tjth0i6esM8}o&eLMKBu(+)BRp3*>NG*5UH$4 z1i>sopaZ!m1JKY2e-hUa*iuT*DN6V6yhNT)jVxHtgj~{!bz}k%pAOL{GQJbUF!&H; z0Vgb}0MfgCjBh;7yd>MQCs^s>RQtN-N6(^nV#Y5`@(k3LU`Bu^P& zOts`0yOTqOJ~xgZ#(E9fyYSH7QIX4}JK8`9MPVe3e@=A-yNKkO?DQs#)a@PJ+lQ_= z^eS7P9=HmY3hEYaqi3_thD#9nZnNV#+2f4@lE3RLc?R;^=2IRIJDyu*4r}Fmzv)q5 z!r(_^?eX^Ipg;G#$N%n67|l^=dQpPb=xCLbk{)fUY#K&tZnkFMM3XyrkT1lvg(+IY z6D)>#e|z4y2ri1L$QkltkuAMkuWzp%XXrzaK*eyGfdU}fab|HKWp|u?`G3dyZqq{k z(VHdSwb?3{MIhUJAhPmb?I~1rC0BF>LNv*hJjp*iKNF-3##vH!T`PU#+5Mwy?SJc9 ze*M*T>yJJD##s&iUIWg|hqGmcooQQSnMy2#fAg=-y(Gj?AGQBopZ{npvp|31&--#!{j1r(KBhUr!a-d9wc$srhL~^AOWw#<+Beab<%FG9 zf0hd7rrSj#tUvZWRUp=^v=Djlva0l9uSKE2W)SW9;xy@5sZngQBPrOx{n1YeamJbe zUJY>e{I?A1H~-;u6_y=$JgIDH@RMg;SJ-j*qsMps^ph9Y^kf#{PhQ-AE?rkDQT{W}CPji7YTnAgR+(f{| zD{PvlWzn_NxHHcKjkTG8U$89(h_d|@niu>+KyZQUW}y!>shIsvSgZ)9if=VI<*cgh zih-qEK?Y39FHOL1<-_jAql%ecW=Tio!+>4!$N@%2>yYqk81Q9t4x6&x`3L1he+yQI z9cOR@`-1`)sBfxuY= z(_buvBG_m_U&CIhcD$rNwazC0M{a?I92=?JV3$U@Jj(mq)ilS7vY820c6cEwDpim7 zqYl3WJQ|`kd%|3_{4#64C*(qRf6uddCis8i!9b6z;D3HMdMmDKZ(ws6dXW^-Rm_8dhTt0M#Skuafk3`UD4=Z-ocL3T3Bge8>oJy>f*06*gtboT_Op778S}TxvFqAy! zIvJczZWegYVLew!b#1|ee_51~^Mbk8pgmtE#KC1Rg}MT6-t*-{{>hgC{PM(C^X<88 zH4AjoOYl?Ye(N0k8(XRWCr`*#`bU$UQ2mX=FXli1u(&Hvf8$-92U!MVi1+%` zdmytLwSvMQRN}Cq+Eb&_wIKj(Z^NopZp+oP<`+M8;svM>JZZ2$6)xX~7d2P>m zd+p}IHT}i;Ui@0|*%OR8Xzzjy!J-Si&K89)&!PGA^v#Eeu=3vR z;O%xGk^Z%VllsE^v-bbgFuOl#R$jk6^EV%UZT3f>S7EOef9Y4}{PNN9C&zqa;mhfJ zFXf9ZAk10S=<3p>LM;SBe6NkIkzg^)aF$~kdl)TE%#~y*ZImo8!yPPG=5d2y;YFtO zfPMuq>sz5J-b)z{7uJ;v!QuVCAPe%vb|Wq4k;MaUqq$ev!~Qt;fs zw8%QBnZUNges)h`lYFd(6$ zofpTq=X1K9J=hklw(Ibmpo!DSmRL;yj{tn>Q2Ey5?zfivY7qzjlUs^V`VYKp;px{7 zdz>90f0qW|IQvs?ZSr4jDi^|xpgahxY_V8X?Uqm3vJE(=C?(rPA}`-Hmh`|e5KJD6 zY*Co(GN_!-i7B=mB4sr%cx zhk*ez(pU@2+;Jw6Ll22NA9Nq${s^g`S<)MEf5x)6$Cm)W5rB$9>tL|6{L8QD1lfPB zdHL2!y;;aR|9scIYnZ>dng8Urxi1NhLe#=bS=JpVR{7 zTuBbnN{8$svo?_Pez*8^OoafyaEJKaMvBjSBLASVDxd7L!Mh?&PUEzj^gQ*F#6NC{deqput>YFj|et`e+5e4s;hS^*rNKb|93+d^_?aw4dnf>vE+(cq7SNt-g)eRPaL9$Nfrq;af5Vi+ z_zJ76iQoQ3_#N?uYHOTu`B0Y5_?yhXf)A@AL1 z$xeG=rhRRN7iq>6u4!9dd0$W6k}sAjFX%yH$s=?v$szfhIkMWpj-jY>mVdlU>&ziCr$m_fKl}wgCxp?5MBeDkJ zh8621u+)-?6u=%}IH?#@&Powiq%2#5SbE}*xLT?kAOp{7&X@F^#}*Jdf3)Gch@|4; zD==y~9=Foo_k$nejB&L0jf*|sE~NC6Z@+j5ezep-8uQP(etu5-AH9`+d*$DCe{=J1 zJ@nNmJ8$ki7nju8Ukv_W>|cGduTI_TrhnCfH2byPH(tNJaqg#Hlz-}kLTn|fn5>fS z;+nt9cuYlzBg`@dTkKj+e;yjHfjkRl&f;~hbVIvF2A0N2Sa84tN|BcEEW&|@pfCiR z_YBrmZFSh>$}QN9Im#}G#HF!$uNAZnr}DCJ24G^PAom`&i57lz*K~FlmjiV(APT*$ zs##Usn6k|fWj7WA8+!baI=nbfL-qirDjXzFk(I|WXsZ$P^LbFpf3ovj>`NANmhCa* z-C|Hjif$eE$&159fJ5Yvw^;QuNrgj#R{kZ}BO!s&(5z79@$eMc(9t$z$(3%R`DZs% z%843m^p5L%$F*2|aSgvQ{N=iTX~Ox(=3h?S>oqRj^V=`i{qfnauB_4%`o+Kx-hX`m zTi5-U&%W0x#7hCThxrIe z#f1*-aTS+?3cXeY@cBbD# zF<5Lhgi47%h>F79&MZN+U>evXhVIwSxcJ%`uJU(ZLa)>9CvIpKE#IETuRf=_KXu6u zuh8PJ&33&m{vqAm{Ydk zA;95ex0oWEqb36x!#SmRjvhHKtYJZ`JVLwn~FTH5&=Y&#eI@bmu*R%2l6sZSGZQ?JFD#P z(y|6!1n%TEZ`uZ^#>5$_w zW{ZJSe{EFOw(KD|`W#O`lxloKXGzgEZ!83%waoW>b+_s{d`6xtne&_7ES_U=YVO)k zD3B+!!z$wX;t!=H`$hhLz;E<}-@_Hm%#Pn=$FJY;-~9Lg1EUfu(+qs;t#4g0feJ}K z^;vpCKN>L*_RlOoJo4)|53V5#+=6r)`u`hdf5p%gEuD1Pf%eNZ+wqF1@Q#Y{!h;92 zN~BsHzX|PF@g)6UIKnWzcx!9~`q*qAfiUGq#<^{l;YAFY-W94!&Zko;B z<>s!lIDGQku;D&36Rrrrk_G}EFS}0RituktY@VEPWf+Oen3xOm)6YJ*oKdLLUeU&T ze?QkA9rkzMPOsA)$J4%#4}WTn{23VcA3JA%bMSBPar@h6gCjSe3M)0>T!<2Eo+ir+yrWlU<7&`qe6B2{%xf+hC}eRN zBz=EGV71c5U{je;4~l z@BQhU(9+lTdta&gqf7ps_fYkXk)4|g!QmWGOV&y%&I9A)fwRK9hPHC_RZ>IHeF!%2 zMKD}Mh{D{quSl>K`=RGKhd#J&s-8^;gO?!zNK-H8TqPUEK9#cKj$7q+P9h97EC)@% zgANN%V~_j?oYZ^Zt$F02kGqwIf6NZ8O7h=zi~G8tnElK@>(+kyhV4J=`RO73gPm~y z`JW!`PmliiiL%5S zH&D5u_sSbooY416URD7>D@Wb=)t(9peRD=OR{2+KZ zCJ?wtw$i(FhIC$fKwX3we^hj5t}3Zk^V=lhb=EqSRXKE%QrGuMp_H< zV{@`E_~UoKM%w%D+qce5fH3TJ?jn^)*y~&c@BPo;dVSA%H9qO9{PUd8oQiPy%Q=5~ zhX3e|d9UTa{lh;PS#>+yfA>$0{nNYewY_KZyAK6|$6egyW8lcCe+n@I#i-fk?I~lj zWQ*Y`f}%`0_o-KN%N6t6fI}?>t|b-W%@@ay<%F6^iZ*CHDH4|d;t-um3LDFjTo1mq z;Xx`NgvIKfWyuO_7pIOvbj-R1EJ5x&sPAF=@L=R|jt4M=lA5<+$){y({h#=P{_d*^ zInhAK>~;+RJA$kLf9jhgKk_mfVBYsN_>=taJ&0YO?VSCm*ADovr1Wbyk>7sy zFn{Zq?=uzu>S>F=xD??ki~p)UkNURvr)26Iw_k4mfAGVJx$?)MHny3U95ipk)@44{o+ysx5+rn35k#wJuhw6$|IO5|zRNfMeR4{7T zY!c?5uw0yC=5L(SLPZ$=ZYpKl^gB#h>CV92f zV=^0NOKFcEvK~y1a`(PC+ba`8LtgvAM>$S#<`L~ivci8C$B%XGFUtFHo~c3?RMRS{ z9BI*%tKxn#-TWwBB5EO5PRnbTw0^aWP80QQWwE@RwrFvEmhml0^R#cB2c=uByDHiK-yEGqvgI%k1P|nenU;QLX55*X*{7fQ za|JD0ifUD+7+A1j#_5Vb!>sPomow#f3t&BuyKQ~?XZoGX3s6P3kksf6m358_cA;82 z!2}ae6!ts8pq%t!lqo8+G<1Atq2^-0GBfYr9r3U61ngSs&oyoqJp? zVM{Jg_U!WK3zGKxyGHM$tTj4+=%9~(7n$mZF7rK%?jhzk%Mbhx0rO)&Cd~Y7UR5$92;!#!-|cEuzqd{)LXjEwdkk;m*~lBi$B{?#Y+uxP{LH}2tr0}4Z^5UG#> zg){R$6hZ;eg~nw;lO+Q;l0Iif&-}B&D@oK_>GxXJ6E_d_1)+XqB2h&E{VS z=b2Y!GcN~+@7v0<43mQP3q7v=#P)wA;5bXG*z#dnkuisROZ(d}_iG(hU%y7YsNekK zN;D@rc{fw=hK1^;1S2WJGKDGSS3b!Eem6@s{S{}QMJcf<2wU!l>o)?cadsXp2XC>* zAl#U<^P^Q)yk}vzP+fb{FvRW^+n9omOl=F2$4wFZpxh>02Mug==u75JZ_j^NqnL?` zz$#~{rvVvhsur1%{Mk`c3SH4WIn@ex8DjM4j~6T_xyS$(!zq{iYPO*k~{j=iem*Z~3@t1G$ zr7zcw5*?A2)Y|s?IJ2j>*7d#}^In}Afk>Xa@$t5{3#X3 zX>4M*0xN2@hr$e&c=Ga@LIh1iVdSDzUcHoUq`)$k`t^$~*qsfD8s?Ko22SbONUBf9 zm@z8UoT8a$$~saKr|l*_K!-tiyJBHm>gM|QYBK`zg+9zgUnIR2!Csa??&Vudqj~TG zYKee!O6b_|QU4Y=Sbu+!{~Kw6VTGL-Q8UIys#~Izx+y95Za}tA74LiI8OckxCHYc! zda?=>{YoZ3de$roWox-bEr z5;8_UO8*mqZCecHn1<*{+E1>HDshokc)l0YX(~n=iV5AsUb+V}F+7l?iho(Cd6 zM9_T;r_b`yrk`hQ50s>7XjFWe?KFTGq-#fqS?)KfzYTHgthmjq!w2@DXUFIxS7uvo zEx+i&!jKZrnBte#o(Ca+$>wL+>MigIA)Izyt@3Qc<%()0c*^4tHO#0+l>bB<`Dalb z2n@*xef5S@OtPlZ2_A3^0^tE;JhFNW{7NWvD@SLA4J zsShS^%?ir^&#s*K$FLYePxRScsZ8ne$8+#qi0g~7m9dtI3+fAI%PsE6S$Opes(xqkIk{Dw}k$pHf07N44DlwW9JG^FahZi1UvoWD(Q((L5 zhQo83gZKhG_VA;)*O7q_7kK4~j&gw2_w;>Q<41TS*v8s^0-Go$YoNvw{H$zamS2_?C! zFH@xyT9+^RxHb`Fi;Q9O8&MepCAkAM|K*6Vs;nf}9!8aSIx=bbj&=T*V`3PBOZ+j! zgkS&%MdFixX8?E43L%rDa1k>%l|GG=8^K}DnntF%r6Ng#0$vb?pE+@ zU9a5XsXl&cQkQIcF`gl30;h$+fXacB>>3KxxhZsiAR*^-Ap3p6ejf0vKzi^*ZdZ~= zY2R*hYEe*Ec6EtLRX$dUEt~I~d|8^cxU}B?olsFV#wiAyh`)6=sx}Y~hcB!A<6)tz zutkY_w=#~b2(%}O#^V5VB|#uli%z}^g%>oQ?vh;v9XJq%+*f(5+$n9-vW;u+#~zP| zfqS}tJAK7i$S024(>X! zFjq1lKO`b+Uo61iwZ$cvK&J`ooEf1hGVxRuK8rCzb>PfPE}WDA zh~M&PO>GrrS<)wLk2}}tm%UV7I1$i)Wtd0C7RHDcev%6wWlFm3v9aCwSz_S2CiMr_ z1-sP-HMJCW5YF9SAo0Q-ro-Ft{jAjBjJ@TVgue=t{YWsfEu>NcaS+8b!1nRj=BneqKv*~X%wfGySxgZ$d>nSJwKROVk^Jn1Jo_FAuT)J< zrkp|#SYs*c2UPZlW{pBvCSQhsMvU2<{0@tSJB8SLS@%PUdsPeN*0pY}%@A2}B-hwt z?-K`h*Fv^s)Fv_Bg>5st8A_)FMKG7*LP}+%WU;A7_Qj}KP_lP%tx_PTW1ubBZ5xL1 z7RKJ524*yWRyxQ20oUT&;%4^4CP&WZp<=vO&0axCRk%fdK zq1DX7=ZxXU6NBFv5zvqhZ@B9Dbi!B=ZrkG|<`z_rdfrUbvFsyWMR;s)LT62!AK+|z z1DHWPH}I{#Z`nBUzGsEwtn<}*Y8`B@Aix1=meM2F$@Q5l$E62>O39W6uFfhbc?Sk* zUS@_wSjJ=LFV<~*?>y9hN3+?3FfgN2oOl-2rvi%C>z`>tHjC9>)zBO-68^nay2_ry zbii^lr5OpQ)OPXv)gm;)aBDe)T>h<*`H6ZshbHf}syv50&{+T{#wuHvqI;Sa6qXd@ zZ6DR^A~4SZo~V|c3s6^NSM?=fMc8K>*jCe(PfnQ zacnB>K*{8psPyp281qDq8&7 zHo*t(*GvldrZhoU>J5gz5IE`hKxK6QNhgLpcU;hVt$Sg9sDKWp>?9sL1Aj`VaU*d( zoJjJ^yg4P($`ZeS{;+n=HCWoDgMNcBf>G^;8Stm;Uguqnu$gXeeh1(RZCI&^*(?JU ztp=J+gG=y}2X37+O;j9IZXHWxisFOi0?OWhSAq^4=-T9p&bi~t^-vA(5jOgLCoR2R z=OyM^J;@clgjF(X^VTwn<_TYL6R68V7r?Lr0Ph+O9%5O42?+orkql_r!@LGiKzXuz z1cK=+jBh}x6S}c7LwX9_&jM32EVkB3YiA4?NUD(Ko&bKes0VFmw-m-q&v5(v++`=K z6q8v>j>a|hcp!&+iG70&-m8@zf4^cpGN$B43M`7?;P5`X&rQSpyg(n`{`E@YzAT%P zMJ@-=UM$~#wn(8ma?cZ|8$tl`worF!i07d^E5LN~dKN)Zm`1pKLk*X?at&z1@!Ns& z(gJjMU~W&0BS+A*^eJup)nq}~ zkSs!R0gd(!pk+0MPe&o7p>#a>Idu6Hb5Qsd!d7@B?`%P;*B!X#DYi0z-b*cXd4CLd6kEAQiQ-@wVQ^VQI>W&iS$Z_#WS-f^kJbdKnuYH$gfN| z(Z4x5^F{f!BE!!9vG{C5zw0f&c0aUi*rZSOFr#<5AU-kouZsIMV~lY$W=80Xq`t*; zs9`qGQ2LXVNNNxF9)y3+J?urTB~m-P*X?%cKf&?sAr#a0Xs0#>QkM zq>rk6I?|Q8kCAFmDzr7+QiN1tdXpzsnKL#AoUUs@qQ+x8foI+6JAT*(IbeD&rgPT$ zjRG#G-3%;$Pyytk`&|CiK|fwNg~}@-s|PDunVe@~cpF;gJ_E74Qr>48;%88QeV=1U zm5-O=@zX9DU4AVuK~f4T3^>XB5ECQ{VY!8;k@qMvP2gO^<+i+SX-~qw>~1vsv@jcf zqi4uF9P1%uqWc?vX@YMCChvmq+)XcH33B#8!c&#De!NJkK(#8Uhg4e@hi)T^2S_AU zlU4eKECHB9vfTC`HDy2xx=43_Rf1L#*8Y0!E(xAQXWTtzQw%I2s7Dx<3Wwugch*m< zaLA{J8p(=+pkWaK_};=3Z|JRgz4O6HLhl|3ooGH_`gQ0$>r^m%bRNrTFeIej2feW5 zQoQ1!wT7&bXAxM0d%%p;eu()6BYzp7w(;)4;bK&fd$mt7IP=I*2~7Kcg(*Ke!}XW$ zHJ^qbnk$Q&6rFUj=Om_cr60yAt>Xv%+0Y}>({YsNO8=C|))Wz0Jvd11Y|O^3+?QT( zsVb_3I+?x&#i=W)OR)4jMZw_kHn`%Hh2&tmf==b2&WWmD7%zlb8 z$g*sME#lAC;a>b;hMseO)p&^XV)e#1*++A4Yx7wNzW`GPvU_s9T{AT-?yR18k&{81 z)1>mjkSkN)@&Tn)VUV1<9)bL%tM`U@&V#Sip!OmjT|#9?8N23WF|4&TczY=>Jh+)X z`A)%w;y-J}RTmfajwo9iZgo;(DZ8WbIjXRKdQC;E+CPnTvO<(m(tN6 zzINIwZvR^6%471X#VBIYX^NXYpVB4z<@Te}-rm2M_U4K?1=d|xM`f3-1#q;n1tCR@ zg_vPl5}StM12X1+(vkIr*El?l3_5pv)E113Satqp2vXn+EIwjLR}!r-nR@F=_&f>9 zuSb&rMfRf@wQx_4Gl6JnbhrDeinD&WSPtn9ETktXz!GV~_LQTcsmz#k#Iik5!VD4xC$cVf?5!p1 zwYVxn1+H9I)E?PP;rtv&g(^M)O)|EKMxzGZxK_FQ@_^2Rp&Le82)4wNFVo-F$H8#& zfWs4z(-P@_^>3`(v*i(QDBN<|noO!v%hMx%@hK?|OmioSZQs`y8Lyy&RIH1+K75>> zdCW6|D0FgG*G=DP6+2;5k+xc2tPBGl62Sr2B}(YxX3Y`{Iew7Ob+QRt^}DKgKT=MG zov5XZEGkacLf|H$vd=NI89i$T>7P%q@4I%1A4U^@H8HcgK!W-Km@Jsj_c&lnGNHR~ zsBKbXeG`C8R+cyrv9s?sOUSc9mHs$mF6AxmXie@lioK}Oc@-VIsrG)K{Hr*(gXn31 za#BKS5eK={rXXc1A!)+l`gh}? zUbpk2=BDNoY8j&@?F#tdPi+9tI;*)~-w3Xjc+=X&H(Tm*dxrE9j_Tw)0`W4HXZAOL zyR2=UDZET35dBI!jqH4HZZRBYMUv=<@HweAuf^ABu>2{D=iT*@h@>LIvwPwmgKGjh zO6YG0{@^t{SjUPRy^x!9&ZoVT&q_m*bHM7!5tgE$TwwFhe0fuYF$R8faSQgdu6MB3 z+ZtbF0e7-Cj5?8kYH}9g&sTN5PZOAb2Zi|mwt^JCsxL+#H6p|z{HBuHNaTzQlZI+1r3;v<8wS^|?}_ zh9eRLq_%t_N0(WjCK*5OapJx?+Hv4EFPS?(u4VUp74%hm&8CwP^c_%mTvOqpiNfU_ z-@nspOfe_Nm#4xQDZ#(FL`5V8UeKTbeqIcdifw0ZhX*8x8pzEt%&O9V^Rf4ev>Px> z6^8pM!eJMLlx0IZx_uzm2Q8HqdxY%i<}^o7$>SziSyozA&?h2v z{WkWn^1Z|)JUM=sMbOP`yP%_@n0K*1(0BRDZeYv5it=zJ8WmEMNd+atH)86rXc9RF zhT`Z{RWqHgRCojNYJP_9^W_v|!{FJ>sGwjS;A+k^(Ow&i^eygxp+8hARuz_HWbo$a zJJV$S(|Wm6@Lb38GY8@HaeWfZ9Wa2ewqP?fEWtA#W@PAth6XGadBO~A?vp`GAyzQo z)9KHYOWxZV!U*6HDl8B*kylXmH6LP%lpHyFg4ZJp)F87hC3Rr1^kOC!3nE6mFrd48 z{o=Tj0m-6)iUja~hd*b&dQm=6l}F3GVHc1}0nM85c-K7#yJ%E@(fdzfVj|uRnqwep zi*L<*f8ob6vE>nperbW;{Q#f<+wxQWMsLIiUOP5Bp!J?FwY`28c}bE-ADU2eD#lgs zRw?nAe}3O^G-cdPw6Ll&G>9=&=OoT?Tu&Zk^lq?|DQoM5CHNzmT?v{GZpA)?hB2+%#Qw`Xog)|-5z zn(h*R2_6d;k>LYUOR(Sg^FCugkjHL0(@_R=Bx7b90hrMrOH|jx7Pcp!VMP7mKYg-N z-NW)YBY@_du$Tfffcw}n;>uR>1=tKvyw8=kR8C-E$M7KhT*ZQn4#J$S^P!_v#B80V z)%iFWPNdKFtD10bP6m)ZW8&$!3Y2mUYIZAsdZtl~9V>s*PAuWG-V%7lzQ*(Bx^|4I zYD98k|8SW&cGPS6u?%2cwh|FPY9=>8{zMoo4SIxkS_DbG#OS<0JpxKOfeLt)D%b0g)#x(TvVYc&8>b@NW%5W=s{b|dRP4`!Ae~q<@UMKslc07iU6yNq zAHajyCo_pz){?^P&n|SN7G+0Tq9QT4abp;-2axn_s{6Axmg92ymNcioj+p7*??v4P zTIvN0UcU>;F@JDzJ@9~?LIW8w$^zL(-Fz<0j`jfw=E z18woRi~`w{AB@C8`-s|HbdGD(kyE{oH+r) zsY{Q}u>(*cQw3h@+dO*cCp_}$Sf7t0V_Q$u`dPhi67hHHoe>j>@5LmTC{Z>a*wt4- zHG`m9PRul5ypCy;vLPU%2iY9#Nw^tY8S_(^d`!*-}1v*b<4+|!%|s&nXVdK7D0U@;@x*(P(2Ic-;G2Zj=W}e zX=~D-@}$nWg%^x}_YmNM%of<~yKdt|9reD^a7-lije{w2hv`!27^iQ{Qwf7)0cRjx z_dqb{H{TRisA|c{mcB&NFxfETsJc-9YmKJKFRqr4EwJ8r($KsO2aG^_J%ygQll^5k zf(77okX*Nr0L6Uz2r%(6KHA>JR@%*5Ff_T;+n{Rw--2f%ML=GmJA8u)0rci zG?{*rFnk~0ZO;X`fb_F-JRF=&_)(Eq2Gn$9+@dBpHN3?92rh|QT(M2N6JYpPauy5D z%_8;q27^v)BU_!Is?a+E3;#+HE36y~G}iA-*fgd z(+Hf%C^oNu&c8P$nwL+o(}&O9zwiDyN?^Y=!J6U9P%uM+rf8>+sv6a!-vP7LXodjG z>#!N|b=DwYEPp+`jm#SNcAi!VH3LB#Eq-g8cN@*Q)g3)MWeUiX)|L7&mnOjuMeRwF z#U%hrSktxM91Q~Gf87kZbgd7h%a=pv74!1OaxvLh=`UC7sEFeuT$^}hnhfWi17X}i zDcCdDS$9+2!a6eK71X?|36uM@hmw9X3=lq%c*|v#X?$A;%Af%@w@0~gpZY8(pbo?- zEy2n1=ZPec?IS}G9X5**mzKK+CS&9IXoxmsw}J!wF$UT!Qtgm*EQocV|!yZ@4`^zJCL~)M=8rHXJEZMDQI3W1e`UUD1 ziQDTEt*QQC3?k>AC&;}_yQZ@nUI*2oC0vQ}LUfmsnMrv)c9G7NDWAt?g*Shdi*?sI zD!eR~-wl`&&zt#0&(NAh2F&DvD=AfyI?uNsyW#@eOP3uw6zIKxHJ`vBN1ICVZ372@ zMGv&RMe<(YQf5_o>x~CcVR%m$IJZlr(BeU^UfJMG>2Rx@MPI_i-DQPz%cf_5)@FjX zMteuw<~Vpop?2GLSAgA5D*}I<$Q%GAzesIo1ZYhyxs8la^RnsrYP|;KG5{qEDb&~I z>3S(MjUtDAPVG#rbPLWobcZEbRBt;rv^ZuypytNT!1j5*aK?U?M@lkf=@s7 zzGOA6`7N7#emurd_7`oJ{oo-*`AGP`tz8mrk6EIM2-N--V;*|0CTlU zg|)y45vNhYHTOK!Pe#<9b7U|s2ZRH7J(^h+N6EW{M4xn+nyr6~FJcL+E1Y&P!L(Ja z{c=c|@vI7rbh7KVdxnuoQmKU*2kg8rZ2g|2U+DG@r;qbdG-Ye-J`~o(1+EWrOhL1J zSAW|>4K`Gl0-7J21Q0(=M4w>IHkp*alJwBX79*p-YBwG^sr-C#!3{rNQyRf-bc}?+ z#rx+(uz6lL!XAI>r@YpOL=-CyFnb!4W&My3J~)l6D+M3TNjpNTd{U^;ffn+K^6;hD zsZYb43!{LvY$s4+C@G4&!E-+W`nUvIcpu_(%Pk_EinR09U(nKRGG;GLCvd<5thy*T zrX=u>%N2%-6k1K6(0N1V0Vg!R>MAxmRt#P(m0#~i3D18a;Y1r`wu;OfdyVy#lve$c z7uy$15jhKtk9@qiNB&l2wmo=?2B<3Fq&bQ(bF;K3PKmPI+pbh-h0PQH?6$dQSI@=$ zVBV{|W50Q`o1+o|wu6n};kw|LD`sL$`T#dT$iE<0MCb?Gug{0QnK4p=fq9jd=eKOo z4w?uI7^@MpvV5O^B>9VCk3n3_ZiD;xcvhR_DjH`G91f0SO;-o|f@^YH5<4$5X}iLx z>4Q+x&X{Te8A`@zN<$j83c&%{=ko%ARTeh&2=y`kOD`lTb1^0A0!0~Lqi@@_JqOCH z?~G!kk#co@v`7*6^af{xcjn;sW@OR5fU=d%>xNS;;BH=jws|`s(spOE*=2qn?!=Um z(|dah<#C(R;Fnmy=my%8SOLMRmJ0InaG8x9eg}Fls)pHyX2k0lF>ju4{u70b zv_+#&gsOev2{+4MW=Yi{-_q!DT~*S@*?xm)Ht z_xrTGU0Y&oUNxo)OjMuUgpEJOZrw*FS);4*kTp-w(8!eUq>oUGOu-QPr*A;e-etKN z6TDx4!JwoL-sWID)gSh}O_FlR)E=(1YV%eHhoXIS-VQX8)<|A9j%e*YSPUp0FM&OK z^#&oZlMDJ@IBMdd#8aWTu(j;;ay_9Kz#=9d?J>84?kkIJeyNYV+t#Da``z05?5_xi zglh$VS}Fa5iClGTPymIy`RC>e7Ej$2h@CNh!nGU%YH1z4j7M5=?$JOn^{fMfa-RpLXf z%!V0&C3EgbY_i+ha~V6OgZ)s-*s?PEY}T|o;Hl0`t&0taY&>a&cLu(lMj zRuFBs|EEjO`M9h>+PtlNaS~TgV=t?JMV_A+R$y!4*8l}oeeA1}vc}dI7a>rs61o)M zaU%jDyO98Y&&QGi$4mmlED9zwChP06r7WF-X*2JNL)G^FfrCNC*$Ag2J%vCJ%g0x6A= znSn35fb%W3VP{%3+O5zgbGwv@l9SFJ(V-;=idTgJ?zjMwYAWM72wTpQM=KfI;ZEwy zX#YHW5rMLdIZ}*IG_g{Q@0>(Hez7jy?+jL*u)#{?G#|6Trdi8I-%3D?!Q)++~n;f@7AELzI|)DDB#JQsV7HQcC;gzRpYBuM5=6n+&T2 zh_>sh`z7tmrd*Y4wQ?_o04*n+rHVpWbiV}+p|7TS+8sb+`FQ|8F_3|OJWNclj}^ZK z2Jc<>Jljgp2}?I%$p&*JO*d|tjhvdyH(oD$v@eJ0kPUMCOY+m9qlb7{u_wE{lnpms z#;Dd#zwPUNj^FJSk$74`PjrNt8y_$*lkQKKLSQLfspPxqTUp_{ac*hwwtl@lW<`lI z2Gno~w=_4kJj54v#ewL5KeWy%?4BEazRt{qJefucXe$GMUY3GZJr$5(eN^`(!aJA%GlR_E2q0B(oy70~iQ-;<#rW85k!V5ILpCtrd6ixC< z3;|V#^xEne8v0?k5ic@lNeIqAtDsMy%eR6N4_3*UAl$3!yvx0ueb(ITc~EJi`KhZ| zL|>RLc;G*pg{!rH4Qccno+^d@2(Y4fe)V=d;kmXbqUNd@GEeR5dt9vVSvBrFz71<0 zX$67-Lpd~bgq?FUqEiAeyOVHsgm96GCh$KTQr+!{3Lu>W8m}=zx?2JOg0`a&eF@vv zni=xR58Kj0x`@%bqSjc^l!=Qcm-k;1mIHeFYDvcQAUzs?5K)sp0MxgFdl|WE8nkI` zNa1*g7@}>CI#<8pw+m+p(vbEQd!*Hbs{4qvb3^B1cOxPxqs7qo5~Po_KK!>s6VG

5#)JXnK5D^*9}O9yx@M94xPtxM;8tC3JU7KY)fD@(SPwa5|7Ye;dOqtGo=EZuUT>hSmqr=t0cTo? zVUoQCm{?mq$YH5J!b68~pyOpNN|6uMdAD2uOg27&ZnjV?SY+Zi?g@vR&&))+p@cyp>sgS?k|J&T;IbJ;1 zJKNZ4ed#N>>P=Yg-|(!j9xLZH{I_ww*7+|SCE7smPB!~jkz~twB}aoq2mB~@vVBH5 zO{-6j;Jj9!Qisn2TKV|%fzHtko-=O&|AYTadQBfEX2|sZ+f((NDn1X6Y7uBN9dLgb zQs6mcZu|W-kN91U^uS)CdxJrlMZT(tANSoL?n!Ue08<(h!$V(d;>WIK7=R_i4-$CO z5EyW5{8Ikn*=yHwhB?P;Op0Te{;G)(H?eqe>JRI&;c-qGA&)CCbI&{mZ`vqoI73a- zbn_s_>t`|B)C17FQJlVuc=PbW%wB)7;3HsVF=yiIo)H*;l9ZV61>S_uw%FG$;@s58 z<@N~1ewki+O|eX{e!P4VUYRcwa6<|v%CVQB3|ADHswP8C0!g6dt$@JuNmK&V=uf{n z_5ECGfnkZhs{9R_(>n5JUsjLl@EPlV=I4oW0(5EXeRAyNas*lLmmaMt=4F43JD|Y5 zJ$n11Hrm+ulx$Zj_g3Ixv$iPJi!d!;AjJE#0E|mx>>oSTz3h4|l03WQHB@o?{q}P; z?71xAl+WlgtYN*C(0;a8TXU046`mn&-sDsFJ*@Rm1afMUI`=vK(qSKcAwpXpKJ{B- z&IpRBXHGqa2e}EYJ_vP?@auot`(F8AssS-x;(Xmzg}?%kVqCx8^~O(cct4P1I~d;s zGa;ee>A_jK+7*Sf3u9`i}uY6YaWI*|BTSIQQOKp7vr6p7}Sg z2^|L7qiT2eh&2IZ8yits4X^3>;M_Z{2VNm5WFcKXeL6oqikw$iVevpRrgP>j`-n!W ztq;lkZG_w~Wb;R;{Z@b3h(gYs@8#Wt;{LyQO{*_>>S4Y19+R^lZGQH|Ogi@-tKhnh z+g?`mGdK?IA8EY}ea&Z|LyM^Ho~X6N4eE$4(^_MtAH4t675mF z?zOB{>tn&M|Ni~Y-~FupLz|y_2U)u^{{8p8iT{hgkBUZl#Cd;O;tK8x5IosD^$i{^!Xm6$-Fv$s33BiIW`7c5xrL{IS%sosBgfYMbG`T z!d&V;?K$)pPRunfv9*MCPHT;9dbQh3*@LGu<$?Z5o}L$^tXFT83y!$}EkxLvHHoVt zPxBEq^#Ju>9!oxbQjSNS+4&$scKrGlx<3VzrQ_sz=c9jTl4{4Zi*?S@SbfmRM*(Xd zhm)U3a5=N--#ogJ!P@Q(C%^T1R?j5iO@AlL)hjdIr@{2dEF2#l?A?bNI|?0PuD0=WTNhSAEE- z5B~mkPF#QbK`%POVw;)k=OOXvWe%%itNO#;BQnejE!bj!{6Sjn??q!y*C4QdOX%)- z;q!bp%h68&wLKLwo(Y9sk*|A-mr{+lm+QcM6vc5r?_&dPc~h;A$Xaye@Aame{@9Cd z>vV4}thn;zO#ww|>rSy;hDVWa&G{M@@vBBW-n)Ms9{9qeNwhQWcsqB4`MgR=tVXcr zW9HnCVe)>z(}ob^M%1#SIr03CqGbO<*>f2{-?u$R3bipj(w;kWs=Te0SbpOvuD~3Y zUto$IS|1znYfr*TuG_Qc{t@{|?LErxFMW*XzJx=5t?f+Y^)st1-ENiGh1HnN`zO5H zA8SG${l~TNFTVHEWCRk>Lw6s67Ey96)&Wt~>b*Zi2B%d0u~YQv3*el5cV_Wl)Vs=N zH7+6H4cz6i855h6;h_`v&zIdA3?6?vL(Na~+@6=td1mydz~}SUr@ZrD<%^t$2`6*~ z+V1@vUWDS2phUSNWD!lYGY*cv&$J-nL4C^9J%p=XaFo@M70!rO!(p<@zDe;TnDZ1+ zwGU~>OEAYt;t9!AHKh|TCa@n6&xju&?P7F7a^pJh!a}NgMy}z}%O58w?76 z>5#zKc|~6Z9&re#7|*?%9(@Ia#-PH{2DNGe^qz31cfh_m{JK4-61c|2V0qcxdYkV^1rCMU1!GmS`j2=9f zm>f7*2K&aP6OAY~xVOEAtrqO8W5BO}%eg8B$e+SEmOzdD4wC-#3^P5#fiEC7AvRPx zg&v{vOknfxST70}IN!KNIz@iXgC{)3z6ZQUl3{Odn-hlx*m^B>o*V*(1d10-InO;M zmgx0zu-XV~aolqrUFu@qiR5_Rtnr*b(l>eF`10(Z_6*DN@_6ghj$D$>nVrXf6O>}{ zGe?dL6CSr`_Lteb7Y*lApe%TJlPruAUX~Pj+=joCq}r#x6i`)z%9kT#;Pdg5aK8KP z*P1^$Kger(SXU5i@@=>)Nj`cBvjsb-P1&h}kzTkZAct1Gl@l(g|1oB2aMvF{35Y?w z)P3wj6?FV$KzGNj|Hj=H=7C~=%m+~kw(IX7uKRSYtJAfY9&(({>^I!6-WNMvcQ!c) zx!m-O+;}4KIeNv7VaxG)R8X4POvnLLyXPLd;+w^`(*OC|miBst$9rEJiVgJs z_ZatRw7UQOJ_-$cV67hYAFk)(ui#k?X6}V;|N6}8Gp&zg5m@hbogpiK*J{kXzq`h_ zX7EbPY?1~f_c^0KPotcQZ9+|FW;~Wq zxGGS3w`+C$^0v3+%Bw$r#i3zxqv2_+FA$CiJ8_Cnb@soT%fUYBn!@Wwp^PP;?FM5o zADy?yFEQH%V@P7}AFeaocAw`-`LHPVJxZ7N?#JxwC!2H*N6lX&^T%}=EOPzp>$-_a zZdv)^A!ltYgT0ei9l@siM%UAc2Ho7v)^NzOQsin+K=n!9z*|6n7PWF=9t6Y>Snu_P z4}r$A8xP&Bk2(p;-=l*#c&Z=|({MHKsVUp8CV}c#VZNA?V;w9+O-VdS)2?<#oV*~5 z&6^CzFH}{@dJ{U|=52s{d|H?2|IJQyv{N5=7tlJG71q0jr)}@2h;a|~mUeyBHTp%7 zAKYguiQI1m@3{h({vHe*6e5Q8cKhC5yFVI!&w-Desa?0p@Ru$h3>|+4RPBWi>*uZA z7sBdX@3_1l@+yekQ>{M6>Q@?G!k5i)DGvODYBL~)f1<#Q#(4-^h1dMNwbx7T9aFH# z48DPK0Fs~fD?HQiORi0<)!IviE3f&s8$Zf|E>(Zz><4$IjmK{CY~vfR%13p|uaTYQ z-D7^)4V-tIUycCv4Jv>503vODnIBJmu&49OX|2LIa~9(BcYHliRq%WCxif<|%Q0FQ z%Tv9V-_Jjjj?N+2+OIV>MyGEmJAg0aQ_$Z1X#4y#C5xG@yO0qKde9jDfO<7x#UI?K z)h)wm(5rlsYoaii7MZ_l`^3S(7?vFUsJ_qVf+3TZMTgwVHs62UV^;eN`HhAk$8+Vy zV_Y7`Nq|gSquva9Fs6M@km2oZ3aX1T%;tWW@PKOXHDcZia&JKK7uX|5rH~D9%^+La zUT_d!dz}Zc;=8!YcS2zvIQjNs{e`{*tj)K+IL@BEsNex0M~)<(Ybs6!_dw0OGp}cL z)j}dGm8ZB?To`}X=-X`nO)OhdD0$2)a5;58G?^IjPQg&3qv{a5}Dcn844U>GHuqk+;0BA7=%!VC9wf1>WJ0yfOQO=cRw;-_Hl4Z*$4{E#E8nBX{h- z{K52^!(YrNYQ&r$6w^>$seD8Fa9+@4L;3pV>iC*(BH<~dCLBZqxSjJ_u(Z&{hvDZr?oMX zJ#)!6UVne=appIBd(QERZrA(48^d1DQv2}Jw0jBqkKy|%t0#=H{qYp<-}qcvb}eCc zjeplUNXcOM{i@LdhQ+x-YNeW$%BwRtDYar)7Q+jYK6 zbo)7vj|2=m5FqTQL6%u=rI!k(_L{4x*;wuANk=MSn$bsq51dCBW8)d#{{F*nb$ZOxv21&(Z64+9T;G2(oP?`~ zA;tGKoK4Gf4rG!kC^9f*&W#|;ZB>A7einbcq$$lQ@-0E}yFx#z@aVpQdUv&%3>OsZ z4AqdZ#y!VNjxl7e2Jh&*1?<#A5F6_0eCm-Cqf;SQ@1hOugzKyitGtVhjZ@KC_=w4o1zCW}c_Ne30b&y@CTWjrC?UNM4`uIZ0PKkD40wog8ZJ5O6~{bmo7O z34fj}4xr1vPwN*C{TLwv1rpgT1a!Sk+y!6QJPh_8%RcN~+<~X({=(CkqZb6ug_NTY zL?Xabvd@0ph3%`FRuoDRkioV$Tt0SS`PlWacS`Ni|Cnx#L=R-@e0<)07?;2{ecpks zpWuE;y0~8wjprA`7#S|$? zc!t`~G4yHAJodh|L|p>^{4CI;d*DoKl4wB^9jvIT0;XQ?K7JlQYz<3$6efS!kV20* zb3^BNAIPV3XM#B_E6h(|KgH(6k+_~ePFg!CVMU1Io(bk{Ob`qXT7}5xt{}5}e*}ZO z5AbH7>oM%+E|_de)Od$nD5uh-k_Bxvhqiv#f5-aW!NB6gf#_aymVcgwF6>#&-5Z^1 zaKjrr*VvI|w^!>NMtuI{J_CB}Q*na;Poz8!$_rCiFi(L;= zl_@vY1(oO*WMbqQd)`sZ!%Re+h?1{zMpS2-bi{#Z9Z|DCWE;I5sDd2*#bcBREI5k9 zH3?BwVEyoP7N>kK2dexSYjPO8jV9R~v`o`OpEIU6 ziQ&;}S&ES2o{Qe-`+-OEDk0@J)w~(R{bYWqH$_s%eu+AY{){K@DyrtraJ8?gQe$c= zMl=3R$|}Tj$&vLMFj+lG19?pw~ z>*^az!qrLl*IfGuVJ3z|XV~W^U^e<<`myt-)e8w;HVkzgvAZg#d|CmLUpuTxG%D~{$?&^b-{tS)oWN|fLF)6f=A?Mc zIM%($(OK%jH+W8kLNjA4JJ9&I;f7NSUt{L-Lx8FbK#_U`eCMUl^`|MoU z;+Xjv+v{cua^B1GKHO7tv;IAIQqnO z=coI{TkUTm{Fe8axwcO0qOYrl!c@#Wb@H5mw)YlmGw20!?!^typT+vi zw9gH$HEkZ_%-kq$&pZ?Ksj$p4tPh^};dX64|1Rw7)NL;$PjeEZw#o>_j2bYEttRRN zJ)D1-LJtG(5#1NobJSEroR`dKZ$hCz?-fp8(d; z4^}I6VuvDsrS-@e-Zd{%qY|oe_Vt9ChB_wR&*Rg#8*p6*&PqhiN(8h#y>E2yFz|mi z@U2_D!=EwrNgr7pEl;4W)i0nt?N!R#KojSKShK%613*j`bj;acQW>e|oA6!|x=R?z zI&KJ(;e8$lYP9aDy6SLGr+v;&*UK;y`CVBZFF&^2JR8fmnn9e`nHOJrUjwUUzn)?K z>O9aRz>(n_rYm=giF`u&cZxYH5MqBCb9QqMpk2c$UCjrv8@pku3uuVZCUb_a*&=6& zfqE{QX{gO##s3#BkN2#QV`#Z%XaC}V$r^gwkPK_)AI{&f@6dx8pkKVN!~v z_xqNCr;8|=iGt{CNJw)?l(hU1(8xwwq_!_|>~u)Ukw>#V_eI*zbyR0A##w)lF}*pf zwmK91%m*)HdP4sl$LZq^do2GgyXnGfGJE0}TPsN|hw$VF^}o*|ykT`dX5L*^?0lt( z2YB9*!rRU{RU1Ap^8j_CmRZ<6J`(e9Sx zyY%PVzAtvxKIF%%*XW4_l<-egc+0IZJRxIFh>?sr`#xREHHp=Lm^fM=>TjxZpKf0k z1D&Ix)$TVVB^CC)l5Fy?7-@H( zdqPVQf%4y}sk3pm|Cf9;G8iK_X7v(%wEGJd*{G|c&l9hut%cg2Vyk6g}}bqW2zyTv%^4 zm4z1_f6!-+I-uHe4d`#rhSf9K@w%SHv3A*>=P#@%!ffse*Iv|q4k%d7q)9XRFZ7?w z=Nfu6#(MATm-0-lF8ay8(SuR~{nvQ?;p+nTdz+8QoXhomSiTipe5Ka}Zuabd)`r5h zCm>o;FoXZa-n%tB%PS3He_R&-+CU+pkf?nQ|pK4aP=gfgG= zCIOYnY5Hx`2$N;h5dJ3EjK>%PO(Zz_iqa4i&DlE86~Bf&bc~aJNby~Ca9_|r`_~{) zf8Kk|R(F5Co8t2Wf4-T6zs}wi00o8O!sWg*a7VQo0&hz!81Z*eeAmU5%NK_V0b zGymk`_;3?)p!J{Bc%Q$|%`ye3F$`&o2fu&r$BXcODIo5TJo5P&BYSZfI(BtC$_M zr;RIkr@RNuqM!=C3#}1w34b8YF9$?|Mmu&X(L!mviKCIiMqfr~Yocg-F?1DCUA(ot z_@Y<%Pk1H8W(hi&JVXXu_f82V6+}F26)7)b-+2boi%SA$W@Hqm*K#|^XCg~Ve`=AN zCGSdhC$Hfxg}Yil8ikE`Lpk@QxQVtB+U0AL2jy;5ugYK)Hf(~*4ON?H&+fHSJE6Oy zL{jIpJ8fyC65yV=xu$Z){ZthXP2&1)Yk=n4kI}s!wbpySs>#q!%^10Bt4rQL{+I2s z*tShK>B(@(y`FJ%wXwGTGG1D>e|s8|u)KNkYShAZcpI^C2;Y5uew$)S(7Z>OVMEe< z$eMj~IPhs}5*(FZ^3voyTByt3GEzpow6k?nj(PputW99u%geO(DR<^K>8uvq_I^Ll zc6v(QhwPk$r{je&&%?4iUfA9F-Rx6-SD$wxkO@fIh6;9ssN2beoJMQye_7=P&-84X z;g8&b=0QmRyRGVtkB=w=A^~m z^W`qS?hv!$a+7}a7UhB>!)ey-qD2PN5acBg&90%8%Pq1l&9S*`;yEGq4-esFpnUJy zq@%au-AhD=C6&Q@m@eCzf4s+Kp6I)VZ$5mj_4RMwnm_SkGWH3GH~#%50pR!}_*Z}Y zPP?%STt>Wp{SB((7kB*jO8@|m_`(1{v49r1*+3~w2>1f$TcL1@W(((?M*SBbdf-|} zBH&Ya!i$Vc4=f-AK%n8p20Cy;a4BTsPx(9M=Peli_0y!I+S=_G;=%-gs< zA3R>4S#Ws|;}u&{0JLFMfD+q7D3lSv2JgtZV0{RMVq^I}@TWeRz$sb90#aB$8}kzl zu*3ea2q3`n2_ClR4Ln9*VS3g0>Z3Eh`BUTt`%!`Q)hvegf5*Nau1~dp;S}pbD$WNW zgt&YLF8Y4M^~qQtaKrUG7&frI;m7{hkNjzrazHRJ+<*0T?!WN?9L(SPuCHMsKKd8za}OZ# zg?Gi5{tpbL5SWY80Y3dlJYQg#{u_+_@Al=d)&PF`feP0zWB&yJ2@7~)|M}F9QtVGN z)}N2_a){Wz(EIxTZ@khU|MaC>n1Kk&z17zSVe`YXP_@&0MgM}NQad6-_bfBOUfOaHC!2NnQ^Pq_Z`*B}3p z{}24Jk6QnuJZAlE572)5FJSPW{RtoVV}B78NW}gy6cPs@&fA1$42#8A-j9AFtZymB zaBc$@f9q2!Oa|}^<{y9C_b>DT7g*xo)1e1!``f>kHk>hRE>a3K0^YHIGz~D9r~{%c z+shZWK!o!|$#wx~*kOI+_*lOB6Ahfq1n&mnOPp6y>}7jTKlUswCxx66#5zGKI0Sr%?tiL-Hf9d0Gt1y4cr+43Fk+=E^znR5xAfp zXbTu^bijv~1~m*k0XdgLH4J`#u$KhT161SwZUATjdI)B~OTbJ4h4Xr!Q)t+t3-G!4 zLRZ05APQI)tgLRaa&QEEzgR6e76qt)sz9TV5Wt5X3bg{8Vx}dJbF4W=DIpVA2ix95vS{C>aR*ZZBmUv9hoo_*t=u!lXX@5kG&*)x}Y{n{yes@~gYuGrVqY1kj; zeXO7Q+k4zwcaGV=+r4#v;po@9+*^E6SxBN~w#avoq2*hV?qE-Gms_3f42pi;%F|Au z4)2>VaT2v$a_+>g)O^bs^LV6&dp?^-iW;u@a5~P^^PN9W#|QPS^T+9Ur5=~OnT`(C z@AK+(RH^$t&rZyn>aO|q#AH-^%l(Wospgd%CjFv{Ax9bYrZOsj^Rmf45U<2FC;qV3 z@zZ?X4(=^dPECGrsNislM&(TWiLd|v@App!Ku2f+q8~bbQz*hr1Hd>g3nY(xgd=Y! zBPyd%3X7`s!x1QLm^7b6Cn3ItrW{Kp0U$^Kus2f}@%bCUEbQvA0C!sy33hq{kdBN} z*@S5X#;8c{8?3f}4+NDTp~>UheC~{F$R-_H(Sf1hXTmHa9SELWZTLPX z`vB31#8$?qPaBgT3)%;sk}5ht_GY57G@w9W8LS0}FpT_e6AR%}PEUZK>wwo-;M-(T zE3Q$FFyig)hqg%+$u36x#Ru43Fep8+N!|%eaRiYV;5s*dKl0is#APM=5h^;q#kDB$ zX>w^pizUKPNdO)AO)=VnX%l|!1Y$Fy(hiWc{_s7&b)vsSa`v#N1T#O2f$@<*`gf5%1gw&$nB?m?j9ss z0IJ%7*BDpI(s7TR)f@xvnhY~A5-ZRDU^WR!P2gQ&DqE-pT z2ufa2`l1>D)L3DGZm>WUIe`Ri=^(m+U(|ssMv4x9fk##gaK}VRAPC%F+f5@##}Tx= z#wwpXD?>qqsZn`5 z=GCF+2)ONf0<2H`BNL4aVE|x?SPp43%E0YQJppH37Z7NIC6YuzPk^m|>IcOLPP{u{ zpDBtFaJAibiy)ZV#}5%&1oZVE!Ci_G!YUen01P)oLjdclGRqN)e29>ScV!B$iInZI z2-T|lVzola5dzm;_`s`kyb8Lly62a=NB+S%V~O#?;foj1wgRZ~zF)0Esq!NbOM3v0 zMs^DjtOZm)8f^}#p0)}=Y3%_(@#7Ch>*2&QHsnJc2&$)PRfRO=fNu)uON9k6bpL{X zvHxP5j$a-~4ZiP#1e|Rm$L*Ix9iV^q9b6ZK<4ypQQhaQxLlBDX#}7mOv&Xo`wzXLR z&I8^WCVu@FdLzL2#`gjpxC)^ZP68|(Q@s0$A*4u%!dtHKUPfVo$ZoWcUPWwIAeN(J zD;kS*Z5=CY6S2bj@&-KtS6vsN=#JBuHaiS7f3iOdq)?c=02zfo0C;uAQo(VH6E@>4 z{L-Jegi+y=PN-~P9)Y-i`WW}H=(pA*4(h&G)CxK-;Ho5wD@J+(3LT(@ z-{utnRrJMb5h@cYLa^`^)(26KKrQSF7FaivQ-t_FJ$#LJ)uB}c#V{)4AZ;q2xFCObR{oplsTM_!; zpSmunUpzf3{PxMTH6m_QB(z948zLT?0?2>IUI0TK%vvQ(BM-(Aal?o>+<&c~{%ZsO z-p`O(TFCf}<%*CNiViIMVBwgo6#S7zf2M}-f*?dCte{)b)^|7-2Qecr!o=NAuxpC4lm zPuG?%5Ww-CihEehYAMbL%KyIoXaNG&*I_ZYWftGdH1fr$@Jr!r|9+ep0pzi*f9!)n z>j_{!eJBiD1<{4W0)S%|RK8mE4_P5n++T-yE``TYh0rz&2vBrvY}X?mPs9OSKVvAu zW1{~plL!V7v;`5y5QY7$Y@j$RJ+6=a=Q3WQH)-u`CZ1bJ3Zbn$fM^60uWblt6be7% zs$ab!0-^%vH$$Yu)5CE-?syNRV)rlRv%{{MX}LuZ%+BH_bxEr=LA< z|0uV;f%%W^i%I$e}{khAOyJI*P9soxirFz4@8i7Z5t~58tH?u0LLEm1j2u& zQxZ@7Cpz@+_~CKF!Q-(PFCt-vUOCx;UleDtna2x&BJN)WV;uQ@_$OL8x&KUSk0U=0 zRRxOkH(s!A{+sQ=zNQag!F{~*$M)a&3;)J~pIP9iE3fd??xV2z)*aK66OHJ_yMN7V-S?U&l}Q@y+Nn zzMPRkOPf$ALRdR2=8gMNG5V{0={VZpsBOp90D_10<>EOUKMsof@x=w=G5cM8@mTil zxEP6*Gz$HH1CQr10E*CB%nifKIP%R?PoVkfHyf}1 z^wWJ`hnMPCzDu<&2+tn`0{i1x*Kvye5I@_WXYcdu=(+;`ymvp(-}jd`|PLr(sAQh0N~&`oCNSQ{5}igSO|aKEBN`(d*egzVp~Y|hyMHQ zedxc>;fMbFoMJfu5I%9-f7)e30m~7)SPlT34_%Mj1^>_u-}Z|iy5!q__@yJh?e{)( z^SAwf=EC!h{1?5A=X}5P znfr(S@=JgC@OQ_*`VRTy+b4~M0GG)>eoYLcAYgjtOFu7mg=u@t|1VlB|K*pK`^fT3 zOJKgczsJlC&*OP|7zI3+g!>QgYF=z?Ck1@zS%@%ge}U)pmmgY$KD>Wv4J^kmZTXRz z`$~lmK28A^o?x;{@=xDx-7*9ba?BL?iee^On&DvOO66=T4zP0Fcl~ zX(`SSe<<1ytlFWE^B!Rsc^V79nIRAW<8!ouj(cv^^Po&KL;pssc&z{A#h4 zGMsThBGKYC02iEok(JZI|xfB(0hFJ4MG7aA?bX}0Vo818wGwx%68#E zpjg^az&#x-P&urifWo-=4J@GG{C9rNlklN*e^>wsZz=%*;Uhs{l=sI%`P^(77O=wi znX8}sW&oNB=>-74KY#IP!uKx?AzaRTa{o5I<-dO_kQIFX9YWB2mPWw$Jp327fd{m| z;qg+B9{vR{PymMg3qt@JU>ASE_^L&mRQLxj{t?gHY6AlLFSq~%I4O9y%M0J6lG|_t ze^LXY1@<^8yMp&qNd`U#Uf}h91DE6*e7p08HE-Bp!IpLaY=LZm*oJo?(LllLqtXX% zF&r+i#Y+eZfa({0H3to#qt`vumg0@4LO@Y4FHeZ25llEUZk6nvS7Y=fx4^d86neBtC% zvHg|*qdz&sdYZA3^#Z){`S|}i05?F$zdL{ICt$sO<>1#(wV7V^XZV~|^EWa?~`aE1N!22fz z0>cj+C;B%}3GhF9YCwvYEdS=G-#Fv2AAb;7j*lNp9)AD8@u=V=4pQ@tKZ*Xz^_7Rj za0I?`e*8%)zVe6$aBXOOih5dS;* z5Wwv?VExB7#tXQ=9sjh8!t@`%`Otmk!dW{1lp_G&15|wdB*V#y`5x%_9HRX3(|?a2 zrJwjQusy$W0x)AerUkqM#Nl>-)`jCgah87Ko&)-V=?zTxz2yptC5F8eKX%hhsHh;spaS_1zYI=wk!2mV^`9e73K})a& z7dQqKf#3t@v;Q~*x&p=pum7i5DI`!7uoP$jP6MSvNPzRnYnKCu!#hb!;41hokp{RP zR14t_+ydWm@&xV(bs$W@rGKHm2n*gnU=+d|cmAd=9Ka=o77!Sq7SJjL7CYV97Iwhm z;CaDH6QEs)Jn(GrutgECpgV<#^P5B07B%3F@B&Z@JOW+{@eF*tKS^|euRw3XM)t6> zec*@CU$D{}7!+cHm4CIx9QYhILJ0x|h6^@^2ctr~W23~j_{3jQ5`Q30AfREgU?T}I zEyOoAR%VL_a80q%nD4ntkpou(-U@O5nK{`K!rQ$|Ac-IhVYx^O2pd=xl8%G0wj~RM z9QITPL<(#c$%B+X#}}~J7a(T=^DubjZ7vy^bj@Nk2J;8M;j7fYc$K#cUk(Fu3hh61 z?}mWB(??gQgw0wefq&YBwi?E^YR9&JiGvK&`l)O_66rJp685oe+UcSB!_$P;m)Bo?Fm14WdD@+RN)u@20wV-_|b`H^1SpTr#dE*-(^4TI5<@z`wJiZg-^To9?Q1z zNB$=4)+V8{`4aCu!tO-GKX&qL5?%c{zD8K5m*PhZF@H~6HskW+6Uu7-w51jfyd+}Y zTYNQ#BoP0SqZa7D$~HzStFKf#ZJE@*l1?09AkNWYZGUO%U;dkEKgQ&cJ;H-Hh5^%{ zV0}f0whn{!l@{wGxjVRRZAdwfB>`#ZNurZ~Gcw`oTN(PfQt$^;7;0eq!!z z2!G-2pZed*_KW}ZBNG`vu##c%gNuY;T;u)W5gwc$JmLN5h;n{)o&D;-Bz|>3IrUHZOfrAVhtyyA;urth z2IAL8DX00u4g{jdLN8s>c3 z9{GRz&;R%TaqI8X|9*?p{Qvs%)K%^FAAeeW$*=#X5l`iR`Me&dt$-CPaTx_(2@|#6Rn^ma08XG-h}hIzdV_S=L_*RvPRJyBq27;s8OX* z+lmxsw+;^MzH<49JlmUhW!4((t!_DVYpxydq;VeJTkVzF(sN6csLQ(*WnNDJod&Pz`e0 zxLob~P`1NeJj{CgI<2+1A8u?=KbtH#P?St1r^`&ePSjoxy!O49AMYLMJ-w4>z6qPJ zOG&O9(LZ)X#{i=2+$lAZs@`WwID2QSpPekK6Omw!(s7pZ_w_WOtI|2HPk-!>^FT@W z39U2oO=iurpam~&EJ)rv#+Sf-4lFTbvOE?E!GY>sD!0v#3%)Iv!+|_^T^|~f`#h-I zeUcJ29ay^U+lzXC==b}68l}bF8i6&E92@`-UvA^bM8T-9n|Gc1HoDeXVsLDnT$pSD z=UzC@41b*3y~|{*eIrG6B4t+xA(1?!>Hesr-L|?LcW|6aTV|>?K}8!E#;}(=cju0q z?A{wiyj`hed>-J=Ut-lFxN^TflCIu4C;3THj471~+AGyQF8MooXnzd;P~^smFOa8Q zhN5lRvmea|SPf56B(6JwyN~<*X(r~&yL^spt*w`*q{xS2Q?8;?WU3(x(`j!+%KrR% z6II6Do}hoWYZE-LAl2g}dj}_-en><}tGsSiyBi#a$Fa$eMRUJI$L>@od(FDl1Wk-_ zP*<*x#>{^8W%$$_6@Sr#Y&;9i-407HG}7U|%f^sQ_#=6I1RQh4%hookR@PT?)md+d zN%+XpgXqZ;!=B0H&8D5DDE&6`)}gm~IeT4{8gzSS7W7Lyjp98ha@KkkGy!(BbviNm zs@|lqIkWjmolkm|lQG#_1GmTUid(zE%*E=_a32(+0uv9Efk z_i2CKQ?;a-(xTgk9i^}O2v>L zGLVMh03|8VB@3A-RUR{?-nXj?rCQ&^j$CgS2--KhTpB454<2!rPF|yD@27Deh83Jf zb}aqz64ARws)U^y84@FQc2zj_5VT&Cu3=;sw%zi<#(#_K$d|An=`~e@ou3V^_ZsW{ zLvUI2I8S*-Eu!2_TXPY&ed_Og-Ku+?Djks*1~YL$KUvDrW{3-d*Rqq07wSxW4IK&| zZDU&HOy860*bj+au(`81ZV{n2-lvC02(P~Nw7ls$D@#LCy4XsruU0L5@(+{&Q}vSN zX!OEvJAdYUR_C@OmZ42^6066ieB%otW?7wAWb$=QtN497 zzlCVM$Zcd@CPB}P!z2`Oc_k>@7YbsJ9?)aP1{|!75K3k$YK}YT8*Dh`zNEr?xp`Mb zs;doTpoY$x8Sy2Nc6-ey1A5HzvF@$l<-XzTet(*f=hAKHZh1Jhc3N<5AIT@(Dq)lz zub!?gV0Cf=-I(`M$%Df_8(O953 zXI)w3fzm(38c*nNcUUj((sN1gEY)?si!mKG<56;Y#hD{xTAH@82`8lNSv2zD;q7%f zoqtv3^3s#=dZo?*HNMJ*naV;G?pI2WBu(I*vog5{L5^-E3w;&6<6y>=stF@Zm}=We;+_| z?TGjW&gWK|hkus|X*nOTv&^rKv-Dzfx6_B;%Z`@tjz-sx zCcGP~$`b$Oq8{-_ZJi2%Vk20m&s#u^Z)UeYPikG1 z>~t>f$?iP5a<{APzDd^}md*v0T`MdeBKn{zy;Dk~cs8CVXY8MItypzN*>)XOmw$P% zm<7HIw&Tq{lniajNso8$;)Om`SEm(?3k!kti0>Tmf}8Ef7wVEn+I4ztJC|9(JCKrlB^0mak-pqwZVguKx5!C( z=kVG@O^5cI1f-Lewy9Hko}Fz+Er0B~c(dme4vRqUP zaXr*`%K}$#-U(YX!?Qn5r117mZisT~ZS7bTr^ek6>ZRV;fnnBa#rG;kN24#fVqBA2 zd&}lCJ@Q5*ZIbikJ1e{t;c86G_Bg~B#q|1Z<4wtx8P)yPYSlmKBF_RJu3vF6D#pNK-4AY@2TdgY=&Ek*q2>U-p?R zqQMVCmab{OGm7Buy>nxd4`OI&V|R1aXKdR0r0fdOu%tB3-&b?K^fq%6M5mVqjcg@Z zXQdJhzP~;U(uxIr-&}77*?%{3`z#r{A*`YaN(=U=+RS0{j^xM``r0qW*7uem$%*1o zlMIAcb4?eS)|1__1l8AMz*kulS+W=F67jO?TA4$s~= z>zDj&1bWlldG*w%{^;IVU929&kqbGdWQU+Ee33^?x-8(R)jd#ZrMp z>Yf`M9c!d|A*H)|wO81939@xBvyp8^%`aNI$Ci{&9 zFJ129S4>e=l6Nc9|BqP#gk_Pl{oe_ z^EA#Ka~2#lBJL6IJ4ZZUU+W?PGCwX{Q$?+J2!c0qld)3A{vxPB)I9E4;!T+oo%7`q zt1FtfcyS(X@_(FPf$nru6v|z9VN};uJMl8c4x}y5=lSAlim>t3<*pU2*cE1WjH6EG zg244QKYPxko-M~>^WJ7IlzV0k76+P3glHH+5{|asx$Kq8><8;}#6_Bx!Y_68aqonZ z5h(L=bK2Un45dViL2N0DSBW2|GUWOfSe0RvRF)UlDQnK`;n~IAnbm+%kEy79pcE)r-4PDp$GtQ?e_L1oWll2xyJD>`zH*kDBZ6 zb3+Nwi940_LO=5QdEQGE$`jAhNL7jre^x6ntdy|UZE?7tP`oYLWs$_C zeRPFAT!`oGX7YxkKfK;3bI-ch{B&zje@S0Jc7FnqG*xdpUwE|2r{Rgvk#plCd|U!k zF9~;z-WuPa({0Qk7p85RQ;mRfAQk3hEWi*wiCk$IS}>X1klDJZ&ul8R@Yntbhu*pPW9(jLxgYWi|) z$ZB8`=0HDmeGj+I^JcYIN+}ePcaogAynpy@K~58asCs?cakN~o<3ODqSy~(6ai;_$ zYwMHAT5pB*rOIE=LR@D%+dnF~EO`4OgrYT7Pq-A)^W0n4^YeW2OI=Ba=fe%*{#jeJ zuv2WpH70d^q`Tw}11V4FyTvDL#_X$nzQxy)o!wV|zdY}v;8AaNuN|uT7ME6u-J%RRZ&G`3UNhqxQ@a?sX=$z1drmwyQ8cNF_FmtM z>fcirTuz~+s**Z#-l2@5i6>6%6Lhl;kMz|D*M7VomXYImd5RMHeirA$-T>9fGkm_W z)%amZ_O|aG&Q-_cb=M&GPWHE|@P8{|x>{D=I7}CDR^F@EnqUtgSROn0I4$>df4)2s z1@duxj6`qGRT+8@Z9R0ho45TbmFa~|0=Q^zce{_871Zgu7X9fpRasKHx4aZL{*jFG z;;v31b?c35a~6R@dy!2407a%+&~Xv+EC4&?0=ryOhvm~ zdF`AVD;v&LO~dGZbI;O?X*p_`3@M&ZOik!d6f~4~rlc38l3lQ$jpndZK{}OW%VM`r%;-CY>w2i@sjbc^Qy< zJ9GM-zfn-y?~)x<_KYEc_oo|1nOv{OyKtd3y)@(~Uf+7uPf`i zT;9Y!+|a};yTwqYtaPC~CzVZQ!IAg8Rwda9>ZhITyo=~QI`>0Xu&j8hPjm*E!YqO= z(2vyJKWO`BCX6$+X)a4B;&M3HT$kA|yFRg2v&ijpo8HPWkWzQJKVBJ5GQzj)LKkay zpb{Y$^4rnNuGTWp+kd#-w^GiUS)WBOd06IFI zQsA;Z2^rJsTbH_a(_A0J3&U^kw-1sc&xV#HS})Z)MsMfyt3PusoN_e))tM(a;mP&B zIOyA`0Ed&r;(vUY_OGrzG%G{go!xxZIH2#zlI&fRGOn%g&Rm6|Ha%Cla#g$ooz@Ps zDt)VN=hvO#`|&1Pm9Oh1K{q0+dy`Qx8*61o)!S8bDO_h|>UAbd4NuCpkwyxUPNUD) zBvT*LOLIRG&Ew&7BeB;Rq8_SuGc^^b)HjQ?HEn%H0e>t3iun1FD7>O<1J4AW2Cgj3&5TqWisB(|>q6hCU~E1tsz?YT4d{qKSa>%;8bJ znY~RVfs(s>%8)4jXmoJ8X+##U%fo#EQWl?ed6MlHn%-}{OG%CG^6=g~NyqgTP*-kw zw5&-K*T-??FEF~FoABBhk4xCv^8H3d`c%bC1(NqDE$6G5Rgc{%50g?xw5d=iiSBNZ zP=8JxEH}d(phpHwV_XLU>hdZsPFk-+WZI9$?-tRWMn*c%_cJf;1Zghk>j0)mzDUzx zndY`%9_7Aw8cRO3yFz98h;PJ49eD{G0g{xCImunhHti3L(veKXn&~?a#^Je2%-)69 zLN4#d^F=@T2rl%wy!Fcw805R(B~);b-hX-*O^;(lSBx-U)JS#C(ASf3^hMDrvsBx{!yvfMYo*jrY1|-&?R|Hp8P8_QkOb0j8W~p>&{)%WPj?1OFW&q zYKjAzcTI=Mi1BDgYTNR@`DB+DsKl%$g}JRl@z8_xWn{Pavdj(VbKw($yD>;RawtMK z1k#)n{-uj#Ps(ngKuv7wi%hUh193?fp;%HQO16pCo6BOnJjd9GG&?8&eY?)*Qz7SyEE-a3!m&7K3hE_Sm%fEdZ}*| zzcOY!Mb&{)MI{Iq>mEHWHfz8d6)PL^$bFlEk$9&oADY3-IK1rATXdr+qXPwHGmamY zE;^zXfmkg3T0X43>3>%CQPm%}A!eCZpFL-RP>(0i>aG-k4#)%6Gb-=pc=_NCQk0*0 zN(J>v(Ac-7=PhAhdpPHT8ZZPMA88bI%Iv`|7uV1zHSa`5qz~xP*A+J=ST!*cDIuF> z==ywjyhgJ3jxAfWrl6k*3q`ubjxFIRT{)|}zN}v-z+~n%jDM%c#`IbKdhd$jIC4Z2 z(Jq>H-udl_(V`vVz%n>{3}+G!4j7Us;;)`iOk+e3BvUg`Yo8-zs+C{+^tilR#}jXN zVQWoqtMY{A)G5_{m(5L50Q4l#+#?k2yWZ}{`%t9Scvw#3G3W+0f7;zhZko1i7Bpt* zb+IM>r0mj{w|~85Zso!Anj(UJGx(Oa=;)WDorx7oM=f(E?K#r|-;B>_KGmh4hwH9E z(u=VtJ}~68r`Qy0_y z76ehcIS^MWGQo=iae72V&E-ZE*Lw7$ZE$EV$(3Oqv zKZ@x*&zz{LX;7~IX)^||JE*gJxE|#$IA>FsCr)#KVLpv6?Rr|g+O=Z_UH*)s$C79M z!V+Qc%OS~j-!V=zpjE-x9aVl zN363?xn^f@{UX>K0Hbf3jj&NIEu(ZLw7g*m`w+j1-Egax=C+qBkv1jLrB0ou5fq!; zu2(W1{m!f2o4Ll^_Sg(JUzsyYpIKQ$m#e&wgnyllY^T%XY*DRiEFvo38YtNkbaaEe zGIblwsir@}3#W=t=A@D~k0epMy=n2RH#5g}dauo{a@{A80nC#K6}!;%CUl>G7=uYY zGMD5o7H!dP6^#7QIZK`?cV6#YvR&>@qW+R^qm$%31Cm-&ZvY0>8r?fBb?Mjn$ex?o z4uAGLLX2O0`EEe`NV4Tpcc)Shx-_O9;s#2A<1kZ3g*zXCp!brRv0Bbl2`kb;l@wR^%;x(sKLm zsq5X)!n395?-Nzd(V)onyd&mUbwg*~ynlOKb~{_`d!TRPiHm&$YANlK$7Tu-w0_~N z<#KxjW>T8jm?;K1lbl?U>MJvvs-4-u)%2(Ahz!>k=i|8*Ml+0x{`&N{{j2jYInyOq zcw=erK%a_tlD!k-xm|)|`09=GmZ|ViznbT*TxWJrZk4BJy2;e{<|vT26B?a+?tk8L zfb3eVDtJD>ys)Fx&VHoR%j@~)9kMpeedXXFYMhm%o@E~ae*fn5oFf@S6nxd$i;rtP zgw5lA;T{jk&@D!bXZg%s-K7hwf;M-$m+Ei-OfaXhTd%egNvrFNdQ%OV;_tmxaNU^h zW4rBTN;Og-nBD%?0-h5h*wFFPM#~gP_TE8A@k%saX2wCtb|^utvDs? zl1M-&Ak8ffT%~RInm>}hEwjcx9clDMK@$DjCC;^Q>7>*2TFHH_muwhEO~p4JN2*?-hc7zYA9i) zWnDD?a_+r^1DDe3WyioO9OIoA)#PK5z;n z*hnyJMx#p7%vwHO?h8|s6#~Jh-?ylxPteuLt>ufo;aB6- z8_93A1CP&p3gdyZMI7cI_G_FH(Y+=oHFLQ+k00W7icQLR<7)+Az?^ z6z(0~+~9m+8DA*L#1^YE75mAaSMS7-^s=a}DElRt2Ra`JE#$A(I}J7v-9-wcJRE{w zP#|Uz1Dfma`XmOc@+i96qKV5E^bhGPb!o?s-y|xK5T{I2Ewxn zpp|NI#thh4| z=V^Zg(Qw1(x-g9039Wiv+Y=D^Nt4!-N@E#Ve5Q3KZkFYM<*)MgY{dmD(}-IWdzQJn zJsK&QYCYF}*=h6ZLO)S1Z?S?{uv88MgeHp$k}Tr%dzHi>4s`DedDm()oMLcgb1Ob0&7ikbliNuQY4@sa!T2?ki!z{!+J? z3pG(U)Vw(KQ|9tPe3k4-_Z>UmH7={JmL|@XMw6i`Ssf8+PTCVGQxo(w#qmu6*i4bL zDa-_U5L9S%oqjh?V5DMkp}!-7>y{~5f&9t9ITo9YMDF$k(~Lh~Wf@I5h>P%?Of2_y zHIAxX41WN&DL940i>60j?vGr_X;+BaGHX|`QQL#3hdD;zy5l=nbl$YL{fgkG zOuLsX&0v+-S$b^0Rs(Xsjdz7sMb~xMmKv?%tuqpo?IF9E5h+WwWau#(rSNoOcb1d( z7sY?(FHc~bY{s<}tGO|LW3I_NtK-mTHvF6t%70aoSzoir4$klH`w-+d`F{VyfV%?f zZ{^Cf8^^2pdmDnscoli-LXuQoMbln1KQxsB?$;G$Dyh!WyUf|yP~CnrfQCMNHxmte zp7DV{3~jJub}kt7!~zUauH}Fw?2t*q^w}?TMl?RV$hnDcP~@9~DK&~27L3nhAedyH zfPXyLuiOzeONgtc6lg<+St##e7zClkFi$XEd;Ix|O?B}u2i2YeXuN>TH^ip6at%Zo z2r_jkLYD080y}Eh=YDpcw2PdY3>+nRZ><{+m3n-T*VR0a2Tk=6@R*&7IvraK{{m^DIWIDqjNuOHPZVY!k=@ ziMhRu)aNcTxYv>uAy#|hN?%%o2a=ku#5hHc}++*mT8hqGVmHLG!loP4H*kMZy^2Hkh0@9q-xp5c%c2%o!P=j$A8<2y`=Iifq#_? z*HuKDo3HcrmPOmDiaI}Z(lx*eTG$}VStqg^5;oSH#a8U-Gfw8As|^5Ye{;}7F2HOW z7V7Qf$R=kO!`1?~FWhT!T&p$yLnM}u(w7(Fx`HLrftegwJ-?Dm<+tl^ zOoJC~eHS-RmVgCMNeLSi>4_T6`NP*a0^Vxg~&`^B_h*cg>BupJpnCtO4@X4$ZtIQvFbD$95;|ptA#W^ zE<|OYC)wlX_d67SXc>_4;h%Nn!wAWUJxz>!*;i+Yi{7HXZty1SDX*?V3V${w<@BDz zjorqF?YxYU#AOz*>`z#vO&Bphh)sxfFUU)txFB#=_DPGsyt-oe07FY^Bw5K6su%n1 z<(WC~C`ci}X9w==T7|H>eLkZmGE)M_bpMyt)#E``^SS+G1e6v<-l zD98eI`dM0Oc7p795B$W0_;#?_$BE%JMfM~y7kuK=)l)sW-6tC~&cYk6k*5$Et8)4S% zr1Qi|p(wlW?RA@Y`or!7;HaDs1R)iaN=&Ju=vJO$8fN6}v z!OkdVU4Vp%g-F-UAsfcklwf>4@b*+H1%i3-`_HpQlb5MZz^~xZcuJ`~_?Xw$z5xp^ z9o&1qd@xoJ>oG}kjDM!;aHiY(IHO{eglEF@3{ryi*M-dpLd=@CLPzhudG1QPn$out zy#_x%&C#X;N>nL2aw~NzawXR6eShV0pU>ksA)2>wRsgWc52_ms+y(%S&i!K2243Qw zd2rt7rb|4BL;V%+BKjn3;phfV^WvxXsJvu$wHwAUn#dVNe}AL^b1-dBLoECBAuza! z&2qGO8n}Y;)S7qYG#k*0vhSa*Gw>6B4k-DXik$_C&FgOZJmD{+qKR`o^}G6viaOr$ z>A(q}(F`6x2ml{WF8I0;57X+jqiB=NCw%|*J#s!49yRtDE8)U*1>7uh#W_(1C*SNt z%3QNvmxA#TUw_{iFx`~wOOYdN;#a2`G*4%sz@C9GdMD#O2LYCwYQI?NGg4kH9VB-_ zkTNF$sh-znxB+~mvecNyFVMcc*&f01UITj6Rk>ttsl zSr4Jd8h`cgccJ5v24QyN$fq6~IAaMK?`k`XUyBi3vJXvj8vejq&I8U9C-lgVjEHa- z)%Jm9VuohTZ6|$JBBLsd8^KGn(pH?5LCYcMx^@&osW#uJ8PhB{tCX%)dKk$z%=M0E zl)Bb%Ol9+Hy7ZTIfbh&plhaTLr?CtxV<`}Y0)I|MN}@wPAI#a;A~2Zw2IJ&@42`}9 zmK$v5thXi4TKKAAO28K4{Z$(nxoJ)Ye<2?MQXSf?ixyXcy+aclCc_3^CY zFMn@$J-o{@lMuPfGx(!Jhv4}i!d;7Syn89{)E`cf1e=Ks6GL=o#p#En5ZlZ)s0mXs zpr2hZmb5~>0})?`Z|4NHEA}@K-EFHf83l>;)s^E(ucMgeiKP(9y&E)cbaD+qheaGg zOMg2c*Ur~5L+AGlHVQ#l*y* zKp0W=-PvWP&d+}#a+z!l#$=8;$4*g^iVPDo^qD9nl!Oh-98JGnvLIatyB?utJb!C8 zYmjDv&f2QPT^5!djU4;eQ#d?h!3wg#tB3(piY(-bFE4P}C@4oZ;!}& zBjVQqd90mcp~w53Q2(581lkX;c!XR6%c~o^%SH&5NK{=<;9TkGcz!2|V?lpBFyK6d zvo4|@5m7w0q4Y(=Zw}@@%m|N*3xA1AlYXJgD!kv%CZAUq z>cAE*j${|v`*2H&R;Wh@UHGojRUkuEAJ4N`lm9Z7E73j)g8}qdJZ_jlG3Nnk!}^FU z1UL!lG+gl`d68JR{YwKjGOF`v@aQA5L=~IqhBUB}P@3vjUdrH(k0CyF(|;8Bd71I% z5`!ALAz;&y>EysB(F+J)FpL*TU`|ng&NE%3)olPEZkCY_)Y=v!W~O^sP{A&tn#&)< z6!AzMPue3jY`f= zNQLq;0RmKMq_RgmQJL1acYoet<$|ttBKWPnaDrNB5hiF)Lk@8ISNdmo5GEXPL)-Ej zc1>NU2W}x*w7gn$rGl^JWoONQj3-B=)HgHgFD=_s=p{m?6!=c->V}!0+LP=)31W$PQWp6KJsMAAhg~e)x(l&SmWc z9XI-p*!J6G>3W|a{``PZ6>e5Gy6+ z_cr&%KnQAYcF9!`aRM}%8cX9@4)ClDl1V6asOuaQ(2Ou*Pz_kd`d9*v(~!utg*qT@@y z1gPF;C6`b6O1X2v>0TGm`bx@~M}_zT=_qOYLJ2AsgF?wQxqoA;d7z5&Q*O!Rhk$*s=Qh&Tb{7dM0(%MK^XD&6N8pia)Fq%+N?biKOlK$h6nW8z!n83lf z#$T!bylfXE9a@Q=VAC)MQf)zA-`&-y?Z8{N(SnfFYd-5G`em@Gf|~}(_;eI*GG!-> z2jdHv6?clp8&J9#YH-$3jkwOP+JCK&^U<26lc$HNf`7fqZ!PF@N{3__`n63Ba4(eZ zi{VcK4k`&JN18Odi{dj~zo=6VZ-t7KGqV^OJ)P7()-3K-ZL#VkyDW@`y>nZZ{TNb1 zsU=ZgZb-KxtC=RM1)plJ90%!k+*)BXJJLwCwR#YAp!l6BDX-6Ojnb-w-I?q3z4Zi+ z4R9mjQ-4atgfCylT6!ONSeO8Z;tq;n%D3&Gb|N{)Sz?`jk!GJy!U5J3` zkY9ajj}U-}4sUL;evLcaJ!)J4CN4qp z1760;t2fp2j)q|b&k>o>eg^ivBQ|q1KSHabn_@X5L|1Z?_z)_VV*yYgLKz5UdJhgb zKYx!Kw@Z2qzk^2#F&`#5#UFh$&MCY-u1WR2mc;oQhwz8LCscJ;*}+_-M)sud%BcvA zkUn_@nqYsiBQ=I#c}-y0!DWHIMx_ytlIq*=c4370_K(La6$P#NC80Zfc4P1hfJo!l z#E0#nfo|A}$G$Fuv7Xg&0c$QG%ju22f`8odDm&RX7$W>abRb}exREw2Hw05ak~w*K zdW2X_pOj9gNw7YQvJyD{aCew{Jo^2p9ym7plHv-ufF>1MQU@#&DK)~M#pS_39p7T* z5Mur!V{{$J3y_r>ykTzx$(f~M+`4DY^u6C%rn8b8fdz7=DtrW6FG>piU~7lA9DfW3 z%%l$!ZDSzIvL()vAECkptTa8c21z2dyL`~gbZoX& zb-GprYU*;4C>}bIj1ng?#_J5732aACPdnNSYiN&)VowXlK3ob$F-%p+A<_T)i zZa%-eCKr!V%5?hs(o_N)2s{OW()|e!)bpCSQWQWHh#U-ncW|!}Ba^KKUV_D96cuLR zT+jD+Y#1@@njm@KS#UCyX7d;(agHHFM6sHRKQTAphK*aHkFM){SwOG2)qjdAP{;QV zLWtmXnTq}X6p7^_Aiqwskb@%)8D9ftrY4457#rXw4fM%Y zCMt2!WjeZI)JZJxECEB|SeQqmEP0{Y*^Q8x{zdCUH(Q#(i_oI>Z~IlM1Mv?^j&0ie zS+R~c&-9j6K|1o)Xnz=UKhHaqwuVtIChJ|oNM5!du-W_2sLzG)T5Q;x)*30seqbv< zNRe0#vIN&wKb7SjtyDL_SCg!-n@qBAc8B27@+7Hl7NA|4^Aga?f}+-$`pLl2YP_YX zQYha#zn16Jq1;nW+@O5l^yuva5)&l&*d+MtS+N1zm3#M*h0)`TYF1c(FOX~@5#f*MB_Zj=>vNzuXduY_A zDqG*3V7_uw`+s6I^ZcTnL$=tyvO-jZDP1}o#k|doyG`rn9`vVnm*cMdPV=Q8a)U?RJXgX-tzr|QU8lD@z z(S*MDGa|jkGD?UwA$Teuj`l`SFAj0+9*+5OILcmJ*zWkCOwdQJL&W1Af`w6 zwIC*>;cnk@th9R>fPlj~>Vk%8pLO6QHtGsL{lL#h3SV#y%q63J?fPo+J8~MC=a*MIOgIz9&7kbO5tZ}iIS*aPb!fjn5pL! z?8+1oqui$*8W<5?16RMAphump%%dYtcp||xO65c9N1k6v-w3#`mO>3*HXNyD^Hal3 z#E^-bXpst*-wD9H@w(j6ozJRS?KDWXLEx?su?uP0+vpo6eIU-E)Tz@hgMVm%zI9t@ zFEbeMmg_k%(Y5@RQ-RAowwzN!1nMS@!lfXSvOmwqv)fk8^ddO{cL* ziq{=p(9Mk{QT)v{kH7@A?bA3#bHb; ztLhEJlp$J#0)qA5U4MYk9S~Lyv=zHMGlHDXWXTuo8J@d!DzkYYh#3x((q1+_ zx&|QQ0Vf^}XMeR|IvLj*vLI{J$@W|)T~5u(pxn-nN>FGEOXNr!`lbOU6MaT_-)g-? zRecabn#%d>rfv>P(PFj`(fc^O(sEqz2l=(N(2l3dk0`|e(Ky3lWyitDJ0){ZgE_Mu z61ha`VH2RoyV}H-;gUBWjP*7a9jmD{bQCh=t zae%BUPR0!@VpqX@i95Awrj8s@3kz`CgG+oa4;tW|EdC&Ozp)9HrflH-PN@oEs+PS-0Yl;WvKuu^uD)C@@|5RRvO-xoy0t4+6#ggwh$8_1I)%(DJ*|Ni{nhSza)~nZ!od z?6#V2iE%#|b)qrSHo(o;B|!0WtKKa*)}D06jp$zy9$N8b_3I-|w|YQzj1j zcz{v{P>Ho3S)mANBif)o+tc_B+)^~ImK;F9_k?=5(1@1jPL-rh_<~nGw20IfaH-cl z!=3G}{Zp_?0fWXkk27dVIbs1u=coz2l4t|L5&_=H%$PO@cE>6^rZ5B=0HOg^f`9y! z4Uyp3td)Pj;8awC$lP3HE4$4p%)phIg9 zl2Ti1r%O5baTMofLB5tpLqLj|+;XtFDciK$z?b$D@G8m{Ai~1%T-NIR_G|ogiNJd& zPHE`0^My&cPHWESFjxfXc#%T(Y{i!`!Zl+qrNe*iP*5rMR+P>^mybh5zA z1ARfShA?u)*VA4jdXdz+602yCWMcax!2X20g$-;VL@)-v!85rikW?_n%HN*Jn<1))<*-xG_zI5K@yY}sHh(t1uLas81g8iV?Ai_+Y1q97{}jJ^0J2<& zfVpTdmrqcsAjW^JvpT8VI(e05a??H7D+K^+_)7sH{btLTlKNb!ETR>OFy9 zkVRPCm%KQclc#^@DTemBt*@EkFn?-}1cI&^@S}Vu6yY#_ABE>5OAU=r^;jb;yYqj4 z1yE#Bd*>k+j>+v$zhQ+?$8aJ`CSOgm<1BLv2hG4z9IEHcXv;QF{?SY>!b_*RxQ z5Wb|pZGZ1o){NK}5u_%@N0~*#3|oKq-+9z^xP;;)VWD%Xdmws#{P?RnWR%YMaLl&! z0#P3m8)NL-b~1>?7=In^53R|VA_q{U`>_q6qO zo*7r+KU~xolVhOhM|M3556%eVoYG@4%%GGs0~3Pba+lI@%Iv4zYj{zAS@4YN^Rz<1Wj<*Dd>;la$Vi&aoUYgn}&deB`H<8vp>G z9i}A$pH-q`e%x8yS=^vXJLFOVHsk}(Fz9_bz%ETj<|q!4t0DPs|HXd@j;Najx2?Tf z*ANV`4j3+rDOG%6$1VeJl!Ci(iv=$Ij)>%?#5jTK2zvKeVE+^fwPNN}-gJ$&91|(h zIg@8A+6=x}@?bDC^AnTbng&%$NOwo8#6&OAn4=Lnz|TUo`yD$K)@VNfr^1nHp#8j? zWI@@BM|F|*wge-FlgEEbILByOKHcy9{q_+tgR}w(%Q2LQ?qFNiAp(hxDi2A5)GBwR zz0f8P7_-P+ zC#H)_COe9@*h(KI)VXO{L-vh@*r4s{%z^T{30|JC@t!z(wby?e@En&v>HsMQvVgmL zxvcT?F%kz`I>HWy_&4*7Cj{976rrZ#ND=;_;$4)#{1y{`dOHR#i+gcM6L*oXN&aRz zO>jQ-lnK8X#j7$pLhl=b=1j8i|OM1m}xBZVPGeK zHc}R!qsdM9*{pvPQ34PUdNxYzl7pvW>M=Sf7F#`I6#TghWvM2@Zs2<=rql1{k2AuR zBpR9B!5>Z>HvrC&G0k@9e_=Q!z>@d;u^N(G(3)k9}lNm$9+ zbD)(;pv?9p2t&^;fs}k~k}*W|Jh|e8ni_Dir3&jik1G)eD}tf3qO|tI#jD?`FW0v- zyH~w-aigt z^x?}{WIY@=1#MyXWf@xI`uol@ArJt=Z%3|$lGA#k^o=jZJ6x@ouZj`Xv^Y|KObTAr z)UWaeYd?7*$+jt}kmQicK#Abdz`22O4KO+mJ8^%cs|#9I%l>#fjjPZl{lr`Z{X>9b$4oS11>oUyS70vS}0RC z7wqG)&TC;TY71$G6KqkPk-A1;&n+kkUkGS5GFa|^=_&S*_U7OD%2Dn~O3o<-5pCYi z?X-W>dw-t2Tb@glrIL`Sni;hH{Vqkj{l`%K-~W&C`VYbSKmOj(-`k$X(P{10F(FFR7 zbkcuv0Pt@~Yrm3Fc+92ZPt6GUe>-h|awvc77q=u`Nkx;R|ML5j%>4NS{wduVhOram z!IQsJ|BoX(;0(i-lsueHo6Jcl{>78`KV3omw<0k`bJ*C-NhmN35C4-H$bSKxEu;01 zP9+#B8HO>zzfDE+FO`#>BS9$k3^5vpVTgYfGX7S+6xoUlLbxgAzsv1kX3GCfl9hjM z0?9`4Nd8<_<==i%`|mC!;i{GY#FSoXHgs=vFRz9Eky{P00EHa ze~zhIzz1QI%_;yWXujpBKG4i@y2*byzYp9*0qRh3hpp~|7+L+aPKz^G)OQX<{kq^M zz~-<1a*ISY=B;{Pb*I+_kNm2Ib%T270Zu+~Wnu_5IHu&zc+efcpRH5lH4Y9y3(k%) zVNmA*@1}5=LP_KGQr?sY_9sWUo$7DSarK|s5-7b!;C$uT$glaTq-Cp<%;kTCRaM=A zdizdYYWzhlxs9dk{`^);?XegAkwYnQT0%*^ygsRg1w6z!TVvRF&LyBq3ZHhTMSH+Q zl7V`z{;|a8yV&!@T*K?dW7mHY|JV%y9KV}wIgQfx)s1VdIa47*;UB;T4`K<_UfX!5 zpH(MY&06x}4{a^dzZ>$o!wyCzZq}92IwFri20GV4ulCG=IiF*9mCm2UeIV-e)8+KA zYi_ce=hsda_QU7W%iCPE%QYmoyHV-{|Lq+@aJIrD(Hdp3W5@x^K{tO78S}5xE@Ses z*tyr)Q4t5&yVL)AQV?UpmU~Du%;-}c+t)QpGeji4Nrk`tUvJ3;joq4#XY?C#n6i=# zPNI=8hLN~FzSYHs9J26zwch@*r_4=1abD;Io$$dj_`-BBg)9eE)X!@yrjKJ#f;xtl zg&A#PZ0!aO2eDVAv8#WtQ7m_|CrFw!V!K_`*$>2nK#61EOLAAZus@h~W3~0+o;}MI zDTrf-KAm6exANpaJ*2SWuy1kJ?Os z*g$plmQBAWHr55KjTPwzgW{|9a}X18kgS6Y3kO!_E@PPxi9q&SoEb7922e8?*_32 z%;qy3kH6pZ^QnKpJJ&Kdy&CgLvG)nhR@ z1OYk;tjj(A(yc1Od%CoSw{opY^Es8^eroa{2>;iMt)g6Nq*o0$U|iqa7kIs$a#cjj z=QL}n9(u~EbgD~WeOtfu#RYd>(&TSSrp3Mr@0q?j#jAgjGtDMcY!Q0fXD@ZxQ1F`UOX(VOw{?;erlv3x_@)!%4Z{A~F-Rnf_vEgTO*$XrC z0H>2mv^wbrfGF`kplvFKe7(3q6E*G$%nwOP$~P|f{h(G`4)G77W71mO{ubrl`y3)> zMJFEZhva`b#}@9hLkfb%B{0_75hi#kiorLq7|}sf(#j+5|+3&Varp>v zSWqWAeCK(9VhzNhR=>Ui9bV?39^uRD>(}HHxq{#MjsaX9mBY7;w8%! zZnn;L_s}DFf;L-UB&=1mV8GMS_hZFm1ucI9Jz?GZ;RqfHxe1PoSX2F3MB_G|pdV;3 z>9w%AzzXO4qf2o*aq4y-xv?!5O=q&M0na5|yD6)m* z$GLtZlliGJ!cl~SDr_!2DV1yc8^4Rdul_68a|OQMSoZfLy0t-Dxy-4O@rv_VzR!QE z*|Y2-S7c9Y`5q%52ZN&tK=x8Y0`Pb+nX$^~EcpTbC%6_T_`ilu8{6@lt`-wG%Xu9bG^YbqYpE@1r|j6{d0T&yNmyDl zChE-?tICz?mkFd&jyjp zK#3yU50wbxMT)i|?t5(z`ZvEQ8}DytHT|_~c5I%Y-zf@K^B1Ydo|Zeh7xi8Kpm?5? z3O^bGG~||2h6z8vmXIq4cv*k{C2aPMZ|ECMjS;2Rv0S{e1q|iO%%UqMV-QP(r|r2KRoN3f4W9WDzn#-GgNpn*!s;0V4sM*vwpHZ? zlU+NjyWSvyDY9f+5qPYJz(txqu)r^;1i!G1CiLCamJA%~^l-Zc*7Seu#FaV&c_^{C zqA?U{Td2OG{PcQbwj}*&Dce7h6Sa4qPUgOjk48XOu@Dj=^&uIHsKZ}D{0lm4kO*WK z)12#KPl@!&MnU_xh*g{19$L%iO)OR)#+hh{hCz!_1U`zUrnlPu_W6m5yM0bSR_{D^ zFWF_fGhG@r@Y|;^gHL}_((bk&7ioiNg{u>BK$sw!X~KU(PzC1C%#h=B@&o*;XqA#$ z92i$xA)ENMRR>4urszf-N)O^QzFyf3n`@=VPukj3UcUeEaQkarQ)wO?CtMgbsf<5Y z(f%-UJnBK1hk@4cqwRI<>UAtGr1IoJ$W`NOh$%D_p0e|_O7DNI&Fm(C=^TkOEOwJ4 zQ#D~o5hyC;zmXBCwF7tBbF5b8QskrG=l=$ja+v1Fr;-sz@0?>9uoE$RAs21dNeqvZ zZ867j^WN}Bb5VKedIx!L9@7c5{9ch|j2J~B7U@cqh#G6>Qr;50tx^MZYBssyfo~L} zM+EVo6vQhAw_$%4?jx<1SSL4i%Y$&aXaYaGVax0G{La!L7S#Q`Q(r@WS~6T`Ww={? zb1H=ZI{(&N5NxT>TKAGq<%*L9gjjZpfu|O>&289zkuKm;9s36fx06AWWXhHz&6_P0 zm+2Okrh%Z>Fr1iZK!yGvw(eSILoJO0@C8xeR^skXhkt*#>(x*Gb0$+w11$;3&gR{$ zXNApA+6O*nI0AoDzl?22rQHl=MQ@agRq9%zmqvS3C*z| z39c=hN;ZE{2!-;hi+zpE!#e`Iy!Q_TyYmSh@7;OVby;e0;M3S(7C;pVtHJT;MT)DYRuSojZ-CKIa zUCVeB>9WvW*GG{}|Anr!LJ;PQ8QIs3(tf+YrmnZ;5M(&$?|oBELDAB>(Rs(GDvfLL zrNrud5g}jB1ZE-!VSWUks<>(7Ak_l1ZZ)jxFP8Gbp%jHa83iFl5ePF^wQ_PAM@3hn zQz3ua;Z-Fb2}!_~Tn#ss7%thBHU7yD+{dSCF+u5&F?9!#GvDCJ>);BwX|8Mm1!HW< zD;bJwS;(SJ!w8+yU}otmyi*x}hx>KVFWIgCsmDWKgXwMjYHmwS9G;?mi|oYPdC{$p zMvB;p{_rL9oiI_MpZjZnMJq2x(!<~1qyvADOG-;nMouc$8F)@%m)MTY$~LAh>`qD& zv00pJl?^KJYoIF2GYOBLcU6CqV`6*2)FLS)A}P(b&>o1`=(1vg`e%;^ZRx3fnNwK| z!l!WFnoamqeH<>!zAz3DyFg5ndP&=Q*<$aMg*c11+=IP0?y6p^Y;mS_eg*1 z!i2}D(b_GC#bxbMOdy5;JE~SJS%7w3u6O3W+0nRf?%qIh^;PuvvPvQDmK5&n!+29r9o?G)VGwGHv%JGfDb>@ z5w}<)seFzy+N8~5Rk#yJ$p@J?bn<^fU(Y||z~Z#zO>Bq@NX)QoRTP9`{9Q-YHU`z` zTg8@D^DI;j5|Uv!+NfmUB_O2S@qlxqK@hg@f^BT}vQFV#??`y=WW)ZhZXSn3sL%QWoeyYS=?SYc9R6mNA?9Amkz4c53672SYe}WFz z#u`>eAWeD^r^4E!-a%gCOpDpGPYfuizysns)1A|_fuCQ{Nad-$S5`okhfA&hKEWZb z$Xt?WK@B%&|-hdq-hcP#8c-z8l6$nAzms(1y82;MT3qs+p_QS{hX33AqE`V za@*$%HG#3jHVEKW~FBVSu(6#o4lxpNE<*UZK$IDDs;SB+AnY%dc1CzuO@#^uQHnBLaaEj6wVp1 zZTe*^uhQ9vtG@sTV196KB^&zfngYx&*=2A7^F{blgI z*|V7$u9wQg#BuI+OQy7Ucj?2PjW7t%O36L-lBb?}7?IlXa1CvKu;%SBzb3r09D zsi9_>m+?orO(Fe8+%_zfBo zksZ!aU!5on?(%h2JRg3Z6(}?ODcWEKYbNxnvGw21L7IPr_%FR8xUzOPc1+3y{k_Cy z6uwh4qA<{JEh8RQMM~*KzJNmvsBNkI{nEWE2sw|wOjShAkvK$;<043TE)K`s*l##K zz1Wm_RhJ4vdURdPr?BK+QCbw0rkedQ{-#O-7q3sL4UPI%Yz@`pEbCXf>cI8~zs%>j zYY|!d!GnLp(+2LoM0U#J8>rfO&>$9bBtoJ~+B}76X|JWz>ay@GIVV$&vs}NPvJHQ5o10;!Ft?r1`5WkZE8A~f4C9-Go;!j{4fq#<5sISo*-vB9T9h!a=KAyf&6z1( zf1QBF2gg}^XElQ5#-3)ZI!#S87iEK#sE!-S@zf#UhAmvps{8`T7C(l~#nMmuEO%;~zp^OBxcJ6LnND7wl@rUXs# zX!Q}h^iMVmEF)1h`Mx5y6LgybE`6Qx5UKf4$Y660`DeDYLL^uZmVXsh3_jT1xRt&d z%~@VPs#8f8ammU2&}Lt?wvpYe_VZpVQ|3;I$!;skJ@7w8^BhCkhP~L+7s<;CclUqV zfB_A>gifqC#OJSlEhK!#{@yS4-5tnir*+Dr*(GyZC9OR zXr@4Tc~Vx9`!HLuha`%pk%IByzJ?*J@q|eb3g4ODqav0L>jggt#d^-r=k4AWz;)-K zW<>*+hs}EZ>-|uweYh;8tVF{QS-F1}dZTaZp%b~uDTnFECeBM5?M0I}!t#e=ZiaYZ zlvw%gaTP;eE{Hspq%j?6=1c{yc5sT#e4&Te{DNDfpq!iYxVC~v*0l7_{-d7+nCRcP zrc&Rw>V}NJDgj#^AyYg~USaO*_pHpZ`Ekek?3M$#(3T{3VnTT#-y(a+HSB+}IB6nW z!sd*1Dh#I&oi;p#mVF3)>hMZlWH4sQtWivLPI^R{B+IF;c0RQ*d?=?8mk>U+x^-t^ zJKI*)7jPkQjCib&J(X`H))r;G_9GCLn|FfUyYaRQ1YD+@l)*YmUM>sT&>4o1FYVml zt1j>KXpEKDA_@WbwXT2kknn#_&v5QAG++7)9J9~JoEAvo#3bl(I@WlntNPxTl5){Q zj1jz4?6fN?R1Hx8^rZKUg7&ci5e;Ojyj{0`oc-jiO;EQ^^%Mgt-H}u6w^n3}VPKF2 zNp*8nzWWJPXrmtt+WWM&Vf55P@x!Boq{XEZ3yJPA|5d7FZBl zt^i}${IuMoAGmG?Fo$JUMcjp#HD$5@c)_8oA1ky>vFj#|8z9I$h}1#9kVe5`nd|Z^ z%-0&kt=Aw_M?MNa?HYfhv5RVgh>ptUyB>EJa}Zs~Zk@!xZbGVmBjqXAy<4%p37UO& zR0qkJhw%(j2KaO=`TeM+rx8WQd<%nE>h@^0K4m zKS?MB^#{jnD6HX}`(iX3X`UZb-=fsll77UjT<|hzn0kzCwLE{NX^9CB9;#z1y;6^jf`nL`}Gk`yzg6W?%)QJHa;|)(c9AUvv*zJ82%hABmbuh{%QDuK#vB@5Y zE;g|T5P%3%%qS@Y+!#BUgCD3z0LRJ8M7XVTzIpfKwfQhUo#WghcI>&{WclV@RaA#S zCQMe}1R~`EDh4b&djpjHKD%8vec)`O1=6h_A}TxRgp_WR9ILl)<-Ui|V(URbzE5x! zhhNDIR*Dsk%Cp4?MH{0t-*SI}Em1k56mWf-DOR5t>p-FWa4ly6P1OX^3njl0BOz50 ze#AeobW;_W7zMXwG*0u`>=AJlEb<;wnyg+N|>AkJ`tBb>+h?ve=L89&DJEzKdPwaPv&M{ z3agn{bLWY&8`UgBX^#_ttM;BVSpB`2mGGOE>mq=GdsViu#bU*Q@|PVKMD_3sLTr|+ zH;ee&ito3&`p=KhqKG~wDJ(V7rVFYUgevT|LSk5G$+b1$f;udJfMQyW3Qi{2A*uti z#1dG2QXm?b{y~4itg;(~uHePt3x)DJN+orzERxBkA-LVo&&sv9S`)UlEg9|M@GzdN z6d)_Frf{`(UJOD)b zE|A1FV)?CF<3tZJ0_qO}EEzYwL%zMKN8uT0f8x5L#?^lsfV=>_T7%KiE3jc6YLTBi zB&%Gw51}-Q>P5y0?e%f4@SFn!y>`tc&1K#8y!1hzf%Y1^P*QojVb>m3Hc&y35$0|g zlh7IdI@7(}ubzUO@$KTkX?)Fw<{MIhiKGfNuHUFgzVG>H+2}hnojO5I^oi zZ@~PZ#|GtiCL=Ntz1!YcOIUvz$dC@6yc!yHXwiD!2q#Q+(2Ffm zvK0*o5en(w%CB#B{ESfaVz*|QW6j|^C^hCSS7{4CG^RTJdfE=Vv>joeCp zKF)vo#lAp?LWHFS-cmK0kf}tPzk#68o#WhmxMC^4&PDbO2kj&bFBw!K`Ix~OzwN`= zM;TRk0s9>9Ey;su0hO->{%Fi{)FkQ}n>*JcDTV^~$ zuB`I&DhpIJL5=}A&x*eTSi4kxR&o@7JCTLc(zpXvV9cl%fnrkQY99nGE%pSCpi zaYX(wp2>UVUQvHC6#78g z@x$IGJRr^n#pzd^7Ear_h!5poOR5JdEXA}B9PwTDW2?ci0^#=50}WipfT0nM?C?0}x&(7t&YR(*0rlrt?*I+{yt}>3J;MYmm70e9 zq7LQflq3-W2kQUSt5s33O1wKP_q;|ki%v7y{h|l$fQB#o0~vTZC7T0%Cc~rg)@>g& zCr@gN%r^p#Y(*rKHdnwE zY7(n)X3af3YQ0x2|2MDZ!l>@gKsIdvC~uju{M~*dzGt4_W#+8&sn2kVDS(JNKmbcKy9TZHIh0>PIGzl{)5xhoW>GJEe?g@14$$UhEDJn-HjEXbX{e=@wE{ za4J0pjif~>Y{wbTC#iqK{*1OnKBcCQNdZ`^GTLIA@-zI~WBw`d{Q3&k8bK{W82L7Z zxsxpK9ui@)9?)vppMT4?l%c#Ywvrw`U6Jz7Bs>P=`V8ZA^DoQj8vUv;6By1m-Yr`4 z3`6{9QYhH*yP6ueDFH|b=08W6X$2E7=ur$KhouN#*zGpRt6+b73$LV$xPrMetphj` zjAuubl4Ao==IwVh3rVSUleq=8w8zmNv(|(JfQWPmWj!-Sz=9LVE5jeqM(7NnOyc9c|u*wv6S4Bq9% z5Ta^5Uwl`PVWoe0U(CNZUgP9ZoA+nP3Ds3-Rx|ETKrIH|C5s02+{E3ge%<4UPac6$ z$UylHATjRI41GXJHVbwz=el@X9(-pCPsOO+Hvps5HP^r@d+UMARgh~Y0mjhi&0|R^ zZ}Pg9?K^macTzRgw&}h-K;k}~L)aM5G*^lWPalA96|{fOE`@ECNaHE|7TNErjfW8o zpASd$gDA1Z_{X#!BPL%!R9-(anbqH~w0rj1`0)Djl4;Ht2 zYub8p2SujpL7RH8AlwVF(wXe-tlJrN13UW{cj70>iffrWqd2AWKBes_YUAX*!R?$U_Li0Dv>AeI zH$;DV+c3=Ol~q6SXxC^4rSV*AFbr0Zo5ZUN;5P~2< zhhL@{Gu192oPCeC5K{%aY&XvT>Y(Fii1mNMsaRoYu3G$xx1k~ zYXYAGF;R1}_~*$94EX+6z?8o)EZZqOX#dny8!YKvJRSv$39V2T8JF~Y#DGCFB5{9) zkXiCYF(80ZufwXkW1rJaw)@7mF|SWiWz9PU|1J6Pe1_L5oy4XRqwAj*nIQaxjonc= z{u#{!Yv4pQi`_`xH{6p6WO+8HO_arX zRtRz9BN#&Tguyv7OI4oHF~#A6TBrnpSAU`(-cu3K6ZLg)45eH415*pW9vec>DNXpa zWL5DjyYoVVv?&LM;>7N1oQxQ4(6ocK5UZeTchyD3+w^d=F8+-(xlkN-IS7AwDwEb; zb^!t|eJaB@ikq$XeBKuDdO=7xVC*+&Dj_+e`U*PpT+OmEGQRPCcnUvEYLe6JTe0FdfEl(hisDx~5eos!u-?au*5hd%Ak1GC7vERQ-oW#7KWWO8V;Tt`FnRo8wj*YVHDI&-VZ&pvOXLSR2pfh2!!X4bt=r@2yACd!Yp zbqC4>-IT1-_A!4{yrNronzLT;T}k~18FIYdNe=*0Sb7R;08Zw$7o&AetbE0y)fMxZi5sJn9T!}xzhvSQ?{k0zh`H})qoJ(QFy z&h9qAIzkl2t?`C+E@Fv^(jZs_SNMxkJ*CQ4Y{2tDB(LTc3!-NR%Lfg_TaHJSJJp*b){Xo{zP~>OV6@64Zz)-rd?=K?%ZpmrdWA^AS z_ar4wtUZ7EEbXU)xtK7%`1omPyP{DJd61px)V`{&%+5~p99LU$aj;~-Zb4!ul1*`L z^Vt~-l9lkJ7ZS6^@+ww5yie#^pVeiRkO#kl)0z6+oEHJ{4~^Tztw?oPXFiAXfHYwR?R7~6 zE&HCGGAtX@%IjhRR*xt($FwLAh1+igdmGG!huR&uQNN#%XVPTjX%TL11OfBM`Gm;Y zZPN+tv7n6cI+m2Ae}xv-HwY+o`-NOqN4dEQ&+n9PPrbf6L1NVaK@m~OX-H~`jgtWA z@~?kpyl-mOR6ZRKc)02P>=VRhW&A+ac`Y0)*T84}frruu0*=0TZ(cB^Y@5mZeb0nU zpkUVxz%q?wAP1uhT}x*ySx!aaJ^*}NjA&G>&gDc<%O3~2DMDQj##6eP=Y3>(9n7ZX zKcEg030Gd~J()I42FmY%6o;;7*rqYq_Y{Bp{En`Iv$~(Nl$X(?N9MgWsT#}&3>z_- z*}!wT_H%F_w;87QEk1OA8#pZ4pFa63^7(%wqcSnDCd?s*ZzRSQv^bT@H2Kz~xy$_v zXk4%h<6vUx*B{+)=d?Qo;~rK6K~Q3*Bwfo}`li_610w}<0e|~-%fjl zMUxftl;vKMazqewGR8(&hQvHJd~WZ1%M>QWs$F4rMlk5otrUI4{J>7V?3yOaIi3Tu z6bp_Mku|NEPiXhX8o`|42DOxS)knY%+L2{&SuLX1vWi}2|I$6x!(&aAma$JAJk&3# zW4fdbJSjN*eg?vW7-xPB>$CQ=rw;O~?1f-RVtyXYmUESuqG1)EJDDJABA_8O>Y5 zC|Mvjnx{|1cmujxQB_$ej^V2y5Me}H!@J%PhXWXBikXf>RQV|q6CVpFHC5m%Eb_2r&cxEjz4qn_AG=JQ(5?fNix7zGx;fB8+QZ{FDhl6ogCt--b4 zkWk9QWBawRfz7Qu{I*o7f z#e=3Q3Q(etJRQ}QgYAy98E$?H0{=;X1C@58K8gqiASU5Xmz}- zxxpmGExda*tte@}t{nA4g3`@u&#l~ZL1DlKWt=C#8eX_=Xakgj0@65Qk0<_7Op)nM z9lI-N<KEymf7wpS3 zmm3kNV;7sf*6f6q1gC$$wBGCa%=#K|OEECUd;4dlA(iiKk%Zz|k^@w)k#ChhVRKF6 zkv_mF&OOVXuyX}Y(qZ^uV4P)t;Ci zv?7Nyz7}S3#&M6!aYm-hre|F%kSei{(1pM1QC5x%%(sM5gN=VGmcvlHI*3{B@6oZx z#We@mJ_)4Mc08xNxs)?p$b~(vYkPu(Q9pOH2A9M_((zDXy6heFHd_`NurAA7QZ8wB zwk~uLtP(%eJ}i%zxr0p0E-Qc`W{nM+;yJ!xDy1O(HnfXkZ~_9oPklEFb-<_A@Nz6{ z;GA~HKzx>@r5JyL?~X^DB-q9)l4p~$@1^;Tf>{9tvoj*MPDW4gBQm)8Z*CG84gP{$ zI7B=mrW>o!`my}8G1k2GK)-b8d8)n5VD~JcFv`kLDY*OBjLS0y^lUR`44AJG&0{gK zHEs;BY=wwv@Ts7+#yc5>`NLI4`32ETuBDQ;Hf0Olkn4X;WMv%NaGgr5Z7Pi#V<&tD ztBto9R418n^28YaF%m1HKzFmuuMua$rc|^^lHzRAt9b#8s^iFD7rLRGlKp6%t>5v2 zI)L*MRE!h-Xow^DKo(>rr7bmMBrGOOesgOw$Ehcg^M+m45=x$Tf4*?IGNd4boA{lD zoYxP~&O(0zC>V_2hhMzAF#p(O*N!P`JPnU-S0&7=Z&c#uW!GVnZjevBk(pw@&z)hF z+RmddX(Q^$BQSoRan;wspnQ3svfH{w;31F#xD?Xca$jtbt2E@QN$)BI#P}PiT9O}3 z^jL;rO%Xp#6^*LFU~eNAJ=nA6&G6R>w8l9xZN7i8e}uWZ!+nVwlivV4T53~Rm&Vtd zk$Vul`Z z0j|3qrTc)-J_Hmk&b3Z7S`0N%P5ds+>taM)0V*@qonD2X5H6Il2s;HC2V1OYaH+>i z>Cu0pg`$F6qU`e_gu1#T3IOuVkEZ7i8gLKi`@F#{QOmRYn~T7fJxKQ|(gAz_J;$1W z*C1q7W!^oiw3)}NGCM=fI-@t{s>n z@Ix?yrNs_LOw3UUCDRgS<~t?CHZznl3JGNr&obBh6BHe-l`bLjPE77xv03lPUCKz$ z_~um>YuZ~O^tD`(7?;6l@2!I>J!hUWW<>>n_F&moH%gy3Q3qUN5;n6ed7y*BB z7NC$i#v1%k?2QHk{m#Zs>ixyUS+61~qM1j!(HNdhK3wt8y#a*%*B42wOPsIx0?axN zO$j@ADLc5ehhp4y1D`AVp`o57U5vmr+U13M-B~^m#~J!g-oBmVIEYn+{MyZKFpY+C zyj+r~lU}lGHRHQ32s>AvB}Uz+sQ`cAERnktA3p0E#lY7G{lW(4rdD0OsAT09HsCKS zmDy8qny%pw_}HR>?9`^>_sDgZb&DN9JHsdK2*VGVGOmY4^h^m=oBh^Z+G| z41^_d?ewPwm^`1&;q%kY;n(R{jmsKwBIMSVlx{U2g~I6#wLNvdt+2&r-m-r;q*Xuv zE@l#uQUW%MS9A>^G88U$zbe?{G%0?M%!o(sUsxsfALB3iIkOM9a?K_BK2hnN|I_n~$(9Zo^~L+$@hQ z=(<+zk5bnmvIbZ^s=j$aoUODiu{Oh4E=^Dvq~?%!WRi!j^ycq;T*Cy~>OX z%B9xd6{w-()w4k(FQ0o4*FL%cOPH7m5PAzID24UO#o6Up|Jg8nth^aw#gCLV6x5 zr*xP$z-jSffve143H^U+2XxUmRUZo9stcE>XvejL#f2_lRls_qen0-(5%UZJH1+{7 zmeN;UnPQ4_J1S(Ul~B7Se8QJeA}P=El;t&fE`vXS2qh4kLAP?RAkXHV> zz>`MwY&V!*5UI|Q$kk%um+LV5W>i|W!b4UWy2@^auO&`aOm=@@41}kvt=bF37iK+x z^y7kxW>3eZ({2@-8*a_EEa{fw7#=hJZKr4218Y!Pek3;!*ipu@feLQR{ecrK!Oa^mGNWZ1abpZWps zv=ifrR)c$wlFvc3&TBq@8=F8A>9aW_9ZI;5DfDzYA1xu7WJ=^7BcUBnT@$H^n)&L7Le zLL1{s1{xUb$n>%36=%`-C_!dD58Ns4WvgPjtNN_NfA%P+3GZJiT6*$qd5!dn8@j0s zeB+vs3d}BVUYo#wqq`(&M!LTr+xRH6oa!-T^rc;$@>-n~@0z@Q9m4018SdA?C+o|* zZO0T1B>+k$;NL1udM5aCV(g7p96}3U({@%p29+o?;<2tmDEOn`maypGt=?rQdd1W$ zNB$hU(>zDE@LhI&c0ZT$HqRmj6XXhRd47PBJQ-B0E~##Rb8K6bm`^5-IHeo2KyTsJ zs_w(82Gx@7nZsmM6~+jXbN>)sEv0yA1+{Qt6iZj1+a5BruDoB<6kAlS)_%jlpWkzH zXTX)m@9Z@L`xrr%#nN)kpQ7f32?c@fwfO2^r)2j7i*+*G(uW+KDg_Q6g2KS0hmofF z!}C+ajy73;E8D^O$f}v|-z9=SIfq}&3f#Tj>)(t_n0QbumHw>cqM$bDZ|RFMwozw9 zqG6pxdZ+WX4tW`GSx3OS(dgC(`-kA|4JG?+19nxhuX7lA#~EzuhD&Ms{zzGT&NGIx z=ncgTW@$1Fw>@9a%(v759#+53oq~@=dIt!QFzhgY9f*@OtTB_btxg4a7d%Zid7H~2 zG1wuU^zTh-gAe%;w}oyZUBc6c@ivppbit%-bse761KGLTupU=ZK#oxx*s0Vq$cbz2 z!g7I${&Z>mSXk3G_QYd6@FpgT^;KHq?|*T)zf0b54#OD#_2T|f(Go_1{_X|e-2ap0 zgog2#5ylKxe>bcRi?R}+Pbi6H6RB=4i;xgEPa{auo;++nNXeXW?y^XilQ-;N<&se} z&)3NATn;WK${STXKVVlls=-^yP=0&caz>$`SVwqnrJB^>aZ#g|)EKwfr9Q=!yiKWz z)cm}Cs>Q-`wiBq0)QWq@PXnMmd?8U!nv-jBP(PeYe{1P~QNDjXzCYt{m;x`X`R{`n zhT@6;>+|s-QB(7OID5M`Rdr@-_y>6)ezkg72>}8G4Ie6=DQfsou|+$;uYazf-LvPq z*S+_<--C6{b+ynfm8wyrMpXi03=jDGygc##$jWyvo#`P?xk)$6{;}WA&2~Q>Zua}C zy$o%re=j4;dE{iYbsrZm=96c=7@opsUqmd1Qg2|L>eD6O^bpHux8BUI_nXJp@J!O7 zwQS4X*Oa5y;^90v^nxH6=*R7cx$j5jVd9wA=_gKhquKFcI#+LALv*Q1{WR52ewMTY zSxlOl5%h2KJ(`!XySMo0t0)Iw&*wg|DH~o-f3;r+y`T*r*S@$-?rjbYoO{l~nHkOF zY7SgyG2mbBtwyu?Ik$3gk-_1fJofXv5q&AvpJ~0k+2+vPPVS3giRS%BaOzq4zSC*Fk#y45 zfBI>~ucKK$9rm{foe;N9=B771t`6bBk52~odKf)=*dKlDj`hA~7glNW=QOwme6~%u z=JB~}N73ik_?}ER=oEgsi>W3CQIJbE{leMVal5T(#t_+|`D z6b?CG^g8`yV*i|K69I?W7llhY9gR=>+2}Na0bmN?VYX^N=Riuj9G*U}Uz((Qe~ac} z7|!)vcNd1ss)|*&RolucH>&WE+!OOOsF<@@7=!+vZYSPzTFt7%@i0FbhfVs}OuXPc z{)}!IO`7OtYAN;a)4X_8Wq*5e*U4oIMom00jTYxoZz$Ztwx_-AW5pe}J_wA@saa9p9&F>}@6YaXH*0TSQ4i#Vt{0bEe;ywnp*=ad zT440L(4g`Cxx~Ow~NoGTqN65 zRUHq`ZJEM8;e7mDtgRcEe?$G`exlv{n3{2nsA2CHrrdv8@oZ(z;`Py;KjZxuJKm4O zUS{(F-F!jg12QxMdv50=uA_0~d{P@P1Dl;Tceie*xA;ESJ~@j>ev5-= zp+wii91X{k?9AP8f1b6c3;*mHwSRGQbjvb)-mvqtBHD6)x~FG9e}5uZ1(4D1=gIj( zvfeyO+bU`^F^#8myBpZe$)v8v}zwt1@{b=f^4AD526@p zm(p6T4`OBJ%j@1;pv!??{f}aNl#!h53cDxLqieF#)6VNjo)F;SU_7SfeDCbA=dz7$ zFZ7SXA%<_rwfC8Ze|$1A(&ChqS#x>BbY~ukJU_RMHC^Yr+!)4P!1#9Vx3CX{{v_k+ zXj^)d>2RmHzLP~pM%j9s#I_aD1d1`=^U2~fKTJ<&yWC5@OpZQ3T^)J%CRJ}ZAF_|t zt=NAgA6fNDw%*4`n_cWS+7)pX9xBX}Ycw*Ci?8H#!)3EWe}&mj@WHc=o;F6sc)GlJ zHhw-IZBj4j$+taD(%s6PhX?1lf5c}uYKTL=vf#suKm5!rHoN<0!L#J?am**n0P(qX zi>tgS_KWnSPuuO&ydZQQ@5?6}Kzgak`MeqL`@`p6A7@YVVpc;jJx273X4$@OPtH2{ zqSgN6SxC>0e}b)bwzlhN?M+sJ-*=DA{k9#QPc~=OK3GlFX&SZ@pFK|>qxo!&=8+xu zMzpu+vkzO!Fx9sG3EOgBBvsQlf*tN{&#;|n=b7;&`scGk<2Acszwf5wV7c9nWVMyq z3AcTJnZ{wtdgtrxa>Df$qrRJ#&iP=%$%$@LCi!Exe|t`DK~T$(O+WkC&*Of%%qMFg z!|~0on#*lDi`{-8hoKgZ%12miU8|TS%iF}xiv4OfJnF_`6Xy2#e%g+2M|-mFpBwvL z&PU5E*?EsuQ>~`am&kj=LFOm3qAh2)cvHhkbDQFn@{h}PDR1k0vbJnF&2Jme3f$4+ zc=LDme>#=17DZXl-Sx94zU-NB`(l{_pMD+|$)a49d5WUddCUimGY@aqeR-BMSDp88 zs)c4(C=4(0^E%pL{~9OfkJD}jk=MI@){ov~=0*^<$JPc$#rPAO>s=fsx@#8g zV7ehRJ5Jqsz~jRsi=x9LIVYNJ*dG&oI+%x?e~N3d45HCzatzo-24QO7&M7>Dj83w~ zcLU_O^H}t@lTqp8kE|^=#mD|@YU`tgA1~x!!Ej(alaDJ+&*SVe*?!o}ISs?ZwYYh$ zao&y}Xv^w+?JoB-96U%H%xz!$Fvz_!g6nAa48I1$#y*d=>0$5;{OM%iFIFGM$CQ74 zf1sq^Huu@;9u$tgKD5M~-%sqCU~96&es14ai~M-M^_uA|-QPaiXSz2w{yH;of4jnA z61k7E9wc$=f5P*yTbte$uW8QR*+jP%3bm?`RIRcUBn;5bK%Vz zto7L32jgZ%u ziSSMu9`sM~wWs@o>x$jS#bf(rBp>0CCDpOuqdkk8kGnP4d>JtA&!_kDsSO<>1an5$ zPvpkzlU3j*qy4Jh&6`1TIvEdh$QS1*t@{1TVnj|6OpMfr4zU2Eoa^Z9{8Czv-0f4}q>ciS8<4#%xIan74uZx*4;C)12v&;48SbcK1n zNoa@T!9K3nhw*{V`d{TSS!zqqx`u3V{RFF>W8D}I_r-HPXJ772-`sn8KI&`wH6J7I zV+PT@N5{_jv^`&%=gs+Qr?crnKTYl$+oqrQ!Zu7!(VLjSYN4?7HG7C4o)u22bhJ)o2 zXQShK@zV`e51N2iOHuZ`6AL$+qf{`B7 zPc+z#^qp8V^mGFWLE7M3$GKhy{A&6Vcof?ZU#PyIIUCFkep(CgLDpm+@K!R}19`Ki*~HQBfI+G9;m;)qID z&K|X$uA%tMwbC@7o3EWYA;D2D7-gDgrdhgov-6?vkAvxLf3f<^$K!!FTWufV&HQZY z5&Jk9#}#>AOk;Zf@J6O_y>G{6_;HA@A5XDO!+bkEd&#-{8b9#ly6%-d4hARdz4=jU z>18q*xT~*0lYOB)tm5$K^)&Xdrs?sVd(W!xWf}K+(|&!5=XdA%RZbJ%bzL!^nHIhu&%Ck*{_H=j(px-M5QYd@T0Q@$-J39AqSonD$`^&gW*)x9e-V{@N$I8=KA-@n+gLrbWAN#>)@9Sk@og^q9>& zl$xKzf6IX_cg-DHm5+9%(F0e5Q+y)sroPz6Hl(AES{QRDqk5?On`sMWX2s>@TAn|v zj~Ves=0P_2+_NFUR%7w3Yg^kO0P1 zf9}jxx;2+e{-gk=o^&rXc)b;&dkX>p%G&NN27bBU_+v9oxGw=>Ur9;kcyG@0*yo$x zAObM@-CG_;m+mbH-CGX;(5d?>7(L_l;2a0@exts>roKeL5_8@jyo;tyD*)@%z3`#G zQExp!-CGU-P~)ikU8+Q5O*gK@j#0c{0HmTN6NTpDl8B52+*DFbRdmWGtJXu&CBV~Ygyy9 z@#}m0(f7X*%2;M+)T9l)cwwY<*R+vFWo%Iqo3vpU`wR#Hu^>(R0hbm5j+eS5SR@PM zYOE8KERd}9rCuSNMLI!(+Pu^uf2Jmqv5tTvTf`1x7J!IDloAGlGQ#5(0%vm;Ob1Q~ z=72y~z(x&7B=`Vk)s!ZL^iDGM{kXDy)7dzUBBbmtV+)DcLTx-^ty-J+GuHg|nR>n^ ze&3r6-|KR-U^Pp6c0=E}xQhQYs2T3e=ewn(qv=gonH(*oLp?>%iVSH(CcjZ@?okxDL4>Y^2Lta zyj>kLipEnN<|_w@ikcI&iirzBd9krP0FMmd3DI#f@O?%Nsci|KT@WM)2axHw@G&Nz zE;lBmh1|g+BYZaCEqR!}Ry$d-L%>%{Ouu}c6;AZPNK}QJX_=Eoe+VQ)2&hR~x$(5> zM0jO@h$13l3?kwSl$37Msn(+3j z@4%mk6~2xyoOBzfk^y*tc(pM1Aiot)n+4y#3}CD+=SS7HRf3-hE&Dvft3CBiKUL{*D6RT=Ccjr)BsKlq8*}88!sTycDkg7S*YLbkB~o>j(#w<8#ZOS zOdSwC!bPgyN&-^oW-vRf))3oa;UZq?ccWf`AUaB`1icy>QsKNVoCt6UORoGPepq#^ z`~uk-Q)~Lde~(sSMz{7W1Ag#i;OR++sqdd>%5P8;U#Y^PS04Db*~J#-$^ahWrY6LX zb>MWvnwwa(H{t~oGtkqTN108+>f~cSvq7wEU##^&ny7`Gi?Ibf!4WTslR>qE8Macb zzqo9@Eh^VG`8ww3^r9oR>f^%ptE-kWjJZ~A3tCuBe_Wdz&v8_Djp%)s(kGP{ToAGxIi0b{`Mt1OSBihDx2&wCPjw0xk@?pK-hj13I_qf zDBv3ta|8G)Hd`%2=5v9#uo35mfm9A41qkL46d8zH4w6jeMQkC8kuf+1V_GglN)4nP z43TV#e{og$nZg7)GY@rhA6Y|C4SV-*JBZzRp&dD1X*@W2pOtx-a_1(Tm+A8ItkJK)amg7cr-LNIJ&0FQ+Z zf#J_A5Juz=s6>M5)@nS6*P26CXXIe24RxYHrg^y5R=5F`jPw45K3thxOfIh2PYjfn z)iRXx?wwP*-ggC6u?8+66CfKbnsH`=LjUjhk4IiWf35kC-M;aXRk44EqmKq;mPjHXyOk$6%j~cm{}5?~_#B~Z)SLDOf!WTHl|^SO zh|t_mLftPihNLZX<7D@dzJ6E zG$5}S=<)-p*@o5e3d#68PpE%8o2fj^6ekP0QoU0oUEV75utik=tz!N*ylC`+S6$&rU)hR?V}y|8L1N+^8^{PO0b-vzJxPve`Xv* zVI!WT1}S%3SSQ2_3kgPm69g-NwH=Cp_|yWyF%o0|VWDH-F7&5*zhX#-h#OE~NJ&V9 z;;^ev#OcJ7TnE7&15u+}n=gLm+4ySu%*CKSGqI2aK!SSRnZDxAQ}I2{Q2_8c3bc6Y zHH8N)y?bpOkeqip`P;{gU;IYle=Ke2<|jWx$H8BGwS93tCW&S@Nv>?m-h2jH#r?XU zS%%I=UaPpyE`w-mtRb>MD&82L>m0TzpFNIJ?OUX#!jlL`YZ73x7T0PbDuPr#&x{8g>k^g z(hABm)$An_gkbr;?kL8D8{;ET1gWv5r)lB)96`J`fKvow;4YA=17Ote&GeOY0Z0kx z?g8ecaU^;1mG(PUmA$%{%Te-}{fE-U$I)+9kd_IetRHc(Vfo9=ZPQAaQf>f6<4~Zvd1_))K zvwunp=7+P%$~f!~bkDI->c^9e|X;u&pIFc^Ete1 zc%_nIL+^O?+iuCQ6o%=qgn=X@NF-!<7~8w+*aJ>@v*jmu+CN~)yU!SRmVp-0m4QsZ z^)E(Ij@+Dwx~QCg_oMU{^FkQ>cpRd_=xV8N-`Ng`4;6ybwCq0x22tkLz&G7I(Yk3H zTq&_&0-pkQs@@ume;0}mcr6dSmcQ!(-r+U%|KdO|{LWOqW2VA(_Vc`NPN{rMU7L4x z}2CbqqG>X`-Vv-XN~hiP&t;6j}8+cT=bcS2=(X91&H^@NTVS66rd< zSTb2EJz$ZSI+lfjT5>$#TWKI-yz#W=f?ykns^whde``8OZeY1NAYM?EMhlu6jxHjE zz#AWdn0g_6Bo$xZ_j)q2KnO}u6__$HU{mb54@;2Lz$pt5?^5LQ$L6YDTJ|LjVk`me zRlVdO{&&5UvHZvHv*hx!xx&pZ_m5uwn{2-MW2XzfYNX3|-TkdOyfurJfxbCI@*QK! z)~|Y8f3Vz;g{m2I+H!<=V*60&s950avr?E5_owmN6=%+5^!Qi29k89VZuSlqKH8vk z6(p`MWp61W8}rCw8TxXJ3K@v{M5x@nWFVp(b7Cy4w8px$?{nz^mkq)s1x_gvVXxoC z4#9}W1o;U$1~J>5ZN6`!_3BK&^j0)~aR33|e|}9sI!`5Eqr2zoyz}cJNvWou|Hs$5 zIx=qFzTMeg@o?6vT>XQ`Jy3T3=IXz$1xce{cw(X^`Q}}JaJ)@I53AqH{?}{&iQA<0 zYHLlO7$^Y|)y&rpXPY0a?`weBxL$qQ&9)$QLOTu>lG=}w%H`bY;s=+zU2`aY`#eNu ze{=>zVI9Gu!@5uNQeC%*`-s1A>0hy5&_KnAs;v&27^(kM{e%!KFhm$qYFJ!xJGb|- zoR+^a4LHI2J5Qi4-ojpAC_S=L_o&~AK+Fh1vHo61)fvOfj<->zGhRBK@z*-Ob4B}W zZQk_)(;?-5_=B>wNUU!>W0Mm3#p;Nle{_)G5gYO2E)@~x5k@-K{OE8*yyLBvIWM<1 zpE*cG?Jt{{7)k+xVn`uU{20EzbKWn1r^Ty=~Me|{t~rie!duH|E?&k!-nnH$ERf0WF8 z2+ACZ@rl;6H3RKN>tYGla<=4SWs8+3oC<`nNF7=8WrD~RWT%~YPcw@8G93+H+IMIPC zpN%b)lOsX|!)ZE()nNmocwi}&t&zffB;#O>{gatROEnSvb6*yl){t(kFxr?v{+HPN zT^sO_jmRO+#y_!58-HCCVr)ujT4H7*hza82YJRVqWG=`UDJ=|xIfcC=e}OZKYNqR) zn;<9x9=k}YoXAX2IZD%Q;z#GrV21u{%nA~NPw_k6*PWL96YtJ47j(YU*^Nk{V-bf0 zgwol+{Q=hskM&CVL-W7jam)Vb?fF0W?YDktzSf=mx%Y#rn2r%o^v;%uwTF7rn5m#3 zh=KH3-;a{QPaU=t!Zr|4f5a);NQ&`kB!2jCI5xo-h=@)rf8!CwM07U6KXINOpN9Hd zGBVI|tn%j{9t@=8`)aEBF3^|C^M(YK=NtOsn?uE4N!`Wo-@L!;H_%mAE7l!v{n3B_ zTi)sFebM2?50_QgU;zB^2)l`O11L6p{)>Ng+#(Cpv4J3BR?Y)Ie~OfUzWvYdpG1jZ zNMd8azADKK{hEG&(B3wsf8`p2R_EY63kZ+iwj^eHnxr7*itsT89_4=IBOk@4vKNvX zkMvUUqX^30EqnIM<*p5z(5o{7?25ElsKxXT$J{+VYI{F6h{7#qM)e;4UG zBn;Bt2du}q|F!J(e?j?AOu*+ph8HjE)(g7z>UfzU{@Z%J^~d(7HwWy6!&jOmFWvH+ zo4s)Q*E%s1)g3p!Lq@N*pjTVcmGbW&&G*fT|EviM{pgopTS}nU5XkKj_}HOHFi`nI z#5zXEw~BFK%8ol*INf5j6&UfEql2`Nv*podPKfx%9}igIHUu&PYuBUtkbv2bp+V}H)>z3b| zk<-PESnel?V5F^9E^!RH-U?I3s@AC(M5fa1ob-0Le}_CWxZ;Vi&bT#31H*Ib+FPZf zG3vv}4In`Od@ zp(7ddHrbdk3soQ7JI#8~%`@elD z)mt8O17sKpX_{x`Y{|fJeAXh#EHC3G@nh3XrG>Pzbht$QXB0EgC{1SqtNe+mgCWVR zz}T<+lv@o;c(gU^HKf33tP5~Xkz<_T@2i>TeMy&5k+ZWf9> z3>;y>2yQgxr*{=!iSkpEs+J3Vtml^nXn*_ZAM9HH^tRu<6(o}UjYEHG#{7>S_3nS? ze`zc)4Z!^6+vYD#`FBo29*!@Lq(8CnV=JEY82=Y-tM_EDDO2;vXCfZ*x-ra4fvcs@4ry&ytCcz&8V#v^1ar zZvQR!6GPYG%uvjNoUHXuJLTC@?l7vy2M}>#h?0PaDGq5ypd!1POqZb5geM3%$8-6Y zzVKTUKvRDv&~<#|2>edpspE|wl!`a?e^tHE2p80PqPw@XWyhmm8ds->y=r{J=bM*a z{g+;<*7{}Zmxge$TNNK2$Nt9A;jeYP{Pd(>`t^4|dZaknui7`K(*DGmrO_A_4(J{UbRpB?=WE zuzAM^TKcw6hd&)hWBK=Nh%SC>f0&(aCjR90xa;NqrGHDMPtGuV<+?w0Pa$@gN2aeO z2=gK*B>GzT<-<(XS!@wTvT_U&W4mS%;xXdVAgHTHiAM#um40HR<;HJrhTd?BxoPTx zbu|j+$v(|#ed_utkq!YXGL0f4h>7djamAyhq64ajg06hzb(|`4;lXXJfBrTUSkGBP zGh*ZA^lP~#nR*T^69=TG#&Phwe((4Dpzl>3sL1b~g`nzp8ND^yG5#m_{Wp!}8{1#@ z9-CgsbK1mRy+;2huGRID-&i9IdblwRA#0T8@t3}0^0lG(4UY)RiRMYBpJrr?IA4&O z2D<7w394}BsAiOqh?V}ueI9UI z4k3Nz#n(*Ii{Fc`Hw;}L>)kt7eHdtd<}bQj(B1duT(nW`fOq~>e=`N&wQ(EcEJco4 z#0!IWHL8Jq+88_{(4S-=`NB*~9RpohQVty~UiPW^V|(To%4ZB^<;AXf6ll!`f^m#Q zWFWyU_#Ai7{K8#Ek>q-RD|I!;qd0%KdteBYUp7$|thfmgf}ZiUzs$czN^_A)yHIt4 zveT$PmBB>W=^&Mze<*(2Y53L}lx}K%+vl5qK>Oxxe>4~!+US>d`mdT(X81fX)XYMY zT;DT3FHZaFX{*2aZQskw#)|u^>*Jh$^T#?_BNFWwvNAEKxr8eApfdWUno}v%vl&RR zk8G^wkc3T8q+?02-6Vz0xs7>j5`4?eHEc}iblt0oh{p8We+`Mauz|TsleN+INQV#_ zvapdPYuprCN^BkH-8__mF4U{I^&72V=O3KV+x2=WWtw~#AgyS=;pmac7e|0H3eceveRaBFm(kwAl?gGay z5=%cM@|y<{z4nBit#SjY;;lMSFizmxS5H@SUqsha5~#IQ<;6R$Jori+od;eDsy2uj z$T3Ep&Q9MoqY*~Z@SUL0yZRyJ?{!tzbUG;Qdi@Xe2EX<7SIvltAOVpP0&WFe{jFxD zhGSKKfA6}vLrzt`h3~nzuAkKL&wuED-*KD$hyJJ5@qdWfIHwJbNX}mC^eYBgOYuB6dt?u@oAz+N4(r) zjGo|wzrbj$r&_EYUiSgtfhv7&g(P8yy}Rtze_hV$>M=tqo#UG?+E#wg&LFvMAZm8H z6bRc83GRQDJ_)s42+m)+hMK=p^R4XC<%w>-bxgnW#Jfjl(EzSq(DA*|QheLu&%D*I zIlb2`Rsf1;s+rD}awX&4uU8r^d`e$GtJ&gfZ`PkaR69=1@}jyFP91$V{*L&CX1T%E zf44bzNXiXVD2+7Ifh%kww9_L6#r>~SZ{E%WYqi~5lDxDZSs+IyYBpp5O0NTA7zlI> zBB9NA?LLZ4uJS?(xqC=6Rqscxk++YJU$EV$2C(VaK5hYkrh z1w|ao)bPR8Q%+Zfwj!w>*Q&?Ff7znLpswdM(~l#G5c1=&PNJBsRm=lOr;HooVux}X1^@%(5{FL_8Gk9P%r(yz*}&Jplc9;(y@xZ_1bT+`O)9HoR~DPUU7V(nc^zn@&D%9 zl70L7Td!lk<~U#dYLtG@;&ng!?pX~z{d8QdnPW`S&jp&PdVBld*0F)rx^}Wg_xfV6&AjQwx$cx|Xl` zov;2FP8zB?6l#tPyU~|j$8I;jqqL7a85)21^PgFzHs1X5_na!ekww}s9C*#Ld_E4R zg1y#v=N}SLBJYe(2#HxJ33J=w$rj8}eMGAzHwVQ8f^=)=^Ip`Bf0t0lOpP&yg*3^k zws%}&$eGkZ?F&eB+=k5btGyMmg|dmRY6c*{m$MOE0e^9{QF%8^XDaV*3`jQbIkq0; z(OEkcd$x#|#w@3;g(`quGQurA?YS zxDAc10Y6)u=r%G+YkEo4OxL^5rMRlK(F`9+V!0SHmZ}D7VX1lq*VWR=cxD69-@FLJ zW>~wU*@#MsT`V$|cKRFi+ZbD58K9RCD*Y{r=M)Ou=>|XXf0Yco*~@Qk`tIv?dnDxV zneP`)D!ucKiE15`&s^VmqqF~?Il8ev9;|Yx&Bc!M)dI)e>;wqX9Ugxg_1Q*h@>+KM{-JP5 z2xkaJx1T8Q`uu91@+U^#`bi#Xq-Osemujy1TcbZS^vN&G{kOdQ_l!O4No0ocoz`4m z3ToSp6uu@1aN%JgDR8O!uxlO_3nDy%!<5;+mxHLDe*_rP{W!A3%s@4m24@yYJ;UbG zw*B!yZzbBKWo-Mi44o3wNXxwe5?P!a{{y4szBiLZ$G9TKstAaF8#=Y>FIR}v<1YfS z!Y!X>$p;d2Y34MvZLPy8bJ-umdZzNU2c%Fq^{I=b%hT}6Q@`diW9tW-(&*p$%inu? z-nNdkf1iBz;?dvpN$QB(gY6d%N{f?c^2V1G#Uf(Zm!>7hKeoa3D)4aG$Eu!O6NI?5 zEr?ILh%5rJ!g|*rGtAQ2+QOv;sTNZ_e=;m77VZpu?&vj`mJS>VN(d@jzllzm&(!C0 z@F~;Ne|)a|?W?c#+ZZ(}4?+c8cKb0@-s$#Ze|*Q?8_)FjKD-~!^36}k_kN{6Jo$S+ z(|eAst4~&c{MSHk16!+^rADLQoVIwj1k-o?xi3Z)kAV4s#hmxrUBf7wD{y{ zxjUp0W}(`(U+YBJrIo`$?O(FK+KYpu6RYLgLWzk{JVw&Q1ZExqsk55L1e9)9dbS2T zf2>s|fxgi|H1uVqzj)0^@m>t~7`93$8PJbcoxn#*8~(}T^-pi>)g#c^d#)@V{>^pS zAC0>CnQ3|D2gJhvi6cVFM}K+_oz2yK=rhtxkmcU;2gD2_6XPOaD2}jCbj(GrhLhDy zua4=I_&_~vY%Ef~0H1F#N-joVSb^%7e}LxbAdn6MBeX>U9|tfbx}|%VX2q2@z7MrH zLFi*$D*b=9SR!!*o)~&6*HmaN_BEYE8b_R}I`6bX!t%12Ng??pJPlS-6K2gB_!_dr ze{fE-UxMQFUrWT+EI1TcZ5HLQ@An0AI)0|+FkG1B~$*3ZJc4pm&7=sD7t*}B^(tOipvb8 zWC=p`Gl;Ht^AVBiZ*;t(!=c_7)qq;XE6i6vee<)gwd*^)xc@6|BBeDBfA)yAzvC$R z9aGUS9{r#8_-oJ9&)@ii^_^Y5f9HvReOAs^6h|Qu)VEc^7?e7a1RG6twDDQjRh<;b zFC+7wlt%~$WlaM|Hzc+6K>1CtE^=d_U~>-2+@!ckEAfx)3v`TckM5Q?W^zWi`Rams3<17pszlS(m3`1 zN#Crn*N*kB*QNS0qI;GX{x`q(YsDMy*YlE=fA7;$n!sQ49Bv+!5JYrmv}N7z5Fa)g`%YskS3BFCs_M92l5r&rt4X5}ri*+Lg=X$ACTrav0VT^C9tLFqVcVMp9#CNw4NXI27{_Jl0(ca)f&4NBLFI7!s>xkux zY<6FTe`RQ7ys^^q*7eU%{RzYji_ZUdt-*h-cc`B24@PwJ_W6&N`aK`=H^=FE53jit z?;kwqr6=rRQB)ai#$O0ZhVr;2DK(62ys=VBFsFC}-F^g&bk;&fRSu?l7N1Q-fA}^E z*mL107RzSKtSq}e2JwXvM1|wj)fz|1(uP9yf2`j94uVnKSRjif5o4X!Wv4wfy;lWzz@LHVw(&4+k&;KjGlYjb=@3S+$&rHbQ_3`hR8UCfS*5A6S+GiDG zL*-aRRgU#^M3;yQzl#xB6h4T_sUkcyiTC_<2(wTP#Fv}7JUisvRYbNZq+#70cP12#`!#mfG zCFcodxa9VAJi{8VF~rCe;tyX*C8{|>9iE`BUo`6S=r_Mp^DO^3i{hIH{>+xWaL4?+ zhWk0=;=Km@r;oRoN{xUlQWFx>!z6d0e^^C&k-K+5sHHAQ;#t9%A)auQDtlJZN~f(4 zvjX#5<7--i2OI7(=&fVI4z8Nf5z>6mM!qs@i0L+m5wHbRCyp-7u#&x#oAKCcV2zO4fEA~@U>XSC+6}!C__=Rkf0{l-El{U@ z#D-uG+ZQLQwC1#OjEqreRgd4OY`wVYLLDu@DTvy4QrhCI?2)v*vRcMwAWcw*oQv5f zn$0{SXN8I6^ZOp%FB{%eHq$hw2g%SvV(ThhOO$6ro|d}L-hMtCLQyF{@ATeXYU__}H(;?<&zPCUrc+Q~&3RBr`aBg& zC#L@y|CwMbkeD@M=~%M~hywF8;RFb>C6%Vnz5Zq9EY5ML=bRguf7X_tq?TkD7nQWs zeyjR*DmztujI^_7+}V?p?tGu0nc4i;UW@m<%DYctv6n4=`{{2D_4>PRLCrs~MfC=M zpW&hW{i`54V$8@yWkN}@uV#V7(yC=dgGmuJG1ZaRH}bf`KnOx)hae$(QAx9|LhQ$d zovICjVz0}whhQ8df6iPJkH5ACJ*#ssHmxv!?jr?~Qk~6G>@bKU7M$UR?r1Yua?F$l zuBI^8Ak_R&v~)GeF{g%63j#iK4ApN_pNUz)@MN)Un1v-(TuJR^_+0$if2d=%uU7B& zKYjh5_SIr#N0xW{1b#Ts-?i6&-z!VS@4d1Jk7t;$8N>xTf4u7{lBPqa`M>OeO zPXvU(X_UawxV{joe0+iE(l7O^S$bw2x6mdUX#&& z!caRWXmee1f8gp|mVJLJJqGu&8bz$YAb{`52m61dy=l+tI@2!rS6b&&AJHws4DV4( zU3Q$r8F4}g;Y$kwI0FzMfD>B%-&+pc!`gd4&wGYjOIitb9NV$W<*KVLH{k|zQ2&r8 z`8U_jZ{9ETjzReGC;XBPhO4$54_|V6@P%R27H`gne?MZ?GADy*Y=$DO-CChkai(=N8gOSP>EGAZcA;rHx_qUonF|hgtbcZg#}m@6_Yq!JOPU^ zN5=hHe?(&MpwcfApC|V-kJC^HPM~Cltj?Z`hhyAVby$5>H z9=yJ=6s;}kBV(lOr7TE~s_L_M>?^C*RQg1~K?hDvT*q@PN(|wSzW2IpPI{t<)AFba zp<6X)c3n=b5+sh|sZ?SX_TKB;&o;S*#2&dH>B!>@eVu>Hl-uPiTzv-Vm z2rb^`XY80wthMoR(&zlPxyilpTdvCfZ)J>Q{NheHe|%)dx6GE~*Owd}wB^(KQ}1Z| ze>$dA)s9;I`&Km#2rK>?LHK0 z{(r+c!SpumV-xz075WvohcYH(#Q(E=n&d_)eys`WFOG@7t`~oa@n#?13x>Ko@h{^} zTmKZ1bIQ{tj;-Zse3hqMxTm9(MKls5fyxUveP$}yNHm5&pas4fq zREH5qEli8(br$xdim^Xl4gF{<#l!YX1AqH+ObQIk3WLZyVm|IlSqVY!U`~j-(dGCH zbtoMd`~o4LFR(BWN-NFwt4tRT%3?`U6dJohg^;K!OL3&HQdcE`L(Y3IA*gPhfvq)nJAtwhvIU$-}o-LucEO!qK@dK4A zq~zU`G5v^YT7USm*oI}22aYk5_QM0 z^0^`DQ@2w&?p896mxAPT>nG8jhnT4C{FZx3y>uryH$AkR)_~uw*nD_9@ zZ=wpSYb^QCWWpad;CD=1BJr(fDiROga{Xi9*w#AZ-cnZ>%68se;<9Cofq!3&J@y@I zzH|rEH1W&$!#B~H*I<%-8T&jzY8v*JCHGXONIsP>-iW7hPO2o0gAZ>+w-f}ca)X^F zsZkFlws>}7Q{51ihZxaOh=>ZboS#72&`d9@n1-kK2+6+JjxA>8Z*kWQK@m%Poo=Yb z39)KpWFq&N)GOILuH~O($A6k3R69L88}1EJ>TqD{0as&95QK@-#VF3Gj1!PsA&_|Y zA?&1H(<`%)#A2x*7^tG(?9Q~ouFoHwWAmp$g26ZebLDQV+>KKs(& zd~Clk>~Hx$=d{%ozU*0E?NH38s`nC_#S+I1lG@43QHxE67TY=G7=HVRO-#fl<5N$BCgNtQPC#YvZH#~KY4wl2?Dx0% z;-dWWtGDxQ&gqZOxSemmvF9Ist=0WM{!pK-$~06=URin;Q`8i_XNgy#;%wBKC%#(7 zI+~7D!Ar!LuZdW-q%9w)w$@;cSV*)YDUGa9VTtfzNK94Qr+*`#=`H6iC5RP1fkK0o zJai}7>-o~7b+S^GH7t%{$LS?K$ueSwY^tXUM$8RCQ0d;*R+(~dd03>)NYEe{)Vik+ zABa~I?|SugQ>?u_l)931@64gDD$f68mOhEsYq_=rW9FMjaBoaF(=& zH?LcxMFF3NLF1^Z5H8}*h63?i5mVYw#OwHBsLqhp_kV>iI5{dwyALqt(+->!{A6&J zPT^R{_~%hH&&e9VdE+}ieWCraH%d67z#QF8nMuXG6z;*l$8>Um1{NG1Fv$z<0WtzUmI&0P#*{w1)BLFIUyzQwK*Z=*wbOs>56KtAL+(GQ-t-4j^~pR=7sMr zhF9`(8ZU#r4vvRVyd<^I@k?U&BtK2dL!xyX1%Gdtp#o|7geuHi-trbt_$6ySPQQF{ za*VEibe}If;v1*%jc020?c|5k`jck17_N|z!A{1O+X#hLkWCMA4}s9mM-t~Mh7Mka zGt7ym*DzOo<`EdKbm>!O%4K~lPFN%=Y3m>B7^=>SaZJdxVJS!-lX4c)R#j-kj3S+n z=zk`JC8-l%Q;%!kXI}Uc0Z|#pHI+C7FuQLYe~&>U^=fQ~9oHwnas2Ii^%s36`sN;P z`DWkkeD~3xamQx;J^p$h|K(fOK9Bnyp~4NRjxzBi8F7GlG4jicjfRABXQ`#a1Ch9{ z;AfPTXWb_=Gss*Q57!!UcD5iGVIENAB!5`DFXscETL^4b<0H3W9+ok|yx8kmyoP#8 zRG;f=T@5gfbu+9cevvd`8gH_!yz3}UZRO}g%>t_K^0z!&bSJ_{B`>Yt^4w>=AdDj< z@en^e1wTHb=8ryFwfnjc2mH6+_C=e}aVmru<07l!^!e{9odlj|vlp*yGJTz>~5 zFbhrKWx6O`&GidJQ^zAHIT%B!|4Htl@Hr2PJ7&W>YMh=Egaj9CX5*=%LbkSX5hBUU zDEz6)DofLL0q7i5HLpSIKyIUIlS~GE|Ju9M?hI&MgpVZHmVa3YU-)-J?Q)Z$2#YJaA^aU{(WLNJvJiha67s(mCn9;UG`0~-ECR&9!lv&y;E=$_ zZ)`I&Zt*!Sj=!WmqP6F) ze*1@<{O@}A2ZQv5VO~0~82!O{v1n~AdT(iNt!Ae#dO~6VXLdm`R)4BNu$;12s%&Xq zE+qdXNeJ&gVQU0%FftLNK1#t?XwvJI2mNk_yh3Kan`pj&db~y<0gAmWX}l&b=S-n}rVD>ls;EyjZ4Z|<4+Fw}o_j>xl0LgfWmbdx`Yy9Px`qPj7H5cA| zr0154M65F{fV-B))bxvp!qCGtF|?po%|@K{RC$_+OWV{=%YRBouL)^m{HJk?Ei;fq zFqZgFk$XPzCC(q^Wask&S&Y1KgxmFMS+hjU8zB} z)wsY~%0dH`5ne6(sAdbl2^Z2gsB`D3r{TQ&ps^7Q3&drdZ;R92Z9%~X3gI))J8%Ly z&xM>nHh*&7_G5{SYrg%1av$3KS|^P^Z41BlwjVvj%7zFUwnVJuTvM@67=<2HYgXBO zwhfk5iXd58X}pKJ4v)`7Q+;k=HdSw1P9&d#ly7aE6eC0xk#cA8(eI@%!bEPM*i`jh zc+3=pAeOaXbzcb@bK3x2+EcJ9`PL8T-D}%tM}Nt;ju!Mc=Zx|dqyJgAq`c_OIsd}_ zy?KXn=hNozJ$Ea&HP!j}m%{LKPC)+UXMXDqf8o)7_sbU^W8$%$uvh zT`&|#uKTG7{FG?OUo(~SqlQPrH-EZ}(U)8djDl!&3i6JP7v&0*mDX~<5UK~v&|mT1 z^nXVKHyBF)62d7Ys&JT^ay$gdoyA`!6j87!O~NaKG-;(nWvJQgB+u?aJhs$2q|{89 z#IO0R$mTFD$+GBTQJme3mr+yr$d56IskDVrd0vp@Rlq# zHO==yVhE5=6N^=j=7Wd1=7ArMFb_{2D}S&qWc*0tK{b@|);e@2^_!bS>YZ&|=~?1p z7#KwQedQ+O9?%u$e~Bx7#V_|of5zy0ukbaOzx>{Bd**YL@3a)HQL&J5^s!NxTI_G> zD&1_FawRu#Dk__xsn(<>G&=6?%MccbY4Oq@rb~kbs9XohqUWL%);d(HdA9V~JAdA6 zCF@&qefp6<^*1lgcU|9p7CBzpxIF%Quh-&DvNpCc`x{2}+g?k>*Z7mRUc)pnwN+4v zqe>3%p^ods0-lsw9j6GW7x=|IVAV?(RisH9#?f8C?s|6_kMuew0f{jzi0uLw^B^A1 zNi4A;$WVD~9r*>N;od-@jajlz6@OC5wM1{vfRgc%BY~ye43v$QF&Wn=kB7YmHDN|p zwQ*zgIlaTNLPEg)6W>}sABCpE$~Y0-Gc7Ka3U`L%X z1jluPv6Q_WrKwq}b$Re=F~n;HBR<@)#$EVX%SW4bM+s`=zF(Dm(9VOrzxS8UaQd9t z)&KB1{XU=Q{3plOw=DVEgM7)67x<-(xt}sdb6bIjKj(RE)3N%HC{0_EgV&F?BOzyoJ2%a>Qd0e zd1;{v3DHte5tsYdxx|?sVJoDo;wkrGR_54$=P!_J2yK4zEVg_!Uve^aeshbxzv07- zHjKwv6eH5W#YHD$G*l`(?Hp;-lW$i9F(|cNIigJfF%`;Gs^MCf-4(95a)GMg2 zvpCuvQ2_ohCru$13)2kI^3mdkyWJO@s3c9TTHG-F;m`WvEv-WjzI@{EcD?&)o^BT` zj*PlmVhIsoiV3`0Vt*m^0#gz@kNSxx{PFF4=_4-w8-DE{_Hh4* zz5FFJKlW{|Pn}G9W2Nss97H&IuJpo{G0qNev1*ZqhVyS+YE2h@5IQ(u+=b~CgL%MM zJXoDzl@-$dq(d~_d!WQ#T08@MX>332u{UoWGlyRoHL@_WX@47Ei4n=nSE&P^ZX7-> zx42-Wm_Aq^ECh;!xgznIZywNw+{p!qdswn16MFFcX@UDk}pF-3_W!P$x$fxVtKcTXp@AXREM9UYc5}Y^CMz?Y}6l zK|u~0f~U$U4|D6RGD4P0wbPH5N?N{VO%FKPP``{Y=!Kb852G1Q97vG+iZ|~#O||O^ z>n`m9p|l4mJo(rIK5_TIc>KQj|9O)CEyh4~zV~0w4}VXo@rmvH8eej){uAH#;U@Re z((6d>c1~lOIZMJTR}-PM)yJ4mPm5J?GyF0Hf7&Utfo@ZvOX)<&&U-AK9@P~?V zmOdfEFn{L;HtwDDHM5CeClws5LbbD`?96u+QD%`|PpxlU>0RJEN12?jOU%FY?e`w^ zAMxOS#I~(kA5%)dc!1k8NTfdc!`u3eKmXfayyYI}zp=gX2Pd)~_~M^p+NPN%hIa%e z)+N8zdMNQ;a~(qU%!z3QJJ-R)!G01W5nnVvS$~i$-e_7m4b&;sYA!JdA&6wD2^sH- zwIYkAgeiZCZ(}Q?hSLrSh7=ODI_U@ z^G-=>_Id|tcSpd{@>mdQ`-h)6h!-T`hmVLYgzS@J4j@Z?%IWg3Dqt(^nJAZEfLM0E!2@DyxzwB)q=a>HV9jE_ZrzKB* z@WzUv7Z~3$Am6g{zt=t(FE#c{wtoAL!r%7VzV>Q4EC1M6y*YUGtlp%7BnGzgTR zbE1g!6(ebfh16$ucC?)EzLs-RjzvhFFHR%L6DqOis)j>u<(h^$UFoeKf0~;9bbo8g zeK_1T>lHhydSV(PriM$d31SAgw1E#DxH(o5k%uR7U#$&Q;&ml&qr_}UKG)RDGM*U? zuW`-SD)@(3?vH%sVM46_J+>L))DeL^^yq5ciFFVfX z8zlL!G2fpH+L32{w({l=eZWz^bntHFJ?ZWJ`LCuKim@}x0y;lrXNH3|C4cF?cknm!q<$o-L>D;UD zHEJrc6jLoG>p9jG*Yuqbw|pdw!tt_Jhl#1faIWe85OGZ*S+&+-&>0-Y2jHwUaulm| zan=2yF5?QBvWu;>m}@e=5=p=F97#RbZQ}y69$O?8X42n4-e(Q}K11U#e2#D5jg;|I z{ChrpV=XgB`#CeG?bjYEc_*>^TUc5^W$(t<30DSln%ua4jv=?xDa=?Wk5jaud%il{>cP6AjD#e5hCkf|b_O zk;clNz95p14@Y&V>%q=m^(j?-$ChWkVFob3xDr!oj4#&opIq& zOnq$`zf=fSZnKN-G^5oT2L*_)@0hw{A5jp0vA)^j|R@|CLK5s`b# zqqbVEW*dj!7Z!HLW`F9OF=)sSihwZcBA)cnBDQoc9aj`#h+`cTPTI9fsBgK!R6SrG z%`f+D&^$DW(+<=AvKKb^HOjHr~;mcSn*g@CgzJzj+y7IF!~d z+3Ex>Z{D{)^=mG>uP_ z14&Pq1iCzXEG^_dI+pwt+mGH3=qH|AWAQ)PU;pB(OaAqGB-3uy|0-7us#U2FmbeQf z#C*z7Dn!ebgMVP-D9OGJ@|Gs#jpmc!gV5~t;L$q6BYi@T@`!_H`I)Co~pdDDcM zNju}t2L~EwL*Hi%T6!>2)i;leS@UAJz(be#)0g2VTYrOv3npTe@?tK>B>;LzN`lr_ z_vV2E88Vo+r??AGh3~%r5S2?+09pd2AQr@c24I?SO;lj6fLTEzw%DMs(r#>xQ8hF| zap+(oXeo{toP^d|+$#Jc_7#Ls-J!c>gOhsVUQcuoQ4@Br$%U*Pf9BIExj74jJRsdL zOwz?c3xDHfAH_$lN>a4ijECs^P&-Py0)Qhx=ze z!^T)}yHQUx7Mb2yl-43YWb=!u4U0juq~->WgJ!)quhX;E>~ae!<*LaI%P%fTa2i{O z>2=%Oj#i)CCPBTjS$NOz(6Z0dM-NZ0_CR(Xw13AsOKw^m?R;}NozLeXsA68iKmEBqFkz15J(l(1lGoMD zH}t|D`(V^%l9{cIXX)}^hJQptOMWz z*8rKMJ}|j41(e`G;=isXXu>Q2Q*;Gram7?nQ>O1^{T~&$%=V-uZ^j{pO8U1grGM=Y zfEP@s@8?N{hVuKwCFr0Jx?R?eN4T%L7N89>w%zezDv;y6fB);BJ_N~44mTl>VgV>i z{FJ{_firH&1542mpaw?&@}L~L>QgQNlV9bA`dVAdX@S+u6%OydI7HlDg3Gx)!fnN+ zN0Wx~+|RcJB435Q<&vy~c+V@XU4Km5d{=0L=fRTydkk&ld445$A&j_s65bxhpq9cr z!MKn#J3h=}b;{hyMKZe8mz-FY6aJzAIbK0j{ z8t4MFlK|r6zAf$ENR0`-S3s%?Bfif10y2@)z)9^bs5VALC7e)W2(t++SR+qFvT8_u zmEFh@6i-0mhI@>KirZ^7j(-AyG&HQ_eyVUeyFt44MOuNWvI+GxreOnP0fm#Z8(o0! z$~xo~AQtJ%b(O}beJ<+@gc||PLujEJ(}v`93lUSC@4R}03h6NkZ6N0H+E_qbrYv)R z3-XM_v;yZCEzNC&NlyKT6=5t-;)>}OLF8AwQd1UTU3l7YO>KhI(0|opWtpyRC_{~> z75kJIVBxgSfoPv|5R0(v=c^S5Sl}ayV^r}9qqaPJf-SY7-Mv1D{VJ!+;!1D(JJWqamDWY3b~(j zR#glF)?3DvPXi{uYiJ1)+}9N#Rn8H1BXh`>xbgv;p(VhN<#W z@+@=H3c;1iIT4YcdtZ)Uq{jBAjBNS-eR&e9;u^C3ejf`s{eA3E8@zVj0m|e!l;4Kw zXx)y(zJFY{VL_EZK@7WKFfD?Kg>Ke=V)1FEsgm{qSaQA+~LTwV=9JsHiIDwCQ@* z)CjfdTiYhuJ6%*2i`#S~Xlhiq>3iEI^qoGfDt}hA=~g51pYQJpbVBx0<2l7+0ZU~| z15M7`%Ek__5c-;zQB3Y(gdHw9P7+_rnBrbxrJ@ZML^s<$t?-)5@j@G^!Dx-%kTeSV zf?}zpOm7tf>@WSeLNsR?(kd9r{&498a7-wVDr?IM zntvKF$o6HnO%TP^y~0jaG0J88Ox)DawCP9NCghzi`!{^2H%*PQHodfMV!YF3|ME87 z%9sU?@WW7kS1)S>ppDSl^_Iys@%Vm5 zuT?;}g1c( zP_Rs^RGTEAEvlstO>gP?EE(U4IWXGlR$H zd**XhjvM7y9xSkw-59u^xkg*JJ{j6u=-9R#tFWSqC7_Aqy7K;3TY#m;A5;}m*A$Tb zN|SvXL~_mg%B!1O(9H1Eu1gOlff*=|8|O2&j{ka|H^ZD_m}WW8TRE#v|Ezc6aJ%?? zd&!oiqtie3Ve>Nv85TLkBY$0vbN>b_)=Qb*GuIJ@X%viW2S_O89}E7LFVK7F@ApZaiR;XI5*nBl;UCT*?-^f*KM0vQXlzM z#{b$JCi{OgSLFPtcvfDd9!^*v6?6g`>uaqJiHMu(?LBJs$A6ypdfTV%`l5>5Lsd#K z(y6kSWy^IZjke$IGi;lHmsO8y0Z426i%XqkV#)nOyAH-uCmg=h9|9H5qf+HMiOhDs z1ju;nty0e>OAX2N_X2&CLxYf_(}WFz(H;ZOaB zRRv|vZpfA6anUpl0Mg}kh~>5Rz7CoO*ys05UVnan=Qrf_@}2*E{m%cse&>H*H_fXx z_chhM(s)&g_FNs={0f{6JJ@osLZl9dczYeT*Qsq&^!Yt&uiwAt@BCa|7w`P-_3@p*y*|D3x7X*k9)O6f z#|F!G5%6W7>V>R_T%TpWYuhgN$@gwQdgl}G_J?i0`@8*Gn{WGW|0kc+J7H=yKXN0H z`k5+j6=}y#Eq}lL96tn;da6O+b5uSvLAcE?_2I>LenskhKlwj%nvw7PyG)j^+okbF*nNEI-YNf}EpjqCzIpu)FFDd{pIn^T}^{FOKI zAGIlT8ZNUjmL^T+L#Dgo`#At#JFcVUaeWWuIWruq*>wnqj#fe7R*ZsgJK7pz@Cp0x zGv`qUxbz>X3_oT3PL-hNygQWVgJ`mdTemwAwtoT1qT88I?~uwE0IbUATmS{G6N!`Q zf6_bebgM7VgS~Tubi28Xw_EpR-g)WCd)Cw zV6vPF^g`BKm-X6mq)6QPR-i_&6WlNfV zSq`8=wtELC1zpH?=Ks^rX|bfrXuxpf{Zsk}sO%3s1c$*y)@vupQ1n=q*OTqq3RKE= zwe7*uNRQfc3vB6Uk1MnJbLSYsJ;x8-}Uw2{oOWgzqa*6vYs(RMLS*@8vRqfHf=u=*^lYp^AEqr$(+k^ zE9Ced$bN6f8GeZRZbIlBZw;=pxId z@N3-4_X$~UEb|ZKbBTOE2KhZ+^8c-TPH*e;_|&Vd&raU!>|M|Icq%{jS;+CYXzSU| z+vBJI{&?@=Z{@zn$Fgld{m6;wTTZ;kVf~Q@p&bAJx_-dh_P70C$n;e5yMJ!s-L9Q1 zw;ea{d~Ll5f$-}&fH6S%9w!EaMBd{m*aE%ed)jeiwC#P#`n>BYz?R>~pz^$xr);;+ zk@J>QvYb{9JY_kFK>0_GiMD(>zTfkq`aM2uf#y=KEPnZyyF=&8zx>aC(hu0W*jg{y zLaq~YDdWhsL{?@ES_zRr324e2 zSJ-mI5FJ>8Fc1^u8Qwzj2`EVAWG;mUVI%rT7h4os9Mxi@6B3_fIm>e# z_3klS7VD^1VzxUbhJSn6>azzjF2|it)b+|w(aJB||MlZ@rxR_{*F>TdeXDEt_~fVG z(OlVsh+&+SEH4CS@(M>4E%J*cD=9n!2p#T=H{}_@;4OC z?(1lmqVxOR4)euEFKTpp|L(9|Z+j|)qr~;s`~5s@`^sOC*MB?xs5!Ya_!tBM4$@CM z{lEU}|8^*Er_KF;|1bC3V}EZh`~UJ^|G)qHP@eDq=V6oV|KHEmDLXd*VQm)s)&Jvc z?&*JPf3EJ$asMCLeRIr`_b+0f@$~*5Lq+*-)h+pt$L;vP?>6_%f5^nHC=ZAJf7@=V z{do9)?39`A@P7}cgGlUFJ11b@vbc74|Cvu0od9?!=!TyX8RL&jy}P3Na?eR0GN$qu zxA}4i2!oax)Zhcf;S0K*om`DJ*8zy|5}MPg^|&2Z&u|~E{kd{hweB<_RxyvDm=1wjK9$%=5ec#d4wOq`QgtGDq7>49HwOYaP%w|zi%#LSXr?)TSXHiyH%Iw z-aN^|TprHGY;Yg4mj)A`^dmipL~)ppdfnUR)R~W-!8KduUDMo}#dtl&gVEGpsLq{E z=Zj%?Kc5`>wMUaBIvqRSy_=~?7UVE=p8e}(`{)P4fKCm!u`$+3%{Npg~yciq-ir&zIw_UuyR??so@9|Aw~P=WxcFX;)vJ z2QO>zn(3=+cf5Q)C&BrapL@=|=zHDM#(4r#e8vsi9T74Mp|bQ zbW>Ocv&&J`x|<~4Y1>RT_3;Rka)T5QtkAVu1H@H`4<>Q-C6@x&wQ-TQ?;ew?+DP6PK*8q1wS6n|Oly39`C?w{)LsOMvMm-dRye3WTIyo!(!LaRW ze>+$HT3gcYMv0~e;wQ~P09Y$y4)(m`YupYne z3;m+mUXf61zn%~5k{z{)$;C7u{iSCg7gnpf78d$ zyqY&yqwm#yi$Hm=9|mG}R08qJPV-(Li?dsuwQP z&3HD?h%VNY_?_|oyclx*QhVn#yP=yU1|iF1bK%(ABpU^K(IoeN@_Z_nP0uoQe?RIR zcLfuHqFR>X@O6Ms4{tKOMLsSTw$h8A(f$^sYFZ^$XBxQ~_cQg1oyjn{tghR~kuozJ zm8E_GKie%5LPu;IK7UXCg`%r}-&K52pzJv9c=4-qpI_9l97oacv5IE*4x1f%L%mMo zO~&U2!GY*QmmAPRcV4rpiLSL<2F%u)e%q&gy!IU@40D zwl9wc2_E>^umSBavh~v~=4`VsXkeu$ux{JSu-1yiIG&orQGd-`Yp^u3)Viylws=S= z(ixAFMANED(Tk> zqopX8`E2Sa_NKJvcOmAevm1Cz|Iyu!reZ_-<4$K%Uk$!GZMXE4Pc}WO+OAFw^Trg~ z=vp2q<(0VVYJbiYI@TXi7N?_l$fNCsok%~_&VF#zd#BB6Ri4dgk`5o0ieHJB&v(L$ zb3D3)9c%NnJ9D+?>hM(KYkw0hF46d;YzNNVRgw-nSlZ#f*wGG{PBz5nnVN)UPpvMT z^^ln_HvMu@>__Tje4JQOHVGEV_HgtqE@EFhE`jY2e1Co1(9WR8s_~5Qp<$5$XxbIk{wnAUgC+tJR=bhC z;K62jUG9P?)ZIrJ#a^ALyK_fOywha5eU#^gsVW2ix|j`&$=YIpz8PFgp&l~H{11oy zVZBnCvVUYvw{Q;e!WJu^LwVRK&Ti8qqAI)~S&Em!_^5loS}DAGjJc=a!opP5kK&>~y^fZ`WsYAo)4^>y z&4+r~i^E=dE9P^5%Sm+K?j|!a*I$nnJ)L*f<$wIjUbArTvr}?rw?i^q>+{R=(t{nc zgYfK*8-5;L*ZbVxtVg@Y(KJ^xyIa4=>#%1i-P-%7$J1zScXxCPjX51}f@j6T-f6C? zMzqt4U4ChsDusE8^vGtFRxG;o|{c!|j{_K!iKDj3_bcx~62 zHngnX$=Iq??bBX}lHB#@CP(HC$Bjda<9~&!@rYk~)EpyA)yp&E^(95P5;xCLk;4zO$$nR(tRtMUdmK+A-CeH2)07Tqvs#T#i*YKsWvlg^`Bwev8@Gi{eRZIg zR%MS+A1{Q{eVpPrO^36Ge)JJam3VaBCVY~P`^V|b?BNA$LdCNn-LSjlhHjK&Z#9b` zzmc=I>e~LZ+!p4H1?g7ARTNH_{eLVwp7@Yj$8tRC;k%AbLr3cj=uJN^ucdX3uf5^I zG#=xAo^L2x84TwNA2yTI>RRioEV~*(#(ecMS{-+!xA45dQ}5KzYvk6K&5)nX z`B|sMBg)v~m5Ez#bh*2mWl=iOvj_`I$&cx*Nmca98de-m_4H=e*HMu@HGeD9&4srQ z?^bQpzMk25u8DQ`OhS|(Wg$j~#|`HS+bf5jWVfdFIX?B&*=}z4@pR*a_w!K=pU&~6 z9?#n;)4YIouft-Btb=wjA+ENI#2D_Qm6&L^{tnY>ho2#GVvi8?q0{^!VC2N=!V0^g&eE}Z_@26+i&Ln6>ZY^9)Xn){VF&0 zLC<+@&%qQ|fpb<0)N6Q?R*%ETj?ULxM|;KB;~~6F%Vl!C_WjNxb$`gCm>1TK4c424 zPy7K4L#3Q9XGt|WDn=gL+N}uItHbUvdd>NIw@<3Y;mq0m>57q_jvQ+EPu=8A$A6fk zzU5={QY=zsx-nN;pAB4{-{&Xd-AOkO@@teZx+c$!r{`PakaU}w>oheMg_Sx(%ht!) zB|4VEu{hQB{l!&J@qh3<9xT;l7r3}{Sv3*sp6Mo93~t4SZhD4#j=Dj`0wa1nr+Y`q zW&y1Z_WGbtdW@7_CZfUWvzVs+!(dbQ=H7L2JbxE>4%y1B#o=~JK`Z0F zyPmIiqxD%?26|^{`|8uu_pdRc1I0Hn4vWFlD`(kr;LhT7VS{yPn!|lPs;*R5@0!w0 z2mDYR$0=3DGZVObaVFYRueOsW>4#J|<3MztULoS?s4{hBQkG$6+1BClJPpEn^ehM3 z={{m(|J)m?M}O70x_Q(cRL|Y=G9T*KLFi?kHF_xFD%zbcr)kH$9`T%;#^e!g9C|pd zcAZrui~?1nd8f49S-wl&zx*uo7wPbiH+{fn|qSfTXTrq`bPJgR6H)%^%bLD zbQ{EHFJBttbyA~QSARD1V47a6gMLQ)XR<+SWa(MG8)U95Ls6buv44*6W9CiD*(KP| z{lla@PThy?UrvjSu@7fe|1#R&=I(2JSkLm^c8_MkN~ydcp(=maH>+7!`ev=-hkNa7 z^Yvr3b1ibXK7$c$6cAOCgqgS4E|;O_#ZS7^Zt$1MI}IMUcsvcXFMoEpT4uI{)MmSL zoZGOgku6bng8`f5yuHS%XK~W4%X4Z=&5^&c*2x^0WqU$@H7?1@=5~kSlKa+7ju8u= z%QQR7G8);NLR(8x_jv5XytK%053T33Pm5}4i?DQjj-AO0%krtuBV(~X zQtU(EW(IptuDW=%D}TjFnl8@?-60DF=1C^^BUS6Y-aod^V^>>P3#{*wSwtjy8`3); zz0K&ocb+~vzoL61B?BXag)xQ!*xqXq+qeUkn>XM(oK8@W1QrtPbJ2%G(ujzk!32p0 zP@+Vk%qr!+-IkC#|9jz94ZCwyS9--1A7&5LdQ}2s=Ikl%NPn&3Ocx1RMvV8^vL0(S z)WG+=VfW;QB94=j3e&D>)_@C6p1t1!;6nq8a3SFhh_YzE0Q}iAKIVT!2GFSd_J`_* z#R2`6NcNc#hltbn2R$G%_ zq(f4d%#aH;nwwurdPu96?Z3%M68Vor&iGW#2*9lf45tW6s|bRt2!_Ckd}Cvtc`5Te z-zzb;Z4m?zl;^g-r*S-E1AMfufF5uE6f(8t(}Q(79#zl0iBuVbiyd`(i?)fZcJu8l zZ5h%aPk%pQuBjN|EGqy+$q_9*lFxz5sd8^PBaK}&p51b3J2Q!NibUz&rIdSM)%<1a zP(S^|LKQri0>uUcjc@fI^GJE!KKdDgRQ4wX5{@n4KFiC+z;X@*$ZxCrDhU z@>d%oO=fq%2S1=RNBxf*;H5j`y+CV!x((!E4th)q6$P0`gUItXHHrlI7Dd1vG?@j& zrhf<}Wc!b0|6eZbKWyv&_@Dpl|Ao2!-z-<>KP=at`VY&szyJRp)c;akE&runhGFP` z9#rw4C!==aL0>}ih@(FiEcUO4{*R|@+=2quaEvCM+|IzlRAH%R!F{fs}4095SJ;N}R z_>b%#|80@@DL6!G<|GsthDZJc){OjjDbAMVS{qXdhRQ#!u<&>ApLu{iEl1|tW6u!t z&%;ZaeLcyyZisgpZeEGR=PXZJe7V-gTDt|V) zGt`9syej{|zQI42HjQMSccSc@{Pmv@{JY8VuNAsVzBRFy=4zBofJxN&9_gAUecJV& zn?iPd9FptHrIeDnw;Q}Mz2pTNok`PwnHfmNq`wT%q(9>^@_@8b^~L}o8m~lpPB2^u z{P7lICTm3`6qd(0b7qBxV{Oh#TYoXmpg^Yy3XBM(sepASK-I~85~T)^Q2~Kv_`v4h z9UMGGjAh`45DX?zJ<*)73ter9H7CS-4$O{I{avC|sVf}P)o0T5N1QpxGlx`w%@^!a zsjnS-1LF_B7p}ve3Pj!So?}Jvq@?*3Ix@A6&*9o(!UlUtYJI1Zh5XFtLMO`T~B-|H#H4AU%UMt zMRqi&paIAq**-slk^!DbfFxXH)^<{ z5#idFbc1Uut9t@jEIn3Zwi&OyPC6iOa3jdS_UFn+;Au`IOtYUv7v3w*{5G$C_M9)tVa zUKF)p@lD+$_`yO#MF&32sqoa!!2LY6g$JPur-k>I)`$p2=oO2xGc$i$3|DZT%3^nu znlJdxPGF2pN>C+$W1em=yJUZW@ii;UfA4lGPDvYBj=D7md_&_qLC_hBr^u(U1bDvo zBNZP10+G{xJ#7~*0v9fjkS8}P-CR-@W+IbIC^gUQwDd6WP_^o-P+T9dKw)jyxj;LS zvz-`U@M@2GuWY#=spx-hQkbPaOtGAZzYzlR>4D;~9WiLiW* z&j~H{4JxbCe&m|cofW$aZS8EkRNZ2a_=~~OYP*eD7JDvA5jlSjifL^%0V_%q1J%fr zm358_?HKpor{U6wn}0*UWY8gs7U=Qf`2aq|YW|K|o-`CyACn-a&L~ns!DY=8K)sL1 zt2?m*BmFrnPsyA`r4B`1O6U{*SjrWb*c$$ZCZ~Z&3UXRpi3C_4D^g)O^LA0p_az89 zCV01RxvG^4ef@u>Ru2^cQb+Fhkh`0HeeGzHX^12mWa#Vns~wPQDt0jM2s=hfKx+WTbrGhiIBr$~J z=xbKn70A^YK=n_i6=es4jzb+0Qqvs22H(N(XiL9IPAC~1HVg4NALw87iCVjB`EJ)F zCj($EiBG4V{19o^yr$X{E1u2NcvHiiAnJ9;$PCG>k@pv+wZJt=_% z8KLlSWSD<%y5ypJi+>Rf;Fpb|0ImW2NqdwjN8l8SAUP*Q^eeGoDK%p7uavGj-ll~1 zKEZmosXLmQ2#myraC5s^BIvfVW?iUUzJyVm0t^t5^HB6=%xZc!*{f4XvrF+6D7;|p z!Jg(=$JEVq+5}!ARV2n7Y?1IbEydpq62Q-G-co zIE89H!gF0*eDMyn`(a7&QPu`NLD9Kz!qo-WJF8yW#MEoUI7s>Xa>>EW?mg@MReIW1 z&_#co`6!4WN|JK0#Od_DqfU?iI@5g3SFC%-?qf`DmDLYr8vcXi+qTHZk@&Q(bfGxa zw|(n+;V9LMO-LnA6lleX+E+T$VgWzPHKLPOUuW!)Cqu?D?KksmV95^>%p;UyATtWi zwk}9>lL30@DuuT z)hRr-&E-|Rl))LL8zsI3FwOgTqH58VLvb>{s+m{~<|hfu`t^?{h&(kqSKhWCG3 zDqev;Mt+Vw5x83K<{(bNM+Rm3PQ%4I4Lp~{2R8Qs#;cu%C0tk*w0-b4fGqQzKf)lo z^>HlK>fU#HHee=}t`=n)JM1Lt+CvzU*T@73D)c2esx}^BtSnFuxfhBW09f?vw!&_c zj9Y5r1jf~=JHx%VSc?S5J(9DgK&gL6a+!7C4g-Y%6QZ`{9SCI-9x*)(Ie5+;tMDsv z--zm7BuN=!|MbN)n1{DQl!-)fO(I7$tqL!2E7IdND2elIq*??<4J)EWF1Ar4M(mSP z&)#Mmkq0pbMZ;||34CwG1p81lf`ndV8z6N6oWey3E z(PJYZOVC~CS)#6upWnmT_;7z>#fqs{ccM}ITy6KQmUH=)qow_Q*Qc)K8=mnd$+>zZ zp+|cp@sga@KjJG;Qn>QJB9{RMQ$&qfTC%Q+wKoRGV{OL4f0Q$yK|QU_3I4d20&m@A z+Q%oQeLmXAx3N#pbTU-$RxEd;4$1l>x8;xf^fvP@R&HDb1k)g;P?9DIOB{|d`%b)nz9D}JeI^pD46eC@;36(Dw5lM_7uq(43N zD4jvuLtJUD-5{nXX90yzQ(0~*u9hdI1(2S6?3milLlStNCNqC&;Is_0IZGACH6vBZ z{Jwpi3OKhEaw5u$QBVWrjR!MDt9t;eg3A>ylcziG&T4hbxSLn*pVOwv=a>%3x0Yny zNf~5-=NjDsi%*!{yZ)I*XZka-wrgoU=GPVU4y8rO&p?kW7l{uE~)nXIaAE4$T%hfixpxfQ9B1e|5s z;Z}RKY3F8jEUa}*9=1?)NNisz4D&<7$4J(8yXVwUi38Iv2fh4(Mui;`&F6A6sPl_5 zCl(BJkFT2DCg1Mm0vI7e9lN1pcAA9<1b#_? zj$}ucp#=bFB?fm_ZZaEeXrM|mlvDbJ{37ZH-{pUPQZp`Ng!2n4(=qp%dKw(*(+;^-yHWpD= z-zg?YZ!`xeg|ysk%BHnOQZP-)Ww_3<{TJZhw}L9zof$IBuA%n#0LKrQewrQzw$MyM z0J(pQ!fj%A?t+){T zgF>yopEx>~ZJG9HHlA@|0?UsERvtk=ehjHy-zdpW_)b_oL-ym zS98u3jMO(XmrwW^;5w798fbcCNgM?n8>}_GsSjbw=3M$Kdh+oB71Hpf$|ZTk(;$Ch zXQAA45g;7Eny22Idm>-^Q-2uuvxugJtBY6{svbe!JQW9u?H=%RuaT%{HTkk;x{ea4 z6$hpTJK+RY2f-Vdi{Q@K@&Y}1GZGRr+(#~_Y4-okPTnImdEdlFr%9`NmF6zGUKLF-`9jF z3A$?s$MT+31a?O`9vb3(qE|{V-Vkb8e9$~%!Qxf}f=)H?pJw~io#pB0ip&gA-M(i5 zx1$cHDoo@IXh{PHZRs;}={9zXWv@QTJN}VJ4;*hGB{H?C#Yqes_w#>VJG!?dZ}mm= zF_J*VR0(uXcpMAD4+2#ji&>*87ogvw2~}M<+8KmohFP)G0pNALrN&6}7~n&A2CPGK z)Yy~3yMG^NcgL>@B>-vOLq;Kt|3O0dXt0vAeltcyQRi8G=ygz~{MM=^sGQ?HGpc_= z$?NQE@n8$Yx$%F+tDkhIWY*?CpTG zUs|!t(XiZrZQLB4vupKWcip)}GZ9#Oh$<_iX&jbIx0!{u4u5}#y`Pg%R}~aTT;D<9 zOcbp@B6ksrQ@Ub)n}J)WpAs3I3ia39?j5j#8xCC` zEu6xCq{XaQHc>0I=#oHXXY8!l6`=3 z2Y2G=3d7N1A!L90&?`u-WJ|36BBA(;9|tJ{F#&&yn)kXO$D=`876FQ%>>?iKDp0qk zhTQb@p~Z=U?5TN8U!v78d=&B0s@O}hLZ!U2&T;dnQRS3lrWUkqp9_2ppt5n)Dn6;c z5`jN8$W++8cXrAVQL4hfks5f}V1zVzJ826%+c1H^-?x9B-*C7z(nGN-mOkTHot2E@ z4Bj^MBx!Mbl5^C2Y6Z-W`vf^JUWyjJrCGueAu%&hj;NiCZ0;Sl1-XK7D~*ReZrZ2E zczZeUnNdtNwkf|eM3I<(3-Nmtd%Gw3jJfOjvrYDMvo#FYd%;Zy#pXWg64{s}wVZHgR1sb>z0_=Td~ zETQbW?)W=B+deQP;xm{|SaS9ykY+e@2%k}HQKXDK*2#AX8Y*Qksu0uvs-tpMyqJCs zuK}YE{$5|zAs4*J{xh?7Xcd83KHY$vJ!JPiOmb#?}qdF3!oL z;Ug#o=lsLom;BmiAl5?{!pJis&yeU#$fVLcEv?}Quui6>@8{4L09kp6&KK|am#`sd z5P^Spo{7<8oAy#ocwb*9$uFXtZM%8TFxhS&@GTLp%S3SEMW1MYD)YWwc z4AS(Aiw`WiL>m*;v);DpK-w|_jF-dt?{$3b?z?;M&K{m+QYWa!ii zSNDS&lRoJ;)Y$dO_>2#`Eqx;vtb8%T;~sy2&S2?Kf8-~B8Xz#I6Mq*&cds9%fy}G4=%_3o{PqzzafjE;uhdQJ5PNjyv7*DxVY5yk=mepG z@rCsXTW-E|j`DT=T2>&idROY=ZLY+$N-GH;=q)P^-V3rg3W{*H7&9#qDC~dCU zM@R2(7Q22y32FWKsxs0e^~QTr>rpDDS$53j;rUcSyBG8*=5|Rwm{B)MW-g4VF4?d< za`4NM4@#r}K}Ieo`h}u3Zwr5gpZ_Rw`V@KT#r2YxEceEvpRJsm2D;*^b3g7~R26tnYk6^AT{&d=iX7F`GuH z--|+^#Q;G1XIYJCriMz<)ZQq~UxLyyx+(3KavSjz*FgG&I!EQ`a^io7ROaXV<;m6N zKqk0nFz-*=fc(>?0aI#pV^6As66MBxW1HPATF(nB#vTO*?p|#O|?@w1(a= za1slBIHC3vQy2>K&|yX2#YXWHPnOjhao1H_GYBcPaSj^OBqVoo9_s;+`I#l!Axl%= zslp5Be$_ATr{s`_eHS|HXHgYh+K;k-9A~UpJAR^z_br~9*DN`Xih|F4e|fL0WsXE( zUxZf>0M19D2DpFltSLKahf)y0?{h3L9yW{gj~~Qd;~TQ%Qsfl+?b9oMhedTR?8`%c z8&E{6?{borke@QLt#{DrnfV+Y#PizE1uWtsBF z4;)~54dO#L1pZ00DdYlaGF2O12tO z&MkOEO?SU{51aOE1LlAgQrpls({)6A3L1V;O+4D!snm70Gsn$Qm%>En%>CbWYIfE95-#KKcdN=$bafKGziCz^0p z5fzD&M+=;uoNb9zdt(AsCg@|q&oHMnzCQkFq>TJLIYgc*+J#~b{&r|pQ^AJ;xoZth zMGwu~>588jH5oIO3sCL-t!1nc0n-{4KQUozJpniKy;E=43@!4G zh`^59$U7}dc&%Ckmsu4l2vt#*;-$sSg>A)i9^zoQr`8RBNxb`yelVki_Z!0p`;=&3 z+pNWbakZn3EotAY=&&l4=c9_s`21&T_VdG3e?F%*956L9M*fmyEr$>$EZl#H7S%_@ zT2hf7>s*MzTn_zIl-5Z0^HVL%Q?U7cu@sCK_gw_?AO`uT0w+tn{&-g2-vrUyd!qX1 zhTJ}_F-&$tE>HvE65O`!`_y}k@6yf@@gKFF&IF=IE8GBBJHk~=HE&zoo%Ih-?@Xzp zYOlXlH`pYkj}UKdRrB~QNFsj?(hBc$;;VtvKLf`rpik_>Yp}fYd-<1g$@3C~m)v(y zIrbHvMqpZ>b`eu6*&lBmB-ldU66q{^5$q|H-GDSX19k-1B&fw8A<@b2CumMww(z-6 zPeSb+ruq5KZ20zU*?~I9c_`icXODsF8t!a(h4w9$|3C=GfF_hZa>;-99TTXW(uoE~ z>DhL}9W60xVJKlP6Fw|KZ%9y^RteO=Nx~X4wu(ZwGo(8>bB2jSzVPZ_%`_a_$ z`EUk)dOkoP7v5SuQdobB16et=K6fw@>Pxe+Tp?CUW=@OKr9%PxWp9(_K50p+gnrAN z?NUDY!q&+sg;F?Lzo~dUt;kjll2#mQ6vTUVJSDbA5+{Lk8k``c!I*=o+BSIV7-J_E z5=Ej(_6NK`2GS-|>TM(*K}14D(;2Yrq_Ln?D6S?zxG~?GKj44SkMCq5wK)eDmVI(G z?mXK=T^w8NLBUsA8hJNk-5Y>TFvn0)GgMFlCNtJwpIC=Whm`l|w+-+OaG!uz6)PoW z`3*xEU5$z`2JI^|oT&o>=EcFNKl`3~K;EFYp33z^{uOP7ta;gh@P^#NH@IM>i5 z2SQ37TeDQVN2A-Ou$K8jV|tl+ogH(7aiMOEd28Af)HiJRywKR%Mc7^%E&EqM)3h>6 zk}c7SfBBlbv>iCjMp>3qKum(d=l0%+O*x7&7go@%T@##8rV6;y5!A?i?Id14>^bRI zPX@_deu009L5QBjaR%!+1U_IJf!NCYfqw)U)Ye4gc4rdJk-+FIeU zTuZm4@!whcFRA1k=KkoEK%Hlc(}#aZWob?0J*0ng=tj_zmN~~)OG`hhPNE)=l$fT2 zQpixKt$qhDIK`kZ!wKndkCm~3xvPqPFpd$f*^;^dh~SA!UrYOBJ+e#9nw>&pRMce( z^;U_wM70MXOw(x z1_^^4$Cg0Pnv6WZ-<-zf!o>1XnERaE>J+wBsk}TMDPQ)A_?xnCoGwkE9Esm zn4y7E`p%^%j*TEEA3kt6nu`~9gV;l3( z&-3uG!`$r5D*P*e-ZDf<5xl=x=I?)JH!$=7Ue);vf8F5oQ^f>)KXxnfZ}9+c&jud` zjU@JI>i0oWwgmH9M&()KN7e3>hryuO_8(D;$CeWm@WA3FvGW1H6}A6}Q(4@nrmVwO z+%&WSyEbPt0k9p8mh3!$P4-@%V2(iRFIWs0lLy`t(i-@!9d>J-nc@&>yO4imNxf0F zBJJw>_IrHjApmwiWE%PLrrJ0kkn5u}B&!vrd{tkFVOwyDkRY9z=}P7!8};0VQabhp zs+l_izoO{uaGnqwbMVF->s8{N540y~h>5_6diQO$wjI%X8G$qgI6o`x_-kUFauFm2 z>^Zp%eOB`dpP5!l*T8zi|oSOFF8H!T}O0=6B|X0ml4b&Vfa zcBE*KlKnCt~MoF7;EG7&m;iCkYGs`y35Rw|% z%TV8S`$Z~2W2je5d((GBQI-9Y_xmUt>rCbqkcaT%XsV~{%^k1y1|@$Q-|RxJGXk+j zKy2^#4P}AQ6F$lScF`C-B~+}QI?o|q@Pig34L7^tF0TM>9Ul~nPX~{&6`pH-mNiNw zrJHOD5;>_z>MMtRHfZ4!^E}~vD0T8_T|UjM=^=Trfcs3|IuvmMA6v6{ZDJaq2CNI6$_Z@3@8(#cl5sn>r6U&0>CJSPZxKnbC9v|`6g zTERi=h8j9g_em2X8-MtfK0}Wd(48 z%bk3h^1qshQ}iBx@AP9uWT8+-k|8pYOdo@PGqHbBP$>7`BtaOV2B%T=^KbW)aTBS6 z?5rBylL^a-eeQUYe>@mu32Q(R>n?nX#jWOHL!HSx5C~w;Rc@^D;X!Ygy4>dSSvsMu zOmY$+CFn_?37vTyeh)B=nsF_8p!`;xVH-qS0?WG8n)}RPPByKq>l?kGj8;NLu$jHG z@v(o?HAj({mF}eK$I8%kpb^7zSk5NRUDJqCF|krMx{(Lmdj-3@n;y%EU^sF{z1CV{ zA6>&t^AOI;51b*#sfXw{Zy0q3RT|Q|u}7Uh4!1(Rf_8-9y34GWp+>x%v-Hp<gl{6}XTK^)l^IiCnPr{G`OLpx3LUQL2CW zI^UhVg3XjwVHv*0n0omY)~*P~VWNoeu@z=;6e}?EJlwp{u6!bmx?VYogDP47x3q9X z^)5HksnceNu3YhRNMQzzd^YkD5uziRlnhyUA}RZv>5K%rQimY0-cR7{8h7xt??zv9 z_`}n=8|c$OSR87hlfE&yD=ET?788FoMP_F6DcgI0Y^Z!vIPCu#HnWcS^zGNPlhsb0{}7cM|4uW1xQVd6>h z4;T20*8{@&{+4AyaU77y!^5aPPuR1avah)rP@Z0%aKDi0WEYX1SM4D#HP(NIb>$xO z;>y*koU5||{ch|R_z&B^-hk)7_o)^#_c%vnh_m0EYb$W94qZmYN`NJTl4z`mSI472 ztW@Y!6aDZW)yr2kf^_g;KRqDpL_mQ9_U|N{fmoi<=?0#!j&$4ocuwkD3N`DtX^IJsNWmwvTan7(U&l!*yzgb~(~ErxyGt%R+xK4KCKj>jEH) zJbtQ1DufA?P+El8bL$YV5bi};`-k`Uw#dLhEEu`E`}o34Z!(dbKVN{+V8e!S2a0Ki zm}rxC1VA?O`bXf@^ket&JqZ(^T0~reL|*f?X*d&uRUd+$yfk!?ZdJ~|ULBC=zK7J0 z>Lk03!you1>E$+c!}5RZ)H969^DnhNOpEr8po*ci9CCo?<aYNM}Ep7Iv6(Ns*>rjvn`a@>?@p-Zq+2UvH5?|I7a1?4nRf~!25?F z$rhzXFFc(&7D5hf-yOkgKLtoWb`0|etwU(d-lfGmE+06lslOhe6lZzjCtrKj1LT8ZAZF2paTr=;O!OCOPf2*5Ye$6MM^MJIug|wHtNxb<;Iig5;j+vI3=%wsmt@~$oRT>+l*-Pm&~krdg?j5NcFqK6aaMTaBRK4Z z7Ui@ZN6ZVD1)q>A&6k^#XTx}Qnlv96b;LeRYE3m*kbelOuZ+N(;X*K-R z9dYXF&2J8LIIe?y~^cpwh^2xx!txyRzZDn zC?dFQp~azG7z0A(tedUmH3Qw~GX6JZZ_=%*(sT>|AR8i}V&gV4A%Ub$N}(sHNu{8u z6u*D|8{k^|?DIa)d&m76XN+~MiHw*r=ZxIsQ1(Lm=*CmP<=TOcB)!(xS`qBe>e4$_ z>O?fIJG8HPUu>vRv-K^S$aXwm>9!VjZ+rKAgEZJas?8;}r)~Tyf~RbsJ1sF+yZ5Ml zKJROooTi4}*PrdHzC2G}o%m{RezLc6?k0cW53iN1vwIkyJw}V+u4zBT&^`@INZ>(+ zxxE+moPN~L&r2}q^7u}64G5&`R&I~tasB=@`A$oQ6DHyD^JF4<+ydvq-}@{Q&7^rD z$(^>^?%1`5x3l*&L9IQ~4UBFnFDQmu&|yFcp_41Cj!@#by6md>VHqCTZx5mRGEoiC|Q*oW?W|0(@x`@FlyWZk0TbGoKJ_^% zo{Hq8rFWs*IcH(awwKkjYO8-@{duMu&DoW0L2c80pkCCM>}DRdxB0qR1LqH`_viNB zvZ>-`u5j&HF5RR&eg|1yy(cvtKjGt3-;bVW@(vr&a#Y!4-q)dIS*CqJ*=F~;pC;1? zP3Dk@7T)dNT{nvx8Ky>UTd+F-h_j0?;pl!O)z$>k(Bd-KYjEQaML6i&9bsB8I1huH9aJmBF=yM)<7QO-a2Q%R zPREsf3ktd|7)pN(hkZ5L))3cxa>~Xy}ClyNf7UkgW4CM-x%eg!cSM>vW`;ALB#J+mPq!&&ffi5E`^HXUGuC97QfZ1 zOD?Zshy$Mo%x-fu?DG>mV~BTo?6QqfykXuZM}?1caOr={aM){z3)aISRwP*GgWO0d zUqABvM)|Ux80DJDr$=e7Z;Vv5Qh9#_LBJU~y{%#@*<_y!;$}N-#0HFmyr&n)6Z;g= zVLo6W9Ay2u)6X5L1E21-19iBl?%Q_}ZM*n+#Jf6yq0y^L@C8cN7HL z^(sG^+GT%)0^gvzH{A*fe4}^wbXRlN?qMbGKH4{8c5Jfe*$*P)ogS_1BxGu6)|oR_ zN!!JHqe}(FdW7b^!8c(e)m!vf*=;WfO&xA756NGpauxXd#~3{LOmLz|<3hdMeBi1X zB}rT54Ig7WGT45-HjdEq!s{hGD50WWCuY}8t$%-2#HU@e&wWMw5Bt96Xr=7;S2JCO z9WPIT!fv*p&|hv9G-`N$_9w!5_9JojP*5gT3hN`-)*t7h*N{D*!?yKxd$)3%*j$Ah zQCfMA>0|7bhfS~2ZMf!Ji?D9WywtP{LbjJ>W}qNAy1Xs~NV-=^^L|Zn-74>*=mIjb zS|fk0U*bi`twVsjTOZa2FKo;EwoJV9mJ6!!66poEe;tgL-F6yy^v?U5#V_XUu7+t8 z&SJW+_;<@NgSnrwjC#vHMB>^v zK3k73tJ50sP3;=mDZ1`Qwm2Ta0FG`jpk%)aWfv5mHie?89IqoY>@hWZ_x={-i7CC! zcF!b-pzsCxaKiWJhhrt{@^lW0{WfFn)7Il5UZ)gCr|}TWyZG3>k1Zg2$*m6j@qK^c zwIN|hT(Y}$SU$EaCtr#u^fu%&-((O5^HFX0=-xk9#wpoXB6BZC@8x&fF!H=3FMPQC z(+DTJb_9LjgyAHvrd$xUmc};L{`_` zxSg!qbH`F~54!`}#O)b%hooZirg|A5b#pkOX*)D8v)5K^?aR{#4fk`_9kYM!N70){ zG|7(i3HbJv>%5>PQ7VUPClH5SWO^0f7E1O!#rN$AaiduzrLO4AYH*9qt8n|Ld;ymz zg-4c22H~*ieuv!j(9n*E?3(hpGX{z zkun~IRjxx<_i~@lu$wMXoo#cU0EY4|$7+FuH7VmS=wt5U7|R&SySd zUHm5^@$-`RnBFA~#7)MT9F)wmbNg^uxF zyQ!g0xzK1E@gy3`B?(P$ghLLzOkf|Jxze{CXtK*Ccu!?J3G0dBsZ|?VyMG=av){QE zsO+@-Xwva^;MG`8Hn)HG?vvjh1N+|O=%y7`{5c1_6BupdYCEHE;s>?+?fyAguGGxN zL$pds-{#rvp!QhXm{#`=*9BmB^?V!$+Pw9Q9Jy!;@^UBGX?n(_PfB! z^vCXUgjwjpnz3*Ov0V4W6}5fwdFySM?rD#8rZ_aD`J7SmPz`@4mAW8yAMt^RCx@=v zjljV7)u!1b+s%4s$Idok4}3q_B=!2;8lE2V^U z;!7uS(jiuPrf7dH`|7E;mt=CU;>tV5ZepL8$X+R)KkY6Z?~>DF+&Zq>CN{&CCvTE= zj8pV}bp9OrYgX<|CKnP*wff{(f9RG^3X$n{YZE!%i!;-;lzXr2tl7saFWe8?$zU{d z1^1wV!=;n-sq0^NC-{IXx86)c#*NWsrEcrHFkIiax;uX|NBc?wiFuQQvX}RJ_PNf3 zgii7oza@m05{oJ{oUdQkH{3>jFlKWSBK%c&4H%xKr_vy)U?`2El-PFjy<|=v$ z!d1*n{g!`ud8zWatRBJ(o$^}j0*B4+c;`ruQSZZv;%(K)=eLH8I~Pq8yS7F~jALkQ!00hwE;X6jkks#M^@4$sMd)|L<$5YPMB0EK=hErZV_u8I!pNZ#m zm;0>iSSqW>cvsAqGrpe9`IbNEt9{YHSGKKotYzjg& zg{e3%IB02eQ^cg#p26J^&oAv@Hfn!SY;-4XBk|VgSN`FS4^#h;XX1TgL+(4K*xZZ@ zddK44HiqtV8dv)zP|Mx>5CFhC_2_VwrfCC!AnpF^{Q#vkdlhaFcH%^S?$1Ymo>I$` zfqAKfIYG$3?jMI2yH8#$pFd1C4#QGSBO`pd#_@H(Z6>LzZoBP9bs7_-BHw=ulhGOl z>#wTf&`PP-n62=({IpiP@$R##^x26dLZ*6U*83|%_JLV$E>@fF&&@Vv62(0|uGb(D&6y1 zjE9SAteUNB#iK2v)4+?c5D?qo)S3lue)Nh2jzKu zsWpc-=l0E*SFh5ptLH5OE08{(&()`WRyDJ^PFcZUUC;hJUOdZwUJZZydTo?DO$B$j zSN5uI-GlpSot@LJZlA`##AG@pdp%6^rdPEQ>~9>u?gJn(G>ITR-Ha0vB*2(Xt|UUQNO&|o0AUj= z7H^owuoYLP+8Vc;Dbaskm-;c;q@S;a$t71O#o_$tVNntueX2)ARu=f137U9(QEMwHzWAP>j_a{qJ+bE%J~oE|T) zmnr4!;~ic?SwDeWRz9uxqB94^81{tauE+hTvz2x^uMXqW=~X5P?3!D#)TlIVze--A ziOsQ>?tnph4$OaOweJIvay!gi7%sA&hX=oQe66?pMj-q3JAN?Pdc{QRlX0wQR3|#X zL3GDu4>1MA#UU)bLEz|hBX+{YDwj;F#3M*ZW2Rt1vxe~_^%UWtHefm&iAykAox)_IdoPNy9 z^TMtQb!C4SgUhdrH>RT1x)jG}C^HD8L@PNWLB7);SQ8yjGq1;|F--e5ejbFgw3A5E#@kZ<(7bd- zRvdF{)G#{JP$`{zs@U*$#&>sD@s~d6x2H;5(fnd4W@9mRv@J6Ph`-@^+#FK^Gbsv>df0X4X1$J(pOv?!H9J9DZGE374O_asg9@TqasE2+Fi=6<3Vyz+qf&M zDz)>gw4ai$&AwBKFW$;c=~%kI1o#exA|0ly9(fcsMR#rFyyqkd*LO)@T*Yba zI(h8p=BmC6x2rhZMYuNdMz7BbxavW>H+%9ro!;Bl^M>9x_m8#cmhrlA%54M?%ePuL z-dab9jkVXaKI3!E%dZkct%S^Jw^~u9p}<~$B-uXtrN%$63Ey6{ZM3dO?b#eSr^rs$ zD;&FUAAD%JVmF1G3DwjHZG0TSl`+QZnv>n_?(}W+%pz6m_hoyuRL|h-OY5GrDDPt? z2#S?)S{HBemYUR9Qzr*x|2W&3-Goh-v?gc}}5px|Vqjk{tZ}Izo z&YeW}9%uFLdJ;fjt=h-%T5Sa*0REJ*%Htxv`Xj0Mhp0rS*v}+Gsiw{Th;VV)8wZcX z=T{K~=T}x|j3#Tlt#~~v=MvL<<#+-2$S$6F>pef$+ByesufW@4bPW&c#v!JwWIJyY z-4#=Htaj;VuSuNk#&*B4P0^Fz+3t>i^O`Si+hnJi`jz$1_o}rfxlZ^OOw6H-3?~&t z?}aAspt;OWM$b<6?%e6^e>Uu>)jYF5Z{D7}-)>wx+{wE=`RrgeB;!*EpI)U)(Ro73 z;dwRhnW%10qFZXu&@L~}b~WzXJZ*Q=J5S}K8x^v+PD0Vo0^`*Tn=GE(2Y8=WMih*DGM?hk0h=ELlOk#T7_oDSnAyI$p& z!V?J_-GkZjuO?y|ZTM<6-JT9^|tyxn$k)G-(_(&l!52k_k)`%`Px zNx??ip^e7qrur>>bKcGIpT5&_Ey0K3dc>l{vcYK+U+nN548y=E`{QFL`nq=*Gjdzo zPfg+N^CeNXdVMUV(e@3p-52OyuiNZxQ03nCU}E`A8V}K#KVxD2X)c+=IF@?%P5b@; z>U0n#nPt=J9UaV%AxOu6=SnKMH6z_i9$GsA1cw#o_4=CBxAEQTpJ^}b&(d}KWXvRg zJhNNa9wx8C6jm{ky(%S>@oqI{E2{cXow;^K(*mosAiw6d*T} zva$>iaVgei8_GAqKd!<}b3L;g#}NxIIUODOkxS?C4-D=AoFa?!GnZ(DocUB)CWXWc|Ua#tv1z#WzTKg*5RE-eSei zp@EOUSJ0GE3f?YcQExh6O0m+@UFPrH+4IZ zJH^)Se32#Ib!;W{-{SS6mu&uShsh9ccP~3|E>p=X=6Yc7>yxyq*PN>H_q&Y9dB00< znT#rLv+h5Cn-B%#L5;Xp|B#PUdNi!pFWC|C9z%TkDIk}HpesG;iHq> zi?Ki3TQw=9)Mio+zO|!p$G248_s?#XegeObcLMJ{MRHMRk6}l!*VB+2d2%w zw`9q`B33Y<>$N4J*klp7P*LQq>g#{h&-gCm%sq1%%URrZ-80_WDL32A0R$+IUD-8{ zNb|gZx>dqzdm*8$ML`();YJFWSL$xg4kt)tv`Vxwr>uV&I@_hu^x8uvyC=i>XmOe2 zkB86Q8-SolUqdEh(==i?0;gQkoAyYeO@4~bpO}4Bds}L@?|rd*A7A|Ce&4U0=hp8} zFc7`SQ^GM1^?Gy5H>+0|^ON615K7F+7w*b`MLr!~!=su$%r?J%gyY+{J_cPkm|v^y zP2L$jqoC9Clra6_sdwlyAo)R$JopfJ$OZDcDsjEH%<}qFz>#@iYMGXN{g`5b&qZ;% z6wzaP_Jqq4#!MUO&^ZQzvVPoGUL>ZmqqORM5Bu}!K%n2RsQ5U0tmcKs`&lG{7(Ud0 z`ym=ZWx&wJbI@al6sB2DdBw?rMye$%{*$y);LktLy3Ozrgic08;;d z{?EV9boh@W9bT_LCpvr`8(=R6SP!YcF+XM%Y2GdXKv|A00I{;1(6F4y001o6`9X6i z&v!zhOfUN$fQ(%ZKX@c}cuynzULSjZ02FVw+idCgM25v9f;)HlGry;5eaoB8xSUvE z_W;NT_N0V)qL@eWq5vq%i44HHEhjK6Coat2A1)^*xJ`O=`2)&qy$2vpmJ<{BR5_Hf zPfBHj08qr`;<(##;=*#m!hGsPvb-O@<7iVKcd{T;0J6jUpgU&DVLDqkhS_(20I2!= zez-o)CoI6_LVrIi+HnKN13tK6mKjNt9U#|5a2i|sSZIfZ|YJ|0Adjk1F^Cl9z)ZBfW`zO z40?3*@A=##jVCexix6%WoXwPzIR|L1u$F zo;jJU$((=V_xV0nWsuFk|87db&GHFn{a%9@mzd7?AfG=YF0}Ie^lEpIC=wwgzBa@s zn42b``QxOT8tf^8lrbSOQ$SCfViX{t7(Nhitl$7GmlvU%B|2=NUp5EFXB3pr*T;W& za^IPtuM|MY3Q%mQIZyz-h=8K`F$i+t0BQ|--@l9ra4N;3IPkvWHgk17)+2@EoL@2l z@M&2VL8Croqnrl;q?szi_$Zg=_`R;|rj2ol$9c`N(JHg~an|z5e7(`~o&x{GBIc~p z9Dm}dz&<<5dA#I+>=G5yScQdIXv=?RC5_^t6QV&X7R^U>=?1~(4U6gqX^aRl6*1@Q z$Ztkvz6FyABrymkL=L_s@vh2b`n=BI9!G#;>&OH|Rs%sUkctNyViG_wn6eCnB9W@X z1XP5ya{|FKA%dyr08nvprPUQ@iXcs-erqs+qPk727X4A_vF1wMTP^s#GXx3g zD+TJ=IRG6CD9#ib%b@SAsL#VN4=@U{eJJ%CL+Yq0XJ_mRGfqEdoF0Lm0EqyX0{rp? zR)q^r`&K#Q^jsgybd7;2&C&;2Wky60aySi-5_W)&RQeBX%k_sY3>nozAx7`eQROqUn=~r z|4+`uUwrlB0{_Gh_K6{ZK*AIuNCH^`S*>mJ9R!x@5pxo$DT+n*5Gg_-WDZ7;nxLh! z2s#9T!2+70fna|GNQNXzBnWVRykR-unaUFh(#A|yw3Bbj_y8z^peJjYnDYTq5)eP@ zU9V!$HgjLz0>l9Ed6w-GGz!4dvhLsh{F}$B{O=rC*7}QobKl7B3pR{c`RSEMYnrb) zV6-v5fw`7^vb;tsvOE6m@i383sFh>NLj_iX1o#FI*oA*CeEZDyB~jd9-_)d`vn+$W zeMqtEQ{9?-CvBl24ZhkBnpBur1i{gEfgDn`ccW0TN$lxkTxo=AN`VV)%uEDj5Xu@5 zVFHXI%OGi7$@gV*?5IMkmj<%ja8ZgVy_r|GbSH4fwEu{1EJGK!4vZZ;5S9t5zTihS z^9BZ>3W$G32(5xIK#8C7mnSSe;>%&?wGV!K#$77|<`0IJ@hF$(Pi%?u&6%aH{2$y= z{^B1$F-^F+CM41*qT4;l*xzCCG7;d{Mu{V7ic_r!phO!RR6{KGiAJPq>mcDW<)qZz zK9&HI5bPLj=st*cgE|C9i82a8Xiq_=xd5AXIUIi-kvUsE9|D4O8uE?F7P&r>z2#OH zkn)a_x6mCk(;k|KDjsg}h?6-!c%~??SeyV90qW-X=&nn^93OM$PfKrzZ%dwhxe564 z6El)s(_7+}XVGu2{m1Lato*GrdBWd5otvW8fBUuh>0B?jR2fhO5EB7PY_|Xk2n7+6 z%0qu7&QyS92L=)#rUXgn1w(yNN&ox$z@m-0FK`72d86Kx;716d+Yw{a-jg1SkOo8| zrUXVo0d*#c#MHobcUV6eN$fKu5n<+V3Uq-5dQ+6P@y-DOhC_u!8fX%w5CYUfuS)}s z!kJeuv-uAZA4^^GASOV6Z8p9;33o)zQSUhsV^FieMO^&U;DRmY2>#7ahWO|2AFM7j z=3A5IFJJYbPv-aE7cTq7rSEfP#)64_`4Y|kaH-`vmvO=k6n|~k%NjH!xkXwr2sGhLuX9 zKs*B^0fOh=FN0q207ar$#5Z#^a~&ZA^tEm=bnVd!0!lnUOacg!KwlwHxH2vQUa*kF z(i&jZ9La6{u75h41`rV4cB>f8+yb)k%rD3Fsu5SffB$%T`sZxN2h$_ zw)DckIpUWy{LLwkD#epo3Zl8>?c5_5UOMNtSTHrR{B7s>nGM1=fIV|F{1-)($dVg~ ziUv=3wC*J|Rc~ZbAa#QVVnHX^6bX_Pv2q~YigVsc1AmYgo^+A@<1n8qp-3VOlen)S za`QJK3=#pxQOt+pG0arY<+C7sC^?2=K}7`mlTHLxboM*g2NEPEk!RkxNKLZ@j}90; z)MnmD{Foww|7U*AdAH!)mp}jV>DU2tY^LD$qe+$=Ml@iwB3s@I3tZ0e1#^9WIXvf3 z4AmYZ2!E&|(#(V+o@-Zzz>wRK2L6UORk^My*^B9B&H)p_fuwlmQ+xv(v{gV(OSLd# zKo}V`{E>u6pAS_r3i?-mWs;Pe;*txlF2+9Yvv6o+f*~X`ZUPzfrUn77D%3`>pL2?$ zbw(u!5R@2H#Tvix%;f~dPcJ?Ie9b&FX6Vc_p?@X);g(;%K;uweYICXQ74*$frRH&Y zuK0=L(nHO1dF@h<@lRgGi&j}`-J7|||A=cDFnF%>AN<7bqV4AVEH$d{V9wO|n1>K# z67ZR$LTYHcKoP$@TZVlsLrNg%QI3fzDb!RT$pTaZ2A0CnmHC6}%TS|@BKM|pB7wAi zAAe&HAQAKgqhYCevFHglTw%diNbv&>4FowhVXul}*<(ZGwI!B)CJ6vT8($?9%U401V-86}0=b*DH-c+^))#25e*}WL{$)^51bcJFY#UKi zh2VIcweiC9;M~)V7kE%`nt2`@r$0PT`G1qcKRIaScw>p?O&iVI^7x4U>Dvn*nlP?F(|_WLAQbt~ zmJpfZ+3V9HA_5oZkmQ6b4} z6_N|=ll2w!BBZg50Hc8U#8t^y@2J6p4uNFh-7%Qg1A6%X50?J%GJbG+?vFq9^{u_% zdN3FKp83YiZFw1wU)}}#xjy5&1%H|qDa>PCK7B~sn78G)2NV` z^ZEpeY+V)aDHSdbZLdw?$XB8)$RZ#RQsPQvwxFmaE%h|_J04C;-wsNVB7X@C!tD=U zah3S!#eBglh`)Fx@)9oa4gdDEFPB(!@}hU=IL~qW2hUVlGAC=+NwdHA^WN|JnWJ3( z$NTLrlQ zK8yrGAWCC>Rph{z=HA%{GJhn21E&Db-h|yNZbi@@^sy>|%7GqHK+k(&1$_kgZ*9$b zZmx}y!SiP>b-H*lbDaux?ADAj0@u0EKoC1xgp$xDF6jR!f6^F0 zOt|nF8c5Rc?Ol+X4w@e)u?WfWHQyfytYQmLGJl^glS-cR6wIDOkAI*DumrE1KVvSS znS{t(g9roCpuhN&JaZ-xUld4U2zso70a=jZncN$oM-p0U{@WAhJW^(x<=Tjk9-=vq zNMX)-#Qj;vFCNYw6ZeNZ4S1uK!R-$o&b;Bf&XfGrjOCAZ6k(q&e$!tVIVoWme6QV9 zBpM&283#%Klt7{Y+J78m5R0=PG;hW7$}t<+yX{H+<_^wqB>NysW zG$I82%nKkW9v{PPpaN#?9m}w1ocRuJfL286$=sV%=i4p{h77RigYBtN|Tiw$4>^vA>ZfXj$8>4CIS zB|wTfw;>^aFaa%mlVld92y74pk$|MUBCto2ltV+2;y~)D3I=F61S^1IaimBSy_k6Y z%w70eo;}^3DB?)D^MG|Y<8MzSvD7v$B1gG5RTByd?0*9Tg;|bebJm%E_x^v?KkUt! z)4*@8R>2nsu)OXt#DLiIxJ~SF0%$OM>k=HO3Q6!@Ja(#FfE!0qQ4DbQ*D3m`ZBttG zEg+IYGAK*I<{0+!@>^*oBCv3*zqkPrwQoM0=ZRC^>(BY*bNIAGMA84QTYvMt{L%j3 ze(<@pMc~P+Ft2uwfxflIEVml#{Bqf?u5>pzxBj~?cA9~5`zu681 zl}IYjth%*U&_Mwif~2=#QN3sV4%WN~`b-8SP(a@(%7|=8Y#rPeZam|w2#M*Sg~Kk~ zM_%GYC^9bxSdFBbxsjO1M?V082uuyC+FGRXJbyDmi2f&z&OXY#bNtc6-HfC2P|qy7 z92Z|K{CaRpz5LS)i(P=-v%YWm*-aP99 zeLUBXsP#Df`u;zkLps}aUl_Smf;595cQGQKwDg;zNkoe05ttIs$3*oqR>W#!m(4~e zy_a&!4Hgv+Fg&AcN!}5TvC~QL4F+ z$T`IumGtoI>1bEq%|vhj67b^u+Mti#C6sD7rIVne{rIXQSH5Uh}LdA{q9=%t01E*8Hp=;7#Vq=n+T& zh(_>G=aFtX_t!-${0~3u4+mj?e6U|`q5kUYU*Ex^s)4*%kMGIR4(0ZAd#OG^UYz$d z?a|Zq85_9)amwt7#VxRX^CVBq{yb%WXV3vCweX#DwP!Le^6q^pHuZ$zkg>r~NWetm z$atFn-bPhkAwqBdQ=5D0kI!O!<~$)3rnAoyF!cRje3n18{*O;je&!6Zx^UzF$Nu-1 zC%Eu&iQ<{Zv%j$duJCt%{8P)HJ#X2JU;=_o3xzMpYF9Pq|E}93_GXU!t=qqUeVL@_ zTgX(d24g6ZK*S@4K}aweNs4ImmoGEtWD`JuM6nbmn3F`5s>y{Ig5#Ny{%^fW>TbBF z3mk~xWao6jYhBAzP=X1{Uy znUVd>oo7#Q{{J6t_T_8YueUFM&3OFd|8M@8ll^$vLp;qKPo6M0&rTNpH~R#!AW0*C zJ5^^BxLTo#@N;p|O}uGrdq?if14OsKY9+5hnrVP$t(#YgQ4mHGg}HrSrCvtbM0&Gs zd8tRN0zwszd{0l~L(HOoX7X|Q`}Z90U$5(5-12Ak>QM8~EJ_>A-#Gs-`ZD-WpWKVT zeelacMcF0OA1}uz|Mnz*|KS?qQ}69)6lw_1f&pTc^nonp)P`bAAcf9pV5h{OE9=|EYP1G1K2<1e0OpvivTLOg)zKX?gU`k-ZcbP#hl zu?8e-6*VmhT=Bq&(nM@XdrupC1C#<$KxZL9PhwdSGpFi&iTS*LKZTg$A6-Kz6lZU6 z0CRo*@W3vTmO67*tA75@*f8PQP;+7Chs^A&=fqHejAwqR%H%I!eO{Yy?fh%4agp7B zG|*CazkN)0|ICyBTH|=pNq^Qc&p2zT&0pahaT1%oTvm$Y&hf_BMeWJSg-)E$ z+JfL2`W1T_deQ2;tJ)I`R#chSLvdS305<&6#4-bpz%xZD=os_Xoej`If`B4{YYl0o z!1*6>fn}=;038rp0*jds8vVcc;IH=i&E4r6m+Z?6f9LUkKf3a-9+=NFTNy6+^)t8T zMQ!2L!Q5x&exO2%e{|HsK~#yx1xSSS>e&$YKR#sM3$i==2S^qR(-oQctTJY(4{?e? zrii_1!JLnzK32imoA;-bVAfYNpIkiV%9O?|!y=zRqL>ol512CIqIDJzc=l+1XK%4I z^F=V)hAVb|5G_7+byDj-5@)Snx3cb<-_NB;Vdp-w)Ib2?Kv>1iE@<{giK&1q=R=Uq zUh-Tk_Uwni<@;vcKFxmQYVN}knfF^Y!NS)D^4%vg%${)e+e;aT;&J9_Ki=4vOaJsz z_}w@5!#{X&_I&?^aliU__*rdPbUoWPU%ORXopwG*S#bI3z_q z=rBh|MDNx>XPj8(bM6;=;va7cDYO2ZRw$6@q7$d}s*wQEp8)mqd@Tjv`Pz@}U+UWi zGw%ME64DJ4fA3lJ~US<~qGwW&vI_wkl z=2ue?jfbH7;jlkin+D?SHLWQF&&T5%f<~76K!>EVf5pW=a@IRaPIDg8SvL%({PmBP zT;u+lm(R8Oi<>u426OCe@Y545ug=g(=KTP0HRHX~FZ5HYgo zf5+L+`r5e8q7V`t(oB@b*aihdEl)_*Vczpp&7iGFF!}#@d%NaFb)^mT2j#*faVE}X z>jMxVY=ObScQHu>#tBJCBJi(2)xvf>Yi8}e-*c+Y)KtYbGDcFX)gMnks&iw+_?=8G zmpriZ?XK?^j^K2Qxic9v$2|Kxag3RHe~gHX=Tq`!xbPJZLE*R;#O@_T%@GSG)yEqs zz9On4hrPw_3m%j%0Yf%Bbu0T>MSJdp)=Rthn8UmeIyVp*P+f*hWvBsuKE(#cSdU$Ye-_hv zzApbckF}eq)ZLdCurL3q2M@7=^6KEX9Q5pr63t%|!ULk^obM8}JQAWg6+^iuN^YZ-{)1lKzfrC*UAM&}M;o zUQog##Lnd>UBBNyc%9A1&(VJRM85+jxSm2|@!mw9a`@6$tEA!vtKzCqYa0xjNH0u7YRHv;H1{q7gvdR2c@$mQ5KiMT&3MTk4GIWKB?x{ z5m$;dNlA=)Rj>2+I!K^|@b<4|)aT|x(PVSWV0v`p0e;V^y_2nWp8EC{w zpY}l8JYq(5wQHR<*Fs3eEbMxpm zQ%`Os<)|A-HqO0S{8}gxSl?MoCXDbr0)@A_-6z-LS&~XmpdvXa6GxDWjit8w*ja^4B zobZb_hjWUn9dq!z!e%&vX0aeJCDzUt}B*SJN%Uw*2&JP;@^NT+P%y63ZJE zU<}RM-aNaN`w>=h=@)TGw6pcMniT!))lFoAR9D1wpI%e3f2i*TH-fSz3#l?L?aL|C zK@p_Rm*k&snA7y^y!(?oEh3zErAdzSzI4>a&b#5Kc}Ji14BxRo+Wg0Ux#{2fd58;b zGh8>$*}l!r*U2fiu`jM*HMR247*SHyA%=&8)|?x;5~QpaJ=!Ib!WO`Fx{W5~RRcCH$l> zVvI*J#-F48V@`D=iR$iCaJ!hO$3->8c%|zRvpC|&r4cWPXGzh2+|Mk&=!hHSC_CxV zo;u`Ne}3W@CtYI4<`kzcf9wN`-#qH$Q;s-y;ql5d;+`vW=L7$aaNn>wjp?f-g1Kqi zhJ)$@(sjZ#DI-omiHPID!8)x)wAJ^4YU+8KBhY0N(Roekwt{SaZ|xl-7S{h(fu_6J zPAJie#A(AiONc67{FmHfVrZYaaQ{9DgYrsHe_i=?^ByGJ?nCr@wudJ`c^H5f2OM=* z{)VF-aVEzY&lyJgu->ZU-{J=YG_JENNT*bse8`i0T*pnDdPMo4yFRFj`w;YFK#gRX z8~N$YDyo|yP=d0`#I7$GzkUqMK63+bQwlTfV*V#>pMBCKi*0jBY2v$;*l<%a2HmIU ze`SfUJGyeR$@~RA+$~V(OT}PvG1u8uGMIq%Z41Th!6zl=#dbxb^wKH9?U+Y3lSxKH z6J8-L7x|Do*ZRlVS=(ebg%^vm{sjP1$z-j z#$hrMLhYq5YH(RHlkSc5Xz%3K{#M%Rf1*^Ex5@sIf@!|E=7RR7XIf$`j5EeHH|=+j zK>3uZg}d)4!_&p5LV9LaK!eWkh)?3C3+UqZ5wRs0LvhV)|GecZrrSGdI?ihNO>fBAq* zs!uVpt9bb^4XP&fQeE&$M_h@*t+750?JVUntzB;`HvXU;qWEW|vV`fb^OSUbKn!^7 zOIq}x-3?Je;SNQ?tD^N}Q@8`av+GFfMP7?Ms>_(G;N~eQLQfTW4hHe)$vjuzLB&0c zo1#jqc{UD*JcBN<*6lP7F~+HSt84QkcI(vl<5-Tq ze+#NMagAuaUKF%yBSyMCSDa%F6VuxBw@?ei=OpWvu;hE_NgVY}5$vJ$5_vBm z3&ggW1Fq4XdIf$`wp0^(K=3Pu_20cWr~E$XF=&FW2&e{e_~$TN;cf^ zUjZzmE93aKu4^&5gy)ieYVu<1y1?SsmS^kW2S;Gn!kN8tmIM6BH59qa{B zi@?0XIJV>ch~2Ya#L&_oo9_W(gM@i%fDMOFF?OdVD7HL42qbQD$H zhU?kd1(7maN6@`{iT;XBf8nuIRSfZmeZxr7E*Nm%r+tAO_UU=mr4fO)NCj}(I?{rPhI8ADAfxun_=y0vKk2ky|8mqTnCXCQYqC^8-IVET*75TzBJjR zeEB0D<6hK-!_k%-f9pxncAGU_BW(fPZ_KsQ)Cx(x2KJJy%Cpzks!9vH2e$UPT_ZliMKNOV4wQt4tN^ zV%fG!-T-`0)|35|%#`;F^M?;uueu7>5=vXI`rAB?>+1~rag0}#wieab7Kc2XV&2Et z<_Ui~e{ul)CNIZzy|*^Qq4wuk->H7`HNE4pL_T=rGICY>DPi>AdB3aYRq+rfQx{Sk z7sLx=?pTd+4^xDkH)&GIL!47^mkJlIGYUzA1q{W}=J-y!4Kc+k7~qOmV#&7-r3!H& zfI##E=|Mse&)A=V-T_a^rfvNNB&v zl~jbPE`_dXEdJ{r<4IqzX<>o>VBgyM2J8P4aau=b_UgH>gEF@|$@tOE#j*XdUq)Z> z#mrq79;ng3>whbk&a0OcmSZAVg6Goge@pbv(p%~ktFom?rXj7I`l!bjm3TeLfi4)O zWieuc*&8E;m7xrd+~b04D>&E-~)DX)Kjm;((MsHi!nvcSnqr<$!sA|f3B0v zXQ~WH-MmG+pmISxc?o2`))>bwqO`nXrhSX_^qXTn($e!EdgBKVu;LZh6wvqOE`?d| zC7I?>2%B35=@Q~x&Z*-hkj;H6Q%?DUK(>mgOqp&1s>@ueA`^A~ve2{4A6h$39OENd zOG`cC=A&dBrym>i-)amqyPnRlfAbihK4NUo>O$4eTCy>N2j4TLE9)ktAUvD&-t%>k zwv99m6@w&lFUOi#B^Kd6Te?b0p`9g1Jf9JRw0TtNKMZOI%H^TvLDEA`v=pFQtrA%V z$yD$@lv`^9s0BlOR!$O9VS#`n(}cfK-H!SPo{}B%x*W%&E>Uh$9FIWRfBcs#>Jy&o zHQ1`7{(bo$e7)cCLdQDnsz8iq{)2VGlvYBT{bU(;Rd-hs=w%@JaR%hgbU6 zHPgsV2^@XGB-J_^vMjoqe_m3Uy@XU{4ASh0HiDVie9{S*4lNUX`O5;l{bFhBsH~st zFJaRm8Iji+4aa=(8Aj@dv*BdJlo`{a7S=BM%|;!$08TOL!#c%vroCscYBSzDlY56m zpqm#C#gdq2@9C575hT%rH^^71&%Qyhh+YysSR7%ztFDJSA5ai2@IGKRxL!RfLZ;qS| zC)=ew#?4kE{`tqx3w~-Z{;Qn*X3zJ>SY>%>>z1|_OG;R!Zb6F&UOoC@RSHOz3KGPg zi1(n9L#B&3%GBJgf1O5Ma!TrQMO5RueTFr*KVzxdAXv5@PaDH)!-HCV2!(b6I?oRI zFta&(DiYf3ft&uTL-vy!9{2_mS6`;I+2Sj~8 zZh0J1=Z+>AQ+BVORcY;Qf#a=PT&Fvm4=^P;*keEIsrsAWe{Eym=ld37Z_j+7v%J%9 zw!*1ikBYGt=rEU4`cwNi`r~t5aBsNtnJ%|;keWwcMcfHY7iXO)FEblPtHKxUu9Qb-p z^;b*vf7hwXa!H-f+m)^PL#%K)ox7g!IWNEC`c8VLe_gss*c@bqXY*EH^W$Re_&P1v-Ed6ZfeL1InrWJ6k-xSO#UIm@LaU;B7HFK=q zCHme4e=mR0#b)H;vidbcJ1)1luqhdNxU7t?&h*QHC*}u}O^~Om|3l7y#=kz`f~^id z@pE0rqx>Ia`= z!-#|U9dE);Ikx{^+g=}VE;yFI*SL&!^oU&|S)b^D#`hd%7H3FQ!Kf}qe0tk)N}cBe ze@=x=O!)vxqzn*0l0Aq0=Q=T16YWW&d(T7M!HBuVTFm)xHI%0sbX$*mtU-54#XsCV z5@}$(N%}CkUIOOS^s#0oL;nh+YYZXUKot0Ze0p8M!16Thh%PHZi^~{btgAfQ%awQy zw4g#G8J)~PZ7!P0ugqd7m0}c1k1OTEf5UnLOJ@e(zc@T=VRf9A^h+Nw#9GoxR>dRt z7Q36+x(oS_x(hY(l{k75_D%#K{~15{6FYz4e6aYIqu+AC0RQBC$}56c)V(_9x$GMG z@rm%>m=BX@H8My0M%GYvNub~A$6a}ot#0V1g~E(u3z;7n6?``LWJ0StCA(e~a6vYNjV!#AoRT#7a`++d+OSf0m(a2JoIi zfG64b9r9bz$2)PxY{e~XF`k%R{qP>CLk`z}BF1}rQk?i0`$t@|;l3~L z--&MKhk7laLD|v+ZrVj2e#AN(s-MT~-5r?t`B@%}Detmzjx0tnXO8O}|#Ct5) z2_HngTRk=*NS{IHF=hP7auO^qZXqmDN1+a*g=V(?c8@h8(xY_7Y+gcTw3^)P(HDk% zosKo|V#y6hJ~Wr8P=hHLPNJI#k9;GajAA>ZeAK@b3>OLNNW^aPf4Qh7&N4UESH^$8PS2~w`es2BVrUf}=AHNZz7`@B~2Sbsivd1mX+ z|I_}FX?J<7A!X{k&GgTjf2{32zn4?Fu=WyK6u;MosK3Hi^7}huc@B>)AgNGfl0C9`l=6PZ4fQ zQ{@D@@Tit~t~vDve>eHckaz0W>%nIN!%IjI>(LPcGf6Qme^hs3S(PEGC;EN61A~}4 z5xcaTuC~ZSS(ahmrNngPX2kPi4U`T2r6%Kypgxu!xoapH@th^<<8S1RaUK4}) zFVx4UxYdYj>2Xgh&T>PA-6N0keAGkm!!P)|j2~%Fb%a4s{-mAX+}mFP*)& zH@J@n9w(Vdf8~9B-2+L( zW#sOsFALK$sygZ-t}IcX1<~J66vtY5ZhdKlGL$ZNDK#abN+2P^bCjja$)^)qjQSbY z)>REhyN8n5^Xl?WvATJSljbHQvydoYKv3{PYU?lze|ZOAp_K!Jf zV@{2Jf5059-aS8aNZi$@ghUiej0ckGXO}pJ?o20_4lThs*3QyH-2q~}Vqw}GeN&p} z;8)YjJh9k#LR^qoJ-(wKm19)9JL%BW8=OxJ&jSi_fpHJr_R$N9ydE=Q<_)Fo2 zH6TwiBdRFeq+EFZZY@m9pej7Z%cH%Ng!jB$CuM}3Lo~@b*V=b~>4;R*ged8SwB?~{ ze$kGXK0e_tDGsEX4Z+k2_<$hY-!Hupb#PX`?BsRp`^ z?`^&x>yrA6BQBpQzVKDs6DXd2g;@?K_)O2SR&(!N2S z?t3uBE&$0^x#g~oyh?nBq}^vcj*{JMY4-@oT&UJq{}T~zelc5jCO_N??RH#Of2Ig> zSBAQI5~TfHk*VYM^256nCUvuB#oN}mHE)7bHM^pgw@Y~$M}F;0utD#G=90-5GFOeW zPu`>q>5ZG!Di3NMV)3BGc_U7D*Nr$`S0TP`J|CuZtg{tIK7o68`Hy&8KjvcMf5azj z-QaK@d;d!=cw1fvd-W2h$fb3#f3uPqVA6y}~me}FrgUBK3% zMnR2n3L0gHGG?(eFtJ;u-M-HorM^RHwziLW=uv(qusw0+)rVS?o(8X%q&>;P8HTExMZa@p_l7oK^jhb z^1QA~0t#^y%gG(OWt{`DCcfpx+&8^9@-<{9{@-J4@4$!slZ&y}4>}RK1#K&b^4 z$AFBoajdZ`UdQqDf0@^35rJtg5=^p97u1tmLA%#^PKq zlwR;vA^G>OPf5+jP1+ZhKuULJ>q5BV{fv@Cltl5Iny!0Fdl6p+S=!I$yo#l)%zB-5 z&#qDlai*6)d;3F2%Nc4M6GVOj&=g#tqM-kC{>yzO-MQ+=YF&Vm> zbZI=$GoMnke~(fzgo8<3RR&%%tG!wplFSy^|7bCD;IYJgGXOb2#=p1u60Hurpi_FR zAFy$PWwAfxD6ub3P-P>h$2jjldtHBqmH#bAVr%KI&U~^TjQvoM5OcoJB*>#ba_X0q zRNCi#@xYV__*Or=#sqUOTk)n1o|lMBo;^q;-|vfSrnE~c{eSG~={iu^txDWyRy~EO zZaiq^f>$!&Dq~)fxy;rfSx!;h!xq#`1y|f-P`osmxrtq;CTY8r$|$dfo*5x{f&_MQ zL5+BHsZ4R7q3O*`ZXX}Nsd4!MPZ5HDjd6IJ=Nz1e{}Y}9Yn{&Wc5ux5?bY&cxUP(} z(K!x>=_2NRTYvb=*_s+2EgA=Lbo5cnSim%e`{sAdoaz>y<`=v!&v9`g*tZWST-=D` zl9RggJmljt{>wk`Z#MoaCKJrF;C}P~w1HSd__-G!0oERje9=SvJD0-2b~;-x)e#$P z`4skJT(-PEaVn0sF1&A+5y+)x!0Z_lxVPCw|3Szb_Qs%hK1H6D5R8v%%}-r(1F5yY-te zS;&=*lk^tHuSdKZ`Hr=y9Ldddw?FlZgXcpr0Bnp4-(Uo;zlx<%8iP<_G;bl;;u4 ze}Cc`#h8eVCv8Rj@WPY$!2^E|2|5FB+*7-!1v9gSGHC$X-DWvoqhVmm5Z-xAL4L#c zHG?h06q-4Ip3x-B-9#JBwDIdKK$~hs2PYVLgz>zHfTJEtEN?A{gC2T4^F>jwoX`7N z?ZJ%xzD)g!oeJCBt6UV50ZSCPLPkU3mXSbpYAqGNSM}H6^n)#H4anENz$p`6g1{`Ik2SxD9$0xQU=&%B2|3rfK^anudW=KGf@nL>mULmB4-7wKk_> zuCjF>2~uP7%c=4E%%_@%po=-}c}hUD!08IEr$SVUEEUfs?Y1H@h=a-yGk=G=fvP}2 zGP_`;jLEXN3HqsraWANg%f$-o&&`it`)II!c?lHj#xm{d_Ah*I#W@E*JK4^^VYaP* zI>)mCQ!?AtMZZE$WOhTMXed(NG>xmLB-_r6NU{tz4WXBB{(S!_RyiT0f(P!tFD6~{ zAx%-_+jT;!T{W*OnOge<@qg9g4jyKpto-nMl%L>mme^T7^b)ZwDE~jT=-K6H)1J;o z^UnEH<&o=cYZ~Mjswsum?+f(jJVt1aHU* zNBZoU8j+;*Zk-Z)e%w8t*EPX?kjzjYh|jdEsR26T^2-sIfAX78d}^a#RF{YP8gPEj zi92wt?ThPq&FNZ66*%iPXv1A?E|vdQxG;^xP^`SZK^;l%si=fgLRab`zA}8OpB@@QHqH&`%>MH#a?2FGZDI$9L#^Yr(MXnGq zT=Hrbf^J`;Yk!i@@mz+~RpGGHL89MfI{R3qvk_OjnCvmW{$aqzDLs9t2N~BF>eQ1i z811ezzS$qFNR9d%*Wg#p2ikW&si!?zmM@rZWzaFYl$q$KUi%KNOJqzw6!8b3B$Q!M*6-z9?18Lv2%X)Fd1AkwE?hwc5<8#yTxrscFlm64T z{U0$~KRDPEciI8V=@(woE?J*ltP-BGH_RJK5s<%Qvb?3*h_o??$%=;Ahnr*{OOgp+ zPdTBAd<{kiS$7*=&z{Ju;4CR+w1PiyhDZPs)tFl*W%Js!vE$EMQd@%*|UtW{P1rfZJz zO?WP*3lPbYzpecg9t6nJg=rF7ev*W(AbV8G$ti<1Rovy7Y&gc{`|dv47hlhNXk}iM zOiu%**S9&HNzmS}<#xK{UOrC=@)4DZt3|@})qlJ7nUh4PM3tIzLGa*`#H>6uyY>ku zykD>4tF8;))rWM6FPiQXcItbsO7uV0lf9xV^qWSWEhLt+`N%U|{qcOMGd#^t3|Ri0 zF>IE{YH0r4AYR9GGvMCQN!Ou1ou&eXCj22DFffg&{dYxF7lTA zRbb9>;E1hgVDq`4eIT=-eI$-U$$!3-WL3@X_mM{2YvX@jLKLZo6Fj!~$ew}9OCayF=zF=6|u^wYP zKlS8CTm5Iu=~%<_&$GfTPbtQL5!ZOKO%8Zg)Fb^VmPKBa#qrD*Ngung${#9|5Ic@C zb}61iGIbZZj7idL*30RoZ!)rYzO5G9%#VgCrCxdE5<{N0%{THWM}J%3VfAS|ozV^o z5A^*Ps4fngY3jKzYgezI3&fC`^xES6oYLV*+skE=x(VlSF}b_huYqiaJE%zN=j%tg zoeKE)njtqc+lH}+uAGK+GAUSWyl=#h=Jx#2>9wm5e!(fGf2=J%VA(Aenrrumt3Ln7 zzHiTJJ=CK*!NC9EhkrTPsyWjlKgD3Wo03)8K&q^D2f0R`UJls5oD$={f$F$RipV3v zqckW)+6v5_f=PIx8*-zbFXoWlaZ^r$I{Qi!U|^8vE)fPX zby@m=_2sI}J;Ya~Q2Nz{NJ$$)Q{GbG{-&20JJG&=jF*X7XMa^uZa8w$VV$8sEZ1#t z)dx$&RPqI$>w9sqk3zJMl4bYB&h7Po^|fqG-8moi>i6?(El%>lOD&w=&g?$g^|MPa zu}RcNZ)Gt*g^V-m6;R|^T5`+9uee8*`I_JBB}Lqyp2mnTpt?;{S=J(rJy*2}r=CTKzR&43o_|4j=A->G%(x*{ZRkVUx>Xl4r8C-R(jGQ|HCieogVTP^hiNf)coQ-) z`fdtZS-BFDqso9UwjmlnbVK z4eRbDVDcgw+_!}mZJ=kAYq1SI(mZVo#DSu0eqkq5@55&wCU|L%@wp!j53uGfYf&^^tJukwRlG`Nv*p z2d|&CF=Ql49ha+V6cOB`@Eosfne5?3DnV8zBhN(Ni={KxZo%D3RM}TxoCIEXH?D5i zq{{ICPJc=Wzsc$Bg{y+jj8OJ0`pI%4jcZVjV+BBt=bI$aBzTDHx}n<0(8>L4PtucQy z{XHy5hukF#~diQ)9yEv4wWk0z?WfbwMYmr_?6B%*(fj5o0eP` zB2)DZRZXxrZ<2nS=*fIX7}&p7R~uG|`Kz8iu^Xs!xzF%Q%JlX$bPTqXNgf2AJRI3^PbH?`oxKKKqWae5yc zyxxuPzosBebdAV>!N34}142bLkQvnmo}m{YwVVK%kuQJ;s5_XeIKk`hc#h7j1%F}W z7T;gs_geu%$$Mae-WjOiC4(3O0VG2{%NaB!NHaIUBwL_7H5vGb&t(ne3MzPTAOx=m zDkuf0;88$e{C8j_xmFNjC4mHY4MdXH#l!*KfCT;yv|t+zIqWpJ%HIJMM&0LaG-8H9k(A8WV=T>v%S45%WGh7D{Ah`{d;&$ES<00{vA<1{hw z4Z99p<9`8l$S3yaDFdO!FCe%d!9A=x@Qt{|`S?_zpu*=5=lFc3hBdSua7hz<0nF&q z;CLtqjhv0&Ck)mFgy14FooF0CE00k;846hGML4;8og_wW#?Sxx4@E$bZ}1dYR9zUt z2kHSTp>63mHhBV&$YJ@nfdmd51b>vBfg0A}x>HbKMiA$Q6TMoY9L5?W`+SITZdnI# z(V;J$jq>ZJ208^a$&hdvV8Bs`$1wd0xzCp56~dKdIZ zC%O-BuyeA)@p9Advm<5KDV~SJmGS7%LzR6*8(2pXXXEv@>y#R=4?|B9dw;!*BJPdX zovu@*z3$gTPl>($97Q~^*J;<`I3MS~gSAdClR^ye)q(qn`v5g-RWL04b}k5_$94QRpCoMR>~rnmk)YkP1iW zWCL>?4~A>%g;3#99GgpzXnz8PgKEOCh0qMa%S5QZ7)Sl(+jaRLMyQLy-+E&p9t|AIE!OQ>L z{|G=`anvu@5uM&I=zo=&Q==C+&i`?rKp0?*bH$3i{_I$TIuzVG(@{-)Jh4xGpiX)T z>!#c$LtF{Bs!^wPz_;o{OhNJ7r8uu`*8!|LV)t8a_rz@cPNxj04~W$8xvT5xR#2GL zTl~H5&+nx0l?<^juF$q9w(5gR;Gv<8{16cqt!ms?lT{?SfPYMTw~_}sdst=hAscK`mHjN#rq9POBd6`tC5kh5Zys8s}9UGa^5@8X`eGVt=3u}`J8 zHb)(M#Zqs?EriN>Fi@`yZ4S?|Lit^@zv|m7*pAOJH>5$`&ObWHP+y~6LH{N*gs!X6 z&LNGs1%`joc7O5)S*?D1o(gr*EQ$c!54cC&g6oc?g^rH%xi=^`cjs+7Xt%1yNM^B3 zZt>aTYEG}rq8?CIjXT7&1G&R}Wt=O|fSVLi4KuVg@$;W+IJVgi_pD=EQm+h2^I+VU z8Vg7%uH^P8TmQb!<(C*hFGM@VeLvd39EJXkL?_62awrzPf>(*3Ws*XFaQu2Ul$Z#yW{J07(g99 zukrQa^Rd0a*XyzU_3;uLpb%fCSgEr?FoIxE<3s`y49)>Dd3V-8ah6f_6W8!+!^~-)7-00sL;6qTHE}j&p23jB6CX^JNsy zlP}R8KDB>Dn|OI@e|cz6e%cp!|GRMb(H)Ogww;fTTRkwf&y;={g>vIJWBXe|vF#Ju zl*?1w+bH7Xr>((fIkkPZ&pth0GZ*&W^_Nk$ujs46MR-dHw%2rQ-*NE~Z78w#K7Y0q z0`L9P7N2=}YRm0?hqG}M8Qyzx9G428o6Qd6LjAG*VN8U^d!O3Y_}t4=+t5D8scqMF zx*Pq@2t4mraL}=fu^HU6R+;$dpxSjHo6N6&Y{%3Aj8zR7^H7{@cP|Exd)_te0Ul|$ zhY~crcM5#g%>~X4WS4HIld= z9eyuEoXrZzGjIprJK$-E)F1ba@cD8aA6LV@Au+yxM7|p8Kx)6oVFTn1`y%acL&99v|j{O~< zs~n%pmDs;G{CuOy7JvUnD6t54!uNDK-k%wSDiD zF8=adA2^N$j-&Ty`$*&S2v{4q!10U#Lj2ytVcrFh^e{el{vL52J$&9XxuO6|oHx%I zILw^A$`LpZKKG*m>m-v1S z<8xYh^7QyTu0(m1_BjnPE{1?Gzt6DFpDxkA|3LVE=Th_s zCbZe~&%Y>Q*y5jh-CGd;sV}O9-#@*g{+Xk>vpn;ctB0}tE!d1ButF_(jAOH+Erg7d z|1EODIDa!M{dq4b`=6&&$vD&>n1*p>R=)p0`K(e4(;qn17FNdHS&b>+Z0v7c5N_5l znktkIltS|H7x+!Iy@`5!(h7NB*z)kAB9TN@kQqiN&k=jG5pW9^8H8u{x$D-@@L%t_x{Q6 zkB7DX?LU82^ERf(fBDbHc9zGxl>hnP{?~u6=IQuvRhpOo^Z&e!?eP9XO}#SuA8tDB zZn?G9zyEV?Ki;AI$MjcZ_}9o?wsmv-gM^@e-?#i9=QjM8B0bW7{7r)QTvf=wXX#!N z*nce`TGIb7Im-bU!~Bz9W@4^p7>=&~|0MbUvyFd7hN7b;)J-%j(SudQvD!(w#E~$e_(3hOSoRkO!v{pMWTxV3I3CKg=ou zJ@X|ojxXs^*$_}g)hhY$X(Kv4##hK_8GFLl=r#h*q{BW2wq+sb5m@q^6ver$%nL3W= zO!oVTjb>W7f?wdIF1;qMG1J7CByn79bh6*LNNg?#Hk=hy7i=9zgc0vc8o1ShgJBhc z+b;4~fjW`t+DGd%W?553TxE(L)>yiwyVsb1!Pj|F;5DJ!e1p}AHg|x67G(ixw9zEd z!s>Kg=Xsjph>#GHGJnR}{qi*ugUE!Oo%6cn0)zF&O!sHA!)o6)x#=J|9Dw+>NA`Ij zkP%>gc8T6gb-!dL!sE=vbuZc4xpX&4T5 z0)MqVI`HByt+OaNy%IO%KqT-9ALWfqkJh~Z4uXP4+9!!X?-qtu# z)oIgn#y8nRuK?WXSAmf%GATqeAR_})2N@#BQf2i*0^z?(^^~!YcRL=6%zAW1Jo*ne z=2{nAl?=5U8s?3u5=JiZc4Yfk*c_TdBO6I$d0Ay?2>#LeKx1BjetrIvW73X&y)@vz zy_a|!4t;-IX4yM_RdfF{L33yeB%!fr6}yy+kMfO!!`TI3AD+WE|GsaK<~(Y+?0 zcqd831xtC{vaO`DkP=KgT+{?LOGH8@Ic)ONlc9cUGRw0OV(DU>K9Rrx<&KWUSQnd} z*W!K=Xm~R42NYN6 zSAPT(+ti*2$r3%|IK}r|-Jd)TjlgvW^jp(G=)7l*DJ|f%9a# z5r#{*5LJ}Tq$28`mt6Q}#d$socVnFz?uRPhQ#@HX`)M%c@3%BxH!mWSpKIG7V$`2U51xOk zm!x3_)8hWZ;T{&t&me{w#oafhz~c#9zuAbV@#PHe8-B^JfC+18S6_2(YMS(}Y1g=| zG$3AJb9QS;-ure7p7Y+@Wmx*6h3ri%?xuP~?**K4wZ|*v&)QyCo-A)ig=Hp4w&H^u@UC>8AqkSFt=u^8EjYZ2dD^Y7z z7;-Dr`u%xs*8xvOSTBuS?78X?u}o>qOpP{X9+**^IpZv3xLZ0;A!#0kq0H}38+>Li zSKiSzzp|6g4rW_!>P1HeJp+H1&SzpbR?4T~VLwLNiMC>G&V3D(BexZE#qpPt-ZL7quoMBc7H`}(lFlUy#Z!CzNN4nM>h?l!N%A1E*Wgefvk1TN4>m#|r1G;kpRH2XirJq? zNv-57r~NZ-yrS0a1EVZ-QoP6UskkmeJ4w@TNJ-{Bxq3T>F6?v>xRZ+gS&( z*WZFk?u_L4LpCay9tMBQ%Z*+RxA>2#52s4rD{6xQrV`vpxq#d%(fF(Qxht4GI$BlG z(Ng$k?p6xfp`;()MaYPc;w%g9tg-THj@qXx_rVs-8?hRjY)Afv5ic}GA}7XAc35d? z%|n8(OiK@S*+fd}$&E<$w#Fc;MHCd zFzP$4xc%fdz2^XR*P;%C4LJu>z=8NCu(YmpH&E*=of)l?j`r)ZicCQgCz_Q?KYE6; zX<80y0vglYGgW7dWtmhJGka$qdyWaNx|9IFgBuObF9dAR^?E$e01qXCn4I@sA$E34 zdYz+@iz^EH;e~&0I2)zA?dV$p_uRn^IqH}f7L6FQzS+F!f7+{vhz&)QjJhUwzQz$~ zKg>*ShAT8?KRWO8OXg{_8^L1}vV?=FY*M0r=hLdAna?~7`|MmT0OR6RJ|3?DG{itN zS%bc{O+MVVZMohor^x3polcyq|7>hco?j?tY;L67O=^G8Hyn1rO2-ouc~~7ab4OG8 zuBb*qqd9y=d5GNH<-D)#xpjKBfakmC=1kc<8K3c zULowvZAKt&;o&uU6xgR50oEM*C7#`5r|8p0oWPOkmKHCN=)8M* zPe;wZTBP;z=WLW$+;ZOm?e`+K4o7Gy&MN>C-Ee;|_h+6J z6p|t=+Phv-$Ctn^k*>>O^Pw3OE53xp=ObH9{oFX32Y&PpuRjtWp|TeMUJ=vp1WPjC zsCd9bLGqEGT^y!fCayqBhti$mXL7sFPAt_*%~lN^1AKd$2q8ZE6 zS?g^+di^r-CY1t2Ax4A?gf2WG^vQqD7Yf^y(i+8&BK;VL=+XI_r<7@FB+rIA zN)Nx4_4P@VshD23(kULzE1rS!v6Pl4Hi*7DiA5icEb{nGT?p z0L(yHyPZ}G3YrQc)Y84GoWkzt)>DqqSFGtzqaBz-0&|e;DHKICK8GU7G`r;*Q!P5n z)*-SX%tUY8Mpb3ls$N=H;f=)&nk;{=FS(jn0K)r5-jeb$2o=A5>>;n6VveeLnsU@R zo0t`uEx@Eyej8)gaExCIyB5w`D=D6@2OyQ;9Rmgn=7cSM`nO?D@TTiBh1tGbR2pVIG_t+E`Wt&$K@lyprSDV;q`x_Ie6}+ za2?Km+Cq<~3Jn{Z)+h-SqAv@Zb@|yVTVtplYcD>-A*Pr&T$E+4R3#7kgk)l#OU7O| z&X-*zFzAYj4@6FwO2<7_Y)yh2e5UYoIcjdbY~K0~sv7_Wa4O;Z&%zJ3tRTJUK`p$AAWbTIzsNyg*q>OR06svdEG z9-~L8FxZ^j&YCek6xXguV?1Vt@43Z(L;KmUm@S)6(;D&y)b=N@_Dwg{uIq6eH5jUc zyY252Svv=DxI>U2QZEpB3`72lqh8h%22b%6ILWFCVB;m8=kA07D_(!+*LQp_&Aa6+q(-+`7ihLSL3*)KZBwpz2Q?70T(#8F9&&ZjS`Ps}^20x4pa8b16c;3h37=-MvanUN! z<|8`JJ#H`8N_SOTA@g(8szY$jy9#Tt-_Kp9VX~@TyJlUS3KW0oKkXsy`kP{IS@LdM zqGMJ!8FsE58U-)xRMJ6kQZ$a`N%}aX?poptGXwh2rXVEo^tXK}CRF<3+pLbqAE5ke zSF0ND`q@MNf)o@ex*gKH$x>We0K{P?XY^-##1&wxXUFyH=p#4KF8h@=Ql_`P`r=tC zXhI+mb6%Bnc0+$wz-}wSR-(Yc^n2}Jp6-)A+4ifkC^F{W(ib2*k<&x%vaRE8&^_zY znzquSNq`dtfdqH3LKKquaj=3foV!ZEuXh-~{@~Hn%+hJnjk!jl6}O_vQ8B_8Y31D!Nhj4h#dTra6 zGZ`?Ln^52I(Btqz!n?^#$CW?^TKx5c49ZsJq>VLS{wd3{($B8oZKL!tyRUE%;kk7< zqQm4=L2Oud!bQ@YnvKb!I0Ij()walfo5VBZ+%q7~mC7;PUQ{0-LUR~#FVD;+I@ z4N}Y>Y^i@-3T2(N8bJ?mFEkxD@z#i+6#T)lTJH#NAnEgiaJ~ij5 zP?atdC?JTJy|NsRcA+K<>2-d;I&xA@lAYcOox5-TU3D8( zV+ehHhYTQyqo7CGV%QBS-O}@9BQ{wDDR`b;@S}fs>OO2TY4!Lf*4*swX9uF+ahT(T zXOa(wM>nptC;Kw}v*X8kGaAN7q>m3BuewX8egwX!JFeg7QX&A-4+sV{^`f=bRTU-X zIafX+C@D%ty`vp_V?GrW8`bE*Y8IXjUQjrLOH)|^51g969z&{TG6#Y-?-$AoD$iT! zi1>dQEnnu&8t*E+<88Eic4wGbTWci*2uNl*a*}0eX%0`iweLH(4+RdVbJRm=Hto41 z-;sU|d@)C|ZLhWZyj|9?S306IQT|nysdX2o3YY9PR-9!z#&NE^1xjRMoj!F+1&3i3 zg^o2bk#q71q9K~5PvN%%esZjR@!nzSIc|SWZ6R!FBzxgbGi>Mq8w`uwtMl-12$6j+ zJ%smMX1_n~NsWzf3ko=OtU{m|z30Xmnr~tn;Wi zZd>dA#xR8QB9u|<8652Wju>aWst11%*Kz3xw>QY&u74C#mKB-%_K#u^3xFrx?b~?_ zO*F;ZQdm*6cv-NUGWt|I1iBI=(mz(d_pIH=zI}aX){QG6rNx zm%Q-neyfh8S9RnW5EdP|b=Ce%P5wb794dUoCK=Xb+x|`$bRED?r(uV!fXfFk*)zh&-f1 zra{DkoxP5E2a>V(H&Utm;3a>Ya1=@N{0P9yWJupG{|J)AVq-C5v%Z}%65&_oj_xg` zjblx+490fYBiL<5J8qn}JFGm70(%P}iFYn${YeQuCT0!{AD>dxGD0yv3>Y7Yf{RTG zfP7Z$4WL>_Qk0zvUTj!{O~r|QfWP?}X^B=P)?Nj?2}0^wuJN*Xh6sOW79Qx>Fnl$u z36%@BIaTJo-9oZx(VRJfKZJ%d{EXL9v~OH!mKF9Fpwn0aA!5p4(Ld(aPZ^QPD9vyD zGI2Oh+QK_G{k;WVa$FJuHzd24&95%!jT76bH}vM<06u;VsT*yAlYPP=?oCpr=qVJr zL2cL4El`T;^s5K7R3U$6qc+z&-io~tBx=R}qP-7fMN}E&UPXJEfaF}cf^UqEX5-Qo z#zgJ_wbu$VB=}wCi@7b3`5w4{J>nkSgAz9RYhYY_gT}!Wsz5K7-ewUg6eMMr8cA1I zDLL5DR-rm?M~~tA1@oo=uX|fQ&eBI|>f@SPf2~|n6u|$G`uu;j^GX27wn1=$kp4b7 z7*I5E^u9Fb|1i<7QwD@yLR}^3TizT`e3V{-FyYktLTw1~V+`Ne1a`;8F`+*Dvl~rQ zSD>;VQ5uFMyeq-H*SBw%aP(X?GA4#3Do$3@txX zTAlO4$!GRK2-8La=;~+B8!}1XT=0B|dP)z}=biNgdTD%r1-?SPP(n%qNeAjwzj<@h`6_>^n2Fav#0O9-{ZHd>`jmb#V9Fu|BCV z-mVjr>It`H(`Tz}a2k1tVBM)1>O*ntCzWZPWyk&%vC2@K7aI zuHk(EC^7d*`gtO>5*AnpL>Ey4D2?jB3+kcO-Iu55i?@+XTKfewNnC1_hS{2S=8LRz zG8|yH0bhKnQFoR*prS#S6S9H?ZLjfU49Fl}7`)IT7`P=wUj%wl-k|RcgS+NL<%&@` zf)#(ob94%~QS3qKHHMF$mFA5$1B^})MSxn0A+n_ej+s6^sX={}Ar~v&MhNT6V*a{X zTBK;l&!zR?U$Ydsl^%I8q3t?W)hA>6pjCaH8+{O3CWIn0qG_Ax8Y@jijy?wIcdEW1 zYN&WEIct`HR0~?ovgA^>nGg7`{$~LMiN8m40P8;o3*5zItU0o8z1iC}-P98#9WR zuP3z=!D0G$?{}~==v3^X>36SP&n2bQLQ1(BX}*^vlSwYyo_wT9k;Yd;s6qmY-S!ZWbnNT%Acd9CSRsc?(pyMx z*U8Yx34NtI5Gtaz>$!uqXl?>6Xuo+Fcktp!_iT{0^0v%Yq^vHulvLiWB zN`u1SknsGHt@m>@-zQ!zem1d;uQ45}(} znkh|l!1uY^^C!_WFwF6p^@D%Go12gNb;ST7k%yQz@w2+9B`i`z=@az9vLRo8Ng2!5 z13=IAjbIYk`MbfUDJmatE`enYjX$6gF_XMkGsD1mV3BtKRB9=K=HmX?RsIejm^n`h zh3ipHg;C9%N4_u#ZZY$%Ez2u46H<{&>5mRK%X?3}h_JlzKHxQkycU1=@8!2NVgAdQJolm%RIF(?ND{(hdkqOba;sm?{Fv& zoQkR7e)K0zz~cy$t6mQ+d?w`d&=?v9<&g-X&x=kDP83K{2kfVZK%RgX-e1P$H1N=` zMvSf_Uxt@xJs2(SVSG|F3(wnZA_{Q!{+S+mAr%ssSEpz|DSm%1Po%-gBa5G)1VdJV z-cA_V*&+5K`SVKO73AO1CHsRnobi3o3LiV>b=6l zI_BFzO-MAMp9#ywhXq*Ybo(Nu?++BqZ_ktQyNSE=Pr?8@yk*z~XgaYFS@EoK=mw!I zdIOcE3G_#UF6@6~IN{!?o1Ie%XYmizlz>r{yx?YS6W*Yk$aqcgRCV|h(ZKVIRZpwv zQrf;+P8O`AVEiCsyZUJ@*fIhu@lZ~BLs*s(5R6m`WV+zO2$K9VkWr)7s${3RxXx~U1a?gXgBSfc)sW5YS6TByEcw(NGp@Gbbd{kJ z0rm2kCxU+>=K@%9zaJ+zkh&upv6hC?vX+Qi@-Pr{t4w2}VY`Uj`u?U8Pw2sYk!A3W z_ywec-7cWTf@{mhpshQZNc~RZaw=sI&_%b1=D9Ryn8}e_B08suBT)z1pGfcl8!>?Gh# z`wD;lL57Ky!$!zd_#{O*17y`gERP7u>gW8zn0dfPzp8#xuXpaYRA}PmnE7x^FtpPAG1+9l-12`+M={0c*v~P2)AByBXtVGwBXsofus+c7 z>qsU1Xiw;D8n#rl$8i9(AHTMMVkM(%?59XIiksV zd&5DN@_4?2BojRrgUg^EnmKQUH~wqhA(DE!o8d%;sfm^oAw*@6y+3HNkXkip6vh}NrTtMp6b9J*VO9s0A_)O_SP7y0 zu8S_Q|CY9OY7g`@!Jk;@ol2n$!ZH+sDe*BRBMZl56U}lzd)E8lUhsT!{DP3F?T|e2 z+%5KyvK^2_k<6JVXTKtj!d&f}&;oz=lz`bJN|Yn=9;eQx$?5=>8l!9f-3y)Jm%hJ5 z{0R;~6qhp1A^)-Bo1SGqfzVfYmUE1bE%z%)7@boL6IfsICjY?iWPoDe1GrD0;oUT1 z{FFjtAJBtEPW89&$Ihgu|BRoGr_h~M1cYUTZ5E0xczK9X<&Jvy)ue@ISyX=$P zaXumlW8UlYJeVTV_KzHQ!rxG(n`9JEoqnOWCE8FFlxJ!BmMBnyIS9ScIQI(hZXc=< z){Dvm9h_olj2{p+`bp7;gRJSPAO<&{)K*ygX28KzegoR&U7I^-31k{CU4ZQA!Kwio zB2u0k+?Xf&y|LIMIHP+yG^~HtWRTRcxN|oC+xyz?tlDy~w!o!^so)_S{f=Iftu@-h zomABtRH5YoIpUWz&H*P;qCPNK5YCCru}QRjo{N1|okwgc4@_Bg$U8+U&{F4WXf5RL z(8kV|V4xWC8{Rb2Tcv7dUB5ey7q8PV$8EjF-t@OugjR1Oya19@B*=eRYAP0oW|7Ae zm-W~rC zH>GAZi+M#avaJ#bOTvH3co1`rTCyz4pVDBsdRjTFpiTVAocqqIUD!+rH!(jQ5iF z*Fz&o7(7Hm1OGI!#`Z#90GqNvF!?;s*zv}rP_7q{GDOss9B)JGK#(uD$}(W|(j+US zx_XWY#S~yS)GdTRhm7Tr6c^ZV_2ZyCr2^4!9!9`YjG)XqUOCX9Hv_F(uXDOU`ge=v zUskvKIu0xYf0s&)clw&b4-~V!XotGJUAxlR3&4Mbw&)juL1TtL0QXVzMLiuevzo{N zJ3z$0b{Dm=WCCw|5=N4k=Wn~*jzbc@xn&(dCyW|DSKVWtAt_~RtBST#OHq78tGZT{ z5@b?9j$baI8BXn9b!mDSRr*+H|f|qJgFwF^Fj9Z-y*DbmBWY)Ru zyWn(M;`w$@Dt}W^TY?E-j~zMf^qCnM;>+E?6G8;X;lkHmYIgh9bWqH7&^6UhR`-@A zx_rlq_2R@`IGYl=W~zs^&fB`osn>aA0eGe|HQZI-KL*3Ot2gynG0EUnYBFDxtd}`5 z3Q|tS%Y~VrLKLRQPn|z^_(h-3A$EqDVxD}_^MobbOn;~oM%0y2bL#pa#lv+~y41P_ zqj22cZ^s)wn}Q^fH*imsWzp|Dq4Xx>4i@6jROibSR`5(MeSR&VEHL8NR{Lv0Np7Fu zb#uE3YW4IvwPrZcVKF5Qo zx;2&qElx@h@}60JuM9pTE}D)$Pxo1JS-Bha!Jn9)xa&3ArOQ0QDYt7Xsh6kBmuxe) zxPKaI_#}jb1+bu`FanY~AxXguTYKmOa{kpv0iLFJxrCVD_KGZZ82pp~PRCd3yjygF zf|5j7)2Zt{lE`2OXMWA<3zR#HlSm32Y-sJek&jI=JM^YE=RsTdIrq1OQ&py=o?jnC zJS@v~K+R6Oq|EaQF1d#+(G0_+LIp+BOMhzqheq4amxQE{81^}Oz?R|H-bO#EV}^i- z08?N_Y@=cl?FT*V*`dcfP}?7;)=8!Jco)JUCCyw|aY%KdD4Wro{rDLas(dDiDp$`cVl!(F(BtFB`;zTlJ#Q)`*UAx2k)3Iv*nhUf zOi4S`KIp3o=X8><_7NOl)c6C=%u`V|H`z{$qi3aY4(Fl@X0hB7F0x9lT!S$1^!r{U zH}dW`=HL1|mfyl0?af*`W)?1?tuW$iTyBk{-i&0m!tP-la%J`st&J8n1|<)Y{U6i4 z6A(KcN@pXa8_if4sJLRNu1HS}e}6g#+@?W#1mIzu9gJj&y4iqZt3ko8DYHj;gv9dERB)yV55ka-ylnf%5aYy!aM(I}_hDL=dHE8b zP(KW5VX*a1@|Vl%7LyhfnKlSeY=iDFq@I$y53cw`D7woF6esK#bpi0rjej?7&@j4y zF_^DhjQl#x)EePoTp=-SdGF*W5N&_>d96^n`p3L_X2?C)DuWNb=VO~9mq!QeEYKLq z0~cicOy^POR<+96l_dNH2ra>?_+#A8c_#$w5H7)Lgi|yd(&kHSckV|4{m}a6&`I9Z zUl)}$i5@k`*E}b4rC@W2!VCZ8TBBSBP#Eri)0Y%O4k~~BK7Z>s{`Wb6!P^cdlTl|C z`Q}Yv^GWq|fHiUqU-_o_oQfqXK}*@g1+xoS`A{aL9F1 zY+J$x4=xP2fS~gdpet3vLZ!0XC?sPr|1AgjhwcB5|M|cEPniGz&+@zf#q#s5@?R`} zOg8@+X83Q_aDlE|F(8GDe~ZH^G~S8 zFx+7LN!L)m{|LLB(@6ZYH*ML!wf)VQ6))7W2~cL=^zZ)h{=bz@e;ZvL??#g< zb2Bd{DkQps?T`Fn3yZnq#(Lwjs2;*YA_;#X?Rk-YoOdp4$wtXqKyzPOdd6!PjQU`| zk3S_>OTYVr2g2gxV-KL;c?XC91H47I@MvMA`??F;AQw{fNCoR+G=#2%^( zm(Y9hj2o3;F4|{)zvgGpAdnB5}Jgv!lO2a}#HT0VK8wc*BynR8o zm`jHf?5SpO<{)P8tXOcg9yf^S+nCHUXLnbWO>d(h4&)GcF8-MvqOkO$ZsGX7kyJ3! z9&jo3u17e7y_^Iv*NhRFLQDBm7q~|rx6->0h`q73-P91}&V;~vq2wD*u_1q{gs(i8 z>PKl=nTXfp#Nq?iX^sBeIV=gn{}CLK_b`*z5JAw^!s%3Jxq+tm-23xa>8Q@T0wZlg zTCSF-J9(4DS?cPE3|%JqYH}t;x+OBCiI|-1fyJB~<9BCcgUph*L93gm8pU))p1ACO z1ehO4E<$KBY0ry9c&d!{<_UlIJ|RnbPx|>Cxt4mm7@o$-aq9JGy|)^3nA_*ft>8)r zUFl3gp8U3K{nAw|0aK<4y*zV15;IxLJ^_}UkPW^9X#^wj-#h%|BBJ!=b{eB_?YOlh zP2xgghg0Fz$dXZ{b$hxEN-IT8(`O)Xi7WO&!mX z-+jdm=fef|WDuLcjdYw|>vc zXQypxr?coXuggb+hr$|C&({(G8X=YXhhlPI+M_?r#>%$8f7AqvUgs*PX7=QU?bh>;#&&_Tg=n{kVSE2`Yf1^@x znPFf)Yi|UIzAb>T!-sZMS)Ox^IL6^i)KaKE7THcpPgHW2%pDs#$FB14Wmz`jqSMm@ zZ3hN_2Bk?FcQS*o3G!}&dJ6nqBUsfUCYDD03WD2&9>a7-m zFv6eM1z^}&vO~sZ1ZB=_f)Tj-^-6rR-If0!I;Wf zqzv8&-&%NAL9rx|t1-m%N^2`f{1%lMgYhdbCkvEzls*c|9y23v;f!7e%r+QPposD& z8pbXLeAa+zke}w9N}^GK5-Dr3R|PGqV^NkKL>hz^s zzyt5-`A7ZYqMJW!o4@NK?d8+1mw-ugA2$>zGaRIQfYtnXf7LU<@qiS+Xs1hHO!A`E z;EYSU*}lD27?nO?tZL~m@1&93N+>8e>o3q1TBxjQ8EHGHp8#VfZGyUleb```G6fsv zSTuj|ru)5;Kpnm;)OtR#3>9E>!-^VY-2>3_5|ePu$BTC{x#pIfiaPxs4r@E0fGLP^ zT10UNs>u`3Gj4Es6~jR9Sg0inyI6h1xw*GN+C?#~hH_gQrctR>FsGMbu-!89zVQ2% zR#_IQ>DpZ6?~%$n7r9M$x?blgi_U^3sP%sfny1W;gY-0(UkPE`uLE1@aS)tYV?qzj zg-zqI#p+Y&^sBnMxEb_Uq_*uJiDdnpT!4M7G{ucmCJV8Q*U6aBNs(z!_ND^ur;j;_ z7!a-IJ2U9x0!PZ$%EPAjG*+P6&`qqVK@R&~RhEBdGLZQX z8UA3{#;JX>tsV?nx>yLA@Uk0Q44vzEohC{<#_7G`m}3NiX3oNQ;-1ljvv<@H*#y|! zJBpaoUEw550S0-dVeRj}wG}BvmxnWlV6{_gmU&oF3B53u*4*34CZr{Nm~}jXXTNt} zz(8Q^HgoU_{tT`%3P>V&+n9fPEJo8zpL}&3u%DWQ?TW@?XBO5jVu|_Spf=!O_vYW~ zUIj-i>Ys!zB?F7l{$Tx(hLE@l$Euw-N@9>_at2cng`cCF;dG=u5DD_!=oN*w`YfA# z6RY$veMLz9aPZtOY&Iww!NY`}-_y=SuDVT@+$CtihK5Z^8?tML>i2*58}}4*R4WHT z(D#N-yQ69T5aN*9OcIn+2;e}ihS}0z9ijGMl0?~A%AYyH2s{5$h}feSq~zVJ{h!Dj zA$;R6I<_~FwdBuLNTAPxRYJ1N^|Bwc%cP@pWNls1rXvRdx-6^xD2;LZsz{M$WB2`+ zFGH=#J|dn{-7p!_Yvg|!H*x#ufBZyR{%WKg>=9T(P-6G$2jmkch=W3$IEV&2RI`S! zOjfJG)K}3uL*r1=mgl>FpB;;z+UdlPyr*Y%+N~!J{J9?fRhjG2{Wxc&TyGc~+Uf~M+Jtw^aE#c0E95;W4PHxJ7%9%USUlZh( z?dH(Ufm+^=ol$hd21yV?Ofi474AQv08#k=5(7GsAw97*vn7<4vNP(+HT3Aa4R{5;( zEsMSO9zaflaU6$p$itFx907O7X*mK&$pY(M=wOrh!;|cxKvPGpeh*19j+c_h8v1v! z?EPwC)}ve7N4I|gVe2R1EJIr;Y-(M29!-DklQ?}y)OsP>t8tHX@rLi%`Tm5{>yXg= zxsd9*^De22i}KJn=}N}Z24;%Q0mn}4FhSvI5L9ujd+axu7RSj=8MMl!;ECSl-OL{#&2(a( z=+;BQIu0lRi=g@}v0pG-rK_#v4UNM+tmt)2coF;7QGOKog;$c}PUPOHE1|g?Kc}z1 z_6kwAv0r~EaG7!9gP&=*fm1ach=fRc>d6m@L zsXU)OD%UF%RzhJD|BK)RJ$JBLdUzD39=xr~J?MX+9V58uRTWD3SV(liu_0>-t);o&5l}=h#KYOykZLFr_*%*NiX9ls0_B09^$1yw6i0oT`-)T?Y^JhjmBLzk% zlQ4e)5PA@|?&QL-9h<f4My*!s@9u6G`xz78{d zC_EGhy)o%sZ%Y{*%)?PRtwMm(9>eSm+~AYQr5%51?BWgXz_{*u_n0&{@jqFzSm*(`B@v)Nx)iq>w)=NFtbd>5H=^pMA?5J zKy)O3NtEUCS<2SPUyRARw+X~&s{}^Zk4)+k-A@e2P)*#zm*RP#wnjDAW9clKm%)1j z>w?9v1FkJtWSePO+Esy!@~RITMa?GJp(bsdHfh*d5RnJUdlVy?J{uh9>&jTjK18bD zEQax|d?k+YM2+Hcjt(Egac}~CPD*f|>F?eP%wb zUGj}E`bBU#zty%D_v0o`*}7-QLdQg&3Wx_Oye&z`lo{0-s zMV1PM!)C&rqDyGFhzNKqu1l=CuQNc;@&l|2OfG~+(Eu`!(l%Qd`%!;D@ZnU4(lA0G zD0(KXN(5e(Zg2I9VQ8lHIP4)k6+k`7(+nie2gB5OI+y(THnHfq1vLkyVu6s*Z(la! zPHS3Piejq?uav$U_Ql?gcy7 z-g5`8nB6T?9t;t2M#+CSzVmI5?^fH6KQuNrDm)cKPI2Q8EwvEttdlg-?ERY7XPrJU z*97o7aa$I#d1Z{?DSiS8!WW?50lt~&KAp|)( zVwM01(6FZX`9lJhdp*+?Iclldq%;s-4F;*Pn;T-&v?J zf~pJH3T`lENW?_9*yS1lV+sPo3R#25eJxpB@) z0gG502qy;S?!a?~ZIzU&gR&J^xXisSlU&hMKKWVqERGChO`mdr8XM5nf*%}cIx&U? z*$+>ng?E2iO^4o#hLylpO(v8Yu80ljY(bcriP*c0>L4T|t?l7CK^M2D{hA?Z$X@avicp$wToAVgAgdOczAVjYFm)MCir`K8iNWS5hwH zA}bS@-)!74pIyl#ixc7z5*hA5B`dFc6|Q(DifMl`$Kjq%V|$~ArY6d}akSOp%@thY z36>-}-n_tbYQ!w}Jx~mof%B#VeB?&ZmgVowG08S|dN{3Z3JxYjpGwUJ!BM~Ggaf74 zTY#s-=B%ZeaIex|w7r-+=RKf)D^Gh2Pd9>t+=vgfFn5ex@jm{Pu(WSBi?y^c)&`vT z8y$bkmCJfY?R=O7&v88IFOkt7FEdPvu?4s0Go;!c(BKbj8Ey+-sy@NBF`za9C%lS_ zD%uh!pvE))5TR^jXP(FkP;#>X)Ek3C@&q#PJme3V$4Ya#rr2QHPd-I`D!v7V&_b%P z6*rB#*w%CQDJrB@$W?OOk@}E@NZH9yv8aC!Tq`O-fDsibH+piMTHN3fh9qYgWu#*+bK^)kwS(Q*&V!Om~-@L;^p} zyxY>5TGesW@crpw_gXJu1-hnc>)2JTjcYgP$W+aIQ;EuavX~n$8YAovR#OK3r~41U zb{}thm$+i#E+O6!z{#(oT5Knutgr)5LM%1FjNpUW_iO;W_;QdI0h>FJ-zI-Gz%(c@ z)9n>7JU5{+new&`<$3jvdot33TgJ;vCxiIRQB#r!D!qboB*k~0FOA-&+g1#v%>a8z z_K!TvemQKN8JEnA{)FAB5Av79g31kS2%{MXov+YY*nX-;^zKFx>;20FO7A3wA31l- zb2bNB1Rg(k`df*tXI1)x7s`Ki&Fti}HFvOSUOjEde6;1$J2SER-5I#5o^sL*hvjTg zKvy;rev}-rr({+sm1YN%I(c!~3 zrI5ZuL=KTeS8~Zm`E!6`ru3N5_CZm2^loMuZUO9{;oGJpV3k^(h&g`?;o3Y>n1G%Z zzMMcdA)`wfP-9Bx=pIOMSOm2U)A>SlT6HTC1S)Y{n1OyqJUBT@OjY*t=IrX%CN8W* zqBw$!*lpU5NvDAVeS4Xwh+YV_$jn4=9CgKL!mlZJb00-L)hrDfgBiw;RMez>Hs+BO z%LaQ|rk@KE3ZOOfHsybWqjzXdo5@kC$>S&sM=<6NdN}PTN!x4mR}H|B<&H*I;r4>_ zE_I?d1#My{gNwF-jbT-;0D->JO7PD(LpW#!3qjDA)Ey4%wWD=u!ug>WuE`h!V7A`! zpCOAfJAvO5Z8}hLzuvms${k&7q3#{P{Qe6k;?)Q4~s(dNrkmD-Z0 zss8RH1%StD&C+9!m`;UR+!VA2?@9`A3*yW{M*|7977%|AJG>*icPllod72_!v)mRk znZ=aR2q;`S*AF@w8E+k~wW59ed0T z`gXDIO$OjFW3oKqbU3duWa;=emfVIubVT2PxaWT#mP^az?OkNoDhV(m3%oAAiH}fNJCZUqLX8CT`!dDg6Za3pGmVc-M10X2mX_rX1 z_47GJ%;JfC0Qe5k*CLL#M?t&e6x6c4xkG{5mjaYA-ZvtGDc?c>>1;mUtw;KY#t^QkO082h!G=3{=IWbjV0V(a z!?Oobr|*wh^iwYq7vA!3eQdnDJC9}@uiuHA{nCApMeY@)u|U{3AjYT-?MQlVIBY`hg9~?8ly~ri1v(86&=_od>=t z1~2liIEs}QCzx?)PkGcccp-86DlR_SuL~_rcAZCH*z&Qh4yAh|B(S|v3UsWi;$col zuF&^E`i;0WhR5?7P_Rt&Wo?zzdr*IjDlPonOyg%HEedSYh5;nwZ6#1?AfpadNTRkR z5>igj7yd5|9*i1238pJDMtvY1_daNGbob#ufiB8D?fstoJ+6PC$9k503 zq0&syfPtzWYHNeNFsPn7M{sD^1Km);t>)=_CZR zq_dlfAzxPuqnV!*VgWx81=oL?Li*zb6WRw_7;O{8u8K{%pC1qmV>h9(W?${m*K%vG zg<4ZjEhhp9bR4avPX_8 zW)V@IhzA1f(nJ+j0Bb*=!j!l!$3h}4b}oMeCFc4O2*-mMb6}k8<{jiCRME*F;iy67 z^>c0N5<8xwAFHwnoR@!2YkkQR+L`7^2jp600Um1tU9?oUqt~~~SsMj}z4D#P(SULk zYn;NdIXG<(3t-CP`<=-e3SB!Z&1ZT%Sr0xo#(ZQ%(PP05Ln&4VS?-cJP;|$U$n>9ppB;aKbSprf#rCVcq_Qua z%s|W{eN^T#N5aH~0>GcPp>&LO5`z=)9b#=l*3OJR*y$1~7_;M8;p3#Fl|=Xq{mG1mUJKalQC`}uhStpSycWb2e?G{davaHncB*@Q zTZ4M-7ajoG^}v6Y4lg>4GK{-qiJ>;FT}l#PY^?3z1dyEs|EV-k^q1?RoqvXB)&@OG z2ImUFv;umKh}oo{`v>*u9JiT@$<23UW>KzN@EeWEjmS28qrnaubG@V^1G8<^X_Jp)kOglInM* zf*5CuCzRQ=|2EiEFc~UNO3R^NI`g?bjl?FrzX{U#e#81K1x-RShGX2eUsj2X$^JZb zy4`GFAFj{c@T z6+@|1Dlj+f02g12T(0WX;t(4-8LnR5IB`*?y@P-EoP`xgxD#i&>F}val4d*+$Dw7# z{`QFYJaozqk+dp=p9aN27o~?E;h^+lSJCuvTR=3gdTe$SR|K zA-#VW6e}Nzh%bN%#diU~pyGZ-*R{IAR2<922k7q)yf8lO$YTZv(!eMIWK+7Xm4!4q zH=mokB~^*XRh$>|9oL&=P@N{hrT6PoP1zp$Y_qpvB*-=DM#kr*=5ZqiBu}X*lKs&RrKZd!i_vRn1K1k{CC%zSkQJO*o?w;woRM^ZI zR$lX1Dv5S$nJp_tV+!9*t~yK&D$5~yTH)3{0@nTom>o)6q~okmRnOOVu-dG z6*%|TK_+#>P0NyB)44+=M2XY*V^9(PKkftJ zHhU%%PJnlR!XOC?)H1#mL#A3Wd5PnM#?I3hbMUV|L5oX~)q*EQqOVvIk=p z<~K~FU%$`vc_`ALbz{F@0WQ^57Yh0N z#ggSI#Ky~)9~OXb35F7W3G*r(R|_{k;XS%F8BCx*Z?zWX)Ci)Ja+2Z-ux&sx*FY`z zj$x)|LBNim0EkU)x^@-fttJhAch1ukD4Y%i0^<8HAeY??j^*^C;)T;<7e$~fy5?lk zI4#S(C@_pTLC(?hoDhF44dODxjyTS*mu7fPij>zS(ia5V6Tw?rD&_~G2ttDwXdgSt z#S#D79}t2&R+^w*m!Cs)0YEmac8`|zgtc&Pi=QM(5g%%jDbX#k%6lkSWwnf;_BXqN zP8V9AMB7AM2YHGAA|(+SHA1065xC@vpN6VG5e=3I|7lh3fzp4dS!Am#GT&EN7x2M7 z7^MYn8V8gOx~Ul5wKF4a;C`X{lRUdK%6EPi)Tir*!K9}~i zappU|Q+P+T1ug17g-=)JlOPV`vT{(`b6yZ9LW=l9ZfUyGt=kHN(^a4E)qVvK(q5Uq z-vO2j^sRY^JJ)|PoT1N3ncpj*%RirCFxu)ML5M1^U{$EP#=9$w4qS504ZSrqK@P1) z1yHXyn?^m#yP|w|d5IHlL8_*?8th6-(!bhUihel>GkgWC_^YYG-Qr0lIPepnV)Y6u zy`slKW7K+7Re6?d^Nj4CoRJ9SZfbh-o)P&6hm6ax#bRiml3{WIGQE1nRxUtHlW-&$8!EZTH{&>RdUU`$5Gg&|ES&}ErM*^^o zgA!qEdh=C9)3=IO*O;PP3KEm0LaZ>|Zzv;qv2v4krn~?!Cc*U!YIvK!7sGOFr#Hp3 zXor7lNHw_kF2&(}Hd%f?LFJc*cZH(W^a;EJ%B!NN)VrfKa}~0$tn*t)10eU^DEL^^ z*>>5|^x%t}ilofmXK&EC4MVMr!XeDL!#p<))Wq#UHsqS_0Rd|*B}`G~2rrMKP?=Hf zo-H;MZ`f7efF&rUH}6uR!>r}OKY~lH$)A5gGQS)^fO3_}NJrYJVR!S?+;vB{V)KzT zJc*s~iFENf`McS{fR(OsYc{1D6*<7UnKHbpd4>6 zC{NyAc$4DaMYBJF3Jq@$j7Ey9ZK>0HSoj~cJ4VunqT02PFMX?QFjD&}1uw2%`gS}c z<(&8z0>}JPn&urk`9#wB(vs;qabth&AhZfyT*|lbvD$s7c8+!GviDthAv|QGJqVw#A0Z#_K`R#4A!kD)0E-uM^NncCyjilR6uStKaOj5kC zhq8|wl%E3ozpQca6FJ6`Ur#Fyp`RLWL0;#{1ic4%ah5!ajeu$}mJ<_p2SaCYZX8&Xj*^Y5#2c~+E@`QEQ+4&5Kkr6-4YbZI}zG;lJq12+p&@gArslf zmu4*M+bxprBG=o)m(U?U+^d(_q_EiMC#z2JxDX=;LRq#5AU9MMK4d08L7lsxqX<~@ zb|SAtLECcDp_EOhaO(M=EoPQsxVbBvHohr}b{Lxhn*S@+36g(5kZoL@IkEpl8UHJY z&SJ>$I%?>Nj?0N8@O9)#o_sT+B~}S1STakk5MAJaI0Ql+m(PPHH>e=ifI*gW3bF<@ zGmbnnwrHSxBx!>*mMbr&njPP+f*cMh1#?JSkWdHcuhlXyW`N)4#6=O3HGzFk`38t_!G>-?CH9NLft-i7=NUzf;kaj$mNSCL45*H*Q*B z=cg%*c7A^hUf`MAVOZX|4e|~WbbH1|(HPS#0=6k6)(O*ejzO_`T;HOSx*Kl8_GQB{ z;UA)B?C5p3!6*#3WkM7%jNE6bK=bjQvXV>ym4C41W}8PbVJ?B^4{HbNaUy3yAa&9} zAWlv+f_m9)F?;t$R|Z389?0@ac^tWs@%QZZg zF-Vso&Xu|x9<*rK72g%vH~@`et%iSugG*Scl&W@oCk-q2Qg_G7)C0{AagNfeIgj$ za-V;}%5@v#(e992Q)fx-aQ9Sax#>Z@2kilwEHUrO+pIh^w^kYsQyHW&q%nmBAw<3-ro> z^u^tWTU)d(b#|J`OG2TPO4((PPY`IO=gsF6;$|~K6$u^}kid9VYKj5kuiMW5q?muy z<)63um?NQrVCntX5)OMb=u0%w70>P@F&^thU%N|0i`b%jN#d^q^GPB&J^h)ZaQxhtJpJI?8$&X-~0r3!$EG_ceLUSt&_61GL{vgTJi zxu+A7x-Dg`MT z*lZ!zD}@KSR!7N2%^(rNnOJtFd$X6JQwSOszeIHB4@aK9Nq?RSRf{F>qG3e~SI?Kx zQwSn7HV2SZxk)OcnzAoj#7XV1HPn16S^%DJN_^Z5V=OJLsat|(hHNd1M*cTx@7AWO z4s?zFml$*oB7gh)_QiZ=pKM7|sTwtERP~zdcH`3x8$oeIaYoG3(!zcZ+u%&8WlY0s zG1v{>g0(fGWMJo!{Z^OmbGMn@9^1q8xL#SE^R)PMQ!Cuv#xo~Nde+67(7^_B+v$1L z`S3Zg-^G=mlij)Kgocb}{`+FW-W=;$ELCUJ-=r5QRy*r`a_|IOmAt*nFm+o`+B&ehyG$anpg=ttFmi^!v!6Nv&*XMoK?9}a$gVE__Fk+ zT_4@ulheuK^m$)Ld704X^!=goxIcR*!_HhH?|<$F8-BTlT?==O(#ywm{62}^te|?E z+2nl}#22f#_s+5kkH)axxr6@BU1q!a<1NY7@3ZZ5c(x9kdGcxQdXpjR-fnTeoH_h` z<}S>Uu*Z0Gv4_$0a(Ry2`E_8Xbut~k&!4`Lji2Xhw8-W~VtJ;?9$Eh}{FIl|li}`= z41WckzMsQ0xteP>6mxU6qh69dqW4w*-AQJ+3e1~5iaor6$ap(VF5|w;SAE;ro%;_k z$^N=7H~ae@_kfNDUV1&tkHj>bsVmdt=|l%mR~J6s^(xHH_qrS9mc5COroCA@OAq_O z?k>&){~XuMc_YTrdq3Ox!6|7z4A!eyOn>qso5p1x7&6GZ3pP9Lw(nHji}KBUo1Q*8 zwHQD11u4Do0C~DOPcJoF?bzkM*kvI;kMqIm%?*=kwb)I>BII!|d-l7KfBM{b1krYy z`tApcF5~`Z{xRN`*=pxa2ir}a?>gHkJnqun-I#Rbk}r~L*IXMHEPLmqLXY*}>3<9$ z?JQ&-*Xv$b46g6{^8Ow#-go6kv8d+8yhz2PKNtDH#2;(EU0U2&%gK3hOb;t{%-ATs zJg0BH(Au+*7-e6MhR9to*E&z{yT|(}P|;+2S&==zO}ttE>RNM;zB_R~TVJM6R?fH6 zTY1ez*(be4H+Mh$?LAn|Kk|Wh+%%zhA5I{G$@nS$FsTKDl{W${4}7w|5!ziF=p;9(es;vUnu7r9G%N z{w*h$Ior&-Z--lD_h9PBk(j1Sf+amf5yh4nG0UA@qT(4!$k&|aE;P zF0NVL*+lX2a?C%3?es3^^4$>DcD-_pZI&%eX)PCc9!GFoUGC}p)5FMv@#>;r^NBnC zYq(m(w_~WCF7rBn zBxmY8p4ZyHlk@WA--FA1sDDO>yf&u0Vd}i?-_E7CfH~V|Q+!$WSF7^#8hnmA z-RVFKI_oj9;`Mdl9M|*b{{3$AkJ00OT4xw1*nb@Om(g%?4vpAj2Xh@Nb{9BR* z#WRFGc3E6pPCY0WeSWEA+`Zk=n;79YF{%fT&+f-&RAn%N&p8)*PR`D|WU)o$3a43Y zt>kJ-X_+R7$Dvnau-~X1hoBMyiQHgv(R)~*R;gh@Bqz?wT7Ri{vPPws`PK3okE*OF zPQxxrXSrFJi}!_HY%pWP?&0y_?&F151glecdb@<1`gF}p@$r5pqbD*KaW$S*A3Z!< zKJlPj_@7s+mpbEXndIm>@0~YRzBydi^nQ0cpW>N?vpHM4r|42GYB`JEW-G5-~&%<5k!#QWS-LB)UTML<3{F3&lD9XIIffKRnLz;X5PUfoUzD*J&DkcEn@)HtJJvF@KsB?%;I2;@Rx5W}hGH^=jt$ z-f=#jmhLGXF3aS!}-G3lfI zxnmAPV#sbMyT{A(a&S5I#iqYSbbO2R^Sxi^$*jt3)Wd1qZcPI4sM-1n43*I@9%FPL)$)o=4G(k?e6P0v-kG? z7WW2&$3qszsMG1VpPP@5kKuZ|-#;FQ&o`&ry?@@sHM;h!&*imeJ;c>KP3CvweLMXO zKBL`Z_Ao4hih9F?g75e#bc%YDtjp=1FFskowu|}aGwstW^_Fb6q1Vu}CpjBAl+_o? z-dB|M-ze*tbmCvK{>S+A+yvYA(!VaS+IL|sb7E9Q-7(Z%~oz04|o?124 zKOZJ*Yo4wweipYJfNmOU$2&Dsu9~S<`u&>`U)V1;OMghOG|>a5{7V0*ecf2&LmZ@? zX+Z(#vU&cF)l9W&rdRA_viud0E~0XQH9z$^NsLGq*lua75DbIIo2U0K710!Vak87#LC zh=L=4-FzcN5-`uo&`c8mM?kp0j8tr~EIvC~a`Hkk@D7}K2evfRWMM*LAr&8-IzBs3 zG4LL_2~wG$-ljxyf>dmRK@~D%N>zkG#sPl{)Tb$81Tw-mnONNew~?R-iSSbUECMwd z_a(!~XVx6z5CKCN@lRL9VZ&v$PjR>SHBx+=Ri2r~7 z9$ZHj2tnjC8G>LINRB6&fkCV(l9a-b52ipdl8it$yn&=vvmP?&&ZM{0>sbSO>h&a6 zkpbxSBw71xasTyvueIAAeS&{5Ixx*prQ+#5_bE+fh7mIn9nY9wT8o~F%q3F# z<4zVGK^1G7<{7FuMbXh=xG+g20t+*ns)&X?Fs<OC%!LQT_3FAJ8ijqv-X!_3Ic5 z3X>>W2Q)_zY@Gmw*h;CrP4f;>BMhS4*`gp2tr#deu~Wl^p@os3tf|=cEJ}Z;L^*Y? zlCXK5pCTeh7*w&f!9ad45h(83zVcw{hN$BDb$u%w&c0p;`pm?XgETFDulT;pU?=g1 zL>8C~<_g?fZyQEbDgs>F;pw%9djFqZsLv*vo7yhZMDKNLcUh7-89$qT+}Y(qO7x0t5TON6xqkjE?KWTp`nzJx{p+B$bGwykKe-3)1r=8_HqPWTGk?9i^fXEpJ zZV^0GAY%*}OzkQ)PbsXMy;jU#7+iqQ5GZNl!-^817)0VxpHC5(ODBsFgYh#w@t&;= z7iaMq19coxnka-CLvh4{qLU~ridJMbF}9%c9R?bhP>2bX6j=#IGJk(iVCWnUJsQA{ zs*lbk6@y8ohxJ7;)B8G#@rMLgJ1-6h0UZG`fCoA6dO^H3)rf(JYFE*S)lA!et@lRn zo0-N>en|RWD9%pCGbH0Vtyz0#B>cV$fK+_^daYT#?v38}l%Ia~mJPf^WWaj;yj=`T zVbL2(F*ikSvy9o;kLG^}kAh|nP&%>*V!kB`+TAe|D>FF@s--%mpMhru72NKO3z?Wm zY%zfdJN)e45uiHdlBEWv=6FXz>{v#|d7ArJ~xKhWx;l!cWU{tpGu& z4093;Z`jNhRK^~bk%h4}T`7gIiXtSApGa~6vN(g3S1^(>tu8i~qXiO$1+pi?`MkzL zlNd?gs{nzDQ|6DPjfV`n!>M?x#k3kekYH0#fWQQFmn46C#V@N=ia^Ld=`((6Ut#Kf z{_?{w|L$Rac6yW`Fp&_PI(1ASReMk@plUQj(tZ4QjG+*AM>Z^m`d)1S)=wfC;#p~$ zkhHP;pyPV-6+6G~!knUH0gIs>h*QF(WbC3G5Hx2hnJlZA?ZxAcAhFZwu*wg%k}^S9 zaFd*uWVU}GooZD~_508EBMav0=luoM?;kenZG57(!x8>vPd>#u*7*c#alk8;VUXDw z@`Ei(00fLdW0R*{+jFecY(F0*q*KuChjf4qHfoK(HM9`CsMxF!oNc>UaNb0dHM_Ztltqg2S!}(u+*2c*2G?E1YfWcvcSdohOQAEyixw z)>oV&@Q{Zn0omUDTndr`Cm^T^hKZ1*@O{4;JOZKqy;wBAcgeEGfApa}x7TxnjZS}Y zU@D4Xo?x67fhDNfy_S^BiZvB6L2?=*r14Wk=&Y&`Vokx??RKxQFL1431tbDIi7u8> z0EZ@?>_B6eH2OXd=P8=|ps}P`$1Q$r{r|j)F-;86I8(GSTT(VV%APF>gfJguu^ccZ zkcH`TVT*{>d4!d~AlTGId}SdXXtRwmxT3RSnTBBVoJO?bk(kHKN82MiM$JBk`uw*!egYiyK3b~r6Gs!0)X{qx#smJ04EBZ3 zNE6#$zEj@|>hmf_egDz9BXCgVZ6H-0GiBIR(IS4b85Y8xK#b?8{IpeRlQe%0s5sOk zSq5fC^?jjS3=#Ob)x9k{J-@k^Iz>eva>9WW`W`%}q#6Sm4bJ(A7j2%_@SI zZic-6#*klM&v+Bdy|7&a%z58cg(dvl#4=i75E7(dv*A!pa&yYj5xLTTq^QCu9a-Nw z-KI09N+RS`)J6&o!jyuzEvSF59NnOZ)Z2)nOD{0Q*nY|!S%QLKCYABdR&9=l(rC*d z%LCj@zD9sQy6@5+K&fD=?_pKU^gV=7oLJ4;+^xAUzjB^rp8+@hDmesGd>T{p#`|doWbA#^`v&=xO zK}I3MNr4bI;XLmlpPPRmCu^)?7XjxKaaSm5A${jEG4BH%h43~}84tZfVo|BjI|`7t z75#q}osh6jSS1)lM6gKIr5uIyeJ9TjtU>5=TrWjEQVPUp58HH%!A3%S`3zQ!BUidkVbADzh zwy=nW$cFDsCKDD$d+YNx8BBZG19ZaiIre3Qb2?~n#rst(0f@liQI3sm*pG9wx znU3D5NE*L5d1$|w;HQ8SkX-x4%7vowizr}s_{s&-q&BO_Le2T<{3MzG^RC9^wXU!H z?uDO|6Zl)Ye6oKCWh6%^8e1x%NaTC=JW`~REXa+!gUT6BWg6CrX$m#5(t?pgAxSDI zO5?%WPG^30{-A(NuNdjnvGL7C{PsbbSEzr4#l|srQHfB0wW88;?OfPQ zb3a1!K%8>8WNsO2e}O@rx)?=ATtQ03oplx>0@0kCHPgOG=y)JPV zPieG6q<4RgYh!Yact8X*LGscH!u)MfSfJ?L6Le!`$%dq2Y6Zg{mMe?<5+fy!)-!jt zJz^MbK(6FU(0OUfGnN`pP(=OVJ-AP@eJwFaItyrdu@y5>7Tu>=A<1FZnFqsefTGaM z@_B&D4Fkjd!(IVy%|1+`;Y8&~+beDCkuQE>-LrplNWSl5lM5b_0sGDcUzq(6_dFSW ztzGAS%b#3)^_*EtW-9;Tn?1^56d)NLu*Ntd$tU_xl|xKGh>Kqv2X+uH28ytxg!;)F zfy5&LpA(|juJAG3ce*4V4Ufsw7Nv(&im~E0!^Y0fq&-c+4+513Dqj-iFL6a5_T0Id zRZf3UdK;Qj^(UGhmZ+%*45jS?7}_q_%@?+IKmgEYIwsLWbD-BW=+mE z%RmL1<3mVzO@4;{1-n5w!U$H#^MQaAq+5DBYqMY_9aExHqsUb!Pro?+n z)bK}&-6w(}B%Am(26IR(X)QHwwwnFpo|Z)F{cG~djHb3^I!7>DJ4yO1pzZzd4O3f2 zC={lLRWV_+Ue0{xAz^wDDbuO}`JhHvFbg9(v0-o`$e5XufAM#X|DEYmOmeIm|66}( zd#+uiW-Ts#;e~cze&WavHi}v-AOGN>XI%f}rP|K54@>m@M?4j26pTC!5rqsk!Q4A< z4!%#N-jm_k1?^LldEh&@|KhzTwOD#Sqi~c2!|?3)JnoV=2iH-CCB-O{wRQ9Y+I{yx zYMiu+P|7Jt%4l7FqLjDm-gkR&8DW0~@Bqs7x<_0XwqCd6$Mc`uDDF`lwZ82K|9$y1 z{q?2B6Rc+b%byM42d_sj-m&4roI1uQNSNPeTld2nLKH8#yj|Q%a-!(!ag!%Lq!P&@ zWKu4Mi{5(>8q>8bEDOmcQWW7J@n{A$mzPg0go!B&(Z^cB9OfX#m_6Cs=1_m|`*bU( zbc7&P93CHa8W(FM%r2xN_TVzbc^qJn!FIrwFMGV|J5*{sg#~bJk0D&KqS_wOSKWes zt-p;;XQ!sE`tld2rEwK)@z(ieCpLdF#8favC~;vN1@II-K@MPPz)_? z?8|gDVgLk8ofo_N7k_lrP6B@>wUZhfBtgfqfs-YERLx>yypaZ8`6 zGGU8BBp9YF8Ot`0Qq?aJD}p}qx|ND-T;3E#dYed1svib zGTtU2#1hVKebZgEI@6C$`wsZyRCQpkr0HU>dY@#$n?$Ck{Ywb*!v+y!CBZ9d!W zL6K;?zBhSti!aO{J8pS&qSzPK*S2f5w%c-%-cxfh@xl}hrb3sGpbyb-F7oneNjf3G zO;C};@i2B5y?uNdjzoWL5F`Z#DJCGO13nvuaPO7E45=)AEE0Pb0R<(mSZ&`&b~$H) zX^f~bCxw1Yktp_5X+N&dX_Dx3G6Ay2kNXL0>N?NlTIdgG|>BYVPw_Ug?G6 zw5^1VQemMgNgUrDn?QF5HWcUchy}*Vj?!vKwX*zY@3GZ=AD@5Mat6Ygg0MR#P|F;O zVsiG{{7OF`I3av;?LV!ul0xHhCu9v*zz;gs{n(82?|3~!8Popd#4_I6RO@4Y`1C45 zrdS$O$_dhZxUTJ`%p8n|A%-{tWtD9n!YnGONg%hTM$fhU!cJNAgXiM~-ARs6$}EO- z?x}fzcn)XPtXF^IcH19njEF!`R4DB?qm>0wfP~xAcBSjGa1gjuI-g!Y?JCiBg`Aqt zzjLSGdC331b_}zjxn2Hx&gHz<`1?>isgjln9GxL*v24l^5(>{?`e^beKacfuL*e3= zf45BGjHuMHR)62vX&u)Lu;wK08e{&ey1f>Fdgth13kD9z;rSpJS4$*!*=?sOov&i5TXK*RMY{%N$ z)o2e*{?L5h)qmsUE8i`yP8JtGHrK2}b4H}E*Fp6<)bL-{0YCVm7g;7SgUfGQFg?OU z4DROpydQn`V+RfIbMJNvbX7W@(U$)d4e#@H<_mx7!#5Y%)Zhh0c=%rL>%;HE< z@pdl|Uif~T8&lAK8$y+wQNDAVoFc+Ec&+_$=#0hI3Ik~&A^TIsC-jgkO^6Uu)I|D^ znSI@-PXpg14B9;Kn=fmhN3jg4dCi zXwHAFbW_gsS-W-7=7TGmg;*l2Ms!AsDM;E6qcX7+oj{ccq!`A6o+kBMviRYe@V;WK z;h-S1vUrpQL4q)FL}ZSTWc|P>HdgnYHyMIJI?>0b#ucd4=ip;~4t1pY;QwKL)Ytp} zo{AaP@1>ULQMJ$iWvf=~4{!=C4<`P4Ho1T4d^|oLAb#!lAjUU|sNQcT_^XbR{Mdu` zy`-s&_Pu|z#TN7UR3sL>xWR86lqF}DM&hq~MSMefR3y%d=GgCz1V-S6QT*fc=&;4P zscYGSG74`~l2V(PTMJ8yTKp@|R7rel))w2;C!KM^7NB^j_QRBdw0`b@&XpT}P;-CW z7B&u@Yow6=86@>|)cZSElWzC%`%Zq@<51hTPkB>5S>6$3}ovvf@<|RAKjyzB${_-3QhELavb@n(c^H@;G-J+_Rha*gmuf4 zcsSO1qRr%&E&aiVwEJGiy_bL9sOI{t4s3@|vNT()*MUG`3-n|T71^+zK;=-OA{#5o zk^Wm%b;4d}Q##AHbb^K^p>(XZFjJxZQobcYY-5q@ZcYwWY?&x}otNFo+}pCOe?X{ z@%J0kWnvdR`5b(O&GUtA2HYZn(pX-w)C2_Fnflz({?~2%ufQd=_}YS3oj;@U58feb z@XDdZE?;+?`%PW#hcoVZ&z0uK3J8){tdSYik|IpXPLeWR`)aTZSJFhH$yd_uBR(VfjRggso-0HOwea`q9i%{7 zQ>bE_R&#IHk#29Iwi|7SsML98<5!t#@HG0;ll<^K$)k14B*7%7w^-FCp8prDUfIMnO2q`ONd!-xK^1XCkOwNA zxFGcFK4$uLLOgvHFBd+TI8CS{-9Api?%u=BqZ(@bW+6FbN>Yz2fr44x!Idx@TU0UL z3cX1bAz!fj>|?=X5zZ8qM`g6H6(d$XJ`4`4C(M6om-za%&HiacHNG|b7wP@G5omM} zVH3nvpxC~3LF7KK4Ie{7ps)}~hHrnO!MY)F^HE!eiayz8Xc3iTy=JOni)@0A z{n?0+)U}<&LCf3d@o7*{iD=`B=CqwcuglYMrB2+Y?k&Wjt*`ymVt#2+Y*Y)4C7yrP z_Z!apWgi`o)~5R~<|Hj@wqvEBqj!}vt@ty z^Qv$Fe8ti>Zw4`9P;rUm%-9VI3L*+o+He*c*CL`MF)_E$Y*W8QiTVlwWy>)`OX?=4LsnWW=qsUQY9G3@NS7p;HyN1xnl z^iu+t7)O{Wi>Vv(}I)+)JV=#J7%3IzY zx3vVOzG{V{1K;cWvDJxWJx?_6{iPckaFz6DqSbqR>75~>8>c6_SaBD0-BK|V@^SV^ zIchZyYGtCtvdrjnHiyxKdKZ79{nug?LMAK{M#u|>=b0=qHudj_aKqsSYYVS-i&tL8 zOvHSKtBnZC50b`KfyCCVDaK*R@BsqQJTKXF$C6DNK3?gfzwSpcyoTphQRe($TK`Y1 zkhC$&7srS1rFjY3*_ zV#q|?L^8fQQfoVn4OM@mr5niIEN4@UwceY#(32Disrw?@=iit5{$xO?&O6Sm;!XY9 z5%FJok>raD`HPcl^onPGNP6EIDy`8+F#iUza&#U&;i7jJrIC52h7sFi=yj$V)b==Na;s4pP} zT~`*$6_|6SwIzCMdiUu!W#zUx)=6T8V8#aGwzS>pB_8_7uT zzYRVTi*uhq#7!p~ZY@-}<$a$MMVF5(*8MBF5W~n5f`BYYq_~Od`P(gIH(JFb)cTXH z_NjY~KS`s{RJecfCtUllXL|inWe@9$kOX3@fB{^zY1-)ArNV{b!_!N7n||Hho;|&pOg8Cv?7Q z8*M!J$se0~+IP+~ugN&`^W=rS#eE_~TVECN(h7fa18EZG3 z;#GhB&Cfx$Ijbljp*W$~?EI?LbicX#BG++H_tSv^?f_5$6Wvc2 zFK+DJ^rPRHN^?3AS@Dlg{r-5~chmBWRrD3hD-4kdI#2eeW)KdiR8j(;ui8vFG{5UK z`s2rQWcq?l%l)7|KdonGmhX#HJ|ZX0z(#+{)4yBWUsv1TsIk99+g}P_{_B@5na||c zdYWFj==&b4Bx4zW*=LhS{o%uA&G|$*LJbG-A*AxqGan#XkaIqcF)-bivcff86Y0QA z#!J+mW6sjwH7a12_ws!vrM3f;h=iV$`MHCVpp=>*N;K~Y)0FXO`onKFyoRS!C7yrl zcp0?)AOFIO|9WOm{)bKds+Tn}?RPzl==b6e*hb(tJOs6V@es>Dwe#Pa8f`m2y71q= z|5uDaf9jPrj`}{ED!kqGABCItQ|9a1Z&Pb%aFga$9QO%{w)g)JUpLLHC|j<^!cktb zIoSewdtZOw_tpCz`J>EAho)Q(zt?|%zKl$}W@}q6(EdAyMCjzhq}3wk?K$WSZ?8~K zy9mvXN#pArer)Yq&z$_9`M+oM^Zcot@`ydWG}VPa!Pl5z&X#aI$zGN>w?)jf_qs6>G8r^o^p=P$}CqMC5 zn#2#;uecE{CfG1d5j#^uQEGqWXZIqLSrA=Ns&eiZOGlPvd}2R$&n#gn>J$jVjGcfEWP3>$mFWJM?0@$}vkk%>olM;d=K+h3NwilT|i zxz%E^06jj!?oE8v%mudmGpbd{fp!Vo+D@H+8i)zf@>#k?AFaTiYn>MQh6-VL~;{z39uzD0qtm}2QuUxw}Oc2`> zl9Wt?+}te6Km8f{=#GD-#bH~ty7jv7+CHEczpwA%;dkDgyn3t~-e2?BU$wSBw6wo^ zc^Cbho}ed89`TbqPd-GT`DO*)bWKNp?mIMc~rSJ#b{xvf)eo-d4tgc1vx{)KZDdcb~Ar9H0pL^v=xr`!td1! zf9amKe%rg8{22@X#_M{Gtw#Md6JUtHYpI8*C(ev3#9316G->X)KvFcHTUABUmpB}<8QG{7Y=`N{9kbrR&s3$%DfZr z`ciXWI%W=UFRq_rT+Go^2+ggbl(qNiJAYJxB}UE+*mY6`w{!ZfML2${oP!*`xhd((!zoqgu9g7AD14KjV)d(EomEEBJp;-eLdlC#fTB z7pGu#&sr0y<6JZ&mU*3u!ke6rJCsIdtLuzW22qHCa;y8%XZO;vs*-i@3LWBtnj%8q z>)%mSOPxjQe%P<|H@io4EeP0gVOU?6BPa32-Z>i6l(U1L*a3h%@7;lltP2JN$Fi(13 z!idm3E3>JeR&kJ6AXM*KH}-+r_%YJ+m*0Q2j^Al@ZcfYN|Kg|q&=a!xFJ1aC4lC({ zs1L+XEb5m;>*A40IFfc=N@HW9!u)(6nwtM3MuB#~ESwsN){*OTv;4@0$Hy833$09O zZkjoMumo`#UQwfBm8Doh1q2$a);))yGE|`gX1Vg?sap5%Mt0mgN zk)vAad^!SG-iMl{o|?5#ah4iUCSX!-~8&4_!CFBnD5L@exM5kbV3QeP+f=7 zHO*2@h!Sd|Vqf}I}BIsf(iU;R+Od#DZb@Xh}$_|qNHqa;L#+Nx6@V`kWOHnaoS z+o?*Kaz;~0r?gfbqV)g9ggs$Osw5`5PUgkf(z*q$Q`C8x=F6Kt%BFwh=Z=5rebfE# zhZs$zNrmoJTp0ucmgqhmVrqNhy2j-pa5aa6kVt|ZFw)o&dvpcRPPZdp?=zzMef~Iy z7w&FqkM(-86cJ$=Cnz1+SdJ`@a0(y7Iy7-o*kI(oS#zp+s9$-k_80&E^4Rt0Ke!J- zdP+A}q-gGK)BipCtDfHan%{qQ{#IApX8%MNV zNgBdDMi2=7zGMvydxr?aBC?Py4i1!9{AV>wO58BZftRAQFF#zg{WZfK7>W zq5D>U>Q?;l>OpGfA^hqK(=}<v@vlHE^2(MrgWe75&r0-N3TBm!|y(N*y?*M$m7&o>YAkPeR?Q2 zLf~obq4qn(%?hinvm<{2nMA!Gou~QIDb&9(e9t4VJNx7Rn%s##ZP%a@O9>+|QI z*+R1!NIh4l(Uj=fBf$pg8auNnM7@Rs)cpxEqDUqV@|i;+wuyhzHG^n|WZTz2Qfv*o z7e9oWe0gIE^{XH7*Zuw40~EKt=Q@W!@Oqp&F(`xbv@9YaGY^f5J2IqQ4HFh{ye9=B zDKBSor2oI+nkynlOAAJN-`9`{R12+hZg4vNr4RYt<6(dM`R1N|&qQi)0VK?Va8wgH z+&lg&)?|F!^cjCb{4w$6!mLz6(nRBRe3R1h@*%-UMXuB~z_eq(3n?8l6;~`|vp*Zj zR**=&KOx|T56!IgO|F+Jo3xWj8iLEdKjxY#Iw+uD4dKL3MG<|<34C$d6Li^^IA8vG(Y@z!) z>;`ufKf3fk@Wy}WuzITLS=fK+)b&iZdKgD_ar&jX!YA~lQEoI=Q{7|LeUQ!w{@{f) z2qFt;_3M*HubN~OzWTAgc`nfPMpm;7WrgMFo)T?i5_(>mOhM|oWvu1ClsQI`d^cq| z)3bq0$pU`@<;BZ=78!40KtOU2v3{k-XIP$}5&e1R4 z(v-Z<|AWiZabqUUPgs&p-hB^X@%CKy&?iP{>7jp-Z}t>LQ}w&Ir2nNofVn`s*}K!d z%r|K~b#qywNvg`}sdh7I?qB-ptE+x`8r-_jF+_XKi`Elun?5B)m4l`5I6_(RCEMnh%M#O@GPUh4_2j9YqO_3;-qX)EY za)N)Q+tYoirr3{jRZfv!r-mY~aZjqqSI#J^%Q* zAo}VH9eC1tnp*_VD)uPoIZ65s2m-!Bmim9)|MTovVz%gsZ?4Vkt&19|$kg>3)8AOE zCQDQ1-tT^??rqQ+q>vNvBH4tioik(Ri1>Jko zFK$s8=Z^TEuXuE1(|-WJb@ZhI`8Upb?VtZgKTb-0=KoVK581CCfmeN}d#As9U*dnC z-j@%vTgE>ghA3L$3M^>29E}^7o#*~zW+{%h12Nqn6>iQ7IF9A7vrQ@QUa)RH zvi9)mRm1d!FOvj|*gV8JrtEkEmFIupXv>kjP|e z`aGh6*CXZh?fZO_O2vX&Sa?Gs=-CwdFXixdA8w6a>RmTmMH1Yb(c?2xrXctq{ZOXg z^oK5f+W~9%&VOnPG*A1Z&(=DHq~SXuh1aahc8-CWCc39q_x9+0Heg<09*kPsR+J(& z32b2Rvt_PlH)uVQ-kXXy8U%lVvDDwEZ~FVV!D^a@5`i1=(9A{*_Cqg(q@3tH@-uqf z-S`B8u1W;+rC|cmxH*fza}`IENB*h8sLE!!?%$(@izXclXo)I>XW{7J2!$c>BB~~x z9vF{_JT~*-ntu7J(0xP=ex9XmALP%BsKol_-2S<5jX(XKHPr0Mw;g{jzVX=Kyz%pg zoAI78u26E&A~jH2{~RYLRh(XR&)2?n&4#NJF~O)n1V+_VssomZj@i=*qIOfu0#;34 z+9-5Tvu2j8QV#@9O)4YBf=*4N=(1~mMUb4GpW0~Q3U*M{=q>OQ-4A!5YGk974Vmdr zY-;SueSiMZW6#b|RXcwxxYhgY4igS$qhC0Yb}m!)%XYqSb31E^e9vmt zy&ymR0U52oYXy4WpZ__p>6;gN%~eDP^B7@Kd^*yYT5|rrYxb|v9ES)?KSIKA#PAJj zKobWA9x3)_CD2KW z>A!g97i+X1ymf!~g=tBeFqvr0R>oBGzs^&9*px7A^21o4w*_szx4Wbk`lFoLPh319Q#taq5QUpmpRIQ&1)FpmDO<`l=8ulj%4 zd#|47d1W#5uLQYD-~_6cWkVi{`i`V(E;`KpUR*Ee_sZv^FJ+{e=M^K z^dKA9_m}<(o|9G{(pR{AAm)U zpOk3+$^(B8F>pR%1zf=Q4!D6P28QE^fJb?I10Uck(C%j-Fzf_=;Lc{ofJa#a_SX>u z;2JFu0jE|!aBWkbK-ge@hIzeGo`7XwzW+VXkubmhcc7k_g4cR-^5uHq2zQ=hasHef6G{{EYXCtG=x4L8;XA^h%Eo^ak%{l<7ffed2_ z@G(n4{1(Qoml42{@FTE*{0R<(f7fAF;L^zIC_do7TUK#^8^HX5!}=;%-mH`z#+5H1 zJdJ;WTuA|79nNq73LXxSDt!QK#ohpjVP1CR{71fQ3TzmCUwHy?=2C~d!uMm9{07SW z^S+<=|DvCLLAd|L|H@y1{4%ip!~w!r&;{TQtPt=U9|3TIC(*R8ykMLqHizwDJ5V3U zANhXU56V~g%7+!;#06|$;ir8Y5;PU)f4hI}0IW2^R&2@&C$c^epB3hx=l}XA4**mW z4WUj^%{I09JyzpAeXR0XXpR@f6k5Tr#eIzm2>3b`Q&VP9y3_L1pv84>D%=mtm^y>nr;Qi{bJ~BWPD*-pzd#ig!2g>+cve%<0SLodj66_zbOrcc z)Mt?RQQvGPyKb7;S4qQZVc2Y-3G3CV00-DMKFmfsgn9;SG6R-ZQ15%r$H7@n&j73I zaHmPU%s`oyLueHb-@vg|iEi*B8vv)XHUTxNSYo-^Q340mcGWTY8K`_HSJi(Ewic!x zThcqZO0D)NdQKm$|f#sBC z2F^SKW188Jk2s@N4rvoRlm8vpovVa{E8X12a!EQj70U;v7^Bg~c44e!fLFubi?FIz z4k2s$s+vXCi8yI<#4b~GKn8zeRnL5#L>{*T2jkty6ywO-G?9&!O!!q_P|f^LIpbT7 z2Z309<=i}lY6fp7k^ro)`ql^Dm2_~M*~2;j9ff@Q zwDE$Gc{C`$<=LwqXO#maYvTp8T|ofizE2WKOlBZVQP{OJV4X+`lgNK#yO~XR1yvJ0 z1ES8cqM`#qjl@pTf$U&xJcGP-CZH&&k|`dnL#*@+Ncu0Cf@7`{U6A8Wsb)|?T*(cq zY<2|GLBsa@PXjI%!gdD1sCeAQ2}ad4oCfKceITkC5Y%3bV-aL=s3s9-CQ8^serv-`mc^H}-!xHZCxx8LFsYR!)O&9CvJ-XL8EHZGbbmI0Ib#Og9i&8;H1# zJ%(il-K_lZX*sw>vVd)%T?AsYITD*B6JTQ;ddn}LnyxD*e$DgW zu?0itK;?Se2D}-{nV*5v#~dp*jae&_Q_b9uE&z6gCoSTc0~vqhE%IQyvT+hVH*f@P z9Js&HVE*$uy-kJnfS-E)um!-Yo>{wsw}A*OGa$eAub|q=!=tP{5DTkP7jj+ss znP*4g+>Gtwz*%M%M$i;1h2u6JHMOtK!1Q5qQ3f0b9lv_0=)n1 z-2Vj+bX@>KXNHk3ctQu;Rpe$?nBYA+P|wsr+xboDok?&&6LK>UqOU$2p%dvaq_0^) z`8mU=Daz2s8E~{lXlEeSf67Ds1rE+_KXoYm&vU5%H{O4xA^!#RLx0tsgWLEH+MNU9 zhWobZEU;A+K|M69*@3;b3EP&R0l3U?tuy%1U(wsZ>;TRU-FRk|)@fLA1|+m8Q~BDE zQpI{6OW+%`+y{cnIm1`3fr4rqh@@Qv)if*fBpu&2iJ-hhZ(e3LpJzu1wFBQI?$|*c zOnL?qT^E1&ANVlOj`IUQj-7SC@Lbmg=?9L^vy*?}a&zn)`-MMrT~NPpZl0l>e{264 zB$bD`v2uWQ24VsG4`{bKx3y4bezQyQ$?Bn4wF*aV;4XZ9jGlo-=ZtD*+2$)#8a!jD zFPu^76JYatu9dJ`W)93i2ew8F0CZsx>A+J^rE-5WU_p8C(59X&r$`)>4JNS_3(rG3 zouw>w@?L zhw_cTaE&{5_V~izx-KX`a46sI3)h>ivUR}5Fv82RRk04@m}6nz#Kwa4?R%l^%^W$x zzSMuURCY^su^7Qn1(h9{WR-DItya4|F%`k znYG2v-}W(@Kk1-DN^YQSz)d&98+Xn-{iK+%zpSbzqaSQ;5r8wMacg-|2hB;Es zIefFYt+Jt<|7kzLZ9p(#Tk-K(Ei>Oi9}2`KsPu-FXBdTUfZ$syp26m zigh4;`|rP}W3X>NejCTQd@MQb3_?DRTqn|j;NU{`&5N*4i#G85JF@qT>O1CWOeyxO zOre41py>vhXydwHbVN{p6aMe|TO5D1KXh0d+dN z8u0KRj-8~#sH6XZo*MchM%Mv=tGCd%K)YkTxYB=X&vS!%bNe&S42ITeK&1$A>(Y=m zv$i;Vvzr6z>Lwm0Hv^-G{mM^UNgEK|SZ@FW$C~i@A22A}poe|dZ@6p|ZLEJ6sz3cw z_q`ADE8Cb7NHwP}#~r!hT=R0P;W5{pJF>$V_(KN;Fhd{j ze)T-vMAVD}cJZRHs%)`aXCJU4^nZfzSG;<0{)(skv-(As^fplhN0j2hqm&y4@DWZ*yA=< zFsp>KHa0Np&pM(v#4wRPy$0XJ<*I!-Kfdt2dmi_~WwCfYtU|G+D_7YSs{T;W$aW*Tuj;~Ju z*BX5DntuZh-wlL7paB8%K5T1mSY`qJ`c2m}pr4lDIQqBu>lth5I?&+w!H44-ftlwD z0JoW!Sp+c6{-lL@u7rPI&lQ;d_1yTuKVaJV!T+X%AN+4R{=xsI^LhSGjn#)UwFrHzEs9hC;fx}lm5Z~Ngt3709=1)$5I9PVuA3L>HA9u zU<=FK!~YN72kMZ`&42g}8Nl%yqtPsYWpfYe z-8RjF0P&9>8--=o&4KMC8*n`QJAMJj#QyL2*e`tc5BwO`f76aXd`I!sZFHK2uOA@4 zGAAEnBxU{Yd>>s!rq~&+q$Cq~U&o*=U*4?a|^W_zOtAUF~ zvn<4~xF7skP$oU&*l*px;VKHO`wv`5v;RA;@U8dwlN^-*){Xoomm1Q|Gr#1*$v^nN zWTI5C?%#16NVoqxuJ)zlciefNtNG`tMr^Kde?lU7y9OjB2pf_EoD%83aK$d0 zy_{`CFxX&5b%;g8JvoOBo=Ey06WQehS z%m4ayAcyy8072lOn6&S_?{9e)&IS*Ez&R+i?GJxA4;QD7Kk@?LzCla?J3z$0f6I$N zP+04ay!5Z&CU}x;`A7X0;2oXl3=4JV!#({wLlmz&K8S zdT#~at3&*M*kMn<2f_sq!n|04`^Ne(pH=J$=}R2;08$5%isc|3 z8DKkj&ttZ+D?DW2R_q7Tm27PPi;ne&-=Fz%Z)0m9I1o6jqFukDX~zU_y@|KuyTKlDrito7AP=SR=}-FgM@ zcLf8~?*Ym?-~Ljs59v!P7KQx8UwseY^EcEt1#ZrlI%uc;uljo+eam4m|K(FwVE)@K ziSuLEp2HfDPJd&!AN&5xdyT);%Wd%aKh@)ZFF&I5qqqNvJ`O-Wa44_Tfp7uD75xA5 zML_%l%HxaoLze^hoBWhRzxSF{?C{H8HkjApda?ZVS6_dSLw;qbr@!a~e`;rrfqYBw za8CNgb3p#}2Mo|Rh;RI~j|>-4*%R8^k6+yWtj`~KAFunpNAqWu?)$SZJk-~J{A1~V zU+a(D_g?bv5+1yR7rG>9AJ8N>c*Na;yMY7%-0yyd4FvGq7j%K&e(fvb`4@>KEZhLj z04lr(gNuO*<8N^tTs@wCU#jDnUv{JbV*@CRQvvS+hIVB+aJ6m*He6@tNjRir0SrwA zSP{zK-5?Pi@G4e^#P|+tLH+}HEsdyuK>ZG29Qd$e?~up{o?{SaNNfQl7z>cIfMS5w z;Y(n`7`l86EP-VKpTW!C2!~IA$*`;=z&m4Dc&9RO2A10d12`zl!~5rS1K*?%!G*WuE|4m7_$}Gv`@B;XZvpRDd-rr30k7a->KB*nP!a%8~}lE5add zi6WL(th%HebMYbukr`=rC%935i*4j{xLZuMq}nI8Lw9fK?1F;!+t#XoyP7q3DnB~P zxi-k_Z1J&gujF!&kG*zNHo00)e@r;%Au$*ejt5`ePk|?wazb0vPE@+)kTbi`H=+?j ztx~#XsP!Orb&sotrlQbmUs;YgR1z z7=*^#Rz;>esdwfbBhos5UfqRPY86=RZFzAe(ROoLW@331=;O3a;&NK*E93aLn7%JZ zK*(A{tcNussN%KPcZAhlD_8mhC6X! zP{yEM0$W=qsxB~V98q@|F1buI1e4!hw=WTCRZvYTrEO>Md#eR+z|@#Xo2vG z%ZkceWvX6hlgAWo`usT`NYLBlr??)W;XKmzEP+^ns5A7Be*5eQcC$|GMzoRJM>%!8 zq~KbXx0%@A_`cUOPk51WJ@$v0-`Hl*bVU$nu-&IhP&8l{jL0CjaiQtuLo8wROwm=i z2~npMWbCwtSr>|b>UqKba+oPaX_Wwpn5j2fPM00IF3yoHasDRE^nJZ1IWp8WtG(A4 zNu3ZdfFk(>e&2tbhrqz&~wyR*mD+?6UfKAqYV`JdqCK$vdJBV$EImxZYP**Xfcq&1vr}cP~;No@R=hEL#{$mPLHD*qL^NZB- z1lCnK0!FkAYgFtWik}vOx?qkbAa0;g=HfVSFcoODaq2*n!eDF<%hMFGTosU`3HBnl zYy0VpNJrT$pg&{n`-V@y54I2p;brD7;bU{B28CUJK6K+TV9PThcHB2Nqg1okv%cn4 z&+6(vXuEgBBrnGb$?oK|UY&ws+3r;5`SO(QT2&wFtKriBDxJyBcD#qULHu_wwZH@bOTYMjh~o*uOx1EYtQC5^QlkKoOk>f9l1HZ1Z|?W zje2mQMswG@8<_g!>I9;ln4+>FN}1i-0@GT=m*5xfU2Q_(j!S-U_$ld{`*H`r37;=n zBgPxSldM%~)y}@W57;?7yYZ&<3{@Rxgmm`blN@$~$5H#f-;P8wQ=qG4pIEw)h4*=X zK{!^jX#Nhbrt4lhb={juITR&t^Ra9WP<8rVOczS7^00VxS6LYym7IwOhcDzMwsw>e zvgfdYRYn?8Rl1F_98ykJwdF1|z`eUtZd#*0d9lUP#$6vK)vgY{Pf0=0ew&~`e{O|3 zeiB=(25a|HT1;uq!x0?-)Km+nNry{+%yP1QGb>%-^b(cxDU@6ET#kYjK3&NiD`f|y zy>^dBDHs`IMxt|(UveAKs8{2nCh(OkgaN8+qAry2)ftF6Xd;zSI_13tqA}l#u#@xL zx~cYoAs#ny+NDw3e5LBSukR$io^?XX1) zku;}vMU8icg=_E`NQ3R>aeP?kI5&+^?Uo8K zwPhuCr^%B$Ig-`)hq=62;k`e~ang=3K(O~lWR<{ZK8LW9`840z6?2-xe-(_W*c)e} zPo2QM3of|w)we}{apF-Eywwta#H-k7R4*@)8B0OwZc~caG6-Fa;<-uyiJ8SynOmCN z)ttp=!@9rH1rNExqHU{Bfn!%ssgiSByFV|^S|sz?tcf|J0#_U-<&WIXs`q&1N<68a z;I+J6j8=m4E})Zx?w@k3$~-ktw~vo;(d=29^QHBg*EEx6Y@V6obbf_@lehqmKFEae z8ipRJIWO;VGo_1QTI5|H?$W2kUb`}AN~u&`oG3>1y5nuF5bSEipq6Z2x*W8`%pZBfMOy$0RxT`6=Z|Pp**kU!@&?^1*s3keetO;acw0nM0zu%tuCso9YqFqOB(`KLqdw*_%F08MV+E|*JG){Wo zjT>%bL&1CdR4kc^T)gT8t$624Ih&(3HR}L>hxaG@*wP_4;u^Sr^70O|DBYr-U!Pno z%nmXbktrEXv6A8t-I6(4gUBjk0JoM^P3{=xn48nsP*ugdT$i1#*nK$e0*ehJw@C}|vy=i+DE1yb@Pid0@L61x8Adqj&mb!AM+z%_6qP9NXA3m~UbQKUu!N>XRX@JhnaKx`XG*YV3Rs&H^suELJb2EY@crb0`97Jq%lR1m9yJi+eFHKjf(Uz7 z)Vt`x=wSgvwVoJOiHRPeEY^=K+EpQ{IQ~3v*CksL;l93Wr^>o+kj=O+=rtwMlDP1Yyw(wSJ+1rI2V+L@u z*LUeXXE)wcxa%eBM-}Wo_IT_--ms%ojmX=U&$YNZjB%)QhP+~$ z>nR}nVZgh?W6uu_f@nnEz{2h^mbup(T^RM-eSD?*wN}z@c)tkkx^Vrw&0{7)`C#s_ zzB4|5d*EI{MT&dU2UV1$E$YT=SxSHBWU0Lkh}U?uB4Iy8JxI_{C(GjK~0& zH`4r)Iucb+eA!)Mrb8iJ0jMUCl@@oGrdNy!rd*6t#rmPGgm#dkn)kRlH-Yo>t?^zD zgdqDVl+zi+{z6)G!*sJYJAWCrJ(Y!H#~xInBTt`* zI~Xo87}u7;Lx}n$p0l~^%}{oK$r{hEosx&=yDN*dA2fT#w#2nA%qIG)Q0fGe zvbjfKYX!=R{yYQjk~y~ZI6~ww8o69$x)831+w_!iOf+>%oPu!U{Cy3r_jaj0BV`zY z9zI2c(pf-x^;xQcMQA1)w4HAi%ecRNKH$x9BQBf3%<<==r*beVIA*I6^{P34;;Gf3 z#`Kd9F%x|U9gl1cb5@=G6q@*9hiUTidHc5SpJ<^3zHsw_eW@SKj^ z1H>kZ*=mEj0UN-CfvyE_kg9O2&|%S6!LU-SbZ-S_yEK_7N*{khO&6O(o#LXq`n<;r z3$DL&PIQy3UDjgxRL{7|H6H1+#W$Ky^nP+BX*G1XMG2qWnG}B(BkEIs-B4Zn;hil< znvs@B_D)kDG~tgZCtjL-7@}xA%kUBMT6YOICNC-Ob&1SF!}Ew%qCVQ*e6r_e+=cVRC#UG5jw1Q%OlB4E9)R&j}x zsaSS$YwaNFWW44cX3BVI#P!1~Gnf(YjQqo2p@$o4ZwdFEI`SY1_=XyX2Q^Q?qFo1?VL!h z-(VRy zXW|!P$xowe_hFe-Ti|Qfm3Eh%w`zLJPLr}u()5{bj`V$hqhszF1=Mm&=KK1t9hU0Z z-4E!e{u!D;8@3Ej$6U(lx-})^Q~*Aky2JT{>aFYauqFLEWpyg7asL!|U(|Vau6Wse z#ON!SVyug{urjGvaCaW#@xb=wIOu%3C+%5VR}$-rgRx_6I+hx<(ECy3*GtCF!cNhI zU;EOg>ep(22sYwfNOZ_E``+@s2&fOv`+6;)o%d1i$=vGM@i4_B;ZiaRY{E2lq^Ytn z!=vfSSQ2YxsV2^2)-Pz1kwkoD{4HS|VsmbH-riFTeW@e$xDAcS3ca7tuzU znI3F;=v_~wL3NPw&SnDd)F!qZ*78xcF67f|lPuDq zAhJDwj~hek!JRWiTCHD(Xj);n_uYXla9>8@LU4_G3BDKuWVZx$u0j6pyUdXx_Bq1+{r+5aHbEsN! z4|#67hq{>K*lp1qOm_vxYa1g8G{m@3+1k`6_sD(JC9V<6>n`=g1AQ(V>U=FFCdgQS z5!S{;pj0v1sX^Hm&d?&;l%8{cVC3`Q-o2*hL3Po2#=S4#iRCSi$B5cWx6koTg=Wd< za>_iCco;Q*O;-(_qSZUJ=3wvo;xc>^5P;3(uA$+ghyb2jYMpp*NUC^q=Dvx=r7>$s zS8C#g@r3g<)E!;k*!&#wyIoYo8^6VWZ+v2a7}-ycmd52e&(yfSLLk2ROSLSn$oV$T|^&_ zcz*_qx&|80UTm@9Wbf5Z)93wiDE0m>k2v&d9-!F-DM{i)V^|4Jd1UcS;K$!WkGh{FP?rQ%eOJ4c`&Fuejts+|esP zM<9-kz~{=F3&;DY&QW_lcV_eDL&D|kGrAu`s?6}Z@6sKl731=`lO@Ad4Bpq|qmg}a zm``UT{9#D3t9X}+`|Dno@8uryAL-H5me?TrMD{{B?JjSgxMilnQ|J+Y)gYJ0G2HnA zF24J38I2DkV1nbh?dc@Fq?pbMox{dy1ZjyK*6BR?a z^6oN0XUvE!YgCp*g!oE?7`)6N#+4kTO&srmoy~eC78Bb>F=shMH!k!r>I6>q9re8N zpVy#YgxEH15v`mpkJA@_L0_Y*8;2DTWWgWc zt)2B&YWb^~N0mwAH)2V3I=QdUr(XFZwQ4%#KFaA@q#j3IWDAy}A`;-3MSKxOq0Xk8 zn&~DLw0NBL2Lt&05ij=ha>|(`9r?t!k*^Gqw6#+hi8X^ zu2GlLQ~dRUPlNVrH7cH z4(lx{lj@V&51rNvBam74%22@&6=G=g=PZt^{G z!|X;e$nj9(MxSXmqw^#0*t<^LGXb^L)JJ?k7PN!krKc5tFISuzn3?J?Nc~8wH}%+1 zuO))O^>12HmB=bbn@-RV{o^$m8=StBY?rq(T6N5FW;;JW#-y&zaf?~&jn_!Bd19u= zm|nY82o!DX_P}#LRnCVy7Sv>I0>vFeczZln_cAJ%K}ny?{qkY7 zG@oQ*I`cYzS8I*8dn|~jkM-=U;>Z$E?|ynMrdabOeJ9D)ZW-b1!aQmY?~qfmA{%BQ zsKsvXLRO&ihLrSNV7Q@-eV|N=rC@^m@sPPu5$m=Hfwu)TZ(qlpBtvVLdgT640onqy zz;K_$2f}~Un%86xZ+4D;;ufNN(wACoSsXf+F9`sd78OlO@hTEIl3jH_-lXAq~JN7$m_YFSjF68p0b9`pk5WL8u z)aI9w&Swjgr2%IH-jdN9yd03(sYM?y=8e9l7Ef4yD+?vvA(Q)VKv^}f7>U$FfVXWS z0w&S+M85mc{H(n_n)*6GyFB4H<~f)m$=U9J|lrK&|=+ns#|pUaCs8&X4=sd*H65RTZaUv4cU(;ZoQ z$g7{)T%RY|{pg>!wBy{qB0cNtMKBMvu#4*jvERtd-2EgSXs~hj#WWB{VVc5KLt7Ys z=F6>!^aN$Whr1#*mT78xGlwu$yT^3W24h|yuea4?y(oI`?#XWKRpP3LY_)=KoCJ;9 z$|71oYKHh_P)nqcQA0jMOt=)q-pa$rP5tDqje1E#LaX|)i7RUi+5IiNa0MW#2bBTNMCN0y9_R(yd zHyihn`YKs&lv(`g$)iB!J%iV)GYV-@eoX56d7-Q~)YNc8+34l6irhG?UtN$#RHw$R7p~7U$W#tpb%A}lBq@)Li}dSN6(?ugM?$7U z<1*t*1w?zTw_Q@EFd=%jhAh9YwWibARG6q8gPxG^PQHMZo@ewCQ-vLW?S##apQm|T z-*nvWt>{)Ku_T2?B{#al>XhEEPxqEM=f_#fvKFih{ox3GxlbsXk0LtWpUw8+BL^;Ob1e~-0z^2Zu|XimhWtM2KSG;aTDoZ`Xxvy zAyp;967`-!KJJ)dyciIF*^3SCUU70Z?S#Xf$N1}nEM|q{*3G^kzD*H(_84{3V)UFL zJsqd~*t~If@0AFj!wheqTN{2RjLzP(zAF_aYx0QAs2{)bMw16of0$G{=yW?{vZy1) zi`-PqEg4h5F08Jm_IdgpN)Q>{xEAe`^i&T%s-Q3)zz7Xr;Uar?cKMtW4A2Bg| z%*{vqIj0cvazjlXEi%k8qN9A}HBS0B4IN*Yo2zj>>+$Op*TFv$3h$L`8i^y3>P~Cu z7Iz)j_3`0{CfyN#bCsX6;AXUUzZb~d5E5Nz(D{4%cFHzFt=r7G$9DxQ$J3A~ zia?VAw?MC*WgG1y&Z zk7>EyU3{DhOnPPlqpJHZUA(c{Kl-QK4=fML>t`zl(z=R(DFE*-bs`qy#;pefR_~$;dY<_1Hqh*`R*bKzSyX?7S zK9GsSC^aLx2KTd)1Xi&*q%-9BjI)u4`(4cyb=?+?-eKTyZhDjbtwPS$;Q( z7jf=%CfR)B2{C&4XvlW+k`B{soXcWj{*-wfTs_7gpS!m zfl3s>R~(ZRjY!Tr(sx7#5au2WY3pEnc;p;^(IBIyiv2KfwzQceP zSf4nvg$b1#AHK21X3SzlKj(@)7R*{YhI!z;D?aSl+P(Wz{qsX-78HQ3&yCBTlS&Q(Km8YyM&YQz=8+6+4Q_2jorr& z#fup|p=e6b1e>1U4Jr8|(O))=7J(=p?q`^XHxL%B9y zgR9A>_Oc$zOfwvLr`^~*i*Vl3A(>eWSvm6I+pj1&*Ogn`TWMr9iq_;@8EjR5roL1k zhNE+v@yiayJB$YUsS!sD3A zpt*-^!Y;&`QT|eY8j%X5jYI2ZP8K<44@^-d=Qmg%%l4wq_ z**(Pdu~2kd=4!}Hg4&jehmQOJ`l(XxzYjdW{oWA{ADMdpeS=GTlaz!ZgfXzK)daJ7jfW!D`_`NJSvb_6Yn8K zjLBnuJsowvzQkVPT)cb~pSRqObf#SpCVSWlyOKDIYvC5n&GpWj240-oCe6qbO*OvK zK05tM>3ecV=-`gY)vC))QEIBXcu!kuACBc;uj4AT?YNr9<-to2*TT|=9=|^mrN;KP zQI##h8IDwB2PtnMrf}EDc%IUmS9rw;ymi0o)T>stTXsu-sJ@;<`ojbQP^`=HxYdcq zk>_Tub~E)0aTRQbdktItZfknoPt>32l3l9Yk}&52lV>KAHEKr9} zd6~7ZBc@DVk8r7&7KXnc2R?F07C)B!Y9F^*jT$9GNE#WKlPrX5P3Z81^yOx2FZ&^L zcT;UD|L$E{#tjYrW=)uv&e}B-@R{a|E0VyiYD>F+fDpHyjDHnJIW`qJ#o5q~!eP{L z0<8rgDAs!A%LU!~&S(_z|6%LAb+(6r?0hfo3>i>P#TLprXLZWqRnF4YFTUSl4cM(D zk~1twf|~OjH-UfdKwO$D;w9TM>|RT?&Zj?(@I5#K0gev^8||%i+9KB)Mo_IEsY7!V zHj5j7&q#GpZDY=P>AGPEo{_>Dpv%OIl%jK)f})g+)ouO6revE!lzlsvT0~8@Trw~E z99lR!gReBb4X@^#y=|$-dADI9ijb)DYyqUd+6r&`91yIIiQ@oo*FvPDQS%3zGgC`R z3|AWQAwl}?S7im$Zzz}i#(2msoc3t{WpA+UJS z)W2|uLbDs_8LaDE!dy^Z40Kr2pE>CN92 z9jsti&H%q{4{Dn{$@v?w6kbEg=_1`_hbbJuZ24~s+G9RJDF|awPe{&Z-9p45ss~vu7%ApXvLP`1{E%!diRP+ zIbLg=S|+0WdfNRN8Dtf*nd7Z?M?u7YTikgmr3HC;Yo?R{(^sqT&17fvXaRl(BbSS3 z=Xi}hjd)Qo_6rxlI5!`XlMDP|@5^i+a^cA~NV{kRDYuB19t(CK14!plvja}^5>3a^ zF-oZ{e60cLwJne2IW09^o3dv~`K$FpQ5{IF^R8|&3PDR^U~3JRRgq0I8u{mccTcc3 z^gv8nL&1AXt1HgNMIGr4p8L;Nn^A59TOuFRDbcjJ9KeU+GCA2|cOVSuD}WaDI%~%a z0PW^C&dECi+yT~Z3&Je&_$>5U9B_b>iaL(KN#gs^z>DX~rdA+yVVwb94-C&SN-EYu zu!>!X0$Xy{%vdp#(NM@0y;@`0ZJw5O268ecH@1YlsXu>Ir_*Chl`Zm#qxb z0=du`#isy1)M$2(P$-Q#I7hTz{{guEw3bWRp8e>>ByRPF(KE(ANkGVds93K$O5z() zF|hA8S5MnYAK#4*y3xj+*IlC^a?WEsF%_y31@BC6Lom{o@$yf5bz5iVl$Y7=>>QV0 z*_b$hF<2BQ?@`m84nT16ucRq)*wJQYByOtTF#&ob_5Uvay4IZoHO1#@`#Z6tU?9S* zGc>;_=aG9B(R`WbGY*q~seq0Z&LtQ%HXNj44Wzy!*2l|r%P@zr1tune|H(a3(%em5 z8r81(3oUxITOxz*X`Q}BUX&-9)2|dSynoY8wBReQd#g;<-12bHfGFqpLoK7$xLWzW zEyjmAo$UI8d_$gt%p{%cUyX6-X}6My(D739Yv1|25(Y3z`p-g zZvvYJ!*4)hgxzv1StWkT`zB}Zpw8s#A}DcbOj+zwYN?x1_gLYS}BqG81;O25H%;t|Oy=tWz zrfb_+oEMG(%$QuBMz(6Rv%MFl$S*t9?)7OD*ylgvzQvjj(6zMTelBo7z6*pJu0Zv` zaz2MCd&LD7&>^;bSO`4(gJTLIOySgaCY#RkE7O_9%{wD^LYXi;TghNR7g`=!9#*+<} z3djY%20CanV!d%&m#*TC;SSJN0hv~c$O{XgXuUI?{R7WwiU0sGTg!w5nQ~Fy`_dSQ zo|k3QQmk~*(K3YCzc>(%f`ni=WaDoyniI1s|7tdWK3W%rIXzypp{3oa)@Pw8;CNSZ znO{<=TE~y+JOn=HvTgF1j@02%=eLgFM3$yWPZ{ja2&kiCK3uer;nOX*F>+j4m8qK? zGQ!~@HcCvFUW{vDmD#dH23z%_qLMAsnOLk?CyK|u=xlA&8Pa_gW~w5RBP5U^d}7LIkSAB z%)g(2G39cE`^@@ijvFnbG^UB_xEst^shWnHiH7!IF-DIjF@9^osg1YAW~cx^Y-xp2 zsebR7P*D#jSy{TR&J*h>#{6Bm)Wi7<{}H_Eiv3dZ48uk`?AR!l)My~Bz!G!KMg9qY za>eyR?bACc5D=|I&LR5`hQBb3+;mbli8CMnibk-fwZl^mOY_6YuBTTw%r0o}<)?kcFE==~NChZAv0ri4)DNF&~= znrPgtXOc1S3uOJpKW}vd1u2%8dF+5hMZ-?S-MYbx-Doz}S>iOCSNc0{v6Aa>Os=yy z-P#)4Lc1F3W|IhdSS<+0Hj?LmLRot-w8`8)ecbn?lY8QMr)^CF&GV9JYMDnZzYzu5 zv_0XQeR-fk5=FFrBbKgWm;HwKI6HUhQvV_P`gDE{mIq2kFY4}Gg~may@XgEw8M@6H zgFdD1fDoqhRvq)cw+aj6YaC10HbF0G)oEH_pt`*#@rgbf^d+jJ7SJ+(K{xx`fLI&+ zcySc1%ET$lgPzVx$aGA1HdE6v_7#Uni|;Valc{e<9-(4P>s~obeeZo(pAZAMitXp^ zZZrJ8h~3{`s+0S31@M~5X-)0w-)r2}zCq;vdxB?u zB{CE#(VC&YY`zB{l;-At$=7mNoNC^AEJ0|%fd1=J1pe6uZY-$|?a{y`#1%?)G%Kl+ zrL&GzE>`Y=*rB!Hj0uc&%{O;DbJ$^E3&ySy0eUN94sm%*wTfnmk}3dOyNkbl5R@bn ztQ>Vd5SQrqCrMrlKc^qJVLWjuuBv8Lr zdA(*{vnJF;S8$+1-tdn7B|p!&_icPsWGU8Qg}KGsf{brDPH=smOSMi)^PUYQPYh$9 zFRfROK_!Dz;=_-&>oFroV87T;=pdo`%&c!E&*`A$SOz5TB?z3H*Nsi>sc+uQ7YHAr z!%mOTNk70$s;CZs@U1T$18F3!2!T42At7J#v^}lB_071*ah+V{1A}hQJbUI+e%{JX zbdhb62RrV3JIs_nm$T6dPj`zJRW*Xq>EF?wG-ovDryJeaG30K0q9xM$i|Cb$qlYhf z{dbbez-3;@Xb$WY*wa4S#Rv%$scx54bnwkpG${@T#cXzeuS2C}?b7T)epZ_Yd;@2hwsJ zKo>DNK~v~|57l6De-HoXU{Rw@QoM|XR33kyBY!<@fA29P=}5Yk%C2V8e# zh0)GTf}!e2ETnzxKj7`hHENcaEBWQ-}P^=bzb>#KtIVJSh97fAhy%e^eyq zUnsgd+>Itx=4PIERLEu(n(#KEmVT~#x{?b%N@u&sU8)+-TzyG5{Jb~HjnR-kTl|)} z#mpvur@pn246VQ&=+kc(l;HNZ#C$`;tF&FxuK=+B5LgZUu*4q>08#st9Re5j@~N>x za=%W+sMXp#(=R($%fF}4R`CJOVyJkK{8jqzP%QIJ3r) z?3aPN!jk7X9vrOYFAaqtQbFO8iR^p9*Ii!(_4J^-J5Fn}7vE|cA7eW^63{2HaAbZ= z&=Oq7srBz-5ou#I9KQ!`FHwUu@rUo##1g6BjEa zIj}f2QH>0}3y%H>f&U>cNn&=OJ1f%w_hCD?-aQTVAQtfZ>M;j-?IN@h$dDo`AEGHy z5?)g>uKZvuX;!-yUtQLR^}xq)Ah3uJI_~IC`Je?G4OI`rv~u}{aa@$s(3&yY>dqcDQy`kp)P%^m0w773%r#UWkzRq78)CWLvq7${J?A(8J2k*H?8CzH#&jhFBNji)qf2ra1LZn z3P)33dcB6Yc1An^JaKyAX(L6x19&;L+wKK{!QI{9uU!}+tMlkdUS$Kn$NO55gP;av z_?`)B*~TW;23aP=ew71ofa7~2FA!?*4;ow$6JR+3@$U4OR*bCnvK}&jr@pB?*A+if zIDYO*_&L*En)a;q2`fLy?D*pHZlcq7&3Wy9R8A+H84KhGneybNh#DI_HfY_4&G+8r zC<1T7F`oz#h5h-d;NAzoDeaeogqEn5<9UBY-VQWA(2HdUGhf8qh{#r^q|$RCYqaP0ufRp&uj??D>k zih}V5sAbx;dGs`tcG>nEC9C~H)ik9ogUtYktAncV^Z3PC=ap~n)z!Z3VzwlJ&NU&E z6kT`}iO`il?~Gx|@j%NYfw{RPjxxQ(iE(YxP~{_Ulk7cL5F-qKAg}vyVX<84akWAa z!VyPd`io!^pGH-#69ZUC*S~^3ykr%-vkft})vS!oP>;jcBIQZyTsI(e8m>eR$@;vZ ztnjRZqaQdXGr<;*wN=gc1#c=_cn8$iIb{_yjkFh+3k8wXTM}8GgZK8`KE5{*9=wrU z-F_KHmzlay$I*y?Klk43(V8i9!)e#tEZ1}#G1#&U$c$$)F{Bg#eu9%57*_dy5s8uFuJ7a0tdX- zSiW>$d>0r9Pczak^0j!Ur#_m}HX7r}4rl3Lr@?fddI*SroVnzyOz^9>^V(-bn#PZH z_WJap03-y#S>BXClnWr&40TKfhSe>rBUny<5bhGc(GGkc@Q98gMDdV)&hK=w9E|SP zp=e)Q4EG3PTihpzGFI3JWQJ@?%FP5Ui(vK2yfh~RJbr|7#+xRK>2g$o+~%Q_8~A!! za@Qd(jAYq=LWaQ)W7E)9AGWC39z?TAbtuDsB=|!LFQ_2ii)BfVQT?vD23ZuRG|y7? zVOLCeAWjw>fFcp1*v5=8Jd*Kv#nk~7?r}O9FVsec9mhQ#NqOb;{O5G`SOn*DG_E!N zi0d%VmW5QenNn54vfwLo?46mJ(A5D@iF&tJrm#+bT3q&3!=QSuUS9%%J@TNJe*~^H zV%PY}I|dS~=rdye=}EW4gbzn)A_e!g>>&WX>uOG%+3tu2qO->}&%Hhl-eX(=F<6^p z2V>h}wm;3OiQag#;V=?}=ah_ZgwG`7q)An2MokbxYs8Vf!ddjJIK_ltOGd|^3;8u^ zNF<TVu$H)^C2G0K|uUUL+8ZTAx;bS<5yu`IJWJJg}=Hl5c!4cqIM!ABG>* zh`kpsF9lcA1Zf55(|u=RtJ-f3-==#rjd)n8iNZUX7slHrD(mjAScIle!kOknFcq8zLbCfMe=!ksLdFhI1f(F(Y|Z28K`3$j_Qp`vJz z46=rMxt2z&=~0W_{?b=KL)`aq@rLey-cPfX^$n;y2RpNZyALwZhedfPq5J9qx34O+ z^#W!jNJGYD=$Ae|g$Wdy>|W{A8Tk!V_Io+OlIRmVL)VQYJkVMF_cS=+ags6#=b&hg z?($LSG>f?td)N2W+_x?#p%H1GCqn3ZJ1Re`&x~$cxf>HuTd_A>{(@TTP8W;OS`u;EB>@R{Q7V6!X_Eo_PAys5T>#b6nTX^rd| zUZL>NqY>bECt0N{Lh+;$kW^8BB{HLS92Hn%FkZY;i$EOxp{V@mPnZ^$@+aKJ zuiBYoppre9>rkJD;;fsfFr?5i_-F?R^eq;WN_2HC^ z9a8Cg!22LSZE8V&_Q9KdD4D$L1pT-f2=)O(Q(~EhKPkPj@9fM6b?RDwa%{5i+-Ww& z^|s%_jhD6w#~ivGMmQpid=k`ieee>{O$%{FiLw^3wRrS3vlqxQe!D+V>rAZ&Sz3W}UHdT5IE*gwv!i#JK) z3N-lLlYnX!`5HALd`u34{^4kvWM9}qN+K`c9PMY$LK}j7OTYhqJe^fQCiYZ-8@lsG zu2hUB2-K$lNkF#0qbnbIVf8t)aQYcG!)uElK(TL@u<9M*id-)B zyO?sSe%+6cwFD%DwLo+@eZ*|HegO$JJ=P}Hw0ZoPU!D%S>yJRA+#a!O zWAb!0kd8WuVC1369f8X%x=5$jDV-pL5++XTJHEE~T1!0(Ht^dyZ7PJEP8|5}? zpay-PgeBb##HM~B1OTSI+68}+sjMVxG7GaZqCIX+f7A1g-jC&YcMLk0 z&vquZO+Tv&U?7bxG3vgR4w>U2bR)U_QQJgli7~lxg#=&W=(^-XO_ z#dUc~2iBg{Fc#+=Dk_7bun;m;qK;7^ve}ryE>-IK7xU1R@&y^HY zdQa*8lGDT-p$$@==}ZCv$m3fSgw%gNLWWaWR~{ExV0Zhpr<;v0e;p!Qgih+Sg@28?9crQj8nq1ZPil;Z;eH$oK^Y`Aq0u>YO<^`tMeE*OAx-37NTp(gC6FPEnX+09iW41=UyuS08@aP{tt@bHCqyPna;ZuJFOTU3&gS z${{wt8fMGGb}JH6Q4~m3LK}Z(Wz{zh=d1pHLXkx6c&Qr^XIHcA1RIVIm*WSKdlvzv z6N$e4Wq)x_QLF_NIvQvwS56|H1f1>J8j_a@e>lEV0;v^$g>H!HeUHo0wERG2x%7tP zUBHTQQfVaSnMU8m5w@#vu{}H_K*_45=sU{RmS_>vEMHprh5?p30WaVW+W!i}1`nOSV zy&hx2O7WM}-egUR!t{~!Aq;2-ucLKve-W@o+Ux=VjD@9nEme8NIMVAv2m!3CGd>{c@U}9xQo!Jhb%J>@}dFrH2$j%()wci*p zthw_Hl7Ozd!d-g=kEA3EVKrFUP=$-JZ7UR2?gU0tBz9xf*SXjX;YM6(1Z8Osf5W-z zQq?y%6nXl#O_lOlfwxB8DovwnAgIKlMA`rcJ`j-(bots2x;oNZRhGX4NDqI4=R9FH zwjp^n;%hs@^6^f+{d^dO;oYt_jOs2Z)}LJ;1Q;0}k=X#HlSWIHi5aNF-U2`7H*7=>4<@Ch}?MM4NxNRqC8*1E&r4VJtEx34DT7kFQvb9YUVP{ zt0GA>t8q&J^A=!pGJde;>Kc6mwcVT+`l)E7cXb=TBzHPIhbtAX<~u!Je_C0N&8+5E zn3yZ1$Dj3Zv$b>%6I8z&j~g&!TZId$;-D#J?G>z3Ocj||VEf(mX0bTt0`|4tYGHtA z^vpwwLphdh;o>&F;@p(W*=*St6OHMrd5f^Ghj8D3S~^cp26$ObKt6nSV;+yWR~<(z zNmRO82Ar4q*_7-cHagPse*{851UBG&(z;63n3la&_7q9h=^nvNgRDeI>$`~K)#xQ$ z6m98E=;osyKv*X6F?j7Fjz5Q)kI8tN$7LAVsEhq@EiH!G;$~4tqrs zSh1DpGhztuZxwv-6nvo~vtyJsQSDqB*d(DIJnwOv*+F&sZ!k3zT01h*i}>O2CiEcY0xRJdPT+YdaNO*JBZrFKdEFXoNjXLCdk^URy_X9WR1e_R{%(EgDO1Cfr#cN-*HJE%5=j(K@l7aBhTc*yG%ud zIVDM+P=Gn?d=z14cVBLL>b&HTg#*55k8sdzCNn0J(BA$QDeUa;Kl&wfj7yipwj1CZ z`Lyj>Ky#{RWFViFCNC++AzGrU5SR-r`jPZ!S zWaXv|Gl8IoX(nsVC5t|u|9-g&ew0c$kJwI^mp3C%y?p@ktF+j&;4Ll-8cx`l2LJo3 zKS|AHf0Bq=JALJF(^XwT6Yj1q)YEkqk{Il~h339$7viiB)qZK%;e6KGLYqH=O^J~e z8y`MpF_Q$*Tu9gC2wb%cec63eAk~nKypW zf60l1`vRw*(38Yi2qlSRSHi}xMx|k&r(F6_I1#cX=x4!P;l78_ufg!I4sa^Q$WB%vV3wJYpZ2Yh^K^a5I^9$mB z1a0BTsUX;gBBinr-{?EFxd z&&1LC`p)`5$O>d^FmzVHL=!kJuuxVbak_b^IVLz9he#I%R)Lx%QwizpbIG*G)#P4ax3^O zwb6nvgI5qV!|$q>cEsLSlDVW#q>=4sEtahrQfcwrWEwP$3zdl@sr&Nr&+sOYYHkz& zm_n%gObjsAFqX`1DUcF3%!~n+fB2HAO!Oa|f>jjFX4Vb-sw3x10O2_Grp%1LPkmU_ z_$~)WGPPZ{MoI~BlL0hl)3tf9<=1+_UHA|Z;p{7Pr(79a!z@4v087m~-<*jn;cqiv z$djN|Ql+BQ7feFR@TDf!UOiQJ!u+&)18BlvDyBJSFC(Q2{Muab6{=~he-1}gBiP-V z=8WH~h4WjN+p+Ayc~sJeoO$(o-*OAbd*CK~SILV=w-ajvGs4Bd)XGF0s>=DX2tV!5AL+0P(vC&dlXT}Tlt zadiISiO{lZXK+I8*iJ}m5=kNPw78SuxUM$t>JJ|ZT%*I1;t74EnfOU~EF<7SKjS*N zVn0RE&oNw(e>h$8lapfY_b7~+0;4l)d)&$JV=8KAiMJQxm(_ZAo3bHSepkNlml@+! zdQY-3oW(Jd9LF@o_Hh(6$_x}RKv$%2cKhw;+>FRw9>WXQT4m#69JtqA8Ti~`<2rJl zXY6Y}9U4#)HreKd@qx~ZlbfMBkh;1+Y$LspV?Q?#f77c$ypv&U+UpnUwe&;R)>`1@ zc@A_YD!Eld=Wumbc@3=Mmp1A$9;d4+Etnu}LziGEM-JRwLt~Kc9Aw*wjh<_(W4@Py zuI|^kyFhV%$k!3(On|aaf=r{txY)+Ai?$iRcXs@sl-pD{W9s864D+G)I=PBj@!LnT zl^a#Qe|*$@gbn)Zqtu_Kt5X0iUgn~gSc>a92%kU$`j|~eLxL~6gm0^^y`$-z%YSX~(!qKO2^m};QygduPGi##T+(+An&PLY-M zUeuMB(gDyToi7*uc#Z#^c3}(cXQlfweN}63e>n|oS!2<^%~@rBPP~MQE|s-%^|;D( zG}KMayy^)Xgv?u{Ech^em#bd3?<3EVkkpkt@-W130B=V`-6ZJo^4~AYf_K&G*DFs3 zS^F?cb__`CBh4>*Lw-YDJ!{$4a5b?^%TzYqtp}A4jVw$T?i?1B<^`RW@H9IKXD4_Y zf4fz#KkWG-naKzOKOfw**+F_#bg1WBhv<}D7b?6HV+xw_WE4gX<~Sj@$5~umO2%mz zW7t4YZ<;MHdRGPBejCY?LtQGdrynK^zVW!BFI4nwf@vPTwqN~SuQlKvcQQH=c}K)%1Sr z$@5p4S%H64XT2lfrX!;(L|L+0e=U+HnZ)!IHwnjyrC?2y<8hy%gOMuYfSl=z797lv zt%j8gw{jw4z|i@qNKeD_d{guz3Whwbt?E z(Q}_tO>R)%uHyANZe+rqHemA6y`g8Ze9t+Cp(#hVkFZT=m{!h}k35o}f8+#QG|=ZJ zey}AE&$!g@*0BzZHI8W?JI#%l~l%%KakRS;?01l)?^8vOtZ>jXuOpY*K?GNiSxC~m%G!*Wf@Q6Cygv5 zZ@(p3<^yODUw=S|SQDqLdUlGGi~zP!a51r$f#rsQ(*X?A;L+~E>L&_5-iJ2|NsHPlk$#lOIIpmr>T11{q# zb~)-8BHy#8du_2PeI|%+(8QHLoxUu05#0bOOjL!PYa5ce$~zV%iLK0yiO0GV-~;_7 zAif9&QvmCW=+m8+e-J-`=$o9twMqi%T%0{)48`NT`eCVm#Cy5$8}z%dH_aPd%ie`gvy+n$3+S4x(3w1ahb zgsxGvl%Vd}Kx9z?O!p@DX_KIXCH;~)hpp#t&k#wzwTvb(e>QmH7Q&he5vPwza?!T) zzLY2)!2xW^B(x}Ld0sO4Jl zexH)A#6$#todkZU}fpfVw{@%q%18B~K1q$X8ME!^`WUNl95IZw{%1907b1 zmF~R)X0GRGJuxXNhEQ&#BycxA_5J$p72~q)f8zSy&9)N<_auj#eo>Bd$Ql5jKEwkl z*S>vYIc8QwP->_}$R3!e9CQ%^Dd7<4unp*v#BqH2{-#MX2(Vwp)VQH3lK4}NV&)0s zQ6#Jd3iT1la$jqpAa|QzI|C3vqbzAazaxqOetubvwW^5b;zll~Ua8 zeObAiZ_#_14g3j3ecF7Kj=P?{OTu7u8ic&dmh|Qcl-|n&Sg+)00BQN2FV<> z+iOqkjKqF%@}NMAC}l~)J z+&Y$`5}Zd3A7sbZy+I@~0u#?h!CZFNe??m}4Fa(sridF=_k>)1KL6VMgHBNya)4{# zB9Q6kw2$!$+)&aDO86O_#|x3@bbr6hK5|cTX#^PXb!Ici=UXZiC~+5rkx8GTT1;!% zMPhqZ+Cpu>l|a{vG^&B#^BV(vZhw#$f8tp$DNne;yJ*t-y_()*7LmY_wBvc z7(G^NDpVPIQYMQP4w>*?4EcH>cYw0gstvw-t>`6Sn(x6l)F zy!tqsQ=Pc>z_;7MWDp(M=#wJNU#(ZSzOlHS`{g~EG-Q8#6SizNdo9RYf3r>o=&?RT z2YVBiYkvkq0k?Y5YPoOB9yQAsj@pF1c=cV8%k;!iHFRHNtWSmm({?8Z%WEKWmM~3} z_)eHu9XKEs4~y&0HXH9w^0dUZzJlrWU|5AYa)S;Pu`?$-=csTh$^W2~J#@MriqWP@kV}+B!&Fv?bUellOE8jm} z2Lt38w8<(vU*%{Mp4X=)&DVN9c|M}^A(^)*q_5K~+CA8HyUChIe|R;nLpC;7nBAHt zc2P5G-D-MB4&}}m?d~}>b+I@)yR}oTZ^P8SiQDu}$Yf88?X`atWA2=mJ98I>oAkWT z*Lj?uPQ-Ga3pgyI(E(TD-Ybny@AmdREsx(9B2zkM}0PD?`m%pCcTWkoqM>9 z@#g*>g=oATjkvLne*!07WctQ8CWHCpSWeM4v)JHJ_S2=2nU23lJe8HR8my1R9L!u% zd-FBUFmrUrTQU|8WtNTSrjd-SnFE8v=^6##_vc21lM*gH-6=or^Uy{(SfbPyc<~(d zjY(aOP4U_j{5n&b+G3f^r;uN&58Dinh6k(nTTf^)KECRvf13BwYkY2D$$K~(>(ihL z4-ne>LGfNnD(T8pSAXRd93?McGRs$db}O? zX!p5Z``espY3iT#)59<(_5LhOl9S$gHR|az_cp2qe}}Ec?sj10b+EoZjN;t`xYes( z8jSbzVGED-IWf}broHzl9C41{fY7D)X@*T4^v1p0XnjGGsL7idUv7NsaMfl1l5F0= zdG$c9ugS&eKHdx=?!ETz%a}YK@{sN(&qaGWjYs5j%C*h*I5VnwI~u!T{So8cpxTe} z(P{f(f6McBS=f(f&wuOhL9#g|%^`Xm$(g&mBJl^2ZqJ(aJml?mZxpAj$!*xL`B2^) z%k`|W)wlN=&pglEA6ZzI+hLoJs#osx(XD@oFZ=%BoYwuxr?*+Ha>wp3M*Z{b)t9Sg z^`eWP2l{rKw+`w7?%#jiRw@&)``w2%W`DP4XH}9}wk4laYgB&8S79qx0O9Vn0Mfu-@X7VMHm1+`%hqFXbh}$I4;i*5EsH z=KOibTDm-m_Z;_txlj*uMqJDXAALXf3_0%W6?iY0Q(a3|N9a}hg06Z#q(m3|iu4Bw ze-l6Vtrf|%pstxsS_o6*Rb(DTOW&789@Dw${H(hT`F^v^wW&$pwj0y7`)3iJR|CVPfnFLPF=Zr$80 zpB|RkfLR-UxMriXReVSDcl-~@VT9uOe`>XgK$qR7xc~mQ>WRKghzrZl&m`gBKONm_ zQHHB@FeH&b>yP>m`}G=z`@{J?+{Dk}V77e@7rWs6ZY~dJvzdI(t!MY!-tx>}ubW*~ z!^mW>>27>E?=Bx658c`rwW-!_lKcCD_J?Smy898@t+jl)IgZTt%ATRM&bG#me;*E& zanM$<9?H3pS9&^rbjw@4FZ0U&e-R8@O{}e%y)bjj;DP4q)1F+_bRcp%%37-$ygrv* zdu{OQkmjrD%NQ~{js20gny!1YSfugI=--~Flb!9yx|Kc$;UgRMv3)iAv%}&Uy~XDv zN7qptuI{f}o!;%^E?eL9Pk3(++4LjVhdLjiw~TuO+)v%(@)*3b1c!4pe@>;Ezgv-* zzDTS`|2o#P{Bt$>a?{_$857v4DpW!BsZ33x@$lk`p;E|}u_lM8Ldwqr|e87o}l59oh$})AP zx9RNOd)1#KU>~pY#p#B@U8k8&(g!cr21^!Y?8}Q5>|Yx1AJTR{O6%JwZ8zGy!K>Wa zjDsy~p`;0$IJ&rwJpJ{)II~CbK94-e=COXl8{QhTSG}FR8h`d`e_z|@DBnIcXL*tn zIlL{_w*!7?!LvVzX@3yFLfiC@$JK2=r=EcFXJ=J2NP=C)`7r=aeE&q z&kmlL?r=`#j;DJ;^xB1&BwpyftFwr*-MfBZ!bX9=(bkio9e&t!KUfLo1x9hZCqwHI zHOEb}AoUK7j?cK*n>%ec%ZB2%;j`_|Wo~uOn|98;Vy&-yf9>h8cE5I`-em6W24~G8 zi#zleA1q(~$y=<Yc7Uyir(Myb)saStDm47iIK*74rp5`qRx+76K z>~HLCMn9jJrZa<$D{tsEX8fAnkFNt>i_)BucjB#ff6JgEUb9a(2p~>p!2_JJ(8tJ- zOBf}#yRh$Q5ML9+uiDA5F2wd4*xK>tSxl;2*bd%SGqaW<}os19=vN(F!S*@Fk zD1z2#f>88sI75-28pW=PH1FsqTWjRd=k2k^W_Hb(y}zVcZ&1m(QTw|wc+U1Tm!?+1 zEc3j@e{iQCkBiF-Ye98JNv<20jfwpMw!rp?sd?eO8XZTIU}w&WKC;Tlnlw+RJ?>Yp z&!Ohu?%XhK%$|SoP!~66`=yiF#R=rRY z(aN8$mvmyAZMBiT`EZ}z1m7Ob>9TM+-9CML8~4oq;&G8Z`p9nC!>zp08QbziFY|QN zoXbbEG<%O5(}S2?{E0t|&qi^Kqw>^@p7-hcFxfPbzBIM-v0oQX5M1mqi0F@|2PB!94XsGa|ONR1ZwNh|guq&Evs7p?wC^#{-T=;qda*jE|%kt`GC2 znbNC#_O)=lCgX5*+xG3Nd~T&UHRBz=ZG2%hXXn^5vRq#t?_E?rQ__0Vk1UhnYO0TW zu|V$I3HI!E5BnzWYfraaki~QvP0XO4f2=HhdJK}AIXJmjQHN)3+#H?J@aSyvWBi$< z`EI&8wBxH;?Bn!d90$uW?Y~}0xmnq4x^mXWoLdV#zggqRyWQHk6W>Qh)}-Ee8n?cd zFWT!pTIX?nl#j&=-SzxUahjQpQIcIyKTakWtDr$ zvkWv5F(%$+f7WaGqAl0@(+vwKv;Eb(HFSHVxA*#(hxocEN85=L zruBAxT=I3ul0n`M`%pf7rhS6w;AOruL(y$9e3Y}r;hArbt~WaD%ktPeMB(|E9pBor zCpK)I-uuQ*yJ0c7rZxKPMy)-9thW|r((DFdHM&nv&-XN0Pmj-8wXS0=f37nA8OFRE z8F;p1hmFa!omgJi=d9Sub57^kt@S2@&^*yl*PcT$A0uZ?9rNnFkDuu!kkRsR+v+Fw zTDq&@-7#aAOzosx?QUL6pYg$aJ?|xd4)wvhUph1Y@D4WdtQ?u8v&o1tIJUck3{P{5 z>YD|;PNSjS7usPv2%Vn2e_ZeDjaQBeYiZqxJ8Vt%zSoO*R&#IFKTfC1mz!sl>i9L8 zieRWa#S-wy9S+jOvWrf~ z;Jm%Pv-sg+c-@mk5p-r`RdYDFdm2L(Nh{fw{ zzSdT8-k({T9?*L*eT%XwKk1d&pZ4DPZ~eNt`0wBrkLA258_he8GkbEHZkG8N-KHt9 z#oJvES$sWO!D=;|e_B_4p|}1-YxUbypn0E*`tZ6;mgi5nZ2B;m_QiP7U%+JCgYftP zV|B_seA^7S+vnpRJm$T=H{zq-ZlRmW?mn{5ySP0~-xizAKhs5ZN;HZ5QL#%t`cZBG zM-J00UGVHhMuY6Wsn?gSi>w7o`M9kEu3wzuTo^N~C8z7;f3^@)$AgLH_%!30+>!ey zTA9x%@s9m`6@8Y@X_jqzB?%TW1^Rjf%d)xo_w3{i6SM|Mu5~!owx{K^KVyd@o%@S+ zqw%|d_E_u}34TVkv)s+&*Ly;?r`8Fl&B7S@xA}N^4YkCjg|Q1IeRG`2{LK}fmY|i? zMmHvzUN(YXfBV-yE_qvnlP$;WqqRFR)c59!OV)2jFn@h|AGvJWe%r@yqJteHqTA!) z#TJQ&O2@Rt_JYbW4;n*~I%6YB;Z*WN#&lgW)SxHzu6hc4XM3@DvJK#QF1J^d9V$}} zZOb^`iq>@Vd|%8>(&KLt@9pG~R~+?jYg{z@bP@Uc=}=6wj)_S-N&pQ*5}XXvpOMvvY0QP@n+N?No~03_g9+7b9z0JNDOc%#;l(?8Dfom3ie>qBmB$yQ8Yn}8;JvsXM)i3w%yWTC_-WIb> zFUSasZhleTqN?5waJ_r2mvQNB-gE6yGmx$YEqZND=~Y|pRqc6kwzl~p>a#Y|$b2kc zVVvpgY%j^{JSSH#j523S3ug<3y%p^$6#RzqxN6h>vb_Ln*sav_f@NkVUl5P#f7N~d z!=G6FzJ%4k&uaRwqnci?KPNQ-5cN&3J6=d45f0dgmFqT7o&W@QhcG2(ck)tq(h>kd z9WBQF;&9~x?&fPS=B&F`;Bmh{1-S^QuSr5)z*X`Hp{zG*a!7eoy?ZH^ckfi{| zhIhwLZPdw0s5>bM0CBrJIf<&nqSQ%M&;X4A7$6D2YSNM*ZArkI^MyA|f6%ypd^WHr z00O<$^%Cg}0iKFpq%WVXkR+Jp&(LjIWAMa64a?my(cL1|SQc4Eu=1HsYBMKOyGhpp z;wZ`D9QM}*Y#i3-M$>=A2jCQMgt{+mz#&CLaE5?VSW2L;K+6qYecLILk&9X3hJ5sG zGc}P&EhG|)HgxoV|GQ)8e@Jp&j+ZzjH3(vWs2G#l)?}1f2v)jy+AJg(c!aT#ny%=R zgV?QdhCs0QQW6Vg<#Jiak-^FT^1eEZ3vK%CSdXML0^!AsI#T28X6YKq z1S6TYuKHY?v@A>1wJoEio31UHm|c5K>Oscae_T&DeRWLGYY8%PFi0%;;IN7Qyhgs3 ztz)UJB#x-TBO|qlu0v%WH#G|+jSy%UDv2N-9DMDGk<@G*i0(((I)=nSoCnbmuJ)}& zcD_Aqg70-UPGw_Re=e_EO-E9r7>H|f^Kj+N(z!6s+X!J^xkwxPj;tII)YSRY1YxFE zP7MUi(B-K&;zWn7cYsVW%DI0Af{~Cm;v7MYcm=i>3fm*W)4|HdfPuH_Qz#-l6UYW5 zf&{>M4Q|^=drk~sjnk$*D58tqBx?M>V_MK|%veifB+|&Jf0K=&##N^3bA>Hai)HEB z@VTLA+WTl2ix?#;VOs^K8r*2IIBQ zqSppJm!VbqkfnysxelBnhZC%asq4zI!{CW6kr2JrZe}1T2#N=p_CbrWfh3K600W#& zzXA4v_ZWzT0`w9hKqr6&=tdp*?elH>;}}a~u>6VKe}Xkeox}$Gu95HBP-90M+N~uu zo~-Tsz}YR>&s>0oa+cdI|9-wm0lGEvJ6C_NXRD_#x5>uPS<4EwMk38_)LKfA(IkTU zWzE_c>yTs?uBhqEGZXPZ7nN(k*fTLjS00%}Fx=lo7>RO)xTA#yMQkTD7R(Q(s3I?5 z6-Kbpe{(pajP)sJOFa{r0VShY!&+T&dw{Yqh9zXxHAGsG>a}Ur&;GI&M+>y=CIuZb zY9O&%Ai*qMD!+9qlM;|PO)$f<^3eDmhoaDdT6$DT6HQK)o_eMsYmYxwA$XzIgWvDp z;r+xdAPk0pJqPDCjMGTtSKzF1BpI@n{*ISre`LwV$U0v$FzTfJ&WW)Ng$jWPY|WMTrD8^ky7+fy5vZ ze=lki?gk~nS$V_oRBOTa{3<5qxM#}rUX&6X5_TrMF>M^50|8?NcDoE)t4 zB_o0^v4|drb5bMmbalx>BQIATl%KUv_GTSTmKi%+V!U)vvbUneh{z*d32H7IxOs@# zAS)dtcb!bz*r)M4GN^B$vZ6BbIcNx^f5s3HgjAg2xToS}D8KO%Jy?tdX`GrVU!~E8 zsaWy#SzlkJ#v%ffHpPs*UIyhq`&( z{KhV~NQbbEm>oo`9WD2IO!W|MJFE4FOSvqupziuJyF9ds{TMzC-3sa*wZft;e--M& zs=Z98_JnFL1lhHx*-hFSb+Z14Pv~ruw&{?x30jgd4WYzX*sdMgrHN^7h!;bwCWf$9 z_hIE=!CV+Nx5M~76#lrRKCzKAM$tvVHv(564HN>;B(ec^tqp*bFJf8Oru7qRf7(yK zu{LKaeqU|%BfB{8jpusm!Jl~Fe~ruN8_PRibcBL6ZV?S#VJ5ntaI)4c4C6?bTi1!l z7DbGp8V=Zx48*yCIbTtj2NWk7;atZV_yi8tGS+NbQv(Zrp^_pZ`2pVZK<8q7-ZcVEi)=rl$(S2%U(f3|5s&s79#8rw}&>wJFD-89$4KE>JD`}W`!TwcQM>!WpzbIAkNb&=b@qdc z&l&<@AE7&ecHc+BPP+Igx_%(_JqUmC-A`Qm#w>6pog%>$_QL&xfAV=1)j36wml)m> z>rm3ZKG%_y6mh1*oZznbY!v;epl7%zU`mhx`M^P%51jt^ebyNNj+MVYeemPcH+J6n za0_Koq&~|O4p;Fr!!pL9IV{ruZMmTO<){>F@`8#xzX&@H@tBf@(dHRA-gp10osh!)r#@P$3Y{mYy}C zLlanH%k8b?@R`&IxXrI*W24tqmlVx(Ha1!)DJCb7(RHW2F!EVKN;a}Jw#GagfFLM@ z6q78ux@XNdf9vfglHc{=t@?M!0pEm5v3IS_=iPX_I4P)5_M$Lym#?TkY<~J@b~&TM z6Td#Zv#V73LFLyKmg_Kvi`DlPj*gnH&nl+u#}&ivkeQZ7nnwCx(wBfj~yZ(ja{#CLU31trQpuLj6X1N^i4L)Pg8+39ti90WYJ2 zSrZ+KZv6+H?E$rnkSV{U@;{4q7>RVbtQ{udcU{Gv=Z;5iu}vU;@@201Bpy@jTfM#@ z2mwU|e-0F~&@sK23I5rH&m;#)alnmLg!wa+kzqi2rqb|{Vf8IZI=k%mD(pyyD`$xw z>;PZ<5V|^R)f{y(^@|&4H%BVc(J%h{S^HO8#X@Dnjb}u=_Z=k1s6P)8 zR5}Q5IIXWo0nPRr;UrGRxK=ne*+GLc14P5&e`#4#a#Saw6WBiZ44u0C<{4q^?~Q!S zu%x;*hHimI&+K%WMS7-Gek*Acj`5T%}3cQSN`FpXe7Lhrb~4= zcR?SE%zJ@%6#&7YBFw^>;in_LGbJ_`=`uX!pagtPYmt+n%26 ze+@a$Fo>m{k6%v7@4Dgckq^kssYf{X-Fgr>=zfS0AuOs)gf)*8^S23e+qg{jiD{>2 z9Hode201H$b8a?uVM&(&j|!SUpQe9!@qo`TaKw!sFUPaqSOxGRI#Zq+(nbXVe{LKxAsO4%NP5P66clqLHyWC{#u&P-Je=>DD7|(>cF^dls06U>9+`945@J;5yRCqXvfkK%z^Mgm%+<)0$6WI`LtWD(qW8L zT(C>=SrU2B3ADPLUo0&t5fHX4wfVN856ENBYpF@{^cJ4TZ7B)eM9Fhm%-b$LlaY$g z8QdeVB}`O&c7u`vbn&@mzxLPVf4Ird8vKb3MKt>1m=qrU6R&@AuHSgw7z%r~2|CDW zN*edJK2N-5O-j^2O*OHmb%_*ThN3~@_{rLo&ybhRDaa$?)alNuCDKJ~upq&=bc^HL z;u<&e^%BHk>5rejU?vEvBYnIk+W76@SIV{fV~F zQvD@zZgqI1X21N4)$uAq;W(~rHKxr@woH*Ko>e_r0GqH>C;h`x@sok9OgYAXM! z^y2Et52yGKO!7M}`cEtl|6*C#R~M&z*%*j{GP1*k964}aguyeme}lwDd3x0|BlJSO zH?MziZ!tcZcy8Kx$GZ}flhrPsk0?scew!YFYps7x@Ahgm{~bp)e#cdR2v3G3hanIS zco`$lS4U6NhaJ)p`{54$W&i(>u848$`&f3ux2K0BL6utTY>&4!%xkeEiz==O{(BDXj2pBGw}J3Az2KX(cD zD}%P2f*&2Tcre?S=d%H?^LlVTtgm%OgL7UXVd@20fgt|cFiNXI$AdJO(`uo4Nybh) zF1WI5MdpLugGT}eSNJy{n02am{-R|=QnQwHd2<@8`L&BEe?wiKN(Nufm<`le*_k8_ ziz^qN2PC&o5OmXji;^_NUvgt%*sx2l6hXi(h$|GIrd2R|-W|yTG)b*h12YQ<4*h7@ zZ-!TC!yjLc{rGcNUdxXNHp}^K>78Z3c^{ z%`-RX_)Vtsb6N7BI-=~_t0iqA-tMrTZ3r(&s1%3vS~`nR zjScb-uHmN_yNphKPS8FRb6t!v_Tf%3xO!{##&S7Se>^I1F|ZMf?OF<~xF#TQ>g;^3 z?EIzTH2ucuA5Jt~tA46@_}|zg8M#?x>XMrO!fI`Twu9R#CeIZF93#%>aW>yZ6_3z` z7t8j%bVY?hMq8I86C3LDalK_0I3BAt6XH1r&h@2C)|VkW*>o<6p9&;MXyg?$GlKPr zD|h>He{OaUCqW8$;c3THF>sW>@wHgz-}(`0S%>%ki7UJL7UV)3rg!hvy#0y&KXbXu zgWGzIjm0Zyrn@y{%|p$jFxKd7d!85{k<8+l*V7=MuOJgQh$UJJf=DuXiuJ(Wae*Mv zo)2l9;WT^kL@U(TLKc7gOA?qReTCiAT9mc{fAABWR^b!$U{?f0)d_|~)w;S^(bd1M z9l!7o{6w?JRQIm(O+zM@5g2Q@0h)LWW8nJJnIj`R<`wU7Jgs)LhJCV+<#aasMR*4Y{W7;{gPNprwI0LYDCaL0l%A zzjX{znmofa*}mq{^j12F21b<9d&uRTHjo)?H^zK7r$X0 zo`F|9DRwDxIU%t|=3APqak-TRNidPyf5@-r7u?u}iy~)xV=K5I-!k|xR=jBrXOQ~| z@G2qedTlp@xUy^;aUyi-<~W-rNbej7rVric?hWe<)f8 z%_ow^4y4r^2||#KS0JrLBpL4ROW9^j)qS}gFZF}pNz39cuMB?H(cN&i6N+4&+c;0q z2D)p0uhZE-Aiy()GdC)3SRLO~efcK;+n*m_{1Y#~akQ&}b=Z*)U7d2^y-piW;K^psB!Cfx|-PEaqI3ut*^iHpyHP_0#Ri0k6@e)4?@EXCzy!W5yg^E{yZPbYRm-CX8 z9BFN&!)XvI{sB0-Lon76X9(f8=bz_@$OVQivcCRGEv{vUSM08TlKko4zvp#hpfoV* zbkt$euD`tdOxK@0MXt2_5X|3Llot}+1Muq+mJAWhJZ>}rY?%&?(I9DOaDN&W|8f7< zekq4Kk5K3!0)hPPgR5I5x;9e9v7yRSdveKCJz^;oZWae!&<;2M@m+th%RlRiL(=ju zzWh_~`dvTC$Q9NDYHC>64Y$C(H1^ZY*+$FIzqr+G;Oo!80NHGs*!A_wCHj1^3sM)J zPlVCh-!){r-ACh9$j*myw|~#H^S1xV+^x_sD(3AzS@6Bk^yL7jZkK1Nad#Ru?xuu? z!ijbO@lXEqgR_2e>W;%vK1E^RKXY<+anf--U+neGIji#Sm5>?hRvb1utha`Cv~;ei zIf0b)U2$N6FOQ)%cXwFu-B^^}VqBi6x|G6D{V@v@6MvFi)g&;9Y2M|D z*1j*~a;6@qMpfMs^1-U$Ctaw#IQmb_-~8cnelbQp#zvhMs+MTCyk&K|G5+$_fAfG} zPFrD1VPOX^0-kHyw%B=r!n(dJq9G^xHTIC?j=`(*F;3?YMgyZx(NffmfNicI2@Tku zuUZT!i`Ga8suzFf3V(A#jji#Z{`veaC_!0tBielm6+sZl7v$30RE*i$;-LdU`fKQg z!>6jh{qXqRd`>(6&EeDW_yOfVJbu_={Z0eI{sT*^pP#(0O*@SI%l8@KAKR>tI@f}H z9nyfb>=!SOHJcA413`EYXzh9^+-N`(#K#1*S7Ho#fu(`^kAFF8FT34E{>8+=U)Tk4 zM_M_izGa4pVc0|o7vmKO&Vx)SezIF{312eMJKQCH_(|;t`>OlQzjM{q9oNRc@`GQF zvg1PjuC>3pz{S3s#eVRzu^^?=K)CAhy+P%9EjLN+EVu51V*JFwW$38@QV zWx51BrzL1~et(CKOi-e}U(RbM(913Rx8o?`uVO-L^}SPWV)mUNNO3Kz9|XZFtTnwK}@M9xS)N402Dq;P!BH9 zBb?|C4tv^ZrfNADq`d*9i2(xQr{4b(aP9CJ|HJ!rxPRea`9p`f)7IeB`X^p<@po;# zb@#gE1cxZBT=BVlL85fX&pZLOB=A`mq^O%TL zkE01vC#w4w!&SNlLyq0=xG0?;JN?+T`K;3pmk`Mm_q8_aAB~fm*X$2(qpP^6@|}_X z9JwN~n17Tm@FWjkuwFKkLz<3oV(Upm9K>G$smTH>Zw z<5npZpw88}Xd)vmbszN9ui`SG#2!BFGs5 z;mchzQFcTGBb3ouJRQEa8JHNlSc*}@dZO?%CVvRp3c7t0u?LY>due#7jOeRBi99iP z*4#7?!s^{V(=!4q3hzv?LQuq`T}V&8-dgA&oC5)V>~|_mqLvuICsF+?y8gizZ`p!g zyLy0HTb*v>t6fw6)RMHYn=eP);SgyE?4)R=Yd6NV*uF*QG+Y)YH!`DTOy|bL*Tl>p z+kXub$9u?)4*{Y9w}Zs5ZqAP1+)uiNM|9t0X#zJV4uhS88qtHh|Hn;@5%pW3;MjCtRgY-8^CMao=3l|B(co}59jwYi#I`tKBBjbkXf zdg4v#BJTW;E+Xo57;PHXZJ*L|8yZl#@2vqH|B(0!9G0?icNp`?|fywSAP5PX2>YGBO4ZQ1hpjI9{R9oU<$>ObgXt@Lw&craAXjnf z$|rTXe^j{Pe`(A*9;4G?e(Pk4Uo7k&e+xDLt$nJ$7hN0^WQhD^5lB**aeqabjD}vF zSXRyr96pO+T}6mTS5&(j&Laf8Qje*Y`=AoGMqLoqa%s|y1r`HgTPxrm%sXDMGzE=7 z2+hvQ$ZY0IQ$}0UhnjHu25ER8G>i17Z`0Q*OGH&yZ{$z@txtUV3r_cHJyHp^!X3*z z`HLf4b?r3zTl@XDhELUi|9|91dGy1zrY5R8%r!hm)pHq2gvE_Tb!6FrM0jA6Kue|4 z8!ekIheB>kKPy~XdObFb!eGhQujBqZQe2f`OX|c$xx&=m`1#WUe{6p7<=kPqxS=xl za8S>bodl{bc4DXng`;+@Bbl-jm3NH3wWd_X$gjrdf8yN~R?PU127ffMI)0;kueUyo zy*x4r%sY*-owK9C=gcM55{f7=;N}Hf8i+@RE?fx6;)JLgz{Nn!MAhM@t06T*3UI}_e=;FN{k(i5MJ_24=*gW5WCo4k|D=L%y*X~aJ z1-BANB~bCrUz|MhvEsR#j$8U$7yDDYWIy$?H2Wv6S2*FPwtw5g?ujrE>b3L0h!*)O za-RrNrv!ofFQP94RSp~krl3!|F1O2OwcV?eGdMFWF7)l$-baIK4coMEV4guR?qN

znJSu$7lwNWwTIR3+rvFyjb5dG1wE6rZ|!(0EULvOx5s?$Prx(Vg$bwQPHkCC9U z5xB=tkeqU1(SKao2}5L<&QJyi14&GP*j8ZJri_?gL<5k5u$H6nTw&nG>qobQusFm> zuh&Qtf;dHixeKYHr_ ztf>#|JiV&*q~_$S1xbJWV0y9h|HSWIo@pvw;U8@O!+&T0Lx*%#w&{la{9C{3=2+RT zU|r4kThmF`-Fv@vg6r7DVggRUa^8T_PGg3m5AAS*6ix<~&WQm&#dy6%sOC?*<3EXK zl!t(fl3Q4@n;k5$iYK~WQsr+TAh}hiYcuDimCt7pWQh69w)oRP;$}J_+G5EWd1yBy z(~b^_0)K+$ot7E|^$?T^Q!h%Pj; z3WWNOL->67s>eB&HFD565Bf6+TxnlvfNP#mKe2I5nW1nMQM*$pQhwa&a5izz;k!#4?2L{K@)23#wXpQR`rm47cTv8H3LCHmQR_98+nX&kU#As$v( z566chq5tCA^hQonjm#5YY|&}9QJ91RiNEW=s}s-QE>!(C?&`!sa}acKN9FqiR!2F? zSxjZ3vFz8^?H1s3v;z{sTEy$K*0sei9K(g|Yp)9JMOaz3dYg$)ttcbsB z)PgTkCP|{WLHLXj3XzEgiLl_LD^XOd(r7RM5kBH?1rx3UwMCo zjqmRYSN&+WQwuhJ0lB#8a(;Jya{Tx#J%2r{lD(b`VD07>c7YveNwwJgJ6Ee-1cc$1 zMjK9097l^_oT4s&`_9uy*_GbqwLkgWKlr;JJxLU(SQ2Qjjw|YTA%^-f(|_W$s>7%) zOu8W~3rMGTzFOk94x?%rsRuQWzuJc%pQ$wE^&g$czwNY${_Gu49M=E%USr!3&wrne zx^FcHl5fK%ni#c4m40PelqogG28468;bQvN?&|~AZyby2LqR)Qc>Y%3|8ROhP?^eC zfMgrW=bS1{lhSbo&lO&i#N?aaKt>pHKnAa-Ahe#93{}lVfFC)YpzbOgy5gta5_Efx z2&%g{sPvBTjVBq^Sj#M3u&Cq1i+{g*<^OdI{^$PASNwOq{(tl-keZ1~qyI{c|M8v$ zGWz`pV`;g72+(gmFv1)Ze%{c3*Xn;Tqtf#Hsm&|yw9{5Cv`I+=d(73|^IG(S-HW?; zo|-XDaFKih9X@y%uU5rEUkiZ|OtN4&eL_HMk5?keX~xTXmmE8_)iBQ@qq3-L!2`@^FQT$~^kZ&B>-bzYs9IQm4GokN`> zhUafAU(|_jA3Y-f_y8h_s}ADA=!M>k}Gx?iO?Z=+7zK$}1H;lFXZ+vAa1KYjTf z^O38rTZ#9wnPU**u*L}^Ld%_;Z$tbP27b;gK?%y~a+QytQ4u}de9UxdS?Dw9gRl;m zGlSHXrJw0I)SgXH6URcDOHgwNbJ9u(a)%L1} z@wM?1am~V>@i+fb`vAVYWctfX{&GY= z7~HS)%hp@zY;URDJb#Q0nn6yae4sjUAy3|{jgL+oB^c-dcsUKjd5kh?J3(Twnk^xE z_I$({M4kTPgb~dRQ2Pp~0g|{LJ_Bo`H4)qlIJcbqxzw}LLtIeESY>1pQ{$hgdhWKu zUYFk~zOduHMyya+Ec)HY_`7Dm`%c>5{fKGHvfrBc7^_^b(|`SDQM3r;3&D8IG9G(s zjRCpZLRe~}9)|zJ-ka^Zt~2Xm?~=Q~2rSuhw+{lONcEjfwU%xIOSaWpwq#jNkarI# zk2&Z1+r5Jm93Vl)%@EBZ#imFWtG+6^l9tsbUzj{yH-s@nEinZ-u`mMnjKK88fzdX^ zz>vAwEN@vezJCe{nPNe&s4Rk`G3#s4Ww{KnM5I078Go$;)n5$p&)tB-CQsX^{O~x{ zCDQ)<;B#%-A1aStD$O>iUZ&8;jVfcKxB40pyzkCLykH>drLR*BIEmMfHI>Ejay4Jt_ZZ-s|i5Nkm5bB=7Z6@QBn*WD+f18j zZmUj2PY{S_AA~?luK}NM3+enlr{TVzAnj?p=5-|f!rOjzt3S32*F}T-XAS^Jz(}#+EK&c+ zk$*#7qENk``Vu03Y_?>Tl?yGe&L5#3As&e?~2P7E${svb-PwV(&XRdn1?W->%$k9 zI{>26J1CZ{Yr|6P&dkre42FOW5M@An`hQ+&-hW3on|10GYm?5lAo@Of=a*((++TDY z;Iwz__NK)vp)?vE55);*K?OumwX}_Bg`(9bi>Ve&fkWc3$Iu6w4hljbU z2#`<=tE!w61=BMYQM?9GcpyxOGX~A)yCq=0mq%z^%31MzFR=va=Y0RA^Y|Nkv^px% zjQPofiQ2&64COIRs?*SlvK^m<0z^3r280x>fpSu9;U?i4q0ETI)D2~siKLXncy!b6 zxo`lmn&lQKkd*pG&2s0N;&{x-pnqqv91!6s1H(F?93@~rJt)4GTJ-#_#o}vsn9p~j z&~@?kuS({1VVk`28^iDtpV!%L7}Xd)8I0hx(% zg503@P313sap+gOD9*4H%zqfMSH}U4z(W)fs~-9EyB}_7`H-RIwZF`}L+xQ=O0g}* zL+v=_Y%>jHL8aRqei7Do69riewGZT6R z|5w18Ag8KR2Wq~Lki|UsK?P)Sl|hINS%Ue|P31Is0SR&$1)gLL2WhBpkhG;j@k&OF zj4W@BF8GCkE5SJjK*D0fkon=y7`a2e}#zm*6$;~W! z5w6Bl&$vL&LxbYTE=-G5d*fhY61x{ly`;X?zwla`MDdZr?0=8B{}=d3D)*UZpr42f z`x&cQJzoG-f+5k`*`}TmR|NS`Q`;;kpwT%VX8Wl`?POSYP0Ay)zTGk=A$3!g2$f0o z^(0Pb{U!C$y|GPm-)hUcMth2bTkcLGkumg!iQEEty?9@`JqDsIMPkU(l3OUynUYq z^|`#Bg#{=UhlF*pbopW%g!@)aG(kvu-?aL3bl!8i<9}|XF}sC>5*8-YgB3*7w_?^W zeev$nZ+t*+0G&^R`ovN^L#lW)X2Sl9&(M#)1NvB&!9Y=OKEP#GX7AcUlFau#1l;nd z!bJxcH{c5B#Cp^pRM@>9w+yaIU(0-!9^&PWojD1N@BDul+yZETT$P>vZ47vpqTH6X6H`u8FBVG)B_`y_zqacG20UtRW_V)!^x`fWLmer`bxQGu%6D6*P?HE!2 z)_*VVgr32sPVr?k?s8O)KmOP3U-VEi`lUyke%Z#p>qwb?>D%V}+h28Wc-t#iX{dc- zyE@eiUEr7G`-4~c-bEDlSNt0h0#BS2-}r0%g(T)p_)Dyg*|uh^e`4EE3|0o;m?5You8tC6 zFo>X2*#nV6uqG3uRTly)OShM!k!a(G{!;7XtreNK>FZ_0u^i{TYu;K z5fVcL*F|?;u`elMWs244mC2ykRl+q#&j`f)St%ByL!vN7FYqB42ncH*2O&~zWX1!a zycdulmOQeAT*918Hzoo}8UmNkRMV1N8B!MoiGspp##`Rn zv0iJay~;rFsjo34Eu#vou2%h>?j>S&h#)IifU>P(uCPjeonGFD%E6 zDejQgIpfqRz>VtHCZL$#%cKqp`m3MWAD(f&86!jWsJwAD?U+%&ahSei`VYRIvmVL+ z8{hcvINIhE;7U9wqBY>qG7M=bq$N@Nbf5gD9gR(}k}imi_c7fXqP-C5sFc~p6MZcu9JVe#r)r~@F&jv=k@(Ht_+#a1NiRGvVVww@aD5TW}JU~ zug*pyu%b^fY5~S+JxUghL9v%)*qN(DvG_Tp*wSDWLA zxY`fyaW%(8o4#^9jVCwTgTJxNvQN^-0_=~tFX%pk5m7YWQB287&j?>+TzOavN3;$o!58G3R; zX@BB-Wlit*s1GhU+dn-J6pV%A2?YEve*gbYe7yhvOC|rW|56E@YM6EF0B8um}wGPm&qn6P^%U!Y2T$7ks+jQ-W#;T!2JoK<#;)pnn#u08YJV0&gHOK^M3a zKr~-W(1BiR?gWfvt`8jm7QfTt{Js-SJ49wcV7}$&`V5VhUjcr@vxOUI5uhGdJ-WbS z4|mXQ5EJZC`8O}XOSA}N1`mLITHfpB05G+uqX1G4yxwd;>}h@g9l$kFm$Wu5g}bJ1ls;z{YimmFd|5&`YfCJ zj0seajbv~XIMe<@PxDnc?!it3eYpj+CCK=ujSC3HH~&t+B8mu@>dBA-UI2C?E&&_p zLQo0`RZqw!NIib_sD+pTAhJpII&BXzEkE@9md@`1_az&k1O{qUjaAqnQHcsNQ3)PiE+SUrx}O^2Z!IL7vAWrsJa}x&UH;nFQ9* z*U%8%2AT2)Zs4tePT$8Xv{56SpHK4TrvnH!DDE%*#=GTS0X(0HQf9&JJ9 zpaxu1;i>~+fIP+njZ62g@tPc6*d%<_A8)V)zJKnI6>N({#1JZIO(X5j?>`Y(g2+HX z(1L272D6PV1qtFWu zZG*A{aK*IrBgF?o%PH>`NZW$Zw8-OqY^2I4^vMhHE03pb6G6)ykJE34sLUKw9$hoAs`)lUNG|xTnNfC3?(saL1=uh$Xh_fm%Pxo zQPHB1R~R+aBM^0rL`4Td^~FNbL4PJJ3R;MB(tx5cN~GAd4l&Uqko2E1g|SUSU66x9 zNm@uKtz>yg)DpoYjF->(WBB3yB00k=~{Pa`Pbw%Y9#9OZNQleQm^uu3m z`%l(_p<_ra8~0$hbe#DXNEu)xW|^5P5=mP8p6&=VA|5!T4y&$Po3u3P5i2%}3 zQ&v(hxMrd#&qUF|QWz!IBd~Sh?fSYPgn|{SI+&qta(V=gF1#&Ycz-*;@}yCu`OZ(i ztLp*?h3WaaU<(DER}r^JX#5C(@|o&t1K%qpX#@h9hg%T*nIA@v__}~7uh8)$^-EKf zIu9ZsS|;Qzi0NO_lz)M&aR04CDZjr{{y*^6j`sgb_o08}oe}qZLDwCDAj9*VwgO6g zpYoxZP%i;tH0j*(Eq}nGrFsVU%fF)CLrVZ#o)xr6$w`L97C5@5Ol4j}N)qUCAb~)` zyEq`InAG#Qx(cK9;7fxDNz*EABaQQ%_;kEPyM4DPZd)RF+PLZ^>c5q`jUGX$>jMAH zuiKW8Z~izA$eQzGT^FQpKGU`&p7Z5w959;m@47Cib3SfcI)Bc;=D&q7u~{onM(A1) zuk`$&>sH5oPI>0nx)dv7JS z;@x6sL5GvJ695X&^>wfnMoFv)j&wYDx~3|G6$#U^VI`CximxMurUkm-V%+uF*hWf8 z>k`_QKtlVcWPjrTHqid%x-MV^)BcgRCB~eu+T(!moL}j>Ab#`d_y%*nhQ|SobN*A; z1?8Jh$G4dC^{h`$)G~ory*~CyphFN~M9(G^2*>&CrE9On+>4&21|EBT?0TK<|Ax<$mB1+@J2Hq(whw6JgftMn%B6Ephu+njm*hu-!6ML&)`*xkSB=QkhF zh53;=g;Qp5twWG#*Qp-CO2@EQg!te0Uk4y7f$lGB9dqXsUiC-ZCnq}2|Mr}~JqRY9 zD?XT#cYn(lbPokFa}zy7atlD$E>_)2%ZfO+Ml)5!G^2Z33qnpBg##jZBBt|Nw=IC* zqWQcP<}=a#mT5^3qye3b?=s7{T*0u8j18G@R)7vELqa}yWzY&K3=FaNu!baf770n?nQfD2M`-L z(|^5%t~+E0iT-Oor=If7;jc6;)H%r@QG_73cC>9~dc>>#l#P^EdvP>z3r0!LmG4|h zJ&0DIXRxQTCjI?S9vxe^q-WMIT9o-cvOV>CE*0|!EP|Dh|KhEGz0Qq}THjAf&CzpT zk6`(??SAqK-u%oxqOwX7Hveo4jXyjnKz~d3cx&eKOD{5J;G(zfdr5K`G5iFE%H zgny(<-{c?Zlz&%WuHcO{k4*z-!lt&wqR_{%_wDUqfZ)>om5gJ5%-X-FB@(Ot1FeO} z{y26T?a#bkYC1OmS%1176-L+cbdN{k&lrB!->-RJNj;BE{EJUoc=Ot5Bqt1=GJj=% z-cQfcgp{QDOCA4RYnbjk|8lL&>qAjqT43WI31$-FV~<=j4QCnB;sdLr>1384|LOXn zeNjrQqUQ_jl_`l78xkus<+*Np?s(Oo{*B`ReBJ1c(1S(jcqvpSIgV{Q^P&zCl?jjf zH=nj$Z=k1TZJVwaH?elJ+)CwaB!61O(HmH4*{Zj*|0I8${nTAy2Wy&k=v>?NyA_yu z-?VIl?$Z*Lqkp|$kI+%qL8J16L**J_-?j;Wm9_6y1p17=&qCYMFx&I9LZ5$rH@@^E zeI{S}`Pu!_&(Fb^etwSIc7|yh^xc838}l85_TRV1iRu8zOjoxpwXb~XqJNnV#F_4l z$BFj+OUF<;0C%Rl={mLR{m_l$2!KTT9ANq^exL1e1oHEHMxTFv|I#b;IsDRppTG3q z=P&*Dd8F+Cu;@CL65227!cW`roZA6Npmdjv_DlGoYrp+>XS(*=e{rUZzWqmEI_}&5 zFWrao&Akbe?2`%EQ0uuT&;>e$%r* z^ym+LKWbP}ytHDCsAw=mD{H1RNrO_jd=DL5;DhjRp4}UFfv-6u)nCl(? zGLDK%>t6mEmz1`f-~WsYmcR5rW1=Lq?%%W-ZMXBAR-4=Lo3?G+r2XDIb&#Z;B0yq_ zR5wGqj2TykL+kOKy_6k)p-yB~wuQP9r2Y@SX$Al(dEdiM{ZiHbk zKvMromne1dPg*5SD}R3#YYO1;H2}QnZvIs>CZKCgL1UYNr3(cFsYTDvOw2YEHxLT; zpn;^gUdCa-im(6Bibd4gG0H?S=n|^j6a=EAx%X2J1S_OE96xfe7}5B^+^Yk5nMNSw zk$^$4C*%_e1WWiYs`CP>|CH$ehVBE(Y|EQy+;JK|7|@mKF@GI`OlsEyY9EVwfYYA) z=4*2tnRg-^%jJ1_{FeXuD}T}E&W}-2^ELZKlLAhC;R~r57HiDKpT)1xDfQ9;MOahF`Piq!;`keNT8=B zk2EeMuh5YnXn$Ivmp;jbxJOTrVu+|eplCoar(K8=l+*t@txqVvY0Lpm{_nh3phoMz zyqD^!kxuCYf;vh^2z`$LA)pc@25!`@Aqn8ow3tvmwnNiVf+pI&kf0Hy0z&#-CYjKB zTAB{%kV$(=$AAz()Kj_`LIMeWzfRkO0gkkvLQBiJw10m-{hko*H*Jq>&`&s&KmDoz zGhh?M%GL#B=6@d_eCK!W&)lx?r(JGO=cS;( zceLLjfq$0%ZvUk9G|zu03>g{(`yP33bIvscA${(1*4w>|p`8_#ePO|Eygt z9mjc|f`6p%=Jq7$0X#?jnp=Pxu5=y91TM5)qVh97^Zc#o|8O3+;5&~pq3t@(d_qt; zkJEpZPi9%YH_$le$ep*u`J9`FxczA&M+;{ZC#eFI zfCCx_zNYI1W@=*vYq z{#H*LQGlHwoi@fH=t%qT0*&XOQ2uu5ck(=Hw-pocsrx*&L(+3sqV`dI1jT@L!k>W{ zumV1%lZuESf!WeVT)`}0-3v011Ysg*Kw89uU;^O~-wT9(y{Jv_1hdHzGoZWj)qmeS z4IUu_u545ly^$MhwD!6SjoUmGKlv(8$WMM@V&q=a$Y;|Zy6NZ;(Ua2hDhZM!iSMAY zQYh0Wl<0{(^BJz#6tP|(ugW%Bal9*~Phin>`Bf()Y`$|#{kN7&fc^ja=PRY}rn|kz z=k0&`pa0MQjeeo@f4N`c+yC+NRDWgV_#fKExt;%a`|@b*Ta(@Y+nm?F$MW_c4_Zn2 z{iVNUN&5Z|j$!^=+rtGW6;KF4ZzKh4vn--11iG5m#I(% zkR(L$lPczspx{nT#(|^G90hP@hdFTzbe4i5L7ahjMHr=Ry$FMwadkS9A%ABpWXkKP zn$>c7cz3oiGW>z8FVEnb5j)ZYuaYd+PxcMbjbB-y&1AM8E89h#{o2R-7A=TowIJ>uKLXTMy=nC&|)$b>xb zvzNjL?m9#n7j*&~b3m-_a;TsM-_zenz9>OpXxK!1_(ZM8Tpia~#` z%kZ^2n@4Y_7shFEMtx?Pij0Lcj@rk;WBmv3p{LD%s<44uWujY^Tx|C$KA#J%0?Y3L zGoFHHa=PkXes&V>VV&NoD)Ax|`TJvDvmLL#yOMe9w+GXltn{>U4>&D0`xHOFACwx| zH{X~$b9ZhY&g0A!M}PcTtlsLO^A@)vKIqrGn^?zyB)jw4RX0pb3SF|QU==tQ(6i&2 zGGO>lYSd$b?^ffJtwzrGD>Ft#TR*k^GG|xCa(9)F@1hIJOldPEo6OVaz8GHy#A5iM z&ef+wOqGVj=fXwN6RFwXZa5!SBD5Y#r?jOrsXR-va!EiwcV-HPwNJ(y__$r&+61U)6MLT(#zd$ z1M?EGpCx}^ynkSh#o3>lUT(2KbC)R9(o-(g}6s@|t2E1?%bsN-1K$KBeB5^Av zWKrs;(}Uv!ZvR;)T{tlwH+X6&*s>-%9E}5t_e)otmbe^(@E#BPQ@G3fWdHDA*%;l# z9eOs{+G77*TZ&no)~SpxWwABt#M^EY=y%+BT|Z2-AAc1|X49BsK8?4v_t#b{g-BA9 zRgoRe%I#4HC;xM;Hen_&)*Iu{uG{NJ$QkKUI;{<9w7M#Tq zpV*bOzIJv*8&dm|r&CQ%3NvVqcB**$#QTQie`I6tS(RF+_|nySus0&@S#9ghT`db$ zyK7bC46IV#miHH_{b=Fri>>l;%Zb0Jejl!z#D7qe`fcSQ$Gu*%F+w%ub$<3H(kgp5 zQybka-o@%QS^ODXSC8=KXzSZ7R_mP=9rM;(zsDHY&OTji4!1Wu;Ay3_jAmm=@icqG zOHpC99->A9v%a)!dff!u!NJXqy*8t`KWFAO+k?qh={paqW3z4?(Yk)#N$#cBH^&`u z_kRgD=c_rsMUe_o3w0NL*rqCdPHVKL(K>C=eM~+#ENsMg=bcDUL<_?YPs;hh?iVP~ zuHNflZQJI>DT2F>aJoIL(?&$N4Nk6&1(nnbK`!NyZ-FvdXm*Qw9vL; zUmVs|RFW{C_DI+1KF7pZ0y&V@}gp3#rMKtBm`ymFo2rde(i(+Z!>I!;5n_@o}wanbM3aadEC>$z$_re;q7#9X;Cpu76Q# zcB(H&m&-lQV!CW?^U?>W^h2<&lwp#z?&`%82;5FPSx}S3MIuM`z0uT@qr5LiyEQ*y zF^P51JYJGNMM%aNPy`Jveq6Zd+}IK>{?hol(bta_8{Wi@qg;e@wDvyH(r#=YAlWs`w@6!nNKChb@Vj#fG%a_J8pSFXUoL zE95+%f5w#g81cQT$%b}7H|^~0U)+VCEU>SwTx--3HG zv8@5NPJvwd!1CfLSn_2ryHz!Z14BeeH|a*bJq@Gw}@hify{)FT;7V zb1p*myShA-0J)p`l9sH&%D1FlOV1rP9L6oBt>#!`CgqQI8kXw~o__>AFp0d_p@wbL zG}02rFMUf^_p%iT%4z=yK37>2bN6}4xACPLgPfa4*Ba-iztN}A9Lt=Q;g%LLDjjjb zzeu)Oqs?$7f%l9`OR+AyWFo=mdT3dzRPdE(ypoG0C~s*cP2{DRI@i`9*X!#ramXed z#_l745!*_oD9B}NQ-3>KIS$9$g(vX76(tBSsgx#iiA}vI8dzI3t8FwzXb+u9WZ!Dw zX^XMcN~PhAmtMM>uAlR_u5QDSUqA9f2`ahop%M1FqJg-H8doIjs{dd~nJOzo?aA%i zlhLmpoT%glB&RqgcN`P!JjU48lDd=AODCscC#O+o+t%h*Xn*l}3EuU1PIt#s${kPX z_IOI3_EKLb$#}gv@|2f{$$NJ~@bHUAw4P|Kma4GS z+!Sp-j@*N??qxlg0M>XL;A(j~0f+dUd>r7bqu4Rb12gYuaFf*gI~dD*R^#ncUx{ME z67lA`*J$K~y?=dppW>v*oKl+_?+W@I_uSrNUYi$FXDqkGQxm+OdxMG8#@DnE4cTE+ z90GnC8SegGt|Q%eXu+1fmY|(3*Q;g#-HC}punXs3scdEnk19-VlJCJ688(yF^enmno3q)@4(&<+A+EL&8qN&cbM_7 z^Kh1{jh>QpSS3>V84i=43*{?(p4Zy8>`=gptY?w++E{Ig7501f@)8w|502)GpqRDS z!zvTN@_$rANa`E+6j*cf8sEa*3jJ$W2_FKdtK;&$5#NO&vf;U`DyLi z|D9i+k+$DmTb=9>+_R{&F5Th@_w4k%Rl~&E&wmGs_y^mr1jb!)XEs=OD11IALr;C~ zev6)Uw-2SXP_rmNVsg6dl-fDkQ|~WLwHf_ou<97&k{jGb<74sjmK%*(1`ICT^+!4j zPfZKYnHWrbIflAZJ+HDl@Q!E86{6=(*zYGP%rQPk%fnG$iI>M^IDpCMtv5zyRJ99dyFI*lH#)bgExXbB zC==e(`ecwXNo;9)wYZg~Q-x$tSvHdNZoKiL30&n=cbUI2rm;L8npV(_t_1-l3zFKR?P`6HI?wy2Bynnn`dzBXr`QkME$~lhQi&)!*=_%&b9v^FL z&`%~m6&$qr`jtt`mmytJEyfIgy?RZvvv&#-FAkAJJQeTFJ7?D)w66~_zHFR9{XEWU zVcs+Cxal68*si&%bz?nG@1~d++j93lINicvcGtKcY<7~uTYL`0>w$e7R>8)+pMR(6 zZ7UkJ|h!0>FUF{=Qb+4 zg+V)9S=s&Av8z}7g{+3VmknA@)M{}VbG@HXpl=-Y z`B;LHdpn!Cun2tne!dQ;NRqncAs9P(yTn)TQ3>bInKisf3D27I3Bhw|3W4Mk%IiJP*b8#tEFL03D@~wI)`zaS?Z+ZKe?7ny( zSrs-b70JhV#Q0?>VM3O>MBYN(*A0_otCO`t{&j!r+#p+K@qtOCJU?8YoW+SS9Mo*b z7*|zH)b%8qayM0Jw2|3%l{$y0;P5q5wn^z}>AF%4o_}ayNiUZ#ruJUe@gd$nJ8n_O z+tBeIH*Gv(x2(=`F_m^RR%23{>&i%Ot(327?A2P*uZZAXj^VrAU!46`hDDpQ>9|&2 zjk|eY9Q|fhm&ukc#D87%+bb{;dH71$)sHb{dXem=%0GyPt19=`eV?536I#4hmt9m` z%S&jPl7IAevv#zesM-ap( z>+<~g=7bNQqx)Dymb%_<>cd%8ZI5~F67FdPLw^)G(RFPuFW!Ld(SEJNK)du{hUZrWm~8@BA*eV*WrL?xa4td%ZX~WD?}X zI+hl%9eLh^u3)AB9#;!wqYBDKnl$0IG=d>WR@r||T@}{NyuD4$_KNeKzf@|&*W1dqO`{m{{UBJCa~;*onmC8W_TgR0ZZ|d>s-Da`{A?>WX_YB8)3h7dyd3>{ zeS0gK{*X4ws_V|7x30T<$Ga;4=Vq7g>$FVn2jQbGxTdY8;vrmjXSaD>D&fc%1;?b> zWIBKMGTJ3Ue&{cLq*tR<)O*$z>Z$KWA_du~%cXu@rniUlc-cjScK3G5J`3BfPD}5k zKJSHcziG`Pkt!3TirD;BR zrI@Eq55;m>LRg*eo8h%H3Purj%kepOJ4eq>la?Ab6xxCB%GM9=1+>Ffzt^q1X8V6a znBenBm+G|)%~Mp<|Fg-mtO|rYf#7AYBzXgdU)TqlD}N8 z60~dVq+!M4Fg!Z#4E{SPBmes3+{1tA=H8aeZuxQ6$$>F;BD1Ngt^cssrF941ylvmZ zbye3w#jNt?GwiO5CQxhSKfP`FK%#TM8Rj$DpKs&&v|H?Kt5Nl5+MPeEXP)ct;Tjxa z7wR8fW?qB;*xUzlSSX<@T_1b6-t4!k@6-DlH|S>VPl8d3W!IR|)?WJgq^^IsDLpr@ zmQkmS+cnbOk7?0!COcZZrQ`TZE*Qq+>NWjJfrNR zp8fpBFZG_`Qva+U>-FhnaFSOa&eq10xYO;q=aPf!zkT*#6oJpmVN~U2vFY&kfx1eP z;>UD(uT=aQvg^XF!qe8c4SRpW+ZF?4QZCbV?K8XO8=d7Mc;~Ol%@?{IETYmLFy0=F z&B8wo2jdvr!)}{=UUUN=>QF|R1J$F%f;cT$kPCY%d!+XIRpOt?R=^t}+w{m=c z1(RlLPk!IA^21;+sI=b7TiG!OvD^F8V!cjx-s8DNx8pl6+(SKHo95ofLLqv~i(g)! z1K`CXc{ah#codGh!-tcyW8d-aES`23@3Wlv|OF0l+&8#tIr^ro z1c3?2_81NPZnM!*@M=`sSYJ2M#jS3K!?rb~%}N_AetfSZ?B+Mp-4}abJjSkPMTLF1 zZmmcWK&qu}7ZJX1ob%Sx-%WovOb47Rvu%`ES#oQ6OhA2XEc<_%DTm6&A0gyfd}Vgs zb$z=`cN1foNm>0ejU{F~KbRtv@7(3`^uVjb4fL0P0xnvsf>2p_>lN4tA(ZH+bzlMs|+cwryXsoFuDg2*UiR3RmfP5lKfKoQHn}X}$6$7^<_#7#$a?ZrMuO zHX~c)#W3A(<>FGX*N~N_(veJd5;Y<+o83cPj-Rb>?Yk$h4s|5jd;isxnSCdyoK zUgbK!O!n2Ozbtl_D!AUuWlY4&6p(5cE-sDJzpJ50R1Y+kAD8;$bTiq<<@Ts5XAfd! z-trL<122DHdB?7**=)vt!t;GrUUIwEP-QymHW1tR@Ao z%Q$A`QI0CVKK4N6Ii_;**2& zzB-*vuVYvK<5PBesGd#CrHAElSc>`8txo;EwW`zY4nxrvk1q4M>Y?5q@=-ALJd0P1T;npki(rQ?Kzq#N*x(OElj(W4Zdgj)$rSvJfPQ(U%VISj8 zieP_w){_6u>zq;8y7BS(@sS_HAll)5JSCMPIh(=SU!y0wi0AEd>TFFTqisFJ+v=04 z%CJp`x`^apC9NKq8OkMk*pRGutFquuz+6Al^1d0CgjLpes%|qMH!JqFY!y3E#++g@ zzR!uV-}T9L#Vp^!+DN%YZtrSlde`g4La2X-X0@t^XIQn@bGT+M@l&e~?BdqT;@jFB zU|8)j=4$>@@8}2QvU_%t>v**@qNU2dY}HNUgO!EXd~XEN9rN#(QE1_H_++TqnA>di z7QFbvY@EIET4?El=kEf3vCly0yJ!5=ckDTqU(fIy+gv)maK$Q(_ncmJ!z#J{(O690 zHYay2KRZ*<$wC>nO(>u2{X-u)_wwx8tHYl&IjOG*?xuNNJDX|aUNT%S*xFv?c<=Zj%?axszFgF3bq_HO{2#!(Cu0 z*%>s=*a&|BV`kj=vP2+A$ioZrQ2rhaAb?b>v+=C)1q-DNRouhVzb3rc%3QX7B6 zk>%Kt*$Fny3C*|6YiU$H&MaujVixSoH?%dF1B(R&=BjL$|llX9yS*V)Hg8P;Om3y%}r z$7s_}X2-CoQ=jX{{1RSdUR<|qF0O%lnYm4Ko3i`KE3TvDV6ADpHjn;f8dNLUw5S=x z$9w1wF1ywkJ-4^%cDJj}f{W{>l-uC`Iv$RP?I+YneWWoojd#a^IbDSL^X`9S*K{%- z&R8(rIxkqQ-qYhTZ?IN3o1r-#UVO2!kT|>#$0D@cUbgO6{ob3kx_!E1ycM{g388~s zY||k+UDn~-;8luUMx)a_P6P$8==P$KVj!^9H?=!eH|uEciUJpTA}_Jn%B(uaCBVHKJKt|4*TBx z^H@q-+r*3Ws5cZ|={SG0-sYJw%0AM#f=1jN*s3>^G3-WDsvWe^%74u7nqUCa7CZPnj;>r$-F8y9H2x=gejo7&X4RW6NWHD(hh9NZS0e61~~*<%oz_nB2w z-tHaq80^m8a;vwuciaOxp{vE^R-}h#IrUREw?kRA{xcm!v#XpuR%@O`!^-Fnbq^0reWvMKIzLy3bI~5g!?XgfdT zR+=JeI=h7>cXxj~)e}okR|jwYN_TH|cpQekligdKnqb^Ny{N$cEf6%_BU4km)j7q8 z8)#g+ck19}=&prF zbUU5!dCktRnrO@2@sXc{;)OiP9$>b+d3L^#Q*R#UZ5@BNCoxNBbh8@0ZqtG0Y%$+e z;Y5t*)zcMx<~*B>07}}YTf^f7OF`C98+%cXv`b|ttG!69VtL(J3v}7jYcMUx2N}!B zwsd+TKX?|aeC@oN6d3^?4aQ?y&3En=`yN|6&cc|ME-?dBuKar*@yW!@%VSoZK9^@o zx7LBk^J{d7^B!$x_FTX$Iw( z@AzbKFZQ$J*{ODtFSA3yk5^Yd{7Ky#&WCK8+{)cFo1W@B+4$3ureB;k-j-<{?Q6`l zYdo?Ji+6Us;p($RrPWUG-nS3FHb&)mw!HWbe!YL5ZPqO4F>riN@@?YHqrH3BJ=3!n ze~3%or*P`0)8J&4>+R!S@>BMl9*W5_M0{@F(z+w}%gtsa>y11eaoZ1;c^c)ccfRVEBW|u3^}W1u&wC4w zZhV_D$)DTpYjO+2Mn-IQ@8h6I`_;0Ttbl(E$2X_`Ty9G}_4=V4Mp`tgo>95+>{8E` zw~2EqcZohc80KOf70&o^+>CDrXR_*_KhC3?kCvxw>pzoEoy_95D0;)eDaiD)t@K;E zZs4eSpYoXV>E*hVx78zC*^ZnQx3zDF-e_^S1zTs8%T$ZwQ_tJ>PcMAgG2!*aG6#P? zyYCm-qDrbFM{#l<^TEfRN4M*)I!oO%=}J2`LUU>qMVIt-9c^)NO|$d#xYZ%{d$)V@ z>`inphN#Vk*8N~S<(ik{JGNHaG|CLmD%-(qP3Y+`^X4H>_s>%t@1NN@(;U;8PVjMW z?F%Zd> zB#Ywkxb;4>TfVzZ+gH9b*TL#!;{GPVQ5Jj8su^Ty8{FZw->xkGidVGwSXVZ?CTyeU zY#Fv&qZQe6 zE_QT7uTgkeMvrKD8{IgaH^()_#&8&K2kBvvyyB*5G;6lLPjTdi^H0Vv{l&wv#*6)7 zV@=%ix-dSA$m5gQiCllr{ag0(gmt}1Xous$E^Su(@t*4acXh~?+S0eL5nEjEV7GH@ zn8V?&e68l}?dit)(KCusUo)=782M8j;(3pb-ScsCzIrhgU7n76@BfbX-}tqq%?Glo~l|W@8jf(}6{L zU#!@=_JX{3yj+JyRgH}+zmb!PZB&K#IoG#MASSaOorijT>%Yw7ln34*7H}Fq=__*2 z*4@-+8)2q^PDgY32+sQZ(XX4qxM=K=e+=*)41ems2f}S4QavBdYB(!{bD!Oo4^La~ zt!pb@`(+@s6H9+?OE^uywXr)v|WX9)3(onw*U&g=5M0 zIWug@&OSmz+)dT@(& z?#j0|)#90WRQhpD zj_~Xa9RGh+nB?M=+UT}=^3^#`OuT9%ZE!Tx`mC3ihrgQcy%FmTn%nGt+!*u6XfE30 zO3W8=QjU6`lVSvC$mqrbSv7r?O>9%oBDm;#EpO3n`*PwG--nM>CnF}wqts4cKYnC z^B1ws;3d1&xOxVqN$csRI46m@kuq2v_Re86DC4PP%F3LN*Pkh`P3LW(q_un1W3k?E zhNt`0`pnNG^Tn=%L$DZ}&aWvuw*fmp*eO_HXON!)m_WXJ{+h47j{an7?QNLH7XQ$xe#HDfB5GZcdHY!}N zbZ}hWw!wMhuVXU^le3w`qoccB9kP8knV5q{`>c}n+Ftv{?wIZC{eC`gN0Ka-%jJ&5 zx~|W4$1}D)eO_;NvspWm@!@z}?#yJ~pX-0V|33EHK|9i)t4DGhOwH M2E$_01m| zytGIWRNBTLk_&mI+AO&&Z=lqBH*I6JyL$DGju&2-{jv%S(~Z-Ko49PX=k@B5oCX`? zJbU}D9sx4~$>eSo)W+S~ds|DRlVF%$lV!^WK3XoA7Y|b2I$_v&nNdT{k{%{{6GKa-)jo|k0k}3 z&)*dVei)WTN0dk;q5*rea@C&F3cyIZ3#V4MLO{17000cC?ruS`zj6T&eNzg6V5OEH zK-aQh$u~jqn7sj6z0lI%R3Xu2RUpK$*?DOT#NwQJo*+Tu^sJB*VQ>LT`QXU&T=d|37<1u*Q62I_rh z{C6|_0iYRn=c8A;8l2Pl3aiQcLDsFKATn#-?#;Q3SO!3DqFW|mt!DV6ZbpB8xnLko6CSDB z8F~$H8W66T!0E?r*nl;$g+705DYw3U#*^sq# zWd__rQhmo(CS(@TFxTi z`P)GQ8Ns7?<2T_c}6c~?g0PmI6gbTGvAQ$#%kjc@YTi6 z=bSX@Zxp=_95{$HE!bYiqQRN{XdyrV0{;`@ADxR9>=FdHs49TU3>tVoU#W-K^SRg6Iq zsBP|v+{yaMh^XN`z1e}h6Hyw3^MxDG7z0_v5g>oFDY1QG84h@#%WMgvDpmhLBMFW~ zWuJV9$??u^OcYw$+_M6Mh}=)b_!GsvFb2sOL28`JeuOaQnabc-mE3!nbZgtSLX@ z0c?L;%o$>*dPR(E#AId>l&$G!@7{8g;oB#}r3^SY+C3oN?LC;@L>;K>_%z+=GjVEFPev7)L}- zdYrSAG~l5?W(LXUQz6?DH-4 zjs9$<27mvVE(}5Ls=Zgoo&lbO^rzntC#t<$g+q|Yif+7Nf1Bn+FrhDj&;P~-3f+0& zRmkMT=~*Vf&>PU{-@jA#nrzdHb5hT#e^i zh-#;(TU<-1=j__a2HA~kWw$MfSVE(?^&?)WID}+2SYNd%YWB1K{Wb?qe9x$SxNyrC zyLqLZo%xa>1Y-nUKfqmGUvhu=YlY zCSJjb_v+j#sVa+HrD(H8;_D z*CMH#sLKrr&}07r8BzC_s@)ybjl)qFuW@#vAKKl|40u76FA9HIVjAqnH>K;paFQBJ z3ZpBGxf-=OK*?+)7a`4=>4qgrj$?#Ryz#>cGGQMsaa?cWbOix-G3TqO-7NT8PeGL2 z<-!1NK{R-7Q7(jc&I(gIHx^8&DLA4za1`>UTi>n2Ew%&q7+i@AmA5Ryu>h5~Nb)h8 zIHn(_-o-)_WuMX@V<2i`$m-??dh7_{V<{h2K)D6(nSA2XwEP4F z_kqWV2_+s8!*H0FL0FyL)8V+WdlM=~y|E-1n|!<~NNIln#kinlSY`!ND+uC8f6i?O zZU97rRm&1VhmGUhZ*tm8`CTw#sAIE6WHtMP{t^{DNqUsMOe& z9>eH%Go@?t|Ja!a2>;lb_mhvScm3mQe)A8e#)E&ttl#$>`fdE@Z$EiOI`Z@EYnw7v z@7md!{~Ld{3ua_Z+ljtH4Bxm z^0>4DNG%xWE@U25w6Jr!UGMN@OIU?C&1o1LctCpy2_Hc@2l3Q+!1s$dz1p~1^OiX& zl$U=6R2J}T8MRzcjdvoPmBI#|L~pewBxK5eeSJ&O*;{M!zv6y!JEiaGCA}zq1#S9c z25+!*g%iGfNofLyZ=8-;0^RwkO}nwmi`B(B{ew?*q>lUd&A)MqP)Q=inlL{A6`1O} zLF{9o-|HG#{>74U?&PuNf5-tE#Z8dQ0>*#%$n{0P_Iff04p)hgWQNfn8pb#Orv}s% z_-bOv7$MGV1d=Ll2m~k=3{?tVVFr%6Zy1+Err-PG^!k$*t#`0(i9vIdvf?4b!S=e4&_w=>q4G&509f31#Fd(1{$JR@~AUFy?<` zMMGJ5@8P6td!Vpb_6Km`SOQFeXn?)MT&e~v=mUlHC!b44zc#J>rbvHnx3k-C3>Y}4 zpE&%J8~@mEmscQ}8vnvp-?8xZpK0jHfaX_=@z{t{Vw47gX1$PEZ1Rn{K2J=P#}`w^ zOTW=0yL?^^ZuZu^PI<(aF^m&St8RZ-@WeDq-#Cr8Wb<8%J2b_h7>j^0U^#xtH7MJ#iUyV~$SIRRL8_%9#S4ZV)UCx}vEN^v)xOTj1*e`!Jxc?Gp z2{GbO-_1rkz|RTdl>sk~pWN=<`qL{2cLZW_F_wCMh&Nj*Gt)mZWyH_hN1GE+{#fys=HKz|Z-1rwR-`sj`F0bx{$Fv9eA0_6X~Mp^YxIMQx2FwV zX9nHiPw+CgC#vG;jWAAy||wNAsmWGoHU6sU($fDY*9w!Q&KbUdFlFo*qPu#P`c3h3#3-4 z#h0$X2+%H1NuwYCr*PluCl>ti6$*z%&WRNy`i&=FTzozyXFhnJq>e$%li)n|&)3R} zDxeOVH{U)!u)QFRFdc*H(aWQt~ATf_L1T4}qAA78$Ga$EJv?{A8t`&lPT~ zSna>^xPS(6;WTk>h9qHamL$TqA2UF_Kb=1xDXs^A23lJ~lKx)D=&n=!->wrR!W!RQ zft>zq0}r3WAz=NwTlsj_@5eZi%C8D_evB3W@rgfv8y9}Wf?t0g>L-```lW8{sTlk1 ze}yCRYz3*1{JM+qU|xFR{CaBjCMq3sRkDTvAt=6`47pGD=YeCFoS(F|*OPH=1U>z@ z4vt}1=2noAwIBvE#t7dUnNDb`9t}*@rOxw*ueWMRK;f=$-GBtFA%*%sxd;>t^Q!+S z@$)^~KzN*o!Zd&S4^6e#y33!X;(Pv;KL@Na*IRIh{kT%?(&4XM@j@v6=8xZYQTO4> z*Zkx=DxcJTuq(2MF+%5w7slfdq$04hTR4bA!Res?y7ne~Z$>BLC=SjP#AaQbC!jhJ zs#^KIM2#<1k@~UI#FV7{@og*8WQ8Xc&Q8(|5ER>%K?#2iNKm|t%jCX?8+W-oRQ9qS zmw6D-&|OY<<{e98!0%(O+EEp3>{hiS`foc%S?f2+dTZUcOtrBs z@jjjO+tfdljere*dl%=^qK1LT1>DAC&9pi_3q0@}9qa{~uoWm&;92=Ucw9`!}w4 z`OnM80V-p@hU-FbUJl2cG@Pd-E3Akv$bh2rSj9@`9@BV5@eOAfmvdN; z%G_FSAB0nEXj$Qz=eJ+v_I^3LY;IKulDL(1Ywn0*$gQf&^S)+l2TyH_}gXbx`3l#2)$;kZH{m!%$+OBXj6WS+z%JAL#!Y` z8P28%=7==9o15o5#ek4Vm&lmh5|R2WiMReL2yh;+5T^t}hJsReSNmU5{?mVN zKRmADHM+Q$+4PiY&L=B~s&9PQ5B|u8-#&aTagx zjm|L<6cgo3IdM#MOPKLIT9C%385dVc5Nz(7eqG}oP&5<7G6_=}7`GdzxK;U{rs^R} z#m~0crsHSZ9~{xufW9{6eEas#*y(>bY!gzu`~J(fbvf-1{`#@gUz?~&@jTjr#z+7c z7NrZ@V12Xo4V-TV(O~bJh#-*s92Bv}rSdN!G zLB9NaND}(V0@@JU0Y@CKlj$cD-a2wB;>miPz^x|?%!Wo|* zdAYca1CXSb=5ZJo`oWbQFRFhw4ZuYgH@|q|r=Fs*mMP!6Vl5x}Ia9e$f&{hEhg}^( z7k-hZwHzbhT3I0Z_N3QG+^<2d`|#j+c^ucFWTv-fXs6W}-UET5axu8#JJUr;78bc0 z2MoZfKkSE>;Vo;%*lcKGwqO51+N?Djx;9(&IqhON%i|a%E1YkBeF$Yqo%aV7wMty7EhS#0baF(tJ;^K93O+It<{x;MbR#Nr1`11K*e9+;s zPxgXS)_!ZxS^M>MX={IWHK*+ALuw;AK|FTQAs_cBnA7N-A$)%}!2OSJ=;)N5C!0$) z^e#@e8Z=oI?ju(?uLGz4Tu`HmZz=2!!}3N^q=n(Py_JJ@*Q&kkuD+7}wD-@m7}*bA zRJrPyAlcb_{o1wN0(z0NrKJjbLM=E z%um+vlfCG_YGl^>Yu8lWc<>eGFF>`=TD6bC{~1rvYbux0x6!t@N%DdV%lK@t?6 zQp=B24#>VU#g@GV9GCr}P-y1&eUh&Ij{YYivdhn%cYl8fn+&x84=^f)dj_5u|1Y>F zE`0vOTmRMz72lz9+w>3bYpdFVQ^b*p{`jkSKs)>0?0B4=P_bc{n_JvY5>tp96RLKQ z4~ZnE+YCGo%m$(A&g{)jwDd)7;#SujB+>mu5!SRZ-rQ41aj@b^Sp&oksuK#z427h2 zIaT%Ekeq)6=|8m{4Pk2pnvzdAs|lFc^BuCeeED% zt6zTd7w;V|(V*U#SWXO>gBzrqASy8eg4ao9kdqLEr-OyTqygjB;Uzjp)HScUzrgGh ziZOL1F_fMO9$Vf&!`2eSi^eF#f}ZhLEq3bnQ*?h7HUq3HyD5L{=9e=@4Zr-=7k^=& zOvTYP`rkANm$(-f<;WHF0wg>6c)Z2oxOGoyVv{pAR9|q4L6k;N${i|iCj$sF!16ek zDUy!L@v)gxY$R0o<1xo!O3fgSO`wWv&4CZPO1eM5|WCm5Ihn680S&w=vGV=dJ<(NW8lR~oBv za&h3BA2|HKx{mapdQV9Hu7!*gW`l1ZS>1oQ$YXRNx6qP%cf&OdqA5e3`Ed(Rw48vC&Y(PGXfD?z{!(I;w>89 z;S@`afu!Soj><0(+4%*;@+|3c|DXC5%HxDJaiQ`)+IDr-?|TVisRiX}pi#FG4v2pk zZ!h~T*BC+rGDX}&ctDAPG7s@8_KmsIuQZftrSu`4C8|aP4D6-;fkTWMsoZY`YYy&n z;CmrqBg~nqlV5z}k3OdJLl@RP18dDDJAHYIv_PNipZdt>k6rx1j9s4aFHMqP{M1X7 ze-iBT<>vmCx2?^56Ph`*p7UtL#|uw>{jc8L=p zu{eXwi>1_+PSY{4=D}mv3y1qSF@=Mcj2Na|v0OI{E~P=g_54ubL6?84`x^hfZ+Nco z^F468-zkXp1k^~SSJ<}rcl@U7f9kV8`o*tJt1Ztj!GbZSN_Wx#hKx&PUwzhLhkSwbH@AK|z^}M_9^YSl_ zQ#Iv+{okLrA#G*s35b6&fuWxKPdm)e$;fP2#p~NN++RgJHIY6;;}AjC>5(%PF9*_Q z9bXKJFP5>vySf(q=8|8ZJycp<t-I*`Q>$+=NESdMu2q zC6bldpcJBuo=fVlW4f~k7Y_@v7-;*LesaFSRfNLtB>b?i_uzlPRC^?WbTT&8uFN*MU!Bv=sWyY0A_I8^<@wGkP}r97=>goGy-o(Nv}n zkSP~8?d*b#qHvB@1*N~^B+gX(eRWXsCokx70oL(YO24ac`(1M)B&%@(6^7)NK$_rK z1jOg(@m|`vvrYcBlP%ax>4njQtvepHQTD)X^!dvk^1pxL%AYz&ll}C|pE#2JQ>QK# zy|^rGq%{T;nq~8S#1$}fL^bu-`hE7%aCDxyX;GhoadDM*x{IIom$N30Ro9%-3jBadyX4D70yNv7X78eTz_>+UCn;*U%VG8&LY=OQB#|c47NxMUKG)b z;#3R|%NoUO!DV%rTzdh|h{wxlDFyuI)w{8(uLpmfAN~5r7EYO__z6?-+HD&8E&rH& z@u?(vd6I@HlxU6S+Z$Mu4(}FGuV3gn@EZf+^}ahzDn7!^U?;&fmzS_oDu1p=zF7mLcQQjs4gIuaHej7k##Fi8BxO+Mk^;QLA(V zsH;nbT|cy){&d~t@=cd(t`+yH;##M5|32sNHA~J*L++?iHHkHFs$M*)=KYH>y&(@s084PM92;E5W^dj1BIo~-by;!`m2v>QJ zJW{DyhSV)v+WQd@M2|YeP&tVtwh0y%@0o@P+YwdfQCw#{MhK*Vkz}!|vvk4*D^!2| zZ*=*7^)H zy|UtgRsRfsbBNBLcG`~LF`{s~8Tt{lqP9-_L?pK@?lWk!hxLhnk6R<4ez%HJc-|HfOL-gSR7bn;X2*`1ObT$8}AG#}H>HU%e9Q$U!WonCW{ilAo#GO90vqi;)1pfn@MpZ3=Sk>wFJ3mD`KP8FM#l&B3iCupc?*csTtvL-H zF*lu4Ofe>SV^R!3{3HoMSl0EoZpf6K)~X#1#b-+KZO2d7j^CPz;#$u!UPoh}azUMN zV;Y|{RQzXJ5+ z8lc&B$VwYB-vI6AD)6QJRo?Z}`CtC(&$$2p&|kr+(t@VmARvEF1M0s%D}NQMaUnXr zKv!2k4GdY6AAi+@4xh#f1GY;+f9esbz2S_TLVtV0CvdI|g3_h;85;pmY{=@xKN6vd zu<>bPctGrn+D!VZD?qwbI*jyx(P5Zt*nGAZ;kGf@^J|Bn6?e^6-ScrJ0XPka(5xT5 zW5gO3nhh6zl%ju*Gnc7>nymG4hCh>eeVqd82OPngV#EnH^w=98$l=P(43bCh_MTq4 zIdt5yP!0qaMQ@d72--19RNiP5{i*aqRe!T2NM-c+k$qU0?M>ju&wP>5FUQ%Wzjf5V zc|i#OtpWMFZU<4Eo6)HiAc8QO2OCxE5OZwj8*>G6Z;O9sa1}a&mE;!IA4~1lthm^? zpN*XuWZJJK+DJ1!&w)Wms>_9~7z#cKX2Ak947uAJp+bSA8#*2wvW*NNheK)k_vrlY zy@%iV5S^~9OqKnw{+gc=kxoy>Rn6zpVXmg*fDAR4(x-!FGeV8urUZfm!<_EiONG5q z&5tm*{ri8ecM}w0k=U@E2Uzkny*+wwA#k^cch|p;RLJEUFKgAl4u+Tj2jjOsq1uQO zHP7P>McYGQ>HVXQ1L34Av*?Y_`*pwGq@70ONAI)a%FeR?*x8T%MbPzjIi0{`Ighw= z?Qgoe+~^-YRQ9bSe9s$_qN@WBzIOVS2;jDdJ2iu?BUh4ukKiSmC&_TB%yma0N$)lKOt{|90Em+oC@jJo-hT`vFC zYyRrS*I!*hhYu1eodKN0!EBh0MNpyKSyKc%7-;62Fm9B*G*vC)eU%;@rl zA;lzbQl;G>iR8A;9mIJ}v}y?_QEg=prRIMJ5l)BxsL=kB1oADYR-FV?b>+9?sD z#%5X~o+2(7>OU$E{a^Z~pE)31PSUFRgo@kk_*0SEpfm&_ z8~kXjyE$M~<@4?D9N8beL532GRwT?gmgGnfH!N9eAc+OJ7J(=h*rJQ%QZW!2HUfVg zBS8$`hz%QRg%L~~m9LkyXMGU|#)tIXxW_0Tt2u^rSv-3BCq+9;Vq!PKSjQgYLjBEl z@uuXtHjbp;#Yr_G4DK;*J6-v zbb1o}XFgo41;KE?b^I&Sd4!$mh@>RwvI68_@jGhMi%d~v6$r+0JH)m-(24l0Lo(R}Al&QDN?Bth|f$q7z7U0Qc8K+~1LiziQ zQiu~aNOvz|Nbv$Cu^Ii$=@0(-{N=%a{=d48!2f?m)&HMgQ5DoFpYJEr|2YPTbl539 z6b6HH7{gIb3iS~f0?!59AxL4xEfYwEa^=5|9F~%~U z*{SRA>eyA+M^YW^*R-Z~KKWz4|+wEC7~g{DM(I zNCOE717NOrn^8cD0IpC+aDA8NS~vUF#q)>1j0+xD{3jtmOqtil*w;FLP*rg2B|jF1 zg^M2dk`GT@@Zb9WTV9NE$=l!51Q_%WzLy1JOU{9iUi$I*k&jXXX1n%KFW za_n_qW|o2R)sK`#uKX6Si^wlgmwcW-`XnJhC4On)e&i==qm#R}c0F;UR`J=#eR~{S-T&Fn@ zPJYnDrDs&Z-7eZ{l;Ez1{@m05S`zac*>&3MON9Qje#v};&+R8U5BLT<;^opf1OyxN z0#XlN1zTJ}>2z(*yF6=bngpha#R@G0q{Vf>OaMu6Juquo>Cnx1!(=5XsC5!!TUK zkOmVkxPb34j6Wa>rd)V@K^=Thkov+BI3DlyiW#s`)9CATVMC&k$7#YkPM;=I6CziH zzx=}gH-GE~*sYj<0OJR|AImIrB?mD*Z`qjEcm^6kL<;mlz;v=a_<;skB@jvWVvA_h zGA1XoBW^(GHo)t(t$pu1a1k%h6X2(bWs11MHc)N#^?7wI0i4D%&9v#BU@@#k0oOX1 zBI4B@ls0FLdvoQigOohyX#we_R5tOmLl@m}ni;_*j5?G04 zH_+&{HOelqjf;kQ0*n=iems>C^X*q*8*?r09m^(oMkF76Ehiwb5dAl-crcBuaaU#m zUTHsv3V6dp{Mt@{X!}Ju0sZ=3a1yY5E!&G4u5LVy7oR_B9c&BomO^3G7@%0pPXO2KeRMS5AO#?Xxy9`rV9MF ziTrK@&Kw7hxdDNRV!w@L(b=D<)unwNYRd6UaDEs5a&>*a;N1RZgXR z;RwJyfoS^0Q%*gY=K6HFRk0h(Ua+wcFv|`sl(Pf#`+ovTVF%{MIDIK=K(-E6mF3R{ zoMqR6*G|CdJijbFW-h(9jbyYb1Bcx#!H6cnkK!S2meBl`ot6Me(|}?f%ke#PfHAC= z0QvfVO-AHe)&P(Z<_G!=~rctfzNzMQ^mvp@R99>?FkQFJ17)k67AAvv5mj%Cjf=4fro{BzB+T2WkiW5 zhT)v?(!-*d81SP%3_r;lu%hm{fj}=7V1bC}>nH)Xg@K~#Kru0VZB_a_G4kY~b~IUk zc4G@oGXb=f@nW7BeR5DCa!})swnX-uW@3zA?bb9|z_Cz2nrzvzF1dQ@#yfL70fxJ5 z#2WbrsC;&^0jqfG2JGn42Tt*sXtH=ApBH;h5+n4?_T%`TN zQTac=``yMNhIEbJ#@G#X|EfE(2F$O2-2tY$^zf36B{K0>4>9(-fA7kOvjNZC0CzjC z^}JvD>uca@R|H?EHoh`gnGZIux@-!mFB`HdrkM940v52ZHG8X3*dM-HwRPc7c#mb5 ztvaSe{>mh}_~okywh5PfZEw^P5NR6E3eGf7OfyZE{%selV`rFed!lK)@YBA3O_LRU z+a>4Nnf=?IYZ@>AwA0gMHQ#pi*y=y!Z*EM-XueR?GJD&Af1Se=F1wvHRxc)o`NJ-Q zh@ZWH)eCI|Oln_qOfyd`GRX(K@uKqO>BPpxr7!XUmI(-lc<^Q5V4KWtU;^|wS%9^D zJDM1|wkk%L0CDldkSV}Myj(^hdLQmm%#Q}xB=96MmaHNHyI zct&QYd1CcxvgG0#pTZtHIKJ(zrt$nw`^8`Hr~No~ruw$`n#M~%?H7NGZ@XF@JL`Pg z7mWq<1cX9Mz^buKF)?K!^OtWD3XA&s-VFrord9prOE*met4e|On!oga@BEb(|Ey=A zj@+XD`f`?$8RH4@_{!(_iw^Ew5et`I2_x9mkr#mN z3jxQ(;-Oryrv})KJC^?{Jfg7x{WWg%dd?s4cJQB!g9407{lhqa+JP`yzg~_lNIt9G z0KBhO{5)~dBy(F~AOAdm{%h`t++v=%bkfR{))GKmGP?NukA6Y}z{120ct&lq*k)cj zRDeKnz#{Yc2EP7KZk_0(+J_-s^K~G~$=8oM20PUS18mQ`YwoZ%V2wb3vyQ2l#4E{rsbzi@Jk@7Q59lONy7C+am}qw6sK2i31kUfpgn+z7xAjatHRng(v_@gHG9@{KM3 zJuI(jfKrKn{uIHSC?9~9 zS)w+z-_I8{jaRRD_iUKY|G`!+`qc0P8`AJ6k+KaH)G+7CuIJ4#nHr{^bykp`q~;9I1T_9@hW@lRTh5A&T#rR@8}OLsNhqdiyWTPMYy7dMU3-BHcfkeKae^y;{lZNb z901-IF4eSaokaM;h0gIkGw=gPOw;!q(--cgUGFOV6~i&bx9ok(rKZWuuivY0`RDig z2fnz<%@6#y-2T9S%iRzBxBULn0T}+G8sawm139+K6ifLCtkjP zU~gaizhS3oviRSyuZwo~H|+h3_pdf15f^Sqr5Nyqzi8)(apL(=OIYv&zid`D&6CVu zxPQYI7v1h}Snf;5Z`kkq6#Q2{mS4K*UwmQ3FYeD2H zpRmVqu-ngj=LDi;Dl8Tt{-j+%9?RdKS{Ry(jnGe`jQ zhtEqj9x#Y{s^2}E&kjJw&h{1ZSu9)qsym)|t&D03Nyay}^~oN&{#mzxR`BVcbq`pDp#E8p{zE&$Cf9ky z-*o~Q;Bf=I4w8-8fYgBW%Cjw|;CUTpWdTqDQ(yb~@|Al0n`sk}fJJ~_T<2f-t6p6B zk7MgP0^5LiW0tF*w=wm%ZZQiGbr4(E(lG$euew$M^?&KP7C`>`-0wPnH9uYOB@nOm zc!s&~Mi$cm(F2LakXL(TU3r5&5CG-{gm3$D9YyyxrUjA=o)*&qLB8mG1IYu)g-?=! zT`=o~Hv^1#9Z5_J=6&J$(!&xzdLOanW;JbUX?$e76S*2EX{8eGd~}uKtHVKl1y^vH9}tx87ZJ5f|S1 zNALd2dbV6wKH1{Z`{LsF`7d~dFTc8rui5sahp`2UtADy(|F^4O{TiYj3my}-c$i_zq8DGU!|Ystr8r(M_-Ol)Px{4w&3`}hW%5^leP(IkZj5;G3H-4|{(v~Z z$^Yc6=wDyEV87?9!F6=@o2N(ypt;U*YT)OnqIsz>|20kddW3hKv%Xa6>+imzy!NXv z(!cz#zIg)y_~z?Xwo9p+YYT9^^jW*)!qS($?-zag1`xn`z_|8NxdNEhwR<;Rhsf`j zom!V&@$&V5@GGSau(ykd1~>(yUBtN8d1?N-FTmK>Jn3Ee-USc%*FLyVfZ)pGEL{8H zOa{a&Upi|7SqFH*h1Z>Y3~(i|EQYao5g5}&LF1UX z0M>QAi}mBb-|ha3es-726&o9GI&T#!YH^%z@ffHq?gi!xQ^7=y%wUCHIBDXznP6kp z1JG~a`yBm3>LLb5&Smxuu!icJXg2QBbW`1b{=LKMb;3++I>KR09l$5GNa3WheMPpgAt&mr4ZrhTz3UjBR zkl%4ozLEvT`nNwW)QIm?T;u+G$R!ejeHx++@xvx^t)PNsI9gi{9`X)>JVbUnzF^9K ziEjvF1LwW942)4A=_6i0q)36TZ=f&FGDFSJXTSqF0$$T@kPoL% zjn!~P{((Bs7(?ZAWdaPEjn4BCdhd8tMPjLebLFEVe$vd39OPZ85(*4^2#tj21Aq_b zKQuJNZfJe8lsFRK5WlCnJ0Yw4EK>M?$zNZ3 zzFtx~_^7ODS`qjrOjkHGs~Q5{rWkvS3^uIjC?2+$a|3kB6_`*yv*s9fmNoHz^|rjz z2_fCyTY}FW9}4T_-q*K#qCE6*lxZs@M18Nq*j2?lFvI2wR+gR9GVhdwEwjjbfEN?1>yIiOOr-RT$32c%dPkJQ=2{ zH@kjU<=)<~+6?7@U;1`nGN?lMogq`lVpWW`vEmSdM40%en8Cwayb~=hA)`RcRaYDoMj=3FK-cU2tS2ugIJ`cu!TZAEkIYv`2 z`Lf;<5Y6*e@Gu>wzzJN4dmq0cVlv8GSb|mRDX(~EP~LK@*P#2t__klA&fO&9_Yjag z5{Xz$Jc)sGSskSwObSY~`Me4?V}wQxp4Oe?&qC+5Pl=DRy*FvI^(5&0cW?PgZp2PK zwuVBudY2?S*L3G|Fnn2m@n-9RY&RAVqt!6Q4h&J|ys>VbP>??LTqi+3w5gMwh0(m( zrY_I6nMG6YO**&YnT?u9GA(zWU*Si_DWlRGJ@$&rgSN8!QwgkenYp_PqN77TgX}eW zys)&CGu_7_8xlMqhq#383VRe32@oe#7>OM?X750d2w7i|lz?YG)ktQ70w zAe!um|2X(O+Ty5TsKV7U?az%`9-iWI&xw3ja~6^2I0&iBz3f zCLG~v?`pDDMwnQ6lvGN$5F}&?>qS^vbC+-w5}Wa&Tq$o zG{vVU_Qd;tc#Lf!w2i&=X?QB09A+}0UGtVBaXVYA$cjyqTBMr8XH4oMm^t+QeCY4b zhj=34tc&K&e6cI)#}ajONu7+3!h&x&SxM1bhy6tE92xIk<-ing*wWQEZ0GSYcH|S5 zqo`o|T0M6*TYI5(ooZ==(9jTze{<;^%8*_ita7V=9Hnx#?xh@EIg*0+B4ot8tJFNY z&rgD)@C_nL~o5(*p)F5?Y`XSS~u3-vImx-x}?3UMxh8oBbqOL zFq?FL{vg#mk#hqBYwQ?&OY`qX`Ls`W2>Ap1Obn1`s7AO_nQ<#u0V7%V$R705$)9KK zg{+euHu(&Ofi-#{*XU<|91YRX-im+2ef%_%JGQxc2hjpXi)_dx z0|3)m*%^zKhCY-jA!uq97e{J)74SYU5E65jm)bjbnI0kK@U*KR3&; zWdanfuitLdUfkG9p!g2lILu)|bUv#o!lKNdAny!BV7RPb$I8AbpKU6OTi_Z28=$2P z?xStpM{B!}HUWPSKxCaqBC$4FP2P%s_AQd6xXm-sy&;M;*z}7BoZ>+!Z@6W6^p%%b zSmkdw0-v$!w4vUNxI^!^h-f+^2BuLrLW8(v*}cUjLX*RQQjSCmJ%|B6oj~269E6BW zn(RrTdld%d9JNKTJ;2Jd@40861me6|-;30-wv{x(&?2!D;4@Ee82q> zK+ZW0a61D&sA8M6vKC}6vn=Aj^~IsxWOod)@*^nPa2ILfY;#+o1mXO65HVI%(&}!X zy4wpbb!Sr2c4oQJ$MZH!&}ue+mBmFj#^-d47P?Q%c8|mu&Qnzq)7DE>t`v3f{Vqy* zTftkNvdY~_3|o+S;AL55^AeS57O#la_7JJixrI`vPAo*|Q@!iT4WkEdJ!i7rSVDYh zh1#tH*s6TUeNF9h2({$3uS}=GQpHm`W`Cs6Z@Z&G$v1<%anJE0e^^l@%k#TqM)cynXrH&L zu*BV}hjvL2szHI6`V*9Y*rIzIq-5*lmitFpoDfCVflyy^D$t;BPcZ;)(N8S;7`o}^ zX`?y`oLtKdL=V_#G_Wxd6TLST%-=^&_Hk#F4;xL1@CcXOhIQ|A_`}DMIm@?PNpz(= zbZ;gfYdWJv$obL2p!v{^xCEwO(;vNXax0aq)mcRA7&*}0csnP59z^|I53tHAWrLtH zjlhfB_r4;}peukHhvkd(gkZZ{MVRFomkMaTg*k%HrcDi2ju(>Od6fL(kAZh%vA`yS z6g_HuvS$|>kIqFu&SR{j8QYE?R0O$9lGmRVgn3JWhJx0n*}9kQU>HiQJQM2Km+x2M z3rj+vCUy^^=&2Tem6$!3?(Kw=}od;8HX3CCS?yQ&nq6(zH>MYwcD-TQchH8Lx-N%~79aCH70 zZ_{C-N}e@;cS*MDNd@6n&mXsw#m-Qk1432^-|&362+o_;ax3G5hHvsBhiRjE@R;ue z&L{0>ovZw4lo^z7%`vu3`z+4E;QPAS%zHLT7^*!x zZqVWLa_TP6oFeoqeTK@=m8HHBZ=nG-Z&c7uuZO8c3axGPCVYS>mdFN0L|zoS^9xwq|-A{nn$e z%3kY$l#O;UC`N1jLF5z|8Og&@op~U61 zu5h^4_82hF?utZM!aVWEaU4d)Es!wax|3r+uc`jL2<>UY(zE=iBX;v|o-5YP%Od!w zPy$7ZHr8uz!Zsn(K`kvOho6$ijZf#3yercq=CO;Mh#{;lPevPr< zF<>gC~5GL=GbhvASCiTP>l zhK;i?o_8zIAnRD`t9Q=mj(iA9V$J~M+_5^hC}K%GCLZ z+O*tY92=~?-~_E%sydM!H@Y?O59#0X6vR1TH$A@%=o%)`9GU$?9T(rDGQ9O$L(KPN zbfyVCs+-~<*r|p{-}P$d?yuv2vjb$Ut6t~4MkP65QcgKjl%Mk2O8 zSg=!v&vNw=+bjBo0dww^!t+Gww13L1ooUVNFeD>?CQ<1YSa^;Av_jeDVWWR^Cyx5? zQCEq+c(=7}5&_k{`hHW==W%RA8KsQ7+=bRc6!$?}WZ)iDo2JueZn!~0Fv{I-V#u`B zerwK8Qx*0$4!q#VtXI0wq%lH(C{k zHao%I?sY>coX7YG;##K_FiQ4*Z!+t@NGjrRU~$bG-$qu~@3=ESHv|PcR==ixYLR_B z8|VEL_&R}ak;qQs)V}f+u8>MBNMC7(!vs^=vbkH_$>j-5=JpnUHtLoPXAfD`^k#}4 zB?T^)O~;-zKUhC>CoHS|Dp6dg#E9c=5!ac&hMVXoNpVYD+gn4f}!*`6R zYsd^~<=rz!2e%l1YVz0{RD4Dz<}>ZB$7$_+j3T2K#*QryA;Pzyp8RemrxdLBk=#ul zHN~5W>Qaymvr56b#1G2Z(t00hw~zS@H{x{%{5zTsaWmNL4mAyd$XP}=@dxyW#4S(@ zX4ROS+|5ow{UpH<@zX5@*?D({>Z5hW#CJFDlr?C>#>U)#Ka}HW!F*%rlypj~r)FV= zsi_#YBV@!m$R8X&FjTyBS-dxsh-#MjJ}cEh>$__3uK=sX!hS9Z>WOXn!btlALz7h@ zah~fK_MCj<^TcX0eudPED$6*2&KI}yJja={U^*Vny=&(TwXntNIPx@QrqEO;_s`vs zEw%L9NJAliOvvAZr%E_Bj;>QA>pgT>LTK+g8G@H`+cle7%9kUX)_O+xoq9U6#V*?? ze!br$i8$~qMF*kbw+9BJPBv@}bU&N{_boqnrR27~;HZtrj$twj-&3BF*(|Bl?wOvaM%l$rU2hAvw z*|3eVen9DPw~Z*;Fo7l;t_onUUQ}A?HA|_;0>0?So3FBX%SeKR) zqWS1Ud!)eGCe=X(sS|G;FMFu0a>!iqHyevBS@U{tYG*3C zOGzDcj~%-L3ms$l{H_Z(^3_d6n6A5jMh=1Ij%>V~b9B?Jw{A{dgUe1cs*?lVp{;wa zRASAq{KkUqv`bsDxBH@#Whv1nBp_p1DA40kjI6D0u+3?`BCgP>2yK1dCtw zEl-YjudLz8-tNUaa<$u03HRL#x@0(9kPJ2}1RRNDv!FjIX0S_98P;{3VK$j9JYX-twhwGmT+Ib1b<{Zj9=y^h6`?^h|k&Y&2+^~lBA!VAZ z^n9ReUa2WF7+f^r5m{{5eZ7T#PVLK;?Wql%5dRDhL>c7aG1vFbIcu1}Q-F3(H#l@} zbjhrZ8VR)tq9w$P)7odupPO8NydnH~>yBHKTZmYF5(WFb0vdTM9TQEJQ@xT&RG{5- z_hf=z=R%*JZ$24%NMMV8dVI6$1+NLOk6yKPEaPE)ui}L8Z%%;qTUrMXfyfVJ(VB`V zZ1zDf9O8N&)U6#VG(?0;ZHXcp^-e-;ug*-Q%ivz!`*4`l?lcy5%9Y7~ZNSi1Hf&0< zK~@2n-QZIK9A)qu)x|>Sv)CS2*dq6wn$vjtYB}n7CO9368qM+_v#2#^PClaepxiqW zPT_=a_Pbao++=UIZ@zqiwI|gr<)^KTq)y^`^!X5|VCS!O*fr`tScqgWTTZ-A89qJ1 zd<$!pNgQ(j^zT(GcVZlWP&wBX6}`i%O|PuMZnf~{B|j)`cQ(iG4F_lIGV+-WQ01Y< zRle5=O~e zy+}IgqzHFa^eMd&LNpKmO^XeNWqF1*E9Q0ySriu4)F5kk27T9mfiF*B$YF)f5N~?EWkk-Yhm54b zP`&#PH}6Ow-plw%ABKw9ev$fu4nq{^miXgCjf?hmkQcG7V7cS63?-8#l8x*>v(GRr zGdfL+)o&X#E()iA_BA=!GP#Wd_b?HiD_+QYzF%gNJm0>UinDR^6swrGBMad>9ivU5 zd%&ea`d;%RSf-#o@%KdN=8R@~~`uSFbeA!HpxfNRU>w zWyx*Qion2S($AuI>&+BnjbY81TceT) zGN>MNC6v8yvqjJLVWODNsLMvkX3rPt=*sg~SAKE7|D|L&3`TyODT!_|3POwxpjUnPBgq`|UxoQBfL- zZojk?!swcpc}wi6O2X_Zb{#mPjTgWO9q%3aH5)vCr>uqOS%IIx#cF6fN*yM0U(RSa z?=W=J49MHn_6%+-*6{r6P3$RKxDc*C9F2Q@X7q&Rf`+jVU^za6^|xvTX$xxu+aAK; zA*0L9sU#4QfEK*8CSyW~>*M^O;sg7jFk8B zhjhDHl7EIaLunefCmtVWUgl1@org#nz~pOxVO|Zj=%%J*+hZdzGLCY5;so1}X8oQx zPYDP7y~km=SW0eq6t;b<)*YXg^BEXIbi^n7Ed9(KjF`RAcg6L9iV&?_1MHKS&kr^d z_YthopDLhQ()rhvA8=adK zIVZQoc{ZG3`*^elS!_|XhR@D_#vfR^Xu;mB%__U}EUQL1<}(?>EbWk|6G5Y`lZ38* zKu5-CB5Qp~C=mHpQv2a8I{yA>s$@5GknYMlzf||8)5w^dP}bB{CmKr!yTp5zl))R4 z(9q7Oly0h5Bnnd8?xgDrx6ebNUUw?F;jehs8CqK&*>kBw30fM$vWFpmB-4z|AxhN_ zSPKqd-`<1q)}y3p%0ps|tj$XFML#I)$TUWZFm17U^g%$JyFJCUftXN*@so}Wdaf$Ey1}90lmarX?`8c8c_|J2s5fdTn5(Z1j(pVeuE^7e)7Qi47#kFU_GAY`kKYQvSTD$epnH%Uuj16h z>6pRNs=l6MLh8kH3SN4eOS$nbKokv~7v3}4qZ#i3A4NmG_Y__h6MrayyC<&y%x1%S zpkYUmQofn0v3B`?!)5L|7vL3lgt{rodmV(@h@DBMeQcaIY3h#k8T)zJ>Gnz~-$;sC zoY-y%#Bd9uXL!qn_f79ap?AZNax(e-aU;uoE4ox|7eT?pT5Lz%h9L5M`}p-vU9C`0 z&AAvMPj$XM-#0)f{Pv)1*34=-d>O7se-yTRC!fm3#~V(6s>>SqINNtli4CB@hrrG6 z2^$!a0+D)xugc+38gPS@bZ;SWm^ej`H*7{A=2|utxhL;4o5zXWMR7^=S>mP%Vnw7co5@7jlm?lv2gY`Bv6yFUAq$6utc$T{ z>30lknWuXisf6N_DAaDc`&iREZ(GdCFjKcStY`QPx)-9pb z1b4fC@aD2jR-oY1AoqTXQ6IZmD?{N=*gzwgXGKWfEW#LQ;&q(FpyY2BM#aL;mUUHz zh@M-(;`9@DA}PROwfwvuufYr&q&x1MpuFA*z~a;9X5LvNg-34r#!OE^T$;np!&%Cn zUP~FBZQ--qh}FlzkV#LTMN)S}6ijgOXr@AcW@uQVq%8Lz)17=`wJc+m<(9h0ddd)L zufU&9@*_&0R)S*U)>sb+dGu8Y%;;PtGylAR_v(n!s&oBlQaK$1WL5VxdvSBGml|9# z>Y}unDzFU_cB%B}w>C8Bt+7;!f0yms6sOKt6eSl4Mq$<34_0yis6KBzA5KON-t*Xh zJMWXJd%5ioZm+e@j(MOz8l7OVx9RklPBE`~K3RjV&xi1)2{(m4QUKjeEGN&v$T+G> zsxt9j?>lsVU>HN#B%5xvhh*n-HsR=8*V^FkkzR|6$S0JSjmU!6#w@k>^3I?!A=7VP zyI#V}yRshME!p!&1V3wm>7|Z0kyDj_^n`+xkIE#daDRw2ro*?7p2F+X*pC3+Jr~}h zn4zY_LEwgLHk;NmNYTVT^fP{Dc))F1gwmp3_){wqXCCgab_ffoPxPD^9=!smY?n(b z>8q*1rb4C>eWUC2?5L0z=592FlKC9hg|2g9&Bb8OyOpR5r&<(UKD4Fcat7#skfbeP zUu*&%ECD*==o@+aWR^83Dv?tU1agMA&&znEw<&l^J>;e)WZ!sXK7D@^GZHG7vwh1Z z4!z?UsC=Ezo$1{ir&E=_Uf;}qqvlY_J%!B`UpW*_5(~TCqAYexgZ1p^(Joo`JgYKp zq$0P=JC4>5a=-OC^oA^4-O4e4R(aex>?HT04HOiLpvNh}%j>`$Tw&mv=I_vPYY%DX zqLR5Y5pTup1eG}X@X1%TyhkLWx}ZtfPrIa#@O|7AV&m`p?3Pc%a1ti{KmtufxApwy#k_EWh>GKGzw9XqGnE7*!oODqHav^f5dYG{vdNIVXBinjKMXfg~h^D;sq z$;WAAKia;_q@sL;J_hG29#m!pdTwD)*v8M7b+GT-fUA~9?wT-B_~Xo<#C(VE*u3u6 z3(guwY6wUR!7MzFh}XJ*@PuRS#Qw6gl>1Zqi$SByOQug4{o6ON_#P zGb;5KQv|Y+K_+V?M1O*AhfeWi`gbQ}=G%HI@w2ppq`YC(A`7q?h}GbwM|oP6bmQrW zEU3f=Q~3YV^z0BDZQ$;W)9Q+3&=bX>TIU5tMe#+1L?7hD*<0>a1f$VNt zYS3EC9ftP@p^e6WC80n?b3-0nUk~`14z1J;;pe#n1?3lp^V20!Y&~gs*PN2rzrUkN z-4x(av(dP9T)ic5Q;Y1`c#vLwWD@DS5r)(i@x0a{VP2Jb(e!Lpi-Fb6hz4(Zt2i8_ z$>opqTD>&FqLfxhOlkUZxEWJ!KpbjY zt~2=BXYNve;uJ6yecg;bpElufF$*VIbicrr(@6tf5%gl-vuD~+1C1C$9i6WdoHE{t zV5xJeiX%d&&P_*Bt#|$F6xhbUgm-NF8KcXVeA-J81dvrvKVh zVxP$1$r#V(km|<6Y=vs=mSl#px!v8@=OrQv=*Iki9ZH6yV@BdXy0WYnE*3vvO7){IWVWB!0j|d_NtRgAB z_CJ)0c7k2+A=XPC_04hW@iZ z=?@{KE%T@JNktSXe`S9l#{ZzaDxv){zOZM1nDOgx8UGiO)nQm`H$nx9KPoHLuVpJrT7^4j_A-9dk_-2j0v7;gHuLqdUd)~R{y#STV~p;P z`dl?SDpr=pf|hlFi^zz5$|WoU|5*$F0af%0oVrZMM+g1P>KYmoR?~x5*08yW>uBXF%lH{Y3$(%H!JG`@)Z)7 z&t3?&p~njK__xIO!urA(wxzOt0KF=I7nF8^zNUotxRFZM&yn8bTGmjKVIOk7U*9jJ zm%}t_Q?|oPaW$z%q72pCZ|o{j^h+x@cD%;(5%D*i4+eS=ANO(<`fy@0zl|N&sO>{X z3?njJ%RCc2Tun?yjH=k@4(+!OE?I3YpJ8|w;Gd4)RxkRlU5m$5%8bptG$KXk?M*>d&TJJ7n=#AUxU@M0Q6*C>pg{OzmhJc~r!3V9~iv*Tt zu2RU&lLj^e^vVjb1p z(qi3)EB(rA^G0jZ?4z-;N~EJ*NDFEthq*0HF$=8&63I9esVZrsxATIkN9TBS__O%! zY3>FWdwZ7C6^|3_f#MYX8xi}uz0Y*azNHH(B8;S~7;MR@p5`HcZa!foiTcowm5lW< z_(-395WKuV9q>R7Kq5QrR;L|G5+Z%Guk)J=QN5lL{O+TVc~PRl==)&4zfY<0RJ?5l z2w@}J53CB;Sb4B2kg0CChN_dV^1VT%j0|>K-vNQ-6o9g$?#3vMXEnwP?S(9 z!jTXR|J?}ke&!H=)UJJ=j`_+)FL=DRL2ZjfBXSS1GbkqHdBut5Q39`w>$VOO7O7qW za7c7axG_4WA~G7r6E+Xbkd^Qh6>*WR(Pf~yOHAw8*mJ%`Ls4VHjn0CcBe22NIjNVD z6TgwPo;Iu!9)5{P6L14N@r7?-36h1b9-9s(Jibg6e7!MjVXCoLov*teICzzlI zf-RaJPa1iD7HaD}S~GSX%WLj89=MH4O>d{yQY8}eBLB#Ce+%YzSXvLG zx#`oxG!=7f6Kkrx*)V^_8&caX@HC0l$C>&rH1n0U0gBi0`gWhz%%&B2eKYMvGmjo1 zBc@Z$TPno_O-a-aXILrXSZD($JSQ1b4H|_ocx7~dKJObIV>F`glOC?%gL4Bgm)Cb8 zFkz|YYVPO8N-ig2bsxw&$-KUA$G3KB55gHL)`Z*v&+1eT6~$N3PA=)btK(v%Q7@GK z<+ju5sx^9T7@y%4ofI4zSL@n8lT}juXVQntlh#tY-b}t$wC|lKof0WQ*qK$l%E%P# zGqU|2zPh_z6K&*Je{EA9&&hFAQOPt=1`5bs7Visw@ zDC`sQZB`{)w){r`El_KLy5lRPXi*Wq!PYfjH?2B7e~vumnCBTVZYu7ox26VjB1Uj% zawy{E2K=|g+QR)v?_I;XD(D@-nR-en*fdikp!HY0vbO1c>^3Zfl#S)Ld~7tCZ^Gm> zmL-Y~(lmf&Rvc~@4`!=VfyJdZDz-b-Ag?6&dxjXfa#gVndZ|k=N_!Hjem2u$;Ixjc z{)Fgpf3n_;)wX5y;u*{OiUrw&OKJzmXf{M8cm{sE%Wl04@*8h{AzS=RLvP9dcBWDV z_wC875PCiO8T7Crs^pVHQ$j?qOXg$ynq5oY0_bhvp3Af;WF<|~IxL!pOJU6td51%# z6?y7XK7pU%PVFY=E`Ixv3F@Qdm+6-73;dWce|)tjI6(`Ba2>b2UX?i^)Lj-ELw4hA zWRH|{ASzDy;&|g$qi(}46P3Q&Aa3eYGxZk975>}IO=4uU8#(Nf>dCuZ`(Wh+uvcmAjYxAlj4dZmB$l7z zfBK13aE9s6tG{zApE}#7<_OOFkvHEdb7%MMyuVcVT|`gEi0P5Ze7_I*f5+zjhK`5j%$NZvy*(x`=@6dVl#2y^Xkw-? z)7v=QvA*VS?{oE<%e!FTx_GymMdOCzgX_*6HPPB%T{eb$Dn_4~&Rh1J4@z3FcqPH< zsjjEn>bpRv++O^+iX|?Cm`BY!lliG%H{RPR%u_>w8;(!O zX)v%2rF@8xI4a{(eOTTo>kC~gHSC9r!q6j%&Gh87^u678tWAb=e1OMuLKqa@By&rY zkUGt63ObO!jvoPenZN08vAJe&e|LE>l2Lk6;vihgV!W;F=HA^-C(i)a%j8%{H8Bc> z*Q?A}o&1ikg9-eJX)sOUho6N}s;b1=(r-qIqEKVze*+8z<=f^u zS$Pv|QXNt<>Do^su{RboAp-;EJ5z6O5GiiXvgRb?Dl$zk4lfWeA# z^m?+%(WPmLU_N?4Jz=8bBZGBMPMR ze3&b`?Tshd{{QrgDRa9Oly zS%&g+HZGAn#(^I~e=U3~Zy+f#QnQx6jk?;ohn!F{?In_2-a-{^<5{IP=dT9`)44%0 zuQA#K&?Iomy@E0}zlelzFO9}N!G*YJuFcNQyk~m2haO6`_5FRUo97)p){GByY6~gJ zfJ%WX9=>xrJm5v2pYa=qaiJ><;*;@4j~e>gZ3Tr0yYqv}MRb{m$a4 zrM|vVcLN(UrKsYLTGl)$9U0p^9VvHYvpCH^AHDDOWcI~pG{uw7WfVW>J-ly{U)RBi z%g5?{Ms>^}f044v(eUJc{H9-#VOtNr?gQ;Ct(WvXcuhct%q`HnO?nhv2<&#s>HFcR zF^ndP;=iBG%25h`SD}PO1c?`=J|&Zq-6QojQ;8PBL=~&y%VALWl7_?leh%(Q2rhgT zg_XhUvsguoU=sbBuET=d4^04)fIXrg36nF(lE^t-e~Xi@I3}(jclk3t30|oar6<^@M{${-?%kQae<4S_4yTQ^0TM=;17y3H&xD_&+ccI)= z4k*4HUNT}uaW&bL^7eFt7^eh<&kWsha*{ugy_GXgghiSQgtha-fVY*K_jkv+5_@!R zdgtDie_EF)nR~s$7E3~yJ;?=P z(1b|Ux&fxv*NI6>ij*4W)o{_(?bE)qCUy|U_&nZLL@~!_wjX$s;XC|r-q5yQDknlE zf6Fm^CR9e(=GePXxNclbSFBE=XG7>~qF?iATGPgQy2Ya)i?5A1Bgz@+I2X%?k_%Og z2;5IQE9$zP@Jj+wu21WN%VHI;Td_mzw6;HWf3R75Lqsv~dq)lyi*x#Q-SIY4Ni7iI#f&+~e_YZm zN+*TL&Y_bv*K0?XHK}g?gy*Ng&~5+d)#Hu|AHuJVceDO_D9XOTD)!ZedziL_PGh!F zf;`EV)c2Gbx#3IIKsU%o^JqL~e;NwG^Oud+ zVQa9jb_;3q%;pk^Aor{89eV0v!a4m>yV^KgzCQ6Vph=B~p#aGM%5?FY_Cg-XCtA|2 z_lJm30$&I8?kE2A(BNyoVbPYyYojS`dWi)|KAW7{Y(ehmyrdWSB59uWkp9XNNq?aP zL47j=YOrz?=tF#yib5VKf0MjS(bSK1qOX@&J5`bkE3Gp!XAYyYl^yzx^obF_539)L zt11_$psA3MZKF%DiLPN|-IS}`cv=-tS>2YgX5Ds(Jhl>nt$e~pza4j^b!BF6k*&4I zAt)os!;c~?yknAQYvFey#k>+d>Mfef^wpjxG}p8HzGyGO@*K)?tX6eNrOlh!`*6uM{MiG#xf$(#X;gla87S znv+2(ed^Z+&%*^_e;7xv-^imbbA=9?&E5^r*qU{0&sH?uD(W&PyfVb8@_6}ZFb28N zP6`yLv_8f+!vGG%jt|?&V-EDTS1$dKuZkS|-<-$4@T+zh6S!iJ;>C%!^CalM}9 zZOrcb(%g;iYukQym@1TY!_rV>g&lo!h-5#})JN@Gd+UtKe^-|$0j_TO+`Am5;<^59 zFl&-m*23i7`*RzB-$!T4j1Ye-wY;RWCnVErD4JnK(}pH!Aq!kLXqCoS{L}@agQwFQ zO%z_5>2F-os2-gOX`}PZ4(BNhXqD-ZFs1E!LDdoa5G}3ZhltXv!nNx5A9C&=Kyo@- zVgr$|%f=uN24!kw!hjmvCsJ=y$^D7!PVW*v&sa7ze?t{Kq{gnNXWp9KeoOe{dsp*z z!BdlFmo(V)n0Xp8R4|ng?t=`c6`9NRZpK7R z4);n;!cXihStA-WOkko^5R((*JBbcccMXWKzTUIs`=Ab+y0+POQAi^EP!X-gqIFdH z+oDsrf3CivLBe=zLljPiB-9lryh!Tj!NonggW`L=qo*&eXV30k3SLoi#1A2m{jwb3^Ab8pYd4 z(+_1XZ+@f>E{PHCpC`Znu>e^Z*>PQ^d+?xonvrW+c9ERQMC)EAEc z_;9@so%R;;UIk$yzpq_LzH;u*3N|&eX->Ln*Gv-@mMC=}U+Qt;ouP?-yV5I^wTN~v zYs(y8HYlF8E}J`^#i_;j?D2DYF7fvU5p7C0tGshhMVrz? zf6A$&x>za{sJCN-)c)PhErQPW9W9{8p_D0(;f= z-M0;b;2MyYT!)(wS)9ZKn4p~A)O#;ff4)+NQgEK{gx>NuEw;=HDQqB7Jtq#7LP*AX zo2P_r`7Ff^0S87WBCi-fJjF#RxR;^GJ})vBhiz`|Pj#5&N#Cu&v#oAC9_^*#GG?ax zk^JxYvTn{(muPhF%3AUGEgXW|l~sAOh3Vws#&&OL8isTgRiPdA)gPhiLmVFEe>hh( zglmz??7Ri}7?>hUpe;WcI$w9{^~k835)%RqE<_gk%6v>Ra4^fK_wu$R!CzjX+8)j2 zbO-!!i{3m1*JcqqKB;I_nFPW7v%Co#z9OWWOSrJ=f9VxZ+K068tZS`pyP(GT$ob0)9g$xMZgay{>|~5@ z#dwKWe5kN=C-FL{@ZQ%5k5e(2$aZ2>+Ja9zPENxSQ}0Q*NK4g8w>zD1-9(@Z2dkz- zEC#PbD;MN4Dx~2?1QSY3OTF4P`9>zN{H;&UOi_wyMA%G>y<$8YK^Db?f4-NtXRf}{ zsJLAdD7};39Vd;y{n{0Zm-IQRX}@TcqL)@92*lv%d<*WJ%*6=@S>2J=!_g)HyQ!y} z5h2CEz&%z-$7tDkv`G7?O+u7Qx?#_E7T0kslpYh+!wmj};70nU^;>+9MQ zyAMK{4+a5)Um>qNV&)aie<73ZB}%+_6@Iu*aLCW=a`ao1~^U4vF?}UQh^>eBUvuasqLtFbrF7+_?LVB|iK7 zg`X4(gOu@-xPjl2F6nsP*c?Y87xfmzeQOoi)f^mG=k(rTHGhT=e@jQ~c6;cLZs>*I zGW~8<`h`TqCV!DLQKdqo)|ItMibv@tI?fC+aRh_fEyXSJ6NzZ(Z_^P_-gb7V6?^B5 z_NJZlx*5ytTbsvtsmV-lWsr4lo+TqTw%F^QaH178mr=q5UPy=j7Inn>xn_An8WMD% zR>vz{*hNm$1rsQBe`RV8jazE9k;$>AKnJc_=CVJ4I75A2Yj{TNRV7$IcPyWfmPhYL z2`ZIPS^6VMO&@m-rzOLki0x4whPu43%2c$lOnMf{v#QLiR@W;}QE@!a!Y|}Vkz|8t zFG`|S5ltHv#vre8wjtFD07^m9S<`Gro9O}19?@l(H8Avve_hRYL=pshoST6UcoJW* z6+zM3#X}js1GIhd3H42oLhv9ewa8GdQ`AnR6b+mczie|;M&g@fZB$16blcp>%Y=*S z%YFLq&?zB$PFr%vvKHwcBRgu*f{i;dd6j62PWSQ$LDh0(`QtV8{m{e<>9pCF;DJcxof*nLzaty4s7RCz$ZH*Q*($&#`>fu z2V}|98ybY{dCxPjQ+WyY&R_Zcvu?&Kw*5|&eMY6M8O=Rd;+vwcTjyHDw@KNJMaRj9 z;hr+`X-yRmekRO&VhTn4nyDbBib$_yekvHSe}n|Wv!u@xy&y^Eb}b+uJx#r9h8bTn zqPYQkuDB*cq**S~tFY?*Cbj-n38JO?&rCnsl0v<7_pk$hV`O=upN+A~7Z$qkR8YZ@ z2p7#j-9|EjbicA~-46|(nrX8wCZG_=AgsdY5Bd|PFD(<__Ui- zf4D!0aA#^v)4968_X(UlJ_NOm2xXi(VgDMpZLlue@vF~^Wsm;&U41<^@>?b7Du;&1 zufHq144an#;aX#{6ff;^6-^fa7ojorNoWMz?Q_B8mP9AwQ_9A!*uhHase7fVeU5jc z#qktep!9&7;_TB{!s49Tgi0fvNCTZde|u{C;jkKwDxXI6aN#E2#C!jIj%G<-Ubb(x z46pNw-Ys@qgm41peCJSCu_eBdZO(j;4>@b_;~V4~@$?j;z7JA-ruwrW#SjXhso<$x zkv^DtL(%ITdkkeRf(;5kFSwp+9CYm`tpxi6p(E}=$bIPag0Tb{(C7EWZ4+(Lf8pEj z_>xc6LVh(`wxQR}GOBnl*kj;|m>y;*$lnurhnJrBDU!5v_l?Bx+3HwVcu{--py65eM=;;rCy%v(Jn$|Ip(VERarjH z5-zPi=x;%5c=+oLxiStmuVO-^fBQ4=?LJsjZFz-$`RZI>M0Rne8X29Fl=)V7GAV7) zhuY(8Ea+zp66o|kQ&RO zuz$Gh`I2hZ#G~)R2W-q)u5>ri+u?#Zlu7T%SVohm@*osx9GB>oyqYG;E4JP%sqg=s>jmF z`Tto&_}|pu%l~incWwif%x<>vVK_uE>R4eeAZ>O-v(c=&=dQ9irJE-bafNOYCuH&5 zZc~h?@IB?(jV%kF9Q>O%f7=G&1@|=;P?lx+fII)FO8@VN1w-&2^O&twf0yq4M-AGV zf2sw47W;Ys7N*ua!0KH06S!YyAlAAFc%IdNDgZAED5uf_aB}yLkpAw0dD=M;POkqE z-D5N0Y8wX(&NaX9hYVt1p+K0q`~(<4H5jgq9xweBE0N*@-6=ZMbn|72K7^dr? zHD$VjFPO4m0%z9Q27rKdZI8Ni6SAoMd~~kC)*~nbK~2G!cbgHHDncXLMcUY_ipX2V zdZjEN`rfQlLK5E`UJzDmy`qp%@(FZVzfW!g2(LCk4qp?42{NzZDl<%%q7Bi^wnc8vr_6=zCHxbw;Y<|(#7S+Q{I(^c)~0d#1$HQvN_u!rR-2}pJ7H$tPM5I zI#0rgby{-IfB7*!G3t@i`QF0F&OL1AV2sjORG>(NN@aSO%b|O!b2@3v8Z(c2k0daT zED_c&yRL319@}P=caH>KVy?$&=1H~hWuY!s1G3z<@xu(I1&A0|X)BY`Wh2HhBMVAq zif6V;F%^hZ9F`h=#MxIbw-SR|2{PNu!Qu9hYt~qte_DZ#tPi|o(P^eDq{o;JEzs1~BJ zVdsrJSUpf;P&2r(9^R9Br8x%p$W~`9dxKGLEI6}=zOHUgCsshqg%x*H*Tc73&T>^>p2`+WvB#0Z z@%)vWx2lKj(RJ=4+-%f93fnS#Pzap6`aN4Ycp|)3eP1@06`ifslc>)d0-H}nx~}jI z4mPfKL_%blXiSsB2&pKI^~?I{fyOsZPqk{;O644p4)#U!$h3XJrjfkz-%FmNrMmyy7 ze{*Cpcc9I0Io?-+vcATMr!g-^&C%kI&x#rEhC$VQ;q<**t%|2{`lQ_FZMV5}D__Z= zmPDo6>$?&jKn6$YOTQQrCw>iqB2gbwJ%*_ar~%y9M~S{veE0p*`Uqmx((8&P)E+uA z?{JcO=80oW$(i8(8W`5OAB_r>S$mH@e=|*PbcVQXuEZ%d@!cyDbx4g&4^zcn6RS{~ zMe~r*nEXS8)tyk9n9CA%i1ivEF+(lxv_FiF45^4OZup#!j<1#IbuzhH-4y`-CYNP? zMumgp4Yp6Q!n|lcJS5R19xturt+sv@QJBjJkmq)9WUZ2OEtj_s2ZXla@0Zoae~Ix4 z?p+}LZ~djP;Bazkzi0guM;FKujp(Z;CPzuS7qKftNx8|V@r{?Oy~=QPRwHbHkNEuv zYwOb-_nPV_N7dGcwX?A@b#)HnsbTzJz-#(ci)fv)eLQnXkTJZt>{H&a$Q$vof2E=~ z#r{ZGAq;wjT2lH8y-(Rct1BPbf34((D+2XdfBmY(gbfBoqnn^qi@ zK*7AD;q)R7=Q|L$A|NRFK@Zn?k*LFl1WFKg1+jTkTpN|FE+T@^fJKqsDt*dXX-Qe< zVdem%B-E$lB8Lti&5)B`XL*Y_=G$&fvnzs0WNE3#GOC6LY5?&WRQ`H%9+W)lDAQ-C z%}K$$ZPx$Vy<`>CQv~{tf0*j>?hr`{$S({%G*#BX+=Z`#HX5&bBnmGHWhVPd_dw4d ztjYHM&?fF22LHhMNkO6Yac zuh*=nZ_7K_ri(P=e{PMbh1Iymh%RWb!jVR0M6LNrdX(>~$(#&s|Ja(-y(Q*5=L5|> zDvFwtaB&<))C^grMw@8|X7!NwLSP^e@Y+1r&U>^8lJd*8Szz8@p%AByW*!=9-WNqA z*G1Vi1kIqT{8VzkWa^M0`hsjLMm@7!pzw^a57P&fMn>H4cIx%^EZ*oSDJ>zBp^X8@40~f_cU+p}PILtIwfA7pfc;AYgv9dNt(C&Xc{WZSF zBrl5yo!i7p^VgfRA1mYiMekbO?w18}a8ql2ReyJREeIV&{G?@~xBAu?%J(#|F;`nd zF{CtnXtcRA2`)K&LA?7h7T=%#m5y|0a4#U-|M*de;SiDqu z{$=Wyf6}_!G@3%!B8#SfP_P9lsT1REKzJf(J<|8vptU#g@)Hj>nvdc5BikZ51zSbx zbH$9572EA-_1fMfk`0`U9vaJIN{hpzL7a5+{dUW1KkoYNOcbPwn zPNnT-`9EEo0;}7N^Sw+IYKjk2ZA|eA8BaAFfBLvd))sV^u7>aUXd>SKni3l(nkd3= zfT=X$vQv;iI?BhzMpIViiMNx`LNceFU&`;MY`)5Y%zJdn4m^AhJ8b%UR!l!6BRbQI zfVKT^e!e6tE6AtgDl)flwm=A^vKbs3ABM5TKl+lphP_HD=eVsv7&&J%B?BhtMoDAO ze~+Y~Ib5BzrD0SRJp$ej^?_X|FS9k%NaSCD=9qD)bKmE)(f4PLg!ydjQza zpZ2!+58~~IaQi87{sYTnjyObzIRC&un4I;eSFW6k^iyz?x<3>EJo^Vcmr6f`e;fA) z(FOn@&0nFK|3G~|L?O6;=#-zX+6jPu@el9j5AW*tFxo$r?f@?L8)W=1&F^5i<{+S% zK*&MzL+r`aPZCXSUo?_nO5!jxuebY4==9roVobT?N$GsVc;ZKI^2m5%7zXIc6gYTk|XrWR?TraoUJzx`@I6L}Wk>x%SC2wax-tfBtH9%(C{( z^0&k4_HHIU^%X9l@XqORmmDZpt#GwY0owrmh_Fn{b0EHTc$TM>@hzT_SI*Wwj;VD9 z@S;$EM9%@3>e;COg0?q)n+Tyv))i?ueSpqOQ z!+RaDj{ae7*=Nt1<{6t~e?iQGnirT8H<`Nk7_jKVNkzOPw_IHZyrZWKkO$yiU1H0s zKIG!HXQ*1%Zz*>vH$@0JICKY z<-y$s!^y5dn*qC($fla9f1aG)?u`78zx-pTPJij0e*dxQpZC}Te@*_g`_HzL(D?oz z8@77;$It%pqd)+;_;1^De|!-&*nj&__oq$$x7{712KU#WK-?*R{3U+$fBDLv_Z(5~ zxRdL@`>k_PaO{88`P2SS|D3=5sdG{BJknK&Q zmtmiodG4ui;nH~l=?6QJA?5;uh=5*Xfh*j5AOQoYb#IzT|FMNjiqKP+J;PUB#ppRb z*Xo+JpZ*eV>S1g-`iT*;cN{8%&x9BM>9N!f)mj}&GQ~} zI!eHd<{jJMtP6$}2E}J=rsfra^LK@*L%G?1^>YcxrrXjKehO$Z&{x}{zNDmT_^kVTLHupbIaXd|L~c{_w`4{^nYUE&yQ}S)5CxJ?62P6 z7$rD%s6X<*;uC7He|_z5>|)ONPn=W!b8h_NY53RQf5d(fI11ISZ&vTt$DsDeW&U`AK zuL5U|c+mH-k2xJCaXW%*%%i2Z)Y}$9YTP$SH$<;AG~b1`S}$249Vk zM@5$x_#h{e#%I7I5c&x?z)LW08}Tn*#XntKe_00XR#0UmO!7JZ;+6O3f9L)cEB?f& z=bu<$|A{Y5YN|%p%LnByR&Z!IT^P>_-HcoQ2z_6#@y4U{V&!2Yb$%C^Om@{^3 z-mgF*GaO524#%io)BD1$Z;b#DIFnWR+HIlsB^LM`Z~&@%muk2RC)U>^f?fnCZnCzn z%gwANDt9|MHo{yn>MSfH2Z?zrQda|=n!W;_sz{3fMT>jM0|5YQ42&xMOT3x{oQXAx`zMW z`+3gkI1zV6tPrTAs39wdUna%td5Y%n)|)@JQCm-Hv^ws`Zd$TDIWd_o^(<}Nd4dcm zFlA_F%Yl31)Cxnnpn*Ktb+I$$f51XAvIv(8%0*_N#AiG;DXJZvCucW=F#<*`QS@{W zpC(}kB$>k+-KAVoJ+4MhtqrhXm_B8M)Hb%@VhBo#Y8sgh&r!{KffNYp^&={_2MVd_ zT(2A8iGyH*aZq9qhEKC9!n8Xwczq^3sv^XgnUyKeSd3s`W3FtkYQZvQf9rDMILHl2 zoy-hLEeGEt8@W2vF2ey|>LNZ9E^0Q)}5kH=HecfeKr(tOE* zw@rKcp8qd9UzMh+_I$wOs8urCY&D;J)Z#&&YViu5qhX|-;G{t^Ks|(Kg)HAxk z$YdG?at@MW16+zxiAX+OZjTkhAs>A%5|akn!q(gg!(?)nytL-Yf5koo*lH#w%F{du zQiDp(AoUe{8HD zIw#HwX*t)+qY+d$8wn0q84ytGzX+hLOccnq>I+7tK!SzV2|igUyFL~h$PxmXk|{}O z!@!s|7dGbu!+s)nes;gh`%5rR-ta?-~TEe-W8=onuOkzSlR9Cgl)dQcz) z)f_>Qs7NrGbvE!|_{lK=ivf0GFPo<>9N3L2S@hCfzw{UXf6+ayKbHL1x?i$|zsr8; zl*~JS+2CKcG|ST*i6qk1n#;a<^!2Rg_gXkvPk#7qi@$j>YGvs!I|%0d>)b$ZS@+B) zE=UoZz4EcvTo)Gw1(B{|cE>Z}G7v7tSd$#Q5#wC>mI$FFCPH(KAW4keb^~&ch)9Z&-7jQ+j zzPSY$xGr=$pCP#NEPwFd7GZ-+Vp7s{Fh=IGxS4(l2jNm+(L&g~h zJ~J`PXc$d+b7l zTK6eq2r8ch_x68c+iVlhmJ6i!Q;$A$T5Q zYx7w!IMmFT=hV-t!wN+Nad~HmE$upTDMU{re=7+VRnY+86$p4m ze-;Mc%#IbuM6(DCLKaSxzh>&W@Q|c_dGs=zT`1y(!cOcI6$AwTMBFt%FxQrYxmiN0~>=^J@D zF;7&W8a{R4h=^pgDhB5kE0ri!89*B)UyKNLfL(xVU-U1}l_n58*$w7j{_U48QkdWX zyWc)ii>b7B(n|)tY;Pi&4_Oqoe`EaoC5L3I$G-HdI6~=Vh9D~q1d)MieZa+p zp?P}uX%!-_g-8sR-l?2JZB4VTJHfR#10n7v89v=!xg`AoK{&b-RCR2lf8OusE`|j< z`=N&jBzr2{5su zyer#5uU!m6t{7qwJ?J@+u7_zfL6U_wQscr6TC+3b5z*Oj=W`7Q0XJEZESwN#x{Oph z2nMc=>}-%n^YZ9s)xi~Pf5mcbo8;w!Hfcn-JKX6()N#ZEcrr|k1nIfd=U-@76jkSIIU-{byGT%Oq_U4Cg+v6{|N~?SRSi>zJeb*7OCOLoc#v5P$ z<+HWdTiNleOnPlu>$mm4_($oamVZTZX1TIqzU-ysg>nCZAEsZne~$SV`?|+u@~yAj zXM$HUU4Uerc|6VMhlL3ua-Hyn=@A6fYh*<8s&S%)AM$iXxocaYyD@5TDRLJWw52UH zOaqSedWdx4GP6lTjO5T0=@RA4Acy1%f)XUt-dQjuk7rf}2*Yu+#z?xH*-#6PAfZ?= z7aDJ4@@R{mXynV1f1rk5DU@9-lP(u!pi2>$I!G+4r;kd{5gZeo5?mKc0tK>pkZiOI zvT-pd4gGBGnmXN?Je|4uP;3^dWuk~8kPZdS#kEMWkc`fY5V%WGpdCFCAu}ivL-__F zqXsC9cha*3MN|qBdalCskSVx<4%eAOm3~fZ+@IG|jxf0-fnNL0B?2A^h73?SPk zL?jf~=_Z1T%$~{9Lw7*Jt%#%!leFDWk$iSO5mZXwG{C6ZLD0Sru5{N=D){pIe&*L? z;@j`~;?4eVEaz9e!~DBVobKSz`iUQCp7wuZf~_2EZ7w%Qx%-kyFTJx=7*K0>z1G3f z!^v5-17!Mje+|9(G&R0`N$_iXPD{?BQBX647Z5mw@F2P@&ARJ2JPmZ2ieygDER)RF zlNCNpbsZ;L+)asIz|uRUCsbE4aCRjslQc^$lSezFEC+_*LDhA)h;!w7VGt2vU7DFW zUz!?NR_GG5Nz8DbbetyY&>*!C?h}wFdp;L4*grLde-gt9QWwMuSy`b$rCqID`$*=+ zk^=YAAQ`(*E^sDBm?t{McZ+g~P%yAiARUD>3o)@ ze^#Vj=)AW8f~uP8%a2K&63FZI;;~vF&XCEwhMOIC^G2j*!%WB~HpvDWt!ZS)HhSFp zRS)hgK`#K^+$Z5w$%p7AAAaUv&ASfC0?98M@;xs(d)cP*7CY3upR&VWWWhh{Knbm% z)B0?}nNUzZL%doSE0=(4fMm)>;YP5Xf5*e&9SavT!6*)utf(AfO`e%CS?l6{(7CG< ztiAnZ$Tt2*{OCcb>5u_OqN+jARG{i&ih+q|**(Zz9fXQ0l@ugr1i@fG9WQki zI~7R~cInb8#5O(*X7mA*5=YjI7cR+I=F~}U$aHCD64Rr0+i>F!k{Jl;L5tTKf28vP z?or{jrbL>u7o3nr@Uj6GgX+(3|4-5WJs#NNuPuJ}iXFaV$Zr1k zIN>{X^R3grah6wG-u9-oC#T87kj(I#($&%B;Ps9h90cw{QfIN;Reb9wW|$;2y8CgL z4O~8P)fX0Kizvao^cmJH5S0&de-P?*#FW2Do|=b4eQ6)k0RfI7RQfzI_4zc~U{_#1 zdCYi85K68p_9L6Y#)8DPqGW-1B0Md2UA^>yd*K{`xi&u@iw0fi$aM*^F1z7i;yhD= z8@Mg{nQQO@1E)3>LWw+w@WjR-bdaJ3d3U{N^_ZgwffyJd%H68l9fv_`e?eZIu~hS4 zDZSA8aq(wogCNCEz#dqo!mZ|q-o;XW94OpTCMwqEqu_-D{4I9!_qdw+{aPQ3Z{OfA zHdpI6D4qGOQ{VR57iQYhyI=UhpYh`s4^_`i)N|i>Z1U|-dQH2Ym+lv)0QQJ-hEeT? z3L{Yd0hUtV9r!?(RsYz{e^whRHnvQ4mJJ+&qta>00abR3U>gvH!jnFW!+>L~JyC;t z2b;#8oPN%D>NwIh5x59t8V1fwH`E6lL_(!@^cLr6X@~{LXgdq&(hiC_NTylyD-TI} zr@1f@a5HGQ^8qeSVuA4C9KT4-;sh-G@omlRSSh@^*SFDvTgm%!8>ld z_>6G$7d)$#sbBKhz!Dy(SWE z%e6Vd>L>!NTH2Si&?(Y4}$X|z~@*Y$LF&MsC{e+^7=Z4p<{y1;@Ij4hJlz<* zq1};)Fp(N#!gW}$kO`N_$zb1&E)JT^ zi4HlyS8+PL%c8aV z#VA=*ygS}|Y~*o`;vii*F^GAf8DuVeN;J<7%aRJE%O-&e0wj4Aa8V*BtRk#i;o<;C zi>Dh3B|^MlIE*S?$h+OhqdG?r`Fc?9DCK>ee+?2Mg^GpmC}=YjbvG6iCU8m(eMIa! zaZtlTJ>z$R!~)jULHRE3vG#O931qeJ?uS##nSq%_Gmj$!%lR zr1X(TzOm2O*#0%==jG4;iX|s!t4^a|^YIs)`d`H3e)5vuyb-SfpQ5*>N zO(*Ui?D?Rhd~4lSLRVE=)Ua*Q?}Xb2O@br2Zw#MVRBArv@9+_JkWZ!Sjb$UV%|8_wj=8A~rz zIP@A6Zo^B3+rXT+{y7mmSNix@K2Y<89skNZ{u|bd9A=Il*~LKk6+B8mdO0#VLQG%!)Hav5;03(@t|$1#G$0$zFH zVbG5bMdPX51D;S!24lO3KJBJFhB4g{Q8)||m0y|=F~K0%r}a$N1XxMeGw3QD+0FiM`MO-kJgabcLkz3R2}TRaVqJ7dYQnwOk|QE8>6=7rYEPMhG?04qS$ zzaCfe@;le%doF&jt#{d9@wYeT^`|fYl}Gt2Zv6g#+lk6Aea9kHU4Q0(A@2Ofhc~6! zR1KCjgrNA6GL{#)JJ+|-{J08aiYZJCGN-i@R*wZwG4Nr!YO37J%haRE4g)g~r^Mj= z8RT-SXOpXQUP)I>)STxg+yfM_Cq)$R(@dB^5k(xK=kR3J(sA-dqN6nWGj=@CFrt~A zRSxWhtIH@@YK7YxRexc|gXQJ9?oJ^oq~6B5P++IMZ4fM_rE+J&1@GF6DYJ^Cq&&e0 zGYnE&dbG}mqk>kN%2tMgrig?MFjh)aDlrv*Oo@cf?x6A zmvBsJKN$PYSOz!650&DFE^GM#LU#INZMHHo{W{-4i+^pt?e?$yDP^0Jw|wPq zo%*`2Z1gYP+2*G!+vu15|H6{wFP_Q&VynM%L#_I6xmz#3Xmz&xg*7RC{7<={nsK4<%?HU!qf;k=45?SlG(Y!*Sn0J$TqFZsa9s}uTUYDOY#f<7xHAYShFn%y z)C3(xM6?CRoPQh2cZ7`VRxaZ(oamG!Q!JUv&7IAkHKEYDpg#~h22O!Y0DRMK^VI54i zNwA^}G%{xstIf-nT4bYShD1qq#AO&qUEtJ(I#Mymd4Hl6b|fOxj3?^-2?ytcPkN9H zxI6J0sp1g~w81BX3bBkP#(}y7M>fJhFiac+((Q@_K9hyQI1t7xUXjVeC4(NHgHG@O zqq-1tO~uI+rl#WPlUZ<@c+l#Lb7w654pb6y35ME-v6V}|ai&B1vgcYE^CRnj=UV-l z@0a{3gMYqoEAs1|hnCOWUw&+=@~VE@6<<8{jZyv$BU84+9~g&v=6d7IBTW0Ob0CA+ z3Kszp!p1WDRCxs?8-%TID^Eunh=;AIX;pp~BPi5PZc%*L^ZYQMvj;!&Oy%2wNDC^3 z2~xKlF7@R(^+9q>L}u&YdWfuaoDEZoP*9TSW`E^0Gy<)5g~V+v5139=ZUnD}dNB1N z5O=$F*)op0T&5QqOj12mY)Y>{*=#`~IHPc9}de8jm9(%m@Z>TaJ4XOGIj z>O!XS=&<0P4z6ir0ueR{+LQ;7D}!fVg4`z~5|g-|aa^p;3>HC@PElPZkinWl856M} zVSiAqxq9zKpLHg{*-I`F44w=4wzq5Ns>S^M-pe0(?5{lg z*8fm)>__Ga`tmn^;(zRYmD>SxesKh~EL?QghQbl}-4D29)CHS` z<-mt3a>T^MVleQY#ghUfwANHi7;hAY(B|g4SiQ3~o7Wd12D9M6ZgG$(Y#Koh25y4N zm2zo-_VDY7N7{BtI9*shcJ~o;XUF1b)TKp$=^$K+P$@`WC1kFxY{a&#){><*5P!Dh z$d{U>BQ3V2i%iZa5!}Ec@kkB5fD3%Y{NSU(+z>P{bFFY|s&^u}^A8grBDZlkC77oO z7L&6jR30gGtwOUPjB+E;0$fuc@w?5!i3>US6uEN~Cv%0hLzoUsa4ATN5Ksal(m_fD zNjJqJnmb5hl@iw4Qq9n4nu!LQIn*mvk@bjG6|5`TqBVRy%- zX6?kq!LoD?QjSTGdYT;H$Y8?#nn;=y;k@>+4r1YEQS<$2K_L5&kgS$+GMRFi!H4+A z&5Jsn@kcG@E4WYI+bpr~vXA%slmNQ-J_l%AVA6L1P4G#;8e{^Tvj*^h>?NGx#DKue z0;Ca}!x$k6OYlQ=z82skB7eXXzM*y~8VDqy6G1GzCy<0hpPdiraWJ`aQ#&3pfIgfC z;H&r-9*_#q8L9+)?sh?U-(6Dub@rVqfmrZ?>d#ZoSMPE7>)#%L5uFzxcoCkm(MGuLN-a2+bGsaNHF*-4JS;P)tD>jDhvkdRRdThf&LxO&iK;1*& zFi>f331#;RXjNe(R)2X=|(z7n2%x38};^;T|*WSexpawdQvZ_Sk1NC&CmszFO6n*g*Zkm5s3$kJu>C6FS zH@*bwOMiqHg3;J!Jt|gMWevA;2F4hq8efJUk}Dok^QvdZcmu==y)5`{gZA(1bf8f6 zKwHnx7%Bc`J)H&)pal0M$<_LY1HZW*0U*HwB`l~8Y*AcU*psAbY7mxQ89^4{o}_C1 z#2}ahGp=1*o>9k4vsM*CB)lm;6rw`#;vYmS=6?WBPHPFVs}dX`8I*@)0|D=W$WI_D zL=QZ7?-gQI*bS132SuP4MFrg>>x+AZ+$!wqN!azi$~J6K6czH`<-DpmguJ^*s(^)D z&1+;!8s1kGAfna?ztI`w3tR<&t;m)TB#Ix1wLnS5DexXvizr@!+KbHH!c9iUwCp_&_RpjFFt>F;p$#t zx2iZ3s(%)4YG~T>qvKHeDp%thy~>-WMtNIaIu12o0l%(lnF-N*Bp;q-76u+(#cDuOMj)~z^6TiPbI(HR(_@8-}4z9lDRmvx*Q{uDIHsQ zhjvZoxLwN?MIw&XTzP`^=)h>&`DuRfU`@nISA3=4ptf&RhhU+{1g(t5QnXJ8uq4gn zze=w;7`pF%chz&g=!5uQ^kWRj9{-zuzRE$ry>|ko{DVF7+5@_9+Yi?l2-|a{+mZo~U+=!BDat!iPlRBCA@v!hiZFo}t!>zU&-> zAC#UF(dG6}3Jpk(U=`hg04| z1sj9TdwOd_BI;#EdyZQB@t@bd(T-`WUx+;TsLB{d2Ge$`Zl!m!c=O#q({U*HdG)B~ zfb@2}gtAG-wtw;;T0NL3n{ee&$Rc|}rn?m7+(m8_kIN^Y)nWlu1=R(AB@r+tH}g7Uy`$W#0{Z<+=G8R|H~>R5jr zhfM?geLYvl@9QsmQyni}^snO={pr(f78AC zk6v{0)qnq}rMtiSueWrYSO4F1%I<`T-TcUnRM}@l-YVMiPOavCh98nEJJn>bHLC6z zBizy}dwBj$uPK}FhyJ~$IsK;J=Bj?}_Dgp3+h>Fi$yJ-omanJw7$HT!3t#jVWr}vx z*8fdgRTZb-w0qU|`8Vyft@rnuF$b#d4||NDrGHm#Cxcr>>CM?-c+o#3&#QaIsH*#$ zR;YH*ziCO^j&Iu5pX&S`$EMvL)$bB zmM_1vf@D*v*uAz;*o@LyE1o|jHev})z<17%?dWiYRS^~)^BTC}#~VOjIxlr7AkA)Z zO@I5=9zTiT201f^Ti2Q-=2H*%71Ai*P0x00V=}g-F>i~CEzGoJ_2@6PtYM07C zP|#(I4}P5^g)f&s(FKeYHh)6l zovzxK=%TJ~1Ujf+N7YA**Et@jrv3kTE(H&MKUb*dI*LBw=%MI1SI?np|Gy&`8tk|# zr?61xx`3rbb1+;;1-gIO7Pv1D(T)qXhH`L=qj|{gCQ@hkyT5{gwKS)b$w9{yk0&7=;=K4X_50Q0-4)%TcA; zp9&PW<15fp(Y5VU?NMI>AXIJfDAe<|fKG~j%aNwWbq#9#2jAnl6;P{j_d(s~g3bSX z^ZuI8w*PIrR_}JTOmQgXu)@Tu0N%b`*0+fy$UkOVvl10aRk-Es^>!c4t) z-(Nh|bC7)guXeb--yXYrbJ_i0|M&m*Kh^HG|IdD%?*8BR(J4PR|6#A^yXF51^XIPn zKiccny*cjwkv-CjcQhBjS`2L>aygY58=KCs+Xk`hj~)y(zBe987T@xFV1noEHbW zS(7`n%!q%^t!zH&a(xyoHXj5P;oy3?U5v|s&FZZGkq*dV<$palpTllF>$4_YbrPfZ zvFmJ2zB5x6;)tgSW;sl=CR`NV^Ucn6qh94s>gn}lf4_GgLACF*0`~dx-0iI0_4;U_ z!63D`gPyp?vvBhOyW20P%TBj*=*|YK@n&b@q_2fLvqsz&w?P!!v4xaIf#fu88{l|`OeGN=&hvDjRX+r(H`x`XB8BP+7i z`grJkjF-DH>3uHcc4g&*hVt?1vAut;?kpLJvv%&1{(l^o-SfB_cNx{Kau?dbi{oIq zKCTbRW?!(!IJ(}qt9zy!H$K{s$GLo5GFtXJqmMY5tRLD)=i-7;X=`$FU5-3$E->;3 zVk{s)hni~8bF7ZrAFyEwz?SJz!ZiGtCF2izKJ)?&jNZ^Lrq5zaH=b`*q(?>@WSGer-tXb!9o?dSoyEhqcp7+P3H}(3DTYo$rZ$BQ7czE0gA0N}tp4VA?j_zX@ z#60tl-Zlxa=|flTG>+f1wK!Y7kEB| zO-(dhQUteciRsba7nQf6qoL#Bg>f7lv+l=IyJkM#H9H()Gnvnq(KI?b3&_q}JNL;* zHh;a1eF)Q(tixq=-_qK@u@4m7rfHlniY0Ov%+mB3KdiLlN+b1}jhAvl7SYFjaV@8Z zmD!22>2!$W`(ZfVJi^PVSQ^*Pe&ccFbSGOlFO8lcCpg?bXO^>E?Z+rzt=IR{?o{p*EFF(d=;%KB z5Ersf_osbOU9VemiM9P*d#>?fL`dS?pXG;Nu2Zg+iT%M)c9C)3$9!NF>n*;b8h`JY zM7Itivky%$oqh(6qXp4o&#b7vtjXqJ_Vi0NFHey%@SAaWyhgTvjma*yG~>p*e3Ut! zV+2RPc;3oco-8ie@h+OKgmq+m4nJ-mx!-lIM)Mx7wy5GuUM;+uO!Dq}@)=*D_qjv>ufMW1^wpbnws+Z7f>_m4hQ*XURrXcJJVFn9(-LVJ3?SjV|2*$P0TYvdPvMKQ+-jYTfm*1|Sju&(on3f9+G zkNc(f7%h%dV)i!oh^C*<4{asm6Q<44zQ$hKzYw!C7RewJ;d6NHTPNdV?8kNIq)i$p zzYROa(CMb1p7t>^7Be?LBY$&D2F|vgj{E7r-4$JX$(My8i|cgrynikyBbE<0y8?8_ za+wAx9q-G;!bfv=6<2F+kfWFyu~Q@`vB#^U!;N)5T95indW)djrRQm}0kXJ9QJ~Ma zdws|uLiUYR*AIWM-B(>*Qg`X6u@@EV^70|-JUr@lKcDkaS=-{Y(^iJo?_9z#PKU5u zT|8`zMORdpLz+m98h`2dvOM1c-&}r=BBJu*9w=f>fN>f>E(svOZt*tx{_=+5mR%RF(?#j<>!2Y1ZF;&{J^=PoJqxatn$ zVsK|=u!64XFXr(%#>Oz*Mb>KM@Ac!3@CwVI}k3t?Ti@t{)%M$)@*=EB5* zCe}LV8?sGElNW2$^|%44hR^YlFPMnyb>1c1K4Got8GrEr#LRcLqIW494f}_mq+@m|v(C+7v}rLr2_z%a8Zii-{Qdspmxgl{fa z>k!kio_|}<>me9nQ70r>c}`+&oxvI%tBGOfAImV$mSKKbhWT+B=J#coAC_T$i|)-B z`NxwnbzIkoleLE;^+{mg8!Zm2jU#qbUiS4Xo!`1H?JeVRaEZ&&BaWjTo;~J^Y_PFmHpw_~(~PXK4O`gn@PBAn-MA;OsXRW4r!^Q;iJvn6EY^0O zG!ee8@hQI0D;nCY&^_QoZrmkd}uPSnONooX%lxvw&nKb_j-++ zz<*6_MA|9B3EAE@Rq&z7=dRihuFa-G5}#J|F7Q?XufedEeR%PlKNBRGnb| z@X|@T?OZ3xz~%+%3U8t_h9!{+5vX={HjkT)6p3F zwz>14UFj90jx$Y11??O+C%j0^!%PGzyFJLIw%L&S>!WQSk&V)PaVY!g*@63ImVfxG z9qC=?NdoI}!+X=y)}1+BccX3GX|$Ya;Y?n76VN`r+vIc=K_0DFAD6w=KIcY z_At_T-mebNkIm`mmTbj(i>Y>7ER4|Gn|c!^ecm0^!@;h1-gQRfEZ$ExGW_hzkDz}Z zbrys1eLW=5*(`4L(>Wd&T90&A!+(|kEQ{I-&wFPv+n*cB_UWe6RG+kinxkEu2B*`! zIeS`szT`WXXlj!rIhWT-QuN|ze-Zw5bTA5HXA4$3LxATz@$C5Yoa<-jB2y%i^kcqT zoSal-(LM~1_fryk-A>vquh-62TiBnU?r>av!lgMZMrPOS@9@YR*jKqf*MDwa@eO1|Z#tLF>XB}*`^Plt?Y+*re_CEUahdI%V_I4rn&o-OJKn&l z#EzxcMqdS|BsD@b@AW?Y#bdK`qkf>nFrSEhvpOvJ^H3PeFr8r5j~CUp+sFNS^UQb0 z%c-MJ9(%UFnfK{_F!Mjc4Sz4_I@1>F+gI~vkR|=@e(vuwcWn0TYGdt>gL7vvSxFQ> z`p0Y5!>g`0>*(Y;N26PD?Yqmc|~$NX?p>3(hSf%>Cq6^(3G4x{+8-rE5i-mH&aI6M{u6OL}$ zv8K&vJL}TzXZ7*9!qrS18bP|Js)pNb;n>=&x@61i@j35g_j|euyZ89$7yZc2tx-K{ zhE#L%b2`1A_Ex?-Eq~1wx(WL@mD?S$n=bt@yEl6=uZEAn(x->J`d0nt9G0k?h1qc# zL1y=>y5nOv%GbA4ADPRPC24obmhqlZvFc7KS?`)*W@?+{bGyGk#GGcqU7LkISkZm-;SKR*t#^BKhM;`2UV-9}OHk>>7$-bA!sc7~7XpgSq%hfe(% ze=ZLzpQdhd)_)?!FjO&AV9rcEspp*^>L9{Y&E99aVE(L&&?foj&Bp$OHO~ZTOuULStRV)gG$m z-DGDzr+-sEKf$%gx2&Ym)NE%a%jkDeyrvK*>z$=b=ff9kH@@)Lt8@(?@2VceCAi&B zi3~mRA-4z$BPa5agh2D)t zz=Y~0Upf1Voj;F>OzDo-7h#?sC9OEMD9|E_&3{^a|HS3QZUwi`#jG+J)AixGEAXqu zqm)o!g(sLVRg%^PO9=XIjgHXwtvC>q0K>WSk5d-Co?AUE3L2OyoIkIdOY5H zAKcFQO>E0gOVP`IoeetSh!Z`tDEVV@}6&H3W=MZikDBk)T*`T zI40Sm6h&vsD&H0J_&nf)7)K8_@L8Oi@c~(mvFkIGy(v7b70wSV=8 z#-F9`x}9T%8y=8C8;Z+@%1vdUvHBL5@}t2|FKxMJcUN<3ZRg6TI`kYyfiuLAGDA=huSXiPC zZ{?NBl$(;IH*zq$&A029iz`-E;eYdIr77WLOh(j<_+?J7@u@NkZ9V1qWv3qv?I+{e z;tcv0yr`#9l%vrki98xVRSb)ALzneOk?H8ZgN9DJ0VxnU97=#RD; z_md-3YU&5fqm29H*(JG>@YDN6Q|rJeQ|9iNKy2FQ8NH?_el;Hakn-=vlz$71SLHuCiT2&`hgvDFyJaY>GzT3z&Tie9`c_X~$-m1mOoo`}^~ zliJ(;nTW6Is`(vrk?XYI`G6{)3op3^_kIT~Yu|;?5?}Yn^YJ9kZgu-y25j~-x_Lk8 zr`nDdCI_EoT3*YQO3n9v+J6Ey58lK!%jXjTolVU)vro<=XOZP7q|}Gq1#GE8lZe~f zxS#yzeBs}nUfiCl&W`ico>|v^&yV*~2i1*xjt-@>Ez>T8TCtp2IMSkD9}5Ad0C@CC z4?y**mXYGm-Y)KGn5E+9LkCY1miZ;w4u%K5ua6TE9C%zuxB6z^gck zKI)UAbWxc~7W<55d+eCi5}Bi-ST8U3Tqt?PtRf#2APxDPG~Z8J1ZAW*7sU^Y`)hKq z6>-?=5R4~;mCJUyGl ztKq%-$FTZ6G_*lnH+Tx$PdB{u%5N}p`J9MiMx-&jFC9$WMtJK--8RD5E#n?%pFX%7 zwXN#a3HH0^?SJxz-1MEvDj8fVBQ1H26=Za4ukuTK1nqXBxaDb$6x&^%2etB2WT#{=xR0ioE< z#=wR^!I^1o)dJUH_2CFr5QkL}aoD?x*TkGJS(so5b20&3qVS%jZ5rv8s0|%KY^5kY zA%v5LE%(aaFNU@kWQiV@-S!_xpll-aHbakqR;aczQjS34W zC*BP`%zp|My%5DWjgin7*#{q?f; zWUG)S(2P&?eWX>y*r*`X7eR{`$vU;N_C|vC@_p}SL|MLdR+hyE2(n#rn4`qPF6sR) z$q{}R-I%!dJVoHNFm0tFO^gAj8izS4`?xK>u77WamYw0MPHUIAYKn~$$1D@}=>ytN zQ@ym~)}F}p98jm~eV#~}4(RLYNsJWhm-FXwA78}qAtyQ@uKAsn&oA@inA7!X>oztf zZo;cD7I(4d=nunn#WZP*Jq_<+Ipm4lA6(FHyjlEymC8^qP{|h*3IJfGKLKgMQ+i}3 z7lkAadZ$<)7Ztu6Bi9@su}?Qq zmHoaZAm%9Iz0sfB^_>L18TVf4>{@tV9wBm-$aPR@Menre)uybi&gs{Z#?_+ctd-lX z?|)gIvosfkLAS1oTYW+$T9oP zciKkp*WPjdz!)J>w z77icv+UVyznc!`zF5}&}1YW)K>V=8bF&Ze%x0;K6d6~VnwHxqj>EWkdLq}BBiyFlB zZ6rH#60A9kM&|eSb#aH+*nbbWIy09~>Pfgi4b$_&bd?WVj%&MfMSeuIPdZCh9(g;YjokM?A24*d;Y{PGP%%k?Z~v=*(%=1dl{?qHn<|=8lb$PO0tFPKCGB zRElvv`!rh5r`ff3qjk=eo4u=oOUSgbHx^@-q0k7slRhh^x1q}x#ih1C?z==TE>3ti zX+nK&g1+^;KA*6>_kVr=Qw-JxgSBVnaP+VDhm|2Ed|bDhbUjD7xfU`y$J>Ge&Bl&e z2gtoX60H)2*>0+VPdH~=!n(Xyp5v}NA9_?uzQ143e%94OT~S;XE3^hZ zxg@Xf*#J#Gr1Jskw$n<)FdG?7AFj{P4WWvNZ6&r?YwI`pL(^&0F zjQw*P*+EVg-?+X$`fCZEX6iH|bB2Kx;=v70Y3sd`YLW!w7hJUQ|VXY?1ugfD8K8i*-R)ak; zozPjk@zx=D(l$EF(m=b9a>!4hxmNMXxP?S?Qv$0^_@O-&gU9&VUK6Rh;WvF@ruWC} zw}$@-V&hL|Z`!pCuTTI_B^)r_H_ZmaOGNR!zP+Dh&^`3kYPwlaGtaM_q;fAj zCVyAr%(S2L2_Ldh;c24N6sbLnf}c;|1u2|8MUi{2vl3={j19T&}-!7s~tVrbMYD{q_Dc%K6dvEfs_?l;EU zMCMwYFQs8K6S<}w<}c5aE)6Ifxiz`v%oKfu7Q9CB2~Kh%*BH~@ny$K;pMN{k30!C2 za_7Nl3bOEoD3-6)a=}mtGqrlP*FzyMHkgD__Ju^1~v~f!mf33 z-zxg~Hd9roq=IqOs((s_UVn}yyAtnny486?C#U7f`HE0UicC3$%IbxHoN{(Q(5S2X zJGbs@pU?GTh|`_^;IjdVCQ&C<+VD4fzf9_T`ILo2aU^FKPqQp;oV%OY+ZtWHN_F(NuSj~5bm z?Dh`3vmsqSHEo;NC7@(rTJCA`BH`|Y2G2}fUZ$`djq`f~w$`3zJ z#+y~i3_i^)i<83^r3J@YII|dgJ}+jGxm`73EPGpVD^X46Sqo-rGJSFyELfsfuSLR2 z&Mudn`)k+v)qk*_sdskm94CA7mh|SX&oq0YQ3)@aChAj|59QpRIO}6zPP&x4t~TpV zlw9BVR`|N#^AmJu-YlJ;% zTHo%huHvGyeq^;aWgVlhTn_mJFR>}_T=|4qDX!@GmVdp{`Oy?-558r>t(6^*l(}DL zo=hqfVQgbgjB9Y{iG_>YpKv8D9<3L5?**BpYc!dmIuV3;{-N-xFP#rwUn6Bp@w3FI zH@0=@lsDy%vzf)j)!Q?=EfzHD`q5;8cU@UlVDIRe%#3Qe;Tttf>O?3Nw&=}BsU$p_ z4~_f_Du2`WL#MAiqoS5BdD3$j96hgn1=+x)R~eJLMP9EprzMquN!>}QxEH&R>8!p8 zi9a*1G@t<~ z4tL+;YcP)jzt78CaMW&nWuKbg2Pms<`5`Z73kqJmog^kQ6K!X{gkNAFDur`S zPJi)~#Z*<@88eXUfCqPCza_jmKkhEYYQm0>ec`b-k&<-F9z2+EA?`)4zKlyu@y?UF zI26O3W(TcK01|FLtzz&&sufbDQU{T~%k25pchddQKJMt~e1eEa9UdE7KWQA_HDh8Y zFqzP;77X%{*f+#94@Mo+b@M9P~n2_`Yc?NGt!9dLjI?^BDF$e=N59o{-E{+a3Pg0Q|LQRrL z6rEr_s0yeP0c2VL+CZ>-jRhSoc$qGHeJezRUM`PTxJd>BJ{N%*eii!qCgOu&e18?H z%(<=`0daAMf9pzeX%l~GvI6Yjmw)F^p{vS%##<|COAn$3SLZqYTypZhq&IWfU!8ol zZ=MH{6qS)GJ{cTr$h3A?~m-rPYpI6XVSSVA+5q4dVhD}QPg&B=x} z%oK6xC`Y-F^0#UWTYJvoBCG6Yca*mTcm1xx4EDo{?nByulR#xM0Am|~r*IM7-Wt6< z7R}NUo_E{2p4wb4y-44lPW;}6O1|llbTfkZrpM9EWec!m`(jF}d$JzFP}W z;}=k4MZ<2r_c3*mPoD482Y+|Z88{|N01-ZLuEx*!hT{+1#_=;=*ZhvZ-?&}B%?iiq zxy;XEU6tqkam~*78OP4_#N$)_-ilJL@8Rg#D>Yxpb1+>z^BT4p0K#YR6$@1MYm7fv zmLe8My}VBGDTmT#! zPDTsbP2iNG=e;Db7UvmyaIS~WKZnDsR?9{&f9Y6a8KYXeHUgCZL zjejW~fuyD3;M~#BxI%~a{$?iq=`PnE{@mtROWxyUX+)doV_u%3k$>;boevpN?3;|e z-0g-Xt;daM;p^Rw%a2DXV%=WHvVq@|$F(yg?F*ZX;8`X@9tOYgDXF&dk)QnYfkx zoC{W~8P=554l|e*Xw*#sO+!ej=fMxSyH{YT?eZE>ukl*H^3^!2?sRKsI(UNB3!GbH zut9bobed3$4){a<&Zl&@nYVYLX^@0&7+#T8XQZve$3WpZZSE#8>!5*$$pXgmFEoDGzoH2Yt^Rmf?l|sW2 zx0?~!zU*DB7p(nLE_c0KKHH$3vLTSFi0G#iC7r1UTz^tsdIRnJB2l2!qU2nuv^Uwh8hvON?_WR*@H)T*$(ZGOh?JZW9xS8uX4 z+xBniX`ktyyXjq2^vvL0EyFMAh*mIVD(8eTq8&V~XSHdSYHm@l?;Q8Ed5T_K=VnTT$O)f1&f(Rv5^6 zD%EJ8-F_%`WLU+9Uc$zC2`f8qGW<|}Hh+n2e=Z$H^OPj(o;z1~g>9PqD1_HXcSW0p zMW2Mm%SACVm6<{q&yZ!_z$?jBH0)j^cZU?(*^W9!7y$Uxr29y^*!<{gr#V zQz0!qDz?W-D9A)Ml(5uwZ6t!Fk9A$C{2bi$>@8J9RgKb#*8FR5YQ>emp0IvB_kRLB zUD*`CXG_{wbMgYB2uo{K@1)5*@24%A&RsCFF|uB{e|CF2(Tya?UsaD7{(_Q(ef#HY zZO{Qs(-QlaTQBqZ+d#UBQf6|LdJE^S~}f!sbox>upER9 zjSvr;`IxYLd?~>mrQZ8w2JRI%oWr3hH_AyTUhZo(*UT$w=h?p{^dY-bfDy^r`%qOZ z?@#K^z}Sy0YO14p5d)6qKzijqRm$49S4NjxC-6@^E*e+6(Yv9mSIfSehJRHh08Bu$ zzZe&bdx*_g#Rl4V6_7U6&Zti)xbFbH7n{5)=UPeq51kTw0bjRJ4$X6MWY2o|Y{dL%0{5|xWNMvR!Z2;>%#3B#*t#7k^jFYA)D!`u=|o@DvdOZ!#HPt(m3 z=-}>Oi+qTiK#-gJBZ0D(2=Vhatv-Kp16vWU@V+hcN0_KvpkK0?ycc<#eO`@p_RXAs zbrwd4ckfIncKOh8(evN8`zcp;KYdtUarIx!$r;|evoK?zM-=&qq1AH#$@|9G+Ml0lwzF6N3T0nt|n}o&RZXIo*Gb-Ogj^ zgy9~yU{`Blu)(|5gyzmK89vkV<h3P|C@)&uwAyBODds8blPE>FSeq9W%C)`Powi&gc`zHF)fYfvqri12^vrBne4z*~Sp^|IJNL8-r3qQ-ngpVqVxXp-HT%cUX_ zZeuLE#I&|L;zkvTEM|(lmf!}L7hHbkquzHfEc0D2RQ9vY6cOb@(`HJJaz~QJri4}& z+M58BlcZ)LSUFJ~Q6Y%W^#QEyo;Q)=_NdTvC10}c0!O>`*YuSUYomV+cLs-z+b4sZ zY>C1kK{1AXW{oLJ@_XK&KBpkRrv=U8&Z|q5QA=-znbGlUCE_?Y+za{CpK%Zptk5cx zQIf6S1&O0PZ;dlCulMD%2s3p;&v*YUt&H48%4bqVo^L@#+}E_qm9w-rO~g}z9B0-V z?xc>vf-{_olg2lY@aKQMqn^`4GnljB#)L#V+gz?3+0in~?dr46uW*PLfwd(X%x zr|G&kjJ+;bK!lZ}fs&4J PLpYfg%yyaT{8LOx1KECd^?%{^Q>!s98?Gf~Nx~Gwg zQ#zG)pPn-K-K{E#do`l&JYiit=Jsp6Y)n%qo-d6q9^J4oj*ARcGHzcIF2htNUfb(ukZgW!eZr|6y^90{T{?m+Kdp>s8v(?*8zePt<#y8C(Q@$A;-wX}_j!F>UvNPTF;r*%KZCt*2 z7EbO{80|1cMUpIk26sgaYGSH7(E9N(i11l81%hIb+X4#RD@+1GR42>fr z<2d{&aRa9;A#W)!V~6X+D3)*K^HSmWtKO{yJ05>!aE^=)SU--84EW8!002C{Lg~wq zgz_zQuzgG8SVtiSV6DEJWv6W4<27s&KF61z$WE$=@GU7G~@kZ41ka=27UR&X9XYm=8GETJXitw zw1QCLl78}cMB|XVJ@j!Dpn#Tdtx+e1R=j_WLUwoA4jSBmvC?qZsXenDK!X?^rQ-pB zzffyO`H1^3+zbF{IPR%H_f#CH5&BmzA3v5`#1d!Ud z{iHoQ+K#kVDOoHb1KTiQq%=U8JIXCT4|(ZB&xc4rQh#CO@A_5}c91=OquqWZ;r@T1 zi~fe2usdzH*76TZ<}dt5naV%txi6mb2U)3JAtD?-(Ry(~@TdbK?MyA8qrTc=s5=uv zaL{{@t5Zw>4$Ljc1QBJLDJ>@SBt_j0>@;6!jdsvJ%}`i0{M9SMN=#C#G>UM22}qkY zkC7nAEYZs(2$;rPma1j7N*1LGB1wM{8%+=erW9X70b5k-2cn<_ld>+#odu36FcxUE zf9UCWkF77h=Lh#%rZS9vp0UaPj@US!|2Ka79~kGq(CNSR`wI;{f%d?4`4jXaSq$P< z2b^{wAAz?T=210M8mvG1QM6tK83b#@tE;CRv^uHP3vpRX9e#BarRZo2Nd$6&gH;N^LNMxAH&P?D%vD%2NAxraHeMyeUb#O=jvwzSFo*sjC5C{YgqYV#4iPdELFIzhh zA+)?^2P*duX7U#bw}ngjMWliNyaPcKMNjLka6IsvXKxuSuw)PfQtWlhfr}cP&69=h zbG&0BM3PW8M5}p{BCmr4LQfZwrvju^;%>=s)h-Z7gmBp1q5)9TCTM?4AOLeX&;tNS zEflN+&-(KYt(hbrHgd@4p5JgjpS~!g#tMQ!vB4rG5cLWKl8n!s@JO>jLXsi0CWZ;ip4y2E&Z`obP=~xp! z2X@s4t+zT`Bn#o6XNP~99}6y&5+y)q9YlshSjHFs?&+HvXbtePR6!oJLGK(0ZqP8U zwM44a{9RW^vZHi()CIxQFTckb&6jp5Jo-xsf?(Q=MZe&eOh{(x{_WSk8Hw5(+`-nM+eW865N?H5u^3M~IO+V6Nq zd(U9)AqmPtZTNqj4YuC!d&vO7Xz5!fLw|XQ!f1FzWEh@htJ}Q>ed)pF-X$wCVK73; zi(1=jsSi?OQ5a4Tp_j4j*kzGNLChNHOVC&%Q<2Bta>GOJ1bpSb9qo1WfxobV$5@yC z9v^=6b>IiH-{T4S(?6E*A2LFrmldJyMbYP;4-#k)_}+hG>ub@oo|v@bAZ9g^Gx5D7NScPvZ^+G(ch0 z0HtCfe)4~;#TyLZy#-*s;WkQ!fCR<=(Wi}b3ZRj2M&Nd&%lED;S?bM<8Nf8^iVtc zO7MRRZ50FWi+WYG?dSt62P~zk?i$F5qDS{V350K=?BkjSgx{`GWzDzT<}-E`r`IlHGa4 z(I@>slJUw&lE=xwlSIka!~@fp1I`st-xzzzdizOqy7H}gz45>EECTws*ZB3`ogII0 z*F^m&u?Rr&Hw>bBAk+RU;`ocoqpV|2;d{y-`^@TQ`$c%Hmk zwgMJkTo_M5ULL@{0*I5%ohgmxU_2oa0?wD)gHJ~oVgn+fA!DUEX)4+R)%Pm#z;t;U z6p&^K9&@)W3`64>_s0SUNun!oi3)#-NJR?-iw2>GEE|%y=Dmq;F+c_)FxelIqr}}; z4`<>>58AO_$+o}duVwUS-q8Ik;?(Mnxz?exto;i+ToS+L690l5{EdjyMeQ$y-0CI& zKpOx4d0;bt(1xw{_kQE?FG%En!H4_dFWpIC86^ppq39z^@;FC=gQP?#SCN0_!hpk6 zVJH-N$t-ahuGTNxc1HeR+JUh7 zIns2k?=vmXvS^cpqu#xMPN}wYZ&_rrU$!RLo+gSCL72T4LgbYhed7(H^dQJ2g=lTr zi&U{6{+f3${W0$@jvaQt+Y5-lF{xVm(@x)ZfdG_B=4;vUTpoYWOvTS^@Y1~qXqn_k zE8;^wl_z6W%%p<>aY8M*MGnITNNVhFT5(U zCh%huUzt?orFNqoHb;RE=PRD>MTRDbP2>nmDT#V?i?$wuru5~7fljP@F4FkRj$RVb zTre@2ggoOk)tG;^c)kE4Lj}rssUZn|%lJ~Cr?;fOk+cozFoMJRQm73~}0`NAvULqaB;bSJc5~BeE)YmE+ z1%MbPPOqZd-E3jeAm#TmSQ+K0^Xq|Dd?NxL?l7fq^2&efu*E$~#~$L#qD%!K1Lqdi zsuDx7X8EuG;8+JPmVT^fjIUqxJHPj(AD>9_2O$eJ@|W+`m%tzY-Os!Kecl`$6>JY* zkjTJFf-xP3$N@;Rl?~`ezZv(R&QQEZks||mnhGYy6wr*#HH(SyjTJpTc5aqk0A;#t zpn~&R!bg9gR=1m}fWc5+w;e9>QOj95)*`?C%FE$@1kqo9DM_%EL_-5>t!%7D7+ev;w8^C%Q?#GA6Jn!FLPuk^j7=EsYpQ`}o$8m)5FaM3AV#A^s$NK8WfAd{eMPPriLaF&#Pv}S!Y^Vhw@xE)Rnh`*~ zNP*~O0($T`Lbf8Tl8R3%#<*vMPm(HmqY45Gst7?9N!?+uVa;|R&BO?O<7nY`%wMxW zYP@6Z_p^3^RtOf|290Gw&k$-F699^wV>>I-APxu+0B={W+deM`@OBLUwyLh=j0o&JTy{xcW4|6tuq%0|-BzyCF7YP|Gse#|c# zSr5JA9ZTfD#us>%Zw^?2WCl99h|ygHQma4pCQ)D;CrYTdTB|=T;lo#j5R8eT)j&cL zl;Y>L3&7LfR_e7z3f_2k)$?Kq`^F%=HyD4%4tQnMQBOBT<0F^-rf;)O+b%&C`+auH zn|?mUFUJdj7p3b{GJNpTGpf%Z8mlo~kl}L!{GybzhZ<-v<8ayitDfFK^nmLf^aB9D z@7vEim;Cf0e|+dazS}=zZkF1D^&4BQYQA;X4&UF=$DqSMQvT>@k!a07{C5ATp4@-w zm@i!9S1cn5UKPR;Km@Nq@)ozoIv_bg7Fl zO|Mf_b|A{(BAvyi&jl2pV1p8&2Q=`kOk9{0EE4U3AyL4U`9B(i=5rt;D-=5B1i?}$ zU{suN5Jle$MP#v#mzfG2EhvBX*gxhs#TVd=%I2_5={qO>rL#ctrJwoMp(7tGy(!n^ ztQ>IzWF=z59O14PK;uF)ZmC1m$3md#0lMu%r5Ta;MglpQ?H{!E zvBLlw+jA`cTa>34njdlb&T9e!2UBmUxb!1eO8BUK9{Gf%8$4;y|#Ll6C~qF$9W5 zwt+4QCJ_9CYho|fetg~?_x~Q-5QsK=1;!Gr!?H1%34ID}R3o9015@4&KMtPJW}w zwMA%=@y(N94m63U{qu{*us=&c9M6BdZlvULel(*1UJzLb-_e0w4?xTy00cKh);~z+ z(qF|tmLD`ZSblbz$j687_Y>wg7m~$XU;)$-)ShtIwLk+)&|uc|ff8z0 zsUbGa)taHzHtnXhW4j2Jrt#px9#{+Hz2J=FIOgKCi>Eld;~kDUIJNUWPHebG*BH-2 zdJrp*KM6FduQ*TBbX{DzD~V`-FB7|gWIHFuCwu;P1c`tDt$!u}=7H#wsMB;&?^+=!5Okiop?}U9I&ZS6g zH-I$ZKo@_Hf_zg2<%@715fg(*D>V}c-hm};EGQ9&48O7C_f1BU@C$i-;1@;X3}TN& zUv&HkbJ5#=pZh=9yN_*Gl{Hb&4@3hmL=*456ZC|)!rP~>f#zCk?_=_0j;vd)y0arh z{J0<@h$$exCYA*2OQzqnVlYYl4=b+oUuOK?=IMV&Ub}?%vH~`Ql6pUZP?R%wv$M=@ z{BYY?;5Wq|l0D#?nfdbV$M5w?>dkTAycPL|VehT92LigJj1*_)F;SOHph@rP{B97Mo4imrc1_kyPcISK;1ym>PP z{u_S>?l(8_9o%_&GsU#C7dI(SGk$q<;xwdNfl7%tDSGqkUm7|7A;0OH(!BGO(k4l8 z4jPe}n_s_U>BbKQ;ul;0{WNw;p!txXc%`~|)RL8?zNn^~*yVl^q>&v6v?U_&H}(JhL~UNfiP?Q-S{5J%943+o{v5L(k@ZvB-bM zxBlO3@D6DdrStcG2&YyC&XJIDN#AI{nT; zUC_5+$L;syLdrK^`7Z~}m;X$U^Ec7>L$AMXLh*-Izd1?ihi1P?#vgk9ebeJV={F(i z{m|^pdC=uIJ-zAipY)rIEPm3@^I(4H_vxSbb>Z*)`s)wBzW*b?PX7bHe*EUwRp%ec zclqz+`%C&wz8C+*g#R1bp8kK*_IKVa_AlO^yJ>so;HhGorFr^Ah?h4<`9rHqH!u06 z)uo%m{GrzyRpr>9bZ?$5>xX8)Y5N~~{iCX!@RNS$Z}LO4JLkdt(C?$Fa`I0)Hs2&L z6;R8}Jm;H376hvJUYi1%{kis1bf{A64>eBBGxMBg=6SbY$G83ZQqq6_p`0;#X)xON zTK+#I-Qyp&S^8TU%Zc8{=YhoyUqJg$N7^^1YI}f)w@qY@ppTD_bdM0e&7AQ@TZ^iW#y^UFK$*|vgeyF|A+m# z+ai;>N$2;@pFi~aOnKd3jJEc?%Z=&$Ek>o3yY2obx_X&e>@O01`eEy3@8?|26zv5J zR+x--=Na?&v@d@PAK%xk-?3qP6ZB5}y)NO*{SU98*V6f`Yz6;yJy)yWyt?*nXU+2; z_uD%E@bg8eQbj1d|7{-rRR(G5ZukEO96J54asQTBXa3Xr?(2koUyr@_&O>_Wm5UYl zMPc&I-xzRhp4oo-msomn{x#N`XLg>diYm;X`f}&Ze|vvCcUe;>YQNif``5Yusej?G z^PZM7wbxeltDMANV*O^#3-u34es4dVXRE*aBY#`R|GPSD6aS&E-1XyRl&ZV?;Wirz zRy|Wb8wTG#BxfO9{Qb_~{l|Y_f5LCsjK0?wlhH8YL>l)_w%u9-({P-zW6sg|LwQl{cPLwxbv~=Rlm$!|KFtF z{TPboX=B&V`};o6Y;$w%`nTO|y!zWt_kA6@)t~)yt8-`W{sRZy<gnsf6nlKsn|C)Kx({$kv}6#DgtW&g(CTsikA zK6QS@yMN-5ACmp=bb9ieW&d*M8S|TEzd3Xo|3kRn9J<*5X4!vY&)+QjH}?65aR0_a zKP3C#>P%k!X4$`_JotxY|8nTmZ^He{p%;J7Zy{#QOS z{2!M6K1M10&9Z;^_y`2#;U4%Uy<(rJFA^8*OR6s z*7l29y8p-XTSg%$DT*mp=4tG2eaD5mf2-0;Cs6NKSMX@2{#^6+*`h>i;wXyw#j<}} zl=6#fKPc+Uvy0aB9b{MiH6Q=}$as$U_r5IW95VH#)_3bS!7kX`Jy!Y6wXak`Rebd; z*1q5K{QQvZy;M*XO>>MC!uDTuyMNQ{-9b_R8* zf1uX2;_uY@Kcwqa7t$&9kJLKy|0{oLy`i$3GH1DeN16-w{>1llQ9p$Ad8dD=X;!)^ zp^Y0{63D9Cr^Q|FhivTng3ypP|vef@rqjOvTBSD_i7p4793%lc+{zJY?r{+>W+pDEh+kdUMmf%0>jrJ$q zrhcc({O@$@`n|l&YSypxMmzsX|Gm7n8^4#oDEwFYrE%P!bnEwVLDv2&9kt!!S9;1D zzt(TL)UR}z*4SUmH{5@&6@Sv9ztY2B=Tij1oihJ-q&@O~nzXmWKU9Q$KL~8m|0B}g z`#Wjh@4X%VChao@{u^muSzpp#JCmAwMc1OqE@*hP`;4}+kaj7K+S2gt3v=KIa1^<6H-6Ao)V9P1SVHS*j{Pm3l{Yfh1=9ljzU0!a#q$Styq^;*K7yu`!^R z5IH#>^a&pHCMF6}LT>P}LF}aYvhY5krb=UU*!ZJ>pDp4-wBR?MGa6G#&K?wyJ#95> zKp?_rq$G$6mDaKC^zJ>BR+o0x}@N%*oi28vW1mbZaYPPh0M^ zQdikI%4=#WauYvVv+ zevhGF_;s!IRDPK3l7jT}-KTt>(R@2Jn30*bFn)Lg>ZpGyg0M=1CH+WO?CrtTvnguh&$m(Id&)1t)U>(3JBLZuslpBdd^-eA*=Xhhfd5CV+MjKo| z`tVo*UE@#^z$#*g=VP1G{`6XxefbH+PU#<&U3!az@B6>~vG89i!8UZ?IXjVl+9FrDSW+G;tUp* zzjocbCZ~6~iE~z8SD%H8)63;F8r3jxlhw_!TO1tTAQ8LU^h@?Tm(dWvjJCuICmW)u z851=LrSc*iyZIr{z1Iz{x=G+{9R~>trW6EP8a@hM&=Q`1DlKp2qIF2_NmcN!sKzb? z8Cid?b^H>N6))r#>YT#h{nun;9{0H%7K<#lc^|xhVAwPD1{Q1*n2&o@Qq{B(BMt2r zy2lWPJh>)L>w}v5n%(dZcL4cgEDBYoE zrqBo8Cp}a6ggz`^UIc((Sc}CORj{ zh;n+8awUbPuGhW*&XGcY!;zMCnPkbM8goxH{*UfEXD(xt$k`H#y_mNe2d7jK#$6FAs{UPvD z>2Nw-Xc|u)g20{oTtKIe5VF_+eX=(F`x>Lxam?wZ5^sSvKgZe;FPe%Ns#Sl3E`!#M z5a)E*cMj(si^Jt%3a99T1;wkJ31r{q@J%ZIz#@jKr0llP+XwS(M1I!SGclds!4}KD z+LJ*%qAI-8PovWStmU)}yX82GnZ=V9>9Hr09XHEUku2C?UE0?00heXVq*S~E;2>tJ zm^2M<#x(cMplXuAX%|li98rJ9TT1q)jikZ)0?U(<$jvR+xO(oeI3);T1w-4;l4~ZN z>ZxcQ1Ti`-yOmar;8vFF;CxQ3OHy_ERb5s-PNE%CAt1kB8SAFh%zMgyb!#0RYj8U5 zlo6ObC$> z&N3%e3Up(65B>6XQ%xuyc%f#V>w|ZFq;xOkyb7LGpIN7Wy&LpeTfK-FhZ9l-@E4Gl zMFn54*VcL$aq8s993W_Z#4dtgsVJ7$P-vU4!HLsSVBLu#p?O6@aO0vqp3wX97l0#Y z!UXdoB@kn0c#N500f2uv!TFTSi9?)egfD-GO%6xjg==I(~+`G|q~ z?9|HsL)%wKCcyk`V%TGaF_HiyM%>RQ)uwPSr@)di+O1%84pvTA4L^D8ma9V3dCjV(g7wgUH*)WLb;z z;;E@M20`)!($f*jmfAj2SW00miRmE}vm}nGmACp10?4ba_t>u1J~v1oDC{Uy-lJZh z{a!&KR70UwL*ZPDo>KXyTk_mRPQ%az&pNTJ2M-sl%F_>>qB*s5KUBeSDwNVaO;-q{ zNw%{e=d&vXKkk3}mJmfuH|z10e4gg)5{}K(eQ6kT^-fA!kM{>Y)#B@#y{AFu8VzO? zojz#39wokRB{hBsI3mcOFl*ED5g@NNBnn|kXl6-e`i+PACnSQjzI?=7;y2*eAL29gFd+SI#!bG23C@mHr4qi5dlYw;WcC_wE!tWPEc^ z&!UVK+z-=xzTc26+s|Dg~u|EQ@vrPf>xJ5~HGRm~L9Sp<3 zjO9VS#TUYBfLE4bUu`Pq5uA-@`sPv}SfgUWdT)PbA!|i)RaWQ|^`D<+1jPi@)s6aq z>1E=e+J#H@fOSy$sIw90f>}T1daNdgg?JAoYrOWxPs1v!qhYH1NM6m38WIeC@TL9G zixL4I?o~?`ta=wH4)a2y!41ncR`C3>lW|a!=26Z3F;ZOgBxD26-?6VRD@eR6>1h=; zLPLMaYh9siQfQKR6YH5Ytvru-y2H~Ub1%(yh{yYt$>?YZBAsIwx90#@!P9(iCqkN4YslZZ7TKe#ZE5X=d< zb{9mB4`4t7b7hv%!wR3AZ)Db#TxJ?2PmX^#y6>oJ>rjPLxx9Q-e8l~n5Tu}u(!`?6 z5t!iR-1BK;PA^FNC5{1@qI!M|WtDuK9mA8%qXub^cB5u8l*hEqVp7*!N6uRa-O7J; zoGhAIrhG}bh~jL&e6HQZ58Ltps|7qwbVW$Q{BYWMnmV<>pX6D*)(|I~2{0izB?JJq z9j}DlMtM^@jBi|mo(>Oyd{_16UG9S*4P~YptSvX=05xgI};}npE_R z>6YW+xYosK&%sR%C((*3$yT-%dV+u7!Q)hqWv(E2D{i~%NQ_SNl#g_`3M+bdW8nx5 z-j3iU1zV8PCifK!a@X$34-@4?+ysgJ1yK2*5@UEm03o0YWaaL2(rG@d&n8N&H=F>} zLx7)rpO`5EkaR)G>&Bdw@`;p>S8pkTEe}&bM8#FyLw-=wepNw29`ph~m#lwF9Yhx2 z0Gk~yYM)tJ(z8RRz=nG)@`B*5d5zIVeF#f>Vb?HfC~)-(cs<{jNwLFb=&!=Ax+cE| zZD7XKW?rhp1Vv^p(%fxo=AeYPQ2n2%s6rS%<+c-bBCIWL)12K6W-YzPCbsX zs)!_}!�K1uW8a#qR4YH7A>3=-E%{~TaK!~QGv`i#u`I2iRtIv*quxn+UAsPKc9 zYawdbl_pQ!=#>VaVP4oa2;Mg!KLWzKg8f#eDxfnDkX2ox;OHInhwOhWT)oKha=;uX z^w7Ex4KcujBYZN$+um0u&LL&eo6Ui3mGC z2&V!l^+R+M6K5h*irM!^02uL5?sr&>>)J%pk_*?1xYAMS{sE9LU=7+bCILEd1D}5O zfzy5}(3-#lj7V0E10H`M1H)caHvn^ds`rtj98tuGYfbdrBUYB}u;v)ST)}(NoNgpB zd3BR(iJ#ucJ9Zi03t~JV%mhX01$2^M4=6SJnIc8?6Mye76B9}s$w}X05q59=Q--hn z*zn*ym$ixq$OWZr5<^d&JkE8N*>@u#&Yxmte}po_jt`qEXhwfhDGE5|Ik3W~7Dj@w zuM|yQJD=t&nsx;#T5XD|XQ@3*3!fi;)kg+Ea8U@7n9w043g8|HY?MfJ|)HYKtmu6$Ul#iT9ty|UUD|~cqA`fIRek&dhpH+k)7C80^C|DgB$Jf zMl<|GTZCp$Js^Jxls9?7wnj-zkS^(7@1oZ6EA!^le`cQeFq2#ppDp-0x5$*oC{KqE zs!tRzB2KyP;iecuzpV5bzCh_&WrH%9;GI7orG;+t@@jJbt&?TsJVT)be8c z?*T$4tFfR*U;<2Mc08X5GQyNmU=#V$14^mWPK+1Fo_e`jK%z=|NHu&O0s!D6BGWNU2QjN4;lpuKUm1~)-jour zfo*2JeOP}HA2_+d{8P!F{T2av-@J1bjH4*=m4H+~=kQpIi`Nn2Wkyhy!?CfCT@!dy z^vT9Xf7o*2SuMTQoU$~2nU4dj2h$VWDjChn377m9@n)B8yvBXlt$F&e8Ik=(R~C)0 z%3{T8Y_U@Mi26&`M>599+`GHjCU1eFlHB-1h=sd^Uo}rfDeQ7*8bm2$AJm zHh11k3@tvxnnSqo3*_9b!}Uirs5L$h@Fb3a5#9jK8R-13YUE60TXT<~W!SLdt)efnINc4NVu{N!iB}%|~Ws%%3f%*FB4W zmMp$(YcfChoYMswlG!`NY%(}E*v}a^9tO=?taro;f>h?(OMRW!f*#R;xG%7emU13m zrSnj9V6k8GVF`c$b(lsNM7&qyQpG1**Ems5!1Q(~Czb|5Jb23Fj`g)XJWqvJBTRoz zegmtm4@oa3$0NayaGS5=X(;-rG=r&P@qOxIfdTru-JjaLLE-2#@&?1irI?l?9t0oH z3VTkZ^Ujtgf5b`-rH^J#_&t-^r{T!<3muZnk$lXnEFT8aF2iJg^=m|+Ap^_RKzH7kocgT@+t}Ir3p5zEHPn_ zX+~sfD;hDf+u#~B5iG7x$#lScp8zo79xyJb7PC{X;rHtSTfye_$L9b~IUwL(6U<$A z%Ss;CdXvgGC4d={I^2~}^0xO0M-=?=WLS;%qTtDppP>0P;1KYEGgUcAdD4F)Df(kh z8NV+i{HkrpBBxFQyN>QWYR{wDb_7Yb>simh`OU*q2>IP0HuK>1F>s8LFYk3OV+$ z+_lP(aU+vPoRMwg(tL^$bK`i6A%<$QLv1^Lsg#JaYeO~9AzfyfW)4E4tW*!)WoQ(&{ zth-WGQR2MkCS_^5wGclt#BPK=@8lBg#WZE7p%7V3L!aptF5<;=JR^T0TZ}9O$aK~o z)~76}a`-%R0n(pMP4L_bc&Kh=#Do+4v>_tjo+J}#m#mUq(RwiJZbW1Fi9ELN`oWPt zr>qMfuDYs_#NJ-8M;*ZvA~hMSl3=ttnkG7q%7?HFfk@0f79Fc8z}w%kF#VtjDFrqXFqZB>D%eZz9=weuXMHdv zC#+;9nTiZP5d-0HA8mnjxx(zBoB9xRCbT{3f6uPp?<4)XmuCKt*>!#U z8@p}?f=~Wo*Rk)LllA?uo#tef#!G+L_0;;W*mdLcU$W~(^Pk!Edt_+JXM3-@X8$+4 zu08+Gu5U5}Bfr^oB>5Y=F6R^v{9)Jc!7(WM`%>=b6EA;M0{>>$wa@oW&o8^~1fK74 z$ZvN2_y=}<-3j#%yPkcm_Tm@2?&g#Hn_Yi>k3_lmhiek$oy6jAcHIyE4ZGg@yZ=O< z)djD+66>9#`de{G*wNbXs%rE4#Dl1_0WZUjHv705i0GXwZa>^Qb=p811yHjZ!V6@E zsNJ^VlM{c;$&c*8dvQ{hiR9U^AVgdlSFUDD4iol+|q<&?v@gN*P2v)bSYFpe8rn@Z`p4Ytdu!pu=e59!U{DwaYB)W#b}VbN(H#xc zcIkh)4!raL2HqSf=1|IYm$+QSf*wQ`>eQZ>OV9u%J;CV|CJKIMu#zuLgEFbuFfs9X zw9%t9#R%Ucf*ZO+#)V9BO@TAIM7oga0l|`rKGnlka9~sz$Hluf;KHs_E>Ue5hmWNL zZ7{dhVC2|$cD!kpVzO_lq*Wg?MW={PjRu6TM--@xF4Q|4(ugpxn}SzTUit}hi3G&!sZ z1GBmaZ$huC9ZC!PV$VKIC~u=|4AS@Kk@r;tMW)~=1awhdCKY?B@cc@@Ab0UNoG)?R zJ9Q||5*IsOqzBd2i+R!g`3{o~`#i}G^*W1Bh6Of|yo+Ajg+F2gSUb*>G@O4J6@L+Q z_a$C>tAFB6B-hbIw-~3+Ch(ocK7eNyU$h6G^o}KXYWQ8*vw`MH6W}h(3}V1OxdFp> zR}OCaJYzACPLYjfJ3wkE#*MwYzrA-kNGJ0iomN&E(H|ehIn=}b3yAj+bx=ndp7g5# z$_cYZec$cnh{!%9P#{xkDrJ8pW^1cXzOg{$mD90+sW3IWqxE=er*nMre&^M(yG9T- zy~)g}46@8U36lUyhx<)CK>5HCO+f9s16OO*i`$B2SdHxw2|;3gwq}ufH}ffdkPH;C zPR3lcdAOdUmD-4<&=lD+Dt2}~(2%8IQ8-b(-_EwZr+8a%@Z42M+Qolqv)p0=Q;=nG zq&V46GGZxQe!NHG_1zXX;FDzw%pedMW6-`r_eKq$Cu$+GgNr;0gbyPtsYJmr4 z&^mcfdx_8C!;cY1U#g5~u+%_gMP(anbSV0fwDj$^6Wn21pXF8x*YCjLMb*;C%!l6K zqs*(wd`1{=USt+-&zJf*_A5nwd&_+gK97x*iCzsh%=zSK$E{}DC{v&XOM+X&r9H|x z#I>X^qcbB=Bd&kg3XO#*QfH%dD~$819p|7f_h5U-9OZIOaZ*KjEeJ;@q=ykT_QRKR z;aHushz~UB5VmoH9VfT2e%QmO7woLl>#*)3V|tW(UMuDCShfBPAKN5=pGVMo*;I%W zuPZoRn0uAZQJ*7jp;A2jY|FVCFH~g95;fQ^?{;!Bk!^q96K^wVUHfFDMoRUu79`yR zv8FP(2``(7iW(N`$!invD)ugc_tK8cA8%jNtjI#(4$7SVT1;=$CLE+^jR(bJm=VzI zY5jAJP06Zd5;B4}Og)-Ot$&_qs}Htr%g&=R0F2A_Ccr+DAF727+(V2dqRtq*kG%@7 zfAYOy4kv$YBTG|%-shu)Y|93pt|A)9s1WaRSKiRPt^002d?K$(zP-nt0kK4+I$Ehb zxVNZeDV zTTE)gWnwISvI7g7ah_AbUVpK}^P?3E6#`AqCJcX=8sokv8wl?@Ws}*%D8DGS)HsA$;LOlfTOG2@m0PT~OEm7nzi>hg0T%TsNY?zs)HG2AR_VZ#G|4K*%x1U2z)ACy@-zk7 zvNC_uOzIWIFmdXGNn{WDSn)LlbzMY1g(jJY3sgkLZ9dHx>H^D4M4-zc4#h{+GUeSh z_{kpaXLH6!o&iy+dUpvF#o?RW z%mB#_7FX)e#Nz`IHk{VtwC8x2$P6O=PK)7b>E#iBz!&9bSuPx2KvImt1Soy@anR6^ zo$q!Cl8$7lp_-*Q#hOT9b{lNX$Y{=Uv~QB}3dtNz@hh^ryyTd_K!+Ksc=5$ce?F> z@@Wh71spJf3aGDK+Cr{#?PTW8A&hb%rK6JL^YsL=PT}&8&5YIjQE?C@p79GF9%u%k zgn#i~Y}c`mb6hiqN(f@l4_hzGg8MH*UCX9_<0|qXO1%zsx}D-E=>`qRve_ho0V|}Z z1?$5}W^*=2k_07ThpK{8NCA1is^13ep~Y#_xXt2*a{{i)5iX3V7F@b z;9!>Mi4;s7SNdYa)PuSAZPZ+i^B1+-jMbp2?v?N1AKeV;Atx5zF9I0U|1wKEXls^}MZX!Xh=l3va%Jd?|qynJH?+vlEI7Td$(9*(;z~@*G5JPvRjYAKzT~$XY_CZhE;0 zO};_Q>}-ZDHsuPF9FEs~?)}V3=;emVtFycnNfcISj;;*aGT;UI8v8ESUroK)cB0a; z5myKNGJp{8j`u4RE?1pH(m+Fh_JIO32n9rcL6XM@4(Mf7 zVP7)%0@DjZyw`7boVCK+^ZQqJJd-HF|2OP7a6b%RQA|Xs33XB&K)s*zw2L-XLX5d9 zDm7ayMd>+(YwWXdMmnB_taxwqDv9cz0yKt+pCVgm0ump45i-&&^HjHgG4)d^_kS1! z%yX!i`)>yUqs?buWKa5MZ~rz3*v)jC*b%TCMLF75?3%@ftasKdU(rFdMBOYf&poFo zN1o}oC3J{&wGwqWJ9t$f0(SgKr#a@(fuyz~rtyrbt)WYa)tX!nmkDRG*gI8^G`V7t zOz1nVqcDrJm?t|#QpiYu6yD|uUQmfvq}J6@N{>sn_ILJO2UJ6n!b!(^aNmvzF+Ixm z70RGwDWnVT7wavp(Qr|_Hc`9o@{5$w#hVkYS_49sZX!XZr~=Yh95w!-~5n_V+qQA-S1VX2)e*yt#*?l)^5-miy$bz#95f>R63y9_wC z(WAnnnA4D$M(#DhIuBO&v}NCwE`$!WlRKZ5gQhhdkFAz#BtS1*lBDdY2CM zC{Cei+UAfzPYDpH;9GX{UO};2ZT@w_FA%(GaF77O+X??n;F9+$@Fa(61&QhL5YYBT zN^z@a{^Tr)R|Hspd5{;|j!TF;nK5LHdHi zz_VHg=Jm<9WW1g=zW^&V_8+BZkdFGaSAi#t;!F@628&$sKPR&1S!jg*WsjN~9QG77@L5 zP4C<4yq=m-2drsBv!Y#bpnCxRG~RFvi5xjnAh|L7V&?{F3Y8hX#UXs6Px)!4HQqs> z5v@BLcZmIe?vZB_;;#haC?|__-b37Rn%R%=B2@9?j4oygD90IU-IfoFUJCSl6v6iD z?M|Q^+6k_579J&H^7bb1p9+jSfw;sGUADdi>wfk!6PqfDs^4D72%=@DG$lYxNJ7+} zsmxe0Rt;6I2tUqe0z3i*OqGV^Y3UJg(;!#x;*(o{>8jB}*)1KOYiPC5Q}_}EX*2{u zP?a{>gU8dgmXch%ls7PKqnLtyr61(%5?h@PO9T$fc&l@rv|S7!&GfRGexR395K~CtoFh1 z*eoG`C3@Onr|&L(s9wE~6|60j3w^_Z!6g9URw-Jd;Aift1Z7I}8EqTp32*|_UC z#3yvOcOueBYJ5Mm6a2hN0y0+(p}W49!n} zH(EIRBhayCYd@LSbQvBXKF?QgrRvU2F&u7Q31w~T%zL(=b8REyP{SK;YV-&f66X8JYe29 z8%=VN87a9%ZP)%7jtCsDY^XG_>9!+j`!h4-{h3@&D)iZg&y6BKyM@1oz1!y*|7X4IzeY7_D z{9L@+dn^nk7n8gIzAv(A>p^iL9#sD3DpW&~5VrbbQ=4?6C>$L1So#8;T1xXVry zB^5=L@iC8uo}qa&c3DR<9bu&lci=cDFDe|0yuvj;c#2as)AaS?v>vg#QwT`?GYN%i zHz52{)@oN{q3^GE#9aOPw$ZbH0r1&n9C}moM&A6;>mz+{n(DGu#`c;f!uf{r+G@55 z8-MKXCn@!(nT9q@#FT#07p%sS3}II!tJ+CQZGeW5q4-GUYT)^uc}zlD$|Qy4dcY0p5s(3eC%BlsY|AR?KwhZ2+?Y~w}N8j#r{L> z8mk}jB;T)aVy0@BFQJ|piM6nlb7-KN7lE~Dklqv4qnKIZZKDSwV4o;4(!(pMIm}d}CG_+neDOQuiA*p4>R*2EfwJHGqj!G|ap+)}E!*ZU zqA?B%t~-;BngEm zyvV_?*#MMU|A4p^h!n#jt_yu>2|B&=XPJ_Dk`7|w1{nvadTHQ(RoX)jiu_AX;MZsx z$Y8EP3;9p8rgs0~FB^wlPdxF21VRtifB`)|IMCaFnw7oY{s*bI73t4m8-ov1Y)2Ry z`H`Ddl!k2!@MB(N;N`pop87~wYCgNkS%_c)Lm5X^DK<$nd;w$&YZwf7$HY1A`D)zV3XB!mzG{C~Iq`VeIU zfBy<@_r+Cz>YuUTLZ&K|cfE4%BN{>2M-La*3ZOO7PMHmG6h?tKQ82%T1hY!vBXDJP z>)2&;vuR^yB_GNI2Zo@00z9io2>iQ3BULRVA@KKffe*;-nv}&?Q*eZo)`tnnZxY;^ zZYw1ar)wG43IxU`3LO_5YR9Qjt``o1e~PN>GVrZt*}>lRRcatW*~3#hH=)K9OsE1` z#{~d12p-DsRB7!}ycwyj^bB$gxq>KwH1ZT6XkeX(jEc%O3-GwB26gqAzODc}m0N$F zE;~EQoHDXw;2#(TKJqBvY~es{Z$@hSt9^g7Yy*KZu+A(kK@ zZk8Ad#H5bJgKPIFb7S$X$lGq6P{RgK z%(!y60+9`fu1<)xC;~_Er)gOC88FuX9JB3P~!AXY=qIkf4&c_2rJV_ zJa>$E9*-kuWRuxGmyNI>j|l>|l}5aYOwljj9b5D3vrDh2!gob|FU>eiXCbK3lbhAt zE{@%5>&V!zbSTwEIaHM>DskCe)lvCynP;+lbqRs~ZT! zKjcr~DpB2o^$aNWKnfcH+UUgC7~lO(RHzheRFXkeaftgXaLya$#U+}u$0fAJOcbiwWN)r~kB zD4nvX5tNB4*WqQdB1m}xOV@c?LOc&#gk3<9&83+JNVvK&`ldDNv}m629_#@&WJnEU zcI7s}=naUvZFv#yu@tlO+@71x07u!lBKxM4S`3DMc;xn28P8+;&H}Z)%3zft1sV!> zGy>Qn2asrb#Zq8^e>%_tQoB}qkG(|!b!;IL&O;^Bp&v+~q=6b_1}enXs`}OY1|X!) z8RXKqA1p&@K#W<)EC8Jdj(t_~sqX;wqB646=~utVc%aYu;G4hFAT^X<&Lx*&tW@kS zE;bNqudY%#Q(b=UiXz?Em6IF3)O*A^YHdbb>?ksEJVYYde~?jOL2;WJfT(5E*b#Vo z)ELrne)CX>qo@G64tJL)zz9HbilS@mt3@F@J}nJM9ty#V3=9k4(>`fl!I=~GTt4Tp z02UsPsV=h~jgiA$2{WKZY<5}a{};-iswx=!HTA>3qQaSg{)-wK`%u=7KMdPQ6OQYY z!4E5YHv55Ie@t$DBtz)z6fE|xPanxOP1H{8^$z7uFDiP`Yz!q*0KF>B*L-zz!vONCyo3|LXqHWHv5xB7pgRa? z6?*SXKr8Om4om^NCZSRQu|bRgUG?Hbhzt@lpig#b2Z|vb_Lz(qe40|IfDT zvHw;H*kc*~Rl8wNkzwDmP0zfi#>s~|Q0)b>Fr%5L|~R!75_ci`#L zlPiA_!=eoPVH`Yj`#-kDMpcOKnt}K-w5cMKe^X+Wd~f!SA}fF2mNeNwGKpX)r!Tu$ zy)eC+QOQ4@?$>?}8L+*EfJC(|&!GZRO+{h?u%T!ey>eZ<|;rqTDqNe{xu?FA&cQNe(HH?5+r@@Ocm-dy-f+DMzSvP< zcB!DXQLeDqpE!djiagzq7=SZbQ^VCE?}4|t0u(6)baIVU%c70Y2^c%QaCAWP#T+}S zMjTbe*FWSVVj(R72*0j_hyR5Gk5~@ep^s$N4}Z5=|F>o1u_Mbc!~PlPqqt!{3J>#9>>9)u0m_%&RDd+= zC&3WQ93XEhfIwp7WObN-3y^QLY<^E-&(ic0uhyo+^@KvWO$NDPp6M4tfA|Rcah>_x zR-CW-C}{tlu5feOb1!{$C;MpP)kAo9ptF1$F3YEe0dV^$M4$c=wvjSigSJkn`ss%Y zc4>2?dl^fvLap9Xr9^$Kdw@k-q0Ro{;Wc|w>wuz2p;w)&SLIwuXIw2zB9Pfi>#Jac zhjPdHQ~NWcMWPvqcERGut)u|mP{!q`Y7 z|4O`$eLrhc@;5&}gTXK73`WL}^Q6CA&!jv-(1m)1Dw(0`@L?r4?WDZk{Cc^_R( z#p%rsy=B18jS*BT^D?^yr-Lppj-s=JI7<8ZF1l@e`ntSSpCF_P=HsTe=@o4s0444%gX|`XK8mi$ZH*Fb61OXXAgl-cXAw z_IEJO#}yz*E3-v~p+r-&OsB)Rjv(I%K&<*~{%)L40wFF&R(L%ASzmR5;%iM`%I?`d zgK`T_reK&ls2Xm03lXc=B$t5Em=9ahX#^3~d_=Iu!pAo`c;mD@V9NwH~pOYz-`4!}kYh`yRJj+qc8Kb-6h_q+hc z+7rV7g0Kx{*au|n7*zVU&Qs-9OpPr`6h7jWf*f0 z^Jx*A-zYzAf0WHH32u*B{hmQ!e9eiD+;E>lvmCC&6A&6>J%=nUo98!x>f_J6-e(!H zGQAX+vgqCgZOYngbje7a$?Kkbot$BRY$X`erbm<-H!-@!vo}MzZsffzNWNUbk`Og$ zJ-N81&@dDo)G*N=Y${s$TBaql{Q~QgJjD38&7w67f0APhdawj4z+$&u-M{Kz(MY6f z&LlZmF`wDDWf8IY<0;XBpY9}ErW9uzZ#E|cDV~D+y|6x3zEc|_l5^aqJIRu{H!r!0 z`_ASI*W&v8vc61mfMk>1;$ReT&08h`@^Idz|A#PlLpEnMp6}d|DDN3zyavV|6)0nK z`|>NOgL?ZcDS-gvy-`qQn1AjLc0GFvHe4l_@J3V|L4b7xT6RvvaL4pVnC%r`(w#QG z0UUWc@pa1UAh{(vsCOEvv-o_4kn%b58wDh% z=8xkqmz`IC>X*&!{nWQlWwoFP7C2J<;oeAL2I<8OsccphxfSN!kSJPbusTyrfRNTP ze*}`fi7u&JwVAkuXaJ0U%9=h_B{iqulCp6?f_xnL)47muHLc!Eg-xABDwK3Kw$R!? zFZ;d=%i@CD{Qo7DEP_3d4Pe&bNfiTxB79Ko4Gu`wvB80BBeyK*{8UnE>UGe%sxP`- zz`LZ0Bzr?a)GKm4!UeCtasH6mh@RGwf4q|{kKXm~yRR&Ns&Bx?&y~KP0w5`izm5?u zAvUkHq-yk4i0`|-%7TT!AB|pq{0@K|W0?o12pH_~Vui(}R|x1D;4*p`0!AGRj5(LZ?j1mYZKq2Z40AZw$jfPRj9smNY7EBSt z834rX|97e57=!7*Ngdl@Y=`nE&img}$0Z7kol%Mj)OS!`kwx|_l=!d(3$Wic zfB2nIDjoaXNEn+FatHF~cN|x7f2*FGa+8J=Ij(xI8ZOFL&YO1yVjm_?;V#6NgL)wI zY3U`#GcEpIs<;85VJJ)-7itm!5-bT&PO9C2ZJ3-&>cSVb)Dzj3vK7ZABe{wfW*t4O z(#|az7uf*D%R$B9gI{@8Xb*f@`fPyF7#*q@2M9V!wzi|N`zE+vQ^Y$Yf7PJeU?al3 z)tcB<8el51&ZtvbvCoXNYk-fBHpAacB}EoBk|;Cwj4WPvN)bU^P{PtO(hpG-vOIh< zyL%%6*!&Q+xM5^&17jr_sAP?*wX1xm^&7(IU8*vIZo6wrGBUb+GO)S^<8k6skW2|< zmiOg}kVb_A6^t3PXajMie};=~63E{Kahe3w8hGIk%gaY(;Adrx4m_i+47@9==aB}x z;Y0)bAFlQ`5XJ6D49`;dB#gTGP<0NTF?QBW8vyO$x3WeRNL4WT%Mkse3l_TfTuqOH;fn~X)^1$AX{4V3uzF1R`$P_Y~ z4AhM=mh&H&O87jC3>evSp8n1NgPC>R+~AP&dB!CSq8DFA$o z6B+G|PTdalDUcXdEBkv`rxAWO5HxK2jA&H&fgr|=AXXS*f4v77^Y@`0GCKCtetgCr ze{r*h%^1=3$oxhwF^n4{@qB|fDtiwCn~}9PLR;i-O&hq|z-7#4Xrrf}plA5COzTv( zJp3$=7@37?H%@F6AjV}Ts@y>I7`SwHj6_zeP(}$OJP^eIB}04YVmz=`12v3K_mEtd1`48kTna%%-F=b;lMp*+@X#kio z(hWpo`&9!98VqDV_R|I$6)vIL0FfP2t}z@#21)HK)!1AK1|j2h@pY)zXy8v1?T^%M z!s=_Jgc+fRMv9nGWFs}aWz_DtoUv(mT;6pp9I4@qe^I;Raz>?PBSl3nWtp3Igqc3jw`3K_o{3~&s6<@?L_yddkx0Exho9P$lJW$Ry zie&jk%dQ#8TzO#ZMv9nmY9lpVF=lq8gc)1sj1+OuwQ!_{&$f-!aXG8sbfkzG<=hR+ zZ?=t$e{ngh-+EYXGb;B(j@un&{AZfVP#I|Kk=8B-MqL&MTGx>~YIiScv;Ip#tpC!7 zhUo)~7W9Rrq0u z0Hauy*?ztLO4g#^7+OpLelXVbc`8e>GGri?7296(=idZBtZW0)3O{~GW5b4Ipyd(t ze-qn528f8ZbdX7+T@snYc~h?0x!L`YyRMen|I5TFqe5e-BjajC$n{0X#Cz#%bJF8U?DK+9ioKfnf0i$I&gZ6X6H34RDq+;wzae>Q(i zi?ylwr?kG@(RKH-Sb2uy<;v>jzbem*!rFN?%0PswpE?|mzpwp1)2Jf5@>e?GfAWmQki)-8IF~76*3OV3hoy zxaVM?VWThJosF{@;U3x#Ftf@T4a)ywIomcs5n2L)eGl2b4j50&5B;VeIIN%6*f{!Y z`}Q8L?SU1yXvoGj=tKE3E>`_ezQgAae>;92Dc|w) z7v(#U!s&AJ3Cw~g87RA!&m z@pI610Dt~ovd^EtkLeBec{`>bKac6h&tv-W^GGFs=wUKa$s;{{8>!?EQ9M$~pLESw z{*fMT#`1sY;j#Q5qF5Zue?O*sj|B0rL@@YbhzIsJ3%Ue!qIMzA`aDfl|)3Gx?34E{_JBOsR}Rs0&} z51T&_#q}^B=-a3sf0<$U%@1!;&{`u^EXqJO68n=@t@llkja)G)*+Gl2QB?6SZ1KPn z?-5YSy9n}$+JL}iV?y6{)GbJXkN!R+sm$n4wjPYlVN!CK^8NabfJ)@@=eq$EB8xxY z9pDp1`}vN6Ns#mNJp!1ZU)G=gVL2~viOT5r3>yhfzvXSx}<~tc7 zT(eiAI`+JR1HhaSx?cpW^Y0V2*8kOZG2nmQ?i!3V@E09x00`N7Q%0>a3K_%sKhnVj z1_KpK;gm&RN!fn+g^cyTY0{BEbqFJUhd3yU|ALV1{)QO-2O`-3HvWtJsOd<>j@0RU zEaynNIv6ZJH-s$DDB2XmDa-esGE%nwCSxtYZaoYXtT_CR5x)Pu{XOhQpOs_G*X2|H zA*=r#9_iY2s5i>~munUgwL!l_xqhqf6p#7B82X>z@j$t<{c`=#-IRT|88xgAWO_H$ z<3J6w{^*m>U)!H7v2tqw(f=9R_&bV{!Vx;YFZp-3VHXj(8Gjym+elIrkjb*I#`}VW zEJP3j6m8mTEfZUcaH?7Q)Kpy{ zq97KdwYPk8Zy>2rELz0SSO_~gxUB*hN!%OJK;nHs&12x>00KiEdPgiPKlRD#-{9S< zkv}0!6Vy7t^M5&CVNR(de~~RLB%y~5-en;US*Xe46BhD^(2>V|a#SFuszO%Bqi<+K zy%g%z^ob5ALNL7EF=a#rg0L{>8zQX{NOgc6?Nrnd5ahZrJ6R!!RJ&o+1k|?a8jwWO z39GxOrS`%^@|HlkY!e-@7qxi{Qmp8~%G}`PF^&8 z3TBEkUw_lq&8OFe9UiMy5h?N}xL35@DQE_hs(!f(Z>D0$Oj}C_ugCzHoHRo82xPL3 z9c1*812EFf;%cxLuo;6%BWQ11vzy!n$UqDKi=;JF_&umz^41 z*!)e|^~HvmCCBmd5`*gx1CoBikFxgTMBkxfhksC}*>`AmqsM1gNd@=)cAUOmDrm|j z)%E#R#)EbD6!q$ask&PSqHyKX7|>INu9-Z?1j=%>li3T0)Hnc&D;HA;aCZKwk2HtD z*pKeH)&ktnFCK^UN0IU0A-%uYk}5ibGULa7gyl060IoEd>JlKS0||xP0VB{x4tQQ= z1Ak;b1T6gI)R}ng)d7yN1Cb?@%vd!kk{O}GX2N@H>nzMCChn1qLfSz*iz0X;`W!C@4gK+n2_-GqM(d5&gn#K@jTbZ%re?82=|Ad8B^K=4m zKCE-~lqO?)eMF_M3m=V#kibA#J{H8V5#asBy1Fi)kt)=_O9hexVpdn-#FIm$bPo25fU!f8&*u z9N%$RIILE@HIv2v;>n-yf>0o{P?0cBP`ou{XukkhN70MIleQwiigA5dbe-s1}0uk_j zL8#HJ5{fu`RZ}z?8M(N_i{C?k9Lf!4!;SF?$GQo=5q%iWln!l3|6Nu*nD)a@Uc6EF zI#l{gG&<>rY^*MlC=8S|WcfyRR`ufk{10Wx&=;3EbJd3~G=(mfwIALxsA7#CKJ?@- z^6{by_0M|r_E}HA=)kL;zw8X#F;T?KzbQnv|L~W{m(z?(5n|sr-|hT21ln&Va+v3`HEG@u z?0Tp!zUDWtc^v;_ENnlV=7->-`HR;IN|_JUi!Zu}Rr%W&e`t}Ux2t_+X5H+o5B*E3 z68iV=snPBAKJ@2({%WJE|H_EKJHKc>uD-_qMX>1xN7I}WFp=!cjU2QeR*$~w$nO7+s-uH$!NLEj6S=t5V^4Nlb6at+s=&fEB|5#Y1f8> zXm(`RzV@rHZoIq1Ukda?U0NTe()sY08|_3_f8dmWiGCPMh7H(y7%tkani={fUt=QV z|5|pg{>yNY_Uk<^g7A7h=r8_Ly0+iBxVwKsxcxUof8PH2?GL#d?VB6T5y$mg=U4v? zH2TU{gg~e_zBX8y4>zv9#uRkz*V3+;a@os>wl7ser|ni@zwK&YdoSCYWfBEa!B$OYvH55NYuU3E9#!b`cw@T9Q z>-$Upe1B!-zwc*s6M1FxIhOy2Jf`?1@Qy$F;Leq?aP=!;qIY%T1=Fh)%tJy|tMyXm4)ybS$3L#~7g@+|%=O*z zo6DTbS^Toawjf0FW?!&RI&XH*IAd(Lf1l%~`Z+d3zc@!IF8{yg^W+c5YWFe}U+ijg zv5OyWFumx@Z#|-mnY?)8{+}|A-+GX=_#ukj_2M9_FB7T!vp&NsAEMLlzC@?qMWXYY zfBsv2gm#&^?Jrie{u~blrT$G4HWqjF8|aTR?8aW--R$2PKrN+#zj#UYOMuqFe|KM{ zG#v=#Ds$U)uKD3Uf9G9o>xS1c>!+-!T%4!h{}+~C|JAp;e?88R-&|_*`JUx&f3J6| z|3!I<>~&?d|L3gpUwzQN2;={NmbBgNKjSLvfA@Y@{VonQ5N9UmLP1V_p^Id0kN@D| zI8CF8inEu|r50Piqe}9mA{Ke$a zq8zR zuHP#Bbq>drpS1r;SDGd>zh19D>F?{!w|se}+i&?#y8D*@r2B9APx|>~IDfI0UxxD+ zYuSDo&M$xY`ZAoq%6+&07i+nFxBtspF5m6{#b186|5v%&m$&?bf1k|QAL8=UclIxd z`9n&|-!b|nF+Z$icKi^RpYqU$XZ&6M*O>m2n4j`_{pBtH`0hY|6PF)`viu<~KeXh- z{oyb#T9P4t6PKT|^AB-uHgg@bV!K{9Rtz`#xI&)%b?&Iy*R?86_Pctg zZP5%Tq4^0nn8ZpUj{DTzD(Tb`lYB}>|l9aPu8zBxt7-K z-@M@VKjshV|D(^{Y0m!hbFp9Sz5b*npH43h zQIh}Q2pRD=d-vh>F2)l3@}AuA-|&R`Kj$m|9lyEJf6G6sPf>!Y`wD`)=o#3*71J zO2{vUFTPkt$-nCTz8-2!>>rMh9xiV2%M6y+dgsMM{@?BI&wk`ToZpaGKDUm;Cqr<1=2^ z^2-10-}4{%OyR?V_8;DH`}UvD`~L8l;&!dq{MtX~r+;4SNuTBR%Tr#>$v)}(c*Sh`#-}z7KebupSA{lCLz{+_?G z0Pgqv+&}+|e$5A7Na!`>^lM(xDrxYVgu0hZd4-QdXfCF0ondIUqivsIN?xL`xo?|S zfB$pFQr~_UN&TepznDq>cfMWzW+t&;`KaTy|Kg6l{}VnE`(NfGnQLD5`^dbiZZyqk zum6aT^#0%@U$ot!{+Ex$1f4}nnqg_7wrQz62EBB))Tqs~<>DzAGS^9&oiWqK$^x*< zSPw|nO7aqoW8}jIypd04K1LWg2@D(vf5B(ilwTF)?`>@}%^rEZ4PL)LYDa>3(C5C&4_eFN-t?&;NCX68^$}ZH=h$jSs{Dy(ooN+kj6Aq^3@hwnWJ#dbp8k?1 zuzjGA_iF|p%w8gIPgkXko8u7^ zW+~rNYjBXvqdg6B03Mu46x{Slf0jftsH$7Qy5(+xXY42WEXI^AZGS#RS&M)w38;XOlj;&z~#vK*mLY8GHWmG_3*93XI&f8CCv?uHXB z>Y&Sm8hXQM2I$}L8-9l}DoJ!v8WRq?S*79m4K4Qbn6mX96+FKc`W6TBtgM*KqYsiZ zAHEI%2EU&-3mO}SVVj5RKU9FGr6qzpFRTosVpB>e?Iq+5S4{(xW4d4wD=uim^YD{g zCB~Q8gnY#5UXZWz%nWfFf3}>VFYUq&2rkC&g+94N=a028RcSHO7qNJ`9kR+4!|h^h zS}pV|dN0ap+{_y%L1^*z`<$R^3jAkL&_1FE1=neB$*QZQ&S2(yAP06K1t7bkyY@i1$~1yRA{wk`6gNG{YKkM(feM z@@8KkZ_dQQ5r2Oue`*CD`q@8*0D0Pv8i}GxuhN)ZN|{yUu}9dX%l#3IBTQG%E}W5P zZQ1)W@Ld8u=4_yZ$6bHDmdzfQNLz^_s>jTmh4?k3Eu6_sytz7G9So0}HrlB3h`7}r z;4NYYA;nrSV`+-4j;}3AP=}th^^F(@+|OKadiC3Rzh)y1f5*OOMxSaZ`t-O&( z_0rN&T}Grl=^+_76MBtgk84(hEo_F{(;T+K(3MioYwKd8$AO>H9RhtgG5dg1ZWw$6 zNCWHm0rl5hhLTaV@ddd{rTTK^F-IqXOcXynkf*)x8m^?a5HT8(?Jh*@iT9}0Q}7Pd zd+>GXN7tROe@JKbXHpX??C?i0xvu!`ZSje+`M4{tR6m*<-0d&jCi-}OI&v`UiQ+Gs zr#VR_AQ2)Hvu-MCzreB}qFyw$sxE1khguh88#G3CpN+wo4PMW7ThnxZ-*-IZi~SUI zmNaQmzxa7F=Ky7B|3mfIl@9jA~J)2k!RJ1YD0(g+SntUyBZzqjo3*hBpUN1-UWena=*pj ze-N9=tD^>q=oePXq?^_%_k}0k#lW=3(#EAyLDBQ*$^ z7yD=qD1N2>_I?b7(ovn@pQp0m#ik`Ml2ivdtajuWCyf`+JfBj{?+M#50jTtGUg{Mm zgr;0fY86{gM}|;J8-*SbGa5mIz#d|Ee^ji|H=G&YeG<7_g$UM^_;wDyvVoK0Uwvm1 z3Hv}}e}!syIunAk7qm{Gl({uN-qUXigo%n;ZeIE}PfJ(lWB>(h9N3!?O%SnMV ztgMA8HISOrheNkVr6i+Z?c3Iuk4Ru4dXF~!>2 zq!T9yp#!Nk;*Z-Fk!T-9VDlD%p!HD2^YH-n%rc8$<2iw+Ud$BFn{}OXjH5kCm&s@; z({D6LSGvNg()NId2UOCJceRJt{lKWKq`lBXLV(np4NSv??raeb-YMNEfBV3aT+=%J z_U!5=j7G~d<*@RMy&6c*k7Q3#dNOxUGSRKYaY&J?z_)lpJZxV#2g;PPC`z|! zNgX{LCRJMDv_UjP3#ZQef3bE|bn;5yN2hYoTioh9=g5q+OJZqH4gKbs#(0Dlc$QY+ zH49yEVP7Bzwz{4z=}2W-evhIey*$p+z2W%WN}Y0KdJtxt>@jhBXGRyD!?roA6~5^f z#{)Nb&6~tcn87?ihRn%3SGL*MfF~7T^LA2hCS!Ap2p+A>1Q(3`St_RaY?b1k z-Fhq2N0I4A=0&OZ@@d=mw11Q`lfw{&u4V}V=G$7|pzr?XhbiF@>P@6IHV z+8ixerjllY&+WvQ53LbEab+D_zA+?o*0gZfkD6 zl5DN_&;8aOw2kzP@!BAsE}pf>GpQ#a47^#=*`yL1vo~Y#_#mSZg3U*Qz16c=vA{6j z5c}@Z{++{fSZ@y3dy^Q2VYYI5XXJNOyOY~4B<-zNhzfY=x;1F*q)kRS&~OTQXS?I> zX?iwyTa7Vw7NYEMaDV8=_aO|9bCk7bRxTmIlpiZ-rv3{W#JL=W#x z@2NfpOtmxu?SyQexzn4I9(X_Wf>a^x9uI$3c2a?kBJXlENv~`u@Mxiq#lR4FwR=1nWX9cq$cSI7O1Jk4JEd9IA>G_rD~(iQovyE zAjH|05$k)`a?Q)9 z*5$ckX$zthn#~F{Q^(eRJ){VD2YZo|ppwd%mwz&tU7siozuf}_;}bRRN;W_4M;+|? zq{anq0OKh+>q%ZOvw%%#Vq`^$VJ*pd8~9o29Jf0y{ zygVapNK`61PP>Ngj#3eue`77cz1s$I#I``)wVJYCVB>YOq$fT$aC)dh57~;3Sb;_z zvBDZW+`1+1{Q$=l{jR4m}>du8z`hu}Ws3hO4? zEbfnZALX$*?2Yn71t$hbW7!^6Wu&)VDd6z2rQNzxlqky?-Zo9BhbBmggSHc5>VKpY z)gYuBi?^2ugk<%Ea$S7jwN+F}T}d@z9%m zeWET;Y!F6nD|FtPX`QFY>q5RrkI~_p)q!)um^sMCGUS1Yo+U(nKHL`;eSqN|Uch_s z?(2nkYYo6|RFUX_vkXhj0z{d$C_Bg*jK_%0UTq=s zkPvAP2ss3#dApHBo9WR&21Q}wSW%(PY86i;-y2gPU1~46NgF*15OHACB;<)%4-&n- zPX_W(2JE}&-aXJfGof?tCMHN*k|iig1ezS{&BlM@kQzE7{D;FuuVR|bA_^!phBE7t+44X6PHYVsh8 z+hJw5j5^HH+h6xH*ik(OHd7Re$GeiVQ_p2KfMZl+|jK$}t_>wiubQ8CnMy=&s^ zt&J%+QQc%6qcI_+{dU=-6etX~bu1pMo6+!*o2|N;GQ%brcN$Z$?Ftkr9XxZa z(xHKST9qRq@iTJ0(tnRBf%N6Q4J=A5_7&9*$!;CZDnbMfh199BFwjJ~e?HYllIAJ% z6^{wk2qXBX=YLvh#*_KoYaK;80C_W14kpfz~44q6DNnQ<_4nt!?D44V<;xl6Iv0GH4!juPt( zQK&BBwl?v$E^xnQ1;6$LzwUzJ{d|G1seEa4Z!@Zxp!=P=ja`SN7qFgYwTH%NKRkBj z%~X?A7ZKXz{96bgR>K>GVdJgcFAdP?v?tAa#`xij!L2cdPjSv(c@rmY^oXVAHb(NY zy0P&rbblG3Qg`DsHx|f;bZ_GmcnXn@18senXo_!f%z#c_u)|bF|Ih!NLe8__^E#Ig z{GUR-?&i#K+ArIFBeir&# z1-qbrC}fTLmlX1W{Xrp1da0&A6f(ztUi{SWRDaB#7pfl$S!?JY)tDKgOz6=5heBSq zKPlvG!>b<(S+MLsDdc-G#Xl5s*nJ|$UR4oacA>Y}4~5J)^FN`Gsh|vP@qBtgnzzS8 zkCDw3?=F~lOM;{6N@tm+L|%>GO~z`P4)S!7z&QTYfBs>yx8mdtnvZ^dKPwElP1m*d zH-Ab}Ko@8X-#>UuX7{q~%8_M*lsOo8O)Uff3x;gq7(6pJV{ZK-@dVkCI8Urz)X!s= zxfmWPqdnoiT=9;X4ZTm~ox18FTAKH4Mg?>l#QT2G1}UKukCqg)NW8gl#m%jNj=LG} z#>O}Dc|NTbcmx)*0fRuc@AoY~6?J{-PJf7mzSF8%^b&3FNZg&a*SR^;BaGGEpW@j? zyy%5>r0sEfNoHh38kvi+B_~bQg4{I3Rd# z_9$P=WPlo)@VrK4!9Jfn$VUyT*RfKE{v>2-WgubQbhPAU)i{m_&CK!>RAzcFWq%=h zM_97Fj-29cUUrdCbPu~~lO3|eIlwAb#O52ApWaAeR0bB3oGwzWzJTKx<~BNB13zkQ zU8769T=(8bEFvdkl;z76lo9o$i0FM*G;3rl9D#l{Jo~gE`+ftTouP(6=Wg~$LwB@` zQSx;de&YZG#m4lhsQHPayk z^d$%sMSINr2|kBPJ2jWe^c;w6x3$CKj=ALs<=l3Uq0;f0YMF79g8J4BD=H_t1*bWJ zGNgt9lUEYIRvL3e${}jo&#kFM9(p*SG(>Rv4Hbb&)WVae_5R=)c)m9T{C{3dMmLT7 zuuyjfLzXEI@whPpL))-?NLvl{?+LcFUe-WKgg@gNMA0>!&68vLJX^5gRctoDuX2Bz z@VOJi;daA-LVEe5#XZD3f)_MWJA#YnkFj$!a3;#WC&i`5<93aF+E$&yPfD8i%3QaB z*1jIx!pO;!31RYaJi2F7^M9g^CVZ&ieGG1mFd_pTEiqOW`|e}5%iY&p*8pZlZAqjYV(w6s*$MAUXT*L6aCf4 z?CC_@yh4QwzPGbx8h6=Ou8x{6HTtDG)e1fy8 zXK0Fl3EW$8gPc2$bR4efvm@>*bidJrMzmcqbvA4BbZ&mz>jFYk#H z4E|`yn0XOB)H-*$h=2I89x!PNS7@yR#!kU!fg+hpuWxHN`4(h~gTznF01^;bbtXoA#@VoMpovZ>upiW>(k=n9@#< z4ERaHkuoIu9`~nNW~ZDSj1_4_%~!z;RAMU|oJw2gYL4s;kbj8g9wbn8AMyDqZ}*!w zJ`kDSPm~8K>zRebV>BwF4T>IA!%~cNN}F78V!&;!WR5Q_Mm^Y`gG1LuLG6a#G36ms z@M-qxz}()ZQ?dt~Swo`5KIyBaG2)n~a=I@?>-(r*Y?JtjXGKN{P@v@c@@yicN7Dv} zF->L6?448Pm4AE&xz%-qn{qZouOLSPq%B&DE7udVYr&xaIPJ?3uXp%jD)llUu>9(s zuuBD!kV8_y1a%{Biug>TosrX&0QoD>D?R9G77{!f+w^la9y!Jd50&W)WDcT7TOHo= zLMTJ+@3D(Oq8vX~2}xshf(uXATwygG+9#WiG&rLiH-DCf(v#5cxkx2m!21!-Z~?{| zR73-i5F*bc0`jcGDC$(kpV4Iwk)D76UTyvaWxG*9!wkKaL<&3*{+_<8ZgsN9MknC9-^mAm0+TJ%tJ@RR9-RP!1`Vu8;y1wVdEX0vF$S)r(_DtzM%`9X7$9gdkW6kW!I70w;R>HZQ?6qzK^gDGzqz40!)@77^P zLP~0)P#?qk^a3Bm#2I{b*gj$#s|(#?(0?*pA|c>l28%bAp$HHfTZK&GWpRj1ARuzwaG z2aE}vM9=M(ytN`|;!ueJp$uMe3K^9ch!#kLC&UJcH@hRit54rmLEUF+kihkJR1ZQD zeZo2Ulz2pe$+JCdC! z*MBc>C&u!aTea*^W(TaEg*2(i(~laSwq;Vx(I|UP-Jms9du9f#iYLgONPi!d#ZgX= z*F33b|9T+_5tP1#7Xt%L5DWI?$ftxujGj80D$Y{f6}!kAj~Zhi@i`OQ2DyTW&226m zXi!F{ISzp=!6WWZ1Xd5$ti0B^-*GoEHJZbE4Z;H_y~WV`#KPLsViuUNty*x>LmYQ$7}vTSPjCd%L8n z&S5ptf2i-SC=0jPh+(k@fRELE3_-Em4_7#m@@DOo+gLVv*`aXH|av6Rbw znzGE3g_xfmihL}sVT5ult9in2?7Rg}KMvnKQSVcpNbhKkhXu0P#s`ZzJj`a}F?W`L zi36&WtVHW*S#{1lz!hcmF~Pa=xG|3f_Mvyys`J8Q4AQwgT3AUg7QVo8`x&6KZKqB% zsg7z;Faz;N-_;r}C4VSjj`SOpU#+aTPU_A%m+5G>K3tGaSDS+Mzo>hT=~*!@qmpJ- zqt)bhh1sl43+lk=+uz|D=eproxzV%D-D$IBqEag8{Khi0QqY6VqMPp1htCK&wiWMR_i0|g_qqK(zV6%YM%lD=fiwT?4^SjhD)W_DqF>g^&keN?O*-GQs9i!h z7QU@vyN!#&Kz|)Po}#l+ik==Wy)x8XlZii#iQi=&NV$~8Yx*f?cpm0)bz}6mFe$dX zuIQU>d_NgYKc58TzT%@rLc)}zNIJYrani5r2;WcuZ6vZJ)vcx+4b&|wc<=zD8%EbJ zosMWVte8dqwSToRQcil)CwenHVUZt9nedX`=s?6VZGS8(G+jbEXWj(SAZ}H`b5OT? zmoz3Ygs8Lc+k&uiG`+=JTVLyfP1UbXBr?3s?b`;xGlBb3=z8FautcFyqiW!uy~f_Ai5`e0OCyvws!3g;jYaeI_CAh>P1E%=Eo+#Oea zMWs4moqw2dK?-A=MQ_*z<6t0&g54FkAh}WiNIpU74kn;`rvMUN;T?UgHy^z-E~JPg zL#oB+(X_{Hec4y2hu0h|Ie|c%DJ3sFzkKY!)Z>oiZh@# zpoG<*sp&L(%ELIWts)}njf#NvXLox$dRSg}(M8DKFXD>jgBn045;h)}DA7EF95=w_ z34i4p+vo_~1lC$`{SxQQZY8A!rrWIf_w()TLxY4B5n2Yw791J@@nW5QI@1WgwBgHm zOQIQ|21E1D-g5$@C0-_K=t1&!!v~(I7w{;LEnBe28=|%8});NLQ>k$&Ja)WdOr zEt4aRZnTzEoT(F}l)sD-L=sVXgA%vU8|e$YJRrX_B@&X_0!V@Nk&&&=2@ zB=(pL>J9W1tS7;%)rf+q+le0y5^Zmt()ekM4R)@pPKNrZ2#9O;Uf98P={7`3mSTj5 zfe{RZkyfZ^?4zI08P??@Pz;-|lRyHzMSY~X-)j6JsqMRQRUsm(UTo21cz?A~oh?Ao zjM8U1e#C}~=hMM1CJKjk+8gRUgArd;d;1(-o|^&&m=-z%1%A^A_jGgSmmV|^MZ;sX zQ?M3#>`o`5cw@?NzdI4*5`g zwiP%B=qqY0T$yfnCDETvomr9&J)M^{0ja{)g)%uAakupCZb7$Kp?^{me9OqEqnmwL zlVME__w9a5Ra9OdVeE^KS;I;*=_uE-+x+Av+;5yk<7Pa^v6q;R5@~6H44ac(vXOeE zMHe_58V81mF*jR6DtS4Lr)c=9o*w0ky+Vf(f;qw7f;iF2)h(Hsm~4w-)QsmXwNpdW z&YAV{XOowQ4){xmf`8d2s*!}P9uvG70-I<%gLh6fT4k=fw-*!pz(TP_1;;eZEV92; z9qH|v(EB#Ojn0Q?NT_rj!dcYwcOjUWugUtrSS_+KC5_O*FlzY;bTzov)=+=w{k9Q}Z zVVVKyW(f=x8-EOk_FRE@eu?hs-|Y$4Tq_4vv+p@Us6qf7s z`XdBx0e5=98+Z_|Jm+jF4~01^Yw7kUT~3<;=OS5%cYkL=@6eetug()ZjZ8_*qc*AY z1zR2heV-V5<14={1yLwDG0|op!5*YeRPr`b5*N_;8QM|VNnTs*L zb~=l?y_2W$5L7NJRQ)q#1UY1bv0Hbiv|K}EbEZGG_rk77=PF6`hWj)QqUBZbq@%^u ztI+|}>wj$$5{>MsfSm12U604&>D-^Xo#xI9!_8qj`O|$u_`D@`yqwRpt-{A3$oQ?H zQN(nC2Jc+#$-Rh*ISadlmqo;_4)vr1WN-p3X{w-X90O?O@uqd_-Z)F$#b`Y@Ck2!^ z0NS?=$n^~a20Jex*?PYOX1tRL-o7UGeNvzax_?jC0k~J5BzK`DC^9EZ%Od9&Nzv{- zp?v{EdTT>xVUz2smJg%31Gc!2P{uKve%O90=5=l~@|N(zpj8f`Y&u7unVTFLr*9^A zKvA#{Bwf>xDLH&u43p-RkX>w4YnmcW2W%8Cy69m1U2Xd85&`5}@*KatD%z0vCzGp3xn%y)J0{I)Al4=loyb@Cv2bmcc9KdEe0ij05rgp;zb? zz@R3C3N_%TnVRavvf3ez?wy-fds(YBK!XV!7(E^M8yz%Q)ne3BWcpAClOWpQ_n`z? z&S>x6ZCdkoqu^6A2;>RM_u`I9iG%U*vwyPcGxF9T8vG=&_B}Z6MnE*@^76=T%!Pu( zHeGmhX}X&_Xx;_pX&5*AT#6>VITozpT6sgdm>NiOdFii|=*Df(ZlQQSZ}$`H@1YVG zRr18>;s$klWh;j%d7$!wrU(225%!TEhj@Y>B7hCv5Tj6pEWx5`kWm0#xI_qnrvkoH&R)bs-kd?u zElV+H8qVAmKUnenc@)un5>o$`(#*w1pv|Rm8^rgBQhJ)<2f0ER5Kz7#qw_oQN_8ER z43E>3Y>i!do)oZ)jQ6HGhe`^{M1PPKRV_3cRO?BBTV$+M54{@W-qRv@tlo^~6SSupG-_dd>tTQZ4-3Ngn4#2uP8SU6PbRR%57r5# zE54F*h-lc;iRm>~d2f_?_RVJoEx&jHzCTLC9U!Ej8}jBTxBUE`Q-uTPlR4 z){V@}MxGt>d5#B}nNM|bR+LReff4*aLM41=ysys>{|J@P5|Mo^ z^U<|Fr+-2vgc1{9ag8q-kf!NG{LXk^Gu*#}`Th=-kONCe({fw>6@My$mrljxX_`&1 zR$*SxzlBPup8xYu2@9~_SgK4zn)y`i3YGBTy=jKGKVcHC5Czr6)axwo&>ml9ELBD} zU-F^?;s%}SF~7+UZ%5`T3TK9h7;wxYQ}UcvN()Uw+`37tROt*;MjrkqC2*K1q(CEZ zn1CIKuDwtJJI1P?dVgH)q(l1oWw>dEW|%T-m|CwtY%-T2{4P5@{UP=%ek%>yj-GUC z0iOF|;04;2eLku3N_O%W!;7cgGW0HCc$h%wyU^nq zqO=MKD&W=sPtZ|SYt6alxBhSMT?aMLLadxqy7V4xg9C@RvNkB&fo z$B#2QAF+YRVuJw5{FWxm1DPb>F+PP0H2{#KIDUmZBLJ9&ruFJq6Q@7uhThQ2Pnen2 z#G#4Ahs)B~^nZ6rX&_E`Nt-e&jhchTtwH0e^L;nmbB(kdT8Agpb4&4ho;`II5x5TzX)2#ut%S`y4c@%-$XNs5n`(f*>UA zkH@4QTuHf#%C}q>^5FNme;9~J&W#l0arvbVB$yj`S(1KX5YnGI`NJJJf8_BGLvce6 zXw;HyCm4*ijvyY?78Rsz6D~rysS3zb2yz9&+<#Q!oFOy>561WydS_h(oC|Y9GUoG% z9}(n(G*I9E%kYdw3{sUW+n>~&4Dy6RAl1ZTotF+y1bg?U?vo}5mq5pg2yiSSvg|xo z9@ii}yRZF&Y7|2%oIdd48=uFHfKw#9Q&r%aJo1S6=+Q}~(u-5xD@E5`#+s~4A@HG< z>3<`Z0FWszS0UUmf;TShmMkIu$(MIr8&izLDV1U2leQuZ3{bIZEtU||-No9U$+RWL#b(+G}nK+cg86^RmK$b(z}qkpxA z01E>q>A^e?kN|Wpj{ z!jpwMfUniW2oxaw`N9LaXi~P{y5tjjXBq4Gl(aoreWzQoP+V|s%t>kmh-sN09@oFD~n*5u(RIP{86qJ+`boKDTb>x}R@8B32&7iF~86&<}6FK(66HEDkRIj1T;V z`TPBP1{vp|;MY*0hKy%3x~`Y;OQGV0Dk~qVWLjtv9^{6!(0?#Lb%WG{Nr7f=a~A&s>P0cmS0;wRq(*jE>RW}KKFqbC?K1b=b0>RQs@po)q{ z!5v-mGe1Lw`&h<75vDF8?ZLsHI{)FV1XGBi22`Yt3IYYnCT-MUfjoe7nd+R20x*OM zVMdUl*q%$>szdq076gU_aIS(=%hR?GEy4JdH>3_JR%Mdk@>1UT{YgU#&=h;i*!}6> zZl;GDkbj62c|y97BYy>s;t-Vp0qpa59!US{$lAq`XCOd1)yXlb`gAa|Y&Cm^3v!a@ zW6m8gJOfqPzi|&qxu*@a5DqtGc@7l1ilmS^VQG-|eltBEo9~4~nF(1VI>-mL0Z}oS z0(mrvv_p(}2MmFLk;pF;l3`pl8tS=$0G1T$o{)70f&5x?pnpzLg%_&phlE-Bo)6LW zsb61W$TKUS1}Vc^Li(*!!s?Qz3UClvxLj7?%IT)j`Q-bjK2cV+QpZ{WI7R|L(ECwU zLF73Hw?r$3NP?=R>&mR$aEmu(z32fC0b~ zps1LC=av9?^8FZ!J~Z1h*IDQt5&Hh;yIDy zL-W(0{%byyYrmy${q(Qja)$+0fwt^(q01EBX?1~cSpYBTFqih-+C>z%;Ai_-^Ph5E zjwj^f@)*4L!|?r4j%55@j-{5r=g+3+zCeK-o;lh!P9eF50u}kbb>Y?&B!~w%#t<+H zgn#u7q*AW3CxiQ(w@&|Y-Y)vd|8V5rxvPBVli&V*Nk{bQ-_sOKzP!*|c`E%~J^S~0 zJ{f;N?DLo>Wsi8^@C+Ht1o7I!@h89PSi-fkgyTn!rBvb-RW!-`AG`8rOnN8#{JmPm zwF+`W;6bi{(cpRCwzBbk@83Ul?bY$eHGdV5j0kw)ChuJ%?ZC5pO^38n2h@#{6H}ifsFPnV*37Y45wMFp~D7e8vrAIgFS27o`mRF1H%8Y8n8@g8Y)7K>e~==A?LNLf%prM}PeW zaJ(>c0vr)lr*eQ}S@73pysZ*|TU7uRcO!F%+~5Pih~^aD372)-y;tjOA5!7KbGbeX zm?PJH#%c!qYb5i(762h#f98KFxt<8c!Vd_+hzdjg#UC>7&0$m&GNyu@@%?Pi`Y(F} z90QABG@AB&=Yl~>QT;tNHrgIBgWyi#7UhkQBKXlvX{5$4SJ9E+k0%2iy- zl@AIw8~_midQmfY#(avGs(_R-b5e6e?Qv;HvJmoz+A~>}&l@1ochpAOXX`^fqrHTAv z1O_v8kmHkiE6Hc${nHlGU}*z?T+gU;N}He+A$W>s z1{yqw9l~77?;^+qW{-dZZOEjcF4O`u70eaM;vb>}Xp)A6Tn{fKS2{wEtO^K#V7`A{ zt1hhHpXdDTI~wV4jsvh_2WxJW6p{{6UXJBaFdJXmtc)V z{B7%>bK}6CwN(zvblM1^_&vJbeu$4s-cFezlObWJ%OtPh@@nDj$xP3=Tj4 zex5u-ITXp#tj)+zz<*4^9*ZS7b8t)M+Ew&fGu8b z-Ty?dF?BXv;D#aBU;hogCP+YdfGGzG_Etd1d1pi>v>9LlLVu_Ef|~Of%rRM%m&BLj zjGEyFeV=qA^Sv_r(rXmJG@p@qU&BEGs}%WsGNKA>2B?f?4_re?Mv#%$Gd6nw8U8jR z3UmXsM~%>}{EZYrH$XTp_t}LDxy`e!!96#3ApX18{2lTDoJQjYlu=M~*gzieuN>!~ zXW;mvT9E40A%B;7YL%1M8*mh;!{--#()u-AfY{1$Ryx!wmrwcMQ*Z`ZPMYN9x-D6mlWLcvq64YdUkPb{|5@L>RoHAu9_TNM4xwhp%es(Zkw7Jc)Gyj;_t*pnm z1cUteZ>hLH{^3Mc#W9xRyu1c6Ars;g@ey+n zZ;UD?WY`IS@I;okH_@Mb7I5c{xxy1lY2d}H%u5R?H{?tJd?i(Mz+=Ls6b3{z zNy86=cALc<@n3?8a1esVf&j1cg|ZvqQNZ(0-1?+Qcp=;f^$_+-`(e&ZabeHMG3Vgu zUmr2~ti*J&qC>n;Zip~A{AhjnFD4A?{1*`yihuRMFPz-;vp+>AJQyM1KA#)O=SV(( z=myOfi=4dw<;1jwkQ0sz0cYO8DJDyie<=|GnS_sWLmdE@C7&g{-6Tl#bCf@X+{N8U z3<9wF5OcW*B=s1EPC=Gi*VcdVWI;{Hv5@2?VUwUc=7hL|d{7YH4ALeJbs!Vsgm7>R z)qiyQ5M=Pn0}jCG70I38M&$_yN8-Xdh$Z&R=q-P(5lB#sSs-d9;aUnoQQExh3HgJ* zB+AhVC5WYs=!QXFg|2v1bO`))NhpfbLiPRP?g(+04gi>@!`>K z(g4dEK_qHJLKP274 z*Z@q3nh@kp+*Ywmm_m?$$_$(afsh^`mskkG;2*d*_n-GU@DAAzSoZTPk8UHF{VMhy z^ktf?rTr7eXrv7QOp&BRnvEiK7n;f@lo+VP@hFK%KLM;S$0vl&;X}lUc|HKH!hhG> zh>)o*^^oUB#DD4!a>w5s*vu2=sroQ%q7d`EM#vK6n!GY^MpRBZVWyzupwM+kA@r*Y zq3Y&I_k%+B*WWN_C`dTke8NS-6hKu7f^tHIDhxm@a1R`9eOoxag^R-+q24C65OM&D zOMNh|zF7WHa7gtvtt@$sa4v{%3x9?1bMEigr2djJ{V;SN#*4fko(Q3LA?Gg>Qtq#H z$M_$ZDYV?3CuNIS0PLR34aHCWh5i6Ycpdnm4qSy+a_0mr(xwD=X+yjs@mmSzkgUv( zM#$q@Tk=(0cq*~@cM44;WWpKwB2_rG1)SE)Mi-meq2+#4M;A&k*!YAAx z28=&c9HI6;6kMb0Ncx16{C`b9RGd3c{D-2u>pD_D;mkmYqJ@OyxqrubYN@je;XeUiV%LzTUhG)H@+zTr}H3v-hV%x=O-M5Ka)X6 zSV$%sIQ~Lmn&p_=gw6YB%5emU*0|Noev zB6Jj)bHtT%BXprP^0g^_h}=&2q(db2S0eMpzo*cabX0`5BRr(W0+ICBWM1)i=_d2N z_%qL0m^&~BIbrU;gzCSfhxA1@|C{DhmhsTP{bs!kj?^35{eL5l4m8qt{~M+C7zE}w z(bj($J=On|)c~L$@=R{?2FWx2&#;B5JCQN#CvJQtB%XWvH`;mr)~)bAxAWiRpt1C` z_WZBu#`7-@k5HR=EG2(WqyDOc5RiWVzu?*6U*r8P`Nsd#{>V77kb-_PinJN~#RG>y zp4(#m2TNuQgMZNWe~Akp?qB2LVGxGCEJ1F6+6&2>|M7fCUDJCol0M$~mi?#w#ouy2 z7Xuh8T<(6TKSI6{W-Zr$5ozT3gdJ_sLeu`*$NyOycG!PtD+@i$YvsZwI>Lud41_pb z=8y<)&}n&1c<}k}+fTgI&Eqn@*u23=Gs+bdR}LXOXMfU_sG(Jb4?fZG2doXleY6rnYmGkHA@gHRJb&RqJ< z+OL*=x^1i2D}SZWE9ZrM91uu=lnl}+<)_P{@-}>hlu;5-xz-}aSOAChA$Df zux#mvWc{*o3(fZa#l$V~37>|3#Xq~>_};Jhn-3B9C*_v@FmVgdcHw`RIKs2R-7iv( z@N9DX!^C~!&Oc1t5|{f$$}Mr}4-xlAo5{)_CT=0_s9#Ln!n1)NQf}eda`%UcTPQdF z7k?AC#5I0Mxg~DFe~7r>`N)gEm^gBdO8mpbE%{Q+KcpPV6JGhn#4T~_A0}>zTb4hh z+!9yUbuuE6>&|k7#dC&05u6~e6mpFhTzQQJ%8~H0B4qxUJ3aupS7j7#s3QpC-EjcF z#=P-|iUa&#sJID$_Djjt%N?+PBjk3=eSdA_PAXU!xDo&GaFT-tH(w~ZC&)o5mfyI} zQDRZ^;pAF{JVF5xuf%KTH!BwqHtse7z}%leriv3l)d}$cxo;R^2_WYmn3|frCz?dSgF@)`mdgZ6hgPkvGH{2eY3!f5&(jsPD+;di(QkOGb0;nKf_o8T71^MCKx z77(zo$)T_iM*!}=lv4AdmOl44{OW#@UVIMD z`G@MV2xTP@V(tH|yg%i*oX7i1xIMqbShc)fs5OI}*RYUn-`9Hn*}n}wG~1a_T@p?5 zaiPDGg}%%GLbCn8uP1+(u7TuZu@HxUjsKej6Bt6)4di?K^HQFKTqE^g`G4lmQhu9X zMBArceMvuPEtDPp5N-clKS+B48VO&@S2q4a(D{VclVVaIw%_`*?DvP5J1zCX_>_;l z{H@9R=cRn`hj=5e7s}25(2uAF-jePm|F2&X@?Yt@zwGx@?*B+0e%k-P5O{x0AED$B zA@_<8#YX-r2SXPi0SIy- z24+ouZ;62BL-PDRz$xSbQUPuuKO%$tPD03$OQa`r@z8~ zk$E)wMdo4u_x|y({eO$X@9e@yqH&k#S7rc!yF(5gsKKEf(mA66t8QLN5cj@o^bCb<9~M7i$c)Xm%FMwat5gK zsL$2vH9MnHTbj8RnwJm!Le&gQxL1-fmFovA^K!Uk*Lb*r?6IpFEj(_LN&@j-3~rJL z#na{N?B1fBOzhfY*`rX#t3rQrhr+O!3)ncDYN!wIBJp}^dkN-eR-E5Y%?##Aoz7-U zzms(*C0!TfL4T+Wk^Vk%RgrGGLy=d{Qk&y!Cr@U@JPL+-4W}l1-4dFWlQr|y zxyw2)Z0xId<1rOO^rq}?Vx&?FP;=%MC>^1YIk$I#*VHh&jke<^#`9pyZEfnLucwJ$jkG`1|f zwy{!`j|=daOQu)b)!O5-e4ii9r?J)3BuS*p!;Cam*a*1+vT(40x-E^;m%AXgQqL&f zL*nhNGQ3`k{JFar{uZENKt~Q<8PGFA#_7jNzDeABYP3zt9;b67tj^cQR~vI`sMW~r zy?;$}@Ho3+YzoR-W25)?6Sq?>>cNGtjFL+Fa9)|!Eq)18HE*|>6}%g&kDZO?7}k;6 zTvj0%bXC$Xhq>Q_qX>NN^&a$j&uhl1n=tm|u~n5mttE7Z2x>S7Mo@qBUd z>=%KIwUPCbNb%WrZPxrX`N@zT_K)pZY5i|QpI8O&7+-+!^b z?i6*%#xg!d+8J}v8aBSf?;6hOYP(SJy7R=cEfd!}YC(9392(Dwf8FnnM;|;iDazpy>exX|zYcbq^Dd-xU>x!Nh^8u6Te0Y}lHVbTH(p*b zUZWPNnm6rVa4_~lWqZ6KZ3elX27lSK^11Dba67P`BohLX1rNMt{9O+KM?kp0ykvX3 z7mw>UdrVI6hoFPC#yMuq#@9j23rpg}BZ$xML|DmkuDyn>4-{rIR{CyR8L&4VS}vbG zEDxubj+IxZn;S{-He_(O+oZE?Z4XqFc&4CM{j|2KFfu4W&)tUEW zt&A-nwW^PbOQ$+cr$W|si&Cp!xyhw%eE=Oju*&I%I^eCDGlhWUJ>D6)HFKf^r zS9d4d9pK7oicx+o56W$+M*T0EY7T5UsJRgB@@SNRMnT%;=#^Zl+=k0aFyK<+x%5gsArF(T-b#8)sC(~XL%d+CMXID=h^OnBD z{>CdWz7_oeaHF{f$F$Dzb?tH#hC!RS8tZ50kvFAv*CbUo-Sy3% zSDJlLG!t%gd7t9;Bj&aXS=y|5Ed@pjqp)XD8{D-jd_mgJvUiK`*4ju>yBVMg`+D#lJGdf~&u;}vaqY{NL+cy%?IdTRkK?lxLIjGljJ;KR{@Sfk3UORsZUc8sj^A0BZ}s^w zDe`X9B5~6qhfN_rZb}Cp#8TkeM}AZ}-HdiX+}}7>Om`dGH^jccC(q4y$4I@5QP!|5 zbFtc_;`o1bJFT65twn3`ib7jO%|OdL0N$*2YBV(L!M^@RLLj~i7u_n3da`kjJM?ny^cQN?U_;*N<=y`lKD zi#B!QCpkmI`Go4Z)7%v7=u7 zqx4dbZlcspotwMRy*=10TB$RSJ1a)eaN}6HxycIqcP!OoVHRE@UPU*4*=T<~XT@EG zTG)TxX{|gSXXuv}?Z;0j9<+q5=GeU^A!}ZTnL!~VO$BO{WFJkcBSh<1pE*Z7>gs*6 z5}^^=Y!XDD$9Uh=Y=_Hjfo^Syt)8wy(?rol|NY|Tikv%Nd&y3n1T{Q%3( zh~lAm+v|HZg*hYaFFxRq$kHzVat_MM=fhFulne8C*yo$(jju9votENjZjSNsx<&o< zd2EE4`iN@Fb#BAtS4WdwpVUFIbnz>ak$Id zV(r>zjlOlZzd5|iynNle5bs96EH8gIVI!kxr{CS+7AA}Y!t*5?Cy=JKHg}leBJ1Lf zEBBpDK%!mKHu)++*2CoaFy~X;d$TIl#$>L<@2o zz0a59JEpU9n0u`d*Z0%uF~jG;rWRPuesA7`XtfL9$CWlVZbcT1$5ken2LXSG&&EHy z*YmVj?11*-=jxQOrmRW4?#PTAYchY9%M^2dqO!NQ+KNCdoR_zo?|Ia8$+3HwuxXL| zbhOlsn@toSTc)}NR{6Y~j6>$F%lR%{y7xo9-937{x~{fwZrz1Ck}Dldxy(geeJecU zdC8CLxZ2iQL+{efoi&ha92tLKbK3G0%E;k-O+^Si?=U{-)o7o0Yi*DB(Nlttn{7;| zyDJFBt-hU3ez$uBxn{}|qpQH)8~V@`Rk}Z?dhoQ`S{lyn^@_C8c(B@W%NtygO&cA9 zNjp?}lqaDxyzkdFHR(Qg=|ew8;jWHq71y#KZ`cj4^3es5lCjP9!M=Z3t2awpJ$k2L zUj?fR;{CN{L1G{K_a!^Mw0pv;F58;)aUUPvH(og8QUTh%9r1;}yBKH2O^&B)uCryA zn2v3k+;!5-s68C>xy{Xj;pz>w!{uN-x89Isa;!LLKX147)|kes^|&*Y`6km_&okX$ zxC5=OyS*Z5%yGlQ8TEhnJFD-mWz>w~P2F(abJ!ZA7^2%dDNaKukcB4(bcyY9wlve%9`(xc_I*{Q8BzEA0G+YC-L zp`dOdz0EJd(=4XZ=iu7+8TK9DI<Jkt2 z{YE=5*`L`^d->%K)TcMz=C9q$InGh3;hRant(2-_-%Z#D?9FJrr1 z`A5OJc=oni9S(odI8$kHD(LvtpRtP~K@)bH6${RorUYwK=GhQ={aiBb{O(?jrOfmI zpTj^Fw-QoA6<-B7wUQ-HZ6Ch)m3MwSt`4&mt$?lV@3gjub!QA*i%c_u*OQ>2_#Pnn zfn#oMpF@AUdF{{d{hoz&n|7QPtCd6F&U97W-u_Oj?nZyG7Lbo6>Gqa+sg54~$SFVP7=vD0?XI5MmX=PsVG za9LedKzGlBns4rYgUYkdI;QLH_N#-hn7ZRMj6ddr<(FO1-f256i(rDaF2I8nu77b=+sSTJjQ|YtP(Dd91{luiETr zX}j&CY|5=KF$R6_U&4Aou6t_Ubudv^F*6+t1)cnoZjU^*$z}hsMOnbT`i-|gc|s5= zsnXa(r08s_OkDF_b!0YaZ}1l_YUi__j?ToioT@p zp{d8%)?6+PR{QLc8ji=jUV@(u^(@`>WbfQCeA3oEax!CNN;R*$P4RfU*L?@f_tqNg zykNFFZlpcf8BgQ3QV_p6ITd%m4CABmH%*qDcAURwgVPvpMiCSSQAqWtwtDSotcZUd zi*CNap(HDxp4T5Zy9Dt=!cw_^W7QO~)^}I^?E$52r|kK34!dFA2~Fi}xoNF-xQRp9 zJNDUTUW{Va6?!~-=&I1{*wtA8${1Zw+;~4`t6d%8?-NPh~z} z$=_z)P8F~FM|!?L?y|z?M%na8iB1V8=*hjh4*KTZt8PvE;_h5USLDF9kE-3Hq>rpu zJGWA$ArOVdGZf`#Y&S8r{{Y=U9~wm&dLsH-jSd8Gwq||o}LB(X9<5w5B4>N z;dnW8y|$x-bX~7@BBzw+Ew7#>n=rFtchAPcg-YW^|mg6+Hm{MqUW->D*m{3P-GOBcvS1X*FwF&$3=54 zAF(;}8zVca2vgQJ@Lx#d06Txt(QR-C|0L!9zH7$4@|IjylXw+#nN;7ybM!zDWQK;6 zYgTMV>cf`jwC!3=j@I=Zcu{iOJ*tBy#iw)a924;k6KYQPp?lioo&j+$3p#fQr@dre zQ^oL(Q#&X1=%|n8meG#CU9G9;hE`$Du}axeH8pM%KC|xB+GoZ`w4Z;{+QO~zh!7C> zc0KBc%{ncUDnSM#dQzFCZ*{m`%0r|oyza7r}VtHsD zkL|1jedE3gYY+3GOg3E1sWOZZ-M7weV=3(8l~+%3E!_6jv#-{k5M_uu9>Jy@6{8c= z_k>4or94%)N$%-oe4O7#)!#3Q><+db_JS(sjf_uf5c03EG1h-Eo%hL^dZ)otP=%)O z*o^=U@zk>eV7Xdfb=%F=q=x(X>W9Iad!E!&Vy<4HVS?G$S^l(tbvt^VyLPDV`eXf| z62U`pRobk)5A4e)@7_Ug%QnWMGmrZ!#`;@0sp{u!Dw zPoo2Oy-;|L*e8EeStr39ROa$#j=}4lTLvTR_T4Jz`qeYXysRECMm;)#m7o0kfp5i3 zsN8b>7Vmt0O}#^D;3qe+99wSM{r1k>l8ftIyZ~L)XV>Y^tltg##=Iv7aHv;~dneWpU`7B}^Hgl8h-iWqoeoq=BJacv2NQv!iTJwV09n{x%R&9dj zq{ee6#o~Xn8UsWu_{aWKfP=$Cy5BDZ$u2#5|;JGGw)!v}du+%Dp?&x8PlC$ePM~uYho86#AtM zyxo5)NQ(1gxb%b5Sfy{Cm=s;Y<`%ajH}0ghWG5o(F1`_`*7=GJch*!@?9ZaH%PdE& zq*i;|!G7R-$9efVdY*V`sz;~Xxcdk;S8?;cB$0fQF0mD1d2lbxN?s?XvX!*`Hi~r| zWv%w+j9lfTWRljEyS;cV1;c*ZoNIf7o`8QpORBZ+YK$&Hc=2dyoD=o^IEVLKPp|q_ z+VFHV59T40gjc3FZ{~cZOGmMtHD8Sz`wax{y;)VJ^-kJ3=(5qx=myZ2xnI|8J>ApO z#v1mF8~4}s{HzY0Jl%4GQxK^z-nbL{q~uN!qbHYq$fdG>x9et3l6oVin+MpbmmPn7 zXU^UH!24*6^J6Us^$FCNmC;=sDq0D1+#IwbVD$iWrZ(rqCfTgsBlEmllY7j{i+paB z?fGT6)V<$jp{T@kvO>3lLaXknow0iokHYg_J*`7$oO|gM7rVI4%VM`z7%$10NS?Uk zNvO0lc2Z}Qpfkqu=;wtk1#ek%1+ahH9A)jWmUL8{t}Y*WPZ{3J9+1w9Z5nTGb zR`jH;OrtO63FBaC?ql5c!5P1NOX=G~zir1NyZL|P0xc*vClcZ&>yxhQ&cijz>xg*> z@*QvdvrTO!U&)O5z+K|T^gDmI8xP*)mIkWxS_x){_Qx=(H7oXjCV4wgo@Z}VWHsDG zFX3uRp0Y2gwp>l!MQ@7hXoutNdcHNfF6ISD90$Jp+wtb?N~61B+%(yvd9!OvG;#`@ zZKFe(kBSbjDSBtR>Xn&x{*mHLw69J<-e#NgMLXtr+ia48mV<5;X*++zs$^!LWP28IOQF?GrFr80D#zLA6dXv`-1nQj(O1@%>hqRVC-f0dm zca!q3QeHQsPWghWR0XvvCFh1sC@VYcNT94v$8&i-Tha79vRn6Y&sx3p#i}cFWmSwy zQMUKIlYQ3~0u{UbZd33^da_LkXO{<`XY!`8?;VhK&-32)g6Dsf6k~r0R?>R(JE5SQ z$~=m;UEHYCiOHPyy?gZjeH%JiASThf9q7UqLw*oF=hf}Z%D_Ey0!q4*n`$q;0&UU) zF&Q#_HC-;3VcV=>;s@z;bnJbwYRm`a%`EEJyB{>^iQhg(e+*)59|<>tS>sU##@ip(Tun6)7zrnKkJL=Z-yw^H#LeSAFe7@s#d1@GK~SwnlPYK!d`wL0Jj zA6&~@5x+pQ3VqP8u80!u&ProI2jAx95aH{jc*$8v{i|sNF|J>C9jfbXU{PIUgi$P< zfSt$&YbSrrF&NIy={eD_y*jP@Ihfd?Do&3rpRilSX&@Wj+N!~BR@BGZk?$&bCh7LV z$A>gLt##9JO~|O%HOF=PM;8YA`*}FplHJNqm7EHjd!ucyu<_Wx!L{zvS60W*;prTo zJabQ(`r)yoqwP~);hqZBozzSnxnsfac{l1`+t+{X@(x7w6u0ScJ3H<{zQ2RBJ6DRw ztl~2&=~_r7MRRGb&yp~nY|fG&e7(|yeahK_Oh2v)ZE0?G3KgiiJZ7u(e&|xQb;|s4 zc}>~DHec!X03B~`1#Xk124h@z&7O+&aMg{dpdN=v$~?#(MoT@vkM7uW!#rf0>+O|R z!cc$d&tbEV06vGAC790fus`$1%LK6l>WB3%`?V@ax&T6c zm+||R+i_Ue&ay}4H!<8@4?neynzaufGLOLG-Ipm2;5rYSVS; zqnu0Bu=73BkGD~vQ|%o;qOR+l{49vk*^{s0ifIpL(>TaS(|610g-rQ1>Fidx-7npH zW?B7oxOtI%moI7H-laY{+o#0$AFL#uby;@O<38HSCCritZus2?zMl3 zH#;Mfr7;o*%J@OvlsF(4*blW+V56J{Lx8^js3Q&~H(6n&%<z(e-3|h%QBpiEJaTReodbJzTbyW4mZnz4EJ(aO_KmRRLwFIrGe3V<3e+8l zaf##Tm%@^%=2=(G;JqUlfnC3N8qw$eQ=(Juj z?3tw@(D!+1VcC7v?DF^s#@9on8W_!(K@fS3{DInLh@L?7`won}y@^4t^$d^x^i#B; zlfvWiar0nbG?*NOVvft#m_t=Y-No2fDF8n*?Ofvg)6*0xh&l@R<-qrv`|eCg2g=eentkXB@xfK?@bf%C!i`0!%17u zS&$8LP)ZiPUU(Z}lq`|>0VvR@;9%kM3dbIXB2(@}eC@sME%9Bt+FBY(ffkq2dXtC^>&$-ws+8syF4rQGfq( zx)G-jbHwR?tS<)$U)#V1San7MrI*wx3w|daK32-lJ?w%EE|?0OKRrwJ zN1050U-+PS;IehwTtv;XZULG5^)pQAv7AU^Pc@PaE^)jiNT6wx$5?6cs#iW;-%bnY zLY#je`PJJ+0I+2lG+lQz+2}Fv8ID9fw{YP#Z}d>@^}c>pNfRdtr5Se~OI->9Z%2)S z*4Oq{E1Qt89E)mXZ-l}846CyEcgmQnvhvsAF~-Nx8{CXuztF6;v3n#@+1qBnQU#L2 zD9mouFX>J2d}J|whqb_{5vLz&Ka9^SqIiE;kl9NI-jcWtCwCpo_saR^#uhkleNJ`D z6KPb_W1k)t|5T03&dvc%em7Q3^lfIljxXCuF)LTN(D1R1r&~%4ChaBgK9rXxIDNgv zJ43Q%Z_kF*RyRUe*j=vu6SlzQ^!IHDeeHNr=~@-;oK^3~q|jL*=$`#Aj;Z^r2mybO z7?_XuVPSh9*-Q@zACR|yjdNyf+PEf9&0FTfvjkb!x7g$LTYrYPAwW1@+`5d>b58hf z0$MWq)chG9<5OuAj#+PpSjkk$+hJkLOOYdb0kB{VywWVKaO@XP#A!Y2MA&JU;8FPA zb0@{*?7)KwP7uTb$?u-(y19En4K{y53~{XZ9j!?EzHfFoS!S_a>!bE#&~P=dw(tO) zzR}IO3K1`LZDxC0Mg)Yjb}?c}38gKMYZ)P=S*Zt!1mw|voBdZ;{C&p&K%Pc8co(H_ zoSo?&q7E0h$y6?@K1=p7m(<% zPjc`j|LmZr33vC)OO!#U?^n@P$zKzGN^rCPzIqASsGZ9Cey}RN120$C$hvI>zv;QJ zs)CUE5+D_;Pvl)7N|i8y)ORZj4^uD#W6R3>+lO^QxJ!a+!t66go`(>VG0l})Xxfi) zc9*$57#~G2gAR|2B6*XZ%^-i@*XOv1r<`#sVOW<8fmMAmHl#NmJ02!2jPQ_I0Rar~ zM>^N`z>3xXEW(1zXe5(ji3_0fG&g}$`b2Xq@oE>GE9rW9^}bev#tXiXr3}lOG?L#>sSzE@@X?aMWuErMLMVFJi+UQ7dOV$|(2!a={c?XfMw#uC&f4xU zu1~U$ggk^qyXMo;1~KqAwfSxh5#2{M@m6C6odyPpK5|L0%prE*&$)Vd=P5i1H;R7*BtxNt&H>YE06a{e zM)b^VcQKKn@HDpRxB;TAhAm4lb#@`HT(cyZ^;(MBYqiz_vzm5&&->&_vx*qeG(ZL$ zKv@mlVtaBXU32<;vhdZH%X1?Isk}_cb**#s`mLi`B*kk*8g(zEQ;#&=^e;4?@R~1B zSmKo-J|2Bp-AsS4?S!M=Os6X(&Tp3;QBr;$CF{dXIxtxi1(zB{)Khf2j_I1tD`z- zf_Nd&<{9kCme{;tHqj`zJ%_yBzA_tUzt}3i55N&^bYUp+X$b)Rv#50z-XFP(~ zRhNo#7f?I;=-wa35WGt`nh-+N-kHjx%U6RggTBgA;#E&sXBQ?BEU@;-FYjuTgmv_G zh9w1pUxZvJip_yREZMU3Cgppa?M7hl@;f)Z8Ec2YMSbSAe;mKkhy^VNg)XOYPVOM45`ll-tRA>GVj=FVUfcf4D94*;T)|^AY?1!fE${HglWrG~uns?E1i4iFR{@jy zZT%XYFj7;SEUDv%eE?dXym;vN4WyR^VZLJA%ZG7Op{0pefEzY?^*qg0+#=<|eduRJ zAAU|-7l^Qey@RaoH-)$)U!X-3O$g3%JLBEEHabwjvL7Trt~Us;Xn_7{P|V@v!ttpX~cU*JA|Ug6OH)81;eJ(z2GLseOMm zaU1vEf{5DhsDP@4-h$i%EV=doKof9h`xZsH3zn|Ui<@4&2;2nkaaJJ^)H3wBQT6SE zuL$-zox?1NnjR)~4d?X*Ik)6x*_}WSny-JHQ)eSdqG)KXgSBm&fVftOxvZ&E_bWH! zi^2FiWA>X=qz>1?ZNw4ys-XiUTfBc&t##9%Ey=)lSVamnq&b8_)T|YujYj3z*ZmPD zZYwT7DZTQ0fL*8UEh~^SQDY>AEPQ3PM7UbC^8!hJ(T1zrU;nqleNCYb$Ap+!=c+~- z+c;Ia0ln5C`;oU0=v9*^OgA&Tl5ba<5giH&v%>3iTgDjG(WX63J>{aDN5y~UrGBW; zk1(EvvNv?TngcU|I_jlQlVe2Aox+)E>KS6Px|XvR8e>iQBcIQx^`(lPU#uAg z@}vFf{CTt>)i(6_-H7nvVrfBti`K-}cl5kmYw9i?`QTl_=4{=40zpVpS1WoEyb9P5 zMBnP$xy0?hAWQ7GtQ3JufFFPT#5b?9$cqURe2g+>v7$v=nexYg^kkfiWYE~qSw?Q?yLy!I(NV_2U$U(e6)WYSY^gRR-Np? zX*xVXlny*mfNz!8l`T^HPSm%03Y#OK?uTf1y`o1>hN>YRiJV$>HM5B;0@Y*D8hR6X|hC2lD$J z!brWs4T_aSWmuz?{aV>|Fo|Y8faU}LOEaqjATfHKN2m{LzDBOH$2Nq+{H^g+ec*5{ ze!6nlsTiZ^s!+ESP~Gkepu`j}chV*r8?A6^G=QnBz1O_9D%g-U+cs5u&jN>*_o;ax ztkzMp@5Uuti&TH9GpliNgK2s1)}c0$bmh<@CSrUDC;Z$d+gdXb(d}aI0NEx(TI@Tjyw?`gcyzSoLsm%L}r*Mq&yvqvzi+Il?`@O zSQ3~tE(CwFE~^a5d5e@q1}iUIRfih1i=WY;lKw(+8Ukj}^pnEwt*~GjpNK4$t&ta# z{epGT=+3?R3T|OpV7(-1M{de%P@6NplMnz2*O8m-O29q$pR+y9GR}#~R`m{DdLEy| z!zalSOv5Me2;(6#a4eg?_nzab9^`DWU#(BN8=!xtA2HgQI^9PNM_qsmxh3+{Aqk}RTk$~=FbHrmtSa4DTU3OZG8uL%(q$a}Rwy#}hKkDo6`WXCW`O zFJZGdM^M_4NmWkaFirc-KY(vx5At+P88&s2yAE@TwQMp>)`R605VJ*pk<3rUh!fsw zGIYsxJ9PwJpla`^%NWc^>n7m3c7$20ww8aFx~e|UWVpE%HPC+GR`$i!NXn#GGS43L zCiH&NM6N41{A$x6Fz#|WDnB@;=YE@UblWCX6!LAsJS?n!G;naU9_?8=$_6F3M2avp zUYKmuv#|T>1xJIrF4rdQ-hGvIITay_rV}n57icU=Mo)E&P_q3Z7n%o|m`GOst%!d> z8oyT1FXH;9xw{CBil(mA_Q?>TK?wxK1;$ zx2bP~;B^#2q!*3HhAX9lRb$eZ)i6K$nB{UD#c`=y?wGxiHs^6h(_2H~l=y;F+cCF_ z))|-8Vy`jw6%TfM*2j~p!ZS@2Hi~})*Nq9qkeZgx<^wC#Q)Xm-W+2buTwOlBk~i?0 zc40FTf8AHG(9{E#VpI-3^@rUs(eN6yo4}*la`u}GDn#d}C%o2!hT>JO8SypiNSp2kX@BJ_s${MuYsG^KTnRd|1~P+5W3 z7z97k-~dgNB^;%w5)0+*(#&Hu$r;B}2OFUi4VBqfg>!z$ut;TJrlGkD4KrDli^Bm` z!W*d+z7@Qvs_5<^F3^)zdy|N}Y?MZRmI1_Z45xoaATk2~%KHE_CTEtVlh=xYO*FNg zrlCk3Ls-w*2uG1y+*;%Vn_z#>hN0S|EQ?`c^Oo`nwGQPX^l9EqEoBd$fe?*utWKDQ>16#Ugq#j5Bg@*?H zN8B0_U9t&&*SRXbTvrESJ+ssUM%GV&2=Ye~Ni+~;WAru3F>NHW48MndTIG%Ix)_w=ry@7%QjFeeVN;hO z+hER$J|vtRY)MrnRB(S+#}NXy((~LB4G%0KIeDE~!4wPs0e;{+-XivVzxl;iu+pf< z*m9|%aze%y8mhPi%HNX3Pk==M1j4e|StwjEpZBL5M-dfSf*3se{s&@5ZG0@n3`&=@ zs%I&R(FJOMSWYmCeqiceM_5fNsZ$aKo-TN&1sa7ouMb%7N3egD+ECLdoI-Du^W%%h zvVsGChhOp-P~07;V#j!)HhN>Mc-;`J;bumAxRq5Cq~@fDn|M0m#1C{viWWK?HCU?x z*S%k>JRtcbWnZ;%FIAwTes;w$l$kK=>A(V6@rYflSc%koiRM|t0Xn8@#TxiPFmOnej}%N8`xIH9ay`C-44=Pa=@+`E5BRl3WWW6QJeylgD@TbW+o z71E*MwHMPGUzzrZc*)62^o|PqD=qzho;vbBK=r?G|KF~S``=v~qWDkOCL^5xMdD!X z;HCaV@z4LqjQlHoHg0g4zv!R9&M>_7uk|$jeVQ-lwil70DfACI`PV9B{^2xHqYfg9 zPlZ(e#jbxL`)^PGq!i3ZI+CvcBeL^E@E_#l|8g}>iadDww;4DL!-0RLLbn!ocKzHX zjl{n^;`^^D#s6Ve43hXQcm9{q{#Tp&$D03%PYmaX8Y85c1Veuckn!(F^W49DDeG4J zk3Dk4{1e$9?cb6ADTa~>3OzRcm)Oqmg7V+?3deu2rbBf3B%Z1B2fO?oMEWyZgiVU; z24a6XYX0#$^q-Z;cz=JkH0+(Ac>e_Vzy0n1PR8`F@u-f!Mw2RYFKZ@ZWTOhp9o+V9 zo!Ut*=5UgJRS@n|BaY3_Jpf(cZJ|aGc}jju+0NGU`z8Prq=ZPj^hf904s7jMxxGCe zSs8!UajAnSu_O0A?{L*P1I)NAAO-=asSKOR7p%e?>dkR|v)rC!+>6pxLsp-?QVwf& z9ND^t;>8LHCa0Lm*=3#!T-eTvdrq*08C@tpN|^9a8jxtEmUUAo_3+Z88CSK|#E@5O zR~<8^)?g9^gHfAd!pQ0c{=H$*%+N1l?Mi=W5eBR~0P|u=N^DPvGyA8;n;9lwIr0R5 zJZGq_;kw2&2n7;$_}H+#BsRaV*5)KQ$=csNjoZxpRjs;e3{-7TXH`%@VH*w#iYgS7 zEmNfj4PqLW2f&AuMW!JdhDHQDE$}l|dzjPlwtqEoyQye1J57cgAkL+kJ%9He2`_($ zs1U_?7r?FF6??u!IpzCU#oiIw zh6sqHtdv)|l0N9ublS%CHE-xy_K1Hy>+rxeLgYuWF&qm)lD(zAeb2iwfQJ6<`;WDx zV$n$yRt6p*%keQ6tkjV{j^aufTvrk=9a4zUJHu^&ktJkzxKemtQ+7Kc&Cj6f38yI) z;L3H>2c4C%0NzY#Ew&RyNH+bn)sRf>>wYX6?mp!Jpb*?oZ$lSX4w* z=2YSY$a{bl+9U&I|0|z8^!8EU{K7jR0=F73n;1fBQR|(XOY)(K8KmjC$>a|&R6>*P z*yjY*ZoT)0P>U`}ByV->%Cc=!5ERyFy+$xh-lF555QKASl5j3&mJ5GK$yjBfKF;K< zuRp_Aej_c&H^qo_rBvjXg2g6?KH^_!G=yMkAu8v!qZ?d|@`=_S|OpYX}@+qoH-<%GX-?5!TtU` znt8xQYp6xatxe)sm**fzaFHO?x($6I^~L#QsKe36)P+;0P;sztq27IVv~3yhti3Wafs&Qc z&)hY@+Qywx(%zrfa~6C=h3gN{QMCAx9PnA@&X5U77sL>DA^>(uHDm^}x2PNH#;VGG zOYEQKz;CuRZ#;jlt*-YX@l4_*f_Pe9H68H>WB~3sshhd2G06EYn%5T;gotTgNdSTJ)}?;W#c%2faT7Qwn(DtLCNogb)A>-y(o zRP?fvWS>I4k#xopg^Cf!Dm zP~yEw(sY08-oEB^;Po7r4IXBiE|byoYH%b-0A~+gJxpm>*mus$5kK)QZ`TKuB#8=8 zhO`hrN0RwfSeB-+8dt*f{!mr1QORlymcmx(ts!i_4Xte+#Mc+%TZ#yZVhc>uAIl@z zK}rv5DH;>$1%N2Z`#HX3d8+l7@7@ECo-z)y!fJo^lAsngY1HLU7>+ON`h^0U{iXsh zb-VdRb%P(md;J@NclBPuJJ3um)zag#C=sJERxB-tJct;tKl^Y{q-dN$rRDP7nG`;fQnoS*7WeTlbh zqAY*!J%0?>_qC+oZ&^2Vsnm^u-#Q6lvNdzS4?}YXg{p%7{RB^BMT$*XokOVclvLOp z>Pr{mq*f(qwCAuTgZL&Pv*VhpLiaN@85(hz2qj>Fq5uZVvBmmq;#u^sg_449AsHM9 zo1CZk7ks@<+Z2z5Yq$=dile+lnt+k2dP;x4mixOFZ$em@G_TO3COuZQ^!By=cDT6Vf759Z2_*EX=6jL7N`}=G>y8<&Hn*mK45D*t&)6hlvcM*B~GD;~@ zcRyl(Asz^NH#ev>R&v3WqL*YU16F@>zzR@&QcNAQ!IZZ&tw)Js*<`UZfj~ChAYnE_b3P4p{Y0Y^aFq2C*|D(Z)f4We_qI;fF04)LyP27VdGTJbaw zX5Y^*1(A`tP%%o%cn=2TBO=0^5IKJ6phq#~4Uw<~DJ~S5~JVAU#I2#n-U9|U^ym1?7WU#ao z2iqADtNOUKxRO5(S9%Gc4Lg5J0TIYAbVMS(j$NX^uCenOr4v>ER&XUN!k;Kg%D`a0 zRq@x3IeQ$$j16aASs5BS|2pPYo+~~PqMm7D>vUstM=y-;BYO*nk%Xc9<+^an%g%Qn zi1nFI>L;Z2m;YkkrFLS@tv#%gR~5D;VpPX%7HOYa9WTJuOn?XFOuc^tWri%l=p_D3 zxeFOd<^`14*BbZXynKNxeKf)pytlhTd(RIo6K~`~Q*zaa-j|7YE*6ruzxDR%#D1H} z*bS`!t%X#4)-V^~K3Od_xFu0eg_RCD8q8!ED%TqI8edXh14nhhc)Y!;qPoW!-)d`S2X)DnV{wmz@(HMEL>=Hhj+@o=hXNadkYg}5;nNT|T7 zMqL^*Wn2f7{*LZ9=WO)_>~>!98u zXk}YQ#y->MlUaWo{u`KhA2@%n2-hS7Eh1E3ZdM_kif&=2&&U149gE5=xd6m>S_h-M zz?j4GnSS{Jv(sXgw{<#3p>s&Tji$+t-pdL_ofM@w;Api+UML+Hvdek3L?FUHs5?cS zLq=Om_~5QS**TXJ;Jrc)K85e|7p{x^DdEA#jk0 ziGP==w1$6qVqKR01_Zp-Wg zdQXHR_sw^Xw-uG<+}4-+H;Q79OhD$x!Rv_VXxMHgm#Sqt$jXIf1)na0 zR{<^#B-@lJNOgU5g@VHa?Jd!1B$Zhn?jB||_yU*ZOQTeVnuqpeYv(j{xX~R-)2qZD z4}cE$*@!0gP(y}_2@=!MYOkD2tJa1?l)x#Sm8PSNp{vJqUuh^@gH@yWn~6Xqxu5HQD*w zETT>KJq8_#-6V%fZoE*1LXyKZ#r^I>>PA~xo-6=KyY`DESboQ&(0aS;prhs701P=l6U=k>W5dkZ;7n(ACT2ab|g`0 zwH76udqAOZd=4gt_f|lb=6)d=vdz_h@!>Y%YZiVo<)UxmgCLVCGa&vyohIfJg zD|}ZZx*z2DpAunh56w>*J3xBfxmNQS>6#3Ev24p{c`kJ%!lw)Q#VDF1A3gCjFs_*5 z+?y|jgL-d+@@j&ajA_5=b-iqPp-fWsqFO+bQhzi~4ru$}LoF0^fhYKaQ=A5Wo7mlo zD5ZiA8Bl5@X*3~H&N|j6`Xl87?>94k8VRyyh(Vfpp4YQsg*!pg~<4xX0m z?~$NAz4Es7PxixRJ0J6tegMkd0~v;0PPFVk)fwZL6NBnEF0r{P2jCVJRdT6sG;|pM7BYb5t zVb!VDb!qddmj^)T_>4P5vwLbN1Ay%`FeD*nFAgv#nca+)p)1nWnK;K`ej3G5ovJ@{Ow7`k}e+rUEG^m7M`}F4=UrW0!?bzzisa4{`^3(qIOL za#I2^R9$a0@?|l<#I?MCg=4RLM$6sOuynX3t>Hq@tQ_&T%8V-tov?exYDs6_5Pdqt zIAtHNA5|#9KKk^hMS3Mmsnf=7?mpkC21c0CHS%pstkml|KOH7nh#cYVp2N{~l2d<&Yu7P>-Z4eE@_MK9ML zJ7emxzJBEfLpAt+JJ%8Lq#CxbJGU6yeWMg%Z#UJn+{AA)_&CP3jJN&`IH?3beZ(^G zHOrp^PqqV^Zpi_L5Wv#lbyBKxZQ!zA>1KL?4F+HiIA#(*pI-&W)700=lE5DJlqs_` zw|=*eV8HNf#a}qF?|k!>Jv|C zWeg>zO1b_o#2t)DwVM1ZV|&z9X@c8=zsGNHE&NtJ(+ zOz-J46-aV_jpoqbg@xKQ8A%8d0n8Y0;k=jyH9UBd&8*UK&zmHb7z+>MmcBtC_1Iuw zxBZcVVx~PxrTf~)7-k1f^L&xu)wCY7NO+zJaTvOATNSUvH77((WIrSCbJ8N|G#LGc;$uMX0i zaq@6tYd6^IGZFRgA%nnDj72+i=}=5E*_{^0wUexaJjp|&&Z}wEAmD>u*D-f=INjkS zcW``QzjHS3ZZf9Og5Qr8*x=WWRtP0!Zs*S=(eV&xdU-3~gs2_Aw+OxUsXVS(IvM9A z{FXO=!A(NXrST{V>NdIk@o+)V?Jq)BUcK0jxt=^F6+JDzvgZ%z8RbvJ4X23j{>Wn{r(LvNNrIu^_F7J=2e@#|*WcqznTT`v(p&=Z{1?9d6w=~9;< zI7tBmyw^CP+3>^61Hj@L_+hvIm*M>70d4vjkeZVFi4m)8ynrBK!?j{4aS{YYAG#`k zEN^E14~N&2mOxzq>`s`~(aCI-n8LP}>f#H4sBnM6&$}`TcY=}hyhmMzj?O9s?G_XP zHHF*3f_wBqshiQ!nhXs#tJF!qh zgSS1PZ}?sN z*L$%0;0ryEkJvF9UZps_8t)!dKE|*SA(v#3Uc_{(3Q1Lm$`}?pNeDNQbJ@7He#%MF zUExmra2V5!oM))arkDlte!>rb9KApsR>V`2?oN)C+xVNlfxx}y-GTv%a#|Yqer0pD zMdc@GnPJly`6T|S`GGYQ{dQQc9wH^i`51xBxgH0y9_KI}0BNtr*S>0n(BgKsEpZyx zNN&HrTv|E`d^!q~04XDO8UaLUj+9@qm?>QR^&>|Nw-*RDO$&5^GQTi?3|#Y8RI?k? z*fyv}Q+%30ehDP216HRMiQK)mT)`33KbnGj4OAhm7w_PG)b%-Iv%`DgwFP@%ghgM6 z!VAU@v}BD2G@vlf*$8~Q%SQC}RU&N_PX_iTICJ%ej_jK(ZI_D&-BFEZUVO%XE;@q5mf{*{ju5!13&~8bDKj_gSHNe#qx%iN#rP1$tDZc9 zQTM1kwSgC<(-6JreMvs{_5o_s+!~%^TbL}xM!ISZF`~6$nf0t&BI(nq+9h-F{JSpc z13*KFz6wfVCcA#aFhXfH{leDHvR^Sj4Z-xKE*MDp|s4dZg=m)8Rm4N2Ld+&RA?*s_*^>0YI z_dd5?-BGPnf)Eg9#*C1dt5G}e6klZiVyOs>g0EHs-)*$XLf$SZdbW6O<)Y}N?1A3F zHSws*k5=(txW&emgCv$H=KljYf)jXdqaJPe!{X{9fw{% zNF+(`T0!)ztY0D`+R>oB9_1s}%Oc{A>ULjZ5!=hAP1G^8k&Ql1h&`UShZf2(kUdEl z4dDWhZUvFr?u3=rxLA9uV5yg=a~3$VC|pe<95AguI5jxy`q8CzSY1XlEw@bQs-W+( z;hNSZv7c~%<%Edfsfu?DsHJchxmqa{lb{y;Ro#>dUtWY*4x!$K(IJ*OlJ93m4z+;d zxUJvEYPf6~GDOS`)8|6NsZe_YRg8&$5$KC-UqZ3e-=9V}xu+a^AqJI4HEK6Jxg{kI zku$%uS{`~>3^AUZz>kyo98FV^jpte50Xd?eUGGqTE%BzNz~*@rGs5HMY}NA?OC5IM z<_@gc+i!KcUf5xexE@w-fgqU=LSdWj3r6m$=BODb$#}lFK6`C%T(L}ji}Gxr*V!T+ zaE7p45g$I4liM!n=xee9!pmJScI0=pSuu?aPaur_GmCg>hUM5=0m2s?vYi>4!_{Po zQ=n#lV4<9>y+S~Im{g!1jp)Y2lWr3TMBMs}f^j~kr{>Huay?pKeunjybR^QJqWd7E z1XQc{Km&#Lcv(BMhnqThiZP{pwHOgcGSe1aPH8Wsj5hRH$i zS%b@vL?+x1N!uS+3k2hpI~fIBP9r{k*gA?kjvz?;2bAde#SJNOjG`*1Z;+X(fJ#|^ z+s?yr_Wb%?iW;V7;`KCd+}DQi9xnZDJaUCm^3jcOl#Lms(;E3??A8kTNDxP7bO}Rr zAE)Wxs?)#41rf*{Yn5!SC{vfHW29O^HgWC4J)*cVHtJ(qq{E8IV=K&%xr2QkU;C1h zSZ$e~lzDE)#&zM2(Gi5JpHW#H|5DC>kC7HVQA=-pM_Cv2(o# zf_V0GlESP~Dhn#$YqLC^VS8_Ro8-G`JSM#MtauE<3^r90ap0o#FYtNIqmFuiw4g0o zW%%m!l|3)Zl2OxLmhTtxoTf@Vr59A~Wi&@A7t9*6j&`V?S6XIh7&d}zs4NP?+xX1RaLhezxf`v%DWyP1^53R~ro1Rs|148A>UHRWLRV%VoVx?r>Hr@aUc7b7S3n zG1r*=4U_XF*uID&0=-gGy3M(N@LfQ~7|3Mgm1b~;bG;4~?3Cc*pCxk~Ul`a*KDy## z5~}a+EJsqKucUY5hr=WLivQw0k__IJs|r29{sQCf7GT33KR)Pb1qGsiaM{<-E0dcY z_On76+AAFv^_NK zSo|1A`oPH73)3OLrIz$+N~H#4ZYHCkZiAA%y+}0|f4#Y9<38S{YCxEdZ8NSR9{Z42 z(XMaQGhRtUAw7{NNl6}}9%*IB;GUG-~nf$6sviN1P zIuX%nx5MkKkPAY+LqpMjD&x?R^IEjd&)OGiTYRT&E=m$nAP2erU78mF0yW7SJlT-1F0l_pzc)uVf_P0+uTu^Pk&I1Dk})~w_roA0{}ZwwCjo*XTZ=o} z4|5%*(@rv-6+c+5a8t6RqYPQ2_7RQ(jiEFXhE5uL;retcLBirG1Ao;QD0f9md!l;( ze!Pqan}(Y`sHql9g1I#uD5XNDY7Rxm&-ICYWqjuD-4d*SVk}vn*U$Z2+)STLyjKxn zAe=mcr%1L6Q++Z;p|}QZ`WjyJLcw^Hyk)^E>Dv?q4(k?!qR$Xx6BKC&x8_I-nxx^kMC8%*=P zz=!E<8iqoD$H&^V5oKRc3XJZ9-fNsAaJYcmAmC5-?j;(;TmZl&Sak__XN=W?26C+)XIIO!*La4%7$+m3ZQROi?wtZ*B?m$I-5r6+ES}x{P9l zskbzWmxoobkO$rK#_!L1Hs9xIa!mAABNi`hXj`fbj^i;<$J;d!h2Zzp=JOEJqt+%A z$*>MQ_^#}|5Sd8=<&8XazGc}XY7$&Dm~(mZjJHr?Q{QZV)cD4x5oS)k{w{>0W>jRn zon2jju5B*eCWSMK0F=G5#CAR+F&(F=aMun+?llx8%WK3m{iJt}yfbPDhR73r+HYyWe7ixds`R+Ma#gA4NEPV&5k&Sbp-gh1 z+;{y7G1O+lF7mAH+u&Zxd*|jjHTZG>ZXJDpbUQ?}Yc0^cZd2EJE8-2=I>m;SNI-Ix zh_$ZxS~FcmRyJ?ZC$xzpMy1bR6d2l3VDa_&;KHUwl#ffj*KW?oZufb{8MDk*5_r7^ zqRBub+AZ!>8Q-bbfj>Y_*T)l4-ENK#<`M5cvf&?bq^!ypnmu_gVM%C`ewyQ@w)uH~ zS>CmYohq)eV#F(C0h?$zV6viXNH3Bsl=B0hHHHkq6;I>SKd$W_WgiAFj(Ux-lWzrM ztglt>s)6i*+gfQLMA^Dzk)ek;+GK=rdzNDYz^&XeSvgtYi>qI+sgFo3HoFCWvr>;2 ziVrsu3msVY8uF4d-pA+N*3l-3A@Tu#$BV(h&WUYpj)Pg;5B?KXi#@|CA)?`~AT>eO z4oAN2_b04o^Z9DvX}Yd~PnZSEF{$kja6h;0+3P~|nKk0gu&e05sMm8AJlh^onp$K@ zi#es;vQUCuKUeo7zVXMgkZ0=yD2(5{A*5)admoI=f?=EGw6~F%O3EdPP8m0U#C~3P zP0Gjg5#S{ZB_SL)2h5o)?!Ks3lZ_EUL~^-hYe@8On?gn_ekj6FVg!DKOmrA9l6lf~ zTLM--7q@}KmrdA40Ytm@ES)bW(oR|U74mpZQOADuz1X1}VmEr<9r&Xqh)XaJwj)GY zJCO5i3yrXy@%z(nUwNY2;Ouu<3PKvw^{2p&mhqFfW8urbEXney`bGDDS@j}guFVP} zWUgqeNV~(eLJq@*s}6-EK7rF5!l4k@BlkeOcx=8IPc zjLdyR(WWVN&ot?@xGt_mwcv1#`#ztxal^R3@9fIHv8JAF&h?!5sd~(;px!Phk6#gwwrdrN8RdZGO&2Tsi%+AA8cX{_>-{4iEWV>%?x``COz0iEC;SE#3zGIP zKe%*6wsk_=s1c)o<~V`a^(FegI=g(}kUX|G*Db#gfnJ)OH=#LUfZ#G(PK}T0P2}!U zG>o_)YHlN(uXLm5dRtW#sg!?2z8X7ACArqQjH6XGsyi;Vm+bF?GO`3ztcE|d;X@uW z08VPCAXftU1LXyq&c!_J7fs-3Olr;;DhP*)Q4^VgE(b+_mh^`M-nXJZie*8yM@zm7 zAs{e@+E;gR+rv$wwsDPlz(#pwI#y$a_Ie0o&ta)U)hZulLp4)kvsMWJ)shYk^u8*2 zd*;d+#6XFYh}ihD?u5ZRG)N;2ck!#vR*XgImGi;p$-cBgqxVxZ=?5E3>kaijCF3}U z+0`^E$lo)I#2t7lzZ)7Lolv(Y%>C-?kB#tg8d;+K|Fm3 z@v-et^Vk#&)U717E~+%R>t^YTq)ODpfa{Sho+OL=SbR9=Zq}+=>INaX#W+CkUeChl zp0wvho*Y7_DfdbwD^agexx#`Rn&{q#i2$H7x2Xx0Lp9g%aiy0R_EqB7fa%`e z?459b9Y++IQeh*t}db?{O)95v<#e}%gEda#U?P+7dew}T|gCo+-4 z>qPLLKH#;rYEV0O+roXhC(AJ0kIuOwV$MH*>qnH|@<%;0HtC3n67*F9MI&K0sjbNr z$uKuRl697w`g$C%5iZHj#VQ11iK0Ren{`w$7Dj@&D2+D5lI!S85{Vb#FV!&eb1jSV zzS5cu!>=CZona8!%w@$nQIi?PQsp^|$kMDX30^h-hR$x9Z>ZNNlHA^i)1RyQHYQSk zRB#$zWNSidB)tw&oBddm8#YB%y%S&~VuZ-Y;v#}-g6|)!%mdy=Q}Cc2({F;*I=IcZ zI_f-)$0!+CF#C9jF;VOY&Qor+59DicM?JLBY2J_{&RoSkAGMA^-Cjgpf4e1&Fi%Z^ zWPn@{|8StP`CzB_q#qAae;FHDu8!M(Le0K>pxg1Kcewd=g0HHB*cH48vwMf7BcQo% z2OPqAS0Ec~%E5*cHl8+~kH@nnAC-uPi2m8uCTHHkGU+lUT{~EIx{whj0WDN<=;+Wa zzs+g=xX3bMy@!e421`9%%3E03Yd@c4f>!oa3zwUw`m;3e8q=Y>?hM4%s%eUUf^B6r zMZ${ykuL>BaQGg-qDcRcs}(~l_IMf*{FA&s=RUeE zS!@!7!SkJj_;@0_c7;rw@9t*4m#+CvO=Q%C806_#+MAqDf;6+Uyocs}511P8@3BOL z^|T_2>7V=S0N8YX*89%p)s;Srs~*siH%*XO4zo*Xs@f;B^&d|5hK9R;o?|XyQDCWi zZ2+fp9Cd^;Z}32n`c{~#dw1Pn`h*j9y{FhGy_3U`b~hO-P1QB<7UHp1=1Az#i)>nm z^gs?>t*l?0niJmRS0^?#F&%3)~187=A-mN?n-k5-3OMpcj@_bGanKw4~gZe z`^dMU?;T6^P)Lt{Bm-A}!elV@nD($C7}==#rJVPW>s-yaGI*drIA#y~WdiIKkedGtt`nu0;U{ z&6rbuBjA9Ze%dqyvnG`@_^CD02$3@RR&j?{wrxiU*tQRaYpV9cO5EH zaAx%8{JEbb)>tinXYZM9`BgJ4$@P-+{*2>(Crl;cw!_06J;$2lZD?*FX9)W+b@?Xc zcwtZeaXxE7L<<@9>c(dIHlaj9q(W$ZoQ46>4}MMGkAXD}tbBY*l2TbJAn}!XvHC)= zjBeKKvobFD5`*_4k`WnlS$eoaPkBi4(a`rfh@vTa94(K3uOXCS#U!NoAfC1&oFdIa z?fc~Nx4EK3)!C1gE3KeHRG-Q{s-7?gj~{cc7_lk52vi&l!MC(@(wvj3Uiw;lBV|8Y zBQTZ)nX%MxShzfzo_-Oh-5RwOWZtzQoSmA?M!uhNgyVhU%drU?rbR_6B_=Qkm3$dUX~xp zekG&Ee&#i%D{}6V7*iqhEh3h|gxNi)%Q1JZP97HD>-$D5uMg$<2xB}JYW?U}clM~D zLkawOch2)Roi`;YLUw+NKtdBJ#EJsUqdrxb6KGa{8H^I$)b_>@+>X4SA}2``F}if8 z3QO3djZQnI5wuevt!t~vcb0`N`M53IzECghZf8~UBK8;`smiB7;Jh@J6WVQ;jxrrx zh&{Ik=QHms-k{1-5M}d9;H_N;c%I-7DSU3*>Qz7IeSIku3SZ?0Zp%n3OD&4=cT7aD zD~s`em}JF4LO*ILU=uV&#~* zYP4q75DkUJ))$*7X-0F+nedWdoyESxA=^w*T1Lv7KGp|)j-rjkr0!=-68cxXY@c27 z#e^s`tHw`f^9n3h+(dWS!R2!lSs%{si|G@8kKd*^(jq0sfj@K2NmyC1!8#4hN`3Ah zaj;>9QHW=pjE@^X*QA*->SMkVBGk`}eDY>UobpAUGxHoDM_Cl<8H;bz_PWA-?F(_h zPi8{dLL-arVp7tJebiOn0*U3z(o8~LpgWRppsVG6L`*+7Zoy!Q_QulyD)UaNk|z>> zU$?3zo_VmcdxO=%pAR2#xqE+G#E63N`W3X^A%W`!jxa z3UvB@P(J1lJR*Tx0TMq<+PIB)B)R>PF~foglIqt%&3bFKjH)nZm9V_{ZCq1KKF7VX zTFpHLJjAK1DZra-OWugM%F5j5$A8v;^0LP2A+~C0&5-#{E_+M;C*HmbLUPM4LUjnu zY`}p=k&?k-Scb2G?bVWUd>9}h;(ezDetB%T0!!k#@M1b0qPToNw!FL5;iwi%p0AcH zpXw!bi=E@sK;L=qmghwjTwy2QyJ6lgHdM zTc^s$wlLJ8S=F_(4z4E$db5pYWESye5!jp#h~R|DqD~&>vdmF!#pX`Q?QHYK^Oe_} z&c*AK&z;`H`;&jtQO*xmu-BP?%A-<9oV6v$T=cB_LWs45J?BYSq6Dh9P;CM|0U$3=%9Klg_P!ASNcQ!A z)wf_F$|O_JaLiWy;SB`?M2vj7x086(8J6pRG6JuHk2Z?E{-2|tc z7zT+k==t=l2dzXZlnFdLg%PjqSjx4+gi1K-6_}z1v!{4_tIWaTOP>UJ#nFJid~Nk+ z-r4ANP)_?D$o(Vv*b3}_Y|`6TOzaLL1=5;ep%OYD+OL;wu)}OdFO1|F_n8(9$ zR?lx9zG6DH5-7dn=bv#gU!Ai?89LuNiNvH3X^(B#*v|KyT(A5zCRc0mlT+*bL!>rz zfq-H!j9mdh`P1!A!7YWk;ox73j0Gj*19a?x1DPm;*k{wcO0X{mI1< zU)6@%7li>}ZTFBBG;QVg9+yo@5%JHBr3WlC;IU?VNw5WmsQSwr%B5Gf46oGD(6-qf z`L}<*Ii|$w%KGY%h;8!b)3De!jjhpT_Y0qYR}wH}?OTru1*Ty2BrFsw zL*i18Xb-m3)gk#U>u?JeIoh%cNc=I($yAE~)FsIZ&3m(MbffRd5Z~~F-98#Bj z4g}EmH0GB!JWO2E!MXt<$W)0HJvhdvq#2E-ETdE74P(>|7 z5iRj@Y6wTVh!N=vWE2!CwMXekE8YeAXvBxnoHh%<9)RJ4wVrhI; z7D;AIN7l;lt)4eAM{f#VEY0KEIdb-HkQXMWXcRxVL`cxIC+~8wmIr^@gWA$_(=dIg z_4~?y!Dd>%>)vj6T0OKk;O~^m`hja6lvm%YcQ+Lg5mFpr`}{&+Ibp1Ha-J1VO`B3# zguI;@2WbD^loh-iU{+iRRyMCX3?3%Y5Aw(BLpQmFRw=PrF^uV!7iSbt$KImow{bkXYOD`RD&hY5x8$;z3Tu7vJ? zg!{?lx>)EiPIowA4!X?j`*vriA(ghQTU?#^D&^)%k#141dKaf~pT>FV(aUaxE==A_ zXEoXOD%z*%&=w`m(b{%hYSbl5-0?09G~MIrv}*{chm-635Kkd=Pg%U#g+W-+iYPp3 zq{${d?@|ezkF=(*y*qT-b7HX<4l^r!FGkXl^tImY@@wzM^Md;$`*`8y8Obrz>7 zGN36>jvkowFd*$EABS7RG4#}8%j&ZAf_Nuk9&r!iC)|_#&aunB(s-cFj18$Z?wE0m z9cz3vC38czSx0WIJ^GOxO6D9H8dB>?>2m>;iZ>J5VQopWKWE1|4_6}=h_Ty$2%60H z82}E&XL-Gb*(4(sat_pmU2NLiE>^{k%*^>IH(lzJXu0GDS4%PZ}= zL$r@r!q{V!^{+A^fu}#_<=h@mQuTm)XQ;3g>~@4!hcAo}B$1o3s?gmC5pq&f5Gv(Z zQNA^vmv330!7?wuo=lkwc`H?8XF49XWQC&*hvU}r+}B`t8tK_ww(8J-ib_S*BYt9& z&Aghs3%ghPA!HUa?8rC}Lt`9%&t9Wc_jofrrX+hkU(gb(w6&pqr;``F!ca0zu?M?T-&oYr5n6Nl@C&^b zo4F&?PaoEMWwm0rRO0+rGDU~Oc7^tCtMZSjZCUT&df zYtdO)szul9Eh6EE2RK9#?`elog{!LXL+=Z%M%tpSK+Uk!KNIRgva;ki0kw?Yt5*mb z42&y1@Z@V7y!cF0fTP=%B}KkfMbo2qTHOGQJspl^4AUS}9yB^&}c@?4dc^)k+xW1KcQ%q|q7VcrKC9XDTuiHg$1-bTwmKfOu03%cE%8?E$5I zRFVzf54Oc1m>pZ3)?Xy>sE33reHr(|wa?1uCP*S@%jX-`;(B>!gFlBlwBXtOxu7>% zQ>_blvC!v+)CoERvNhdSX3&fPyJaBEC7;9{%#|>IvaQL>z=#$j(85)<>qj|^C_7C! zJ6bc>@Qg@*PtHPFWOCZolXys!L#{Kl^X1F3Rm7`6gKpC9D!s7qBoSA91n8#Z&9~i? zpEtCg2;QC|rk6Ie09JqLAEZ)Fmz%ct8K6XG_mDQ5Zxs%6VZ5t7H|+{;G=DZh^8|{> zD9_4uZ3X)t6~Efh{D`OD8}>H7 zI;8O9lu|V`%}+GE63DAJVkz?$dLu}dwP?nFI(-&O{FCOM2Kz$Zm7)dP?jC%jKBY@2 z$HsvPS|K`z&)dL^b(33no{&*yT!%PJx@ z%uSFZ$R~v9ACS*K>_QGOGbRS*PPRXG+y)x z5AZ%d&D9(p?g-ttYo58f8GrF|(=f_)-$BG7_U{*Zrv(zPD0=jdCv-)9;^;DnnSyEN zqFaKak{zd?1;2Y1me0$9=liaYuz8+;<)p4})QO%5hGAw2ESw~%)pqau#*S|Pe9Tk` z9uJZtd2z&U)Xwz~!*1+=9}r}wVhBMb@{02se&+ipoRFj}D~@OD36At2&1$Z9=M|0g zpe(A+SNjEtwjj;x6_IRRLHpVux>OxaU9p?wWNn<-3>?Xxie{Zo%^S{axBlCIn;!gy zo(-xU)7`w8@Hu#iAUA{+)FQa)Efx-Ais5Zat+p5ACifui_F#_0kInUKR_kSz+j^24 z4cDucSZb9Rv~pbCrdwGv!{F1MLFzpK?b7CsZCb4s>SIGg=_cV>^*TP2POXg&03g_% ze)ESCf&d`@|8zS4UoxHB_Mb|B=id|w7(Z#rgRWde`)Hl7emOYc-JeQiBz#pjnV-rg z0JI-`H|l4bk5&Z+D=aGjV)Hu!p_6!|$25?3C_Dh*qpu33!TzhV`J0o_@A3Dt9#gR& zf>jvx-xYk{&y$JZ*RjCi);j`#H-2*!{ZCc%r=ke}?IAxEOnXX08|Qg{{444SJn}n2 z;j~ns+Q=1tZ%8&}0(IvKUNhVT zfG~dVpPt39Y9{?t$pnDzeix-m->J=4r4(F1@&V8QeE^`H+|4=J%{kiD57wnsk`EQ@ z1yT-xiZ#BUX90Qts-X&h6_7UDGMP$Zv}Sd5HoEyb^5`QI=tdgkD|7ie@_7)oQWGIM zn;Mcmh2r4}bj|JD*E4lhO7s+q7srUKQ~{g3dKDN9$jsFw-k`=~p#h~}uhnlRC{DC% z2@V*@?=-Lnph3jiQ$ZzN%BjJ6D+Ro5iF6f-bfY!$6%FzgVLBUsU3~3%^bra4@x7jG zkeh5MYJ3roOaMnE@w6r}Koe+!E0mfP=}MCr+Hx6Cv2>L&=4oZZ>FH6w#`IB1^x3D8 zg?l2s`fMPlf|W@O$l?Iyep;vU*SJN;Lbp-GYm1Q;7pX@j>UE^dEEs{6dB!2~!inene`(qD#i^i2GI08`+?=9z zbvm%uhN-GRq>rx;Tz`~Gz6QR2WHf9^JZtBM>T4*lGMcTtTsR^JSd}8#s~ghuwg9W) zD%eu(Njn?vVgtHjPW%fnE%buvm!+WJslNmO47dpR7BDSv+D<1um4?gs3g>B8dULmh z>bHbyXB7T&f4us~_H8yu*MIiI^wxvHB@Mc3VR{8Hz19B31VA4@jQHF3^W%T`f$7Zm zb0m?jt&nfEOTVL5zATzKw3K(eUaTrjti3+0EE-gj!5$LB*1+H`js|wbFtufVUqk!E zGRZW`K$Aq;n`8!vEPZ&2kQw0n%;F(2^pVlPf*FmWe{{y;7{19x6|NL;W>XF-2Ao=> zTGQ{Z_(4kE!g)|x%yD?`-|LVvtH`NcUSTZGm%C|qz`^iLtSA7dg{qqvd!o5-5ja2w zK_3087OZOb zF`58Zc&qC~poQJpvEYz0){->F!6jf%gJ4M^e>mWTskKT3qG9{1!gkZ8yIdumT<2IY z>Vd5I`OM~A7Gx}aS0cD}NwW@Fu@6f;CC{x_Ne73jyI_@U_>fPj6~QTb?4$z+mW2j~ zneS6`=IbzAE~e>dHZ78)!PI_v&HLa=;KNLGfCPZ#W`RWiTW5dx|6i;}e=*%gnC?h| ze{`w3L{A!fu9fL>CR0s0?|=myoCxad`_s{iw$_JoQ-f%Av_0RQruzbZEYjq=gXsXETnalDLcLJZQI=!k7LAfTTI4YH? zafYV7#|OQP%&Sh8mICIe)9C{52C6ee*X9$#t8J86`<9=b^C`;Y9+N&?fE=y1w8EqycPYgKC>38vmT-|TJs;5`O6tp zzp@=P+Slj5+!QP6)>;qIS!>W)Z_wQok?4#8U;Jr~lLnK((UnOH9Ibu7m(Kzc6@P1$ z7gsg8j8$=J3|a=oXjI z6)v;1p{cCRNumv#+-d#akF6$Jx1|5CV}BeJFcI)9aRjyY{Q_B7Pl^8e zLAHL}Zq%J$to~}Mc6xE z5?Ea=UtXcOwxC;`z?|P2iqovad9;Q1l1+}rg@B-zSm9W0rs8S7A zOKf}|+aEsy?iU{z3w-e*l>Wg5z-Iq`mPl7Xq`jwnL09fOQNCgy5Y!|r+x{U za%C1S9r_v6pX2SPz1F{uT-ufG}9OL^;{zxK%CkLXuN z{;Ad98uYb&yIkESRMAuA&yQC{3*CQad&nh{4r`s#PBQKGw0Ua=Yst0M|0aV0Z&Y{gvf0PBha59QeE8wd0kuj!+bKY;o9BVm93`0;P#A71f?NA;JZWcD9s zc^-Xn!#}+Lo9hC7Br;dw0$W=K&Ef-dz#Lau3d2SST}uJ9X(3ldh+c6l&2&Wc0w$fU zJquKl>7E`3Dv6r_! z>3>}IuO@$U2>$h#bOjQWfBobCc*?(dd`7qS&EJ3GN&1Vozq+)%Fq?Bu$92om1Of9Yk))Gf4}_wm^_)wT!A4; z+Ts}cggwTJSWsDt*d77raL%t**pkfK_;)Ulf8&`rpo39J-IG;QGy2Rxa!^bA;$PTO6=9DGe%D@`BJV>mQP zuPQbK8qZjnA%gJ+ZUo=0_TxFa385gbzWurOjSu5IzVU%+ZG`Ad^q(((y~KaM&TgZU zP_312?Zm}@dvNWqKmT8TH2>6}|J8r*8(WlH2b$w)9Cu9ge{ncm<*78_Xu2YKng+f| z;~f`&um9V_YBaFpM`>}2a!;ew`rkf}45jG9(s;nzqy%cfOEAwG4n;GTb~2U6(8t7$ zGt9A-M+3`a=FDL{y%K~uOItw=JL&_I&0`E^M3ok82!JT8n%v? z4EQ&Y;baiyj755CcQ}m2sTK!29GgwraWEoHZu&WuoRzmByaXCdvI2dajIA|towb}D z`LZRm$~~b1ebB5E@$3S%z(d<$%;e7bTE+4V~KjC}6LJ8p=o#0rcWP=37QkN@$P^dBdd&Y!>k?)SgBjucAv|HB@CT+vE3 zrHYcAsibGsNrG*0%<-LRWbM~quFbKa@+^rqGP>eKrj=GTV=}F(RIchIPq5ny+E`>oWO#L{;jxkfDM8o8aw0-Fe}h4n)~*7rW@UA-Thzsyq_P%KTiE0 z9yap-fp`DR)xNzzuQO5e>#4cH+v?HTpolK-h8_Uq*IPs)LjeAnuH8 zr-Y2LE!!#a^#6zyV5(N_y}#x^=iHncV=V(pM7Gh)m=UT1ruv}Pef<4PtJ;lumYbBi ze@pI^DDS?|556(M7aBzham{)2<)I&5x8#VU?M}Y#4@NMo5d~VDqhvvDjfu!I~8gYSZr7`l#gT%-ZwxC!dRSlidY)z&!2Hx9hv_AenWz))nQ#e zOPcSz9^Qj)q5~z~{95lMne2k^yUF=A{-CDcXh-L2_&sTVuT=ly_vXLplP3#+`Y3@AHgL!SKP} zRd553ibe|7)(PDu8 zwIjt3hHU!~R1T>s_hIxJKlrIrka#1!$|cX$9{quUKFGkUngG-!_tbvyD_BX!AU~0X zOFrTV(9b@U=uh7yb^dam`>x=kL#)5x#H=?gMGT(rN1sK+jVp;M$U>=@!Tjpo6n}ow zsV8c1Ozn)5n8AI?88IhB3Zs-+FAJB-PO8k^f7iZ#4YGy zbfx_Y$CoJLjxHv~@OJJ}DXJ$+$&_D5R;Wo&hlEx*mjF`l6ulv77tybuKStGr3- zUan1%h;Hf6sP1BCKcz3l$R~12x~9yp-ySm1i=@tDuPZn=j+3d$is{8`ZGXg<>{>y! zE4^y6^^7VAF(iKdGSqNSj_$8#w3;Rv-_LLMp@2Nwj>mbWCRFM@C7ntbQ!yAGeY_;g z6hDJ853^4X<3`k20x{a_WCFoAFbADV`I$dL0tKG`m!nKYpZ&+)J|M1BBG$Md+qjRr z;^ecO?UMK)(az7#@sz4ua)0!{Aa;UY4s+M3KFNyLK>MDT@{I8NsnF7+T|6V^Nm)GB z$cUTPgK9CG84gn@eT*|0ah@~l`~1YsOuWOm&=7aBw(vZt?H;st z{3#xl)aA*3$N0yB=)C~lBv5=M49a>6lQgdPQS3A!bK+0hHGazF)l8RlM2kDvLPhcy zk(y?fXoo=OY(Wh^9e-6hgR*5r?K#JNin`p$AWGdz)-N8{dAQh?k6zv0*>elw0hA2l zoLi^o_(ev&Oe*usw< z@`&c^ja=NX2*}ws4vES`rrjm#FhU^rln!_|xk<8dFq^|Dnt!*ssB}hr6`7ltyH7AQ z(1lM^8FI6DSeiU0rtxXl?q=E8fA)np&E*%@4)S0=P%l_}^eIPtxJ00oMtLJs02^QY z(FN(L|L6G9Pn>Fg^mDd+9^+c?LS<%Cz;~D zU+D;pAAxBLjDI(QlC#;k59>uSVOo+%>^!Rdt3>hXlf`lIXmfIp`&Muu9|cpJ}ve!d7g0< zQ$Qb#V63pDH#d3a5$O#!`04YP@x79QnxY}P73hPr?|<7s=prIjS$L%MxRUoc2TYaX z3G$dIDT#5O<%d$DlZ+@Mw#UzDTB^R^f8b|eV!>c3iE=zj9L3NtFIzU?-mt{^`i1kw zExvg7e{sH^VDLq@|D2;@n4_%N$LV(d=N!2=&EMweHr0EN=q5R&g-1&kJ+>yjk%WJOY!1YghbbK6YrQPhf~wA>IlWZ_Q8m z>N%cj$8Ggw@e?c8XP@lk+v8cr^Kim-5X-Q(WPFs1vdafwUW+zxg!h=1F(d02MLnlcFZc$-J{bHMdyanB6~ABSmpBgYojCvKYSkJ=cs28!^wZz#{pAZuoPoKjTq#R zUi$|52WrOD#6um@-oWJw|XN(Ia?}} zvwIs48}RNk4)&*vjXPJ^{q{3ob}y}r6Mu4D+|Le^ds4{-37>YL8-|$N#pgIKk5zm_ zAtt?vrPty4ksj#6qq^8om|Z0bKbJ9bcF=A=`mbAH(&Ep9oRGiC317=IKVyU7qGwq$-~o zxx1Kqdb);Kv~SK>_HnNkH<6JZImLof<%emRk}l2#iG7}yWX{_K2b~EJ>qW<>T(7-! z7`rdXLZo&4KCk4QqPy-sD6zyehHgyXhytJ9AZcTzQ z@sMGC)A(>Y@0ao)ZDWhu9R20Zf4PS(NBIwOcvtKZwqK zFtJZFSbG@+#Ke@CpaMN$t)p%J9-3nKjxt_tae5q=@+k$0_H=Jbd@b;i)M(6tl7Qn` zD2zw!y&)cPvR`pu9&Hv<_7}e1Y5hEc<2~80KUmBS&X>Ux5)^;EfPx2gLtw6#MCB1Z zxEOm;LQiI*f{gHh5n|sH((@Oh&4fYmB9Z6P6^iSWD)bXbF~fK}>+(lpWQ;{=wnTAI zYV(!rm}<*A+XywGqiv4uHmFA^^2tn8JGy+|xIFNP4)k*JPVS=`^km^-%uWyAgVM@< z6jbC2RLS0xW-dM@R1DH<_-%MLcv2!6)*I zLd5$rzGT1$ylERF-t=StA7v!Tir+XX7kJ(n@w|1tcwx<>@73;?$De50=`Wbaaag~^ z5fqe~a^t7ENSF+$$pti~qFwQM_vC&GWnz)teBGt)oT2+$rQ4_4dGev)KGmO3uW=RG zXQ#&)ztewsULp;5h$AFhl>_1khadkg?)W)A6(3}uV7#I#`ipqiaeaT}AU{|P{?*!! z*1v0(M7N&kbRkTU3azEr!yB*0G&CZP`;yY}BPGp(dZhGu`tX(XjW?T{$xIb*iNSb1 zBt+Xa3_WWHa$jvz;OZ6@x*?9qRpA)xjX^lzrK`)kDAHVSIAe=!ar zJ7sY%k}6+XJFa{^{{4cJ4miax2VTlb)TMLQ4+EyXinrwz3_cx>xhBTy4aC3EwQ}!eftgTATn=d-QX?4yjN_|Jbe$uNJHgUBA8ShWe0aM zC*~?PZ6wTXtk~_Bj203ee|;{FwsGl;v^d6W%A-wOTwmyr{3I^}%Xz%mJ4gG5U+$e# zOvg9@=sch#Uf}uWsS*!2`XO?PDd~#TGa7yLg6abON6sPVSU`a4q9GN~ID35LN)j;c zH_4oL?CViRIFy=?us71DCLo9SU&xFip4DycQ+MlEG44MBRrACT4bCr`wyCEu{uVRR zsV|o{7ZNQuIr0viV|?WG$4*3_}$5Tz*`?3%ue%64!b6 zXtU?waIMtsoSW;HnimopO8Z%A^i2v$sdSkZhdl?SIaNjqttpry@KP=5L$%}GZR6{f zL*)|oQrs_?dX@^~crqssmTMbORfa50R8UPM?evoAHslp&eEdn5{udG-0`*{*9~crD z0fCn}7!o3X(cbo`x_M@4jQ$qsd-=>%ImF%O&J|bcpFaP$JGMSZinvvdSq=B2QBJwl z&&7{@Yxv2x+8$$u-UaqM{==uB7dGz#W!NCADvK=?L!NA$*lH=w)YZ8OCc)y;Tcw=qYDPQ2#2Jk46o-QaB&hU-|R~Tmxmy z*4I|vtdcqMG*C{L+Y?3?cP=~o^49kJ;r@V$FS(_5&aM&1HZtKVzqP!zL&LdtDC>$D zg7K%Z6K3o{Xkv!CB*ZHRIs{8m|Z zXaws@v_m+7CZ=k_89ZN&MtAqba&o1xv3SD?F@`hi&!I1h_U~bIb+UhLJUO=ZFJwoa z^7h0NirF!~{)1s`b4Qxr;*i^5$ZKHoBJON|zi;=Al`D(nluvMq(I4_b&MR^6RXiiF zuE#4^%FallCsd@lV4xYz3IbUXfYC8Y^9AUMM~T@|;w}gW^j%Z0$`fuJ0V}vFwkBN;1=LzZGO`tTP^x-SiWkQrBRF&hg;tcxui5Sj* zC}~&Bq~go_j2MxlRo*5d&68~GjJWQihr3D;D^Jj`@V{|hmncsV7oVSS(ZL?JW489g zZ|BVJFIgcjgfixgkDdJu*~dQV)H7^<_$|Zt?cBc(xY9Z9?DC7o-zo%6nY z4f@9!azQy!Dbt13cj%hKv(0A-QQryuI}UQhfN%~jxKU@^p%zE}Lp;otbL1_5qpffd zKQSKUoGs3{CHi-F@(2G(*K4#lNC!*EZxfVp5*kh#O;c?Gt{p+ld&yv*k}hA0akKop zUom})W8R&xYb(Ec$wBR-J;@n#4-cNU<4+jok{CBd`4^YmysHb_064@Jhv$e7jV#aX zk9h%B-ux&1T4!g!Zeu ztGy^3s`fp_@kBt@N<6`P13t=QUIhl<}ZR7jrNp*cLS_}6nQ4Oypzht zndkD-P9OaLg+RW4es_{hHpZ|E%(y#L8 z#k`TzaVa5dJYSaq*Q$xA#z#;Qx*gBdc>j^*P8;fph_X14b<7E3upf@p^JbCM#V6*; z!yI$1oZL4&Ot-kFQ0MB9cNlZlcs2;B)76Z|W51IeOui0(p~QS7-*e*z`h3b$cP~5R zS$+b{k)L4qTd2XcxI*U4Ev{hlI{4u^y+X&g!Eg43%~?D7H9ve>jXbBLUxTt3{UXg@ z)Il*~FlSH2MDb_8D_1ag?}pe~bCa?7drAr))c)BQiI>isda7N{>Eu1(6E7kK$}5iz z*fAlUix{hal})Sh1lOjWr|r&%6vDdvx|x5jg0%IYdNa-~UQXy}v?q@CT%cBNg@Zkw z*B5x%^$!pFn7^8p^<~^B{zrU?ZK$GUt@ zNE}9beN_lTlx_(qGZ(3|^gDN9Hb`!H2#erk)QKplkPm08_R__j?up%tJzitem7YO!>^{M2q~K^#nv4W zB7>yiuX4Vo3!WcRsu%^H&Xh^1$&Grf))fV>28zbsvrAdplvGJ6Oyh?YtT$Z$ zS~%K&<0f(A3Uxp=)c#`(_oKojn-Ei!p&mzB^90KBWo}|TmnZwclw0(bkDR;f_^%x9 z@>&d7kB#Azkq)U|FA7x>s{4nXif7BMxf3wxO0L_*@=!%j)qbkBdiw#*OQ4D(&dLXB zycW)X0xUal!UFh^&ba^Q%;3V5~zjB6rx6$2y z8`|+)8=q(=vvz%2G77+e+>AWq-@|4PN|OXU4)o)(m^+^$Hk$qL`pQ-E2d7w?VoOdO zF4PT2Ihs^uA6G~H@LQgzwI^TC`M3igc{ron!ngbRH{QmPtH14BfW?~vEE=wvO zSXVwJO?zEHLpjL_Z_+1jO3$m|Q4j~?bA;0POYJ}rwU<0?3206TbbQ8cV`@?s)b}Jd zO8Qw|gu2~@wKj2GyvB?fP~}W?&Pq`J;zJ=sCkv~eF$AonL^+ftcDCy+RyuIYCREIa7@X`2E)MP=GF(oTb_3s2E28*FR1a4+MxYx{ zkb*#Go|8ULh>>L06-y^Acb;iUZ+>J2gGIIK3komIJg=PjlXliNE>=lJR3qSqI3zVi zl*)p>+&&F*rL=hH5U+LV8X!7<@?v%ZVyYWPxUQLQ@8VCzh5-1hm`PH6cpI`>(m#j`(h2l@$YG}mHoN@Lk@x%t#J?>)HSHQ z_DHdnRk4=ueB~=$dAj^&m){~19TCaz_u3y_M}Aj$$n{BJa{U&U4I>gIUViMY_z5ij zRoZ-ai_f3@@RPsZpM2|+Z+MR1Ie*4Gez=t7N~Gm=V))$R8oS50!Qaj+nFb!8n^3XV zC#t04!7N!9$1z|rXp}=-qYqBJ^(!GVBQ;bq1CMJdsZqC=kRuX5e_z(WM4adm@qMVQ z->phA$j{4Mlw!qCoF2?6JNnf;i(i9nytMg~Rh)a!y>gx9=r@aRTeQ}vZ%LS?#gV7e zsF9Wy@AILN{(#xx*|fDtMk6a<3fQP4uCfVLRnQjQpl&gj#+t~zcDqr4PpJbRL6TLHyuS^uIymrNxR5PvtEgq7FiQlHy+>)KI{+l5{}hpOowDws|Wsp3jf27+d+9A~_6 z{dqCf|d8Hny;dvkTD}xj-B9C?K|STuarxqz$GeCv<^vx+ z8c8E7*yQN%4RPe&DTuOhWVDz2BOmsZ*RtY2_UC18wIeU`n+Jcl9Ae@ba?xXt)WPT6 z$SL9Zm=akCHGlD}EUhHJ)(ZJ1S}G~0O*-3PKK2;K%;*VXo&LB8lICNzylDdTnQ138 zsz>+Ez&Al2#5INFd8LbwP3}r>dpig1-1F2~#UA(P13pwBzv$L@V_+5^+R-82@I;XR zm*$r^d2RKKKIOjt#K&cJai&mSO)y?C3`&PVPr|??S?Au(+bX9NP#)|X*`eT`Lm~;BD#jByyBFo`{#_8-{`Z0F|BQH znzc~cBi*An?aGuv8&gVxu$%0$9YxMj6^nSwkrx^BE|=6yVgXLTk(`&hBTBfWmHLXi zRuEMzxXJI6RQgmEoOATMV2ysi0r?JWKk(!R{(oqHUi<#H*qha%7OVZiHWrt8pE!3X zdkog)ntKVG*7=rb-bJ?FO_LOg`Q#%W_XkO&O^~$AT zlsG9hx{oo2jy%~p@BYl^eAqcc<&%sUi%;^=sHGJ1wwTRBU(sQFD39#eev6OxVhIaHjGg&I8mX^a3BSHW6jFk*AKIAKZx@_ja-jDieS% zQj)$&)<3@MA8Y{aoz1RtJKBxc57rMr`Ig6MFI)SOY)2#T9e9{PV(~)*1Ab^_urJLq z2G+}3_Q!M3$A6EX9^*Ra{MBtqZmMGGReyUY(J~OQ51GbXmWC!<_28vtP8P}h7*Cyx z%=r|l{zM;wtK6sB`yTuc@8wOa46z%MO8)kq6Vjn=EYI=N+HJ4qMayW_*ti1P0em)^ zVE)I4A=bW2O+o&NM-KMxF@~}@)sZ;IrH;7Ukf)y?IZ0>x_~grMNvFKu@^Ob441d~E z!{>EuIW=fw1lwZCNzNByzzZbhoTL*1D#kO;MHgqp7)hWgKyl_nSxG?t;KT07mn(fG zF%NYvG}ZuWmY9?2f%Zk(7F-uI))^m`aVo6sIdBLe-RrL^7ob|5lRcy0G6KdI3)NUz zq2u7-`4-+UX z@30wgm=5b2#TT)wbL;}=s20_P{%~H-vB4pS$dtati!W{3E%FdCFVXJDOP<}2-_u8F zd;s6Cq?a$^3;AH@+{B7MdE2sq>ub^EvSIKiGf=#qqTLl zGb6ALGR2<)E-&+R2A?O3*)Y!^Bma1AEx*&@Cp^N&3I7ET-52SRo7ADLg|WgD&J#b8 zQ+_q_C(nENBL6nSwJHW4(tnpZs17z>Rjy!>&XLbyxqzL-+`@qH+ynDzxrdxQ?0<_z zRr&$cGK9L4A}zKtah4)=Gte>CgIN>O5GWo@c*ZR*_~jVu*Tsr{G&ND}ZLc|UcuV?c z9Lq>GdMU&@ZDiaRD?e=)U^xl-gj%qggT)OWTa*tR<%9f(DCztyx5+Q$!p^w;OQlrLP*o2tL%13`n zDV3_Xm$lBHDRLQ99FkO~MD6i4q3Oh1FV`DlctA59(DeSLFvXV#x#v+w7CQ_+a=4vc zeL>b>-JM{_U*qJ+SM5c|Ef3}voCQxMru<)-ShuIex`sY}KY;W1 zm&aT{%RPUKtK!19ajCTROq74jyS=CbfVM(;^hM9M=lHkUC{4~~Rz6O*ZUBnMImC~i zmYDYwLbZPc$DP{xQ(jw!#3aPnxoi=K#koW>sO42&n-b@l=+FVpInA8C+?RELDnWD4 zPr;`sGqnr?8v0Vd2@bf&%0Q$_(zF$8Ps%9O9_W9#i1d29R%%0)&V_0`oS%gu4s3FQ z^#^U9$8YmH_%-oizy+yM%&~9&pdN{ z zRSST10d+~gUZa$qR(vetW_lsU=rN1@A`zAF{j z`Xr|Cx7rt#Lz5Kkh_)7F|HadWSzCb=Umkzr2T@KYMyz1hBA!^Fzsijp{4+oN$dw%G zPz<;Mc^Jnvyn2N9lIj&-a5RFZoa`T*Uhb z`U}T+|A}jSQTwR;f-`T!_T?F>i&$tq?UC<~dNp9~bCBf^RJW37j&*P5%xGI@YD#}{ z3Dx@BXRkR?`HZDXB2EsflspB@S!cwUN}70vdWu#>M2P(0q)92yv9JxsH<5=y4%++1 zAU*b*4*3Ai4<Y}|m$9S_nxu9DBC3KEi%AMb-p^o&6TMZ%?muAKD6h?^Gvo>V z5bEIXzj(^dTW@3P2kT=Dxy2`6`cIb@FA^3i9t`5FdZ_boDIZ;Wf%m))7(uZm7d2jz zZ--HMl$@4xP?ldYrG~px5Z{+pFA^nx{n9>xhdotx-(&vSrCcgc`|ZyT;FOnR z>l2=NU5}cH>0>*5(L8mw!S32g#!-XuKb4JWZrFh8f#igpEqu2|tbJ%ay&e4fH!2ng$C=0`mB zjD`Hf6zg*LNaPfhW-mIQaWe@szGp(oThTxEn%&HcTmSK%PyYI4wE%UEZ!81rQrkD4%*Ki?BY5|Wk#!f z?xk`H26;~k|BJ!IL!RF+PR6s*>+f~{ioewTBg+JuFxGpluQG<`m@G{2fu(dktgn#9 z)4$7a%>OO>b3M08EkEZ{2c1si8up??PfHi9f(K8_FQ4*)r?Ql3lhO_crWgeAs^L7B z|1c6794E^!F*rZo&$|6fp8bVQ{crLMSE=(S$NsZ@k(V_w5->>nS`#(o#Yi*MU*8xg zur4Ew@~xi&8|)_J0iX{lQG3gviHFA@tuN#nmdDz+ z#g1S-i|&$pJK*`pdb0zM{G#s6U-Q_~rhFvjlwo~IBEJ$^etJqXo8R=23#=JIRYR%L zg;0*f{B{p?09ezE7goX&y6@f8KpL0qx4^oWs?;+K=LYu80F_TCcf{ zedZqC5cgEtKi0*T#N0MaYs?|V`e;4{J;nWsd|P;$U>**$a^}RpJ>x8LoK(`*fr+hM zYR`Gbt9*AeV^Dldh>Urc<1!Kxf8C9~T>{6vI9sC~_m1XHUio){DF~@oS9~EIuzvY| z?pgg51L6YkgmHFw$NC6sg>~z@D9M}zhPw4n&|$4%^;0ty&u{wAdUY4}7#m`aLo6<_ zx!1O~#7+EDXJc#cV=OOcA?JUo6?(d__EUUfD^Zxcfpv6WuYRk0XG*N=f4T7bd>3_( zzUYgT*mwfp33tB=ke`fuE-O~`t6FizYre%iWSgUWRTvjb2YNQwMi-PQGC(M$#@8B#k2q=y2pQg?3IJV#2CCvhgQ{^(!QDpasLa zQlJt#JRRhx{sNExNA(x}e{Fq*j{kZ6FZC5x&o}y_zJgs_#G{(zT@AT-q*Du1{d{h7 zGdj*sHFqd!o-Jm5kyD#oaNd8*v-`b{!cb?&^5Jd0gnK-Xu|Dz@w!iZgj(g{a|C_}F z?^Yi<@Z0kD`XvLe;Z&1o7lK?6RpdU6U!!W{{aL5vz6Y&v9ILx;f7E}M_|)i?FF}XP zosx8hoY{ZWCy`F#8P3$9kd-?nrm_6eC*-W_W5d}f_Kdf zv{V8bn+pN+A|R$%e|~qEx3C|B)(xqo5Gp2|Nd-@Cm+Feol*-92jT3@$+F9WkjE77F6&RCJwIG4k!$_KfA2fiM;l`CYRF+luHU|0 z;$HD6pRJ)3#K?tTxfE>?*jCRBxdRLL7Bub;IT?6Dt4Qc3ncDnGMW>5N0$L`kLqky} zLFGP9jhs(a6OfL(R4clMz0Gl)R?_E6KF^>eKfEA;&kT^WOb4t38O{+i7EeFd%O~`$ zSDF*b%sVA8f1_D5p}@&X`@3n?gw&XjR{IccB%5yAwEzd-+jAG#^(b*Y9&H>c9sD1w z>q)UV<~H{ivAqlVWAWo-?T>PZA^q@BPxbvzHDdm5@3Ila=DcUWxjc=1Xj4)4iKttd1JGm1c`^c-c=fA-IPoCotBazH#J#m9h8$aUw| zP_r;ob9KuOHtJhpWqEBrpacoSkfH+TS zapkGlCn@AYO$Hp^$~A-aq&F6aAD+iB|Lu7U$9fZ|n%TcUI|KciQ~c*#GkW)D?yt>% zylj6se`58B(+oD9o#)WM-+1X~u*O=pT^rI$%>%**U8q`~<;|36sn9>8`UPTTk|ft5 zm?rdzR2Z3QoZf6ns&T1smEG<)Uu171W8EOWBb;Tz(c zwyu-K_j!KisGoR!m-P#}eBh{P0kYFG+O;|*<^hc-W}Jnjh_9Da^f7s@_RmDMqi{Ut zKC~3nWJ)WUa4tWX=GFg~&)aZ}n8MupDGU3%`j35es>^q@_whfc!#n}tLp_ZW9}4eV ze@=;(6Rb5w!HEme7AITT6Q4O8>lZ&_{fL>y`Vl}B@>_9_STbT8Y5`9m+4JoUGX)vqMm%7u4Q%a#9I##=r77x`oIF;X6Z7kR(aE(?CL%We z!vm_+T+;dOwvvT_R$?tA&`kguH|9`(e-(gIe8>Y)$EVC=Oo(XM4w(W|_VZ}^4C{Gt z4H?>B+VDMW8mb`Lpv^fm$$>z@r;xr7zvVBG-H}-$8q8o=6I+&QZ7y8*mf%$x=_a;`l|6-Y&JecbPtK(^@?mpPqneEN!j(dA`f8Pm* zmt_{EVahu>Z_=41T-IIIi)6pxUA`BK{h}kqqu8nCS5-#hTCVQ8N_hbRUsNV?%j=>JZsdDh-_S(Lf7^IF&eAXlotIg?)WLJRTlAaH;D*0Wdi@d#xqs^G ztJBl(?0N+6xA(K)nH>&efC>8rfBVNq_p`|cYL4-$VbS#Y+Apv*6!5`!XiG@@2)%!3`%^e?MS{`CT&& zE#8MK*dy7J8(89VBsgvo*5E!_V?0CP!|X^5Qyd{v;C+wo@jbI3n}QhF(*CX@8o$>L zMT@Xb^fNY+2J7Y9B31_RwRki_asebbSyW;wpR~^h0Vwc)zJdq*Jl3qi`uYol#SZ{K z97E6!_wfJw$O_wg7@uMq{oV@i7vlWT2KB$jN%=5rj{HTtULTv0 zXUGHSo6-Gj)I!aV>-gtC1!&vgjM#Iw-_vjWSQ=XE5EUSC0Xkc&>9>FA`LJ&eti8Lq zZ|<$d8aqd!ktWuogN7yMO-^$SKxZo*17Lh$7aunVKi$mEqV0sue~$+kl5Fkf`2t%k z0;b?kU=MVU1 zM*)0ldIQt>AK!n0Us`+g`2EpnJDSW!-?OXn=-1sS`F6X}!|Z#0dz@E&kokWal_ysW)Dpuq-Wbv-p8f}#Zy+3!-%FdlERJ9x6+C$~|t(F(k&VLaJ> zK@P>s%X;mJbqoAsxoEa65ViCc>t#lomaLg{f_1b3e-Tk4WCT1|ibh7@t1&5BD7WeZ zq;%v)W_S;wqYZe`aZaIc^*Z8y(Ma8b#XHssO+f$n$ThcwAeiuSWXQrsUiLTvxTOW!#Jnvdg>A&HJ*!H)JAz^z5W1F zToZmAXBb5?U>YCqB;V!6KC|bzzuNXMX92+J& zr4t?);@r+cEh*o~UaZ5Kf3`4bTZH*Hq$zUs*%D~+_p#D8bCS^R8GPL*bh!bgoOinq zWd!QDf6|CzW$$3ug4oJPbMU62y!H&w6D{SNmJy`Vk=tkkS;Rl{WyJZKr|TesWSu2$ zf3$&l#6Qy||D2z$dA-2v^W*hAia78lSu7)-@dnpb(3bFzLfG6Sswnl93!C3353 zB3f|qOeNBuA*qX~EIj0XVohW;gZFk`bLoTzmR=`7f3&jw@Orx@OPnu}wVy31+g9;K z*k2ou_8nCDN3?-;6md3O-?yz&!}Y%JNNTT_QN+FBy3@9*wAcN*?6fYdb|YW#Uc7A;e`p+^tosf!!0~OPh*L>%e5!3#Y_A7d-w|rB@1lr{ z;X2N5VXud@??`X2zeN$3!*!hB#$JzVqh}G2f?k`bH@b+RC@4YQL<&B&x);x0OWk+W zr7`VI)PI=>T<2q9&`)0thmE7iE^5#WkjG5e=l~e3Yt_? z(%SWj>x(_8TrbT-1L=c_vv>6E7wJNqW!l!ZZEv83$;W@SeL{^+Hp93RyVsBH^Tody z$J9Xf^>4;GTnF;t3|0=|vpRO$xgsLlSJ4KpBMKAA%YQ%q2tZwPlrJ|CbyqWbZKl-d z8P4;+)f3bJPjIbRanPSFe``>Nf~a#H)x>WB@a)nc--UHkZqq)gq^N3?X)W-r`j9Rt zz7Q1Gb>FrC%a&Mu%dJk#hwrq?fck()^`9?w?QSK7NxjA2YybJ348GDn@#Qtz7R6S5 z@ECY#s1tw42%j#ZQLm=UDCQD!?cK#Z(D}nM53Js#4eI8^mrjXKe?ahx2t2F5|F*|a zH}@wyCS`@Ewk<4Ju}sx60F*T$?+0H*&$WdOST|xi0X9!(agPlVfc?S&tqV44Makc!dKNZTN zNfZHy4_KjW!E;Ble?mv6^*k8dH}{uqTG($@jRB)#o8DpHl4?q?&8+TmuNu)Ow1xNu z^~$hTo&k{-Q4JHcHSzPGYdE+03+k*>TT(9#$riz&mm2#(_5!0L$G!Eh&w2bMd(bnv zcTw+08yNlG?>8pt?Q!jitp>FI_}d)DKYNfvi!wg7`~1NOeZw;5o`y)%Rvvw|}-(-Z=P2 z+m!ZZ+JA*O9(!K)I5+=kJYR-1@mI@G#uIUz!{Pj$*8SQuj3)H7Pg?k7VIS)mg)e!i ze|{fzsa7xQf8*jF|En{Ma_3*36?;CUd=K%-ZGD8$!40Ge=S!kZGVR;S?!^c^vuX)>Qe*9eG>*MF~(7wmlYG`j?=RCKXStdwBIJ-9Rd7qnc==OA1&wDz8}sg ze&@?5Tqi$9d-&Y`5pCl6x&8Rqp8T{g@c#E<|06pd?cR1iT5jclYx|Ua8HM}CZ-#&N zcZ6cwC$uT&=eBoI#K})vgMB%-eYSmfFVD=Cy?6a(lpQPjYH$_a5rXYC9on~CencBe z?7dHIrNn#xw8cKp&uzKAZ+|h5GRJ$5PV-V>zuDwCFVr8}ALm4By!W|njs2dV+lICu z=eBLz>UQuuBk;UC$w4Oqu^H4^%Upkcv{1dY5I4DB|JaV11Bg}ih3c69o@93SiS240CN&>BPxg?*pH2IB%=j*za#dg1mx zb%Fl3ewQO|fAPB-e>cMRg(4PzR`|SUkHZ?s3wRDZDZI{Mf$xQel_7sQ_znKv3-~3J zn!F+CzgDCNA@T1k_&IEVRM?+XlW*``#Q8njPmTS{G2Yx`|6}YY+aLS!U;G{FK(4WW zB|Iw9;`d7ZzyAKx&c7xH@EX{dVLoC7UpQ}5#rtj)d9m|^`N+b~Yk_s;7k0dW?{9yO<93Go!IzDuXp-A` z!{4EBv9E`7x-n#D_lFtw{Tj#VHBjQd@eF=%dpyU7bN_t%zB&Gm9>z2DFE=E>pq|9| z*wC-9fBY{0=Y8#d5yi02{TQ#?_7aADY4N}3JP z75M8C^$xXBNQ0r&DZm6(a%$u923Y~Q6+Kq(*`;i8=#PIGUG#CDsm*uYVjMWYI4&(Q zzWIisy3m~Byzq_ZK3349{73-CzzTd$xr=eqDb_P#24)JbFkci)L}HzBh555Q$93>6 zNkEYhc#HFw0C#a76s}LM$sG6nLXjoLpO-+^Aoj2{qy*u>N|R@+w2`ZzV%TcRAaP-A zIEO@oi_d=;u#d@P8|>7LVrxhy1ar_q?Flvb?01JS{eRl~w&uufEY0`&73hrVmb4a_ zykAn6?Q&K5GTpMJ{E{R6u{Vd4NJ zli($pl*@i#>^nV5pzQa5{`5Wh;o*L{{^}d@ z`Q3l(@4omdD`~#}=I!0*%O9d`@aJzMk@lr}^H+W!<LLmg ziZ?V|Z`eXp^b1BG7!H$wCTyXKZ#hi@numYMdO@}?2w7QxOE1%w1rf#vET;T}aV(CNFPpr8fAzT75;LaGjHE(^APQCwCGLNa%m5|5;A% z*jDaBXnUuAl{ideu>C-DiU}2dF0n5Byw6nqJj|F}adrL-3$L2osx6WZQ8?J(kTvKD z;X-yU`|&a42aG;U2EF`Z*ca}juwa-y?Lj^wSwR=o!$k|57+FU2f`8(OJf>T=ScMNX zXc5e7OcO=(bSIZ+<@+>>F1mksdrxx~`Xs_&)5&6)GM2_Nq3a?eiHzdafT4CN_SBUL z^g?i>u#ySLZVyBP@71oCDNmk8^P(MS|2`ZOW>D|}D+pa-Lfps!~S{W`g* z^Y`SJ>BAE*gF{B+VgUmWG_KvHtB5{azy}UO7>Pk&ej07#{phd(TIGLus;a-0()Sj; z2@@!H1o@^pRMlegP_|O-XfrOhS+QL{@%fBscSKhWS6}7CZ}euPoKF)8O`s%F;1l!l zzchTI#?nWMGzx=@K9jiUQ$j$B4HT(0A%6iw8@%Lev#c|~h9F`8Bbj zeiSVL^Vm<38ajh%%3;IyBa+pB9HQozJR(`&1gO<9K;T*llZ56HopxHjPkZXj)LPV5 z;X{j7GbOK54!5i!#FOy7!$LO&1k>s-F089`XTfS3AGaE&0X_Is&12C4vl<4f z%_Nf?Xxk0ileF!OQFfY?r)k?AqwPwx-AO9(#)x|j;)Y^QbRBinchm-bQ_;*x z(re?ssY&!b=ScbWG1u3BJFc&|M)fm=hd#zbpAIkj7zF4&5TM!Rk28gdImSfB0^?aa zvc{OmpkPg*ZI98mWiWe|mYgvr&UEC_$IwCVK?idRZEuXW_blYmN3lWgVgtvXXJ}p< z_hGFCLDSe6V@P3ik;2hT-L)LeG^X&-$9U)x8d(*WDLjlZ9>#QkG&073!sr2tN<+Fg zg^4xBLa*Cf!gtZDsXj-iR!MH5@MrV!W1h)XcyOrmX!(KaNAnnv6lBW_N| zCUXon%pTa#rYYQi9-}Vf#Wdp17;)!0QzLT>9?T9r9K9AZ$56rSK?Tj8xV{l+~;lOExZjR?+j`1)h$fcIxB{+YtW8Drx7qBWW|1{PKiSWL2iRd0;8C(-uwNzSH@dYcRu zhH92Slh}+qwI;D~>}hQ5G3c;+pkoSYeT=k>7*j|aW29xEm_phdBQ4{@6w=lh=_V{F z-W1aI7-@St;@D$gVE2H5K83bBMq9>!X_oDc5tl*0(I@rgAr!FtP(XF2`Q^A{%IQ((BSky!?C#Ij3R^6MFx&M&(OR!?!j6MfTne@ zJBAZ(7bgtU(Z$ac9{Ly${g+^oI|dkT7cflMo-}WNcE zV~F8)5ySSTO*q{##Be)^F&UNIG3;==*fE8&H%8fe4l22$z~S}*hiR(%lm)Om3LI`1 zH~^oft+Tx`;PAS@p}U@8X|APtQ+ViOJoH}zPTm-Bcs;TfWx$P z5B@iQrm(QaSV*{G>8d-$?YuGg@Vf9}Ij*A`@c$GZ&KM8pmq3&^1|MDrJ|;tyHwGkL z7f2>Ko;vDzGK}b|svDZ=YStt+;|?yvh-r8(a5?(OhB|tjO6>s>Q&U}4HICg5DrxAW zT=W)P>7K2c$NFGN!x-fvfr()_mZqzgIf;pXIm$#r6T`I}-BoSPnZ(5!aCT2{D!Qu~mTJ2CG%n^S7a3f1%`y#Bb2)6{HW=&omL5}Z4{k7wXf9hiyh=~;9_#jZ;2tLz6Dm$jO2u+10A)$J0{%?Dyfh>Cy#y_ z6*N11ou;bbr{%pf>>s1AZndblntYk7Qz`GQ#IJ?yFO=M`Mfwok;(qmwEM z3ueuTM+`#k?`rZ}bLTC8mIn}SnLf05NfY?y{n!xodPeUGkLuSmD-z}-MOryTXwrv6B>jw%9zfufpT zqg_0QrCB?@r@gYEifYK`ynt0p1??jA=L9F)xa0b#GvudR=($sWUh>V)$yezanw`xq z%5)qieokX3y)Uu1k~qRg(2nEiAmu8*G1ldH9c}4T*Kzy+-l)P#3!Hds)DLh(mS7M7 z1&1rQoX;nbe`=|}MW1k6KD!tir)wUOR8m@tATfUrg#JGNOIp0tcT)%j62{o@@Aaogo_b3dm zW+cmI$d5=-T+Pb)3sz^>=<+uhqdEcY!YD#ZigF6D9H2CRK_tsi%w-4=#z3?rTT+8J z3n2b_fjr%zASXMTqiDSlPdLqJG845Bx(t(S%eX)+U9TgGMX4ErS1?`8{0;T*mgz$+ zYDq*Co*|x312n&WQBef>i!h)|lGozN0+uwO5Zd6v&nZnn!20<;wp2*qQz>klwYUgf zatDK#MRg>9CV5DN|6R@eC@p9uMqBuDJu$n!ob%!h>b+|8(+Dls7~U3vbyY=tO2^kK zsFDz6kJv=n18&NCs9X38D9Y&sf_F@Xt4F+~YzOU+m)JxxYY{`}a--MOQoeqQ0u)D3 z?p2z=+olNkOmR8i=yiuQx<%3of?0W*H2P>%!zJ&3B}>qfC3w9_3&uN+PhMKWB6JCp z>4!K!{Z_nj=!muhKP3!! zo58xnWDV{6G8kxoLD~2FVy9*Gvo?%C%&T%GmTwcEFgm+_dJ_C9h5xAjq?A)S^Z~|y zGx205=$oQT)&WISI)S$tfxbjC?Fb%RJo`G(Ra&l=>vP zWob^fviwJ;V>9m924s^M;85 z%_T27aJJ+#TGmQG3AexLtCG(;p%v%@Xw4brg}9D1Md=AU)wPqbe<@K&LFt5T84r z^}&79RGSvsHWdM+;p%GE#`0^VHYR6j7Nk4yH1U9G5_G#8#GJ)xPG>_C7=$i=%N$0Q z@ZwLfpBc(iunv(U8GDSi8CnEk0YTQ_Y8Dos(}d=Pg~_^a0`V(bKHKD!;vi)Ou0q;J zPHie@0K@md@C~BDuWI&y4ftKBK$?NnSjE=IEZ3cU^iDq@QFO+kF0PFtrpdOir?-*Z z%PnK7@tW7KCh?l8qCADbRscbN7W5wfsQrX?*wRskbyRFfknZ}rJ__rh7N|!l3HU%O zjm>AX>q~O|y3D_v6If@2{nVPHCCzg%hMED5D5HbqdF~ML%lS5H14(PJ-h0X36&e$X zO$sxqRk(f~k|`ZG4%fAT=1MnRenTlh7nZ!_Z(?;S` zK(>l(#q@L}=kWh9&$+RnCUS|+^Ypr|hD=^lMRE=xf0@(kI1Pt(JR50>VH!SGEZxDH z;&|9lRNJZ^Eg%zNMfFU@&^)5JjsgElK5!G;QY<~fitg!H(QF@Uifv-W^>l2&5h%q| zb*w9viQq$v6wUIWaGR8WV(EyJi!g8kjy{*7mJD&pkczliQB|r%2sH_5Hn8%!I0yt& zJXJ#=S1Atvj7s6HcKcA>zDfZ;NP`;El#fDBPzv|xNzlQZknX`vZJ!I{dWH}NgO<1~ zSZIk$gM0J@%@b_O-^-h)CU{weyo zM^m_N;rkftiftH*?ixOrBvxDl^knL8^=Lb0sV%N7jnmO;YGBc}2Pv9nfyOj$S#aAE zE2hRNC?<5M=9>J|5(?w`>QVzbHmAw0#^pK#Q9!)CbiAbPn0o{4(!yLOAzviSOY=dd zQaEm9n2RLD;~LX{gmHDL?b~VBPUiZ!k_&UW$|s>TytlD_a=H+)>M7gDAR34WG(bcy zo+*TkDm&9&_UX+tE#xYm(J&&PcOtHP7MHpc67G$%haT7^G8MydxNG9lH$gNZBZy{; z4i!R1wYl}%NZHu;M}{Y=*+Dms#Wpmih`*#gjUt+(ut0u)nuRn#t2~Vnj3mWkJ`YG( zJZ?k9Pvg0t##x%sgcWmM9q+5pyD=~+TG=x%-G31qLEZ5WC>%fk{EwezW+bAFQT-Wn zOV~bqDJ^f1y6FU=?k(6^jNtuK4mK6d(4-O&zEUGYu?!0k*|6os5f5@{s-oK(($p_B zOX*?I4Yd(}163HOE1Irh#dW-wbt3#T6xB3iuxAZ9ilMvMQ2;ev-Q%C`A8q6)n(My5 zF}ss!wKM5ef%pyJoOO@QFFZxJ+z6YBs#(Z<&y{tr>5A!D?_(8-r|&i6{e*dPXX-X# z;#8E(p`T78Zg$(OdMc`&^*RDVklEMtfdvg;wGS)+$^fH4v@j9S zR`fd{->c~rNNm>RBnZ>~h9)GYB^O+o$m~!bwJ{QXgidR2&~W+#3~qyLFcZebWDO+sQv z04>nT3HtEi*w}nrtif?AKC?z7Y@4;)RF=Yj%k6R*(HCv2;wxSr?q7xLwhb-fV$Iog zGVbUj&rrgklNT0vT`!Yci{^G&8iWjQ3!3A&SYwg|IHzT02y6x+`>vq*O|gEN1mD&< z9Wwh$n2Wssep9Sr*HhNQ5j#K`O<-djou4Iar?m%_kgZunnw+Nazm1v6?fP z&^?aR00tP|*iKjf{*FGv*r)J@DfEGV_)N)i*fU@-E+oN?;CiqchrxE`iXr;s3Z49Y zaw1b0iknG=IN}#;%tAA>rFesf7hP zL7yN=F-pOi*Dl#shMYYF3q2=07>%=tFxoiLunO0IppRe#Kb}A~c;YsHrzhv=gr(WL zyXql>hd4~i2lDWarFS$bPJZg_2_I=DHS>>XCqLFif0|$`^GYwBt@O8Fzy5l`#~Rp) zLIsJl2<9>TAR`URZY$1F7Ew~b0X;se;mZ#cCM-okxLQ#Nzo1$5x1(mL5P$Edb9@F^ z-j;t02z|{qG#4!4cjdl+?HLw@^ZG@7SfS*Bst0jCN!Ug(yOSiOl^Jv)K%4McZW7a~eQuN}Z z;#EcTcq{C-xTmcn4jeot1d6=NNwVfXT&K_n5%}c({FuRpvT)^p2j;)a@^nd-VHC2* zDhW2A3&5(5ga6lb@?l5sU$+al8urdV(S$@Sq{V+;&+Ll$QM_`*26LKG!rn!sVDA#J zrZOSp(s29A;Lvp9(`MQ#r$29L;y*SMgZ%LwT-FS;D5QCn0_v)sNff0!8oU!T)GD(i7TjmSFN1r3xEvF-C&>R<%(o@qN3q=_SGq7mEZ0+E@tWTE*^k(Q3hQGHX%ap1 z^+ExD5YT&AiqLbkEyS@!@rQ#EK>>|Z=?p0kpV49;M5+HWanBHN_{2s)m)rH1oYQL> zr~KT=+3fn1r=KB_)0JTG5Kix=!vrPvNH1J$;9|i(p);U=ZqnUbKI8lo&2OonCP9Cs zwt1{ooQl8Qz-c&M7iD~o0=2tGS%+IZX)#e-$E$4t;ZV4T98(4(dN=kmxB@}qzR>Kt zr1H17Uw>73>gi``E+(XaH0LO%D-jgAtQRBoU%8v1O-@&0_YihN*CE?%mwYpHy{7q^ zBG{2|${M$D#U*Eh@;$VgtTrX!=vvH$vtGdP2YZd7H9}ILy<6 z5hlJZkak{YB=52+RaG=NLESbwpX+{Tg*C9sIu4D0djoW>lb1t5Ix$R$!n3{zOdmAx zi%QK{#_alANCLYl2ed;$oj#KMCdtbrcpVl#$pe}AE66;KVly<=rG889sZ1JDjtdf1 zk5G>*yAd!F3%qJF0|(2g0BcgzNG#&4Wk>91K8;P8G88*&+`wvI2snYbYwMiEj1x;GL8m9jN4$?qVJ*|~bqELHCp9~SRJ z`$T)~uFVi!-|j$j9ti_&S@u=AV-2j}g&Q9w*xWqw$)j=nTeUg9+eg`U*yS;{Xua5B z&mM>>%c2ejV3zs(2)h?Mr!m7iit}b@w55=LN*%BGjpidA3&_e8Z;Ca{2yVB_IAncJ z)SX~9dkP}Eohc=Z%?OUULSMs~W#0MKcG1Ar5Fe+@1L4KGzGHG#^?N-UmUPSC{7iKH zB@X!fxqT-Egc6genmagkY4>eO?za@zd1df=QKZ}4r;=>Is#$i3B@1J|9t>BACOnjX ze&F1E_h8W`PF=)lydXOcXA4$0r9^)wyf%#NsX&2O>f^(rqH1_eu|rVvAL{;-!X(e2LzBT|p_j4_HtrLbwK;w3#6c|{Otk*O6GJQj%thS{z zL0pkA{HOd>9BRb%2j45uJD|>5OR4eEun~%SdMa7|!#|*XxD&3!%E)wR3>&$7bj+Y8 z?&g+3#YPMf#5y@mQdxWCo`ext*_m{ZYG|2M_QDr5r)UR(xO_+Qpqli5iX+t-;X7F@ zMgAs?ATn1Tr#?porHlH=b-{>#R|Nujtl{S^EjSX-O~=qQ&#>nuWqvS*lf(Pd+0DLf z28;lnn+J$?hj&#^r2uVPWAJ9K6Y$3Js38~Udw8Cq-*c{4v%jHn$WBkTSwI+F>`R=S tqrc%tJ`@oOQ8C%ei<52)q;o-cR!4-vOvGUY2z2A^_V`AT0m@ diff --git a/examples/server/webui/index.html b/examples/server/webui/index.html index dcdd41079..86a79b77f 100644 --- a/examples/server/webui/index.html +++ b/examples/server/webui/index.html @@ -62,53 +62,57 @@

Z*r0xL``#p>&G0x#N zF0G1>aT?AQe3`j@Za~Wz@=y|ij5mSTqfu~ZZvR=SPODx(tEb_FBw@yVTaqB_83cMd zq66_HNsIOpb*E4^Rx2f!0@)eBEy+xSgdYO-tZfdF`+LEsz+x_RJBPfHKVKipZD9`X z3jiN~=X~s=1I;gCIQwlwnmEIhEo}JI7RZ}rIVKTBrcnrrU}Q;-=39Hod+OvX@* z$NM73VdUPfj^~h3&x?}HM#m?l;?gAlien+vIw_a%L#r}L+-aFzBV5APb|iUE+Z769 z^HXm66*Fs*N`qPqi!v}X07`2_lCGZ?7QqXDEVO%DL@Ui|OBNA%`E3P7jqd?|;#wGi zPNa8B)Xadxv;5A1xF%elEoosEA3jLjZ}M>jL6C4sT?#P<$gelnv)_8QM07Jf<<_nHew-f%Uz z+XQ;j=)~h{3VgPh${eJQnaPD@^Y9t(m&-vA88gXGg3#b8FMbCI_oB3UVXE`j&h3L> z%PTUJ2uM%V!togj6WOzno_|@NNHCp$%*SWpm|)pm3gRq+vknEmYtpR>#e>x;82~Ir z=`Zx^^C-8{vY@$<4rYn<%!Z2kI*`$B6M-LeVW89kSpA0&!vegJEYy?QuE9VQtctr| zfcrJ%`YT3YsneX*Lcm5$$li-&5ikf@Ly5HQP@ILEQww7N!cErIRdMNC`&Gl*@cbpZ#bB}Kim^|tE8yZ#s6|mQu8m%kVCaIdTqlJ_{q@Rb$?shZ&Y}74|prlrdjb5 zeVJF#N@qAC@c!fLyLEPlq2ykF#n{RVEufriY@wWUR;QeE{+{0WZ)3PI3xXmJheJsP z1@X*SWp_ENM5N2Mj_5Kyu80_SGVK#~Yq=6Q;0ZJV+uX8L8A^C>-|s;4;1P!47hI1- z(NPQ7n+IYh?1WRb&lPPGXaR%4f^x9U8Kx{BHz0uZPgY>L(k1)!U;Tc6e?Zj5Nw+kj z=v5HSdf*+npSxh=)*pdlkTr20uEVKLB9vL83o>sl19=tH*1`A_(kDu~E|yWh00ntp z4GLFz9EYLk>LtZ5D$>ZCo3cx7-Lft3V$^4~C}MPWb%sxS5A>$oOJWLcX|Zk?9(j_Z z6YIVKU_Q~3{JPU!SN&OkZHC>iG#ug@@RG?Q^dZ3?uRKo{vgWd;2g}m+@h3a^3YLo`qA46SPkGoSE1d1tUd00fwbH?VYYf! zC%p`2ngZq38r8IV_4MK+5nZ1*dcJ1{USk$%AOcS}Oy`Gyl3F?aJDit7-ZQ-r?;>5Z zuMSNoXH4pAhVV|ocLYN2Ls?XpIqe6076?e!?)s>t1CU~@B3K`2MnTKvXrK14?mhat zgfWcqy>p8WD?gQg6{b^Y3QNb)c^_1=t=P@DtBpo{$6R}MZ1vW%+HdD3KSLG40y?W@ z(eU3m@~U2nnrLQ38(7^K&T0uM!?njL?z^6SY!aQfIp{z_N{4bcC^9&W^>^2--CI7L zjw>~7N%)!b3%tx$|4bp7cxKJvl}mdQuk$r%?ThWV{jnu~a~23~_b*nzvhMEAeTA*i zI=)1oOWTRK>HAGTM`g=%5q zCKtOYBFUZ4T+rUprGRVs_ugF1$a!KA;kV|xxlNL!GGO-b zY(IT}#^NaD?jmT4Pgq2H8olqf6Ao#TsJq@i9<0rJ7!V4mC*orcC94h3xx*Bifhc#9 zhp!|s%yLjustZT}=$jv)IQYFi-efJ7)}_cV9o3v`*QRIix*EVY8L_u&5a<_wPG_3Ql&@1ob`3SVKo8&%gW=5GOL5I|=P%j$py-MtD3>r~iacMcn!WOm-^gr^;@B z2Y|w|Q0*^dyj-$!<8t|!MO@d?=ntGP%WIg+(fN}C;OZxMUDo&fG5!+KT?s^Ut@DRC z$Oq;zi3?gka~KQk8-zXFF}F(&oSMgLBriHezhDoVlN@WR0N1|((hxCjZr6d!8mymp zo*Yo_)Mne2alpKULerusF9|P@=9b)l(`MEsw4aY1Hi0y$m!u9=>rtjaA~rE1__clu zms90cHx}&9LSo%8x~P~DT+6!uUlM9R21Cykrk9v^UC_)rHIvRN5F2Ikz$TnWlaFtqpn?lZlLgj=jnmd z8{FkPe9ngN?onRxA%XXg8!R$~3+_;7T)^!BorPQBD{Ak#B~~Xqb<$q5hVBj0ACKFi zK*p+3;%HlrnLjwlUsOnY2s*8;ffC$(r}mCsSsjKY+Y!; z9Tbb~;Qox>T#>BgS*b67oDc1qdMh4u6@w5U#$5*!T8kQk#G74^=IK?hC0qnC>*qFK zW6~gETyt|^9W-*fLXk|KTPU71%VeK~Yq~_U@PxlefGriQO9LAladS<*1($su&Wf+Z z+EvLfQ=_p-eXwDUli>Fu3ScvOi#-7fAj+2;)x5gpYSDq`QNS2 zDLSWfs#J~&rdvUtq`M5J1|F;cQS!hg=rM2BgUw5Zz>*b(^9WWoj}assut?X>y}#50 z%M0msCire~;am4O@cke@!Y0vg)J&u1S-?nbo>&GMP+`q^J zF*vNjynXi`73gq6)sMbBfRM1b_Hok@RTD82Lp1~k)L-~@=terEJ}feQuo0zI@E-b^ zdwF`mTbrzZ>P__l;F2*bJCP@Q!u(A5LW~U|Lg6ukRq>D|02m}Zig)CkNjJ0zBMXd_ z%RpkA$kfdJ+7%liDMb^0>~AdtSn}RDjwc@~ns02qENjOP;7r+Xg>(+4wQC}mQP_7+ z?A>UXYQ8x^Cfb^gRl3IBXjAJPBz37McGXcP4F|}7XDt1>rO|R8A)K`AV-qy__f!c+ zF+>%GI@YwghuwaB0bBcHfKzhwDa5~}w;HhB!NS6L@p?XzwZH8rvmq;%@edVHJAA)> z`W_K6fZx59UczFwh&KuSWFl4ycd)UT3_&WpCdW`BM2mvoj4`xvLKSZ(kl0g?Q`WvM zJ$~VT+T?4HtoqUh&V&YVXPwzrR0WaP04?>i1qVDaUBFi2YLHKl*XF0GL06C5H08gd z1&;)&C|ZJ@0tYW2X5@eJ&p>$sP-OQrPPJo;bj(s@K zU1AZ{rensvt*I&k(eU=}Jy*c{0D_xWhLR3eHO5D%ZE%`s9Iy=4oS3K`&sdS8E-v$b zm`M!6aA$Cr%RrO|)e5{axMl4ND`Lpo#RsorITlGTeVTfg01&Xl?R(=BfaS0(9e1c& zKlo$ApkQl9f)EX^*pnfxrV+(gG41Yz*0I4$`-x93BzkLZ zjH4&@@Jut)l8Q@hpm5qEBXl1rD3NlJ*-GwrpXxVf6>i%ScnGG z`rc`pt%4&Q-Txxt5RPb!-m8T*H!1C>lKsV?Ugevb(!|R#O2#)R zg`r>u&Q=MuC)Zu?L582;k!7QfXN#4d0k{aqrzl;iX(jWmEUfxA#p?Mha|_2&PUXjX z%p_M(*P2yudhl-z?exxxk%ucy% zsWwX1*f?I_bBS0~ieFSv4j#OLb8e?7H2s`&blhSBkHL>8tQAsO%5kE*;I2$cfshDB zn=fqvx;19ZlJcjBsy5#3k!4VpMm;!_hY7kVTsC&j_31nBYtCoFJ5Q0{lB717<*6U|% z2ZeSn1b^EESBObH46JRKnHequ`s)cQ=?T6S2i^~$-24D=#sJxWGjiBE-&jgNWJu3& z#N|zBTcrB+TiVw8VIo3t7fo0`QidWB&s!$(mqo|a)@cfz{_ynU%jwY-Y1Dy?LPJcA zjHn=-%h4%R7k}dN9(gl>G}XmVBt^Q1>o8Yy&CXG+=&j)^Zq8w@j3I(tAu>B|4mfqk z@542Ou4{ZE0*rEh5HP|!9j@{I)X$9~U@P5pv8aDSr!+{oq1Nf1v6;tHm@ zc0k)tcnc&Q+`E;@0)a&UjQiR}6ut=IP8;7w+tUorKI@J)84Q0#vZet*w z>Dv`TM|a%m*H3AeDD_IK^prpjw>9`xkqEF0V(-2R0abcUzI|^CSHxKtzv8h`bx8LJ zntO;D#1++lLS!+yRB`7IZuM_ z$*cBeqvr`BxzbIKuX~EvXQmAw#cAa$pkcqw$9TDWA+6oddq(5>LcAxLjPDeoN#Z^*I?>NPASY~?dLCHb79Z>%;Zf$(JUMXBdY zgaI}Wrqj~Qx?F9=GoZ>goxlM;Cw^x0B2sFAUu8I2TB|J6YBs?VTl8`L_Ot@TQ}av# zt+v;HEX>0!$cJJ4Z9!_Hif2>cfr^#8q3vQQR};)ZsuEN@kx1?UqEM@Pg zz|HiRweX!RL3_^_mZX^DS>N#h)9- zSre3I+^;P8;Z0Z#_76X{wYabKq?=HuJqYE0PZEr0cp_DUYp+ZN`hw9z2gp3$Y_Z}8 z(0EudQDFY;G&l16$n%)~5PAZCa*_iqzTXw^Cw~&{)3JjaVry_rpl)d1TCE3=c|m)^ zJ*ulV-~53RJvDIIPS`soi=_DsHX*bBon>{TBVB9lo-v7NE30<3A2rm`BoYDQT6FP$ zA*@e%T8*ms_m!pocUGwd*U4tEjUS{(vL;O$F2O*e?W(7f5ZeCgd61ao%Ri`}pF9@!;^AI`T0mv>I={YMda| zRn?4HbbVB$gz@*>Exeq2Q~M3i<`V{g3H9zBm4905JPWBm>DTPn!T5CJ;zRQKLwMS~ zZ^nmhvyB=R3%UbV-n<1!S+{A6PkCH^&Is>2w#JgVPAUDeprX1ZV2@yDqb?OFJeT+# z=Rq1*auEzd!>cQo<8aOkt|vBH(~T2)9GmU8%UBCO2EJ%;SrD!!UKc{I@T!pEzXZYAYPcdpz&Fv~^KK*y<;UGQ%`% zR!{Imn&n2$*!sYDvZ`JsWrZ;9WGcmKH&Id;ncM7*SSXY4$Td&66xPvOO0vG2J-6d(S zhHY(sn^Hk)r)ui$DBCSzvFz~u{V356S8shJKY+P%9WlN*GiRF&(dRc-R={tD zAA^9Hgo|2gSVxk$)M2)NTKg|KPI8{o!5^}k#!ToW4M6HAP%zDS7fO^DDm>C2(Jb#P zN^2h(Ga&AUQ=!zX^gQ+!=$IzL;1#jXyp$&ox`23`2Hn8AvQX3|^J`Y$L# z-#;SXx{yvT8E3L7awMvn=laK|Pc0FW?=Ulz+o<0w57PbQ^`CWre9`4_m>2weX0ip? zqSZ1&99&Ad^DYm9Sh|2h9^VK+xYn$Y+Bd7fcbCj;h|^k#Hd_-4`6H|!woesi#5+OD z-aOrvKTs|z$9u1wg{&!cKUOdt@qN0lSAa|bVa{YQL+a-+PEd`y8t*OQ<^pZXcOrpP z4?0RT39k6vjCu!u?vQWI9JxcS(X}GQA90Dtw6t-T!A{8%%{di{LL+Fs9Y*RJ?pGSEP;o?b!=1C-rk^RD_Ba3aq_hv9H-Cn~R+Q7zt*Jz%IB8<{h(X90(ViQ-r z>Nr=C9~znp&s13>98D{Ij;)5xDjtoN@u*VEvxXb=;QlF7P1-+xw&BAtfnj}GSalMU z_rj6+LN5snIkq{E8U1M?#3ke0>l+#12KE>MgQ032`}x@w|I3wg@HIpZUf-%CSlv^2 zwf1Ky01I4y-e#MmG2E$;s8Do&D|b#1JbL0j@vhsk-^n8^T4^CH<{gW5Bt0y3VO|{g zZJx;W@BpRbI4?U#EqwMTDLYiMli$J8tFr|M3~d&O4wj)~AG%V6zs**1g16}hOj#$& zfq6Hfb?QC83o>OP_w4JLp`T3dt^R`L1!k*r=qy-&L1j9- z)QboQ?9WZmuZ48USm&CY!pPMbm* zT$SMj$;DZr%7&7ufH>2t1M;eBpzf+wGATjF{Dz1(<*Iuw~Y4bsZ-o`(UAPrhjl94YW&fdZe;BCHq3L zOTk*Z7n(a4f!ali4T>?5Vqg|YJJ0U@mv_7Z%J}|}cAC+vsTy`sYM;>$JYLtuFYHAX z9oyp>?a94p7=J+E8R@eo!=yFso_Hn23CM+ila1xvm2WADOB%wF$^AYjINp#w;YMj_ z!CHN@g0mWku>=D1SU)M)Viy6H6LS?j*+iR0gh(H`ZJilWeYGa?u6>yI=cjc)bDXZ= zI@56r3^=ynW^m@fdM4xy91z*paB19gQ;a*v4SjX8@oaapSdKzyw&Ksr3sfE&2#02W z3v_C_RM^KlMW0|v2h0ep`_ysX@hX^G0Fl0q=Bk8exOgNj43waARL_Z2h3gl)t?}?3 z@ebMr#5CU|R81O25_C6j3_E^e{1a}*7EU--DAW^n#bF~k7;$&z_I%x1t7b_}j)J5n zMQ%-GqOY<32Q15HTmhNRaM>4w(Lb+$|0@0{*d)$h#MC1hU}^$4%3E@12%xU%wjRIN zc3#>GYas&RB>_vH9tE zu6UH1kaJj$L0aK><(_R}xSu@EQX|bod$tLY%ZaE0Z)^0YW6PeT0R|Sjp(0w-(!5e0 zg{*In65B?5gqc*!o$CESS7$kENEOS3*I*Q_>b|#WmvI#*I5*t$RePZ{ONi1nQlvigyv`yR~{Gu;ok~g3}VTxCdv1>$ zbH}V707ISu&fWM%4jjjr(nQR+r@JyTUNRs72g0@pSgn?IT>S+5MumwtuZuyrqqu2f z9IjR?+rD0KZ+Y927bWltY14Q3<~ZA4MT*SW+B@F$ClTSm>?5TMyc%5ZJ$-%(eYr@29KY_gSV*lt{|MY<6=8a-@2TV=vimyW-0qj zK5P$Je<^}z-9_%=NBVt)9RAMorOs_bs-c1{ASBSt^xqQ4@JwTWKM=x2(Sb_lHFW7I z7{zT^BYfD^7V(+IYejTaHC57B6N4>|GC4}=mcpmwi8czo3Vko+4YYTHTXs#`){SVl zEIJqBnsbC*Qt9ooDZP?t9P`NB(}$8JGG}{E?(N>*RK$!Bz%sJ9C`<1r*ddU0la{Rc z>6+CPk$L|QpT`(~r`MPSy~U^+JJjFqG9v(gGRCeT8|ogIMHsHZ*hg*))&1l*`iYt^ zA|B)-j8holt0#-Pjxo{o;*s7Zs0%|g4ou%8kJ6Z9nC6e>rMabZJM{l04beWwc^~U+ zeStT}*%bOh-GZ_U!ZsS@#V@$^>xe057EaiC@mxn&+V@<4j;!lE4^v*a-`@d~`Cb(6 z7WSj+-1UsJb3;fL5s9)u9ds4i{RC`m;o`aaY*@0R-H-twI}Ln<(Avwt?z9ryszM|- zUyU9VqYc>Zi^$4_m{^~y`B4oOFs*GmL^2|3Hpk`C&ePGNews)jj~M}EfT)MnCTPB* z7#Qxlh>Dnhyn?(8u*s9-@6FVf#Cat$BBUqf=_)H^152Tzp9sIc7(+6oUG(QRsIfJS zqXtq8*tZddp}lH7xIqdu-5=M5)ubyN$au@@-pILJkZWQC%%r;zo%Iv1~hq2rigerC$rB*JRgLP8qO5$KxcFRj^OE-z5Y(FpQ2 z#UV6~(aXI1E&&o7ji77cVDc)&v9iDc$N{S%wnEmj1Gi#+xPK*N%-pcvj-L)sn5zgD z@oOM|>3-)E!QPftM7@}bxveB>^xLVWau-3`W^OMeSCH9BcM-r;)_P_gowQ#Mhiy|d zfrBSxh%HD?kZo<3k0;Xt{ZuYqDYgG67~PC5K~zsX(8+KmHYx(fl)Xi!{H)__2+9o3 zaxN1lT-ju#HO8Xt#s)Bk`Dw+}r=K|DHdq3G^_FG`Ng){E9B&;NLqx<>xvKmY66 z$NyPoXZFA9Wj?{b-v6CvM@3-&RYJ7nHVpf3sO%>UgZ*oMO2aS?^ZzNV9emWsFzmR0 z|6Pj7Uti;1tG7qynZyjkNXZ{1j{U>=z4!rnd4i^p!Z1vZ`~xBXs;JQ;DK@7GSf{yLR^f1r-WGBuP*FjO%NlfZvdDSs@5n1dSo#E%>? z+rKqZfAQTPKjl*&8PE~9t^OvFc<2A(G3@7%az2S?>M%@>|HVuBA6`0QW3`u0j+zj| zu&?zOkN+*ANQb>G;F$f=fA^342Q~dwx;j3MCROGhf=>QqdtI0bzE4Q-s z4!9(N9%zivIxhbOHynIT8cDx-3!Q#}BdU&tp+}9h@H~#941;kz6@nU%O@4dFjM55+ z(owBI_j2EGPg>xyv0iCGG$@iI3^4r=n;cz)iL$rP5Kw%BtGeapMp*)F+%rHipk9QR4VJ7>#MdP27-6>}d?^i;_ga~8`*1N6&Z3R4VH z{*CnSasw-Df}okmZF-7z)jWETpojKER|Ptd_*G#>6O{w4#OAEYy}I z0{r&+7SdM*4YJLDJ3kQ|8>{(W@p-0;d2eXTk!OZQ@V-TT%)e3S{!GqiHe|rgqO5jT zcn0HsZANb@oc``6-K}vr6+3r;F{$;R8%;UH@f1bhm>u;{4`|&HJUVaCz#CF}A=?aZ ztGi?)hPef|Wb@rMxrNUk(OG?>)Y~{@+o+}q6AK{2iGu=n1 zNXQ%mJ`9Hagl%G9U17rub14hhA#P>NswGP5_G#hO5`c>}D04j&DzUz$T9*la0P~Am zt)@Cs_z^3A?_kBw)Vn+O2zU;~^`%X)@q|3p%`&CWAXrf54Y^Cy`J~-zcgj*!va`wj z2svCAk???j!FTUqL5UKJS>(UyXhz<^fM>0IzW4rIpzyA1XTzRJxB>N1){0nkb~oBb zL7}{s|I3jccO=+Ue?x~gaBHD)P<4#H(Am{;x81BF5g^4p4Lz2_6}=77{Ce%ZCBdN! z%B{-Jl!KNbPr{z^xz; zaoDfO2yrxldaZ6;K8mcPlJ?8yG}v{!N;hrbh*$+%WGbKaDscg$=^PHJ-K`(F-X=qk z+5YIfWCIwcSpOb3*}n7GSapixvB^e$+@O2k&di6qyR<1Xh-`q7#NkJtw*%OHLTw&K zbKRYPaI$U~+|jZzBP3XufDIVD%z{fTG{Yz!;N)g_)(A zy;9hr&S>GjV^=W6+{R5hXm1efpu%LIe7Vz~77u!aeZ%6T(GT%k)bK`uC`{t$7QlNi~zh@Rt5Se1d7g1!td1 zD#qdI-50~;4L-D4l4h)x$~1?YbNP2Mpfw@1?xXLAmhj5FF06%JM|-#IF(*y>JJW4{ zM0TPDW3F835W9VU-pzCsEf=Vy5k}DNcqus)Eg>z5i-wKBivDcS-ZF?f3%oI7s4++uBgVa5L0kY%N35e84K=| zmdZtqiG|BM=lKV*%o>HOW)fq7S}|W@X20T5^CIDvp8;oulrqcUiFG}^4>LB$unG{~ z!w!qzY|6O}A#~xW#VjNr*OmC3Q{!M;aP0tRCS7rh>4__|E1!&1V==V)>$xC*G>nJm zx&p!H{1Ia+8yG=>TR+?QywCxhifVroXAK?B=v9Vlk5!DyoiON4h0ae`m_*4TxikYj z$l7;%n@Pq@`+;CrCQ=koOWC=TpG><#saQWf6cUy*a1ad+^lv9+>M&!Gy4o;vHO>aW zlE$UVWQ6^UuK&H6kw~&{$pvA5tMhGefdgdXVY({Q@Ejo$DhP)cSpxIuV^9^vvbuMR z{bC8Z8*wb2U^W5I=?a9RQAWICwThaV&(&Cg7L`CaM286XI5cY8r{UL5Ka*?c6wM_f z-u1>s^ByBo#CX?gzb&<(>27w;NO0kVlYOy8WI^vj9hs{NQ(o_ZOBe=TD z6c4V;G~o6p5kJ1IwPx>gHLn1JPMZaa348*a%sV|97p!x3%l=dq{M(Zq;@lRb%&6(} zzH50K$ZX`S{wYspH2b@l6f>J@Ci_Tv5B(gafWy%6I3-aZIQo+d0Dl+V`*;!Ph~4u_ z29G}$+$|-Qi=m=3N%Xvbj+dwu%icQiC*ddpw^-#&J`f`@;q&4Jf1Z@DG_9g>oLlka zReAuYO@#K&Pvgjw#XeBzo3j>4hn6Nwg-y(XQx)PC#|UETU;bTBKrm7p z>Re#7zj!S;h~7;fJn{EpK}Y<%${h)0_IJo~f`y|i&Z*u_9mZjQ3nw(JwQJr;0jZdM zXpRKPst^AAD}SGt=$eFtrfP2DD?pDpv5(qPV5PlAYEk2 zD2P3HJ4Xp*AMYz!Xnp_|$!j0hc_cxCD|$0bDo8hYkwBA1DwwbpnF-JP5sWyAh1@JH zL7=}6!3k~cyGF%-K_zIpq|#tFrBDXzf7766XEawhO2wCsjn=%f1wkx{`hA1hu=U&1<$VOvbGTK0 zzm1b0F(HWQKU3nFy03-fZ1fHAcS-pSM$)vH?8k?QotNl;oYfpJD2UjJ<8zrkjrN*r zHRRtm+)xw$PHG(j9qmKgZn~44Ge4gdyyDu6{z$j>e-fg6l0jF7I$0~J{8NdSna(mpwT=|(iXD~oJO$rv&4sDQ|8v3ly!x`L(2$NovfLx$K0VoSBXu< ztHUw45pY9)M+10+TGHUCn&2JoT-x(ljp&$pQlyVv)6$@S^YwnGr)ah*MH`gip{#_s zr?9XsLdquNjF+V>5D90S)Rn@-G`|hPGpcf{`!fH29Hievvv};lu&aOfBTeFqF8E9V z+7kCj;v}g8+zC!68+G{R^#oI164c4e_!n4wbMU2qQQ$(CEowQ1OQBNb$vavBdjHHx z%#G}Ja}4`lJlBZE)5&@MH9KlsF|dX7m?-+oN_Z7GEBcGcaz^lI()nR`;{n$=JAX}g za1}uL&>uXe=%ZTt`Luh0z?68U%-MqBrhcVRjuDTgVXjVF>+{MtRh2;e$w`tZzd?vX z$a#Q&*_Jt=$e*3haoo4$K11zl5KjBD-%7gF;tHbNHyg+Gx4C?RclVaNvjevm$I87- zR`@B~#r%U(zpkc6wle1U^cIx;o2}11Lw6AXbpRmpNH4F@qcSqM$z?Glo8_4s=kKpv z*!W#N2gLze>srC&1ERW)P=E*Jj!g=TD->scQ(K+)?{l%zZ*XH|WsGk-I>X>cxW{pZ z%n1ud2jD&Tc!sYddiOlo+!$SK51#UC9l9&mFyh)ODdsL1Eu#KQTx?T!Ph|Y`&!|bB zkW1fDH!+fe@DdlJh0!K_etxSVgG&<-^=Nbv(7P$!2s5UBSFtdK=^u92QkP8W-*D=G z1L5K#)l7^KGu`hFWn*n-gQiFTl8Yuxenqav%+g5jD9bd($w~szS)akz|p)JxJ zAu6DCH@7Q6)DJ3w6sf1;n!zv#@$;Qpi->#EfNfJmWIuc`)J_Wd@v0UHSM|ZbSz5_@ zi3V1DYO9t-6e-6vFkL8%5o7ez_(=eu(SEbZ$WL9T%W9-nKyOFeWW->?Yc9lp4(O-4 z8fHti6qL#tcPXeH2TNY#W9U4@>G7tQ-lcQviVnvZ-s`0HD9Eg2Cl2&WX)QuVrc3=j z26lrlco1I=!hcee9%y*2wN*AqQB)o$91XFe?5^O%f$b+tKpW=vZ&y<9yTZ2&^pMJ3 zsm?mp%UG&X`dI;n7ExTt)`h))qu)awpl2(hyDuqEWRo}D)S_8MqJxqM2Qt691i~U1 zSaxiu7;4jVPvb!G>+;kMs@Er>pL_xGz$}#c@%|8!)XxMY_Uu9LCL1Ph$eiqe^w0eg z?4t3`kQV$ArFY$%MaIjUfvGHb1})F~e%nQD$;7XuAeVI&o!gSDuFoca=A8PfMnwW& z3+?Fn48P7{n~%y`G*gZQ=_8zJs-U|sagvq#fTSK=ZkYeIyy&(ykI6D*n^P(#nu#3@ zmnTZ|rj6ey=_s6%o8|F9-Gkw{9POzi-0oH5k#{!;)Hb4^nWXs78vF|eqAxjrKYp-x z5;wz&&X6UeLF$P|rQd{q!i8Atz&}oyk0~kGAU9&=mfL873h?=^8^8=h?N#CBLGFk0 zCC`_>GAlhM1HaixHv3ynG2I^X@4??pYv z>ekBUd2XpGHnmAQbB_#sZY4YRioKV?fJ7u=W7f7usa9CAecy!TzxW^nXia#3WT&7K zzb$iR-?#bOCfUnv&Nt)3{V*7oM?O|en3|*(8zm7a8ezhReueEealufLcmWgvqM#(W z4+YuxZ&}#9Ra(b?n1hKZT*dHJI7eDY$=CO;W)or?px~UR(*{`E=sx=RfYS3;lUfK3>BXcAdH$Eu{ zpc^!$CQs;OO$)QyAP017VSebNRw!w}6N$eSSM!a20X24iS1<0UXs?ue14nx*y+#{p zYQA1>LenGnR6$^I<`yjs&MXE52Wj^vHZ94-91d^oQpGv3b~Oq5S+ydL;6&ZNC8LWA zgDdmc6bPvV`M}{&#Pi*n&cFbm*V_+kN@!{JYhg zhl9UIQs-8GrkL)$Se?nxP%-5!Kq0n0Wh<-rCB%qu;9;g}mu8v#U`$?08hc?>A#(>> z=>)elfV%?um*nU#lPRE$%e30+WsU`H&pZt-7(*L16MPW`nce+>@cp^ETAI*W1*?R3 zwLR=UK1U|DUt0i6?wcrEh$1%e1B)xjNXbC`AQ;Mjo=mB*%Shu!QPP3XQgY*Y-Qmx% zY49NJ&9UBmId|=qWh)D$m~Ja8mQMV(h60i)SpGj0oi&og01N~lK<%3^qYiFG`4% zBat+c755EPCUQ2r!kY?p!b4?nTP`3k^^@kwN48M_fF7lB^D0jF#9BcDJJ)#0Q03iK zPe$&br;LEvRfk+Zt~}#Is@B_|T$cPsoa zHFeQ45G@*nN~p2oS~w|d=CtCXduI2`8r@dS%v(8Xq=S!`uK=!q(eAFUJ4D`5++XT{ zeX&x=Yr>HC77XxsV!qmcIEBp@{toxW%{a91p7af&9MBE?9+>|s#~!~Z4`*0bzz)Gq#>9T@OEaMbBi1bPI!?@F?*gRQ(~gF;21X>nlJGwl!gS zA`k|-wY{keopvd;&~(t#fpcTFHRT_F)**e5d9UgC)hMu{5@`+O)y3N3bSC3mmTe%a za%?}q1f`0OCn!ekln}qk^$d*AnHypmOx-EQGV#EK-xaIfnDJ$`%JZK;%=^&EHaGyn z^NTPl`M3NPaMkJ006ffmF$pI~#4=T|JN48uV?gaE%zsO>slGGTnSk0R04!5~RH^hY z8uac!q8|u+c?me>b@k--xo z{Qa45WC~=Z;(uS1U^k)qJVN1>+~~!&rl81{!(~+7kWRKUir})!Gr-p*eJ)3YFdWy=Ew!XT*k6otq%N_JWbp&Ba-cx+y`;Dx<>=*snmT($M0;rKHJ~r=o zy|**NA8&uTv^;2RCsA@*8V4P@&kEf*E>E#-0Bz^;d}_ugSDGe-V4))ao%1 zV?(Cw4@19%3>XSFGl=N(F?%Dt$QZVuiC;M3N0!F9Ig-}<73t1#+3YOlw$Y2%*yk9L zz&PscfrvsDQy5T3l@(Ao??I7^eAM7-nqsCM_ervzFpjlun9EU$CtoD1MHN$vAA||yM1876Sr0s-S^=M`AzD(vH;N8y44tAb7FX|@5 z0Y|Z(Su+oowC=J57F*42UPN<6ZJG~oX;W8e<6vFpm#5_rzvEN`e=5_!6qZK>7smEd ztSu=7cvjn-8Ra+4ixj$A<`kPtWlp?)OLbk>GIV9Dp9Ct@!8&{|rWerHKa6jPX8zDF zqdXqRh;W%5@Y1)!?yk#o?T)@Du!O7f*;rBnZ>>}Q?GS5M0%ju*rNFBY7K>!5Z|63# zB0(TJPjqb^p7sMpf9IoI4@Tx?plGO#Br^h5dssy65tXEcW*tES$sFti^_(+gh!rqm zP%&+D2|H&aCE!itraRy^RU#(JW0bxI744*%d$Psv$83`55`9MwkUfHC)Mj zoh=%2?Us9LUe`3G@9VcV@3L8aB=kBlBj{G82cA*u?om-Iw)D$Xf2ql@4kZ$|*)7gZF)#{BoJ zC0h+Np@rSp9S<3&j2+85B|5I1|ku3P9+Y3Hy*)8K{}T$p`EuZ)JQU@1Ya{=}Pq zP?Zd*pvMNFcuC$ren;?$Y2$+%;Ex{WYjjv>7-i5YK|*f3FFk6Z_@EVhvWKi993qQAK++Osz2Z8nhP&~7x8pZOeqTKx z7+IL{eOs-R{r1DcMFv$FiA4RVs}Z%be+vl3d}c<9-*L${TIA(e^wo=fqEquUWPl5u zXJksE9C)vxGZYXIuQm-*(bZG8Hw_@F{qmg~SS~_s>Z>e@%>09QiP0F?YW&sV{Yi^1 zSbpamy3VszexUyO9y+;ENKP!1@e?rHe561iUpo_N&VAB79pMxa2!$CMQ3`<>jl4 zDa-27d%`>4LhQ9NvcGTV6oFrfcpfH^sA6@FA~1)FhOzcp9-g1jwn)2I)Udp{SF7T| z;c(2G@$s43!A<^&1olHOQZw@Xe+TSk!m=z=J`*9pw63hk_*=YN;|BNfn z4lne@CYo7AZzo28*%fJ{+X%|6dclXqVT#*qi=spJfFIz-dYmVssld;=<~ zc^DCh!lm^z^uNBZ=NJ>2q4#kjZdnY;ANc2+9(=Z1u?S!caaFMmBW1=>f11Y8cP@(_ z`6(W{=1HFWRgQuWuW!mq^(};3@T)7n7>XH&08~Ur%<`WtaYdhw<>&NW&&6+Jc`Tft z{lL{kJV;kM|LMgq8L-WOLxs7F_bM{&^i3075gjWeI^D;bxF3;4gV#;L;hnt5DAREX z>uB5ZR7fDOnD->&2ldISf2T8+7iwG@0m;rJcI(bknpF}3J% z5)6jg6u8Q#1`E8ZBdwP?ZpjeWgX_jYs+*|F2LZdMI?NRyU$a4DXWr$^7Ba#_nWyQl}6m>Kuq7( zU##&Y^FiHp9z`&Se{xOP+7uT~8+W=KC~w>Vj@~WoM=`R!Da*Lw$E(Scqz-ByB6UjW z`bTg0dh5k=AI)MhdxoTsJ7p(soo#uwg4+vf14mHx5?0RINl#Y}gyKSA#e;4vVfB8qckc%>Vr?>8{PRRA6 zIK!7hqM()yS!1O^@keJRaXBi>zSJc7j>NEx3l0+rm0oib1l_{`=_>3iq1fM|+?la= z)D?8N65e#%% z4^SXcq!$(3e?IBn=eY~iV{gu4aI>yAkSD;-P5E}Y`b%zX>&L^%$$YQabKtVa0J)a< zL^!qw3;+7~iQle}M{C91)V^nG+&(N9G&iNnXd7QY!H@ETCWg{qjE$faipO4;_d*(Q zjJ|lDEXJm#rhV;v3&z*NKmhQngv7B(Mxuv|veAS2vLKhk;JY^**C&kfPrS^bxecDRT;ewlTT|fB% zt?O}We{`YpAcAK@Xo|z0Ivm#Yu7%2;bz9tCmIUc(Xe`fH>^^%x;GEvp%MXwNs0ydi zznAM}blFxwy>4L23I?&TF069RDf1bSpI69Ym3BUK}e#th=7O1A2GTwle zF}J~0&5n@bYu*-sdiIx4>)E-rrS^DYTd2YmR^4?l{2X))8~%1tH#XlGU;l^>%j6}_ zrXH3)MS=Oxe#SGS&?)5~FT4do`Rh`0GfD#*A93wL7K6Mlf)+^zHL?s`NV1oO3!#f1 ze^+!>(zebwcUdYs4A{lN$U5tB65L9jO%X!V6NAmJ%}*mvNZcN^wE&ZKaCTL+<@dlz z=pX8ia8k^YEFGzCT;x+?n;XZT zvBb7Mf1yRelM!iwwRJh8+JmEjOsSk?e^WCtV&z{?q%n6%V48P@HhcmE_wnyjDJ{&3 z)jLj&iAC5D-RqdQjX;t%T+*b=C`fc#?;Cp)Bn~dcX5H)`dbe2xRZHyi3_dKLbvMQT;q66>LvObrvEys?N?y0Kl-_H$l`5w~Q9Q6xWYon`%y z`a76a8A}|F$IVoO^-GVzkShw>f53e(E44WB0llL{5L;^^uW-A>`d;MKpAM!Yq|{U_ zF7|?q<$$qxj3|s+jx~qg_QMPM)_TX>UTxb2T0L$kz zjH012DGh6$x=d~sTzRh2GEA#>%7(nNc;ere@gQ&FKB6Z zEp5=j7{K3?@SXuV6{jk|G-KI&=Km-kSa|gt{KIY$+3`BvUjG|1Kq^>R2q?*_bF@B` zk;Ixrl+W0&JS*NtPSWf*289<{{o1ckwT zJCHs{iLD{DE_z|gveA`z}5dUGyT`7x7v z+alI_F0}8-=*~M5CZc<13hd3|w1TYDY2V8R1dBo`@;6~kO1_`=LRzyj0liWHx+uc6 z8;cofA@qC-9R-7WpT^sreW2KW81g*a)8(G*JSIjR?16p@n@J*oJYWdSst);%@_0@JwYeTtVA;7IH$<=XNQwfX^?pO z>|Hl<-M;<&B9IRPM>EYu3|Mm^ z4N8&mCvo0}?MMN3G*?mOn3n+oot1amsC0*{6oe~b)yh*I)a_0|R*Au{ z*fcCa0^<_1e~L=(UB(3O-B20e5?P;HiTVp?7u+SF*QW zD-Bj<>!@LVCjeNlk$Vwnm|%|Cey|fm`=DeOj-$ zi27pZ<+hK3aPUsni9neIxS{&JEv@{8sZ;0BsQiE;{W4@YZ-d;lJlB-(t}U6_Lc%%s zdros(WyWWTN#E&3Ak!E!Zn%e(y<4}lYbre$ky^-lMVr0GJLT3ivi#3^>Ma9v_E$%_ z^L@JQe<`PhQ}84`beubJ;VHbW@TRy|{D5E_D|GGNzGyPrzZ<-0Anmw(d(ybIIAxqQ z^czK$#T5AZwJeF-q~M9WGevbrs^2ewA}kn>(;jBi?mKg26Meq*YxUGS8M)d{Tpl2D zRBG(cT$Rm1Vz2FY6qzSfq;J=rca+y&c)Q^ae+cL8p!LnD;b1tgW#sj}+aykAkJ=9a z1(d3TRnF2cIC`ra`ZDgBS^BZ_&+p@=%ujRC0YRl%IIcf6 zfB$DSFR6yUL%445GD8`{XA#MIn|ZROS8F*G9(q$rSuwM|URNDRA0~39$VlNb?tdZ# zTDMn?AQTEXnpqC!>aZK;%Sze*WN`ko16Tv)=O^JD?wA`uFQ30Au-uZr#g#mY-PxK9 zkc-v@MrE%@6L?E;_v@}68w9y!R4BO_e~&l?wFcgzEr^#`9wb8EMEH_e=*IBifqIH$ z($%AWO70X8T}$Ng?$#P{>ODNdW9>!U2C=$4B$fxS|>HE^SNzB&Rq)AcrUTIScBQxMOQqdqBMk0U; ztVhMDs+!-MA+H1ie1&*cR>a?eof`+XpCm2p33r@s55=ydapH2qmMwOo709aGf+XN| zU*{F5pdY16#A&W{e?fqOF|OpIf4HgTyjcMf#o8O2ETD4h92{Y|Bt>(7?cPf>hV@`@ zGFzSG@}gb9YNXSvLwxm+H4LGN)Y03+H70J3tMQ?)EwRJzr@)7ni5*ZCIWI%KWXPp{ z&MTB`a`&sfri^FY`#>Zs4jxem?@Ew3vh4LxEt*m_rxELu}lJkmZfBcHGU(K@1 zTUlF^#n)@)mn~8nxmj4q+B~@awvk*=J$wk7v_ceP>pZoK=F?41QBp7o$1*xetomVN zle{h%~N!o7UQdIsh_?UvwtS_c+JV(zdFoJ=Tzi~d-i=LM9 zm)|SZ1Hi2KHGzwH{+qGgK1S0yC0Rqn7|&o0RyGU>YYmm#(slFBk$Pq3zOw#k<$)=9 zM|{I^MEZhT%o^a5O8?GRGl%k=@7vt^41U+he?hUH^o0787V#xg z_xgJBg5{)2j|>Q-EreWb^b4$O$^6old`qCJ6@l%#Np%b2)%;zYTTkPeh9j zhMs7D4?R-Qo;*X1#Dldb3u1uf&@}d2cz&)GVmcFeD{<@_rJq5JilH*t1vp8l(wNnO zz+88wQ?yZ#^*jWjScopquy~w`e3sV_`jiZE zS0n&sWB#F?pNv=Rf85>08SBcxwolKreLcukc4GI^~E2p4oHUb2e| zdEx4+;IWbyb^Hw?$@oTHj~%62_y-Y+al_`xNj&wWkY=_-e~;tGq8Z~xOEcwI*F7+( z%j)qk8$hr&jK+qlurj@S3xt)qxArp02wCOge%SssB|%$IkWV2}tp+P7`Y>$Uz2RA1 zHcc%-7DhHa&61B$b46+j$s__(PglTD+^bxc7$%QcR@}L?-@V%vQbkwXSP`Ew+5vEg zSIJ@T8gLIPe|yYEW`(&u!0&y#g7Y~3f$SCjc890Hw|P>b4--drI-nq13ifH#yVe?FA`CKQi!@6>HZ@!Q$_u%ml^ zBEu;v!{T2trz|D2hYC{^N!S^?nLOtJ06vZJ@tE|epa!KI0=xbs4uz@uIYXCpIb+4m z*8>#+DI*(!;WjhkDXroPte^Z4@=A7h03pp6PMC|(56m4DQeFd<<3sv0pMi#dQ&*pz z$V&2MfAXzOYv2(-O?7+!A8Mw9iuhe?Y&W&>H+4I)wMdjCm{5!6 ze`Qqb-E*CT9X1Q#)Z`UPRvx^O5N%WY#RNauMopC@UJ!-1%X#KuEt9I2@Ngw;(@uYX z)uF&bI0>rja1266-6`3GQ`|sZZ4W^n4Z6-(#(`*YSfD>u6o;HoIAT#Xr@C#+)#o`w_S0Be$q}Ud9PO}o1ZWgi0e7sK4$dy!^E)JMRc~xWaiHT87jN+UI31p;m$z30eX6x%^ST)H3;B2L(pXE+0?V*-~IzH=x7}U9S2te-Yvv z6RXbX6WLHWBPu^Vd*QHWSaniJ%#)8kUM|Lv#VW~ zVKvUAQoz8|b`RXD0vX)1l;~eEg_?VQCvMI0wu7?SenGin_M)mTL0&z(#c)RdHk5LS z=s@ZdI0G8tO&pd7Baho~#c|i&e{6K)u?v%XOmixo1*s^m!2U=06^;juR47%q%v&H) zbjgm~eAwjq7-BaAPTAaK19k73vmYo-49J-+{QydFPothwZ7IP6{kz=th(&Z(k&}m9 zvV3llU8ZhCiw^0p{~*uw`D*yf4JL@@L2NUY_|wrAoVGKMB?9cXq&XUy8-0+0qSi_DD=jo zRITG?cR#gOcyfuW&@$#KR_}sx;yr-C7lzGv`WM%-t^~=$nj7&YNKE<<+~@3@+f;GT zgiQlJX_{M`ksr73tXA1u?Z-!4JxychP7@+I6CW+%MeKFqlVigVe`z|0^4Z}*VS;R@ zGUbyqfX3dY%rh`66^A`U>dYxa!U`1^W<*TLHXd(lrztiS)}~{0?YL7aLC}Hh*vdu` z{r;dGKUrj=VGOn9f@aNmGZ@b`+R-wjsj78jBL@W;Bt2D>uyD=pfigd9hOCP zg7Li4%ErD`At1DF(<#-2S!t)QD;tKc_yw5NJ$8ht2~HZQf4IpNWGw9`CV~nS4xiJC zsV6OP{|yyh-&<|9xUGpX71Fo{c|YTGo~FzgTS<7LNslB7-QIDk^IeEj-c*b19Rl1E ztxAA5rOU+wr@aqEKF~=j$?_ia-1{z}ZKqUE^kj=Nroav#@@FVPBWxn-K!ec(Vs2_h zqGG<`tF|=He+__AXHP#cl;2mEEY$^N=1zp|*{w6iR)J*KlcV=1ajzd^;;#Id zHWKgeWqMUafSWi@#8q8gYTHWWCj&%2;y%975&l;Wf3NKsNi#3TL26+27YR*J(Vsy& zoh;|5PKo#p6Vq<$T5J&T2UfK9k!yV&4F$9xxlxPawxGTz$GWB@9*y9+O>w?Z z0`_f`?mN|p*oUQ$69K2&WWnnO2~LharqMVce=C=#l5-r>789ujHrc$b=Sg{=B`!u}bvtLNn zg~~jx;a5q(U~DHZ;=zF2qnD_^iilCP!1&ASwF!B|Rxr)u*!f66#O6}fO?^1+%F0%K zEN)1d{BU-oPhZtm(NArb;(IfAWS)jAsmRsI^RSH+;*43Mp8!q1kpm!8>M6L}hMsfkYSof~|*{DVnmMcJ^f3=YfdEyoJ zbqV@Af9%~`?c~R05${~+?u6f;@L`)@(p>PTAv0hcv8P}kVxMtDf&G{OgT@^hEI<~7 z-jm~-VKDa_}j*z$;^o za0~H5y>?079@3KISXKv%qm3A~*6khoLG@2j+3?VuNCi@9?;t6(87^-GPBnff0NI2d z9fTK-n$(B}wXv(vf8GWm3H>Pz1X3by2TGT>BkOi%-I9~_x_2xnBfe7M3#-w^^a_k%R2&+ntnZy&C9O@Z}b>A!T z2N+X5@h#hu-KCd3)-AWGgD6~7PSivfg>w=8qzqR6bo~R|e;3M7mWiuUFI$$12yPO9 zLgtpXwtjf(?&5FSbeed7$TB$=%hS)DNHS9O6t4?xB7h5&7^v*`IS7AR^H!Rh)`{#{QSXwk3HVX6m*0QKAdU@YBa2s^!gNs*r7`s^#qL}_(|O> z>q7Aw*Cu2Fe^NS67?*+Ox6fM!Z4?~zx)h;&l@qQ90l68m;M3=E)fVOrR@6PdVC{B< zj&ok52!w|(2si5DJXDb5I=uc|*8L~XA|A2XShAbiD;9M=>`9LIhrR|nGz=csO0+W( z)%}bic6pWLLeTx{hW!r8+R8)04(yn#L5P0Cqv&Z6f7b9h1=t|ckM&ejmZV$OR9Jwj zo}h@fyIR1;QJ^T$it`z7DiqAB@ zM6CT&f0Zc0FtmZJj3nPGPJxB8C;m;_HkDACD=1k|^IPzh1#am!Y)4q6i_7nNALJNH z5z)QYe`SACSWA{X0=fw$xJFp2Q|))kh(>9o$=#)i^gkpzG@%BZTu>~5zv+G#wjfVp z8qy_(kpp82xu|lB+y_fG=1mU9s8Q7MWRa^KQRaO|N8odu&xhxs=SV=0(sP4?s~P(Q<A*DcBGCNk6m~?nf(rs=f31woVQP;<~RnzXEDG*MQZKFj!aN`2L2eDFgxU)5T=n9qg`6`JC?r?nl%$L+Uq5Fp}6& zcwiB<`NOHrwC0Nu=@&9NikAuqwXjiYe+7tk%q)UFaLz@yD##DccrY*(8ZKuR=K4ev z{3(iL;9#BlZqZT2DK*Y$Ht`3S0xB4mLZ4sO&;&sBK7bW1O&zECmHV+j_9Zoz_DWti zimV!9eG{3?|I+f1AWWw4I7Hl>kJ1#A9FB6G^*$ee@*p%M1#u{+&NhkPn++^ie=Hj^ z|G+vg`8C;0t!?$v!!gAD@eIqihEgIioAs=5hym{!g4{9Sf;9D#7D@~SU8^n_9Yp%8 zl{fCYH5$Y2Y<@uzHg(yx>r6|v+WhtH0c|u_jw23ij)C7j6@5J7+a3~6VfpGN* zoKJ!}joVyzvDWivMH*6Y5Ird`^~-SC0ucFQF>PsotR7nhhb4Wn5g`L}zq3C2kiaY; z2W(ikd^Hemw3;Q!FsywPfKb$tYYqmsfU$~7;z-+}4)IIT6?+f)#lbnTf9}QLSJW3b z8s5Z!^We1wUbo)IY;m@!k{BlvI z8enmnNfCz(DT>9Zj9ax#=7YC;^av_}nIDn3GE1m0h5J9)e*+Xmb|jQ_EiR7k;S(cc zSj_N;U_cGcOLC>-)3jeH4k24A;10(#=G68NhISu0(;H|?WdTV_5k(Mv8w&Rz5{f?p`~VvgHXX0*hy|GPuUlbt}O2Mln>AWr@2@ugk0#!r;adknkXE4Z747SZ}+ zRrY)NPS%o07^8BQ9_K2OZSlUgPmr``1>QMo=C0X^f+YHwC0MHidW= zDVnWORK;oq)L1`b{E_GCjG>hJUjhp~;H$Cyik5zu4ZHz? z^^590O_$ny>41D}O^bGCVkC+>ne^dAllaxLU=`E(dCQM>$@x}yz-8sha_J&5v(cC~ zkU>dK+nJX00Vrl&dQvJ5ZO7!3+l{$#E>rn#4Oav1o|)oEU>FKf;E?Q>FsO9R(OBe$ zRD!tyf7n)2I-n(?REE;G=;I*2-xR16Q7Y?5Mm2^p`Jf>o6*I>%gP&!|VMzlM-2i0v z4IbE9CzNxzN4Eq%>T7$hTB?LmkbH%3NGZzh-z8Dmypgp|eBl`OjgR?;-(|qdy!pho zNRP#3VB{9SrYPRV)!atsl4x*T;7Rz_3HDj9o^kU}HKj?C_Khowg)FOQZHw#?j!i zN^>LeBwSZVA-s`$I2FoW>JkvRw+IRnX*7q-@2CJKLZq6xC^ zi^}|Y30o%tA@@E`2{tq%r5S_eH;)(@fA`q2C!Nmj7_vL~O>pmTEG@Ck`HS;646;1n z`Q<%tx;N7?A~ZT-Y{xS|oX_l^W5zEvKSaCq9ym!!tT{V*gdK_*U+t96c#U41lAdaIe0`k1qRe{wBM z{A269bv0D7CHz1Hco*Wm_hfj_k9SW$+UMxrNv4GnM4hx?Rjsa19(kAxq>u72`zJ5c zkMuAbzdoJZVR0`KU1CnET`j!AYuu{tb2L$x4JM`YGA}X+Y;&b5LVl9rm;+j7U2k8J z=wz$BQ&BM6Wb=Mitx+>r){_@ofBRk9EcbjZ=sgFst=7Ekqnqb(kv@~@r%re>a<{^{ z(vPY~s27d{87B=1-IJLfbP)R#YBVXm8g49S)rgS)YSh`Ov=to%Ubvdve0QxSU~#^h z+F)H&$c2_9$R`44=$-C&QQ+8gVwnj0;XUeGi2WH3jK|Uhgn>T&;~8rDf9XQyGq;@G zB-kIl+uUl;>qg}oabTIG)=C#(5Ne#`b#AQJgV&et7RPZDZ7#Bls)W5|(yG`#>x_3(f+NG?)A;OS5*F zn4QLKWpB(s$wAmnrv1&`Mq14DUEjWF0RwT__6c3O*eOdBf=#|ne|`8s6>N~W!*Xu7 zUs9tR1LsS(mS^Hqof3t+t%8XaSt?DNA>vYW54sG{>b?@Dc(?@pvJ=DyK^q>MCII3U z0f$*ogL^gNsR+~70^1b;-$4`?j2jG>^sM`MjjkEiKHsWv-(k-|9db_`C4wd;V_tFp zWI>_l@Jj(X@@sI0e^OD_6GR|{jEK!B4LRkm*V~jNLcLmnsi6FyVurjtU=; zSNOtTdY+V-wPAZxTjd>nqF7(5rTYRSMam?7&xD%IEIYl}6gpTt$3Gxx(}KLD;179) zXjURXpeb>P#e6tEjNq*Y+^<+FosX8dclUK{7ID~bd7(z3yKHwQzYcYhDcbw5zln}L ztN3K)7t<}}e-QT<;o>MZ+{4%dmt75PcQR(V*~1_JP5ekSZSSS}W417kRf5PQBUzf4{YN~mPow=I1w{ZcB z=drHntrqzvQWHNhF7iF(Va#z+^lT*v%RGkFm^-mttT z5+c72DN)j%gDn-RP9!C%uGLq`y!|?wpauoaimy>5v?qI)v-K#_E3or2e>gR^22euf z4Cl;>e=%Gl%QvMfmKe{oXCXiG-j_*2R&D*jaYoDh$`%%hb$C3Tu(Wo_R)$}G^7x8D z11kSo7{fmo0a1N~UO1_p4LeQ*i1Z$pGTR=~Kvm)(Xu&4H0V*gSt+cJF5t9qu4*a$z zJ-_xHB2|Y|d1Wkr3bm^NOX3L9XyHUcymDa@L`pwN>PJ4y+M4Qe)GO`~$bddA= zsOPu+A5i9Zxs4j7lOv$$tn+Xnbn!M5V z=J%EjUepMi-Sj{kbFtBtb>!w>$OU@Kf4^+E=NV9C3^C;xX{70)Xadd;5{I?hK2DrG z<`p1d;<}t!qRKge!5+Zw)1OZ5Qcq=Jm?+Mmnx3W>=Xp?^Ykn;NUx>;S;S;E0C5huc z=eRhkDOk3sTWc-F?&`gf?+yC_OgCd2f~HOPeb{6O^@UAelfQ7$a&8_f8nM0 zg_`Y!2}}8t_ovu(qOZTcIHM(Zd|6ykya_+0Zr9Ao^5sLuY73RIUICd_ zYHFkGN8o*u^09wl0=w{2M5u@8e^h52#lBnDa3IgE+wx4r&b_~c@MM=q$ z^P|x$iprSVAWXQTurXgBCHaG4!n_>4n&2R{kC+FA&9 zYEBFNWpx^^v5HK~JrCL9jum^vg^9{`=Qb)7)f(1S)I#HC!zhaPHi?TrLz8G6bX_w~ zelx-T_H|I3U*bNf)b$q0fBlq33F@7PdjQxkfgf0BFq(a()Bz0S)1H`ea^p^-#VEBb z*c?g*n2*~0e$IAdrZpNEq~Cg1iUAxFJowpA6D;|V)@)X~uc?*&VmF5f1r(gV zvMWz5s=Of3`oZE9|;<#`c9mwx){`L4wOD!t8ZG54+#jvQ;8>lp@c-jEt-JoBcEO z@~XO4DMzSjsHe_Q;HCnT__V?&coye940 zs`M0Z3z|CE`yedO3m{@y3rTy!YI#{y>D3InMP(KF>EIT%#67E-Qxrr%npjIA0C8tq zYGu}xI#QUGzuiTZ2f@wd`&;n=JGDe6lLZ8!S6*DA$BUiQuv=JgdG3niF?Bmt zOYdnhsv8d3e^!wY+6e@Efnw`Wgx8q+w0GN{!v(bE3N@jEsj{wKF}vFu^fre@sa|Mm zCiC&;YTA@Wp5P;x()!V8!f1t5a@acy$WBv}CeIZhWHu6$*rJ&m@}%d6UryzkAH?vI zovZ|XN^$6u8yq7*ksE!?YTaPF=6$$=bPA&v$gPG*f8W)8SK*=7WMNvtC6JT*?~)G& zf3mkaEo>g-4`5V)i`(rvvx1<*2pEGRmEW(t7TVJd{NR3GcCjuStnKaIp}*YlcAzC4}{q=P3UY-+b@3`hM-Q+)CpoH)&hgd^!`1 zSOvj5zNuGsL*jK zVEfx_DWqt@Z(P4Cavgm{tAp6rF{Y#IFzW**=}P>ffnF){)y^;;pkpzIpM+@PDetWS z1dWV=gw?K~WW7q*=8w0=@s9kUpxLKF@G=j(yup=c_oL3i=G4 z!r|mXC<%F(+&rqjYrL|^o<7#8UC7k>28M9AU`z{u^CZ*!O+G#Pa(=5F$MTL%e~x0R z{oH;6E)U;akygKJX)aq6m*!;|+$iEtulzwweXjK5ti6NSZGWR+8^zN{d7CoGiMg_B zfV3(#taM&ui&oCT4IBV>WR$u!`dJOE31^0s`giX&TXnM`#s_`Ztgem%>ct4FuYhXx zf-%$TqV<)e;S+4{J6^C6PY~iTe_`q{&Tq19xtB}Fsf``v3sV7H4V?$gRJ5mOyJ9Gn z*_fcXM%O`ChF()xozGgpB=)8jYfc}ic-^+J?auOM5CB14gGWPJwhT{C&Y`rv0i%RG z8J7`~BiM6vK0T5tu@Ve>YD*W;6Wb=e&zCJj=M`s8&kUb`$4Es-84U3yee9Oygx%Y#4GyDGE4)yik_;# zGK*0M|MuCeLs15)D&Pch+2x*#Uk5=WKO(9yV)tM*>ki1O6(-Vx)3K*GFY8Hy%RqkU zgPFCM?H3@umu);^;qBYxf9BQD%7aqJv}Lepb#?&&WJG=u4|H#lU-$9^WqjYvYUInD zq^mi+LTAzgaCDyTFtf}Ir920aG;*%;TFs*OJ^!IYo{;E-xT6FE;uepSgEq&~pRNCk3&{cAxmpOBm8FPvyTxqG?IQ29jaLePB}1Tkd;%0u!0<2msc0<-SgCb{l$6~I$>t^LCwS3T zD7~wjGjEpC#ttIZE_Do6jwcnq2|CBr0n=1%$iK|2N2old-j%%sc&!EM^e%$3LDjS{`th;ZIX z#(hi5GMGI^Y9xUMC)Zi1qA%_Q0|Dstd7!$o#HTrtFE$TNY71am&2DLb6|` zP=WmGjbxB{e-Wpm+&iptus>0G0j^fEaT!AWmskQHYd~Om1_{9ik_oB97Vb~Zf+pS6 z)&F^tbl+r>=q4a8at<~Io?tReHuVWQH@crAOO^!W3b;oIs4xZs1BxEQ(pAsowA4|0 zYk+h^$scoNLTF!R-kx$%0)+j0_q4@^zm8&lox36*e{r^k_M9x84;LH;nBG{_Vdkf0 zbi;AasG`KjnvoaJ4d|~7cnUJ}KyfaQwPc7@X(2tgvtzxxtgtz~@K8hC_V_QlT84Ll zmyrpwK7nri&D1B^#)cfowVHAHtRmH8w%Cb3=3a}_m4clTkg_;(zr*mC zud2#C%x30$=6Xjiq|yY>D7*I?=Cma~pQz`xHnY+;1!Qo)H%r;7a-WW_*+spyI?r=v zk4oYp!lWQ{P7N9fr}2Qy=DCf&z3FOnl+@QXe*go4GV;s8yW$tAczIF#<`y7J-iG|a zaANRca(jq}*2wE6ob!QjYjb}zIO(L3%l(DH($gfulQ))e244?t`KJnCzZx70eZHEI zz;mwzh8L8?&gq%gF_}#gWC+M$*(uWx_DSrDU6{EDt4fUGD2VpGCvd2qg&t0q`Eqei zf4?aPW6wXudP8hs9mkGQ07>7el1bFIZQEDa52PT?sB`|Q=XJX_x+1|3l;o~UQ3jTV zmx+FP9^(YD5CSMM>|)Bl)0yKTzwjyE6RBT656UnopUlH{mZO)ptxqaBj+IDKA=8T#YbjT_+yFa3 z#J?c4vvW2Cn#S4i#b5=xm+C6k#0gnIVy!lfPA01KnQ=BohN#*~R2rx}HChvuEdHeH z1b^iEXA_dRl4RIQ_B~C-i#o#hMN3DB zE~Y@NJItP(1yccF-RU4}#0 zj1(v!@mtq}53?2OjcyIY%hjf%Qs%g7@nzM!_+EbKUHk|X2%k1#sLkUQiVIOzk(#ap zFkSbyyJG`5QqWotcCKw?$_Q}xR6y5fUoj;6ve9dp?wH+^xz8?To&hESlTiwApMPb^ z_tmWT5|n}*0QX0E6anA$q4q(uciD~3p2yi zPAj>pY+l%{ZXAlBWiz*V$UxQ&Uf{Vk(b1gAPQ~?g2M%e2zy6?U>e(6ex%H%OJ0;%W zzu4qgPg@iYN(QDmfh9?Ns{pRr+JAanPz${5kGrrl%1gbrvuwO*RIIIVg2`>OGv-He zwM@2zSJoIpHHX0hP!@^&nqOw^JAeHkK(jlxcwzp2lH=KMQrk)bnn76dxoF zU*33;iIc+HbG;YDeL9fee%okB6I;$~-K0njBL!Ic@Q4iF*e!4R%UVNH?SD4i+Sbwl z0_!c16et&v=01ut8;9{e+ZWgZK zUy+lW=)dt0s@;gLLV!uO0CIwH0~&8{ck;;KC}2cnLNGwvjYhg6>3`F4MSC}}&RQ$j zggoZhaCcIJH9VkRgNN0qa+;KIXO~5ZM{`&Yi}flTkp7+Da19P^U{2?US{zSaph@QJ zkq|y=gY8h%Y%+L&W}LBTCmdi)?%t~1Qr$;>QrLGey(*NVe$ z=WuAa-F%yB7vq_u#(z0a&7eostmm2dZ`3Rz&%Mx8J;)SK2E@TeOWNEPFJ{Kv>O6Ks z@ZivP^whiBE+$YtES0n{pHDWWZT{m+NHl3mO_Krelu=2LizuJ8w|SEZm6H(X;&^dX zXd;6g-9VoAS+kW1P-Ntbi8-Y#Hx_(?Dy{3g;GBC^qH#qdrGErt$JU!6obnU)CajMl z)(~jWK-B`#@v_&w(MT)K%fw-k@psFgwz!Q%NC>qFl*=QmoK}G^gNI-9eiUsy(+YBG z{VN1$BwMiN4Y0QJM@qvHri+e7(QEl)@!JaNea4^9qcJ$66-X0dg#b8Cl(1drVC=U0 zbs}HW)X_ucf`3WhbWtvYkH&jx<@lL{ejm~_jDMj{j`&N5@Kqm{carcAvO|oyppj0F z$BMKtI+0G0XglO}^Aq&zvd6`avi;3$CnAWrJDP~svu*kUL3+wZ(T7pDU_;|Llb)!m zYwn>k!&!4--EDAm#7ooTSJJzD+gDdk@svUODY#XcHh=LB#?|c_8^XzvJMS2P!PsQb zYkbJnK>hob$2BG#j~U?8dc0kHS$Jhm0HYYB2$4M^f;oxkj1Tcc`eZC^Rjl_MfsRKS z%K<9xp0ZCW?%5+BbS1}SBn;LNOGJ(8LVgAe%$YCsDfhjKna%R&%vzE$m(cY|GuZhQKeX z`Ds|sivni?0P;@-utf6s7;jSI?0;KuHB}3p3UAXIk>%+j^4&MU*rOg@H{H0uAR-mD z2#FSSgA+a#baln{HVZn#5%kA5?Dv%(F0rVv(|@gRwYdl`Nmb%vnR|%HxyZ$_+iG>QD!279H0@QYFC`4?SawOQu!MCYE z*cq0<)n|B3r@RAt!d(DY$A$SFJkP4mG?82uyU9XFuR)jlndVZm9QD%Ta;QO-S_Ba( zz<-lDDWk9>x&8=^1GhKd6%QnyR3cK}tITy8VFSnEL@pU|2@q?W+%0-F`ytWw9II## z>sEYSn(cHltGitS`(wIUa}wN7h3#0Q%43J&q-1~b4DqpW4*t?F^wiy4_N#@e;l2`X z*%En+k6!VgTA4J4UPCJWa1-_BHE^9?Z+~1)_QxZU%HmN>4q2NH5Y=h*cEX~|9W7q& zBUCyOylY1jwC1nFX=kvBFO+LO>;?CB=O$bPsRare+Je~q3^etOUrPLqBRO7DkZYU4 zeDkAVd$QxkuMeGw2_Hbu>X+kW|D(kZ#^GkCBQ+qt&hECNS{>Gbj^f`GK#jurMKliGsZ&EH8n`B#I(=qc1Th^jE>i?=xp;j;8KFGy`NS8#YjrTgjmU z!OZq?N+@aok+E(fHIHNrowCJ9zkfBfoS=LY8bj@OSJRY;H>1!9CxZNHn)CGW>9su` zKNQ=^tc|%rstozOW@#L>XRI5kDhxZ;&MA?mT^$joO`K)k{FFroa*g!=@bJaZd4`od zr^BN5xy1wFTzM?jN(VA!;%l>($1;9ME0n=lzu`$jch|*nzx3y!TZwM4Wq&k^3&o)yj}H5eZ9_ z9!s}P^%>*!B-%zb)cQK`yzv?an5G%~r@^pm9qFxc)HzQLIsol?{sM(XtQA+1+zN+X zWXe9uWuiC3(DcS737vS})-zLj8y?c(%uBntN39>_8s@`d%VUcttAFl_;p2prhyGz8 zB_yG)(r*G9`m20p=Tm3ITSAcryYXB1AGA$J^5O*TuK-j|Me>Dpn-wChv&~wHd`)@6 zN5&B*m>>wR(6uZWKH7pz`5sbD9aXkm&jF2Q)<7<6E0K=`j;p?|!%w+{8U@<=+z(7A z$X$=9b&6n&t%HD6lYa^~^_P~%wjrF9&#T=+I`z8}Gi~Ra45an)_ygj65L7h&`==hr z=rLv}$8e$_v5Q-0>ML{2)^CGMRg<*y@*MAW;53*he6f!)%wgV;4 zivp1njK#Ohg&@ zo5re{0sG5hNqcI66BMoi6DDA3=%;+y-Qx0SgRw_`%dEijDE)(Dv%2%6i+gy#Q$ z?hvgz0@H~1%NDl)1j2lmF*Co6=1H-V8LhrDPbQW>!GCIALb`sU`jg?dN4O4PdaR-x zdv`jO{iQC0v*aKF?mjvV&0-~kLQ->A17#bx_{rmLTDL?>_3ni?7)x`Z`BWa?+&x71 zpmnX-`Ic&8edsj07tm^cDORlHRKvfutq|N2db6+b)Gx?>H!);UUuMy<#6XKzO-^Py!kzgJzRDh*0IRuRF;Sh!a9FrjnhO_UepMReoflR-422lxA z_gZRmuyH`}bi?Ln+eQdL8221{S{?a$>=F$jA3>ldS=MFB`~l$Q*?;B=C=~pjXYHw7 zxENrVjAK4l(BcCTkH1eJwr$>^9-Yc^YA4_LGN0Np=^lJduqwDu;LTgxm=@tgOv#%O zXMcK_Lm-v9?ecEx{%(5MvuOo8`$1~nvy-Hb4f{FL`vg6D9Dc&_4QFz2gXC2rZtMPj4X{5~slkZ$nmD zLGh_>C1sX?ff(-ap!47Lz%UNOq^_=*Fn?v+cEIqr{bT;uu+a{@9EEQz^zV`T=V1PC zy}-j41!5pLZb+jw{gp|HmUWcgC0)aP%qT?wp-&HiC%>ou=A_#xOoUa2`3+j+HqT@6 z1)&(D&sz^oOt0L)nMSkwgpEGXU&}_7Z}khF}c>{FJl-+ z?z048heYBl@T7F-@nn>yWe_PpiV?+t;d+VT-nl8n%3S(HQmH%ln6SUsIIvU+m{W; zgk?m{SlH`sgHag%lm$=uXMVXs@s}DrBE#Qk1&;_CBKl??2pIDJ{gxq>UY;jOW)U6b z7s!r1DyD6QeEGSNuz%vI4RBk#cwc(AaoNUH`BP=D8{KI{K?qWZy01$lsYBb9?cGQH zY-+VBZlliUH>%0HCQ6j4XQZ(a^R$Ym3vJ^$EazhiV-}++4m>;R&r+#rl;Dv^I3D04 zXSi1bP`ST?xg{3s6YnuPVQ?V@u6$*}4VH=?Lrr+$=&kHE>3>{U$~^)smX$HdD|+m@ z8J6Tyu;=7rKIoN%^D8}RfQ0SK4(U9GY>)+MeqX3HO_oAPh+B*RyzF7WmY)c)N;-9K z|7TH#O$i${Jr~(d92GH1p$N>?If%vt?AvdW^`KmeMn$m@03QhjMkJF`__oetHE0J8 zkSA=tH@CGXpMP)d2~NY899X-ab0k$B@n?c{P3I(rhcgaO;x#RQu`bJcq>Gv6aF$t7 zW=K&YO$;{m&`elSc~>!;7GioYm+hQYT@(?9n-6+uN)0h4Y?riEmO)*{TOIMGQ<r=9595snO@Gfk;rQ7DH1D~iv5R^m!_8-& z1*>=#z{u*4(PE6zFpORSjqJ(|bp6aEiqU9(pzwLpG;&5T7y*7XjOUlXTRrN{QlIF0 z_?}2oY+!ab!`OYwjKbgV8HTJ+1fyH-Gg!H9V?4Uv)TpyUe39H8cF9o{WF9CCP~EX` z@mycU`hQU@ErtA2g2#`;Ij;hzJoGu<*ctpbS?=fX;&oCYBhf%-%7+Q}e=t(}gWiJQ zHCwPia?%dj1`qK%B&2y@*65(k&S&EU z1NGb(ig52kE`!KnyD=V}H?V5Bpropp0yhW{Dj#%}m4mjotfE zhSf*B?Lxa|e0gLB08UvMJVLTSuM9|E+p>Ui7uQ zM1Qo1ExMN^&K;Of68Xt1dR)I2?0wy0g!%(YXV1}nAGG2yj=;)8azMUZyboh83!2|qR8Sd&Lk z0!xF0Y&?qBZni+cSWR2yxptE6V;^Em4(AO5#8U1BTwI?v!_DUV#&K`Skc1OKPB#-UgJwOJQI{< z1c9zBPP3zTYI^SzU(auVg=^WsEyOUBrp)k3)i-e=tS79*JFS@TpRO0a)@VW$z75Ma;=S`~bI{;JHVt-SITx1`w z-Cv*86^4M7QVsZ0AgHo#Tu(4H*OWR^RjM_cw+yjuOeOZ3^T(R1gdT}XN|y>{zIPDv zu}Hvtdr3iczW4NQ>KDtnC3YM69kuE+1U;IzGYf{j5I&2z?jb>Jg2#ZcVa-$Xa${M2 zP~t#nHwRjv2|d6i@4;=ECx0q0StPhN=jQQ82zx6HMJg@ss_?>#E+g#n!)B?^!(tOF zSjQCbIFcc@RfDj#NTeK@=BP!+0&7`5N()z1Y~RAdi12fvj{fE~h`R7sNTw(%SBG5K z)-2Pfa3IEGj$je9Kl7`YDVP(AX(gCNcLhX>5Ncs_k0%~7m6%-41b+}d=?jfCLyE|1 zH6s@z^7WeSd41L#%m@@}w(H!Gg$SvZ*0kd^2`KYEi$PcL!zuImg25okc`}M}9;>m* zxn6ZTGwROCtEe~+jV3Y6L$B__fO%P(IQsDGtYjZNsPQ^5QtYWJ3BfskJ1*kZL;|lv ze?!FYrr$F|o6pwrDt~FEEN4`8J~z_?n&O&YJZV(vPUIU2+)>uMw$ zEHttWBwg-ZTA3DF=EG0{W#ErhTb+;&G_esCD=22r!Zq+*JnWUBpNcXq5H&;cX0ZQ5 zSEAleB8iA$H z!dNX*G-LMoZ4)k(K`Pw*i(?(WJ3Qd7flfljw>r?iyw2plpoE_oe2plxhEl)2F z$pMuHEp7!7uD-0i{hE>$KBk$YMiCWkpfZJh4N&zce`0Guds&wk!9+gEIM;^`$;Yv5 zup$WkFuPV!cz^D|iN-^Ct=Ab2H4(UouHuMg^vT_0;FnOzZ4mE|M}`^8O#;ViW1ee- zJ_rO(5(7oj>?Qbpe!~ozAq@PosvW0^rkO!r@c!0r0P-LimvJB+S%sH*Cm=^Evs8u3 zu8YJ*iX$#e0%lB?vkfVGWU$EAe`CqJO2hY08+N+RIw zB8Fe4p^s5IBDo*)(I|?NJz?T(hIhD%Sx~QUech9vB#i5U1h=!#=r+La_!u9d;^%rDd0&FlEq!}Co7{9t0 z(XKm-TYq9CP4{i}HVObO)Ioua=A|TCQFYA1uO?2}AgPO>e{}-gtc&(pa`Dw2GhCn< zkjytudk84puJIF1smaPTTR7PFcn&{l{n(2n2PO54FLXm_7K1~YtF%sgXTo{~wzLO{ zRe_xyT)I}1Z43;8M~4cd9Gj;P@8Yg)Z6OI8F@H=lKRyAawN8MfQC14&(q9ZYYr}k1 zpVJAq2al#_RCt`Ykk+5(6E7{I?m}Vu>_dsE^kU}U%IR?J-JQ-~y>m0rps0V*UC_^0=SR--v%{>GXASiYWz^GF$h`&F5p-Uvb zNq@PvOQf_(EkW4Zf;Ik{zBz`&1v&AVyWq{e$vJj;NXCArv>hZHW02@G`sVQlIn9N> zyM%k`dC@Ujfahw1b+l{vapop4ajQe*r1>i)kMx%NSJ6ud2!`pJR|%x`mPj53xRdVB zfW@_S)e&g?Y5**r?%W3s06^jbh(V2jP=DuK)mh!&nr1d_Vk)?|NPXo;VJg(tmVI}7 zw^$GHIr!fm66w$`?xa*1m5p;|#hD#(m?wjcSR5Dx!V2zxTYnRS zmd#5f$+ zX_=%cYJy|J&-uw=5NV2{EKdj=$H$}j1wj-c+LESeo@8Md zjz_=6afBdT@B4Y4tjBT8w#WC^u^)SN9mh6Bt==2&hy~#J#40BRfw_E z&;89qY#F~tL@A$TXh~oUhB27aA@KttbGJVp!51Gc7?#W(Q!5@s*MAE~F%00+flWPv z%Gjqt1~U5OF?|CybniVj3Es)-E@c%d;_xd`9-^-Np=&#SIip*(%?`>|W4 zrIRwc@{UKrr!4@00xfL3U4R~&^k0l(_hm0SoRe`2>}B22c^moe`ExbfMjsRYIrSgs zuiL`Es(n0tnFX#_JZ$32^=r$Y(t2qd>47a?^c-8;^SXKQc33v1@v+;GueX;{JdsBF z$DQ-_{8EOP$Bo=xjN-`W{FuL~D@qryw%uH}eBao~!_&jHe`v18cpkp)Z*>aU#_MhQ zd;A(d)>ZPHy&7#6)5rWX=#E^z1hXgim|pf~-OkgQV{Sfp&^#&?eLOU4ELM+?A4c`Q zEgqL$)r`Lie=~l5&ZSYB>xGZ@e(aAAZ1Ysc=QG(D{y19CN_l#>O|{)bc-rJAem0L` zve?RSG5TuX68u(tor5cox2hDE+3a+Pl1VLIUu;k9=J^%X9eC$e{uMqN!Lc3}^ev|6 z>yFFL6f2=02#p{b}(sO5{&zGpHBXjpS z3BuJx&L{8Zr*pN7jZNZI;`1e8GlfD?eXQu%PJ|n6n<>UTRWdyBl-QfmE}qEeLw8xT z*<&*o>CG6nGE=@-#@%=WrzXF|l<4ZK2=q z@~xfZ8#|7~BLH|SuHWe&^Ln$Q9)i80KfkZPf8n6stpjr&!bpG0ITHX7WIOw%Ov4O~ ziy46Dr}~}zILY4}K&^Oo0C2hJe;<9OtI1(qBu`g;{Y!lj!7;O6^w+JKYd;3yR;4*x zdyU%78Xc7MYykZHtbc#b03@{fVGAtL?g%LW-q22e5i$*VdpL@gOO}k^E-maRfJ7Sr ze~J*y5g-nZ1a=M7ib#feafy7Ac05nlRvyP7GaIS2WJPv_oEv`0SNe5Su;VJCIV(&C zQHnvtlr|NGGDAE}%X$0aad0OLtK3$ck@1s{fMbvuh4uX98OI8WS>;CX)fZ%NibCOB z;3EZF!4#mFxQ#gn$y%>r`g7aOOPAurJxp3Qb z;*;#b@6rd>2?rIta0~Om%ha`FHOKPeV*^$o1;PPayZ+_Vjo1SY4jij`PQ(=~$tZ{_ zv|53eRJKtk_pUXYVYv86rS3dQT?;71k;Qm5Go3crFG0?U$ZQ-=RyYs{LDg4lf5mb( zf~kS866N4f!1LrIZD#-_`>%+U-zME&C!$<(n3@BGfhQ=!86FnP6_{Q?0^Ea`zn z*%|^taCpfEk$S@^Akep3TliHVynWesaMnpF|ddm=Wb zRKlmqh(^E&YD9z;^&!66AQ{0(g!*~GB%+9M4AchszzU!Oq{2j4^FT8Ff4>)~-v7XO zCec{VKY^OC0#pFJPuEGaiVU%bj(yOM={2=c%N&-cE2~M@R>@mGicfBl>Alo@)A26u ztcVXk@6QcCxrMgNbn18M(QVTcYvPra-}Xpm3G3q0E%>qD_ng~SAl6o%t^G)ZK2A?o z#9O`Zc|JQiD04tk3yU~me~rN)ureOHY!QkSI@9t3x3)kW8DeWEs8?9f7=`eK4(Noz z>f9n5fn+>-poimO=2+c~oH}_1nXT-cAyi~=q894KH)ZM zt(*ot_aw93ph{iHFa*r9#NH#T2}j~7@Eb~f!ZEQE!NLr^=7DR0fB8j(^|t;pNla>Y zqU78Xy0nYXcqYkS%s&CoV5!g&tQn9Pyj5bkBB~lMPbn`hsJG=dTU#hiZvLV7RemIf zb8+97?#DO4;4idqh}?n~NTrE|vK8<2`HBsQ^m}jn(f|G4j~rRc9F)hg)ubISvj;yC zSeC4fD>Vl}*($&6e**#!eu*qEd`;3otlz3?Nx=v}&R@P%%m^}Z(WLKIE*2!G8>f*w zh+-8~@!7TNonr{)e;O+1v^#Y#ph&GrCq8VVl3g1MpPIS z$@PmRV#krvf^*u~L4MfeGG#blh~=}*iSdw}*$0W>VPkX5e}gLBtkTrRP)FKDDNy5a zOadJjr~xa5&f4xblgCX=l-t|=-KUmW+sA??z1ywRuXXQ}veJ)uzn;8_OQcMURCZf8 z?XThqZ~DE``*3nqzaPh6{(9=;v}8l<(R(xXC6D`kY%~47qyC)^r(!^*|kJ2No@!_?MZ9_m_b5TYS zLl+;1=^XM}tgf?yF|19c5{>f$a=>}x4G zSr)UGd1RIpMBt3%j(^zFls`)3Ni~jrwl>U6K?V&$2{pZWfwgOmNQtvB24*oE> z(DnI^e>6Y{q?q7Y7QJUS0WQeo@rjBUCFyd#RLsUoaGxy1!`CxAD`i9ZoXR#cxB>*^ zd5n~`hEEfi0Q=HV0+IH|y!A`g@uv61cMRa%&bhX!fa2bEPHt6UkirJ_^7fWf3kByF zug#XWG~eh${$Rs}gJk$w(9r|qC*Fxum!=n6mqca=6ag2PS!M_#e-FQX6&qsra88PB z@NtsnR!Ebb5)7mC&6-}&9%Xw=R1UId2}2@rn&bgg2B5239#)5FvN??bijw-adueKQpX8xA13boyFNQr{pA+*hM{z~ z5XwTbT3{g-UM?__9Dx#{6{cTFm?{?fc+t6dt}2&ElYul(3ze-YaDT?7Rw)^9mUIL` zs{N~d(W<vNr^kVS|x7 zP%RnC_DL}N#6Zs$sw$Wk)7Vem?z_w$033n? z?o&n4dnQ91F#Q-q3V-+^7cqg;XtOu7dCo#24RY4$j)d!YoMI}x1)vz_0@yWVYb-(M z6X<5=f5lT1`(3)e#a|mWeO$f8;LOl**lkmUrFq12nxK$B9zNJ7&2{^wvR+X&b{iwc zf-+Rn%Fk;7l44Na0)j8$b#IH{w5=A$+|4ZYfTZ=lKt>WcMt^(67wNU;b|-SrUd@Vy zUVlqFSmzV!8Q6ng>Gz3+sYETG@Gdr(Ui&WAv41+;6AgG1BQw~IuDS4~VrGbFkQ-`F z@{~dy6X>4XSx!d_kaM(r-jA=dQGaeopA%S6o3Pa9rj_&Fk2SZmKIAtKf2>^}53>P# zXuBxm&Jz9F>3=hH%MW^7a>ptwtJ$r+mMcrirF9G-DASqKULI{j&)0l~QW`Fnf-EJ= zVE-6_aEmMlMdjIDB5pg1TMV3$8(wPC)CUV^Uo?;hZn$I-Ts~=cxgfzmAkShDf=(5q zc2H{+WY8{~IDz>bD(WEM#1i_x$@oREgz=>y+{4W$I=``+ zjxFwey?RWNc7B;oZusHuJCCvBFB2ygP>W@Z;~eD+dvmgo+dlGnp$~vri-0rQm;o;` z8_9Z)8kj>N4h&yTbHXnyWFN49r3q1wICmA7BH{R=to)Tl6+QW_P#nj~QZdDFd9=F9 z^c%|bM1Nk|us(kww4TgymRcmPyztWJg*}guo45kl0~Ubian4}K3t8CThsIMA7R6J) zoMEoxoL(QjRuNBF6L(ga1xRLV$a#=&;*hM#=<(^{3M!=t4BHmM0_AR2$!U9_hfh#^ zw2q!P4?@R(Mv(q0dh+*M_+>Wr^V@#?<$GBkC)&n;;$0sjhrO(M?v$y4u}{>V0oH<~s`K0jT55ix_x)?0_i?Vfo%@@= z)BjUoI79fs@FVG$l}H^LnTX}HJr;F?F|%N^q9DB-ho%dPBFJIxg?x!OqmUkI3?j8f z&L}8tGu%ZYqaayo1Glbn!uG+V<%_h@{25=4Cm>B+N;m)!AQX7lPEEWvU4i5`ynD+n z^K8I>+#09l-#N{%Tqf)78AaMB{gFeiX~!OJ=cdnftU#>N;Pp-5%RDa3J*e1ULN%Y4 zrbVWY`(!F(EUR@Wr56I7FOrP0Xtt$6%>reP!XiesWVTg2`cPc1zNFr~5QZJl2Uo<7 zeNYdyIe1BAOl{1iUCvHOWDg0RbY7Qp3+R@A2I-E<^fW%P%e;uUkf_z1sHdguG4-Anb4?V*S?GP|g^}^Ep zkVKxwRWHJxX?*7>&7WREta|=5{Dm`zHIN$5WIyNojyjgfDDDtqYh z&Ofekb|A9ayM$kes68U7g%m$hp)3$=O|U>gEIKa`8wVRMd?LmL_kxIzUvWLh!gN4z z%@;Fh)2m%@f;3LH$@pFISE!WcqwPxrJuX0f{N@K2-|VOm75|kVArY?-h_xku`70F$ z8-IL_J|%at5GW9qKKB+u)J4nZ?f7~LAr#1h*j<1Mj*p69p?oaD>?nAGm2welIj%nm z5|v(80NWr-VY@we2feP$N215I(aopjEV#wBa5(cUq-!e|PSRvC64nV;8KwE|g`;c* z>i#@Mk*K3S=TTWq)tAIUtnn;=!8o03`%0Ii4`>V=D#$c$rvfx?U*Gdcze^8pkyXRq zrJW_=e!eWUp7}Hv*t^6io*v`z$BdwcEwJz$1eFWpr>F5plTcv@0xVj;v!~#()&Sb4 z!Wp$vxI`!y9@||X=O_gb&KrCv;0ne%&MirrBGvQr#h-Zf#zPD~;M)&>xaBt|=hGuOhspz>7Igg=cVV1j+A04<5%?%SwKdpmB?a)ju#4RLZ9d8f~tNpmV95}Eq z7sqNooO>9LYa)dAtn-C`wf29>Ib@C0_Hf|+eenX_e0h&~y8fKqG0#t2xbY>{UwNHd zWaZ!Qz2yMieQm8KC4cjlaG|3QH<_Grb9s5(Ic+F(vq(EkWcC^99o zprY|;rBIEl3QZof_`uy1UDw;yp-9zX2PYVTNVovGqGBT%P>fq@ipD6nIYkhem>*|f z3lZa)q}cKk!;;QurYP?7I;wH-d;}jAEOq|erMJB9Cx_MbDxUYbUDDsv5O;p(e+}ZB zTkmeR|C=`$ctlKJR_~F)d>b@&6(Is@fk+X;CPj+cODs8TKJ7sje^G_JB9K_LC(n8s5W#f4)|wvHQTo z>l~$tQA~^rl;#NXIHO@Eg9m@l@>zgO6M1U>B9F}ljs|vsYHrS3#v)7N6iHymMZO$8 zAb$2_Wx@V=M%%QCq6~wYkDh?)vvyfZL1v0k1Z)vo)I|%;m%&NLrdoM5;9BP!UY~EY z^!xn(dClKB>Tq_-OrNLHe-HJ2@I-St??Kp{P|SC>!fQcaAu<^*`4_V-aH7jQbmj=! z`q@b{0-cHH#H29h7(sPebh9~LX4vgyJif|!fiwKlnYX+sC6VwR!D9M4eGvLO<2$c= z!^piKTiwpf+V9fV(s@M__q<16_a0)zkVnWg9?lgT&P&+ao363+e>{k;z1(AqzHg|d zxqo~)WqF2u@@R&YL5K`A`|2X9D3t;sL2VG?6G2LFG`qO%0}cxI>V&SHhjNUhEl8t_ zuQc<5nBWOuB)EkQww#x_7oY3cddTAq&UO%{ODxzw&KES|agDMl#Qy1QXJv@Y9dvQK z)ZD6|qfH`GPz*a7e}e%9SI6Fo!XHv=1vG!zN*+~EYyiooB;2T_hj z#O$xv)-h~T>+6mC>lOFwg>Rc2_9@fX)n7{)EJq3nKRYqsX!7vhf=Qe8@r1Q~T=|+V zmOYEUBb(B8J9|R2uy$Ua5lE8Mku&+ zoQco5s>8R(cuPW2LdKAV()S(wUi0Sy>XH9#mzMG{-dN&;pPrRxXN(W4UI_NRS3 zj@fks>HTx5qt~1C{>6X&UhiSO0@n3)n3dJ^m_hRr`OWsqZ@U!yUK>9_!3!GD&Jy&A ze3}cqYAzsZ3#02MBm89*To{s+^=D`QyrM$?+!>8j%&Z_~BXPmM*3w#Bo=4+p=yzpp zLHGR9m8SNe$Wk1;_&Bo8FxAQ(;`PsLftLo0j|2~(Lb_gJU`IE@Q1tj-q zBPcYKJY65Z&${NV$5o0T&4zWqj7J6t2LVGC2;>0eYWB8CUM_Q$FGAy-SgpArIzRsQfy{PnCG9T0hDca{@J2^)G!g8dPax8{ z6I3%liUpN7{Z2O{uJBCF-^B{O_4*xagZzAEYaN3dv6UtIn%_O9o;dttPabah+pn5( z&%5iq!tf=(#o-~}vzn|O;&{K;YwHKsk#CMq|E}Z_RvC7qx(>p=)~0>wk`?ChzNfA` zX|0m-{7D25m})0~E-OpU4x#v}IlVMq2U`|a9a7Z6ohX(in}aaDg%UhdhIDYnkxb_v zHbRoqE(sgUt};r5AQ~%Ewr?th^4RbB4r!bgkRbuHh{iCYB^78tgQav+dpSc$Jg%)00vqmm6YKM) zMB{Qpgxb$(9pX<-<%csEc)M)CioHMi#T~S1UvC&}Fx5Imw*4hkQ%+0=gPGMz9 zYvXqfLmm)uxXdlvw=qa7t-!JK3r2|bxh@hsu|PO~AkI}>vYFe=tP}W<$O|Uzb&Nx| z3si=Esq?ZJgJg{6F-0mcd6ybtUSp`h^0y8F1lyYin*$ibXg{n|VIF zV#Nvw+`W)y?BSP-CFm7J6ocfO)yg26p8zR>h-#b-S`v(3q*nrt2YpJfkKt24q*^<1 z&s*+)dCqT45wJUF)x@P2#JFza5zgb7=hU3Lm9RM;+X!#wLufa%Qtp`p4ZFDL5s!z#NWMrrc=M=6F5M!d*B?~3u}i= zh9Yib^mxfvd````jLJ#6egIAgOV$&KKL*@2AH~el@)hw_A-`YG=huh)lD!gV*W&|! zRE|d?Vwm=FW8tW0lpY(g*&zu#+opJYFN_hf)3f78+NqM(!&DS0;W) zlsU4e!38(oT))Q+13RiF{yZG+E3b1LyR^62#PSZ%+r?v(37S||bv$>&`_d^oZ?)#8 zmy&AfzTe}RxwaFf?l|(huC9H-pZD9}*VNbg=-+vyj#Cj&;9x{Yw||O=-CW-uE^}(O zxdoFsR+mr9%F`8&k2rU`4H#_u(7fz4UwB;=pTqSD8mDhL3+ZtEPu1sDjI@8yX(QCJ z_79!*4@~sBrN}R{oS^|XS^$aF)tGlb9F4De%|KwS17Wv12@r0uZ!KOTkkncl-Mdhu zaSa6IjFSL~2(y&pY&h>U6pcThN6%8sL0T_}`V0G{d=U2M_MBoAWCDcq(nyMMlPzs> ztw2aT{;V(tX4!~RC@%;^x~Z2veFz(W+ht9j$aGef#-Dm6anmcQVIAB7SFLEy*4E%B z4WNLu?i-^ydlwW@X~cN{IEIV4paK0H=iO){DxWq2!BaNm=lhurb5rY~9R2roz4*qz z_IU&ij)(I~;!8B0*~B0_?Qa|tWBvAz6iA4en za|V3~`-CF<*7OfH!SDK)@>@?(WX4YoHchmi;9f&>b*-Yus(sASSOLvKC_N+y?lxI? zl#OSM5G~<><-#IuoI7od(@Bd|mMCUO(1nYHi`8% zm#TgUB2j8`L8#iSH|xrrcjq86L}KN$J;=Br!rohk`OJm1y_jB>BD3M$q-gnEdh1Co zjx1x{C9{E|xyB^{9gW|ssi*cG>f%oiU0FPI8=c1E8cn5_9e)TYf9!~uStuq*i71Wp z?Q#VpG2OOq3`Zhj0qJ9u&?X}46%lVMu24HM)G^21uvu)m&1(J=6h`%#Esx4Ph#=iiOTuoYp%-tb`xdxN;IBS!-*!~}D_=VCyFYaNy6!sokoI1~jKBFwA12Q`VY)%=YrUA* zRVKC}eX>zne=iETfVMJ_u-cuK_Ayux^Le4_r=>~}w6J0M?C1uw9gWxtMb*Rd>sjj_ zDWK=Euh=JK8H94^^8hh!vi z#&o{xMtz;M->;v3g#vzZd_|SpvIMp1P*c_YHaD%6kNAlde1FK@f9nq0&4D%2!8m*O z9~zf~`t%#VaFJTkrlzQ`>od#l>x04p6=NuBGc(vHYSIWDUUO^(nOj98qY_6DS!F5P z1GQM=x^^L8iAQ4(B;PZXeKEwgEVAb7wvMc(TF&uIe;=>ocGZ*H#VIPdP1`DQ0ufM| zYQE9|$rnf%4u279iS_i7Keo-KMb>WOJhHeOP-o_e;ma1^m`=Y8@L-hCYycA7PdZJmbA?+vn-O&c~m5 z80~dZeByyU(Od<_c}!4lhj76V$1YhT&DmKX_}cD#hJPhPfv6U{<7|QB7^8^#@NTkt zOI@B*%!!;AsqArM&ZNVTIhw1(=WfW;5Rr7DeZ$}fdd>6^J~I6<*M30jN`^kfKl>k1 zmreO!wS{jzSC_EwK1g)lu4FpG(42O{a05Y!9aJ2S>_=Z!i@mWTXEocgz@s(6=23zbus3-d3N(zR5qe`R%l*u>Orvw`dXU4O^9$Mz8IR*KqNBKBCkyrZUGSmJ`v zA$=yKHZv+2v99k_R!h>!N2^IJ43m$oG+7{+#f34%km?Y7-7W7b_07DAA&0UYt?S;E z|F9QyvzMzr2Ud>@vs*vP5GVhz`-Y0XUfRVUkX(1Die0jrT!=VWL;UkT5kxNq`M1x} z=YMVS2^oX9ulr;-{MqNZKf2OEFEa3Gf1ayee|XoY{JoYxI>bI___y<_ue-kJ`e6C; zwP|K^T)w^sRqcVaEG!;BcgpN-+iEUd2=)K}v%0I9?k-;hx-awU{IAdJ6D=TF>`lEqrc~aD=ZrIxL1zH7wkxf>b?rE z$Gvoo)j#?w{?b}z4_15YpS$DBp)P2+lDedj&Z#i`H8m=JWRDvno_4UA+6Fs2L4Uf$ z%FJGKwZN)I`rGOAus$TKS%Sm#c|WPffw-U~WC@L~p6JYg6)ad*J}h+I08Pi5=hW-( z>Xvkkqrx<~`K$iiThYCb`Q8umAMMQ=bRn7M8isg`hqDecCy5mar#JcTt^AiW!1_JVvE*BSgAT+zAjKK3s0T^rk#%$J&voy&7FQ%_bwD!# zVSI7|DnK!6IVM|M*>9U>E)EdruA}ib_3iZ~MR8482zyt$SMo6%m5;Ft zLhO7MR}8ZQYU2r3enQU+mp9pZ)%(}=cq9k^y6kmC^$quFEcCqx-JnV7`;@e<_ouCnEdYytv)V-~8`G&xQZ>I@5Kdk`Mjf!c`=RZCjWL zR@4$GFKyBHT7O^!T34G<3k(}_YB(tCY@Jhta*p-;V(JhZU5KdE9IJq~0ux|kqz-AC zGiC=BT1&dJJ9zBZq{nB^(`DcP!}~T@tk^|X@#DTz2&>q_`sclWe7x>L*{npGp#i^P zw5TWIRNeUD9xr&XQ<`kFFKX8F!~7Ta)cTBn;fQ$X6T9jjHGMYMyjhnd znZ#JUN(ULGOZ6BM$d!CN_iHs;ublqt zy|TQ=O@IHq*PmYV$@k2d(2wmVow^`n8XXL6*Bg80<3O&rfqeA|I3%$g3>$lEE94SY zxKwRX!C&n#CV`zMw&VuE#z8J`+N#gkSWZR%`0-N_(HMacX5tiF+Cu6$qCq(7q&?HR zTFtMao@dtiQ9SuWliuC+FE`#?q?+gWg`N7k)qlNbD`qWMgcCq;K~OJ<*U6O|3I_$V zJp_)^DBXj#o7_~rQ+I(uWE=$LtY8(=ezp=1RAdf9 z1zr{@2dQ-_j0d3^Aejd9Djr@7GTldy5#k|I)Z*e&Y)r71AqzmzfEIO$zGi>|^8k`M zH0KlwnR?>gAw9|L(7@}j zwQiCIFksGS)AMA7F%k*>q#Ii}J9xcNE9mDN*t%XZg88Qw^P&&_(d#t7`M+%K0AE=s z9%`{a_4k3t2tg)Nc&>My@@^6A63ed#k4W`mv~ho2gMWfD83V^7q5p97)sCw~>G zO~Xe^_ptY`;dm?qgM!M!H1GGub}=gmP(-k+ll(y&2*?SwG~Aw#PA zzCQ_E^V`-2a9>vp?(6oWW74|sbboFA_zdkIoxv9M`s}s@%!uJ?%?i$`5*{cvLM@Ya zk)ExVl>~8$CC5vdy+V>{oj_<~&Z*!oDAz{KZ2jl+y63YuUt<#*id%t7urGS<(s>h@ z_qj_GKN@rp{XC^eeB*i@FK_+#`sV|-95h0=p3UsxM~j;cvzC76W9qkFD1W`{f>u90 zG5x(eX!skpMfk&YLhU%JBn3gdQc|S#idaYpGh$7f(Che$DPCJawsG2cizKxmhv_n( z`A#5^S15!X=+`C18s_t6xKoHPJchssTHHBsTX#WZReWX4N}TWwaF44hbhJ)t`H=CEaW?q zv!&Zy<_Kb&kif@4=NODRDiQ!$X*m@^Ef+=VZAUd$?|HE;!bL2tCnWo06?8k>T;@*XXIqP$rm<)Os}TXHg_71L|2l zreIJ5OJQT;FWKLzuA{tKvi?&K_-F3E?(22~4>RE8GJ7~ThxI17IJ}ML4yg*4L+U~* zAeSp5t|3+i&bw?%0)NwY}anfX3hUfmw=b&0Y>4LO8J?rbdjZvuWrQpMC4n<24RcwijoVW36 znA^b}3-LMZpI?U_xJw`Z_TyOtyYcu%!EWe#8~nJ&B$mdoL4Wog&zZ_5eLvt$7dFi+ zXRhiR#5Fz^b+?c+3i)V@8#Y-It>YC0npcSQ>w7W3()3I1k0kDx?#;r6EW* zf>5Dx_87bqP;=ZWoqGkU=Q3Ym{rb9;0?!oh`zaDbx$yP;sC%TXTz-<}b4sk(N7A`s zVHEB|6>A+Vd_C77_~?1Pd4R}7TR|OZ^8TC2z}t^an18PzkM&Gc`_g9X9#?+P-^qUZ z9Qr(b(6If|RsNb0k_~=Q$Iw~)3NmP4onWFlL?mbQ)d`GO!8r*ShErg?Is!w^d;3b#?-%|7@~t5#1CHVH)1CL`YE_rxyvCr6)m!MZA+MT_M~WHnanU+Z z`_jUqmvWE@GJkF>mRg1BVdU0m?PfkQ)-0$}hNv@d7;|vEXR@}D5Mk2F&jd2fIk*(+ zq|ZZQ7%eLqa3T?%7%nZ0k~Nuy#Mv$<&4D4Mi&WAMvKSGu_N&LW6U1|p_gXe*8_uqQ z+>gxVbER`wt;HJZ&ioDZ_!K}cSQo$_uj_~gh{3Hdkbm@?*Ywwnud<&RSpGvZpzZ7Q z2lnhVKN%lyc5LWeauNUc3br{{+g{Vh|xw_M;-gdC4YOIhtF}u5D?0Ncw4# zrhl2+y#P_7bA@A(?($><#eSFB!g)z{O|1LwpBIfpP-asxootp97b97{u!FT^Xcd-U zPcNiGpQ~o(LK44x>lMNjDXH<_wnOr!d+Ywer{8Ndr5`^2drehyfB#ZP1#3Be5rKja z2?3xUXGTV*N9^f!9eNz^^S^%TC_P5&xqr%@+vv4|$GiUg)_<#Y=e<80{L(iD82Tp_ zJ-WqR_wuV(PftH8YCQc^)3`aV+Prez$$D?Kj|=}ZT0x+&?k~bE-+AaI7bQ1QAVfxyJ$Pl-Uqc6l=2y+gBjN7O>%Cx7m* zak|5zylSlgFTopMrN;?#`aT|^%O#VAuVO?yD69f|=?+DUf`2*BPcMVLj-Oy+XkFS_ z&$AM}y&6xvz1Yy(>-XH3z8~U`xpr;8rni2z`%Ij{meIYh;`gl0PE47uOAfUm^ueTYuD{V_XkZ#pq>qVO@Sn(dlqq;Fr-tWzOPZa-br` zB!2OOe(oE0DSF}h_x+XYIhDV4$$4)R@w`77b42Ph)0lBTdK`BW(Z1fohyJS}|Lr+( z^gRpZkC`Xkx6ciA9}1sDAExfTv?ysNi-R~hjd7;HJ=|4GkeOxaS-QKLjejo(Ah}qD zo%nQ$>gY5)gGz;+AG-+Rf!F-MpjVWN5!hYuQP1h_=jzGZIZgL4%z0Y(8lqbJD+>ud zYAJ}(QF%K!s1`}8n&+AtzvxA3p-$6WD1gSOX zRZ$E348?Me0cOAEYx0|RGFx#y%b}k^x@o=qlF+U)LC}~Qjf?Xdzvb#eX<$K6r5=tQ z+RxEKN7xJW(CZhgz8?||dir+gLH?!p(>488$T*Zoxw3Kum2?#A>eR;Yr#- zE-}~nGwQ#8m|X*7g(bz`^91j+t?oHA8sbnNi-z;8Z5}Q1mk*W*83L_;mnxPB8-KQL z@fRP}82GCDU=T~0QUR!nK$SX9ll}4B?&l?83-&YgEao0SKlg!MgLsMLZGKGg*IXc# zt^fJj+mnT91v%B2>5)dE`183yV^yzUZCD<#wU^fcFD{;?HQf4J0NZ*?BlgorU32^n z!(7aN?W6AdWQJJR_eBi7GQ+HbzkmAUmx5(_<^TvfXZdVm$83`KIp*(ved-VhHmsRB zXgQ5z)W@ zwV+}?J}yV8F)jZ-rqrAokvL&YFfiR=vI&5 z4;~2CIDY0KV($Osc{JnTVy_EAE6UT6PX`O7&jU1*Ej8S|#8*ZWFZ=C%jJ&g|7uVRRd2bfIZ-A8;?3G9HnDTuvX5F^|C z=&W?p6K^;wEp9+U-u88hW`D&xOWM?Il>h3T+a7dN&n@BZ%A<&VhnzDH4&VY}z*XXl znLQLlZrtIa2Z`*>zAYbmZwmY9oKfTFR68eHAMs~0!`{T&4M92kF}|;=doYP0{J^Zq zsF2U2LT*Nv?wANQe{^zLm#Q(nI?GEQvh8}C=Nk=2)crg8xZYcw)el}8=iW`O($w~GkJx$@ zha&xhny71P{gM3~BLY|hQixY7y&Z27?Z-Hf68rUK+c z|KdY6M)XmZ z{M|*o$2oh#Jl-u21=e3ynonlY;53P?3Cs_@zk6-i*OG0e<)7u0E=(_g51T6cbUVHu znl!v3@6UC0PJS>)<&FMO-QsOJ-uIu{B6n>LbMfN%(fGU3K*RnlVt1$xqpy!JnHGQM z19{gWF^36-BYV;LCsmyKM212uQW*R4+$flXnSi9As!wuv;8g1g)p-aiwa2A?C(3cV zXR)@<8TMHz)pmq@UB)xVbF|?)pVXsH!(cB734<&ul^be9K_?N@sF9gR!99OMt(hS1 z@(6~pq(8yDd5(F+*~;5G9_9tfx!HeryJytZ;BR&OFZl0U{Ug-q8vaoWZ#cKN_{Fvs z1wZ@rrM@-LL}Gng$uZH=VKLu3DrJm^Z(~~1R4fOR%;m4g5)GVU2@9OLuxLn6K>Tj>(o6YsQ2S_S-LY*OEwdZ>450G^6_zynxCvI_s z%9CcSz7yk-Bz7Y&cEET{*<*hdW~klf@glDv_oT0?sXDz4lUh4OSphNHoeUJ7;{x~J zHt!G~>dJoVR=2R@??r$3^R(m5F8NwwkydZA^Wa%~%w&SULD3Rb3}hTugN%;TM^2& z+^LkAYQ_j(PoSwiu0)xAgF2pMokVWrB|?onW=!ZtrqQQ$itKe_oEI+!k5&HrdZ!RK zuc$Fok>~?8hOpS*{vYqRd@a1+7lY6Ha=&|5ADjE-TXo^Zxfi_Jb&ZvN`^>l5zwuXk zos`Kb(A6@ASHshP@KpjiikLT>l2Nrd8%d4j5?y_!x%eWV9edWq|Y~s|dKm6H$Zngdq%adO`U@l|3CB?7!{R2w*m3$$>5`-%S|%L$8fqgIG0&Yo%cO zNl6Xmvpb+Br+yGz*7QlfHgm2$6Rf5;y8ZQcT|s`d&Mx%_ui+MD zKU}Ws8KSHU&P-ADOu?Q};tK~d&co{%gaoo|66U!_<(d@o^Y7o6jvGlL&)K_+JVEi>CJ~z9JjJIt8rmaA36{vUr*GC3fw=m%Ey1jA&0f~ER7V%qhxq4>+I;C zU?al{WpP#z!I+osYy8O_S(HUXRIWj@mDs560c! z)M|GB-RpnLu~*xi#_FOCtVKN{5A<&|j9>KL)yfNq4rFLNc7laFNH!C#$yFz~@U!t> z{VU%7R__?^i*gbfc}D!^Rqv|4?qRg6w>I;WH}S-?@y?2ok)l*P5LXk@T22|{p%?lQ0~_@wr-+&0y5)6Fu*H6m=pTpp ze#C>{HGI=jVtwu6Id;LiW4{zN3oe|XU%I?)KGvSS{`sf={zva{aO1<&{MXzg@$V7@ zr2~JLOdZfq&2O-dmGD?08GqhLsVh6CNt_uunIHyUS#v?`8Bw_+Naw3%97c&xqvRz} zqmdxhaD4I;%tvXIA0^MvP}Gc}mb>z&Bt)$jpAi^2s+vkrz8o8s&O@oD`|N}3#HEJE zZaGp!OHK(dz4O5VJzDvNU+v?3#T|bsvXxUSAHqAo)9tcRx9eLI2A;&Ka-*Bq z@kJM+K+lES_`bzo${($k{f+K#i*vj7Iez(C#ee9aU*6x}_Ue9VX;`jlSB?ydIl-Nf_rKE-57e&GlmOIhPWst2Bl~i>92BJP1THAkx zkI+9Y$4jneL}!eN(}ZbROa?YL=jHpvyj4vcr4NZP6$NFUBO-+?Ao5FlHje$ZU#`ov zPU^)uS-Z70Lwzc+>Ioy(894=_^d}JVYNA!Z!8%Tfzk&~ofYM*i8J0vTLQFD2oF8bF zI4a2r@OMs(p=9m(+RT_T!_X)~Y94A<^7LEgmqa*Dd9&A}~4mM_vITDyN8ngZi*xbvEOK-=(D|1ktew zk6Sv#^K^{j`4fl2GTafG)`u{Nlwhi&PVp%~Vg89VDie_!b{;wEnBn=xR|IkaMZRK% zFDFSb@mHyEtWh;CKXqw7>t%m2YT}S+Hu{3kjGPrw$nOT4w0!yBwb38Hw?FryGPiDe z+rz6K!X+LL)#9cubK@I>aS5*FPBU!F+rJNv?}dmT)w_r$io?BJzgn*z`P^`mPiM56 z@X(htZHTM0Y{@epYG4Es&tn;-f_$gx4(5mzEXrU-M-T6K&RO`Za!G&QMq5FALdrnO zia)xXAaSR%1WW!kBaQxELD zAKDul3raN)R>0j^p}#$TcoRl(ii7~vW8T_=7x$@ak_6`Bz%Xw)W%&j&O^i$+%zqeD zx$um}Ib^$KwRFBshrATqiq8@=U0MVB}bW{^Y)jJz`Wng`zjB^VB2BGpK4x4C>yaUbZB! zdvIBFn9R8O0EO5{Ujg;qhr{dIsx$7N)m5+hNAK{eH+Pvwmot2AlLl$uA5VP@0?hXP8Jx~f$}Q?{0r1EAyGc2 zDod$K9HMlDVRzKeGrI0fs=CjpA`X(*niau3B45b zy&%b83??VU__IJAs8is)N_<}(a6e%&SPTw&D%EkO!V!Pa@;t}0GUTt$X_WKOa;fnh zsW1*5DO$>RH{?016Z|fb$dpIU)*Y?wlS^PjjIUHU68GWgFsZfA3$t}0TM>#nDU$IT zeGn{0!{p$aV=T122r4`)q<;hF!;lf{bn)qXG-?Do1Fx~DZgEWw=2>U%ML+OXzoo@o zUuu{M_v$3=zfOAD>v2x>Va~O3NEqJD!Jf`t^t#9xud^{J{0EO<@18FikqctPDi8f> z^L=-rEsUBnr4}y&^D;?T5yWAcj}ZU|@YtUjS!2BsJr{V3wQtXeZGYY)EFX3nMpy20 zH&6CZ^H#gI=I#7ZM~PUz#I;hK?_d-ox^hUkdq_f#8XXf>uFIJ|5Ju#j0v{`f?;$fj ztCI!?xaK2D!RP2Th2Y`Dl`-Pkg&O%R3OqJ@DEsfIHzQskwf!i^{7uloR7To+?(+$Y;upzxw45!zY9(vD@E&j}o<2bVRc2tNXj8<%#c2s3{f zdqN@MJQWX)@)gxN7mI=7T8a4boB^M&ak!C+%pQvVyKJ5{gHcL!!ug%2gg*H3U86hc zba?k~!YoR#l>|VV@7mSn#fjf_F?FYb+3|U5^xaJ^^+423Z|B~L<9iPI(Tn}zi)lWB z+HaqGsnoj?$$Q}!vk)9wWD$W(@VkFBB+kEP zTs(u^AghW}h#8&*wHLhd84&>(XqVL;_5tXa~c)ij`xl{ zI-ti?Y3?lELatDo^*Hs0m%OM5ApvHW-lzx~e8 zZfjS2ponivu6aSXw^}zC+u;S<1}n>R;XYp%#g}t<7NK=2$7O|H>JHIy+0QfurM3(* zk7zYVEU}FJF*l#C8U|AilhiTVqgBEd_0oFZ4(K*gTMA~%l~0V38uvbaO6O6C9>hKo ze}G&nyB|Bm;~H_BSus^@{~3Miaor37?wfVP@)FdIrp!4b4Ibksu4{Y3*Lc3u!)oz9 zmpHvyH!#W^*jW8ngZ>|uqL7oxz>t6p9-Slvml+O4<}md%U0Nw6I*aJmFNl#;Xys6q z`O>)6q}V*|?TkRp18NQ~sBi|VTCR$tfA4`tJ+UR|ESQ#@ah1)Y!M#5-_Lu--!F&$& z&b#c>64X4Zcywj3!FfK8(3ikOw zkKsI2406egJzcn2_FTL>b+YykW%W+Xc)>!+AfG+icn;P6Yg*|gjwghQAt`lcf7Ln) zrJ$;EWPI?LK1P3?3zs@N@EQ6?qyH#F?>hc#-v*UI!&Sc3D*xf@IafbrjK#Ly{@1MS z)g93jr_$&lWrY+rJu5x;&bQ;L?HPr9lDvjO>CEB!aF&=~78%ELnNdF!TcAtVcA=0y zj%)wBp7p=;@DhZy}Zh{9M+llLNU2#$&9`tg~`H~Xd z2*8m`?TM8BmPG&I+`<}son1V;g^U}HYNubPjAniaUX4?akQ-OPlP=Y8aGN37d+?%+7dOJ;FbqLBG$oliqpdkwR@=e#S8-IF1;$ za?CGJeD`y}EQ-^vrKP z*j=L3UWALD*w`^>i!^zk$=KF9G;y&|nA_y#gpJ0qIub{jNHo1{6*sjVtic&?ydzwI z&Lky=yC_x?)J;ASRv>@n6I~A1dcSv?%eK?;DcT%jx9Fwn4aO&lf1fXeN~RQw@z(eb zH1R2)fN?(|tbAVNE+{>WSiYjj(JIDCq$O)~B~;G2N@zPzINU+I-X&b)?MvOX;qrD^ zgcra$)a+|~=T&?Ts{?sAhyH}(_c4s?=KILX;8CIoQbb z>h$B2CDb)2n9m&^=id}Pi|Zt!hELr3Fm3iM!*hvTW20ZG;hrCxkm}0evhZ9rawz1I zDwUNf#<%d&Kac1E@#&3+QBry}-?zFYSkuG0X;+Uaf4GuAxTXtE?AF`3J4>6G7ym}t z`mwk(ZjNK`#2M6fMH}swTs8ie%IDtJK2VL?0+P9P61-W`VPJ`91RL~fA=(;`nfV4K zr3rKTV$5-hbvs)5{o)mS1Y>S6(c>D7uO;eXgH5qe&P*kI>z6P@!#lk-*8{|xRw;jNu-2F z%W8xEti-6W8RK|vt?C55oSA#5-~3W}6f7ZhCTPh&=f`IS%4bCR6=)lCll2(qScm`siF})=__mjfng;@jHC6`e@!z<=#2WV(3+ATj z6J2~jU3t6b;+gcPM_?1XT;>z%(eTA}f6kS%tJB#ch*eV$!+flnU#hE_pZG8^;s_^+ zelIG3Be^rjIS&&_z~WL@lI`BVn;Nt7N#KYf_wT^X}e$_uBZp%V8tW1W;4&0I1n9A&wnGv$s^f7dDD zL}AXx%OX9@h?Xu3;aig9m=;GpsUaXnI&ARjNR8e&C|m1u(s56+L z;!v}b)B1+K^uH5&fJq;Cb$RY*zvz0UKl#k-@A^f4y1@bc`0QQc%hnGd?B@6lHl>;W zuU;2#aXcS84JEEbM!@HIACUHee*pF%vWb|>6>BgZb~PhD`m;I&Vw$NuB14z{x=!0K z9y2jDjRprdV&tSzQW8+PG{=ccj~L@-)J&cb=S;Ozhek3&vJmx${o41+*`q0;N_l~G z%mlHe465>u>i4?{eqReEcp{V=&|2^?)Wfq!N1T+pp#8xEt&ev^e^*Cgjam{hxmDtG{rnfSNDY^ACF>vhyv+`xRe7Y!;wU zlO=E!8@(||B!kh(t#eI?SH)`dMxC|AqasXfaD+2@2-I;o7lh9(_;z5F5t9OhJLVM} zO?Xns?Za4eiJ z_b%9~uJ^14xW-{=`vMLW^EqEo|bw82K(XIBgh$rgG*~QrdqoEbliw=w8|3L zPqb>hr6+a%F=mJbR07KUE0vIYks45a;4K$~6Ypfxz8(dne;B#UBpkH*esy2v+sR*` zel9f@7m!T|;XY}+3RLXN-B)&(S(7*kn6d&ElAd7MJ~+H>GPXXKhC~1Puo$1>|`Wql_3_W9GZ<`ka^TFVoNK%8m$ExRN)TG&a zLsIKW?2HmRiwDTrf7&R?cQv?0vRp4D=f(2lrC1;Kf5c3Bb~$QeY!%Z6kE9qMu3cA| z&b$3cK%4dKm1nf!n)t2=(T`2d3WvY7JVn-j*W(wcj<@xwKYjC@>smWsF!Jrba`XD> z#}};8NkLC!i_GhxxD#orZR}H~i_G_)hld5%>1nOdQ>O5m9oi~l7!sljaEWv zgp1GpP91Q~(_8%?H5=u~({L2}WI=qR!{EyL_U9kXi{M7P=%Qnkwwet4UwTCE^Bw3a z;pRZD+C{5*C5&8LbQx~+h{)kV2|-?}H+8vh)G@;E+<)bfCL?__`GO{8Xgx?svmbOi zVdo4W_dWEe0p+}%i1fI|pC!>SSqUr#ZSHsd!hG>UPMMnSTHZy!Pqf>IFRMvW`RStb zAtS8k;a(Tuhlc1MsH4-kA`r?zz`nOJlPN2BIQh2o{%hi~5No5;eOo7~zy)^zYYbuW z9?<{crhkXi?1vZ48q8~&{yp~doemdLUVA*v^OmhP=%vro#!84&Yn}z9GKI)`WU6vS zXw4xWZQMKUh{B{qSJ5P{QYh?KSO1IaLM!DbARo40BFT-hrX0UobAL{%MT7rCbE1t+h6be}8V%l|`0XXr zX=+HzE2Seyb^>)^Vi*pyL9WDHJ}K9Ky&tTyRLnD2R}}Vd)nX4b*d8BOOFrcN$JMJ? z&R<2=*7JxVeeFI&+k^Z2&(nMi^}1TW1`AHoa+Z*@r`;uJ?_nwK4Kwm_I(!X-qRn69 z?|&yBo(u~VyxyFX33|F>{TFrrf8$gy_3s>-y+%);SiwpEC1;fIkSOuKQhev>#6^t2 zYY1bacpN&41cxd`o^%lU0Z~5bbHgb5u;(A16CUQoRGbVmf`9wot=Ecm=~FhE=NCPtTOWCvze4D|FRIVA zjP#Z(y>RsIB@Fp+R#kx@=y`pJnRyz_$tWh=CMZism*4C{Gh(b<4_h3F;Mw9ZO$YA{`>-+JnszQkDceo?d zpn&nXT!%{KBv1o4)Fb6vmZBT$g@1;=otOiSOaF_9;UR6W8N+iPyoA17hT5HM*BeUm z7n-J@lj3N&DRvIzU%?PuF@UG354{Ccl-2h!1=(YjHHC9>5buhra^#9R{}S2{S4PvB zzE`*&0<87DC-kJ6yksY!i(60G9p=_>@k&s6+MdBSCB|RKu<^&DZ_s}_7Jq>GE}l@s0mxnwt*a>W;*M#zZ?h^dH+Iy6N- zrh3uq#p^1FDg|0A?0Z9LQKqE_I`{gyaU0Qf<@Q!b?==Z6dhKWSp?~`x4Nk4aiEiZr z;w-wZ7oE4h)@A;x6b~I;5qh2np;lvB{=9wFnTpn@b#O}D8ho!dO8@r3`;rCeO~c^a+coNlQe<98G9 zcgS5JP25+@l!7F58CEa+(0otWndK?8jW#xm2fHi*PTA17%zt|H2ua%SS*5N|lRI*Z z3F-Pt6Xn@(%$q)Z$1h4R8fRC&=#PKLMT^U7^>ni<5Bs9Wd+qma{nMFdYoe-YKx|2& zN&+yHRk@L2^*#p8yn6I9{>wG?QnJFELt6*=K?)8>*+VPC%>Mp-G3Q$iB)duAC1?=t0KoRtPCMB zY^XkHp))Cz;S+IuV@0MXDj`BuOR7dI*m94}y(x#}Jl`7a7NfVwATHl3H+8(P+Gmoi!x=eD^wYZNU`5?p|JH}dKy^pa}F|1L6_Gi&(H@21$IUTq( zhYc%C>KJQ~ASP5N(j~}~&o|@9zMFEcVV=*$w`F?8F>=T~nFDtws0COj4%bA^XKHt)j15_}@$e5mtBZ|paEiD5$F|3F@p=o2i@)Dr z`98mOzWpCO$&bh65Kh@*HBc)jUO&fC@+#vXMDI#ac|bq~D{J)PY?td<;%|u=IFl2f z&^j6O^ohBovKIV!@Je}36z2BaWlC{2|9_mh#o?T>0h1;VCMkpzEXwF$wuHBDsjH73QzmNa9a__8q67)UBmcge@LGGczTY>POId)xf4-y6>aWd&#d0nXV2Z2h9GABcrO zO!_=0OT))l8^t~D=QZqcTT6j?T0MH3Y;#*hK$dkj@5eyCv9_W!TpNrSEtlrM2o-Jl9h30OFA#{lQ8Y3w!m68zt}lRz12(xT47J7#TnB!S@(J ztqOhw<)_pb4@!QXo{izqBd?Ij{&yDc359;%+~t8l+SDK_rE8-2hCRy z9z;~=2nIp&jyyWBJfa2e15uNAyscJy?p0<+$yu9|VCn};D7KV-^Cvi!zSzhW&Ai9Z;6V%QGa^sf9Dwqk3QGg zJhcWRUc*Bg4;Dg8?GRZ^%K^mUk%F?6t2uMF?*LywpuZ0?^d~>}AR{-r7ppdZ(EW!a zbCov%YGJ&K`_}_(v(rEING`ZYzi#&l8Fkp?S2`*bPvyzj&V$W)u{wQ%PI~a4Yu-pd8SvLr(gW<{M_ANaq$S_O|A0gylmcI>u;G55&6d{ z`TD}=ef&^Tgr2jivw#$1Z?0agBla%7A79hrDJrPcL5SF}EOt)n;4^;j7;fY5!u(Z! zqgTJ^KbI=oBmX*zwpxoYJPVO^Yc+Zee8?l1l0XJx%0EHG4`tF_210FTGVaNXbRjG zVM0F*Vo-)Mi>fbIF9&I2hp0o-^=D8|u$_~IeVLO@f2uF5uHOr6Fn7TFZ0bkf+RK-X z#0VaLcMxOlc=ainH(GG*dfc|0+O9{X$Nm@^FT452%jqZ#$G-1`7Y*jg+c}{fZMBQo zhoF8z&i4YnA}W%k5!V3^GYJS9HeT-J)r&9vDOgX-xqzQqkW#6D1*u^v!ZUQWphfSo zfLUlEUQ$LRSv$1$Q>q*#ilvNzdpB{%2;&ZaSD_C&AWtgf=fB1kB;sKgGx31^C5|dK zH6V6(ia16$%0B{@J-J#$qIm9o_a6ZcFc%x^p@{`Udp|xW!&zaIw8+wa%9n&FGFAK@ z*DDLL&g4D~m92!{F2-6DR=~We18PFA>y#WD{lo_U8*;?r{Sl*f_=Vyouf=h)G(&Mwcxf3?EINsJJvA;Bbwqa)wrPkrjxI<3-)iZuqU(Q3Ryhp4tlhA$o zssVAAQjlbfzGJGcmi=`W5uK^-vr=bs_kd+i%>Opvk6^Ds7wNF?xJ#fBgK&k><-!4u4V(nt3$HF${&>M5^^;rEKEpP4Dh>@d0@np?|;&zyk5y=o8cWLj9oudz=48 z*`@d5N(H*{&kOUpnhdpw{YkZdsv1A7kEK-Ze3`zJio%EGzss+WbgMj2UcbN1h!`Ix zuo)6i-~W6+73I4-Pb}fNcko^*F)sul`Qv*_tW!S%PcCu8`4`7W-)N0!e$iiV@L43v zFBq9&a7tbq{BNBvuAFS`?@4j#Bd!0__|edMx?frLa&mrqv%VjetAbB|rRBL>;D#t} zrt{s~O68e6`IGngK6I+5!;64%?|ti`-{<=hCSI9Lwhxwqau-`gMkGVkaA~Hm+h{kh z7LK)Cz3g_*)<3%VIV-2C)nM?V>gV34vkwV@@2Sb#y!Xzq8F)639DO0q|6(4K+uj+* zf5JfWBT>fwdTjkU^xzVZx8dA z?HfCvd%jX?GFXqp4^KArghmJI*E-z-9sS?qUUohzc)sCS>W=4Fc7A6TeL9fjOz)jH z^km|zVZ_PcHC^c?=yCt!ZJNG64CCGVckgQ(!t7P{g46z=@1;0@g&*(3v>Nv?Hy-(C z{-h%NZPwv|Fy4Z_g?O|jDtF2<53f2@5r2XD>V=XJvwY#^(0Nd~@1Sf-Ldl;t38i69 z%P>pGavXxH7&Uo9VH{ODNv^t}`v&)hIOko1|2!uJ)~RHiG@3uKw|!3^NWaK4N4W=3 zhgU|brMREQwA7P->fWC_s`VEGtdAic(g1kv+0U-tT1~w{4yW2b-}q{gJ7q-|ObRpl z&pI*2=exJCS_;E!h&l2)CcNRacX3|(ya?iEJ~hN$pXch=C0pHI)a@SBr!k&!wso4$*WDa~+ z4pT<-)c7rbJ6Xe}45>OOJkQ54&frO5mSA`5>JNRi>R{yds*Sdv^p8bAuXVTPc{+Gr zml^gih8=Ep(CAB%EWPwFnz2KDmKG;|=?Arby;ls58V09JynERD>7COP}TK}3&e+i?Ux&SWF9DD`f#cE3c{gC9M+h;1T^{Q^pN*-VI zX|hF7s&+1!1o3<)k2M$uw5#v!6(^aIPc%$CqT=3rWql3AG6kK@DQXibpS-X6Db^w* z#uq|=Z$~@qMb5|deDfWgfs=E5%)q!}QhYx6F(<+_x(&rkXnogO5-0u+&PR(dC;!Cz ziN5<=m?i8JaY59F;Ck-=wN}gH|L%44Tllkf`!{-ef7Q)9=YP@7gWUh?YtPkRU+eS% z-*hY>1@r}3V}AFy^S4a?-D`i&bK@R-DzEo{<()?4fkF9m5BYcR#I5W1+RCkr)m8J) zFa1B(iH{V#9T^|k@@5?PKa9ga)zbgzbshap4gL9Vp1X1Se=|Oe|3$NK+w1?U*N`X3 zXy4@M5Ag*gYjGUS8JC|C_lmjHwZ?Fo^)LsZU~@25Bx?P__E@)Bem+lODa`bNp&zGz z$y2V|^MG$gF2s$0U5K%JGhW6I|G;yTS$Fn4J7ysu?QaUnZN$dq6*=i2jCPY>_>(>b z_Y-j|498(B7d-B2PbdG?t6ksI=Vi0E*IdAtz3e-qt|pX(@QC`95cbiF9`Cz>`a+kZ z_!o?8$PJ%(+6cZGEx2A}peA3=rk-$roppYa?ilhz7blP-VTSA!l(%*A;1|aBcg?Yl z%`?QMpdRDhZ2Y{KG5VpwJ7+S<@*$)NUE>|WwoYbtHe81Wf@DcjO6G4cp#{bjT0g*(W@z_?yb@8OsMHx0}2uB$mm{ECCQ)R9~M zi-))v#5=|7XQ6#^%14|M*>I=!np5%3_lr84qek!B$M_8M=!BSHiu$IpMM9GW6c$fw zP!7R*VCp~ZN`DI~^ZVZ)y$9fbjY~{(E)+JN3lR|0*28Q5H?{37mWb;vRpX^Kt24<6 z>Z~eKE7Yi1^b}OoQQf{CcM# zUN*f~BUfM5+CAw%cg z^S6NKPc-B_GJ2-AqlDB_f|io&JegT?CJSDG;nVfW{^Dvq3AZVD_2@1vAaE)#n+Sd% zi6GgiinRC!w|{Sc{1M`%#+pojaH8l_ajVyR<3n&AzwF27pus~nnw1MzH+~(9>vL$e zAP@)AapJPQx3>l%IVKtNU8jVnh*egiq(2LNJx<|$68awYpZUppOXxxKv*p8w9x32r z%&U>l^qQ)}Ca~=Nb98*=wcVdXB?3si@a#)?OKK|awqaV)Hzu?U7MnJti~ zE)L5>Py+UUcYVp0Z-tRT%Ns&%zzN+bBR%lPjrV{O4{z$g$@|(S>ZqUJ5<%WMJu81P zO3qqs^%yz{nmi&CXf2T>BEJb3$_3)r2A!gv8SCdN-&I0%vrN5t@{ z|27yAHFt5{NL)7qTsNe_rru(xx;cdPfM$L8+us_0{{P1sOUXrB?gr-ui5G$XG)d|i zYDDND_qVS0Q2Qcu{QbD{7jj?8zgcTS#mBAK-p`|Rt_0KXut**<82RCxY(kyPGj>sx$MoT8O6%b>9*(5kUtaDVl@ZlKtk6fVf2iC zVFgYB5hqhTIjZ6}7dJV2*q{dwiGza?@Agwh$jnjGHvv_fMP@YO({` z_%36gru$BCo@v?evs?cCWo*>8t@-4yP42FLxg||DgCaQv=e!T4qztvul^AYR;x{_w zg_@B@vzoJeBHP!{FC!Y+OT)=>sObRYjOw`82Yxp-_&4I7-aG*6p`yRZmk!=3NS}bS zgO%d&ZTEIX_UtcLb3f+I`7`T}Hn6biK$ACclO|4>Xv83P^QqJKyUwrHkEe_2la?!& zkkSYrf8y{nKDXTEans|vOAtqbTxj%IF_t76pHD&=SezH&P?Jy!Rmm#skxGo}95B+Q zaJ*c1`?KKTuip_+Ktds7pceu{3xUy|)c2$`Si7(%0&*KnCJ|SHJd7zj3gjH^l=i5q zgi5gQ$6d(}j{;CfM6y!%JpQ6 zocYQF)Z{p2I|7ck)1y$%9ufBxl2g;)=wlwU(U`~NSA8_7RdlNt7sPFEAJWl8_zbu< z2~|OnIk51H1EFg2#o_4nx19F3*_eeqp)b#Ge5&tWVvC9ncTEg$cu~mt_9C0#;Pw_m zN8M<#6G?!seJ`}f$S0;yFEfWM8B`G?Gw2O^!#O{$z*vfh{pf+PH>(&$Tw7j~TeLWL z=d4h7$3orW|3)vf@C$Xm!A)|nm-N#JD1Yx799NRldvIR(iQfs-sX^@ooy6-ji5g#P z)LAeIO&3W_kXxY-4nH5BoekdqY~A6x{wGb!i{2Gt9f@v)J9ht-#?}9z|3&Dgmwn%t zt2(>tISO~3F@2w{YlS|sad(du2j>OO$NxQ zs6A>U{twUYqI=^qKwXN%-|ODo;1s1wigu6eFfeta0d&PJ;`@Kq1M2q7j=KuaTJkUx z1P+_AI-vH+7z!Y%Zh4vg*!>TBI9x$`#SGT{kLTW}(H~MB1SvAXl=fnIyr0%~qnje; zUsj;x1}7k>EDTPg2BUJRk<)s#Vt+UW+9xiRa=lw>1f_~gWc*t;Tpp_FN1kbEhRo!Y2D+#4O%XPsG9cQo_o6=JS9EC|EeXk z{_HT*gqHp@`c;AAQ>VaZ)?usF@jIxci#~;uXWLaQ`y4Wl7Jq!?H#rHO z-~EU-F$$T1WB={^`k(Z4{>?m#_zr(=VxC5Srv1G?=I>uMncjR7sl%IVj>y?XJ+Xj7 zHTZOnX{)#N;ICp?QKbVCW)4w@Vr;g(&ptlGe={HX6h0HOGWO?Gq1K0_PMo~4?}vFa zmVY<*ixGm12`Tv9q`~m~7k}fRs(MB^>RzjoS8Ikbg3%r!rMK8TL>bje)8M7{Xb|K^Aa@)TRhAfF<7ec|5QAOaPYq3q-xu;?B zme=?14&g4<>U(1N+vGIDUS%kp<#dT2M&`gLr_uMsAkN8m$?V?h?|nwik+rY;6F7`g zswHJ%785;rI%|0XtYVq%G z8~H(kuR2U! z-K6&a;Kcla{iJy;f^F}fjb4aBU4D%`-vV*0oV}&96mzoQ_kXsnbtL1%r{dN#{6F^I ztk-#6SrGdxK|giF2_$RqaQh{s=GmGpy#yL;&5|`)LnHq^puN7e_w94;=?)rsi-Cou zautik8c13siy8Oe01g5EN>!)Jd63u-cf5#;%Wye9TYh<2!9V6r=sx#s;OPAwTK~QO zc@O^Y_fP{74u81Dq4aTRkI^b=oT-Uerm;*aIcOg+SadWewpg>`+2=Nb<#R?G{RKeb zz~_3g&%P_vQ)Hn0tPD^F3w}Q~Ks$3XV59ThZN?oyLNWfLl3+_%9~dS$P@HU95fmz} zSnLJfDqdQA03RrCdt|`3Cf>|Pi{59LppY4jlP;jDnSUTr*zA=u1E;dtq%$rg1myG*zYS>-7y>d`;JY}{y8*}c(>T|K@%F1>udF49SenDmDR@1pd zBjvElwJclU?a_Nef8(E(@8sc;pUt~wr8s}S1TfEeLBWF)tgb>Jh1dLLqRfY9YbpeC zj4yB*M1N*X)_h+?D@&c6(8aiWXx0gc@8lWg*dQ@omT$}?*-KtAq9{Fr0v3`lm1A*d z6iMonf-JO9!l^PVjH?W#$}Q|k*2mf?d`*5(O;E(OJi85T|NH+}CSm2>ZA1HkDbVZ+ zrskBedo5c_C~Y!W+k93zFo^}JtJyMp+o@3J?0@ZT7^gveNlW&o*~$9cMO!P)E!ve_ zn}VHn^i8{FxKY|@|LL*YgIRADf+!P9rz>(Rv(ykxs&q=-FfKI5)}#?h^ip=vXvWit z9ZO@c&QUq1#$~)P`zqh$%Z1qS1dZ)YFq+tSkecVnL?CL_#R6THTInumOaB-!F z*?)Qr$!VE+T-ls6It%EW&DlrGOnH~{NGr7XrMfp(ZV7f3R@NYkfJ;q_)`9ATo^5-TqpFGQhpZTQzu0T4KHd51fR{FMdw{O&wkslZva6AE=gpz7Cg9=2 zRiOoHt^wfZeoho0f@^PV`egV02*8Csfq#GYVXxHtj5B~j>&o9C%6@Stz)Igo7yrTl zz)>+%=&TB` zGYJFig}E(Y0_6p`0KpZ47X(ZH9c+GJ@P$9+Km6Ed2D2}J{Qv~Ohzo>2a1EG@)qgV3 zf*qCDH^3y!6@V5@CXg3HLh0sn1ji;2OxXq?V{;Ru+)l1T=1d2IYq{k>Whj2fkXEF#8dFq zAHriu-W$?CSAFCC6OSnVDfYL1Q?&v6m;Y3z`r-famp7vS!iM?B9)H{?em?s66Tkl) z{*mXmz2H|q;qUmr!SBnT{s6%Yf~^eIAN-5@kbSssy#C@jcF(@;VU|ySgY*#~r;*GL z{INf@N06cT)g*|aeId^aSWvzs$o0+$vz2#;5=K=tDr;+do;M*aX+AicCR40ZO1KU_e14W`IS%g+BDsZ%OoI zTufl>)8E*cVdYhybI=CJz{X+*fDt$+kVUIr()ID#-JeLzkbiwv6o1CC58KaFj{p4s zY-mI;kJwKR11&(+3ur2MNBxKbK)0aO!3!8ohC+A(uuotF-~hN4cp9TGstCrBAu=@u zw8=KWhyhKp9WY983fSHTf&{vZXFw=HJd}U4G$huU6BCzFJYR2NR zDXi2bMOi4J)VoE`sDdbHGo<;SH7w(u<-+v zkutY{o1{v)RG|B3r2-VvCe>1r6gNf&Ql%A^ElW~`7cL{`QezfTA)lm9D)K@xq!(9o zh0;l*S$}MdI!kY^WEicGW38TT|0*Y1ecr-MZt(WRJdZNA^tY{lQw)n?7*=8j6u3=c^z&kl6m9 zq&UkBc%j5LOAqi!iDDK`7(-I;XE7aQlDa>$>3`ajEBl1{g(2VK<9t2W?f$y<(oj790VqWhn;`I)tYZVaI(?6zRBi)jpIQsuT=)i%> zFnb6GJ$J8MQi0;~d4|51zWkH`j@ihDh{vKgAP)FC^sRXTg}<1x^CH%t@(-I3C=S39eIJ}N9td*i`&RD904m2il+FOSd|o5D zLlyW)m%dFb&|WgjW){eJdB`ve#D8_uSAi(u%EB-@BW&Dwi2vXx%Wzyv|TeE5zW%zQsufUjqk`fVpuY#U=z=t^%!DTL2n z@gM(ERrMcZfn}CV;@oTm+<^3Ko&hKOAhUw53~X3QD#*Sz1M$d0q~A<9L*RQAva9I} zUN{1+mI$8vBLi#yYn~%xA%8yL!{=8=%>p*J3|xytZC7JpeBz(cnQ_520CXP7A#7Ui z+g+~4;HIh^11f(B1)fu5V5*<`K{kRbYY)h0vTOvG(yW_><8w zADlJj2+t8;JkM+^fHLcO1v<6Hjwn0T04(L$EPyi>P?%r8Fp5M73&(W1bOtc6|ALYKBAbq19*pdNzxQKsGw}y%zZj^1`A6SD zbkF)QVxS033F@cC$&l5%ELWgQD-6TU$DatAdAcU9#eZ;#Um zvvD*fPuYk?u(uVgAAF>$9QVbK<7m1cyvS@T%s=>4RXOpCr+>!L%)a<-TN&$vA5=#B z%l>B|1l;8MjFzbbfOl)mB^0;JGD;h>aKcYq!Z5RsRv>L)9s$38`WTy7RC_5=*505& zIyDx#fvU%6rNo>TaF=4q=0i0GnF^SN-R2blnfLf&;R>CRIDcUaqz}AAZ)(f}or>V| z$@*MeWC2cQihqV#d5{gGG!DWhBVM_xazJ9nX=J2vG`S45PujK>6aclaQdN%q;@xpH z%@1BKwiVhRe5*ojZ1N%d z9eV)`R4^+AH;pWsv0sLfxuE`Q{q$cO_}6~M=()jYpMSAj;*v~KfngE`ipfG!Q7jjW zDZub!D*}{(lf@tVu+rarkTYQAF(csok>&ak%`n-SD&MzMLe|~IPEL9pRhXc#)C7W3EV##g{6#sSmnFVl2U&Nws z%gpb~G=H+$D6vasZU1_l7y)FFtxQ6r)ELm8J`@JE#88=a1^~q_C_K67UZR92sJ{-- zTnde&60U3p;2>|A$gU?ep6~;>f5uP-jfvj3Ow4J3Qx*o-hA`-6w2E|-I#l2JAIrFz zS|z2m>1b{tNVu}H05T((SY>0D0L?Mbxat)jl|GD_S7!OZ*KPAXjGt}zFT6TAmNXX zY=M(aBTJ3QS4WtX<^WhZ28`@}Z8ZYu`V5ub7!B!+`8!Th0WZgB%=$wc)FW_fJN}GI zG=p9g2zn`PKfdMv$8oL}S|+iZVldjLpMSk=|6Xo$1M`pVvp@w|AVmLEhkyAX1gPIv zs|fkIFv67ec@VpG9Z2jNsspzG%N*1g!hfWbVwe91I_6*TL*s;n#$z{Hcw7(Md@_A6 z%Wix%j}Cx5>R&l+9NBL82URg+~2Ve=v?_hvp-H1!`As<#0rX!jZumQ&BHA= z5X^D}9vk?dIV5KiT>OD~G=Kc(@e_P}GyIG%H%6m`jmso1D6KQ%jrvhO{HuM*IGW(3 zY^PNNoQw2jqd6Qq4wC)x#RZ}8>jSr}&l6DYk_{^9K$`{&-VW2`#sx zXY=#yex5B=mEh0!_UHNgz4N6{(6jrc|2%tN`p>ifrT;wNRTXWWgDk4sXMeW+^BZFu zr;)ElbebM;8lsC-6&>IBq1zB0fM2?09MOAi_d}O{=m7tryBFJz_H2CVF5{R1V4*pj z0MIl1K6B%kaX;Tn==tY+?L%)PTL|Wd{`>5H=)cdwhyMGVAUOcwK5^WA+GRox$r0H| z4gjnVU5DBQf9Qs9`}q%D{C{mf{Ly#)9T=H)dW;R957W z?Jvq1LPVbwpBVj=8952j?`S^!7rlw*e82Rm{X_5k(i0#4_V`!dA%1-Os+=J}WgZ{D z#)pyP5Iy~+pBJ-0v>oFA7cG+C`K7&lWcj7V5#Ph#W9EtG@hmlr9DkZiLhQr4m=_b- zNdjN`Gh`5LhUWCnA6ka_@cyOMkQ~3X`A6o<-}K1vAKu%KUL@2%0ql=n^6g9i)d?eg zc>mH?i1#mTfn;`mX;oE4BNF=ESu8N0H*^-m^0HDwzwx}9QO-aLW5CKhr2aN{oB(=P zrRBD-au5V`ID+3XZ+{ul5%fXp5U30sfVgrLmJH=%QU1WP8F(Q3!i=k&;qrNr&Hym> zqX|^hn}4pm2NGbmo<`?f0cWo=5G2gA5dW@k48c!WrFF zpIg4#Nf`hTa*Pxlt#3I1Ip3>8-)oMdnOP7>hB9Ojqx=O*#D9v;#Epwr!3=WN|K#gD z0e}E}mNpc$d6fC&$QnxQitcVe>l6!fLx)0jpv(wAWdce%f$op@J}`zI!H~f9iJh=L z`q}Vgl3*PRKqfI^-#IATS|?=;8&*m4I7@^0fbd?+5=x zt^w&WKzbDZkzW1v18_k4Q~^*>xyXb#DklMG|LEsqzZBU~K=MN2Ye$G9xg2CS0S6(H zpBqtqE-L2(_(b<51}4%ch3tk_7lHTDQw0*VZ-3)&e))~33-SXT$q{_?ec>%@7F*4Px~{tfBYKJ zGvLP$xo>-Tbh*~QkdFbV9R$*UWFkBVf(-rtw2MUaAHVs~edRJuX#JGK(LHHQ_Vtq# z9e=iFJCvX6jp_@QuOEFbZ4fnN&##;S%t(((23wRT$XTF(4v|Mcah81I`~uVk(QAnA zh5RxFa7TXojfds${_zGJTL1r?&-iP9)9t|i?r-|1AGEptvkr~4hWc^-GvpxEg6flM z>=T+MN=bM@1FH4eXSe_tAlDE}Tr_q+mgge*XJ zy;6`x=PE1?avA3UbkB#$`M^q`0GJJ^8kjPMM4icGG7Qip&@WsFtPTnb+NX^}G2@<} z9iApv09%7H;A&t8P+qtW*cDVVZht_Xk7{x=urE+U`v4pXY72)0$Ax;vk*E_eOeO$M z2aSchpnL+ijC}*=0?`IL;7ZV3cn&Bov`oeZZh-C#lYm=6JL6TfX{bf}iChXg3vU7? zfv(Bhz|%kt7!PV1Q`S!EEYin0S?PdxPcIesZYN&7D5fA$GE8$t<*?JdUND}&D^UWyv&;B z55CeF=%#t>AM+o&-Ed6a6MynL6*$2W_$g?t9Mt&>)bIs8{S^|&93H(TA?h(EDApD7 z7dXN_`ok0F=r1p6(OBtQX9*T<`QSH=1^8ch_ZP2czxcn(bDR+S^_@cgFZiocYoM1KEEZ+m5c?i zNkN(?t^7_&Z(*A6_eoQ?k!Z@nUEX3ctzt5r|C9-uD;|ORw@fHde`KP6+u>9{x|deN z$$s&M`Ga>k;t$?*er!|n(EP>A>@U7D|KQml+isext-p9&)BeaAH0v1pA^xWwp&*ax z9q50Y6OVV7In3VCzzs`Zb@PSXavGB*Iq<`%%{?d_Y)^|wMW#F%Q z+?aAKT-(Uk^H{iUtWeLXfZFDjsZhV;%sa*n`k(HRe|*IPzx|OxXTcx-xVF?~Oweu8 zdZ>-R-&=odRJsd8cx&p{MBuM=-01Z$pNjiKPndsvrcB{49+$L4l-~)Gi}VVlTegK; zOu{;8^<(mt7k_6(xelAPeu(-pX!SY;^cMQz7~bfpG9 zcXZTd>Ic7T?4Ov^ zmfx7$A@hqb|MXqjuOI2I^hZBOv;0%uWB%A%ceP)<J{KZ#4wy3z> z`YCU^ynpb;%jjmq58iUXru}RJME5ge3%LE4|LxfgO*H-I|1=HrW7=Mi|MXw}@Bj1J zwbTFcjFQKH`*~`Ldiyseavt~pON)y3zka?Rr>%bcTQNm-k$k^d9z~W<|8~al|2huI zzs*DapVMfH{tY3qrF%Y!{~AZ*<3#=MDSj2SKY#ljIYIl$_XINZMu6k?pZ$gZ;RiMV zWaT3HS(C7klbB9apOWy9Gmb!K#Bva*Sr!2FPP#A`F=&*Y-O)-RUOIL`w*DiT_AEIdRnU|Ta?8Vn?yEc-1Bo8!PhyE^TQcr_H(bC*OQ{e z4u2EV0T;dO7-DddbF?T9W1Qdb6xfJzfWl zA5P($9wjI~*5?B(=RL2R++`|RJSr#Nrhf&#v32Hx-ODran6a^O1NNZMgq+8Lkx8bI zN;DKw$vZSIcEec=U?m&3^a7N9dFn4s?R$=YmI_PGIB>ANj{t8A$G4_8mWwZuxnf((tmfe zU%^~a^;%+&?4Eb8I zVqT#~Y+Id9L-;)GM7_tDL081s- z{@iIEl+JKzPp|s0UnSYw3bmR@dVhtCh>v7zPo&y$7?g?-#7>pz3lBWu3~x`OiU7f> zTjsHBSkf|EG`=R4sghJho*qT<*wax;`R7BuxuyH`-D(~)w2WD|EszRMz6an5Jb_3C z!i#1pKUu9B*Y(nO)3Y7o_nj~*qdU%Hp!&sq)Sid)`-nW!r{{Apx3wUZf`4mjV|zH! zrRu8pIB8>Jw5Adq0;xi;-qqj@3=g{nqlk;fi8lf9Z1+Xq1M@2CH=xq#^}t{^{Jhr5 z;xE$iabio{QMUg1PC`g5G`g(>>wA=@gz)LMrEXFe#g60zdh0w7ZC)jVO$&`b9P4#H z9&w&;-;XUK_VX^BIoYd}tAE>IE_Rrj9gFAZkUdXl@@g5_+oZdxT(_z2OC(2M+g(#-y<^6cm`+hu4E zP-5sE;>P5651`+s;*xCknKts5VX7(?Ui&z38vb(6nsGUtYj`j4Q``#nO}jT$Pmbr? zR@14cR&CUscg2p}qkoe?hY0dSo|a-W7n6o1Y$R1OW*tfrxoVU!(BU;P)wMP9xx5B? zHI-NW37nVyIJwE)A=fbS1AU6x_N2L9a?x2I_ewP&R4IZNQGmPRU5BC#ZoJ2lj5V6{ z%xyNy6XFQYYLR522GaSNkU3FKDUF|r>cFI$mKZPYb$=hAOMe)y%+?@ro)C(jMLif6 z++VZBX-=jH4|m_!skf|F>=HPF%CbtT_x-~>N9XC*0_HTA`4AuJLxPVXcH8bsqCS+J z9dpOs9&689RwL4S2bMRVr*l}c0ETDLDUE^W@+;#7LEJIEXEBFjRSvo?Uk|5!I*!JAbi_F2aFnSc zphsC+SRvPyk?l%PVZmium?|}at%_zdJ&BQ4p?|?j-_(mM`@HEC%#-&iE|$65UA(XA z^NzFCjej6Iuisqa_y}e_N;$LJZf^2OC5g#jY`#UAOW(|w-FC;)!6OjkQ4!-&8b-op zz%0?CvEA=HojtF{9gn*4gM+WkG)&qU{9!E)qjxm9oTold1!(378}WgIQ3{t`~@YR-Xz&wRRs zlbA2cbbh#H^1cTi)<2rHsHWuHoiS@_j*5Q{q*S&Rhd#Zfi5xH4p?=zy(`{uFw)ZNh zr{o3SsVPsr*YM9u^j)1=)Ht9XzDnydaeu)#*Y2pPGZm|cmwXXADwasl>T^)~W$cK@ zu&?^^qB)$& z;dG)EjnegM8Us^5Jh@sbX068an*na;SW75hinoGu%TTztf*m$OcXLO3V5=EFHh&Sd zv~(|Hr70Hxc5#8jj-0{vx46r#Nhj$ zUkvHOKCsM}$^K0R>Uy^C7ndsTg!@#~n9!d7-04QCfAI6hwXbMy>!c{Sr z6}RB%JLWMv1q{K@=tf3;w*#RC*ef!!+sJL)DhL6}`=BNpm2_+vyaz1Epe!4k2#W0 zjn z59pfJF61llxu|zFtM`K|^D-)u;coVH<4-C6+D@}~X_kxg5+0Rjg!kzhcoshMv#xAg zZ@E-w$aqz&%u79=!eCCevU3E!sQ5K>a(Li<$<2v43<#r7D&rWboU2Z5-wU|k9fcL@ z>xP?2y+{7~)Hs|sGJjFl8J8^JYqoL@*vWh6cdGdf|J;&=IotXD0aJdl7!tudAbIYzy942!G{n@y2%3YdIgzFccJ!25)9CWk<|(Fo zp4G^TrDt1G+#$pI>kA54i%Ec%cyJjH);n(}qF&oxUuoLg*}!Gf@I`y)DaM<~dfYnK z0#lZrcYiPBmHiT4GmtmoVQoO}2d^uHTc+akYcF?tckqr&AOyg(P#= zkFnci(0W+&Y9=iyyw2pw8#!xl6|>N9+I6i0b~cr*>Gek*&lOkgywc4ouB*WwPUjNe z4{RJ_icIzD>C~^rbIyjIUN3K&3@e;j5ugYXZS;x{5hjEwd z=zsd=j+1nUixNLXaCjNz*6c@Hp0+@p>K7M2?xGjtan#gTGj}|vMZ3M9yGyT;jmf#LtD=m_8r{{Raj30(!}?QuYW`EcGgKSUve=$aaUU@Iw7j*lBgI-@8#yTMo#aGF`DOkPTsIF(!mlUT} zX+DI@k~~vwcLZ8Ho4}aQ*GJT;Pg8IQeR|)|@yR2DGV5jOvR7I^q?1t-V|J>wZA~24 zaAop(ZEJF76^2wr!Xu2{;VT04?te-cD2)ropqnIt@`t9sNWbI#;Z9$yrE@@?1I ztSe^PEqrfjHa%ZV-L1`PKvcrFi*9 zUC$-1-YFZraogIBVgrMxc;`!ekRwrd<%M{d;t~yUJ$8$BGcOMnf;b}7kUW2Gvwau> z$c=oZBojN^m7oUY_EfD?m@UkdYRoQgCrmQZL*hC3^C8Q{*hI(7IZp4p;fm#kt+e^7 zUfU~D&$YC3Be|(BJnW>(0fa@K76=YI!~)lQArC z!4JFgvg+gQCYCGHwfe$fY>FG&TyREo_arzi`{@{hVGz*OM%h@(!!aLU7TlES&q` zq%_#RIj@~<*f`?3?8*9_dpSPFTIu11iPkFanqw|$U=wM>S24Rc(P_`e%X6OIsZm+6^Bj1A z+UFC|EX##^7qYb{G{HgJ+LQ9t5!=S6cZa{3&w+6US>L;gUHE^VygJ$Swhdv%9lFtW zwmmtPT1S6Uw@i_I#9j+a>(LRxf^k+-401Q>Rt<0LYWYi;t#>YiIWc!VRS- z#bcSwlVoe~+z7lw*@V*76q}nIx39M#P#~d<)T6|>)w5f zYY)?luTdwHn8Q<~B#(k*b3!?mZRkVZ-yLNQj%IS-kAn6B-mSh0j(IBsDQz;XCYnwF za)Y1k)Cqqs%ORQSTX8ck5wDQ1CNupr?Z3%ky53n?zl+N}*S+_^-p3-b!W(tLPWh4G zMR7l!;``h14`$p+dR*U`xISDB5U~*xYv9rs^YLAJgms%vR6;V&7N0EVWKO9>7G29F z>nmT_-n*So7=HG_VaUn2?Ngc2LbH;2$PI!#g!g}=jjQ?gp04>|>sdYB?j;=3x|HX8 zTJalpWuMz{X>m0j+R^FqFfJVJYTun*V)1on4O$Nfs#YCrYj~X@f>LfJJ4*RX9pB4q z7>Q;%w$<|n8CYA|jG`pv(J zxj)gSDO2PYg)?zj`kf?t;Q-A}C5E>=jVBg1VF+s@m#Nk{U8qMJWCihTAJRqcOZ9=D z@#Pf_7H_Kk@ud_lHOz%}dY`Og_@Wo`IZf3fgiQKcD-*v%*pDhgaCOa>2bRZVUHz ztoO8h)30m1N4X?!D zmL{j_<~VorNxedKa-LdfZ^vVQKWBfNa&-58ar*5%oI3v?ocZ*bKD-69^JKSDzB<6H z)9<0e)qZn#qLyyM9rq97-j6q9WSEP0@=omiL~(O+*s=7&ixCkS!Hcz(dZ;d@}Xwbv7WbMbAobYEcP=NLmiAdsXO1tP7J^ zuD)?|N*T9y^EE^)dH1>GRnGKS6%zUh6zqH0wuWGzC$c%#%f!7hK@&fcM6^ebJk>vTMmS3HZ!@~t}M?8%R9zPC-kzuK#3 zgS1}T7C0ku^yy2z1lvmNyLHb48fx|ae1jP)nw836OqyeISe*Omp<`)I&8;D9{#lvF zxCb3Ei1<5nB06dzVP*&V;2m|-6^kbe9|6ffA^%SIn-119HCgx+)JuPJRtsXCXh)!& zuHJgmrfPyb!Raja1NLe(5L3ofvGY?q7{~ZRhv)01JQ}RL?a5rm5r6KC*Atg0ms_FH zMAW5Et7JZ&$+)iubH6OpelQB&^z(}E&SrYR1nu68ffdr1{C#mx$EOInR~W(j(;fWa z8iq~(ic0a^obh~*)vbSFyaSmfW>cO0`57E1hF4PVaXVe_JH`N9Jz!3?^PG63Z|BQB zG0h6k?5C@8#54j@*gA-3Y-8ow#f>M9pK#`nOOmlzOkc?&MUwTZ~uvNOz zl&a6uaNC(e7j1{?(zuJKAex1NP3Y*7Rxm6yn_lq+79k(8W|$6He@c%& zh}?GM&C^QB^qPP7p^YiFMrhGE$fvhrJi>F*P9WfBU31CHa_9OM!w>Iz4NmD@iVsx@NMq%I&_2Z zSW}v&y~Q;iDmCR+zFy12)kxmQZ1YmNK4FDo9JZNYYlS$n=MIxrtGs3v{Q$={Nm@BD zB4Un&;b?XHiHyMG^u9ffQF!5l)J=!MyVuXwv()lsWcOP`-`JnMTBeR&_R*5@LEkaPM51q5x0v|9WjsQQ~q!U^rGM;Z#&p_a1Tr6MgiwF zz~2;i1F?=^~|=o{NxVMCja!CiPr+PS=P z6u|83!C_;u+w6;*r1PYf_nww#^6Kb46FkoAsSl-y9Qmd^&+E4_7MalVqfxO0hlb`OU7o_7S~Yr3DQ(ND~=Kc>lnqo33rwXObR$ z+3kPr_Rjo2gq^p}u0W7r-(|BEBjH^z0p`Pd4;kKj&(n|o8_cYjm4L+emT>X2u&DZ;Pairls@u-wPzeO8z2zvl|##(2FhYGoK*vv5uHJwDt zNtP)&i{xSKi~P6{RNV?matRBP9xtyR&d`7FJ8Z#HvaZ+$fccC_}~WEU(qhAzC9?uq{#LYM7ZH zjz0H_jV+d>R{$iS<_SWDNG26G@+*L4j(D;)>(+PQPKmp9sId0|jb|yR%PfB)DWi?# zH~!dkg6!C^9-_=qJrpzz?=!+3D^>1=p8?TXkItpW@c>T>7Xr6Mylo%VV_}UZCsymk ztGQ3`nbM053qHJPst?U{ZEmDK z19*s;^|;qw@;0e*>Z+PI>GJM9Qus}+B`6Vl zF18)qRAvY`wKx`uCN*$3l##`HipHuE`&sNDUCb9;Ehiz5W;;jSpY7>3i6(Ftg^{HW z7M3*?ynUd8Czq?Eh@=9G0)}fdVGiM}5lx}Ieqg46bGD_`kwky6ov^6xnFW%N;^sMha#S~CiFJWU6hxl*E&U|Va1_z`r zn1<=7lDG3HK9+xdec7N}i;-u^cp&}V0A^wBn5ZwVqvc?m^c!I=xO#4Gy!NEU9g3?X zNh?_y7Lmy60Sdm}mJgViJ>Tk6HhibfQ-cAxJ3o0nl-{&2ARBL*P!f4V)8A(sF@Rr_ zPm~MR_xN1^g)f*ZjgKWk4Jf*N!umwX8FY^Z5HqgIPvd{P<~}3W)lO}7|FmP8aep;_ zbK04mZ1K`4iiFl>;ZgLdYJsp73)OG)OXJK{#%sn9sGHX!hqVuTW#Q-y zsVfGRV^dH*Yz!3F8S+IQ-wZ$?T3k9VkMdA*q2qr6DOZbLRDM(4kGL+z`u>vu{D_cX zoEZvap)>KeNFm)n7bZjmbhZ;=U@nn|hDf0e-wbqM ze)CU1PoAam0!$MFzWAlJHESK6>;iq0v{Dw*6)RSNQ}%Rpnyb?5T+)ji=)gv$oOD6l zvTPl;-y zYCjxkIP3STPio?%c|#o1wptJ)W-#!CjoIpk;w>=|7LC1}!llR`@a$0O1P&4c;8aE2J39bf9O@Nyl?c1!K41RTzHqXH9g;(VjCW|rjYH*f-_pcPrgxs$CH2$up~?vBt^{*`Aq#0|Xemz8f4E8Rw; zy$;blDU zJ7OL%cA4!~aCV(s=K1$ELu2o)F@1kw;xzF$-hc`Ui?wXZt_d3W4h+ERU6y{6|vMztGA?G11 zM3S7>Fa;s7{#)pVHV^12;em`+UKeklmys(Kb_%JU4gy#0Ro^Sv?)d`1)LzDSiQQxT zid%nPtS$PS5rL-Z4zAUCXj+`AcY62UU#-lSJ5fb^HjI=7YVsMmjk^$jPxZjTB|Ft% zYk3GxeQtVQ<&iFGG(PcY7qWlT)b<9p6!s=cO?;?B4hccu1Z2~80wyB!R{#`O;Iuq- z2#5%D=RyigW-K7;o58v%e`?97!p)L~}DxGlyDj=-NpWh|Ix=Li+@r2Ut-S%7XC;OkvfCEj_CUGiqi zkd!8qp@&j4J5MYpHsZVeWc7qenm@(BBNDEH;g3-I&PvFe5h={0K4FrsK&DT2$C6Q@N=&8hzF)-V$l|t|irh0V^e6a%4Yf%>McE6gJ0f$roEZmH zh&P9)V@;Gj>*;@{LH@QZef}MA7jnm-tP>K#H9ZfQ?)3qND@$4=!94q9$_y9N_X7uF zV1;?@AsV@ujB(U{ijd4Mtmnqv7~U^I@s@9hM7wojmSMf$nW;xy;H|d?_PItt&7pAG zE&TG1X-|mT7AN#`>bq9Q003#ATc3m@&RH3Tn=c4ui8y~M8JKFEj}ULehIS|RtK1p} zFas{J%UEgg-kGkN8EK+{A4ZDUR*466$ES5oU`&Ulf|ht+;BW6e_-%1W+q zBKTvN0}Fremb2cU9hsZAzT8q;@1m*9LA-DHTi@FtqJ{0s`EBD{r%1cE9PUQcxFV1| zu(vSxK{&cEyXp<>i{y38KZ61&BR&)Gj-NX?yC351v|x`CI%>3zS#pGf7#!yUTZpz_ z1;jdtyM;gX)qf?Q4AznRPNe2y6oprLk$sdZhueRPuT~G%A1DMhZZq{Lm0rZggt7*L7NcIu#H^Ceu%G-ycN? zqOwNWiv27~hJ0ocQ8{|Td||38d?mw*Tst#B7!NgyU==2yM1A>pt{9|HPmEpKNCtnA zSqBw(ULs~z`L<)eF<-Tb`B|BWzC{)nyKO~nE^%mT2|FOKAYtto!L5yMnWGJl*Y=vX z0*d0gCG!*y!CngEwO;_F;=Z&#+$eAe@)+my^WwabL%E2rau@^h7Q2?Blo9@yY(FU$ zzubKx9n|EeOfRHFH(mgoeoYPH6>onDu1YSVl7kzf@diVID4l1Yr~FLFg`W~)9Vqcw zJEenIQvK?5SdtCZF{;pQ?b>prD#leG@P-vYf>wd7udtvWf_5iWK#oC*4rum}S#LjT zrD?qMCXC{>td2M7AIz>a6;ZBPr=0TjE0C}ay5|iG`~bK_1Fk#7zR2kLa?pRonpmo@ z#>{^8m0aTqcJy2HC|zZ2rxXHsc=x60R!d`$uubeN<_A90)bXg$+xM=iNxjkZ@(gty zI6?Vlp7-H0w;^&60A-fAcQF;lLZ3CqDQO1$Sk8qJ|CVi>A^)VZ%tr;BSuprUC3i8=`UZCL~O-ONrN$ z=9a|_1NG(D#F*Eg<$5((O^}9d_d;#IY;BlIv$1T6)CYA+tp>wfXK;W1rWcdL3@y@) zD-76zNnegy!KVLkM#LXY>GMf`G6J3}(Fu-`&E1~(xwOz`XA}5uCjg5?m6$x*f6mPw zjrmx|tf{%pkXNTqCtv&T@yZkN%4mx}GNS?~qB2nk;WHKDX=B@fT8H83;)!PAHYJ}d z2JbTYpq2b^C1oLEQpA6~X;OSWqHK6MxqU%|jBtlTTuFjXbp<^`>}+XD>^oP-F5qZ+ zfq+RD$l#nNR58O65i};!64cMCBQ!K}v4H(}6O{UVB<0-H@OT4TcT1Xp*0o&$m1+5lDZUf10Ab!NJ~ex1m9V zc4`CLU8+R~&1eq-ZK-J`oN+TiZWGqkSP}>5P(d|5VMDS-t8!T3S=O#v_ z$qo?(a0qh#zVFwFKmvwK$wTVgc2qL}a^CT}Ul6OAE%?ML39qFWk=j{3qinmB$Miwq z(NaTXNhsyx$Pa(82w>GTz~@Wb@a7|~VLfw%AM{MAj2q_W1v?b6{XP(#+zt$>tX`a4 zUL%msv_7-q(d9aq#5lmF##M8cvDP}=c|1v%J8mtQDbgLrq0qW&6w^h??D_FAekBIer3z4{*x&(sm6wtTH$C_w0+{M1sY9H3K-!XgSB# zuR`W9vRnM~8Gt)*Z;EqZBnfXW3B8x_V2vDq=9~!(^8PY~$dKque!U*io7PlS4lU^L zgn=QJ!<&EQ_t7LXH>z3RoiX@BU@h;3O+ZK3#qEk#kKh3TlGkvBigMzxk^cOVnBB+Tsp;A$%x}?2mG_k!hZpmK)oTUYbe(DN&Y(`ZbjAwA2Do+O z7u&+oG3`9yl3;;Fhn5$ra@E#XnMQo~PKVgv#2|m`f~*m&=E_7EYm2Z6E6|7GMMC6u z(Owb2>qWiPMeH|R2H1TaA!;sFlz*Z<395I4k>VkNx%+}AkeoG#jdwcQw^|ki6x?2>2^05L z(HVaW--OM@&$4`An+(u2lxG985Yl)l{DNheh>Bv#^j^0@$x3qFDwf$LbXmw`@8TLa z(&4uV@thnJR?52{JKTA;__4%unl1M(r<&YQm_sVi%$Xt^>(5MbHTF=7H69x@6t-G3 zeKn6{R6&Qwzg;24)|-KDTF63dfikV!!@GY^FjuE6X*cbgO+xH%h~x<}#ZxDhOw>Y^ zw42d|X>Q~sjUkn#&bjWUh54y%?DrVLJEFL(2E<#0^Vh^(#)DBoxetK&uw&BAz&D~Y zN9xJaoh$HeJefR1l|5ZBjnSE7Ao}CuK^aJ1c*TR`rtH{vRFR8747!I&GhzqKJVt+? zshI9P(w!-5NFj}S=7n+6WLywmt=}tXGfKqrwm=TG_)?w{LA0#G2aSe@l1N=IUO_GV zYUGBy$02TQcSz|sWh?vmqlX8;e>Vhds8w^sZ>5;XPps{^JN?~Hm!QR#HtB9T1-uoR zJ>b@%#liby1@L0?hSfRw0GPq}t4n_q(ICSMuh*e)HwcKy+;)=Z*TTy;_L@h%juC=Jh_e-{5MiWSnjEjJ%zZpAjf_rC0sZSjNj2oHG%0vz^v zxPWWg2(uVkUiQMC%TE<(-ShGkWE%#NRNH>#3kbr-n7Y0g;u}g=0}QWC4d{Q81fdXM z^&R93s)8c7;dsfZs5tsLo48Eqr8%6701f#QDUzKe)C9NjvSRnucPHD?69o(G<<;=U6b zl`T~?>qm#L3iZ22ol}MkorHg0%MB4SkBzrNuEILCDBmFIe2e`0p0?u&7{ZCtsI4F1 zh>8GlFK6gdv(9o>h|D;=5d~CYGE(x|E^bpkNL=*V?Qj-9qOG_YA%&!c)=y7hTGyd7 zA?@oJ7tbGZ+3~J422f3ZNCVS}Uw6IIw^8SUJ<%691|2GtgaG=x1O9)m?6Hxszfu-^ zjY3}Fc!o`WLWV$Yvps@!Gy6~(HOHwRD)Z`1MMkdcglQfe%#zpLt1k3gFHoi`f!UX% z8RMce$KPvJY@Rs!1&an{zO!mOZ3BJnG79rruc1I3nHK?y%(rV`3G;vXje6y^kVQCjtZ+6|z}IT>cl6pxjAqG=ZJ%p5SI!&{8!ry3lDskLfqkJ5qg&DGtAW?R;zOFKj??-NiVbAEs2S6=EJDI1Ck&0HP{zqjSZSi29s z4NX`30aa*1;(wbETgo?(iD_RHi*!+9n|H%(#=hY@f%8cF1>>(tj}P&x?yQ_&-e*H4 z+`-b24vp#QUgs~1L^J8iN*zxTFiQOA8+DIO&6M~Pk}_En$H2aon-K-o4xv)@o&l$^ zB`JT1bYSV2AbPynpi@zKb}R1No-Z*45A#Kp$4gD#S)UvLz?59-Dc;T81)V5y7j*#7 ze8|Ekx+#;C?*>V5$ng>wD~eFYm*F-@==zvHHc3pvC2LA#e@$6RE^U;LW%|(l8|1X( z!@;BOM459Ruu&XOKHqLdCW$;r8h$d&A|HR@h6`Nuf8a2r;DU>U{nJ_q4<1d>_P@z_ zL{e9ii3sCU2}8hYTOo<7n)8a8uq-=^P(RCg{|V+q93Fk{L9}XDDH~)~N(Att?j)Tw zEZHh;=`0PwjrZg+v?BLOn4;;Lm#j8-$N=xOgOCf54s&?uY+RGm2_R11JvI>0d0u}` zu!Vl3oafZFkCqIboiiN}h<}R_I(@WYfGLxfz^XA&^G6-G;iLNRNTevv2=JyV87Ep1IV;&D7+0F@Y|k6@$O zZD7EokEws*nYBGV*MwsYl;z8;sAzx32%n&y>2>1X?q$J5Ub~h+kI(F85>h12{hOj2 z_t1`Au2r9eRVdE~eG|s~tV`+vJWaC`m*o}$gY$QQw^=eNd4qCuR@SK|S^e?=tZzPX zmTKCy2&Jmvv6J6(BgMJ%=E)cD{1wJxRdFZ1JsrZzj5xQ+F4Bdf)!Cun`^kUjaV^Pa zJ-b%fzVMVog0$feCU$VHKpjoUr>gP-QOh+!nYV8ZAbmHUzfMoQ1a^>mK_0vQwvo zEmCRR{;TAF<=qGN%xC+_L}Y(KpF?gg53dJ{SuF&lsoWqLgs5;XTCoR2AIx|;xkUu1eoa>ts zy|1#S29tTFcoZtY(Vczcyt`|kgA9Pdi}gJkv!n0!TkA^|q04t*^QRPBjM6aW<0=QVu8r>Rut#ZeRh1I$B&796mFKpMO zS9!>bgP%El=bi`9jtV0WS2bk4LGO$4Ug~KjkfI6sl&!`(__}J5YO)(82}50}ls16k z)9c21w9L|W9NpS|;>aq=!?28}4d{}gI1|#1uuUM`TqCo%WITU3Y=9>H<{P87Z^z9n zPcKw}TAQ^gJ|vyjZUi~6I=>9LsSnMp5A2zhrgj*uGNO{7_M?BTufTW`$`-GukBM&A zk2Ottmux}30+N$t!~Au1pJUDj*QUN}iBn6zR89?Er0pzYc{TKYpIMg^#9l5n8e9&2 z-aHq4t&18-t`UDz@W2nKKqHtH`Y;qg&&ISv!ru8%G1eK{7SzVy6$WC#lU1DP+Q4f* z1>0s)y!;`MRi2U0J*rt0So~Z0cf#ykse;K_@IXD}do{iCi@MNJV5g0&_5-BfA&}NL z@7OaMJw3BOTL}eFhF-b+XiDmt{47lHF4cNh7WUIv0bGAvz#_qu*`+}9_*}LftuF6cR-@^$;MxPcQJWJb({iA&MA>avQxHF$p|QRdnZcE zHi)fvx&xnaSqPS>S^{FP{Nr6ClS?yzCxH1QRo8q%w{#EET3aUt02xVpYv;}P^98SH zpd+m7VEuVut$V%(wC<7T{AG%8nBISoL6#6i_f3Cvm`W_!>@eDzwHt8T zYe0(Bg#+3?B0AX+O9GTOv466*1@shX3D>7 z-K6h+^%@u;XB>(9HquvHlV}>Sr%Y=IoeCJ}dwxS;geTuJ9e#&5n*=A%GKsT{(s_R` zcIQlFC$pZ{{t82y%XM~soq*U7cN)b77k_F_tk`7v{xX05kz-COa@~@^rIc!(?iAWS zq8wgqHk=v$jDg(IKuSlEiifNBG5BjUfQXYWLpqN{5MMpetiG8h8iisBHTGb@NFgS1*7t3&_6r~ODz3tT8JW3!e-$?@uerO(%)wJ1t^8@vPSzuJ; zQ=8srKiy5(G0#@-mrJnuQ>HwDxJX5?@3!W}O%v^cVc+Gu1ny5K<>QqV*-(Fxn?rLJ z!#1Z6@CY5gk$~lc8IQ2mUcO83U0H(07q`|~XGAB~!s$0Os5BtZh~@WQBhub zrFaIJMvP`BFXI{Zo__@^*zmqo!P>6qc;yM|1dff^^c5q7|84KidolSxtGnI1S4kEse zEO{WEA#NU;6W|i%(xj(SOTA*X={Cr;KW9)vy7QBCH8>fZeE2rc%$p?+(RS>J_=EASoGlE6? zKJ7)o?#c-e`EHHkrx*i?Ctp)iBCC^;a3;)Dx-b06J$T-(8C-u$^xCw9T5-Spy>$+# zUVJnZ_;|Xo^q?5?i|oV0bPlLZlT(^(&uV?54xb1&LOakIt0!6w!YG$0E?7f7#$VvT zyFferQXdH^X82L8jJ}W>;}}hfP-dmg?S2>&lQ&P|)V@r?J{)rpS1CE6{f4QIS-QV?kwZm-hIG^S3NBC(qUWsO?^V&e6KSGNL zRiIM#?ZR$7WbPapt~u>pBjj{6^y{~|jwrb!ai4RZ%ol&FK}csP)f#Bv2q%lbVahGU z66$7zsZuY1?v8KDX*P6bwyXD4;Vj*yHL@)|acT(Sg{{(#yh-U6TL{cO zAVf|(ufcy05$)io9U3+CyEo+0K-G=@b8yy?-FEg38#*KpN^h$BY0CR&a+dv66Lxtp zb`UuDNz5Zt9$dxl!+7!2LdDCd!;^edWh5;XxKJ|a^`@XIiS(d!aJQrgShLQ2>c`bCE zg|SFr7Vq#OVUKcBTx68mN?+#pwNDHS6T$7SJ#CTZ{AH&G1)K1!8wV@!Gw}dEpl~8@c)^9XBw5){O%Q z19mih_Ecjm45}*BR8`Pt$z4?y6a%VQTWMk%=pXfRK z*QfPGU@aI{bu7FtC-xeyohW{5gRW<8BdS)S+H$~$vM}p@p6E%K!xO&<8vT0JTVvD* z`CfUz>!*e|07#?&smY|ReLa6Ptge9XbvzLIN*i(nM%-o)drKtJj~vK6w$XSozf@b! zgYcr1 zOu~txB?+6mUOlQ*7vE7I*e~~)VyLhm|}+f)uMkPq$UaM7S)WCYCI zd@IvJEsQeT7_fB5Dsh!kvFW8gUx^9ux_%qJWjvLOJeK7A8y_i5*>kctdI<`SZSkf; z&GCrO{zFCsE$TMQKM*^}I#sk)&nwbpbU3bTjmv1zFQ{JQ){O)Dj(x<+fq?+o+PMEngkk5thnRU2SV(?LQh%+Pn6sbAS$`U zB4z53-MA%_hIVzDo1(n7&%@zJm&O4;8!t(u3ot=)eb$68Cx2j?!F2e0PE zL|>RZ#M-RL`IsyE8y84}V&8;O<*)9!--b!6=@b1Pw8&f8Xh^TwcdZ_MGz$_X9(_Ch zblL9*)lhTV;^wS@Hs*V0Y!^~IT+VVke>YHXnMoGkSH}}mj>ZQiKS&I%vyFqp>wwe^ zNv*8ng4Q&WM%sT1b1M7mz2bKba7}sewJigWI(~(qdI18b&5$p} zt3}C?9cN=Csu8=ONy(n!FidANc=whMELD_(A@h{#*XUd8pg&M$3iOBT@)NWdmLQ5f zBp<~$S1S7zK&KX&0KufBzh9J>*3Z*d7kbw{@Y?7Tv1osWe1A;ctPj*Lv@ISeKSck% zKfO1q?CucgN@1=&e#)x$kl$ZJxO(vWW-Tk62d=o%`0P(Vgaj;Lg< z3mBGR!Z_XKVsC0fM|^t4c`?b8xL>aCVF1dzO2 z+=SZlbxi!Mp;=+ZSrC8;Xu_H0V z%$o&^JP-E^zuzcA{4%-nOq0RlH7{-xP~CslAr8Q8veEmEwG)kB7Z?`+YZF4+tK|;K zyynINWNq22m#MM^YFwE!kb5nfZOQ6$LSEtB2uPOSn(LG8l@5?rV$cX#MeERT3m~T@ zkdw@7=!CId*wEv;owv({gy3Eh!K>E)BsD79{>@+lpZc?^|o=LBX*5Wasp zih_$l>u%)}*EBd*DE{1)cu!9NKS@U{4{Og)TrSrVN`J~J*y~Y>33g6@a-TAQO4cy5 z&>BfP8SrT|0RcQQ)La#yJ1Mf&U-Da~ePDVH;+XCdXQ9U#-LdJ1rMsy;!OJeR-i&cj z;i`4aml>5QsltJ|1HxqyG?T+JLnWN0n@I=4CAJ^Pnc=;o$Yba5=-g4|V zZg=O?Ss1YGhYwVXqUZsvr>6DeLr!kW>x$KL76J8VtIdMfAl4D z3Pub>A<5fdUkssOd}v}>6a*SV5!WZK0&mOe*Rq3LyW6ks$$B{T00j_(1CGU>*63Gg zW>kuNRVx($weu)^Cx&5%JZj|!beIArQT-yE8y+;>rmQ*{2bI2d3$U%DNQ9F?RG%Qv8#SP>y!zD-<2Z<}vh)l7dF9ybxe3vUzP zCm7mieN+{Rf}$hh*=RoxwTbDOx347A-tb#s3-S%`PVy3z(2kugZ^e;OD-k-NSJnob z_4^9C``TSy$0QaH4jL%gdz>t9##)GS8eO9EH)YdT&*PKFL5mf`A(@jOn)9Hulu8s6 zV6~nR7UH+>J<<$|6k5aDJF+7iQ+1 zeroPuyR2^M>8NesS$+-JJ@ByO zDkf_@4i2J_7m%4f1`uD!Mctc8^z9Id!6Il2y`R;F4CkS4{>D}|84CNgyU%1`FIRe1 z-*5ecA5_8=F6w`F-#bDDABkW-$7Dn#pd9|OI$n8@RX!PKcX8r0}tpFD5l-NrR3U|UjmT(V2hok zEXK@BDO|DNKB)ZJFFdIDQb7Ws#L})9Z}SN@^N%7oeUpFbdww$KqV+g zF?L9mF9e4QmgSQaMqmzzNeAAc^Sqsc45D7Hnf zpHwfrhJ_1|{NV^ppFa1VB`w~EPb2R6RpT(3@4&;bLuWFvoT5@a_%DYJdzn9P0J0Z| z+@58d?@q_W$c@)lPT_c?BNZ6{9t zJ~{?MXWax24#EAao?2*4OSNay0e&g`E?f?I;Itpxfcz-wnf1DmR3jxsl3?m};aq!C zxNYW7c`Fz4l6@hbJ?5oN#6KB>T0NedLIBvUQ*RayDWGuG%YSRZ?hF5%c|VV6XXdQ{ zNkF#0%^#w9*yZTS)sMhS@kw14UEWV~q77AF>FAbx_AhV!YD%_HKPI(TjXd-L@`RC2 zQPR1bG^yyQd#SZdTvbQegLjx`tYRMtx5ECrw))j7>sW z{sR~JcQ|sy+QF30yOquD^%EnlGpkP0w*=U=G=i}z>qEQc7_`Z$RG>h0+zLv(DjN1S zKD{FN6tz1$P2T#32qhHkrQ#mXG~p9tB*|wg%AS+oz-;7!cx}E^a(4C+outJu_&>43 zf?@LhEKJyLeC~hkTu;i`omCuQsEyqPNRCLF&8cQ*55&yLfWDRp;Y!QxP(!8Xmd`rrkE` zWn?-4HvXT9=^ixp^du-DgN&iXF-a0Mra)27v!1l(xF~;BTx@GetMTz}A#t?1*r{lM z9ow;mk!^$4Oiz=hpSzhjr1N20v#j#ioh-4OA~{f0^nhMAGr?ry{s~YLwAT7oBA18T zp?#%~!+g)sCD4hqU_CZ2^ZjMYybSqSibKpaa~cIGj5SYRbs^Z!TTTalOeNet2LjA( zux`%Pl&F6k?Bq$EU)d$)>O`cK|p&JxAv3Mh-QB^l^ z5(AKPDUy-%dk#&QoUu8H>YF^dg^J-DrS|?Kxc6tqp0}2{M#djfjiEp5m5BmZ^82>8 zh`yJ)z%-eaq+vQF>$XJG{P9C5nyN)E`64f>PC$PyL>?~^tl~=l440=`9$sjg2)kBc z;|rdK;Qj@GU7Nyiedq?@?XRze?r!yr=_!OF1Kpu<$D?AA9#R)>N&PIjrDa5-u@W4j z(3#iWW?ILcA}n1YP-1^C2Fz*?|DO)mVrO<2UYXE{yK^9CDEV9PK{7Z|+SkWnsJaNb zu`hpblUF#A8Je~HpVEU0K}3#K>eJIh%ky#-Atz(X-hsm0Os%n8ykvqd{H*we)osH( zPW`Q4sOSvK=LI^P2oNz6a)%;Ebl%-nmlwRg-13*Vhv%ph;*!28z7|l==cLd3bAUpX zF3C;W0J+OhTfr6p1w^Y!rvLHLE?&+@xrl#z zQo#5NnzOEAEkqshD&T2j{y(OX^O14r5Db zT2XFOa?MMSN_23-403cw>(YUj>fcKzU&3>KVgnoXn2{VVDX48U-)R~|=ICjJH{6%x}`*!`t(>McI>KEfyuVH=#h|waS6*T=oN>*O% zpuoJQq^H&#(4DG1Tj&RD9N>Y!yeKO}_|*zR`IF`C3~l|N!%t-xu{WRpp6`FHB2fgs zYkVN~X;9V@T3#%?RK($UCba|F)EnV@4%W%_95zsza01?aT$9%{xSF$GCG1O$&Ko#n zYxU;=yzde_{D=-%qrI*3;6XHL+Ui!w z8xpRb&z}=JPSC*K0T8~|W4?dWuo5ers-hAk|5`Y)1EMO?=YQ|8VX*qTG2pCen463d z@tUu2!3S%GqN(a?SnUxh(rtn~QA0{r_X^Aw?6eJgI%w_zke+_e@=rbNTRrS1FmZPI zklqGVA)kM5=>^L3aHv^Q-|uoLmG4*!2Jdyaqv?0H67|F&$Pfb&Fa3Wz@U1I(3Lh*T z5`P<0;geW?d7A_lF9G#Zrz&EWTk5@*F#kRsMET-?$a!+g_mh{$w@$%SWIo5Gj+dvT zEGEhLNlXI(<7jswG*YzbA1UpadFpqHVPnYzx$UbOPfLxQCnu)!;TIZRvsWpd2XqP$ z)Bvj+HZM3!gtXZkcyoVQa|_bxL~YPnbUWJW% zid&1i^~N}7B88|&bC(i5?(IBN3-;{HIf@at&UehOqR@H6>5}M^Q!dXOqCkN|7%iR(5zNQ(ty_O5PyA9t~UopFB4PjRt zFE;*H%BVx)Ca0nm5i+d0>6nK^CvDAP@ojP8(|V8p267K15CKze|K_GDWnB=o=3^9Y zYjbNxI3RzO3Rv8j#MYQZ@8lldC8F`I%*DQ{{M*UowrqbD0u8t@CYq|MN2`L%bJzr% z?Ku}m7zIyeqMVY)hgw+&FP{p(6vSjL?)yHR>uIAO96+5B%L@(3U5Kk|CMURqopaFn zxSpZ33sk^SLjV!ZkZvru9%Oj&dcCot$4hC22ZU;{scF6HIWfz&^4WeXj#<=k7Tp*} z;>U71=s$nh4{i7h?ALS`^0~SIVHn~rw+bMK*8Xh=cY71RT&8KlE~y=A$Cidfp@=D6 z1U-oK{g4W#Bn7ozQu4ti8A9yJW=sC~^40#HOx#>6M?Q z94u9VlXdQachd6)&O+D|0N~msR0`!&U__)hNi!On?a*)LDZT#CShTlwj1U>FNMdl^ zGunT;V!`Wg9mtNLAr0pD>|_M3LHwd^LZZUws$26@IW)iunWlVB$XjtxiZgASKQKt5@^vimzY{ zKFYzBn~?>MP*k!p11qI#De5_55phHx>D5%^2%$ppf#OYmV9gu9w2h%pw=YfiEgZE- zUMRRomQY9nKP0L2eha}8%>t6W=zX$o*lBopi`(*-)D1VHmgBas2VTUrKhNB@dlpvu z$1WkjBf~T{>X`EpSX>vQ!#G9Z8v#ag>n=;7v7{Lc{Z_>1otUvK&@c~B{!qjvb&%7PiI@6K2`PWk^O0o1ScX6F+g|$Q zGaZX+T#xq-9&eqnz-Fp=ez401tlLfKC>Wr|(wDPht5xN)q&&A0W%PZOBYq&}Z3z%N zJbRbmyCs*bZ)lM4#xe#%XB})ck37#)Eu-z!tGs?y4USC7?0b7WfjK096TI==GXQd} zlka0e5%}~l;bMQ+n^rqE(TAp!zsh!%mOu}9R>6MDNj(AbO=~Bcrw%<(OjZu%P)Uh9|j}I!YvJSUIM;=o^>|tv5 zr`KN!%h-&gg=`_MK50qY`Or>Zl3a}h*KXcK}`e|)>tjfUrCK+Ays^jnHI zLtIZjg$59v-QZ122b5$ogJCX2kCpbr=Uvr`>uzjm*NTxk(P!|UZg3-ioLdGK*c?-k z9v~N)=_mgyRN7?%Ey=l%K@vGlO0OM$r|uVy7(O2;17*ZoG|gaTeWv3N4m-3= zfpu6oFKOO#Z`Yv?hyOT#UkJTYA7usPS`WNj?{*Q5*C)_4uKA7iUf@Zpl|+2*USu9B z@qK>E;;<(D9^FD@-~9Z%(6!Mwbswp9uB^rue=<&%`gI0eWf`*FdyeOe8UL*0S8=Wb zG0Y=v(SDWVoPj7f1sqt?GWo-DL3|gQcqm(Yy|bn7RYwTB)|DoI9k73agCRQ5H==In z`&xT338AHDEu*Iv=govJDIY_aP78uY!O)FdQ2b6>8U%rtdUs(I^(G!B**RM+inA5{ zQ8J!7*45wZ!SrGTRCYkGItLFo6LX|5gr?P&(0aN}kU^0x ze5<$57Z-w;1;F7T47@!ZUB7+&hW6d=m(M-Jj<>9Dc2L@F`jE8!Q@Y8U3}b9=-Q=Lr zv1)JBx_-Z4aYPY^9nkk>dlf|9a7l&p%?j+&%3;aApJLy3ul@9vLyBdl>DjXU`Nd?d zBEt&5BKZ`5D?ZeLlMJhDYUnsK;>1qZRRyQ>7Q4uUG1bj>c#{C2h>uu$C`zRi#CB|? zV`dw3n;Zm+uZvW;_@N*#DmyijlTF)ApGb*6xDL6YLT?Lu$&)`+!%uX8mcE|xaJ%i1 zwDvaiF~?rZ?LwBOfP^ z;(-a7-XcYUR9@%$F(QoxPD;kGBCh9^z~d1upuxc6E;3#>Q|&d*b&XnihI@oP^et() z@5+jQWO^%4W>8*4u|+8`nmX<=;g*ka3dMhelak&FNayZzBdMy+3P|>%Cp$+tHLhqvzjh z%DG&*4VlU2vq1flV3Ov6+?km1GQz9sMz4i#m{h zm4PX{nW-4I$*T1dHNVTAuL&&+>LmZ8HeprGl5(2^e2&3Zy*iM(<;Qm3RZ7NTRncsMmt$vM-iv>hgd z?c0)U3O*YhI+xc}$g=8|6M>jt8=Fvnsm7sK|EklE31`hlcM>8GI8oZXn45=nEh1X) zhH)+yZ373A4H!{pe|FV1T%>yg8eX)y$->eTbXn%cfLm#>L!p5aVSoIbx^K;v-_Kf? z&YedcgghPGZI~&L`+}`EDSL%Meo~jf%_uHo%TV7Bk+o@pBvrngTeLGdJ%Iy%RyQuj zx_84>(Oy74q9ns&8-9+^$U^lJyqAQPMW(Bwt0hXhOpWjUHF4wKisowfC?2YmaXFoX zo28ZMibz34H-rg_5FNWIJv9)((5X?~xy_9BS;xR|_jpx$*veM*CDku6T=i_^iNaHr zBhhy7Dlx_XoKK3V#9;Ivo(jExJiik1o4v=8&80F}<~v~dCHBBbTpwAaR3JU{p0Bsf&QB99 zrCXbMGNQ&F5c$$Zh>Un;G5j(kKWhPYzz%Y@a^qJx`jrsp+(n3f1#)>jPgMq1!3u9f ze%-xea-U1IuzX!&HX*!!_6KyFxn9L*U*$UvMby|-%1Sz5uy4wWC`jjplUNy8BX~5G z45PF>UStVP((|qd`6x9mLzYq0mav9t3JsFFhAZ+rezFgxZ&MYT>q>(5G2Rx(gNw_2 z`5t%{wzG3kUM55Z0~QP%A7x5Wno1k4(a+p-(qdgNwiKF1*a8BD4NtqFvYs6sP0xrIZxY!n)e1!*0{I5>AF)9#(p3LQg z@7g8NVB!9Qy!VX5WA=~{a)JR$%28#JYc@lB1-z#b$Kv@|HbtB_x3zLt=Ed1QzVwNANeU?&+PXJ= zW}qB+2d3_SL_^`yYC-=_3-50q4UuIMPJh=|gzU24zBx2N6sERNc{C#}6^$egEiDfB z;b%9S@$zxgm4ONOe3K5zFO56-YCCS&j;2H)GB9zr11$-5VeBhA{|APctRoS5oWi_8 z>qkX7Y2DN*mS*MCp7H2n13BLh+a-SiEyXi4gIBYE?mg5c;Wcx{Kh>aB_{ql*+WozA zOIme7AhKqyM6x`Zr1y>P@HZL}G)-$wlH8M6df@#mR2DkqKmajLDmVLS>XkNIfR!Yw zWNWgafeM>iDNl*a)6x~E+Esp5xQyC4jK0I^O}K4_mSA781)22AqXQ;TqV@RJP1Cd; zjGeN7z1EZyqzLE!ypORzh<@(V<3)vi+pjdr&6{WvrL3CXUU$@7u_;-QvituXjrZ-e zdHX%^X{4q_Hs*^&;I3XGYOj5T0Os}j4xXS-9znZ|l^f6l96rah{G0@~LjYjX-flf< z5u!)qb1^I&%i@Q;_J0NES9jaaV*B4Nur39ELA_%r_lY_|5v*EmF}P-f)&e#8@tYth zJuYQWaJS7=;iX;TWnAUg%s?3q56nN8tJ}7JKzVZH#hu%q!c*4w3G9pO(9Ekt-Vw%9 zMdrl?adw1nLZnHYz#cU9Vsn;WD*AsJZG2EA#Z#yxjE^WpUwFc>KE@>Tv`C^ zgd-&1uX|EPe6BI>2KD&mhr*aqq)KI=sR^biX>_Fb)9qyH;$6I#eh?HY;!1k+5h-}o z5goKg=2rJy#$(_2zVoz!el2;JTgcvcQL^>xj%mTp_iew#7#Yg`i2TH;)7o5rZYF;U z=*h!hGCyt>T=bpEQrEVD%3uSCoXyu2q-=+!PsYuja4?=_xVnkX_nD)DXr$7W)&)ng zcZc@U26kl(HXRmztn)Ec(`(5GkwON7h?p?a+Z&d|2#tUPQU#d1T3Ky2p5dsT!Pc;I z^b~l`YjYJ5D_=t90mkHh>+1f0ZG0Qdl1W$&EMaN02s)%z6*Zs)tmHzm%uimZcNdME z5DVUmujH1df|svf)gRXcy+WY6+xqX$b@EW{3@8tQE;ht_7Z*euS}wWHIv7I?mBip5 zKsdyFsVqsI1JT@Au<8tV{~GnGO3kMH&@23^a`PcJbJ}^s&x>B*IHQSwx`KI-@Rn6+ zUn$zY@MTh$T+x@JrvrBF5sc*P)JZ?{EFZL&yD+?Z%{B1zDbNL8Y6JMB1VXx8x7=L8 z+NG^i5AmEV#@Bq0_6O9z-;Y*ZB{n{7B|k-VSizOq4)Jf$K!s1{?%q6jcsFHS1M^i8 zDvaPkS&%4F78=@|feImiok>}S)+%Kc5Fe|GC2fl{ri2G|08J#~Wk+-=Kb(#cPpc2F zq#wd9Q4d(USiRYkLf3pKJWubnq9+z_qK71CRVX?8dE!1IN_*I=E%nLHC5myqVu*A$ zWV}Uu3a&R!vUOJbGa(Q!fJgCkJhSqS?)WtSNcA8v!`mLj>o*L4@k*Uj0pdz?1gt*Hvu=0-ezw9Jbwyjx?x}fP#cX|%hapri5v-Nfw-IeM z%cmDAn0s=-8a#x5-3~z_F2Ls{0CHw#431YEArq&nrL}12!Ffy(#775`PF* zboA0HeO))@ipzch*B~)8T~jf%O?Rf40aVMBQp_wjWTX;!)?=g=8z$u|P4cT`lzzD4 zw7d+(&xn4*!*hm`Mt&>8W*sw0CnzJQs3a{=% zwt>8V6rOcWoeW)k?qUM`%oGN&?t;G=lR8v9`S{iPGv2;DQ;3DGgw_s2rqdf;@BWLQ zukf^eoaSE%b(CVV@LFf|qDu5oGX$OLoQTrrWTsqIi69a-vT3O4RmA#LT%@Lnn++>9 zZEl@UpT@kIcK=rLYhshMu^t+yLyn55NswB96{tmZEx~XXcwLhYX?Z`0Tpp8jjU@we z@za!3O(HILpSCndD=e^z%wq2|Sa?4o6nzb7xSyp8Yp#>gqq8MZm9(z-@N$KslIq*x zkh5r5>BT_aBvI8Q@Ulq)@sTOgE)>KclDb2-ram{f;A`L1Wd0IQe-L90WS?%<0xtP~ z&&y&8vObP52!9A9bp7(^P%!^CF)ZUY6Grf|_sMm`zM4!HBMB?`&}ugkSqn%Bf*fMC zcQQoj-x2Qlg5wg*C3t8nWvLgA)y~bW$!5KwLy1)tuZr~x+fC*5fxCdmh)A%+1#dj_ z;9=Ef$a$qrJ;MC>Eu-7>%=1xLrceccetG6&iK2mXc>qF>QpCrZudy@O>GZ@Tdguh7SHkg?(&*R4?C`SRM|b-zkD8e5IcfBV(Ag(>ShJ2uDgLO->w1ZqpAXvC zklfCbf!J^Gg^F48%w8sWi1LvBN_&K$2{0BB@Y@UOEIse6P4r%cY#W^w(4mT`_eXLz zk65~1u{0*}WE7qBh>kS>7JGbuzHklr20VbC%rK*2>P@%RHvvd#wzLkd&7kGs?b;JV z(pk!&PJ2?1gF9^_xD37q60O1e%a2an-gHay4ca}_q+Sk#1KH>^_+Dt(^j&XNdXsy^yU2pOo?p4md1Kkbj6|Ch#)_LX(0IY1p3W%ZmNl{ zPHuyFru7TAXupgtIL&9TfLXD(X(1JH&lJP2+sAb-xb>JfCktNv$BPa#(Cp;o4$Bgk&avyX7U@UI4!q1PXe{ zXdkGb-%%rL5ZUaHraZK%_TQPvzTgLay8LF&IGLF?!yu-&K;PY0$YAqFc-pEm!Vju_ zuT*+ze74CSs2*p5;;MW}`;lLZpCv3++_Mp?sXDQ|Gf65-uZ-~tM9X8+smA?-JkYE$ z$i>2nx8}~SGFKdbDh4$nX?lKoWNi+)bNb@LI6|)}ns#a9j;mL9BoXJpu+MkW)Nfx@ zLKK|0f76bV*aXD&0qPCTsk)`W0PeC|VQEBRu}3 zWo!^Wvy#Ik<1IH*@r>XH2$V*3QsCCviX2l$VVHln?=b3M{XN$k{(GOsYaLoA&d@OW z>ush{S@habLpXR{C48d-uRcY&<)s^fos=0zLf$^+7BBh{Raw7f`BPh^Ww6TYi>jC| zOTMZj2Ro5wqu+`C+>Nc$qgtu`>3Qbx`2mE5mvLnY7=L~Pn&>O~Qc`R*+rAh8fGi76 zlN1D3e5R1|wfZO7hZsUyW>qGbq*FGbO*lHMg@IXkC~%Gh_wpzkrF_~#(KL*ru$7H( zO&d$`TMQ|fa_Y#y2_v)P>lX3#l~3D5M;uuWqQ-`;&aIuZ-58I<8o5jxj3*+u_?o&S z3*Cnj5wo}oXga>q zLRFa;9MJN>Sgss{w?2as3xU|c8HB01>1dYFJp#kGV2FxpAY_{>CnFx*k!++$xyoIz z=T9VZf(&#w@bVKl)Z_UPn zPU(6njtOrUz#Wp)Cw8&DH+$YcEY#Wwk*0w@k&%(CZalw*a`Y+29Z`ap$Yu#6e=hbA zEJnEcH~5OpZ-sR-g!{_J3L(+*)-u}t)hUaEz5^wPA)3MZYQQUh6(|v`>5IC=mgHS% zyG@tUMZfE!g28D`!Q_4Gm?=l(q$c$nX|o8U+Hs316OjZ}<4>!m9ChJ3k+uC66}`=? zAaj9}{xwPGMz$e8R2d2T>`V(xe^>g70tlouio;bmp;dzt4nSF)NEARrGfw%7BpQ&m ziMs`x%RwWaNsHndr|%DmmPz_rRtk zikZL@0zi$0FOBhAZm{%zHzTQv6xGYUsUPG)uLf{4HjFYeg{G=s;_ZZCZxgvb6@f`& zhsKzM4XPMkvLDlK*x(ac-ISn8N`nFfUE|dBPrd;q=_5=5cJj|Jf7_IE4QC0OgF;ut zG+>C7iD(6^*)*y|#$bJKW^5il^kUT~Zczb1wRaRp)-&vh=z8NqwTwap z_ce!%+I?W;8exYQe-z|bkkRh_2unC?Sy&^zTGIrZe)PPSE(SkQVOjDw@BFk)r;DFX z+>VK*DUyY(v$RHF(i1A9u(E2Belt*X-x7r824qn5A!QOwL<23GPiO=`CqeAPt1+)* zy44Zka%Qy)r5!>wj*l>oojoZ5<9aZCmzJ_G?|y7!>8KEKcA`k)+e26uMtmUOXN3=t5AJ;O4duE3_ZpbG zwynmXVf(n{f8?V5d7805^a$yz%Lrmi8KyG(gV$?;ADy0Nqb4tX^FTR)?*11x)SyX6uD#r1e#O15?0G&31WNo%DCe+y+TrXs~S4(2}e@`|T*J=s_0qIftEuHp5H6Bd|QV<+zW1|RJH z-F!z0;A;>Je?Jzg)A{lfq+D=v!Y-TUT25pR^qs#OrqgRPf$~yOO=wyOOMUC#-K|DE z&x~XCe}Y~Su?X-Bu>~!tUJE*05Xlp}7#CH`yO72INF62)f0Pz~eK^i1P)F`xz(f=*L45Z^Ur zG8@4~&J@zpI%~+pBXt$GyI@f$PYL(|xS?kDHt{Hd_B^!3tp`kl|q7!9A@Kl7qTIQqOhl`TeDIvY# zfAQB(59O6($#mbof)3wI?Veyq<|iZS0>`~$T>~wOr>SU_IO3@{k4-juwP%`H zm-K5YGc1GLVi6j&8Qe~-pN`uP9YV$$97WNMp@oUWW-4HDTHvp^Ku#mnoT+{HoK`gl zF2b2ldY}%!I&UB?zVB@xwhvaY7Aeonf5ZYiJp)jTW?u~d7Me%6oHM)5_Ktv1f|0?A zz9_Rw>AYbFd99@MP%bl_3sp$3G2qmiXy&-pVmO(~cx>WXQuiSckhogB!LqOrn~xyEY0f3AL8 znnDPolhw^=p#1WvDLJ2Y8I~Rdb0c`d-^;`pp-5DYh@?p%s)%R0JUEoR7 zgEvTo#w$Py#bD_aBGTz76wM9@O^-;EkhOP(BO4dj^i_2_jUT_Ss)~@5snrMn zrpJl{zi95f21MY9+A{bpof_=iYu|dqQCQ}O&)<&0(c%9lzUlDj z3K-NnaJZo02TW!158oWX^51!@8L|8AeGU%4WO05{Cp;l+51ga=)erly@6uwe&O7?L zV!^DoJXV#iqkobZ-inMNe^UB$0hb?rUVg-ne>RH$7uShSsH1QrWXL~aYAqDH8-M}cre zb`VB{iIMr#N=f-wyKHH(QJGS))ufYJ6h`^ayxrxa}zK1`M7owu#WhDd%7&N>o-YIH8 zR8-WUpZ-}vyJzN{vwqM2V%BuGh*+tYy?0f>xLZ4C+3TdYe~}=tOV+ECXTdOk=B|4? zpH6-MyqnI-85E03rh{{5bMQL%uGzN-S{&I%Xg#8Xc%(y`J)<&x1eVUN$3CK+=-FR& zwQWVM&5|1jIqt1*V>+2H4kOdqBJ;VoX3temo2`t@Sv-fky%=@J_Gq&7?mJjap62#7 z-<7Vt?4p&ne|{`FV=n#eX=qHhFJt`79#$}9E)Fbm9HzbV%fz!zJdS7>Z+5erHO@~H zvphw+#qMzl9uqS;ccKM%>P?Yrsvvv8d7S1Ej_=`RdSvX6U6M2Tt zoeiFiSLma~Fn@vlJSKw&KJ+{NuzxDUzMmfnzAY3*f1P4n-ro-^Fw^Uz&?K4PSQjyn9Q%88Q4RJ z_M_T<4xca>&gsiXk8WTtS1WThvkkIYCcR~Fe=pgx86z|^`)^;fFK}?1vXVva*1ba* zzl!s_HZr5zd8X-T5e@wH{3xrC6uXG_<~uOAcl+WP+j_a4@2t|Q#=G5qHyMsz<8jYz zT!Qem7872C^z^)kt0DriRJT#rdw8-R9?a&QSp$DC9@a+Rv>XOFxs!53N4JC4u*jOf ze{QqLT@J?U=X0Sor`xR>cNyPp$yd|K=1Bp*fH6y>`+-HqhC zlVsP$?BL>IvUx9`Ya$DM^W5$y`^jdge_KvznOA>a>>nrk)`ExLS*Fcv@~WG=Q@$o0 z=+ET(#BcuQw77I?ecoNdY_^b^|4O>MZ0x7jBfja~{y5t`58cDw*be80^L~|!ll2plxig#3n&n;}Sr%x2wvm=5sIY90uFU+{vp2?io?l&C z#+yy>JUAB1BYa)2P^1ZiiO$;xP7>^`#$Ho>#&(Ev}w-%dX?1o_U&nJK*PRYLa>rzQ@xAd)Tkh zZYry1ZX(2HS+k4m1>5jOf73ZTrLRY8dh%Tlv-5gt)1@?w35Pj!#Q(g@e`dF{>>?43 z;8}hbjsDz!Q|}jj4*F}ygTNXajS(-j+x0Zq+)w?nxu`bTt}p$HU8CFm^q8mide~ic z-B~tgZ@)Bd@$xO-q2VoOYwf+gY&#D#8=<0`rHS^ohe@SBW75Bu!%o?K__wn)tn5W9 zJA5R?SPw;w=SNeIaF)jpe`)K(@jQH;nuAl!X7YXS4Wnh$@weezZu?hlx(!l$bZ^F0 zxO-gKdflO#W{)|WdClbVw9#=mCYMcBE*{?dUMKb`d#;Da+hwwSoExvQ13yd5pzqLV z_3T~D!6-SUgN1co%+{K7pG>p9W=3nP%Lb>_Iqqz8v$5u3q^+|se|kIHyNkw=w{ceU zk?}k@Lvx+dXj3gtiN9`q?Nkn*bw1Oa?*2Sg%J_0Q*=%6G9C(dY8%eTmuH*UG9$e*f zwR?|ym*=G;Lu=w%!)J82m$NmQ?Db_TClJl9dwlSguW`q%q?eci8b`}IY1 zuJk_c4~lW-f8Y!@5a+gcc7rutL`G(X@;>Yj>Tx!HCC{DVUY5@p zx1#aco2A3IVEsCclIbGjw0|2}r$g^_?;W4Lx}5I9>wLH-M(I3X`_;a?bgsM;ns?4` z{Mh3NZ0V=RvbE36+AMeXJ2UhC^^xon%Ui=KJKX$qf9GivZQ6`^f3RPUUtYg-ciYb7 zb(>2^Tg9{Epwk!A(sjq3)jeU9y`*^tjd(e*MQ1Ey*bGKg+i8P^f16zO?mTkkOjZUT zcOU6h%eozH(OplMrC1zS9?*Dk8frPVw2^tKJQ~SLXG1saU1qC%sm({(truF?v0YRC z-n!O4f7l$S^R@B5JmU_?AzjQ)mqWU{NA7Sx)gr5Mwxed@*Q;A_dpK)++?w}erOAG= zm`>+Xj{KBOoKRM8|DN`u@mUUUizUyZer0z1#uWD~W2_l#IyqLms{tKY%0&pXJkrgR?QXk%r*Jy6q6zeF@ld;DFM*)x7FimB4R;R% zZX&(*uGGcz&}IAOZQ8uYm-IH#uAIf>a7n{Qd2+9KJ3)P2lC{0=Z;r-$f8y7U)dZ*M ze*s;J0w0&`_^63C-|TPsF*rQXRI(db@69ATo=DM|#QD39--Q^bW4c-l9+y#1bJv(} zs&FXuNp<%GAG`M^BY=|k-K*hNz+8|e)aF){1MO5f$zrQ^HO$W&dxlP1dJaaVzLT*W zu1mKg^4-*Cm9HHa!$U@ZM?GDq)nwzXf3ZJhOV^#5qtYW*V9AAl%_BY>T6ww8s^WFJ zr*v)ah&(=4FK4_sm~v@ZHv#(P*nPt$6y}5ItF~lGcZ^+|2U5IexDm_6eb*zNsFp2l=TX z$Kz7pbo-BusTU9XWY>K$-o^Bce~e<&y!YNBeA0R|dX&<4<8b91oz)^<_`@U&y3^h3 zc3BOMdzZ6%6DH%X%`UE&9(~``(ZpDwN$jSb0qxAXY~)HA<=Uz{WGl|gtbTQ^aE&{w zBdmtn(XbxGygq8AFW3bK-D$3eb9H;LdL@fJes_a;o<=$A9M8sSkDD__f8A+bdB?2{ zdoR9>ndJBN`Z2tOVIw0pzIJhNNW0biFkArX>le3voi1}DopwXnkF;n|-J^0fbxI?f zUxsc`ZW5!vGp*S&I=K36ztS%|cev;tU+%4%4CY0)_V3B7PR8+596J485oAXBu8d2% zY+$cVU*$gMqtkgVFN<5Ye{ftmK3tZ*6HW)S-6dGNi(IB!92cGGx?4Q(X~V>{E9N=y z@pU`PW>r!haug>=o%deeB)Xh8)lnK#i!QW%BQ&>0QFKZl=fN5W=QKNx_G<%TzjL`Z z_s-Cm#t^-;zJ1-9ce&)He#Q1;okp2Cwaa&Jyd<>Pji-~4r`vlGf5+Qpsy;n^yOW(m29^3W8_Rn}h4=?-7#^;2s zjGWEG_u705a!EzC&PKyg_qv)Qe(xVk-*{=Y)?HWCD%PUve^~IzwH&jeNwiV_bq(-4 zohO?Pzum3=(A_Q{QMTrn9pTGl>|X}R@5)U0dky!x`}Eu~gWfq|H@&>Co>n%Bb}XxR zB_C{9`WoGw-tuXI-kpqZ)kEuhL(;3jCo{rZS|F|mp3wLr^ zycQ=Xx}e7(Jk6t9G`|ckoKBkkl47&pkJr6)H%lIIf73LYJzic%IP${DE90l`?B?3~ zY`a_8L+`jen6Fth<->77&d2U0drXCWzDRiY_1-3J7F&HwjqbDBWpi!rJLiba&R1~W z6Kq=j{-%5^ChR#i%;l|P9tK^_JRfxAj|_+>9jbfB{pxsnJucq!JvPQ$b3eRkY?WVc z*(td!fBo)N&)&~h_twuRhX^cZ=U)oLAEOLhUcGrXL)g}${ldQNt@WzZ$X>G=^Dwtu zn5Ea*f-UQ5kazZ{bKk5g-8}OPDJ<-uD!fj(v2Fq}9Czp>H0n$DVeLmenD$};MgLA8 zk$1GOmN8xlD+N^St@$lD8qZs|ZhHEmaR&ZAf52BT{gMCd39pGrb-y)>{1I-B)9d&#u9wqY{kYA#dw&!?UPqB;7k~IzbbOY_qpa?SZaLd>bKC6B;f!5nBXu(< ztd%&2yq9S;c~@fU>dQm_dGFPIW`wg~e>w~t+Zis-+f8mqay<{{9qX;1 z@_Lp}=f#a&$DFyH?RttAJGUO)R%Ec)Cg-vooBP|g?%kdq*NpX{x9l9o@A4WNM_Nqa z#n`&m++LJ~`XSMd^)Z#rnfvyAz1CklXRsFCZat})ZcNh&S_P-$^x4nehqXDwe{3<_ z``zdAb~&-EzU_9|&=^mQ(1%?>j&fD{0qadltzRuA(ri{fGbTIc5}8XL|dyxoWSvXceYa<=z6MmHXqk8#(SF3#iCV?070 z>2=irLNt9o&P5#Fcg%B*Rd01&e;miBJkDvRt?Oue=a-%y+heQI-_FCOll`N6-ISJ? z+8Q|@)*EJON>0()UMf#v&d7Osq=8h(rD~kut!3Mx|@F0)V z{*2W9W~9F@Z{?tW&Ihv%E{2&Jf#+Ee^@jaoajyHzEd=Ed@yC*v3wDk!+Hl)(!sFt( z+s{R0%;Leu+O3cMqtI;uf0wGy-l474uSLAAqAW^N1wPyAKcSjt_~N@7dgrntJyvuzBx%xnT8s-49)5lLzMLJM}i>$`)(J&_>bqP z{MTbt?)TrPsC-qOSu4&+B%&UBvT_p?I{{#y+XsB4?Nd=u`xF!ae{_wYeJaXfd*%Xe zY_$*oyRv=u$$E80=gopXx=RUQKh#-g=Fd}5S_u!sN3YAz8|QceW4{`;SD=nhUTZta zS2o97Nlx~zm5lsT`_z;6DJKBv@@k(_5>%UIpa4lgw!a6*baIgZtM+*#)ZW;WL%Q7U zme&-(Ubk`+`aI>NeX0onI)Am#Q^DwwE_%l_2)ar~++CH#6|&5pytlUXh#C&y80u2d zVX2;Kg4(B;0H9&}RF)}K4{~WQKAMXfWW3Y zUoEE&pc*_5^d6E73&5a z*JoyZU#9Q>hhvc4=zrxLR|l8CX$FRcgtS2nGbh-boPrx(!Ag!(-ITFyLW<3!+**Rh zG}mr3pwiXuD?cn<0u@1=VqBs%4(Zl=3(W9#x`^Tn4!}bYV-Ocuh_V$X_j{uQW59QT zE}%5{X(HDecyORMH)yZdp)#9+T9ZSp`R}yvhs-er{j7t!{(nRK=ldMk+ka;oebMvx z_&0m2V?O`bn=Y(?tM@0?n+@9YsbiFFcs2u(Io6xC$DkK;P{ZHun;)pm9Lr>HU-}0t zSRg@U$Ktwe24I4|I7OzOm?HHFFf)%QGnCU19Vfyo?#~TRLn2lkfh&I|(ya-?Mj%6w zD>COjE7sJxGJlG=3^34`a$^!(kXQ>VSMOi;{KzZrQT)1I%z!K~*nAr@n+S~}IX7U1 zdQ_&N$%O-5i9lwt#WOQb$nVclbF57>4M9?FiqbYIYJgL881Mr~7MdUec%@x?K+)$j ztRUug0f`krLI44-yM{1pK7D}6AP^`6TLUu&Z`GU3gMYW)v!x(^d@}p7>(V-~m$LJZ z?U4YRzUXJY1c1*oyIrpr{rIQIY`5WW_FXeS;s+{Pzjcs^7gip}?ax1P`uVH@JEW$C z($D+eY0t;3_`JsoW~OoRPsB2jwzJ6gw$+Ro7L`wcl5WIiNT~H-$OBS(P@h6m_aKO) z09OkyuYYWKu`w2LfuT-KZiE(hjw~Yiv=>iF&h2H+s%4{3flr>Ms3Yq@7pPzx(!ko{ z)+RVymO-Bd!1XBTu^Gwk^TNB0R6tc?1COVGpvl)nl31NcBl1)MhDJB?mdLluUI*m+_)aEwk zE0UKk%c1aqy0b#=9mz0ox&*=3UcZZif-wlGg+fkTQCsTv$j+M@^t3C(`j2rj^)e&y|IhQooRwv zZ`s3~htP-&A+fbFG_g%Jq@`3EllwZO(SA2X8 z1*rBxh%A9>dt1r~bAy8v!MtcR7Ju*&dek%EJl}XnoyZWO#0 z;{bVi4J$tJlD?Tj}U`g^uQtZvBwh*s#6zE6*S~vLm-PeWP#PNITC&c@ibB~^YlP&kf#^Xm87 zw2z;~?K)-evI-%dMemX!8Am3Gk7gr1rwG6^FAJ-QC)ulXtsE%RvNe)fp+0XR@EKB$ ztd=LN-zK!57yZVR@_!4FIc-f?7$$ax9mLBBK|a|F<^z`&NJ=nIRMJGiIaBA&6p9Z@ zxG0{Zz6S?&-uEBurhGI_kXZMfA~rELb|zv^IOwG#YS#%X0+KU76oWSYx`qQ+q2e$6 z>wJIkJN)FHtPf_^hf4TFGON2OF$OKhkU1b+3bMrGocNf>UVqmrF-F8UAvUj^PZciR z%)3vhI6eXdfJ?yBxUiH19q0{(^bh9v#WXEuQMIW5S&tUCoZMG>pr6`mLpU1%*PhY1 z3euyo@yIb~3jN^5Y;J|s4)J(zgC5|6IM~>@lMTzTbLqcxv{=6H*Q%VXMaZwLAkF1m zmUa_wT%KYa>VM)f=52o3w<8-GJ_=~sb(BB$iDMAdu}r%BsrT)*sXG776)j$Cn5JH9 z*ZI3X?e`ImZpFC+i`2rrw9uG%N34i+JI}~+023~^~gDq9O-tkYq z`_`8>uQk8i=7Teu?DdN?+B}!Ge*7nHGPO2m`kUiY_4{vL{O0Z-4EbA6*oQCleEgFO ze%62Uq@f)7nmjv2MvnpI)LeMei0p7964z!6+LV#QnTdg>wu}i1P%)FSZwmd)4s}j= z=9|D>JAX)3y+`;UPORjarD;2y`!3P zBypySHH*}@fEIS3O;Jx)B-hX@#HDCPr z#hE|Y?rTqF@1(7#|6s*baRgQKwQucGxUj9SEgo#whpM=1r#@>=C1|d{ia!4By5=0_5Ofiylia0N)4P863o|*(L zXMaKJnN8^-$xNHa%o61WR@#}GOwJJ?H)LHRhYu!FIrGBtUZ8$+3Awdv=l)8dpB+8QFQd$Ssa&jZEL7$pXQH0_<31WZN#tG=%w~nds z5>-;rjb)#@7JuU;QyAofn~NWOps+mqhkq9%K|gELr09PGE8sueb?_fN^cUX} z_4oXTgS2_*lYd%n+WO=lec{V(sU!~9bUZDpH4Wnx24g|PEGJwBR-QfVhCdxCToO=% zlNcjj`g83 z8PHAAtCpU~xXUZEJ&&*wkTF6*y+(ptMCdl|3v*_+_DkZp%Ise9B0N?)6j64|gcpO! zArwIGG4!Xn)z82;(}DlyXSew9GQ}l-`TsA!O#hQt_CNR{{^Vrw%OC9)yMG_pPwcB> z(Fhp{3c#X5Tmr-*i;FvZ(}|T!MndXPSzQ7%>$oMyX2EXfQ#Idl_8`JYxeyN;$b%)S zrGrwH(_934VgpCx2(Fk@tJSTdvU1}Zp+aOdET0tj$ds1{^n#ULG@2c_&*T&`eDaYc z^Rb)DBhQBwLFvH}C3TX^)_?i%7FX4+msIgcm7n#jmbZ|?Etsmif7?Q7Sbu8N`-{sz z`ET%Bo7HQLic$8_DBt}2$*sz#Qj8jFZo(bbNLV;XS0Jb!>+#-lQ(-fN35YOb& z#)v*p54rrNK(g&(i(GyL6&4~3j~tbsDFtaTN1q`VwF2Wy*rY$s7sa5=y+2TbsAu>d3K+0suZ3&fSEza!MkhmkiA5TBvW=-{G}JD zTKv)aUO&F_`ToCkzRhR!ME~%-h1F!Q7MBM~JNo5aC-;@LH5~RJJf0=Th3_q;#2^~R zxj3QR*OPa>=OED>rGG~!s+cvh+UcFYAT}BB$XcX3L%IS770u+Z(s>=7}lY4 z%$u3YGti&}6W2Y+wN+k@CMr9K-vdA3$u%lH;I2g=rX~pP6udO?vs3iwc*?m)csV04 zel~;qN4IgtT?5=2E*uy~MCms3h+iE}U*0?W)mN-f{%PanAAfE4@L$;IhcmP_86vLY z?JsQe(YyUSq!gDn2xb#RiXcx@M~d1n`I!|m=C8P!6|u7yWa{Fbm*mxc7RL<{M0bL0 z$9V1Axp>(Ks%g%=x{>KL5GF`!qM|HZTiPBbb=b=(jbvy^lAdzi+gQg_9x$6l7Ph27 zSySzuAkEe`B!8Z$7MmokKXy?Mbhq}fkzh*LDZAK#+K^BG!G-MblXp||zxl?-aBzCD z@)TjgGWXEj|AT)ZyGXMsG6Im~;DnhPl=~l?1Ta{e?wtQ#&rMyA6VU(bdYr#_+_zq& zXZP^Ui99{180aE-@`7{$yfPh}8ns!Txu}+NsXq|UTz?E{$Z~B>MCsTs@?Y26FT^Mg zsxmGG_c*~w)UFA}jfJ`c1^ejl9Nl4%qJo15C{{dYjwNq-4*A4jPCq)l)m~pgKK$5_ zON(<93d^;E<%Bg^g23`1E)$DeJ`O7iTnj955PyCa0K!x)AiUyJ2S^PI)z3&Q#-PIg z;KhTA<$wRg^=(Z~tR`;bdoK9vlP7;Qrys2Otr>0ne{1%K!;&w){Ae2Ox*)U31P-=F zAQ?voayKMwoi>6QPPD$V`v~BLp)ar6?@4t0g5~ z0O3vm3tQ-s2gVWyM9{F?85Y^beU}GY#*GuS{jIXw6{Y}h0v3Z*)#s#_c&Kje>f`h; z4L!5bM_(U&?3e!7ujNF4Y}nd=`Gbe(D?R_6KfX07`Nj2WeLl77>=qy1WCi?3pEYNW ze}AB1PUitz1(+;h+d(;kgA2?7o-JbCL)i)<#L3Ju>F{QkG2@S;A0VG$ew}0H`O$H{ z5Y;v{oB0aU*b>y6ONtOL&-QEgup)kNUgYf{ZNx-%g3Kz`AZAn0R)I~s^9h$oP(pyu z>@Y7EL0S0kJc_K~fPp094cc;T|)#@M4MT`4?Go7FT7V2OBUBH$i*8Gmae zDB1#>QmFSJh~b4L!9rk=;$-#7Vc8fyEiTikC2AtX4hgci%~bPw96$jSBteSDgRBTa zj3U7>oD9=(5U zZ+i3JX-E#d_{%5XzkP=<#4Mj9e$Z{zFe8F~ZIzlJ2(kvitb^qdsUejIthX^YrGL!qgghNt zW8i#>5G+7a%)r%miYMm&hBji{y`&ve&a7P7m9yDO00J*keI6ff21H7qf6HU5&wuLt zPyYPIW7a_t?K6e3|HG#4zxe#a4V&yoFHS6=AME9TE0c zH%{G{X_Fv$GXrF4BB4T<5)9cgSsD!bZtA?WKC=QZ>hl>wa*qOi4}$t=KJJ24edm;z z-@fx)h3bIgn}D+#!@$${;pK9s>fYDyKDxw@AAR!VFNZ@S`{1lF{eSf{_r*-e`7dXg zi?+XYYJDq|ca`H2qz+EMF}T0i;o_dhzP@~TKWK@jSTnI~^AmQ3z6P1iag!3v)0Gnp_jBNVVpexk z#-1g_919KXa1^?CeevzMgaQN7`U5^I} zYus|hH~Y~te}Cc}e~;|2~Ov#pD&J(31NKt?mcJq*;uChWHOawbiz zYLpFc(r`hr`_U$q9^xo_{n*|*RIU}MxeH+e=jP542!HdTMMR2KjCP;3g#)m0E)yGS zvHHQ{ksPs{_07p+TQwn_D^hmwa&qp*BxlXfA|{C$x)W2k>Em@mR)M6Ugh8% zE=V&j9)A`DWSwwj!(7ZvP&t8O7J+RwCEb_li=tJYgxSOt+&4`z1X)uMj82>3C2eSC zN@>%^(16DdD&aaSWn#XEF-5aYX66d>ATy#nM^bF^sMD3q0zqvAI=5n!sMpC+bVM(E zN2O8NQmFZ>zw18&o=yo878nwvkhcAbc5`E=#(y_~M#|oy{Q7xLLmz*&-cJr_IWGI+ z%+@YHGdXJB=bNkNf+~*n{&20ok7d|0|F?ht$?8)vlSBU#{+nM@0POsQ*w-G7Mz3KkI{IoI)!ujk?K*lw|92h#>M?!%U)lGEw|(no ztMBli*{=WUP1T?>TGN8#RiM>D(A}H2yw2> zMT%@7A^h=8tZZd|bMKEWT04SHl_vPHqdKnD*@sF8trc!1MCp1XD#;%`=7S9!_SIy* z{i3kdj{l1FuRp6YK4L1zQv3$|3ZNgN|d;wQ{D}H&9p`pT*zw1gB#`ygmb@^yt zG87Pbb#VOn^cg<5{Vlu)IQpm0Ae62NufO!~Prg&Lbcimj_kZ>rGv{ZPPuZZw_mKYf z?*7EycNVjtEq?gZNBST5tj(S2>wkwEsT{0&NGhMw*H;t!e4gnaE`Vi95sxjLlg5GS zyc`q}6r})JdBCf}vM!lj@HxoXBL$oJhZvzI<}OrtDoCyLIu!&YNY0VXqaPn+5kWy2 ztJfWnZ45{3WNgzyryz)hOxn|(^}m0&1uSyzi}LSlkJ2Ia@? z9LTcm1HJ#u(fEffBA?#k-{1SAiJ(;Zb)Ouv4*WeM@%26RnSbjdiYqCt^&dZOXb;4f zPrN?(!UuZxftl9fKVyIu=*U&#wQyr-q)aSTH-7l)eyQ>z6I9)vbbrkbAW{nF;~0eY z>vut%wSKE=kJ$vJ8wNYl5UZ_UCTa3|M;%ckB+JAS+<{U@JO*#pq;~6kQVZfn-$re1 z;0lW&ORa(Yhl`k4P7mE+X#s~G2yOuO6f1(A7HNv+ez|5px=I^*SqkRM!(SrP}mS$}IM$S~q0hCuO_zkHuZ z9WE)_ac+WO7HarX#X96v)%13)x@%@aYgc;f<`w&odfxKlO3l6W34RKQ0c(mEA6wn_ zGxze_SNKmp+~Owjk4~#z|C0-ZUF}k1;ju?dUea3)xNo1EbWwY@4Lx)ECVN{5#}vBZ z=H(`h#L5oJKz}%?5$dmz3B=#`Yt3Ku&(l!DXvHjIX2Vq?dv3uWE~Y0Oq5F_sjUTQ#47O?{i2$0W98I!C~32CZ-2o1k2dwSdt#w%`K{IMdi?S&(a!yTHHK^ePoP+G&wMow4rt>6WmD{?`=DN0 z#bJ6xtj4FUW(E(MLaEH0T1Q|g53B$QKaQ$3$;kl;dO(7?{3ZIf)P3*8!l{U{2HD+u zl_!d8lZeuWhUU&uhH$7x9!e?5tvx@m?wmZ>Lw`tQE+;qNOtf23s&qgg&q7xiXrSdY z6a=ALXt(@j#1@f9B*lT5G{KxPm!#$O&EoC}G$+Uwrm9Z@p>7wVvO8bJ(p;s`B3dqrZRWtp1wQfB&U-CBnjrpFnCp zjejZukxR<;{j?|N_>e^&RF(}oL4q%A!EHM?YMejDW&?#upa+ruqcaeUtM#f#3Jd+} z0!R}HnqzPX=Y*B|mcB}cd1;!6caR_{1PaS?52XI#_M9OI0@FdpdrHV2iL{)H6G3K- z%?Nx1RORc$Ey@|3fpG$C8ZaBdbO8MSihm!gqog!~|D2CHu+JL(aDsxKzZ%&Jz}g`oEt7{}zz8#AdS1~LIDmcHUdIg5}K=Jp*ZjpYjE%6~C? z8E2K)TcUTE@eDlvDakYOt?)gCE3L*!y3!GJ3r*@!G6H>PaoE1rsn ztid~g)y`LxKltsdUAH+xi{oZ+91m{$ASo`3yTq363j;Vbq?I3PGhbL<*|+ta6bQuQ^^>=8IFz zZQB&A?b%?SZE}+~`+!94oMgz#LZgTXLQyV~X(MtXQoGf`CbGp(h}ULc#(%9|j-WKj zTWqGyvon7816ru>P&-&KAA!=4KYYEf<}oHx=`Ml7`Q$SvYJGcq9r%CaSAWe7Wc?3T z&I-}J@f3z4iukr04p!eeCfTVBsw>JKlj-og|JCNRtGrU2iTfB5DMzv9w%m93rd1Pe_5Op*blpiGiEoMi3LL*7Jrp1H5X*Xrzf^r zzH}G!3|XSS_+{RYfRQOGEu{GA3(f52g5j!%A1yT_(Cu#(4_5UGbAo1q=)g0VAWD_J zNWem@W(S_b44pk=r+iXmE(y5X`Vu#jZpR=U@x!D1llj@7VfrEa7h^Sl`NGGCoHm!K zy7W@M79ajk9ZzAg&wufe`>F*k#b}dr}eYqPPVhw(N7gp}SSc%|^%Nls3 zDBoFmMi5Vl7gjC?;d<|2r}k;m7ys}qU=9x44Sl(L(zJ8WuKPVUT*vmjQRj=P8?0C$ zv;<^L5HI9`FLu!7>HoZV|0?G)jbmTb6l(I zC_-#jiVssmts`pl=}+JKQ=gR9dUBxI8R*@r0t{9v|idG z0Wx)YOp%l>0&%vjRYdYiVHGtqH+EEwbyliAnBGg8Se7=*plPQ`0Kz4X%QX1%Gj7`f zur>@5rQ&6X!Wq-&aCL-^)0CZj{ja)rr`^&XI8Y+a` z!I0vsmt5BBJ)ihpe)6X3FM&zg`3FtedH>N-$%{d01&QePxs!n1VbtXdP$#(ybZ%d|P1fgT#Bd(dM z24OzW+xBI@7kL>)3gizmC^pRw=yFPI0<=QZE338 zZM%Jf_5m1S4$bGNpdER{*O{Aq|<953b)z#H?xQYlX8f?~4 zr~4opB^agz_F#bRNz1>shae4N+zi_Lf6iJc>-@Ie!Lklu`ThtZu_WjQ5$eNjBY%t= zcmq|V{>6e%-K28)V9oR`h@_Xxa^xFU1m)ZBHi_IKyyN(A#Cf142nGBctCJ=EIzC8u>DzB!dX07I?EQDK1}cqz z;+L*^W;{RM`3nb<`O#1s#D7Q?0mcnc`4oCy339qHa&Fl92&oyBRmJA% z$qJ<~gnqQzxri0g-*WH(d6d=;8h=aPt=l{O%M{$%Z+=lJ$q7DByw5zYG?O*r>2zhhPf6%_ zvyplQ`E}%^EbM)@HH-9VN?xAhi|!j?Vp3=f+aSese+ZU)u8hWiv6{qlCwyVUWSz<^ zbOeyW!(b37p2A6kxdf{oLfP}zYa8>VHLt#U(4D7~U$HZ{IOM<4`j5X!r*HigcXvCt zf744xUjr+gtbd3S%o3GtOXe_0*APpch|)p%YI>=eDJS((W6~`2!U`ul0%=TC8g{;E zr3u%vrYJ?q1uDDMnCAZ1tkrXyNNI_TKqS&PnW}m76#m463XCh6XU)rslXT1Xer8dB z3ptNhQr}Y{J6)cM@lRq(&T*4@hMGa4j{MTwErQ)-1b=%;8h{2^=f^$>NU!gA#NT3$ z`Z4?FmFX>}>sL(Y^^Ah=9D7`mTTJM;e>28V9o*tYISq4+W>TzUjT)EiKpI_`x%M=b zY>}JmS6^t6MPW{0^6JKF3i3R$^D>(W#inEuQdyOp=vy;P=1D$jtu}l1_WXeewk083 znQDJ}1b;T3mDM++zi`sBU8ggcYeC;>f0>qnuQlDRVWweDv zdJ4Y=kC1BgEFr)+1@sR!RMy$SZg>|zaee6(@1)nscD6&6&PkqLe)i^j?4@qaje<)b z_kR_{X&=XQ9~&oGWfARw@sn0K(auHn?#0Rh>3>>8+8})Hhc%<2ToK3;w-g?`umPUu zy*>2LiDEP?yD>9?w|0|euFR+%O6ffU`c-fWx&z00Fl&-26~4JdOb;oz(dL2)zi}0J z&`*44D`E=W0X2U$R6+?zMiT+>H8>0|8GosyI|8UNu8W^zzDFBvkBjj=M%Zt=k3+yS zN>bcog#0mv`$t#X)zQ)F{_4+|^AUvg7boS;YuS<$#P-p3hE6#G4_lRk`;}Svj}FZy zPFTlG>J1@e9yFB<@@(c6{>qlV51sDS|1cpq5O6Ac4_hnL8v9&M>7^?0?2UdvIDam* zNFO=z_f{dMF-spCS$+zoY`gJS`~t65o<*I!lPAwqSX1kJ9r71khiip$OxD?{%{~TY z#Cn~5fx6{8KdEnQj=LDhUjN#DW9|%p_)zu}tI^4L>7Uo7O7}mQJ$-zPbCnHW!({u1 zbxGkdoigbwU+~bRiDtLSF{0%=G(B?X7C=bLDz;a9LkWM=_>>A}Q<5T2Gbs^PCpWbI zzyRMTmynZC&zSV*OqQN0<=TY5uGwB*B$-W(5Z?TknXd^SfAdQa+#q>oDxU`J#Nv<@ zM$Sm0$rVK(0mRps4Rn}y>$~$;3rH8QK1SPD@4KC}{@>T_H@*JzIrFVnqB5Qd=xk5I zpfc;|5n9v6IPa;N1Xh!TLI^@Za}AKhfX;W&I7mH$M@~XgI!psbJ0A+aUvtYchDKYO zM273b%IN*MfBrIVC|BY6jiH=0TcMovigc%m80n5(uJQ8n<}>`uhWHBi*f4qaJAU~u zI{)Bz{J>9a&|S>IFIgkL@x1Q&45Us?k>15)Wm-_{oycW|8oXvQHnBfI{Rz2 zGIPXC$P&^>)UAc{SbhnZOjy2}O(A{7a|LOWf0Y^PZ;oW-71H>69^gi+9X(YNxG>bI zSx`tHj$*;%L;lalTCU7P?d-^qI=~lpKi2Cv#b^Eb0ne4r3eT0MgbzGF@LbM7cJZ2{ z@3X@%oSm$rPqPvC*kx!VEc&rsyN({Fu6Q*}#chkP3Py^`QqFUn)bDm!S%DHu8cxQH ze`sqW*;x#I!bTI{D)??OPNz@_-^$_nHIXl#t#+0nXAp^%XDivYo7jVybTVAep(H7` zJJICW=y2SU!om29d%xxG{D^oDH`%wQon1Ii|4cjm`v2=*e7xcJ{qz}QhBFej^*cmj zV?*z!WS7-SC_N@|x_F9j{^JRoUG>zLf60Hkf3wvsZtM;-)%eDDn117|{_vKW5mxCPuMpG1cj>-F z!^GE&4w)h4r$gkFBz3bvXaIcKdNSAvG>A?vnkr^eLO=M#c zF*PEe<&XZ%B-Pr4l)k-V%ztMGn-2z(JWA|u=iiKR?Wk9{&YkEm=emD4e|*uaUi!vE zuQFLT-{u`=!4(JbN1i=pukKu}FhtV!kBFE%krJ^@s)fPJn-0MQ!Tpjaf6M?mql(++eqtJc z@hrk+bZeR7hwcwZ2d4amr*qpUzvY{sSh#B}8^5FdU;8e~J<<2RoF^9Uv1Ta#v?t%r zgGWPh5;*191ax}f{?8hzOd}dvfOvXw<$;_?18QZ3rDtiVf9G?TWEmtCwL$l({5eCL zG>vRhy>L}7En0DTe_4cq&;z^4-mmM&HRWU;i;6g8!j0Uo=2U%Jdo83yH$kzCNR(sh z6n4)M2<2FzwsOPUdHUcuIS;rhBSUK646ypH%%z_I9 zu=KNF@4x$N@ez~Cx1jbtz0AWyIW(lEnPGaB+p2|MR;7*DjgeCZB$6ryvSaDJrO5VLgmVr`f5-Hoc_@A>@FD)|poHmd%yLmG~MoeZN&D9HH zgcAflf8VbHN|ohY%1>vXWeC1XLdcD1p2`LmEY4y}^m-qVdK)R#>ENd^yal03y{7EP z`I6EF$^mAFFZN*n!Wa9;H2aVE;Y{%d_`%$=?)>cUvEX`~gl_C{ZFE>sBuEZn&Re`wu_V3#w}6QO>!N302v_3S|b1f7GytkER&$jI)t%iMX%F6%GdHp_P)(6! zf0YN=y`MmKiy6M@ddESX)o#c=GZJqmq@2kTePR$F%A|QcT0&{#m}a(aobSw!%lDwrmOjR^b+PZM zh5Anx@ntv8qK2VzDH>Uv&W>Go`Zwo0p5eM@H{V$0H^12*EaRRR5C`MAsaiA8(SEq% zk(k<%3W&%&#KrepwxclF@kXCntriS1o~x-o3?Uf^e{L0}LzV84XtJS-Z_FY?f4os* zNG{=+(z5~&NOulN43z)(zxRty#=QK#2ceH0QXZ!?UGs^qJESUuB3(P+Wc(s?s}*D3 zRK=9~YrdCz{9X!NLaEb6BEG4_@9iyBlm2_@FQ5INwTyA){Dv9T^F-va(tL6>Kc|#Q zP(#Pp%9xxITt^oALs2CvOJ;@{e^%TB<-@!74kCTEh2}W4s%Qg}2=ui^KFNp?Y90lh zvoJ-za~xcB@(vIKO_A>uM(2;z={bMPoZtAod>nN6p8VTB-mf3CB*M3+3e$jW-(8`{ z&9Al8B7I^5+ZcjnAbi&(f0YF)nOk`?^9#t$eTjBYlPH04Wey~Q8$zKIu1SOxEQXC8 zYQ#^7!NYoPHJSr5UH#xO=mLFwXrrAV)V6!JwWu_uDcNHjGG|-B14c-rZxY7yLntD| zR=}8a6IhJCTa2JD9XF|rcc7#}l;B1JmJ@NGrk!nQ%Jy2+=jGTxe>^M6N#hqrnA&7K zAA;b48Tz@)vIXK*5>Yu1GQs!;kH~64R4~q@scj*lKAx@T&4Z;LFiyhQLDTOFNJrToYrEh0Bih5YsMLQm$|wew_ZrDDR}ghX!>$?fy=&Q@=ff1LHe3J?d53nbt`MBYY zb=Z#mJ`K11y3=#?JbM@Ufgv=3;+z%4pSj#W@HHu;{s>#gkh|@ z6S67ma;H3ge?(K6#)VQ)%JCECmti%q4zk=f3O6`e}X< z2Ttph6?|c+G@RwarVVh`vPV1AGoScVo3WUM^3^27e>5h?hnMy-c8z*N+ZXQVS{G-Q zd<}Ha7~1W1U@0UDaUmbM*jE97|AWi=jT4hz>Hf#wisweuH~e55eV?Iwf0!QA$bujI z*4ug9?Ec?R==FC(P4Y&V$q!*QLOT`@lUJh*h*u^7k!tliN?Q;q^NRCqmV?Ve=7lLC zffA7ue?p3QJ11i@aU1Cy`KRgR-iv;zt00s!Pxv2iWWuRY4Z>vZ1hPWR>(FepEzc)2 zmRD=PO(|{JibnOPgo%nZH)(cx_V_=~9QZ#r)De-rz*1#~!WnRe#`unhL;Z$1+yZI@p_ z@!$GQ^tUe+#s&pOq}8-g#8^2sBmCV_iY%01G#6wpnB!vbfj+Dl;b*QCnOAIWQ=Uy5 zZHcjxa9q!x8y;Gu-g&f+nc$xU^;s&_16kKe?%SIc=6KK;EP)!TGT}`TTI8}J9WP%fXLyx78>Tz=&j-5#A; zrbw^8Xz86D?0XE6Pivje$UYf4h5vhBzpQ6&QJ!b$_uuNN1sTQ#HI@<%<^>^*_z0nn z{dj`<+E2fKuIvsYywhPMu;%}B?D&!ne=&~ZUAhlo)sO@5^YZ)|P}$k>9gKVW!3nyp zN5l~O9s7QdG5QPRx6_68b``kub?DBGy5TBrby!8WHU0;zqHCQ9)+eq$fCe?VqYvH~%*lWJ?u#{6tW$%$u{vWhP6Trqgi3?b|p z7?|vU#2pS_nLYRovk-@mM4f)aCigkGa4+q!kABlV`f8hDe_sFoy+{)CUKjI|C&+Ss z?8KONtH(S%kV61Iqb$s$VM>j$v$VqOz?j&@R#Vs__4#;G`i1V3WEg()f8m|(`US?v z)sdO|#E8nuZe-#LzTl*qvjgl1$%2T=jU(g>>fNWqcaJq;c?dBFn?Ln02VyjQW3dV` z@gMC2doXeq6CvHuVj789WVZ8V-1-TL>cY~d-i64Fgy;j!rj(|gKlvm2qb(<-0Sv2I zU%ar#!$<#@{_L%r{N~@XT)(ri6EP*~uUNg`^2eo*v&%*xD$5`cxQ#{#<`V(w(hzmw zY{m^Z`$orB5vz<-q1nC=@#-?Mji`R|3Z8P386GK(6`r}4*;Nm&V=Rc-#lb;k+p(1P zKCM9|D3|KN2_1jKZ#~q=vvM-1GO#;;8v(uE^&8*ehFyTM61~mtiVuVH07=Exfaa#e zC4##|>Qk4hBl^UUW9Oqm>@GgvwS!A$Lo19v-a1*Dkk|x*1aV^s?J$D~%o56_^9*l3 zZpl`ydYlS~uD-pOU*|H6Bk_n_GFfdEhiQKiQ*rU!px1Sq@4Eikf`_66P;2E!(Q z%7;R(n4)*(B0szvX75$Vbwj*zhZ6KIGL=w2boz3?nHoQRrJ|!3v&Q~FDv z2LB)E(_o)}U=E+Pi@6Vhp-$_@@1)-T1|SSXvdXo3A>U?Oy@&5wv(SjJ^3}B zKQwc^U;lq!JLorW=4QbcJoouZp-#HwxKE4cWWn>aU@AWn$ZSg>{h9`RbohrUQ+bAa zyCE=EEf|w#@@h6Rrl!J}v4G0NfpK=jBcc!BR>1s!&dzT1c4y2n@srPT|0m4HUjOdg za@wd4vnIKd%YN|G{xPN|dH98?iN4ly<`Q@bq~*X;^!^A9Pq$J1u|oUm6^qHojKf8T z6ZehxM?__fMdEaEaE*AkBAD5LzJ-Dew(72IMt`v8x_Aj(V@3*?r?unXIB#o1`hA|( zZ~mQsEm%K)Ij7y+0iO3A%#EP3r7xWm5y_M^7$cw352)y%iXU>}J4E^9MJC=fKltGx zT3>vPHV8|Ae#G<2BpudP>JZ`Xh&;ZiMM?bmP+dCQVpT2h%T-h~y45-h{eCviIoxk$;3$~Mycm6cRG^7U+SIgJt4j6kwx zGId#3VqgnK!YU%NtU#s-d?age(T9=oLd)mJG;V#x@+`~!HjUS=D7O}2m#Rj#zJqjOnj|Kx_{)+8QYXj)GHp5A_+PjG1TS<;@5Q;=iwp>5f@>Tfzt8) zfFWi$XvRC@c!~<~g^AJmMkJiLi9r-AnVdc$MsUX?pSg|wCV5gYs5&xL0Wd$rhhJnJg6^s!vMa+EBnSuCSbpee?Pm&CS_ObW(K=) z2I3!KoWgfJ_6G3}SZ8O_uKp?i%pP;)@#w>xU4Hby*`1HTuXTX=j>o-B(UminQX@Zl z2A}%-D&HLDZX~&JT$M(Dwk)%#s6T9(nIunqLr6MTXcKu}56&f{io1~b#^S&{Kzpc2 z=?Tv*!Nj1LSD|4J7h>Y8loeHpSnS5`Sfd(&QCc2jf_QOwt~#Aj(Cfxg2p6$pKb&+9 zMRJWtHug9^AJQFwDbUY?+WooiJARH7{x4th>pq>BO)~x+HRGWxvR#=pW# z^hdCjm8YdXVwVfa2`PW~+pE7S%X8f;hQr>TjI=g+h(ROTrG9dtW*u&#Qkj=9)V)Nx zVuhJIJ3n?HC|E!`UjgA6GM&yY?{5UbHn{d@Y{M8{P(ORZC44CnKPJO`hC9^Hj#nx@ z45zl}aMeaZl=j$AKYPOeTGpz{$L^}<6S`-E0ot9kvhhJ*ob#RE@Hpz ze8k^PkA-`p)butln1g3KPB$*pS03KlHj z`}(@S735&L7CWQjp~|H=`@0zc|F@53(Mq{myz@8$8J zi>uFmu=Bb+hTnc)ICtOk62$3$1dkQx?egOrmfg#eH#vU*@iyjvTYEAkH2hf?Ahf`E zn^c%<$OsX)X7V{z{u_wO4?vRiD88{s&3OC3ZML$Y$8efmCXLy2d9!?iNmNz3@*#bi zio`Kx?UVpM;1u^Zm9oUwW7A{`pIX9$@DXO=-Mnm!Na&DXrc=|Dr3u|U3Ob^AhNz)hdk z`9SbvK= zn4nB6;;1N>Hs%bh&axcwctyhXG(>y=v=i6XJ0yscZQ-?}(x{84lUd0w*@H{rus*C# zNumj}d=I<>(^yd=j-E+DeVQ0IA~a)3wKM5*nFy089)-LKd@Y$6%{%62gj5=ja7>8@ z2sM9>dM>}uPi$n@oD^A)hw{b)B5Vy)-93C&235@a3l;^2GsA<3-XK!Ha4py`Tni?h zc1;Y#%@T-fF@6~Av_IG9bCM{_>++(b9l8PgrfKdwKf zpll-2DSczFrnEJl>qUU}q1-XL{OC|wXcK?@$&YSqV9v=q;zp3pPkBmic?LUL^cTN; z3vTUDDVgSaIipqK7oD?@J)r$;K&m|~n*N!B#tuYBL( zm_;EwuYw|TN$w+GaD`c6{xxbk-nx_KW5{O7#)U4JaU{nuq6tP!vM zgSR2}VTRZPS zRaf*W^G(%EjL;7%&_9fGGV{bkfIILTF$v2cqK%6uf%q#7!Su4ZM;5WU*Ow5!a`Pce z?J!R)o}^V?POW9K`f92Nt134nJfE!xKTDbm*z;_A3LlsDIo(;!!z_W%VKGd*N2q$GJUy zx=KIA#i#F1^xBV_vp1{HX{&F%cm0bN56ovwJzmU5f0Z_8!`}OH(ysRIV>i0uIv=xV zcOcF?<8m1SOpGV^)1QI%C5y}|`XH{8CcmFOfx-LBMCo1>oFtjG$;1hCaD4;S z&gkS5fU^gqG{Ne&0MP)0{`oM23j7!tGvw;MtHqb8^GZ@LBfFNZB z7;_jQPBh*FZSefJ5U>EV91O5OEW-Au*nW#s6UaN{0#@St{tZ^f;92nmj0DHS#`i;M zmV<``pW6_XFvsy`DJ>+iu)gkn!tnVXb}59?K7yH`%4?4E6H0%c080%3V!5P)-O{-F#yErqCql$C-eY83RQe9M4L+tHB^Kriyg!kI-40rEDwc5XU!_7Qyyi z`$ueS&kXO)5#D<^{}GTC7?1FrP{4apWBFx2-cto8L;5J7%^*T#9#i(?f2m=qf)Px8 zW2$=k8VUayY@~k&QV>T+IQSCx%M~8A-!ynubn;FAU&jHFWKEps!Q<{lC$s9hF8=bz zG2fxRhm(R1TdJx$Fj(M-ZR<`t&V@K2g8${k&K>WIfl)u-)tgu=bA|1vAZ_8x!9~IX z5fWyI%tU8P*Hi3rAcAM$r!U0 zbuDoOynr!*r6Bpgs6XoJm!1P>Nyjja|5E<}o}^?gq?}v^gTY`wc<=`LZXgFQgTeS~ zuo*m`2K9g7p&mRf2XC_nd!XBc*TKuj;NlI~m;NJ-Ufe&=V35bdai-jyk&bHjTE#>t~}o` zNdlsrDyI1uutjUZ6QQ|{BLbra$C%qv)}Z!yEkD*Z@Xt3h*Qr3*b3lE6f)OpRw>U-{@ks>XEGWw^w%QYHEQH&I&t)i$2 zXsH4k&~yVdeOoJ7OcgAsB5if9p{#k|$qbpoWob&;3_YOq(U6OeR7*tMUo=W)l7 z#0V4q!8hF*_)1@O{fD)H%3m2qR6&1K<2V~Nh(k(=SrkFeEg=!96z2@cr}9mb))Y3_ zw?%~WO48N*O+lON(L;d@aKvj!d4RKUA6 z#AWJbDQ>1rK#VY^qUy#q;hL*pVKpYmQ_bs>)pm2T!0|Gp`dpK&t_6RayX{C>v~3_y zPTmGqp2v*-c~#d!^v~P2Az}AC^E_tt&$YT1S@+y7+J=(u`Ofp0(>7nE{mZWZ^-?H7>&qOmRF6TvKm^a*N{F3@suF`jd##byJTe zlzytBtvCFDb+NieYgs!|yl5MU0QnO*X#64}H;O!4%ANjJAIf}qaW4Yna;0kT6pjaqcUfZ!i`dt}+ zv}K zmdIOh-H|Aj-fccF3hCzMzDy0(UKFTNPxs+7wk^n~bfjiQi?piDHlQ{52i#Zsxv~_P zVd@nyL0J=j|9OUEn;dY@x|JoxLXl|h^!rkY337iR9r^nkZT;hQ?tk(Y!kG=}~U4Kk>a$A=xAua&684tg z&*Jap_&=*}&*`Jpjm-&`VwgwPkgOIICX48)@qs1CUq#!hX;J^Rtgwd8-^zxxRnzv< z#r}VE>!rf6`B(k-`?!}% zaS*4wI{t^&F!G&$xK_IL!TE=qMpO|N$Q7L`k_7pxlOaPR4L|W-cJkvN*AMoEAHp}> zUtq#7a>FB#VG-C}-MH_}-+ubnwhb87$vb~B1);$4;wY0$+dA)P(E|%*!l~Ck$F_SX zQ21^X;d)7A_}s~@C}$Hh6tQ=Lkz0&ZRQ!wQ_-)J|@eE_Uk8!S}>RdCe>cf*WdlVe2dqjFW+4!effXx zI_=AM*I8Yo-h%n~?bP%9?u>!`UqtN%bv~jiSJgG+c0#Uw`k*bG%;l<@@Wte1F}S@2^{I2SAAH z*pk>U0<0U;_pTj47?xWX*e`Z1SNDJYPr7n--+!|!x9R&&`f_^T|GwN8($I|7KYYUm zl;0-qGXZ=zi;(Wrsb&nzxBVK$cRrQCd9rwj_EAKtO; zXj?ngtm6ZE*QV%G38Wjl?8`qBie+C>ri{PKK6@S`KV=oR<-6>zYrB5GW*&dKx{FUG zy1wFB4IbQQLa@A~efgU4leeLytNT`#bFBNPEVeoRF3Y-l+q-e(3D!NhjZ2R0HYV3` zp>|*XIwo9V-QQ&kYVx*6QF770JqkRNLB%fv2z zWXDJY)K#^p^AL>e4{r)?d)|MTHUL{%x1G-aqZVL`Q?j9eaiZ1j{y=iH19kU~IMsHq zNaaA=j|Sk3+`L!6N$}iL+37JukCl+-A?}}rSv;b@%8Ese3l>l2g-(I;g?BA$mYX`t zNMV}kr^G-a#stsz^Xq%S!WHUp7Mo;C31cAjk58}%`3Bg2!zi%mLKc6*0v7&9m4GSQ zr7Zlv)5G#PYCgXPgD=QCxZ`KAYcvjP@bNr&KznEw4<2TdhyJ-c8azzsS=tO$nt|-z zzy6hwNsNBN97tHS(;MIJ2`X``pXU_)e$PMm2Cxb)Kjj(FikLsoO<)`~#J79vFV6#b z6GZ;pPr(qzBf^1N6UcvnJc7+(Eyxj#%Uhf=Dj`+yj(T5Tg2Fea0?ugn@lo$>;7k(+ zRsxwK?_g!Hks!hJA?@xH*hsR+|2?damHj={f&Mprx6Au|_g#tK^{{*@$S0N)0>8sx z1>^vh2Co92GnnJODN!FD;QboQGY%U?YOn;>o$xI^MDx~i*j{6pa(tE3hPbr`(u1B!|zkT_b>SW2H&%~_IzI3 z)wO4f_m=3}+0#>XZBLB!7llc`hIP(-i~hia$UO^gXH!eApUaI(}^u0!+SabPmv3Lr=ugg z>+c2I)3;NBhx21|SSk3zeq(rL&!@1Sjt_icJ((i=D<4x`{Yc;adZ_Q|!&792d?miw z^#EX?`$(Vv_>Xt{1VL`wN82a3MRD9us6^J<`4@jpkz`r76ge=3g_4eRy8WypmHOJn zj&BrbFLVG7hCH071xN;o0?dIHzypCxD2b^( z@Z%=-FI3ow5+X@FY)mZ36vrCjsav{jNvs(h$f`vZQbPC*uv7`bV z28Dk>if1rX*f_)vF;bW^{HO^cg^@Bc2s9W8V+SH56foypMW|vv2#pw$&9HGy&=x$u zH%WQ|&SE>{`x$3}BwD zt+|$pm7gw##mZuIVU;woN;4T9j#zfSB$t2nqWRj$K&-9ONbKPKTIWT?C0y;(WH6Y3 zq5kWC{V)I9t~$M~&;R*9oll>y=l1>eKmC{g=l|SQ$MgTOTW7ET_x0+nIJE!fuH)C` z|K+cX(*>1JxS~c5I?NKw=sXbf_x0mNn zYeODi-)pO048xbp@g=3&L$S9Uf2ukAI2g=0&!bsat;`j<|fP z!uR9DE}o3P_6KqDH|~1YjJM9=snw68)5ml?-M&m}!+m6y}jPLMe?xp^JKBDW@PojJ@YVCn{EBBHHYZ*JQ$tNht2G1 zMhETg^XPsZHkYk*8sjjZy(fPkZKfVRzmAJ?_`DiCLgSHK1NzAP&0_VbySwRBF0Iew z?ji@t{(X4h8KdmO%>^ou!;OJA1B;Y1#n%l3aXnCv!-bUAq> z3-;;eM`sCx(I6gwtzR^;eH7;^cnz1ZZ~zi|ClhPpzrBCIZOSKa^%id`(`~a5KEJ-sd+VeYqrld8$H3zauOfI{ey!}` z^uZdxUq+t^SYqXHdL&=RiO-45E^oS`+_QI~;-j~x$>s>bYDD<+xY}!ca(bC*U)$i3 z9goR}T{X+;!p(N($M*4a;*sg_$x{)Tyy!DR4y)x(@h~_t5o3Q``Qv($E$1}#dCPd{ zY~rw8oEHA1Y~mxGn&V;_4~l5^5cor**}a&{-OJPLb!?Ab>cLj4<@WjA&%REkc^Ep! z(${8Br}C}H4gU1xZwwOMT`^yBeWJe%sU`x?al z_;dDh&XlM=28=)Ip(yfgo7>IdoqB0J(cZLNk(VqIupWOs$)_gdBI3L&ZdcUjLdhoh`3+Z3+>fH}j@<8QjwlaY~ypBY+ z{O}@ZTJQM(Qgjzd5&}^aMHj>Xw;}EnZZq87S$#(@BY(zyXN`}p;Ete8I1RqNiULgp zd{}?%P^q>b@4n>*qV-cIgL@(!#gRFb9#0qZM>+zT3w_*)X9peaWiNTK$VT8iE#mZ% zwiA7dDgJPk(k2iY5;8K|mw}av&>N_qC^1@DX*unJUAGTuZkQ8mo4%c?h<0lO(Lrjz zPfyJINg%C!-mhu;ok0Bj&Tml(|C37GdjNkjqlpt+vh&+xGBcIlgp!Ze@Q2{Yz8$*# zWp5~%9hqLw*w9eaKZUV3DY?HO(ta+ZrYmf`>!RXHozx?) z4j3o6!&#Tzr2kkuiR809_zTixQ(hhOcT+`l#*`pZYECrHnHib> z6U5qEFCj>PSk{^u^Fz0M5^jAzLn(hShWq+qn1HUHYC#-a7g1%z!MBH$c&1Vo2RU^0 z+!8PMr!Y?-3@Ix@YVxV{@tU2J+q}RMRP9s<{dq9!%boMp5UI4Z>B$A(xy-lVV=`RL zXCtm##%?Ye(cLp`&Y_IHA7HB|cc;jIbMsLmH^W>ojh{;DY4p{@V>KQ1r9Xd~Ek=T_ zF)*fZr{n|y6y?6^*cI|ZR~MU6Ic2RdSYY*e2xwC7USShaPt&?J9!!l+@@_s zw}&`Db87ur>N7vL$v$c5Zo$G{NPxha6>8n-M{!M^Re=}O7v2-31lfN{zY}pA)@AZB zD~|EKX_-flmO^Tp0BPSQ`6kBzoMws@=Jkmxwl{}+?m7E;FAGzygr2RzsCXt|ay=>M zyCK_~(UpW97KfkLZStyXYC-y?{Nqy?d%NxT&V&%KiW({K)58D+@vnuh8(|ZJ-V{qF)e?Sv_<-c=a1Q392U_|0C9eey z^VB#A$}JZw_4P68EcNmzM2IYXbyNO^I>@CGf(=MLAE6K!S1LT4TMYgUM<(NcSax@@ zjF+lxGe)HJrsKQ}*B7>eQb!&d?O|etc}An#oBMGZVEQR&k&%Ba#=RV+EWLH`YSCa} ztW~O8*c3$`=XsvCm5U!`0z;+6i}ws#PABhvxMhplAtitOkS8)cNeP}kLl{sx5mw9v zg(v5cJW>8olA_7By1}$B`jGn`XWfYY>h3aEaaKq!YB>E(lNi-aq|&vsNM5db3;@uwjjtE*os|>x+T}SEviSF!j2g7oAi2JY{() z?LkEKB-+~5JtV+7gKEvReJXYxiJNL|be_!PEV;h=X=X%+i4Xl`s2LGkO_fE67gc@T ztZc*OlMPCQzU;v_@6cT;(}3&1yktsKhegwGTgTbtF%*9m!xbstz!<1(D3ds*!d07x z#?iPseuvL-+jc$!xc7N83<}u`%wrP#eswSL8e^*wnRwF%^M5XXew%i4wYM$dexBPH z?Jn+Pcd|FNa7bF8mm_89?YGd^%TBuhzjp9a<*n#?4_>>gHrjPx1UB82jkoDnr1hcZ zKbO}V-Nb*ZC+YaiW$bkfU0eId%p|sG97brLc-MZsmr!XADdrX1xTvf{LQDrqA>B~( z2f~xg=5w8E1Ghb^KLH#Yb*`*A;49b0vgh&atnSHiGs+?EBPxjJPsb(RcEHd{n5@XTXO=blm;2%#d5(TerFTWc7cr$RiuzP=Aji^dg#-%F|xv3}%ZzN3d7EgAx@8m|U zy(PbOjL}KUy&|&`7-$WG1z8&L%pVATTz?n8{(Lu6m~*gM>9>H`(1W1?9t<|?`iuMZ z{!(nH4*04~?1bZaj$0=4`amf03Oyt@{3boE=A?v-ji!7UnWU&$X>|kwAR^Hfqg{U= z_%79e^iI)~$^-C=18a-Y+2WZ7Jk9|PTR_w_rd9Xehmf!85sTLw+2so;R9ceKaG%$q`kbz= zbA1d~x&y~?`S*H;CxsHezjuYh=lvkDFk(rZwBt^9A5z9z-`AnqNsLE`&fS0PBY>>* zo~$x7I%Wuc1v~!+$#`!H9&oc(4$(pjPF)w>GgyCZO=IpVy9Pv)llGX%RJIy~17}p# zHxNfX)+@}rbG@NaIfP&I`M2>9CwpvF3=5AGKR2p!lZ0OJ`T|Y^aCe^xt15jXP2K`L zuVO_a%d#s;j5r5CEwtkEh0cHQ+|SJHXfVxrv2NQ5(pfTv9l+=|?mo^2dqF6RH04n7 zjVr2g_e8o8VdIS2KUv4>z83#k^RcTS0@@P3C5Y9W#0EN9IcV~^{8%|>_^BG<;*0x}-Dnn2Ex~5>ur>K%&K7@8>>F070=dTbiE4zz;B1Sk4%2dfhq4t_EzR2`>x!?vAaQjJyxP!mY zVjYe|g>t3x?#txj`&%yWMZYL}!`c(B0fa#!k>l(3HJ!Jf*`pE$rAzhJ;AxL+mW^s2 zi7Lg<__p&&+YdOgS}5dgx{5)y@f#7$6)Wo_>J{SV_5%> zJQdWTvxRBB6-H{H`*ZaP^~cOO9uozD2%Wn)HOw7_d4lhRMdRWFA6c}}v`k9S)lZU4 zW}^wtcS^bGED`}u#+=cxiyEQhp30@5le z!U=wE@*AlmPdt~kUa0r1Ksk2$YAD54#Gql)kAHm;;dZ+7z`RsSyVub63m=3+b3`LloFLj0X1wsXxz%Ot^teK-#xoL|Q5(z759ew%fXRQSyVubaq)l zZgAK^h#X*Z^V?;~4iZpN-dB-=6f|;tevCX<>G+uDyx@PYF(>(Tsir7Q2?w@P(&d#N z^Oz!Cn_v6*f$wa|0}TaS+zuPz8_slcwY?2K2BS_G!)8W@0G z1i0NaTIZt@F|D;5XOQwd+bDsN!@hdJ!5_xRp56`8vc#WxoM0?sc)PrfL-KV4OA6ys zBGDnvcVc00Y*5#?Jr<3L)8WjtR>L(Dzg0bd!GM3E_7IM7QrDaQxtH4+A7RUyh>Ks+ zeoEirDG_l>+j46!F47M_b%pZ<#8MjlT!0rbxDAJW@(~Sf4;>k9MnNzdq=A4`QJ^gK z_@f8Ud_>zK%TkDNo#c0ALy=C94G`JnpAWIE;+rWY6`qKj;@Kl!EEPfJUgME?JYQ2h zX%BymV!xM#r*n#V>t7stydU_RFOI4nJ2>jh_B?}r-CGT z*Im-NXkJUdg3r&ziIi($i{t1{LpFb{1w@mhEKPdU(zj3?=c`?bS;pCr2`)N}YX_^YGep9Kn-YN+VRRStoi;GkGkyi~b0;`q3fa3yp^0p6Nu(=3S7O^Vi zrC&TBq!F8+-THjo;2$C?)$SQ1DsHp8GN1t+rt(!Wwm37s(H0c>;--PVd4XJW{gweaMiGT>H&IzKRnjexfo08f*HS3l+aMs)j6;k@dF<^}PoUW2> zX`FnUjWnER993TW(R&_ECV)!;BSdYr$2$h~b~TTX0eoPaHca8}OB_v>d)ah0AFx~P zL3|JF2L7yWW{peqHRWRr?hSvfn?A9nzKJNHS_0iibFx%NVFJTFN&^*gpg(YnpE7S>znrAi?Ov z<+m`QPa&sx$LavJw)-8ASXKPV9f$n9lF_C%-6VL7%i=}G!I>Lh6#0Kp?{dI?Q62Cp z0mKz79hHwP@@4OqsswQZ-o)Mb3F)EO1s6GFDLUMJ^2*R#Jgd`F*tljnvq@q4f$@%9 z%$Uj9S&$jhIdpW%$oTf_Pi=CXHKoXilxxFK$Wy+u;p{}vjfE1oa)xSOo?b1`uUyi_ zzmW$l$wxqR`-r1Q^F4pseAT!f#c}`mXvy{(-%*Whf3N6drPUPXbDZE=yli95^>26N z3RSbtbntVv7OB;s=|`~0og};7ylC%Plk~>*|F+YW*KL_Z9(ZKP$B?SL+Ao*&$ETIS zD_2Yhu);{8p~Y18Ne(^d8%UL2@?~5Yc7*#eoQyxjk@4T`UWVC2c>>s~h^KELrc^VbM zu<)FS*S{`V>M{ViXTn?7ZP3i zT{w&ntVGx|rUHM$bl~Nv?daB@X8v$7!F-FT5Wlt&o9l{V>U5Zm|B@EF;oY}c3+KfZ zaRJo3)nVWC=Pq2@NF6Ck#ApugwiWH`A5%uI5C|t6X)Xvo+q#EMeV@ok^et{Nc!(HAG#Y0OA|QB zb)hLl7;W;5u0}eSJrWtAU50Zd(}M5scpN?IhpXhfdZtvFbanBeY<%*BWeV75RT*025Y!+W5a(ja!87nhE{lewJM-SdSPQ%fKL4- zrYQK+y6;B<ipt%OYw3YKyJNR2z}o`%ecLNEN=_bC+dzFFtZ^giTbpx`|P zMpabHGlHq_`ugMybM1b(Zk-ofA!xDu)BlHhemm*_&=#1J=NXz7z^!`@~Tu%;atco%#8I#A`Q)7>owZ zvOr1=Oe5)S<*U(Cw@^Ffe(0u}9Ec<2z!nwnYTKp*ZY9{5bb3?o>R1x+v8yuv8BT;e-?sy^?=>8{qZ#@s%ju#;4Si%{3$9sRRJj&)lM}x@A@<~{L z`?{cgJ*dz9^*2lUkF`CPN;s9_OuQ0&Zq3$_ zI_-3=VwwhrF33PnCbAB6jEh62Q$id7u*i>nJIo8yY@}Gkd7=by5eTM#q1-))bb%B< zjUAqd+z?sx=1qTGxUYvytT2WKg2lm@qs-if1};2YdiVSc;S;Ao$uXVol^>156%<9P znT#JZyy&Doky@RfYrofeAk_TIH?WFRG~mwDyK}7)ch%X5S}pC9wCEr%UH{V-EtPX^ zrx38Gz1dBnuXG+AI%A2>kp<8tQco9^1_~n#k*OMvVGn<+h%<{xOQ5vpz{P8(%yLyn z!nT?VV^0uPolBl|ODFZ@HJ@}5LwnT?q^LRk{{Tc>~7 zVZr_PhCV+b;B|N7PuAxCMXwM(OJw2U4;CNVeTOQyS8EDSy(71b45)2uSo6-Ex6U_7 zY^^p2?R*a-H4CCVjg8SR?Vg-Z=Q}S%i)yXiVH|}tz^)Qrh9*6LNE#2i(PH$`8+yM0 z?H>PPjJl9S30`}cF20g9w4_O`20(v03U~y}V9$3W-7F*E;U#lP;_5e7!^;6Qa^ob3 z3NTM}#YxN!%DBTRgL0=ALd-_;CPc85!J~}xoMHBurQyRgT_F-eqQ=%oH|G~BQvmxU z$hK4UCc%pg4vqTgE*ASrbtX44o^HB>#;I6NBZ=;a(26%sE5V7h8 zxh?}~yVf2=9_jfyq?jVqWZ}`UNr?pp^aHQPUj?J7>Zx0HkuT%uaBS-wcu<}BxML6~ zj2^Afge8eQW+@?ZLv1==H2h)#kLm$%6Ix^XlK}>DD6NwyY{}ruFbLXMyopIG*8aL+ z|Hj?JC^t5H?|S3e3Dp-Y(AIz2Zgsz8!xbLievmU8oJn98@W;?(m}QfQ`W#pdZ&b`L#Bj@LXShg#|Ynn45RT^I)@km;*fv(Oe>_ZNErw4 zsC3#&BZyif=q9ls0ta_{K_G#^(Zt^X-8fZ829&0I6y9Uz8E3&KR6UX``quzKK)$~c zxqr-w6{k+?yH^0XIpLRsAH;~>jmW^XE-9cv8$IHRq+`l|2*IkUc3b4`PClBrtDOj8 zfm~MtSWqzgGpAl64CoRTha^#dmp+Z#xKjuktv%CxU_eIuek66gc5@(#h4+yZSp+&bPx`&{_)kna}<>sg#`r zXB2mRfoVLY;X!)hevK(jqZ-z|y>+@!s5F;M=!C0#J4$o83KHsX0)Lu+J@p!XK{`f? zRzmfq26NwDf){pmFJHIg)q$H3ecLKy-rA7YNq8cf=dP>3=Dk8;0bfM+G~^45=~8rF zECVUhfxoZ^l&E(C&tD}-!Jr4ve3t~8NI~c|E9g^Hs*rEyr1XjefLnSwN;sleLEY&c zhxvS%5;Q%-c3PDA9+fPA{8Dsuk8dY}`>Ad?!{TFXye69Vn06xb6LgUawnU=9(KLMRhVsG~!x=D(U70Mzuj$G zPP_Z)&5tI#;{z0IV(T5rEThFhBBPKKUIbP8cpoEel{~v|KDr8= zEmyxj2%I9hf-dbHz|R{Bcg*{_vXPa)WKp`b1xI28-aKAj4Jljzo`g#n;zOe}&NE$1 z=i!XB3`5hY@Z~4v)_+3(0}uRoDoOUx9OhUpOJ3@b+&6`P@C7v6t}NMY%}i9?zxDy%y`J1gnSV)p6x4ef5(Rg+9|(J) zJVxh)4mO#d^{KjdF)HEdQ2BiP`rRN#!pXIrq1&}6E!CRzgePvgq;3Zs!RX5=k?6fK zs?`6=t+K^`-vz>wFE^|PdAtt{V{!hZb&G!0>yF;oa~n zCDb)fMAM(T2E*1pXfkCg#yU!no*zHC%#R-NG*(7kO?YQFqz54@=*sg3*y~}SdY>95l;P%WH3r^S<5L*LSEj!#=Dt?0Va43X~(u2+FWbun}A)2X`>x(>Duq}5y~6auh*2PVx)e5s;p&vEkOH=X!w z4e=U(yRcs!9&e*!Y{ThOBZS_Js~W5tkhTjHblMW7Ul;q)&!oj|>wtrN{JemBH_-L; z%No$iQ-EoXb^0T``}A7^MPvby)0*t=8u4)O!@^%H!<+cJbj|YPQ$R3~tvC0wi*`jSvi}Q>n)&@wXcr>ca3yq{)Djb> zL`;e(`2wnc3U9!wMN#iOYtNszaNTSef%fxSN^dr}V@hZ!H3RA7qEv>++{jc#!Zb6r$7D^$ zs~P`!=|)b!XYks}cX>0dy;?sLZEX;f7L1r+ z3Dkhz@hhv4SSmKF?k=0+giCN3>Q*aC!{w~)hz=6Ega&Q8l-138dbr2Q5J7D~3)oD=TdoR=tNf4TNLg)B9}-qhKKUr4 zM19ek=k}qZqaQJ%=&P`_2>V2TmemV9%+yIjzn}WV6$QdbFv`24=d~#LP8Xh<>k95J z7n1Wc<2j#szFO1vBf0Yv!O6*pnJ3^rx!|=OgQipG+#D=-@A|QUv*4JZW%AAfsv^i@ zBU{%SK@4UuO+2q}MH0}+57H`s{(&%?5T9-ax-WX$*t#g>zBzIOBl4?%Kub!dkZ6qt zWKpzK-d@Ln^g%)B(p`<*a6fhCJF7{S^;U{co|WB4n43beFj*&2)v+hb7^@!4TgIzo zwiAA`$s2$g9U5MyS66iRZ6;Twd07IUNB1LPMI64gp#sK8( z`rGloXD#2*dj2ci9}l>HXrU%6X%E*Zbt1Mr^bhFA#ktQ3}jn<~EBH3QEle z)HBSx{JER#eV7~be5IkG+R$aquzC$odHo1|^0{_bNYvpqWSe&i71bxK+duJ(`#Le` zfd7!kJ}65oM~|+3EI;(Ps2Z+j!2hMwN@hgdLT>O#x@XHwnMsQsD*#@r6`pHp3bwEpmsF;{ma% zaD=!g(4MD6xjy`u%(a(?{|49BtToMtI}>ovz*bi(quqH6*4Hg>qKZ7ASDZ!Phu0l%j= ztvW{NC8$KVp*?5+qFhD=h^UclO3T~p5W4+lYefc@f9e^3+oMZJ-5m_OJ>{8R_D;V3 zWGXMDaUsR#3jse@x+#=T%BX|n{mARA>D}?|AVe7MMHxj^jBahsdn$HnPi>YeHb4oj za_KHRc+j7w?!47e=kkr;fnmss?Qm12BHl8AaTZpIcoOh)};8B(SzRtFK62fbz+ zlv&X188b0euK?*2FH0*Da2?@+gsCM9;t9a-lFhBg=lbSG{E7K!DzZ73r&wUA2nU-v zf(-~92x%bE*s6QbtC<@)Poq^(CK2G|NBR(l$DSyE@Wm3(#US?!bISMGS8CV4ex~G? ziwP=|zNjtXi){UW4nC{hyvXThoXFCYO%}v%MDK`wtIqm95D`g(Vm&~ZLt-{S6sAEc zj~vPD7m7!Puh?yQU+nILkO+5O^d8FH?uhmQM~zz7T0qzjW=oy0woR#F(jCSp~rwVz@- z(}@n8J;5h(P{mK%uJO-0P~LP8GIxiEMxnZ4;-?MR{oF^2jq%ELckQ^AuDjOsD|CYt zn0@AB!{T05UcMHaJ7;-^ zb*m21HO$&+#Kok~iVf4yAE9?q;PSD5vsw2b#H7+-Zk5NBPUjvjB>U~4F|uk_b^PMn zRCL$D@@nZQNT)DF9fZ)k&A(=GF*aMgG4um=7A5Q0lBKKqRTjv}w|bINE#E5cLG@_R zBS+pdQ2SSPt*94z-RgtGXu*i5JX0cXbyhd1rR*9I(Teh5hXt-g>E5A2mMC6-?0Tm> z?^A>2%F-@R6C~EELaIhbo7?X6Jjvp;b@VWQO(S6Eo*EZC&*xAB<%`Wt{TxH4Cs6Hix<8|6sa&to@VucN@m*K*Bss474pFzrv@Ut4N?B+aErcI`#vJb(u99;MLH&-=8{ zD|4|>F1GN@0aLr3%g_AM(1e^ROWyQ%lzPb-dJsh~^v{Rym!^Q-UMHP@>`Kh78yv+6 z3Vx`h7~#zFh7(t{`sbCvCpY$79^G8>YzG)KL$GDvJL`#Rg6|V^P60Oyv0+64W1GE%WAoK)4rqi72B@e_>Gr}0G<|nNd z6N`Mfs3Vy!!{1JSzdFqx8ulI100qZ!(N1Kk*6?Y%CfCvTxXI6w zcstv_6$6y`n^N|=Ie|)X=v#KAUnsSmmE#(GB+<3y@-5wz$1i@(RA>Z8!at=w=(>f! zU+un($ZeI%`iT~Qe($LI@FL72Qmx1G#OLCMt?;XUNjG+SiX)t0EW?A@M!oqbu0t2Q z?sEx&bk2LvDRdop6y;%JM4C~fWp{5xakx!mv>$BuoO(s4gTRvKP(FjR(OlEJO~!hw ziGQU8Is+0{+!r?t=h|(u?h=d_BhnhBIYP;BL_M@cYF(CpSqizp&-_~b=R~4B=T^{a zZa)${>v5Ejj5bOXID5VTiQV`vSvK2s0#JbyDwc%KMcp7iPF$8?l_GpdqoEdsR~a9W zc{_cazwXN-+SKd%@*^9l0_ry6m;-$Eq6GEet+q8c9*cdBAFCI?5Vi`>P{yG|5K=ii zL--k(KaSsjU5(qt&EiKm@WOY`eIKo`+eo}J(66r*F_!gXLtc=7!SWykR*8g9g6uE}FwFeAvpkX2&A4myitF6FN^t@h z7*vTeFUp)kug9zkJ>O}8-(V1*r!{8M@`yfvk2V_GE0z>e5E>$>4^yl+tlf;PTKcu{ zN~>ZOWh2LHP%V}dDu3rs0i}fK{asrpP?*%CsAt&uK%oz_K+ZOQmx!&K(EV&zIGo@P zusuya4dZ(Z>p~V9qH06N-(PR|$;CfQISt*XN-147!Tc?zK@xh&T$Y%wrjZdlZV1MI z3$(c4A;0)_nKWhAyc^7u%1h(7gEQpa##gsjip^uJ5nIF%ot6bHvPB87nYj64Zad2k^`HUXMK%XC!L4=cd8^d#tLs%_2aLf4?T^L1tn-1mx0fR# z9aV!EBZT4>Rfx4;9%NpT$*J=^G^{ayV!VBZO1qGv`FJ=LEimI-*dA8zj7wQpOY2h% zkXHV?8O|%5SaJm-J{~R1v3X;Q)ToNZ^%FSNMPyh*C~gXPIIf>OM^6xBHrt1!%2QvH zpVllnf?e%B;&Sr879tO9$2r#5Tlvv8lJtmGW^BKnP-+g$_Tt)qY0vakm)%? zRYApRwVXV>phbXlhviH8$JTl5Yzs7r^S&gT`w?HAaTvN4usEAhTK0*TB---jVN zT)NZLMK4jnRuYZD`$O>5)HX?fJtJgz5pAJHGxv!iBN$mHSNyrjo6c*vvbqi**m-~Z zLqErh?9UG$UgRY-OoVkab;Xrd{<6Gp1 zXMQpNE1|HQWd6~@OC`f}@e~EC-GfPFFZs>;b4EIPeZG!j#P80Gb@c^*ZlNJp&q+*i zL%!c4*RMLxIU_V&v;z|$6Gw&G%O;%nuZEAuF3?li(zAV zpBY|Goi^&<^e#yqfQjqEa&0EQ4<}8NKShs-&4f!ds58kQxj<)=^Gz;hh?oj^I&CKS zYU{rBJDj;bZH~Tq*`2k2dvnIkb7v)iPVQbQR#G^q+`3E_`Ln%-JqE}=1rb0g!(bHK z(FJ7M@%*PgD&U+DtR{iaD2^faB8M{ViTQZMMSEMtxawB97mc z_6z8%WWIWJ z(N0Wmmlzhx?5@Be8BHdVkbt8VUtAH^pQKs5A$A2;V9@@5=B1l|9O7jZWQst8=xK_~ zW~X()=RXoM4k+JQHdJ^%9F}sirSQMkct97>UgQ@L1d28w%n8#^fJ z4`wk@XGAsH$p!qi8#R1a^QeiaDXf+XvGdjoDx|+jCxzn6PW!6_*^R5W?))BBx#sfblFnK(O-mD*qMKv<2mTP z@FNSk(D`#MCRYrXbM(xgssgBy4rIGMCV8CVu#__k(=vJ z)mlD|YF|JXNxwElsx-KBo{s_lSkAwDyD*l2O`!4jIa3y3KpygSrVeGSFkW1YXkvR< z6%MU+**uNi105raPwu?4v=iAca}Ui}YuRsDE> zADLv0xD~_kP06;g6!t_bbe=Jj(x}e{f z+$~x?4X@u{>75Um7$#t#XygxOrIqHX6N1DIC>aUA6rO&qFk81gcitao)s-s}323wC zCP%%B(wc4d&)F>n4e|la5Z79t)oId$khaDxCan;z*s7CyLDC0ac&ZXff+5U*M9`%_ z%)jB*5n(Yvz$$>De%i6iUlD+~(*p&quICGc21cuPb}`xw||4WY((w?s?+zXeuXv=@Ie1ldMIH zwnp95pm1kc4TlLVg=$mza~c!$r!j{X6m6#eMp+s;U)PplD1BEimuu6Y#b;zFGsydd zaRu-~p?-Z`d8k%Aw3kVK`pZR6?AIS7LeloDJ2hO#Tul-k0{ovwt9NR0Q9_rHSV{ah zq~WQW3#SNA?Q1gX;k=uF@lAIGuCs7a3$S&R28GOiyXM!4Kpfsru`ToPCGPX@eO_|e zA8%8!gfmybZ2ibkr8%#Oa9xfHV2{|?O1f2_k2LK#cNn}0rEBO7_LpW zYuo5Ucgw!L$(}X(R;wLS6aT&K&D69h*73OAo&9M!pYv5gD3sd?IB7>6`zc1`fK4oa3W^PQ}3J~*njga{6M59Q1hsM-- zvRZ`Sdw-9nbcoM?)v#4}1WLiURxVS$Rq1TkR!ZH)L1 zHN={rCCrWq@1Cc7e8)HaHg3gdWqb1*OEJ04vxr$;m_+Y}Xd%jECLnh$DLK4s^WxTV zIy$mlU0a&BWtjw(xm~CZRknio;kf+DzHH@T6W&HPEW0xaYlGCv{18!JEN!dpC{UZ& zd)tNqiUiPqzVnq&0AnvU9uXHfAMJBLW$s029!U6B?YIH)Nh)_aDdOYxk@Bx}W#Wqs z;is6qCA?Q(Dn`9><5{cU> z)>7JiCxNgs_7)x)qiW4HO}e}76W4M7`~2x`dBRqIF(q!S;iia7&rg!zmY4C2MT7)I zzH)^valPvO69~^fdnKBtgfnOz%mV?`l!#KKbGI1@B|6!z#F+dij>c8zTH`oBq!U!A zrCisdmc#H6xJv~Lt%X-But0|L~Q~;^=+8m_oBQII5HfA7klh?4nG4 zW!#*9ZxD|oqp`FXMDT7Fx-da@_z z^*xbDE=)jUsP3b@#=^!cNY_wJ@EaGZ=tFFO$vB{)8LM96*}U>4mmsI^+ZuFAo3CeBMiNBd{X~V8Ei*d+8{i;5CdmhQN(8`by zZLkwfEeW-Xo_(MUw>4EIA8Qm|tJT}Z;%Y^ zn+VWp`21$45j;WFoE{fyUz%+ZbNhKsnIfw;WWa=V}O{ zDfvLtB)m-lLpIlO%V-ejt@7uLVMJG9%=gDAnstVdgQIZE+X<(}mn3r( zV9Q)-v>Oai6=aVX5Yt?P2Pl8z2m+A;fzemCXGHRX+q0hJUy~5$%A47U|RNCF{=x9g_W z@;B@JOeys=#lnv1cUL~G(_l5C%D0Fo;tVtm`@{5*+d~+o>1irwPLN{?kyju z=Y9FXuGpY%j0z<;*r2t|CYV}(Yr=5B5eBgA&8n%0jvWdU`F{ZH|HeH2ha~=A|NFoG ze^Bv%=M+2tk5l~phg1CC|Fh5cKZIi2f2lwJ3j0saAR7PF)lS-Ktz;f?^cPQ>|C*4$ zw0x(T5TA|F1o|U!yT!j;|M|4TT1_P{8FQ&P7>4DVzewAkbOkfwmZU3xsc3TaUw;07 zWWN9U{}SZElYjC5tPRk`Fzkd%4W(VloP^>Z5E`8R86x6;sl-UaAz?Eoq41}0_~&Jl z|9N?Ta#!nkD#1|6FpN$Ay}Vfcweaj4Srxu#i20WqF#H?&Z%{$$igZp#xGCkoG4Ox! zl>eI~Yuu!ejpC8~7{+aX{^CjhI^_cpRm^i|s0lF)s~`WQG=C~&DWY4(@lf_n{(ZlH zBQfLOLsy5p)}+#0jq(m-2^!t4)!cUQS21p%5Lu9eaKrJqjE`r^lFIm5;mrzx$Y-u#%ck;Wy0D+elokW&+H;U{2k?@Pfa^+UO&@ z(KIUb<0y+UfS>h7-@QZy@JCm%qx+QK?OWCBV(Y8-Qq~a##_v^u=+^Mp-M;F^_!3o) z^kZA#lw68xmDYrRg=2ib)Y#6$V3k%_&Z}t9aZ2Iv%J_ z*OmyuS>m#aV>=8Y!UH_|i$IG(6D1lSa3u-2H8TV}Gq(UuW1vt;QcGp`0gI zUaYM~-+92XCpdHS04HZQ$P$y_pk-JxV6E}aBIlDb zz$oLxtL&D}gOns}(&X>&*Gx@h*> zj`{eQyh@LMw%5}YhXVQtqpR*4_?aZ7ntYx!%a5E4m7Gk`g|a+pX~JS@8lrOeVlU~V zT1)2J*fnj5NJ-l4P7YL?*mE8eRaL?_^kVJE=v$v0W*B377Jy~{n>pzQd5i; z`-kld(xZy6p1hQNDNrl-#x}ZA6QKEJY1AWTR?K7ng-53-1a+=$_Qc<2Ey`urN9CBnwcMVQqJKNH# zaGwrl9xAZw z1aUxdok|Z>htzccL!ud%RyGQ_cCHrjazMOANg1Zjup)fzU$rCXD^uqkYbbkmfs|4TjHiL)= zy+f|GpMKIcN$tGh0%`?Ubh!`#6eEtMP`r0^vQ}Pkd`VXI2E=K{@7II1R6Ppz7Pc1GJCq?mpR4|Xq5zz#Lgszdv<7qvu?HZVQHg>CvUxMuFUkDa3 zLZ)8G;ZD-Vd^vUC-cp8-87OpG{VdwSCx-6&n*-=vqUhQWP%d-Vf0Xq$jL}sBJ)XCK zN+%La5suAe~*Jr4qWoLha!IO*U3ipz{+K%c|ym7yER zNkYRW30~X+)w?vF`}F7Gz5C1_4K|WqU2cr@V&U%}fyFiVbs`mGjc(P*UT-I>yYG(~NRFni?lQi2`NWpl24xwoh9(^c|MBzF{ z=_whg5g6DB-{VpIuBGx4_kblAN7(*Kk|ra~b|`wk0rbSqBGhLg`fx$Rrjw+qtc;>H z=cS`UO3>zg>Axz@cC|UNR@n}`fy!WM4S1j?2A{8*me!-2$S|a26=KrSV5bsE=(dj4 z`Klm2)hAu7%ux=1qqsBh(iZMarUDjI-Y;kShXbxQmPEe6feqhIogIRMwUZ{=ua_OY z(TZfg5F$-i=AWY`$l|>rmfH03UaM|p7tw_P$>54%>m(ov-&&YBG*SX3cG!Pa8&KZI z>BjB#Mnkwp`iy7pba%pF=nOhtXsME{xrJrv2K5_GZ#15MPMi4Om&NZ>gRb8P#SY6&N7h1u8ZLth(u;}xYIP(@GVy3|mjzrBK zj5HEaKBAO&KuAQD0axYMbP%80PzZ+2f_BX3ov1I)4bK7UihZo#?CdMs4jIF`) zC@{#`7%C7nvq@6$zhXwx6ak6#z0dA|LJj%yI*UDj$pn^DOEL1(Gdv7? z9$wjhuRa!Ys?hC^)1M3WR}QrV~2zsJcPu0qn0M4yZGZ1 z1^Fe^T+AEI^UrFG5LZ(f3xWg&a|Mz}MYx9#e@2YubAe#sP3;z4^1fquSTjo$PadN#2<(T54UzjS&Cu(>QUR2 zQ^Z-72)k!}jr3CB2p`a}1sWEAu22?`Yy6f^1Z*9D zec-X&+Xy|ZCIJb#)bvJ3j!R(4aP4#IS9T*h`Ag%`3lmE5XOPyW0OMeg={0BamWMc8 z!aD6w{-pL3X^xW!x5qfeZ`T#2RQdvx>Bj=yonnOrfM==2G8>X)-R}D(N+Jt&;3+4eE@>={mMO#bu zsdb>su;I(F5^r-8(yvvwd-iQYv3gX9(M?~vFCbIw+%KUEgJId(r^}FWEN2maeUWR8 zj%lgs*9c`iq#%ht=&~)H)LA5^DypOOxgU-DBrdLNJp`501&alb5kDgUy0gl)gjyOL`mrJR&D7lRBaI& z0nlZGjMf(0``~04w2~k`(~ko(>{>e3tfh5k<@$vZ(UGTCGX1?N9SdvMc7`;Eh z=ayf=J2kqZ%3k#0;?oe9r^!tuI1ohxim<(;@#e56V+Bg8?^o`)Sx6dh-s=vlRVa0bJiot>OMJKrS3l`eZm7Xr<6SaD&|oO zA;N}T?l*12<^};PZ2kb1sGJ$J;W5QZLi1N^oo9v~A&M7TNy2X^o0<^JW$g<3VsshT z-S6ELC>{J=XF15VjCw|70%OtijQlc8KbEqFf}@} zRC}Aheb^^en35s^4*1925g#g8$X>zN!L5Q8So2L6x-Sy%Ui&R;5rLlhBfS< z1uDdL0J*?l1OAxQqN#*X4CUVNq2#J#f))0t3gh$A+0H;|3MZIWm%E?vxp(sp4TuGY zc2D5l;z+gBLC?p3XXw$Lzkx8i9G~N?GS+X+U?L(0Mu_s`&u$$9D-t?_&m7dA_ZYN% zHxdm*!mP#ow}Qgg<|w3h+6$P8_AW4C3_JJ=g~zK>91?I(^~Ki7CHks5z53i7axn4o ztpdqn2tVSI`*(ke(@=}~JAkDi^eWz0-zYUrF{|yg+Ys`99n6M#(a=s@S4^)2P(QNE z^8oNv?@}F)_vlF9i%GlvPIEYUG5Wrpn_L^R_z1bdLoObnmopVGCPu9Q62T*!t0We-Ui-_Aygg5FiFOll?bICs zzoj7BV7=0Rbot!8ztbFWtP1*hkyKSv#Q)ZJAGBGfD6#* zDQ_nz+|vB*o1$5XwY{z?Zi%Qd-%2n^HeJDwk{>P>40brJ6w)8=au`R?l8YA|`n-wz zs|PrLjfmDsUOU|3`!PVSMsVjMSesYwUHzIQQYSK!1q?G^Q>^cxYz#0r-Xmi@`5FwPm}1R9eXG1$Jfqp-$9f=W~u%MMkc*-Cww- zX$!Lge2wyf1{Qv{d50i8F!Bd$n85AFk0t;^=lqreiW%(Pi8^w}3Vj%AVQG-@LamWj z8CO|>FjFW*n^l<#J{yJT_|%s`>-8zN3IhV90W~jtjVFo>m?_W{77`K!511W02^Xw? zx(+%pq7aX^4sD~vL_CwDI9AX4TpbL=xi>05W$wHxP`ly}g_N!tjx!lMtUUl_qPMp(l6UU~E5SXMc(ZkCXx~K#lnDbJ2N};%v3S z6dFleG|m*ZlwG`7J9FKV)`}|JRo@>{C^~21oq=c(n>LD|Ue0RuxKqF8r;u`gW1*%Q z1WMQ}s>H*@O;pnXOAV+KByE^TR5Z-FXd%sP`Frr1WZtL4v%BJ}qrjyy#yC7IUkaJvA z!f&l%t8m$7gOmNuYu6={zi1JEIFsJHLFd%hy)e^LW_Pc80W(3d6~k6e@HC>*bT3YM z*LL(CuYjOBl~RzK`>58Y&5P93H&YM@X!{U=Y;w)~vt)m(kjr&pFWeC+ds+%V>(%eE z_N|8LuB@TLeG}a;ymsu67v&vK z35!8I-(iBr2a;^3yyg6V5w|G!ZcOWpSrCJ5zyW)Oqv6Dk+B-twPZ`Mt$_yeL8%BXJ z5lXWx(l-z8TivP(LL4S3xvo**o8O-m{OF1739%mbeZXYAnrn0=x6H}sQhj0 zaE-Sk*Pd(niKLlU(iKw$%frsn{07^Uph74aWDC@Wh{0RX3dBM1#lX7ikZ9nQcutHe z_;cEZlg!v+PQ4E5?gjCZat*mL3E#eV6qTX9+Jw3Cq{h+zDrYDq?AxjA09;lT3K1nr zGL`MLSLHSxL8!8SLF)!E4k^sjYjzXvYyz0AknMbFr1+%~{q%dmu6;Glq~Gw_4feO( zXX>umJ4u(hc6B7u-Tbz+^l>-i5fgK=3%csN3CBm(3qsnFL%5&pyqg@*oJ7mnS z^xE%y%mb_pnQsHEC^cYy!!W8VFqp$Y#CJWPtC3fV(QkNv7iEoJK1*QU70h!Mf4 z+w=CLm?_=sw)WXyVf{Fkk@yVo0&);caX3xM0}A=gYa7$oxVIVm{k4bx{wP#GbX?pn z!tlv$wq3yQSV5dwq&KA*WtS1}UDs8rM4CBA@*&&eC2Z45orbR7=KPSTK>UF5my(?4 z`9dMN+L;-DoEXJP&wK@*yf=B&t`=cNJ1b`k8bu$^uOk~o4zH9egfD(fl2H~UBc#7w zf?8Lifc*~k?Uo?w`Po6LFzt-jJ9~XQhW5#Ef~KsKG}VeatDIJ42#Tcm8s~E{DR=9F zR?>X|9IL#i*L?ny;cVMm4Wv!9qW+!SzzPG(23BJG)@MBl_zWj&A=8d! z$?>8w^tN~B^24xj;bnk1;ojxvNl<{DNHgI+@`{uE43AmHLlRO7Vh9O*XGM0I zAFdc-mp=uI{A`b_2MPKPqFPCi{8~8ESMsU~`EW=A&x>8BXU;8}B{i0F~nrE=dG z9x#^mI!EvVryUG7Iif#uLjpP_t55BJ-OD&Pk9;qi!e&{wRu0~+N)>Nv11pRJ-&BJ) z^DTZQ%Kk>m3N?Af_>FY^h{vmPF62DpO7TQSZqvN5>NMX5jRcH-2Og%^CdJp}5<#|v z50)NONa}07|M70Rzw^msrb3@d`O4U5wx{<^AqSY1~o3n;m8|($NH|V%qBol3Q{n2$~!GDxze&$`8-dy0-S*cmnO-q3dFRG zo^r%QOil(%^ZKw91W8BA)hnSxWo=uc zROL%175!roh)uQ%m8ah}cyoEwUrMEySl*Swet+Er2>W zws0Bpp-drFKAvU2MYKCt4O@t#rW%;Eqo79z*idw|1H8b^q_kg!;?$bhlFyrI$8fXZ zx|mJ@lu_u*)h_s)H!=)=6wg)2|1~KnVMcv$dQRD1v1a3GN=B;uWE|Xpl4$B$Z=5G; zN$2lkGht-v--cdSe!o0HE)v>?SGz8zU6Df8%U8Wty)j!2V>$@>T~kiudf88skCf5c znrH@#jv3Y|=Lelu+_+lariPre9A4Xu=@49qL$*H5rP2$uYC?11q${=;-xipd1boV23hAgkFB zp9`@$%+%5qZ!gW=lptq#j0Z*JN@S>Dlq|euJZh}vJJiHjRp7{z#)KX_i`m+urk_=l zVnA7z>Kp`yiHCrHED=4-D61z46c=ZZ5uIbYkXT_An*-TdRcdWJp?;CDnDI9dTO3#C zDt?^xx8}^3V-AkNhHR%D$kZ!18f%`_r$q{vt|B47+8^%Dt=fFDy-hD?ckz*rWEFo% z5z~r~-a{m?#;Mmsxj1QhapjgocffH0vPK4uPt@u+YG6Wtsel0jV<(9fFNL6^?hn1$ z(JBTeYUOJ|_q+0KqUKADfC_|#P>%K-!H%l+?I;%`3$sM3jNV+sO{M_A%^S5sOAQZzmL@G6?RBnGv8BhYu6qUysVN(kgy#*V`FSxz~-SX7c7rgq-+1shZlhy9`OIqA@ zcxPkjL_5<~=cNS7RXiuxnAwxWaC>7gl3DMdanV}K$8^&JLC0pSyefD3h&{T=BqgAK zoyTn`;pP|%&Y0vsbXHk$%4x~3V%Aj=y7$>Kk7>g%FEW54LQ>m&CvNn0T<#z2y%!kv%ulMe7n{{{`h zz%P}p&8sll;vv;oYJbd1XsLfnuMB0znKTSEiIk=*Z{e-b5s#_>vopb_G`o6=Z}W_2 zR2~SYmRP?t6?4_@uk)dl6w>-(S&z#(a{`)U8(kS z$0jI4=Dbck&XGNt?7jc#C{+5cw0d{XtP068LCD%*Uas|3FuiOL?2qMsiW8S%1wB=D zA0%k6t4NZ)!Wf(~g)TG8I>sU`O11@;@SKJfjD5WmO$lrchs8x&0)bY9L;v_@RJrnh zq^3kLOt#Hvi9MTZ?^ZQvyXHsa542aH;AS`!GmE&>2tG)O<_Ps5&L4--cIs<)A&{PT z+Lv!Ko7zkDq#haQA_=%phI^O*eVE_fxU1ojzpD!I#}_4-kbfLN(l$s<#zAX_ueB!5 zv=4?{VA3`>44Wx4J3pQQ06Rd$zvfr!=dKGx+~_({{;2Y9$s3tK<)kUHvI{7+a)#^V-?k9>I@sU`U!@#T z-*q82{w0?OWBkZ)b*gEm{Xle=oX_v@e+uwSHc?9&mI2CHX|;UwzD`tUSS*irUGA>$ zYkqGo60Y7U-%@XTo*1^%JJM6tKeZwMiff0FLrfPt8ZQQf3q^!9NQR0Z9_L z_YBf6@b#VBbR8qYCl^>m*=87_PSc%53g zi=0sa?VgKcq=X-wL45&HtaX+Ze-CMD@SekdYg*Ru6{zr$vQiZ#L{d_PJ-MUA{g9&4 zim{xphZ{QQLDldJ$s^+}LCY?S<-oR2T@hHW-34WqXq+A1T8+V1+FnUmeB~jrZ|6EX z=PtMJ5~oqIl37}f!li@tMD-dShpih3sd=}FXqwwPhJ@V;8%V`3H>~}Lf53=9G{Ep? zhh1<_!KH1FK__NhZh}a?iW1;ZinVbo9%%f;dvi3Cjc>5XxmTSB)PoYR0?(M2vTz$F zSp@c2J-wTLp{W>t{GcszdVHS&j-5k~jv;xGI+C?-^n1aamVHEd;3-60C84t+axBru zNTof82P+S$tc-z3D+Go1f9ZX~MCLK}JVYgnbap;~s$9KDV51I5MUB5pK-vf_(OI-9 zX}d;}-$OmPz1V=nq^2LT>nc_;WurPEJF!bHGK_O1a(r7DAH>jY=fRHQu`dfg(1eia zrNo;A+gtB$d=C0IJWcgMO5koK-*zXEUgCFK%Qo}oe~Uh`zwiNm&SZyP zl5}86UIc6VfHzGGb)CQsN0(2E>$e2mbij!gv%E`clQ322av=ATJn-aDlY(Apuw{P7 zuN-VgdOEc~C`1ZLgRkpNW<^1QrIaT$A7jD?bQa5QNfkXggVF8Qz;=T?8wCJ8G_!>+ z@*3r9y}g#P**XAJe*ne3fSXA?u;3F^J}WY*VX*uCPP5Pra~%Vc^mx{22@32T4K@}N zABu_Seg7inn|OPO`&yiQUVTBTko-9B%A@fv@-$f2}oIPFHJDa$8O}G^QuBoh8Hki#`U0UKlk z7Q(8H+*z=8fAy85tmRteEDRlyGH&{)&P^ppP<%Zt7xf4bkw?uNUypRr z-)zg{fsg9Ou6gI;>q0JwbUu`~ejC16o@-Bvij+`Of8BXMI+s*S^5vDDf$6Pk`l&V8 z2m9x)p5WP+>i02~Bw5Z7dan`g%@12CNZ7Pr6a5uyFxLa7jch}dTfc$)XFCE5PQS)|IlYNk+ zs=jN@#ce`8Y!4&ckRwT*eSaT~#bIHg=Wd0ue}EcZJW3KwznER^)~_g%Kda&spR9F~ z;AO2MVQ=5kmtdtPortL95DM7*wY;hgdZd2Dgs>{KMS;F47d+*OyK*??TEW)T z;YOu#6x~VFnxfYeWiAi#8;8ZZN6cqVXnSeV{++Ol#nhbK=?~(Z9iz6*9aw}1=4qOs z2grC>a}?(e^GiDKf~MX~EmU#R7>yAgIbraKP#o^P zt*N;+46=JP3jTb0{{cf02!{FQ`tXs+Opi4lPuKl%m7GlzB)2>aIS`cu1*p z>-&pASv?<8D^-s_3%P&C4Ga&j#(g{B{ah{b#b4u7O?ZA2YS6A;_EkdhCN)|f^$}}jf0+HNS-&kyOH<) zYUC)DAHmFa3PTb0f|Ii6n6JZzv7a+5HcXa2`pbVM~h3k$Q)y za@h-La>(2m8l^_)CehOi90-b8IZx=pj+}XO{#w!_@)rr|M%lP$I-+lT(*>9$rn`~V zXGD!hbdo+=tQwUtdDKrhhp~jRf6rIL*iT-v&E1Hu6^}S2%s>NO`y+2uuu%bZZek&ly;TVNe1UiQdkT#C zCdEBr{R7~X-$qOQss&$KOovPb7_%`?ga}}9gbuD|4X8@8cju#1@=}t6g$yqA-bkgk zEK^kG&jYXGs6bb&Kr(#jS3;zpd}AJ-9Yzw(5ryDjf9@^#B5MXJZzm^8hqC()l)QZx z(f7sRbyLsro_+SvV3B!`(AI51?)7QlH@i!cMY=kTi|NwHvSI!VeIAOkYEZ4LbJo$; z$j51%T$CzdL}A({W=diVyHonHJSDC#EE6|Fx|jk%a*``XUHe8lpO_5pGlsA%8L2`U zQy}7_f9ccXBajN8pYNJ4BGsRq-RjVA;xLDw7P#`NnKG}h0FVCXgW<1u#$>GU^yyC2 z1W|PkN$h*tW9;*U6UwFj!uEyx@nrZpI0PGvBC`uqOlVblnPiKdUNje^9XENHL4p4Ag3V?5weCu!N)Bi^^?@ z9=bbuesk4A4R?Jsd&`NGJS$N&Mt1H5#gzHyDW#^;+aBT~mokguB(%Du+*J!c5a z8YC}?)Y}|q$+s49bpj z{I%_EYSu4lp5owbz5^fcweJ|j_x{UZHm<4cqyv5FhDYV?;?sUXgv{@Y z=Ao#Bt+*>_O>6b+%cZdCnO14*^$vt0@V)2^a3x$w#^5t3q{TP2ytUGov$AlyfBKvh zkF@413L`4De0!hUeeq_Lm*2_>F+S%{QC zrB~FHvaV-B0gq0li)#gaM+3rwoQ3li?=7KSb;5WU{WsRyj>*ap$6-RDQ!L5))D~m) z`GZZxjZidOfU;wsNzF;))#RMIe{}L}R{(^hu~__0@cq%_TYqeRc}+QuPoptRW0CB63M}DU-i?qey2V#&5ZRDpf7xL>NwPcv*2? zhilyis*aPkfxGcAAuS~be~cB!1r9XKk>nON`3fsmQr<|6L zN*hhSP}8~enP)B>)4rupP6&Gd5Qxu|g=jFN;}WWR{(x2cF+If6XDTSHHt)*RId) z0AOWvzsA|&mt*&d6B}LPewi7#Xc!)sl9LBQ;EIrWt(erqQ^j)J-3yXdb-cvAGOViv zAlRG?-`ysgkerldVBysuQic%H2EHTC{ia*btTyO2)K3sW(kRG&EmrlWTs^@w#@6B) z3i!Jccl&Cv9r||Nf5Xcn$10-KOC?|2k=E6umx1lvxZMc30B+slt~CZ6jFRIhIurVe zYI={=pLC<%cD?LPdQwB2=0#VP*5x!yHFJFaz8yvfxS-&nUSfIDVrDaaP-IARc#lnA zyJ2=P{OH&~JN8yM82)L46+wq@51nD-<6uc)$<$(aqHWG@*Q9n{Cs1^aM_t%joif)AWvu*MYedBR&;kW3)oHG(-elD#V8sQ** zXK%j(_d%{qY;t&f73r+19@)$NO}@kDiPf5Cf%m3J&I}_s;e}ev7m=OYMPYyf}a=3-IGr3X2e0T@}{C!DwKcqM)l2`xShmQzA^ILh|)3BgOcdIHo)sHqu(0|oFW7W$bo zwrB0YdQshL61nyJh;->U!SgY7JKCZ8^ImCBf55&Q19c?2PPrcqIkq}%8=(x0U1AQPx^DCd}gxQ(|MF*Sw#+}EzEPz7H)xAoi z2E#0gAv{*PSo7Uc`P9zNnfrEGmCvdz&{`M`f*gVH?y{vVnT?GHcL+S{m>>Su3j=ys zf8QG(uhs7$-@*-7+NkO&q*vs;GfUreUL6ZPTj5@viSl0&a`)mCY-CjVA?-%z+l3P6 ze3sxg9q>opCXzwDOth2L)?dGa0UxWN@c@a+7lV@EIv^|c&V3kwcTn6T2CPfcpMiSa#B** zSYQib&c=6)zTO{Z)`@4Dc5~QgfSH$WNw(TF7oc9|!5I=EiwYylO_^3HW3)1Q5OF-fY!L@b{t3XGa4^Pe;)(%KBnUnIQPQfFRs|S(U zl&&jbhdI+{>Tb)cW*YV3tvZc4mC+i~V#tCqVi6bRIv3VW2pO2L>}Py)n^LD+$5Oab zTV-pLHlA_6Y)fyCC}AMkf6#l1^vj^bUh|u>+OM(YJXf141CXCLZOty1qZFSI0Jd4uTZmYt&C!_yX)*BhW$(f&D!539^JA1O}|k=R=)YEbPAri&&>;1iovxF zPbcSAdR8vL)=W50fmw_^QYffJiECP9lz%9MvPrdsNR687{4{k;fA6xAWRB1H(q}w! zmijxJH2_?_Ju9YTr?;ptqRa!-55QBtE2bVWajd$p&X+{9ygf4^r4+)tq>pT>z3oXmXB zgTUtnbNbB@(Qb@~t3;9sb4NFD75ss2nmW^+-XOyebpg|al00p+ER28%Op&HMQL8mZ%}ZRJp=s%Npa8 zYB+(TzEMS!`z*99Xv_02#g>kw$FnlrNB{yCijHGgJuC~iFA=FCUsBOU<5$W#H`R6W zCbt+{Gq}m=hU+gR&S21@LF;-LFG?n<1Ra9mx?KHxe;i^pD^)4*XU`DxGUI2>)yn=` z5~sd3m*Qjd105fSc9W*g&zO0=P>jiDp}M)A80AXL-n%t*LTywK!>j-a9UNo zD71W(GGUW=Cy3YUQbL*07HR#IG-hvTB;=*9fyrGG#FlOK5q)Qz#z>-2<&C~wx(BGG zFf<&2e|<>_n<_e20ry;2HeFf_U`GAO1GwxcJTmsPJB`y(2upJEVX+|U0bTid?{LAD z*46EO+fWCaf-Z7miJ=Y|oBeQEROLb4h$mp?g)~aTFbcD+3qJ@`p;)yz>bB6eq^_`Y zv>#m+yk>lO9sAp~(`FM!uyU2v)fgyt==DOde-4WIyWtP14?(60Rjo?!blxOd0xe#d zoz4L-c_0$=Z@D%2BvpLL08fK!UMZ!Lv)a2qzOl_3tcLq(Egpbo?uz?>*5WMEO&C@N zy+TYa(PRL)w%rWJFIY$w zfA+01+yZ|%&WT(~PV#rSFO2yLI%f{QM^lE-dG^4`DK^EL1XzLi+EAUmnar|?_0 zTU}qBo(E*e(FDoDVl+a+(@TyEyplsn74-(ql2-k2TS+w8ce76ix^g;$jRwgrg!+2T zT1qblN8+{ySGeqA6aph#r^p^hPn_eFkD zyhZQQO1ni8lR{=>rNeIM6V9SlH5?A6N~lNoS($mf?!ry1QF0K9DR*^)&fG&pLm%&n z|L`-7Tx;gZmc=x9v)+X(syNbvO#jpdfcz{MM>{gH013&Qf9m3#3v6iwo(kF?f1K|c zDG%tk@wBW?*0ZNV}EVofdwk_TB5tgof098D2b)uKmQN7!`a4MlZOi z_zJgJ=!U*GyxniVSP+4oQ|Fy%XFpB6iY0yriL73HmZ&SGlgh+?#Lj}RSpGz_K4p1v zIKN^d1<&24V@JjZIX04~GB8Df(y9r(Bc1)ZoL8jmoFbiU>zeNCfz@g;e+@qOJ4>=J zS4+^Svww5kk(-|%xSZ-wF7cdN*EZ>@A(&yDtMu%|g4&`#&Eq+z(dFcsa*2+>{H+iQfW9LTUhE`sX*NwpA zkRYkiai8vV%ttpx6E5zye|S+6QcX6fZSwH~{1r+7r?6wqGIh^wYgZslqpLv zx5NDlCR0obl?oYw0-*2waHOD6PR;Du{KB$C%|ArTSamq^0D33Je+wDD;q)cXG(r+9 zI)}F3NiP4CweuoxqIgd0DIs!)kB{&f)F-u-HyoMc=>V56GSHdDx2AUPi)r7_$`Ldc z+}t@6u_MZCUva9fWzOzcXGXw=v#*}Lz;NsapYO{rr9KK&*YY{eRc`?E4p}QoW`y>> z@F;{(D_ly3N{lT3e<84C**1HARO^v&^`@(j$uRDt`Ds&<#WCn4P0on+A?^J=c!-m0 zbo+Q>$<)o|*~O@BH|VcK!QFro6j`kzvuxLO_TdM7RZC4ozT}dT^TE!)an=n!FQQz! z;IKUcPxqppdV$2KA6ICcL9M2T4Ls87ec@0oy;}pQ`MgR9o_^^n|1wgZ%=dqj zX8vV?jDYl)67tKI=l^8RTi;#`&{Bb^dtwF48t@=%U3vs$WE9yZIi+63San_Gr&Q6Y;(CPFg~ zXHa%+O9|Jv&fC@>p3%JN@-jY#1DZ}w9GVLUyg9AsR$1|xV%AuB;3c_tUU_HBr#>lFK3BL#5;f=c=B7#&A5YF@ zbuHp<`jHl`h3~JctlB9(VPoz9aNr+2XiRtuVt7*2#{wpJmH*EyrqGLp=+bV$j{ggq zfApXHrZ`%o79g*$Wi$Lkoq?63r9D4M(=X{ZqT@EANUZ z#3=}WJB#w-RCKS=e_H!exhVHei0>haaknys6ah&@h=Z->`WxO7O>ir%^cSgidNy9WVkJ+k(-@Bol z&-E(G{>eS$SoLj7bVz;WJER*S#vZHoecTrhp=|9Z5B)N68GUQJKq83fdJ8YN-3q?? z)I@?^M&g1EI9%1oP(Wt^F-Y%}e~vPabPLuZ6Pq9YyQu~qT&+wMtp8Y~zUQ+gEW#v-YzziV+ixImBY%Mg<4&42WH zq$QqiBfw6XMU^y;4z4Eq7XKqQg@w<6)2H)3W*FZ|n+50qm#a}nW_f5>5oJ4_#_ ziff*!BPg%|pm(5DM^ZI*q)8x@SINsCsbWE+SS74@W2v#d1{i-}K=yem%Q|eht`)!U z-Da=7sSuzWTiSTTY_zX^6>8_=M+8BNN}oid6XGV%8yU zY?^GY78pe|^vveH3G3zui1S z4|Hj$k2Zie(_HNFBI?C{ptqfV#_j9SiQkT(vXgXhI4N{Tqc6wGWSa76J2hyDo-ZtD zx({CpmNhvmR)<^v!|&euSstVRxcDzrMP$sCCa_c3oSFTJW5KTx9Dsf*m+DJc~=sq$j;Qt%o`e`qIws)D)Dvgi%;W)3v5 zKIgG)rH|BJpYM8z=HAr@=1kzS9HXlg(uw*}#$YjRfIY@byoZneikSruc_G?0p;siu z&+dRcUF);GcEJ}6oIAk1f>1w@JBS?hjX0ulxrV}S&02NommHSR>PDH~E461Ew1#Jz zU3-i_Ex?Bce>|VeE`b-YGv?G$#x0zr%>5Xpy+nKEYlI6iT<7BKR&Uy6yV)8@hR4_F zE-}YXd;PR%+6ch1NaQMkP5FMU~9jU#|m8{Wu$?AwSlOQE_KJT;5 z$Tv2Te`foUcozP4^=&xzO7fI>=sli9oDf!fjXxeEq?{A@MRh<4v6{7lCsFbw>_%91 zCf@QP>eLh~RDB&Y=>oX3<@W34U2I2K=gT6sy_LNizCP7~``YC2prX5Nr`m&IvlOu_ z5)rFw-dTgcsb?Hm+7RslYOjuF?E6tEUmjJWfA{ENaKbadEn_B6v5M|i7O7pVq)#^# z%r!7FrJ5h^#a9ORkU??BlqzJBNbN#7`*1{4lQXOj%^=hK?mJl(Y3e(~&_WDwIO+ZAO4yBl2w=hBf8WXF$P zBGQd*J1V{{F9lcP^#;cayf~WOCzv?NDyR*Pw}Y@^Luo;bKHuw*Jy)+vZAf6P{^WHd zuE`=FWg)NbaQZAKZDC};2-tf#H;Xp)eN(o;j<9uvXG;7~6A#VjcZDEv5zjP{Vxt2zs=Be~wdD zU{_b+PIO1l*-u|7@|iH8FqQhSaW^a=Wbqh$jtIwa*%)Z#AAV4dE7%lk)MHGuQgoy4 z{FI7rlm)HdJW?n3;jQjHzYmK+@~fZoiR7uD&Xe`a+R;YzuSOR}hCrq$LNht4O)1Hq zJ@>BR=FX3*kEQ{fu4by~S%&2`f3+*mU`b$MkLo<`XRfQxwT(KES#);STjfJ{NtKCH z_44B%1hd0o+IGm7(65QR%r!fYTU|f4(C;&?|ap zznye#z;o>ApXW$R2v38m@)^2rNMF%l9LPdL%YaPksH`j34K}XRW@Vhr9uedujt!W; z^CW&78%Ga8@8n`Mc2z`oNZAEEkb@*0{p7Jq*D`}70|I#-+uWjlug1}UcB!01xzP&m zT!}u$roD!52O5n3WTj-Lf3dyz6;1?%D~h6(Zr^2;wMoU~i%BVRZPSuKh&F9!|Erl2 zp>6E2OU^fZbvM3))iY>*|GhyW^nCf3Vk5JqDz{jzmOt z4R|?@_UehP{0Tay(diTCq}0|wQPNTV)Vr1qSs`a=l-B~H9*OAso|_;NucU~Kx+#sE zX&uRvGNDoyguNDCV21;55cR%?7w$t7%^ki27&U=wLYuv044}`eIyjGf0v+Mv4VyZ1=9z~$gc@6X;EETr+{lQRl#wFTvQvVwK^Bjb zQCeC;8!)ypN?+Z>XhNAI7kPj4v(Y>~Ly;mX`ZtJJe+es2RBAZUHE-2eWsY!nj_C4N zCVJj{%`U=sZ4KQ+TlqxGxmZttW-jo*EFyD}#kF|gOh##b~JwVX+0wykpb|4#uCYETTyIBQ-4R^oT0IkWZ;A4tc22**(CvC z-CTQ{B*$u}#O?^g@_9M%eBbpE)?e4H4FVK>FbjXbQ#BQbW6M9FsxIV0SWz58)=`pg zp5+liAefcwT-PDUg}7meg{-TlMV@7!5Boefe@g47S(q7fzdqD9rs*4dure!(LXCOk z^PxVlX+qeILLZ6Lg?r*&P+n?+Yp=J+wU1wGKAF|ghO+izgpysqZ!fCqPLZ-MsRZ>q z35`6SNp|77sxAtJsX9xaI$&LUO?ysXVy+$ER^JZ(QE-@kahe82@Ms}2V<56D>hi;3 ze~J^>ilexS!??OzK4Maqn*wm{&YN7a&{`;_84&~(A3m+e{?V$00jgA zK>c4lHUA%)nr-_ZmYRP@b&S8knUgMEL_4%L80)bFK)C*>U7|nM8TyZ91^{~9!5`~P zlMhx2x-r&E01EXt3P&gLKo4mk?NE3Ee;{B#>J@|i56jHo5Z-SB?DtmYorvONCDp zMOZNZ2y%qxS1mLn{ISUVvBm&EkN4mG^d1FB7ZN$e0ssp8cfWmS!sp4&sN_5fe*j7N zqu%NMSYzlvmKXqN>W?Kym3~Ayf1++6`2c8uJ^|1!?%|y5;T-Mm`rl#&$%nq{4N?w( zl3srD3ZQC$w*H31HX9BZ02r-aX|0NU-pT~}j2fx29Q}zfvy>ja)KI6oA~J|h zoATAT3%?pJnG=L1Ba{jXX!r+(v^iA6S>B&sQXL^W8$Eye*_KF`e>x6w3a<6;4fDt#ine`uW?I8Vm3 z+d!<(Mkw2WK_*GGIgzzinzlyVxnKcUk|5a8S7o$>u_HBT%bc?^ohc=rX)0B;A{vy@ z-+*KWQ7`bE%vRDH5Iw-C{++hJJy_DBKNhAv8LB&4E#JTQ%24gmF#Y*^KamHS0m@9& zo(%gRY%WWruk7I~OO(z`f8gEU`7Gc;CeXFk&?nB*XC_Fu)a$N=>1_4_O@>KlX3$n9 z{$cx9R~Dw<+920nL!XiV(<8f>L$2d4E;Wq>`jv5nO_#020-oU*DFjyEyB#MmKfSz7 zaa-Kr6snhd-xe%Spi3_cf{)4AlF6Q{1WxJSvXUiL8OSm>j7-KdIEjmPQ2iaN4A2-rBtEc4k7*GNSR6-Jl0XBY;A-zH+L1+D zp1_d29|Xz zCx>c}6!2H{XibLxf7AZ{#p(Rv|6e$iKLLNogM9s6{|AdZ`saERg*ubvql;;!vY9oOLXjqkE1WP&%B8EO; z8XV*WEgnN#k;DRt1@=nw<#=hV_Ga($u&vDGKvXM$6I~q7f0+}`1={ks5|>$PG#St_ zTq)6PJ|86h;;wt@NZw1;TVShTf)*#o>KNz3UfAd$q_HMoLetL;?<@x;27nbrZ z(`zN4Rx7yes$Z4>ei?$QtO2K%yQOu2Dj_;}y2J}N`1+LTQ!+BnQW%#bB$~4MYjx;S zW56w;8VzjD(tf_K2G!1GK$)HDnf>gw;_UQ=*=5Sp z_?Is^e})6xDlihGINByxZ7(+2z?@vbR#_#Ls~W)`Yr|g3jd!cx)Mn85Y(0@CU0K8+ zUzs^&TC1azw>eKNJ%+y4My>G1w$_xfe(h=Og*&1cOG>?mD;+L#)fil=bGFO{x6&;I zf~{GaO?hG!@xYQeP)h(XW+98?k^(YV4`k!IVlMgvU)4IJ|ozDn09^W|pRcj?k4EQ8Zi3~o&ri(}|t zcfevS=!_WJQulxL_^rUXl`HH2H>PF3co6(oyz_ZF3V-cJg1B9~q`PW6js>L%t{Aal zf64*XS<3^8;09Q}~h$7})f&zf#;Z8tn z^%_cb^uO=@M2OBrgZzsfkMwBQf9Z5<5`Ahx?&TI|NtpyJp0+HWW$=e<6v5^O^1q(X z2n_#wzC@QA{imZpz!TzKf3ts@Mq5CZe}4YgD}k^+qXS%N;hsMm474i>`M-MktD9SI zw1B^?pI!cY?ms^G&sKka^jBBHpTDi6U%dOL|MMT){3l-iWAjah$#vGtcX|jHplQJI zO1%p7Ir8*b>cA=)zLa7XoL{k_Sh|c9j*e={;cD&CINIV;wk!n?XgX7RB1>}&e{#2*QuVM#)0yy$17ug^$6Su z<*&%+K_v35cJo){{nf{rd7%8Sf0%XuU;UC&M1jo-EK9MKFrY|Bc=M z`1_ML=<{SiwdI**aX;VwIy07L{||59*44PqZ2NZ z1Tful^6SqiI|-rJ`u0A1f9x~P17od*kYGtwHEY(a5^ywvBWUSk6VUtK5f+}8^nA1a zeNF$q-*GM6l`?T{P~{J^q+JK;?p{{jM{lQprarthWT*Y##hjqu$4lISj@GEW1_&iW z8*nRd^k4`d#b5=S!R&=WkHPpz5T2Wc;N~O3tM`9KAFx&I#XwF=f52vWBV7Z8S0Pgu z{!rJwA2{M3=q_T`$xqa}KKiQwt^fFp?k(X-=;Mn*s9T2u*FRnrW2p*xRZ-f0VfYhv`^7blD zN&thDLgE;rJK%!+e`?9ezjwO6@Mq*J$fx8`^>}Nod%GCUl-es_9dXw2*|Hc){|b$Q zL|1;EUo#K_;YWt9?G(jy)26FhRm)lhVAS0_Q32i*Y6igva}z`7fpz1MS!G7|hNBY# zOv40BBv>_rs$pnzWF|R=vX8j?RzY?x(Zfe;@Ye{v0Dtm?QXF^YUch z13vA)^g&hw#-wKq_s4yr*H^i(_WEVOJ;e!sy!1H3_CL?%Rv-7b>|giwnMH7ojb&YX ztCHzU=q=Z1TW@ylJ|u@{21OM&<&$uEIkT-m)2?YPe6CxalrJi6*BqLd!E}b8mIACV zS4)*2;DbZ)e-=WkXA)Kd%2`}Z5M9n7QIa{w_+j!JmQefFuGLcCDl^H}-mdTuyPMfz zy5Y6hGb=Q=Lg`v)G&$$X+(ij21DkUOUd=)I3{gF$ty}Oa4m!KjPKV3w^uxE)8C+Yz z%yiC-;e21D+}WGV89HZnbhTsJMt30l_SJ}KCa~vuMxiEy2o$f{vuYL z^RmCEmGN+^+|_lnXhplR*{p(|eL)VG#$ zS;@?Fth${A+i{Z@hWWX)~esB{t{x=rF+>T3@KXwIyY=<)^R=Ek>n-p7+aqj%Vr?A4&Aco4aQ;z34d ze@Re&*cJODpD_6DQ~p@%Qw~BuTOZ?EeL9C<`HNm&oU#5KAJ^PhQxhzW5Q3+|fD>Sg zC|G)}1b6SdQh?JDCdJKs`@oou;6PkAj?8;TJLyz4S&6E>PY&ci9csuW zIGKZMlwa*6T9vWN`;AUXux>WPWEJK5f0|d-%dQeE<+oN=ViTHjI|DVFv4^`PHBXVq z6?$5>ANsSg7seY%k+T8PLkTa`#9#Sz3syKwhAx_O@vQ# zYmu?MiBPAq)sM<5KRLAZUDenKn=<3mA3bMkm?D4n*xQ>XfA&+_-kuKN|!w`!Z$ z1Mcea;YylI4x~KsB3H=!T%lhpf4IaBmA;0@X{@kVh)Unyp+_+@5jLL$*fNoT&z%-g zW_;G?ruhuq?zgc@5PyWWG-%&nR?fzvvaewC!<%avhkPONvzYg|PAClW$=nDZ_BkUW z6%yBkP_nhv0a%4-wcvlUs~uCZs7)l=9b!i~P(v{qeL&r?7Yi@IcaT|#e?GnDm)@5t zfTndh;V$yiL!NOyGnd>z`7+?8;wG-j8jpBGlI?Y0u@;8h|I44(8W zEeOBa-7yp08D2dms0E}Ve~g5_Lr*?S+wP+-FG3tY%%Pk`8FhJgmRa$Zrqfth@0F1X z&Il1~&3llR?a1aIapMkH!CII$Rjf&M)86-Ys{4P)4&MmJ%#U`M@STf*?kq1)JV-zs z)&-WN^X31OFDrYze>zuRHkx~}72aQ%W#8|?dy~kdNf4%qZznoVe<#m@6&#F^$nrgQ zqLNHF*5yCbe8+^{Tq!ob&ct%&Ls`KRe(JPHeN-!zcrU?WVqvmd3N%k>lw{CwzV*6| zD`p5`@pgWI1-DHV8UHb@cguy?ltD7Bg2PKEC>1*L z_%Zl@09ug5Qze95e|jE2`<^cp{QJM}-(=!GgCu{Lz3i3&bB{9dJ|QbgiC*h>`NM4S zGxEFgA*$L{khb)Ba&&0VpBGvBJeU0UpLfp729D&E6QpDVrT-(z%UR+rgY@=YR_#Yn zPkhOpQr3iw`gJ(m3nv)3GWf^eM;tqMt3W1-)taH999J(if4483GGh9oa}^kir)MII<5m@r?kmFw1Q?8~`A!1%u3J1oY+FT`1RDOtxT%T|Y#wtc*d*!y|Bg}t%9|YJb#=)O27QoYD2+8O0d;*?Jhe8si= z8>d%!emd{He;@tg7RN{a{`!jxJo#V3euM%4s@wc-6X}de_RfXYm^hpGqV~e&b`l6X zUG?iyK)2`nJy|X^qn&E#BJq%@`xtCLuR^fwVu1_Qh+gwm3<7rFlh3L zk9HKnGioT;0hTMCcs^PkiE6rwlIe5?A`Q&M`7n76e-qF#LK-$ogDpzGUR+h^z4={n zH>=uy9bXosF?2a|1}y|7t>b60ZQf`dLsR^Fj_Hb?08K!$zcc;f;mZNj*zz*Z8FKZ* znKRnU?k`NVHQV@vv(Fshl<7mC_Xe;p&Rn}p%=+wChQEc~89@g*R>^LIMlMr?L}#1k zMdUw&w0sE0<9`QMH7jNXGPC@6WaaPm%`OHb(jwK%;-zKWEw9ZXXP7q2=->_#aztZp z=h?5>%eZ-+TPG#hOx2>3i5EgQ`6cUo$11T7QuQIbGW%^NA;aZ5Ka_zOcQ+jLdKO=^ zd<{!x%`9|u!%ZV$s|~(BS1g5!?S5T{RhV;-%Va#=$bapesYbstYY;*IS$WCtkG)qj zD?%(%A2PcHQ~sKXwCZfMckGt zT7QMF+tyDj^(>vB{C^5f7qhJlnGJuTd|N}Rx653Yf-l3+MFpoCYZb2;no@8%7eJSu z+p$dY>AC|S2=5ua5Jn@IB1BLvajR5o!s{d&;)fN#jACF^oNaaJKp^aXXfTHOz+|(X zzRih$3GYWgoNl4jB-HoYL-8>1sOFqAoPTO>N9;D_5B4(Ox~x%?AL{)N`4GxKmlD+A z^T|(b^J{3u&&Bn_gHFXev*J@XZ|13MINIqFtO)!`nE9!?$L97;Lc_!gwt@d}5KO6L zZ#Xnga!=9oQZ0`m1fS{nbECbt?Z(;FYAg2DlH742eL0uX6v(c_IAX=pS8raFY=2f9 z#C#5$7@Uzbj}KkW>3KL7q)Q;R1xFWs0Ba#3ypT;k1*je`3^j{#b;ibn4cFlT#^Kx16M-}23}rp_d+6-`R_;d3YO{px_@_;s=E?iZp6Kjp8x zJjy#u zUu9l^&OC-#O0|!DgyuzoV&HNCe?}whJcKG=FwjyRAT17sC)Wj$rY-EyY1C4>k&lJdY@R|^ygo| z)f74LRJ1P!5I)=UzNn$T@T~e8>oJGuW4hYwA@{fzTf$u>`9@mHN>=7rKRR+w*?>j* z_ox*$;r08Wk2R`=rSnX@YksV+(B2ODo$BM)fF+K$g3BttS1BgkO@A0zJ=D=`xs@*8 zDz6YMElWC^;7Xjj@&Sxma#tdl{3EvNhlFV*t%$*Ct2q1+Sl%(h*rwN9EbuU8w6Eye zwcyq%2C+(2#$3P7v=3*$N%b9{@@R@{321bsx+Y&`sNTDi5!!L+=(QriK71BISht*6 zify@s(G(EM08>%B8JUGR(Q*QZ+){pKc{}=w0x1-J2o2sOZa5*-!e43zbHOHduai?jQ z1uj!*8j3>#t@nZ3f`c;)VaGH1mR!rEjviRET3UyEYyFH~XMeU=SGJTrPh#E5_df4e z+4vz7&DNLk^Ng8zmU=;7cW}mVoA)tIfamj1A$g~ksq0m6*W{nq5PeV=D}Ly8zpjUq zU-=i`JFN4n_rG7bt3L7>eJxcVbibZ)+OK_YE1GckLD7F#1Bcqbp2@^KLT^XMSU7MZ zwBFlqez3 zU%qauhrDX2IrM9Ov?F%Pb7c2bGa@@X?R|VWo+B&Ts4CaTwPSq$e4paD~+S)O`!K%{WCM)D4%dKGmBqiFacN{ zAb9!k^WFwH`KcjRQ{C+6=!XmczH>{6 zvMv^wIFHk7UUfuy$$&c)GH4mv647boaDU~y<~oNR&NKBgtBTCuHyr3atP1C85x`>( z$2bQt-m!Pb0^0W?ne=t^e~)oR8fRu4?*Chx=K6w*xTuXD&(Dz$Kk^#nS7kS0GU@B8 z!+-nizr|Vl-Ej;<{gH43ZL+_IP9Lo1k4V9AHOfECRC>2wKHLaPJ+=K-DJDMO)RWC;=^1F z_;58Fy;;OcWmFS1x%er7@UaefGG$tj%lH%-ZfaRFH2gPW3XMzjr!{#;uyr4P^dBUt>rfnI;46AmTo^$VOfC@#ql7p~VK z0r}w*52X4!IAMYETI`G0i+v$ZFO{9SLf-qFtvX`D-al>ouj>nr=yYb8H2wa*&PX@0 zSL~7>k}zR^$`&X(R8qJlGmS(j7Y2o>oc^QCVB0HUS74((X$H}`g!3f|;`MF&7TaCJ zU{?~Aee^jdM3bY9XDRs`Q+mv}C41T7Q?|>0p7yB;%y1(;);QOLLd%F1wC%) z`6C6-PosC}408ZWBFWh-fAPT`bv=E2|I!Rj(Van)4Y)0cUrRV7DnzVe$p zb$eUS_P-t$UM<(n=q=5)k=yE7ZNb2H=V7H*Bz8&m_KAUD4ph8dRVsaXd{YRcH0XZa zWS-}L<-fLQZl98uIml`}E1*TNlSy*3e8{Xl@el0U41R7!eGYkt()IOP#Kkz+5{mwKacf7UxM&$U3-4vs5?F2+t8do`Z!>T?f62U-v=3n4)*4-e0w!p? zEL*x4&L#^4|G}@On=n;X4CP|sRnw;j?^g_#@uK^X=3zA1g2a0FibmA3b2Dbj$HqEb zcHVEvce{kO@5~X^|0@Zra8|emvuk#L9rH~!>x5E!_2$=tjx5BZ(K~!<_U@?O(GpL7 z*lgQ1*`+>fFV7h5@4Ra*zc@A@Pu%Zi&2y_RYaCyk68Txz*D)Spt5*77Fg^@>@v3Z5 z_nd~CaG9L)YWkY|hxmW@^bS+TvR{$FWw`+3&%v9^q?ik6D8|oq;z`bbY2Lwq9uLzi z#l&AZm5~TVw(P2V->UnFS$_`hURbgTsuAnCU(tb|J}T>}V=hebNI>lbo$)i@PF#k) zUFcfa$jdWurZINEgle~ziFsyjr&wyoXsKz=Oc&1kDU3op5ei5T|1I}Yze7e4rsw+} z0@2N5D-hi@(FfRm>Gi@}XbV_>Q{LfeOYtu5PW7weya&D3cx%3N%ZCYz4@Zo(*Y%hF zy4Q~#>s&|Qb?L8N-z!9ZsXo?7zG{i@4Nnf!6!*^ov+51A+fa17Cy_kAFA$<7Yv#W3 znA|oTI#87Nfk{5+a(5?-@`k(I7fbs?rOETmHH;Z2gd<$6^xTqGui&_U(bI)hQJ(3U zJa|j!#EN5x8S_RfTe5|jK5(mi4o}ZL{{tq#qX*Owf++E>L?Y&@df<17jTdgm zn4JjO`>4eZZNU+#whIUm)e7|syyv9H@Y@6UgC4SY?Q48{v6o%Gme}hF@?H;n`Wd^> zJN{=}cdSbd@k4YyKk>zXFa1;!8uH)%pGPM!Bn(;u<$8i8x)9s^lQf2~a~!>5cg!i$ z=(TvfOKgf(AZKvnyG|xn`?HnW${J`w?3lQ|eFp*3`Le>lhQ5Zx!I)6;-P8x`_dN`M zZc+qA`q8iaz?u6&k5_0&N))c-P`kTzwG)G`em44H2OPHSahUFZaTumYzd&)4=da)5 zNdr;pypIwFI0f42JHacTp$*pKe$?=bAqiaBxi;QAbv z6ZhZep&;O}7`>z4$F=L8cfwwmh{_JSg@x7U+RmVZEf2?-HTbNfjlvOQz%~#1bJuH> z6=|thADjMv8xBh*CM7*zd?=~@M)~evpQi_$XSE-@He^QZlcqR;5gXsQ7CI1jdNb$HbS3@M+YyFWV=xSAj&*mNkg1ygTv+jHlsjQBCfSJlJ|>TNJPPQ(TdK ztC~CNA<5}1zs~(hLb(Vy!{OCSeq6N(%XenS6Y?K_3l5k2G4KU5syW>ew zdmg#x=Sy&XUQNE>xRxi5zU=d@i(1KSQT=x|n@Y@vOh z+xDGVw-ruB%uwC4jc~7-fyf6}=G?R#C?~T$R^i-hHgV!IMmch}dlh^R$n6b6^E)s{ zzO!>lhM#lpJj&b2W;r+dF>|7g#+D&d^PW?G;(gsmNSFvP`T(z=9*0L|L5IYH2;=#b zdxL3GPQK4J0et*^qFxW?Xzz6luA$zO0r$JI&-;Db-aflky)!1FT%IRAuS|6#E~vkf z_(oIUtq}+P(r|QT0d^<(z%3zY3cvCDS03N=@#8yAHA4^8fNf6udbYKrZ6C3{I_n32 zPPz`DI?tCMINLFIYJTj|IDF$0_bQm0KJR*#E?3+Zo0#W}vB@Jiio0e$+g6=0wC{Q5 z+g-G@vk*bd6!FDYP`^%Wu=wL6%qV{nnR^w%_|~^&jIPm9@)e&J2Z~SU{A&h@yOGzA zzd!78O)DXZZy7i(MVe?f`W;Nss1z%I6QSEFs@wJZa@32!J~ywMhgUx<9;p`u+&M;3 ze?dZduIu@{9ogUAP#+WhT1wFTqQ*;@?^xp{eGQy62Ptss0WZEZXJ<~V^z@-Pz(*@_ zw4In0RHjysfh#3*g#o)s5EfS%aTX9XU;tTO#pu~xSXts2=F*8P1jEJa#H4lyd&NJo}T;E11(6$a;g>WbwJN8`TR?rY`66jC2!P;IqM75HyJ~;Xr${ z;|KD!wwO~*8UAz~^m@KvG^Kuj$W=*wR`(RYxG}x1e&C-j*7zcx#0Q$EV7Ai=bpM$P zE#c7If-5+}CWl9Zp_}=v6Po(q*dNy+P%V`DlMRE(6ni_P`iW5nJ}gNa{kF670Z*8t zmZW?=au7c0VJ~#WE6N9!YQ$k&mx>V0%oQVWLPQV)5iK4jn1tO#eX&YEyqixVrCAZqHxuGeC_hU@-RxNbLxr(*tG2EH9X|_+rXIYGwj+PUsBBZnxSx^(cJc2a&V!? zy6DwE>tartTYRnTMkdHrWfzd?K-A`cDc&~`L&LyLu2@jKP^d3Q4WFpPYu%GMEFQaZ~ zKMi27XU-b+E{1*;;>+mozT7i9%Qw0*{Mc7;iU)`9b@cKjPPPAUYasuVeH`O|Gc3h} zN)*#nUp?)!v6wWaeg^SBu$jaeSNVdKPeE5ah@`Jg#5^%$X7wKKn_QZ8jFte7AYME} zQ<%{`_@bL~s`sXyp?pM$=i*83MZ&0eYO8(z47Wl3nLfdU+3@TV}$9F zE!CJ=n}gN!#i|JXZ1SqDov#|OffX8@evMtjscHUsKYj8Om6~p%{pI1wuy7%W+iesH zYb5u|1*GMExzDZfiw!plL!8oDJ|)|>m?icRtB1MocGx5Mr_tN!V9?or8InF7KjP4L z$3Dd~-`eXt1}?;vTl~Tc+wc1S^2jgtToug^@-FI#`#3wEo5NrIjaEK|m8Kj~@T&E+ z>uX#x-sblqObUh5S4?0#tDzK0@hogU=dO&n)j1el`l)NN@BJx1XI7BjVJ$oFhS_qf znYb58!3vzLu`5GUv_lPl%NneI?k2@n{NbQp6t*uiQBTq;DLZ&g?XP%C`5R%xjZQR7cbamqp7$Eo%z-W!Tj>r;Onak59dxq@xJy{M;<=4;@| z2@ezJJ@kjJdOg{&&S&oFn-3ni!qb@w@MULw6h2t{98_X6gBH1eBRkV9${=O?sR!dR zC~O$Fxs$F$N(xw~f6Oh_mfq}qmp^Z|xn^neOnP)}N)tINg2Y+B3l~Jb3R4m`3TKrk z=2<~DhstQ3d$`>0VF6Bvz%|5!&cRIcpL5{6D|fn48#N7MaNd|(!8_2Q%rr8bzTSEH zvCn@rhFZ~Fq`nV-K5*Vimr|emN$k1jH5+u2!x`)Ca_HIq9?u4!e~#-%zaD(&zRPPU zyd8rj*QSm9u8kN>GM;E`TrmrhoN_?PKn98J0>&l8+|vTR1guI#rSF8(gpCv8g;dij z!HQNenqwm$<{-jlV(i_^#G;!d+=NTY+xxsN(GsiK)vZ&1esn0ifK7hp=_KJ`Qh^vdkxzAT3DPnYOCMxQmGAJsLU`!EXo{QRi5Vx>0!WXDBztS8{ z>kF!Drs0f#A-v2$92yjrn+ni~CZ8|Ud=C4`i*PyrWi*LBl0Z<; zlM-UB3aBSrZYKPf5ldPZn*JYyY>(?AXkApLhjr=uuP@g{pZEXR+qpzHVHdr6m;UX7 z*F4spzw)Lnm|Z11u?ahq%KOF_JdByOSJIXp>s$`L)g{JB2F0*T!M( zc5|M>Z@(_*eXsP%uAX|k+L2u)PWO4s2_MLR^UFsaPq;5Gdy>xeQNjcIpoTuA0aF|_ z;im`HAN0f}8QGxTA@Mql6*%1a9~dTF_|UK=N3 z9ahsi>a>x2I>)8zb(xu{d_(!5xXavh5p<=03Q)YtYPamMI_N~_BE_x!U}>XfRn8FP zc8P7kD-pwU{)Jy@{)&7W{W4-gIBwSGaXUWDu{rv$%XwX2IP{ypT3B^+>HA(S`K5gh zb`o>c8Ct`c$rG@-@M+#%+8n^-j|(RL`TX~wdGaq9>Y(1aEAq#uh+p`s{ye3l(HO>m zIsUfT(Rrd-DgX4-rLXGJ6W{ZNfBV^`9o0u2^ziKcOrN3mN1nCnXTP3*`K!M%E!pjW zLt;CF2&^;*HDeXc6PQe)A$}~M%c?DAsfk!}`u>!(?J-M4zqh0hr~V&~o`>d^2{5yH z2ql=NO%sUjY@-EE60pL=iO12n!M`Pcw)6Mmc}6-vV?G%uTxS;9vdEPGIbzs8Fc5d7 zclNDY@??X8$+stG?*n6TZtR@x#au2m^&|OccCRbzA!d2Rx!pt3{NygIv2wo`OK`+F zKob8x#)>qo%x~p(8mnezGnySm^~XP>UnX3_{SKCN?)IfV>mH9dW7q@r8Y9hrJ^GqW zF|6rXD}C-UIQGPUpFwrTDF3!6y{|yWtioLHncLkd(DMa^3+Rf3yVhK+yAesb4z^R$ zkH)cQ4-4Z=kO&5qNJY2Hz~wWnXl7Pkr_P$Nc64P9gQYhC^Sly!{211>z=bt)&cu_? zr~R6OYd>{YZemkN1dm4Im$U7EXELjVMpWi|jBzhAeY=Wn)8`2rI{%i=KNw)tkC;CQ z!aUU1d7i8AEpDzadjH4gUaHfYe?F@$Pcr*FF|v6`Fez@s)I0_UdruB7usVWTUy06A z*&OG~akP$hIm#~byQ+2j-9e_wW}T_uJ{rVB+s>bNs>ca%KUz?~+5HZGQiG2a50_2* zh64}q75_+=arnNU7rav6*LBVVPWqPnnKs2kll37_q%77>8IIJeJ-lWIxx~#s3f!AhaepX z*nq*#sM>o;G5=V=WF73wrASTh!&%a5&S4|%bV2n?>4BRaCZAE#4tkg#2gxzKDoFZx znh=JHxJ zNHgbLj@&rovq?gl&pw8`b{_1Tr=YfnFx5Cqy`dk$s5snDGXag_Hrxy2R3?a{Ctu-$ zCMjGdt+ zm=vFR+Pnn9F&^MDz~&`z-WLx4u=v|y+1H~$4LF<^-}sMX9Dh-lIP5>2p)((F$(xiX z?b8wanJ-TG%%%71!?J#T^3t#MSX6~mm8QD%^V|!4ofHe1=#8K-ktoy8i1EBYfUP4V zlF(RgOydXC%V7ndTO9}8go(YFe^I;+s$HdEhXHtKEAt$6-t+R>8>flzrJHEX^3h^D z_y5y6ZqjKgq+;w)L<000li(Gr_5qt~rp;u<(R)_x^Hegxh0DzN4?X8A!S zE|Il}SkmsK)7Mm+-Ei?1)LsbI&SR-*&O^kR`cR!mvG1Kz&y~~jnSIR!Oc&ayg-NO3 zzTni~GBeXoRw=6hEOVzWVKl88MeL3BGeSJy-L^ zEBE>Mkhcx}G4||JdOxzS+9w8fwZB{C4B5=im z*;0zcy9p9cGt(~of6gS_OliQIU>*3le`gPDTFJ^jW3Nw}7SI8{b(&$~mHnLfl=!Ve z@NSyTSMEnhH4Wq98PYsuL7J$CMdcZACZ^&xuR((;e4f9JO54P&(}s8u26-4$++CF5 zf%*x#g0SahpvNO2#a(`!(KGZK@B4i26eEthyo;Hc$gEjlf0ge8H<^pX76QuybL|Y0 z!pfv?PWO2ya%N2E@7uWt(+E4=G0+^*u4l%D%oak{5DTLM;)tQPLtWPx7FpH8n+MF2 zl(-&~eShOOyJ5z-+^g5)ES2LNEg_=Hd&jhSka(ygn_C}tLSp*~&&?*kUp6sy>QP98eScMC!N7TY6UyUZ5{dtDvl%YZ|+CAQ4`W=T833Z}xT zr%0CXGw0?}*KU}Z=`e*%Y|R4FNx=8{Q}~=a*%|BG)(m*@Fq#|L-wP6c*Bj9JUY+N! z*LoXS^#>@<9nN!P2p{)z>BiQCehs~LXWlift!P|Pe}3#IC1J7n;iv5%VW^KS4+Ko+ zvsAC5UN@zYSw~!8$W!%*uw%Nv$AaqXz(1+)L`~tIek?C$=1e$n^BaaU&19x;Vw#(iP1g(KCDeP`>&;v~U(_PqhV~%^yBr?twg zS=M^4jF^!oNm$L%-rqsrFHmsFTou`gVMau_y9=2qUm~L;aMTBv^AQb(q4R;$CC%u5 zT2B~SHH*`S2j705F>&nOo<9Z8Z+A9VH1iuFPZ{U{O!!ItrldU{{hlBcCH2{e(Nyrr zf6P9CO~F{I_rKqyo*wD6OsLN$KJ{XqY8OWyrss;Qy#6*Tw)gAhpi4T|aYzTG?764= zm>Udp^}rry$!{;`YSCGIN*2ZDF%_Qjy~#B*LKrA~)`3iEX7M3M_Fny-=Qy9+q6uxU zz2b=p^i1pohcHRDlSY9<-h3D{wts%Af4pFf6z}DDmH;E|7i638etq+>sHYlI(qASE zv|2IyT!P6(a@{U*vVPXsw8*?q4kaSQE7+QAFwfHXRlPDh8?$6H8tdQlr=NdzZpx%B z3ZKVUcdss&dshM3Jp`klf2JNZisp;`AsoG_dfmu+sA&(ikwMFTSqcL-f5xs@ z2Uab=X0@GroWAJ?jy+AMJ3EFt%Ei12xZuRkIPh7#OJkPIP3}!!_wMI?{GL&asD!It z%Ox0{q)hl|jSLd8;IR2v)aBUkihD#$`TLTY-IKI=$U&oz`i{HF0=s4#>^&=t7Z;T( zqpp1*P9!8@W3p*gl~eAk6}uCVe|voBFLtC~{#>QYX6;BE*0JS&w;--ab;UaD=*BbG z(14M^(C_GogPG@@T1YGmI91Kkyba9MP->%U*hz5D(bLTjvl1TxG)#5V$FR%$c~?uS zV||i7135?o8w1hjHLkLz*h`wbxzGR7_Y5;fzpzbtS(p0Kh8@h!KeP$of4I;hh!0Bf{K** zxdxkS-*_pw-SvWbBUQ`$<;*<=2x*!s&C}+`BGqDK-4(aHePoRdl4bIcDF5c*9_IMi=4#HSJ8m@T!N2Y~ z>9q~|)ZcKvtKygw_4{zVo54TEM7)?mKJXTY<1D`Ja*xaFXI6DfeU$gARkSpd2&W2?d?}OMXBBB{Eyk5?d|Vnu+T7PDMDAUeZj#7aZ{uXQVn*C+@%Fge ztC?`|AMEeP+BgJn#I0TVP(5bpWmE;Tl%nHs}=A4~k2HTd6X8j5lJ=>@FQ>Gyob zder|AvX+678z2t8`*-~UIJrSfP3Zrdn!JMDpV0?m%bdj)lp#L+=l!Am?D1n#4BS+G zo>SHLy7YH+eA6(i^~L<|zce)FJhxvS6#3tS?C--WFYZR7fA`M4nbq5K?_%FSU=FAG z2l4*g*ZfbdV5M$iJHJB{QmlTVUJjboooz*}Jb9C;Q{wYSUPrZi^1I3u zyW}zTrjmh4{koCDww&KxIdJ+Bj7f#-F+(D^Y8!(0AVFvZ@g!h+y)a@ISdZgqSBIU* z6M^s2s27z@e{O@S+2AuZO$UF_m|*m0O5^W1-)rNWGOQ<5zaXvU^E#jzkJj*?PAs6u zLQRB;9~G~MJHu5!pO9H0j{O1h+A4`wXS1qZ$Zlee^UV0;!JAUgNO*ubUQg=4>~#s~8l$R>px^4I|fKO6mo*$PSn3yTT(3fRZPf5TEdBXhuy_sN_!hm>0p{yb_> zE@s}B+pif;XTI~qUtY{=>i?bnrMEl#itw+y3x@uL`)1x0k$5g9ehQ3xyC*Q(50>sV z;$;Y4?mg^*aaJ_kc0$u!+N!TN`QosdF)Wt2oi>E&aM_%6siKdwF4fr)RUh{+v4CDJ7+)rI}{kaZ>(@Jjqtb#W`afpZfUG?)VC-41St*4PCykowd{w}AP;w-5a%XWp4xb+XALXQ>3QOo+mzQDR9KAsI|KyeZC2bSq&( zywDF`Q%`jna@(*MP;~DVRpa+?pR?bFS}DKr++Xt~?XmA+U!Y2+sW7cHRiBTFe+}Jd zw`7nK-uWzRgF*YvO0#fB#qf49oww{LG?cGpl~WJAC8M;Qs@E26caSGHoo) z(ktYic*UKMSXiD2h8^j})s7polM;WKkmeAB1X2mseuS){?O}TW{CdJ zKeN8Cj1hCbL$p&9?M=drMR+$Af|<;%@OcLxA&QXZo4`jS0@DOcgaVBXf6y(X1=743 z@t?l`H9xc!G^Y~&U(64s*(KlRxgC3K`+Ye47jr;=AKKF#P;^@QDWnF2i{=SCJ%)YDbOaz*`#=(1m)H38if;se!!hI#4< zowVN*I_DYnf3aD9CyDp%f6w{g3k2PN=ZZ=VcM$>guJpY?mY7{N#zZX9L;<+jRlE4n z4YM4A8eACat;W%t!@Zq_;0iHXiWwezIpISKTwE_kqbZJP<^$;O^K)1;%wP7zL3LIi z?}50Scl#}tk_~*rC|G=9Oi=|y)8VM!f2DuV(M)N+^$S!^ z2j;f8OLp7W7xia0_C~2%K4w>)O60FimV%DC$mBBAYi3E~&hA!C-%Bj}zGKaYo?uOP z5iodpe(U>ByK+H2Ys1VK#kre=&;sMdc$bQ|*Y_06c`iy^C3n|v^($|>Wb7Giy0Sg& zGqAfEnFHE}8|zkdf9KG^@6+s+|M)ZVYPZCFS7xR`_V;$JPV>w9Sy5m8-hJ<=sMG~F zqn5{d#}H#HGlyDz-Q{)e6W06TK92k{=Ukm@R&sdz&5Swo>2t4RbaC8(!brmMfzof5Hkwzjtq6nJ@zVZpt9_ zFd|-!K67!sBK(5TFb+x)4>|Ph7n~=;7mt5Fzy;+YDd>Kko*R{Y-@S&E{Kg-}-VE$O zYOr))$#|U4>Sl4A6Yd@7W1RS5|3Y^(j#K@Wlb+@fWab)^VkO-vuF$+Ay~WO4@G9ly ze7K&!?q`NOe{qqs9b-7XcV`ZAxXbb4ZlR-&@S;9CbtP7>fRH%qWeykuM{5E^9{skZ z5|H~e90P3e25W_A#4A)fr@lidQc1JapwbCp8qra|v$oi;jVKWyN;1@^vA2^}FtD*U~`8>2qd}sC_V5@e2 z;tkao5kv&~gZ&ruf8u2Qj$^#yijqC+Zy40nf16-C{BxXDXyXhxIb7pcW;OmREji~Q zn{nMF{_!^Pk6&nc-51$Ecrv_B;*b1bi2gR_zV=*|T=*gWW3N7CzhR*Nj2rt`oaaJz z*LVr=7JXv{d@=ie?qUjJgC+~2^Kg0b)POtDXVb33?Z<`mM?%?++|6Pji@1;Y0eBb&IVO0kWIht%t0S?Zc|1qH;neYpH+DN-iN6tKuNtsziFfFGu-ZP! zlD;SlPq29NjO=l5WIwYd>p2a`x%+CrqJF=JieGy5gBS5L7kGd868vV@N6vKHzm2h- z{f0fUo)P#zy81WYs=sgp{s#X~P6VdDf2Iy=eDV9Dvu(2~7z0VLv82qAi}=i2L3jus zY&o(^xpS)Si_ zJCph9>`&fb`1$vC!oS}~mOA}+F%TGQlo`0#hx+aUDp+JxsgSoxppfASybVFI_kl+!=hLKn2!!jibY z1n1vj7kAA&)DekYSa3WXFPSopiw>}+R{(l87KJ7%B%q9F_46Qj^(zL*PuxyS_RiP? zHbn5jixV3PZeBTXR6uS7AkWF#f_W0FXk76(f7c0KE#w&3xna(cD->9Ce|N+dYV2=h zesK)l+)$I!orA+iFKRTB|*SXS-jF*T3N8`~~L~EPprO zEOPiu-m6Y<=szuY_iJnT8|K{ZS6m39Q{Vbv&a7C3qB;L7Sv$`acl3@2(e-t~wQp!7 zG3@ggPr$^pA?JW4_|llg!{hh;V* zC)fB|mPR1MXTD-T{S!IgHzAn^-RF3o$TcVN51}Kyj<#uGBhCE>n_@K;p(ohpX4dJLHh%*AdAMphMe_~&#tAEtL#v*fm_Dn8r zjV0cI-2CoGavKARJyF%Ky2)85k8EOxoLPwYndv^fjzSOm4u}MQZ2aQu;$L`dudwf~t`{=Trxh%aZe~ z0GxApwN>UPe{Q`4zxI;9+cN$ZFZ#FmnX3&S9)xKaq$#_ALf*@eRgoR(jzjAt06eJZ zSSZ**o;pRyUzRiH9?Xfom#><5NYH9iP3&>_1)K+T9@QOy9l<@9UnDNh7Y7#pffxJ- zHp*A0v)^K(M$3PI1}clA$`dCM%tK&f)tYs(@58)zuR&FhXV+lx-@83f+pbYVwAe>B!EF7i+XwT23r#4P-Umr;#amu#U= zY+d5Pz+djVO}_I%o*kX#g1}h6agJp)jT4{iBJbFkCpY?I_fZ^>U47dUmI3jNY!s}l zixbfKT#|s}BV3RLDKZqa;ux&h8ECAfY?RA2*gNo%;u?sbbs5>RMDW&xky~yR4*9%0 zf9d0Z`MmC(34i?v$?zJ{6|4a3VAVrF;w#GQrNfoaNASlV&VdV{3^4KMS2O62(uSO0 zo(N9ocHp+Kn$l5~WVv}=9M^12Cf4fJ=SOlc|kIB!3m<}OKMqy`102UXrseMn? zBsz9nIrmPihGc|8D(>|R!x>6uck!KFE0x@po=%pu2e~*SQ#Sp=b;X&)p2|yIl3BI} z$e|RCtv{vrP@JI&%7|8a`hG^YSrzMUbX%Yn{xdC~wK}j*4h^xo(0ww$1@)Nqf2VTf zG-VvkwI;16p%Qmk9+{) z8q`3H0mP6Rc^-T?PyxIE?87*RfBv_;L;)y6YJ_ia<3O5A6u=!x=OnF>w78~Ok}d&B z>m=QO%dhnzYJ@b&A4iP>N{;p;ID!EH6KFb6BE*eg2c8iya@7Dl!R13CcxS-KMFe=_ zy9G@WirTQ11@IA{lehpRACUTd^dPw59}YCO76U#*dJa`EjNmzHcc9B?e?RgD*koV< z+8ywKq(#vBz_&^JYU0#2fg1-QX}?gD_BS9KK?)=th}o|+=?}H3;R3+Q4`(eukn%an zC$OAA$Oyklj8Nbb{;lPKNb+M_Yzci!;(Gfd2nhBj9^e{J@sSgH8}J?6lk#y548mV4 z`^xi=_IX5@FatXsn7Kv)f7wRjje3**2DU^2zJ~e;HE^mymuor@ZRlB4{;Rx0+i=1Pi5fmfPzw;3~emD^b9Z!9zoZ4X=e~sUH>hJ&Z5g?HG zumcu($aZm?_w^dp4@Pgh~{>Wk$?>}xIS+m z0kXFaD{DZP$un&fu?Da&l{5LawHb&8Bs3BdM^GF5kh1~soGxQBK4^e9P)vWNgbonf;1AEICS14D8rG*Ml}HF zh8h6wk(AAg0e?)qNWVv^S<7EtMr1L775P0n<^m9vI1HU4OaWB4O)MV)@P(ow?T$4N z5?+QbGeURF9T$xd1x1X|2x-F(H6Y2fIwE8samH&M5n>29%a^A>?`6@QAfq5z12Un7 zbr6Y1V%K8`(fZ#UK?{ZG2n4mn`2Y%Dmb<*{0aTB8gnw>0LKHip2yKxO+%U+i&=q2` zv-G#+2;3o+QwT!$m+W2x$c6zlym}-m3i$}juL0!40P4yv^?U%JvRiQkt805~(sHju>@?d;~CZ|4bA8B{Chpbs#1D{vM{lvgs@7zZ7Wz{Z)6!yY^(0+@B;I5I>J^c#eFVVj$sM%t)nDijfaKSKA8NprX*qXNV3IM#yXzQ2vWRvf z%prgn;1MFbR#(0XM6N(62FI4wBhu7qD1V7egp%;f@3j=1H4Q_-9bN;>hruQo^2{i@ z^8}_fKhZRdzxl&3*#4R?p?QMEnt#zWEPeCYVX*UW{ya~}S@U}h>3_=q2*e1#{1EB6 z)&qEUhC(Le7L9{EqY;Zg;}XWXyLBRY2JZm~(>lkv%&56zC6g$KE#av-DlOCkA%CxA zbQr-|PGwh!wG`wUKqEiDCjb;75Xupk29j|&@+IL1T@66V$tgVH&CAIN;G_evg31<6 z0?7M89(r+BNL_`dk?6j|Ye4ca*f=NsljnH?B_RE)H4XFMe18~hd(BtMd4lemf7diD ze)9=`vo&9J=Lw-T|EXzM`R22O%zw`#2uY+?ZjePZio}bHtW6>0ziTgmkp|vsiC+Ux zc0yb~Adbvm(>j06;9u(*G71yv>sl`3jGXM6He-@8S;`t2%jIZ`FkWTFfGYCxbd?V; z|E7c3gmr)hfWv{u&%ySg;0KW|)T!JMT73W-u+-y!l-?d>KK%BZUv>S5UVjk&SwF~} z_pkc-%?IHhn^}ivD^tTXaQvt2GNVzBxrLzgultWifC;~t(U`}#u#B&PC&4nu+?oIN zIxzsq6It1e&T1*xuX8Aj8X2*J^KiBH4a%O|K^U)Q!pUj7<*b|AVsz^t-+z$+<0 zivN|>0H7ZtHn)&Ycz=d|&yzJEC@ER9{-BMT0r-uZuIthskqFYde&41f&dj{_x+2@f)P7RZP$iqK%5CSsqYjr~OTpZwq4+W6=O%uud6l*;u zhXK8oLikrYIrW9V(4l`-A6X|HvL5@%D9}da7cV;u^4t>Y_kZ94D3JLT>%+iz{atj!#;|kz7h{Etj~S_DOZMRoABlD~fPjy}bq|Sc2B&LUAp6IE zJbt28H{*4Ev44=xiZjk-997Pd)J^75A^x*}*)Z7Ppw5R$2iPb4a>*W!A9~qcb#a7b z&0flHK56@W81=PmO~y+g|BSI#3M`v*)DeY7-91^`Fo*?(T2{q@<=G#UQDTA*OTRv!HI1B>K%TriMDwzKW6Z;A5b9wViMhQ_=n_p^;TOMjGob_U zOE(S!IVbX0y30xjgq7}G&Lq~$4;?oQIRFmX!-;@A!|yX5h8+L=E|ce<-}RN=C9)9h zmHzweuYdI4=V+z>K4+vI0N`~TcWb|Fh)FvVm$U-_XQk^%zu*sD|Lwo9(xu=2!!I5E z?Z3a$y}$kcp(Fb;e(ZmnH%795v(S6ANm&UK*>malj_j3|J=g3)v6EEMW655W`|0gYJzxRul zTif!BmL~OXejhV8vXAH4e!yfe38}U2@;%x_PBQq?A0Z;NIoZ>Df6yYd*8PiCC++w} zTdZy7e&Zt&*1G2(zQ|;L0@NRTDdsQzmnTSG>;6Ssle&M=mZZ(zFIr90$cjY1JIfIk z?|+%x^EyFZ$>bX^XpnjYa-0HQ5g_~9-th&PX1fCk(M>=Ax|1W1UH z8HhEAN7#|zAu<3R5Otv1AP2lmq%i`%CU~?n;XH}nMI`M2*pMrX#D;euvOosVBjpks zHY6R9dkqq;6+q>i25(aJf8xCpiGKt9+k08^UYF2|7^FGK0mkG#1+jODZTKSLW?Yh9 z{l8&EaG5-tE7~XoMmV$-NV#0ggaAlJ;c8Z+v~fmVf(cuJ0L0IiJ|c zh5+7(-DYIN2kDn)|D|4yv@an15pr-Ip+VA#4c`FK1ZR_8+Yd;29RxYyqkjxCVt==` zhrA!JN9^R@UM9v8l1;ou3u2#l0q_IF{9E52e3K%=pEnV%mV<@(8~pFdca<9^{r$Ep z`?gEaN&gtW3{V(-?^&?#SPp%Bf_j?F{`N<( zLGrKKhXCmZ^D{n%RSv`t{M)Y`pw6 zcm4PP%n?&>{d*P>3F&^$L?FL)s#l*e3Q8 zIw1;3h!s3uenUbzcHn@_)#H$k^oA}wa1bPW5zX(E@;&$yWZuLA>s zEs3LKjDRKcfSH38C(*LW)gd7KM&#v=P56|NMkoMxWW164S%0G67HO1kLzn1N0AN6H z$AXbGk}N$iC5=6kdDxagl_MS)EFgvvBtBpVP(eby4**TT*2%qPL0(Gi?13?W9pEp( zr~!#vPQY#m<_@q2$OCKun}z&H8i&D#FeReElvUxnJeS|M5Tm+y8az`Zjs}*Z=bB-`Cgt zxc-;__W%7qx9zi&x){eOR++OnDdO-;P(`G4w3d4K=+_2=O=H`jl=lk&~?m+@NW z#p~Z#n*R5p&;IS*H~;mLypn$-nbg?bZp^=@$#6Ze|NTOrL_Pnx2N#nZfYt5;&`l12 z7)w9*&j0iewg423Er)rV@es?X)6R;l%G!t-P}+Pc*;>J~fP71rQZ7}sujihe_DLqi z0;}Eo;eWzd(ncRn8wG2TDZ@xSCYEEA*mIxh!J?>-YM0R=jXgs*1P9tw+PElr*@oMj z`QxgTzS2mV>J*iJKbFV3WRo+kPD-0Rd)xKaD6 z=*y^+BO$4lu5a0o9}Om#eoE^O#8p4w!^Wge+e_BpS=Sk5Y-`ZYZTErs5(Bxe zO^WpB$;a!(2o02Lj}lm#JRghK15u2+E7i;@H|$S z*wE>5TXLWCmR20P3UY%!bCW?S>ZU6nuc%?4QE{${qmu6U&3LXof43R%c(`Av3x9KE zE`4~s*-iEE0}RwkQy)^f2SxsQS*|k-OVn+$(`_qI{^8j>?QJL>@pyFE#3h)-@>5AN zw%r~hE0Gj5o7}0J=iTFw*5L@boXc(|ttn6F0LMG+SYcN{9%B!}sza~I*wUk0Sl-9m z9ZuTwl@3&qhK1v&{=rXUH+r2z*?+!1?(u=UxF7CAN3$h!sXCoxCy)-oj@raYpB!m$ zp5?n=6sPO`&`k7FpC8#LwSfGN!-xkOf`!0N+QDhETIX=ncl>FR6Dja=MGyn)^S%e?B2lJf_u1SYl)3Qt-Gl4GS$Upjm1)uQJ%6KyffESS z$k-oG$7rBLyBlc7DH$_hTRcpiqfmW3T<$I9htIPs*}M9=Pv5b1t~{n##$tQg--63l zXlD2EqPtw4;$eTbPKQcs&_D^*={U#g6)Va#RzpKE(RBAVZE)P2Ztsf_I}E7n&f=1T z;GeE`$fmfS_>niY>IC72w}1Ft97}^cyu?gibgS5~=w`n8{XV!?w~3*!P(Myx)KSkm z(Dz^&$8oq_G03%JN=&KWf+}{hD%hu{V}@mu^^GF!9jl64hH1r5&hFXWcztuQwys&1 zUZ?boKl9F@9>S5MROBW}8Mg`Jx9z3VpJ+G(rH{ribx&00oG-7l7k^6T!Ic$6-imJi1bji-%*-%N|v^6RbC)rfw==zDK;PB3r@V3EcBd96j zm!c`0DY2o(s$PoG^?p1F)D~-=FjF_?vHa59Q&Bs zK7)Ry*ysH^%8!gruYc2$ZCkGW+;85q$|(8rKC6c-%fWIsEXZu?dvd4s9YyQLr*SDw zQ@EQSZL`a}N=)~(X&UzaIo$8Z_|`Lr<@BobLw2*Mu8!%x87DKz z9d$GoYR^Snax~g0x%tfdx=+sMC(9U=HE-!#nMoyOM(1VPbAQ|h`Sr3N$Gh|~-);`5 zG)`D<8#IqMBgmR|f&I|#jij4GwRCP$zfDBTx-}neRux2jBxP2ldl9nT!^d~wFmZ9R zw@Pbww?9a2xMBcaSS{3;U15Df`O!?<&TC!P1v zqn4)Kg|_WVd?YD5H88q`kGMXu>hu0S^l0dA;Rc#F)k@sa5%Xc#a*jNEreE|zw{4zi zR;s)*L5Ich^USD|ckkvy+@3CaTq_MfR?1E9@m4(OLZ6GyVn^4~C~d34!te}sPe)~? zUND`waLv~>3Pn=6o391BVaX$(x!4mEADl^ZCQryb)PCA8d{_Qc}UM zo?Ic_twy685%qyA$J=T?a-*n?jQg!mMs|%Cu%`7Y^*_M|jn^Q>J(e(VqQbOWU@ z27w!ZA@97$aDJvAaKGq4~;8y#qqiyIJ@@?1Yq@G(k2TU=9I zJ?&CJzW|hiANS*uZjEVwI=&uVaHCpUQqt)Cxc7y5pUf7Y7ty?$PUKck*>>NGE!44K zsh#U9rtCxQS-)456@P>oD5^O{umpDP&H z-Bm)bk8*BseonXit3ievu^s1V;;Y6lvrAve&5ioB>%h~!e_902%0m@grN&d}s5h=V zrPo)kM3;^6ezecCv*eQ&UOmY0qkq)wUJGIjhCQoeyB8WSvbf>}uU$6oCEXEtSh%@= z)3t6aj$)(FLcK$BbD-*-c$d`U@!={%?V!CniMD%5IPTok*)c1RelF<@cIKdztg#dc}S^$x0JX#iWc%w^a(-sl}8d-&elt%rxipcYn{^tBk)1 zcbZog`BRSeuBo}{j(eGh!Fy@LedKA>u=-M+i_t-)f5o|S5Ri5`Jd!caz-25op$gfP zaThikz8li}+u=4mjMv9mr{b%m zEsyQvbgg@andIwvOr_mbVt<9ZC1&AaUmcZtpZ0GFlrKgJl6&r$H+e6yMdslK(5JUY zKc9l6=${1|j^pafu5N1UaHi+V~y&n=6_%tWBP&2s?($6 z?rG(=hy$ZASV%)8c~G2(LKfennai8~mUH#Ie&;cJ&LA&Od>0p>cJom2RR3AJFHLWq z%Ls-^72HCZpM@&DfltjXZ;7(0_a^IlpU7)4q51^bU7tG2Jov46Fg1LZLT{20Y($1K0nUTpXN-FhHhV*;rF9iYZ*vZ5_!+skHt85Xw`n33}7fxnt?@z3g!282jPTJ?gw6`ND-ez+?Zavfr(#@L1quABP?$R-> za?%%X*gS^G6n{2ag`0iPu;w$lWtrRFU4QFPwz_$_Xn&k)*-p{+)?b42y?>|-l>M?Y%u$p)-+=fS$|1OH?#ieL%OypSDsIW!Nn=V zKIWHRO=V3XX`+&l5&h@$e59|(;Vk6T^Fg8U7O*$XOL#ZFPY=<}Hrum%x{A7+Zu-ta zn)Gs>vGVZu)?5j%a#0g8$hXVQRnm8~oX)@x;;!~z&c@hZya0D~T}D?FX&y~KPxNLl zO9y+5Cx3Kl)u?|@!4zBq10(f#9ki`@G2@^Im?>~y=zH|8@rR?uw|)@?g~!B z*Tk{h)(v)DYlKBBY@qm^~B-=2Lsl1_9|h=n~G zBWCcJTrS{MxJ+Ri?jk%wZ+4wz56F;=?fZAoWGriTk6qw&Le^`{)dDv)CBvK0br$KH&M z?a@IWbZ+S9(|K!L2hY#vna*1gfg?VQ6t_IJmKN!+tiSnrrLx_;&eMItW7A&2(4n$%Bq zNuc^l#r7;ux`!D?yi8&|cC_Gziuu51pf;%;XpivORm`)Vpq@ENiI|y|So0L!!A7;+ z=!K1@dDjxsC$T_L@G)xjs(&Z5`ZD0-K3}{e;`CD~9vgcHkii#NyXyDbz!&xd0w##W zstKot#hQ=7Ev#xTd`dSx*=8a>*F&M=_vXq@&PiiGQa{PmM;b02vN<8&9Pcd?XqXwM z!&b}hmD{@RwvJ3z6#2a*?d4B&b=vdetnEkUC2^PY%qXWc=Gq}%E`Rq&u{&pFR|Ipd zw0E|76~47Go9ETozLw)lXtRfV8jBlyr|$2=+0#Yu%%1g#Qn!uai7wwJx4Gg06>iVb zQ`|MSq#MJn2pX5Jj?Z*Q*IYSQEg#zZINjRsD9U;vgtci`hPCt{Yhb99qW}b|3*+{6 zpad@p-_!g!6$g*4Yk&OS=9qjSM}0{_)Rd=XJjlBZ-j0{#@YyPlR_MK5IDC$qTx1gc z^AulsmDlL_1{#x}uN|4C^><%(H}2h?DXvS!dEk!=ul2WKTlkJUb)5dreLb%A@}xJ? z5SZQmmNciQHx_iGie4NK$~C)uwC)L^JnR(TJA?WVmtm^vqJJM43{A;W#<`vVtb$MuV$`ZD63bSY+#P| zaKCE16Ql;KKYu-*`$RZZ!K*i*<-+K+Ov3Zsl4G@o+3#HN@PT$uAB`Ux=g`SM;0B)! z&pY#o#TR!>(no0?Sm}25>FeI!UE+vhL=pBk%2MZdSj8K~nvOdMr-SikHXgvtBv351 zvomk1FN2Nu#*~&DK^7%CsukaRT*~+`Y2&-eT7O1U4}WSN-X4*0eI7p-dM?y1dw*`` z>;qF~G+qb>1B*#ymUHuH*kRpACeZdqhlNR%(67-Geog}c&`hzjPBQ0d^?Nw&z zOE{{FT6P}~oxA96;ePKwca4_i&r!^q#?kg~n13~r^kPftdiUGs-e%lfS(2)L*5k{U zmr#6aBcf}X^K*%1>Lf*i$ZS=0vgWu5(z;H)WMd@G6hvE>7jE|pQe;j=JUhOvB4Lp7 zH1MFixNWm+;pqw_C+{;pbF=c(t~EUxz@KV3%+)J{&k`DRZ7U z(y4g+X!D-HhO%hu;@yfFPOlrf8KIaz>c!brm+C4#)#I44(XubGW-R4=d%d;#_|RXM zCDm_(;zanf>ofgRM>icuc(--80i0<0sL1<^$}jsZ&oVS)`*gm=b5mPIr=B$c4S(vk z1#R)r^>kvp@qUf=8nCK#gJ!j?Z~3;nJg=RW?&xHbNHBPgmi@WOtpCI!8%LDwni{1MU#JShkOd|lfRDx!-D0zks>>?uUkhbne(fq z^(5#+n{{{Pmg!$`!YA2}wztXI6isZrwBdH+!pEHj7d4H1^Mf{=tp|Py41c*`oAwdC znWOb~w2k(WGXFsz7QcJbH{JGJIgp?sm=Oc4wM-&0UR6^YM%b9;ez$Ye`EFDR`H-xm0$B zrfk?*z|!s6k6`XSAFt20ntzJX<@qHm_+~I6h@X>CJtJYR%A+5FEVx8ak8}<%Z*#6L z-3FgzL4##<{2cR|yBW>Na2>s7ys@;{cQln{jeTe9Z8ncd-X50cW>l-;d1yICy|RtY zr1AX)cNrbrUPFGOsNk$rvC)@_!g|Zj)3>9*wms+biF*`_*4|83e1B`^XxUata@nYPci_i^WFv$@~j z8)slk(*k>6LF2LXZxpB1r%A7l$qB!1Z{Wm9*~1QohlV@f=5k+}to@WPemdM*)}Z1i zL&6zNd3}LJbbDFjU4Nvf>zO_0p?l~TWe2{vouTjQ2<=|5F)k&4;Z8aiY zX@Mh(HHCt~0F-N@#D1nmA)a0(xCaaWsCx!fQyd06y&oI(=(WqsJ5J0)vd+oS+$R|* z9@ZURf#btH+dEA-4>KFh53gxD`PE2I1r3qo2bWWby^TbJlYhzk!gV)^uKN;FkA@{B?B>Y@l=9xMwA%7nS?G+tnkU4s82spTH&%yJqU61u8REv-5^hHzTXG=!rvhjphZ{b=)hd|iXZr9pgLNMt@^|<&B9>81#NJcx@k9wN5`{X@BDew z%_`sLd)GWQ?2=BNoWJ6GBR}Zrf%zTK6mggM?bg>n#ec_BdN!RjoKUtT9?K?;AKkmM zHG99fkXw=_5Bv9mhhRe;aaU~Z{;qpPi!SVS2!cGU&)Ci*w!r&m0zNS7E$^Y*wRL?9 zJM+3%xr=6RZ-;k82mMs2xXo+PN@{nRu+F(|*8APZ*5yqI>$*i|+fE9<_UN4Ntivj^~ngv1CXr^Tx@h`uI2bT)gIx+ueAO^ZUmTX!6m;-sYhj z;!|)7U%HRRo$ahPHn}M|&h(Z^(aFlkh9f zr~v*vDc$Mxe3qhoqrvCNo)aPCGwUfAxsX%Sm{d^rm&g8oufvjy^O|9E<=WQ$!D@~w z-+y*aoL#%#VvTMWR!CO(NSrWrpP3N+kUfZ_K+Ya~V zr~P9PRzE!nYiaxOmyrV85Ny zt3&sq&Xzq~uG1)Z#zQLl&!Cl+!bu-62?s2)MJavKicZdj>1KNKsvj4uZOWj>jek=G zmnL${yN*Of&7a!VFsxVk_8+%o`>r0>E#A-hIqIxf-^B#Q5+;i}4XO+>f#pih@956h z7qi&OyEuP7_Ii&D^S*WR#?2O$Gq0a$%YM{SV6G9x&QQqjk6hoWMnTQ8mvBOS%$+ik z>)obG_j}=Kp71+nPp-66hmgMG?SHk7&#%$PeK{m3ZD#%K(R={ zGhd$;vv$Y<^D)PkTcTN1E>2TkjTq#}3YI<&8Hzpq@NX2<4J6zn z&OX6H4+}9rk9aXzdPFQQa^sXTwm--!=LUfAf9wE~P;gGP=kS`rBoxR+YG^P(E~l>5 z_An$ejzVscK7Z9oEJ4y4;GC!0JAZ*nWn|3iEf>!CYKFWA^(kDg4u739wL3=}iyUf2 z?*sR7Bv_Y_FmUEwCCeu+TnYeI;*pC6(hatG~-;464kTaO)Z&Ld z*Chkz|C=2c_Cab{e}4qw9oxB}$P8g8zPKi!?&j&lx;CS(7|sovJhFR=FE{z%Wpb*X zwo9)3xc>nlGZ%mRrRGRvy!$S^3_AU`&Up)J2*<78p<89YpiYF`_*%aYWIrfOrQ`HP z(#NWuQBF78Qm`ad;8R|D0?~_XXfqcQg3ipNgkNRQdQh8pClFQ4Z;*7*JTE>s zLd9r*P9nKv@Q;wf48-PKU*yd`xIp`{7~o3kg`rpJD9hC_m08Gmu#N79$F75GPm=Cb zD>|{(`rIDstz1$XD7m@S(kD}L%lexPvP?ZTI2=LXv3~%LcPd)uAF`FzY* zO@KMy#}giy@p-tSE?AVmZ#Yj_Fzkck+5E1ve1DLy%zzE`=S-fd3er!ublW1}Cq+t}g z`@R)e-dfAsU?i{Qr{6qeQM8yW+K1~&GDWHOW`G1f`!e)=%;SNJ>9MqkBW)vtD+Bs( ziCQLBU^o*E_CYtJV6X)3t&W!7j_~#8^B?}isz+|4d$K*Af%yJH0!COYTwaR5uq(^kGE76)|Ah#Hu$qC2k zDH7<`9)M;F8UbaG=I~$-%qqI6Fbe;jVBY)4y1^J_3(P0g?J+dVd`B%|`D) zQ+@)m#?KAHL*kBFpy%A+b4?<)S#UI$X@&4#T0ZT6AvxJ9XJb<5uLGx=CZfme`GzI9 zFu#+X^PY1b6sc5%7h*T3FtfNH&eJNGeI)g8Gj4O~2~Lnt$WMG-(=WJe&f(w#BVM z0G>qVGf*|CRKXJt7Q|0hT;~<~1(Hz4uPZbrX~B<57*lyBC${$6%zsG>e{V-QlcH&M z{p0fO5_3H23JuEUyBc(!8;hY#h@FL2Q=i;;-l*uV;f&OoJn#~)77&7wBKXh@uK&8A zpbN+hHJcy8wCk3d=1n8ZFB4OLA3$zf^&6g(q}#^ZgK}2e!-Gp~6Q`R)yNunBDxiKo z_|Ti*tfU5I$McG$6@SU3?jT8U8Z8F~0UhLZd#bNs_529XChxbFm#iXOw=PQ}2d?1} zTK5Ee8akLLY&S*ao&i;F#tIuHZ=-K6HoP%0Lp&id1%v2DEW58 z@p~IKH8iYPfpffX((7aswCOL__nr&M*fdy7%Bfb_Rj!d0pMUg5cdP!>Nyw}7v@8r} zmP%xibs>idF@HR0Dp<1IkRLJ1Y~l`5S#rHW?dTBmezerWo%2-VnW}ZUk=2!+=PtaE zr^Xre_cl(j|ISq23IoOfLeQE!{?1m^_CrrJaFAc3X#-vFukJ8&{Z(^dr6SqJ2;`6K zxZIE8_c_rUU4Ix3a=iPh9!9v|Sal8Ix!6n-;#owdD0FAWrDNVJOm{_VspIwM_H!6!;;ZV(SVawea55PaP0Bn$(T!yAHVhHmD4jnZX14!LZZ=d)u8-33rD9@;|AZ z<6-sy3tIhCft0OnuIwtV9RO9f0QgJ|cj5arNzho{gw=C6)Z)yZ`Uzf5Qgx)>?w%B(Y zno0J2B1E+9PKtPZ^lJ>(v74LW16r+Ttaq_ivw!zy=11R;sO~s>=tJNhV3@mm*99t0 zS(&$A10 OWzvw{m_f&{W|E=pRmWDOu~HU8BeQn#ruJ%enRyT z*ngKkwwIWyHdWOSIM7EI6o578e%A!4DapgES{_KT6_Ft?26TRKepqrB2qL*5?*IpP z_r8(E%5S1Od>^omyDm5#+U(sjKb#T)l^dw9T)%soCByXhvxhs+i?u5Sps0kza>2^jGE5%>y4rGML4@-k3$;Iop0by3i+HaSwE;wRu&UwLJe z^n?w+)2TKBDY;)!RDPnyu%3ytl{;ga6O(;CDM6Ci>pM;~Wq*0u z^6DQ*l?J#NKX=9DBv@mEE0ilHd7*A}N;~ z3+bD2<2<|gdU2DA@W-VN6!gCU;^D}-|{x%7!}C2%H$-kD3N z9WmDnXqXO#;fIb`a#KyhOyerCM{TOJOGlhpU@IL%Uae!&uH$PAk})Kzp?`C*{y&Pw z{|Eo@Cz7LSv_U?PKgTZiPVoX-YPpv&2spa%W2M@%07i52T;Mr4#-yCNu&o{%r|q&# z#CSl3!WNAbLd%XK1X9}T72+c(DvB&O|66zH{s6Jy(jQMfb|Gd|>3|9+>q7a!i9S;9 zp4Q4S3~MYtwTO7(qHztt;C}>sRWv)jLlRs@QmxJy{|YP@?e$}H?29q>D+LQd0Vby# zo6DVf#Dc}P3L=D+2GmL-tIO_Qi+84)8{W92MPllq2nlh>S!Dc@v9A8wf(zgEr7m)7 zJ3T0Dn3tVyGF$SFjU?nZL84Sz@fi4%=4(B#K1 z2|2Zm0IX{5)wszapz~u#4Nlx5pexJyffc3I$+-6&Mu($@B@XWZccmLbcp#%FgaoeL z)oW%~do)*#dW_*hPP&BZ7WcP|+ z5eQn6cHiiz=|$z@6wg`{>w>LwJ;@cT@4(futPV9}f^vH6=z0;Ghcur&6xyv{TF2v@ z$&t5W3JHq&;hjD-4L#$pFs=P~vWySq72`3wIl^!WtMT=*9Djm1FbA)`9Um^;>`G9r z0$aa5jl30uoHZI^>j^irhtRYS2sp_gtvqb_zlat27i>qqmwUu+-2$#^^ zi*TXBF(1e$8g_x0tf&6PJTRzHC%w3jEnx>a+AH4=GUzr^`&cH4Zyqjk^CCcgCa1NMRMk$+;{lEe0?y95!6a~-@y@X5^ZYmL&a?V$QNxn~?m3$kxI`B71H$dlDC z2SZ&xi#q4-R{NuuaHe1oaBY<~;Ehr_D{?FQZI90EUqvn44F1!Y0VJX+hjl^Db{Il9 zC2-@W+WUT9I(Jf0aaVhlnPKN5faL<=EU`!u=A>oKTYu^xA*sNU7XPC2;CJx6e;f%2 zG+VnU>hvL$z|oWWoQPteSoQE*sl|pPDU{dNj*xJKQ%>J$Z46DJv2W)#auJovu54S_nAv!6>wkHEPduit3^-hJ{)_4kh+Nm zl#vTv2El%;gu`QUnSI41&?FaPfa4_=K2d(GoR#80)F(IkH!?A|lEV>tRXWL-j9}#O zrI)pdS9xCI`I4@@N34M4#dBsTg;+hj_*T;nmw#bxwyrL3{XM(am^0y;lt^)Q*^2Y4 zT&`(5V4&vbAVKGixK=New;^VR5m!n0e{aj z0Pu%)#e6{BERnn}X->Md;1A_Er8m$Ob;D(7&*IS(h(+)aH%%@qkPn+q5E>3z`N;zdyV`zhD*LoNd;CY>h0{=7e-~*=}s*kOYhEdCM zthJ8_FPlv+K%483!I8Cb|H1G z{RWW*FD`Un7rQpHmOa%)(rpClX_O$lH8wNiUpIZv1);MQ*>2i>!`^`=D~^9xcZs#{ zROUMVG8z~SA-fZb&6>`Vv?alY#)-^ER_@}*44yuZR`m_wV@xVTCl_AyGivT++N&yHYM(6D36Z3z>}$v>PkHG9@u| z&zGPo?!CN-p}cQ~#cV%`tjK>v^P!{ey=eV@I8|yjo^nw|wdhch9Iln{Y;vWp5nhkC zCoUZ`@Ui=@9|UJd?(3!Cme$nRzcHJ&j>c$c+mWOXoi@9U^B95jLsGS z_lOLxS-@AuZRXX?!aw~ti5WDs!yyP&rRNc6* zB*t*3#AJFHsvQr0v6-bFsiFi#N2-LHiIgL5LYYN2n5%>Bs~3 z!>t%Px(R=Ubu7SK#XckTyOiqTHk4+{Ob37Kf*d#MyrFHL;OTkwUekwP0qZ(MJ`EAJ z7>5S-?GZG*r6grrQr5ncwp{Ygzmc@p6^UvwvEuBMSVti=C+Vk@%`ys?=NvT&{q25% zW^({8iY5+A$CYIzy2D=u)?2JlBm#@}Q;y#t_O*XXYxfYIvcb^s&vMQ-5%DG05r(*D ziGnmXZ!&X!Nt-M6G6ZZ4{Lz+uF7RqmU&TQQVv6){-bJ>P)0V5910}aY#Sd`vZ z7X2aTS+}4P4!lI-h(?dD@D)yGpTx8LXYPTEQ8NurC&Fkz4u}O|!OQld5n_-qp4Ifm zk9>csYydwZ(1xZ~q{6MfRJ@Fw!$18-EV5?B-tDy=Fl+H#<%}N&Z|2iz)P_2-)~RPe zP|1~q9=1lS}`Nzee-`okN6#DtTxre8asZ3PcDY#tkE#E`e@)K zW@GVdqWh1)`Gqpn7ox1+fn+y(=^9l%+AJDo9G0Pqa!O`>VRErAif!%sJ{FHW>?$4V zZ+=d!v~VD}Uvez2B?)4Y&yi@$JRKR`Uo^2ZxGarC@-|Z=)xT3A^xrxH@JlnLfC7I} z=ZrcS)u*L$Nvqyid!=}7nw5!3YLcsjODpucA&bf(=N=^;7;^6V1n^|^v07NFst zc4exTDOWlAg_|B}&y(9ahSP^dwH}t#*9R3L-y-a3w+ad?hr1~6j}P0YQ22GSq)BT3 z3T&G#wo|ygB*9EfQeYNJf-~+hL*=xvbA)GT~w~+^L-%=hzNx z&yS3gZDsn#Osn*dAqHlyDZ?7ekp(6p_0@25iH>W z5dn9}iXVips)ALfc)O2;P`i3d{p3{kcn+c#W_rRe((q{J2Sa!ACv!UFrD#Sf;vt9% z&)-w#rN{xw?{nCT_Q%BY!vjD>a7P56lk35CzP(F^Q2k}CL=$D;L z3?t4?0llYV1rDZ`rH&3$jhV(AJqDCPg3!Eixdwjf^T{hc*t`9#`3EH3`N>m=GX&X4 z;HiOqxPw>n5CMU!Yd_rBUuwXar}bF~IY1Fx+k(4p?>-<5Hco#%U7x}C2up1)2PmFU zhDpnM`j;waGaV)2foDTeS3l%2tPJv2k|?(^r$ec9U$=t*EV<(6W!;ScVFm5*xd`qa zkC7I!v;Np0QG1mZSDt{hDq$O-nW%P~L&mq{gvq~;)KdI@9ww!)yUQ&+Q z!ak7iCb|brmYkAhvG_S+4a}}q!wVGkke-pYU2f^A{mOp=aQqS>&|HDGq0I=#`oe*< zdX#XKEH8u*XxNiebOg4@T~_IWCs_zP-r)(UIg*|H#Kx;r4D7(9_OgK`5S7;ir`J_RR zI(7yxZ1p9({If^*ZnT-Ban?!@>jzV3!)`s1pnrdf=&o~b#2QIl)jLMz1Q^9NqXw#T z=AoJdDzOp!GCJ)}n0a*%Y-@(8TJR@a2XC%cvJ8TV%4Mll_?Wx1+SiAP>z(9pkY?hv zTikMH`H9+gZVNE}M8H-Tmz233!@sy|LWm^0%PFO>oTq8aIV7XgL^hnlASUC8+ASze z=|X>6J0<1YjA+HiA2}(Td;EU#fwD(B@}TdkWd_STHkBFOO*iP^#N8-Y$c~*4uWtWc zNdQER7;lFhMrd}HQ5y+E`wPWaJt^G!Euy?IT1VYo6-*@`JT*{SXeQlfM7MWuSHSQF zhF6y_TbL)=b`^9ko|cCuxM}#kOwZ~%TIqjpH*+t8$|cGm;t(VVhmB>@L!L%BfY3Bk zMWTD`P>#rh(eP@3Q%YykCf^`rokreo^BKYAPEx_yq39RBY{hs^8s3Jc8+VXcH&j9N zWS%Dx(~iRb{*@7~hXGYFz9+5&;)7J^%DA-~q7LL5i^p?xO{)Sh4o39DOiZ=r%MpM0 zj`L;soF)x9>A7=(;4n)iI}w;2Fj(P96hRb=60ywlr_H)*hxvAlowHM;QA4L5~3m0R>WC4IrLF@WP&LGXlshp+?sV%5^>n9Dfyv*_< zOa%)WgdRp)Z0$u60m>p#_t2Fong5;~!4y zNDK+7i@SKbevh#Y;VjI5iJm6h=kHXki1X#%oS4_5o2f7=la1vX1uSI2}dFX*G zg4H^B6~o=fmim{u>hg4xWL+HFaaeZUQcuv5R?B;>q4x^)h)M>P#xbSw=#77ax4@4O z%~i8)_?r)!3&zYbRR+_+Wh^zOT>WYwze5f2QLxXt@WN0PEqUh-=jExYW7hd|J5qg; zU0hA|*nXdLV6}C>65Ens|5Asx%Iheq?9(CfNVMVY*@~H;?`qJR`yA%UC~=Lj9z*c6x`WT+ zEHqH~AHp%3G;0YW!8qYOPVX{mjS()Ku|Kn^W@^978GY2Z)L?0(n6Aje{%*jC2g9kD0&f1Il38CXQ60C!}9_ zkf+uEO!h%l*dc#B=}?CN{UGe9{OTylT9s|Y1DiAG)2@lOIc~`!XL#bZR8l}3csS*M z0e|)qj^pZT&MLfcO~TG_5mxBT`{*@I*ey|fec=4#-&28zXIqo1EWCR~Z;RnJ- zIOGz0_LiS8W-wwpFe4ga9i!#f-FKa{F3OhVy*13!XP*p&mg*rl5lInhJXZYioUGcL z9{}eAq0y^YQ}3w>jG37`V4A%vP$TxIyCU2bsmz1}ct^9kn-4x*CH@QF^5<6B?QuPT9CB8Yb z?c+K^_t~;FkR!o1>|eUciFX*6&#p|O6#n?M))eKQf+E1MVGynE*`@TE@vpm%&N)Hg12BDfNFYE|AobRWg@_GrIUSvj-v*i1Fbk zX9GORNd{(#d9nEP$5JvIVZ7;Gq*~1mBmZX42d3`_ciUQFAgqu?u7c+EQoUT86hs3# z2uWN68)j9(&eQA)P@`JQiEKh*e6z|6b zEd9dPQj&Jp82be02kc+f==hMdzHkOs0`l}cQ@lD`z(G_mgfV^37Y+v0CTga_WZrMy?`W$ z(Kxv8+=iwzz$bNwl0-X@vRtq1VvZTHj>ACDFZ{_ROyK2%du`Ygm{We2{DfIv91DNF z1^Y|Xec%p?Y9JO7i)&X51J@n$*F84LYmNhvq#nv9HfCw_Tiy5FImP}Jxvi)({&7~9 z`Rt3m`HZ~z`wk0-gx2=5NHV1}X1FAJMNN((;T^?M{QG4p!n9l0)%c6NYDiMMxNMO{ z&6blOHI{wNOl23{vq)z8jKX^4W*UDbC9B{eT3;f3m~{4_`c1|mn|E)9a4s%3Sq%l@ zuFITBb)W4RxDZ~S7<7$6aD!TipPER&jULwd@O`~O*97MwWrFEO9#toAhKk*=p-QJ0 zk18t*OxH!U^FNV$R?9jipPN!P4aL+ok;iQPLnzKXj2sexSa?dr1jRE4{G=nCO>G}DP%o~C$DQ{D~*xJBOi-wyGoRxP^d4P=m zzSx_t{AW8sO19l0{`BNf0?Y+%*!iM)V>!;JNE&lnxaKI9F6|tVYH$v@1GrjX%;35yvNT#HL*eRF1WnCJa+`+Pg)|T@fNeXa)2DKd3G2ElM!^ zbhTedihsdVTL=%rvPTX9H@7{4ou?nBV9Q5_Rvl?D$lgJSCz8V&D1h+7AZO}^t1%Wb&*PL2baE1kzIF;$+o*3W+}ldEyH_0Qvy>gIOi zeQu#FtuMLjlr2?5QoU_unvF^aC(Kk*fJPs27wk;Yoqjed|Hl3zL&6&aCpbOwZDF?b z1L?DIO=Kg-{LNySj34}4iR*CZdTn%qfvT-6;JV&8PeK*CRu^@5!hJPTv)VrV-cPFT zUYV~g1@wLIn^1q~jX5!^L#mFm063FE`5f>B2&@3W+>6sYNC4MsL)hsdJ^5h#3*a{B zN7eV*sH3O#xKWw%)0J{22<#=Zt9rBkSFZ68SKqLY%silpfFN-2q0eJvPHMYffNME( z*>@&s-$5Q@%~M#X;|P_n#kjxV0TPsRMtI)B0>p^^O_igt{PO;|x{L z0^CZzFf-4g5ufkHuWvbpIbx|Jgsm99_`ms9L#o=8XqsT0$M6H}17J*EE{4?)$S>;G zFKPlKAbu!r&VSp8X`)5fE@T6i%=lpAE1d1stQ|uZl)@llf>Z1A`S&o58GwJnmUVT5iueYq5RPeas5Ix+_tn@Fk^&%8e=t#HHk+huZ3MO=gd7mJSFR+% zboli~;Z@D^qnjd6?@JXp9~aQ?hzHgKfcs{hntfBCH2Y;y_NX>vC{(T#wQ!$ftQt&Z zPgvU*^WEwOV&5gy!jp|=Z*m$}P}LH)_t4s|3;TZ~v)BFzqy1cNXzbQLCJC6h!54%| z2plG~Ob6WTpGYG_9>w%g$__P`B$)!}SIlfQY6bmbi*MG#iJ(UcGRSg+*{!+F?CtNe zQPnHnZyCRbBEq<0H9wx>4E(p>k=SseoEXth807T27qbyH zA({qXYV0opc(31X^F(d*J=E{%cqkHa+6g@!gw^$_8h-hXMOL2N^W_Z&=T;i6R`NrV zrBHR5JI@$Gxgt919F^O-j$&YigNc7=U*}~VAA{|bN9RhMpIeK3&~e{Xk$bkXHRc94 zXpx{iV|(|MxNy4CQg>5H5k|VVP7;EccSoN~>t@^9sSZL$$ie6L1c%TVRhShQiyW?< zH_h9RdD9A-wA749ioZr?EIe%*xC-yl%(#q2tpRsIcG#^d}jL@mt@1#v*AWiwbJu=k;tIMhHH|soi%4b?JF(b z!n6IuTH>-tV>L`8R?xfJfntp>Y#l4iI` z5RoW;8;75ZJkqcd%^lXw!ro#EL7jx^j}BTM6v-sjpQB0uaDC$l!&(cFW za6XqjH~l;))_x7{bsFiy#3iQS82Z`ZjzVeG?~#&0Jy2)SJB1xiNThHCy#$KzST@~0S!331|!0F6d&y9~P#J_DJOquk{$>zqVu<`@Ly{HJ7x|7%(;GB8Zc zHe9H11YR03JMFHnu_Ou_j=K`8d5@A43|r@ZtJXe{)ARO_SDCjt-;v^#8S5^A9 zrImJtLVlAU97%sWTMkpq@6YfZVrrTsFhSB(IgtZFJf+3<wND`3^!MY#ad1%ARQ2KlGF-^Ei(_cIZ9OOO>!P;va+Su9o zEdD@@dE|fPokP_|01RGsw+b@FyvVXiua2wbZN@rDzny|?@GZM-ri9K${DzJ~Bk4jO z9dY73>{J1ARsb0Cn5*+~{9GF~d=&&JWAzYW2o}hpEQ0T3>+jC8WnwBxBe(Lw8>=E!Y z_d#M0EYKS!O*C?DfSn?SJ&BC!P|H&uCkhAA%x_Ho+o#>r;2&o*naxjVv56*a)*v(Q z7KNuN;}Ze6qSoj|PhB3obiUY;T%soF8(76BUa`TQHUVFtm$F3o&7;bA3WKZdv_}^< zOA3Er1#Nh%6zwlI#_{Fj4d7rcwQu0TO*Na%h4YA^f#ExvNP|TegXODq&;!`u4o`{n z0ECPjC*oY-VpzPL5a&RJ70Xx-*Iva9ChyzY-N$xSLpe2oQmcuvZ+JersL*BCe|>$ zNsnIjz0c?fw)|Z!*uGwLKo5B`>^a67X=>){k*0sYW6dFiR)xpa#tpIBH#KKG)A@fo zclD<0e)C&C)lAQ&3_b}MKht>0#PJmXx?chAdA*c4QyY{CR9rXE?Zo7%4D&);`sxV` zMY~ub0Dr!AG_n+)hQ}AY`osW$8u-??v~_D*cXo{^N;u){8{_$Ey`~I$U4fAbNU^i z^4`pfPlXZ_ch+HL+njt;sTR^9I`Bdy-Emubf=Po84nj3|dVsSJR_r{3>Ijyw5lMAV zL?-_Y`$`~#%f+z0O72VeJ;T$wBZ$4L23Wn~c>I`Dr6)_CUI z{&M-mJivWUYFx|Q$R#~C|KhDl?L06_)<6*KriwKLCm|5j70gXDcLD-RA~`T_p6ef$ z7e3@1d+J1B^d%H9?=qlyt|73#q)? zH~!=f2#||iRAiiZxMp5<4Q>uN^7=u;UcOOib?6*ov@-1y*lR5eTh1n?4FSv0`4(U; zU84cVQwa}n$-YArz?Skdj(d`9e0I?_gH`9wvaSM=dEb%g2B@v}nEHRir;aQ5AGWq- zrS}CZpgW40J&sqv#ba~Fv`yMZy-VbppI3at_mXR7=3t8n;==AWd~#%btqQeupBWdzF>25cp2rCbgy95iq3Z21857wggwuC(@E{ipMb>A|-Jh=p zt!bt3CqPo(XzW*}1v#m{!2#TaiIFqkG$H`sx;EQjgfe#vyu@d%QN@xK6w7886z_=! zKY(J0$)gjSzIF?=b`giyvU3N!!{vJ%3_C4nZbc2pMit1`3%h@|*Q)a$@_nPUk%bU@ ztP$hHqEo*JzmW#!7%>DFB!i>;=1jrZC&gYgAoaGZ5n91Af~bLv6*iR4FQKXfilz_M zC$7)^e7Hr{lM!L=Ln}*5rMe*R;tDYRQD^BZtE5 z?QP%89>FON>{Wkq*Vz8?7Uk!x8MXy#V&Rx`h`PI5N}~DelPPG%KM;&SNDF>ok|z+s z{{R#B4m}vdr>P#!YSRMvbTj_oChw63y@OU<<8b5TBH6^^6iV@7S=0s?8*H3?Z3*Z-kx1*u5N4||Xx0N^y z#{drAFQkxVqISx)sb#@urSJn+R@MqbXX!v*govXFFS)$#DgB(bN&=B>CE?H7pT5I1 z)rRODz3_xIP9w!BP-kWToIvIWeLC&M7eClNc(Ph&Zxpel$$MOSfH^t4iAJP%@T^ok zf5^>aA1Hr%XFNzG_Psvb_5(%wWOUmeBrn6`46}UZQ}~0bMX)+|nc_^3Dp9=Rl~@G@ zUJ_JapkVBbDhTCl?Y+|3iN@zjG$4Ai$>3304IfZcv2S;5>@7(GsM~U{eMR7F9C#QV^{o&eFL;T_qI&uOI|Ut39jiju9Cht&4wjpypF-Y$6F7qz@~zn!ZU=@9m{u zUl;`1B6hmbO$`*7D{Hi!EEyC8hAz!_(()8l9~m|Q`R!Fd6#InesGDF!oXJL+P%SnA^ z7?ywGgp0sT>KGUFz-rJ|Fh$XFnt5*U-QfL`oEIl~jBie%Vk+?XLMgQ24i}Oe+1#wR z47u){iSk+el6~CS@kM1_%6M`h1!rxN8pNmm7K3(ZQLFS=(DKUTQ8>i<`|Au*~J%x+bPy=Ti_C4^3XsR|TeuyVN zS{1oZPE)TB*#Wd`YP~{Xe6MftnzXqyOK^($+cGkA26Q+=(&1B>AlL_6NZ|FU@H>Bf z6y?*4!0nI~s(45=br?D10Tp!hJxn0AjI%m00T-! zTB5vFy^(&g1|D|_5avwG=sYn383BLpky-k&SwMkHs!6Irl?)=gQJG$~nj60H3M5w{ zoB?%1n}baK^cDYfDq;v8&Qr+q(Mg~E<}@NsfPXOG<5R+~k|USC%4Gxsb%$y67rBAT z{p{OYV{Q1eqLnINBg{!kRYre>_D#=J+%@D}eq!y>TS_KD5ZemMtMWHg z!6hqVss1EgW;r?cjas{GOR4|OE#rgbsWs-mjI z+=^zgG{(RTt8kBn8))d`c&!azctcgg_g@Z@LcLW>S~_{gj=})nlJiK4-&a(Aor-sh z-~zqf^i+inLo`HQ9rb^Q^RM^xp)$*?UXSGU0B-T8b$ZMuKZ3_j?>)_ep5y_L#ckdt zxqd$VwB`iFMH1DQQ@6X%4fEz4`707Rq;ydU&mpEGf=nAoK-enROd5~0f7eQFE8eMLb7(@VtEo3FK5zX5sWgV7KI%@l z*8SZ7wq!P9gK&R;I=?Y)H*o$d?Sh5;ohgBOHb){;^8vQGb&&46Y6VEBaS36Hc5f6L zwUE!kjBHF3iI>c#K7!kmK8CF-#T06a-;oU`zK!599s30@D!m5-Z@UudH@^{yY@eU6EnSff z(wmO!J&@%536HvCDP3Q&ay^`0rXadi=n(-aa)m7?oVY>4( z(x89f!KkK#;%5tq@3|rGkesKff<@1V$jsCivAw4N9l%e;lawJpU#abu>B5OPcaWcS znh}&ML3_NkPQllJV;s-twB@D& zR64``?s%f}Oq(cko1DF)jJ&g)Lnd5^sRQm>l^EUkjDqvt8MbZbu)#X{7A^Vw*iU~n z5}UaOHN2BUlh;9im^$@HIp88XJ+QHf;@~lF_zqx|@D|y_?~QYiI%c73$n`hXeuC=) zDgXT$j224mbSkenlH2hdeC0cdU6TqKrpee`t*E^3I-(463hab{DR}c+=H>i4Y;W<( zAG%-8}_@QY~jiy=*Z6nI6({hT@hkqQC`h}dD-$0d zGRZ@m=Z!~-dwMCM*qboxX<5 zWQHo_pf)CN;nuC5k> z7XyabEhto7#*;;b)MSm7ITf1K<%B`#}wF(yfp5N4hf$7N^Hm5!v zu=3O(t{$ekmr#wFZy;32l_L?_`60)`Tchg;fN^FBkun?NCDMNqKafBRRJnv1i#s5U zX%z=iZn8$A(m@=LSD>2Giv_G&R!P;EPQYnR`US2zcwJxEj3{I9Or!Wl8jHqLrmh}O zq)f~>r8De#vUaE4LDi)NIGGy$`27$qdobHF)G<4Du4a|Z^x<&u&}TzQSzl9YK#_Sz2?216PQe9gO z|B+Hfm2lC~G!vPq7&t6O-Ez%`6E0UYPa8jpnLnI)czA#G7AFrYpdT&iWkUOh=$jma#c32SDCN%2%WQsNYrry8y;n^1?%M z)`a;t-Wz6+KLZ$Y0_y_8q)V{(AY0&E63G_ zOaz#Nn*1xCsC2H=%gVkl_NtQ1{ zYvJ9$pD4~S2VmTZD1h}4v-T@O)6vO}QrU)>wH*)kGk@y=^#l$Zei~{DE6E^vARi z?oe6oGcG-dzt?6WB72KPv@G_Phj5qta}-Dc^r#cHT-24SV2Xi)*mXb1>O2)SQ165smg)IoR(e-g*8+M-HpGzW!B6qYq<8 zH7;ug%sm}q_E!knN?RNRs$k^Aq_Xfz%h@$6x>P8MlK9GPAt&3Qs%UIQ%}SjDkYK2K zZV;8UY+CuErbA7BjWrFj&D_YaB%c%Fz2cKjR}}Z$xS)Tf zupVORdYMTmFSG1p&iA-j56=}z{J`t7M@TGK*xHXPrz!~H#p@rNS(k_C(!Iy8!N9?^ z(;s_KY!MtUuN+FG)L?&%lhifFSWU;U!vX(?FG4rKMKz(f$X&fCa(yI< z+AmaxSM-)p%HXUE7r04>>H3ApI9j6P&ZfJ3BgtUED zEzg@D2kl$-9J%#qUeL^eHfVdRSyok2D*25kdIuIRrbF8fXDcgRn6<%D&a;0RB>7CI z35LD4ysAH})x6+P-hI+vZeFy?Wx~*pVNf1Si_z3~sGR=c^!u@tMScX4n-BXL`7t;E zHNAMe7DQ(e6J?22G58aRT*iD9mo$RA45=^7>iW%0=Q3V-DDsd< zf{~WC2YPDhnOqJbS%QgaU9Nw;DNVdKqJ6=p`t)+GWg{CR`9WW49L&$dOq!mqe6s6Z z&+>tktR!tJC_D*fW-PYB(n5=5SWdzATK9KD9|G08Dg$#n+4#wWOB%L{YouF%$2AJK zEbTHu(x;dikfOUTor;RZl@EUj?PRkbW|BuJ5s4y~nl_Dp$tbTQ(Y8)c`* zEc0y+;;})QINXMXWlVpF7jee09m!OLv$OPQ>rTUCl$_rB{3=9}Tu3WuAV+dVsF#;_ zT8y`8vVaNAv_Q;!q9Ll3Uxh^+8O}=X(T|93ceX6v!Z9CJH&fI2D)VfUAj~&8E$Mo) zX8?;#{n%p_d~z*Knp8-WqxiQL`iNOTf`sY@ttlg!BNn?q1Lc46vy!~UC%II?AabKj zaxD&vw17Ed5jn~Ebe6zpt+x!%Hj%P!q<)hCCBH=p=X(T|$ z)Hm4WK?PvMN`GgZfmHpyA0|OYqjYbculrk1Saw3W+&=&_{^7tWv(}$7qvq|Ih#Y>)+~m8?)m-{{5co8u|Bs{pYz2 z|50W~_Fwfp9pIn!zw>Ob2<$&gh?d-jVVS=qHxR>M{}g|r;AR+y`F|2?2QT$93_Ih0 zhobRkYuudHtjIi)m|+;H|8vGk4Z|?7GonDrBxnjL48s)bpOGQ{vZzrH$uGx3Di4NX zxA4!?b~6lv8Oe{NYpHAt^q>CifAzV4+W+T#Cq*7S{VN6z!$=y$Fx)zc!P9uR~q)zxVD*rfjREa69OoE|`VVHFOYhOVah7n4W87+I* zbHr>JhV7-l`rThw6yO%mFhk(BlK-yP#{SPA!+tmEJnf8!@B|J;G`FVlb3;chgkGS~gQ!v>?X$R?g|LtSwg zKebu9v|CW&O*`H)dSSt>mhY`}OebV0&!K2(`o&_JK4mWfo(>;_Rp(8}wy8~Xh(J6} z%cZrjG(cRg{{pZB$#5en$qB`0lFv5G;K-I}mDcnf%_)30#kRwYjR_vzY>u%Fk!EA* zqnv*PE6m6dv>9rx-OT3j{T#>t^u=j0QIEHuT$WpxhQ`WWF&}ik5PNouE3O)LLBQnn zr4CjUKcv*`Tss40)j)$Xj36SQL8fZ0LBy@PHCdfFsu2c6@X33MzbSML*-lG)T6H>w zhEsH$4GC(NeyFYla@(wsYdcHJ>p-suvtJ%+i@#QpJ08KU4kKdkV4-R9ru;_{t#YjD@fR~kl~%~Ch31JN~mh1 z=rR*FI*pOp*2K^8chjTm_nlZYDQ}sRNTH;u<9opR>zJKRn=6^+A1wLfnApaW+^>?t zXk~&LEU{MV?exwMRm=dt-;m6aWE8Z=RaE-&Nh|Iqub;K+U6*pI3^c;t2^=t{mFc%% z9FWI}ZlL+BK1<4ODz8pD@6vxUCp-s68B|b6LQ>rL#?K9GO{|*4DdT`=&1ji|TRtpe zL-YCYYyeNmm}cAU;GNQg=;&-k(K2dCd@}k4cbgHQIYi<1nm95H_43K0==}vgTK@~P>R%D(<#siWGa70nA=gX{r*8s z$Cwcrd|S)BmeRWB6ZApdW@ITjmkUSkTrR!WD3(K%sak?)N~;zemzn5`GH@`Kfq+*# z4GqY|M1Oqnt#Pl?orAIJgBmay9TK#;XErLAIk@3;H7Wz)P{EMLziRab=QxR=^vyE8f@3*2VR}vgUh*Es!~Ydvt$jG)t|oN$H;;`m_@P%B@;R z{KO*OH4(F<24~J@M~wtN?R%hStYG#UCz@gedpr}DdlAYJb6`z5x$_ibZtYyA;pL&@ znL!I>$C$0;opFP%n0Ds5B_t|`7%z;_29-DAAb=j!DA6EIfE*}UwpP0a!Ri?bo1w-J zRWYpw;HH0H1MGt_={5dD@8K=sE1DEBPE*AN;Wt)}wZ5;Vu&n&E87-QzfEuiv*(JVz zXC}?R&IRA3(iP1&YF0JzsgR+oVzHRCDr1ZsyK6v_dNiac;!)j`HX+|{Y+y19`R#2e z!N3KXcRD0`UU*3psdgrSN9*^cAunZ=>v#4!A+xLr0sd&@SrnURaNMS&14l%vWcsN{kF>ypzgzg z<)yCZn8^u&`umM4lvwzr>k`|zqOHOGEJTfNLHIFn(uc~EG+|P;DnpU`ik12~p*T@` z?H+$`JVC2M{w$_iA#*(DnuQ~!yv1E1EvPAz)%a@I!je--TLrESfJPkMkf`QsbcdNi zgb8V_@J%xR^TU-|8MqT3YypHbN0pe(C*}I=S9rk`xp{vUQg8BTV&2 zr^r-^O{cq8IjUF|PHLLn!Bjz@%W-I0^vhP+d=$7pa&VL;xKhI)zwUehML@d0SNoLL zHO3#Q(Jh|uo#Y08vmv>Cd1~yFjosq>i3+9p!F5Nt$O#uis%>aX@H$5};Ef?3vG%XsHe^6O(bbDwKjU9s$o(QgI(s=Tv+s{ki$S|Tq2reEjCHEfl(wlJwsSTH5goZSRFNg7P zq0{s5`SiDZQxHO+DH)a5G8h4*Ob0;4r27$n=cOH0K}{(1gI-AsdGM10 zlO0j4VM@RbLzIi^5FUSJfsbD72fDQ7Z3ZkMOdP+tFT6YJf*qZ0~1Btih|SP=ijH8Ujth(ra#cJ--SH$W(#^vOfoPH1c?8%ANmz z=Gg~A1P-o>o5TL|&+xu2Do(GWH<$S&f^qa8ka(cs{z=R0 zvaG7^>ZnqmrpN; z5W;8@9z`*DDFY6#a<4y1dCK8cJL;zxcuZuye9k&>qi3A9Tj!wG)3`s2KJz{I@kJ3M z6JP>X&iVVbTzV6<(Awn9_Z)3=jIVt=TQ`h40UDQ7s^7Hc6lLhRu@X-GZ{^LG)T|~`D$YO9n zTSD-I;?!^du0I^}cj7EKF65MpjGE%3#_@yg2wxXC)Tp z3f67eenx-<@aS#y&fl|Qs!?#SH=c#`2eI!18gy((aNU7qNEwTMPI{lG{i$23RJScv zZHKih09Cea%z7sJgX`iEf`T_k{B-`&LD08zE0{*dF8n6o+iQva$LdYPu9}<@htBX5 zej>C+|0>5=a22nAwGnjg?i?QNpgqWgfulwjdM$V+c0P*jNS{Uqt2v-}5k!n9XB^{s zTexhKx^{}!#XcNRLsAcf(79sxq%I4JG$YfyC0t5nL7J5+7eMPAq*}-q^fv}*&?FI= zRFu$&T-4&7x-Nc`XBSslM=Aggvbm#D!lQE!x*)HzsSE6XZSM*xJn0G^N@Zl?2|Jhm z=?Uv#fLE@6w5g&d+GmY%y(uRIl0 zjGrM}k^EAoj9}UW!b-(DB-ILg>KN*#YVnt3tjg)-busvsxez|#D(KK_BbEtln1r;f z-#&fqyhY-Fj`RX{Y?Hf^&eQF5phgJmS?EuDqCAd%ls2mVTz(N+5Kv0)FsaN4pu#;n zR!_qy=DaVX&U57S)~!PA*ho5aBVvqS>Ok-@cPYDlDGf8V&_9&yOn;u~3rmve=DxZ( zyZ0+K%(f>kpE(G3iH=Uo{=#CV>j5IJ^B_>X9y24gfa_B`EMi4te93oadE~HvW8~ui|Soz)lVFxCEl#l!FfI6 zX6Fe6MdnuuwS*yK4IMbDs8>49dXJpoMt9HUw38@D8T5x3g+v&o= zw_6EU`4?#bmNCH7V|+ZKQp0UNxRg9@LhU4fpA@`8q&Uux)Wi3g`YsYY1MKZ;OAuZ( z^e*w%+2KRyjdMSKz~y|XXj&H&!$ZaUa+05vu+1V_V5J~ZC30Ka*ZB7}nedw8q2|c+ z`w?NpHN4Qk8kgze;fE>|C|l4BH6F zaki|5oBU&kjoLAN7&D7#u&!+KnD`)9S49T_lBh9O!jnc^c}C`}D;gB^R#Wy6MN((q zUOV>OPPfkko9>xga|=6bEI(S;S5Q#V5TslBOFAmR}=qTnS?g%5r<; zLGhPrZX)p$y2jWyF*F{2$}0(fMnr-wbkR$Kk18>Y0qlMFvO^kb1{)fS&&Ew*JNQd} z_ejxmBkV6TvEGkWp#hv@h``vl!@)s^4HmsAYy2*3uQb|O{H}}Rk2@Yr#pgLcYgWe$A3s>D4@1!hx7_P?2z7Lw$P66O6X`XzS{~qMPg@b%{zbv5N zeU)*!FN3J(@Ru2ywI6fI- zva~dl7)38dL(iTU+q^hj-VeLaq=r2{g|vQ7DVI9sHtRfYOb6Ly+(>t*beN?SSgeKZ z%P8Iv;&0Y8fng49QV3u{f!Riebx}N}{+>V3i=t%dG-zLta65{BhyJN3%ZDrNO2I}4 zgg=OIEqVTN2UNDusdDeKa*W9@l?y`D#Yho&MG$GzT0eP-h%NQ3Rb@pc;D}lwghH}p z#+>Tc@W}pmH*mrK0#@*bR2oXHt;imc*^u}Z+kDQ1hN#JVDU-AhxZxoQR)d@rf{D`s zZ~_vB$C6%FML+p}csB_5nI=TeUr=%jZEwi3nYI$RvJIguWd_akh?g(6Vl2Yf^qa8!AQj;klzySN1ISwldqpD$5=H=^H!s4gjS&+dSYIc;~; zh|hr2kV#C$GTl2t*5G6jI&QhMBQv9_n;%w5QKyUT%;69*0tnD8H}ius9kC{R+&2pT z6o-AziOP$yf1y84;3_;8owQd!k8fbYdu2^?o~a*e2KtYy7pBY9B<|4OJPN!>b)eUe_QzJfJOSQD91+*JM;At~AA%^!>dV7GP( zeM98kSfU%}x5Sb%fHI+vbcNaexN>!wVZZD$fLr^2)`xIutEy(qK z6t}TXY^0-^O*a=cck3JnesiZQo6FbE0{r#{cZNl>GovlV=snpICvMA95Xpw8`nycO zD3c3J0m??&fRfHK5{|enU=G7GBbq>~g^Us?R|JSp+d{hV=$9@R$DBv03b>K$JUKP4 z%1A(e3@+#RAM#aOU?m}B$E1_U^@n7w9Q=F}tbCqGl|qaI>~Dubal)BVj6c6bo_0k5 z%_&;H<4bI%Yn?VaheTg5CUMItTC_HA0tbGPp%~vyA@0rreS>We%4=!RZ3Ja4lV1F4rmkw}(z>hx+1w zUz=5+mYcL!M&^}e{I>pv&52y+_KqkRdxbA_dZoFd^1# zBTGilMsYc;5?mWp%1?|1Ok-`%qG|9oNxI#f1zslmy3*QTq*kRB)<9-eQWnc%lTqG& zKGBLF!dpv|0GEEgV^wDoa{B&M8)m$J%6gZ_6uy62Eg- zE|AO}?}O8XSa!Kk3a4q3q>$K)8=rP9l;%uA^x@RTQ9$xFe@Z9ryaTf#Nt||s9QE^D zn-BhY%I5ZzZ@;}Aw{O&&){)pR`HJzd1A$?bmt5#JN1``V%xNB`!FqqB-iY^qj3ST3 z{WR_b$ZQZFpnm=9H2t!CcXunjGPaHzY;AZIA!wxDX|k7X3$Fr9njdr%c>f7BG$w?E zN!5k^YA*Ut(5>G~SqkRtPJyL<;R9`ty^+QYvXH4JXMT|Qmx7Sx_TqFHepbrJf)`hp zm44rl_|~yNVt>KaahgLs!;A@k(~tK+%)eO*Q$->r@ae`4gGl9WSS6%iMvLKP=IXb* zbMqFGmh)O}hn-kegzxfO#TbJgMv6H`axwzvC)L2SQj~O>JHfUK%_@Wo90IPsA80u7 z<lR3iwvj9jz)|>JH1AKXr=lx5N=&5 ziL@k~zGof3rua568vO@uve2gcT!@2A9(7v34GgG?!K#h&lu50)PO94q*{9djXj6UY z^*HXKn5;pTv%r`T^>2!ji@2L3QQKn#+6=mlR{Ko7*WazCWCh$czP#k}#brfLcnb%O zxo+C@v%{`3BN4>of>bMiMyG0wEFE`>RUtY1m^x+?qHJd)gfhT>;?&gaXrA0C>ByRc zaHJSe{z~B+nmRY+TGe9Hx|C(U4`YNlAX^ZDqayP}Q=<@*JizIUi%ri27(WYuuQ6MS$=N&A7rrN7vJt z%Plfn$D9N`>KI>C;yTG-ieJ|p33JMMj2s|~3e;~Xe^^6-kF7F{h}z!HMS?f$qt0Dm z8BW){jcr&ww4AVi_Q?Fqb9=JoB7m!!pS?g%9f^40@p+F3wWyFNx`HQXpS{Ca2nCwy`OCDgM|h;j@Wy zS|+ZN2QDmr)Ow_u$eekpY?a;<1kITTMoD-n)=6Qc>x$n`ZfwW&$r59jl3rA`Ral7t zhb*5GN2<#Yk67s8dGWE}(vc|>r@i-_b!B_s3Qp0DBALCg6yhU*7nRK@K0)TFD^}XJ z1tg%%vQZg-ixXZRrA0q}U;&ghCT^Mq^%$^xj2Yma7FvnUf@OA1lfH0*v394wP0pJg z>IYOg(hy)^)y1+8fJ8Qu{wd0A$qh55kI$8m{UbrlN<~6Z>=Sro)xre)@_$HZw5UHTX#fn%#g8v27SMzs)9rx35#$N`p6ia{O%#a zZXl1^Cc&IwcAqwT^r+e1+4uqe!BbvEYUZTlU6{Y=WjwULW!mDpXM@0RZoLPPY>Rrb z693?hLU^>w>mw+m9RM)$=aV%UFR_Ud96A*^bP6ZU%2J`}>6OuH_VutRhx&mNzO%|H zV7GUF!PSkJr*8hvZrpR4GRCf+*xZ-=;*fV8a;eBlQMmQ=PCV`W2}?oChGL1PwNZEO zn+FU$*3Sc%Ch(pBuESLT#YinZl9BuF$;auGsLeH9Ha$XR%Otcz7mrv6jo? z?5Lt@W|CwfSY!56D7e&qSNy8ju=G#!M5tt8);n3|>@J~MDvnfiU(hqJLcRGA^-O4g z@a0$i1>u^!iMREQH@Dv6>vc_CD-Y|2>VAlGHd7Ue!y&;pLnqj!!YQ|4G6TJ92opsM zA2r%GCeEX_qZ8X3q`vHkJ-lAvv<@>7h{kcbe$gV@C;)!|KXHkVPVqT%c(UG(y%S)4 zawU%(QnTKccA6jYMcJeyQiwxn?in+G?Qr3BN)4}A(ypgPR#FCBStK>_rzx5?tD=L4 zR!_b+=jcN#G5W;MyO18p4K^5S(gQ#s9kq^$!FecmS6EqI2^j!ttgGqI4a@735!X-k zGZp&?i7thF6^-B6{NBea%kl}OAZpjRBh z;^6f@7QtgY&XAAV$(`_C*&E-YyE=<0OZx4zXWAnRo(ZeOdi#y&G;NfGPj-jY{EkSm z81m#m&uy1|%{a!`n&)F;tRrM4rkw}5gL(&bKbiUX*8s)hy5HM{l?RxQzD+89YP6a5 zODxjGwB}As7Nr6*swQU-!>1pA%1uj5icDz?-2f+sewmjJ$|klse){1YeJ3SBUi=5j zP2fi1rxHx!;&In4YeC-{%kZDqa2%PiPs4`j{JMXU8L7$0u1l3m^;zTwq^JikzS@>{xjV^Ysv8=cO(UIq3M>dRk4wgueNj-|6AJ!~H_Sr+`3>`MJx0 z1=uy_m0@^^L&z6GwSKdINkL(Ee+O{fE)b_qmE7@8hW*{u1Xp+o|p+6!D zUn|OVK4!z1Qfvh*<1jhK^4yb%Z4zGr#2Xpou$J{ELK!Nhzk8?A!Yp%wvCc>$@6iFrZ8={kl65QyhM0*yEb?CGRkX)ss!Fj|MM=I0*!aGD zPom1anCClh8!BFZJ63~wxx?Pbz3@|tzKAzDNnje@tDaG&qx&JHKW zUrsK2TnX1sCCbCJ$Pm=Yd>{DXO(D**y(HNlV2u5tN@a3*LlaN=UXSdDxGc@wvW)XV z6Cm{+w#at)F@6VT>v5?AZk0x)`tPlBW^*G`+j$mJ)Apr*&mb&XIB`37CWEyBR7#Q< zUFR{8?M$>iD=#7I*_WfFAFulrxfhd(WQ33kjKO8r6%u$i;d&T}R$P-Vi}{n8zRn4Y z;SbJ%BTp6lL<^x`uz1!pZG?wv-y-}~bW4Tuy+*RF1HSzZ7mc+baGAnyUb(AG5d7|b z`t=4<9MOz_w9_9Aof_&cTnck~&MEY@~wz3je#mpG-|qi7b16seI;$urM{nuuPSMO?`ab;XZaGEOMG%xVtam2Kbu5i{DlTldOXe<=$ z(*9%(U;q=*CG3)aK_|?M9CeDOp5=wB{w3#&?ljhyKBG%{2J6fU%2?p%#`S_nb~%G| zT^ve(Rp!$osmgqewjnIxuj*d>4&cMb7BgvlfCFS}iHC2dz=6%F`y3y2hbs5r@64yj zpwUag=+aoDh!C2QM_#g4`jOuiv~&?uw;@l#Fm3o%@1#;>GPJ=oosrOeaLCA(?Ou!^ zl8W1u$F%d4X15*JK7e3i%%nBQ%xmnNB%G>$Y?b&;g5z;V%+~~FrWdsx)XK>!hJ)XemV1D6h?nae9d<06RYPit&xe;)nzxoy`B$fxB zve8Lf@-pvJSejw)fZMF^>2VPtX-p)ZUb>2A4B@R2OKPp;Zm!!HCBnI=9l4UDFpB1X zsi?^Led$+Q{Skj*8PUYp;|x3y(^O~S0L~Tgw`a2^e}TVxh=1@5uSYG>&s1>0B!Hy3 zZH$H`QI1mHuV^lwbY8Jg$1(1AureC%;umRnxLii;$Tjz>?nO3 zn*ed!j`&xQtD}X`BJI)QTS}(7nT;|1(Tj#q;nw0k(q)OIJUl%I%7Tvl;ZX9qFI(_w<0}p=)o(Qu%nMl;B{KX zejWLJ>%TfRTF5L~Zh$Br{T*Limu1Nq_eR5)vz1p@yVhro_j7(4e{wa1SRDVofV7Ug zk;IJgY==D1K33Ny)F*gjBP4l$m?|86dR0P+=SmV_8rvy$-nEOV2zjVKNvtI+K0Smf zsk;hudEWc6X%4Ah3NY0*>n|#`m$@}5%E;~pvq?+YY`5|7nr+zRNa}=Sk1$%Zf9R?XSPUL2r3e}G%Z0OmtnSmidOu%F3wsy zWo(w#V*$nPw`^vyy0Gd80}++}yS5_BhPlEVo>u%gm)Kr!$A4FdmzY`!{taF?PFCI4 zv1kQtV?i=%Z#N_IgWOSn#0s?fY@+aKNKSjm?JKklg_WXCR?$hsC}i=;+LT2|adD^1`AQ9|n)QePHT%+RaX) z8^_EQ{SMs8(k?|kUQQl1iP+gt)&VEW>4+D8@nH*de>HM;9)Ie8Lf^`4Q(TU3YsD~C zXQqM^JC+`XAmz$f!Mc1USV}!8xlY@rcmpNVFiNu*xakQ20w1YUc}o!HNxJI)wpT01 za;5kH;h-Lw5X7?sZ*lWE!2tovuwtY_Pg3Lfa!OcvPd59@3afLdU&F=LgG`F z4XEktal_DLEMxd9S1-cSOpAlaMa)nRH!6n1{L@`m4%V77V4bcsrYB%u6L; zPn9frP=UD|t?H#U##~vG(d(5FYeL z@hnIwoW#tQ-HNGz=oCW!DkENt~@lA!5mI;`(JpdJ4lv z<`gBu=q7I%H&)RW^(JR(*GGqiLEa8fc1glHGRC!mY$t zUik?qN(ov!-R2hys^Y%gh+ctrY(s$*Y*j2(P6jUL@<>(T^A;1uLagEzbg2d>4d)0k zpc+P)TVHM0KNmE9rtRBqaDpOs6(ymUs_(#NkcDr5kLtgmgT=boK1xv2`k_k`^ME59 zQn!Ua;t47yXCs=rv~Z}?a{$w}Iu3`ZrI(#`MpNY$mk69$k_<|W_5y$VJZ4DU9+2OU z*|F9IebyALy3i+|$oitQNq;m(?75rDn|@vC98}A|20kU#_xwdr?fcxU1*BQ}tCuSt zIh*T$qoTD;y@T7$9$tK?ZMRSQi@iFAS){-vJ8L0_JQ0;U$LRxh&OM+H=7x{3V%oD- z=Ib?#ICn?gPpv_PM>)A>@iEb>y=27t-w%H$%Wi)sobnb2pIfnZKAaymsI*lgoT7_) zF)r7wcX$B#QDQGu^3gs!#0&8Om z$u*5wo>jEMd)SviMk5!HInzQP{M_BA-nZJcK;l;PrqWr~(~C;UIE{e98boV&c&txj z9+U*3X&pNis@}pK#lo^Cmus1rF{CPCH7VqKVp(Y;`vwk*?LfPts|kg`pDwdZ<& z{D8K3!LJ*mw5bl7JHP74o>d^efm^wyb_wx?Nvw?^U`!Z5qgLIqXFiuzX?+had};^S zf&TcAa5Y#Y2S382cK=xVg^%laXgzAq)=yoB`q1ue0_dueOR?^|B4+D2hS5EK z=7-wQt@qepRrJ98)$ufs14K*3FSCVz=t#(u&m!rZSA5Q+=a`s~zlrMOcfG<1xCP%Q zCkE>Ex|5Ip6$}7J``pn$KGQ60%u)+7)J9m#%xq0mMAYdF{GJYM>mUqCm7Y|YUtsAmx(L8n)JtC3>OD)6y4hJrV zScw!lpFAMl6<`n+ zC=PYR75T8&Ie+?>CZv-XX%V%58Fr(71#~MlhtB%?1?`$cw#Wq(%360$D$YSB`0C4%$Q>y1*bYV^J zji2*~+ZymwEJNdlYbd&4b)IL65imX(G^oA*0I=5LTbYjNh4rhBA;x@vAYd%Bw88l) zRU+zp=5>F?HB@w7)!z`Uy_4S)B7aE43{@agknCl=sI$C;|I!JABTaVL#ekKCh)JOS zdajY#6KY9c_x-s>s^)jyGc6ImZJ96r@=mE6lnWnAOT^4q!7i7Y6*`9)a`A2mOS6g(VgVdSbV z7MK9DE3ySDx-T#7r9JV@xeD+40enmaHTNu&YyJis%(uZq<}#GGRrLq&{fh4g&jm;5 zUk8rRF?x4Vj>z?Y6LT3mrIn66wFiRnA!Iu2Sc6j;FKu8)R`uUGJ}`{9uSJ%-^V5** z!Bs|CmuRcQ-1Ob8MX*gz@pPg_nxHb=ll3#D*~jSS`p_Ic zV3Hb~@gRkcxf2%0&U`5FkURc9?I}HxE2`trF+aZ46K39j5#$HqU8Noh3}r9#t8F&y z^Qly68Kt9I^4oaU%Y}awZJa_0UaN3$f!f4_4`q6-_su->JiXt0c(@P%#Ck9{M@(uf zgnw`$d@6eUS^0GCU99)SeTXBh_i}0l15)VTfQ-zk<0!62vJA@E| zsb?LMcAIE_2046FcxI|VgT2ZzL7Rw3n8fR=;@Tda#GG%G&FBotfv9!_7$Zm}fK@ZB zn=QeSa0#XHbQ&3+h5MV2!au9l<4Y+bGU%gq{wR!l{)KFH8rqJKP z4a0BCrj5y3JzFtKeMyJ-mctj_cr44z#495Q4lM_Nwt8nFCAYsf`JP;oHK%u;=2N@< zBWmQcWpjQrf~8e`(P#r_bcFm7bvC;A$SW)(nM56n$|Cx|%A#|ub%t3dQDX*?Vb4QA z(SJt=;Ft@R;Yp~<6d%N7NUQ5iu0qFRx5*M=-8otPp_itw>o`==c_w~8JJM?g= zwSb|2nwR_{VzffsQ7R($DA2axNwJBJ;LBrN_uU-DrkkDoIz!)+2VA*4%$)HEJP zkets2sMt#3-M4{hll%=|A|UhRg(En7fJ%u`NxTYg&c7iRyXhzrM)pkp9AW#^F?J8`+||6%VpRLE5OAY}V+Wv2pyP+g&tTQB$O z51y`A@@8$fV_R<@VPGat=J=q4K|{{YJ?Ml__=&DE?pjL1fLt{QzSXS68WksVU`*G4 znKoiZC1V^IzS6#AAV@(O&smYmP=@q969U1WQWML?QO9|q9xttyAw#q}vU6ITftfL< zy$XQm!-w}%gY65Sa%Hl{fW_4@iGZnG6PlhrnA-ZLUN^tgNGk=A7gLhj$1#37TP_zs zb5gdHPOQ|9X-q%BCxe7J@wST6rSe~Yhzs8HH!dbN%=j7cg81-il+&j}Vl$t}CGSIz zBcgR#Srg%#hqCU_?_pS*4c> z?Qd1-m)TDw5Rg+_qvdau?~)oNSDZCcFxMBt8$>!BY<+y!{1Oh2n@1h-FkBgC2cKzh@0 zWnY?<(=(Lv}=-5ck4Gla>H0e^UBq%JQu_=<1>{7XWAmUvV^t}z#< zV&>EVETecz@O1LY)&yD@8)s*~WHzqroe1IFOMZOL-|ymSKga+8P)e7bv6}w0TvUHuzFV3@v5+9x*v156h)4)*4CIG zkN(?AN@!!|y4vqHiEGzBasARU!L0M?IGKbN5iGmi?UtLRxm}z^RjVbtlM~C0|!_}FxrfmHD z*vmY$#@-H6Q^p3)U^_&29SfgqG?)b z;@8=If-k7KJ(UQ5FdG1Z2R-rtlk14DyBLADKC)!bhldZcDjMO!lp-a5C=nEcsRQi5s1 zs99`c%`vT{Axd_)aFPNab=0_doJB0(HET6xKJYJJ#;nB}!zYaRepbptQp2#{{xfF1 z^Pb?OqadY~45cvZbv~inbCosf>*Imo3e={GiDwyq`YOF6s`Rg^8{^!^8cm+*s|VmJ z)ws_7y;J*()_!U}n0bE5;u%!fCK8Gpz9tgF@(LrDK7@y>CE-7Nib^!q{uEERNuyj~ z{N^TyV~b`wNn|*Q6`lOHnkI(!7NYW?&tAA4aqJ502g>sn;kDhgjL40BvRVKslu-tp4 z?yu3{N;gz(e$%P_01Ol|M-$2L8hwu^SAHWU`b`ZD4dY$tY_^0NNnp~_o!Q0O;<3D& z+ONV(E2qnkS8DcL836AKgg1|nZQyq>?VKZj%&US9SDV@M0(!pby(Spg#5Lo!ynp!# zbC_#P@D1asiKeSVM9MZ)QB+8PbA5**u4@iuxuWz=x9HB*6wQ2%w6#^!RIC zqZoYuMf<-hAdY!O_;;GA*h;Ux9LMUBOVt+<_Y0#BNprf+pi4)O{jzMsG9rpl`SuHC zZH$)~3B0-5g-BuHG`}t4;OKRcO^-1Fi%W22*s`hl1#;KHGMfOQIr}Wa0D+``R=9A{ z(^wX-qy^$efkLoSfLC{B*b9Yv+kuIVcdQ8upiE~Tl# zzRC03l_l|ZU^%jamf7mSpT?s&uoBSB#i`>6122gk$(X-%81CD6K6|jZ=h*SJZuv1! zKSRlb!#lin%wlkV0xXDkcT&^~lWf*%W?D`OC?Ml6kjXQmcx@h(Cewz0Kvi>`F8lL7 zO4+)jNZ*Y(+*}G?FEEBlKZ+;%1Oy9IJgsZW@WnpuOq1Q8xo>=5AG}zcEVo^+tac)O zXB1UBpS~7Bv&0~oqt$&_UCo0CTjC@fKaQ)ek%F|%SHj{Bcmbj#m1t5jxL2-UAuYG( z+oDhQ=Vub#>Fq+?@(;g%2Jh`kKDa*P2k2kq6ChMfkY09exI0F8s->ctbzodoaHx~^ zE-yVqOAYN)8HbTxpb(g3{j+M=VeeDDUmVTN79|fkUA8xTJYNj+|wk0DzVnFaEMJFjdv&K z06O7$r4Bn24Q76Ss2z&vWg*`L95h3gue#4G&oD;|{r4^5N$qa1nC~e2@W$OrDe=HM zT^L;tR^!jz-Rfo2k&UFPcExi2is%qPPAj>E2R@bq#g?3_XzXS|_Eq8Jf~|{k%PJzB zr(T%NpmW#C%wsH_y>Ln%KFduGtUT#rFBq9@&(n{kX-%+yc;32y^{1FGO^$r~z0%!| zcB^ZEMyVdrJ*xK!241&xzG>Hs=;UX%QsVPUbKZ>oTNtX~lmKLEIiZc5C`QF{cj?|= zt8!)BNeRcvh|`LC){=tR(WVU_SJEB{`*S!RO?ujQkG*Yu*^Pfx?SvIgT{X5Fnu2ly zCTKr7^sg{Le9dJpXd#yybL6^W!&psUV4s+Kx+AD{*45I`bN&iZ`o*#g0{R_)l&i-{jczBhbHtB}9}Ftz^&;zj zP_G#wHX{sKZ!_Q1sTXcfFnmzXi=|>sx<+wwa%C-w&~h27BP?~$NaY!{g~+sNdfnNi zSIcLY3!Bsxioqq=P0AQ?j-IEw~(d$gIJ9I2b<1|SL*cxHyi zS7?`iGu1tQ%H1$c1e^F2Tc68NxZP}#;=WchG z_sRxS%5k)6Rb44|bUhj*0i7ribp!ROkqoPUJ9ujJxRQphm69;fdFg#C3&``~x$cB_ zhfA*LyiISB1wY|H#B8TJy7xxoh-;LAo0!Y1a;Lpe+Qh6Psd#0vqi>b1%?N3Gmi;6& z4`=VOWh${$g;~(kkF;Apc#0R&EoY}xySXS5vO|x&ZZ=kHT|=XO6g?{Ji*ho5pE})t z=jM}f(7|TsJpO!6oi^VzYoxA%*JoSp;UX6&c>cmAcfrC556y&dDOkKpv(|@s)K(4O zs&IwUE-TP;j^f8ZXo@P3^nDAwzQ_k~Gp`>9Wt$qDnUpO1ZqeXc)wOo`PS?t1CWX2& z3AFm?$g!GvXx?3EaL4hjv@Vs+(~F~jj($<6e=qbmfywA+mt2RX@IER)kwoY!*A!CG z{ou=|X)&2@%)*Bv__Z#XD%=$wm@SWVDul^dv?XN_FD`D`Ra+NubCnx+!t)WorTe8U z?$#?iRr+{Cr4&ps{>?fW_EISsNKSa>%EB#Uw4~SO5!9Sy+fMBn`{m}jQe60dWS~=j ztCgq|v~=fP78Oute1E+Kvr~u}spJhX{X!<=hr9A1W0*91Kfllv0@5~Nn$Q!?h31ica3#oH3cSF!M?|8 zg6oWpEq#2hUrU$be)qt|yJS3Z1B7WmNa>Yjz4%qxyQt@^a@OV&Fm7kFHg0s54?Pt; z4!PfPnl15crdJAUG=f%QU>%_j+{ z0G44hd{kRO5yldG^FU~D8fw9~E14+*4ztRcs(ATKr$`mHF<;yumBH)?m4fD^q4Kxf zo%SS29McMU-7Ye>m|^|e_8T4f>IFx0P46E1skLF=A#vQ`fcY?lfe?zY#wmkU;zt@F z_p4aXtM>j&K%E>BI3Ji!$*roW1OZOJN#o-)mQmgRRsAu$`WiiGjz zx0ze~vg)*P8oo;OiG!Wyads{J9@ z(4ID1haEp@_!0Q@Z}HmDPl32hqr$iB_HbiZMXrrf`Cj%ZYHWl@jZXumWSBdG%T}im zhGJWrwuK(n#62jmvNVJ{PciFYmh&CYh;;;5!Xao0Hydx<^xB^qm&OX|7z6^UapPzq zYq1ag zcH~(`6tC2b-$FkG*K*BRL6?ZR3uzgtCvQ~GiBGm)9 zh^hmBXqQ5o*6Pp3uh>ElPNzA+wYDJc1StoGkMTEo4ed6( zeJSj8v@S=bf{^w%8$Ch_Tauh?9$gH$xjJF$4bAAyRl^8Q zhKM#rjjq@oRX$Tx`|Dsih8EH7n>Q*B1W(Y%n zlDK9Dv3~8e_*r~yj?NRLrv2zZbJ4=82f5Q-e=Io%W>)%M*?Rt#6-hc39AE+@caH!~3aW zIZti>vQzVuU~<7YuZQSPaZLHG1wILXfroi{Dnc&S(wIq+>thL$EHf4rC!&7R+Z46& z9>+dHg9t-ban1IP3uy3blYGIsNnfOi1`1gNZ?KjoEfc1)IeCbU-n21yJf>Uy-`I&7 z7^UCKMSAjpVpCtEJ}>+)Z@%yAtHRsz09+{k$V&js357cwA_{|TXr3j$IHWay063Lr ziKIg}83zSs^|MB<&>#xL3j_MtKJfi^ZpLV!Jwvi3R*UqAch7(+7R~!4*qv6T^ewrn zYAcIGicgSZFp|q?D5@`$R(AtjksrPO1TsQ`S7)xr`{!_wuzEQ^Uw#t=>C$$6_ffC3 zg-70*YrV<9uRPfH>pBd2Wd~h?9{;~KkYDy(~pCw%*YW)c-6md{QQ`4V} z<=jyu+z5RCMj`GUAt_eBcL7HIQ*wWZ#hQAYWCb!EXLgsu!u_z)-2bwFf4f}H4W_i8 zQnx&ZdZ4jpr<-Y55lWkfB~6KbV97ej7YOFTrv-hY^$GOELNo59s{rI`HHng9%d_>> z$8eH&stk~!)z%6zyFPYU-!pAm51}HZm4Nmm-ep+A=spU48w-s6;H{$Olr!8r^0i5C zdg^%ixaqDDsUK8<8$_dj65WEH`v9o-G>OOni|e3z_KH)9)~7SuQL8?afzcf6#zYL! z?z)w~{ULhxyYD&D+v}R6^qyH!9hu?VM@zCkySLG+`)%I-0+j~ZFA?~B!&^7zF^CtZ zkuTw+_^M*j0@+LjwU{F|>IU#S)i|O8u#&iv78S&DYe3!WCE}%jm=!5Muo6Kw^6UNv zTAWZMi$0F{>WoEb5(%;*yjp0LCpQ-H*Vhn>mguqqHe4OfQuW^UI{BXUA6w_Kb1jT) z(FbBcPD3&ratGLk(5tIFGZiB^ITLb8_?*dR>}_w?(Gg9=dd zGeTsvJ!ms%Qjacw2q+hpMm}3u$*qgZ%w!#0S*-8V?<2gU?#Z{DndAg2wur3ouH)`` zS_?1rgXJ;2=FYHL93b|hi5MOF}J%fxdHRgW4x1uA;npA-Nm4P}X=tlBsc>!#Dg732-iArf1 zjy%6<6B%c;f93&BGPSG239`TYLdB5XjV(RlJgt;scuUzhe^D~i9=L>mUHigu8NDGl zN6~{dl#3onzU-S?ZEuJWR(79qPfPrF>=4ZC;v7uEAZt_5O+>>HG~K$SOzVC3z zbVFWadeoC@f3t{%1j3?el_3oX53%O;>gIuE9VGQZqWe-v+)6(9!za$8m~O;C7@hkm)B zYy2bn1sqL0_<_(sAlZ$V=Zf!^MKF^zNH2APZrp@8*756@H;SgGi07;dVk;-jOU)n9 z!ubn{D0~uj1MR*Yx$_JSN-`xvz*9BcO0sc-N={GA`NiBJZEtb~tb)KmSdeMwhk-Db zQ5I8Se-(IZg=e>&xutPPjq`O#?S<;W#0+`ICue;rk#Vlt(eYksK0J$+z0eLrR5EiHdu;)SZ1 z1f>J7WUmAv#o%vzn5C@79;4~7e69eq2h5(V!8~u3qWwMvM4~c{j?%r~=k3#vAgkYn z=b-~tEB91r2T`zwQYcvz78%S0nQcpx8VRrQAsL%3NoNPr+0=>uL5g56EK9Kme|9TF z6p~X!*WMN4!`W7T^<4MC2(3it3_=re@i-T)3c~r{HT8Oj5E<*&ShbOudsM&V_A?zz zVF$?v`c0MD3tIY8J~zP)oX3+6jd zSpOLbB(yhvS)}p}WenG=y*TWAbgN^-;ZUR;$;9Z29J|X>9?(+op5}* zPD?J7lOly#CbY%=A^@dvlHcR?%T~B_D*GhA47M`kCnCqLX*$O+=!B;-#D`ZH#c1OK zAsyKSSEPdkTBE%RMKHqrYyQ9y#<$_Qt?vpW(wY$HbjM-iOh2rd>gtj2e+W^FHnC&y z32URbGI-JP?_m15{r$WTk9Z=AG{14nuT6oj6^Pa6^dunrlk|tqn@R6ui4=|J+`5O6 z2c%M5ME#Sg#ffYkZrbBVN8gJ;Ww>QP>l;&shI&y?p*%ozK{kU}WSe)l%u3Y>6-w8K z!_G=eGm-IMKeMF=SeuQqe@26AtuluMA&ObG6W{Y8982a&@eOb<9=Nth$FCvaD?DgS z*l%%_$>Wk60G2TDE7_FF&pT)`D)4VZ{V>((Gp^lF?n{Jw0y$hFah`{Fz4v;Fxz{R= ziG{gCHBfs5RPdQZ@^RH6Cc}aa%T8&D&+ggxOo~$n!W+pji&(vDv9v`bRd zz%h5`Nt4Mp8-f?Le*-ie__a`fM&O+*^zA(#Z-&&2|GDi}d>yOFM!!^XUB?rp>(e6; zb@iLL(YF^5?0p?;bvhhD9>~ z^b(XNSXltEe;tXi!hXie5rn>}gPY|Wiwen%41MIdufPMM+l^^XR=DZ~5WY?qPq^dH;a)Z%p^E5cVXy$JM$CDoAQt zI@vvye?ciP923WmP4m2JFO>5;90R#zfm&oM(zDJ7gW0_>DRacjNZAVr0Z2?}n4&T; zZYLd`(C~AP4|2Zb#3<-A@Gp!Uc%Y+~bSxeO=v67k(F2KPwac&!vSNfM^WNCN1Ff!` z9zK}X5k8)hq}EGAdj{<=ICKzqSiLC`F`%}=e?y|GiMISFml0R$)5vr(14I?D0l?{( zh4POSmsR7_Ew4xZXqfjq{x^+7OsOiE-ZtOTX@y}`Vt=?ojX~@}`)Mg&9PHOn ztc084uEvs$=D+G>57Y3Qb)Q=#uPGxF5HoVhQvbk<<7*n}ZeCQb9%LDZ^aaa=u<#)W ze>IpOfKmh%zB^IW383#@%R zM8>Wggjj65!zCPoYKAyV9B~k2t;iI3CwOIWnp0VlZB0Y|3V<`e{DAEoOHGd0*7(ba z$*jN+bW@-GmNpQ`DLvH=96oGCf5%%CeEl#l5c{F9s_9fGvfN(D+X;ZJGk+|#RC;Yu z)?7tZ>0(mxVPx^lSf{xFMGz$TRb^X-*wpVERl8$iB8Q$LmrZ-S!f3|bHOe@UZEUJUYF z*<~?dwo55Z;x#eAqgj-pvnQ86o~hQ6F>~l_T5p=yMKpe~J=e565jiL>ChJ8V=JLvZ zmxWcPuVt(1g$skvkA(%h`J`18G$2N=an)6elt7oMI*a{s7`ko;eTGs$+?jriulegp z=$fijGkina+EMI#*Ya$te{fNgxupws4P%3|dF=?d8{Kt$FPz_Ce~%v6xL&!xuBi}8 zKHDaN-4qm#`;wxF;3|6aDzUmM17wFoSY*s!lzmaEQ<>|Mdb7;N8)7m%&*lPDNGv}o z#EYMjJ${^GGyaZ!FsNZ3>~R)7xgBtwZ@{er&k13shhq0;Nbf4Yf4SJkj4X!a$9NxN zTykm6^|x^(2pcr7KWb!LVGv3I#Ae}%oZGbg{P-s*I@I~3Z7&vdv_k?wL>Bkt1DT8` z%ZD}&ukhPdq;xmy-mnjJB5<~@wyZ9Aut&y17>czb9z(&Hv6ckkfrx%jMf1%2r~uJ) z>N%}M&lT-?Z?iQFf3X(U^UD&7jbv6C#TFL`3~c=t$ch*6(cgpeCeJwG10g1Z*4;M? zGZ)oT1L?BjXc2pL!FG#8mUH6j z>5q&#MXAsqe;VO*j=1k;we5Ji2I7%-)$!ZreU}j^RRwM$OaY5EDDw|9g&!DyQC#E+ z?fc{UI$3x{(Kz#s%E}}#Ri!qv1aQk5STeTOJwTpbxF>V{o?M*VM%G<}m-)UinNL?~ zUVrG0;y~cx1;-x($hwex1#*8Kx0kslKO>b*N_v~Of5!@^!O}LTAMme)vB>bWjp7Aq zesl-3D;FoI;Kznv8+i_beR%@s7?^Hfr|JMo5!_`ur+f4*Ll-CI#B_ac)5ow53;i)@ z7R%SZtb!)Zf!P>mbpHuxbxGNPm%hAQ^|{`lg@^rkfRpaQgChgt-3XAvJ$s+Z&&7$m zl*-j1e+ucDu9)@;>6t3b)-$Dv2EQO9`eRB&0>UK#B7aw;yj*@-u{WY|q5i1!LW>x! z`i?eT$E--iy`(PLoAQ91$$WJBO*<|ocb7-f95`wfR^z{*Sx1vz4kim;Ow;Dw?f<&oe?&4xA0{G9O ze~e_hA<lKH~8`@u0Et0Gcx!o0@pIZt1IXuK&mec@dXfL6~8^m~o3e>-*Q2G-JTDIxwXscEph7ia=1sKBoeA$p^v zXn#Ys(78CUnRHxx9r#sWjDt?>)(6m{h6nZmr-ESDJ0Jd12GannG+^t}wAS^iovwop z4FZc~$^3%(M$+zJ;*gPktD(Nrky#TTnVQWQeh+i78X!){$zQz>1&g`xo)&8jf4Vi6 z;ZHgu^QH?G1x{=c)UmWXo-w$^95I~ZWM6i^N9U7X*}A9sNq55=U@Q)ciUtU%Sr%C;p5rH*L5d({L)=Ok84coSF&f#SM}3|Tf6|pbGyts_ z_dA=6NqQVT6yBu9^`SD6nSNFk({h`tonW zJpAmcDlChAZ<-rV0PcD)#W|Uwm0p~sRP}|>9PMqCBZqoO(E&V^V|0(<-g7N;auorN zsH9@B^IymOhVhKQ1u@Vs0&t(4lBG<@OCxaqttOA`$Ne>`)9(BLf6&l&Dobe73|fj` zU-3X1&r=GI!X#Tl!>Snb+8_)oSZ__tX#uk+O9;l0eO&i3wQCwJI}tL6dA19ngfK~f zxHp9?sIbq1;wx&`!UO`22F@r+w~DAIyQ%;_7f!PVmFky}_@dkJ*{)2CMyY}Kw=S!8 zO=j@K=lfH#uXfn9e@G$Vj56Y>x0Gji9W5C;Tu`H)vd2JC_w66vTLOIoJ<%L0+8eW> z3}BhmxFpdR+-BUSoCeAb1LkfL|5rzeje|c=dctbF)&rSo z=r>_L%DVqKf+ogAjjN#|T4f}ZvB0;iWJZwoU|IIvdgoWI))mKl+AkS@Da?} z+#R^K(9bZ)?&q{s-YRyd#YwT1JdZlvm;1{(xAncjLQ+R)xR$E`Hadgi5yN;W;f!l+ z^1$K(QAIMCAKc4f3LUca59RTE6auHoEZ3V@sd`z&e*mw)`?y|xH%vDru#7>3U!IS= z``&bZL=S<}?m=QteO~HGU~x!?{#6d}QQD1*2UXXSAYyLDw3_*!Ywqk_iwMxg?#)@1 zm*FUw8vlmg1wXWMjC5Q9Mz+sbc}^d{4PoId=1CGcpLtI;EtMjGy}@gh?k%BpZBf1DQO&BGT+qmv+N)#gkD0q&=Cerz=^ z5Br6K_Aa$Ppq0Tq=h8rm8lm*llA+~S?d(#f@L?hg3-O%BIm`2H3NH_a>swzqOFpmT z+%UACdz)AN_6iO%AnAKv!EE8=>}RI0T_mGFFV)!eO?l-ldki8?>Txtt>rO6T-m#J! zeyHNztnpi6kyu>+9EoKoGEj!jS!dIf|;w&w|o_4qo zsU$!4scRvrLLolVvbg}%AZPp}oHQiNP9q|DJO;t33&`n;xeVxcl~~q}NFWgytVHI3 zfBnh88wN`?LitdS9b&6hWxQua0e&WJe^Iz^A}^p*5IR*DFiA-YQYNv0cn~_S#T*4L z*urIRpm-bu@MxdsUp1OIE|dju5{w}@^c3u_r5h!vCXbn_$SyFZb3CzO(@yMaF1_<} z*&m!ZsVRI*vWO>XWEj<)Q&jA|oU0f*KpoV*xjP~kqh7vW4sa~1A4J}`95|4Xe_xEn z6w6Os$as$_JWm9*2Z)-^RH}j&ePJxY)lhsc))%Z`5#&)I?3t`oje2%Y$%os1@Lff( z$Denr~D|qrq^FQ zYa|}vmWrEOa0V>~!tL0WK1leue?e^V%k_nzAZ`oFIX9@z{JhG~GMeIbrmfxW@n%zs zk!!C|P>Q9oiMQdyEQZjCa<7bMWd3o@ym7A8_N5r9jxM`_l!z>{@7622wW|zc7+H9MtFg6H45{En=6OD#v9*PC;WU=Kf3+c=naWVv&q?P8)8^YD0^kM>Ad{b%p5xq8(+G@DgP^>j z32j3e?u@R4(dq0!u{tw zT8@I!(>-hS4t>W)SQlD<%k?%2fxyDC4rJ?9a*ewV`%3H^ zcJVx$ibi(mIMXmvL%FTJm6l85RGffeRhBfA;4H_;-3)$ue~WrD$Z5W~_}ii6TN^-a zMrwU`G|4Q?(-2bUpz?+%YirK2m38V2KOi$4?^-a$23Kob><9Kt^D z$%+EN23Bx#f7WkVzj{0EL%9QE$Z9!SZ;4a6i{OUe;3xs*3^H0&^=r#x9mfJxD=@!k zR=Vv*QY)0Y5|Z7~pqWh#yZfZnu3OiGUq!zwz9;c6&l*MTB@-b#o`)G3g7;=Uu!ky` zfGcvTHg7aPUk%He{rxN?^W4i6P@n>6oryLEPU46zf2|^kK~V~IudV1f_23Yq!?jBg z?G1!UC}Zmld$tgM&jp&DKRZlHcN0&c1}HC#3==bE73U4eUF>ZS5d#T zt)s~A?Dqu~=6Zd$^L}eoqD-G^8W@XYN@PuWxJX@fZKp;mPKIoQLZg3I|{ z=**&H1Y87hB&1pn>WPsagg1jc2PwdLbv&B&e{>a}z531N=T9krk}yYpr@7|Fnb)E+ zbK>@fYU_*tPNZ6vGgmGg(H&{ir4BvUKz|X25W0FIsWDe3e9%o{X~b61M#yg_bRS2) z=`sxj|Hyu+8m~-foVGBn8os|O8Iz079p+6G)HdBjr@&**;fRO$T_W*@!Ot;=P#vOj ze>g(-OM+tn5!*`GZ?R=uNBbHGNZh0XxA(Qju?Jfm!qeaIFbK>eKngzqUD!IV5Q3^N z+>9cH9RxB+`zwS3)=*~!UG2+!y}99_V-hc@bWJvB3i&<>+Qk<_w#VC)^03CEM&2lM zG%^~e?~YO{?VU|vKS~sH1$D|B~Wy&(PVaemwD13 z`6dyZRWVJWAZ%9910tiB>vv?Tx8LPQq9%?SFQpq)3lep$2KH`oPur49TLbEf&8K^I?nuklRWAtN zVWrYsu7EOIz+Jw?@YQ{xF~cyz-xt6#3}5<|%|Ap_zuvE%!WD}iWL!J49-f8UiQa#q27q8cf(+g0J{#lAN7atXmN9(rXRG(|a~ zSq)UC?nREV6{cqFonu)i?tiS0eqf0`0|ibN2v7u0<1aqb;OfG^H)?+&bf_Y(mIRUf zUPIixyb2|7O;205E6j|L_qWeEs$d)wL9<;XT8!VUlEOUitFMaw0w|K>f8hGC#C&%l zq8!v}wqk46Vp|67i@X(Ig1g-xP`qXr^!Jj^uLa78u6o7BUG{<|A?bz;1wEU2yw|oFy%VbE%rfUjB;8 z+)V-l&j2iL2?lo#280)dAswN{{=+IA>S*jueAJDH>_ z67E*5|Kt2Ik7^K;gr6zA2e=(gV{nK#prnRNO7@P$7)6(-z8iq_9%^dhqvvs0>aw56Vla z)SUE#n_)_0>0-&LV~GT5OlaBz#U~&GtMbcjR`74G=9wP^u-y+sMx&00k2e*G9b$=H z<744(owsj5NT!d8^-g&dMv$61M(h2)vOm-KHhE^Ye?+zpuI9QOUPN_lo^_sq zSWxB-mu`_B_LZJvUY+5o*)D4qL&neYF}BbhM#7AtPwC^(&$2y6B-6;?|JSwyDPLzq zO60R)76d_%5g45Ujo+6oG#SxlBzrg_+>R2d5K~L{_>0-~XsqyMr;N$G{df2*Baz_q z0!GjRMZFX=e_146T76^nmC@Zq)EiMrKy`7w#`7j9bD$`McelT<9p-xys7BDYLDwov zF(Y+|xZQd=>%Fv>t=YGWWmj+F{{E$jSUV}9J9_w>znI&Px=ddV&V=8wvk1ZfGUm2< zD;qbPl2VV>=>+^f6;FmRxN?MkNR+Aahkh(8ho~uge;stKN*>0hn3VMKHU+qkhglz@ z&s{sDdWsx4H+)4Mn0M12xb>4lPYM~qG{|l^R(}$wIxdI194J&@=ysuNp)X}fIX%py zAqqOa3M1orl~G_IEkF;M#UfUHI3VMc>K$MVR6WUZ29qhoW zIk&ece=;q}v`o+r0Qw>5hga}e@MFQL0cRpzwf-Ylzk`-gSgVAybOc}&OhPnU*6LY) zW|>syojTTevyLFHtOJNcYa?6F-&#NG@uZ8LZpXSc+`3Y08PUIwQ)3T-8Zz~=V`dSg z?J~E74X_ ze{QyU@j5Ayk!YYbS4Bo z3WBM1Lqpi?QKK(WhgUQ^qr|wZ8-C3$5e;IA&LxRc3udE4e)5VQ*W7^Jy!)kve{HS8 zbg3V6l59H#%SaFx(-3o)8n?Eb-9fDKSq4vDUDnux3%dbuAsj`bxXQgI5 zM!eEdyjQO#_mdX8(|E3z;QM78Yw!q4V2K}-wM+5p&E^Og{gOtxu0Y92QO9NzF<&V- z$hABQF06Zr5YE`NQqBF-%w~DSe=hs@H6 z6B$*Nec3`zYNkg2WnL~%EjD1k@i-k0%rSHCi|j;rX*Ax$J;GUrWgB(FSZ>(}P6v`CtO zW=t>tVB?v(4V4Ld)yFzvcfg)J2ki=zNlf#@JJ;T}Ek|{)l_A&jStT4?&_~^AB*UKz z>kZdvSbuh**~iTyA}g^7f6Anl(+qe}?OP5JY6||7zB#%93dx$Kwv$pKw9c@VWOsf^ z07xLcjW?id&C+bfY@!4AyDMd+a`YYg?bRe;TXfzHgweqrBpAf54E(-&(S0St0*_vw z*Cc2t!KYJQcl>#t4KP(D)>XiT*6~{1^_gw13s@mlfG7FFPuBG7fAPoq8WUS8i(kcN z4MS`jQ;MBx_tlG)LWhI}rAfIm-8%?*SSVngwItwgy7%O+s~1bTC30%{9n|vE1uY!6 zGxhsD7e14??mk8=g2#ZMW{p#IGksY-P~t#nH+ov23C+hP_rWchCrUS7B)BrB`tb$` zdrK8XN;PWB;KK7Ze%~6Yt`R1~Glp3t4 z*u1%k5y9s`E&WZaAGX1qONJ;aSBqTORxjhHa3IEGwqO#IH}R{ODwrLJNhuh4d-+6y z5Wn2w9#=eMDmJ)`@gaQF78ce{Zg?HjH`!z^uacC?a)xE=aTDF639fqi zlZ2&ahn^n8f9;2WOusogbh&DzL3D~QIR84y@(_o zN^5Njho(#Jp;f46`Ai)4q)PF4Xh&)aW7SYmrO7n3=3IRDKG@$ z7xz}Wn4hu-g?a32Z|?{XhhVRgPI04x?=ph35{Os9l6tJB{H`1A+7K%8y7! zpUgRWe|`y+%mVT5cx0F{o!Ga{CgQnT=zL$`B+*wS)mr@d^J=C~b)n~%_1AK$Xc#Hv z`tQ89J&^hFunawE%RhLTb^@}cB29i!(YB#jOHs%LvCj<2ayG$caIC@5$#OH<#sSFc zC1HFFZIuT5urv^1X9u)&5CMHeLHL}NNASzkfAld(M<{ngHt2a?uqRBMb^rEPJ_*0; zTVLnor)YFecL}hY>iQQlORV|f6%lDI#x6w8==5oXh#pL(>{=E{$P-WVpMygmzYsM` zfp+~A$B1jg*P9()i-bJ7PFr)ghU$?dA|rxLKno920|;X*Rd9?{RH@RJ)-Q zNe+su318@%P)!DhR7Yv-=1zt6@-1oiW3vQX%fB?WBAW>41(yyKMmaWD>)*v$o61CD zHe{H1dOQM3Dvbb1y(ksRp}z>S*P8jte=ehAZucKmOR3;EaUrQb)gxY7M4g4gw8?{F zL+QlSyOq=Cn!7#izkKOtpkDd03v26q|JJq)ejwM|-;a?RY1UH+S!;yPB-*}p5I`B~ zxPa@{ULO2F;Gp7%$(bg=ti%47f3>m1*9q6Ov6R$_DF}O$vwG|qFUN4WAjcl_e^9)! z|M?{@_rwVAm^8j5_qFy*q~wl^l--Tid7?G;pYEPV3}hRtcNu~W_P#wO!yF_CQ z#$-&+>@s5{)5+>I-ud=+^L22Ke&N9h9ar>rXx?Tl<_h;bJgD&b_ z>F*&M{8ED&495FCS(hLa_hJT~KsZ+27Higg8i1db8p{&>L|~ zUo-@bBAc5BfGiZ*i{xd2;W*A!VnI=U@pCJ4-M3@Ywp?hI>`(bAL+|(X&_(w9a;}!i zn(~Wjifh}xm42$KhI-zv^P((ky0$|%&1tH-zhqeEC5936Z2K0$e+fbjt*VaWL}?nQ zX@*V~#!+(BG)&_p#&L0IdNn`duU^7^-hxdM4g&5hwu{r7Pmvu5w#*b16P$EA!tYju zbDRsKQa5FUE!YMm!Z;&hpYU}%=1h{8}h3);)#^k*5z(X1nYdC39#yO^@x878tINMQ z4;~?!9o=yYCgzPJh?sm)UZ!NZ@^jK9}*`BjL0`Ue-}RF0HQ0~;1xvo{LlC} zXy4N3$V_yXowt}oI7f?{wlsvQq6f4JIT((e5Sgsb=~cplKHp!$Nb)3m3c^bjNEhvw zsvK*Z?g>L)Hjj>`EJbqks)fbcJN_gFC@F;OdOEpvE$Kczu`ZA~5lf4}+tEluL89V0 z>kl)-xoJRhe;7yu1AJdIUa<>M)S?i9v7e5Wcs2gK%LyU<)Dr7g!}*2M*j$b0p3ecl zYyD*VbM|CGS@p*Ew4s4NL6mN7R7iU0Bc4pBw!4tV^F=8}!Z0K=_lRinftK^$)Pt@> z#;Aw(oddC`{ScA8fg91bq62@#c?Dj)SB3wz1r<#`f1PyJTZMhLKyNr8ltP3!YH&QH zB!ogXx!;wk1Hoe%sFavgZQVMsh*zmG79Tz)>CIDuu4vCN9=v;YS-0$8+YUVf3kF7T zx+KqMLf)VoDTjsePFQ0T6!AX*O~qVfGsrpIaoM)-Hys)Uh~k3Skv8D7UGjlvK|ds1 z45IbFf4$2fk=n?u?XO)rb36GGTw^PKp4AYs#{C1-tOs7{4p6@#Y=-)kBMyY!lrYtN z*QP`W>FOB5`XAcftvPuf>Jt1bQ+?H4=_(9yo{*X9i>;F_+cLf%UO(7e9%w~4I=DO3L=oIX?>3#Fz zrQuk1??$g4m-sU>!2^lCukfHB|4ifB4VTi$%P~9M`8u25H$l65kgRTfxqN54amMea zZ|N~|&D>dCR;mBysg_6MH46{5pkZ@(@&(U>412^&&9VK1ChHU0zVCwRV>ddFKiAJa ze*`Z9jYd--=70Q;|Mq|Fnvdh=^k4srX?DB+^56dZ=f~l{Za1gR|GjHoPovXm_y0U^ z+Lu4>f4?#SvmVGdNM~d5CFxfF+5UJ2z5oz_41nn9g{H-Yrs8}!%CpdXF?wmw@Js2m4nmxiw;**9o5CJ$+ zZ=Rk4?D+>h^J7ekNqmv}{(tY46Ys^Db@uuS51IVJtK5k%`F)Iq$L;eoIT07WVx>~Z z!wee>7MVX!l-O z?}G|X^DKY!_iMgD7jawbJu-#*zL&G`kuGlcn8sPL@9*!rE_AQ%dA~QsCB766 zKZ_&EatNiJY>GIta-e+3%S>G6wEPe^ZUVAo>JqU}?|+ z;Qc;dj5pbSx`7M-h&$==vskb)U$Kt9@+v-Zb@__*d(yA*o~2^U?R!*(J#Q0H=2?tIg-q_TL@t@Nk&9T_&(2_pn#6l~ zY)W^9v~w3pNkOUrf5!Tq5!bLP$B`bVQDO*4xuspP^UH$z3r)$n-h>$nXNmo^Y}Z>= z`3?+ae%ic9$tgXHPjPMt(j3oBO&~Q-;;~(F5U)Z@1c^eRIbuw{hnI(fnU>qyk!0=1 zETN2c`Hpq@4rD;F@&ABnQ{RJeGHDjLntsxK^C=N$v@0r5f87Kb=P|^^hycZ~YDmdt zlr9nLXhFHSe>x!*sa$g^amgmsWfB295{%Xf7w&(ZpzPLk~ zI5S-UGPk`bCiId|c>jB0$348-we8m%+1H7SuzQ)w?AQDCeu1C)Yfy?KKXbdc3x@1I z=NECY|4TWLe@)}ytS0_Bmg&9_C@pr$IQM3HyPZkSKnsGT2nhI3BdIAeIs)w%n^%hB z%a}+lJgALgNs&@xVurXvNMz9y3S(nDaV}nO@Bfv^1?#xNT1A1(qyBj&M4~$7vrI4J z`)*!9js43s21)0s<{a!kN;e$MTZX`Jqy+(0y}YCPe+8sKaUvC~(L|OqrRS$I|8hk( znFg>g9v9>KVTSU1kd9)gc#u@&aR->5pQ=8*7;m^F`dfuQ1kR5iyLhz6-AP>v(8^OR>^vq58T2d3BuWkE3$AxIH8nQkR< zxj%-2m?rTPjh2%|p)T?b%pnq;yN~NS&U27>f6pdX??%rqfwRj3ps^?4rZZh5sHhk0 zRgIy3OpSQL@&GGJ+d!WrtT>vd4^d*K!QxV2EX9mw_ji~)#gSInpR`|o$rC?QU_Zqk zRxOMnoB*l;eTX>#J%^q3_x&0=-2C`0Vb|T-m@d4n{vFq3W#by@+W$DNmG0O4B5hpj ze@%MzHT6I__)$X+u(=RTk*He6u-FGxaiqikP2zoVjah|9QLzN&q(l~r6LLN`vPi-D zdj0u)G+!N-dHM#_eK{_-IplLNIYIDQkJh-x1`v?l2x)q>e#A52wPuPSv+=Sx3mXr+ zJh{eu51spXXJe+cK0PLeBB=!GmX4o3f4rfN69UEu`Tl$~4T4Vxp)#yTZTXl#JZNM; z%oQ{uA;sH}3XrD>a|t%?5;x^Kf70!e z=$00e`pD3X=xKI)4$|J5nduD6`hIQ%rznq*&apDy5_dcyK`x`RKuG5iD}l?ZUSa8G z6mky^0;NtH(FDYygimHD=@mF+Kl;#73UvWYg4cG`v-=S5|ez zJkv8sTZDBTBSYVb8QqO_u|cE9f5l_c_=mSVCZoqCMw*XK`HX1#Zr3~en%+Uo;YGsQ zt~WN^uJ`08|LphMp6}P7kJ+l~f3MxrtMWZ)_mOBxCG>?(J#b->2;D3B`M{WV%oQ-+mEb zT`(J5VE5;MWFBHLXa!M?H5JILripAqY;*{Zj?MdDM|TanK)3-ULD)P#l}jS&kjLNR z>n#udiM@xQ>f@r%;2IkTe}8=+@AE!B277f3j^O61zV|wD2s<+gtJ5V;+cikKT_AT! zfg{jTva)>-lXgi#TG^C@`jCjSc~=kKv)VTswmhWA@%UkW2F;uxK{a3IcWEE28@Z}>g$fBlgti-dOhBa{j? z$eZmE72du{FanG!_knh?njs!TDSeRBGfx`pyGgI|k|KrFCEPSInCH9k?opAm+h32c z?xCRedhYG@`1-eB=+ElU8tcMm!}VQbU$Nks8tRiFSGndSg~y0E_pdv;1^32c@k(Q; zM6pN{m^tSy*3v`Je>DF5PYjx6&&d->GWSmaoPtqn|4h+GDmSUO-KHI%&-UUyO{PyX zpbQcj&7>eFBQgd8uU7Tz(&_LrleWCU0n%%3{Vx6({n35^Z#5`w?Y-^ooqnJ5UuVc( z^WQmfW#hq*z2-jZuCY7oWBC`{)~Jsc{onTezUixme|`4H%_F%?jVviivw|VX z`FtVAVOBDE%u`&cjgRGq>0*-Xi-IbcM^!^W=h#PX8&zWA?fDTwL%;~iP+LMVt-Q-2 zMqOUcWs}Utjsi^)lx#b@BuftqH|mtj&+?g_IfKd>b0yX43?4{|hxnzu#bu0`cAr7l z6alq)#nQvJe~tz#KSW~_9smmnjI}>iGXVVedA;quD&y>0>vcsUI<>#n4J*;9S1QE{ z3U^lA>lsd_A6RP&yT_-)Lye&nDLQloNX12BdvhIob`b*3VK2|o=dJEB-Wo`(Kf9P}@O#|GDqAbre|^2>)8Bb^pGOb=z!8`H!mj%( zE_}gcpO?R~?3#Pny&QbZ5GRjq&QrcXM$G5co9}nx*{jyG5TuVgia}EkG*6{Ud?_`< zj7|Nn4riIkSRF5RV`mg0F$>OQ$&-4;LFsUB4$DbVb0vK+IeqUQQhd=El}&*Vk8_#h z59~6!e_GF%uG|x(zUGP1wk{99M(;3wr=)(qhhe>K9|jJe)I|U@(3%n3~Bxq8OlSIfAVua#lRUM{3A{vr{<|x zsE)?_>7S%?j4wqIVYL#{gaXrxXQmuOux46Je`Q`%V%O%NF9c0fX1NFKLpT(omJG?0TW+5Z%&p@IfUKIB+5tsf< z?uGSTeZFG-S|2B5{yTr4#Ft{Q<-zYQ_@zJ;fnw+(()R~S&bflU46V%|G)+C^P~x~s zf8vMjhqWb>Q;Qox(csOb#R!@i!rLv&L;+)uhpL$QvJPmXOJ7dkOIPbqYD&f3>ZZiDNJ_6we@tw&TO&tgJB^Q_mCH4WZb? zsayx39%q{7X#ae7$MT0ziG-$tr^YR@)MDOA4r;2Cef{C5&V6#x9xZb&_h4-w1)5Kn z3wBvn^1@6hVl8Lwxt(I5H9NzHC)FX)bdIX8yUG3(^RnIeXe}J!X zfWupB5kwE58qW!T^EE$*mqPVxHPqbaVU5)IpEhB0qTUbOu(=+Wk~TJE-*tN;uYShG zgP+-)SVAe$a>YyG3rt^8dI%NKTQ8X&W2z}F6<4YIF~{*f-ao~0Qf$|qwf52GCuedDBI;+ zz0nZoUz`+Ayml);b$fg=v+-XPsFe`gQW-~TL3HthPyUq;=H!|e+{ZVc*t8qowS9i? zon`(zUxX~LJ(O!CB}JGkubSVTE19zj7y?OoDL32z2KqRfYZ6-gmPk0-fBo_7lriAf zdT`hSHsG`#?B(F2ha7r{)0g~yuK6D6qN=)P?{zXJrF;&UD!rOgvd;rlatyLPVND5W znjvIR{LkwdJ+?89J0B&9gxUfX@r+D8hJf_lE)r#Yb>1k1;}e@SyEk4| zmylNMiqB4h%=!-bx`5h4h8}A^^^uA0?!O*2Dmg(w^_`FT=o74uoRPf0Z-rzSRKQbY zx5UrKGS{r{C?W-b^fa_KUqxWg$1D?TEbaN=)y=nweUE|tkegV4?OpSf?^xEa$*)|Ob$`Eeq2g~G@f+JJ zF6?)1a9x)xw%=eWxEf;H25W~SL<8ngTHl41H(6Y9@Q<00qmVONi7=H&vOgAqndB(ysci0;m& zontLN<4RPKX+?~whkq)HRvGdWEYBb z+4|{bwZ4c?bze(_VFjjPIB8p6ZPHveW!)A6QhuzDuxxB?K6xYR!W}TKi(3;~@@(jV zi(+UE2m{mpyzVWpX5-C(+wzOL*u9~p&8Ni0t|uMhi_Mq!zkljM8;f7LDSqNhr!tEd z5B$k|M2~DupqQsfqK#J|=pbMqkwKauXdPeI^Mcbk)??2@`!|LpK~=FtYrSQqJd4TV z2u6Iuxgd}!Bn;&sb2KW}KGHI@wIiI}6zbBSU0M%G8Pn87%kdUw51SPXxT~!faAm>| zz_z`~@^J6>DSxF-*ts{{^%xAe>Xn=LuB(3H>QCJ}i4Apvs;GFCSE%ZJyT=kMG3d|P zBrb)0{FQh8`TN1l8xhiTU7m|Njw}9$nJ$(3&09+cfgV?hykCwh`bJHIc0|2HQojbc z*noBjjAMnhu=v*5+ncZTQd+*jTkYvPt=bD5U7PcLf`8-zvpH;ME{g;RrtTD#e1*wL zVa-vbDaG2Sks~=sijYa3cDC-49Fux=_vdnu*myx@`g+$Rbpr#QGWvkG1{(<@o4+#1 zdpw18%KP?Z)i+jm7iw}@Kbs4Uh$f0is<^y zB2}1x$bWHNPe|fEBcL7_>sPo3@-UkrvD?t~JSd+oiF0TPC|gr{Wc(glpTXYCDNcEb zU*F%DwKMIE2`&!yeMX z*!~ScV@CobkOdum?(ZJ72tae?EAPBsIY+wgY)&BOCsJ+ff-KKs2_mIEITdtB@X|6u)hpT|zFxQyijJeYH|!}n zvFk+iAFYFb=AT~DN+cBnBe7mh5km67%hlZx=5_w8DjJ;j(_L(w71uu zBe&+4`W5JJ(R3rN3V* z!teNVwUd|E4`(=Yg7h)x>wf;%@0!j_SO4`Id%l?UK?{N34ZIC;=7vKC^tbAumB=#6*o6n{z)xw}IUv8s3`pfG49ymPU{9avlW*WdeVF@l4kD@byJ z!Cvq1g<*d8=j-tejJ7t>+k>|oy~CE`cY}z%-Wl$3o%?TWw!Fr+-tQS4@8x(7R$n8> zaoWegVIMDiBKDTsvisD>MR~;nkNn$a)@$!9w@eC*FL_RMy$y^)Ab<0W?wx|5jl`8+ z@`Pi1Z=uu(orAI7c%VIKYT`?W0f);>Vf{X zRZ!VF`mdbR))E#Y^M9{##Xj%&Z@)R5VOHvi#h>dpZZU`gbYXp2QfOTuqn8AzN9wjk z&(Sl|&5Tyf0t27>oviVySzx0-!JK9bqh_9VKEde6U-n^r>*x2mE-8KuK4*I)2}Zf! z`_6t1WsNolk3NckSRSd5&y+-ExGgMVl3q0X&6eD(fT4m`+r zzR{m8du9f12|8H)#Ve6~(&LLV$EdSd_+M+3>OiIhwFlf8i7n19g_qRDW_HmL7@-*O z9KPWrH!CHK@2!7Jdw$dX+rL@= z-|@wj-yUqh`hU8PF8{s1MKT9I4y;C@;;+ytk0H6B|Ef>&``DGekJ}t;ineT-b?yww zvD zrfD~SI`o)l@J7pElhpgWX(j3KJo(b+l~+3&TRS4k+JBMk^#*79yB@ozb`APi(B}&) zn*L+|joo`X@bu>#f{teW+DRIa<+>qKCka>%zPa0m&2_crb|>yM{2cz#d0+UnKUc94x8iws;UvGQT}$Ri=+Ih0}8ueIJe|7KYwNN&HqOoaKj=7+)i4k(`{cL>~ja9ItZeRRp1NDr;p&IZT&X}mfJ%c zo`1&ON~xkHAS}*U{b2dU4<6IqYN2b5@Qvg2kNe-8=D=Us`queA13!Cxs}*m2&n39X zaj-82Ez@u=`q%(@8X8S!vis+Gu=Q>JKGRN&66X}`IRRNSWav0Fc@`#hLG&~X8$J#a zx(brkBdGM^Y`sBdjN+1}^4R7GC|M-x*?%il>2+k*D=JIP(}KfuG9;dr9*+hbbnkIS5^6GikVYSXTRSvLKS4NV(2mi)2Z)z@WF_ zcVgi~6M$(l+ZGSV@-It5n)hNTcJ^$pEkmkq!+z$J)xbEvD&eEZd-Q4}5Mubrs?}%oLnYJ1~PB7I8Bod)c zB?ufwWUIgwF#)~z64)G)0)xzhFlj+?Tk zcmU$qxL2Y4SmvzUzgPwE5g)uYY%gf97-ie#nKb zt;*FuG(zKl)qSQ=O!oHYef!bs?;Loj<0!?cI7vk1oVpQ0Hul%C15Q9yEkIh`BAYH0 zofgPq(0}E66t@@cDS8Nvh?IiWB{Dxbz_fUYT%LkvvPhe~6}kBm0fPYJMf zZS&8%_Lq*;;(+`@_kTSHa;u4Mb)D)t@k&&^4czmHR%(mo!{-aiBbK@miX?hJH&&OL zE#JAorh6rI61yO zFxkY(!^;dxc)%7Hw`aD?Inukk%h#g}}4QU+x5G-bMFG;Q$KaI}Fu zCf$@{d3!#uz**yq6x_$f4$G&nm#&CVN#9ef4q$jlx_^9brR%)ktDd{?o~(vH$q5uc*hm_Ygx;`)JxmU-Afr1wRvzhLJ+UcBv9BM%@EJRc)fxms#$7_?TK^gK^|uZ` zY#SH)5b8gu8!T4VpTW@Zzt_(C7Ryy+ectq!y$!iq@uT1G6h!17cjsr1QMJj3Ft04-+ZJRPtV*|LrIe)`a2cJ(cDc*Aqe z1p-rV5q2?DE{&w8kH_*2!4+$fRtIPmUpDV{?fpK3>EngfuwOam9MGO$Zgu2U2aj2z z_5=T|EUz{GRa5K}*`l#M-(PkW@E{WHmQ{-Hbfl-1NandsC~^Ce{y``l{2GeU1(2TLT6&BqF3GT7lu% zT|MQ=o&6$=0Pd4qbZ=tab%e#cO=o+|5jdkuG9H@^NgOXR9+H(g_0y zo@TO~f-X4G+(gwh@}Tgb1dHPi6AI@e41c@%=)vlae%5PYc?j5vyC79VKik)63@?tA z28@7CEe{>av>5uos+)f5rW+l6!FPU^i3PgG7dtJdkG(gY<+tui4A@iOXVm*|=Lgrn z>6125RJxof*Of;)N0wic`HaZivwQ~5p!DoDb({)C9i?3!bVZeT%?#=094N6o0DmRW z)JJ@QeMss6H0?ObXS8fz<^*PZ!YccbN<^EF02dAF%z<|`g-+2hH8t(TSI`x6-{ebP zLK$d1_mfRE(=&IydWw4;MrLaa zG2r5Toa8;1{jH%mfA_Ln&wsd$2l8+I*ucHqLu%*&Z0#&o7H2+4>0@#Tl6`(hG@CB! z=Fjk05JgIML0KnZIsM`eWC+3!JbIL))+G!a9O(wdrZCZEyT<5L>ODJd;I?;vKIUh+ zN%jw4VM>o&=DnT!R&TPNT05@+TRT^uq4foO{t+a$KjOFMBmdE3{eOiuUC*@c@4eTc zqE204J@rSQxeA#6@Dmw<$zuj~c!0^g?69GxYcnIm<@hPZiuz2M z2mJ7tk1uX{<9)op>Z)zs_){DHyze(RV{=dzM5JE(rL+Q2j3lKf(aLJsc+!651TMI1 z0-8N3&!-ABpY7_0be_30D5rBO+uBdfGgIbEoX>_=t`0#FFdg>OhQVFEj4xsj+IZ6v-E2`{;Uol9Wht4CmT}>6ZCOrrh=h*|?Ih9Y!KWd-3RZ zZin>L_L6Z_PRCJIwDGxl_0RccM{<`ZtxOPo;)#)?cRt6E2SKSKoLE0DDaOSLNvHbe z8%o#qQ4jH5_x(ZFctF2_cj`R z9HK(LPz`*@H%q`3FE~d|W%*-nd9*X#Fj;t{>htp%hPnv$^fK$(gL3#i2Ris9)8oIJ zej2g~w|@YRO~Uyp(Guyt{~){dZGLN;bsrZlH2LOW_rKQQru*N=SxUSVo=GC!sO0<9 zG9_9tQkoTp({|Z$PN99rKq)37f;R{IU0V}VH%Pk$pSjVfks*>p?Lc%mKj_Bxd0K9H zbly!b5I6YM>S;-@!_TR0HMIHBzvc|&{&l~-?|;0g$F6$)z5g8x%b(capkEq8XUdM6 z_8K}e=5D0qOH@vCU*`g+jm1-_S8??p-ow{p5t-@dJe|I=&fT%DR!E9@Dh+ytTwz0p zeULW)1T-??1lZL2&(}f!*>j4QZ@w{;KRn5vPx{Bc@rL}STl>u$-SWKe8fa$tFYesl zyMJsnQf%Z7f9CeMi(oBY_F?|lk_RvnPSGfX@qna<=Ww> z(-g)oBH(U7>UZ1$$5R>g{H?b9?GR_*&DZP0KA#|6bE+-I)N2t~aG6qgt^;b^0edQK z&*Cz~+F8v=gZ~fCG_rao0P4d>4q@M)XMgZeU;N|^*ERTkcCFn`EEhNM**8ssEYMnD zFju=oL1FLndUmh*U7rWAQ7irrug^k-;sVbN?0OvLuj^YdBJTUXzU~+A(B~}w^f_uW zaPC)LsgoYF+I4HQ$^UnMX4fMoec#S6UzR3)-kYuWd6Huf#3k{a|GZ!O&-lH~G=B53IK4j%l>cl~uU){tl&_*i@}^yIUy_#Hor(I`h_TX!IN z?O%f>G1A75`+v?<_@%8x|Kz~LUiH4p_mhOeVI^tK2&+e|b6%KpdVxefdx`rzgBH^> z3SP8|$A?dXbpXjxDDU#LwSCF-zDCFx;WL^>hiNn42rZN1$>!V4&Q;M3`hT?j+0@v& zn#|9Dhq)?O`@8&OX2I>6yZ>;H?HmIq-Wwce1_%3}xQ|lqv*cR?ZqLCI1+w=gbLA{? zy?z1x7S9t`C&cPk|L6PWtHKFY@y_@8U1+4;$Bn0p7WBiPMJJ?OxGP*4f3YZj`JiIW zD|3nyU8Vb`^X1hCbz}7C=zqh^i`mBh4_j~Trgu;lc3m1EeSgZt&g$;#5P9pL{-sOr zinGgi%LN2Ps5Rc$ zIFO)0mt~WlrLlI+dL7DNd7+y8oZ0+VKaYc3ADjGT!*(tHod?;R!?AW86ZM@_FrCgp zi!7@0g5lyNEFBJ-FQ`3`R(}FI47+LYSB>+_2PP@hxRVD~It43fgn(kCwI3YxYtWh4 zPx2IL)zAI--}yDZ8-I;qSs)`((CfUqX6Pr!`JP{U5Wo4lYi}Fsac9fx6+bm+dBa9- zymoQ?Rm<33u0aQR zgg*-N5?84ret$o-sOQ_pLp-wm4pu+)*STH(^o|yTPfBn2hsE>6t@q>yGr7)!3Vt-~ zHs^UpL1FzALMg9(B>rIio$D-+t(}LTQv}6((1}XYJnT7XsY0P!^kF{7pO8OOT^CAH zEmG4_1lIq2Vr@UaOQ_*z#)N77=puc6bT$~lEg#M-o_{q0@9T<>&w4z;&@0D3XoQfy z&KEQMHPZZF8ls+;vi67veXY{!BOrBdd335n0@`xBO94{O3DP;zZ>yD=a#9sUT*PjU z8j0*RX#NRjMMv*0!&d%g`^SLa_{5pz(X&6mDUzRdpQy#JKBnly*!~RLAM5n?#Rq<8 z(1BFNKYw_^+;SDjjyi%iP}T2#A~`bSG=ZEUO(Qr@ zX&%I-*$hT>~ocblgH7H`>1Vb$lu z>N+Rmmwvp*MXvbP_q`7Le2p*P^-pwMF|Lq02urEi)e zbc@9j_fRE%Jw|| zuV*z~>uG_TfI&uHoKxNGnB_neKw>2p8GeU^&Xn}!caape#DzrR~QxPTndgntpv zg_)Q7gCXVHesT)l*N3sZ6Guq$KeD51Qffc6wdh+*FytWtnzOO_XbmM8HHAWedM?az zc3K5??g}h(0#bRT+e+Nek?}2GcD8(3-=A@oeZ7b*K794veZ6|N{Cl6PIq|KxV1Lsg z4!F$?i~QEF*_icHzbu)J)7m?uGJg=y(!srsLUuHit%r2`W`zg6pVxLEP>q?%719Ee zy*@C=8OXQKXHz32-)Lz2eeQ}F?ZpJS&4RN!0!ep~%r~fVb_BgmDp_cj+Br+PAW{(M z>}Qcre680reqKkY34M~7y6;=DeB)^9L(ub$ecplw{%mPy2+<)oO^&GXL4P&@k-GEv9blKy%H@|V~(fpYWIjrj?zqj!| zGW=HO9i@~ZUZ8hkYc~1jMbNL^Ya&~ATf6v`+Fo;t3G@5dngmCl(0d}et&79tLOFuy znaF%1%N*CyZsvs~r+;y!3(YcV`-#1<0#@Z^=0h9sjD{809>=ve%)ucm*Z`Vq z*#5Is$_T10wn} zxH%~ADQOZ4;tTgWV8lijiMBcd%Qxp12VSgYXvA%v>;2U}u75m7?;r1facax&c_(nw zWq$McH=7x7+3wdYpC7*NzMqX&e)l;KI+|-=Orl(uol8W)S<;T?KFEEn+rX>135j@Q z()|id=QF)n$yDw9v;OzF%}E`(fa}5urmA|*3#nGk;N@>q*hewD;^<#X2hI(UTJdGSKex}4M-d8k6Pe!`?@2_zNrnpA95{dp!P`>gIYM z`R`e}S1ds}?3#B4ea&>6<)SW1@+ijrQx z+u4Xbq4wTUXt+n9w)M?*E?xiqNgHDS#@GVt`v;$Z!LwV2eVpIi53XVsyu@xjht zmT*71OmWid6LP!W8PYesPjMFe{49ROXUmKAbAR!M@74!8KRwAW-qO&-$Ku4Ac8-m# zxSf^f*n@{?=N=L?XEq;3*$ZHQEakBc>~oP;<}kGL?Xvbyh_wL|~@Z{FDU@9(d@y2$R8)uDZ3hl|^c&})zG{#T#+ zd9L}tdUL;d=$o$RyVqa563s3CZgn|ney9a9W#Gy59@RA#iD@w6CwUnMeGhM1h5|$r zf-tGo;+mSLm#07^eIH;0RgOockoa74SbrA>oCt=b`10g3$BFH+rA9ioqtiXw9xYnA z6kc+KXg~zm+^ezNo>f2H{Xq-h{^8L=MWCl9QLy>l%S&|d=VjTfV>6x6viXFgnSor2 z%4+q1LTw;`*Bt%1_FZ|yoW#S`mk%1JUo&&#OYwS}Me(bj_1pI#W9O0Nc&kl-(tjY{ zc!zh}H-+tWPqxj*D+p-AZSU&lcxbYeGb|0=cxAHzxy4{~wk+Q^z%c=hkzT)Uyz&+4 z>w6r_w+(f4@0h*vZL`#T=-U`EI8pqdp#$PVCLugI2RyroE$aI=XwRYdIh^@5pXnd? zGpfXEAHRC6O8(X3fqrx{OJ?&D#D8UQE-I@_`|kHJ+@M5g3_1a6=SSvSNY-;Z4~ae; z-dWPE)kPzYOo5jw0Xg(=ERP+`K}+Gpm85**Km6JCbG>(ul!NmbXYtmDFa}w?2%{L* zx#!8|6Otq?x@i-ZhptH|6=S`C)H&hBIl+n#{Tft1Biz*4!h2gR&1tlA$$zMxq8T;O zNcWhH?`8DRYD1RBT=8Bu%PiNb$Hi=aSTQ)ENY@YPGVo=YdtM?^FHywK z$qkUV3cK(etCcYGNQh%uvGJS6Z!6r|B={|x?w+N-vM{%w6!)~ zdBZgZtoHDmR}og{2YsI91Q+H1#yH}p{q(gKX(o4!wjUmK+HBk+FxlP1!rA#49E)o} zXXk|HVwTE{$4-E{H$5AxlSkU8O{ZThFCHStXI<-|1U!pngP{^O&T@9nD-H!-Pj+{I zphlqEQLbGYVeX`G(|-ciJKJOKr)9fHI7V;|LTNq&+p}T>qiOMnKZ6~8`1;Hs6++@rsj34fe(EfP*fU(F0&&76!T z^bY4QF`^`>prcwKr<^uI>KVtm!Ev!cM(0?_T}UV=6&wZh1H-_d->s~jdJ;Y@FOwSU z@5FSr7faB+zq9l9EEYKQ5SPF5Zd3G_XkSMSJn`={X#Jz}J(tLyiA!-0mZSWQYug^Q zMF&t?w^b*on|~5!j>3;3OcliHKudc1fyndvHB2tST zt52HZ=6{RpHt1Sagm!MPY)g;0Bab!|Yk}reprCX6`5k_$k-QiLrs2tfMgCb?Z72@s zE@N3vi3}FSgoR#NcdV;%9G=E3oL`jjti7H4S#SONUU`;pe|^=@`v1`L{p{yn-0+RVEe8I-#GTuE z;=0nVFDM5#ge2s!w6QH40rCbgXNd_i7;qAj$Z+-RDVbe&k2%Je|Joh&)18DSSeB(y z)qhv-_se}qmDk?miw^rJ&EShTPq;uPNnGK2_yQ*P_ynH(W}yZ!Cv(>8WLRC%54+Sf zqfuN)tM%j9%(=Up(d;K;W=I>k5Xs(z6n!#^JA%1^3SzP?ch6b}EaES)LDr^ZB<4q@ z@>H2kCXR~_O7t78I`5Ra8Ha+C>e)n89)H5bbcU^+%IFxuln6Cnb*W}K-F_Hq8%QrdP|@5@ND*hwHQL` zl?VP0b)N3-X@5LeyFXD}yai1$_aI};tt8Bi?*Ve2+Wh$cYyE$r1%aH?5?YQoQ-7!5 zXSp#wDlx7yMG&qQ2M-y9tKxwJ=gbUQ9QU@cmF2j}N~*FcS@Xs#+jDNLgoc+MP4zE* z1u34HV&%QV3cd%nKc5X?S&@~Bp0M&lKBYL-)eaY)-=Ww_+%u3>I&pD*CW%{&ndZl# zRtbFWUw8~7NPCY=_lR-}-@`n$Uw@N3#dq+z$ND<Zul!T+zgN0KJx_BXteGTloo?$C>Vc+`Yzx0kMu+Pd)mBh z_u2z-!|1_!P^_j}moQj~M-IIWVdmQ-`@*BWKA{$vx7FaW|qV824VPfq?xSlCkpOlni3Vh zf(g1s{?mtZTzywlSAP$z&T)u~418d0^X7v&Wy-!L&hBj~R)5h_tXdxe`>elb^r+yW z)el~L8xH4s6EAyQmigf~X4+wx#5OXktJF|SeBRXTCPuhl3a#*~s ziyiW0-K4EGXMZ)IE(Rn-_^j@LzAKnvH{%%dF^8h^Z4OMH``=wbZ2x;EPC0)8ZJuA0 zHaCYHAAE*NPf6qo*!s<>o}e$e1SXHjPaPN@z{(JE(1Mt!yHxiAO3NYw?nZQdcNCnE zMrFiEpO%KKN)sUy4g0Z(37&ZmeLi@));WPvK7h$QI3HFi{Hz}2ar7XM<2#2Wb7I6- z0LBNbOR8K3)TLfn1(zK13on1pLj1}gTi@UHexAx-c|JD(-^Ddzi~Y9D@`k6DS8%ZW z%ZW%}=h5 z_}0VVCckplfVF2yI6b#5GxExL*=7+_y%5wQ8pSFrR#@K`^Wou{1WjE^C=@r63=`=F zM}b~KDuDgQHzswy^7Mbivl>yOlDW7qFs(&NtnN4I3c2W%d;XG_8-vC#bS~@44>o>|yxURAbg`o>J92?XN#~6^}6=A9+l7p0f!XGj|BM z_8ly5R7|mzhzgVFJmq>GNs69o-kd1O;g|6&=b+zx85=FieG}`NPSt5Yp3MD3*F>U@ z=&Kqee8FPWp)P-)hL*Qt+WORoA=alRZF*;4n43EFnLAX>P$NCi^1|5@?80F1Pd<i(@Jvc%T?$H* zIv?%&U0+wM^IRa;lU(B)?$P0LMi1TfsMH@6`POCSL!;;NxUY4o=^$}`oAc#MFp@_< zZu3J38-R}tBRXw$QSG?fdcZK1vs3=W=3s_NkN$d|Kl9ITd?4C>#qQOk2m2K~NS8OF%w$3DXT7t?>42RfWgP?w-o6r`e_b@_c%eh-Hv zp2)1X3fZEZ9dr|DH)wT!0Cx}^A$EWD%}R0K4~}=poVdJhy!-3djdz#VaUgGqKK;}u zzqcN}S<6`rHPnv%#5E6r7M$7~{>kU|C@SPS`Fio^n4&)g739r$u07W)IO2IFitF*! zpW^SwRsN&T?>r<#N^C>rv7Czoe+fyU0-_)mtpA_=ePOo$k6yhVnV!VEPI<9J;tUgEI$Ko!aGN8(x@sj5Z zDoWxGBvfUAeK)V#`za3gzS$a;=-&DvAt75(tcq_~$^#bvT!)_X`|q}s3IO$8%i?FWlGxQTkras zjZb*t6PXN@o?zX9!B@hdKo0jRB0BM7o%&R-FpeO`Ov5sl1ev4P`*FHwDqt`mgT~yC zW2|pwl*M^U8wiNDescu0afXdM_2X(9q(z@2LtKdgE9=zL?&&qIGi;7`^Fx1sF*@iM zVydBcP&1~gz+dU7AkOE0oL_&|h}Z`=Bu!n98rSB<(Z7c94BkxZUlZALo-91o3joh~ z$3OjJMDmBPP5BSLHst-*QwIFq-}xf`)7QAq2O8e7-Xg7EJNhQJdC_yO2-#Nl`6A+e zxX)igEy$-5+$ZlpMD?hoYDQFl9XK066nTif4@TG4Al_M3lq{V!>}#+L;a10Xxb2(4 zxPWPCeHBjgZQVZ4i?t8#2$|)y#Qxd+r$Em=WF#6zDVU#+$NiA0@F^@a!64%dYY%3R zeKONXaoZ^;LM7zi^KWUYZHk&WX}wZMgKkreF;t5UX^YnR}&WI4A|7urH)SKT;P1 z&qfeA504@m7?;Ud2z8W!Sb@jlzVuvlUlJAQK9#150`&e?+$V&m)YReB2*IF^h({!( zZg)tEvApj!p6*5CR1OIA!pJRRf130+oHP$H^k*X@Mwv$4v3ccS2B7B8ORM8<*Rf|E zb#NUU0rlE(z3@P%r~UyPr&dFJ<8gDt`t;B7-mQKwJmemZ={awB_7=eP|E4~O+^w^Y z;$EIKfU@b)R@Rz$$fELJWTk*V7T5ZruR#U-k_R?nCBtQe?||e{unX! z>czV7X#GfmKgbF~qOvFr(3>f&m@4St07F6{C5ODECMPVJENJTXX_+5IT7@G95%!;v zrij=oCqb%TMY8`0lH!4yoKu?bsVM`oDvk;EDd^xS+bK89_9-nzMv(`mbIDT3Jf8I# zdbnqDPnwAs9O{4?6HK!#e|}eL;3w#f26N9;rQg)bi}$Z_16X@{^%FOUu)m;bYX?E2 z-=f%!Xwug_7Gp}Xm1uO#bm@thyAk%OV7?TV&J*}UywBE$9H6rM@=1pw&N>WSzp!Wi z8bcj^jWg_7?8c`IS&s6;S86pfr#y-F=9EA@NjtfxBpEA(nx802e>m|D^P!}DN{skn zAZYMn1UFa{7oHbq+kviDeq8v3=w)y)32RjF((Jt#A|uZ4e0JO;a5W087v-LfHruzt zts_WW;m7sp{b?V1N{rDT(OYF@Xo@A2W(8_P<}ScEwTO$*CG=|ipo zkWX{**>1U@H+KmuCOmdkjw9B_xNpSTE9G) z*3+QLKf*&k<5ZO1$|G+{Z-#k7y4rixtr`L)dZsUCYa=y%j}JGX*gtZ;cYh4 zK`Q4#qS0$KcYgY@XKK$&xCa>yp-8C;zdU6oK~n9eX~uitG7(tF2Zizx$gU(=}f@`HgSU`s5weFZmj~b{+Bi7mtc5Kl`Yi z_HR3XkLPtHf4^lLxBEJtV~^R|JF#v~>zVbew)grJOIemdi97@J6v|J%(Cz(OnoJPm zmMPXRbk4kkc?&7L27o$9Qv|4sA!5L6U%LpxM1-)Gc}la=GjhaK949^KHGZAo&7x=D#W)@|3S4x_20Ie?(y~cx?0LLqNO4lGAL$)s(q! zfi{jfynotw8M4`+!*|US=twt09yof}d<9y+RI9$GD&urjk43C!!syp%RV*m+0?`Oe z0x@O`x8CkLxO>K2sGfwxj3ZCwiX^%s2fk)TdIUW5ew(smu_Q6~p&PHs@qH3*Tvx1% zchCa&e|=6N$Hc}6bx%-a&&dk)ZFvCq@_qzjXQoCY8$XVsX+ToA2!2t!k%A6$%{ z?foTRnUbW%2eXgAP@?Pwq>(w%c-Y}eQV49W{4*cQ#0=0c&mR;q)kVS%YC1Ls{Q!nz z&=FN~NC-qhqY_WMGr%?|!V_XD`4RIBbGPHt%mdn=QROxR7(P{v@d$hW=AdRFl{F{s zf7-J2KnyANi)oYcQYdjSYmfBqCIqiX?*fyJ0@dG~xXc}RLOzT&t^w4jpYuk)_U{&_ z997S|dN&p&Ji*veip>w;d&&ejhxm39Fe8(=n6~iPp7%_lmexDiHfA$+L`wqNG`fbn`7E-MDw4M$5Z9m$~Be=%o zZTwW`s5#4!U)jb?MT}kuT-m+ym1<6j5&={l!?2GU&orgCPmBm`JyO*^oyCNp?%ovN z=~|FoDLGLJ>sS=x_#sn!Moga&UC?wVQ}hP|ZC{hYft2*eT`}FG_xjxr%fj|Ye>hIY z_7FIz*9Yw9Kk&BKj@nw(x%VCaoa5t`CwPhrtf&1?CkRpB6e~%LV3f?aF;rpf=Tl-7 zCB{rh`Mh`3_{-NtCpp3o_U}`Zg&vu(m$*Q0`q@h+CmcQUjGQhfwtf>k$BOij7sN6E zn&Ezm+6p`?w=qbgu*T+J=LWJ2f7BKk754Mv{Aswt{DW5(aT|{W^Xqtj?YW&7t`DkX zQuX_I3!fp7x~Qy~+GboU#6==L7R3(r4r}f8XC9J_Y0x zhTg75Pv?~iX%=rijj8QIdMaPOjyo&p7Mhe9-b)H)xQR`l@oKR_Pq)6RIo_W|N3tw% zr(pITjnzbTs~M`*z3zs2v5bh?GjG>XfAehlEw4Ga>qD>xqae2~8lvUHpfBtC~lps{+;ezva zUXUU}JtzJw>6#c{OO;{-D$zp7?X-`_!wY#-Y*!JXY? z_&|GKB-I1ROh)v>cvfW(R15TY&|vm!8Y@Q3{*xNvt$&{G7MPm{J;d7m%DrC?5_b@k zWH-I^AGz$HPuB+=G z@*U8do)*pD$foALm*;CsaLvPP*eF35W9+%0gf8|`ClYITf~w4*k- zj}FZ~fJTLq>^W%d({=AzsAq39G^Td86K3kSmq`)h zn5LZvZ>099U3~#lSAXrgaPNe8g`Wj-Zeh4qznq&q)?-5hxAPRw)yJzB&1IfmaudSj zm;7qjxb}070DTrGS^6LjkRxlw-aF##(z~@1vyim+&I8F=T5_HcGQv1YVd7+O);iDl zWV7>#8iV51gx|$d`wVGscO4oJ{z;E%7Yx6JH zzPcMDK;g8kYbY9D#5GZ+R}ef4Lq)dw4yaHlEAZB0im$sXWKsr`~=WH{Fr+W*j;9AJZb4XMe>QnlPB7Um)h;K>MD`-(HlY4l^(8xc~6W`T_DzrOa^r*5r(uEF$JT zEK1`Oetzz8)hCQOU^P6dywe$PjXL8~& z{mSXN2bHtj^vBLvdVyKU5OM}3NTM^R#50rvJPYL%vRC3hlw2vFz+;wS&fE6IUaoJ7 zYs$_2z<-#^`k7;|i`~a`bo9XZByGAL24!a1->KdROz)VLg$GMDAeasSLx{?hjl&P~ z!&tjFaWo$xAMO-KU+d=WI%H!R8!vZdKzsW>QMt5ZSS}Lj91l-%9S~EDh&N4`$}rdK z6p8g~KgEmD5=ahKD)gCk-YzBB9uNdQ57wZw!XhLc621eu& zXzO?1?q7~G)*-_>FS5%$)8n|;Lc=eb;!D3E{$V|O(qXlABz4ZM7mT<6j&WR0`WE&8 z9OF{thYdiP?ikN6y5O|4VtyL~;=b(py&)RCqrZRZP%$MzRg&o}49gycvffbTAMsw} zeg&x@Qtc0c(E(G&C#&x=H^T1y5PR!7LB@v}5~HTgRtCMAQGR4-DMpwZ@6n`pYH3K2Ip|c_O73}BjEw(wjRM&M)Dv%^Z^7tQSS*9 zJobO^-XcRMv-kF=yI>{IwW^yKls`PG$eBq6ll_pGx*NCW7e*eQJ*EqfL-ta7`w)!g z=X=UbCc>u2pv30>46kSD9D7umsMqT2IC^5ISMR$v7i912`Fw%(iS_0DncA8mo-tyH z*WKpD8{UOEmsD{%^I^oyzDb}((*F39ygFigudX(66T&q(MWj4J&-zS~XnZbX|z3_j) zPrcT~Or)p2Y|TG0AH|g_hRc}?Mtuj6VGj}~Jn=-V2DDM->e$2QH}G^;43}x=FJ2eX zQk5$$zUcBk6i1ZLW-F@1JxxT1>be6*a;jlL~k-Es)+bJMPaaeH2CwGu7w8Q;hLzkp-x)z)(Ex#BCV!=m=}U4J9Uv z#}yLno`Ea$6Ee@krdlGKokKB`WxW`sMgl%JRdyt%IL0xa)r1_g&0=xLG#G!e6iSOQ z0S0Biq%b&I@7=~r$`ubly9sC>`0JOc^Oa181i45eVM+vk(wm62`^-!6JdWL$L8C9P z4_2-R8|@9^-u3nLd8(4)k$~z7vwNQEYB$n~4_7jT54&#y>huRzzf@bDI=|`l7pd(T zJnb!Dy-7n2WJqDc8zCvw9vpwn9#Z(EWiceGmpDI$kyv@)JUzqw{;n^|C-~sV4PlE} zeDsfd4XUDCr_U4gs5XkeJ5P6@g1epaEDt5ZzR7it9FcqEv$p5(-*tDFI@`r#?&@oD zs>>dW+g|gwo^ibPp5OGT+y8ec<($BCD4Qx^l>1p%PKVxmoUbRo-{gO|N6Zp8ro1WM zJT78r;<4YDts?S0EWTo*_9lr{=9OW^U_Kl)GUYnxd$EXEI>+8u!g^o3Q77tT)qWEt zu%_`|`~HHDwaZDf;It#V#X$~2VgeEGH-~MdAcd{iEQFZ!3CrAaoK{z^$x>n88pdx) z+m~F#_H1ylF;*;+)8Bs>SG{mNcMaSjA2IFwPOV2wdp&1Y(@k6l6qe5~kcFTyhsu{N zx1^8yyPfkS$PpyRxIWg%3qYQfJe@DqnW=YkPm1krP3X#5IM++%!NcC;vmdJE_u)Ed z{X^N+!vKS*w#G#o#UR^bjYbgEm4Q2;#>{!5x4h|f<3z0(C=Y+@%tY%UHG4E+*#>3( zAuIS4pg$N9hcMP7P->nva!>F3FVR7-0+KU`B_qbm;8PC+(;tH25vO9uNWGgpPTwZ0 z{hL&M4$4&_GAc?~g+{*)yqCAi8wMh&k^#sRjdo z{c}fQ@Bqu2zJp?@A91p_M+ZL35$a_f%$eD98p)hzO=BYp+aJX2xdvJCXCIj0TI$^VnAkz3ZuQ0|L)?6YSMgB(er^$f@HHe&V8p+_`LK< z8Ga8<7O5f{rDYe(K4^cDD^j-}&BDZU>E+~sjFK@8^&O>}|CwT7q z%<-Iuv2gdo6PTzGR2i}Yp75-5J{OM0&oA&L$TPt5`kdoE;g@@J!#3;u^33ntF>{`# zkkswpD)X^%5`La0*tZ!uPZL` zJO!pxe4b%qkZ&Z38 zM@)Y)y=6Cqzi`d+)-%hTSc+IDKm6h=G<{HRj;t*f%OOe4t8a3j#aRS3_=8Oya@`jM zhAxZch&8{xCYg9aCcb^B5@vPY{H-h@rHT^{CSIDHM~%#&(JxesXDIVMRlTvN1f7Un zG1VWl*mHKEye@u{!j z9$pRk0T}+}@6~>P!5{Fwn{1*UhCiFQNKG&EJTu1pv?9VeYBo_)w|?u`pIl79NIQSm z!4xNr!}**c54;rjLp`JvYV!ED=geTsE%$ikdDweI%=8P2r!km<3D^hZr?+s*8e6=! z1M~)A>WCUnr_RZT_6<=zT%R$2L0#-$X9BQ&V0cuwp0_w3&V70Qji$flq@K>I1Ea3m ziAG)e+s`%e6Qi~__rlEt`Z+m#?MHt`uL)?y^?N!l>!-oW%zeDKO`XK9SP!hfeHKi1 zy_HD06K3kkQ{{9_>ZdWooW-6G)<5|)-&FjN%OulYh`!NVSyMI170HLNmi~~uqgNiK zaEMt`9%6U%1tmeQjyLr()QUy9!akv;^Q=Y&eJer(UGzf&B5!)8ZsM<7k6(Z5ul9>} z#5?&}o_s6Xo=>5F@v%E|jkg)u28>LaOBLs7er?y3m+n9 zzbBRx8!R1S=n^@x$aM6uh90+l+kB4NULH~926w7Q9(1r}%IObroe+PiLy$G_-9v}M z^(DXFE2%6B!*O*n0Nac9pak+jcN>+5ZO zYS9~YWx1gV*Zhu{N)UhReJv7#9gW2gC$-_sgdlDTPOr z9$>mV7Eg=m+D$0R5p_-?#n)Acgw){jfK zSP{)9Y~NqX9N8Xx7t;DLX7W8!ufv1t4`ao%IO&&SwTQx`=Nv3Wd5m>W7DGZ!g-t+-DEC!( zXpr$yctpHlU7KniQOI-gapxB=;j&bHGKk&9AFol>r+8mJ_c!&9s$n;#HW6ydHSsv2cv>66f-Vm6aQol@1PkK-_oCMKMzLUi?FU zI~&jSdRdzsH+17 zbPq#y9%nnAx0>*lE!~cB%LqE>3fHpbHnE(v@xEmaZE>vC8vP3DRdASlii%~RRW~&_ z&JS4(^(u

- +
+ +
- From 1204f9727005974587d6fc1dcd4d4f0ead87c856 Mon Sep 17 00:00:00 2001 From: Tei Home Date: Thu, 9 Jan 2025 19:32:06 +0800 Subject: [PATCH 057/279] doc: add cuda guide for fedora (#11135) Since NVIDIA does not release CUDA for in-maintenance versions of Fedora, the process of setting up the CUDA toolkit on Fedora has become quite involved. This guide should help mere mortals install CUDA for development in a Fedora 39 toolbox environment, without affecting the host system. --- docs/build.md | 2 + docs/cuda-fedora.md | 317 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 319 insertions(+) create mode 100644 docs/cuda-fedora.md diff --git a/docs/build.md b/docs/build.md index 84019b204..3b0d2211d 100644 --- a/docs/build.md +++ b/docs/build.md @@ -127,6 +127,8 @@ For detailed info, please refer to [llama.cpp for SYCL](./backend/SYCL.md). This provides GPU acceleration using an NVIDIA GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from the [NVIDIA developer site](https://developer.nvidia.com/cuda-downloads). +If you are using Fedora (using Fedora Workstation, or an 'Atomic' variant such as Silverblue), or would like to set up CUDA in a toolbox, please consider our [Fedora CUDA guide](./cuda-fedora.md). Unfortunately, the process is not as simple as one might expect. + - Using `CMake`: ```bash diff --git a/docs/cuda-fedora.md b/docs/cuda-fedora.md new file mode 100644 index 000000000..b993386c8 --- /dev/null +++ b/docs/cuda-fedora.md @@ -0,0 +1,317 @@ +# Setting Up CUDA on Fedora + +In this guide we setup [Nvidia CUDA](https://docs.nvidia.com/cuda/) in a toolbox container. This guide is applicable for: +- [Fedora Workstation](https://fedoraproject.org/workstation/) +- [Atomic Desktops for Fedora](https://fedoraproject.org/atomic-desktops/) +- [Fedora Spins](https://fedoraproject.org/spins) +- [Other Distributions](https://containertoolbx.org/distros/), including `Red Hat Enterprise Linux >= 8.`, `Arch Linux`, and `Ubuntu`. + + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Monitoring NVIDIA CUDA Repositories](#monitoring-nvidia-cuda-repositories) +- [Using the Fedora 39 CUDA Repository](#using-the-fedora-39-cuda-repository) +- [Creating a Fedora Toolbox Environment](#creating-a-fedora-toolbox-environment) +- [Installing Essential Development Tools](#installing-essential-development-tools) +- [Adding the CUDA Repository](#adding-the-cuda-repository) +- [Installing `nvidia-driver-libs`](#installing-nvidia-driver-libs) +- [Manually Resolving Package Conflicts](#manually-resolving-package-conflicts) +- [Finalizing the Installation of `nvidia-driver-libs`](#finalizing-the-installation-of-nvidia-driver-libs) +- [Installing the CUDA Meta-Package](#installing-the-cuda-meta-package) +- [Configuring the Environment](#configuring-the-environment) +- [Verifying the Installation](#verifying-the-installation) +- [Conclusion](#conclusion) +- [Troubleshooting](#troubleshooting) +- [Additional Notes](#additional-notes) +- [References](#references) + +## Prerequisites + +- **Toolbox Installed on the Host System** `Fedora Silverblue` and `Fedora Workstation` both have toolbox by default, other distributions may need to install the [toolbox package](https://containertoolbx.org/install/). +- **NVIDIA Drivers and Graphics Card installed on Host System (optional)** To run CUDA program, such as `llama.cpp`, the host should be setup to access your NVIDIA hardware. Fedora Hosts can use the [RPM Fusion Repository](https://rpmfusion.org/Howto/NVIDIA). +- **Internet connectivity** to download packages. + +### Monitoring NVIDIA CUDA Repositories + +Before proceeding, it is advisable to check if NVIDIA has updated their CUDA repositories for your Fedora version. NVIDIA's repositories can be found at: + +- [Fedora 40 CUDA Repository](https://developer.download.nvidia.com/compute/cuda/repos/fedora40/x86_64/) +- [Fedora 41 CUDA Repository](https://developer.download.nvidia.com/compute/cuda/repos/fedora41/x86_64/) + +As of the latest update, these repositories do not contain the `cuda` meta-package or are missing essential components. + +### Using the Fedora 39 CUDA Repository + +Since the newer repositories are incomplete, we'll use the Fedora 39 repository: + +- [Fedora 39 CUDA Repository](https://developer.download.nvidia.com/compute/cuda/repos/fedora39/x86_64/) + +**Note:** Fedora 39 is no longer maintained, so we recommend using a toolbox environment to prevent system conflicts. + +## Creating a Fedora Toolbox Environment + +This guide focuses on Fedora hosts, but with small adjustments, it can work for other hosts. Using a Fedora 39 toolbox allows us to install the necessary packages without affecting the host system. + +**Note:** Toolbox is available for other systems, and even without Toolbox, it is possible to use Podman or Docker. + +We do not recommend installing on the host system, as Fedora 39 is out-of-maintenance, and instead you should upgrade to a maintained version of Fedora for your host. + +1. **Create a Fedora 39 Toolbox:** + + ```bash + toolbox create --image registry.fedoraproject.org/fedora-toolbox:39 --container fedora-toolbox-39-cuda + ``` + +2. **Enter the Toolbox:** + + ```bash + toolbox enter --container fedora-toolbox-39-cuda + ``` + + Inside the toolbox, you have root privileges and can install packages without affecting the host system. + +## Installing Essential Development Tools + +1. **Synchronize the DNF Package Manager:** + + ```bash + sudo dnf distro-sync + ``` + +2. **Install the Default Text Editor (Optional):** + + ```bash + sudo dnf install vim-default-editor --allowerasing + ``` + + The `--allowerasing` flag resolves any package conflicts. + +3. **Install Development Tools and Libraries:** + + ```bash + sudo dnf install @c-development @development-tools cmake + ``` + + This installs essential packages for compiling software, including `gcc`, `make`, and other development headers. + +## Adding the CUDA Repository + +Add the NVIDIA CUDA repository to your DNF configuration: + +```bash +sudo dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/fedora39/x86_64/cuda-fedora39.repo +``` + +After adding the repository, synchronize the package manager again: + +```bash +sudo dnf distro-sync +``` + +## Installing `nvidia-driver-libs` + +Attempt to install `nvidia-driver-libs`: + +```bash +sudo dnf install nvidia-driver-libs +``` + +**Explanation:** + +- `nvidia-driver-libs` contains necessary NVIDIA driver libraries required by CUDA. +- This step might fail due to conflicts with existing NVIDIA drivers on the host system. + +## Manually Resolving Package Conflicts + +If the installation fails due to conflicts, we'll manually download and install the required packages, excluding conflicting files. + +### 1. Download the `nvidia-driver-libs` RPM + +```bash +sudo dnf download --arch x86_64 nvidia-driver-libs +``` + +You should see a file similar to: + +``` +nvidia-driver-libs-560.35.05-1.fc39.x86_64.rpm +``` + +### 2. Attempt to Install the RPM + +```bash +sudo dnf install nvidia-driver-libs-560.35.05-1.fc39.x86_64.rpm +``` + +**Expected Error:** + +Installation may fail with errors pointing to conflicts with `egl-gbm` and `egl-wayland`. + +**Note: It is important to carefully read the error messages to identify the exact paths that need to be excluded.** + +### 3. Download Dependencies + +```bash +sudo dnf download --arch x86_64 egl-gbm egl-wayland +``` + +### 4. Install `egl-gbm` with Excluded Paths + +Exclude conflicting files during installation: + +```bash +sudo rpm --install --verbose --hash \ + --excludepath=/usr/lib64/libnvidia-egl-gbm.so.1.1.2 \ + --excludepath=/usr/share/egl/egl_external_platform.d/15_nvidia_gbm.json \ + egl-gbm-1.1.2^20240919gitb24587d-3.fc39.x86_64.rpm +``` + +**Explanation:** + +- The `--excludepath` option skips installing files that conflict with existing files. +- Adjust the paths based on the error messages you receive. + +### 5. Install `egl-wayland` with Excluded Paths + +```bash +sudo rpm --install --verbose --hash \ + --excludepath=/usr/share/egl/egl_external_platform.d/10_nvidia_wayland.json \ + egl-wayland-1.1.17^20241118giteeb29e1-5.fc39.x86_64.rpm +``` + +### 6. Install `nvidia-driver-libs` with Excluded Paths + +```bash +sudo rpm --install --verbose --hash \ + --excludepath=/usr/share/glvnd/egl_vendor.d/10_nvidia.json \ + --excludepath=/usr/share/nvidia/nvoptix.bin \ + nvidia-driver-libs-560.35.05-1.fc39.x86_64.rpm +``` + +**Note:** + +- Replace the paths with the ones causing conflicts in your installation if they differ. +- The `--verbose` and `--hash` options provide detailed output during installation. + +## Finalizing the Installation of `nvidia-driver-libs` + +After manually installing the dependencies, run: + +```bash +sudo dnf install nvidia-driver-libs +``` + +You should receive a message indicating the package is already installed: + +``` +Package nvidia-driver-libs-3:560.35.05-1.fc39.x86_64 is already installed. +Dependencies resolved. +Nothing to do. +Complete! +``` + +## Installing the CUDA Meta-Package + +Now that the driver libraries are installed, proceed to install CUDA: + +```bash +sudo dnf install cuda +``` + +This installs the CUDA toolkit and associated packages. + +## Configuring the Environment + +To use CUDA, add its binary directory to your system's `PATH`. + +1. **Create a Profile Script:** + + ```bash + sudo sh -c 'echo "export PATH=\$PATH:/usr/local/cuda/bin" >> /etc/profile.d/cuda.sh' + ``` + + **Explanation:** + + - We add to `/etc/profile.d/` as the `/etc/` folder is unique to this particular container, and is not shared with other containers or the host system. + - The backslash `\` before `$PATH` ensures the variable is correctly written into the script. + +2. **Make the Script Executable:** + + ```bash + sudo chmod +x /etc/profile.d/cuda.sh + ``` + +3. **Source the Script to Update Your Environment:** + + ```bash + source /etc/profile.d/cuda.sh + ``` + + **Note:** This command updates your current shell session with the new `PATH`. The `/etc/profile.d/cuda.sh` script ensures that the CUDA binaries are available in your `PATH` for all future sessions. + +## Verifying the Installation + +To confirm that CUDA is correctly installed and configured, check the version of the NVIDIA CUDA Compiler (`nvcc`): + +```bash +nvcc --version +``` + +You should see output similar to: + +``` +nvcc: NVIDIA (R) Cuda compiler driver +Copyright (c) 2005-2024 NVIDIA Corporation +Built on Tue_Oct_29_23:50:19_PDT_2024 +Cuda compilation tools, release 12.6, V12.6.85 +Build cuda_12.6.r12.6/compiler.35059454_0 +``` + +This output confirms that the CUDA compiler is accessible and indicates the installed version. + +## Conclusion + +You have successfully set up CUDA on Fedora within a toolbox environment using the Fedora 39 CUDA repository. By manually resolving package conflicts and configuring the environment, you can develop CUDA applications without affecting your host system. + +## Troubleshooting + +- **Installation Failures:** + - If you encounter errors during installation, carefully read the error messages. They often indicate conflicting files or missing dependencies. + - Use the `--excludepath` option with `rpm` to exclude conflicting files during manual installations. + +- **Driver Conflicts:** + - Since the host system may already have NVIDIA drivers installed, conflicts can arise. Using the toolbox environment helps isolate these issues. + +- **Environment Variables Not Set:** + - If `nvcc` is not found after installation, ensure that `/usr/local/cuda/bin` is in your `PATH`. + - Run `echo $PATH` to check if the path is included. + - Re-source the profile script or open a new terminal session. + +## Additional Notes + +- **Updating CUDA in the Future:** + - Keep an eye on the official NVIDIA repositories for updates to your Fedora version. + - When an updated repository becomes available, adjust your `dnf` configuration accordingly. + +- **Building `llama.cpp`:** + - With CUDA installed, you can follow these [build instructions for `llama.cpp`](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) to compile it with CUDA support. + - Ensure that any CUDA-specific build flags or paths are correctly set in your build configuration. + +- **Using the Toolbox Environment:** + - The toolbox environment is isolated from your host system, which helps prevent conflicts. + - Remember that system files and configurations inside the toolbox are separate from the host. By default the home directory of the user is shared between the host and the toolbox. + +--- + +**Disclaimer:** Manually installing and modifying system packages can lead to instability of the container. The above steps are provided as a guideline and may need adjustments based on your specific system configuration. Always back up important data before making significant system changes, especially as your home folder is writable and shared with he toolbox. + +**Acknowledgments:** Special thanks to the Fedora community and NVIDIA documentation for providing resources that assisted in creating this guide. + +## References + +- [Fedora Toolbox Documentation](https://docs.fedoraproject.org/en-US/fedora-silverblue/toolbox/) +- [NVIDIA CUDA Installation Guide](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html) +- [Podman Documentation](https://podman.io/get-started) + +--- From c6860cc7346c90219475e4467bb8a288e0df975c Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Fri, 10 Jan 2025 05:43:03 +0530 Subject: [PATCH 058/279] SYCL: Refactor ggml_sycl_compute_forward (#11121) * SYCL: refactor ggml_sycl_compute_forward * SYCL: add back GGML_USED(dst) to ggml_sycl_cpy * SYCL: add function name to noop debug * SYCL: Some device info print refactoring and add details of XMX availability --- ggml/src/ggml-sycl/common.cpp | 4 + ggml/src/ggml-sycl/common.hpp | 1 + ggml/src/ggml-sycl/concat.cpp | 5 +- ggml/src/ggml-sycl/concat.hpp | 3 +- ggml/src/ggml-sycl/conv.cpp | 5 +- ggml/src/ggml-sycl/conv.hpp | 3 +- ggml/src/ggml-sycl/element_wise.cpp | 96 +++++----- ggml/src/ggml-sycl/element_wise.hpp | 48 ++--- ggml/src/ggml-sycl/ggml-sycl.cpp | 262 +++++++++++++--------------- ggml/src/ggml-sycl/outprod.cpp | 6 +- ggml/src/ggml-sycl/outprod.hpp | 3 +- ggml/src/ggml-sycl/tsembd.cpp | 5 +- ggml/src/ggml-sycl/tsembd.hpp | 3 +- ggml/src/ggml-sycl/wkv6.cpp | 6 +- ggml/src/ggml-sycl/wkv6.hpp | 3 +- 15 files changed, 223 insertions(+), 230 deletions(-) diff --git a/ggml/src/ggml-sycl/common.cpp b/ggml/src/ggml-sycl/common.cpp index 88314a5cd..022e7b763 100644 --- a/ggml/src/ggml-sycl/common.cpp +++ b/ggml/src/ggml-sycl/common.cpp @@ -51,6 +51,10 @@ void ggml_sycl_host_free(void* ptr) try { std::exit(1); } +bool gpu_has_xmx(sycl::device &dev) { + return dev.has(sycl::aspect::ext_intel_matrix); +} + int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size) { const int64_t max_range = std::numeric_limits::max(); int64_t sycl_down_blk_size = block_size; diff --git a/ggml/src/ggml-sycl/common.hpp b/ggml/src/ggml-sycl/common.hpp index 62b4cea3a..e9500f3a1 100644 --- a/ggml/src/ggml-sycl/common.hpp +++ b/ggml/src/ggml-sycl/common.hpp @@ -662,6 +662,7 @@ inline void ggml_sycl_op_bin_bcast(ggml_backend_sycl_context & ctx, const ggml_t } } +bool gpu_has_xmx(sycl::device &dev); void ggml_sycl_op_flatten(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, diff --git a/ggml/src/ggml-sycl/concat.cpp b/ggml/src/ggml-sycl/concat.cpp index a240968ad..d41cfd3a6 100644 --- a/ggml/src/ggml-sycl/concat.cpp +++ b/ggml/src/ggml-sycl/concat.cpp @@ -158,8 +158,9 @@ static void concat_f32_sycl_non_cont( }); } -void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, - const ggml_tensor *src1, ggml_tensor *dst) { +void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { + const ggml_tensor *src0 = dst->src[0]; + const ggml_tensor *src1 = dst->src[1]; queue_ptr stream = ctx.stream(); const int32_t dim = ((int32_t *)dst->op_params)[0]; diff --git a/ggml/src/ggml-sycl/concat.hpp b/ggml/src/ggml-sycl/concat.hpp index 5a04feaab..e5cb7314c 100644 --- a/ggml/src/ggml-sycl/concat.hpp +++ b/ggml/src/ggml-sycl/concat.hpp @@ -15,7 +15,6 @@ #include "common.hpp" -void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, - const ggml_tensor *src1, ggml_tensor *dst); +void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, ggml_tensor *dst); #endif // GGML_SYCL_CONCAT_HPP diff --git a/ggml/src/ggml-sycl/conv.cpp b/ggml/src/ggml-sycl/conv.cpp index bc4ab1ddb..ddba601e1 100644 --- a/ggml/src/ggml-sycl/conv.cpp +++ b/ggml/src/ggml-sycl/conv.cpp @@ -71,8 +71,9 @@ static void conv_transpose_1d_f32_f32_sycl( }); } -void ggml_sycl_op_conv_transpose_1d(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, - const ggml_tensor *src1, ggml_tensor *dst) { +void ggml_sycl_op_conv_transpose_1d(ggml_backend_sycl_context & ctx, ggml_tensor *dst) { + const ggml_tensor *src0 = dst->src[0]; + const ggml_tensor *src1 = dst->src[1]; const float * src0_d = (const float *)src0->data; const float * src1_d = (const float *)src1->data; diff --git a/ggml/src/ggml-sycl/conv.hpp b/ggml/src/ggml-sycl/conv.hpp index eb20730f9..f9e60dc75 100644 --- a/ggml/src/ggml-sycl/conv.hpp +++ b/ggml/src/ggml-sycl/conv.hpp @@ -15,7 +15,6 @@ #include "common.hpp" -void ggml_sycl_op_conv_transpose_1d(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, - const ggml_tensor *src1, ggml_tensor *dst); +void ggml_sycl_op_conv_transpose_1d(ggml_backend_sycl_context & ctx, ggml_tensor *dst); #endif // GGML_SYCL_CONV_HPP diff --git a/ggml/src/ggml-sycl/element_wise.cpp b/ggml/src/ggml-sycl/element_wise.cpp index d05a51f80..4bcd74376 100644 --- a/ggml/src/ggml-sycl/element_wise.cpp +++ b/ggml/src/ggml-sycl/element_wise.cpp @@ -882,149 +882,149 @@ inline void ggml_sycl_op_div(ggml_backend_sycl_context & ctx, const ggml_tensor } -void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_sqrt); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_sqrt); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_sin(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_sin); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_sin); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_cos(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_cos); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_cos); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_acc(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_acc); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_acc); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_gelu); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_gelu); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_silu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_silu); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_silu); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_gelu_quick); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_gelu_quick); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_tanh); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_tanh); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_relu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_relu); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_relu); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_sigmoid); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_sigmoid); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_hardsigmoid); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_hardsigmoid); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_hardswish); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_hardswish); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_exp(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_exp); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_exp); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_log(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_log); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_log); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_neg(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_neg); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_neg); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_step(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_step); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_step); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_leaky_relu); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_leaky_relu); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_sqr); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_sqr); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_upscale); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_upscale); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_pad(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_pad); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_pad); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_add(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_add); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_add); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_sub(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_sub(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_sub); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_sub); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_mul(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_mul(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_mul); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_mul); GGML_SYCL_DEBUG("call %s done\n", __func__); } -void ggml_sycl_div(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +void ggml_sycl_div(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_div); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_div); GGML_SYCL_DEBUG("call %s done\n", __func__); } diff --git a/ggml/src/ggml-sycl/element_wise.hpp b/ggml/src/ggml-sycl/element_wise.hpp index 8152edf58..464432645 100644 --- a/ggml/src/ggml-sycl/element_wise.hpp +++ b/ggml/src/ggml-sycl/element_wise.hpp @@ -25,52 +25,52 @@ static __dpct_inline__ float op_div(const float a, const float b) { } -void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_sin(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_cos(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_acc(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_silu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_relu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_exp(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_log(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_neg(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_step(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_pad(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_add(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_sub(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_sub(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_mul(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_mul(ggml_backend_sycl_context & ctx, ggml_tensor * dst); -void ggml_sycl_div(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); +void ggml_sycl_div(ggml_backend_sycl_context & ctx, ggml_tensor * dst); #endif // GGML_SYCL_ELEMENTWISE_HPP diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 312ccfeb8..037c8093e 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -54,18 +54,12 @@ static ggml_sycl_device_info ggml_sycl_init() { GGML_ASSERT(info.device_count <= GGML_SYCL_MAX_DEVICES); int64_t total_vram = 0; -#if defined(GGML_SYCL_FORCE_MMQ) - GGML_LOG_INFO("%s: GGML_SYCL_FORCE_MMQ: yes\n", __func__); -#else - GGML_LOG_INFO("%s: GGML_SYCL_FORCE_MMQ: no\n", __func__); -#endif -#if defined(SYCL_USE_XMX) - GGML_LOG_INFO("%s: SYCL_USE_XMX: yes\n", __func__); -#else - GGML_LOG_INFO("%s: SYCL_USE_XMX: no\n", __func__); -#endif - GGML_LOG_INFO("%s: found %d %s devices:\n", __func__, info.device_count, GGML_SYCL_NAME); - +/* This is a bit misleading; reserved for later */ +// #if defined(SYCL_USE_XMX) +// GGML_LOG_INFO("%s: SYCL_USE_XMX: yes\n", __func__); +// #else +// GGML_LOG_INFO("%s: SYCL_USE_XMX: no\n", __func__); +// #endif for (int i = 0; i < info.device_count; ++i) { info.devices[i].vmm = 0; dpct::device_info prop; @@ -109,11 +103,11 @@ void print_device_detail(int id, sycl::device &device, std::string device_type) name = std::regex_replace(name, std::regex("\\(TM\\)"), ""); auto global_mem_size = prop.get_global_mem_size()/1000000; - - GGML_LOG_INFO("|%2d|%19s|%39s|%7s|%7d|%8d|%5d|%6luM|%21s|\n", id, device_type.c_str(), + std::string xmx = gpu_has_xmx(device) ? "yes" : "no"; + GGML_LOG_INFO("|%2d|%19s|%39s|%7s|%7d|%8d|%5d|%6luM|%21s|%14s|\n", id, device_type.c_str(), name.c_str(), version.c_str(), prop.get_max_compute_units(), prop.get_max_work_group_size(), prop.get_max_sub_group_size(), - global_mem_size, device.get_info().c_str()); + global_mem_size, device.get_info().c_str(), xmx.c_str()); } void ggml_backend_sycl_print_sycl_devices() { @@ -124,16 +118,16 @@ void ggml_backend_sycl_print_sycl_devices() { GGML_LOG_INFO( "| | | | " - " |Max | |Max |Global | |\n"); + " |Max | |Max |Global | | XMX |\n"); GGML_LOG_INFO( "| | | | " - " |compute|Max work|sub |mem | |\n"); + " |compute|Max work|sub |mem | | or |\n"); GGML_LOG_INFO( "|ID| Device Type| " - "Name|Version|units |group |group|size | Driver version|\n"); + "Name|Version|units |group |group|size | Driver version| Tensor Cores |\n"); GGML_LOG_INFO( "|--|-------------------|---------------------------------------|------" - "-|-------|--------|-----|-------|---------------------|\n"); + "-|-------|--------|-----|-------|---------------------|--------------|\n"); for (int id = 0; id < device_count; ++id) { sycl::device device = dpct::dev_mgr::instance().get_device(id); @@ -164,14 +158,18 @@ static void ggml_check_sycl() try { static bool initialized = false; if (!initialized) { - GGML_LOG_INFO("[SYCL] call ggml_check_sycl\n"); + GGML_SYCL_DEBUG("[SYCL] call ggml_check_sycl\n"); g_ggml_sycl_debug = get_sycl_env("GGML_SYCL_DEBUG", 0); - GGML_LOG_INFO("%s: GGML_SYCL_DEBUG: %d\n", __func__, g_ggml_sycl_debug); - -#if defined(GGML_SYCL_F16) - GGML_LOG_INFO("%s: GGML_SYCL_F16: yes\n", __func__); + GGML_LOG_INFO("GGML_SYCL_DEBUG: %d\n", g_ggml_sycl_debug); +#if defined(GGML_SYCL_FORCE_MMQ) + GGML_LOG_INFO("GGML_SYCL_FORCE_MMQ: yes\n"); #else - GGML_LOG_INFO("%s: GGML_SYCL_F16: no\n", __func__); + GGML_LOG_INFO("GGML_SYCL_FORCE_MMQ: no\n"); +#endif +#if defined(GGML_SYCL_F16) + GGML_LOG_INFO("GGML_SYCL_F16: yes\n"); +#else + GGML_LOG_INFO("GGML_SYCL_F16: no\n"); #endif /* NOT REMOVE, keep it for next optimize for XMX. @@ -1189,7 +1187,6 @@ std::unique_ptr ggml_backend_sycl_context::new_pool_for_device(q /// kernels typedef void (*cpy_kernel_t)(const char * cx, char * cdst); -typedef void (*ggml_sycl_func_t)(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst); typedef void (*ggml_sycl_op_mul_mat_t)( ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst, @@ -3171,33 +3168,33 @@ catch (sycl::exception const &exc) { } -static void ggml_sycl_repeat(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +static void ggml_sycl_repeat(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_repeat); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_repeat); GGML_SYCL_DEBUG("call %s done\n", __func__); } -static void ggml_sycl_get_rows(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +static void ggml_sycl_get_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_get_rows); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_get_rows); GGML_SYCL_DEBUG("call %s done\n", __func__); } -static void ggml_sycl_norm(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +static void ggml_sycl_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_norm); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_norm); GGML_SYCL_DEBUG("call %s done\n", __func__); } -static void ggml_sycl_rms_norm(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +static void ggml_sycl_rms_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_rms_norm); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_rms_norm); GGML_SYCL_DEBUG("call %s done\n", __func__); } -static void ggml_sycl_group_norm(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +static void ggml_sycl_group_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_SYCL_DEBUG("call %s\n", __func__); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_group_norm); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_group_norm); GGML_SYCL_DEBUG("call %s done\n", __func__); } @@ -3572,9 +3569,10 @@ __dpct_inline__ static void k_copy_dst_from_contiguous( } } -static void ggml_sycl_mul_mat_id(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, - const ggml_tensor *src1, +static void ggml_sycl_mul_mat_id(ggml_backend_sycl_context & ctx, ggml_tensor *dst) try { + const ggml_tensor *src0 = dst->src[0]; + const ggml_tensor *src1 = dst->src[1]; GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src0->buffer) && "mul_mat_id does not support split buffers"); const ggml_tensor *ids = dst->src[2]; @@ -3740,12 +3738,12 @@ catch (sycl::exception const &exc) { std::exit(1); } -static void ggml_sycl_scale(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_scale); +static void ggml_sycl_scale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_scale); } -static void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_clamp); +static void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_clamp); } static void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1, @@ -3787,7 +3785,6 @@ static void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor *sr ggml_type_name(src0->type), ggml_type_name(src1->type)); GGML_ABORT("fatal error"); } - GGML_UNUSED(dst); } catch (sycl::exception const &exc) { @@ -3796,59 +3793,52 @@ catch (sycl::exception const &exc) { std::exit(1); } -static void ggml_sycl_dup(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { +static void ggml_sycl_dup(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { // TODO: why do we pass dst as src1 here? - ggml_sycl_cpy(ctx, src0, dst, nullptr); - GGML_UNUSED(src1); + ggml_sycl_cpy(ctx, dst->src[0], dst, nullptr); } -static void ggml_sycl_diag_mask_inf(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_diag_mask_inf); +static void ggml_sycl_diag_mask_inf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_diag_mask_inf); } -static void ggml_sycl_soft_max(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_soft_max); +static void ggml_sycl_soft_max(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_soft_max); } -static void ggml_sycl_rope(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); // TODO: this restriction is temporary until non-cont support is implemented - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_rope); +static void ggml_sycl_rope(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + GGML_ASSERT(ggml_is_contiguous(dst->src[0])); // TODO: this restriction is temporary until non-cont support is implemented + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_rope); } -static void ggml_sycl_pool2d(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_pool2d); +static void ggml_sycl_pool2d(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_pool2d); } -static void ggml_sycl_im2col(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_im2col); +static void ggml_sycl_im2col(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_im2col); } -static void ggml_sycl_sum(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_sum); +static void ggml_sycl_sum(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + GGML_ASSERT(ggml_is_contiguous(dst->src[0])); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_sum); } -static void ggml_sycl_sum_rows(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_sum_rows); +static void ggml_sycl_sum_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + GGML_ASSERT(ggml_is_contiguous(dst->src[0])); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_sum_rows); } -static void ggml_sycl_argsort(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_argsort); +static void ggml_sycl_argsort(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + GGML_ASSERT(ggml_is_contiguous(dst->src[0])); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_argsort); } -static void ggml_sycl_argmax(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - GGML_ASSERT(ggml_is_contiguous(src0)); - ggml_sycl_op_flatten(ctx, src0, src1, dst, ggml_sycl_op_argmax); +static void ggml_sycl_argmax(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + GGML_ASSERT(ggml_is_contiguous(dst->src[0])); + ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_argmax); } -static void ggml_sycl_nop(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) { - GGML_UNUSED(src0); - GGML_UNUSED(src1); - GGML_UNUSED(dst); - GGML_UNUSED(ctx); -} void ggml_sycl_set_main_device(const int main_device) try { if (dpct::get_current_device_id() == static_cast (main_device)) { @@ -3871,191 +3861,189 @@ catch (sycl::exception const &exc) { std::exit(1); } -bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tensor * tensor) { +bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tensor * dst) { if (!g_sycl_loaded) return false; - ggml_sycl_func_t func; + if (dst->src[0] != nullptr && ggml_backend_buffer_is_sycl_split(dst->src[0]->buffer)) { + ggml_sycl_set_peer_access(dst->src[1]->ne[1], ctx.device); + } - switch (tensor->op) { + switch (dst->op) { case GGML_OP_ARGMAX: - func = ggml_sycl_argmax; + ggml_sycl_argmax(ctx, dst); break; case GGML_OP_CONV_TRANSPOSE_1D: - func = ggml_sycl_op_conv_transpose_1d; + ggml_sycl_op_conv_transpose_1d(ctx, dst); break; case GGML_OP_REPEAT: - func = ggml_sycl_repeat; + ggml_sycl_repeat(ctx, dst); break; case GGML_OP_GET_ROWS: - func = ggml_sycl_get_rows; + ggml_sycl_get_rows(ctx, dst); break; case GGML_OP_DUP: - func = ggml_sycl_dup; + ggml_sycl_dup(ctx, dst); break; case GGML_OP_ADD: case GGML_OP_ADD1: // TODO: more efficient implementation - func = ggml_sycl_add; + ggml_sycl_add(ctx, dst); break; case GGML_OP_SUB: - func = ggml_sycl_sub; + ggml_sycl_sub(ctx, dst); break; case GGML_OP_ACC: - func = ggml_sycl_acc; + ggml_sycl_acc(ctx, dst); break; case GGML_OP_MUL: - func = ggml_sycl_mul; + ggml_sycl_mul(ctx, dst); break; case GGML_OP_LOG: - func = ggml_sycl_log; + ggml_sycl_log(ctx, dst); break; case GGML_OP_DIV: - func = ggml_sycl_div; + ggml_sycl_div(ctx, dst); break; case GGML_OP_UNARY: - switch (ggml_get_unary_op(tensor)) { + switch (ggml_get_unary_op(dst)) { case GGML_UNARY_OP_NEG: - func = ggml_sycl_neg; + ggml_sycl_neg(ctx, dst); break; case GGML_UNARY_OP_STEP: - func = ggml_sycl_step; + ggml_sycl_step(ctx, dst); break; case GGML_UNARY_OP_GELU: - func = ggml_sycl_gelu; + ggml_sycl_gelu(ctx, dst); break; case GGML_UNARY_OP_SILU: - func = ggml_sycl_silu; + ggml_sycl_silu(ctx, dst); break; case GGML_UNARY_OP_GELU_QUICK: - func = ggml_sycl_gelu_quick; + ggml_sycl_gelu_quick(ctx, dst); break; case GGML_UNARY_OP_TANH: - func = ggml_sycl_tanh; + ggml_sycl_tanh(ctx, dst); break; case GGML_UNARY_OP_RELU: - func = ggml_sycl_relu; + ggml_sycl_relu(ctx, dst); break; case GGML_UNARY_OP_SIGMOID: - func = ggml_sycl_sigmoid; + ggml_sycl_sigmoid(ctx, dst); break; case GGML_UNARY_OP_HARDSIGMOID: - func = ggml_sycl_hardsigmoid; + ggml_sycl_hardsigmoid(ctx, dst); break; case GGML_UNARY_OP_HARDSWISH: - func = ggml_sycl_hardswish; + ggml_sycl_hardswish(ctx, dst); break; case GGML_UNARY_OP_EXP: - func = ggml_sycl_exp; + ggml_sycl_exp(ctx, dst); break; default: return false; } break; case GGML_OP_NORM: - func = ggml_sycl_norm; + ggml_sycl_norm(ctx, dst); break; case GGML_OP_GROUP_NORM: - func = ggml_sycl_group_norm; + ggml_sycl_group_norm(ctx, dst); break; case GGML_OP_CONCAT: - func = ggml_sycl_op_concat; + ggml_sycl_op_concat(ctx, dst); break; case GGML_OP_UPSCALE: - func = ggml_sycl_upscale; + ggml_sycl_upscale(ctx, dst); break; case GGML_OP_PAD: - func = ggml_sycl_pad; + ggml_sycl_pad(ctx, dst); break; case GGML_OP_LEAKY_RELU: - func = ggml_sycl_leaky_relu; + ggml_sycl_leaky_relu(ctx, dst); break; case GGML_OP_RMS_NORM: - func = ggml_sycl_rms_norm; + ggml_sycl_rms_norm(ctx, dst); break; case GGML_OP_MUL_MAT: - if (tensor->src[0]->ne[3] != tensor->src[1]->ne[3]) { + if (dst->src[0]->ne[3] != dst->src[1]->ne[3]) { return false; } - func = ggml_sycl_mul_mat; + /* ggml_sycl_mul_mat_id is dependent on ggml_sycl_mul_mat */ + ggml_sycl_mul_mat(ctx, dst->src[0], dst->src[1], dst); break; case GGML_OP_MUL_MAT_ID: - if (tensor->src[0]->ne[3] != tensor->src[1]->ne[3]) { + if (dst->src[0]->ne[3] != dst->src[1]->ne[3]) { return false; } - func = ggml_sycl_mul_mat_id; + ggml_sycl_mul_mat_id(ctx, dst); break; case GGML_OP_OUT_PROD: - func = ggml_sycl_op_out_prod; + ggml_sycl_op_out_prod(ctx, dst); break; case GGML_OP_SCALE: - func = ggml_sycl_scale; + ggml_sycl_scale(ctx, dst); break; case GGML_OP_SQR: - func = ggml_sycl_sqr; + ggml_sycl_sqr(ctx, dst); break; case GGML_OP_SQRT: - func = ggml_sycl_sqrt; + ggml_sycl_sqrt(ctx, dst); break; case GGML_OP_SIN: - func = ggml_sycl_sin; + ggml_sycl_sin(ctx, dst); break; case GGML_OP_COS: - func = ggml_sycl_cos; + ggml_sycl_cos(ctx, dst); break; case GGML_OP_CLAMP: - func = ggml_sycl_clamp; + ggml_sycl_clamp(ctx, dst); break; case GGML_OP_CPY: - func = ggml_sycl_cpy; + ggml_sycl_cpy(ctx, dst->src[0], dst->src[1], dst); break; case GGML_OP_CONT: - func = ggml_sycl_dup; + ggml_sycl_dup(ctx, dst); break; case GGML_OP_NONE: case GGML_OP_RESHAPE: case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: - func = ggml_sycl_nop; + GGML_SYCL_DEBUG("%s: Tensor NO-OP\n", __func__); break; case GGML_OP_DIAG_MASK_INF: - func = ggml_sycl_diag_mask_inf; + ggml_sycl_diag_mask_inf(ctx, dst); break; case GGML_OP_SOFT_MAX: - func = ggml_sycl_soft_max; + ggml_sycl_soft_max(ctx, dst); break; case GGML_OP_ROPE: - func = ggml_sycl_rope; + ggml_sycl_rope(ctx, dst); break; case GGML_OP_IM2COL: - func = ggml_sycl_im2col; + ggml_sycl_im2col(ctx, dst); break; case GGML_OP_POOL_2D: - func = ggml_sycl_pool2d; + ggml_sycl_pool2d(ctx, dst); break; case GGML_OP_SUM: - func = ggml_sycl_sum; + ggml_sycl_sum(ctx, dst); break; case GGML_OP_SUM_ROWS: - func = ggml_sycl_sum_rows; + ggml_sycl_sum_rows(ctx, dst); break; case GGML_OP_ARGSORT: - func = ggml_sycl_argsort; + ggml_sycl_argsort(ctx, dst); break; case GGML_OP_TIMESTEP_EMBEDDING: - func = ggml_sycl_op_timestep_embedding; + ggml_sycl_op_timestep_embedding(ctx, dst); break; case GGML_OP_RWKV_WKV6: - func = ggml_sycl_op_rwkv_wkv6; + ggml_sycl_op_rwkv_wkv6(ctx, dst); break; default: return false; } - if (tensor->src[0] != nullptr && ggml_backend_buffer_is_sycl_split(tensor->src[0]->buffer)) { - ggml_sycl_set_peer_access(tensor->src[1]->ne[1], ctx.device); - } - - func(ctx, tensor->src[0], tensor->src[1], tensor); return true; } diff --git a/ggml/src/ggml-sycl/outprod.cpp b/ggml/src/ggml-sycl/outprod.cpp index ef9af0b76..8e8347ff4 100644 --- a/ggml/src/ggml-sycl/outprod.cpp +++ b/ggml/src/ggml-sycl/outprod.cpp @@ -3,9 +3,9 @@ #include "outprod.hpp" -void ggml_sycl_op_out_prod(ggml_backend_sycl_context& ctx, const ggml_tensor* src0, - const ggml_tensor* src1, ggml_tensor* dst) { - +void ggml_sycl_op_out_prod(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { + const ggml_tensor *src0 = dst->src[0]; + const ggml_tensor *src1 = dst->src[1]; GGML_ASSERT(src0->type == GGML_TYPE_F32); GGML_ASSERT(src1->type == GGML_TYPE_F32); diff --git a/ggml/src/ggml-sycl/outprod.hpp b/ggml/src/ggml-sycl/outprod.hpp index 9c042738a..f50413d3f 100644 --- a/ggml/src/ggml-sycl/outprod.hpp +++ b/ggml/src/ggml-sycl/outprod.hpp @@ -3,8 +3,7 @@ #include "common.hpp" -void ggml_sycl_op_out_prod(ggml_backend_sycl_context& ctx, const ggml_tensor* src0, - const ggml_tensor* src1, ggml_tensor* dst); +void ggml_sycl_op_out_prod(ggml_backend_sycl_context& ctx, ggml_tensor* dst); #endif // GGML_SYCL_OUTPROD_HPP diff --git a/ggml/src/ggml-sycl/tsembd.cpp b/ggml/src/ggml-sycl/tsembd.cpp index 2ffe3cca9..b877d18c1 100644 --- a/ggml/src/ggml-sycl/tsembd.cpp +++ b/ggml/src/ggml-sycl/tsembd.cpp @@ -55,8 +55,9 @@ static void timestep_embedding_f32_sycl( }); } -void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, - const ggml_tensor *src1, ggml_tensor * dst) { +void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { + const ggml_tensor *src0 = dst->src[0]; + const ggml_tensor *src1 = dst->src[1]; const float * src0_d = (const float *)src0->data; float * dst_d = (float *)dst->data; dpct::queue_ptr stream = ctx.stream(); diff --git a/ggml/src/ggml-sycl/tsembd.hpp b/ggml/src/ggml-sycl/tsembd.hpp index ff854c337..4c18748bb 100644 --- a/ggml/src/ggml-sycl/tsembd.hpp +++ b/ggml/src/ggml-sycl/tsembd.hpp @@ -15,7 +15,6 @@ #include "common.hpp" -void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, - const ggml_tensor *src1, ggml_tensor * dst); +void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, ggml_tensor * dst); #endif // GGML_SYCL_TSEMBD_HPP diff --git a/ggml/src/ggml-sycl/wkv6.cpp b/ggml/src/ggml-sycl/wkv6.cpp index 105db6f03..4fed18c2a 100644 --- a/ggml/src/ggml-sycl/wkv6.cpp +++ b/ggml/src/ggml-sycl/wkv6.cpp @@ -95,8 +95,10 @@ static void rwkv_wkv_f32_kernel( } } -void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, const ggml_tensor* src0, - const ggml_tensor* src1, ggml_tensor* dst) { +void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { + + const ggml_tensor *src0 = dst->src[0]; + const ggml_tensor *src1 = dst->src[1]; const float* k_d = (const float*)dst->src[0]->data; const float* v_d = (const float*)dst->src[1]->data; diff --git a/ggml/src/ggml-sycl/wkv6.hpp b/ggml/src/ggml-sycl/wkv6.hpp index ddfa3377b..8c596a997 100644 --- a/ggml/src/ggml-sycl/wkv6.hpp +++ b/ggml/src/ggml-sycl/wkv6.hpp @@ -3,8 +3,7 @@ #include "common.hpp" -void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, - const ggml_tensor *src1, ggml_tensor * dst); +void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context & ctx, ggml_tensor * dst); #endif // GGML_SYCL_WKV6_HPP From ee7136c6d1e0ba7633294dad137b1573048031ec Mon Sep 17 00:00:00 2001 From: Molly Sophia Date: Fri, 10 Jan 2025 09:58:08 +0800 Subject: [PATCH 059/279] llama: add support for QRWKV6 model architecture (#11001) llama: add support for QRWKV6 model architecture (#11001) * WIP: Add support for RWKV6Qwen2 Signed-off-by: Molly Sophia * RWKV: Some graph simplification Signed-off-by: Molly Sophia * Add support for RWKV6Qwen2 with cpu and cuda GLA Signed-off-by: Molly Sophia * RWKV6[QWEN2]: Concat lerp weights together to reduce cpu overhead Signed-off-by: Molly Sophia * Fix some typos Signed-off-by: Molly Sophia * code format changes Signed-off-by: Molly Sophia * Fix wkv test & add gla test Signed-off-by: Molly Sophia * Fix cuda warning Signed-off-by: Molly Sophia * Update README.md Signed-off-by: Molly Sophia * Update ggml/src/ggml-cuda/gla.cu Co-authored-by: Georgi Gerganov * Fix fused lerp weights loading with RWKV6 Signed-off-by: Molly Sophia * better sanity check skipping for QRWKV6 in llama-quant thanks @compilade Signed-off-by: Molly Sophia Co-authored-by: compilade --------- Signed-off-by: Molly Sophia Co-authored-by: Georgi Gerganov Co-authored-by: compilade --- README.md | 1 + convert_hf_to_gguf.py | 81 ++++++- ggml/include/ggml.h | 10 + ggml/src/ggml-cpu/ggml-cpu.c | 200 +++++++++++++++- ggml/src/ggml-cuda/ggml-cuda.cu | 5 + ggml/src/ggml-cuda/gla.cu | 93 +++++++ ggml/src/ggml-cuda/gla.cuh | 3 + ggml/src/ggml-cuda/wkv6.cu | 4 +- ggml/src/ggml-sycl/wkv6.cpp | 4 +- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 4 +- ggml/src/ggml.c | 61 ++++- gguf-py/gguf/constants.py | 35 +++ gguf-py/gguf/gguf_writer.py | 3 + gguf-py/gguf/tensor_mapping.py | 26 +- src/llama-arch.cpp | 30 +++ src/llama-arch.h | 3 + src/llama-hparams.cpp | 2 +- src/llama-hparams.h | 1 + src/llama-model.cpp | 8 +- src/llama-model.h | 20 +- src/llama-quant.cpp | 4 +- src/llama.cpp | 346 ++++++++++++++++++++------- tests/test-backend-ops.cpp | 42 +++- 23 files changed, 862 insertions(+), 124 deletions(-) create mode 100644 ggml/src/ggml-cuda/gla.cu create mode 100644 ggml/src/ggml-cuda/gla.cuh diff --git a/README.md b/README.md index a71015256..6302ac977 100644 --- a/README.md +++ b/README.md @@ -99,6 +99,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - [x] [Jais](https://huggingface.co/inceptionai/jais-13b-chat) - [x] [Bielik-11B-v2.3](https://huggingface.co/collections/speakleash/bielik-11b-v23-66ee813238d9b526a072408a) - [x] [RWKV-6](https://github.com/BlinkDL/RWKV-LM) +- [x] [QRWKV-6](https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1) - [x] [GigaChat-20B-A3B](https://huggingface.co/ai-sage/GigaChat-20B-A3B-instruct) #### Multimodal diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 5562499aa..cf317eeae 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -326,6 +326,7 @@ class Model: gguf.MODEL_TENSOR.TIME_MIX_W2, gguf.MODEL_TENSOR.TIME_MIX_DECAY_W1, gguf.MODEL_TENSOR.TIME_MIX_DECAY_W2, + gguf.MODEL_TENSOR.TIME_MIX_LERP_FUSED, gguf.MODEL_TENSOR.POSNET_NORM1, gguf.MODEL_TENSOR.POSNET_NORM2, ) @@ -3316,6 +3317,8 @@ class Rwkv6Model(Model): # required by llama.cpp, unused self.gguf_writer.add_head_count(0) + lerp_weights: dict[int, dict[str, Tensor]] = {} + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: new_name = self.map_tensor_name(name) @@ -3331,14 +3334,84 @@ class Rwkv6Model(Model): if new_name.endswith("time_mix_decay.weight") or "lerp" in new_name: data_torch = data_torch.squeeze() - rescale_every_n_layers = self.hparams["rescale_every"] - if rescale_every_n_layers > 0: - if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"): - data_torch = data_torch.div_(2 ** int(bid // rescale_every_n_layers)) + try: + rescale_every_n_layers = self.hparams["rescale_every"] + if rescale_every_n_layers > 0: + if new_name.endswith("time_mix_output.weight") or new_name.endswith("channel_mix_value.weight"): + data_torch = data_torch.div_(2 ** int(bid // rescale_every_n_layers)) + except KeyError: + pass + + # concat time_mix_lerp weights to reduce some cpu overhead + # also reduces the number of tensors in the model + if bid is not None and "time_mix_lerp" in new_name and "time_mix_lerp_x" not in new_name: + try: + self.lerp_weights[bid][new_name] = data_torch + except KeyError: + self.lerp_weights[bid] = {new_name: data_torch} + if all(f"blk.{bid}.time_mix_lerp_{i}.weight" in self.lerp_weights[bid].keys() for i in ["w", "k", "v", "r", "g"]): + new_name = f"blk.{bid}.time_mix_lerp_fused.weight" + data = torch.stack([self.lerp_weights[bid][f"blk.{bid}.time_mix_lerp_{i}.weight"].unsqueeze(0) for i in ["w", "k", "v", "r", "g"]], dim=0).unsqueeze(1) + yield (new_name, data) + return yield (new_name, data_torch) +@Model.register("RWKV6Qwen2ForCausalLM") +class RWKV6Qwen2Model(Rwkv6Model): + model_arch = gguf.MODEL_ARCH.RWKV6QWEN2 + + def set_vocab(self): + try: + self._set_vocab_sentencepiece() + except FileNotFoundError: + self._set_vocab_gpt2() + + def set_gguf_parameters(self): + block_count = self.hparams["num_hidden_layers"] + num_attention_heads = self.hparams["num_attention_heads"] + num_key_value_heads = self.hparams["num_key_value_heads"] + hidden_size = self.hparams["hidden_size"] + head_size = hidden_size // num_attention_heads + rms_norm_eps = self.hparams["rms_norm_eps"] + intermediate_size = self.hparams["intermediate_size"] + time_mix_extra_dim = 64 if hidden_size >= 4096 else 32 + time_decay_extra_dim = 128 if hidden_size >= 4096 else 64 + + # RWKV isn't context limited + self.gguf_writer.add_context_length(1048576) + self.gguf_writer.add_embedding_length(hidden_size) + self.gguf_writer.add_block_count(block_count) + self.gguf_writer.add_wkv_head_size(head_size) + self.gguf_writer.add_time_mix_extra_dim(time_mix_extra_dim) + self.gguf_writer.add_time_decay_extra_dim(time_decay_extra_dim) + self.gguf_writer.add_feed_forward_length(intermediate_size) + self.gguf_writer.add_file_type(self.ftype) + + # special parameters for time_mixing in RWKV6QWEN2 + self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps) + self.gguf_writer.add_token_shift_count(1) + # RWKV6QWEN2 use grouped key/value like GQA + self.gguf_writer.add_head_count_kv(num_key_value_heads) + + # required by llama.cpp, unused + self.gguf_writer.add_head_count(0) + + def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: + for new_name, data in super().modify_tensors(data_torch, name, bid): + if "time_mix_w1" in new_name or "time_mix_w2" in new_name: + data = data.view(5, -1, data.shape[-1]) + # rwkv6qwen2 has a different order of rkvwg instead of the original wkvrg + # permute them here to avoid code changes + data = torch.stack([data[3], data[1], data[2], data[0], data[4]], dim=0).view(-1, data.shape[-1]) + if "w2" in new_name: + data = data.view(5, -1, data.shape[-1]) + yield (new_name, data) + continue + yield (new_name, data) + + @Model.register("MambaForCausalLM", "MambaLMHeadModel", "FalconMambaForCausalLM") class MambaModel(Model): model_arch = gguf.MODEL_ARCH.MAMBA diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 8630d92c5..8f8cb9e1a 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -501,6 +501,7 @@ extern "C" { GGML_OP_GET_REL_POS, GGML_OP_ADD_REL_POS, GGML_OP_RWKV_WKV6, + GGML_OP_GATED_LINEAR_ATTN, GGML_OP_UNARY, @@ -1859,6 +1860,15 @@ extern "C" { struct ggml_tensor * td, struct ggml_tensor * state); + GGML_API struct ggml_tensor * ggml_gated_linear_attn( + struct ggml_context * ctx, + struct ggml_tensor * k, + struct ggml_tensor * v, + struct ggml_tensor * q, + struct ggml_tensor * g, + struct ggml_tensor * state, + float scale); + // custom operators typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *); diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index b7fefb9dd..2966ff768 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -11803,9 +11803,9 @@ static void ggml_compute_forward_add_rel_pos( static void ggml_compute_forward_rwkv_wkv6_f32( const struct ggml_compute_params * params, struct ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[3]; + const int64_t T = dst->src[1]->ne[2]; const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[2]; + const int64_t HEADS = dst->src[1]->ne[1]; const int64_t n_seqs = dst->src[5]->ne[1]; const int64_t head_size = C / HEADS; @@ -12000,6 +12000,197 @@ static void ggml_compute_forward_rwkv_wkv6( } } +// ggml_compute_forward_gla + +static void ggml_compute_forward_gla_f32( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[4]->ne[1]; + const int64_t head_size = C / HEADS; + const float scale = ggml_get_op_params_f32(dst, 0); + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? + (HEADS * (ith + 1)) / nth : HEADS; + + float * k = (float *) dst->src[0]->data; + float * v = (float *) dst->src[1]->data; + float * q = (float *) dst->src[2]->data; + float * g = (float *) dst->src[3]->data; + + size_t t_stride = HEADS * head_size; // Same to C + + size_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + size_t h_stride_2d = head_size * head_size; + + if (ith == 0) { + memset(dst_data, 0, T * C * sizeof(float)); + } + ggml_barrier(params->threadpool); + + + #if defined(__AVX__) && !defined(__AVX512F__) + #define GGML_F32X GGML_F32x8 + #define GGML_F32X_SET1 GGML_F32x8_SET1 + #define GGML_F32X_LOAD GGML_F32x8_LOAD + #define GGML_F32X_STORE GGML_F32x8_STORE + #define GGML_F32X_MUL GGML_F32x8_MUL + #define GGML_F32X_FMA GGML_F32x8_FMA + #define GLA_VECTOR_SIZE 8 + #elif defined(__AVX512F__) + #define GGML_F32X GGML_F32x16 + #define GGML_F32X_SET1 GGML_F32x16_SET1 + #define GGML_F32X_LOAD GGML_F32x16_LOAD + #define GGML_F32X_STORE GGML_F32x16_STORE + #define GGML_F32X_MUL GGML_F32x16_MUL + #define GGML_F32X_FMA GGML_F32x16_FMA + #define GLA_VECTOR_SIZE 16 + #elif defined(__ARM_NEON) && defined(__aarch64__) + #define GGML_F32X GGML_F32x4 + #define GGML_F32X_SET1 GGML_F32x4_SET1 + #define GGML_F32X_LOAD GGML_F32x4_LOAD + #define GGML_F32X_STORE GGML_F32x4_STORE + #define GGML_F32X_MUL GGML_F32x4_MUL + #define GGML_F32X_FMA GGML_F32x4_FMA + #define GLA_VECTOR_SIZE 4 + #endif + + #ifdef GLA_VECTOR_SIZE + const int64_t vec_count = head_size / GLA_VECTOR_SIZE; + + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[4]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float q_val = q[t_h_i_offset] * scale; + float g_val = g[t_h_i_offset]; + + // Broadcast scalar values to vectors + GGML_F32X k_vec = GGML_F32X_SET1(k_val); + GGML_F32X q_vec = GGML_F32X_SET1(q_val); + GGML_F32X g_vec = GGML_F32X_SET1(g_val); + + for (int64_t j = 0; j < vec_count; j++) { + size_t base_j = j * GLA_VECTOR_SIZE; + size_t t_h_j_offset = t_h_offset + base_j; + size_t h_2d_i_j_offset = h_2d_i_offset + base_j; + + // Load x elements at once + GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); + GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); + GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); + + // Compute kv = v * k + GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); + + // Compute temp = prev_state * g + kv + GGML_F32X temp_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, g_vec); + + // Update dst: dst += temp * q + dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, q_vec); + GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); + + // Update state + GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], temp_vec); + } + + // Handle remaining elements, this will not be used. + for (int64_t j = vec_count * GLA_VECTOR_SIZE; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val + prev_state_val * g_val; + dst_data[t_h_j_offset] += temp_val * q_val; + state_cur[h_2d_i_j_offset] = temp_val; + } + } + } + } + + #else + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float*)dst->src[4]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float q_val = q[t_h_i_offset] * scale; + float g_val = g[t_h_i_offset]; + + for (int64_t j = 0; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = prev_state_val * g_val + kv_val; + dst_data[t_h_j_offset] += temp_val * q_val; + state_cur[h_2d_i_j_offset] = temp_val; + } + } + } + } + #endif +} + + +static void ggml_compute_forward_gla( + const struct ggml_compute_params * params, + struct ggml_tensor * dst) { + + const struct ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gla_f32(params, dst); + } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + // ggml_compute_forward_map_unary static void ggml_compute_forward_map_unary_f32( @@ -12749,6 +12940,10 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm { ggml_compute_forward_rwkv_wkv6(params, tensor); } break; + case GGML_OP_GATED_LINEAR_ATTN: + { + ggml_compute_forward_gla(params, tensor); + } break; case GGML_OP_MAP_UNARY: { ggml_unary_op_f32_t fun; @@ -13047,6 +13242,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) { case GGML_OP_WIN_UNPART: case GGML_OP_GET_REL_POS: case GGML_OP_RWKV_WKV6: + case GGML_OP_GATED_LINEAR_ATTN: case GGML_OP_MAP_UNARY: case GGML_OP_MAP_BINARY: case GGML_OP_MAP_CUSTOM1_F32: diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 0b06be729..8476ee1bc 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -37,6 +37,7 @@ #include "ggml-cuda/unary.cuh" #include "ggml-cuda/upscale.cuh" #include "ggml-cuda/wkv6.cuh" +#include "ggml-cuda/gla.cuh" #include #include @@ -2167,6 +2168,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg case GGML_OP_RWKV_WKV6: ggml_cuda_op_rwkv_wkv6(ctx, dst); break; + case GGML_OP_GATED_LINEAR_ATTN: + ggml_cuda_op_gated_linear_attn(ctx, dst); + break; case GGML_OP_CROSS_ENTROPY_LOSS_BACK: ggml_cuda_cross_entropy_loss_back(ctx, dst); break; @@ -3011,6 +3015,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_LEAKY_RELU: case GGML_OP_RWKV_WKV6: + case GGML_OP_GATED_LINEAR_ATTN: return true; case GGML_OP_FLASH_ATTN_EXT: { #ifndef FLASH_ATTN_AVAILABLE diff --git a/ggml/src/ggml-cuda/gla.cu b/ggml/src/ggml-cuda/gla.cu new file mode 100644 index 000000000..f7d615a82 --- /dev/null +++ b/ggml/src/ggml-cuda/gla.cu @@ -0,0 +1,93 @@ +#include "common.cuh" +#include "gla.cuh" + +template +static __global__ void gated_linear_attn_f32(const int B, const int T, const int C, const int H, const float scale, + const float * k, const float * v, const float * r, const float * td, const float * s, float * dst) { + const int tid = threadIdx.x; + const int bid = blockIdx.x; + + const int head_size = HEAD_SIZE; + const int batch_i = bid / H; + const int head_i = bid % H; + const int state_size = C * head_size; + const int n_seq_tokens = T / B; + + float state[head_size]; + __shared__ float _k[head_size], _r[head_size], _td[head_size]; + + #pragma unroll + for (int i = 0; i < head_size; i++) { + state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid]; + } + + for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid; t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) { + __syncthreads(); + _k[tid] = k[t]; + _r[tid] = r[t]; + _td[tid] = td[t]; + __syncthreads(); + + const float _v = v[t]; + float y = 0; + for (int j = 0; j < head_size; j += 4) { + const float4 & k = (float4 &)(_k[j]); + const float4 & r = (float4 &)(_r[j]); + const float4 & td = (float4 &)(_td[j]); + float4 & s = (float4 &)(state[j]); + float4 kv; + + kv.x = k.x * _v; + kv.y = k.y * _v; + kv.z = k.z * _v; + kv.w = k.w * _v; + + s.x = s.x * td.x + kv.x; + s.y = s.y * td.y + kv.y; + s.z = s.z * td.z + kv.z; + s.w = s.w * td.w + kv.w; + + y += r.x * s.x; + y += r.y * s.y; + y += r.z * s.z; + y += r.w * s.w; + } + dst[t] = y * scale; + } + + #pragma unroll + for (int i = 0; i < head_size; i++) { + dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i]; + } +} + +void ggml_cuda_op_gated_linear_attn(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const float * k_d = (const float *)dst->src[0]->data; + const float * v_d = (const float *)dst->src[1]->data; + const float * r_d = (const float *)dst->src[2]->data; + const float * td_d = (const float *)dst->src[3]->data; + const float * s_d = (const float *)dst->src[4]->data; + + const int64_t B = dst->src[4]->ne[1]; + const int64_t T = dst->src[0]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t H = dst->src[0]->ne[1]; + + float scale; + memcpy(&scale, (float*)dst->op_params, sizeof(float)); + + float * dst_d = (float *)dst->data; + + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(dst->src[4]->type == GGML_TYPE_F32); + GGML_ASSERT(C % H == 0); + GGML_ASSERT(C / H == 64 || C / H == 128); + + + if (C / H == 64) { + gated_linear_attn_f32<64><<>>(B, T, C, H, scale, k_d, v_d, r_d, td_d, s_d, dst_d); + } else { + gated_linear_attn_f32<128><<>>(B, T, C, H, scale, k_d, v_d, r_d, td_d, s_d, dst_d); + } +} diff --git a/ggml/src/ggml-cuda/gla.cuh b/ggml/src/ggml-cuda/gla.cuh new file mode 100644 index 000000000..2c82ad7dd --- /dev/null +++ b/ggml/src/ggml-cuda/gla.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_op_gated_linear_attn(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-cuda/wkv6.cu b/ggml/src/ggml-cuda/wkv6.cu index 42578341a..bbdafbee5 100644 --- a/ggml/src/ggml-cuda/wkv6.cu +++ b/ggml/src/ggml-cuda/wkv6.cu @@ -73,9 +73,9 @@ void ggml_cuda_op_rwkv_wkv6(ggml_backend_cuda_context & ctx, ggml_tensor * dst) const float * s_d = (const float *)dst->src[5]->data; const int64_t B = dst->src[5]->ne[1]; - const int64_t T = dst->src[0]->ne[3]; + const int64_t T = dst->src[0]->ne[2]; const int64_t C = dst->ne[0]; - const int64_t H = dst->src[0]->ne[2]; + const int64_t H = dst->src[0]->ne[1]; float * dst_d = (float *)dst->data; diff --git a/ggml/src/ggml-sycl/wkv6.cpp b/ggml/src/ggml-sycl/wkv6.cpp index 4fed18c2a..b54c20964 100644 --- a/ggml/src/ggml-sycl/wkv6.cpp +++ b/ggml/src/ggml-sycl/wkv6.cpp @@ -109,9 +109,9 @@ void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, ggml_tensor* dst) { float* dst_d = (float*)dst->data; const int64_t B = dst->src[5]->ne[1]; - const int64_t T = dst->src[0]->ne[3]; + const int64_t T = dst->src[0]->ne[2]; const int64_t C = dst->ne[0]; - const int64_t H = dst->src[0]->ne[2]; + const int64_t H = dst->src[0]->ne[1]; GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32); GGML_ASSERT(C % H == 0); diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 077452424..1b9174682 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -5633,9 +5633,9 @@ static void ggml_vk_op_f32_rwkv6(ggml_backend_vk_context * ctx, vk_context& subc } static void ggml_vk_rwkv_wkv6(ggml_backend_vk_context * ctx, vk_context& subctx, ggml_tensor * dst, bool dryrun = false) { - const size_t seq_length = dst->src[0]->ne[3]; + const size_t seq_length = dst->src[0]->ne[2]; const size_t n_embed = dst->ne[0]; - const size_t n_heads = dst->src[0]->ne[2]; + const size_t n_heads = dst->src[0]->ne[1]; const size_t n_seqs = dst->src[5]->ne[1]; ggml_vk_op_f32_rwkv6( diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 90abc6ad4..da5b817e1 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -968,6 +968,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "GET_REL_POS", "ADD_REL_POS", "RWKV_WKV6", + "GATED_LINEAR_ATTN", "UNARY", @@ -987,7 +988,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = { "OPT_STEP_ADAMW", }; -static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); +static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "none", @@ -1064,6 +1065,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "get_rel_pos(x)", "add_rel_pos(x)", "rwkv_wkv6(k, v, r, tf, td, s)", + "gated_linear_attn(k, v, q, gate, s)", "unary(x)", @@ -1083,7 +1085,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = { "adamw(x)", }; -static_assert(GGML_OP_COUNT == 82, "GGML_OP_COUNT != 82"); +static_assert(GGML_OP_COUNT == 83, "GGML_OP_COUNT != 83"); static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2"); @@ -4629,15 +4631,13 @@ struct ggml_tensor * ggml_rwkv_wkv6( GGML_ASSERT(ggml_is_contiguous(state)); const int64_t S = k->ne[0]; - const int64_t H = k->ne[2]; - const int64_t n_tokens = k->ne[3]; + const int64_t H = k->ne[1]; + const int64_t n_tokens = k->ne[2]; const int64_t n_seqs = state->ne[1]; { - GGML_ASSERT(k->ne[1] == 1); - GGML_ASSERT(v->ne[0] == 1 && v->ne[1] == S && v->ne[2] == H && v->ne[3] == n_tokens); - GGML_ASSERT(r->ne[0] == 1 && r->ne[1] == S && r->ne[2] == H && r->ne[3] == n_tokens); - // TODO: RWKV v4 and v5 - GGML_ASSERT(td->ne[0] == 1 && td->ne[1] == S && td->ne[2] == H && td->ne[3] == n_tokens); + GGML_ASSERT(v->ne[0] == S && v->ne[1] == H && v->ne[2] == n_tokens); + GGML_ASSERT(r->ne[0] == S && r->ne[1] == H && r->ne[2] == n_tokens); + GGML_ASSERT(td->ne[0] == S && td->ne[1] == H && td->ne[2] == n_tokens); GGML_ASSERT(ggml_nelements(state) == S * S * H * n_seqs); } @@ -4656,6 +4656,49 @@ struct ggml_tensor * ggml_rwkv_wkv6( return result; } +// ggml_gated_linear_attn + +struct ggml_tensor * ggml_gated_linear_attn( + struct ggml_context * ctx, + struct ggml_tensor * k, + struct ggml_tensor * v, + struct ggml_tensor * q, + struct ggml_tensor * g, + struct ggml_tensor * state, + float scale) { + GGML_ASSERT(ggml_is_contiguous(k)); + GGML_ASSERT(ggml_is_contiguous(v)); + GGML_ASSERT(ggml_is_contiguous(q)); + GGML_ASSERT(ggml_is_contiguous(g)); + GGML_ASSERT(ggml_is_contiguous(state)); + + const int64_t S = k->ne[0]; + const int64_t H = k->ne[1]; + const int64_t n_tokens = k->ne[2]; + const int64_t n_seqs = state->ne[1]; + { + GGML_ASSERT(v->ne[0] == S && v->ne[1] == H && v->ne[2] == n_tokens); + GGML_ASSERT(q->ne[0] == S && q->ne[1] == H && q->ne[2] == n_tokens); + GGML_ASSERT(g->ne[0] == S && g->ne[1] == H && g->ne[2] == n_tokens); + GGML_ASSERT(ggml_nelements(state) == S * S * H * n_seqs); + } + + // concat output and new_state + const int64_t ne[4] = { S * H, n_tokens + S * n_seqs, 1, 1 }; + struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); + + ggml_set_op_params_f32(result, 0, scale); + + result->op = GGML_OP_GATED_LINEAR_ATTN; + result->src[0] = k; + result->src[1] = v; + result->src[2] = q; + result->src[3] = g; + result->src[4] = state; + + return result; +} + // ggml_unary static struct ggml_tensor * ggml_unary_impl( diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index cf05bf47e..56aa9288d 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -115,6 +115,7 @@ class Keys: TIME_DECAY_EXTRA_DIM = "{arch}.time_decay_extra_dim" RESIDUAL_SCALE = "{arch}.residual_scale" EMBEDDING_SCALE = "{arch}.embedding_scale" + TOKEN_SHIFT_COUNT = "{arch}.token_shift_count" class Attention: HEAD_COUNT = "{arch}.attention.head_count" @@ -255,6 +256,7 @@ class MODEL_ARCH(IntEnum): GEMMA2 = auto() STARCODER2 = auto() RWKV6 = auto() + RWKV6QWEN2 = auto() MAMBA = auto() XVERSE = auto() COMMAND_R = auto() @@ -334,6 +336,7 @@ class MODEL_TENSOR(IntEnum): TIME_MIX_LERP_V = auto() TIME_MIX_LERP_R = auto() TIME_MIX_LERP_G = auto() + TIME_MIX_LERP_FUSED = auto() TIME_MIX_LERP_W = auto() TIME_MIX_FIRST = auto() TIME_MIX_DECAY = auto() @@ -440,6 +443,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = { MODEL_ARCH.GEMMA2: "gemma2", MODEL_ARCH.STARCODER2: "starcoder2", MODEL_ARCH.RWKV6: "rwkv6", + MODEL_ARCH.RWKV6QWEN2: "rwkv6qwen2", MODEL_ARCH.MAMBA: "mamba", MODEL_ARCH.XVERSE: "xverse", MODEL_ARCH.COMMAND_R: "command-r", @@ -519,6 +523,7 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = { MODEL_TENSOR.TIME_MIX_LERP_V: "blk.{bid}.time_mix_lerp_v", MODEL_TENSOR.TIME_MIX_LERP_R: "blk.{bid}.time_mix_lerp_r", MODEL_TENSOR.TIME_MIX_LERP_G: "blk.{bid}.time_mix_lerp_g", + MODEL_TENSOR.TIME_MIX_LERP_FUSED: "blk.{bid}.time_mix_lerp_fused", MODEL_TENSOR.TIME_MIX_LERP_W: "blk.{bid}.time_mix_lerp_w", MODEL_TENSOR.TIME_MIX_FIRST: "blk.{bid}.time_mix_first", MODEL_TENSOR.TIME_MIX_DECAY: "blk.{bid}.time_mix_decay", @@ -1103,6 +1108,7 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.TIME_MIX_LERP_R, MODEL_TENSOR.TIME_MIX_LERP_G, MODEL_TENSOR.TIME_MIX_LERP_W, + MODEL_TENSOR.TIME_MIX_LERP_FUSED, MODEL_TENSOR.TIME_MIX_FIRST, MODEL_TENSOR.TIME_MIX_DECAY, MODEL_TENSOR.TIME_MIX_DECAY_W1, @@ -1119,6 +1125,35 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.CHANNEL_MIX_RECEPTANCE, MODEL_TENSOR.CHANNEL_MIX_VALUE, ], + MODEL_ARCH.RWKV6QWEN2: [ + MODEL_TENSOR.TOKEN_EMBD, + MODEL_TENSOR.OUTPUT_NORM, + MODEL_TENSOR.OUTPUT, + MODEL_TENSOR.ATTN_NORM, + MODEL_TENSOR.TIME_MIX_W1, + MODEL_TENSOR.TIME_MIX_W2, + MODEL_TENSOR.TIME_MIX_LERP_X, + MODEL_TENSOR.TIME_MIX_LERP_K, + MODEL_TENSOR.TIME_MIX_LERP_V, + MODEL_TENSOR.TIME_MIX_LERP_R, + MODEL_TENSOR.TIME_MIX_LERP_G, + MODEL_TENSOR.TIME_MIX_LERP_W, + MODEL_TENSOR.TIME_MIX_LERP_FUSED, + MODEL_TENSOR.TIME_MIX_FIRST, + MODEL_TENSOR.TIME_MIX_DECAY, + MODEL_TENSOR.TIME_MIX_DECAY_W1, + MODEL_TENSOR.TIME_MIX_DECAY_W2, + MODEL_TENSOR.TIME_MIX_KEY, + MODEL_TENSOR.TIME_MIX_VALUE, + MODEL_TENSOR.TIME_MIX_RECEPTANCE, + MODEL_TENSOR.TIME_MIX_GATE, + MODEL_TENSOR.TIME_MIX_LN, + MODEL_TENSOR.TIME_MIX_OUTPUT, + MODEL_TENSOR.FFN_NORM, + MODEL_TENSOR.FFN_GATE, + MODEL_TENSOR.FFN_DOWN, + MODEL_TENSOR.FFN_UP, + ], MODEL_ARCH.MAMBA: [ MODEL_TENSOR.TOKEN_EMBD, MODEL_TENSOR.OUTPUT_NORM, diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 4a0a65e3c..bf851c92c 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -743,6 +743,9 @@ class GGUFWriter: def add_wkv_head_size(self, size: int) -> None: self.add_uint32(Keys.WKV.HEAD_SIZE.format(arch=self.arch), size) + def add_token_shift_count(self, count: int) -> None: + self.add_uint32(Keys.LLM.TOKEN_SHIFT_COUNT.format(arch=self.arch), count) + def add_layer_norm_eps(self, value: float) -> None: self.add_float32(Keys.Attention.LAYERNORM_EPS.format(arch=self.arch), value) diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py index 7616c468a..617791e24 100644 --- a/gguf-py/gguf/tensor_mapping.py +++ b/gguf-py/gguf/tensor_mapping.py @@ -13,7 +13,7 @@ class TensorNameMap: "transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais exaone "transformer.word_embeddings", # falcon "word_embeddings", # bloom - "model.embed_tokens", # llama-hf nemotron olmoe olmo2 + "model.embed_tokens", # llama-hf nemotron olmoe olmo2 rwkv6qwen2 "tok_embeddings", # llama-pth "embeddings.word_embeddings", # bert nomic-bert "language_model.embedding.word_embeddings", # persimmon @@ -464,34 +464,42 @@ class TensorNameMap: MODEL_TENSOR.TIME_MIX_W1: ( "rwkv.blocks.{bid}.attention.time_maa_w1", # rwkv v6 + "model.layers.{bid}.self_attn.time_maa_w1", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_W2: ( "rwkv.blocks.{bid}.attention.time_maa_w2", # rwkv v6 + "model.layers.{bid}.self_attn.time_maa_w2", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_X: ( "rwkv.blocks.{bid}.attention.time_maa_x", # rwkv v6 + "model.layers.{bid}.self_attn.time_maa_x", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_K: ( "rwkv.blocks.{bid}.attention.time_maa_k", # rwkv v6 + "model.layers.{bid}.self_attn.time_maa_k", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_V: ( "rwkv.blocks.{bid}.attention.time_maa_v", # rwkv v6 + "model.layers.{bid}.self_attn.time_maa_v", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_R: ( "rwkv.blocks.{bid}.attention.time_maa_r", # rwkv v6 + "model.layers.{bid}.self_attn.time_maa_r", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_G: ( "rwkv.blocks.{bid}.attention.time_maa_g", # rwkv v6 + "model.layers.{bid}.self_attn.time_maa_g", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LERP_W: ( "rwkv.blocks.{bid}.attention.time_maa_w", # rwkv v6 + "model.layers.{bid}.self_attn.time_maa_w", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_FIRST: ( @@ -500,30 +508,37 @@ class TensorNameMap: MODEL_TENSOR.TIME_MIX_DECAY: ( "rwkv.blocks.{bid}.attention.time_decay", # rwkv v6 + "model.layers.{bid}.self_attn.time_decay", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_DECAY_W1: ( "rwkv.blocks.{bid}.attention.time_decay_w1", # rwkv v6 + "model.layers.{bid}.self_attn.time_decay_w1", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_DECAY_W2: ( "rwkv.blocks.{bid}.attention.time_decay_w2", # rwkv v6 + "model.layers.{bid}.self_attn.time_decay_w2", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_KEY: ( - "rwkv.blocks.{bid}.attention.key", # rwkv + "rwkv.blocks.{bid}.attention.key", # rwkv + "model.layers.{bid}.self_attn.k_proj", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_VALUE: ( - "rwkv.blocks.{bid}.attention.value", # rwkv + "rwkv.blocks.{bid}.attention.value", # rwkv + "model.layers.{bid}.self_attn.v_proj", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_RECEPTANCE: ( "rwkv.blocks.{bid}.attention.receptance", # rwkv + "model.layers.{bid}.self_attn.q_proj", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_GATE: ( - "rwkv.blocks.{bid}.attention.gate", # rwkv + "rwkv.blocks.{bid}.attention.gate", # rwkv + "model.layers.{bid}.self_attn.gate", # rwkv6qwen2 ), MODEL_TENSOR.TIME_MIX_LN: ( @@ -531,7 +546,8 @@ class TensorNameMap: ), MODEL_TENSOR.TIME_MIX_OUTPUT: ( - "rwkv.blocks.{bid}.attention.output", # rwkv + "rwkv.blocks.{bid}.attention.output", # rwkv + "model.layers.{bid}.self_attn.o_proj", # rwkv6qwen2 ), MODEL_TENSOR.CHANNEL_MIX_LERP_K: ( diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index eef66ed31..7300bd26a 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -57,6 +57,7 @@ static const std::map LLM_ARCH_NAMES = { { LLM_ARCH_NEMOTRON, "nemotron" }, { LLM_ARCH_EXAONE, "exaone" }, { LLM_ARCH_RWKV6, "rwkv6" }, + { LLM_ARCH_RWKV6QWEN2, "rwkv6qwen2" }, { LLM_ARCH_GRANITE, "granite" }, { LLM_ARCH_GRANITE_MOE, "granitemoe" }, { LLM_ARCH_CHAMELEON, "chameleon" }, @@ -106,6 +107,7 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_TIME_DECAY_EXTRA_DIM, "%s.time_decay_extra_dim" }, { LLM_KV_RESIDUAL_SCALE, "%s.residual_scale" }, { LLM_KV_EMBEDDING_SCALE, "%s.embedding_scale" }, + { LLM_KV_TOKEN_SHIFT_COUNT, "%s.token_shift_count" }, { LLM_KV_ATTENTION_HEAD_COUNT, "%s.attention.head_count" }, { LLM_KV_ATTENTION_HEAD_COUNT_KV, "%s.attention.head_count_kv" }, @@ -1166,6 +1168,7 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_TIME_MIX_LERP_V, "blk.%d.time_mix_lerp_v" }, { LLM_TENSOR_TIME_MIX_LERP_R, "blk.%d.time_mix_lerp_r" }, { LLM_TENSOR_TIME_MIX_LERP_G, "blk.%d.time_mix_lerp_g" }, + { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" }, { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" }, { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" }, { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" }, @@ -1183,6 +1186,32 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" }, }, }, + { + LLM_ARCH_RWKV6QWEN2, + { + { LLM_TENSOR_TOKEN_EMBD, "token_embd" }, + { LLM_TENSOR_OUTPUT_NORM, "output_norm" }, + { LLM_TENSOR_OUTPUT, "output" }, + { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, + { LLM_TENSOR_TIME_MIX_W1, "blk.%d.time_mix_w1" }, + { LLM_TENSOR_TIME_MIX_W2, "blk.%d.time_mix_w2" }, + { LLM_TENSOR_TIME_MIX_LERP_X, "blk.%d.time_mix_lerp_x" }, + { LLM_TENSOR_TIME_MIX_LERP_FUSED, "blk.%d.time_mix_lerp_fused" }, + { LLM_TENSOR_TIME_MIX_FIRST, "blk.%d.time_mix_first" }, + { LLM_TENSOR_TIME_MIX_DECAY, "blk.%d.time_mix_decay" }, + { LLM_TENSOR_TIME_MIX_DECAY_W1, "blk.%d.time_mix_decay_w1" }, + { LLM_TENSOR_TIME_MIX_DECAY_W2, "blk.%d.time_mix_decay_w2" }, + { LLM_TENSOR_TIME_MIX_KEY, "blk.%d.time_mix_key" }, + { LLM_TENSOR_TIME_MIX_VALUE, "blk.%d.time_mix_value" }, + { LLM_TENSOR_TIME_MIX_RECEPTANCE, "blk.%d.time_mix_receptance" }, + { LLM_TENSOR_TIME_MIX_GATE, "blk.%d.time_mix_gate" }, + { LLM_TENSOR_TIME_MIX_OUTPUT, "blk.%d.time_mix_output" }, + { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, + { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" }, + { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" }, + { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, + }, + }, { LLM_ARCH_GRANITE, { @@ -1365,6 +1394,7 @@ static const std::map LLM_TENSOR_INFOS = { {LLM_TENSOR_TIME_MIX_LERP_V, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, {LLM_TENSOR_TIME_MIX_LERP_R, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, {LLM_TENSOR_TIME_MIX_LERP_G, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, + {LLM_TENSOR_TIME_MIX_LERP_FUSED, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, {LLM_TENSOR_TIME_MIX_DECAY, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}}, {LLM_TENSOR_TIME_MIX_FIRST, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}}, {LLM_TENSOR_ATTN_NORM, {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, diff --git a/src/llama-arch.h b/src/llama-arch.h index 2e5f97b77..79909f03f 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -61,6 +61,7 @@ enum llm_arch { LLM_ARCH_NEMOTRON, LLM_ARCH_EXAONE, LLM_ARCH_RWKV6, + LLM_ARCH_RWKV6QWEN2, LLM_ARCH_GRANITE, LLM_ARCH_GRANITE_MOE, LLM_ARCH_CHAMELEON, @@ -110,6 +111,7 @@ enum llm_kv { LLM_KV_TIME_DECAY_EXTRA_DIM, LLM_KV_RESIDUAL_SCALE, LLM_KV_EMBEDDING_SCALE, + LLM_KV_TOKEN_SHIFT_COUNT, LLM_KV_ATTENTION_HEAD_COUNT, LLM_KV_ATTENTION_HEAD_COUNT_KV, @@ -253,6 +255,7 @@ enum llm_tensor { LLM_TENSOR_TIME_MIX_LERP_V, LLM_TENSOR_TIME_MIX_LERP_R, LLM_TENSOR_TIME_MIX_LERP_G, + LLM_TENSOR_TIME_MIX_LERP_FUSED, LLM_TENSOR_TIME_MIX_FIRST, LLM_TENSOR_TIME_MIX_DECAY, LLM_TENSOR_TIME_MIX_DECAY_W1, diff --git a/src/llama-hparams.cpp b/src/llama-hparams.cpp index c40534696..ea87b2953 100644 --- a/src/llama-hparams.cpp +++ b/src/llama-hparams.cpp @@ -52,7 +52,7 @@ uint32_t llama_hparams::n_embd_v_gqa(uint32_t il) const { uint32_t llama_hparams::n_embd_k_s() const { if (wkv_head_size != 0) { // for RWKV models - return 2 * n_embd; + return token_shift_count * n_embd; } // TODO: maybe support other convolution strides than 1 diff --git a/src/llama-hparams.h b/src/llama-hparams.h index a29f20ec4..3542bef49 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -76,6 +76,7 @@ struct llama_hparams { uint32_t time_mix_extra_dim = 0; uint32_t time_decay_extra_dim = 0; uint32_t wkv_head_size = 0; + uint32_t token_shift_count = 2; float rope_attn_factor = 1.0f; float rope_freq_base_train; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 7260cb155..c056204b0 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -1054,12 +1054,15 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { } } break; case LLM_ARCH_RWKV6: + case LLM_ARCH_RWKV6QWEN2: { - ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps, false); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps, false); ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size); ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim); ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim); ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false); + ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT, hparams.token_shift_count, false); switch (hparams.n_layer) { case 24: model.type = e_model::MODEL_1_6B; break; @@ -1070,6 +1073,7 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { default: model.type = e_model::MODEL_UNKNOWN; } break; case 61: model.type = e_model::MODEL_14B; break; + case 64: model.type = e_model::MODEL_32B; break; default: model.type = e_model::MODEL_UNKNOWN; } } break; @@ -2064,6 +2068,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) { case LLM_ARCH_T5ENCODER: case LLM_ARCH_JAIS: case LLM_ARCH_RWKV6: + case LLM_ARCH_RWKV6QWEN2: case LLM_ARCH_WAVTOKENIZER_DEC: return LLAMA_ROPE_TYPE_NONE; @@ -2208,6 +2213,7 @@ bool llama_model_is_recurrent(const struct llama_model * model) { switch (model->arch) { case LLM_ARCH_MAMBA: return true; case LLM_ARCH_RWKV6: return true; + case LLM_ARCH_RWKV6QWEN2: return true; default: return false; } } diff --git a/src/llama-model.h b/src/llama-model.h index 424cb0f52..565d2dbdf 100644 --- a/src/llama-model.h +++ b/src/llama-model.h @@ -241,15 +241,19 @@ struct llama_layer { struct ggml_tensor * time_mix_lerp_v = nullptr; struct ggml_tensor * time_mix_lerp_r = nullptr; struct ggml_tensor * time_mix_lerp_g = nullptr; + struct ggml_tensor * time_mix_lerp_fused = nullptr; - struct ggml_tensor * time_mix_first = nullptr; - struct ggml_tensor * time_mix_decay = nullptr; - struct ggml_tensor * time_mix_decay_w1 = nullptr; - struct ggml_tensor * time_mix_decay_w2 = nullptr; - struct ggml_tensor * time_mix_key = nullptr; - struct ggml_tensor * time_mix_value = nullptr; - struct ggml_tensor * time_mix_receptance = nullptr; - struct ggml_tensor * time_mix_gate = nullptr; + struct ggml_tensor * time_mix_first = nullptr; + struct ggml_tensor * time_mix_decay = nullptr; + struct ggml_tensor * time_mix_decay_w1 = nullptr; + struct ggml_tensor * time_mix_decay_w2 = nullptr; + struct ggml_tensor * time_mix_key = nullptr; + struct ggml_tensor * time_mix_key_b = nullptr; + struct ggml_tensor * time_mix_value = nullptr; + struct ggml_tensor * time_mix_value_b = nullptr; + struct ggml_tensor * time_mix_receptance = nullptr; + struct ggml_tensor * time_mix_receptance_b = nullptr; + struct ggml_tensor * time_mix_gate = nullptr; struct ggml_tensor * time_mix_ln = nullptr; struct ggml_tensor * time_mix_ln_b = nullptr; diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp index 466e7bc61..a45044f30 100644 --- a/src/llama-quant.cpp +++ b/src/llama-quant.cpp @@ -620,7 +620,8 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer; - // sanity checks + // sanity checks for models that have attention layers + if (qs.n_attention_wv != 0) { const auto & n_head_kv_iter = model.hparams.n_head_kv_arr.begin(); // attention layers have a non-zero number of kv heads @@ -758,6 +759,7 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std:: quantize &= name.find("time_mix_w2.weight") == std::string::npos; quantize &= name.find("time_mix_decay_w1.weight") == std::string::npos; quantize &= name.find("time_mix_decay_w2.weight") == std::string::npos; + quantize &= name.find("time_mix_lerp_fused.weight") == std::string::npos; // do not quantize relative position bias (T5) quantize &= name.find("attn_rel_b.weight") == std::string::npos; diff --git a/src/llama.cpp b/src/llama.cpp index ae375bcd3..a364861d3 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -134,11 +134,11 @@ static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w const int64_t H = 123; const int64_t n_tokens = 123; const int64_t n_seqs = 123; - ggml_tensor * k = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, 1, H, n_tokens); - ggml_tensor * v = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 1, S, H, n_tokens); - ggml_tensor * r = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 1, S, H, n_tokens); + ggml_tensor * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); + ggml_tensor * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); + ggml_tensor * r = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); ggml_tensor * tf = w; - ggml_tensor * td = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, 1, S, H, n_tokens); + ggml_tensor * td = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); ggml_tensor * state = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, n_seqs, S, H); op_tensor = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, state); } break; @@ -2186,11 +2186,13 @@ static bool llm_load_tensors( layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0); layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0); - layer.time_mix_lerp_w = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_W, "weight", i), {n_embd, 1, 1}, 0); - layer.time_mix_lerp_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0); - layer.time_mix_lerp_v = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_V, "weight", i), {n_embd, 1, 1}, 0); - layer.time_mix_lerp_r = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, 0); - layer.time_mix_lerp_g = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_G, "weight", i), {n_embd, 1, 1}, 0); + layer.time_mix_lerp_w = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_W, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_lerp_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_lerp_v = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_V, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_lerp_r = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_lerp_g = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_G, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, llama_model_loader::TENSOR_NOT_REQUIRED); + GGML_ASSERT(!(layer.time_mix_lerp_fused == NULL && layer.time_mix_lerp_w == NULL)); layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, 0); layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0); @@ -2214,6 +2216,59 @@ static bool llm_load_tensors( } } break; + case LLM_ARCH_RWKV6QWEN2: + { + model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED); + model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + const int time_mix_extra_dim = hparams.time_mix_extra_dim; + const int time_decay_extra_dim = hparams.time_decay_extra_dim; + const int head_size = hparams.wkv_head_size; + const int attn_hidden_size = n_embd; + const int n_head_kv = hparams.n_head_kv(); + int attn_key_value_size; + if (n_head_kv == 0 || attn_hidden_size / head_size == n_head_kv) { + attn_key_value_size = attn_hidden_size; + } else { + attn_key_value_size = n_head_kv * head_size; + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = model.layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0); + layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0); + + layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0); + layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0); + + layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0); + layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0); + layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0); + layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {n_embd, attn_key_value_size}, 0); + layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {n_embd, attn_key_value_size}, 0); + layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0); + layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0); + // optional bias tensors + layer.time_mix_key_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "bias", i), {attn_key_value_size}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_value_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "bias", i), {attn_key_value_size}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.time_mix_receptance_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "bias", i), {attn_hidden_size}, llama_model_loader::TENSOR_NOT_REQUIRED); + + layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; case LLM_ARCH_CHAMELEON: { model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); @@ -3337,16 +3392,20 @@ static struct ggml_tensor * llm_build_rwkv6_time_mix( const struct llama_layer * layer, struct ggml_tensor * cur, struct ggml_tensor * x_prev, - struct ggml_tensor ** wkv_state) { + struct ggml_tensor ** wkv_state, + size_t wkv_head_size, + size_t head_count_kv) { size_t n_embd = cur->ne[0]; size_t n_seq_tokens = cur->ne[1]; size_t n_seqs = cur->ne[2]; - size_t head_size = layer->time_mix_first->ne[0]; - size_t head_count = layer->time_mix_first->ne[1]; + size_t head_size = wkv_head_size; + size_t head_count = n_embd / head_size; size_t n_tokens = n_seqs * n_seq_tokens; + bool is_qrwkv = layer->time_mix_first == nullptr; + struct ggml_tensor * sx = ggml_sub(ctx, x_prev, cur); sx = ggml_reshape_2d(ctx, sx, n_embd, n_tokens); @@ -3375,69 +3434,64 @@ static struct ggml_tensor * llm_build_rwkv6_time_mix( xxx ); - struct ggml_tensor *mw = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], 0); - struct ggml_tensor *mk = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float)); - struct ggml_tensor *mv = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float)); - struct ggml_tensor *mr = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float)); - struct ggml_tensor *mg = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float)); + struct ggml_tensor *xw, *xk, *xv, *xr, *xg; + if (layer->time_mix_lerp_fused) { + // fusing these weights makes some performance improvement + sx = ggml_reshape_3d(ctx, sx, n_embd, 1, n_tokens); + cur = ggml_reshape_3d(ctx, cur, n_embd, 1, n_tokens); + xxx = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xxx, layer->time_mix_lerp_fused), sx), cur); + xw = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], 0); + xk = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float)); + xv = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float)); + xr = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float)); + xg = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float)); + } else { + // for backward compatibility + xw = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], 0); + xk = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * sizeof(float)); + xv = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 2 * sizeof(float)); + xr = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 3 * sizeof(float)); + xg = ggml_view_2d(ctx, xxx, n_embd, n_tokens, xxx->nb[1], n_embd * n_tokens * 4 * sizeof(float)); - struct ggml_tensor * xw = ggml_add( - ctx, - ggml_mul( - ctx, - ggml_add(ctx, mw, layer->time_mix_lerp_w), - sx - ), - cur - ); + xw = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xw, layer->time_mix_lerp_w), sx), cur); + xk = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xk, layer->time_mix_lerp_k), sx), cur); + xv = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xv, layer->time_mix_lerp_v), sx), cur); + xr = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xr, layer->time_mix_lerp_r), sx), cur); + xg = ggml_add(ctx, ggml_mul(ctx, ggml_add(ctx, xg, layer->time_mix_lerp_g), sx), cur); + } - struct ggml_tensor * xk = ggml_add( - ctx, - ggml_mul( - ctx, - ggml_add(ctx, mk, layer->time_mix_lerp_k), - sx - ), - cur - ); + struct ggml_tensor * r = llm_build_lora_mm(lctx, ctx, layer->time_mix_receptance, xr); + struct ggml_tensor * k = llm_build_lora_mm(lctx, ctx, layer->time_mix_key, xk); + struct ggml_tensor * v = llm_build_lora_mm(lctx, ctx, layer->time_mix_value, xv); + if (layer->time_mix_receptance_b) { + r = ggml_add(ctx, r, layer->time_mix_receptance_b); + } + if (layer->time_mix_key_b) { + k = ggml_add(ctx, k, layer->time_mix_key_b); + } + if (layer->time_mix_value_b) { + v = ggml_add(ctx, v, layer->time_mix_value_b); + } - struct ggml_tensor * xv = ggml_add( - ctx, - ggml_mul( - ctx, - ggml_add(ctx, mv, layer->time_mix_lerp_v), - sx - ), - cur - ); + struct ggml_tensor * g = llm_build_lora_mm(lctx, ctx, layer->time_mix_gate, xg); + if (is_qrwkv) { + g = ggml_sigmoid(ctx, g); + } else { + g = ggml_silu(ctx, g); + } - struct ggml_tensor * xr = ggml_add( - ctx, - ggml_mul( - ctx, - ggml_add(ctx, mr, layer->time_mix_lerp_r), - sx - ), - cur - ); + if (head_count_kv != head_count) { + GGML_ASSERT(head_count % head_count_kv == 0); + k = ggml_reshape_4d(ctx, k, head_size, 1, head_count_kv, n_tokens); + v = ggml_reshape_4d(ctx, v, head_size, 1, head_count_kv, n_tokens); + struct ggml_tensor * tmp = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, head_size, head_count / head_count_kv, head_count_kv, n_tokens); + k = ggml_repeat(ctx, k, tmp); + v = ggml_repeat(ctx, v, tmp); + } - struct ggml_tensor * xg = ggml_add( - ctx, - ggml_mul( - ctx, - ggml_add(ctx, mg, layer->time_mix_lerp_g), - sx - ), - cur - ); - - struct ggml_tensor * r = ggml_reshape_4d(ctx, llm_build_lora_mm(lctx, ctx, layer->time_mix_receptance, xr), head_size, 1, head_count, n_tokens); - struct ggml_tensor * k = ggml_reshape_4d(ctx, llm_build_lora_mm(lctx, ctx, layer->time_mix_key, xk), 1, head_size, head_count, n_tokens); - struct ggml_tensor * v = ggml_reshape_4d(ctx, llm_build_lora_mm(lctx, ctx, layer->time_mix_value, xv), head_size, 1, head_count, n_tokens); - struct ggml_tensor * g = ggml_silu( - ctx, - llm_build_lora_mm(lctx, ctx, layer->time_mix_gate, xg) - ); + k = ggml_reshape_3d(ctx, k, head_size, head_count, n_tokens); + v = ggml_reshape_3d(ctx, v, head_size, head_count, n_tokens); + r = ggml_reshape_3d(ctx, r, head_size, head_count, n_tokens); struct ggml_tensor * w = ggml_mul_mat( ctx, @@ -3448,25 +3502,35 @@ static struct ggml_tensor * llm_build_rwkv6_time_mix( ) ); - w = ggml_add(ctx, w, ggml_reshape_1d(ctx, layer->time_mix_decay, n_embd)); + w = ggml_add(ctx, w, layer->time_mix_decay); w = ggml_exp(ctx, ggml_neg(ctx, ggml_exp(ctx, w))); - w = ggml_reshape_4d(ctx, w, 1, head_size, head_count, n_tokens); + w = ggml_reshape_3d(ctx, w, head_size, head_count, n_tokens); - k = ggml_transpose(ctx, k); - v = ggml_transpose(ctx, v); - r = ggml_transpose(ctx, r); + if (is_qrwkv) { + // k = k * (1 - w) + k = ggml_sub(ctx, k, ggml_mul(ctx, k, w)); + } - struct ggml_tensor * wkv_output = ggml_rwkv_wkv6(ctx, k, v, r, layer->time_mix_first, w, *wkv_state); + struct ggml_tensor * wkv_output; + if (!layer->time_mix_first) { + wkv_output = ggml_gated_linear_attn(ctx, k, v, r, w, *wkv_state, pow(head_size, -0.5f)); + } else { + wkv_output = ggml_rwkv_wkv6(ctx, k, v, r, layer->time_mix_first, w, *wkv_state); + } cur = ggml_view_1d(ctx, wkv_output, n_embd * n_tokens, 0); *wkv_state = ggml_view_1d(ctx, wkv_output, n_embd * head_size * n_seqs, n_embd * n_tokens * sizeof(float)); - // group norm with head_count groups - cur = ggml_reshape_3d(ctx, cur, n_embd / head_count, head_count, n_tokens); - cur = ggml_norm(ctx, cur, 64e-5f); + if (!is_qrwkv) { + // group norm with head_count groups + cur = ggml_reshape_3d(ctx, cur, n_embd / head_count, head_count, n_tokens); + cur = ggml_norm(ctx, cur, 64e-5f); - // Convert back to regular vectors. - cur = ggml_reshape_2d(ctx, cur, n_embd, n_tokens); - cur = ggml_add(ctx, ggml_mul(ctx, cur, layer->time_mix_ln), layer->time_mix_ln_b); + // Convert back to regular vectors. + cur = ggml_reshape_2d(ctx, cur, n_embd, n_tokens); + cur = ggml_add(ctx, ggml_mul(ctx, cur, layer->time_mix_ln), layer->time_mix_ln_b); + } else { + cur = ggml_reshape_2d(ctx, cur, n_embd, n_tokens); + } cur = ggml_mul(ctx, cur, g); cur = llm_build_lora_mm(lctx, ctx, layer->time_mix_output, cur); @@ -10048,7 +10112,7 @@ struct llm_build_context { 1 ); - cur = ggml_add(ctx0, cur, llm_build_rwkv6_time_mix(lctx, ctx0, layer, x_norm_att, x_prev, &wkv_states)); + cur = ggml_add(ctx0, cur, llm_build_rwkv6_time_mix(lctx, ctx0, layer, x_norm_att, x_prev, &wkv_states, hparams.wkv_head_size, n_embd / hparams.wkv_head_size)); ggml_build_forward_expand(gf, cur); ggml_build_forward_expand( gf, @@ -10115,6 +10179,118 @@ struct llm_build_context { return gf; } + // ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py + ggml_cgraph * build_rwkv6qwen2() { + ggml_cgraph *gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false); + + GGML_ASSERT(n_embd == hparams.n_embd_k_s()); + + const int64_t n_seqs = ubatch.n_seqs; + const int64_t n_seq_tokens = ubatch.n_seq_tokens; + const int64_t n_tokens = ubatch.n_tokens; + GGML_ASSERT(n_seqs != 0); + GGML_ASSERT(ubatch.equal_seqs); + GGML_ASSERT(n_tokens == n_seq_tokens * n_seqs); + + struct ggml_tensor * cur; + struct ggml_tensor * inpL; + struct ggml_tensor * state_copy = build_inp_s_copy(); + struct ggml_tensor * state_mask = build_inp_s_mask(); + + inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb); + + for (int il = 0; il < n_layer; ++il) { + const llama_layer * layer = &model.layers[il]; + + // (ab)using the KV cache to store the states + struct ggml_tensor * token_shift = llm_build_copy_mask_state(ctx0, + gf, kv_self.k_l[il], state_copy, state_mask, + hparams.n_embd_k_s(), kv_self.size, kv_head, n_kv, n_seqs); + struct ggml_tensor * wkv_states = llm_build_copy_mask_state(ctx0, + gf, kv_self.v_l[il], state_copy, state_mask, + hparams.n_embd_v_s(), kv_self.size, kv_head, n_kv, n_seqs); + + cur = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs); + token_shift = ggml_reshape_3d(ctx0, token_shift, n_embd, 1, n_seqs); + + struct ggml_tensor * x_norm_att = llm_build_norm(ctx0, cur, hparams, layer->attn_norm, layer->attn_norm_b, LLM_NORM_RMS, cb, il); + struct ggml_tensor * x_prev = ggml_concat( + ctx0, + token_shift, + ggml_view_3d(ctx0, x_norm_att, n_embd, n_seq_tokens - 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], 0), + 1 + ); + + ggml_build_forward_expand( + gf, + ggml_cpy( + ctx0, + wkv_states, + ggml_view_1d( + ctx0, + kv_self.v_l[il], + hparams.n_embd_v_s() * n_seqs, + hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self.v_l[il]) + ) + ) + ); + + struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, llm_build_rwkv6_time_mix(lctx, ctx0, layer, x_norm_att, x_prev, &wkv_states, hparams.wkv_head_size, hparams.n_head_kv())); + ggml_build_forward_expand(gf, ffn_inp); + ggml_build_forward_expand( + gf, + ggml_cpy( + ctx0, + wkv_states, + ggml_view_1d( + ctx0, + kv_self.v_l[il], + hparams.n_embd_v_s() * n_seqs, + hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self.v_l[il]) + ) + ) + ); + + cb(ffn_inp, "ffn_inp", il); + + // feed-forward network + cur = llm_build_norm(ctx0, ffn_inp, hparams, + model.layers[il].ffn_norm, NULL, + LLM_NORM_RMS, cb, il); + cb(cur, "ffn_norm", il); + + cur = llm_build_ffn(ctx0, lctx, cur, + model.layers[il].ffn_up, NULL, NULL, + model.layers[il].ffn_gate, NULL, NULL, + model.layers[il].ffn_down, NULL, NULL, + NULL, + LLM_FFN_SILU, LLM_FFN_PAR, cb, il); + cb(cur, "ffn_out", il); + + cur = ggml_add(ctx0, cur, ffn_inp); + cur = lctx.cvec.apply_to(ctx0, cur, il); + cb(cur, "l_out", il); + + // input for next layer + inpL = cur; + } + + cur = inpL; + struct ggml_tensor * inp_out_ids = build_inp_out_ids(); + cur = ggml_reshape_2d(ctx0, cur, n_embd, n_tokens); + cur = ggml_get_rows(ctx0, cur, inp_out_ids); + + cur = llm_build_norm(ctx0, cur, hparams, model.output_norm, model.output_norm_b, LLM_NORM_RMS, cb, -1); + cb(cur, "result_norm", -1); + + cur = llm_build_lora_mm(lctx, ctx0, model.output, cur); + cb(cur, "result_output", -1); + + ggml_build_forward_expand(gf, cur); + + return gf; + } + // ref: https://github.com/facebookresearch/chameleon // based on the original build_llama() function, changes: // * qk-norm @@ -10724,6 +10900,10 @@ static struct ggml_cgraph * llama_build_graph( { result = llm.build_rwkv6(); } break; + case LLM_ARCH_RWKV6QWEN2: + { + result = llm.build_rwkv6qwen2(); + } break; case LLM_ARCH_CHAMELEON: { result = llm.build_chameleon(); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 1e892f663..3834e0f84 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1659,17 +1659,46 @@ struct test_rwkv_wkv6 : public test_case { ggml_tensor * build_graph(ggml_context * ctx) override { const int64_t n_tokens = n_seq_tokens * n_seqs; - ggml_tensor * r = ggml_new_tensor(ctx, type, 4, std::vector{ 1, head_size, head_count, n_tokens }.data()); - ggml_tensor * k = ggml_new_tensor(ctx, type, 4, std::vector{ head_size, 1, head_count, n_tokens }.data()); - ggml_tensor * v = ggml_new_tensor(ctx, type, 4, std::vector{ 1, head_size, head_count, n_tokens }.data()); + ggml_tensor * r = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); ggml_tensor * tf = ggml_new_tensor(ctx, type, 2, std::vector{ head_size, head_count }.data()); - ggml_tensor * td = ggml_new_tensor(ctx, type, 4, std::vector{ 1, head_size, head_count, n_tokens }.data()); + ggml_tensor * td = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector{ head_size * head_size * head_count, n_seqs }.data()); ggml_tensor * out = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, s); return out; } }; +// GGML_OP_GATED_LINEAR_ATTN +struct test_gla : public test_case { + const ggml_type type; + + const int64_t head_count; + const int64_t head_size; + const int64_t n_seq_tokens; + const int64_t n_seqs; + + std::string vars() override { + return VARS_TO_STR5(type, head_count, head_size, n_seq_tokens, n_seqs); + } + + test_gla(ggml_type type = GGML_TYPE_F32, + int64_t head_count = 32, int64_t head_size = 64, int64_t n_seq_tokens = 32, int64_t n_seqs = 32) + : type(type), head_count(head_count), head_size(head_size), n_seq_tokens(n_seq_tokens), n_seqs(n_seqs) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + const int64_t n_tokens = n_seq_tokens * n_seqs; + ggml_tensor * q = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * k = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * v = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * g = ggml_new_tensor(ctx, type, 3, std::vector{ head_size, head_count, n_tokens }.data()); + ggml_tensor * s = ggml_new_tensor(ctx, type, 2, std::vector{ head_size * head_size * head_count, n_seqs }.data()); + ggml_tensor * out = ggml_gated_linear_attn(ctx, k, v, q, g, s, pow(head_size, -0.5)); + return out; + } +}; + // GGML_OP_MUL_MAT struct test_mul_mat : public test_case { const ggml_type type_a; @@ -3626,6 +3655,11 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 32, 4)); test_cases.emplace_back(new test_rwkv_wkv6(GGML_TYPE_F32, 32, 64, 128, 4)); + test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 1, 1)); + test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 32, 1)); + test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 32, 4)); + test_cases.emplace_back(new test_gla(GGML_TYPE_F32, 32, 64, 128, 4)); + for (int i = 1; i < 9; ++i) { test_cases.emplace_back(new test_mul_mat(GGML_TYPE_F16, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); test_cases.emplace_back(new test_mul_mat(GGML_TYPE_Q4_0, GGML_TYPE_F32, 16, i, 256, { 1, 1}, {1, 1})); From c3f9d25706ac84297067aeaa662c1f1af42ed443 Mon Sep 17 00:00:00 2001 From: 0cc4m Date: Fri, 10 Jan 2025 06:39:33 +0100 Subject: [PATCH 060/279] Vulkan: Fix float16 use on devices without float16 support + fix subgroup_size_control validation error (#11161) * Vulkan: Remove float16 use in shaders * Fix validation error about subgroup_size_control extension --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 2 +- .../vulkan-shaders/mul_mat_vec.comp | 9 +++---- .../vulkan-shaders/mul_mat_vec_q2_k.comp | 24 +++++++++---------- .../vulkan-shaders/mul_mat_vec_q3_k.comp | 18 +++++++------- .../vulkan-shaders/mul_mat_vec_q4_k.comp | 12 +++++----- .../vulkan-shaders/mul_mat_vec_q5_k.comp | 20 ++++++++-------- .../vulkan-shaders/mul_mat_vec_q6_k.comp | 10 ++++---- .../ggml-vulkan/vulkan-shaders/soft_max.comp | 1 - .../src/ggml-vulkan/vulkan-shaders/types.comp | 5 +++- 9 files changed, 50 insertions(+), 51 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 1b9174682..649146d7b 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -2277,6 +2277,7 @@ static vk_device ggml_vk_get_device(size_t idx) { if (device->subgroup_size_control) { device->subgroup_min_size = subgroup_size_control_props.minSubgroupSize; device->subgroup_max_size = subgroup_size_control_props.maxSubgroupSize; + device_extensions.push_back("VK_EXT_subgroup_size_control"); } device->subgroup_size_control = device->subgroup_size_control && @@ -2285,7 +2286,6 @@ static vk_device ggml_vk_get_device(size_t idx) { if (device->subgroup_size_control) { device->subgroup_require_full_support = subgroup_size_control_features.computeFullSubgroups; - device_extensions.push_back("VK_EXT_subgroup_size_control"); } #if defined(VK_KHR_cooperative_matrix) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp index 24875cdcf..53902858d 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp @@ -1,9 +1,6 @@ #version 450 -#ifdef FLOAT16 -#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require -#endif -#extension GL_EXT_shader_explicit_arithmetic_types : require +#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require #include "mul_mat_vec_base.comp" @@ -27,8 +24,8 @@ void iter(inout FLOAT_TYPE temp[NUM_COLS][NUM_ROWS], const uint first_row, const #if K_PER_ITER == 8 #if QUANT_R == 2 - const B_TYPE_VEC4 bv02 = data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4]; - const B_TYPE_VEC4 bv13 = data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs + y_offset) / 4]; + const vec4 bv02 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs) / 4]); + const vec4 bv13 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + iybs + iqs + y_offset) / 4]); const vec4 bv0 = vec4(bv02.x, bv13.x, bv02.y, bv13.y); const vec4 bv1 = vec4(bv02.z, bv13.z, bv02.w, bv13.w); #else diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp index 934213446..6a9b9b2d1 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q2_k.comp @@ -1,5 +1,5 @@ #version 450 -#extension GL_EXT_shader_explicit_arithmetic_types : require +#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require #include "mul_mat_vec_base.comp" @@ -40,9 +40,9 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; - f16vec2 d = data_a[ib0 + i].d; - const FLOAT_TYPE dall = d.x; - const FLOAT_TYPE dmin = d.y; + vec2 d = vec2(data_a[ib0 + i].d); + const FLOAT_TYPE dall = FLOAT_TYPE(d.x); + const FLOAT_TYPE dmin = FLOAT_TYPE(d.y); uint32_t s0_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 0]; uint32_t s4_u32 = data_a_packed32[ib0 + i].scales[s_offset / 4 + 1]; @@ -63,14 +63,14 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uvec2 qs16 = uvec2(unpack8(qs16_u16)); [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { - B_TYPE_VEC2 b0 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0]; - B_TYPE_VEC2 b16 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8]; - B_TYPE_VEC2 b32 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]; - B_TYPE_VEC2 b48 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]; - B_TYPE_VEC2 b64 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]; - B_TYPE_VEC2 b80 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]; - B_TYPE_VEC2 b96 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]; - B_TYPE_VEC2 b112 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]; + vec2 b0 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0]); + vec2 b16 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8]); + vec2 b32 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]); + vec2 b48 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]); + vec2 b64 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]); + vec2 b80 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]); + vec2 b96 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]); + vec2 b112 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]); FLOAT_TYPE sum1 = FLOAT_TYPE(0.0); FLOAT_TYPE sum2 = FLOAT_TYPE(0.0); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp index 86b0159d9..96ef50fdd 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q3_k.comp @@ -1,5 +1,5 @@ #version 450 -#extension GL_EXT_shader_explicit_arithmetic_types : require +#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require #include "mul_mat_vec_base.comp" @@ -60,14 +60,14 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { - B_TYPE_VEC2 b0 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0]; - B_TYPE_VEC2 b16 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8]; - B_TYPE_VEC2 b32 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]; - B_TYPE_VEC2 b48 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]; - B_TYPE_VEC2 b64 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]; - B_TYPE_VEC2 b80 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]; - B_TYPE_VEC2 b96 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]; - B_TYPE_VEC2 b112 = data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]; + vec2 b0 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 0]); + vec2 b16 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 8]); + vec2 b32 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]); + vec2 b48 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]); + vec2 b64 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]); + vec2 b80 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]); + vec2 b96 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]); + vec2 b112 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]); FLOAT_TYPE sum = FLOAT_TYPE(0.0); [[unroll]] for (int l = 0; l < 2; ++l) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp index cd1dd8e89..f97eb8744 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q4_k.comp @@ -1,6 +1,6 @@ #version 450 -#extension GL_EXT_shader_explicit_arithmetic_types : require +#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require #include "mul_mat_vec_base.comp" @@ -45,7 +45,7 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; - f16vec2 d = data_a[ib0 + i].d; + vec2 d = vec2(data_a[ib0 + i].d); const FLOAT_TYPE dall = FLOAT_TYPE(d.x); const FLOAT_TYPE dmin = FLOAT_TYPE(d.y); @@ -96,10 +96,10 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint32_t q4_15 = qs64_hi4.w; [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { - B_TYPE_VEC4 by10 = data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4]; - B_TYPE_VEC4 by132 = data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4 + 8]; - B_TYPE_VEC4 by20 = data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4]; - B_TYPE_VEC4 by232 = data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4 + 8]; + vec4 by10 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4 ]); + vec4 by132 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y1_idx) / 4 + 8]); + vec4 by20 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4 ]); + vec4 by232 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y2_idx) / 4 + 8]); const FLOAT_TYPE sx = fma(FLOAT_TYPE(by10.x), q4_0, fma(FLOAT_TYPE(by10.y), q4_1, fma(FLOAT_TYPE(by10.z), q4_2, FLOAT_TYPE(by10.w) * q4_3))); const FLOAT_TYPE sy = fma(FLOAT_TYPE(by132.x), q4_4, fma(FLOAT_TYPE(by132.y), q4_5, fma(FLOAT_TYPE(by132.z), q4_6, FLOAT_TYPE(by132.w) * q4_7))); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp index 0a68891c3..79d7db0e3 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q5_k.comp @@ -1,6 +1,6 @@ #version 450 -#extension GL_EXT_shader_explicit_arithmetic_types : require +#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require #include "mul_mat_vec_base.comp" @@ -42,7 +42,7 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { [[unroll]] for (uint n = 0; n < num_rows; ++n) { const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row; - f16vec2 d = data_a[ib0 + i].d; + vec2 d = vec2(data_a[ib0 + i].d); const FLOAT_TYPE dall = FLOAT_TYPE(d.x); const FLOAT_TYPE dmin = FLOAT_TYPE(d.y); @@ -105,14 +105,14 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { const uint32_t q4_15 = qs64_80_hi4.w; [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { - B_TYPE_VEC2 by10 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2]; - B_TYPE_VEC2 by116 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 8]; - B_TYPE_VEC2 by132 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 16]; - B_TYPE_VEC2 by148 = data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 24]; - B_TYPE_VEC2 by20 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2]; - B_TYPE_VEC2 by216 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 8]; - B_TYPE_VEC2 by232 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 16]; - B_TYPE_VEC2 by248 = data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 24]; + vec2 by10 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 ]); + vec2 by116 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 8]); + vec2 by132 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 16]); + vec2 by148 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y1_idx) / 2 + 24]); + vec2 by20 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 ]); + vec2 by216 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 8]); + vec2 by232 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 16]); + vec2 by248 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y2_idx) / 2 + 24]); const FLOAT_TYPE sx = fma(FLOAT_TYPE(by10.x), q4_0, diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp index 70e13a56b..041fd27c1 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec_q6_k.comp @@ -1,6 +1,6 @@ #version 450 -#extension GL_EXT_shader_explicit_arithmetic_types : require +#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require #include "mul_mat_vec_base.comp" @@ -77,10 +77,10 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { uvec4 q3 = uvec4(unpack8(q3_u32)); [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) { - B_TYPE_VEC4 by0 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4]; - B_TYPE_VEC4 by32 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 8]; - B_TYPE_VEC4 by64 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 16]; - B_TYPE_VEC4 by96 = data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 24]; + vec4 by0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 ]); + vec4 by32 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 8]); + vec4 by64 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 16]); + vec4 by96 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 24]); FLOAT_TYPE sum = FLOAT_TYPE(0.0); [[unroll]] for (int l = 0; l < 4; ++l) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp b/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp index a25808e16..51fc2dc7e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/soft_max.comp @@ -1,6 +1,5 @@ #version 450 -#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require #extension GL_EXT_control_flow_attributes : enable layout (push_constant) uniform parameter diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp index eecc47f3a..f12e61bbe 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp @@ -2,7 +2,10 @@ #if !defined(GGML_TYPES_COMP) #define GGML_TYPES_COMP -#extension GL_EXT_shader_explicit_arithmetic_types : require +#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require +#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require +#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require +#extension GL_EXT_shader_16bit_storage : require #if defined(DATA_A_F32) #define QUANT_K 1 From ff3fcabc727b2dd0c477d23a258217b27cc639fb Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Fri, 10 Jan 2025 11:30:53 +0100 Subject: [PATCH 061/279] convert : add --print-supported-models option (#11172) * convert : add --print-supported-models option This commit adds a new option to the convert_hf_to_gguf.py script to print the supported models. The motivation for this is that it can be useful to know which models are supported by the script without having to look at the code. Example usage: ```console $ ./convert_hf_to_gguf.py --print-supported-models Supported models: - GPTNeoXForCausalLM - BloomForCausalLM - BloomModel - MPTForCausalLM - OrionForCausalLM - BaichuanForCausalLM - BaiChuanForCausalLM - XverseForCausalLM - FalconForCausalLM - RWForCausalLM - GPTBigCodeForCausalLM - GPTRefactForCausalLM - StableLmForCausalLM - StableLMEpochForCausalLM - LlavaStableLMEpochForCausalLM - LLaMAForCausalLM - LlamaForCausalLM - MistralForCausalLM - MixtralForCausalLM - DeciLMForCausalLM - BitnetForCausalLM - GrokForCausalLM - DbrxForCausalLM - MiniCPMForCausalLM - MiniCPM3ForCausalLM - QWenLMHeadModel - Qwen2ForCausalLM - Qwen2VLForConditionalGeneration - WavTokenizerDec - Qwen2MoeForCausalLM - GPT2LMHeadModel - PhiForCausalLM - Phi3ForCausalLM - PhiMoEForCausalLM - PlamoForCausalLM - CodeShellForCausalLM - InternLM2ForCausalLM - BertModel - BertForMaskedLM - CamembertModel - RobertaModel - NomicBertModel - XLMRobertaModel - XLMRobertaForSequenceClassification - GemmaForCausalLM - Gemma2ForCausalLM - Starcoder2ForCausalLM - Rwkv6ForCausalLM - RWKV6Qwen2ForCausalLM - MambaForCausalLM - MambaLMHeadModel - FalconMambaForCausalLM - CohereForCausalLM - Cohere2ForCausalLM - OLMoForCausalLM - OlmoForCausalLM - Olmo2ForCausalLM - OlmoeForCausalLM - JinaBertModel - JinaBertForMaskedLM - OpenELMForCausalLM - ArcticForCausalLM - DeepseekForCausalLM - DeepseekV3ForCausalLM - DeepseekV2ForCausalLM - UMT5ForConditionalGeneration - MT5ForConditionalGeneration - T5ForConditionalGeneration - T5WithLMHeadModel - T5EncoderModel - JAISLMHeadModel - ChatGLMModel - ChatGLMForConditionalGeneration - NemotronForCausalLM - ExaoneForCausalLM - GraniteForCausalLM - GraniteMoeForCausalLM - ChameleonForCausalLM - ChameleonForConditionalGeneration ``` * squash! convert : add --print-supported-models option Fix flake8 error. --- convert_hf_to_gguf.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index cf317eeae..81f19bf37 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -478,6 +478,11 @@ class Model: return modelcls return func + @classmethod + def print_registered_models(cls): + for name in cls._model_classes.keys(): + logger.error(f"- {name}") + @classmethod def from_model_architecture(cls, arch: str) -> type[Model]: try: @@ -4929,6 +4934,7 @@ def parse_args() -> argparse.Namespace: parser.add_argument( "model", type=Path, help="directory containing model file", + nargs="?", ) parser.add_argument( "--use-temp-file", action="store_true", @@ -4966,8 +4972,15 @@ def parse_args() -> argparse.Namespace: "--metadata", type=Path, help="Specify the path for an authorship metadata override file" ) + parser.add_argument( + "--print-supported-models", action="store_true", + help="Print the supported models" + ) - return parser.parse_args() + args = parser.parse_args() + if not args.print_supported_models and args.model is None: + parser.error("the following arguments are required: model") + return args def split_str_to_n_bytes(split_str: str) -> int: @@ -4991,6 +5004,11 @@ def split_str_to_n_bytes(split_str: str) -> int: def main() -> None: args = parse_args() + if args.print_supported_models: + logger.error("Supported models:") + Model.print_registered_models() + sys.exit(0) + if args.verbose: logging.basicConfig(level=logging.DEBUG) else: From ba8a1f9c5b675459c55a83e3f97f10df3a66c788 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Fri, 10 Jan 2025 13:16:16 +0100 Subject: [PATCH 062/279] examples : add README.md to tts example [no ci] (#11155) * examples : add README.md to tts example [no ci] * squash! examples : add README.md to tts example [no ci] Fix heading to be consistent with other examples, and add a quickstart section to README.md. * squash! examples : add README.md to tts example [no ci] Fix spelling mistake. --- examples/tts/README.md | 80 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 examples/tts/README.md diff --git a/examples/tts/README.md b/examples/tts/README.md new file mode 100644 index 000000000..b0d20111a --- /dev/null +++ b/examples/tts/README.md @@ -0,0 +1,80 @@ +# llama.cpp/example/tts +This example demonstrates the Text To Speech feature. It uses a +[model](https://www.outeai.com/blog/outetts-0.2-500m) from +[outeai](https://www.outeai.com/). + +## Quickstart +If you have built llama.cpp with `-DLLAMA_CURL=ON` you can simply run the +following command and the required models will be downloaded automatically: +```console +$ build/bin/llama-tts --tts-oute-default -p "Hello world" && aplay output.wav +``` +For details about the models and how to convert them to the required format +see the following sections. + +### Model conversion +Checkout or download the model that contains the LLM model: +```console +$ pushd models +$ git clone --branch main --single-branch --depth 1 https://huggingface.co/OuteAI/OuteTTS-0.2-500M +$ cd OuteTTS-0.2-500M && git lfs install && git lfs pull +$ popd +``` +Convert the model to .gguf format: +```console +(venv) python convert_hf_to_gguf.py models/OuteTTS-0.2-500M \ + --outfile models/outetts-0.2-0.5B-f16.gguf --outtype f16 +``` +The generated model will be `models/outetts-0.2-0.5B-f16.gguf`. + +We can optionally quantize this to Q8_0 using the following command: +```console +$ build/bin/llama-quantize models/outetts-0.2-0.5B-f16.gguf \ + models/outetts-0.2-0.5B-q8_0.gguf q8_0 +``` +The quantized model will be `models/outetts-0.2-0.5B-q8_0.gguf`. + +Next we do something simlar for the audio decoder. First download or checkout +the model for the voice decoder: +```console +$ pushd models +$ git clone --branch main --single-branch --depth 1 https://huggingface.co/novateur/WavTokenizer-large-speech-75token +$ cd WavTokenizer-large-speech-75token && git lfs install && git lfs pull +$ popd +``` +This model file is PyTorch checkpoint (.ckpt) and we first need to convert it to +huggingface format: +```console +(venv) python examples/tts/convert_pt_to_hf.py \ + models/WavTokenizer-large-speech-75token/wavtokenizer_large_speech_320_24k.ckpt +... +Model has been successfully converted and saved to models/WavTokenizer-large-speech-75token/model.safetensors +Metadata has been saved to models/WavTokenizer-large-speech-75token/index.json +Config has been saved to models/WavTokenizer-large-speech-75tokenconfig.json +``` +Then we can convert the huggingface format to gguf: +```console +(venv) python convert_hf_to_gguf.py models/WavTokenizer-large-speech-75token \ + --outfile models/wavtokenizer-large-75-f16.gguf --outtype f16 +... +INFO:hf-to-gguf:Model successfully exported to models/wavtokenizer-large-75-f16.gguf +``` + +### Running the example + +With both of the models generated, the LLM model and the voice decoder model, +we can run the example: +```console +$ build/bin/llama-tts -m ./models/outetts-0.2-0.5B-q8_0.gguf \ + -mv ./models/wavtokenizer-large-75-f16.gguf \ + -p "Hello world" +... +main: audio written to file 'output.wav' +``` +The output.wav file will contain the audio of the prompt. This can be heard +by playing the file with a media player. On Linux the following command will +play the audio: +```console +$ aplay output.wav +``` + From 2739a71e4b88474833b64aa974ca4515574fd3c4 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Sat, 11 Jan 2025 05:50:33 +0100 Subject: [PATCH 063/279] convert : sort print supported models [no ci] (#11179) This commit sorts the list of supported models when printing them out. The motivation for this change is to make it easier to find a specific model in the list of supported models. For example: ```console $ ./convert_hf_to_gguf.py --print-supported-models Supported models: - ArcticForCausalLM - BaiChuanForCausalLM - BaichuanForCausalLM - BertForMaskedLM - BertModel - BitnetForCausalLM - BloomForCausalLM - BloomModel - CamembertModel - ChameleonForCausalLM - ChameleonForConditionalGeneration - ChatGLMForConditionalGeneration - ChatGLMModel - CodeShellForCausalLM - Cohere2ForCausalLM - CohereForCausalLM - DbrxForCausalLM - DeciLMForCausalLM - DeepseekForCausalLM - DeepseekV2ForCausalLM - DeepseekV3ForCausalLM - ExaoneForCausalLM - FalconForCausalLM - FalconMambaForCausalLM - GPT2LMHeadModel - GPTBigCodeForCausalLM - GPTNeoXForCausalLM - GPTRefactForCausalLM - Gemma2ForCausalLM - GemmaForCausalLM - GraniteForCausalLM - GraniteMoeForCausalLM - GrokForCausalLM - InternLM2ForCausalLM - JAISLMHeadModel - JinaBertForMaskedLM - JinaBertModel - LLaMAForCausalLM - LlamaForCausalLM - LlavaStableLMEpochForCausalLM - MPTForCausalLM - MT5ForConditionalGeneration - MambaForCausalLM - MambaLMHeadModel - MiniCPM3ForCausalLM - MiniCPMForCausalLM - MistralForCausalLM - MixtralForCausalLM - NemotronForCausalLM - NomicBertModel - OLMoForCausalLM - Olmo2ForCausalLM - OlmoForCausalLM - OlmoeForCausalLM - OpenELMForCausalLM - OrionForCausalLM - Phi3ForCausalLM - PhiForCausalLM - PhiMoEForCausalLM - PlamoForCausalLM - QWenLMHeadModel - Qwen2ForCausalLM - Qwen2MoeForCausalLM - Qwen2VLForConditionalGeneration - RWForCausalLM - RWKV6Qwen2ForCausalLM - RobertaModel - Rwkv6ForCausalLM - StableLMEpochForCausalLM - StableLmForCausalLM - Starcoder2ForCausalLM - T5EncoderModel - T5ForConditionalGeneration - T5WithLMHeadModel - UMT5ForConditionalGeneration - WavTokenizerDec - XLMRobertaForSequenceClassification - XLMRobertaModel - XverseForCausalLM ``` --- convert_hf_to_gguf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 81f19bf37..4dc9837ab 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -480,7 +480,7 @@ class Model: @classmethod def print_registered_models(cls): - for name in cls._model_classes.keys(): + for name in sorted(cls._model_classes.keys()): logger.error(f"- {name}") @classmethod From c05e8c9934f94fde49bc1bc9dc51eed282605150 Mon Sep 17 00:00:00 2001 From: Vinesh Janarthanan <36610342+VJHack@users.noreply.github.com> Date: Sat, 11 Jan 2025 03:42:31 -0600 Subject: [PATCH 064/279] gguf-py: fixed local detection of gguf package (#11180) * updated path to gguf package for non-installed setups * added reader.py to readme * Bumped gguf version to 0.15.0 --- gguf-py/README.md | 2 ++ gguf-py/gguf/scripts/gguf_convert_endian.py | 4 ++-- gguf-py/gguf/scripts/gguf_dump.py | 4 ++-- gguf-py/gguf/scripts/gguf_hash.py | 4 ++-- gguf-py/gguf/scripts/gguf_new_metadata.py | 4 ++-- gguf-py/gguf/scripts/gguf_set_metadata.py | 4 ++-- gguf-py/pyproject.toml | 2 +- 7 files changed, 13 insertions(+), 11 deletions(-) diff --git a/gguf-py/README.md b/gguf-py/README.md index 37a75923b..2e513633d 100644 --- a/gguf-py/README.md +++ b/gguf-py/README.md @@ -15,6 +15,8 @@ pip install gguf [examples/writer.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/examples/writer.py) — Generates `example.gguf` in the current directory to demonstrate generating a GGUF file. Note that this file cannot be used as a model. +[examples/reader.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/examples/reader.py) — Extracts and displays key-value pairs and tensor details from a GGUF file in a readable format. + [gguf/scripts/gguf_dump.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_dump.py) — Dumps a GGUF file's metadata to the console. [gguf/scripts/gguf_set_metadata.py](https://github.com/ggerganov/llama.cpp/blob/master/gguf-py/gguf/scripts/gguf_set_metadata.py) — Allows changing simple metadata values in a GGUF file by key. diff --git a/gguf-py/gguf/scripts/gguf_convert_endian.py b/gguf-py/gguf/scripts/gguf_convert_endian.py index b698af0fe..f97e91bd4 100755 --- a/gguf-py/gguf/scripts/gguf_convert_endian.py +++ b/gguf-py/gguf/scripts/gguf_convert_endian.py @@ -11,8 +11,8 @@ from pathlib import Path import numpy as np # Necessary to load the local gguf package -if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): - sys.path.insert(0, str(Path(__file__).parent.parent)) +if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent.parent / 'gguf-py').exists(): + sys.path.insert(0, str(Path(__file__).parent.parent.parent)) import gguf diff --git a/gguf-py/gguf/scripts/gguf_dump.py b/gguf-py/gguf/scripts/gguf_dump.py index 1b6546541..f95b4fd48 100755 --- a/gguf-py/gguf/scripts/gguf_dump.py +++ b/gguf-py/gguf/scripts/gguf_dump.py @@ -12,8 +12,8 @@ from typing import Any import numpy as np # Necessary to load the local gguf package -if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): - sys.path.insert(0, str(Path(__file__).parent.parent)) +if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent.parent / 'gguf-py').exists(): + sys.path.insert(0, str(Path(__file__).parent.parent.parent)) from gguf import GGUFReader, GGUFValueType, ReaderTensor # noqa: E402 diff --git a/gguf-py/gguf/scripts/gguf_hash.py b/gguf-py/gguf/scripts/gguf_hash.py index ee34d09bf..3ef989921 100755 --- a/gguf-py/gguf/scripts/gguf_hash.py +++ b/gguf-py/gguf/scripts/gguf_hash.py @@ -13,8 +13,8 @@ from pathlib import Path from tqdm import tqdm # Necessary to load the local gguf package -if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): - sys.path.insert(0, str(Path(__file__).parent.parent)) +if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent.parent / 'gguf-py').exists(): + sys.path.insert(0, str(Path(__file__).parent.parent.parent)) from gguf import GGUFReader # noqa: E402 diff --git a/gguf-py/gguf/scripts/gguf_new_metadata.py b/gguf-py/gguf/scripts/gguf_new_metadata.py index fce52a8c1..a8cfc9d58 100755 --- a/gguf-py/gguf/scripts/gguf_new_metadata.py +++ b/gguf-py/gguf/scripts/gguf_new_metadata.py @@ -13,8 +13,8 @@ from tqdm import tqdm from typing import Any, Sequence, NamedTuple # Necessary to load the local gguf package -if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): - sys.path.insert(0, str(Path(__file__).parent.parent)) +if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent.parent / 'gguf-py').exists(): + sys.path.insert(0, str(Path(__file__).parent.parent.parent)) import gguf diff --git a/gguf-py/gguf/scripts/gguf_set_metadata.py b/gguf-py/gguf/scripts/gguf_set_metadata.py index e35b651b8..f5809c35c 100755 --- a/gguf-py/gguf/scripts/gguf_set_metadata.py +++ b/gguf-py/gguf/scripts/gguf_set_metadata.py @@ -6,8 +6,8 @@ import sys from pathlib import Path # Necessary to load the local gguf package -if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists(): - sys.path.insert(0, str(Path(__file__).parent.parent)) +if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent.parent / 'gguf-py').exists(): + sys.path.insert(0, str(Path(__file__).parent.parent.parent)) from gguf import GGUFReader # noqa: E402 diff --git a/gguf-py/pyproject.toml b/gguf-py/pyproject.toml index 92d7f22ec..78c6baa64 100644 --- a/gguf-py/pyproject.toml +++ b/gguf-py/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gguf" -version = "0.14.0" +version = "0.15.0" description = "Read and write ML models in GGUF for GGML" authors = ["GGML "] packages = [ From afa8a9ec9b520137bbd1ca6838cda93ee39baf20 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 12 Jan 2025 11:32:42 +0200 Subject: [PATCH 065/279] llama : add `llama_vocab`, functions -> methods, naming (#11110) * llama : functions -> methods (#11110) * llama : add struct llama_vocab to the API (#11156) ggml-ci * hparams : move vocab params to llama_vocab (#11159) ggml-ci * vocab : more pimpl (#11165) ggml-ci * vocab : minor tokenization optimizations (#11160) ggml-ci Co-authored-by: Diego Devesa * lora : update API names (#11167) ggml-ci * llama : update API names to use correct prefix (#11174) * llama : update API names to use correct prefix ggml-ci * cont ggml-ci * cont ggml-ci * minor [no ci] * vocab : llama_vocab_add_[be]os -> llama_vocab_get_add_[be]os (#11174) ggml-ci * vocab : llama_vocab_n_vocab -> llama_vocab_n_tokens (#11174) ggml-ci --------- Co-authored-by: Diego Devesa --- common/common.cpp | 105 +- common/common.h | 26 +- common/sampling.cpp | 17 +- common/speculative.cpp | 33 +- examples/batched-bench/batched-bench.cpp | 2 +- examples/batched.swift/Sources/main.swift | 6 +- examples/batched/batched.cpp | 10 +- .../convert-llama2c-to-ggml.cpp | 2 +- .../cvector-generator/cvector-generator.cpp | 8 +- examples/embedding/embedding.cpp | 8 +- examples/eval-callback/eval-callback.cpp | 5 +- examples/export-lora/export-lora.cpp | 3 +- examples/gritlm/gritlm.cpp | 19 +- examples/imatrix/imatrix.cpp | 16 +- examples/infill/infill.cpp | 40 +- examples/llama-bench/llama-bench.cpp | 12 +- .../llama/src/main/cpp/llama-android.cpp | 7 +- .../llama.cpp.swift/LibLlama.swift | 6 +- examples/llava/llava-cli.cpp | 9 +- examples/llava/llava.cpp | 4 +- examples/llava/minicpmv-cli.cpp | 8 +- examples/llava/qwen2vl-cli.cpp | 13 +- examples/lookahead/lookahead.cpp | 6 +- examples/lookup/lookup.cpp | 4 +- examples/main/main.cpp | 23 +- examples/parallel/parallel.cpp | 4 +- examples/passkey/passkey.cpp | 8 +- examples/perplexity/perplexity.cpp | 52 +- examples/quantize-stats/quantize-stats.cpp | 2 +- examples/retrieval/retrieval.cpp | 10 +- examples/run/run.cpp | 24 +- examples/save-load-state/save-load-state.cpp | 4 +- examples/server/server.cpp | 74 +- examples/server/utils.hpp | 83 +- examples/simple-chat/simple-chat.cpp | 20 +- examples/simple/simple.cpp | 13 +- .../speculative-simple/speculative-simple.cpp | 4 +- examples/speculative/speculative.cpp | 29 +- examples/tokenize/tokenize.cpp | 8 +- examples/tts/tts.cpp | 26 +- include/llama-cpp.h | 6 +- include/llama.h | 172 +- src/llama-adapter.cpp | 77 +- src/llama-adapter.h | 59 +- src/llama-arch.cpp | 1 + src/llama-arch.h | 1 + src/llama-context.cpp | 14 +- src/llama-context.h | 10 +- src/llama-grammar.cpp | 8 +- src/llama-hparams.h | 2 - src/llama-kv-cache.cpp | 2 +- src/llama-mmap.cpp | 2 +- src/llama-model-loader.cpp | 61 + src/llama-model-loader.h | 4 + src/llama-model.cpp | 4153 ++++++++++++----- src/llama-model.h | 232 +- src/llama-quant.cpp | 14 +- src/llama-sampling.cpp | 58 +- src/llama-sampling.h | 22 +- src/llama-vocab.cpp | 2369 +++++++--- src/llama-vocab.h | 274 +- src/llama.cpp | 2932 +----------- tests/test-autorelease.cpp | 2 +- tests/test-chat-template.cpp | 3 +- tests/test-tokenizer-0.cpp | 2 +- tests/test-tokenizer-1-bpe.cpp | 10 +- tests/test-tokenizer-1-spm.cpp | 8 +- tests/test-tokenizer-random.py | 4 +- 68 files changed, 5855 insertions(+), 5400 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 86e4e1e24..39bfb0c2e 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -857,21 +857,23 @@ struct common_init_result common_init_from_params(common_params & params) { return iparams; } + const llama_vocab * vocab = llama_model_get_vocab(model); + if (params.reranking) { bool ok = true; - if (llama_token_bos(model) == LLAMA_TOKEN_NULL) { - LOG_WRN("%s: warning: model does not have a BOS token, reranking will not work\n", __func__); + if (llama_vocab_bos(vocab) == LLAMA_TOKEN_NULL) { + LOG_WRN("%s: warning: vocab does not have a BOS token, reranking will not work\n", __func__); ok = false; } - if (llama_token_eos(model) == LLAMA_TOKEN_NULL) { - LOG_WRN("%s: warning: model does not have an EOS token, reranking will not work\n", __func__); + if (llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) { + LOG_WRN("%s: warning: vocab does not have an EOS token, reranking will not work\n", __func__); ok = false; } - if (llama_token_sep(model) == LLAMA_TOKEN_NULL) { - LOG_WRN("%s: warning: model does not have a SEP token, reranking will not work\n", __func__); + if (llama_vocab_sep(vocab) == LLAMA_TOKEN_NULL) { + LOG_WRN("%s: warning: vocab does not have a SEP token, reranking will not work\n", __func__); ok = false; } @@ -884,7 +886,7 @@ struct common_init_result common_init_from_params(common_params & params) { auto cparams = common_context_params_to_llama(params); - llama_context * lctx = llama_new_context_with_model(model, cparams); + llama_context * lctx = llama_init_from_model(model, cparams); if (lctx == NULL) { LOG_ERR("%s: failed to create context with model '%s'\n", __func__, params.model.c_str()); llama_model_free(model); @@ -898,7 +900,7 @@ struct common_init_result common_init_from_params(common_params & params) { if (!params.control_vectors.empty()) { if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1; - if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = llama_n_layer(model); + if (params.control_vector_layer_end <= 0) params.control_vector_layer_end = llama_model_n_layer(model); const auto cvec = common_control_vector_load(params.control_vectors); if (cvec.n_embd == -1) { @@ -908,12 +910,13 @@ struct common_init_result common_init_from_params(common_params & params) { return iparams; } - int err = llama_control_vector_apply(lctx, - cvec.data.data(), - cvec.data.size(), - cvec.n_embd, - params.control_vector_layer_start, - params.control_vector_layer_end); + int err = llama_apply_adapter_cvec( + lctx, + cvec.data.data(), + cvec.data.size(), + cvec.n_embd, + params.control_vector_layer_start, + params.control_vector_layer_end); if (err) { llama_free(lctx); llama_model_free(model); @@ -924,8 +927,8 @@ struct common_init_result common_init_from_params(common_params & params) { // load and optionally apply lora adapters for (auto & la : params.lora_adapters) { - llama_lora_adapter_ptr lora; - lora.reset(llama_lora_adapter_init(model, la.path.c_str())); + llama_adapter_lora_ptr lora; + lora.reset(llama_adapter_lora_init(model, la.path.c_str())); if (lora == nullptr) { LOG_ERR("%s: failed to apply lora adapter '%s'\n", __func__, la.path.c_str()); llama_free(lctx); @@ -938,17 +941,17 @@ struct common_init_result common_init_from_params(common_params & params) { } if (!params.lora_init_without_apply) { - common_lora_adapters_apply(lctx, params.lora_adapters); + common_set_adapter_lora(lctx, params.lora_adapters); } - if (params.sampling.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) { - LOG_WRN("%s: warning: model does not have an EOS token, ignoring --ignore-eos\n", __func__); + if (params.sampling.ignore_eos && llama_vocab_eos(vocab) == LLAMA_TOKEN_NULL) { + LOG_WRN("%s: warning: vocab does not have an EOS token, ignoring --ignore-eos\n", __func__); params.sampling.ignore_eos = false; } if (params.sampling.ignore_eos) { - for (llama_token i = 0; i < llama_n_vocab(model); i++) { - if (llama_token_is_eog(model, i)) { + for (llama_token i = 0; i < llama_vocab_n_tokens(vocab); i++) { + if (llama_vocab_is_eog(vocab, i)) { LOG_INF("%s: added %s logit bias = %f\n", __func__, common_token_to_piece(lctx, i).c_str(), -INFINITY); params.sampling.logit_bias.push_back({i, -INFINITY}); } @@ -969,8 +972,9 @@ struct common_init_result common_init_from_params(common_params & params) { LOG_WRN("%s: warming up the model with an empty run - please wait ... (--no-warmup to disable)\n", __func__); std::vector tmp; - llama_token bos = llama_token_bos(model); - llama_token eos = llama_token_eos(model); + llama_token bos = llama_vocab_bos(vocab); + llama_token eos = llama_vocab_eos(vocab); + // some models (e.g. T5) don't have a BOS token if (bos != LLAMA_TOKEN_NULL) { tmp.push_back(bos); @@ -1005,11 +1009,11 @@ struct common_init_result common_init_from_params(common_params & params) { return iparams; } -void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora) { - llama_lora_adapter_clear(ctx); +void common_set_adapter_lora(struct llama_context * ctx, std::vector & lora) { + llama_clear_adapter_lora(ctx); for (auto & la : lora) { if (la.scale != 0.0f) { - llama_lora_adapter_set(ctx, la.ptr, la.scale); + llama_set_adapter_lora(ctx, la.ptr, la.scale); } } } @@ -1559,21 +1563,23 @@ std::vector common_tokenize( const std::string & text, bool add_special, bool parse_special) { - return common_tokenize(llama_get_model(ctx), text, add_special, parse_special); + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + return common_tokenize(vocab, text, add_special, parse_special); } std::vector common_tokenize( - const struct llama_model * model, + const struct llama_vocab * vocab, const std::string & text, bool add_special, bool parse_special) { // upper limit for the number of tokens int n_tokens = text.length() + 2 * add_special; std::vector result(n_tokens); - n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); + n_tokens = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); if (n_tokens < 0) { result.resize(-n_tokens); - int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); + int check = llama_tokenize(vocab, text.data(), text.length(), result.data(), result.size(), add_special, parse_special); GGML_ASSERT(check == -n_tokens); } else { result.resize(n_tokens); @@ -1582,12 +1588,18 @@ std::vector common_tokenize( } std::string common_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) { + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + return common_token_to_piece(vocab, token, special); +} + +std::string common_token_to_piece(const struct llama_vocab * vocab, llama_token token, bool special) { std::string piece; piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\n' - const int n_chars = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special); + const int n_chars = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special); if (n_chars < 0) { piece.resize(-n_chars); - int check = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special); + int check = llama_token_to_piece(vocab, token, &piece[0], piece.size(), 0, special); GGML_ASSERT(check == -n_chars); } else { @@ -1597,13 +1609,19 @@ std::string common_token_to_piece(const struct llama_context * ctx, llama_token return piece; } -std::string common_detokenize(llama_context * ctx, const std::vector & tokens, bool special) { +std::string common_detokenize(const struct llama_context * ctx, const std::vector & tokens, bool special) { + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + return common_detokenize(vocab, tokens, special); +} + +std::string common_detokenize(const struct llama_vocab * vocab, const std::vector & tokens, bool special) { std::string text; text.resize(std::max(text.capacity(), tokens.size())); - int32_t n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); + int32_t n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); if (n_chars < 0) { text.resize(-n_chars); - n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); + n_chars = llama_detokenize(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special); GGML_ASSERT(n_chars <= (int32_t)text.size()); // whitespace trimming is performed after per-token detokenization } @@ -1631,7 +1649,7 @@ std::string common_get_builtin_chat_template(const struct llama_model * model) { bool common_chat_verify_template(const std::string & tmpl) { llama_chat_message chat[] = {{"user", "test"}}; - int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0); + const int res = llama_chat_apply_template(tmpl.c_str(), chat, 1, true, nullptr, 0); return res >= 0; } @@ -1642,16 +1660,16 @@ std::string common_chat_apply_template(const struct llama_model * model, int alloc_size = 0; bool fallback = false; // indicate if we must fallback to default chatml std::vector chat; - for (auto & msg : msgs) { + for (const auto & msg : msgs) { chat.push_back({msg.role.c_str(), msg.content.c_str()}); alloc_size += (msg.role.size() + msg.content.size()) * 1.25; } - const char * ptr_tmpl = tmpl.empty() ? nullptr : tmpl.c_str(); + const char * ptr_tmpl = tmpl.empty() ? llama_model_chat_template(model) : tmpl.c_str(); std::vector buf(alloc_size); // run the first time to get the total output length - int32_t res = llama_chat_apply_template(model, ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size()); + int32_t res = llama_chat_apply_template(ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size()); // error: chat template is not supported if (res < 0) { @@ -1659,18 +1677,17 @@ std::string common_chat_apply_template(const struct llama_model * model, // if the custom "tmpl" is not supported, we throw an error // this is a bit redundant (for good), since we're not sure if user validated the custom template with llama_chat_verify_template() throw std::runtime_error("this custom template is not supported"); - } else { - // If the built-in template is not supported, we default to chatml - res = llama_chat_apply_template(nullptr, "chatml", chat.data(), chat.size(), add_ass, buf.data(), buf.size()); - fallback = true; } + + // If the built-in template is not supported, we default to chatml + res = llama_chat_apply_template("chatml", chat.data(), chat.size(), add_ass, buf.data(), buf.size()); + fallback = true; } // if it turns out that our buffer is too small, we resize it if ((size_t) res > buf.size()) { buf.resize(res); res = llama_chat_apply_template( - fallback ? nullptr : model, fallback ? "chatml" : ptr_tmpl, chat.data(), chat.size(), add_ass, buf.data(), buf.size()); } diff --git a/common/common.h b/common/common.h index 0d452cf0f..d523948b0 100644 --- a/common/common.h +++ b/common/common.h @@ -24,11 +24,11 @@ #define DEFAULT_MODEL_PATH "models/7B/ggml-model-f16.gguf" -struct common_lora_adapter_info { +struct common_adapter_lora_info { std::string path; float scale; - struct llama_lora_adapter * ptr; + struct llama_adapter_lora * ptr; }; using llama_tokens = std::vector; @@ -246,8 +246,8 @@ struct common_params { std::vector antiprompt; // strings upon which more user input is prompted (a.k.a. reverse prompts) std::vector kv_overrides; - bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_lora_adapter_apply) - std::vector lora_adapters; // lora adapter path with user defined scale + bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_adapter_lora_apply) + std::vector lora_adapters; // lora adapter path with user defined scale std::vector control_vectors; // control vector with user defined scale @@ -481,7 +481,7 @@ struct common_init_result { llama_model_ptr model; llama_context_ptr context; - std::vector lora; + std::vector lora; }; struct common_init_result common_init_from_params(common_params & params); @@ -503,7 +503,7 @@ struct llama_model * common_load_model_from_hf( const struct llama_model_params & params); // clear LoRA adapters from context, then apply new list of adapters -void common_lora_adapters_apply(struct llama_context * ctx, std::vector & lora); +void common_set_adapter_lora(struct llama_context * ctx, std::vector & lora); // // Batch utils @@ -541,7 +541,7 @@ std::vector common_tokenize( bool parse_special = false); std::vector common_tokenize( - const struct llama_model * model, + const struct llama_vocab * vocab, const std::string & text, bool add_special, bool parse_special = false); @@ -553,11 +553,21 @@ std::string common_token_to_piece( llama_token token, bool special = true); +std::string common_token_to_piece( + const struct llama_vocab * vocab, + llama_token token, + bool special = true); + // detokenizes a vector of tokens into a string // should work similar to Python's `tokenizer.decode` // optionally renders special/control tokens std::string common_detokenize( - llama_context * ctx, + const struct llama_context * ctx, + const std::vector & tokens, + bool special = true); + +std::string common_detokenize( + const struct llama_vocab * vocab, const std::vector & tokens, bool special = true); diff --git a/common/sampling.cpp b/common/sampling.cpp index e83a971c7..7241ac321 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -113,7 +113,10 @@ struct common_sampler { void set_logits(struct llama_context * ctx, int idx) { const auto * logits = llama_get_logits_ith(ctx, idx); - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + + const int n_vocab = llama_vocab_n_tokens(vocab); cur.resize(n_vocab); @@ -142,13 +145,15 @@ std::string common_params_sampling::print() const { } struct common_sampler * common_sampler_init(const struct llama_model * model, const struct common_params_sampling & params) { + const llama_vocab * vocab = llama_model_get_vocab(model); + llama_sampler_chain_params lparams = llama_sampler_chain_default_params(); lparams.no_perf = params.no_perf; auto * result = new common_sampler { /* .params = */ params, - /* .grmr = */ llama_sampler_init_grammar(model, params.grammar.c_str(), "root"), + /* .grmr = */ llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root"), /* .chain = */ llama_sampler_chain_init(lparams), /* .prev = */ ring_buffer(std::max(32, params.n_prev)), /* .cur = */ {}, @@ -157,7 +162,7 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co llama_sampler_chain_add(result->chain, llama_sampler_init_logit_bias( - llama_n_vocab(model), + llama_vocab_n_tokens(vocab), params.logit_bias.size(), params.logit_bias.data())); @@ -172,7 +177,7 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co c_breakers.push_back(str.c_str()); } - llama_sampler_chain_add(result->chain, llama_sampler_init_dry (model, params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size())); + llama_sampler_chain_add(result->chain, llama_sampler_init_dry (vocab, llama_model_n_ctx_train(model), params.dry_multiplier, params.dry_base, params.dry_allowed_length, params.dry_penalty_last_n, c_breakers.data(), c_breakers.size())); } break; case COMMON_SAMPLER_TYPE_TOP_K: @@ -194,7 +199,7 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co llama_sampler_chain_add(result->chain, llama_sampler_init_temp_ext (params.temp, params.dynatemp_range, params.dynatemp_exponent)); break; case COMMON_SAMPLER_TYPE_INFILL: - llama_sampler_chain_add(result->chain, llama_sampler_init_infill (model)); + llama_sampler_chain_add(result->chain, llama_sampler_init_infill (vocab)); break; case COMMON_SAMPLER_TYPE_PENALTIES: llama_sampler_chain_add(result->chain, llama_sampler_init_penalties(params.penalty_last_n, params.penalty_repeat, params.penalty_freq, params.penalty_present)); @@ -206,7 +211,7 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed)); } else if (params.mirostat == 1) { llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp)); - llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(llama_n_vocab(model), params.seed, params.mirostat_tau, params.mirostat_eta, 100)); + llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(llama_vocab_n_tokens(vocab), params.seed, params.mirostat_tau, params.mirostat_eta, 100)); } else if (params.mirostat == 2) { llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp)); llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta)); diff --git a/common/speculative.cpp b/common/speculative.cpp index 3fcbb0020..318e96ea3 100644 --- a/common/speculative.cpp +++ b/common/speculative.cpp @@ -79,10 +79,13 @@ bool common_speculative_are_compatible( const struct llama_model * model_tgt = llama_get_model(ctx_tgt); const struct llama_model * model_dft = llama_get_model(ctx_dft); - const bool vocab_type_tgt = llama_vocab_type(model_tgt); + const struct llama_vocab * vocab_tgt = llama_model_get_vocab(model_tgt); + const struct llama_vocab * vocab_dft = llama_model_get_vocab(model_dft); + + const bool vocab_type_tgt = llama_vocab_type(vocab_tgt); LOG_DBG("%s: vocab_type tgt: %d\n", __func__, vocab_type_tgt); - const bool vocab_type_dft = llama_vocab_type(model_dft); + const bool vocab_type_dft = llama_vocab_type(vocab_dft); LOG_DBG("%s: vocab_type dft: %d\n", __func__, vocab_type_dft); if (vocab_type_tgt != vocab_type_dft) { @@ -91,34 +94,34 @@ bool common_speculative_are_compatible( return false; } - if (llama_add_bos_token(model_tgt) != llama_add_bos_token(model_dft) || - llama_add_eos_token(model_tgt) != llama_add_eos_token(model_dft) || - llama_token_bos(model_tgt) != llama_token_bos(model_dft) || - llama_token_eos(model_tgt) != llama_token_eos(model_dft)) { - LOG_ERR("%s: draft model special tokens must match target model to use speculation\n", __func__); - LOG_ERR("%s: tgt: bos = %d (%d), eos = %d (%d)\n", __func__, llama_token_bos(model_tgt), llama_add_bos_token(model_tgt), llama_token_eos(model_tgt), llama_add_eos_token(model_tgt)); - LOG_ERR("%s: dft: bos = %d (%d), eos = %d (%d)\n", __func__, llama_token_bos(model_dft), llama_add_bos_token(model_dft), llama_token_eos(model_dft), llama_add_eos_token(model_dft)); + if (llama_vocab_get_add_bos(vocab_tgt) != llama_vocab_get_add_bos(vocab_dft) || + llama_vocab_get_add_eos(vocab_tgt) != llama_vocab_get_add_eos(vocab_dft) || + llama_vocab_bos(vocab_tgt) != llama_vocab_bos(vocab_dft) || + llama_vocab_eos(vocab_tgt) != llama_vocab_eos(vocab_dft)) { + LOG_ERR("%s: draft vocab special tokens must match target vocab to use speculation\n", __func__); + LOG_ERR("%s: tgt: bos = %d (%d), eos = %d (%d)\n", __func__, llama_vocab_bos(vocab_tgt), llama_vocab_get_add_bos(vocab_tgt), llama_vocab_eos(vocab_tgt), llama_vocab_get_add_eos(vocab_tgt)); + LOG_ERR("%s: dft: bos = %d (%d), eos = %d (%d)\n", __func__, llama_vocab_bos(vocab_dft), llama_vocab_get_add_bos(vocab_dft), llama_vocab_eos(vocab_dft), llama_vocab_get_add_eos(vocab_dft)); return false; } { - const int n_vocab_tgt = llama_n_vocab(model_tgt); - const int n_vocab_dft = llama_n_vocab(model_dft); + const int n_vocab_tgt = llama_vocab_n_tokens(vocab_tgt); + const int n_vocab_dft = llama_vocab_n_tokens(vocab_dft); const int vocab_diff = std::abs(n_vocab_tgt - n_vocab_dft); if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) { LOG_ERR("%s: draft model vocab must closely match target model to use speculation but " "target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n", - __func__, n_vocab_tgt, llama_n_vocab(model_dft), vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE); + __func__, n_vocab_tgt, llama_vocab_n_tokens(vocab_dft), vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE); return false; } for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) { - const char * token_text_tgt = llama_token_get_text(model_tgt, i); - const char * token_text_dft = llama_token_get_text(model_dft, i); + const char * token_text_tgt = llama_vocab_get_text(vocab_tgt, i); + const char * token_text_dft = llama_vocab_get_text(vocab_dft, i); if (std::strcmp(token_text_tgt, token_text_dft) != 0) { - LOG_ERR("%s: draft model vocab must match target model to use speculation but " + LOG_ERR("%s: draft vocab vocab must match target vocab to use speculation but " "token %d content differs - target '%s', draft '%s'\n", __func__, i, common_token_to_piece(ctx_tgt, i).c_str(), common_token_to_piece(ctx_dft, i).c_str()); diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp index dd75ff9f1..0659ab6f1 100644 --- a/examples/batched-bench/batched-bench.cpp +++ b/examples/batched-bench/batched-bench.cpp @@ -50,7 +50,7 @@ int main(int argc, char ** argv) { // ensure enough sequences are available ctx_params.n_seq_max = n_pl.empty() ? 1 : *std::max_element(n_pl.begin(), n_pl.end()); - llama_context * ctx = llama_new_context_with_model(model, ctx_params); + llama_context * ctx = llama_init_from_model(model, ctx_params); if (ctx == NULL) { fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); diff --git a/examples/batched.swift/Sources/main.swift b/examples/batched.swift/Sources/main.swift index 10f2e7fd1..371917b2e 100644 --- a/examples/batched.swift/Sources/main.swift +++ b/examples/batched.swift/Sources/main.swift @@ -23,12 +23,12 @@ defer { } let model_params = llama_model_default_params() -guard let model = llama_load_model_from_file(modelPath.cString(using: .utf8), model_params) else { +guard let model = llama_model_load_from_file(modelPath.cString(using: .utf8), model_params) else { print("Failed to load model") exit(1) } defer { - llama_free_model(model) + llama_model_free(model) } var tokens = tokenize(text: prompt, add_bos: true) @@ -141,7 +141,7 @@ while n_cur <= n_len { let new_token_id = llama_sampler_sample(smpl, context, i_batch[i]) // is it an end of stream? -> mark the stream as finished - if llama_token_is_eog(model, new_token_id) || n_cur == n_len { + if llama_vocab_is_eog(model, new_token_id) || n_cur == n_len { i_batch[i] = -1 // print("") if n_parallel > 1 { diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp index d34b03099..21b95ef5e 100644 --- a/examples/batched/batched.cpp +++ b/examples/batched/batched.cpp @@ -48,10 +48,12 @@ int main(int argc, char ** argv) { return 1; } + const llama_vocab * vocab = llama_model_get_vocab(model); + // tokenize the prompt std::vector tokens_list; - tokens_list = common_tokenize(model, params.prompt, true); + tokens_list = common_tokenize(vocab, params.prompt, true); const int n_kv_req = tokens_list.size() + (n_predict - tokens_list.size())*n_parallel; @@ -62,7 +64,7 @@ int main(int argc, char ** argv) { ctx_params.n_ctx = n_kv_req; ctx_params.n_batch = std::max(n_predict, n_parallel); - llama_context * ctx = llama_new_context_with_model(model, ctx_params); + llama_context * ctx = llama_init_from_model(model, ctx_params); auto sparams = llama_sampler_chain_default_params(); sparams.no_perf = false; @@ -121,7 +123,7 @@ int main(int argc, char ** argv) { llama_token decoder_start_token_id = llama_model_decoder_start_token(model); if (decoder_start_token_id == LLAMA_TOKEN_NULL) { - decoder_start_token_id = llama_token_bos(model); + decoder_start_token_id = llama_vocab_bos(vocab); } common_batch_clear(batch); @@ -174,7 +176,7 @@ int main(int argc, char ** argv) { const llama_token new_token_id = llama_sampler_sample(smpl, ctx, i_batch[i]); // is it an end of generation? -> mark the stream as finished - if (llama_token_is_eog(model, new_token_id) || n_cur == n_predict) { + if (llama_vocab_is_eog(vocab, new_token_id) || n_cur == n_predict) { i_batch[i] = -1; LOG("\n"); if (n_parallel > 1) { diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp index 1256abb17..bdf0eed2a 100644 --- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp +++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp @@ -911,7 +911,7 @@ int main(int argc, char ** argv) { load_vocab(params.fn_vocab_model, &config, &vocab); struct my_llama_model model; - model.hparams.n_vocab = config.vocab_size; //llama_n_vocab(lctx); + model.hparams.n_vocab = config.vocab_size; //llama_vocab_n_vocab(lctx); model.hparams.n_ctx = params.n_ctx; model.hparams.n_embd = config.dim; //params.n_embd; model.hparams.n_ff = config.hidden_dim; diff --git a/examples/cvector-generator/cvector-generator.cpp b/examples/cvector-generator/cvector-generator.cpp index e899c1078..413b71d34 100644 --- a/examples/cvector-generator/cvector-generator.cpp +++ b/examples/cvector-generator/cvector-generator.cpp @@ -273,7 +273,9 @@ struct tokenized_prompt { size_t max_seq_len; tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) { - const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + const bool add_bos = llama_vocab_get_add_bos(vocab); tokens_pos = common_tokenize(ctx, pos, add_bos, true); tokens_neg = common_tokenize(ctx, neg, add_bos, true); max_seq_len = std::max(tokens_pos.size(), tokens_neg.size()); @@ -421,8 +423,8 @@ int main(int argc, char ** argv) { llama_context * ctx = llama_init.context.get(); // int n_ctx = llama_n_ctx(ctx); - int n_layers = llama_n_layer(model); - int n_embd = llama_n_embd(model); + int n_layers = llama_model_n_layer(model); + int n_embd = llama_model_n_embd(model); // get model hint param (a.k.a model arch name) char model_hint[128]; diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index 27f75cb77..38d22c90f 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -105,7 +105,9 @@ int main(int argc, char ** argv) { return 1; } - const int n_ctx_train = llama_n_ctx_train(model); + const llama_vocab * vocab = llama_model_get_vocab(model); + + const int n_ctx_train = llama_model_n_ctx_train(model); const int n_ctx = llama_n_ctx(ctx); const enum llama_pooling_type pooling_type = llama_pooling_type(ctx); @@ -148,7 +150,7 @@ int main(int argc, char ** argv) { // check if the last token is SEP // it should be automatically added by the tokenizer when 'tokenizer.ggml.add_eos_token' is set to 'true' for (auto & inp : inputs) { - if (inp.empty() || inp.back() != llama_token_sep(model)) { + if (inp.empty() || inp.back() != llama_vocab_sep(vocab)) { LOG_WRN("%s: last token in the prompt is not SEP\n", __func__); LOG_WRN("%s: 'tokenizer.ggml.add_eos_token' should be set to 'true' in the GGUF header\n", __func__); } @@ -181,7 +183,7 @@ int main(int argc, char ** argv) { } // allocate output - const int n_embd = llama_n_embd(model); + const int n_embd = llama_model_n_embd(model); std::vector embeddings(n_embd_count * n_embd, 0); float * emb = embeddings.data(); diff --git a/examples/eval-callback/eval-callback.cpp b/examples/eval-callback/eval-callback.cpp index 2111c3cda..fb188f5a9 100644 --- a/examples/eval-callback/eval-callback.cpp +++ b/examples/eval-callback/eval-callback.cpp @@ -127,7 +127,10 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) { } static bool run(llama_context * ctx, const common_params & params) { - const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + + const bool add_bos = llama_vocab_get_add_bos(vocab); std::vector tokens = common_tokenize(ctx, params.prompt, add_bos); diff --git a/examples/export-lora/export-lora.cpp b/examples/export-lora/export-lora.cpp index d5dcd20a0..99063b5d5 100644 --- a/examples/export-lora/export-lora.cpp +++ b/examples/export-lora/export-lora.cpp @@ -8,7 +8,6 @@ #include #include #include -#include #include static bool g_verbose = false; @@ -130,7 +129,7 @@ struct lora_merge_ctx { lora_merge_ctx( std::string & base_fname, - std::vector & lora_files, + std::vector & lora_files, std::string & outfile, int n_threads) : base_model(base_fname, 0), n_threads(n_threads), fout(outfile, std::ios::binary) { fout.exceptions(std::ofstream::failbit); // fail fast on write errors diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp index 4d2db5624..72eb46257 100644 --- a/examples/gritlm/gritlm.cpp +++ b/examples/gritlm/gritlm.cpp @@ -11,6 +11,7 @@ static std::vector> encode(llama_context * ctx, const std::ve std::vector> result; const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); llama_batch batch = llama_batch_init(llama_n_batch(ctx), 0, 1); @@ -19,16 +20,16 @@ static std::vector> encode(llama_context * ctx, const std::ve const std::string input_string = instruction + sentences[i]; - std::vector inputs = common_tokenize(model, input_string, true, false); + std::vector inputs = common_tokenize(vocab, input_string, true, false); const int32_t n_toks = inputs.size(); // GritLM seems to have EOS = "" // https://github.com/ContextualAI/gritlm/blob/92025b16534712b31b3c4aaaf069350e222bd5f8/gritlm/gritlm.py#L18 - // inputs.push_back(llama_token_eos(model)); + // inputs.push_back(llama_vocab_eos(vocab)); // we want to ignore instruction tokens for mean pooling - const int32_t n_inst = common_tokenize(model, instruction, true, false).size(); + const int32_t n_inst = common_tokenize(vocab, instruction, true, false).size(); #ifdef GRIT_DEBUG // debug tokens - should be matching as referenced in the GritLM sample @@ -52,7 +53,7 @@ static std::vector> encode(llama_context * ctx, const std::ve llama_decode(ctx, batch); // get embedding dimensions - uint64_t n_embd = llama_n_embd(model); + uint64_t n_embd = llama_model_n_embd(model); // allocate embedding output std::vector emb_unorm(n_embd, 0.0f); @@ -97,7 +98,9 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std std::string result; const llama_model * model = llama_get_model(ctx); - llama_token eos_token = llama_token_eos(model); + const llama_vocab * vocab = llama_model_get_vocab(model); + + llama_token eos_token = llama_vocab_eos(vocab); llama_kv_cache_clear(ctx); llama_set_embeddings(ctx, false); @@ -105,7 +108,7 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1); - std::vector inputs = common_tokenize(model, prompt, false, true); + std::vector inputs = common_tokenize(vocab, prompt, false, true); int32_t i_current_token = 0; while (true) { @@ -168,7 +171,7 @@ int main(int argc, char * argv[]) { llama_model * model = llama_model_load_from_file(params.model.c_str(), mparams); // create generation context - llama_context * ctx = llama_new_context_with_model(model, cparams); + llama_context * ctx = llama_init_from_model(model, cparams); auto sparams = llama_sampler_chain_default_params(); @@ -197,7 +200,7 @@ int main(int argc, char * argv[]) { const std::vector> d_rep = encode(ctx, documents, gritlm_instruction("")); const std::vector> q_rep = encode(ctx, queries, gritlm_instruction(instruction)); - const int n_embd = llama_n_embd(model); + const int n_embd = llama_model_n_embd(model); const float cosine_sim_q0_d0 = common_embd_similarity_cos(q_rep[0].data(), d_rep[0].data(), n_embd); const float cosine_sim_q0_d1 = common_embd_similarity_cos(q_rep[0].data(), d_rep[1].data(), n_embd); diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp index 588114ecd..b5f3feb9f 100644 --- a/examples/imatrix/imatrix.cpp +++ b/examples/imatrix/imatrix.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include #include @@ -40,7 +39,7 @@ public: void set_params(common_params params) { m_params = std::move(params); } bool collect_imatrix(struct ggml_tensor * t, bool ask, void * user_data); void save_imatrix(int ncall = -1) const; - bool load_imatrix(const char * file_name); + bool load_imatrix(const char * fname); private: std::unordered_map m_stats; common_params m_params; @@ -429,10 +428,13 @@ static void process_logits( } static bool compute_imatrix(llama_context * ctx, const common_params & params) { - const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + + const bool add_bos = llama_vocab_get_add_bos(vocab); const int n_ctx = llama_n_ctx(ctx); - GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx))); + GGML_ASSERT(!llama_vocab_get_add_eos(vocab)); auto tim1 = std::chrono::high_resolution_clock::now(); LOG_INF("%s: tokenizing the input ..\n", __func__); @@ -468,7 +470,7 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) { const int n_chunk_max = tokens.size() / n_ctx; const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max); - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const int n_vocab = llama_vocab_n_tokens(vocab); const int n_batch = params.n_batch; int count = 0; @@ -508,7 +510,7 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) { // add BOS token for the first batch of each chunk if (add_bos && j == 0) { - tokens[batch_start] = llama_token_bos(llama_get_model(ctx)); + tokens[batch_start] = llama_vocab_bos(vocab); } common_batch_clear(batch); @@ -627,7 +629,7 @@ int main(int argc, char ** argv) { return 1; } - const int n_ctx_train = llama_n_ctx_train(model); + const int n_ctx_train = llama_model_n_ctx_train(model); if (params.n_ctx > n_ctx_train) { LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n", __func__, n_ctx_train, params.n_ctx); diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp index d460be314..489a208b6 100644 --- a/examples/infill/infill.cpp +++ b/examples/infill/infill.cpp @@ -139,7 +139,9 @@ int main(int argc, char ** argv) { return 1; } - const int n_ctx_train = llama_n_ctx_train(model); + const llama_vocab * vocab = llama_model_get_vocab(model); + + const int n_ctx_train = llama_model_n_ctx_train(model); const int n_ctx = llama_n_ctx(ctx); LOG_DBG("n_ctx: %d\n", n_ctx); @@ -152,28 +154,28 @@ int main(int argc, char ** argv) { LOG_INF("\n"); LOG_INF("%s\n", common_params_get_system_info(params).c_str()); } - const bool add_bos = llama_add_bos_token(model); - GGML_ASSERT(!llama_add_eos_token(model)); + const bool add_bos = llama_vocab_get_add_bos(vocab); + GGML_ASSERT(!llama_vocab_get_add_eos(vocab)); std::vector embd_inp; std::vector embd_end; std::vector inp_pfx = common_tokenize(ctx, params.input_prefix, false); std::vector inp_sfx = common_tokenize(ctx, params.input_suffix, false); - GGML_ASSERT(llama_token_fim_pre(model) >= 0); - GGML_ASSERT(llama_token_fim_suf(model) >= 0); + GGML_ASSERT(llama_vocab_fim_pre(vocab) >= 0); + GGML_ASSERT(llama_vocab_fim_suf(vocab) >= 0); - inp_pfx.insert(inp_pfx.begin(), llama_token_fim_pre(model)); - inp_sfx.insert(inp_sfx.begin(), llama_token_fim_suf(model)); + inp_pfx.insert(inp_pfx.begin(), llama_vocab_fim_pre(vocab)); + inp_sfx.insert(inp_sfx.begin(), llama_vocab_fim_suf(vocab)); embd_inp = params.spm_infill ? inp_sfx : inp_pfx; embd_end = params.spm_infill ? inp_pfx : inp_sfx; if (add_bos) { - embd_inp.insert(embd_inp.begin(), llama_token_bos(model)); + embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab)); } embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end()); - const llama_token middle_token = llama_token_fim_mid(model); + const llama_token middle_token = llama_vocab_fim_mid(vocab); if (middle_token >= 0) { embd_inp.push_back(middle_token); } @@ -185,7 +187,7 @@ int main(int argc, char ** argv) { // Should not run without any tokens if (embd_inp.empty()) { - embd_inp.push_back(llama_token_bos(model)); + embd_inp.push_back(llama_vocab_bos(vocab)); LOG_WRN("embd_inp was considered empty and bos was added: %s\n", string_from(ctx, embd_inp).c_str()); } @@ -420,10 +422,10 @@ int main(int argc, char ** argv) { // if not currently processing queued inputs; if ((int) embd_inp.size() <= n_consumed) { // deal with eot token in infill mode - if ((common_sampler_last(smpl) == llama_token_eot(model) || is_interacting) && params.interactive){ + if ((common_sampler_last(smpl) == llama_vocab_eot(vocab) || is_interacting) && params.interactive){ if (is_interacting && !params.interactive_first) { // print an eot token - LOG("%s", common_token_to_piece(ctx, llama_token_eot(model)).c_str()); + LOG("%s", common_token_to_piece(ctx, llama_vocab_eot(vocab)).c_str()); } LOG("\n"); console::set_display(console::user_input); @@ -463,13 +465,13 @@ int main(int argc, char ** argv) { std::vector inp_pfx = common_tokenize(ctx, params.input_prefix, false); std::vector inp_sfx = common_tokenize(ctx, params.input_suffix, false); - inp_pfx.insert(inp_pfx.begin(), llama_token_fim_pre(model)); - inp_sfx.insert(inp_sfx.begin(), llama_token_fim_suf(model)); + inp_pfx.insert(inp_pfx.begin(), llama_vocab_fim_pre(vocab)); + inp_sfx.insert(inp_sfx.begin(), llama_vocab_fim_suf(vocab)); embd_inp = params.spm_infill ? inp_sfx : inp_pfx; embd_end = params.spm_infill ? inp_pfx : inp_sfx; if (add_bos) { - embd_inp.insert(embd_inp.begin(), llama_token_bos(model)); + embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab)); } embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end()); @@ -484,7 +486,7 @@ int main(int argc, char ** argv) { is_interacting = false; } // deal with end of generation tokens in interactive mode - else if (llama_token_is_eog(model, common_sampler_last(smpl))) { + else if (llama_vocab_is_eog(vocab, common_sampler_last(smpl))) { LOG_DBG("found EOS token\n"); if (params.interactive) { @@ -500,7 +502,7 @@ int main(int argc, char ** argv) { if (params.input_prefix_bos) { LOG_DBG("adding input prefix BOS token\n"); - embd_inp.push_back(llama_token_bos(model)); + embd_inp.push_back(llama_vocab_bos(vocab)); } std::string buffer; @@ -563,7 +565,7 @@ int main(int argc, char ** argv) { } // end of generation - if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !params.interactive) { + if (!embd.empty() && llama_vocab_is_eog(vocab, embd.back()) && !params.interactive) { break; } @@ -575,7 +577,7 @@ int main(int argc, char ** argv) { } } if (!params.interactive && n_remain <= 0) { - LOG("%s", common_token_to_piece(ctx, llama_token_eot(model)).c_str()); + LOG("%s", common_token_to_piece(ctx, llama_vocab_eot(vocab)).c_str()); } LOG("\n"); diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 2a0916766..a3b4c5ac8 100644 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -1401,7 +1401,8 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_th llama_set_n_threads(ctx, n_threads, n_threads); const llama_model * model = llama_get_model(ctx); - const int32_t n_vocab = llama_n_vocab(model); + const llama_vocab * vocab = llama_model_get_vocab(model); + const int32_t n_vocab = llama_vocab_n_tokens(vocab); std::vector tokens(n_batch); @@ -1409,7 +1410,7 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_th while (n_processed < n_prompt) { int n_tokens = std::min(n_prompt - n_processed, n_batch); - tokens[0] = n_processed == 0 && llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab; + tokens[0] = n_processed == 0 && llama_vocab_get_add_bos(vocab) ? llama_vocab_bos(vocab) : std::rand() % n_vocab; for (int i = 1; i < n_tokens; i++) { tokens[i] = std::rand() % n_vocab; } @@ -1424,9 +1425,10 @@ static void test_gen(llama_context * ctx, int n_gen, int n_threads) { llama_set_n_threads(ctx, n_threads, n_threads); const llama_model * model = llama_get_model(ctx); - const int32_t n_vocab = llama_n_vocab(model); + const llama_vocab * vocab = llama_model_get_vocab(model); + const int32_t n_vocab = llama_vocab_n_tokens(vocab); - llama_token token = llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab; + llama_token token = llama_vocab_get_add_bos(vocab) ? llama_vocab_bos(vocab) : std::rand() % n_vocab; for (int i = 0; i < n_gen; i++) { llama_decode(ctx, llama_batch_get_one(&token, 1)); @@ -1537,7 +1539,7 @@ int main(int argc, char ** argv) { prev_inst = &inst; } - llama_context * ctx = llama_new_context_with_model(lmodel, inst.to_llama_cparams()); + llama_context * ctx = llama_init_from_model(lmodel, inst.to_llama_cparams()); if (ctx == NULL) { fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, inst.model.c_str()); llama_model_free(lmodel); diff --git a/examples/llama.android/llama/src/main/cpp/llama-android.cpp b/examples/llama.android/llama/src/main/cpp/llama-android.cpp index 66ec2aeeb..99b14961d 100644 --- a/examples/llama.android/llama/src/main/cpp/llama-android.cpp +++ b/examples/llama.android/llama/src/main/cpp/llama-android.cpp @@ -87,7 +87,7 @@ Java_android_llama_cpp_LLamaAndroid_load_1model(JNIEnv *env, jobject, jstring fi auto path_to_model = env->GetStringUTFChars(filename, 0); LOGi("Loading model from %s", path_to_model); - auto model = llama_load_model_from_file(path_to_model, model_params); + auto model = llama_model_load_from_file(path_to_model, model_params); env->ReleaseStringUTFChars(filename, path_to_model); if (!model) { @@ -102,7 +102,7 @@ Java_android_llama_cpp_LLamaAndroid_load_1model(JNIEnv *env, jobject, jstring fi extern "C" JNIEXPORT void JNICALL Java_android_llama_cpp_LLamaAndroid_free_1model(JNIEnv *, jobject, jlong model) { - llama_free_model(reinterpret_cast(model)); + llama_model_free(reinterpret_cast(model)); } extern "C" @@ -405,6 +405,7 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop( const auto batch = reinterpret_cast(batch_pointer); const auto sampler = reinterpret_cast(sampler_pointer); const auto model = llama_get_model(context); + const auto vocab = llama_model_get_vocab(model); if (!la_int_var) la_int_var = env->GetObjectClass(intvar_ncur); if (!la_int_var_value) la_int_var_value = env->GetMethodID(la_int_var, "getValue", "()I"); @@ -414,7 +415,7 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop( const auto new_token_id = llama_sampler_sample(sampler, context, -1); const auto n_cur = env->CallIntMethod(intvar_ncur, la_int_var_value); - if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) { + if (llama_vocab_is_eog(vocab, new_token_id) || n_cur == n_len) { return nullptr; } diff --git a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift index 998c673d5..477c3e6f2 100644 --- a/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift +++ b/examples/llama.swiftui/llama.cpp.swift/LibLlama.swift @@ -52,8 +52,8 @@ actor LlamaContext { deinit { llama_sampler_free(sampling) llama_batch_free(batch) + llama_model_free(model) llama_free(context) - llama_free_model(model) llama_backend_free() } @@ -65,7 +65,7 @@ actor LlamaContext { model_params.n_gpu_layers = 0 print("Running on simulator, force use n_gpu_layers = 0") #endif - let model = llama_load_model_from_file(path, model_params) + let model = llama_model_load_from_file(path, model_params) guard let model else { print("Could not load model at \(path)") throw LlamaError.couldNotInitializeContext @@ -151,7 +151,7 @@ actor LlamaContext { new_token_id = llama_sampler_sample(sampling, context, batch.n_tokens - 1) - if llama_token_is_eog(model, new_token_id) || n_cur == n_len { + if llama_vocab_is_eog(model, new_token_id) || n_cur == n_len { print("\n") is_done = true let new_token_str = String(cString: temporary_invalid_cchars + [0]) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index 27215a42e..40aa0876f 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -47,8 +47,12 @@ static const char * sample(struct common_sampler * smpl, int * n_past) { const llama_token id = common_sampler_sample(smpl, ctx_llama, -1); common_sampler_accept(smpl, id, true); + + const llama_model * model = llama_get_model(ctx_llama); + const llama_vocab * vocab = llama_model_get_vocab(model); + static std::string ret; - if (llama_token_is_eog(llama_get_model(ctx_llama), id)) { + if (llama_vocab_is_eog(vocab, id)) { ret = "
"; } else { ret = common_token_to_piece(ctx_llama, id); @@ -239,11 +243,10 @@ static struct llava_context * llava_init_context(common_params * params, llama_m auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); - llama_context_params ctx_params = common_context_params_to_llama(*params); ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings - llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); + llama_context * ctx_llama = llama_init_from_model(model, ctx_params); if (ctx_llama == NULL) { LOG_ERR("%s: failed to create the llama_context\n" , __func__); diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index 16f30c56c..c598caf3d 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -384,7 +384,7 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli bool llava_validate_embed_size(const llama_context * ctx_llama, const clip_ctx * ctx_clip) { // make sure that the correct mmproj was used, i.e., compare apples to apples - int n_llama_embd = llama_n_embd(llama_get_model(ctx_llama)); + int n_llama_embd = llama_model_n_embd(llama_get_model(ctx_llama)); auto n_image_embd = clip_n_mmproj_embd(ctx_clip); if (n_image_embd != n_llama_embd) { LOG_ERR("%s: embedding dim of the multimodal projector (%d) is not equal to that of LLaMA (%d). Make sure that you use the correct mmproj file.\n", __func__, n_image_embd, n_llama_embd); @@ -456,7 +456,7 @@ struct llava_embd_batch { }; bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) { - int n_embd = llama_n_embd(llama_get_model(ctx_llama)); + int n_embd = llama_model_n_embd(llama_get_model(ctx_llama)); for (int i = 0; i < image_embed->n_image_pos; i += n_batch) { int n_eval = image_embed->n_image_pos - i; diff --git a/examples/llava/minicpmv-cli.cpp b/examples/llava/minicpmv-cli.cpp index 2342bdd09..38c44e130 100644 --- a/examples/llava/minicpmv-cli.cpp +++ b/examples/llava/minicpmv-cli.cpp @@ -54,7 +54,7 @@ static struct llava_context * llava_init_context(common_params * params, llama_m ctx_params.n_ctx = params->n_ctx; } - llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); + llama_context * ctx_llama = llama_init_from_model(model, ctx_params); if (ctx_llama == NULL) { LOG_ERR("%s: failed to create the llama_context\n" , __func__); @@ -167,8 +167,12 @@ static const char * sample(struct common_sampler * smpl, int * n_past) { const llama_token id = common_sampler_sample(smpl, ctx_llama, -1); common_sampler_accept(smpl, id, true); + + const llama_model * model = llama_get_model(ctx_llama); + const llama_vocab * vocab = llama_model_get_vocab(model); + static std::string ret; - if (llama_token_is_eog(llama_get_model(ctx_llama), id)) { + if (llama_vocab_is_eog(vocab, id)) { ret = "
"; } else { ret = common_token_to_piece(ctx_llama, id); diff --git a/examples/llava/qwen2vl-cli.cpp b/examples/llava/qwen2vl-cli.cpp index f3e5d66e2..132a7da54 100644 --- a/examples/llava/qwen2vl-cli.cpp +++ b/examples/llava/qwen2vl-cli.cpp @@ -27,7 +27,7 @@ static bool qwen2vl_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past, int * st_pos_id, struct clip_image_size * image_size) { - int n_embd = llama_n_embd(llama_get_model(ctx_llama)); + int n_embd = llama_model_n_embd(llama_get_model(ctx_llama)); const int patch_size = 14 * 2; const int ph = image_size->height / patch_size + (image_size->height % patch_size > 0); const int pw = image_size->width / patch_size + (image_size->width % patch_size > 0); @@ -132,8 +132,12 @@ static const char * sample(struct common_sampler * smpl, int * n_past, int * st_pos_id) { const llama_token id = common_sampler_sample(smpl, ctx_llama, -1); common_sampler_accept(smpl, id, true); + + const llama_model * model = llama_get_model(ctx_llama); + const llama_vocab * vocab = llama_model_get_vocab(model); + static std::string ret; - if (llama_token_is_eog(llama_get_model(ctx_llama), id)) { + if (llama_vocab_is_eog(vocab, id)) { ret = "
"; } else { ret = common_token_to_piece(ctx_llama, id); @@ -328,11 +332,10 @@ static struct llava_context * llava_init_context(common_params * params, llama_m auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1); - llama_context_params ctx_params = common_context_params_to_llama(*params); ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings - llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params); + llama_context * ctx_llama = llama_init_from_model(model, ctx_params); if (ctx_llama == NULL) { LOG_ERR("%s: failed to create the llama_context\n" , __func__); @@ -481,7 +484,7 @@ static void debug_test_mrope_2d() { } static void debug_dump_img_embed(struct llava_context * ctx_llava) { - int n_embd = llama_n_embd(llama_get_model(ctx_llava->ctx_llama)); + int n_embd = llama_model_n_embd(llama_get_model(ctx_llava->ctx_llama)); int ne = n_embd * 4; float vals[56 * 56 * 3]; // float embd[ne]; diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp index e016618e3..2f0898e62 100644 --- a/examples/lookahead/lookahead.cpp +++ b/examples/lookahead/lookahead.cpp @@ -61,6 +61,8 @@ int main(int argc, char ** argv) { llama_model * model = llama_init.model.get(); llama_context * ctx = llama_init.context.get(); + const llama_vocab * vocab = llama_model_get_vocab(model); + // Tokenize the prompt std::vector inp; std::vector all; @@ -147,7 +149,7 @@ int main(int argc, char ** argv) { } // here we keep adding new n-grams as we go - ngram_container ngrams_observed(llama_n_vocab(model), N, G); + ngram_container ngrams_observed(llama_vocab_n_tokens(vocab), N, G); // debug struct llama_kv_cache_view kvc_view = llama_kv_cache_view_init(ctx, W + G + 1); @@ -297,7 +299,7 @@ int main(int argc, char ** argv) { } fflush(stdout); - if (llama_token_is_eog(model, id)) { + if (llama_vocab_is_eog(vocab, id)) { has_eos = true; } diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp index 0d68b80b9..dbd0444ec 100644 --- a/examples/lookup/lookup.cpp +++ b/examples/lookup/lookup.cpp @@ -36,6 +36,8 @@ int main(int argc, char ** argv){ llama_model * model = llama_init.model.get(); llama_context * ctx = llama_init.context.get(); + const llama_vocab * vocab = llama_model_get_vocab(model); + // tokenize the prompt std::vector inp; inp = common_tokenize(ctx, params.prompt, true, true); @@ -136,7 +138,7 @@ int main(int argc, char ** argv){ LOG("%s", token_str.c_str()); } - if (llama_token_is_eog(model, id)) { + if (llama_vocab_is_eog(vocab, id)) { has_eos = true; } diff --git a/examples/main/main.cpp b/examples/main/main.cpp index aaee47e32..640b35c1d 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -5,7 +5,6 @@ #include "sampling.h" #include "llama.h" -#include #include #include #include @@ -163,6 +162,8 @@ int main(int argc, char ** argv) { return 1; } + const llama_vocab * vocab = llama_model_get_vocab(model); + LOG_INF("%s: llama threadpool init, n_threads = %d\n", __func__, (int) params.cpuparams.n_threads); auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU)); @@ -196,7 +197,7 @@ int main(int argc, char ** argv) { llama_attach_threadpool(ctx, threadpool, threadpool_batch); - const int n_ctx_train = llama_n_ctx_train(model); + const int n_ctx_train = llama_model_n_ctx_train(model); const int n_ctx = llama_n_ctx(ctx); if (n_ctx > n_ctx_train) { @@ -241,9 +242,9 @@ int main(int argc, char ** argv) { } } - const bool add_bos = llama_add_bos_token(model); + const bool add_bos = llama_vocab_get_add_bos(vocab); if (!llama_model_has_encoder(model)) { - GGML_ASSERT(!llama_add_eos_token(model)); + GGML_ASSERT(!llama_vocab_get_add_eos(vocab)); } LOG_DBG("n_ctx: %d, add_bos: %d\n", n_ctx, add_bos); @@ -269,7 +270,7 @@ int main(int argc, char ** argv) { // Should not run without any tokens if (embd_inp.empty()) { if (add_bos) { - embd_inp.push_back(llama_token_bos(model)); + embd_inp.push_back(llama_vocab_bos(vocab)); LOG_WRN("embd_inp was considered empty and bos was added: %s\n", string_from(ctx, embd_inp).c_str()); } else { LOG_ERR("input is empty\n"); @@ -495,7 +496,7 @@ int main(int argc, char ** argv) { llama_token decoder_start_token_id = llama_model_decoder_start_token(model); if (decoder_start_token_id == LLAMA_TOKEN_NULL) { - decoder_start_token_id = llama_token_bos(model); + decoder_start_token_id = llama_vocab_bos(vocab); } embd_inp.clear(); @@ -742,7 +743,7 @@ int main(int argc, char ** argv) { } // deal with end of generation tokens in interactive mode - if (llama_token_is_eog(model, common_sampler_last(smpl))) { + if (llama_vocab_is_eog(vocab, common_sampler_last(smpl))) { LOG_DBG("found an EOG token\n"); if (params.interactive) { @@ -776,7 +777,7 @@ int main(int argc, char ** argv) { if (params.input_prefix_bos) { LOG_DBG("adding input prefix BOS token\n"); - embd_inp.push_back(llama_token_bos(model)); + embd_inp.push_back(llama_vocab_bos(vocab)); } std::string buffer; @@ -830,8 +831,8 @@ int main(int argc, char ** argv) { // if user stop generation mid-way, we must add EOT to finish model's last response if (need_insert_eot && format_chat) { - llama_token eot = llama_token_eot(model); - embd_inp.push_back(eot == LLAMA_TOKEN_NULL ? llama_token_eos(model) : eot); + llama_token eot = llama_vocab_eot(vocab); + embd_inp.push_back(eot == LLAMA_TOKEN_NULL ? llama_vocab_eos(vocab) : eot); need_insert_eot = false; } @@ -866,7 +867,7 @@ int main(int argc, char ** argv) { } // end of generation - if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !(params.interactive)) { + if (!embd.empty() && llama_vocab_is_eog(vocab, embd.back()) && !(params.interactive)) { LOG(" [end of text]\n"); break; } diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp index d48f51975..7ef43d5e1 100644 --- a/examples/parallel/parallel.cpp +++ b/examples/parallel/parallel.cpp @@ -135,6 +135,8 @@ int main(int argc, char ** argv) { llama_model * model = llama_init.model.get(); llama_context * ctx = llama_init.context.get(); + const llama_vocab * vocab = llama_model_get_vocab(model); + // load the prompts from an external file if there are any if (params.prompt.empty()) { LOG_INF("\033[32mNo new questions so proceed with build-in defaults.\033[0m\n"); @@ -358,7 +360,7 @@ int main(int argc, char ** argv) { // client.id, client.seq_id, id, client.n_decoded, client.i_batch, token_str.c_str()); if (client.n_decoded > 2 && - (llama_token_is_eog(model, id) || + (llama_vocab_is_eog(vocab, id) || (params.n_predict > 0 && client.n_decoded + client.n_prompt >= params.n_predict) || client.response.find("User:") != std::string::npos || client.response.find('\n') != std::string::npos)) { diff --git a/examples/passkey/passkey.cpp b/examples/passkey/passkey.cpp index ea91f376c..5953928d4 100644 --- a/examples/passkey/passkey.cpp +++ b/examples/passkey/passkey.cpp @@ -70,15 +70,17 @@ int main(int argc, char ** argv) { return 1; } + const llama_vocab * vocab = llama_model_get_vocab(model); + // initialize the context llama_context_params ctx_params = common_context_params_to_llama(params); - ctx_params.n_ctx = llama_n_ctx_train(model)*n_grp + n_keep; + ctx_params.n_ctx = llama_model_n_ctx_train(model)*n_grp + n_keep; GGML_ASSERT(ctx_params.n_batch % n_grp == 0 && "n_batch must be divisible by n_grp"); - llama_context * ctx = llama_new_context_with_model(model, ctx_params); + llama_context * ctx = llama_init_from_model(model, ctx_params); if (ctx == NULL) { LOG_ERR("%s: failed to create the llama_context\n" , __func__); return 1; @@ -223,7 +225,7 @@ int main(int argc, char ** argv) { const llama_token new_token_id = llama_sampler_sample(smpl, ctx, batch.n_tokens - 1); // is it an end of generation? - if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) { + if (llama_vocab_is_eog(vocab, new_token_id) || n_cur == n_len) { LOG("\n"); break; diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp index 6bdc57f8e..9bf6c5743 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -296,8 +296,11 @@ static results_perplexity perplexity_v2(llama_context * ctx, const common_params // Output: `perplexity: 13.5106 [114/114]` // BOS tokens will be added for each chunk before eval - const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); - GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx))); + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + + const bool add_bos = llama_vocab_get_add_bos(vocab); + GGML_ASSERT(!llama_vocab_get_add_eos(vocab)); LOG_INF("%s: tokenizing the input ..\n", __func__); @@ -338,7 +341,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const common_params const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max); const int n_batch = params.n_batch; - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const int n_vocab = llama_vocab_n_tokens(vocab); int count = 0; double nll = 0.0; @@ -382,7 +385,7 @@ static results_perplexity perplexity_v2(llama_context * ctx, const common_params // add BOS token for the first batch of each chunk if (add_bos && j == 0) { - tokens[batch_start] = llama_token_bos(llama_get_model(ctx)); + tokens[batch_start] = llama_vocab_bos(vocab); } const auto * batch_logits = llama_get_logits(ctx); @@ -444,8 +447,11 @@ static results_perplexity perplexity(llama_context * ctx, const common_params & // Output: `perplexity: 13.5106 [114/114]` // BOS tokens will be added for each chunk before eval - const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); - GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx))); + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + + const bool add_bos = llama_vocab_get_add_bos(vocab); + GGML_ASSERT(!llama_vocab_get_add_eos(vocab)); std::ofstream logits_stream; if (!params.logits_file.empty()) { @@ -485,7 +491,7 @@ static results_perplexity perplexity(llama_context * ctx, const common_params & const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max); const int n_batch = params.n_batch; - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const int n_vocab = llama_vocab_n_tokens(vocab); int count = 0; double nll = 0.0; @@ -557,7 +563,7 @@ static results_perplexity perplexity(llama_context * ctx, const common_params & // add BOS token for the first batch of each chunk if (add_bos && j == 0) { - tokens[seq_start] = llama_token_bos(llama_get_model(ctx)); + tokens[seq_start] = llama_vocab_bos(vocab); } for (int k = 0; k < batch_size; ++k) { @@ -732,6 +738,9 @@ static void compute_logprobs(const float * batch_logits, int n_vocab, std::vecto } static void hellaswag_score(llama_context * ctx, const common_params & params) { + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + // Calculates hellaswag score (acc_norm) from prompt // // Data extracted from the HellaSwag validation dataset (MIT license) https://github.com/rowanz/hellaswag/blob/master/data/hellaswag_val.jsonl @@ -765,7 +774,7 @@ static void hellaswag_score(llama_context * ctx, const common_params & params) { size_t hs_task_count = prompt_lines.size()/6; LOG_INF("%s : loaded %zu tasks from prompt.\n", __func__, hs_task_count); - const bool is_spm = llama_vocab_type(llama_get_model(ctx)) == LLAMA_VOCAB_TYPE_SPM; + const bool is_spm = llama_vocab_type(vocab) == LLAMA_VOCAB_TYPE_SPM; LOG_INF("================================= is_spm = %d\n", is_spm); // The tasks should be randomized so the score stabilizes quickly. @@ -848,7 +857,7 @@ static void hellaswag_score(llama_context * ctx, const common_params & params) { const int n_ctx = llama_n_ctx(ctx); const int n_batch = params.n_batch; - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const int n_vocab = llama_vocab_n_tokens(vocab); const int max_tasks_per_batch = 32; const int max_seq = std::min(4*max_tasks_per_batch, (int) llama_n_seq_max(ctx)); @@ -1072,6 +1081,8 @@ static std::vector load_winogrande_from_csv(const std::string * */ static void winogrande_score(llama_context * ctx, const common_params & params) { + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); constexpr int k_min_trailing_ctx = 3; @@ -1130,7 +1141,7 @@ static void winogrande_score(llama_context * ctx, const common_params & params) const int n_ctx = llama_n_ctx(ctx); const int n_batch = params.n_batch; - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const int n_vocab = llama_vocab_n_tokens(vocab); const int max_tasks_per_batch = 128; const int max_seq = std::min(2*max_tasks_per_batch, (int) llama_n_seq_max(ctx)); @@ -1374,6 +1385,8 @@ static bool multiple_choice_prepare_one_task(llama_context * ctx, multiple_choic // https://huggingface.co/datasets/truthful_qa // static void multiple_choice_score(llama_context * ctx, const common_params & params) { + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); std::istringstream strstream(params.prompt); uint32_t n_task; @@ -1482,7 +1495,7 @@ static void multiple_choice_score(llama_context * ctx, const common_params & par const int n_ctx = llama_n_ctx(ctx); const int n_batch = params.n_batch; - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + const int n_vocab = llama_vocab_n_tokens(vocab); const int max_tasks_per_batch = 32; const int max_seq = std::min(4*max_tasks_per_batch, (int) llama_n_seq_max(ctx)); @@ -1655,6 +1668,9 @@ static void multiple_choice_score(llama_context * ctx, const common_params & par } static void kl_divergence(llama_context * ctx, const common_params & params) { + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + if (params.logits_file.empty()) { LOG_ERR("%s: you must provide a name of a file containing the log probabilities of the base model\n", __func__); return; @@ -1688,8 +1704,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) { LOG_ERR("%s: failed reading n_vocab, n_chunk from %s\n", __func__, params.logits_file.c_str()); return; } - if (n_vocab != llama_n_vocab(llama_get_model(ctx))) { - LOG_ERR("%s: inconsistent vocabulary (%d vs %d)\n", __func__, n_vocab, llama_n_vocab(llama_get_model(ctx))); + if (n_vocab != llama_vocab_n_tokens(vocab)) { + LOG_ERR("%s: inconsistent vocabulary (%d vs %d)\n", __func__, n_vocab, llama_vocab_n_tokens(vocab)); } std::vector tokens(size_t(n_ctx) * n_chunk); @@ -1701,8 +1717,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) { const int n_batch = params.n_batch; const int num_batches = (n_ctx + n_batch - 1)/n_batch; const int nv = 2*((n_vocab + 1)/2) + 4; - const bool add_bos = llama_add_bos_token(llama_get_model(ctx)); - GGML_ASSERT(!llama_add_eos_token(llama_get_model(ctx))); + const bool add_bos = llama_vocab_get_add_bos(vocab); + GGML_ASSERT(!llama_vocab_get_add_eos(vocab)); std::vector log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv); std::vector kld_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk); @@ -1761,7 +1777,7 @@ static void kl_divergence(llama_context * ctx, const common_params & params) { // add BOS token for the first batch of each chunk if (add_bos && j == 0) { - tokens[batch_start] = llama_token_bos(llama_get_model(ctx)); + tokens[batch_start] = llama_vocab_bos(vocab); } common_batch_clear(batch); @@ -1995,7 +2011,7 @@ int main(int argc, char ** argv) { return 1; } - const int n_ctx_train = llama_n_ctx_train(model); + const int n_ctx_train = llama_model_n_ctx_train(model); if (params.n_ctx > n_ctx_train) { LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n", diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp index 9bfbb8862..bd2f73467 100644 --- a/examples/quantize-stats/quantize-stats.cpp +++ b/examples/quantize-stats/quantize-stats.cpp @@ -319,7 +319,7 @@ int main(int argc, char ** argv) { auto cparams = llama_context_default_params(); cparams.n_ctx = 256; - ctx = llama_new_context_with_model(model, cparams); + ctx = llama_init_from_model(model, cparams); if (ctx == NULL) { fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, params.model.c_str()); diff --git a/examples/retrieval/retrieval.cpp b/examples/retrieval/retrieval.cpp index f534b5eff..2439022a2 100644 --- a/examples/retrieval/retrieval.cpp +++ b/examples/retrieval/retrieval.cpp @@ -159,7 +159,9 @@ int main(int argc, char ** argv) { return 1; } - const int n_ctx_train = llama_n_ctx_train(model); + const llama_vocab * vocab = llama_model_get_vocab(model); + + const int n_ctx_train = llama_model_n_ctx_train(model); const int n_ctx = llama_n_ctx(ctx); const enum llama_pooling_type pooling_type = llama_pooling_type(ctx); @@ -192,8 +194,8 @@ int main(int argc, char ** argv) { return 1; } // add eos if not present - if (llama_token_eos(model) >= 0 && (inp.empty() || inp.back() != llama_token_eos(model))) { - inp.push_back(llama_token_eos(model)); + if (llama_vocab_eos(vocab) >= 0 && (inp.empty() || inp.back() != llama_vocab_eos(vocab))) { + inp.push_back(llama_vocab_eos(vocab)); } chunk.tokens = inp; } @@ -215,7 +217,7 @@ int main(int argc, char ** argv) { struct llama_batch batch = llama_batch_init(n_batch, 0, 1); // allocate output - const int n_embd = llama_n_embd(model); + const int n_embd = llama_model_n_embd(model); std::vector embeddings(n_chunks * n_embd, 0); float * emb = embeddings.data(); diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 61420e441..bfa8378bb 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -685,7 +685,7 @@ class LlamaData { // Initializes the context with the specified parameters llama_context_ptr initialize_context(const llama_model_ptr & model, const Opt & opt) { - llama_context_ptr context(llama_new_context_with_model(model.get(), opt.ctx_params)); + llama_context_ptr context(llama_init_from_model(model.get(), opt.ctx_params)); if (!context) { printe("%s: error: failed to create the llama_context\n", __func__); } @@ -713,11 +713,11 @@ static void add_message(const char * role, const std::string & text, LlamaData & // Function to apply the chat template and resize `formatted` if needed static int apply_chat_template(LlamaData & llama_data, const bool append) { int result = llama_chat_apply_template( - llama_data.model.get(), nullptr, llama_data.messages.data(), llama_data.messages.size(), append, + llama_model_chat_template(llama_data.model.get()), llama_data.messages.data(), llama_data.messages.size(), append, append ? llama_data.fmtted.data() : nullptr, append ? llama_data.fmtted.size() : 0); if (append && result > static_cast(llama_data.fmtted.size())) { llama_data.fmtted.resize(result); - result = llama_chat_apply_template(llama_data.model.get(), nullptr, llama_data.messages.data(), + result = llama_chat_apply_template(llama_model_chat_template(llama_data.model.get()), llama_data.messages.data(), llama_data.messages.size(), append, llama_data.fmtted.data(), llama_data.fmtted.size()); } @@ -726,11 +726,11 @@ static int apply_chat_template(LlamaData & llama_data, const bool append) { } // Function to tokenize the prompt -static int tokenize_prompt(const llama_model_ptr & model, const std::string & prompt, +static int tokenize_prompt(const llama_vocab * vocab, const std::string & prompt, std::vector & prompt_tokens) { - const int n_prompt_tokens = -llama_tokenize(model.get(), prompt.c_str(), prompt.size(), NULL, 0, true, true); + const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, true, true); prompt_tokens.resize(n_prompt_tokens); - if (llama_tokenize(model.get(), prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, + if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, true) < 0) { printe("failed to tokenize the prompt\n"); return -1; @@ -753,9 +753,9 @@ static int check_context_size(const llama_context_ptr & ctx, const llama_batch & } // convert the token to a string -static int convert_token_to_string(const llama_model_ptr & model, const llama_token token_id, std::string & piece) { +static int convert_token_to_string(const llama_vocab * vocab, const llama_token token_id, std::string & piece) { char buf[256]; - int n = llama_token_to_piece(model.get(), token_id, buf, sizeof(buf), 0, true); + int n = llama_token_to_piece(vocab, token_id, buf, sizeof(buf), 0, true); if (n < 0) { printe("failed to convert token to piece\n"); return 1; @@ -773,8 +773,10 @@ static void print_word_and_concatenate_to_response(const std::string & piece, st // helper function to evaluate a prompt and generate a response static int generate(LlamaData & llama_data, const std::string & prompt, std::string & response) { + const llama_vocab * vocab = llama_model_get_vocab(llama_data.model.get()); + std::vector tokens; - if (tokenize_prompt(llama_data.model, prompt, tokens) < 0) { + if (tokenize_prompt(vocab, prompt, tokens) < 0) { return 1; } @@ -790,12 +792,12 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str // sample the next token, check is it an end of generation? new_token_id = llama_sampler_sample(llama_data.sampler.get(), llama_data.context.get(), -1); - if (llama_token_is_eog(llama_data.model.get(), new_token_id)) { + if (llama_vocab_is_eog(vocab, new_token_id)) { break; } std::string piece; - if (convert_token_to_string(llama_data.model, new_token_id, piece)) { + if (convert_token_to_string(vocab, new_token_id, piece)) { return 1; } diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp index cd03661cf..cf7cbd815 100644 --- a/examples/save-load-state/save-load-state.cpp +++ b/examples/save-load-state/save-load-state.cpp @@ -97,7 +97,7 @@ int main(int argc, char ** argv) { printf("\n\n"); // make new context - llama_context * ctx2 = llama_new_context_with_model(model, common_context_params_to_llama(params)); + llama_context * ctx2 = llama_init_from_model(model, common_context_params_to_llama(params)); llama_sampler * smpl2 = llama_sampler_chain_init(sparams); @@ -154,7 +154,7 @@ int main(int argc, char ** argv) { } // make new context - llama_context * ctx3 = llama_new_context_with_model(model, common_context_params_to_llama(params)); + llama_context * ctx3 = llama_init_from_model(model, common_context_params_to_llama(params)); llama_sampler * smpl3 = llama_sampler_chain_init(sparams); diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 127323e77..64c0c4ef6 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -98,7 +98,7 @@ struct slot_params { int64_t t_max_prompt_ms = -1; // TODO: implement int64_t t_max_predict_ms = -1; // if positive, limit the generation phase to this time limit - std::vector lora; + std::vector lora; std::vector antiprompt; std::vector response_fields; @@ -198,15 +198,17 @@ struct server_task { bool metrics_reset_bucket = false; // used by SERVER_TASK_TYPE_SET_LORA - std::vector set_lora; + std::vector set_lora; server_task(server_task_type type) : type(type) {} static slot_params params_from_json_cmpl( - const llama_model * model, const llama_context * ctx, const common_params & params_base, const json & data) { + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + slot_params params; // Sampling parameter defaults are loaded from the global server context (but individual requests can still override them) @@ -329,7 +331,7 @@ struct server_task { const auto & logit_bias = data.find("logit_bias"); if (logit_bias != data.end() && logit_bias->is_array()) { - const int n_vocab = llama_n_vocab(model); + const int n_vocab = llama_vocab_n_tokens(vocab); for (const auto & el : *logit_bias) { // TODO: we may want to throw errors here, in case "el" is incorrect if (el.is_array() && el.size() == 2) { @@ -348,7 +350,7 @@ struct server_task { params.sampling.logit_bias.push_back({tok, bias}); } } else if (el[0].is_string()) { - auto toks = common_tokenize(model, el[0].get(), false); + auto toks = common_tokenize(vocab, el[0].get(), false); for (auto tok : toks) { params.sampling.logit_bias.push_back({tok, bias}); } @@ -1131,7 +1133,7 @@ struct server_slot { common_speculative * spec = nullptr; - std::vector lora; + std::vector lora; // the index relative to completion multi-task request size_t index = 0; @@ -1633,6 +1635,8 @@ struct server_context { llama_model * model = nullptr; llama_context * ctx = nullptr; + const llama_vocab * vocab = nullptr; + llama_model * model_dft = nullptr; llama_context_params cparams_dft; @@ -1690,10 +1694,12 @@ struct server_context { return false; } + vocab = llama_model_get_vocab(model); + n_ctx = llama_n_ctx(ctx); - add_bos_token = llama_add_bos_token(model); - has_eos_token = llama_token_eos(model) != LLAMA_TOKEN_NULL; + add_bos_token = llama_vocab_get_add_bos(vocab); + has_eos_token = llama_vocab_eos(vocab) != LLAMA_TOKEN_NULL; if (!params_base.speculative.model.empty()) { SRV_INF("loading draft model '%s'\n", params_base.speculative.model.c_str()); @@ -1736,7 +1742,8 @@ struct server_context { bool validate_builtin_chat_template() const { llama_chat_message chat[] = {{"user", "test"}}; - int32_t chat_res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0); + const char * tmpl = llama_model_chat_template(model); + const int32_t chat_res = llama_chat_apply_template(tmpl, chat, 1, true, nullptr, 0); return chat_res > 0; } @@ -1756,7 +1763,7 @@ struct server_context { if (model_dft) { slot.batch_spec = llama_batch_init(params_base.speculative.n_max + 1, 0, 1); - slot.ctx_dft = llama_new_context_with_model(model_dft, cparams_dft); + slot.ctx_dft = llama_init_from_model(model_dft, cparams_dft); if (slot.ctx_dft == nullptr) { SRV_ERR("%s", "failed to create draft context\n"); return; @@ -1891,7 +1898,7 @@ struct server_context { } if (slot.params.ignore_eos && has_eos_token) { - slot.params.sampling.logit_bias.push_back({llama_token_eos(model), -INFINITY}); + slot.params.sampling.logit_bias.push_back({llama_vocab_eos(vocab), -INFINITY}); } { @@ -2047,14 +2054,14 @@ struct server_context { slot.n_decoded, slot.n_prompt_tokens, slot.n_past, slot.n_ctx); } - if (llama_token_is_eog(model, result.tok)) { + if (llama_vocab_is_eog(vocab, result.tok)) { slot.stop = STOP_TYPE_EOS; slot.has_next_token = false; SLT_DBG(slot, "%s", "stopped by EOS\n"); } - const auto n_ctx_train = llama_n_ctx_train(model); + const auto n_ctx_train = llama_model_n_ctx_train(model); if (slot.params.n_predict < 1 && slot.n_predict < 1 && slot.n_prompt_tokens + slot.n_decoded >= n_ctx_train) { slot.truncated = true; @@ -2074,7 +2081,7 @@ struct server_context { void populate_token_probs(const server_slot & slot, completion_token_output & result, bool post_sampling, bool special, int idx) { size_t n_probs = slot.params.sampling.n_probs; - size_t n_vocab = llama_n_vocab(llama_get_model(ctx)); + size_t n_vocab = llama_vocab_n_tokens(vocab); if (post_sampling) { const auto * cur_p = common_sampler_get_candidates(slot.smpl); const size_t max_probs = cur_p->size; @@ -2225,7 +2232,7 @@ struct server_context { res->n_tokens = slot.n_prompt_tokens; res->oaicompat = slot.params.oaicompat; - const int n_embd = llama_n_embd(model); + const int n_embd = llama_model_n_embd(model); std::vector embd_res(n_embd, 0.0f); @@ -2927,7 +2934,7 @@ struct server_context { // make sure we're in the right embedding mode llama_set_embeddings(ctx, slot_batched->is_non_causal()); // apply lora, only need to do it once per batch - common_lora_adapters_apply(ctx, slot_batched->lora); + common_set_adapter_lora(ctx, slot_batched->lora); } // process the created batch of tokens @@ -3129,12 +3136,12 @@ struct server_context { json model_meta() const { return json { - {"vocab_type", llama_vocab_type (model)}, - {"n_vocab", llama_n_vocab (model)}, - {"n_ctx_train", llama_n_ctx_train (model)}, - {"n_embd", llama_n_embd (model)}, - {"n_params", llama_model_n_params(model)}, - {"size", llama_model_size (model)}, + {"vocab_type", llama_vocab_type (vocab)}, + {"n_vocab", llama_vocab_n_tokens (vocab)}, + {"n_ctx_train", llama_model_n_ctx_train(model)}, + {"n_embd", llama_model_n_embd (model)}, + {"n_params", llama_model_n_params (model)}, + {"size", llama_model_size (model)}, }; } }; @@ -3639,7 +3646,7 @@ int main(int argc, char ** argv) { std::vector tasks; try { - std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, data.at("prompt"), true, true); + std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, data.at("prompt"), true, true); tasks.reserve(tokenized_prompts.size()); for (size_t i = 0; i < tokenized_prompts.size(); i++) { server_task task = server_task(type); @@ -3649,7 +3656,6 @@ int main(int argc, char ** argv) { task.prompt_tokens = std::move(tokenized_prompts[i]); task.params = server_task::params_from_json_cmpl( - ctx_server.model, ctx_server.ctx, ctx_server.params_base, data); @@ -3745,13 +3751,13 @@ int main(int argc, char ** argv) { const auto handle_infill = [&ctx_server, &res_error, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { // check model compatibility std::string err; - if (llama_token_fim_pre(ctx_server.model) == LLAMA_TOKEN_NULL) { + if (llama_vocab_fim_pre(ctx_server.vocab) == LLAMA_TOKEN_NULL) { err += "prefix token is missing. "; } - if (llama_token_fim_suf(ctx_server.model) == LLAMA_TOKEN_NULL) { + if (llama_vocab_fim_suf(ctx_server.vocab) == LLAMA_TOKEN_NULL) { err += "suffix token is missing. "; } - if (llama_token_fim_mid(ctx_server.model) == LLAMA_TOKEN_NULL) { + if (llama_vocab_fim_mid(ctx_server.vocab) == LLAMA_TOKEN_NULL) { err += "middle token is missing. "; } if (!err.empty()) { @@ -3797,10 +3803,10 @@ int main(int argc, char ** argv) { data["input_extra"] = input_extra; // default to empty array if it's not exist std::string prompt = json_value(data, "prompt", std::string()); - std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, false, true); + std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, false, true); SRV_DBG("creating infill tasks, n_prompts = %d\n", (int) tokenized_prompts.size()); data["prompt"] = format_infill( - ctx_server.ctx, + ctx_server.vocab, data.at("input_prefix"), data.at("input_suffix"), data.at("input_extra"), @@ -3857,7 +3863,7 @@ int main(int argc, char ** argv) { const bool add_special = json_value(body, "add_special", false); const bool with_pieces = json_value(body, "with_pieces", false); - llama_tokens tokens = tokenize_mixed(ctx_server.ctx, body.at("content"), add_special, true); + llama_tokens tokens = tokenize_mixed(ctx_server.vocab, body.at("content"), add_special, true); if (with_pieces) { for (const auto& token : tokens) { @@ -3933,7 +3939,7 @@ int main(int argc, char ** argv) { } } - std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.ctx, prompt, true, true); + std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true); for (const auto & tokens : tokenized_prompts) { // this check is necessary for models that do not add BOS token to the input if (tokens.empty()) { @@ -4033,20 +4039,20 @@ int main(int argc, char ** argv) { return; } - llama_tokens tokenized_query = tokenize_input_prompts(ctx_server.ctx, query, /* add_special */ false, true)[0]; + llama_tokens tokenized_query = tokenize_input_prompts(ctx_server.vocab, query, /* add_special */ false, true)[0]; // create and queue the task json responses = json::array(); bool error = false; { std::vector tasks; - std::vector tokenized_docs = tokenize_input_prompts(ctx_server.ctx, documents, /* add_special */ false, true); + std::vector tokenized_docs = tokenize_input_prompts(ctx_server.vocab, documents, /* add_special */ false, true); tasks.reserve(tokenized_docs.size()); for (size_t i = 0; i < tokenized_docs.size(); i++) { server_task task = server_task(SERVER_TASK_TYPE_RERANK); task.id = ctx_server.queue_tasks.get_new_id(); task.index = i; - task.prompt_tokens = format_rerank(ctx_server.model, tokenized_query, tokenized_docs[i]); + task.prompt_tokens = format_rerank(ctx_server.vocab, tokenized_query, tokenized_docs[i]); tasks.push_back(task); } diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index ad130d490..699480f90 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -118,7 +118,7 @@ static json json_get_nested_values(const std::vector & paths, const * - only string, example: "string" * - mixed string and tokens, example: [12, 34, "string", 56, 78] */ -static llama_tokens tokenize_mixed(const llama_context * ctx, const json & json_prompt, bool add_special, bool parse_special) { +static llama_tokens tokenize_mixed(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) { // If `add_bos` is true, we only add BOS, when json_prompt is a string, // or the first element of the json_prompt array is a string. llama_tokens prompt_tokens; @@ -131,10 +131,10 @@ static llama_tokens tokenize_mixed(const llama_context * ctx, const json & json_ llama_tokens p; if (first) { - p = common_tokenize(ctx, s, add_special, parse_special); + p = common_tokenize(vocab, s, add_special, parse_special); first = false; } else { - p = common_tokenize(ctx, s, false, parse_special); + p = common_tokenize(vocab, s, false, parse_special); } prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end()); @@ -148,7 +148,7 @@ static llama_tokens tokenize_mixed(const llama_context * ctx, const json & json_ } } else { auto s = json_prompt.template get(); - prompt_tokens = common_tokenize(ctx, s, add_special, parse_special); + prompt_tokens = common_tokenize(vocab, s, add_special, parse_special); } return prompt_tokens; @@ -166,11 +166,11 @@ static llama_tokens tokenize_mixed(const llama_context * ctx, const json & json_ * - "prompt": [[12, 34, 56], [78, 90, 12]] * - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56]] */ -static std::vector tokenize_input_prompts(llama_context * ctx, const json & json_prompt, bool add_special, bool parse_special) { +static std::vector tokenize_input_prompts(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) { std::vector result; if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) { // string or mixed - result.push_back(tokenize_mixed(ctx, json_prompt, add_special, parse_special)); + result.push_back(tokenize_mixed(vocab, json_prompt, add_special, parse_special)); } else if (json_is_array_of_numbers(json_prompt)) { // array of tokens result.push_back(json_prompt.get()); @@ -179,7 +179,7 @@ static std::vector tokenize_input_prompts(llama_context * ctx, con result.reserve(json_prompt.size()); for (const auto & p : json_prompt) { if (p.is_string() || json_is_array_of_mixed_numbers_strings(p)) { - result.push_back(tokenize_mixed(ctx, p, add_special, parse_special)); + result.push_back(tokenize_mixed(vocab, p, add_special, parse_special)); } else if (json_is_array_of_numbers(p)) { // array of tokens result.push_back(p.get()); @@ -231,21 +231,23 @@ static size_t validate_utf8(const std::string& text) { // // format rerank task: [BOS]query[EOS][SEP]doc[EOS] -static llama_tokens format_rerank(const struct llama_model * model, const llama_tokens & query, const llama_tokens & doc) { +static llama_tokens format_rerank(const struct llama_vocab * vocab, const llama_tokens & query, const llama_tokens & doc) { llama_tokens result; + result.reserve(doc.size() + query.size() + 4); - result.push_back(llama_token_bos(model)); + result.push_back(llama_vocab_bos(vocab)); result.insert(result.end(), query.begin(), query.end()); - result.push_back(llama_token_eos(model)); - result.push_back(llama_token_sep(model)); + result.push_back(llama_vocab_eos(vocab)); + result.push_back(llama_vocab_sep(vocab)); result.insert(result.end(), doc.begin(), doc.end()); - result.push_back(llama_token_eos(model)); + result.push_back(llama_vocab_eos(vocab)); + return result; } // format infill task static llama_tokens format_infill( - const llama_context * ctx, + const llama_vocab * vocab, const json & input_prefix, const json & input_suffix, const json & input_extra, @@ -272,15 +274,14 @@ static llama_tokens format_infill( llama_tokens extra_tokens; extra_tokens.reserve(n_ctx); - auto model = llama_get_model(ctx); - auto tokens_prefix = tokenize_mixed(ctx, input_prefix, false, false); - auto tokens_suffix = tokenize_mixed(ctx, input_suffix, false, false); + auto tokens_prefix = tokenize_mixed(vocab, input_prefix, false, false); + auto tokens_suffix = tokenize_mixed(vocab, input_suffix, false, false); - if (llama_token_fim_rep(model) != LLAMA_TOKEN_NULL) { + if (llama_vocab_fim_rep(vocab) != LLAMA_TOKEN_NULL) { // TODO: make project name an input - static const auto k_fim_repo = common_tokenize(ctx, "myproject\n", false, false); + static const auto k_fim_repo = common_tokenize(vocab, "myproject\n", false, false); - extra_tokens.push_back(llama_token_fim_rep(model)); + extra_tokens.push_back(llama_vocab_fim_rep(vocab)); extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end()); } for (const auto & chunk : input_extra) { @@ -288,28 +289,28 @@ static llama_tokens format_infill( const std::string text = json_value(chunk, "text", std::string()); const std::string filename = json_value(chunk, "filename", std::string("tmp")); - if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) { - const auto k_fim_file = common_tokenize(ctx, filename + "\n", false, false); + if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) { + const auto k_fim_file = common_tokenize(vocab, filename + "\n", false, false); - extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model)); + extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab)); extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end()); } else { // chunk separator in binary form to avoid confusing the AI static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00}; - static const auto k_chunk_prefix_tokens = common_tokenize(ctx, k_chunk_prefix_str, false, false); + static const auto k_chunk_prefix_tokens = common_tokenize(vocab, k_chunk_prefix_str, false, false); extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end()); } - const auto chunk_tokens = common_tokenize(ctx, text, false, false); + const auto chunk_tokens = common_tokenize(vocab, text, false, false); extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end()); } - if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) { + if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) { // TODO: current filename - static const auto k_fim_file = common_tokenize(ctx, "filename\n", false, false); + static const auto k_fim_file = common_tokenize(vocab, "filename\n", false, false); - extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model)); + extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab)); extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end()); } @@ -325,15 +326,15 @@ static llama_tokens format_infill( tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take); tokens_suffix.resize(n_suffix_take); - tokens_prefix.insert(tokens_prefix.begin(), llama_token_fim_pre(model)); + tokens_prefix.insert(tokens_prefix.begin(), llama_vocab_fim_pre(vocab)); tokens_prefix.insert(tokens_prefix.end(), tokens_prompt.begin(), tokens_prompt.end()); - tokens_suffix.insert(tokens_suffix.begin(), llama_token_fim_suf(model)); + tokens_suffix.insert(tokens_suffix.begin(), llama_vocab_fim_suf(vocab)); auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix; auto embd_end = spm_infill ? tokens_prefix : tokens_suffix; - if (llama_add_bos_token(model)) { - embd_inp.insert(embd_inp.begin(), llama_token_bos(model)); + if (llama_vocab_get_add_bos(vocab)) { + embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab)); } SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size()); @@ -342,7 +343,7 @@ static llama_tokens format_infill( embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end()); embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end()); - embd_inp.push_back(llama_token_fim_mid(model)); + embd_inp.push_back(llama_vocab_fim_mid(vocab)); return embd_inp; } @@ -764,14 +765,18 @@ static json format_logit_bias(const std::vector & logit_bias) return data; } -static std::string safe_json_to_str(json data) { +static std::string safe_json_to_str(const json & data) { return data.dump(-1, ' ', false, json::error_handler_t::replace); } static std::vector get_token_probabilities(llama_context * ctx, int idx) { std::vector cur; const auto * logits = llama_get_logits_ith(ctx, idx); - const int n_vocab = llama_n_vocab(llama_get_model(ctx)); + + const llama_model * model = llama_get_model(ctx); + const llama_vocab * vocab = llama_model_get_vocab(model); + + const int n_vocab = llama_vocab_n_tokens(vocab); cur.resize(n_vocab); for (llama_token token_id = 0; token_id < n_vocab; token_id++) { @@ -799,8 +804,8 @@ static std::vector get_token_probabilities(llama_context * ctx } static bool are_lora_equal( - const std::vector & l1, - const std::vector & l2) { + const std::vector & l1, + const std::vector & l2) { if (l1.size() != l2.size()) { return false; } @@ -814,10 +819,10 @@ static bool are_lora_equal( } // parse lora config from JSON request, returned a copy of lora_base with updated scale -static std::vector parse_lora_request( - const std::vector & lora_base, +static std::vector parse_lora_request( + const std::vector & lora_base, const json & data) { - std::vector lora(lora_base); + std::vector lora(lora_base); int max_idx = lora.size(); // clear existing value diff --git a/examples/simple-chat/simple-chat.cpp b/examples/simple-chat/simple-chat.cpp index d72f5bcdd..e8eda9c22 100644 --- a/examples/simple-chat/simple-chat.cpp +++ b/examples/simple-chat/simple-chat.cpp @@ -75,12 +75,14 @@ int main(int argc, char ** argv) { return 1; } + const llama_vocab * vocab = llama_model_get_vocab(model); + // initialize the context llama_context_params ctx_params = llama_context_default_params(); ctx_params.n_ctx = n_ctx; ctx_params.n_batch = n_ctx; - llama_context * ctx = llama_new_context_with_model(model, ctx_params); + llama_context * ctx = llama_init_from_model(model, ctx_params); if (!ctx) { fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); return 1; @@ -97,9 +99,9 @@ int main(int argc, char ** argv) { std::string response; // tokenize the prompt - const int n_prompt_tokens = -llama_tokenize(model, prompt.c_str(), prompt.size(), NULL, 0, true, true); + const int n_prompt_tokens = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, true, true); std::vector prompt_tokens(n_prompt_tokens); - if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), llama_get_kv_cache_used_cells(ctx) == 0, true) < 0) { + if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), llama_get_kv_cache_used_cells(ctx) == 0, true) < 0) { GGML_ABORT("failed to tokenize the prompt\n"); } @@ -124,13 +126,13 @@ int main(int argc, char ** argv) { new_token_id = llama_sampler_sample(smpl, ctx, -1); // is it an end of generation? - if (llama_token_is_eog(model, new_token_id)) { + if (llama_vocab_is_eog(vocab, new_token_id)) { break; } // convert the token to a string, print it and add it to the response char buf[256]; - int n = llama_token_to_piece(model, new_token_id, buf, sizeof(buf), 0, true); + int n = llama_token_to_piece(vocab, new_token_id, buf, sizeof(buf), 0, true); if (n < 0) { GGML_ABORT("failed to convert token to piece\n"); } @@ -159,12 +161,14 @@ int main(int argc, char ** argv) { break; } + const char * tmpl = llama_model_chat_template(model); + // add the user input to the message list and format it messages.push_back({"user", strdup(user.c_str())}); - int new_len = llama_chat_apply_template(model, nullptr, messages.data(), messages.size(), true, formatted.data(), formatted.size()); + int new_len = llama_chat_apply_template(tmpl, messages.data(), messages.size(), true, formatted.data(), formatted.size()); if (new_len > (int)formatted.size()) { formatted.resize(new_len); - new_len = llama_chat_apply_template(model, nullptr, messages.data(), messages.size(), true, formatted.data(), formatted.size()); + new_len = llama_chat_apply_template(tmpl, messages.data(), messages.size(), true, formatted.data(), formatted.size()); } if (new_len < 0) { fprintf(stderr, "failed to apply the chat template\n"); @@ -181,7 +185,7 @@ int main(int argc, char ** argv) { // add the response to the messages messages.push_back({"assistant", strdup(response.c_str())}); - prev_len = llama_chat_apply_template(model, nullptr, messages.data(), messages.size(), false, nullptr, 0); + prev_len = llama_chat_apply_template(tmpl, messages.data(), messages.size(), false, nullptr, 0); if (prev_len < 0) { fprintf(stderr, "failed to apply the chat template\n"); return 1; diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index f69117890..10e79a0a6 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -84,6 +84,7 @@ int main(int argc, char ** argv) { model_params.n_gpu_layers = ngl; llama_model * model = llama_model_load_from_file(model_path.c_str(), model_params); + const llama_vocab * vocab = llama_model_get_vocab(model); if (model == NULL) { fprintf(stderr , "%s: error: unable to load model\n" , __func__); @@ -93,11 +94,11 @@ int main(int argc, char ** argv) { // tokenize the prompt // find the number of tokens in the prompt - const int n_prompt = -llama_tokenize(model, prompt.c_str(), prompt.size(), NULL, 0, true, true); + const int n_prompt = -llama_tokenize(vocab, prompt.c_str(), prompt.size(), NULL, 0, true, true); // allocate space for the tokens and tokenize the prompt std::vector prompt_tokens(n_prompt); - if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, true) < 0) { + if (llama_tokenize(vocab, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, true) < 0) { fprintf(stderr, "%s: error: failed to tokenize the prompt\n", __func__); return 1; } @@ -112,7 +113,7 @@ int main(int argc, char ** argv) { // enable performance counters ctx_params.no_perf = false; - llama_context * ctx = llama_new_context_with_model(model, ctx_params); + llama_context * ctx = llama_init_from_model(model, ctx_params); if (ctx == NULL) { fprintf(stderr , "%s: error: failed to create the llama_context\n" , __func__); @@ -131,7 +132,7 @@ int main(int argc, char ** argv) { for (auto id : prompt_tokens) { char buf[128]; - int n = llama_token_to_piece(model, id, buf, sizeof(buf), 0, true); + int n = llama_token_to_piece(vocab, id, buf, sizeof(buf), 0, true); if (n < 0) { fprintf(stderr, "%s: error: failed to convert token to piece\n", __func__); return 1; @@ -164,12 +165,12 @@ int main(int argc, char ** argv) { new_token_id = llama_sampler_sample(smpl, ctx, -1); // is it an end of generation? - if (llama_token_is_eog(model, new_token_id)) { + if (llama_vocab_is_eog(vocab, new_token_id)) { break; } char buf[128]; - int n = llama_token_to_piece(model, new_token_id, buf, sizeof(buf), 0, true); + int n = llama_token_to_piece(vocab, new_token_id, buf, sizeof(buf), 0, true); if (n < 0) { fprintf(stderr, "%s: error: failed to convert token to piece\n", __func__); return 1; diff --git a/examples/speculative-simple/speculative-simple.cpp b/examples/speculative-simple/speculative-simple.cpp index 9070c3512..403ba2dd2 100644 --- a/examples/speculative-simple/speculative-simple.cpp +++ b/examples/speculative-simple/speculative-simple.cpp @@ -45,6 +45,8 @@ int main(int argc, char ** argv) { model_tgt = llama_init_tgt.model.get(); ctx_tgt = llama_init_tgt.context.get(); + const llama_vocab * vocab = llama_model_get_vocab(model_tgt); + // load the draft model params.devices = params.speculative.devices; params.model = params.speculative.model; @@ -196,7 +198,7 @@ int main(int argc, char ** argv) { id_last = ids[i]; - if (llama_token_is_eog(model_tgt, id_last)) { + if (llama_vocab_is_eog(vocab, id_last)) { has_eos = true; break; } diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp index bc0b6813b..c7ccea50d 100644 --- a/examples/speculative/speculative.cpp +++ b/examples/speculative/speculative.cpp @@ -90,10 +90,13 @@ int main(int argc, char ** argv) { model_dft = llama_init_dft.model.get(); ctx_dft = llama_init_dft.context.get(); - const bool vocab_type_tgt = llama_vocab_type(model_tgt); + const llama_vocab * vocab_tgt = llama_model_get_vocab(model_tgt); + const llama_vocab * vocab_dft = llama_model_get_vocab(model_dft); + + const bool vocab_type_tgt = llama_vocab_type(vocab_tgt); LOG_DBG("vocab_type tgt: %d\n", vocab_type_tgt); - const bool vocab_type_dft = llama_vocab_type(model_dft); + const bool vocab_type_dft = llama_vocab_type(vocab_dft); LOG_DBG("vocab_type dft: %d\n", vocab_type_dft); if (vocab_type_tgt != vocab_type_dft) { @@ -103,18 +106,18 @@ int main(int argc, char ** argv) { } if ( - llama_add_bos_token(model_tgt) != llama_add_bos_token(model_dft) || - llama_add_eos_token(model_tgt) != llama_add_eos_token(model_dft) || - llama_token_bos(model_tgt) != llama_token_bos(model_dft) || - llama_token_eos(model_tgt) != llama_token_eos(model_dft) + llama_vocab_get_add_bos(vocab_tgt) != llama_vocab_get_add_bos(vocab_dft) || + llama_vocab_get_add_eos(vocab_tgt) != llama_vocab_get_add_eos(vocab_dft) || + llama_vocab_bos(vocab_tgt) != llama_vocab_bos(vocab_dft) || + llama_vocab_eos(vocab_tgt) != llama_vocab_eos(vocab_dft) ) { LOG_ERR("%s: draft model special tokens must match target model to use speculation\n", __func__); return 1; } { - const int n_vocab_tgt = llama_n_vocab(model_tgt); - const int n_vocab_dft = llama_n_vocab(model_dft); + const int n_vocab_tgt = llama_vocab_n_tokens(vocab_tgt); + const int n_vocab_dft = llama_vocab_n_tokens(vocab_dft); const int vocab_diff = n_vocab_tgt > n_vocab_dft ? n_vocab_tgt - n_vocab_dft : n_vocab_dft - n_vocab_tgt; @@ -122,13 +125,13 @@ int main(int argc, char ** argv) { if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) { LOG_ERR("%s: draft model vocab must closely match target model to use speculation but ", __func__); LOG_ERR("target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n", - n_vocab_tgt, llama_n_vocab(model_dft), vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE); + n_vocab_tgt, llama_vocab_n_tokens(vocab_dft), vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE); return 1; } for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) { - const char * token_text_tgt = llama_token_get_text(model_tgt, i); - const char * token_text_dft = llama_token_get_text(model_dft, i); + const char * token_text_tgt = llama_vocab_get_text(vocab_tgt, i); + const char * token_text_dft = llama_vocab_get_text(vocab_dft, i); if (std::strcmp(token_text_tgt, token_text_dft) != 0) { LOG_ERR("%s: draft model vocab must match target model to use speculation but ", __func__); LOG_ERR("token %d content differs - target '%s', draft '%s'\n", i, @@ -170,7 +173,7 @@ int main(int argc, char ** argv) { const auto t_enc_end = ggml_time_us(); // the 2 models should have the same vocab - //GGML_ASSERT(n_vocab == llama_n_vocab(model_dft)); + //GGML_ASSERT(n_vocab == llama_vocab_n_tokens(model_dft)); // how many tokens to draft each time int n_draft = params.speculative.n_max; @@ -386,7 +389,7 @@ int main(int argc, char ** argv) { } } - if (llama_token_is_eog(model_tgt, token_id)) { + if (llama_vocab_is_eog(vocab_tgt, token_id)) { has_eos = true; } ++n_predict; diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp index 684ca054a..7375759eb 100644 --- a/examples/tokenize/tokenize.cpp +++ b/examples/tokenize/tokenize.cpp @@ -344,8 +344,10 @@ int main(int raw_argc, char ** raw_argv) { return 1; } + const llama_vocab * vocab = llama_model_get_vocab(model); + llama_context_params ctx_params = llama_context_default_params(); - llama_context * ctx = llama_new_context_with_model(model, ctx_params); + llama_context * ctx = llama_init_from_model(model, ctx_params); if (!ctx) { fprintf(stderr, "Error: could not create context.\n"); return 1; @@ -365,7 +367,7 @@ int main(int raw_argc, char ** raw_argv) { prompt = stdin_buffer.str(); } - const bool model_wants_add_bos = llama_add_bos_token(model); + const bool model_wants_add_bos = llama_vocab_get_add_bos(vocab); const bool add_bos = model_wants_add_bos && !no_bos; const bool parse_special = !no_parse_special; const bool escape = !no_escape; @@ -375,7 +377,7 @@ int main(int raw_argc, char ** raw_argv) { } std::vector tokens; - tokens = common_tokenize(model, prompt, add_bos, parse_special); + tokens = common_tokenize(vocab, prompt, add_bos, parse_special); if (printing_ids) { printf("["); diff --git a/examples/tts/tts.cpp b/examples/tts/tts.cpp index 522f5e881..5a9161181 100644 --- a/examples/tts/tts.cpp +++ b/examples/tts/tts.cpp @@ -414,15 +414,15 @@ static void prompt_add(llama_tokens & prompt, const llama_tokens & tokens) { prompt.insert(prompt.end(), tokens.begin(), tokens.end()); } -static void prompt_add(llama_tokens & prompt, const llama_model * model, const std::string & txt, bool add_special, bool parse_special) { - auto tmp = common_tokenize(model, txt, add_special, parse_special); +static void prompt_add(llama_tokens & prompt, const llama_vocab * vocab, const std::string & txt, bool add_special, bool parse_special) { + auto tmp = common_tokenize(vocab, txt, add_special, parse_special); prompt_add(prompt, tmp); } -static void prompt_init(llama_tokens & prompt, const llama_model * model) { +static void prompt_init(llama_tokens & prompt, const llama_vocab * vocab) { prompt.clear(); - prompt_add(prompt, model, "<|im_start|>\n", true, true); + prompt_add(prompt, vocab, "<|im_start|>\n", true, true); } int main(int argc, char ** argv) { @@ -462,6 +462,8 @@ int main(int argc, char ** argv) { model_ttc = llama_init_ttc.model.get(); ctx_ttc = llama_init_ttc.context.get(); + const llama_vocab * vocab = llama_model_get_vocab(model_ttc); + // TODO: refactor in a common struct params.model = params.vocoder.model; params.model_url = params.vocoder.model_url; @@ -499,9 +501,9 @@ int main(int argc, char ** argv) { std::vector prompt_inp; - prompt_init(prompt_inp, model_ttc); + prompt_init(prompt_inp, vocab); - prompt_add(prompt_inp, model_ttc, "<|text_start|>the<|text_sep|>overall<|text_sep|>package<|text_sep|>from<|text_sep|>just<|text_sep|>two<|text_sep|>people<|text_sep|>is<|text_sep|>pretty<|text_sep|>remarkable<|text_sep|>sure<|text_sep|>i<|text_sep|>have<|text_sep|>some<|text_sep|>critiques<|text_sep|>about<|text_sep|>some<|text_sep|>of<|text_sep|>the<|text_sep|>gameplay<|text_sep|>aspects<|text_sep|>but<|text_sep|>its<|text_sep|>still<|text_sep|>really<|text_sep|>enjoyable<|text_sep|>and<|text_sep|>it<|text_sep|>looks<|text_sep|>lovely<|text_sep|>", false, true); + prompt_add(prompt_inp, vocab, "<|text_start|>the<|text_sep|>overall<|text_sep|>package<|text_sep|>from<|text_sep|>just<|text_sep|>two<|text_sep|>people<|text_sep|>is<|text_sep|>pretty<|text_sep|>remarkable<|text_sep|>sure<|text_sep|>i<|text_sep|>have<|text_sep|>some<|text_sep|>critiques<|text_sep|>about<|text_sep|>some<|text_sep|>of<|text_sep|>the<|text_sep|>gameplay<|text_sep|>aspects<|text_sep|>but<|text_sep|>its<|text_sep|>still<|text_sep|>really<|text_sep|>enjoyable<|text_sep|>and<|text_sep|>it<|text_sep|>looks<|text_sep|>lovely<|text_sep|>", false, true); // convert the input text into the necessary format expected by OuteTTS { @@ -509,10 +511,10 @@ int main(int argc, char ** argv) { LOG_INF("%s: prompt: '%s'\n", __func__, prompt_clean.c_str()); - prompt_add(prompt_inp, model_ttc, prompt_clean, false, true); + prompt_add(prompt_inp, vocab, prompt_clean, false, true); } - prompt_add(prompt_inp, model_ttc, "<|text_end|>\n", false, true); + prompt_add(prompt_inp, vocab, "<|text_end|>\n", false, true); // disabled to save time on tokenizing each time // TODO: load voices from the json files @@ -549,7 +551,7 @@ it<|t_0.09|><|code_start|><|848|><|1366|><|395|><|1601|><|1513|><|593|><|1302|>< looks<|t_0.27|><|code_start|><|1281|><|1266|><|1755|><|572|><|248|><|1751|><|1257|><|695|><|1380|><|457|><|659|><|585|><|1315|><|1105|><|1776|><|736|><|24|><|736|><|654|><|1027|><|code_end|> lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|1481|><|1721|><|1123|><|438|><|1246|><|1251|><|795|><|659|><|1381|><|1658|><|217|><|1772|><|562|><|952|><|107|><|1129|><|1112|><|467|><|550|><|1079|><|840|><|1615|><|1469|><|1380|><|168|><|917|><|836|><|1827|><|437|><|583|><|67|><|595|><|1087|><|1646|><|1493|><|1677|><|code_end|>)"; - auto tmp = common_tokenize(model_ttc, voice_data, false, true); + auto tmp = common_tokenize(vocab, voice_data, false, true); printf("\n\n"); for (int i = 0; i < tmp.size(); ++i) { printf("%d, ", tmp[i]); @@ -735,9 +737,9 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14 const auto * cands = common_sampler_get_candidates(smpl[i]); // is it an end of generation? -> mark the stream as finished - if (llama_token_is_eog(model_ttc, new_token_id) || n_decode == n_predict) { + if (llama_vocab_is_eog(vocab, new_token_id) || n_decode == n_predict) { std::string reason; - if (llama_token_is_eog(model_ttc, new_token_id)) { + if (llama_vocab_is_eog(vocab, new_token_id)) { reason = "eos"; } else { reason = "n_predict"; @@ -873,7 +875,7 @@ lovely<|t_0.56|><|code_start|><|634|><|596|><|1766|><|1556|><|1306|><|1285|><|14 #if 1 // spectral operations - const int n_embd = llama_n_embd(model_cts); + const int n_embd = llama_model_n_embd(model_cts); const float * embd = llama_get_embeddings(ctx_cts); auto audio = embd_to_audio(embd, n_codes, n_embd, params.cpuparams.n_threads); diff --git a/include/llama-cpp.h b/include/llama-cpp.h index 11306b17f..8f6368177 100644 --- a/include/llama-cpp.h +++ b/include/llama-cpp.h @@ -20,11 +20,11 @@ struct llama_sampler_deleter { void operator()(llama_sampler * sampler) { llama_sampler_free(sampler); } }; -struct llama_lora_adapter_deleter { - void operator()(llama_lora_adapter * lora_adapter) { llama_lora_adapter_free(lora_adapter); } +struct llama_adapter_lora_deleter { + void operator()(llama_adapter_lora * adapter) { llama_adapter_lora_free(adapter); } }; typedef std::unique_ptr llama_model_ptr; typedef std::unique_ptr llama_context_ptr; typedef std::unique_ptr llama_sampler_ptr; -typedef std::unique_ptr llama_lora_adapter_ptr; +typedef std::unique_ptr llama_adapter_lora_ptr; diff --git a/include/llama.h b/include/llama.h index 0295a51fb..9f04bc622 100644 --- a/include/llama.h +++ b/include/llama.h @@ -56,7 +56,7 @@ extern "C" { // TODO: show sample usage // - // struct llama_vocab; // TODO: add in the future + struct llama_vocab; struct llama_model; struct llama_context; struct llama_sampler; @@ -385,8 +385,7 @@ extern "C" { } llama_chat_message; // lora adapter - // TODO: rename to llama_adapter_lora - struct llama_lora_adapter; + struct llama_adapter_lora; // Helpers for getting default parameters // TODO: update API to start accepting pointers to params structs (https://github.com/ggerganov/llama.cpp/discussions/9172) @@ -400,18 +399,19 @@ extern "C" { // Call once at the start of the program LLAMA_API void llama_backend_init(void); + // Call once at the end of the program - currently only used for MPI + LLAMA_API void llama_backend_free(void); + //optional: LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa); // Optional: an auto threadpool gets created in ggml if not passed explicitly LLAMA_API void llama_attach_threadpool( - struct llama_context * ctx, - ggml_threadpool_t threadpool, - ggml_threadpool_t threadpool_batch); - LLAMA_API void llama_detach_threadpool(struct llama_context * ctx); + struct llama_context * ctx, + ggml_threadpool_t threadpool, + ggml_threadpool_t threadpool_batch); - // Call once at the end of the program - currently only used for MPI - LLAMA_API void llama_backend_free(void); + LLAMA_API void llama_detach_threadpool(struct llama_context * ctx); DEPRECATED(LLAMA_API struct llama_model * llama_load_model_from_file( const char * path_model, @@ -427,11 +427,15 @@ extern "C" { LLAMA_API void llama_model_free(struct llama_model * model); - // TODO: rename to llama_init_from_model - LLAMA_API struct llama_context * llama_new_context_with_model( + LLAMA_API struct llama_context * llama_init_from_model( struct llama_model * model, struct llama_context_params params); + DEPRECATED(LLAMA_API struct llama_context * llama_new_context_with_model( + struct llama_model * model, + struct llama_context_params params), + "use llama_init_from_model instead"); + // Frees all allocated memory LLAMA_API void llama_free(struct llama_context * ctx); @@ -449,20 +453,30 @@ extern "C" { LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx); LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx); - LLAMA_API int32_t llama_n_vocab (const struct llama_model * model); - LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model); - LLAMA_API int32_t llama_n_embd (const struct llama_model * model); - LLAMA_API int32_t llama_n_layer (const struct llama_model * model); - LLAMA_API int32_t llama_n_head (const struct llama_model * model); + DEPRECATED(LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model), "use llama_model_n_ctx_train instead"); + DEPRECATED(LLAMA_API int32_t llama_n_embd (const struct llama_model * model), "use llama_model_n_embd instead"); + DEPRECATED(LLAMA_API int32_t llama_n_layer (const struct llama_model * model), "use llama_model_n_layer instead"); + DEPRECATED(LLAMA_API int32_t llama_n_head (const struct llama_model * model), "use llama_model_n_head instead"); - LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx); + DEPRECATED(LLAMA_API int32_t llama_n_vocab (const struct llama_vocab * vocab), "use llama_vocab_n_tokens instead"); - LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); - LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model); - LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model); + LLAMA_API const struct llama_model * llama_get_model (const struct llama_context * ctx); + LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx); + + LLAMA_API const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model); + LLAMA_API enum llama_rope_type llama_model_rope_type(const struct llama_model * model); + + LLAMA_API int32_t llama_model_n_ctx_train(const struct llama_model * model); + LLAMA_API int32_t llama_model_n_embd (const struct llama_model * model); + LLAMA_API int32_t llama_model_n_layer (const struct llama_model * model); + LLAMA_API int32_t llama_model_n_head (const struct llama_model * model); // Get the model's RoPE frequency scaling factor - LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model); + LLAMA_API float llama_model_rope_freq_scale_train(const struct llama_model * model); + + LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_vocab * vocab); + + LLAMA_API int32_t llama_vocab_n_tokens(const struct llama_vocab * vocab); // Functions to access the model's GGUF metadata scalar values // - The functions return the length of the string on success, or -1 on failure @@ -488,6 +502,9 @@ extern "C" { // Returns the total size of all the tensors in the model in bytes LLAMA_API uint64_t llama_model_size(const struct llama_model * model); + // Get the default chat template. Returns nullptr if not available + LLAMA_API const char * llama_model_chat_template(const struct llama_model * model); + // Returns the total number of parameters in the model LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); @@ -515,34 +532,31 @@ extern "C" { // // Load a LoRA adapter from file - // TODO: rename to llama_adapter_lora_init - LLAMA_API struct llama_lora_adapter * llama_lora_adapter_init( + LLAMA_API struct llama_adapter_lora * llama_adapter_lora_init( struct llama_model * model, const char * path_lora); + // Manually free a LoRA adapter + // Note: loaded adapters will be free when the associated model is deleted + LLAMA_API void llama_adapter_lora_free(struct llama_adapter_lora * adapter); + + // The following functions operate on a llama_context, hence the naming: llama_verb_... + // Add a loaded LoRA adapter to given context // This will not modify model's weight - // TODO: rename to llama_set_adapter_lora - LLAMA_API int32_t llama_lora_adapter_set( + LLAMA_API int32_t llama_set_adapter_lora( struct llama_context * ctx, - struct llama_lora_adapter * adapter, + struct llama_adapter_lora * adapter, float scale); // Remove a specific LoRA adapter from given context // Return -1 if the adapter is not present in the context - // TODO: rename to llama_rm_adapter_lora - LLAMA_API int32_t llama_lora_adapter_remove( + LLAMA_API int32_t llama_rm_adapter_lora( struct llama_context * ctx, - struct llama_lora_adapter * adapter); + struct llama_adapter_lora * adapter); // Remove all LoRA adapters from given context - // TODO: rename to llama_clear_adapter_lora - LLAMA_API void llama_lora_adapter_clear(struct llama_context * ctx); - - // Manually free a LoRA adapter - // Note: loaded adapters will be free when the associated model is deleted - // TODO: rename to llama_adapter_lora_free - LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter); + LLAMA_API void llama_clear_adapter_lora(struct llama_context * ctx); // Apply a loaded control vector to a llama_context, or if data is NULL, clear // the currently loaded vector. @@ -550,9 +564,8 @@ extern "C" { // to an n_embd x n_layers buffer starting from layer 1. // il_start and il_end are the layer range the vector should apply to (both inclusive) // See llama_control_vector_load in common to load a control vector. - // TODO: rename to llama_adapter_cvec_apply - LLAMA_API int32_t llama_control_vector_apply( - struct llama_context * lctx, + LLAMA_API int32_t llama_apply_adapter_cvec( + struct llama_context * ctx, const float * data, size_t len, int32_t n_embd, @@ -908,41 +921,57 @@ extern "C" { // Vocab // - LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token); + LLAMA_API const char * llama_vocab_get_text(const struct llama_vocab * vocab, llama_token token); - LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token); + LLAMA_API float llama_vocab_get_score(const struct llama_vocab * vocab, llama_token token); - LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token); + LLAMA_API enum llama_token_attr llama_vocab_get_attr(const struct llama_vocab * vocab, llama_token token); // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.) - LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token); + LLAMA_API bool llama_vocab_is_eog(const struct llama_vocab * vocab, llama_token token); // Identify if Token Id is a control token or a render-able token - LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token); + LLAMA_API bool llama_vocab_is_control(const struct llama_vocab * vocab, llama_token token); // Special tokens - LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence - LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence - LLAMA_API llama_token llama_token_eot(const struct llama_model * model); // end-of-turn - LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification - LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator - LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line - LLAMA_API llama_token llama_token_pad(const struct llama_model * model); // padding + LLAMA_API llama_token llama_vocab_bos(const struct llama_vocab * vocab); // beginning-of-sentence + LLAMA_API llama_token llama_vocab_eos(const struct llama_vocab * vocab); // end-of-sentence + LLAMA_API llama_token llama_vocab_eot(const struct llama_vocab * vocab); // end-of-turn + LLAMA_API llama_token llama_vocab_cls(const struct llama_vocab * vocab); // classification + LLAMA_API llama_token llama_vocab_sep(const struct llama_vocab * vocab); // sentence separator + LLAMA_API llama_token llama_vocab_nl (const struct llama_vocab * vocab); // next-line + LLAMA_API llama_token llama_vocab_pad(const struct llama_vocab * vocab); // padding - LLAMA_API bool llama_add_bos_token(const struct llama_model * model); - LLAMA_API bool llama_add_eos_token(const struct llama_model * model); + LLAMA_API bool llama_vocab_get_add_bos(const struct llama_vocab * vocab); + LLAMA_API bool llama_vocab_get_add_eos(const struct llama_vocab * vocab); - // infill tokens - DEPRECATED(LLAMA_API llama_token llama_token_prefix(const struct llama_model * model), "use llama_token_fim_pre instead"); - DEPRECATED(LLAMA_API llama_token llama_token_middle(const struct llama_model * model), "use llama_token_fim_mid instead"); - DEPRECATED(LLAMA_API llama_token llama_token_suffix(const struct llama_model * model), "use llama_token_fim_suf instead"); + LLAMA_API llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab); + LLAMA_API llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab); + LLAMA_API llama_token llama_vocab_fim_mid(const struct llama_vocab * vocab); + LLAMA_API llama_token llama_vocab_fim_pad(const struct llama_vocab * vocab); + LLAMA_API llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab); + LLAMA_API llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab); - LLAMA_API llama_token llama_token_fim_pre(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_suf(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_mid(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_pad(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_rep(const struct llama_model * model); - LLAMA_API llama_token llama_token_fim_sep(const struct llama_model * model); + DEPRECATED(LLAMA_API const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token), "use llama_vocabable_get_text instead"); + DEPRECATED(LLAMA_API float llama_token_get_score(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_score instead"); + DEPRECATED(LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_get_attr instead"); + DEPRECATED(LLAMA_API bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_eog instead"); + DEPRECATED(LLAMA_API bool llama_token_is_control(const struct llama_vocab * vocab, llama_token token), "use llama_vocab_is_control instead"); + DEPRECATED(LLAMA_API llama_token llama_token_bos(const struct llama_vocab * vocab), "use llama_vocab_bos instead"); + DEPRECATED(LLAMA_API llama_token llama_token_eos(const struct llama_vocab * vocab), "use llama_vocab_eos instead"); + DEPRECATED(LLAMA_API llama_token llama_token_eot(const struct llama_vocab * vocab), "use llama_vocab_eot instead"); + DEPRECATED(LLAMA_API llama_token llama_token_cls(const struct llama_vocab * vocab), "use llama_vocab_cls instead"); + DEPRECATED(LLAMA_API llama_token llama_token_sep(const struct llama_vocab * vocab), "use llama_vocab_sep instead"); + DEPRECATED(LLAMA_API llama_token llama_token_nl (const struct llama_vocab * vocab), "use llama_vocab_nl instead"); + DEPRECATED(LLAMA_API llama_token llama_token_pad(const struct llama_vocab * vocab), "use llama_vocab_pad instead"); + DEPRECATED(LLAMA_API bool llama_add_bos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_bos instead"); + DEPRECATED(LLAMA_API bool llama_add_eos_token(const struct llama_vocab * vocab), "use llama_vocab_get_add_eos instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_pre(const struct llama_vocab * vocab), "use llama_vocab_fim_pre instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_suf(const struct llama_vocab * vocab), "use llama_vocab_fim_suf instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_mid(const struct llama_vocab * vocab), "use llama_vocab_fim_mid instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_pad(const struct llama_vocab * vocab), "use llama_vocab_fim_pad instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_rep(const struct llama_vocab * vocab), "use llama_vocab_fim_rep instead"); + DEPRECATED(LLAMA_API llama_token llama_token_fim_sep(const struct llama_vocab * vocab), "use llama_vocab_fim_sep instead"); // // Tokenization @@ -958,7 +987,7 @@ extern "C" { /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated /// as plaintext. Does not insert a leading space. LLAMA_API int32_t llama_tokenize( - const struct llama_model * model, + const struct llama_vocab * vocab, const char * text, int32_t text_len, llama_token * tokens, @@ -972,7 +1001,7 @@ extern "C" { // User can skip up to 'lstrip' leading spaces before copying (useful when encoding/decoding multiple tokens with 'add_space_prefix') // @param special If true, special tokens are rendered in the output. LLAMA_API int32_t llama_token_to_piece( - const struct llama_model * model, + const struct llama_vocab * vocab, llama_token token, char * buf, int32_t length, @@ -986,7 +1015,7 @@ extern "C" { /// @param remove_special Allow to remove BOS and EOS tokens if model is configured to do so. /// @param unparse_special If true, special tokens are rendered in the output. LLAMA_API int32_t llama_detokenize( - const struct llama_model * model, + const struct llama_vocab * vocab, const llama_token * tokens, int32_t n_tokens, char * text, @@ -1009,7 +1038,6 @@ extern "C" { /// @param length The size of the allocated buffer /// @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template. LLAMA_API int32_t llama_chat_apply_template( - const struct llama_model * model, const char * tmpl, const struct llama_chat_message * chat, size_t n_msg, @@ -1057,7 +1085,6 @@ extern "C" { // llama_sampler_free(smpl); // // TODO: In the future, llama_sampler will be utilized to offload the sampling to the backends (e.g. GPU). - // TODO: in the future, the entire sampling API that uses llama_model should start using llama_vocab // typedef void * llama_sampler_context_t; @@ -1157,7 +1184,7 @@ extern "C" { float eta); LLAMA_API struct llama_sampler * llama_sampler_init_grammar( - const struct llama_model * model, + const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root); @@ -1169,8 +1196,9 @@ extern "C" { float penalty_present); // 0.0 = disabled /// @details DRY sampler, designed by p-e-w, as described in: https://github.com/oobabooga/text-generation-webui/pull/5677, porting Koboldcpp implementation authored by pi6am: https://github.com/LostRuins/koboldcpp/pull/982 - LLAMA_API struct llama_sampler * llama_sampler_init_dry( - const struct llama_model * model, + LLAMA_API struct llama_sampler * llama_sampler_init_dry( + const struct llama_vocab * vocab, + int32_t n_ctx_train, float dry_multiplier, float dry_base, int32_t dry_allowed_length, @@ -1204,7 +1232,7 @@ extern "C" { // 3. discard non-EOG tokens with low prob // 4. if no tokens are left -> pick EOT // - LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_model * model); + LLAMA_API struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab); // Returns the seed used by the sampler if applicable, LLAMA_DEFAULT_SEED otherwise LLAMA_API uint32_t llama_sampler_get_seed(const struct llama_sampler * smpl); diff --git a/src/llama-adapter.cpp b/src/llama-adapter.cpp index d4879b778..8a0800463 100644 --- a/src/llama-adapter.cpp +++ b/src/llama-adapter.cpp @@ -1,5 +1,7 @@ #include "llama-adapter.h" +#include "llama-impl.h" +#include "llama-mmap.h" #include "llama-model.h" #include @@ -9,7 +11,7 @@ // vec -struct ggml_tensor * llama_control_vector::tensor_for(int il) const { +struct ggml_tensor * llama_adapter_cvec::tensor_for(int il) const { if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) { return nullptr; } @@ -17,7 +19,7 @@ struct ggml_tensor * llama_control_vector::tensor_for(int il) const { return tensors[il]; } -struct ggml_tensor * llama_control_vector::apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const { +struct ggml_tensor * llama_adapter_cvec::apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const { ggml_tensor * layer_dir = tensor_for(il); if (layer_dir != nullptr) { cur = ggml_add(ctx, cur, layer_dir); @@ -26,12 +28,12 @@ struct ggml_tensor * llama_control_vector::apply_to(struct ggml_context * ctx, s return cur; } -static bool llama_control_vector_init(struct llama_control_vector & cvec, const llama_model & model) { +bool llama_adapter_cvec::init(const llama_model & model) { const auto & hparams = model.hparams; - GGML_ASSERT(cvec.tensors.empty()); - GGML_ASSERT(cvec.ctxs.empty()); - GGML_ASSERT(cvec.bufs.empty()); + GGML_ASSERT(tensors.empty()); + GGML_ASSERT(ctxs.empty()); + GGML_ASSERT(bufs.empty()); // create a context for each buffer type std::map ctx_map; @@ -50,7 +52,7 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const } ctx_map[buft] = ctx; - cvec.ctxs.emplace_back(ctx); + ctxs.emplace_back(ctx); return ctx; } @@ -59,21 +61,21 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const }; // make tensors - cvec.tensors.reserve(hparams.n_layer); - cvec.tensors.push_back(nullptr); // there's never a tensor for layer 0 + tensors.reserve(hparams.n_layer); + tensors.push_back(nullptr); // there's never a tensor for layer 0 for (size_t il = 1; il < hparams.n_layer; il++) { - ggml_backend_buffer_type_t buft = llama_model_select_buft(model, il); + ggml_backend_buffer_type_t buft = model.select_buft(il); ggml_context * ctx = ctx_for_buft(buft); if (!ctx) { LLAMA_LOG_ERROR("%s: failed to allocate context for control vector\n", __func__); return false; } ggml_tensor * tensor = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd); - cvec.tensors.push_back(tensor); + tensors.push_back(tensor); } // allocate tensors / buffers and zero - cvec.bufs.reserve(ctx_map.size()); + bufs.reserve(ctx_map.size()); for (auto it : ctx_map) { ggml_backend_buffer_type_t buft = it.first; ggml_context * ctx = it.second; @@ -83,14 +85,13 @@ static bool llama_control_vector_init(struct llama_control_vector & cvec, const return false; } ggml_backend_buffer_clear(buf, 0); - cvec.bufs.emplace_back(buf); + bufs.emplace_back(buf); } return true; } -int32_t llama_control_vector_apply( - struct llama_control_vector & cvec, +int32_t llama_adapter_cvec::apply( const llama_model & model, const float * data, size_t len, @@ -101,8 +102,8 @@ int32_t llama_control_vector_apply( if (data == nullptr) { // disable the current control vector (but leave allocated for later) - cvec.layer_start = -1; - cvec.layer_end = -1; + layer_start = -1; + layer_end = -1; return 0; } @@ -111,21 +112,21 @@ int32_t llama_control_vector_apply( return 1; } - if (cvec.tensors.empty()) { - if (!llama_control_vector_init(cvec, model)) { + if (tensors.empty()) { + if (!init(model)) { return 1; } } - cvec.layer_start = il_start; - cvec.layer_end = il_end; + layer_start = il_start; + layer_end = il_end; for (size_t il = 1; il < hparams.n_layer; il++) { - assert(cvec.tensors[il] != nullptr); + assert(tensors[il] != nullptr); const size_t off = n_embd * (il - 1); // buffer doesn't have data for layer 0, since it's never present if (off + n_embd <= len) { - ggml_backend_tensor_set(cvec.tensors[il], data + off, 0, n_embd * ggml_element_size(cvec.tensors[il])); + ggml_backend_tensor_set(tensors[il], data + off, 0, n_embd * ggml_element_size(tensors[il])); } } @@ -134,7 +135,7 @@ int32_t llama_control_vector_apply( // lora -llama_lora_weight * llama_lora_adapter::get_weight(struct ggml_tensor * w) { +llama_adapter_lora_weight * llama_adapter_lora::get_weight(struct ggml_tensor * w) { const std::string name(w->name); const auto pos = ab_map.find(name); @@ -145,11 +146,7 @@ llama_lora_weight * llama_lora_adapter::get_weight(struct ggml_tensor * w) { return nullptr; } -void llama_lora_adapter_free(struct llama_lora_adapter * adapter) { - delete adapter; -} - -static void llama_lora_adapter_init_impl(struct llama_model & model, const char * path_lora, struct llama_lora_adapter & adapter) { +static void llama_adapter_lora_init_impl(struct llama_model & model, const char * path_lora, struct llama_adapter_lora & adapter) { LLAMA_LOG_INFO("%s: loading lora adapter from '%s' ...\n", __func__, path_lora); ggml_context * ctx_init; @@ -221,7 +218,7 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char }; // bundle lora_a and lora_b into pairs - std::map ab_map; + std::map ab_map; auto str_endswith = [](const std::string & str, const std::string & suffix) { return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0; }; @@ -231,14 +228,14 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char if (str_endswith(name, ".lora_a")) { replace_all(name, ".lora_a", ""); if (ab_map.find(name) == ab_map.end()) { - ab_map[name] = llama_lora_weight(cur, nullptr); + ab_map[name] = llama_adapter_lora_weight(cur, nullptr); } else { ab_map[name].a = cur; } } else if (str_endswith(name, ".lora_b")) { replace_all(name, ".lora_b", ""); if (ab_map.find(name) == ab_map.end()) { - ab_map[name] = llama_lora_weight(nullptr, cur); + ab_map[name] = llama_adapter_lora_weight(nullptr, cur); } else { ab_map[name].b = cur; } @@ -254,7 +251,7 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char // add tensors for (auto & it : ab_map) { const std::string & name = it.first; - llama_lora_weight & w = it.second; + llama_adapter_lora_weight & w = it.second; bool is_token_embd = str_endswith(name, "token_embd.weight"); if (!w.a || !w.b) { @@ -262,7 +259,7 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char } // device buft and device ctx - auto * model_tensor = llama_model_get_tensor(model, name.c_str()); + const auto * model_tensor = model.get_tensor(name.c_str()); if (!model_tensor) { throw std::runtime_error("LoRA tensor '" + name + "' does not exist in base model (hint: maybe wrong base model?)"); } @@ -288,7 +285,7 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char struct ggml_tensor * tensor_b = ggml_dup_tensor(dev_ctx, w.b); ggml_set_name(tensor_a, w.a->name); ggml_set_name(tensor_b, w.b->name); - adapter.ab_map[name] = llama_lora_weight(tensor_a, tensor_b); + adapter.ab_map[name] = llama_adapter_lora_weight(tensor_a, tensor_b); } // allocate tensors / buffers and zero @@ -330,11 +327,11 @@ static void llama_lora_adapter_init_impl(struct llama_model & model, const char LLAMA_LOG_INFO("%s: loaded %zu tensors from lora file\n", __func__, adapter.ab_map.size()*2); } -struct llama_lora_adapter * llama_lora_adapter_init(struct llama_model * model, const char * path_lora) { - struct llama_lora_adapter * adapter = new llama_lora_adapter(); +struct llama_adapter_lora * llama_adapter_lora_init(struct llama_model * model, const char * path_lora) { + struct llama_adapter_lora * adapter = new llama_adapter_lora(); try { - llama_lora_adapter_init_impl(*model, path_lora, *adapter); + llama_adapter_lora_init_impl(*model, path_lora, *adapter); return adapter; } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what()); @@ -344,3 +341,7 @@ struct llama_lora_adapter * llama_lora_adapter_init(struct llama_model * model, return nullptr; } + +void llama_adapter_lora_free(struct llama_adapter_lora * adapter) { + delete adapter; +} diff --git a/src/llama-adapter.h b/src/llama-adapter.h index 3448656b1..603fa08f6 100644 --- a/src/llama-adapter.h +++ b/src/llama-adapter.h @@ -1,73 +1,74 @@ #pragma once -#include "llama-impl.h" -#include "llama-hparams.h" +#include "llama.h" #include "ggml-cpp.h" +#include #include #include +// TODO: pimpl + // // llama_adapter_cvec // -// TODO: rename to llama_adapter_cvec -struct llama_control_vector { - std::vector ctxs; - std::vector bufs; +struct llama_adapter_cvec { + struct ggml_tensor * tensor_for(int il) const; - std::vector tensors; // per layer + struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const; + + int32_t apply( + const llama_model & model, + const float * data, + size_t len, + int32_t n_embd, + int32_t il_start, + int32_t il_end); + +private: + bool init(const llama_model & model); int32_t layer_start = -1; int32_t layer_end = -1; - struct ggml_tensor * tensor_for(int il) const; + std::vector ctxs; + std::vector bufs; - struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const; + std::vector tensors; // per layer }; -int32_t llama_control_vector_apply( - struct llama_control_vector & cvec, - const llama_model & model, - const float * data, - size_t len, - int32_t n_embd, - int32_t il_start, - int32_t il_end); - // // llama_adapter_lora // -// TODO: rename to llama_adapter_lora_weight -struct llama_lora_weight { +struct llama_adapter_lora_weight { struct ggml_tensor * a = nullptr; struct ggml_tensor * b = nullptr; // get actual scale based on rank and alpha - float get_scale(float alpha, float adapter_scale) { + float get_scale(float alpha, float adapter_scale) const { const float rank = (float) b->ne[0]; const float scale = alpha ? adapter_scale * alpha / rank : adapter_scale; return scale; } - llama_lora_weight() = default; - llama_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {} + llama_adapter_lora_weight() = default; + llama_adapter_lora_weight(struct ggml_tensor * a, struct ggml_tensor * b) : a(a), b(b) {} }; -// TODO: rename to llama_adapter_lora -struct llama_lora_adapter { +struct llama_adapter_lora { // map tensor name to lora_a_b - std::unordered_map ab_map; + std::unordered_map ab_map; std::vector ctxs; std::vector bufs; float alpha; - llama_lora_adapter() = default; - ~llama_lora_adapter() = default; + llama_adapter_lora() = default; + ~llama_adapter_lora() = default; - llama_lora_weight * get_weight(struct ggml_tensor * w); + llama_adapter_lora_weight * get_weight(struct ggml_tensor * w); }; diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index 7300bd26a..5c1f14cfd 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -178,6 +178,7 @@ static const std::map LLM_KV_NAMES = { { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap" }, { LLM_KV_TOKENIZER_HF_JSON, "tokenizer.huggingface.json" }, { LLM_KV_TOKENIZER_RWKV, "tokenizer.rwkv.world" }, + { LLM_KV_TOKENIZER_CHAT_TEMPLATE, "tokenizer.chat.template" }, { LLM_KV_TOKENIZER_FIM_PRE_ID, "tokenizer.ggml.fim_pre_token_id" }, { LLM_KV_TOKENIZER_FIM_SUF_ID, "tokenizer.ggml.fim_suf_token_id" }, { LLM_KV_TOKENIZER_FIM_MID_ID, "tokenizer.ggml.fim_mid_token_id" }, diff --git a/src/llama-arch.h b/src/llama-arch.h index 79909f03f..349844790 100644 --- a/src/llama-arch.h +++ b/src/llama-arch.h @@ -176,6 +176,7 @@ enum llm_kv { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, LLM_KV_TOKENIZER_HF_JSON, LLM_KV_TOKENIZER_RWKV, + LLM_KV_TOKENIZER_CHAT_TEMPLATE, LLM_KV_TOKENIZER_FIM_PRE_ID, LLM_KV_TOKENIZER_FIM_SUF_ID, LLM_KV_TOKENIZER_FIM_MID_ID, diff --git a/src/llama-context.cpp b/src/llama-context.cpp index 38a55fb2c..671d2a81a 100644 --- a/src/llama-context.cpp +++ b/src/llama-context.cpp @@ -1,5 +1,8 @@ #include "llama-context.h" +#include "llama-impl.h" +#include "llama-mmap.h" + #include #include #include @@ -467,11 +470,12 @@ void llama_set_inputs(llama_context & lctx, const llama_ubatch & ubatch) { size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs) { const auto & cparams = lctx.cparams; const auto & hparams = lctx.model.hparams; + const auto & vocab = lctx.model.vocab; const size_t n_outputs_max = std::max(n_outputs, (size_t) cparams.n_seq_max); const auto n_batch = cparams.n_batch; - const auto n_vocab = hparams.n_vocab; + const auto n_vocab = vocab.n_tokens(); const auto n_embd = hparams.n_embd; // TODO: use a per-batch flag for logits presence instead @@ -504,7 +508,7 @@ size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs) { auto * buft = ggml_backend_cpu_buffer_type(); // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory - auto * output_dev = lctx.model.dev_output.dev; + auto * output_dev = lctx.model.dev_output(); auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr; if (output_dev_host_buft) { buft = output_dev_host_buft; @@ -538,7 +542,7 @@ size_t llama_output_reserve(struct llama_context & lctx, size_t n_outputs) { void llama_output_reorder(struct llama_context & ctx) { std::vector & out_ids = ctx.sbatch.out_ids; if (!out_ids.empty()) { - const uint32_t n_vocab = ctx.model.hparams.n_vocab; + const uint32_t n_vocab = ctx.model.vocab.n_tokens(); const uint32_t n_embd = ctx.model.hparams.n_embd; const int32_t n_outputs = ctx.n_outputs; @@ -722,7 +726,7 @@ float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) { throw std::runtime_error(format("corrupt output buffer (j=%d, n_outputs=%d)", j, ctx->n_outputs)); } - return ctx->logits + j*ctx->model.hparams.n_vocab; + return ctx->logits + j*ctx->model.vocab.n_tokens(); } catch (const std::exception & err) { LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what()); #ifndef NDEBUG @@ -882,7 +886,7 @@ struct llama_data_write { } void write_logits(const struct llama_context * ctx) { - const uint64_t logits_size = std::min((uint64_t) ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.hparams.n_vocab); + const uint64_t logits_size = std::min((uint64_t) ctx->logits_size, (uint64_t) ctx->n_outputs * ctx->model.vocab.n_tokens()); write(&logits_size, sizeof(logits_size)); diff --git a/src/llama-context.h b/src/llama-context.h index 0d163c470..a9268b292 100644 --- a/src/llama-context.h +++ b/src/llama-context.h @@ -22,12 +22,12 @@ struct llama_context { const struct llama_model & model; - struct llama_cparams cparams; - struct llama_sbatch sbatch; // TODO: revisit if needed - struct llama_kv_cache kv_self; - struct llama_control_vector cvec; + struct llama_cparams cparams; + struct llama_sbatch sbatch; // TODO: revisit if needed + struct llama_kv_cache kv_self; + struct llama_adapter_cvec cvec; - std::unordered_map lora_adapters; + std::unordered_map lora; std::vector backends; std::vector> set_n_threads_fns; diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp index 186dc9a25..bebe4e9a3 100644 --- a/src/llama-grammar.cpp +++ b/src/llama-grammar.cpp @@ -1092,9 +1092,9 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_ for (size_t i = 0; i < cur_p->size; ++i) { const llama_token id = cur_p->data[i].id; - const std::string & piece = grammar.vocab->cache_token_to_piece.at(id); + const std::string & piece = grammar.vocab->token_to_piece(id); - if (llama_token_is_eog_impl(*grammar.vocab, id)) { + if (grammar.vocab->is_eog(id)) { if (!allow_eog) { cur_p->data[i].logit = -INFINITY; } @@ -1115,7 +1115,7 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token) { GGML_ASSERT(grammar.vocab != nullptr); - if (llama_token_is_eog_impl(*grammar.vocab, token)) { + if (grammar.vocab->is_eog(token)) { for (const auto & stack : grammar.stacks) { if (stack.empty()) { return; @@ -1124,7 +1124,7 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token GGML_ABORT("fatal error"); } - const std::string & piece = grammar.vocab->cache_token_to_piece.at(token); + const std::string & piece = grammar.vocab->token_to_piece(token); // Note terminating 0 in decoded string const auto decoded = decode_utf8(piece, grammar.partial_utf8); diff --git a/src/llama-hparams.h b/src/llama-hparams.h index 3542bef49..1fe454103 100644 --- a/src/llama-hparams.h +++ b/src/llama-hparams.h @@ -30,7 +30,6 @@ struct llama_hparams { bool use_par_res; bool swin_norm; - uint32_t n_vocab = 0; uint32_t n_ctx_train; // context size the model was trained on uint32_t n_embd; uint32_t n_embd_features = 0; @@ -41,7 +40,6 @@ struct llama_hparams { uint32_t n_embd_head_v; // dimension of values (d_v) aka n_embd_head uint32_t n_expert = 0; uint32_t n_expert_used = 0; - uint32_t n_vocab_type = 0; // for BERT-style token types uint32_t n_rel_attn_bkts = 0; // for WavTokenizer diff --git a/src/llama-kv-cache.cpp b/src/llama-kv-cache.cpp index 90b6c56ed..feffdf0de 100644 --- a/src/llama-kv-cache.cpp +++ b/src/llama-kv-cache.cpp @@ -79,7 +79,7 @@ bool llama_kv_cache_init( ggml_backend_buffer_type_t buft; if (offload) { - auto * dev = model.dev_layer.at(i).dev; + auto * dev = model.dev_layer(i); buft = ggml_backend_dev_buffer_type(dev); } else { buft = ggml_backend_cpu_buffer_type(); diff --git a/src/llama-mmap.cpp b/src/llama-mmap.cpp index a8cb9439b..57c6e4f51 100644 --- a/src/llama-mmap.cpp +++ b/src/llama-mmap.cpp @@ -35,7 +35,7 @@ // TODO: consider moving to llama-impl.h if needed in more places #if defined(_WIN32) -std::string llama_format_win_err(DWORD err) { +static std::string llama_format_win_err(DWORD err) { LPSTR buf; size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp index 1c4e30878..53175f0e0 100644 --- a/src/llama-model-loader.cpp +++ b/src/llama-model-loader.cpp @@ -7,6 +7,10 @@ #include #include +static const size_t kiB = 1024; +static const size_t MiB = 1024*kiB; +static const size_t GiB = 1024*MiB; + const char * llama_file_version_name(llama_fver version) { switch (version) { case GGUF_FILE_VERSION_V1: return "GGUF V1 (support until nov 2023)"; @@ -17,6 +21,49 @@ const char * llama_file_version_name(llama_fver version) { return "unknown"; } +static std::string llama_model_ftype_name(llama_ftype ftype) { + if (ftype & LLAMA_FTYPE_GUESSED) { + return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)"; + } + + switch (ftype) { + case LLAMA_FTYPE_ALL_F32: return "all F32"; + case LLAMA_FTYPE_MOSTLY_F16: return "F16"; + case LLAMA_FTYPE_MOSTLY_BF16: return "BF16"; + case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0"; + case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1"; + case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0"; + case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1"; + case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0"; + case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large"; + case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small"; + case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium"; + case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K"; + case LLAMA_FTYPE_MOSTLY_TQ1_0: return "TQ1_0 - 1.69 bpw ternary"; + case LLAMA_FTYPE_MOSTLY_TQ2_0: return "TQ2_0 - 2.06 bpw ternary"; + case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw"; + case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw"; + + default: return "unknown, may not work"; + } +} + namespace GGUFMeta { template struct GKV_Base_Type { @@ -1009,3 +1056,17 @@ bool llama_model_loader::load_all_data( return true; } + +std::string llama_model_loader::ftype_name() const { + return llama_model_ftype_name(ftype); +} + +void llama_model_loader::print_info() const { + LLAMA_LOG_INFO("%s: file format = %s\n", __func__, llama_file_version_name(fver)); + LLAMA_LOG_INFO("%s: file type = %s\n", __func__, llama_model_ftype_name(ftype).c_str()); + if (n_bytes < GiB) { + LLAMA_LOG_INFO("%s: file size = %.2f MiB (%.2f BPW) \n", __func__, n_bytes/1024.0/1024.0, n_bytes*8.0/n_elements); + } else { + LLAMA_LOG_INFO("%s: file size = %.2f GiB (%.2f BPW) \n", __func__, n_bytes/1024.0/1024.0/1024.0, n_bytes*8.0/n_elements); + } +} diff --git a/src/llama-model-loader.h b/src/llama-model-loader.h index 1ec478195..b63d158d9 100644 --- a/src/llama-model-loader.h +++ b/src/llama-model-loader.h @@ -155,4 +155,8 @@ struct llama_model_loader { llama_mlocks * lmlocks, llama_progress_callback progress_callback, void * progress_callback_user_data); + + std::string ftype_name() const; + + void print_info() const; }; diff --git a/src/llama-model.cpp b/src/llama-model.cpp index c056204b0..f90f5e746 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -1,129 +1,85 @@ #include "llama-model.h" #include "llama-impl.h" +#include "llama-mmap.h" #include "llama-model-loader.h" -#include "unicode.h" // TODO: remove +#include "ggml-cpp.h" #include #include +#include #include +#include #include #include -static const size_t kiB = 1024; -static const size_t MiB = 1024*kiB; -static const size_t GiB = 1024*MiB; - const char * llm_type_name(llm_type type) { switch (type) { - case MODEL_14M: return "14M"; - case MODEL_17M: return "17M"; - case MODEL_22M: return "22M"; - case MODEL_33M: return "33M"; - case MODEL_60M: return "60M"; - case MODEL_70M: return "70M"; - case MODEL_80M: return "80M"; - case MODEL_109M: return "109M"; - case MODEL_137M: return "137M"; - case MODEL_160M: return "160M"; - case MODEL_220M: return "220M"; - case MODEL_250M: return "250M"; - case MODEL_270M: return "270M"; - case MODEL_335M: return "335M"; - case MODEL_410M: return "410M"; - case MODEL_450M: return "450M"; - case MODEL_770M: return "770M"; - case MODEL_780M: return "780M"; - case MODEL_0_5B: return "0.5B"; - case MODEL_1B: return "1B"; - case MODEL_1_3B: return "1.3B"; - case MODEL_1_4B: return "1.4B"; - case MODEL_1_5B: return "1.5B"; - case MODEL_1_6B: return "1.6B"; - case MODEL_2B: return "2B"; - case MODEL_2_8B: return "2.8B"; - case MODEL_3B: return "3B"; - case MODEL_4B: return "4B"; - case MODEL_6B: return "6B"; - case MODEL_6_9B: return "6.9B"; - case MODEL_7B: return "7B"; - case MODEL_8B: return "8B"; - case MODEL_9B: return "9B"; - case MODEL_11B: return "11B"; - case MODEL_12B: return "12B"; - case MODEL_13B: return "13B"; - case MODEL_14B: return "14B"; - case MODEL_15B: return "15B"; - case MODEL_16B: return "16B"; - case MODEL_20B: return "20B"; - case MODEL_30B: return "30B"; - case MODEL_32B: return "32B"; - case MODEL_34B: return "34B"; - case MODEL_35B: return "35B"; - case MODEL_40B: return "40B"; - case MODEL_65B: return "65B"; - case MODEL_70B: return "70B"; - case MODEL_236B: return "236B"; - case MODEL_314B: return "314B"; - case MODEL_671B: return "671B"; - case MODEL_SMALL: return "0.1B"; - case MODEL_MEDIUM: return "0.4B"; - case MODEL_LARGE: return "0.8B"; - case MODEL_XL: return "1.5B"; - case MODEL_A1_7B: return "A1.7B"; - case MODEL_A2_7B: return "A2.7B"; - case MODEL_8x7B: return "8x7B"; - case MODEL_8x22B: return "8x22B"; - case MODEL_16x12B: return "16x12B"; - case MODEL_16x3_8B: return "16x3.8B"; - case MODEL_10B_128x3_66B: return "10B+128x3.66B"; - case MODEL_57B_A14B: return "57B.A14B"; - case MODEL_27B: return "27B"; - default: return "?B"; - } -} - -static std::string llama_model_ftype_name(llama_ftype ftype) { - if (ftype & LLAMA_FTYPE_GUESSED) { - return llama_model_ftype_name((enum llama_ftype) (ftype & ~LLAMA_FTYPE_GUESSED)) + " (guessed)"; - } - - switch (ftype) { - case LLAMA_FTYPE_ALL_F32: return "all F32"; - case LLAMA_FTYPE_MOSTLY_F16: return "F16"; - case LLAMA_FTYPE_MOSTLY_BF16: return "BF16"; - case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0"; - case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1"; - case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0"; - case LLAMA_FTYPE_MOSTLY_Q5_1: return "Q5_1"; - case LLAMA_FTYPE_MOSTLY_Q8_0: return "Q8_0"; - case LLAMA_FTYPE_MOSTLY_Q2_K: return "Q2_K - Medium"; - case LLAMA_FTYPE_MOSTLY_Q2_K_S: return "Q2_K - Small"; - case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "Q3_K - Small"; - case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "Q3_K - Medium"; - case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "Q3_K - Large"; - case LLAMA_FTYPE_MOSTLY_Q4_K_S: return "Q4_K - Small"; - case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "Q4_K - Medium"; - case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "Q5_K - Small"; - case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "Q5_K - Medium"; - case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K"; - case LLAMA_FTYPE_MOSTLY_TQ1_0: return "TQ1_0 - 1.69 bpw ternary"; - case LLAMA_FTYPE_MOSTLY_TQ2_0: return "TQ2_0 - 2.06 bpw ternary"; - case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ3_XXS: return "IQ3_XXS - 3.0625 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ1_S: return "IQ1_S - 1.5625 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw"; - case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw"; - - default: return "unknown, may not work"; + case LLM_TYPE_14M: return "14M"; + case LLM_TYPE_17M: return "17M"; + case LLM_TYPE_22M: return "22M"; + case LLM_TYPE_33M: return "33M"; + case LLM_TYPE_60M: return "60M"; + case LLM_TYPE_70M: return "70M"; + case LLM_TYPE_80M: return "80M"; + case LLM_TYPE_109M: return "109M"; + case LLM_TYPE_137M: return "137M"; + case LLM_TYPE_160M: return "160M"; + case LLM_TYPE_220M: return "220M"; + case LLM_TYPE_250M: return "250M"; + case LLM_TYPE_270M: return "270M"; + case LLM_TYPE_335M: return "335M"; + case LLM_TYPE_410M: return "410M"; + case LLM_TYPE_450M: return "450M"; + case LLM_TYPE_770M: return "770M"; + case LLM_TYPE_780M: return "780M"; + case LLM_TYPE_0_5B: return "0.5B"; + case LLM_TYPE_1B: return "1B"; + case LLM_TYPE_1_3B: return "1.3B"; + case LLM_TYPE_1_4B: return "1.4B"; + case LLM_TYPE_1_5B: return "1.5B"; + case LLM_TYPE_1_6B: return "1.6B"; + case LLM_TYPE_2B: return "2B"; + case LLM_TYPE_2_8B: return "2.8B"; + case LLM_TYPE_3B: return "3B"; + case LLM_TYPE_4B: return "4B"; + case LLM_TYPE_6B: return "6B"; + case LLM_TYPE_6_9B: return "6.9B"; + case LLM_TYPE_7B: return "7B"; + case LLM_TYPE_8B: return "8B"; + case LLM_TYPE_9B: return "9B"; + case LLM_TYPE_11B: return "11B"; + case LLM_TYPE_12B: return "12B"; + case LLM_TYPE_13B: return "13B"; + case LLM_TYPE_14B: return "14B"; + case LLM_TYPE_15B: return "15B"; + case LLM_TYPE_16B: return "16B"; + case LLM_TYPE_20B: return "20B"; + case LLM_TYPE_30B: return "30B"; + case LLM_TYPE_32B: return "32B"; + case LLM_TYPE_34B: return "34B"; + case LLM_TYPE_35B: return "35B"; + case LLM_TYPE_40B: return "40B"; + case LLM_TYPE_65B: return "65B"; + case LLM_TYPE_70B: return "70B"; + case LLM_TYPE_236B: return "236B"; + case LLM_TYPE_314B: return "314B"; + case LLM_TYPE_671B: return "671B"; + case LLM_TYPE_SMALL: return "0.1B"; + case LLM_TYPE_MEDIUM: return "0.4B"; + case LLM_TYPE_LARGE: return "0.8B"; + case LLM_TYPE_XL: return "1.5B"; + case LLM_TYPE_A1_7B: return "A1.7B"; + case LLM_TYPE_A2_7B: return "A2.7B"; + case LLM_TYPE_8x7B: return "8x7B"; + case LLM_TYPE_8x22B: return "8x22B"; + case LLM_TYPE_16x12B: return "16x12B"; + case LLM_TYPE_16x3_8B: return "16x3.8B"; + case LLM_TYPE_10B_128x3_66B: return "10B+128x3.66B"; + case LLM_TYPE_57B_A14B: return "57B.A14B"; + case LLM_TYPE_27B: return "27B"; + default: return "?B"; } } @@ -135,84 +91,6 @@ static const char * llama_expert_gating_func_name(llama_expert_gating_func_type } } -std::string llama_model_arch_name (const llama_model & model) { - return llm_arch_name(model.arch); -} - -std::string llama_model_type_name (const llama_model & model) { - return llm_type_name(model.type); -} - -std::string llama_model_ftype_name(const llama_model & model) { - return llama_model_ftype_name(model.ftype); -} - -template -static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) { - ggml_init_params params = { - /*.mem_size =*/ ggml_tensor_overhead()*8, - /*.mem_buffer =*/ NULL, - /*.no_alloc =*/ true, - }; - - ggml_context_ptr ctx { ggml_init(params) }; - if (!ctx) { - throw std::runtime_error(format("failed to create ggml context")); - } - - ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) }; - ggml_tensor * op_tensor = fn(ctx.get()); - for (int i = 0; i < GGML_MAX_SRC; i++) { - if (op_tensor->src[i] != nullptr) { - assert(op_tensor->src[i]->buffer == nullptr); - op_tensor->src[i]->buffer = buf.get(); - } - } - - bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor); - - return op_supported; -} - -template -static ggml_backend_buffer_type_t select_buft(const llama_model::buft_list_t & buft_list, const F & fn) { - for (const auto & cur : buft_list) { - ggml_backend_dev_t cur_dev = cur.first; - ggml_backend_buffer_type_t cur_buft = cur.second; - if (buft_supported(cur_buft, cur_dev, fn)) { - return cur_buft; - } - } - - throw std::runtime_error(format("no suitable buffer type found")); -} - -ggml_backend_buffer_type_t llama_model_select_buft(const llama_model & model, int il) { - return select_buft( - *model.dev_layer.at(il).buft_list, - [&](ggml_context * ctx) { - ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd); - ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, model.hparams.n_embd); - return ggml_add(ctx, cur, layer_dir); - }); -} - -struct ggml_tensor * llama_model_get_tensor(const struct llama_model & model, const char * name) { - auto it = std::find_if(model.tensors_by_name.begin(), model.tensors_by_name.end(), - [name](const std::pair & it) { - return it.first == name; - }); - if (it == model.tensors_by_name.end()) { - return nullptr; - } - - return it->second; -} - -size_t llama_model_max_nodes(const llama_model & model) { - return std::max(8192, model.tensors_by_name.size()*5); -} - static const std::map LLAMA_ROPE_SCALING_TYPES = { { LLAMA_ROPE_SCALING_TYPE_NONE, "none" }, { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" }, @@ -230,37 +108,284 @@ static llama_rope_scaling_type llama_rope_scaling_type_from_string(const std::st return LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED; } -// NOTE: avoid ever using this except for building the token_to_piece caches -static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) { - std::string piece; - piece.resize(piece.capacity()); // using string internal cache - const int n_chars = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special); - if (n_chars < 0) { - piece.resize(-n_chars); - int check = llama_token_to_piece(model, token, &piece[0], piece.size(), 0, special); - GGML_ASSERT(check == -n_chars); - } - else { - piece.resize(n_chars); +// checks if the weight tensor can be used with the specified buffer type and device +static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w, ggml_op op, ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev) { + GGML_ASSERT(w != nullptr); + + if (op == GGML_OP_NONE) { + return true; } - return piece; + ggml_init_params params = { + /*.mem_size =*/ ggml_tensor_overhead()*8, + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + ggml_context_ptr ctx_ptr { ggml_init(params) }; + if (!ctx_ptr) { + throw std::runtime_error(format("failed to create ggml context")); + } + ggml_context * ctx = ctx_ptr.get(); + + ggml_tensor * op_tensor = nullptr; + + switch (op) { + case GGML_OP_GET_ROWS: + { + ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512); + op_tensor = ggml_get_rows(ctx, w, b); + } break; + case GGML_OP_MUL_MAT: + { + ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], 512, w->ne[2], w->ne[3]); + op_tensor = ggml_mul_mat(ctx, w, b); + } break; + case GGML_OP_MUL_MAT_ID: + { + int n_expert_used = hparams.n_expert_used; + ggml_tensor * b = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512); + ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512); + op_tensor = ggml_mul_mat_id(ctx, w, b, ids); + } break; + case GGML_OP_ADD: + { + ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]); + op_tensor = ggml_add(ctx, a, w); + } break; + case GGML_OP_MUL: + { + ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]); + op_tensor = ggml_mul(ctx, a, w); + } break; + case GGML_OP_DIV: + { + ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, w->ne[0]); + op_tensor = ggml_div(ctx, a, w); + } break; + case GGML_OP_ROPE: + { + int n_embd_head = hparams.n_embd_head_v; + int n_head = hparams.n_head(); + ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd_head, n_head, 512); + ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512); + op_tensor = ggml_rope_ext( + ctx, a, b, w, + 0, 0, 0, 0, 0, + 0, 0, 0, 0 + ); + + } break; + case GGML_OP_SSM_CONV: + { + // FIXME + ggml_tensor * conv_x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 12345, w->ne[1], 6789); + op_tensor = ggml_ssm_conv(ctx, conv_x, w); + } break; + case GGML_OP_SSM_SCAN: + { + // FIXME + const int64_t d_state = w->ne[0]; + const int64_t d_inner = w->ne[1]; + const int64_t n_seq_tokens = 512; + const int64_t n_seqs = 1; + ggml_tensor * s = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, d_inner, n_seqs); + ggml_tensor * x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs); + ggml_tensor * dt = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs); + ggml_tensor * B = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs); + ggml_tensor * C = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs); + op_tensor = ggml_ssm_scan(ctx, s, x, dt, w, B, C); + } break; + case GGML_OP_RWKV_WKV6: + { + // FIXME + const int64_t S = 123; + const int64_t H = 123; + const int64_t n_tokens = 123; + const int64_t n_seqs = 123; + ggml_tensor * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); + ggml_tensor * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); + ggml_tensor * r = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); + ggml_tensor * tf = w; + ggml_tensor * td = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens); + ggml_tensor * state = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, n_seqs, S, H); + op_tensor = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, state); + } break; + case GGML_OP_IM2COL: + { + const int n_embd = hparams.n_embd; + ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd, w->ne[1], 1, 1); + op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16); + } break; + default: + GGML_ABORT("%s: missing test for op %s for tensor %s", __func__, ggml_op_name(op), w->name); + } + + // create a temporary dummy buffer for the weight so that supports_op can check the buffer type + GGML_ASSERT(w->buffer == nullptr); + w->buffer = ggml_backend_buft_alloc_buffer(buft, 0); + bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor); + ggml_backend_buffer_free(w->buffer); + w->buffer = nullptr; + + return op_supported; } -void llm_load_stats(llama_model_loader & ml, llama_model & model) { - model.n_elements = ml.n_elements; - model.n_bytes = ml.n_bytes; +// lists of buffer types used for each layer +using buft_list_t = std::vector>; + +// find the first buffer type in the list that can use the tensor +static ggml_backend_buffer_type_t select_weight_buft(const llama_hparams & hparams, ggml_tensor * tensor, ggml_op op, const buft_list_t & buft_list) { + GGML_ASSERT(!buft_list.empty()); + for (const auto & cur : buft_list) { + ggml_backend_dev_t cur_dev = cur.first; + ggml_backend_buffer_type_t cur_buft = cur.second; + if (weight_buft_supported(hparams, tensor, op, cur_buft, cur_dev)) { + return cur_buft; + } + } + return nullptr; } -void llm_load_arch(llama_model_loader & ml, llama_model & model) { - model.arch = ml.get_arch(); - if (model.arch == LLM_ARCH_UNKNOWN) { +// CPU: ACCEL -> CPU extra -> GPU host -> CPU +static buft_list_t make_cpu_buft_list(const std::vector & devices) { + buft_list_t buft_list; + + // add ACCEL buffer types + for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { + ggml_backend_dev_t dev = ggml_backend_dev_get(i); + if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) { + auto * buft = ggml_backend_dev_buffer_type(dev); + // skip + if (buft != ggml_backend_cpu_buffer_type()) { + buft_list.emplace_back(dev, buft); + } + } + } + + // add extra buffer types + auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev); + auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t) + ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts"); + if (ggml_backend_dev_get_extra_bufts_fn) { + ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev); + while (extra_bufts && *extra_bufts) { + buft_list.emplace_back(cpu_dev, *extra_bufts); + ++extra_bufts; + } + } + + // add a host buffer type + // storing the tensors in a host buffer is useful when the processing of large batches + // is offloaded to a GPU device, since it reduces the time spent on data transfers + // generally, this will be done using the first device in the list + // a better approach would be to handle this on a weight-by-weight basis using the offload_op + // function of the device to determine if it would benefit from being stored in a host buffer + for (auto * dev : devices) { + ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev); + if (buft) { + buft_list.emplace_back(dev, buft); + break; + } + } + + // add the CPU buffer type + for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { + ggml_backend_dev_t dev = ggml_backend_dev_get(i); + if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU) { + buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev)); + } + } + + return buft_list; +} + +// GPU: split if LLAMA_SPLIT_MODE_ROW -> GPU +static buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, enum llama_split_mode split_mode, const float * tensor_split) { + buft_list_t buft_list; + + // add the device split buffer type if requested and available + if (split_mode == LLAMA_SPLIT_MODE_ROW) { + ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev); + auto ggml_backend_split_buffer_type_fn = (ggml_backend_split_buffer_type_t) + ggml_backend_reg_get_proc_address(reg, "ggml_backend_split_buffer_type"); + if (ggml_backend_split_buffer_type_fn) { + size_t dev_index = [&]() { + auto * reg = ggml_backend_dev_backend_reg(dev); + for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); ++i) { + if (ggml_backend_reg_dev_get(reg, i) == dev) { + return i; + } + } + throw std::runtime_error(format("device %s not found in its backend reg", ggml_backend_dev_name(dev))); + }(); + auto * buft = ggml_backend_split_buffer_type_fn(dev_index, tensor_split); + if (buft != nullptr) { + buft_list.emplace_back(dev, buft); + } + } + } + + // add the device default buffer type + buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev)); + + return buft_list; +} + +struct llama_model::impl { + impl() {} + ~impl() {} + + uint64_t n_elements = 0; + + size_t n_bytes = 0; + + std::string desc_str; + + // model memory mapped files + llama_mmaps mappings; + + // objects representing data potentially being locked in memory + llama_mlocks mlock_bufs; + llama_mlocks mlock_mmaps; + + // contexts where the model tensors metadata is stored + std::vector ctxs; + + // the model memory buffers for the tensor data + std::vector bufs; + + buft_list_t cpu_buft_list; + std::map gpu_buft_list; + + struct layer_dev { + ggml_backend_dev_t dev; + buft_list_t * buft_list; + }; + + layer_dev dev_input = {}; + layer_dev dev_output = {}; + std::vector dev_layer; +}; + +llama_model::llama_model(const struct llama_model_params & params) : params(params), pimpl(std::make_unique()) { +} + +llama_model::~llama_model() {} + +void llama_model::load_stats(llama_model_loader & ml) { + pimpl->n_elements = ml.n_elements; + pimpl->n_bytes = ml.n_bytes; +} + +void llama_model::load_arch(llama_model_loader & ml) { + arch = ml.get_arch(); + if (arch == LLM_ARCH_UNKNOWN) { throw std::runtime_error("unknown model architecture: '" + ml.get_arch_name() + "'"); } } -void llm_load_hparams(llama_model_loader & ml, llama_model & model) { - auto & hparams = model.hparams; +void llama_model::load_hparams(llama_model_loader & ml) { const gguf_context * ctx = ml.meta.get(); // get metadata as string @@ -271,14 +396,11 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { } const char * name = gguf_get_key(ctx, i); const std::string value = gguf_kv_to_str(ctx, i); - model.gguf_kv.emplace(name, value); + gguf_kv.emplace(name, value); } // get general kv - ml.get_key(LLM_KV_GENERAL_NAME, model.name, false); - - // get hparams kv - ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab, false); + ml.get_key(LLM_KV_GENERAL_NAME, name, false); // everything past this point is not vocab-related if (hparams.vocab_only) { @@ -291,7 +413,7 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_EXPERT_COUNT, hparams.n_expert, false); ml.get_key(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used, false); - if (model.arch == LLM_ARCH_WAVTOKENIZER_DEC) { + if (arch == LLM_ARCH_WAVTOKENIZER_DEC) { ml.get_key(LLM_KV_FEATURES_LENGTH, hparams.n_embd_features); ml.get_key(LLM_KV_POSNET_EMBEDDING_LENGTH, hparams.posnet.n_embd); @@ -364,7 +486,7 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot, false); - if (model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_DECI || model.arch == LLM_ARCH_FALCON) { + if (arch == LLM_ARCH_LLAMA || arch == LLM_ARCH_DECI || arch == LLM_ARCH_FALCON) { if (hparams.n_rot != hparams.n_embd_head_k) { throw std::runtime_error(format("invalid n_rot: %u, expected %u", hparams.n_rot, hparams.n_embd_head_k)); } @@ -375,34 +497,36 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { hparams.n_embd_head_v = 0; } - using e_model = llm_type; // TMP + // for differentiating model types + uint32_t n_vocab = 0; + ml.get_key(LLM_KV_VOCAB_SIZE, n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, n_vocab, false); // arch-specific KVs - switch (model.arch) { + switch (arch) { case LLM_ARCH_LLAMA: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); if (hparams.n_expert == 8) { switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_8x7B; break; - case 56: model.type = e_model::MODEL_8x22B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_8x7B; break; + case 56: type = LLM_TYPE_8x22B; break; + default: type = LLM_TYPE_UNKNOWN; } } else { switch (hparams.n_layer) { - case 16: model.type = e_model::MODEL_1B; break; // Llama 3.2 1B - case 22: model.type = e_model::MODEL_1B; break; - case 26: model.type = e_model::MODEL_3B; break; - case 28: model.type = e_model::MODEL_3B; break; // Llama 3.2 3B + case 16: type = LLM_TYPE_1B; break; // Llama 3.2 1B + case 22: type = LLM_TYPE_1B; break; + case 26: type = LLM_TYPE_3B; break; + case 28: type = LLM_TYPE_3B; break; // Llama 3.2 3B // granite uses a vocab with len 49152 - case 32: model.type = hparams.n_vocab == 49152 ? e_model::MODEL_3B : (hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B); break; - case 36: model.type = e_model::MODEL_8B; break; // granite - case 40: model.type = e_model::MODEL_13B; break; - case 48: model.type = e_model::MODEL_34B; break; - case 60: model.type = e_model::MODEL_30B; break; - case 80: model.type = hparams.n_head() == hparams.n_head_kv() ? e_model::MODEL_65B : e_model::MODEL_70B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = n_vocab == 49152 ? LLM_TYPE_3B : (n_vocab < 40000 ? LLM_TYPE_7B : LLM_TYPE_8B); break; + case 36: type = LLM_TYPE_8B; break; // granite + case 40: type = LLM_TYPE_13B; break; + case 48: type = LLM_TYPE_34B; break; + case 60: type = LLM_TYPE_30B; break; + case 80: type = hparams.n_head() == hparams.n_head_kv() ? LLM_TYPE_65B : LLM_TYPE_70B; break; + default: type = LLM_TYPE_UNKNOWN; } } } break; @@ -410,33 +534,33 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_7B; break; - case 80: model.type = e_model::MODEL_70B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_7B; break; + case 80: type = LLM_TYPE_70B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_MINICPM: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); - ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); - ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); - ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); + ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); switch (hparams.n_layer) { - case 52: model.type = e_model::MODEL_1B; break; - case 40: model.type = e_model::MODEL_2B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 52: type = LLM_TYPE_1B; break; + case 40: type = LLM_TYPE_2B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_MINICPM3: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); - ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q); - ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv); + ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q); + ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv); switch (hparams.n_layer) { - case 62: model.type = e_model::MODEL_4B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 62: type = LLM_TYPE_4B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_GROK: @@ -444,8 +568,8 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 64: model.type = e_model::MODEL_314B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 64: type = LLM_TYPE_314B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_FALCON: @@ -453,21 +577,21 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_7B; break; - case 60: model.type = e_model::MODEL_40B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_7B; break; + case 60: type = LLM_TYPE_40B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_BAICHUAN: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_7B; break; - case 40: model.type = e_model::MODEL_13B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_7B; break; + case 40: type = LLM_TYPE_13B; break; + default: type = LLM_TYPE_UNKNOWN; } - if (model.type == e_model::MODEL_13B) { + if (type == LLM_TYPE_13B) { // TODO: become GGUF KV parameter hparams.f_max_alibi_bias = 8.0f; } @@ -476,19 +600,19 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 24: model.type = e_model::MODEL_1B; break; - case 36: model.type = e_model::MODEL_3B; break; - case 42: model.type = e_model::MODEL_7B; break; - case 40: model.type = e_model::MODEL_15B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 24: type = LLM_TYPE_1B; break; + case 36: type = LLM_TYPE_3B; break; + case 42: type = LLM_TYPE_7B; break; + case 40: type = LLM_TYPE_15B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_REFACT: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_1B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_1B; break; + default: type = LLM_TYPE_UNKNOWN; } // TODO: become GGUF KV parameter @@ -498,48 +622,45 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); - ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false); switch (hparams.n_layer) { case 3: - model.type = e_model::MODEL_17M; break; // bge-micro + type = LLM_TYPE_17M; break; // bge-micro case 6: - model.type = e_model::MODEL_22M; break; // MiniLM-L6 + type = LLM_TYPE_22M; break; // MiniLM-L6 case 12: switch (hparams.n_embd) { - case 384: model.type = e_model::MODEL_33M; break; // MiniLM-L12, bge-small - case 768: model.type = e_model::MODEL_109M; break; // bge-base - default: model.type = e_model::MODEL_UNKNOWN; + case 384: type = LLM_TYPE_33M; break; // MiniLM-L12, bge-small + case 768: type = LLM_TYPE_109M; break; // bge-base + default: type = LLM_TYPE_UNKNOWN; } break; case 24: - model.type = e_model::MODEL_335M; break; // bge-large - default: model.type = e_model::MODEL_UNKNOWN; + type = LLM_TYPE_335M; break; // bge-large + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_JINA_BERT_V2: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); - ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type, false); hparams.f_max_alibi_bias = 8.0f; switch (hparams.n_layer) { - case 4: model.type = e_model::MODEL_33M; break; // jina-embeddings-small - case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base - default: model.type = e_model::MODEL_UNKNOWN; + case 4: type = LLM_TYPE_33M; break; // jina-embeddings-small + case 12: type = LLM_TYPE_137M; break; // jina-embeddings-base + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_NOMIC_BERT: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); ml.get_key(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn); - ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type); ml.get_key(LLM_KV_POOLING_TYPE, hparams.pooling_type); if (hparams.n_layer == 12 && hparams.n_embd == 768) { - model.type = e_model::MODEL_137M; + type = LLM_TYPE_137M; } } break; case LLM_ARCH_BLOOM: @@ -547,14 +668,14 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 24: model.type = e_model::MODEL_1B; break; + case 24: type = LLM_TYPE_1B; break; case 30: switch (hparams.n_embd) { - case 2560: model.type = e_model::MODEL_3B; break; - case 4096: model.type = e_model::MODEL_7B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 2560: type = LLM_TYPE_3B; break; + case 4096: type = LLM_TYPE_7B; break; + default: type = LLM_TYPE_UNKNOWN; } break; - default: model.type = e_model::MODEL_UNKNOWN; + default: type = LLM_TYPE_UNKNOWN; } // TODO: become GGUF KV parameter @@ -567,9 +688,9 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_7B; break; - case 48: model.type = e_model::MODEL_30B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_7B; break; + case 48: type = LLM_TYPE_30B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_STABLELM: @@ -577,10 +698,10 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 24: model.type = e_model::MODEL_1B; break; - case 32: model.type = e_model::MODEL_3B; break; - case 40: model.type = e_model::MODEL_12B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 24: type = LLM_TYPE_1B; break; + case 32: type = LLM_TYPE_3B; break; + case 40: type = LLM_TYPE_12B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_QWEN: @@ -588,9 +709,9 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_7B; break; - case 40: model.type = e_model::MODEL_13B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_7B; break; + case 40: type = LLM_TYPE_13B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_QWEN2VL: @@ -602,27 +723,27 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 24: model.type = hparams.n_embd == 1024 ? e_model::MODEL_0_5B : e_model::MODEL_1B; break; - case 28: model.type = hparams.n_embd == 1536 ? e_model::MODEL_1_5B : e_model::MODEL_7B; break; - case 32: model.type = e_model::MODEL_7B; break; - case 36: model.type = e_model::MODEL_3B; break; - case 40: model.type = hparams.n_head() == 20 ? e_model::MODEL_4B : e_model::MODEL_13B; break; - case 48: model.type = e_model::MODEL_14B; break; - case 64: model.type = e_model::MODEL_32B; break; - case 80: model.type = e_model::MODEL_70B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 24: type = hparams.n_embd == 1024 ? LLM_TYPE_0_5B : LLM_TYPE_1B; break; + case 28: type = hparams.n_embd == 1536 ? LLM_TYPE_1_5B : LLM_TYPE_7B; break; + case 32: type = LLM_TYPE_7B; break; + case 36: type = LLM_TYPE_3B; break; + case 40: type = hparams.n_head() == 20 ? LLM_TYPE_4B : LLM_TYPE_13B; break; + case 48: type = LLM_TYPE_14B; break; + case 64: type = LLM_TYPE_32B; break; + case 80: type = LLM_TYPE_70B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_QWEN2MOE: { - ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp, false); ml.get_key(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_shexp, false); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 24: model.type = e_model::MODEL_A2_7B; break; - case 28: model.type = e_model::MODEL_57B_A14B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 24: type = LLM_TYPE_A2_7B; break; + case 28: type = LLM_TYPE_57B_A14B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_PHI2: @@ -630,9 +751,9 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 24: model.type = e_model::MODEL_1B; break; - case 32: model.type = e_model::MODEL_3B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 24: type = LLM_TYPE_1B; break; + case 32: type = LLM_TYPE_3B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_PHI3: @@ -640,10 +761,10 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 24: model.type = e_model::MODEL_1B; break; - case 32: model.type = e_model::MODEL_3B; break; - case 40: model.type = e_model::MODEL_14B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 24: type = LLM_TYPE_1B; break; + case 32: type = LLM_TYPE_3B; break; + case 40: type = LLM_TYPE_14B; break; + default: type = LLM_TYPE_UNKNOWN; } // for backward compatibility ; see: https://github.com/ggerganov/llama.cpp/pull/8931 @@ -667,8 +788,8 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_16x3_8B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_16x3_8B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_PLAMO: @@ -676,27 +797,27 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 40: model.type = e_model::MODEL_13B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 40: type = LLM_TYPE_13B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_GPT2: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 12: model.type = e_model::MODEL_SMALL; break; - case 24: model.type = e_model::MODEL_MEDIUM; break; - case 36: model.type = e_model::MODEL_LARGE; break; - case 48: model.type = e_model::MODEL_XL; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 12: type = LLM_TYPE_SMALL; break; + case 24: type = LLM_TYPE_MEDIUM; break; + case 36: type = LLM_TYPE_LARGE; break; + case 48: type = LLM_TYPE_XL; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_CODESHELL: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 42: model.type = e_model::MODEL_7B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 42: type = LLM_TYPE_7B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_ORION: @@ -704,17 +825,17 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 40: model.type = e_model::MODEL_14B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 40: type = LLM_TYPE_14B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_INTERNLM2: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_7B; break; - case 48: model.type = e_model::MODEL_20B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_7B; break; + case 48: type = LLM_TYPE_20B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_GEMMA: @@ -722,37 +843,37 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 18: model.type = e_model::MODEL_2B; break; - case 28: model.type = e_model::MODEL_7B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 18: type = LLM_TYPE_2B; break; + case 28: type = LLM_TYPE_7B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_GEMMA2: { hparams.n_swa = 4096; // default value of gemma 2 - ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); + ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); - ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false); - ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false); + ml.get_key(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping, false); + ml.get_key(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping, false); hparams.attn_soft_cap = true; switch (hparams.n_layer) { - case 26: model.type = e_model::MODEL_2B; break; - case 42: model.type = e_model::MODEL_9B; break; - case 46: model.type = e_model::MODEL_27B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 26: type = LLM_TYPE_2B; break; + case 42: type = LLM_TYPE_9B; break; + case 46: type = LLM_TYPE_27B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_STARCODER2: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 30: model.type = e_model::MODEL_3B; break; - case 32: model.type = e_model::MODEL_7B; break; - case 40: model.type = e_model::MODEL_15B; break; - case 52: model.type = e_model::MODEL_20B; break; // granite - case 88: model.type = e_model::MODEL_34B; break; // granite - default: model.type = e_model::MODEL_UNKNOWN; + case 30: type = LLM_TYPE_3B; break; + case 32: type = LLM_TYPE_7B; break; + case 40: type = LLM_TYPE_15B; break; + case 52: type = LLM_TYPE_20B; break; // granite + case 88: type = LLM_TYPE_34B; break; // granite + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_MAMBA: @@ -768,51 +889,51 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { switch (hparams.n_layer) { case 24: switch (hparams.n_embd) { - case 768: model.type = e_model::MODEL_SMALL; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 768: type = LLM_TYPE_SMALL; break; + default: type = LLM_TYPE_UNKNOWN; } break; case 48: switch (hparams.n_embd) { - case 1024: model.type = e_model::MODEL_MEDIUM; break; - case 1536: model.type = e_model::MODEL_LARGE; break; - case 2048: model.type = e_model::MODEL_XL; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 1024: type = LLM_TYPE_MEDIUM; break; + case 1536: type = LLM_TYPE_LARGE; break; + case 2048: type = LLM_TYPE_XL; break; + default: type = LLM_TYPE_UNKNOWN; } break; case 64: switch (hparams.n_embd) { - case 2560: model.type = e_model::MODEL_3B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 2560: type = LLM_TYPE_3B; break; + default: type = LLM_TYPE_UNKNOWN; } break; - default: model.type = e_model::MODEL_UNKNOWN; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_XVERSE: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_7B; break; - case 40: model.type = e_model::MODEL_13B; break; - case 80: model.type = e_model::MODEL_65B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_7B; break; + case 40: type = LLM_TYPE_13B; break; + case 80: type = LLM_TYPE_65B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_COMMAND_R: { - ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 40: model.type = e_model::MODEL_35B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 40: type = LLM_TYPE_35B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_COHERE2: { ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa); - ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); - ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_8B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_8B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_DBRX: @@ -821,8 +942,8 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv); switch (hparams.n_layer) { - case 40: model.type = e_model::MODEL_16x12B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 40: type = LLM_TYPE_16x12B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_OLMO: @@ -831,10 +952,10 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv, false); switch (hparams.n_layer) { - case 22: model.type = e_model::MODEL_1B; break; - case 32: model.type = e_model::MODEL_7B; break; - case 80: model.type = e_model::MODEL_70B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 22: type = LLM_TYPE_1B; break; + case 32: type = LLM_TYPE_7B; break; + case 80: type = LLM_TYPE_70B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_OLMO2: @@ -842,18 +963,18 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 16: model.type = e_model::MODEL_1B; break; - case 32: model.type = e_model::MODEL_7B; break; - case 40: model.type = e_model::MODEL_13B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 16: type = LLM_TYPE_1B; break; + case 32: type = LLM_TYPE_7B; break; + case 40: type = LLM_TYPE_13B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_OLMOE: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 16: model.type = e_model::MODEL_A1_7B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 16: type = LLM_TYPE_A1_7B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_OPENELM: @@ -861,57 +982,57 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 16: model.type = e_model::MODEL_270M; break; - case 20: model.type = e_model::MODEL_450M; break; - case 28: model.type = e_model::MODEL_1B; break; - case 36: model.type = e_model::MODEL_3B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 16: type = LLM_TYPE_270M; break; + case 20: type = LLM_TYPE_450M; break; + case 28: type = LLM_TYPE_1B; break; + case 36: type = LLM_TYPE_3B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_GPTNEOX: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); - ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res); + ml.get_key(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res); switch (hparams.n_layer) { case 6: switch (hparams.n_ff()) { - case 512: model.type = e_model::MODEL_14M; break; - case 2048: model.type = e_model::MODEL_70M; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 512: type = LLM_TYPE_14M; break; + case 2048: type = LLM_TYPE_70M; break; + default: type = LLM_TYPE_UNKNOWN; } break; case 12: switch (hparams.n_ff()) { - case 3072: model.type = e_model::MODEL_160M; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 3072: type = LLM_TYPE_160M; break; + default: type = LLM_TYPE_UNKNOWN; } break; case 16: switch (hparams.n_ff()) { - case 8192: model.type = e_model::MODEL_1B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 8192: type = LLM_TYPE_1B; break; + default: type = LLM_TYPE_UNKNOWN; } break; case 24: switch (hparams.n_ff()) { - case 4096: model.type = e_model::MODEL_410M; break; - case 8192: model.type = e_model::MODEL_1_4B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 4096: type = LLM_TYPE_410M; break; + case 8192: type = LLM_TYPE_1_4B; break; + default: type = LLM_TYPE_UNKNOWN; } break; case 32: switch (hparams.n_ff()) { - case 10240: model.type = e_model::MODEL_2_8B; break; - case 16384: model.type = e_model::MODEL_6_9B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 10240: type = LLM_TYPE_2_8B; break; + case 16384: type = LLM_TYPE_6_9B; break; + default: type = LLM_TYPE_UNKNOWN; } break; case 36: switch (hparams.n_ff()) { - case 20480: model.type = e_model::MODEL_12B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 20480: type = LLM_TYPE_12B; break; + default: type = LLM_TYPE_UNKNOWN; } break; case 44: switch (hparams.n_ff()) { - case 24576: model.type = e_model::MODEL_20B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 24576: type = LLM_TYPE_20B; break; + default: type = LLM_TYPE_UNKNOWN; } break; - default: model.type = e_model::MODEL_UNKNOWN; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_ARCTIC: @@ -920,40 +1041,40 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { if (hparams.n_expert == 128) { switch (hparams.n_layer) { - case 35: model.type = e_model::MODEL_10B_128x3_66B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 35: type = LLM_TYPE_10B_128x3_66B; break; + default: type = LLM_TYPE_UNKNOWN; } } else { - model.type = e_model::MODEL_UNKNOWN; + type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_DEEPSEEK: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); - ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); - ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); - ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); - ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); switch (hparams.n_layer) { - case 28: model.type = e_model::MODEL_20B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 28: type = LLM_TYPE_20B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_DEEPSEEK2: { bool is_lite = (hparams.n_layer == 27); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); - ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); + ml.get_key(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead); if (!is_lite) { ml.get_key(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q); } - ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv); + ml.get_key(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv); ml.get_key(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp); - ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); - ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); - ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false); - ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false); + ml.get_key(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale); + ml.get_key(LLM_KV_EXPERT_WEIGHTS_NORM, hparams.expert_weights_norm, false); + ml.get_key(LLM_KV_EXPERT_GATING_FUNC, hparams.expert_gating_func, false); if (hparams.expert_gating_func == LLAMA_EXPERT_GATING_FUNC_TYPE_NONE) { // for compatibility with existing DeepSeek V2 and V2.5 GGUFs // that have no expert_gating_func model parameter set @@ -962,19 +1083,19 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul); switch (hparams.n_layer) { - case 27: model.type = e_model::MODEL_16B; break; - case 60: model.type = e_model::MODEL_236B; break; - case 61: model.type = e_model::MODEL_671B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 27: type = LLM_TYPE_16B; break; + case 60: type = LLM_TYPE_236B; break; + case 61: type = LLM_TYPE_671B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_CHATGLM: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 28: model.type = e_model::MODEL_6B; break; - case 40: model.type = e_model::MODEL_9B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 28: type = LLM_TYPE_6B; break; + case 40: type = LLM_TYPE_9B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_BITNET: @@ -982,13 +1103,13 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 26: model.type = e_model::MODEL_3B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 26: type = LLM_TYPE_3B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_T5: { - ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts); uint32_t dec_start_token_id; @@ -997,32 +1118,32 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { } switch (hparams.n_layer) { - case 6: model.type = e_model::MODEL_60M; break; // t5-small - case 8: model.type = e_model::MODEL_80M; break; // flan-t5-small + case 6: type = LLM_TYPE_60M; break; // t5-small + case 8: type = LLM_TYPE_80M; break; // flan-t5-small case 12: switch (hparams.n_ff()) { - case 3072: model.type = e_model::MODEL_220M; break; // t5-base - case 2048: model.type = e_model::MODEL_250M; break; // flan-t5-base - default: model.type = e_model::MODEL_UNKNOWN; + case 3072: type = LLM_TYPE_220M; break; // t5-base + case 2048: type = LLM_TYPE_250M; break; // flan-t5-base + default: type = LLM_TYPE_UNKNOWN; } break; case 24: switch (hparams.n_ff()) { - case 4096: model.type = e_model::MODEL_770M; break; // t5-large - case 2816: model.type = e_model::MODEL_780M; break; // flan-t5-large - case 16384: model.type = e_model::MODEL_3B; break; // t5-3b - case 5120: model.type = e_model::MODEL_3B; break; // flan-t5-xl - case 65536: model.type = e_model::MODEL_11B; break; // t5-11b - case 10240: model.type = e_model::MODEL_11B; break; // flan-t5-xxl - default: model.type = e_model::MODEL_UNKNOWN; + case 4096: type = LLM_TYPE_770M; break; // t5-large + case 2816: type = LLM_TYPE_780M; break; // flan-t5-large + case 16384: type = LLM_TYPE_3B; break; // t5-3b + case 5120: type = LLM_TYPE_3B; break; // flan-t5-xl + case 65536: type = LLM_TYPE_11B; break; // t5-11b + case 10240: type = LLM_TYPE_11B; break; // flan-t5-xxl + default: type = LLM_TYPE_UNKNOWN; } break; - default: model.type = e_model::MODEL_UNKNOWN; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_T5ENCODER: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); ml.get_key(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts); - model.type = e_model::MODEL_UNKNOWN; + type = LLM_TYPE_UNKNOWN; } break; case LLM_ARCH_JAIS: { @@ -1030,18 +1151,18 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias); switch (hparams.n_layer) { - case 24: model.type = e_model::MODEL_1_3B; break; - case 40: model.type = e_model::MODEL_13B; break; + case 24: type = LLM_TYPE_1_3B; break; + case 40: type = LLM_TYPE_13B; break; /* TODO: add variants */ - default: model.type = e_model::MODEL_UNKNOWN; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_NEMOTRON: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_4B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_4B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_EXAONE: @@ -1049,48 +1170,48 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_8B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_8B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_RWKV6: case LLM_ARCH_RWKV6QWEN2: { - ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps, false); + ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps, false); ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps, false); - ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size); - ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim); - ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim); - ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false); - ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT, hparams.token_shift_count, false); + ml.get_key(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size); + ml.get_key(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim); + ml.get_key(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim); + ml.get_key(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers, false); + ml.get_key(LLM_KV_TOKEN_SHIFT_COUNT, hparams.token_shift_count, false); switch (hparams.n_layer) { - case 24: model.type = e_model::MODEL_1_6B; break; + case 24: type = LLM_TYPE_1_6B; break; case 32: switch (hparams.n_embd) { - case 2560: model.type = e_model::MODEL_3B; break; - case 4096: model.type = e_model::MODEL_7B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 2560: type = LLM_TYPE_3B; break; + case 4096: type = LLM_TYPE_7B; break; + default: type = LLM_TYPE_UNKNOWN; } break; - case 61: model.type = e_model::MODEL_14B; break; - case 64: model.type = e_model::MODEL_32B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 61: type = LLM_TYPE_14B; break; + case 64: type = LLM_TYPE_32B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_GRANITE: case LLM_ARCH_GRANITE_MOE: { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); - ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); - ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); - ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); - ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale); + ml.get_key(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale); + ml.get_key(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale); + ml.get_key(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale); + ml.get_key(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_3B; break; - case 40: model.type = e_model::MODEL_3B; break; + case 32: type = LLM_TYPE_3B; break; + case 40: type = LLM_TYPE_3B; break; // Add additional layer/vocab/etc checks here for other model sizes - default: model.type = e_model::MODEL_UNKNOWN; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_CHAMELEON: @@ -1100,9 +1221,9 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { ml.get_key(LLM_KV_SWIN_NORM, hparams.swin_norm); switch (hparams.n_layer) { - case 32: model.type = e_model::MODEL_7B; break; - case 48: model.type = e_model::MODEL_34B; break; - default: model.type = e_model::MODEL_UNKNOWN; + case 32: type = LLM_TYPE_7B; break; + case 48: type = LLM_TYPE_34B; break; + default: type = LLM_TYPE_UNKNOWN; } } break; case LLM_ARCH_WAVTOKENIZER_DEC: @@ -1115,732 +1236,2288 @@ void llm_load_hparams(llama_model_loader & ml, llama_model & model) { default: throw std::runtime_error("unsupported model architecture"); } - model.ftype = ml.ftype; + pimpl->n_bytes = ml.n_bytes; + + pimpl->desc_str = arch_name() + " " + type_name() + " " + ml.ftype_name(); if (hparams.f_max_alibi_bias > 0.0f) { hparams.use_alibi = true; } - hparams.rope_type = llama_rope_type(&model); + hparams.rope_type = llama_model_rope_type(this); } -void llm_load_vocab(llama_model_loader & ml, llama_model & model) { - auto & vocab = model.vocab; +void llama_model::load_vocab(llama_model_loader & ml) { + const auto kv = LLM_KV(arch); - struct gguf_context * ctx = ml.meta.get(); + vocab.load(ml, kv); +} - const auto kv = LLM_KV(model.arch); +bool llama_model::load_tensors(llama_model_loader & ml) { + const auto & split_mode = params.split_mode; + const auto & n_gpu_layers = params.n_gpu_layers; + const auto & use_mlock = params.use_mlock; + const auto & tensor_split = params.tensor_split; - // determine vocab type - { - std::string tokenizer_model; - std::string tokenizer_pre; + const int n_layer = hparams.n_layer; - ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model); - ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false); + const bool use_mmap_buffer = true; - if (tokenizer_model == "no_vocab" || tokenizer_model == "none") { - vocab.type = LLAMA_VOCAB_TYPE_NONE; - - // default special tokens - vocab.special_bos_id = LLAMA_TOKEN_NULL; - vocab.special_eos_id = LLAMA_TOKEN_NULL; - vocab.special_unk_id = LLAMA_TOKEN_NULL; - vocab.special_sep_id = LLAMA_TOKEN_NULL; - vocab.special_pad_id = LLAMA_TOKEN_NULL; - vocab.special_cls_id = LLAMA_TOKEN_NULL; - vocab.special_mask_id = LLAMA_TOKEN_NULL; - vocab.linefeed_id = LLAMA_TOKEN_NULL; - - // read vocab size from metadata - if (!ml.get_key(LLM_KV_VOCAB_SIZE, vocab.n_vocab, false)) { - vocab.n_vocab = 0; - LLAMA_LOG_WARN("%s: there is no vocab_size in metadata, vocab.n_vocab will be set to %u\n", __func__, vocab.n_vocab); - } - return; - } - - if (tokenizer_model == "llama") { - vocab.type = LLAMA_VOCAB_TYPE_SPM; - - // default special tokens - vocab.special_bos_id = 1; - vocab.special_eos_id = 2; - vocab.special_unk_id = 0; - vocab.special_sep_id = LLAMA_TOKEN_NULL; - vocab.special_pad_id = LLAMA_TOKEN_NULL; - vocab.special_cls_id = LLAMA_TOKEN_NULL; - vocab.special_mask_id = LLAMA_TOKEN_NULL; - } else if (tokenizer_model == "bert") { - vocab.type = LLAMA_VOCAB_TYPE_WPM; - - // default special tokens - vocab.special_bos_id = LLAMA_TOKEN_NULL; - vocab.special_eos_id = LLAMA_TOKEN_NULL; - vocab.special_unk_id = 100; - vocab.special_sep_id = 102; - vocab.special_pad_id = 0; - vocab.special_cls_id = 101; - vocab.special_mask_id = 103; - } else if (tokenizer_model == "gpt2") { - vocab.type = LLAMA_VOCAB_TYPE_BPE; - - // read bpe merges and populate bpe ranks - const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str()); - if (merges_keyidx == -1) { - throw std::runtime_error("cannot find tokenizer merges in model file\n"); - } - - const int n_merges = gguf_get_arr_n(ctx, merges_keyidx); - for (int i = 0; i < n_merges; i++) { - const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i); - GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0); - - std::string first; - std::string second; - - const size_t pos = word.find(' ', 1); - - if (pos != std::string::npos) { - first = word.substr(0, pos); - second = word.substr(pos + 1); - } - - vocab.bpe_ranks.emplace(std::make_pair(first, second), i); - } - - // default special tokens - vocab.special_bos_id = 11; - vocab.special_eos_id = 11; - vocab.special_unk_id = LLAMA_TOKEN_NULL; - vocab.special_sep_id = LLAMA_TOKEN_NULL; - vocab.special_pad_id = LLAMA_TOKEN_NULL; - vocab.special_cls_id = LLAMA_TOKEN_NULL; - vocab.special_mask_id = LLAMA_TOKEN_NULL; - } else if (tokenizer_model == "t5") { - vocab.type = LLAMA_VOCAB_TYPE_UGM; - - // default special tokens - vocab.special_bos_id = LLAMA_TOKEN_NULL; - vocab.special_eos_id = 1; - vocab.special_unk_id = 2; - vocab.special_sep_id = LLAMA_TOKEN_NULL; - vocab.special_pad_id = 0; - vocab.special_cls_id = LLAMA_TOKEN_NULL; - vocab.special_mask_id = LLAMA_TOKEN_NULL; - - const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str()); - if (precompiled_charsmap_keyidx != -1) { - size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx); - const char * precompiled_charsmap = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx); - vocab.precompiled_charsmap.assign(precompiled_charsmap, precompiled_charsmap + n_precompiled_charsmap); -#ifdef IS_BIG_ENDIAN - // correct endiannes of data in precompiled_charsmap binary blob - uint32_t * xcda_blob_size = (uint32_t *) &vocab.precompiled_charsmap[0]; - *xcda_blob_size = __builtin_bswap32(*xcda_blob_size); - assert(*xcda_blob_size + sizeof(uint32_t) < n_precompiled_charsmap); - size_t xcda_array_size = *xcda_blob_size / sizeof(uint32_t); - uint32_t * xcda_array = (uint32_t *) &vocab.precompiled_charsmap[sizeof(uint32_t)]; - for (size_t i = 0; i < xcda_array_size; ++i) { - xcda_array[i] = __builtin_bswap32(xcda_array[i]); - } -#endif - } - } else if (tokenizer_model == "rwkv") { - vocab.type = LLAMA_VOCAB_TYPE_RWKV; - - // default special tokens - vocab.special_bos_id = LLAMA_TOKEN_NULL; - vocab.special_eos_id = LLAMA_TOKEN_NULL; - vocab.special_unk_id = LLAMA_TOKEN_NULL; - vocab.special_sep_id = LLAMA_TOKEN_NULL; - vocab.special_pad_id = LLAMA_TOKEN_NULL; - } else { - throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str())); - } - - // for now, only BPE models have pre-tokenizers - if (vocab.type == LLAMA_VOCAB_TYPE_BPE) { - vocab.tokenizer_add_space_prefix = false; - vocab.tokenizer_clean_spaces = true; - if (tokenizer_pre.empty()) { - LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__); - LLAMA_LOG_WARN("%s: \n", __func__); - LLAMA_LOG_WARN("%s: ************************************ \n", __func__); - LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED! \n", __func__); - LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL \n", __func__); - LLAMA_LOG_WARN("%s: ************************************ \n", __func__); - LLAMA_LOG_WARN("%s: \n", __func__); - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; - } else if (tokenizer_pre == "default") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; - } else if ( - tokenizer_pre == "llama3" || - tokenizer_pre == "llama-v3" || - tokenizer_pre == "llama-bpe"|| - tokenizer_pre == "falcon3") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_LLAMA3; - vocab.tokenizer_ignore_merges = true; - vocab.tokenizer_add_bos = true; - } else if ( - tokenizer_pre == "deepseek-llm") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM; - vocab.tokenizer_clean_spaces = false; - } else if ( - tokenizer_pre == "deepseek-coder") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER; - vocab.tokenizer_clean_spaces = false; - } else if ( - tokenizer_pre == "deepseek-v3") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM; - vocab.tokenizer_clean_spaces = false; - } else if ( - tokenizer_pre == "falcon") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_FALCON; - } else if ( - tokenizer_pre == "mpt") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MPT; - } else if ( - tokenizer_pre == "starcoder") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER; - } else if ( - tokenizer_pre == "gpt-2" || - tokenizer_pre == "phi-2" || - tokenizer_pre == "jina-es" || - tokenizer_pre == "jina-de" || - tokenizer_pre == "gigachat" || - tokenizer_pre == "jina-v1-en" || - tokenizer_pre == "jina-v2-es" || - tokenizer_pre == "jina-v2-de" || - tokenizer_pre == "jina-v2-code" || - tokenizer_pre == "roberta-bpe") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2; - } else if ( - tokenizer_pre == "refact") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_REFACT; - } else if ( - tokenizer_pre == "command-r") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_COMMAND_R; - vocab.tokenizer_clean_spaces = false; - } else if ( - tokenizer_pre == "qwen2") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2; - vocab.tokenizer_clean_spaces = false; - } else if ( - tokenizer_pre == "stablelm2") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STABLELM2; - } else if ( - tokenizer_pre == "olmo") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO; - } else if ( - tokenizer_pre == "dbrx") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX; - } else if ( - tokenizer_pre == "smaug-bpe") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMAUG; - } else if ( - tokenizer_pre == "poro-chat") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO; - vocab.tokenizer_clean_spaces = false; - } else if ( - tokenizer_pre == "chatglm-bpe") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHATGLM4; - vocab.special_bos_id = LLAMA_TOKEN_NULL; - } else if ( - tokenizer_pre == "viking") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_VIKING; - vocab.tokenizer_clean_spaces = false; - } else if ( - tokenizer_pre == "jais") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS; - } else if ( - tokenizer_pre == "tekken") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN; - vocab.tokenizer_clean_spaces = false; - vocab.tokenizer_ignore_merges = true; - vocab.tokenizer_add_bos = true; - } else if ( - tokenizer_pre == "smollm") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SMOLLM; - vocab.tokenizer_clean_spaces = false; - } else if ( - tokenizer_pre == "codeshell") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CODESHELL; - } else if ( - tokenizer_pre == "bloom") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BLOOM; - } else if ( - tokenizer_pre == "gpt3-finnish") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH; - } else if ( - tokenizer_pre == "exaone") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_EXAONE; - } else if ( - tokenizer_pre == "chameleon") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_CHAMELEON; - vocab.tokenizer_add_bos = true; - vocab.tokenizer_clean_spaces = false; - } else if ( - tokenizer_pre == "minerva-7b") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_MINERVA; - } else if ( - tokenizer_pre == "megrez") { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_QWEN2; - } else { - throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str())); - } - } else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; - vocab.tokenizer_add_space_prefix = true; - vocab.tokenizer_clean_spaces = false; - vocab.tokenizer_add_bos = true; - vocab.tokenizer_add_eos = false; - } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; - vocab.tokenizer_add_space_prefix = false; - vocab.tokenizer_clean_spaces = true; - vocab.tokenizer_add_bos = true; - vocab.tokenizer_add_eos = false; - } else if (vocab.type == LLAMA_VOCAB_TYPE_UGM) { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; - vocab.tokenizer_add_bos = false; - vocab.tokenizer_add_eos = true; - } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; - vocab.tokenizer_add_space_prefix = false; - vocab.tokenizer_clean_spaces = false; - vocab.tokenizer_add_bos = false; - vocab.tokenizer_add_eos = false; - } else { - vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT; - } - - ml.get_key(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.tokenizer_add_space_prefix, false); - ml.get_key(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.tokenizer_remove_extra_whitespaces, false); + // build a list of buffer types for the CPU and GPU devices + pimpl->cpu_buft_list = make_cpu_buft_list(devices); + for (auto * dev : devices) { + buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split); + // add CPU buffer types as a fallback + buft_list.insert(buft_list.end(), pimpl->cpu_buft_list.begin(), pimpl->cpu_buft_list.end()); + pimpl->gpu_buft_list.emplace(dev, std::move(buft_list)); } - const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str()); - if (token_idx == -1) { - throw std::runtime_error("cannot find tokenizer vocab in model file\n"); - } - - const float * scores = nullptr; - const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str()); - if (score_idx != -1) { - scores = (const float * ) gguf_get_arr_data(ctx, score_idx); - } - - const int * toktypes = nullptr; - const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str()); - if (toktype_idx != -1) { - toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx); - } - - const uint32_t n_vocab = gguf_get_arr_n(ctx, token_idx); - - vocab.n_vocab = n_vocab; - vocab.id_to_token.resize(n_vocab); - - for (uint32_t i = 0; i < n_vocab; i++) { - std::string word = gguf_get_arr_str(ctx, token_idx, i); - if (word.empty()) { - LLAMA_LOG_WARN("%s: empty token at index %u\n", __func__, i); - word = "[EMPTY_" + std::to_string(i) + "]"; + // calculate the split points + bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + n_devices(), [](float x) { return x == 0.0f; }); + std::vector splits(n_devices()); + if (all_zero) { + // default split, by free memory + for (size_t i = 0; i < n_devices(); ++i) { + ggml_backend_dev_t dev = devices[i]; + size_t total; + size_t free; + ggml_backend_dev_memory(dev, &free, &total); + splits[i] = free; } - - vocab.token_to_id[word] = i; - vocab.max_token_len = std::max(vocab.max_token_len, (int) word.size()); - - auto & token_data = vocab.id_to_token[i]; - token_data.text = std::move(word); - token_data.score = scores ? scores[i] : 0.0f; - token_data.attr = LLAMA_TOKEN_ATTR_NORMAL; - - if (toktypes) { //TODO: remove, required until per token attributes are available from GGUF file - switch(toktypes[i]) { - case LLAMA_TOKEN_TYPE_UNKNOWN: token_data.attr = LLAMA_TOKEN_ATTR_UNKNOWN; break; - case LLAMA_TOKEN_TYPE_UNUSED: token_data.attr = LLAMA_TOKEN_ATTR_UNUSED; break; - case LLAMA_TOKEN_TYPE_NORMAL: token_data.attr = LLAMA_TOKEN_ATTR_NORMAL; break; - case LLAMA_TOKEN_TYPE_CONTROL: token_data.attr = LLAMA_TOKEN_ATTR_CONTROL; break; - case LLAMA_TOKEN_TYPE_USER_DEFINED: token_data.attr = LLAMA_TOKEN_ATTR_USER_DEFINED; break; - case LLAMA_TOKEN_TYPE_BYTE: token_data.attr = LLAMA_TOKEN_ATTR_BYTE; break; - case LLAMA_TOKEN_TYPE_UNDEFINED: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break; - default: token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED; break; - } - } - } - GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size()); - - vocab.init_tokenizer(); - - // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n' - if (vocab.type == LLAMA_VOCAB_TYPE_SPM) { - try { - vocab.linefeed_id = llama_byte_to_token_impl(vocab, '\n'); - } catch (const std::exception & e) { - LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what()); - vocab.linefeed_id = vocab.special_pad_id; - } - } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) { - vocab.linefeed_id = vocab.special_pad_id; - } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) { - const std::vector ids = llama_tokenize_internal(vocab, "\n", false); - GGML_ASSERT(!ids.empty() && "model vocab missing newline token"); - vocab.linefeed_id = ids[0]; } else { - const std::vector ids = llama_tokenize_internal(vocab, "\xC4\x8A", false); // U+010A - - //GGML_ASSERT(!ids.empty() && "model vocab missing newline token"); - if (ids.empty()) { - LLAMA_LOG_WARN("%s: model vocab missing newline token, using special_pad_id instead\n", __func__); - vocab.linefeed_id = vocab.special_pad_id; - } else { - vocab.linefeed_id = ids[0]; - } + std::copy(tensor_split, tensor_split + n_devices(), splits.begin()); } - // special tokens - { - const std::vector> special_token_types = { - { LLM_KV_TOKENIZER_BOS_ID, vocab.special_bos_id }, - { LLM_KV_TOKENIZER_EOS_ID, vocab.special_eos_id }, - { LLM_KV_TOKENIZER_EOT_ID, vocab.special_eot_id }, - { LLM_KV_TOKENIZER_EOM_ID, vocab.special_eom_id }, - { LLM_KV_TOKENIZER_UNK_ID, vocab.special_unk_id }, - { LLM_KV_TOKENIZER_SEP_ID, vocab.special_sep_id }, - { LLM_KV_TOKENIZER_PAD_ID, vocab.special_pad_id }, - { LLM_KV_TOKENIZER_CLS_ID, vocab.special_cls_id }, - { LLM_KV_TOKENIZER_MASK_ID, vocab.special_mask_id }, - { LLM_KV_TOKENIZER_FIM_PRE_ID, vocab.special_fim_pre_id }, - { LLM_KV_TOKENIZER_FIM_SUF_ID, vocab.special_fim_suf_id }, - { LLM_KV_TOKENIZER_FIM_MID_ID, vocab.special_fim_mid_id }, - { LLM_KV_TOKENIZER_FIM_PAD_ID, vocab.special_fim_pad_id }, - { LLM_KV_TOKENIZER_FIM_REP_ID, vocab.special_fim_rep_id }, - { LLM_KV_TOKENIZER_FIM_SEP_ID, vocab.special_fim_sep_id }, + // sum and normalize the splits to get the split points + float split_sum = 0.0f; + for (size_t i = 0; i < n_devices(); ++i) { + split_sum += splits[i]; + splits[i] = split_sum; + } + for (size_t i = 0; i < n_devices(); ++i) { + splits[i] /= split_sum; + } - // deprecated - { LLM_KV_TOKENIZER_PREFIX_ID, vocab.special_fim_pre_id }, - { LLM_KV_TOKENIZER_SUFFIX_ID, vocab.special_fim_suf_id }, - { LLM_KV_TOKENIZER_MIDDLE_ID, vocab.special_fim_mid_id }, + ggml_backend_dev_t cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + const int i_gpu_start = std::max((int) hparams.n_layer - n_gpu_layers, (int) 0); + const int act_gpu_layers = devices.empty() ? 0 : std::min(n_gpu_layers, (int)n_layer + 1); + auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev { + if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) { + return {cpu_dev, &pimpl->cpu_buft_list}; + } + const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin(); + auto * dev = devices.at(layer_gpu); + return {dev, &pimpl->gpu_buft_list.at(dev)}; + }; + + // assign the input layer + // there is very little benefit to offloading the input layer, so always keep it on the CPU + pimpl->dev_input = { cpu_dev, &pimpl->cpu_buft_list }; + + // assign the repeating layers to the devices according to the splits + pimpl->dev_layer.resize(n_layer); + for (int il = 0; il < n_layer; ++il) { + pimpl->dev_layer[il] = get_layer_buft_list(il); + } + + // assign the output layer + pimpl->dev_output = get_layer_buft_list(n_layer); + + // one ggml context per buffer type + int max_n_tensors = ml.n_tensors; + max_n_tensors += 1; // duplicated output tensor + max_n_tensors += n_layer*2; // duplicated rope freq tensors + const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors; + + std::map ctx_map; + auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * { + auto it = ctx_map.find(buft); + if (it == ctx_map.end()) { + ggml_init_params params = { + /*.mem_size =*/ ctx_size, + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + + ggml_context * ctx = ggml_init(params); + if (!ctx) { + throw std::runtime_error(format("failed to create ggml context")); + } + + ctx_map[buft] = ctx; + pimpl->ctxs.emplace_back(ctx); + + return ctx; + } + return it->second; + }; + + const auto TENSOR_DUPLICATED = llama_model_loader::TENSOR_DUPLICATED; + const auto TENSOR_NOT_REQUIRED = llama_model_loader::TENSOR_NOT_REQUIRED; + + // create tensors for the weights + { + // note: cast to int64_t since we will use these for the tensor dimensions + const int64_t n_head = hparams.n_head(); + const int64_t n_head_kv = hparams.n_head_kv(); + const int64_t n_embd = hparams.n_embd; + const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(); + const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(); + const int64_t n_embd_head_k = hparams.n_embd_head_k; + const int64_t n_embd_head_v = hparams.n_embd_head_v; + const int64_t n_ff = hparams.n_ff(); + const int64_t n_embd_gqa = n_embd_v_gqa; + const int64_t n_vocab = vocab.n_tokens(); + const int64_t n_token_types = vocab.n_token_types(); + const int64_t n_rot = hparams.n_rot; + const int64_t n_expert = hparams.n_expert; + const int64_t n_expert_used = hparams.n_expert_used; + const int64_t n_ctx_train = hparams.n_ctx_train; + + if (n_expert > 0 && hparams.n_expert_used == 0) { + throw std::runtime_error("model has expert layers but no expert layers are used"); + } + + int n_moved_tensors = 0; + ggml_tensor * first_moved_tensor = nullptr; + ggml_backend_buffer_type_t first_moved_from_buft = nullptr; + ggml_backend_buffer_type_t first_moved_to_buft = nullptr; + + auto create_tensor = [&](const LLM_TN_IMPL & tn, const std::initializer_list & ne, int flags) -> ggml_tensor * { + ggml_tensor * t_meta = ml.get_tensor_meta(tn.str().c_str()); + + if (!t_meta) { + if (flags & TENSOR_NOT_REQUIRED) { + return nullptr; + } + throw std::runtime_error(format("missing tensor '%s'", tn.str().c_str())); + } + + // some models use the token embedding tensor as the output, but since these are used in different layers and with different ops + // the tensor is duplicated + // to handle this, we check if the tensor is duplicated, and if so, we assume that it is being loaded as the output tensor + llm_tensor tn_tensor = tn.tensor; + if (tn.tensor == LLM_TENSOR_TOKEN_EMBD && flags & TENSOR_DUPLICATED) { + tn_tensor = LLM_TENSOR_OUTPUT; + } + + llm_tensor_info info; + try { + info = llm_tensor_info_for(tn_tensor); + } catch (const std::out_of_range & e) { + throw std::runtime_error(format("missing tensor info mapping for %s", tn.str().c_str())); + } + + // tensors with "bias" suffix are always used with GGML_OP_ADD + ggml_op op; + bool bias = tn.suffix != nullptr && strcmp(tn.suffix, "bias") == 0; + if (bias) { + op = GGML_OP_ADD; + } else { + op = info.op; + } + + // sanity checks + if (info.layer == LLM_TENSOR_LAYER_INPUT || info.layer == LLM_TENSOR_LAYER_OUTPUT) { + if (tn.bid != -1) { + GGML_ABORT("input/output layer tensor %s used with a layer number", tn.str().c_str()); + } + } else { + if (tn.bid == -1) { + GGML_ABORT("repeating layer tensor %s used without a layer number", tn.str().c_str()); + } + } + + // select the buffer type for this tensor + buft_list_t * buft_list; + switch (info.layer) { + case LLM_TENSOR_LAYER_INPUT: + buft_list = pimpl->dev_input.buft_list; + break; + case LLM_TENSOR_LAYER_OUTPUT: + buft_list = pimpl->dev_output.buft_list; + break; + case LLM_TENSOR_LAYER_REPEATING: + buft_list = pimpl->dev_layer.at(tn.bid).buft_list; + break; + default: + GGML_ABORT("invalid layer %d for tensor %s", info.layer, tn.str().c_str()); + } + + ggml_backend_buffer_type_t buft = select_weight_buft(hparams, t_meta, op, *buft_list); + if (!buft) { + throw std::runtime_error(format("failed to find a compatible buffer type for tensor %s", tn.str().c_str())); + } + + // avoid using a host buffer when using mmap + auto * buft_dev = ggml_backend_buft_get_device(buft); + if (ml.use_mmap && buft_dev && buft == ggml_backend_dev_host_buffer_type(buft_dev)) { + auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU); + buft = ggml_backend_dev_buffer_type(cpu_dev); + } + + if (buft != buft_list->front().second) { + n_moved_tensors++; + if (!first_moved_tensor) { + first_moved_tensor = t_meta; + first_moved_from_buft = buft_list->front().second; + first_moved_to_buft = buft; + } + } + + ggml_context * ctx = ctx_for_buft(buft); + + // if duplicated, check if the original tensor was allocated in the same buffer type context and avoid creating a new one + if (flags & TENSOR_DUPLICATED) { + ggml_tensor * t = ggml_get_tensor(ctx, tn.str().c_str()); + if (t) { + return t; + } + } + return ml.create_tensor(ctx, tn, ne, flags); }; - for (const auto & it : special_token_types) { - const std::string & key = kv(std::get<0>(it)); - int32_t & id = std::get<1>(it); + layers.resize(n_layer); - uint32_t new_id; - if (!ml.get_key(std::get<0>(it), new_id, false)) { - continue; - } - if (new_id >= vocab.id_to_token.size()) { - LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n", - __func__, key.c_str(), new_id, id); - } else { - id = new_id; - } - } + // TODO: move to a separate function + const auto tn = LLM_TN(arch); + switch (arch) { + case LLM_ARCH_LLAMA: + case LLM_ARCH_REFACT: + case LLM_ARCH_MINICPM: + case LLM_ARCH_GRANITE: + case LLM_ARCH_GRANITE_MOE: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); - // Handle add_bos_token and add_eos_token - { - bool temp = true; + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); - if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) { - vocab.tokenizer_add_bos = temp; - } - if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) { - vocab.tokenizer_add_eos = temp; - } - } + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } - // auto-detect special tokens by text - // TODO: convert scripts should provide these tokens through the KV metadata LLM_KV_TOKENIZER_... - // for now, we apply this workaround to find the tokens based on their text + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; - for (const auto & t : vocab.token_to_id) { - // find EOT token: "<|eot_id|>", "<|im_end|>", "", etc. - if (vocab.special_eot_id == LLAMA_TOKEN_NULL) { - if (false - || t.first == "<|eot_id|>" - || t.first == "<|im_end|>" - || t.first == "<|end|>" - || t.first == "" - || t.first == "<|endoftext|>" - || t.first == "" - || t.first == "<|end▁of▁sentence|>" // DeepSeek - ) { - vocab.special_eot_id = t.second; - if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) { - LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n", - __func__, t.second, t.first.c_str()); - vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL; + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + + // optional bias tensors + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) { + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + } + else { + layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + } + + if (n_expert == 0) { + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + + // optional MLP bias + layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED); + } else { + layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0); + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, TENSOR_NOT_REQUIRED); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); + } + } + } break; + case LLM_ARCH_DECI: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(i); + const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(i); + const int64_t n_embd_gqa = hparams.n_embd_v_gqa(i); + const int64_t n_ff = hparams.n_ff(i); + const int64_t n_head = hparams.n_head(i); + const int64_t n_head_kv = hparams.n_head_kv(i); + + if (n_head_kv == 0 && n_head > 0) { + // linear attention for DeciLMCausalModel + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + } + else if (n_head_kv > 0) { + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + } + + // optional bias tensors + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) { + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + } + else { + layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + } + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + + // optional MLP bias + layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED); + } + } break; + case LLM_ARCH_MINICPM3: + { + const int64_t n_embd_head_qk_rope = hparams.n_rot; + const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot; + + const int64_t q_lora_rank = hparams.n_lora_q; + const int64_t kv_lora_rank = hparams.n_lora_kv; + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0); + + layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0); + + layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0); + layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k}, 0); + + layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0); + layer.wkv_b = create_tensor(tn(LLM_TENSOR_ATTN_KV_B, "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_head * ( n_embd_head_v), n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head_qk_rope/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + } + } break; + case LLM_ARCH_GROK: + { + if (n_expert == 0) { + throw std::runtime_error("Grok model cannot have zero experts"); + } + + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0); + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, TENSOR_NOT_REQUIRED); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), { n_ff, n_embd, n_expert}, 0); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); + + layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0); + } + } break; + case LLM_ARCH_DBRX: + { + if (n_expert == 0) { + throw std::runtime_error("DBRX model cannot have zero experts"); + } + + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0); + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff, n_embd, n_expert}, 0); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), {n_embd, n_ff, n_expert}, 0); + } + } break; + case LLM_ARCH_BAICHUAN: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + { + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_FALCON: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + { + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + if (!output) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU + } + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_STARCODER: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0); + + // output + { + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + if (!output) { + // needs to be on GPU + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0); + + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0); + } + } break; + case LLM_ARCH_BERT: + case LLM_ARCH_NOMIC_BERT: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0); + + if (arch == LLM_ARCH_BERT) { + pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0); + + cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, TENSOR_NOT_REQUIRED); + cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {n_embd}, TENSOR_NOT_REQUIRED); + + cls_out = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED); + cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"), {1}, TENSOR_NOT_REQUIRED); + } + + tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); + tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + if (arch == LLM_ARCH_BERT) { + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); + + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0); + + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); + } else { + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + } + + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0); + layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd}, 0); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + + if (arch == LLM_ARCH_BERT) { + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); + } else { + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + } + + layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0); + layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}, 0); + } + } break; + case LLM_ARCH_JINA_BERT_V2: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); // word_embeddings + type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_token_types}, 0); // token_type_embeddings + + tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); // LayerNorm + tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0); //LayerNorm bias + + cls = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, 1}, TENSOR_NOT_REQUIRED); + cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"), {1}, TENSOR_NOT_REQUIRED); + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; // JinaBertLayer + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); + + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0); + + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); + + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); //output_dens + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); //output_dens + + layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0); //output_norm + layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd}, 0); + + layer.attn_norm_2 = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); + + layer.layer_out_norm = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0); + layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd}, 0); + } + } break; + case LLM_ARCH_BLOOM: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); + tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0); + + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0); + } + } break; + case LLM_ARCH_MPT: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, TENSOR_NOT_REQUIRED); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, TENSOR_NOT_REQUIRED); + + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + if (!output) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // needs to be on GPU + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED); + + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, TENSOR_NOT_REQUIRED); + + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + // AWQ ScaleActivation layer + layer.ffn_act = create_tensor(tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, TENSOR_NOT_REQUIRED); + } + } break; + case LLM_ARCH_STABLELM: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + // optional bias tensors, present in Stable LM 2 1.6B + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED); + + // optional q and k layernorms, present in StableLM 2 12B + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, TENSOR_NOT_REQUIRED); + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED); + + // optional FFN norm, not present in StableLM 2 12B which uses parallel residual + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, TENSOR_NOT_REQUIRED); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_QWEN: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3}, 0); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd*3}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff/2}, 0); + } + } break; + case LLM_ARCH_QWEN2: + case LLM_ARCH_QWEN2VL: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + // optional bias tensors + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_QWEN2MOE: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + // optional bias tensors + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0); + + if (n_expert == 0) { + throw std::runtime_error("n_expert must be > 0 for QWEN2MOE"); + } + if (n_expert_used == 0) { + throw std::runtime_error("n_expert_used must be > 0 for QWEN2MOE"); + } + + // MoE branch + const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used; + + layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0); + layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp, n_embd, n_expert}, 0); + layer.ffn_up_exps = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS, "weight", i), { n_embd, n_ff_exp, n_expert}, 0); + + // Shared expert branch + const int64_t n_ff_shexp = hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff; + + layer.ffn_gate_inp_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), {n_embd}, 0); + layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), { n_embd, n_ff_shexp}, 0); + layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp, n_embd}, 0); + layer.ffn_up_shexp = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP, "weight", i), { n_embd, n_ff_shexp}, 0); + } + } break; + case LLM_ARCH_PHI2: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"), {n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, TENSOR_NOT_REQUIRED); + + if (layer.wqkv == nullptr) { + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); + + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0); + + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); + } + + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0); + } + } break; + case LLM_ARCH_PHI3: + { + const int64_t n_embd_head = n_embd / n_head; + + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, TENSOR_NOT_REQUIRED); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff }, 0); + + layer.rope_long = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0)); + } + } break; + case LLM_ARCH_PLAMO: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_GPT2: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD, "weight"), {n_embd, n_ctx_train}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0); + + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0); + } + } break; + case LLM_ARCH_CODESHELL: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0); + + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0); + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); + + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, 0); + } + } break; + case LLM_ARCH_ORION: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_INTERNLM2: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + // layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_GEMMA: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + } + } break; + case LLM_ARCH_GEMMA2: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0); + layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0); + } + } break; + case LLM_ARCH_STARCODER2: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0); + + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + // optional bias tensors + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, 0); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, 0); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, 0); + layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0); + + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + + // optional bias tensors + layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0); + layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP , "bias", i), { n_ff}, 0); + } + } break; + case LLM_ARCH_MAMBA: + { + const int64_t d_conv = hparams.ssm_d_conv; + const int64_t d_inner = hparams.ssm_d_inner; + const int64_t d_state = hparams.ssm_d_state; + const int64_t dt_rank = hparams.ssm_dt_rank; + + // only an expansion factor of 2 is supported for now + if (2 * n_embd != d_inner) { + throw std::runtime_error("only an expansion factor of 2 is supported for now"); + } + + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + // if output is NULL, init from the input tok embed, duplicated to allow offloading + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + } + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + // norm + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner}, 0); + + layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner}, 0); + layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner}, 0); + + layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state}, 0); + + layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner}, 0); + layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner}, 0); + + // no "weight" suffix for these + layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner}, 0); + layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {d_inner}, 0); + + // out_proj + layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0); + } + } break; + case LLM_ARCH_XVERSE: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0); + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_COMMAND_R: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0); + // init output from the input tok embed + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + + if (n_layer >= 64){ + layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0); + layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0); + } + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd}, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0); + } + } break; + case LLM_ARCH_COHERE2: + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0); + + // output + output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0); + // init output from the input tok embed + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, + TENSOR_DUPLICATED); + + for (int i = 0; i < n_layer; ++i) { + auto & layer = layers[i]; + + layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0); + + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd }, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0); + layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0); + + layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0); + layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0); + layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0); } } - } + break; + case LLM_ARCH_OLMO: // adapted from LLM_ARCH_LLAMA with norm params removed + { + tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0); - // find EOM token: "<|eom_id|>" - if (vocab.special_eom_id == LLAMA_TOKEN_NULL) { - if (false - || t.first == "<|eom_id|>" - ) { - vocab.special_eom_id = t.second; - if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) { - LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n", - __func__, t.second, t.first.c_str()); - vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL; + // output + output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED); + // if output is NULL, init from the input tok embed + if (output == NULL) { + output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED); } - } - } - // find FIM_PRE token: "<|fim_prefix|>", "", "
", etc.
-            if (vocab.special_fim_pre_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_prefix|>"  // Qwen
-                        || t.first == ""
-                        || t.first == "<|fim▁begin|>" // DeepSeek
-                        || t.first == "
"
-                        ) {
-                    vocab.special_fim_pre_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
 
-            // find FIM_SUF token: "<|fim_suffix|>", "", "", etc.
-            if (vocab.special_fim_suf_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_suffix|>" // Qwen
-                        || t.first == ""
-                        || t.first == "<|fim▁hole|>" // DeepSeek
-                        || t.first == ""
-                        ) {
-                    vocab.special_fim_suf_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
 
-            // find FIM_MID token: "<|fim_middle|>", "", "", etc.
-            if (vocab.special_fim_mid_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_middle|>" // Qwen
-                        || t.first == ""
-                        || t.first == "<|fim▁end|>"  // DeepSeek
-                        || t.first == ""
-                        ) {
-                    vocab.special_fim_mid_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
                     }
-                }
-            }
+                } break;
+            case LLM_ARCH_OLMO2:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
 
-            // find FIM_PAD token: "<|fim_pad|>", "", "", etc.
-            if (vocab.special_fim_pad_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_pad|>" // Qwen
-                        || t.first == ""
-                        || t.first == ""
-                        ) {
-                    vocab.special_fim_pad_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
 
-            // find FIM_REP token: "<|fim_repo|>", "", "", etc.
-            if (vocab.special_fim_rep_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|fim_repo|>"  // Qwen
-                        || t.first == "<|repo_name|>"
-                        || t.first == ""
-                        || t.first == ""
-                        ) {
-                    vocab.special_fim_rep_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                    }
-                }
-            }
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
 
-            // find FIM_SEP token: "<|file_sep|>"
-            if (vocab.special_fim_sep_id == LLAMA_TOKEN_NULL) {
-                if (false
-                        || t.first == "<|file_sep|>" // Qwen
-                        ) {
-                    vocab.special_fim_sep_id = t.second;
-                    if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                                __func__, t.second, t.first.c_str());
-                        vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
+                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
+                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
                     }
-                }
-            }
+                } break;
+            case LLM_ARCH_OLMOE:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
+                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+
+                        if (n_expert == 0) {
+                            throw std::runtime_error("n_expert must be > 0");
+                        }
+                        if (n_expert_used == 0) {
+                            throw std::runtime_error("n_expert_used must be > 0");
+                        }
+
+                        // MoE branch
+                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
+                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
+                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
+                    }
+                } break;
+            case LLM_ARCH_OPENELM:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    // init output from the input tok embed
+                    output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        const int64_t n_head      =   hparams.n_head(i);
+                        const int64_t n_head_qkv  = 2*hparams.n_head_kv(i) + n_head;
+                        const int64_t n_ff        =   hparams.n_ff(i);
+
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_head_qkv*n_embd_head_k}, 0);
+                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
+                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head*n_embd_head_k, n_embd}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_GPTNEOX:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
+                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
+
+                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
+                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
+
+                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
+
+                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
+
+                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
+                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
+
+                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_ARCTIC:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_embd}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_embd, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_embd}, 0);
+
+                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+                        layer.ffn_norm_exps = create_tensor(tn(LLM_TENSOR_FFN_NORM_EXPS, "weight", i), {n_embd}, 0);
+                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, false);
+                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
+                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
+                    }
+                } break;
+            case LLM_ARCH_DEEPSEEK:
+                {
+
+                    const int64_t n_ff_exp        = hparams.n_ff_exp;
+                    const int64_t n_expert_shared = hparams.n_expert_shared;
+
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        if (i < (int) hparams.n_layer_dense_lead) {
+                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
+                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                        } else {
+                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+
+                            if (n_expert == 0) {
+                                throw std::runtime_error("n_expert must be > 0");
+                            }
+                            if (n_expert_used == 0) {
+                                throw std::runtime_error("n_expert_used must be > 0");
+                            }
+
+                            // MoE branch
+                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
+                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
+                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
+
+                            // Shared expert branch
+                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
+                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
+                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
+                        }
+                    }
+                } break;
+            case LLM_ARCH_DEEPSEEK2:
+                {
+                    const bool is_lite = (hparams.n_layer == 27);
+
+                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
+                    const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
+
+                    const int64_t q_lora_rank  = hparams.n_lora_q;
+                    const int64_t kv_lora_rank = hparams.n_lora_kv;
+
+                    const int64_t n_ff_exp        = hparams.n_ff_exp;
+                    const int64_t n_expert_shared = hparams.n_expert_shared;
+
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+                        if (!is_lite) {
+                            layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
+                        }
+
+                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
+
+                        if (!is_lite) {
+                            layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
+                            layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k}, 0);
+                        } else {
+                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        }
+
+                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
+                        layer.wkv_b     = create_tensor(tn(LLM_TENSOR_ATTN_KV_B,     "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
+                        layer.wo        = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {              n_head * (                      n_embd_head_v), n_embd}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        if (i < (int) hparams.n_layer_dense_lead) {
+                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
+                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                        } else {
+                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
+                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, TENSOR_NOT_REQUIRED);
+
+                            if (n_expert == 0) {
+                                throw std::runtime_error("n_expert must be > 0");
+                            }
+                            if (n_expert_used == 0) {
+                                throw std::runtime_error("n_expert_used must be > 0");
+                            }
+
+                            // MoE branch
+                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
+                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
+                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
+
+                            // Shared expert branch
+                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
+                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
+                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
+                        }
+                    }
+                } break;
+            case LLM_ARCH_BITNET:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm     = create_tensor(tn(LLM_TENSOR_ATTN_NORM,     "weight", i), {n_embd}, 0);
+                        layer.attn_sub_norm = create_tensor(tn(LLM_TENSOR_ATTN_SUB_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wq       = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
+                        layer.wq_scale = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
+                        layer.wk       = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wk_scale = create_tensor(tn(LLM_TENSOR_ATTN_K,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
+                        layer.wv       = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wv_scale = create_tensor(tn(LLM_TENSOR_ATTN_V,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
+                        layer.wo       = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+                        layer.wo_scale = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "scale",  i), {1}, TENSOR_NOT_REQUIRED);
+
+                        layer.ffn_norm     = create_tensor(tn(LLM_TENSOR_FFN_NORM,     "weight", i), {n_embd}, 0);
+                        layer.ffn_sub_norm = create_tensor(tn(LLM_TENSOR_FFN_SUB_NORM, "weight", i), {n_ff}, 0);
+
+                        layer.ffn_gate       = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_gate_scale = create_tensor(tn(LLM_TENSOR_FFN_GATE, "scale",  i), {1}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_down       = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
+                        layer.ffn_down_scale = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "scale",  i), {1}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_up         = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_up_scale   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "scale",  i), {1}, TENSOR_NOT_REQUIRED);
+                    }
+                } break;
+            case LLM_ARCH_T5:
+                {
+                    const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
+
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output_norm     = create_tensor(tn(LLM_TENSOR_DEC_OUTPUT_NORM, "weight"), {n_embd}, 0);
+
+                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm_enc  = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM,  "weight", i), {n_embd}, 0);
+                        layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
+
+                        layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                        layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
+
+                        layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
+                        layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up_enc   = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+
+                        layer.attn_norm  = create_tensor(tn(LLM_TENSOR_DEC_ATTN_NORM,  "weight", i), {n_embd}, 0);
+                        layer.attn_rel_b = create_tensor(tn(LLM_TENSOR_DEC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_DEC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_DEC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_DEC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_DEC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
+
+                        layer.attn_norm_cross  = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_NORM,  "weight", i), {n_embd}, 0);
+                        // this tensor seems to be unused in HF transformers implementation
+                        layer.attn_rel_b_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
+
+                        layer.wq_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wk_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wv_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                        layer.wo_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_DEC_FFN_NORM, "weight", i), {n_embd}, 0);
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_DEC_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_DEC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_DEC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_T5ENCODER:
+                {
+                    const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
+
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm_enc  = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM,  "weight", i), {n_embd}, 0);
+                        layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, TENSOR_NOT_REQUIRED);
+
+                        layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                        layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
+
+                        layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
+                        layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd,   n_ff}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up_enc   = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_JAIS:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
+                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "weight", i), {n_embd}, 0);
+                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "bias", i),   {n_embd}, 0);
+
+                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
+                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
+
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
+
+                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
+
+                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
+                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
+
+                        layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "bias", i),   {n_ff}, 0);
+
+                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
+                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_CHATGLM:
+                {
+                    tok_embd   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
+                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
+
+                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+
+                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff * 2}, 0);
+
+                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
+                    }
+                } break;
+            case LLM_ARCH_NEMOTRON:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
+                    output        = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+
+                        // optional bias tensors
+                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
+                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
+                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, TENSOR_NOT_REQUIRED);
+                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     TENSOR_NOT_REQUIRED);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
+
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+
+                        // optional MLP bias
+                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, TENSOR_NOT_REQUIRED);
+                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, TENSOR_NOT_REQUIRED);
+                    }
+                } break;
+            case LLM_ARCH_EXAONE:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
+
+                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM,   "weight", i), {n_embd}, 0);
+                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, TENSOR_NOT_REQUIRED | (i != 0 ? TENSOR_DUPLICATED : 0));
+                        layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "weight", i), {n_embd,   n_ff}, 0);
+                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN,   "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,     "weight", i), {n_embd,   n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_RWKV6:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // Block 0, LN0
+                    tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
+                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
+
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
+                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+
+                    const int time_mix_extra_dim = hparams.time_mix_extra_dim;
+                    const int time_decay_extra_dim = hparams.time_decay_extra_dim;
+                    const int head_size = hparams.wkv_head_size;
+                    const int attn_hidden_size = n_embd;
+                    const int ffn_size = hparams.n_ff_arr[0];
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
+
+                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
+                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, 0);
+
+                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
+                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
+
+                        layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
+                        layer.time_mix_lerp_w = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_W, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.time_mix_lerp_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.time_mix_lerp_v = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_V, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.time_mix_lerp_r = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.time_mix_lerp_g = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_G, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        GGML_ASSERT(!(layer.time_mix_lerp_fused == NULL && layer.time_mix_lerp_w == NULL));
+
+                        layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, 0);
+                        layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
+                        layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
+                        layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
+                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
+                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
+                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
+                        layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
+
+                        layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
+                        layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
+                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
+
+                        layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
+                        layer.channel_mix_lerp_r = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, 0);
+
+                        layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
+                        layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
+                        layer.channel_mix_receptance = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "weight", i), {n_embd, n_embd}, 0);
+                    }
+
+                } break;
+            case LLM_ARCH_RWKV6QWEN2:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                    output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
+
+                    const int time_mix_extra_dim = hparams.time_mix_extra_dim;
+                    const int time_decay_extra_dim = hparams.time_decay_extra_dim;
+                    const int head_size = hparams.wkv_head_size;
+                    const int attn_hidden_size = n_embd;
+                    const int n_head_kv = hparams.n_head_kv();
+                    int attn_key_value_size;
+                    if (n_head_kv == 0 || attn_hidden_size / head_size == n_head_kv) {
+                        attn_key_value_size = attn_hidden_size;
+                    } else {
+                        attn_key_value_size = n_head_kv * head_size;
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
+                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
+
+                        layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
+                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
+
+                        layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
+                        layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
+                        layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
+                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {n_embd, attn_key_value_size}, 0);
+                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {n_embd, attn_key_value_size}, 0);
+                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
+                        layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
+                        // optional bias tensors
+                        layer.time_mix_key_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "bias", i), {attn_key_value_size}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.time_mix_value_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "bias", i), {attn_key_value_size}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        layer.time_mix_receptance_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "bias", i), {attn_hidden_size}, llama_model_loader::TENSOR_NOT_REQUIRED);
+
+                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_CHAMELEON:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
+
+                    // output
+                    output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                    output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
+                    // if output is NULL, init from the input tok embed
+                    if (output == NULL) {
+                        output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
+                    }
+
+                    for (int i = 0; i < n_layer; ++i) {
+                        auto & layer = layers[i];
+
+                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
+                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
+                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
+                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i),  {n_embd_head_k, n_head}, TENSOR_NOT_REQUIRED);
+                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i),  {n_embd_head_k, n_head_kv}, TENSOR_NOT_REQUIRED);
+
+                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
+                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
+                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
+
+                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
+
+                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
+                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
+                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
+                    }
+                } break;
+            case LLM_ARCH_WAVTOKENIZER_DEC:
+                {
+                    tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hparams.n_embd_features, n_vocab}, 0);
+
+                    conv1d   = create_tensor(tn(LLM_TENSOR_CONV1D, "weight"), {7, hparams.n_embd_features, hparams.posnet.n_embd}, 0);
+                    conv1d_b = create_tensor(tn(LLM_TENSOR_CONV1D, "bias"),   {1, hparams.posnet.n_embd}, 0);
+
+                    // posnet
+                    {
+                        const int64_t n_embd = hparams.posnet.n_embd;
+
+                        for (uint32_t i = 0; i < hparams.posnet.n_layer; ++i) {
+                            auto & layer = layers[i].posnet;
+
+                            // posnet:
+                            //
+                            //  - resnet
+                            //  - resnet
+                            //  - attn
+                            //  - resnet
+                            //  - resnet
+                            //  - norm
+                            //
+                            switch (i) {
+                                case 0:
+                                case 1:
+                                case 3:
+                                case 4:
+                                    {
+                                        layer.norm1   = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "weight", i), {1, n_embd}, 0);
+                                        layer.norm1_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "bias",   i), {1, n_embd}, 0);
+
+                                        layer.conv1   = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "weight", i), {3, n_embd, n_embd}, 0);
+                                        layer.conv1_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "bias",   i), {1, n_embd}, 0);
+
+                                        layer.norm2   = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "weight", i), {1, n_embd}, 0);
+                                        layer.norm2_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "bias",   i), {1, n_embd}, 0);
+
+                                        layer.conv2   = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "weight", i), {3, n_embd, n_embd}, 0);
+                                        layer.conv2_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "bias",   i), {1, n_embd}, 0);
+                                    } break;
+                                case 2:
+                                    {
+                                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
+                                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias",   i), {1, n_embd}, 0);
+
+                                        layer.attn_q      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q,    "weight", i), {1, n_embd, n_embd}, 0);
+                                        layer.attn_q_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q,    "bias",   i), {1, n_embd}, 0);
+
+                                        layer.attn_k      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K,    "weight", i), {1, n_embd, n_embd}, 0);
+                                        layer.attn_k_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K,    "bias",   i), {1, n_embd}, 0);
+
+                                        layer.attn_v      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V,    "weight", i), {1, n_embd, n_embd}, 0);
+                                        layer.attn_v_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V,    "bias",   i), {1, n_embd}, 0);
+
+                                        layer.attn_o      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT,  "weight", i), {1, n_embd, n_embd}, 0);
+                                        layer.attn_o_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT,  "bias",   i), {1, n_embd}, 0);
+                                    } break;
+                                case 5:
+                                    {
+                                        layer.norm   = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
+                                        layer.norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias",   i), {1, n_embd}, 0);
+                                    } break;
+                                default: GGML_ABORT("unknown posnet layer");
+                            };
+                        }
+                    }
+
+                    GGML_ASSERT(hparams.posnet.n_embd == hparams.convnext.n_embd);
+
+                    tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {hparams.posnet.n_embd}, 0);
+                    tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {hparams.posnet.n_embd}, 0);
+
+                    // convnext
+                    {
+                        const int64_t n_embd = hparams.convnext.n_embd;
+
+                        for (uint32_t i = 0; i < hparams.convnext.n_layer; ++i) {
+                            auto & layer = layers[i].convnext;
+
+                            layer.dw     = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW,    "weight", i), {7, 1, n_embd}, 0);
+                            layer.dw_b   = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW,    "bias",   i), {1, n_embd}, 0);
+
+                            layer.norm   = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM,  "weight", i), {n_embd}, 0);
+                            layer.norm_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM,  "bias",   i), {n_embd}, 0);
+
+                            layer.pw1    = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1,   "weight", i), {n_embd, n_ff}, 0);
+                            layer.pw1_b  = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1,   "bias",   i), {n_ff}, 0);
+
+                            layer.pw2    = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2,   "weight", i), {n_ff, n_embd}, 0);
+                            layer.pw2_b  = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2,   "bias",   i), {n_embd}, 0);
+
+                            layer.gamma  = create_tensor(tn(LLM_TENSOR_CONVNEXT_GAMMA, "weight", i), {n_embd}, 0);
+                        }
+
+                        // output
+                        output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
+                        output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
+                    }
+
+                    output   = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hparams.convnext.n_embd, n_embd}, 0);
+                    output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"),   {n_embd}, 0);
+                } break;
+            default:
+                throw std::runtime_error("unknown architecture");
         }
 
-        // maintain a list of tokens that cause end-of-generation
-        // this is currently determined based on the token text, which is obviously not ideal
-        // ref: https://github.com/ggerganov/llama.cpp/issues/9606
-        vocab.special_eog_ids.clear();
-
-        if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_pad_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_fim_pad_id);
-        }
-
-        if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_rep_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_fim_rep_id);
-        }
-
-        if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_fim_sep_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_fim_sep_id);
-        }
-
-        for (const auto & t : vocab.token_to_id) {
-            if (false
-                    || t.first == "<|eot_id|>"
-                    || t.first == "<|im_end|>"
-                    || t.first == "<|end|>"
-                    || t.first == ""
-                    || t.first == "<|endoftext|>"
-                    || t.first == "<|eom_id|>"
-                    || t.first == ""
-               ) {
-                vocab.special_eog_ids.insert(t.second);
-                if ((vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
-                    LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
-                            __func__, t.second, t.first.c_str());
-                    vocab.id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
-                }
-            } else {
-                // token is control, but not marked as EOG -> print a debug log
-                if (vocab.id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL && vocab.special_eog_ids.count(t.second) == 0) {
-                    LLAMA_LOG_DEBUG("%s: control token: %6d '%s' is not marked as EOG\n",
-                            __func__, t.second, t.first.c_str());
-                }
-            }
-        }
-
-        // sanity checks
-        if (vocab.special_eos_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eos_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_eos_id);
-            LLAMA_LOG_WARN("%s: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
-        }
-
-        if (vocab.special_eot_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eot_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_eot_id);
-            LLAMA_LOG_WARN("%s: special_eot_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
-        }
-
-        if (vocab.special_eom_id != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(vocab.special_eom_id) == 0) {
-            vocab.special_eog_ids.insert(vocab.special_eom_id);
-            LLAMA_LOG_WARN("%s: special_eom_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        if (n_moved_tensors > 0) {
+            LLAMA_LOG_DEBUG("%s: tensor '%s' (%s) (and %d others) cannot be used with preferred buffer type %s, using %s instead\n",
+                __func__, first_moved_tensor->name, ggml_type_name(first_moved_tensor->type), n_moved_tensors - 1,
+                ggml_backend_buft_name(first_moved_from_buft), ggml_backend_buft_name(first_moved_to_buft));
         }
     }
 
-    // build special tokens cache
-    {
-        for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
-            if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
-                vocab.cache_special_tokens.push_back(id);
-            }
+    ml.done_getting_tensors();
+
+    ml.init_mappings(true, use_mlock ? &pimpl->mlock_mmaps : nullptr);
+    pimpl->mappings.reserve(ml.mappings.size());
+
+    // create the backend buffers
+    std::vector> ctx_bufs;
+    ctx_bufs.reserve(ctx_map.size());
+
+    // Ensure we have enough capacity for the maximum backend buffer we will potentially create
+    const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
+    pimpl->bufs.reserve(n_max_backend_buffer);
+
+    for (auto & it : ctx_map) {
+        ggml_backend_buffer_type_t buft = it.first;
+        ggml_context * ctx              = it.second;
+
+        // skip contexts without tensors
+        if (ggml_get_first_tensor(ctx) == nullptr) {
+            continue;
         }
 
-        std::sort(vocab.cache_special_tokens.begin(), vocab.cache_special_tokens.end(),
-            [&] (const llama_vocab::id a, const llama_vocab::id b) {
-                return vocab.id_to_token[a].text.size() > vocab.id_to_token[b].text.size();
-            }
-        );
+        llama_buf_map buf_map;
+        buf_map.reserve(n_max_backend_buffer);
 
-        LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t)vocab.cache_special_tokens.size());
-    }
-
-    // build token to piece cache
-    {
-        size_t size_cache = 0;
-
-        std::vector cache_token_to_piece(n_vocab);
-
-        for (uint32_t id = 0; id < n_vocab; ++id) {
-            cache_token_to_piece[id] = llama_token_to_piece(&model, id, true);
-
-            size_cache += cache_token_to_piece[id].size();
+        // check if it is possible to use buffer_from_host_ptr with this buffer type
+        ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
+        if (!dev) {
+            // FIXME: workaround for CPU backend buft having a NULL device
+            dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
         }
+        ggml_backend_dev_props props;
+        ggml_backend_dev_get_props(dev, &props);
+        bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
+        bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev);
 
-        std::swap(vocab.cache_token_to_piece, cache_token_to_piece);
-
-        LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MB\n", __func__, size_cache / 1024.0 / 1024.0);
-    }
-
-    // Handle per token attributes
-    //NOTE: Each model customizes per token attributes.
-    //NOTE: Per token attributes are missing from the GGUF file.
-    //TODO: Extract attributes from GGUF file.
-    {
-        auto _contains_any = [] (const std::string &str, const std::vector &substrs) -> bool {
-            for (auto substr : substrs) {
-                if (str.find(substr) < std::string::npos) {
-                    return true;
+        if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) {
+            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
+                // only the mmap region containing the tensors in the model is mapped to the backend buffer
+                // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers
+                // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
+                void * addr = nullptr;
+                size_t first, last; // NOLINT
+                ml.get_mapping_range(&first, &last, &addr, idx, ctx);
+                if (first >= last) {
+                    continue;
                 }
+                const size_t max_size = ggml_get_max_tensor_size(ctx);
+                ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
+                if (buf == nullptr) {
+                    throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
+                }
+                pimpl->bufs.emplace_back(buf);
+                buf_map.emplace(idx, buf);
             }
+        }
+        else {
+            ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
+            if (buf == nullptr) {
+                throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
+            }
+            pimpl->bufs.emplace_back(buf);
+            if (use_mlock && ggml_backend_buffer_is_host(buf)) {
+                pimpl->mlock_bufs.emplace_back(new llama_mlock);
+                auto & mlock_buf = pimpl->mlock_bufs.back();
+                mlock_buf->init   (ggml_backend_buffer_get_base(buf));
+                mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
+            }
+            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
+                buf_map.emplace(idx, buf);
+            }
+        }
+
+        if (pimpl->bufs.empty()) {
+            throw std::runtime_error("failed to allocate buffer");
+        }
+
+        for (auto & buf : buf_map) {
+            // indicate that this buffer contains weights
+            // this is used by ggml_backend_sched to improve op scheduling: ops that use a weight are preferably scheduled to the backend that contains the weight
+            ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
+        }
+
+        ctx_bufs.emplace_back(ctx, buf_map);
+    }
+
+    if (llama_supports_gpu_offload()) {
+        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
+
+        LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
+        if (n_gpu_layers > (int) hparams.n_layer) {
+            LLAMA_LOG_INFO("%s: offloading output layer to GPU\n", __func__);
+        }
+
+        const int max_backend_supported_layers = hparams.n_layer + 1;
+        const int max_offloadable_layers       = hparams.n_layer + 1;
+
+        LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
+    }
+
+    // print memory requirements per buffer type
+    for (auto & buf : pimpl->bufs) {
+        LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0);
+    }
+
+    // populate tensors_by_name
+    for (auto & ctx : pimpl->ctxs) {
+        for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) {
+            tensors_by_name.emplace_back(ggml_get_name(cur), cur);
+        }
+    }
+
+    // load tensor data
+    for (auto & it : ctx_bufs) {
+        ggml_context * ctx = it.first;
+        auto & bufs = it.second;
+        if (!ml.load_all_data(ctx, bufs, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
             return false;
-        };
-
-        auto _set_tokenid_attr = [&] (const llama_vocab::id id, llama_token_attr attr, bool value) {
-            uint32_t current = vocab.id_to_token.at(id).attr;
-            current = value ? (current | attr) : (current & ~attr);
-            vocab.id_to_token[id].attr = (llama_token_attr) current;
-        };
-
-        auto _set_token_attr = [&] (const std::string & token, llama_token_attr attr, bool value) {
-            _set_tokenid_attr(vocab.token_to_id.at(token), attr, value);
-        };
-
-        std::string model_name;
-        std::string tokenizer_pre;
-
-        ml.get_key(LLM_KV_GENERAL_NAME, model_name, false);
-        ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
-
-        // model name to lowercase
-        std::transform(model_name.begin(), model_name.end(), model_name.begin(),
-            [] (const std::string::value_type x) {
-                return std::tolower(x);
-            }
-        );
-
-        // set attributes by model/tokenizer name
-        if (_contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})) {
-            _set_token_attr("", LLAMA_TOKEN_ATTR_LSTRIP, true);
-        } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
-            for (auto id : vocab.cache_special_tokens) {
-                _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
-            }
-            for (auto token : {""}) {
-                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, true);
-            }
-            for (auto token : {"", "", "<|endoftext|>"}) {
-                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, false);
-            }
         }
     }
+
+    if (use_mmap_buffer) {
+        for (auto & mapping : ml.mappings) {
+            pimpl->mappings.emplace_back(std::move(mapping));
+        }
+    }
+
+    return true;
 }
 
-void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
-    const auto & hparams = model.hparams;
-    const auto & vocab   = model.vocab;
+std::string llama_model::arch_name() const {
+    return llm_arch_name(arch);
+}
 
+std::string llama_model::type_name() const {
+    return llm_type_name(type);
+}
+
+std::string llama_model::desc() const {
+    return pimpl->desc_str;
+}
+
+size_t llama_model::size() const {
+    return pimpl->n_bytes;
+}
+
+size_t llama_model::max_nodes() const {
+    return std::max(8192, tensors_by_name.size()*5);
+}
+
+size_t llama_model::n_devices() const {
+    return devices.size();
+}
+
+uint64_t llama_model::n_elements() const {
+    return pimpl->n_elements;
+}
+
+void llama_model::print_info() const {
     const char * rope_scaling_type = LLAMA_ROPE_SCALING_TYPES.at(hparams.rope_scaling_type_train);
 
     auto print_f = [](const std::function & f, uint32_t n) {
@@ -1873,11 +3550,7 @@ void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
     };
 
     // hparams
-    LLAMA_LOG_INFO("%s: format           = %s\n",     __func__, llama_file_version_name(ml.fver));
-    LLAMA_LOG_INFO("%s: arch             = %s\n",     __func__, llm_arch_name(model.arch));
-    LLAMA_LOG_INFO("%s: vocab type       = %s\n",     __func__, llama_model_vocab_type_name(vocab.type));
-    LLAMA_LOG_INFO("%s: n_vocab          = %u\n",     __func__, hparams.n_vocab);
-    LLAMA_LOG_INFO("%s: n_merges         = %u\n",     __func__, (int) vocab.bpe_ranks.size());
+    LLAMA_LOG_INFO("%s: arch             = %s\n",     __func__, arch_name().c_str());
     LLAMA_LOG_INFO("%s: vocab_only       = %d\n",     __func__, hparams.vocab_only);
 
     if (!hparams.vocab_only) {
@@ -1916,60 +3589,28 @@ void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
         LLAMA_LOG_INFO("%s: ssm_dt_b_c_rms   = %d\n",     __func__, hparams.ssm_dt_b_c_rms);
     }
 
-    LLAMA_LOG_INFO("%s: model type       = %s\n",     __func__, llama_model_type_name(model).c_str());
-    LLAMA_LOG_INFO("%s: model ftype      = %s\n",     __func__, llama_model_ftype_name(model).c_str());
-    if (ml.n_elements >= 1e12) {
-        LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, ml.n_elements*1e-12);
-    } else if (ml.n_elements >= 1e9) {
-        LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, ml.n_elements*1e-9);
-    } else if (ml.n_elements >= 1e6) {
-        LLAMA_LOG_INFO("%s: model params     = %.2f M\n", __func__, ml.n_elements*1e-6);
+    LLAMA_LOG_INFO("%s: model type       = %s\n",     __func__, type_name().c_str());
+    if (pimpl->n_elements >= 1e12) {
+        LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, pimpl->n_elements*1e-12);
+    } else if (pimpl->n_elements >= 1e9) {
+        LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, pimpl->n_elements*1e-9);
+    } else if (pimpl->n_elements >= 1e6) {
+        LLAMA_LOG_INFO("%s: model params     = %.2f M\n", __func__, pimpl->n_elements*1e-6);
     } else {
-        LLAMA_LOG_INFO("%s: model params     = %.2f K\n", __func__, ml.n_elements*1e-3);
-    }
-    if (ml.n_bytes < GiB) {
-        LLAMA_LOG_INFO("%s: model size       = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0,        ml.n_bytes*8.0/ml.n_elements);
-    } else {
-        LLAMA_LOG_INFO("%s: model size       = %.2f GiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
+        LLAMA_LOG_INFO("%s: model params     = %.2f K\n", __func__, pimpl->n_elements*1e-3);
     }
 
     // general kv
-    LLAMA_LOG_INFO("%s: general.name     = %s\n",    __func__, model.name.c_str());
+    LLAMA_LOG_INFO("%s: general.name     = %s\n",    __func__, name.c_str());
 
-    // special tokens
-    if (vocab.special_bos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, vocab.special_bos_id,     vocab.id_to_token[vocab.special_bos_id].text.c_str() );  }
-    if (vocab.special_eos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, vocab.special_eos_id,     vocab.id_to_token[vocab.special_eos_id].text.c_str() );  }
-    if (vocab.special_eot_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, vocab.special_eot_id,     vocab.id_to_token[vocab.special_eot_id].text.c_str() );  }
-    if (vocab.special_eom_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, vocab.special_eom_id,     vocab.id_to_token[vocab.special_eom_id].text.c_str() );  }
-    if (vocab.special_unk_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, vocab.special_unk_id,     vocab.id_to_token[vocab.special_unk_id].text.c_str() );  }
-    if (vocab.special_sep_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, vocab.special_sep_id,     vocab.id_to_token[vocab.special_sep_id].text.c_str() );  }
-    if (vocab.special_pad_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, vocab.special_pad_id,     vocab.id_to_token[vocab.special_pad_id].text.c_str() );  }
-    if (vocab.special_cls_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, vocab.special_cls_id,     vocab.id_to_token[vocab.special_cls_id].text.c_str() );  }
-    if (vocab.special_mask_id != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, vocab.special_mask_id,    vocab.id_to_token[vocab.special_mask_id].text.c_str() ); }
-
-    if (vocab.linefeed_id != LLAMA_TOKEN_NULL)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, vocab.linefeed_id,        vocab.id_to_token[vocab.linefeed_id].text.c_str() ); }
-
-    if (vocab.special_fim_pre_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, vocab.special_fim_pre_id, vocab.id_to_token[vocab.special_fim_pre_id].text.c_str() ); }
-    if (vocab.special_fim_suf_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, vocab.special_fim_suf_id, vocab.id_to_token[vocab.special_fim_suf_id].text.c_str() ); }
-    if (vocab.special_fim_mid_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, vocab.special_fim_mid_id, vocab.id_to_token[vocab.special_fim_mid_id].text.c_str() ); }
-    if (vocab.special_fim_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, vocab.special_fim_pad_id, vocab.id_to_token[vocab.special_fim_pad_id].text.c_str() ); }
-    if (vocab.special_fim_rep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, vocab.special_fim_rep_id, vocab.id_to_token[vocab.special_fim_rep_id].text.c_str() ); }
-    if (vocab.special_fim_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, vocab.special_fim_sep_id, vocab.id_to_token[vocab.special_fim_sep_id].text.c_str() ); }
-
-    for (const auto & id : vocab.special_eog_ids) {
-        LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, vocab.id_to_token[id].text.c_str() );
-    }
-
-    LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, vocab.max_token_len);
-
-    if (model.arch == LLM_ARCH_DEEPSEEK) {
+    if (arch == LLM_ARCH_DEEPSEEK) {
         LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
         LLAMA_LOG_INFO("%s: n_ff_exp             = %d\n",     __func__, hparams.n_ff_exp);
         LLAMA_LOG_INFO("%s: n_expert_shared      = %d\n",     __func__, hparams.n_expert_shared);
         LLAMA_LOG_INFO("%s: expert_weights_scale = %.1f\n",   __func__, hparams.expert_weights_scale);
     }
 
-    if (model.arch == LLM_ARCH_DEEPSEEK2) {
+    if (arch == LLM_ARCH_DEEPSEEK2) {
         LLAMA_LOG_INFO("%s: n_layer_dense_lead   = %d\n",     __func__, hparams.n_layer_dense_lead);
         LLAMA_LOG_INFO("%s: n_lora_q             = %d\n",     __func__, hparams.n_lora_q);
         LLAMA_LOG_INFO("%s: n_lora_kv            = %d\n",     __func__, hparams.n_lora_kv);
@@ -1981,16 +3622,88 @@ void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
         LLAMA_LOG_INFO("%s: rope_yarn_log_mul    = %.4f\n",   __func__, hparams.rope_yarn_log_mul);
     }
 
-    if (model.arch == LLM_ARCH_QWEN2MOE) {
+    if (arch == LLM_ARCH_QWEN2MOE) {
         LLAMA_LOG_INFO("%s: n_ff_exp         = %d\n",     __func__, hparams.n_ff_exp);
         LLAMA_LOG_INFO("%s: n_ff_shexp       = %d\n",     __func__, hparams.n_ff_shexp);
     }
 
-    if (model.arch == LLM_ARCH_MINICPM || model.arch == LLM_ARCH_GRANITE || model.arch == LLM_ARCH_GRANITE_MOE) {
+    if (arch == LLM_ARCH_MINICPM || arch == LLM_ARCH_GRANITE || arch == LLM_ARCH_GRANITE_MOE) {
         LLAMA_LOG_INFO("%s: f_embedding_scale = %f\n", __func__, hparams.f_embedding_scale);
         LLAMA_LOG_INFO("%s: f_residual_scale  = %f\n", __func__, hparams.f_residual_scale);
         LLAMA_LOG_INFO("%s: f_attention_scale = %f\n", __func__, hparams.f_attention_scale);
     }
+
+    vocab.print_info();
+}
+
+ggml_backend_dev_t llama_model::dev_layer(int il) const {
+    return pimpl->dev_layer.at(il).dev;
+}
+
+ggml_backend_dev_t llama_model::dev_output() const {
+    return pimpl->dev_output.dev;
+}
+
+template
+static bool buft_supported(ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev, F & fn) {
+    ggml_init_params params = {
+        /*.mem_size   =*/ ggml_tensor_overhead()*8,
+        /*.mem_buffer =*/ NULL,
+        /*.no_alloc   =*/ true,
+    };
+
+    ggml_context_ptr ctx { ggml_init(params) };
+    if (!ctx) {
+        throw std::runtime_error(format("failed to create ggml context"));
+    }
+
+    ggml_backend_buffer_ptr buf { ggml_backend_buft_alloc_buffer(buft, 0) };
+    ggml_tensor * op_tensor = fn(ctx.get());
+    for (int i = 0; i < GGML_MAX_SRC; i++) {
+        if (op_tensor->src[i] != nullptr) {
+            assert(op_tensor->src[i]->buffer == nullptr);
+            op_tensor->src[i]->buffer = buf.get();
+        }
+    }
+
+    bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
+
+    return op_supported;
+}
+
+template
+static ggml_backend_buffer_type_t select_buft(const buft_list_t & buft_list, const F & fn) {
+    for (const auto & cur : buft_list) {
+        ggml_backend_dev_t cur_dev = cur.first;
+        ggml_backend_buffer_type_t cur_buft = cur.second;
+        if (buft_supported(cur_buft, cur_dev, fn)) {
+            return cur_buft;
+        }
+    }
+
+    throw std::runtime_error(format("no suitable buffer type found"));
+}
+
+ggml_backend_buffer_type_t llama_model::select_buft(int il) const {
+    return ::select_buft(
+            *pimpl->dev_layer.at(il).buft_list,
+            [&](ggml_context * ctx) {
+                ggml_tensor * cur = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
+                ggml_tensor * layer_dir = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hparams.n_embd);
+                return ggml_add(ctx, cur, layer_dir);
+            });
+}
+
+const struct ggml_tensor * llama_model::get_tensor(const char * name) const {
+    auto it = std::find_if(tensors_by_name.begin(), tensors_by_name.end(),
+            [name](const std::pair & it) {
+                return it.first == name;
+            });
+    if (it == tensors_by_name.end()) {
+        return nullptr;
+    }
+
+    return it->second;
 }
 
 //
@@ -2022,6 +3735,10 @@ struct llama_model_params llama_model_default_params() {
     return result;
 }
 
+const struct llama_vocab * llama_model_get_vocab(const struct llama_model * model) {
+    return &model->vocab;
+}
+
 void llama_free_model(struct llama_model * model) {
     llama_model_free(model);
 }
@@ -2030,31 +3747,43 @@ void llama_model_free(struct llama_model * model) {
     delete model;
 }
 
-enum llama_vocab_type llama_vocab_type(const struct llama_model * model) {
-    return model->vocab.type;
-}
-
-int32_t llama_n_vocab(const struct llama_model * model) {
-    return model->hparams.n_vocab;
-}
-
-int32_t llama_n_ctx_train(const struct llama_model * model) {
+int32_t llama_model_n_ctx_train(const struct llama_model * model) {
     return model->hparams.n_ctx_train;
 }
 
-int32_t llama_n_embd(const struct llama_model * model) {
+int32_t llama_model_n_embd(const struct llama_model * model) {
     return model->hparams.n_embd;
 }
 
-int32_t llama_n_layer(const struct llama_model * model) {
+int32_t llama_model_n_layer(const struct llama_model * model) {
     return model->hparams.n_layer;
 }
 
-int32_t llama_n_head(const struct llama_model * model) {
+int32_t llama_model_n_head(const struct llama_model * model) {
     return model->hparams.n_head();
 }
 
-enum llama_rope_type llama_rope_type(const struct llama_model * model) {
+// deprecated
+int32_t llama_n_ctx_train(const struct llama_model * model) {
+    return llama_model_n_ctx_train(model);
+}
+
+// deprecated
+int32_t llama_n_embd(const struct llama_model * model) {
+    return llama_model_n_embd(model);
+}
+
+// deprecated
+int32_t llama_n_layer(const struct llama_model * model) {
+    return llama_model_n_layer(model);
+}
+
+// deprecated
+int32_t llama_n_head(const struct llama_model * model) {
+    return llama_model_n_head(model);
+}
+
+enum llama_rope_type llama_model_rope_type(const struct llama_model * model) {
     switch (model->arch) {
         // these models do not use RoPE
         case LLM_ARCH_GPT2:
@@ -2132,7 +3861,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
     return LLAMA_ROPE_TYPE_NONE;
 }
 
-float llama_rope_freq_scale_train(const struct llama_model * model) {
+float llama_model_rope_freq_scale_train(const struct llama_model * model) {
     return model->hparams.rope_freq_scale_train;
 }
 
@@ -2176,18 +3905,24 @@ int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int3
 }
 
 int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
-    return snprintf(buf, buf_size, "%s %s %s",
-            llama_model_arch_name (*model).c_str(),
-            llama_model_type_name (*model).c_str(),
-            llama_model_ftype_name(*model).c_str());
+    return snprintf(buf, buf_size, "%s", model->desc().c_str());
 }
 
 uint64_t llama_model_size(const struct llama_model * model) {
-    return model->n_bytes;
+    return model->size();
+}
+
+const char * llama_model_chat_template(const struct llama_model * model) {
+    const auto & it = model->gguf_kv.find(LLM_KV(model->arch)(LLM_KV_TOKENIZER_CHAT_TEMPLATE));
+    if (it == model->gguf_kv.end()) {
+        return nullptr;
+    }
+
+    return it->second.c_str();
 }
 
 uint64_t llama_model_n_params(const struct llama_model * model) {
-    return model->n_elements;
+    return model->n_elements();
 }
 
 bool llama_model_has_encoder(const struct llama_model * model) {
diff --git a/src/llama-model.h b/src/llama-model.h
index 565d2dbdf..4cc8abb75 100644
--- a/src/llama-model.h
+++ b/src/llama-model.h
@@ -4,79 +4,80 @@
 #include "llama-arch.h"
 #include "llama-hparams.h"
 #include "llama-vocab.h"
-#include "llama-mmap.h"
-
-#include "ggml-cpp.h"
 
+#include 
+#include 
+#include 
 #include 
 
+struct llama_model_loader;
+
 // available models
-// TODO: this enum does not follow the enum naming convention
 enum llm_type {
-    MODEL_UNKNOWN,
-    MODEL_14M,
-    MODEL_17M,
-    MODEL_22M,
-    MODEL_33M,
-    MODEL_60M,
-    MODEL_70M,
-    MODEL_80M,
-    MODEL_109M,
-    MODEL_137M,
-    MODEL_160M,
-    MODEL_220M,
-    MODEL_250M,
-    MODEL_270M,
-    MODEL_335M,
-    MODEL_410M,
-    MODEL_450M,
-    MODEL_770M,
-    MODEL_780M,
-    MODEL_0_5B,
-    MODEL_1B,
-    MODEL_1_3B,
-    MODEL_1_4B,
-    MODEL_1_5B,
-    MODEL_1_6B,
-    MODEL_2B,
-    MODEL_2_8B,
-    MODEL_3B,
-    MODEL_4B,
-    MODEL_6B,
-    MODEL_6_9B,
-    MODEL_7B,
-    MODEL_8B,
-    MODEL_9B,
-    MODEL_11B,
-    MODEL_12B,
-    MODEL_13B,
-    MODEL_14B,
-    MODEL_15B,
-    MODEL_16B,
-    MODEL_20B,
-    MODEL_30B,
-    MODEL_32B,
-    MODEL_34B,
-    MODEL_35B,
-    MODEL_40B,
-    MODEL_65B,
-    MODEL_70B,
-    MODEL_236B,
-    MODEL_314B,
-    MODEL_671B,
-    MODEL_SMALL,
-    MODEL_MEDIUM,
-    MODEL_LARGE,
-    MODEL_XL,
-    MODEL_A1_7B,
-    MODEL_A2_7B,
-    MODEL_8x7B,
-    MODEL_8x22B,
-    MODEL_16x12B,
-    MODEL_16x3_8B,
-    MODEL_10B_128x3_66B,
-    MODEL_57B_A14B,
-    MODEL_27B,
+    LLM_TYPE_UNKNOWN,
+    LLM_TYPE_14M,
+    LLM_TYPE_17M,
+    LLM_TYPE_22M,
+    LLM_TYPE_33M,
+    LLM_TYPE_60M,
+    LLM_TYPE_70M,
+    LLM_TYPE_80M,
+    LLM_TYPE_109M,
+    LLM_TYPE_137M,
+    LLM_TYPE_160M,
+    LLM_TYPE_220M,
+    LLM_TYPE_250M,
+    LLM_TYPE_270M,
+    LLM_TYPE_335M,
+    LLM_TYPE_410M,
+    LLM_TYPE_450M,
+    LLM_TYPE_770M,
+    LLM_TYPE_780M,
+    LLM_TYPE_0_5B,
+    LLM_TYPE_1B,
+    LLM_TYPE_1_3B,
+    LLM_TYPE_1_4B,
+    LLM_TYPE_1_5B,
+    LLM_TYPE_1_6B,
+    LLM_TYPE_2B,
+    LLM_TYPE_2_8B,
+    LLM_TYPE_3B,
+    LLM_TYPE_4B,
+    LLM_TYPE_6B,
+    LLM_TYPE_6_9B,
+    LLM_TYPE_7B,
+    LLM_TYPE_8B,
+    LLM_TYPE_9B,
+    LLM_TYPE_11B,
+    LLM_TYPE_12B,
+    LLM_TYPE_13B,
+    LLM_TYPE_14B,
+    LLM_TYPE_15B,
+    LLM_TYPE_16B,
+    LLM_TYPE_20B,
+    LLM_TYPE_30B,
+    LLM_TYPE_32B,
+    LLM_TYPE_34B,
+    LLM_TYPE_35B,
+    LLM_TYPE_40B,
+    LLM_TYPE_65B,
+    LLM_TYPE_70B,
+    LLM_TYPE_236B,
+    LLM_TYPE_314B,
+    LLM_TYPE_671B,
+    LLM_TYPE_SMALL,
+    LLM_TYPE_MEDIUM,
+    LLM_TYPE_LARGE,
+    LLM_TYPE_XL,
+    LLM_TYPE_A1_7B,
+    LLM_TYPE_A2_7B,
+    LLM_TYPE_8x7B,
+    LLM_TYPE_8x22B,
+    LLM_TYPE_16x12B,
+    LLM_TYPE_16x3_8B,
+    LLM_TYPE_10B_128x3_66B,
+    LLM_TYPE_57B_A14B,
+    LLM_TYPE_27B,
 };
 
 struct llama_layer_posnet {
@@ -286,11 +287,9 @@ struct llama_layer {
 };
 
 struct llama_model {
-    llm_type type = MODEL_UNKNOWN;
+    llm_type type = LLM_TYPE_UNKNOWN;
     llm_arch arch = LLM_ARCH_UNKNOWN;
 
-    llama_ftype ftype = LLAMA_FTYPE_ALL_F32;
-
     std::string name = "n/a";
 
     llama_hparams hparams = {};
@@ -319,78 +318,55 @@ struct llama_model {
 
     std::vector layers;
 
+    llama_model_params params;
+
     // gguf metadata
     std::unordered_map gguf_kv;
 
-    llama_split_mode split_mode;
-    int main_gpu;
-    int n_gpu_layers;
-
     std::vector rpc_servers;
 
     // list of devices used in this model
     std::vector devices;
 
-
-    // lists of buffer types used for each layer
-    using buft_list_t = std::vector>;
-    buft_list_t cpu_buft_list;
-    std::map gpu_buft_list;
-
-    struct layer_dev {
-        ggml_backend_dev_t dev;
-        buft_list_t * buft_list;
-    };
-
-    layer_dev dev_input = {};
-    layer_dev dev_output = {};
-    std::vector dev_layer;
-
-    // contexts where the model tensors metadata is stored
-    std::vector ctxs;
-
-    // the model memory buffers for the tensor data
-    std::vector bufs;
-
-    // model memory mapped files
-    llama_mmaps mappings;
-
-    // objects representing data potentially being locked in memory
-    llama_mlocks mlock_bufs;
-    llama_mlocks mlock_mmaps;
-
     // for quantize-stats only
     std::vector> tensors_by_name;
 
     int64_t t_load_us  = 0;
     int64_t t_start_us = 0;
 
-    // total number of parameters in the model
-    uint64_t n_elements = 0;
+    explicit llama_model(const struct llama_model_params & params);
+    ~llama_model();
 
-    // total size of all the tensors in the model in bytes
-    size_t  n_bytes     = 0;
+    void load_stats  (llama_model_loader & ml);
+    void load_arch   (llama_model_loader & ml);
+    void load_hparams(llama_model_loader & ml);
+    void load_vocab  (llama_model_loader & ml);
+    bool load_tensors(llama_model_loader & ml); // returns false if cancelled by progress_callback
+
+    std::string arch_name() const;
+    std::string type_name() const;
+
+    std::string desc() const;
+
+    size_t size() const;
+    size_t max_nodes() const;
+    size_t n_devices() const;
+
+    // total number of parameters in the model
+    uint64_t n_elements() const;
+
+    void print_info() const;
+
+    ggml_backend_dev_t dev_layer(int il) const;
+    ggml_backend_dev_t dev_output() const;
+
+    ggml_backend_buffer_type_t select_buft(int il) const;
+
+    const struct ggml_tensor * get_tensor(const char * name) const;
+
+private:
+    struct impl;
+    std::unique_ptr pimpl;
 };
 
 const char * llm_type_name(llm_type type);
-
-std::string llama_model_arch_name (const llama_model & model);
-std::string llama_model_type_name (const llama_model & model);
-std::string llama_model_ftype_name(const llama_model & model);
-
-// used by llama_adapter_cvec
-ggml_backend_buffer_type_t llama_model_select_buft(const llama_model & model, int il);
-
-// used by llama_adapter_lora
-struct ggml_tensor * llama_model_get_tensor(const struct llama_model & model, const char * name);
-
-size_t llama_model_max_nodes(const llama_model & model);
-
-struct llama_model_loader;
-
-// TODO: become llama_model methods
-void llm_load_stats     (llama_model_loader & ml, llama_model & model);
-void llm_load_arch      (llama_model_loader & ml, llama_model & model);
-void llm_load_hparams   (llama_model_loader & ml, llama_model & model);
-void llm_load_vocab     (llama_model_loader & ml, llama_model & model);
-void llm_load_print_meta(llama_model_loader & ml, llama_model & model);
diff --git a/src/llama-quant.cpp b/src/llama-quant.cpp
index a45044f30..d4947a780 100644
--- a/src/llama-quant.cpp
+++ b/src/llama-quant.cpp
@@ -235,7 +235,7 @@ static ggml_type llama_tensor_get_type(quantize_state_impl & qs, ggml_type new_t
         else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
                 use_more_bits(qs.i_attention_wv, qs.n_attention_wv)) new_type = GGML_TYPE_Q6_K;
         else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && qs.i_attention_wv < 4) new_type = GGML_TYPE_Q5_K;
-        if (qs.model.type == MODEL_70B) {
+        if (qs.model.type == LLM_TYPE_70B) {
             // In the 70B model we have 8 heads sharing the same attn_v weights. As a result, the attn_v.weight tensor is
             // 8x smaller compared to attn_q.weight. Hence, we can get a nice boost in quantization accuracy with
             // nearly negligible increase in model size by quantizing this tensor with more bits:
@@ -525,18 +525,20 @@ static void llama_model_quantize_impl(const std::string & fname_inp, const std::
         auto v = (std::vector*)params->kv_overrides;
         kv_overrides = v->data();
     }
+
     llama_model_loader ml(fname_inp, use_mmap, /*check_tensors*/ true, kv_overrides);
     ml.init_mappings(false); // no prefetching
 
-    llama_model model;
-    llm_load_arch   (ml, model);
-    llm_load_hparams(ml, model);
-    llm_load_stats  (ml, model);
+    llama_model model(llama_model_default_params());
+
+    model.load_arch   (ml);
+    model.load_hparams(ml);
+    model.load_stats  (ml);
 
     struct quantize_state_impl qs(model, params);
 
     if (params->only_copy) {
-        ftype = model.ftype;
+        ftype = ml.ftype;
     }
     const std::unordered_map> * imatrix_data = nullptr;
     if (params->imatrix) {
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index ef5a576cc..b3a12386e 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -371,7 +371,10 @@ void llama_sampler_free(struct llama_sampler * smpl) {
 llama_token llama_sampler_sample(struct llama_sampler * smpl, struct llama_context * ctx, int32_t idx) {
     const auto * logits = llama_get_logits_ith(ctx, idx);
 
-    const int n_vocab = llama_n_vocab(llama_get_model(ctx));
+    const llama_model * model = llama_get_model(ctx);
+    const llama_vocab * vocab = llama_model_get_vocab(model);
+
+    const int n_vocab = llama_vocab_n_tokens(vocab);
 
     // TODO: do not allocate each time
     std::vector cur;
@@ -1445,7 +1448,7 @@ static void llama_sampler_grammar_reset(struct llama_sampler * smpl) {
 static struct llama_sampler * llama_sampler_grammar_clone(const struct llama_sampler * smpl) {
     const auto * ctx = (const llama_sampler_grammar *) smpl->ctx;
 
-    auto * result = llama_sampler_init_grammar_impl(*ctx->vocab, nullptr, nullptr);
+    auto * result = llama_sampler_init_grammar(ctx->vocab, nullptr, nullptr);
 
     // copy the state
     {
@@ -1481,19 +1484,19 @@ static struct llama_sampler_i llama_sampler_grammar_i = {
     /* .free   = */ llama_sampler_grammar_free,
 };
 
-struct llama_sampler * llama_sampler_init_grammar_impl(const struct llama_vocab & vocab, const char * grammar_str, const char * grammar_root) {
+struct llama_sampler * llama_sampler_init_grammar(const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root) {
     auto * ctx = new llama_sampler_grammar;
 
     if (grammar_str != nullptr && grammar_str[0] != '\0') {
         *ctx = {
-            /* .vocab        = */ &vocab,
+            /* .vocab        = */ vocab,
             /* .grammar_str  = */ grammar_str,
             /* .grammar_root = */ grammar_root,
-            /* .grammar      = */ llama_grammar_init_impl(&vocab, grammar_str, grammar_root),
+            /* .grammar      = */ llama_grammar_init_impl(vocab, grammar_str, grammar_root),
         };
     } else {
         *ctx = {
-            /* .vocab        = */ &vocab,
+            /* .vocab        = */ vocab,
             /* .grammar_str  = */ {},
             /* .grammar_root = */ {},
             /* .grammar      = */ nullptr,
@@ -1663,8 +1666,8 @@ struct llama_sampler_dry {
 
 // Ported from Koboldcpp, original PR: https://github.com/LostRuins/koboldcpp/pull/982 (Original author: pi6am)
 static void get_overlapping_token_sequences(const llama_vocab & vocab, const std::string& str, std::unordered_multimap>& token_sequences, int max_tail_len = -1) {
-    for (llama_token token_id = 0; token_id < (llama_token)vocab.n_vocab; token_id++) {
-        std::string word = llama_detokenize(vocab, {token_id}, true);
+    for (llama_token token_id = 0; token_id < (llama_token) vocab.n_tokens(); token_id++) {
+        std::string word = vocab.detokenize({token_id}, true);
         if (word.find(str) != std::string::npos) {
             token_sequences.emplace(token_id, std::vector());
         } else {
@@ -1681,7 +1684,7 @@ static void get_overlapping_token_sequences(const llama_vocab & vocab, const std
                     }
                 }
                 if (match) {
-                    std::vector tokenization = llama_tokenize_internal(vocab, str.substr(i), false, false);
+                    std::vector tokenization = vocab.tokenize(str.substr(i), false, false);
                     if (max_tail_len >= 0 && tokenization.size() > (size_t)max_tail_len) {
                         tokenization.resize(max_tail_len);
                     }
@@ -1937,7 +1940,7 @@ static struct llama_sampler * llama_sampler_dry_clone(const struct llama_sampler
     llama_vocab dummy_vocab;
 
     // dummy vocab is passed because it is only needed for raw sequence breaker processing, which we have already done and will simply be copying
-    auto * result = llama_sampler_init_dry_impl(dummy_vocab, ctx->total_context_size, ctx->dry_multiplier, ctx->dry_base, ctx->dry_allowed_length, ctx->dry_penalty_last_n, NULL, 0);
+    auto * result = llama_sampler_init_dry(&dummy_vocab, ctx->total_context_size, ctx->dry_multiplier, ctx->dry_base, ctx->dry_allowed_length, ctx->dry_penalty_last_n, NULL, 0);
 
     // Copy the state, including the processed breakers
     {
@@ -1964,7 +1967,7 @@ static struct llama_sampler_i llama_sampler_dry_i = {
     /* .free   = */ llama_sampler_dry_free,
 };
 
-struct llama_sampler * llama_sampler_init_dry_impl(const struct llama_vocab & vocab, int32_t context_size, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const char** seq_breakers, size_t num_breakers) {
+struct llama_sampler * llama_sampler_init_dry(const struct llama_vocab * vocab, int32_t context_size, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const char** seq_breakers, size_t num_breakers) {
     int32_t effective_dry_penalty_last_n = (dry_penalty_last_n == -1) ? context_size : std::max(dry_penalty_last_n, 0);
     std::unordered_multimap> processed_breakers;
     const int MAX_CHAR_LEN = 40;
@@ -1991,7 +1994,7 @@ struct llama_sampler * llama_sampler_init_dry_impl(const struct llama_vocab & vo
                 sequence_break.resize(MAX_CHAR_LEN);
             }
 
-            get_overlapping_token_sequences(vocab, sequence_break, processed_breakers, MAX_SEQ_LEN);
+            get_overlapping_token_sequences(*vocab, sequence_break, processed_breakers, MAX_SEQ_LEN);
         }
     }
 
@@ -2014,7 +2017,7 @@ struct llama_sampler * llama_sampler_init_dry_impl(const struct llama_vocab & vo
 // wrapper for test-sampling.cpp
 struct llama_sampler * llama_sampler_init_dry_testing(int32_t context_size, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const std::vector>& seq_breakers) {
     llama_vocab dummy_vocab;
-    auto * result = llama_sampler_init_dry_impl(dummy_vocab, context_size, dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n, NULL, 0);
+    auto * result = llama_sampler_init_dry(&dummy_vocab, context_size, dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n, NULL, 0);
     auto * ctx = (llama_sampler_dry *) result->ctx;
 
     // Process the token-based sequence breakers
@@ -2153,7 +2156,7 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
     float p_eog_sum = 0.0f;
 
     for (size_t i = 0; i < cur_p->size; ++i) {
-        if (llama_token_is_eog_impl(*ctx->vocab, cur_p->data[i].id)) {
+        if (ctx->vocab->is_eog(cur_p->data[i].id)) {
             p_eog_sum += cur_p->data[i].p;
         } else {
             p_txt_sum += cur_p->data[i].p;
@@ -2175,7 +2178,7 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
         float p_sum = 0.0f;
 
         for (size_t i = 0; i < size_org; ++i) {
-            if (llama_token_is_eog_impl(*ctx->vocab, cur_p->data[i].id)) {
+            if (ctx->vocab->is_eog(cur_p->data[i].id)) {
                 p_sum += cur_p->data[i].p;
 
                 cur_p->data[cur_p->size++] = cur_p->data[i];
@@ -2203,17 +2206,17 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
                 continue;
             }
 
-            int len0 = llama_token_to_piece_impl(*ctx->vocab, cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
+            int len0 = ctx->vocab->token_to_piece(cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
             if (len0 < 0) {
                 ctx->buf0.resize(len0);
-                len0 = llama_token_to_piece_impl(*ctx->vocab, cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
+                len0 = ctx->vocab->token_to_piece(cur_p->data[i0].id, ctx->buf0.data(), ctx->buf0.size(), 0, false);
                 assert(len0 > 0);
             }
 
-            int len1 = llama_token_to_piece_impl(*ctx->vocab, cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
+            int len1 = ctx->vocab->token_to_piece(cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
             if (len1 < 0) {
                 ctx->buf1.resize(len1);
-                len1 = llama_token_to_piece_impl(*ctx->vocab, cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
+                len1 = ctx->vocab->token_to_piece(cur_p->data[i1].id, ctx->buf1.data(), ctx->buf1.size(), 0, false);
                 assert(len1 > 0);
             }
 
@@ -2248,7 +2251,7 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
     LOG_DBG_CUR("%s: n_combined = %zu, applying thold = %.3f\n", __func__, n_combined, thold);
 
     for (size_t i = 0; i < size_org; ++i) {
-        const bool is_eog = llama_token_is_eog_impl(*ctx->vocab, cur_p->data[i].id);
+        const bool is_eog = ctx->vocab->is_eog(cur_p->data[i].id);
 
         if (cur_p->data[i].p < thold && !is_eog) {
             continue;
@@ -2269,7 +2272,7 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
     // if no non-EOG tokens are left -> reduce cur_p to single EOT token
     if (n_non_eog == 0) {
         cur_p->size = 1;
-        cur_p->data[0].id = llama_token_eot_impl(*ctx->vocab);
+        cur_p->data[0].id = ctx->vocab->token_eot();
         cur_p->data[0].logit = 1.0f;
 
         return;
@@ -2291,7 +2294,7 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
     LOG_DBG_CUR("%s: applying thold = %.3f\n", __func__, thold);
 
     for (size_t i = 0; i < size_org; ++i) {
-        const bool is_eog = llama_token_is_eog_impl(*ctx->vocab, cur_p->data[i].id);
+        const bool is_eog = ctx->vocab->is_eog(cur_p->data[i].id);
 
         if (cur_p->data[i].p < thold && !is_eog) {
             continue;
@@ -2314,7 +2317,7 @@ static void llama_sampler_infill_apply(struct llama_sampler * smpl, llama_token_
 
 static struct llama_sampler * llama_sampler_infill_clone(const struct llama_sampler * smpl) {
     const auto * ctx = (const llama_sampler_infill *) smpl->ctx;
-    return llama_sampler_init_infill_impl(*ctx->vocab);
+    return llama_sampler_init_infill(ctx->vocab);
 }
 
 static void llama_sampler_infill_free(struct llama_sampler * smpl) {
@@ -2330,14 +2333,13 @@ static struct llama_sampler_i llama_sampler_infill_i = {
     /* .free   = */ llama_sampler_infill_free,
 };
 
-struct llama_sampler * llama_sampler_init_infill_impl(
-        const struct llama_vocab & vocab) {
+struct llama_sampler * llama_sampler_init_infill(const struct llama_vocab * vocab) {
     return new llama_sampler {
         /* .iface = */ &llama_sampler_infill_i,
         /* .ctx   = */ new llama_sampler_infill {
-            /* .vocab = */ &vocab,
-            /* .buf0 = */ std::vector(512),
-            /* .buf1 = */ std::vector(512),
+            /* .vocab = */ vocab,
+            /* .buf0  = */ std::vector(512),
+            /* .buf1  = */ std::vector(512),
         },
     };
 }
diff --git a/src/llama-sampling.h b/src/llama-sampling.h
index 919f6fdfc..759dd7dcb 100644
--- a/src/llama-sampling.h
+++ b/src/llama-sampling.h
@@ -2,7 +2,9 @@
 
 // TODO: rename llama-sampling.h/.cpp to llama-sampler.h/.cpp ?
 
-#include "llama-grammar.h"
+#include "llama.h"
+
+#include 
 
 struct llama_vocab;
 struct llama_grammar;
@@ -21,24 +23,6 @@ struct llama_sampler_chain {
     mutable int32_t n_sample;
 };
 
-struct llama_sampler * llama_sampler_init_grammar_impl(
-        const struct llama_vocab & vocab,
-                      const char * grammar_str,
-                      const char * grammar_root);
-
-struct llama_sampler * llama_sampler_init_infill_impl(
-        const struct llama_vocab & vocab);
-
-struct llama_sampler * llama_sampler_init_dry_impl(
-        const struct llama_vocab &  vocab,
-                         int32_t    context_size,
-                           float    dry_multiplier,
-                           float    dry_base,
-                         int32_t    dry_allowed_length,
-                         int32_t    dry_penalty_last_n,
-                      const char ** seq_breakers,
-                          size_t    num_breakers);
-
 struct llama_sampler * llama_sampler_init_dry_testing(
                          int32_t   context_size,
                            float   dry_multiplier,
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index a4c015484..ed8751737 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -1,6 +1,7 @@
 #include "llama-vocab.h"
 
 #include "llama-impl.h"
+#include "llama-model-loader.h"
 
 #include "unicode.h"
 
@@ -11,8 +12,10 @@
 #include 
 #include 
 #include 
+#include 
 #include 
-#include 
+#include 
+#include 
 
 //
 // helpers
@@ -62,96 +65,14 @@ struct naive_trie {
 };
 
 //
-// impl
+// tokenizers
 //
 
 struct llm_tokenizer {
-   llm_tokenizer() {}
-   virtual ~llm_tokenizer() = default;
+    llm_tokenizer() {}
+    virtual ~llm_tokenizer() = default;
 };
 
-llama_vocab::~llama_vocab() {
-    delete tokenizer;
-}
-
-int llama_vocab::find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
-    GGML_ASSERT(token_left.find(' ')   == std::string::npos);
-    GGML_ASSERT(token_left.find('\n')  == std::string::npos);
-    GGML_ASSERT(token_right.find(' ')  == std::string::npos);
-    GGML_ASSERT(token_right.find('\n') == std::string::npos);
-
-    auto it = bpe_ranks.find(std::make_pair(token_left, token_right));
-    if (it == bpe_ranks.end()) {
-        return -1;
-    }
-
-    return it->second;
-}
-
-static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) {
-    return vocab.type;
-}
-
-static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) {
-    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-    return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL;
-}
-
-static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) {
-    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-    return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNKNOWN;
-}
-
-static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) {
-    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-    return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_CONTROL;
-}
-
-static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) {
-    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-    return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_BYTE;
-}
-
-static bool llama_is_user_defined_token(const llama_vocab & vocab, llama_token id) {
-    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-    return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_USER_DEFINED;
-}
-
-static bool llama_is_unused_token(const llama_vocab & vocab, llama_token id) {
-    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-    return vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNUSED;
-}
-
-static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) {
-    GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
-    GGML_ASSERT(llama_is_byte_token(vocab, id));
-    const auto & token_data = vocab.id_to_token.at(id);
-    switch (llama_vocab_get_type(vocab)) {
-        case LLAMA_VOCAB_TYPE_SPM:
-        case LLAMA_VOCAB_TYPE_UGM: {
-            auto buf = token_data.text.substr(3, 2);
-            return strtol(buf.c_str(), NULL, 16);
-        }
-        case LLAMA_VOCAB_TYPE_BPE: {
-            GGML_ABORT("fatal error");
-            //return unicode_utf8_to_byte(token_data.text); // TODO: why is this here after GGML_ASSERT?
-        }
-        case LLAMA_VOCAB_TYPE_WPM: {
-            GGML_ABORT("fatal error");
-        }
-        default:
-            GGML_ABORT("fatal error");
-    }
-}
-
-static void llama_escape_whitespace(std::string & text) {
-    replace_all(text, " ", "\xe2\x96\x81");
-}
-
-static void llama_unescape_whitespace(std::string & word) {
-    replace_all(word, "\xe2\x96\x81", " ");
-}
-
 struct llm_symbol {
     using index = int;
     index prev;
@@ -183,14 +104,13 @@ struct llm_bigram_spm {
 };
 
 struct llm_tokenizer_spm : llm_tokenizer {
-    llm_tokenizer_spm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
+    llm_tokenizer_spm(const llama_vocab & /*vocab*/) {}
 };
 
 struct llm_tokenizer_spm_session {
     llm_tokenizer_spm_session(const llama_vocab & vocab) : vocab(vocab) {}
 
-    void tokenize(const std::string & text, std::vector & output) {
-
+    void tokenize(const std::string & text, std::vector & output) {
         // split string into utf8 chars
         int index = 0;
         size_t offs = 0;
@@ -249,13 +169,13 @@ struct llm_tokenizer_spm_session {
     }
 
 private:
-    void resegment(llm_symbol & symbol, std::vector & output) {
+    void resegment(llm_symbol & symbol, std::vector & output) {
         auto text = std::string(symbol.text, symbol.n);
-        auto token = vocab.token_to_id.find(text);
+        auto token = vocab.text_to_token(text);
 
         // Do we need to support is_unused?
-        if (token != vocab.token_to_id.end()) {
-            output.push_back((*token).second);
+        if (token != LLAMA_TOKEN_NULL) {
+            output.push_back(token);
             return;
         }
 
@@ -265,8 +185,8 @@ private:
             // output any symbols that did not form tokens as bytes.
             output.reserve(output.size() + symbol.n);
             for (int j = 0; j < (int)symbol.n; ++j) {
-                llama_vocab::id token_id = llama_byte_to_token_impl(vocab, symbol.text[j]);
-                output.push_back(token_id);
+                llama_token id = vocab.byte_to_token(symbol.text[j]);
+                output.push_back(id);
             }
             return;
         }
@@ -280,17 +200,17 @@ private:
             return;
         }
         const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
-        auto token = vocab.token_to_id.find(text);
+        auto token = vocab.text_to_token(text);
 
-        if (token == vocab.token_to_id.end()) {
+        if (token == LLAMA_TOKEN_NULL) {
             return;
         }
 
-        if (static_cast((*token).second) >= vocab.id_to_token.size()) {
+        if (static_cast(token) >= vocab.n_tokens()) {
             return;
         }
 
-        const auto & tok_data = vocab.id_to_token[(*token).second];
+        const auto & tok_data = vocab.get_token_data(token);
 
         llm_bigram_spm bigram;
         bigram.left  = left;
@@ -353,9 +273,9 @@ struct llm_bigram_bpe {
 };
 
 struct llm_tokenizer_bpe : llm_tokenizer {
-    llm_tokenizer_bpe(const llama_vocab & vocab) : llm_tokenizer() {
-        GGML_ASSERT(vocab.type == LLAMA_VOCAB_TYPE_BPE);
-        switch (vocab.type_pre) {
+    llm_tokenizer_bpe(const llama_vocab & vocab) {
+        GGML_ASSERT(vocab.get_type() == LLAMA_VOCAB_TYPE_BPE);
+        switch (vocab.get_pre_type()) {
             case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
                 regex_exprs = {
                     // original regex from tokenizer.json
@@ -488,39 +408,38 @@ struct llm_tokenizer_bpe : llm_tokenizer {
 };
 
 struct llm_tokenizer_bpe_session {
-    llm_tokenizer_bpe_session(const llama_vocab & vocab) : vocab(vocab),
-        bpe_tokenizer(static_cast(vocab.tokenizer)) {}
+    llm_tokenizer_bpe_session(const llama_vocab & vocab, const llm_tokenizer_bpe & tokenizer) : vocab(vocab), tokenizer(tokenizer) {}
 
-    static void append(const llama_vocab::id token_id, std::vector & output)  {
+    static void append(const llama_token token_id, std::vector & output)  {
         output.push_back(token_id);
     }
 
-    bool append_bos(std::vector & output) const {
-        if (vocab.tokenizer_add_bos) {
-            GGML_ASSERT(vocab.special_bos_id != LLAMA_TOKEN_NULL);
-            output.push_back(vocab.special_bos_id);
+    bool append_bos(std::vector & output) const {
+        if (vocab.get_add_bos()) {
+            GGML_ASSERT(vocab.token_bos() != LLAMA_TOKEN_NULL);
+            output.push_back(vocab.token_bos());
             return true;
         }
         return false;
     }
 
-    bool append_eos(std::vector & output) const {
-        if (vocab.tokenizer_add_eos) {
-            GGML_ASSERT(vocab.special_eos_id != LLAMA_TOKEN_NULL);
-            output.push_back(vocab.special_eos_id);
+    bool append_eos(std::vector & output) const {
+        if (vocab.get_add_eos()) {
+            GGML_ASSERT(vocab.token_eos() != LLAMA_TOKEN_NULL);
+            output.push_back(vocab.token_eos());
             return true;
         }
         return false;
     }
 
-    void check_double_bos_eos(const std::vector & output) const {
-        if (vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
+    void check_double_bos_eos(const std::vector & output) const {
+        if (vocab.get_add_bos() && output.size() >= 2 && output[1] == vocab.token_bos()) {
             LLAMA_LOG_WARN(
                 "%s: Added a BOS token to the prompt as specified by the model but the prompt "
                 "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
                 "Are you sure this is what you want?\n", __FUNCTION__);
         }
-        if (vocab.tokenizer_add_eos && output.size() >= 2 && *(output.end()-2) == vocab.special_eos_id) {
+        if (vocab.get_add_bos() && output.size() >= 2 && *(output.end()-2) == vocab.token_eos()) {
             LLAMA_LOG_WARN(
                 "%s: Added a EOS token to the prompt as specified by the model but the prompt "
                 "also ends with a EOS token. So now the final prompt ends with 2 EOS tokens. "
@@ -528,9 +447,9 @@ struct llm_tokenizer_bpe_session {
         }
     }
 
-    void tokenize(const std::string & text, std::vector & output) {
+    void tokenize(const std::string & text, std::vector & output) {
         int final_prev_index = -1;
-        const auto word_collection = unicode_regex_split(text, bpe_tokenizer->regex_exprs);
+        const auto word_collection = unicode_regex_split(text, tokenizer.regex_exprs);
 
         symbols_final.clear();
 
@@ -541,7 +460,8 @@ struct llm_tokenizer_bpe_session {
             int index = 0;
             size_t offset = 0;
 
-            if (vocab.tokenizer_ignore_merges && vocab.token_to_id.find(word) != vocab.token_to_id.end()) {
+            //if (vocab.tokenizer_ignore_merges && vocab.token_to_id.find(word) != vocab.token_to_id.end()) {
+            if (vocab.get_ignore_merges() && vocab.text_to_token(word) != LLAMA_TOKEN_NULL) {
                 symbols.emplace_back(llm_symbol{-1, -1, word.c_str(), word.size()});
                 offset = word.size();
             }
@@ -615,18 +535,18 @@ struct llm_tokenizer_bpe_session {
                 }
 
                 const std::string str = std::string(symbol.text, symbol.n);
-                const auto token = vocab.token_to_id.find(str);
+                const auto token = vocab.text_to_token(str);
 
-                if (token == vocab.token_to_id.end()) {
+                if (token == LLAMA_TOKEN_NULL) {
                     for (auto j = str.begin(); j != str.end(); ++j) {
                         std::string byte_str(1, *j);
-                        auto token_multibyte = vocab.token_to_id.find(byte_str);
-                        if (token_multibyte != vocab.token_to_id.end()) {
-                            output.push_back(token_multibyte->second);
+                        auto token_multibyte = vocab.text_to_token(byte_str);
+                        if (token_multibyte != LLAMA_TOKEN_NULL) {
+                            output.push_back(token_multibyte);
                         }
                     }
                 } else {
-                    output.push_back((*token).second);
+                    output.push_back(token);
                 }
             }
         }
@@ -660,7 +580,7 @@ private:
     }
 
     const llama_vocab & vocab;
-    const llm_tokenizer_bpe * bpe_tokenizer;
+    const llm_tokenizer_bpe & tokenizer;
 
     std::vector symbols;
     std::vector symbols_final;
@@ -672,14 +592,13 @@ private:
 //
 
 struct llm_tokenizer_wpm : llm_tokenizer {
-    llm_tokenizer_wpm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
+    llm_tokenizer_wpm(const llama_vocab & /*vocab*/) {}
 };
 
 struct llm_tokenizer_wpm_session {
     llm_tokenizer_wpm_session(const llama_vocab & vocab) : vocab(vocab) {}
 
-    void tokenize(const std::string & text, std::vector & output) {
-        const auto & token_map = vocab.token_to_id;
+    void tokenize(const std::string & text, std::vector & output) {
         // normalize and split by whitespace
         std::vector words = preprocess(text);
         // bos token prepended already
@@ -702,10 +621,10 @@ struct llm_tokenizer_wpm_session {
             for (int i = 0; i < n; ++i) {
                 // loop through possible match length
                 bool match = false;
-                for (int j = std::min(n, i + vocab.max_token_len + 1); j > i; j--) {
-                    auto it = token_map.find(word1.substr(i, j - i));
-                    if (it != token_map.end()) {
-                        output.push_back(it->second);
+                for (int j = std::min(n, i + vocab.max_token_len() + 1); j > i; j--) {
+                    auto id = vocab.text_to_token(word1.substr(i, j - i));
+                    if (id != LLAMA_TOKEN_NULL) {
+                        output.push_back(id);
                         match = true;
                         i = j - 1;
                         break;
@@ -720,7 +639,7 @@ struct llm_tokenizer_wpm_session {
 
             // we didn't find any matches for this word
             if (current_tokens == output.size()) {
-                output.push_back(vocab.special_unk_id);
+                output.push_back(vocab.token_unk());
             }
         }
     }
@@ -789,45 +708,45 @@ private:
 //
 
 struct llm_tokenizer_ugm : llm_tokenizer {
-    llm_tokenizer_ugm(const llama_vocab & vocab) : llm_tokenizer() {
-        if (vocab.precompiled_charsmap.size() > 0) {
+    llm_tokenizer_ugm(const llama_vocab & vocab, const std::vector & precompiled_charsmap) {
+        if (precompiled_charsmap.size() > 0) {
             size_t charsmap_offset = 0;
 
             // First four bytes of precompiled_charsmap contains length of binary
             // blob containing XOR-compressed compact double array (XCDA) entries
-            uint32_t xcda_blob_size = *(const uint32_t *) &vocab.precompiled_charsmap[0];
+            uint32_t xcda_blob_size = *(const uint32_t *) &precompiled_charsmap[0];
             charsmap_offset += sizeof(xcda_blob_size);
-            if (xcda_blob_size + charsmap_offset >= vocab.precompiled_charsmap.size()) {
+            if (xcda_blob_size + charsmap_offset >= precompiled_charsmap.size()) {
                 throw std::runtime_error("Index out of array bounds in precompiled charsmap!");
             }
 
             // Next xcda_blob_size bytes contain entries of XOR-compressed compact
             // double array (XCDA). Each entry is bit-packed into a 32-bit integer.
-            xcda_array = (const uint32_t *) &vocab.precompiled_charsmap[charsmap_offset];
+            xcda_array = (const uint32_t *) &precompiled_charsmap[charsmap_offset];
             xcda_array_size = xcda_blob_size / sizeof(uint32_t);
             charsmap_offset += xcda_blob_size;
 
             // Remaining bytes of precompiled charsmap contain null-terminated
             // replacement strings for prefixes matched by the XCDA.
-            prefix_replacements = &vocab.precompiled_charsmap[charsmap_offset];
-            prefix_replacements_size = vocab.precompiled_charsmap.size() - charsmap_offset;
+            prefix_replacements = &precompiled_charsmap[charsmap_offset];
+            prefix_replacements_size = precompiled_charsmap.size() - charsmap_offset;
         }
 
-        for (unsigned int id = 0; id < vocab.id_to_token.size(); ++id) {
-            const auto &token_data = vocab.id_to_token[id];
+        for (uint32_t id = 0; id < vocab.n_tokens(); ++id) {
+            const auto & token_data = vocab.get_token_data(id);
 
-            if (llama_is_normal_token(vocab, id)) {
+            if (vocab.is_normal(id)) {
                 min_score = std::min(min_score, token_data.score);
                 max_score = std::max(max_score, token_data.score);
             }
 
-            if (llama_is_normal_token(vocab, id) ||
-                llama_is_user_defined_token(vocab, id) ||
-                llama_is_unused_token(vocab, id)) {
+            if (vocab.is_normal(id) ||
+                vocab.is_user_defined(id) ||
+                vocab.is_unused(id)) {
                 token_matcher.insert(token_data.text.data(), token_data.text.size(), id);
             }
 
-            if (llama_is_user_defined_token(vocab, id)) {
+            if (vocab.is_user_defined(id)) {
                 user_defined_token_matcher.insert(token_data.text.data(), token_data.text.size());
             }
         }
@@ -856,8 +775,7 @@ struct llm_tokenizer_ugm : llm_tokenizer {
 };
 
 struct llm_tokenizer_ugm_session {
-    llm_tokenizer_ugm_session(const llama_vocab & vocab) : vocab(vocab),
-        ugm_tokenizer(static_cast(vocab.tokenizer)) {}
+    llm_tokenizer_ugm_session(const llama_vocab & vocab, const llm_tokenizer_ugm & tokenizer) : vocab(vocab), tokenizer(tokenizer) {}
 
     /* This implementation is based on SentencePiece optimized Viterbi algorithm for
      * unigram language models. The general idea is to:
@@ -872,7 +790,7 @@ struct llm_tokenizer_ugm_session {
      * After processing the whole sequence we backtrack from the end to get
      * the best tokenization.
     */
-    void tokenize(const std::string & text, std::vector & output) {
+    void tokenize(const std::string & text, std::vector & output) {
         // get current size of output (for reversal later)
         size_t output_size = output.size();
 
@@ -885,9 +803,9 @@ struct llm_tokenizer_ugm_session {
         }
 
         // initialize score_sum to -FLT_MAX so it will be always lower than sums of token scores
-        std::vector tokenization_results(input_len + 1, {vocab.special_unk_id, 0, -FLT_MAX});
+        std::vector tokenization_results(input_len + 1, {vocab.token_unk(), 0, -FLT_MAX});
         // at the beginning tokenization score is zero
-        tokenization_results[0] = { vocab.special_unk_id, 0, 0 };
+        tokenization_results[0] = { vocab.token_unk(), 0, 0 };
 
         for (size_t input_offset = 0; input_offset < input_len;) {
             size_t prefix_offset = input_offset;
@@ -897,7 +815,7 @@ struct llm_tokenizer_ugm_session {
             // traverse the token matcher trie to find a matching token
             bool single_codepoint_token_found = false;
             const struct best_tokenization & current_best = tokenization_results[input_offset];
-            const struct naive_trie * node = ugm_tokenizer->token_matcher.traverse(normalized[prefix_offset++]);
+            const struct naive_trie * node = tokenizer.token_matcher.traverse(normalized[prefix_offset++]);
 
             while (prefix_offset <= input_len && node != NULL) {
                 // check if we found valid token in prefix
@@ -907,13 +825,13 @@ struct llm_tokenizer_ugm_session {
                         single_codepoint_token_found = true;
                     }
                     llama_token token_id = node->value;
-                    const auto & token_data = vocab.id_to_token[token_id];
+                    const auto & token_data = vocab.get_token_data(token_id);
 
                     // we set the user-defined token scores to 0 to make them more likely to be selected
                     // (normal token scores are log probabilities, so they are negative)
                     // score type is double here to make tokenization results exactly
                     // the same as in the HF tokenizer using SentencePiece
-                    const double token_score = llama_is_user_defined_token(vocab, token_id) ? 0.0 : token_data.score;
+                    const double token_score = vocab.is_user_defined(token_id) ? 0.0 : token_data.score;
                     const double challenger_score = current_best.score_sum + token_score;
                     struct best_tokenization & current_champ = tokenization_results[prefix_offset];
                     if (challenger_score > current_champ.score_sum) {
@@ -927,11 +845,11 @@ struct llm_tokenizer_ugm_session {
             // if we didn't find a valid token corresponding to the whole UTF code point
             // then use unknown token as the tokenization of this UTF code point
             if (!single_codepoint_token_found) {
-                const double challenger_score = current_best.score_sum + ugm_tokenizer->unknown_token_score;
+                const double challenger_score = current_best.score_sum + tokenizer.unknown_token_score;
                 prefix_offset = input_offset + n_utf8_code_units;
                 struct best_tokenization & current_champ = tokenization_results[prefix_offset];
                 if (challenger_score > current_champ.score_sum) {
-                    struct best_tokenization challenger = { vocab.special_unk_id, input_offset, (float) challenger_score };
+                    struct best_tokenization challenger = { vocab.token_unk(), input_offset, (float) challenger_score };
                     current_champ = challenger;
                 }
             }
@@ -944,7 +862,7 @@ struct llm_tokenizer_ugm_session {
         // merge sequences of consecutive unknown tokens into single unknown tokens
         bool is_prev_unknown = false;
         for (struct best_tokenization & tokenization = tokenization_results[input_len]; ; tokenization = tokenization_results[tokenization.input_offset]) {
-            bool is_unknown = tokenization.token_id == vocab.special_unk_id;
+            bool is_unknown = tokenization.token_id == vocab.token_unk();
             if (!(is_prev_unknown && is_unknown)) {
                 output.push_back(tokenization.token_id);
             }
@@ -971,11 +889,11 @@ private:
         normalized->clear();
         normalized->reserve(input.size() * 3);
 
-        const std::string space = vocab.tokenizer_escape_whitespaces ? ugm_tokenizer->escaped_space : " ";
+        const std::string space = vocab.get_escape_whitespaces() ? tokenizer.escaped_space : " ";
 
-        bool shall_prepend_space = !vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
-        bool shall_append_space = vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
-        bool shall_merge_spaces = vocab.tokenizer_remove_extra_whitespaces;
+        const bool shall_prepend_space = !vocab.get_treat_whitespace_as_suffix() && vocab.get_add_space_prefix();
+        const bool shall_append_space  =  vocab.get_treat_whitespace_as_suffix() && vocab.get_add_space_prefix();
+        const bool shall_merge_spaces  =  vocab.get_remove_extra_whitespaces();
 
         bool is_space_prepended = false;
         bool processing_non_ws = false;
@@ -1067,7 +985,7 @@ private:
 
         // if input prefix matches some user-defined token return this token as normalization result
         auto user_defined_token_match =
-           ugm_tokenizer->user_defined_token_matcher.get_longest_prefix(&input[input_offset], input.size() - input_offset);
+           tokenizer.user_defined_token_matcher.get_longest_prefix(&input[input_offset], input.size() - input_offset);
         if (user_defined_token_match.second > 0) {
             return { &input[input_offset], user_defined_token_match.second, user_defined_token_match.second };
         }
@@ -1075,8 +993,8 @@ private:
         size_t longest_prefix_length = 0;
         size_t longest_prefix_offset = 0;
 
-        if (ugm_tokenizer->xcda_array_size > 0) {
-            struct xcda_array_view xcda_view(ugm_tokenizer->xcda_array, ugm_tokenizer->xcda_array_size);
+        if (tokenizer.xcda_array_size > 0) {
+            struct xcda_array_view xcda_view(tokenizer.xcda_array, tokenizer.xcda_array_size);
 
             // Find the longest normalized sequence matching the input prefix by walking
             // the XOR-compressed compact double array (XCDA) starting from the root node
@@ -1112,10 +1030,10 @@ private:
 
         if (longest_prefix_length > 0) {
             // we have a match, so return the replacement sequence
-            if (longest_prefix_offset >= ugm_tokenizer->prefix_replacements_size) {
+            if (longest_prefix_offset >= tokenizer.prefix_replacements_size) {
                 throw std::runtime_error("Index out of array bounds in precompiled charsmap!");
             }
-            const char * prefix_replacement = &(ugm_tokenizer->prefix_replacements)[longest_prefix_offset];
+            const char * prefix_replacement = &(tokenizer.prefix_replacements)[longest_prefix_offset];
             return { prefix_replacement, strlen(prefix_replacement), longest_prefix_length };
         }
 
@@ -1132,7 +1050,7 @@ private:
     }
 
     const llama_vocab & vocab;
-    const llm_tokenizer_ugm * ugm_tokenizer;
+    const llm_tokenizer_ugm & tokenizer;
 };
 
 //
@@ -1194,15 +1112,15 @@ static std::vector llama_unescape_rwkv_token(const std::string & escape
 }
 
 struct llm_tokenizer_rwkv : llm_tokenizer {
-    llm_tokenizer_rwkv(const llama_vocab & vocab) : llm_tokenizer() {
+    llm_tokenizer_rwkv(const llama_vocab & vocab) {
         // RWKV supports arbitrary byte tokens, but the vocab struct only supports string tokens.
         // For now, we decode the vocab here into the lookup we'll use for tokenization.
 
         // build trie
-        for (unsigned int id = 0; id < vocab.id_to_token.size(); ++id) {
-            const auto & token = vocab.id_to_token[id];
-            const auto data = llama_unescape_rwkv_token(token.text);
-            token_matcher.insert((const char *) data.data(), data.size(), id);
+        for (uint32_t id = 0; id < vocab.n_tokens(); ++id) {
+            const auto & data = vocab.get_token_data(id);
+            const auto text = llama_unescape_rwkv_token(data.text);
+            token_matcher.insert((const char *) text.data(), text.size(), id);
         }
     }
 
@@ -1210,16 +1128,15 @@ struct llm_tokenizer_rwkv : llm_tokenizer {
 };
 
 struct llm_tokenizer_rwkv_session {
-    llm_tokenizer_rwkv_session(const llama_vocab & vocab) : vocab(vocab),
-        rwkv_tokenizer(static_cast(*vocab.tokenizer)) {}
+    llm_tokenizer_rwkv_session(const llama_vocab & vocab, const llm_tokenizer_rwkv & tokenizer) : vocab(vocab), tokenizer(tokenizer) {}
 
-    void tokenize(const std::string & text, std::vector & output) {
+    void tokenize(const std::string & text, std::vector & output) {
         uint32_t position = 0;
         while (position < text.size()) {
-            const struct naive_trie * node = rwkv_tokenizer.token_matcher.traverse(text[position]);
+            const struct naive_trie * node = tokenizer.token_matcher.traverse(text[position]);
             if (node == NULL) {
                 // no matching token found, add unknown token
-                output.push_back(vocab.special_unk_id);
+                output.push_back(vocab.token_unk());
                 position += 1;
                 continue;
             }
@@ -1243,33 +1160,11 @@ struct llm_tokenizer_rwkv_session {
 
 private:
     const llama_vocab & vocab;
-    const llm_tokenizer_rwkv & rwkv_tokenizer;
+    const llm_tokenizer_rwkv & tokenizer;
 };
 
-void llama_vocab::init_tokenizer() {
-    switch (type) {
-        case LLAMA_VOCAB_TYPE_SPM:
-            tokenizer = new llm_tokenizer_spm(*this);
-            break;
-        case LLAMA_VOCAB_TYPE_BPE:
-            tokenizer = new llm_tokenizer_bpe(*this);
-            break;
-        case LLAMA_VOCAB_TYPE_WPM:
-            tokenizer = new llm_tokenizer_wpm(*this);
-            break;
-        case LLAMA_VOCAB_TYPE_UGM:
-            tokenizer = new llm_tokenizer_ugm(*this);
-            break;
-        case LLAMA_VOCAB_TYPE_RWKV:
-            tokenizer = new llm_tokenizer_rwkv(*this);
-            break;
-        default:
-            GGML_ABORT("unsupported vocab type");
-    }
-}
-
 //
-// (de-) tokenize
+// impl
 //
 
 typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
@@ -1278,7 +1173,7 @@ typedef enum FRAGMENT_BUFFER_VARIANT_TYPE {
 } FRAGMENT_BUFFER_VARIANT_TYPE;
 
 struct fragment_buffer_variant {
-    fragment_buffer_variant(llama_vocab::id _token)
+    fragment_buffer_variant(llama_token _token)
     :
         type(FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN),
         token(_token),
@@ -1289,7 +1184,7 @@ struct fragment_buffer_variant {
     fragment_buffer_variant(const std::string & _raw_text, int64_t _offset, int64_t _length)
     :
         type(FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT),
-        token((llama_vocab::id) - 1),
+        token((llama_token) - 1),
         raw_text(_raw_text),
         offset(_offset),
         length(_length){
@@ -1299,20 +1194,963 @@ struct fragment_buffer_variant {
         }
 
     const FRAGMENT_BUFFER_VARIANT_TYPE type;
-    const llama_vocab::id token;
+    const llama_token token;
     const std::string _dummy;
     const std::string & raw_text;
     const uint64_t offset;
     const uint64_t length;
 };
 
+struct llama_vocab::impl {
+    uint32_t n_token_types = 0; // for BERT-style token types
+
+    enum llama_vocab_type     type     = LLAMA_VOCAB_TYPE_SPM;
+    enum llama_vocab_pre_type pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+
+    int max_token_len = 0; // used for optimizing longest token search
+
+    // default LLaMA special tokens
+    // TODO: should we set all of these to LLAMA_TOKEN_NULL?
+    llama_token special_bos_id  = 1;
+    llama_token special_eos_id  = 2;
+    llama_token special_eot_id  = LLAMA_TOKEN_NULL;
+    llama_token special_eom_id  = LLAMA_TOKEN_NULL;
+    llama_token special_unk_id  = 0;
+    llama_token special_sep_id  = LLAMA_TOKEN_NULL;
+    llama_token special_pad_id  = LLAMA_TOKEN_NULL;
+    llama_token special_cls_id  = LLAMA_TOKEN_NULL; // TODO: revisit if this is really needed https://github.com/ggerganov/llama.cpp/pull/10930
+    llama_token special_mask_id = LLAMA_TOKEN_NULL;
+
+    llama_token linefeed_id = 13;
+
+    // fim tokens
+    llama_token special_fim_pre_id = LLAMA_TOKEN_NULL;
+    llama_token special_fim_suf_id = LLAMA_TOKEN_NULL;
+    llama_token special_fim_mid_id = LLAMA_TOKEN_NULL;
+    llama_token special_fim_pad_id = LLAMA_TOKEN_NULL;
+    llama_token special_fim_rep_id = LLAMA_TOKEN_NULL; // repo
+    llama_token special_fim_sep_id = LLAMA_TOKEN_NULL; // file separator
+
+    // tokenizer flags
+    bool add_space_prefix           = false;
+    bool add_bos                    = false;
+    bool add_eos                    = false;
+    bool ignore_merges              = false;
+    bool clean_spaces               = false;  // clean_up_tokenization_spaces
+    bool remove_extra_whitespaces   = false;
+    bool escape_whitespaces         = true;
+    bool treat_whitespace_as_suffix = false;
+
+    std::unordered_map token_to_id;
+    std::vector                      id_to_token;
+
+    std::vector cache_special_tokens;
+    std::vector cache_token_to_piece; // llama_token_to_piece(special = true);
+
+    std::map, int> bpe_ranks;
+
+    // set of all tokens that cause "end of generation"
+    std::set special_eog_ids;
+
+    std::unique_ptr tokenizer;
+
+    std::vector precompiled_charsmap;
+
+    impl(const llama_vocab & vocab) : vocab(vocab) {
+    }
+
+    ~impl() = default;
+
+    void load(llama_model_loader & ml, const LLM_KV & kv);
+
+    enum llama_vocab_type get_type() const;
+
+    std::string type_name() const;
+
+    bool is_normal      (llama_token id) const;
+    bool is_unknown     (llama_token id) const;
+    bool is_control     (llama_token id) const;
+    bool is_byte        (llama_token id) const;
+    bool is_user_defined(llama_token id) const;
+    bool is_unused      (llama_token id) const;
+    bool is_eog         (llama_token id) const;
+
+    uint8_t token_to_byte(llama_token id) const;
+
+    llama_token_attr token_get_attr(llama_token id) const;
+
+    void init_tokenizer(enum llama_vocab_type type);
+
+    void tokenizer_st_partition(std::forward_list & buffer, bool parse_special) const;
+
+    std::string token_to_piece_for_cache(
+                  llama_token   token,
+                         bool   special) const;
+
+
+    std::vector tokenize(
+            const std::string & raw_text,
+                         bool   add_special,
+                         bool   parse_special = false) const;
+
+    int32_t tokenize(
+                   const char * text,
+                      int32_t   text_len,
+                  llama_token * tokens,
+                      int32_t   n_tokens_max,
+                         bool   add_special,
+                         bool   parse_special) const;
+
+    // does not write null-terminator to buf
+    int32_t token_to_piece(
+                  llama_token   token,
+                         char * buf,
+                      int32_t   length,
+                      int32_t   lstrip,
+                         bool   special) const;
+
+    // use cached data
+    const std::string & token_to_piece(llama_token token) const;
+
+    int32_t detokenize(
+            const llama_token * tokens,
+                      int32_t   n_tokens,
+                         char * text,
+                      int32_t   text_len_max,
+                         bool   remove_special,
+                         bool   unparse_special) const;
+
+    std::string detokenize(
+            const std::vector & tokens,
+                                      bool   special) const;
+
+    void print_info() const;
+
+private:
+    const llama_vocab & vocab;
+};
+
+void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
+    struct gguf_context * ctx = ml.meta.get();
+
+    // determine vocab type
+    {
+        std::string tokenizer_model;
+        std::string tokenizer_pre;
+
+        ml.get_key(LLM_KV_TOKENIZER_MODEL, tokenizer_model);
+        ml.get_key(LLM_KV_TOKENIZER_PRE,   tokenizer_pre, false);
+
+        ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, n_token_types, false);
+
+        if (tokenizer_model == "no_vocab" || tokenizer_model == "none") {
+            type = LLAMA_VOCAB_TYPE_NONE;
+
+            // default special tokens
+            special_bos_id  = LLAMA_TOKEN_NULL;
+            special_eos_id  = LLAMA_TOKEN_NULL;
+            special_unk_id  = LLAMA_TOKEN_NULL;
+            special_sep_id  = LLAMA_TOKEN_NULL;
+            special_pad_id  = LLAMA_TOKEN_NULL;
+            special_cls_id  = LLAMA_TOKEN_NULL;
+            special_mask_id = LLAMA_TOKEN_NULL;
+            linefeed_id     = LLAMA_TOKEN_NULL;
+
+            // read vocab size from metadata
+            uint32_t n_tokens = 0;
+            if (!ml.get_key(LLM_KV_VOCAB_SIZE, n_tokens, false)) {
+                LLAMA_LOG_WARN("%s: there is no vocab_size in metadata\n", __func__);
+            }
+
+            return;
+        }
+
+        if (tokenizer_model == "llama") {
+            type = LLAMA_VOCAB_TYPE_SPM;
+
+            // default special tokens
+            special_bos_id  = 1;
+            special_eos_id  = 2;
+            special_unk_id  = 0;
+            special_sep_id  = LLAMA_TOKEN_NULL;
+            special_pad_id  = LLAMA_TOKEN_NULL;
+            special_cls_id  = LLAMA_TOKEN_NULL;
+            special_mask_id = LLAMA_TOKEN_NULL;
+        } else if (tokenizer_model == "bert") {
+            type = LLAMA_VOCAB_TYPE_WPM;
+
+            // default special tokens
+            special_bos_id  = LLAMA_TOKEN_NULL;
+            special_eos_id  = LLAMA_TOKEN_NULL;
+            special_unk_id  = 100;
+            special_sep_id  = 102;
+            special_pad_id  = 0;
+            special_cls_id  = 101;
+            special_mask_id = 103;
+        } else if (tokenizer_model == "gpt2") {
+            type = LLAMA_VOCAB_TYPE_BPE;
+
+            // read bpe merges and populate bpe ranks
+            const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
+            if (merges_keyidx == -1) {
+                throw std::runtime_error("cannot find tokenizer merges in model file\n");
+            }
+
+            const int n_merges = gguf_get_arr_n(ctx, merges_keyidx);
+            for (int i = 0; i < n_merges; i++) {
+                const std::string word = gguf_get_arr_str(ctx, merges_keyidx, i);
+                //GGML_ASSERT(unicode_cpts_from_utf8(word).size() > 0);
+
+                std::string first;
+                std::string second;
+
+                const size_t pos = word.find(' ', 1);
+
+                if (pos != std::string::npos) {
+                    first  = word.substr(0, pos);
+                    second = word.substr(pos + 1);
+                }
+
+                bpe_ranks.emplace(std::make_pair(first, second), i);
+            }
+
+            // default special tokens
+            special_bos_id  = 11;
+            special_eos_id  = 11;
+            special_unk_id  = LLAMA_TOKEN_NULL;
+            special_sep_id  = LLAMA_TOKEN_NULL;
+            special_pad_id  = LLAMA_TOKEN_NULL;
+            special_cls_id  = LLAMA_TOKEN_NULL;
+            special_mask_id = LLAMA_TOKEN_NULL;
+        } else if (tokenizer_model == "t5") {
+            type = LLAMA_VOCAB_TYPE_UGM;
+
+            // default special tokens
+            special_bos_id  = LLAMA_TOKEN_NULL;
+            special_eos_id  = 1;
+            special_unk_id  = 2;
+            special_sep_id  = LLAMA_TOKEN_NULL;
+            special_pad_id  = 0;
+            special_cls_id  = LLAMA_TOKEN_NULL;
+            special_mask_id = LLAMA_TOKEN_NULL;
+
+            const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str());
+            if (precompiled_charsmap_keyidx != -1) {
+                size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx);
+                const char * pc = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx);
+                precompiled_charsmap.assign(pc, pc + n_precompiled_charsmap);
+#ifdef IS_BIG_ENDIAN
+                // correct endiannes of data in precompiled_charsmap binary blob
+                uint32_t * xcda_blob_size = (uint32_t *) &precompiled_charsmap[0];
+                *xcda_blob_size = __builtin_bswap32(*xcda_blob_size);
+                assert(*xcda_blob_size + sizeof(uint32_t) < n_precompiled_charsmap);
+                size_t xcda_array_size = *xcda_blob_size / sizeof(uint32_t);
+                uint32_t * xcda_array = (uint32_t *) &precompiled_charsmap[sizeof(uint32_t)];
+                for (size_t i = 0; i < xcda_array_size; ++i) {
+                    xcda_array[i] = __builtin_bswap32(xcda_array[i]);
+                }
+#endif
+            }
+        } else if (tokenizer_model == "rwkv") {
+            type = LLAMA_VOCAB_TYPE_RWKV;
+
+            // default special tokens
+            special_bos_id = LLAMA_TOKEN_NULL;
+            special_eos_id = LLAMA_TOKEN_NULL;
+            special_unk_id = LLAMA_TOKEN_NULL;
+            special_sep_id = LLAMA_TOKEN_NULL;
+            special_pad_id = LLAMA_TOKEN_NULL;
+        } else {
+            throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str()));
+        }
+
+        // for now, only BPE models have pre-tokenizers
+        if (type == LLAMA_VOCAB_TYPE_BPE) {
+            add_space_prefix = false;
+            clean_spaces = true;
+            if (tokenizer_pre.empty()) {
+                LLAMA_LOG_WARN("%s: missing pre-tokenizer type, using: 'default'\n", __func__);
+                LLAMA_LOG_WARN("%s:                                             \n", __func__);
+                LLAMA_LOG_WARN("%s: ************************************        \n", __func__);
+                LLAMA_LOG_WARN("%s: GENERATION QUALITY WILL BE DEGRADED!        \n", __func__);
+                LLAMA_LOG_WARN("%s: CONSIDER REGENERATING THE MODEL             \n", __func__);
+                LLAMA_LOG_WARN("%s: ************************************        \n", __func__);
+                LLAMA_LOG_WARN("%s:                                             \n", __func__);
+                pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+            } else if (tokenizer_pre == "default") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+            } else if (
+                    tokenizer_pre == "llama3"   ||
+                    tokenizer_pre == "llama-v3" ||
+                    tokenizer_pre == "llama-bpe"||
+                    tokenizer_pre == "falcon3") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_LLAMA3;
+                ignore_merges = true;
+                add_bos = true;
+            } else if (
+                    tokenizer_pre == "deepseek-llm") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM;
+                clean_spaces = false;
+            } else if (
+                    tokenizer_pre == "deepseek-coder") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER;
+                clean_spaces = false;
+            } else if (
+                    tokenizer_pre == "deepseek-v3") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_DEEPSEEK3_LLM;
+                clean_spaces = false;
+            } else if (
+                    tokenizer_pre == "falcon") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_FALCON;
+            } else if (
+                    tokenizer_pre == "mpt") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_MPT;
+            } else if (
+                    tokenizer_pre == "starcoder") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_STARCODER;
+            } else if (
+                    tokenizer_pre == "gpt-2"   ||
+                    tokenizer_pre == "phi-2"   ||
+                    tokenizer_pre == "jina-es" ||
+                    tokenizer_pre == "jina-de" ||
+                    tokenizer_pre == "gigachat"   ||
+                    tokenizer_pre == "jina-v1-en" ||
+                    tokenizer_pre == "jina-v2-es" ||
+                    tokenizer_pre == "jina-v2-de" ||
+                    tokenizer_pre == "jina-v2-code" ||
+                    tokenizer_pre == "roberta-bpe") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_GPT2;
+            } else if (
+                    tokenizer_pre == "refact") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_REFACT;
+            } else if (
+                tokenizer_pre == "command-r") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_COMMAND_R;
+                clean_spaces = false;
+            } else if (
+                tokenizer_pre == "qwen2") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_QWEN2;
+                clean_spaces = false;
+            } else if (
+                tokenizer_pre == "stablelm2") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_STABLELM2;
+            } else if (
+                tokenizer_pre == "olmo") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_OLMO;
+            } else if (
+                tokenizer_pre == "dbrx") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_DBRX;
+            } else if (
+                tokenizer_pre == "smaug-bpe") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_SMAUG;
+            } else if (
+                tokenizer_pre == "poro-chat") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_PORO;
+                clean_spaces = false;
+            } else if (
+                tokenizer_pre == "chatglm-bpe") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_CHATGLM4;
+                special_bos_id = LLAMA_TOKEN_NULL;
+            } else if (
+                tokenizer_pre == "viking") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_VIKING;
+                clean_spaces = false;
+            } else if (
+                tokenizer_pre == "jais") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_JAIS;
+            } else if (
+                tokenizer_pre == "tekken") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_TEKKEN;
+                clean_spaces = false;
+                ignore_merges = true;
+                add_bos = true;
+            } else if (
+                tokenizer_pre == "smollm") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_SMOLLM;
+                clean_spaces = false;
+            } else if (
+                tokenizer_pre == "codeshell") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_CODESHELL;
+            } else if (
+                tokenizer_pre == "bloom") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_BLOOM;
+            } else if (
+                tokenizer_pre == "gpt3-finnish") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_GPT3_FINNISH;
+            } else if (
+                tokenizer_pre == "exaone") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_EXAONE;
+            } else if (
+                tokenizer_pre == "chameleon") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_CHAMELEON;
+                add_bos = true;
+                clean_spaces = false;
+            } else if (
+                tokenizer_pre == "minerva-7b") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_MINERVA;
+            } else if (
+                tokenizer_pre == "megrez") {
+                pre_type = LLAMA_VOCAB_PRE_TYPE_QWEN2;
+            } else {
+                throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
+            }
+        } else if (type == LLAMA_VOCAB_TYPE_SPM) {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+            add_space_prefix = true;
+            clean_spaces = false;
+            add_bos = true;
+            add_eos = false;
+        } else if (type == LLAMA_VOCAB_TYPE_WPM) {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+            add_space_prefix = false;
+            clean_spaces = true;
+            add_bos = true;
+            add_eos = false;
+        } else if (type == LLAMA_VOCAB_TYPE_UGM) {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+            add_bos = false;
+            add_eos = true;
+        } else if (type == LLAMA_VOCAB_TYPE_RWKV) {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+            add_space_prefix = false;
+            clean_spaces = false;
+            add_bos = false;
+            add_eos = false;
+        } else {
+            pre_type = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
+        }
+
+        ml.get_key(LLM_KV_TOKENIZER_ADD_PREFIX,      add_space_prefix,         false);
+        ml.get_key(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, remove_extra_whitespaces, false);
+    }
+
+    const int token_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_LIST).c_str());
+    if (token_idx == -1) {
+        throw std::runtime_error("cannot find tokenizer vocab in model file\n");
+    }
+
+    const float * scores = nullptr;
+    const int score_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_SCORES).c_str());
+    if (score_idx != -1) {
+        scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
+    }
+
+    const int * toktypes = nullptr;
+    const int toktype_idx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_TOKEN_TYPE).c_str());
+    if (toktype_idx != -1) {
+        toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
+    }
+
+    uint32_t n_tokens = gguf_get_arr_n(ctx, token_idx);
+    id_to_token.resize(n_tokens);
+
+    for (uint32_t i = 0; i < n_tokens; i++) {
+        std::string word = gguf_get_arr_str(ctx, token_idx, i);
+        if (word.empty()) {
+            LLAMA_LOG_WARN("%s: empty token at index %u\n", __func__, i);
+            word = "[EMPTY_" + std::to_string(i) + "]";
+        }
+
+        token_to_id[word] = i;
+        max_token_len = std::max(max_token_len, (int) word.size());
+
+        auto & token_data = id_to_token[i];
+        token_data.text  = std::move(word);
+        token_data.score = scores ? scores[i] : 0.0f;
+        token_data.attr  = LLAMA_TOKEN_ATTR_NORMAL;
+
+        if (toktypes) {  //TODO: remove, required until per token attributes are available from GGUF file
+            switch(toktypes[i]) {
+                case LLAMA_TOKEN_TYPE_UNKNOWN:      token_data.attr = LLAMA_TOKEN_ATTR_UNKNOWN;      break;
+                case LLAMA_TOKEN_TYPE_UNUSED:       token_data.attr = LLAMA_TOKEN_ATTR_UNUSED;       break;
+                case LLAMA_TOKEN_TYPE_NORMAL:       token_data.attr = LLAMA_TOKEN_ATTR_NORMAL;       break;
+                case LLAMA_TOKEN_TYPE_CONTROL:      token_data.attr = LLAMA_TOKEN_ATTR_CONTROL;      break;
+                case LLAMA_TOKEN_TYPE_USER_DEFINED: token_data.attr = LLAMA_TOKEN_ATTR_USER_DEFINED; break;
+                case LLAMA_TOKEN_TYPE_BYTE:         token_data.attr = LLAMA_TOKEN_ATTR_BYTE;         break;
+                case LLAMA_TOKEN_TYPE_UNDEFINED:    token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED;    break;
+                default:                            token_data.attr = LLAMA_TOKEN_ATTR_UNDEFINED;    break;
+            }
+        }
+    }
+    GGML_ASSERT(id_to_token.size() == token_to_id.size());
+
+    init_tokenizer(type);
+
+    // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
+    if (type == LLAMA_VOCAB_TYPE_SPM) {
+        try {
+            linefeed_id = vocab.byte_to_token('\n');
+        } catch (const std::exception & e) {
+            LLAMA_LOG_WARN("%s: SPM vocabulary, but newline token not found: %s! Using special_pad_id instead.", __func__, e.what());
+            linefeed_id = special_pad_id;
+        }
+    } else if (type == LLAMA_VOCAB_TYPE_WPM) {
+        linefeed_id = special_pad_id;
+    } else if (type == LLAMA_VOCAB_TYPE_RWKV) {
+        const std::vector ids = tokenize("\n", false);
+        GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
+        linefeed_id = ids[0];
+    } else {
+        const std::vector ids = tokenize("\xC4\x8A", false); // U+010A
+
+        //GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
+        if (ids.empty()) {
+            LLAMA_LOG_WARN("%s: model vocab missing newline token, using special_pad_id instead\n", __func__);
+            linefeed_id = special_pad_id;
+        } else {
+            linefeed_id = ids[0];
+        }
+    }
+
+    // special tokens
+    {
+        const std::vector> special_token_types = {
+            { LLM_KV_TOKENIZER_BOS_ID,     special_bos_id     },
+            { LLM_KV_TOKENIZER_EOS_ID,     special_eos_id     },
+            { LLM_KV_TOKENIZER_EOT_ID,     special_eot_id     },
+            { LLM_KV_TOKENIZER_EOM_ID,     special_eom_id     },
+            { LLM_KV_TOKENIZER_UNK_ID,     special_unk_id     },
+            { LLM_KV_TOKENIZER_SEP_ID,     special_sep_id     },
+            { LLM_KV_TOKENIZER_PAD_ID,     special_pad_id     },
+            { LLM_KV_TOKENIZER_CLS_ID,     special_cls_id     },
+            { LLM_KV_TOKENIZER_MASK_ID,    special_mask_id    },
+            { LLM_KV_TOKENIZER_FIM_PRE_ID, special_fim_pre_id },
+            { LLM_KV_TOKENIZER_FIM_SUF_ID, special_fim_suf_id },
+            { LLM_KV_TOKENIZER_FIM_MID_ID, special_fim_mid_id },
+            { LLM_KV_TOKENIZER_FIM_PAD_ID, special_fim_pad_id },
+            { LLM_KV_TOKENIZER_FIM_REP_ID, special_fim_rep_id },
+            { LLM_KV_TOKENIZER_FIM_SEP_ID, special_fim_sep_id },
+
+            // deprecated
+            { LLM_KV_TOKENIZER_PREFIX_ID, special_fim_pre_id },
+            { LLM_KV_TOKENIZER_SUFFIX_ID, special_fim_suf_id },
+            { LLM_KV_TOKENIZER_MIDDLE_ID, special_fim_mid_id },
+        };
+
+        for (const auto & it : special_token_types) {
+            const std::string & key = kv(std::get<0>(it));
+            int32_t & id = std::get<1>(it);
+
+            uint32_t new_id;
+            if (!ml.get_key(std::get<0>(it), new_id, false)) {
+                continue;
+            }
+            if (new_id >= id_to_token.size()) {
+                LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n",
+                    __func__, key.c_str(), new_id, id);
+            } else {
+                id = new_id;
+            }
+        }
+
+        // Handle add_bos and add_eos
+        {
+            bool temp = true;
+
+            if (ml.get_key(LLM_KV_TOKENIZER_ADD_BOS, temp, false)) {
+                add_bos = temp;
+            }
+            if (ml.get_key(LLM_KV_TOKENIZER_ADD_EOS, temp, false)) {
+                add_eos = temp;
+            }
+        }
+
+        // auto-detect special tokens by text
+        // TODO: convert scripts should provide these tokens through the KV metadata LLM_KV_TOKENIZER_...
+        //       for now, we apply this workaround to find the tokens based on their text
+
+        for (const auto & t : token_to_id) {
+            // find EOT token: "<|eot_id|>", "<|im_end|>", "", etc.
+            if (special_eot_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|eot_id|>"
+                        || t.first == "<|im_end|>"
+                        || t.first == "<|end|>"
+                        || t.first == ""
+                        || t.first == "<|endoftext|>"
+                        || t.first == ""
+                        || t.first == "<|end▁of▁sentence|>" // DeepSeek
+                   ) {
+                    special_eot_id = t.second;
+                    if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find EOM token: "<|eom_id|>"
+            if (special_eom_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|eom_id|>"
+                        ) {
+                    special_eom_id = t.second;
+                    if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_PRE token: "<|fim_prefix|>", "", "
", etc.
+            if (special_fim_pre_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_prefix|>"  // Qwen
+                        || t.first == ""
+                        || t.first == "<|fim▁begin|>" // DeepSeek
+                        || t.first == "
"
+                        ) {
+                    special_fim_pre_id = t.second;
+                    if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_SUF token: "<|fim_suffix|>", "", "", etc.
+            if (special_fim_suf_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_suffix|>" // Qwen
+                        || t.first == ""
+                        || t.first == "<|fim▁hole|>" // DeepSeek
+                        || t.first == ""
+                        ) {
+                    special_fim_suf_id = t.second;
+                    if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_MID token: "<|fim_middle|>", "", "", etc.
+            if (special_fim_mid_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_middle|>" // Qwen
+                        || t.first == ""
+                        || t.first == "<|fim▁end|>"  // DeepSeek
+                        || t.first == ""
+                        ) {
+                    special_fim_mid_id = t.second;
+                    if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_PAD token: "<|fim_pad|>", "", "", etc.
+            if (special_fim_pad_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_pad|>" // Qwen
+                        || t.first == ""
+                        || t.first == ""
+                        ) {
+                    special_fim_pad_id = t.second;
+                    if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_REP token: "<|fim_repo|>", "", "", etc.
+            if (special_fim_rep_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|fim_repo|>"  // Qwen
+                        || t.first == "<|repo_name|>"
+                        || t.first == ""
+                        || t.first == ""
+                        ) {
+                    special_fim_rep_id = t.second;
+                    if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+
+            // find FIM_SEP token: "<|file_sep|>"
+            if (special_fim_sep_id == LLAMA_TOKEN_NULL) {
+                if (false
+                        || t.first == "<|file_sep|>" // Qwen
+                        ) {
+                    special_fim_sep_id = t.second;
+                    if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                        LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                                __func__, t.second, t.first.c_str());
+                        id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                    }
+                }
+            }
+        }
+
+        // maintain a list of tokens that cause end-of-generation
+        // this is currently determined based on the token text, which is obviously not ideal
+        // ref: https://github.com/ggerganov/llama.cpp/issues/9606
+        special_eog_ids.clear();
+
+        if (special_fim_pad_id != LLAMA_TOKEN_NULL && special_eog_ids.count(special_fim_pad_id) == 0) {
+            special_eog_ids.insert(special_fim_pad_id);
+        }
+
+        if (special_fim_rep_id != LLAMA_TOKEN_NULL && special_eog_ids.count(special_fim_rep_id) == 0) {
+            special_eog_ids.insert(special_fim_rep_id);
+        }
+
+        if (special_fim_sep_id != LLAMA_TOKEN_NULL && special_eog_ids.count(special_fim_sep_id) == 0) {
+            special_eog_ids.insert(special_fim_sep_id);
+        }
+
+        for (const auto & t : token_to_id) {
+            if (false
+                    || t.first == "<|eot_id|>"
+                    || t.first == "<|im_end|>"
+                    || t.first == "<|end|>"
+                    || t.first == ""
+                    || t.first == "<|endoftext|>"
+                    || t.first == "<|eom_id|>"
+                    || t.first == ""
+               ) {
+                special_eog_ids.insert(t.second);
+                if ((id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL) == 0) {
+                    LLAMA_LOG_WARN("%s: control-looking token: %6d '%s' was not control-type; this is probably a bug in the model. its type will be overridden\n",
+                            __func__, t.second, t.first.c_str());
+                    id_to_token[t.second].attr = LLAMA_TOKEN_ATTR_CONTROL;
+                }
+            } else {
+                // token is control, but not marked as EOG -> print a debug log
+                if (id_to_token[t.second].attr & LLAMA_TOKEN_ATTR_CONTROL && special_eog_ids.count(t.second) == 0) {
+                    LLAMA_LOG_DEBUG("%s: control token: %6d '%s' is not marked as EOG\n",
+                            __func__, t.second, t.first.c_str());
+                }
+            }
+        }
+
+        // sanity checks
+        if (special_eos_id != LLAMA_TOKEN_NULL && special_eog_ids.count(special_eos_id) == 0) {
+            special_eog_ids.insert(special_eos_id);
+            LLAMA_LOG_WARN("%s: special_eos_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        }
+
+        if (special_eot_id != LLAMA_TOKEN_NULL && special_eog_ids.count(special_eot_id) == 0) {
+            special_eog_ids.insert(special_eot_id);
+            LLAMA_LOG_WARN("%s: special_eot_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        }
+
+        if (special_eom_id != LLAMA_TOKEN_NULL && special_eog_ids.count(special_eom_id) == 0) {
+            special_eog_ids.insert(special_eom_id);
+            LLAMA_LOG_WARN("%s: special_eom_id is not in special_eog_ids - the tokenizer config may be incorrect\n", __func__);
+        }
+    }
+
+    // build special tokens cache
+    {
+        for (llama_token id = 0; id < (llama_token) n_tokens; ++id) {
+            if (id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED | LLAMA_TOKEN_ATTR_UNKNOWN)) {
+                cache_special_tokens.push_back(id);
+            }
+        }
+
+        std::sort(cache_special_tokens.begin(), cache_special_tokens.end(),
+            [&] (const llama_token a, const llama_token b) {
+                return id_to_token[a].text.size() > id_to_token[b].text.size();
+            }
+        );
+
+        LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t) cache_special_tokens.size());
+    }
+
+    // build token to piece cache
+    {
+        size_t size_cache = 0;
+
+        std::vector cache(n_tokens);
+
+        for (uint32_t id = 0; id < n_tokens; ++id) {
+            cache[id] = token_to_piece_for_cache(id, true);
+
+            size_cache += cache[id].size();
+        }
+
+        std::swap(cache_token_to_piece, cache);
+
+        LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MB\n", __func__, size_cache / 1024.0 / 1024.0);
+    }
+
+    // Handle per token attributes
+    //NOTE: Each model customizes per token attributes.
+    //NOTE: Per token attributes are missing from the GGUF file.
+    //TODO: Extract attributes from GGUF file.
+    {
+        auto _contains_any = [] (const std::string & str, const std::vector & substrs) -> bool {
+            for (const auto & substr : substrs) {
+                if (str.find(substr) < std::string::npos) {
+                    return true;
+                }
+            }
+            return false;
+        };
+
+        auto _set_tokenid_attr = [&] (const llama_token id, llama_token_attr attr, bool value) {
+            uint32_t current = id_to_token.at(id).attr;
+            current = value ? (current | attr) : (current & ~attr);
+            id_to_token[id].attr = (llama_token_attr) current;
+        };
+
+        auto _set_token_attr = [&] (const std::string & token, llama_token_attr attr, bool value) {
+            _set_tokenid_attr(token_to_id.at(token), attr, value);
+        };
+
+        std::string model_name;
+        std::string tokenizer_pre;
+
+        ml.get_key(LLM_KV_GENERAL_NAME,  model_name,    false);
+        ml.get_key(LLM_KV_TOKENIZER_PRE, tokenizer_pre, false);
+
+        // model name to lowercase
+        std::transform(model_name.begin(), model_name.end(), model_name.begin(),
+            [] (const std::string::value_type x) {
+                return std::tolower(x);
+            }
+        );
+
+        // set attributes by model/tokenizer name
+        if (_contains_any(tokenizer_pre, {"jina-v2-de", "jina-v2-es", "jina-v2-code"})) {
+            _set_token_attr("", LLAMA_TOKEN_ATTR_LSTRIP, true);
+        } else if (_contains_any(model_name, {"phi-3", "phi3"})) {
+            for (auto id : cache_special_tokens) {
+                _set_tokenid_attr(id, LLAMA_TOKEN_ATTR_RSTRIP, true);
+            }
+            for (const auto * token : {""}) {
+                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, true);
+            }
+            for (const auto * token : {"", "", "<|endoftext|>"}) {
+                _set_token_attr(token, LLAMA_TOKEN_ATTR_RSTRIP, false);
+            }
+        }
+    }
+}
+
+enum llama_vocab_type llama_vocab::impl::get_type() const {
+    return type;
+}
+
+std::string llama_vocab::impl::type_name() const{
+    switch (type) {
+        case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
+        case LLAMA_VOCAB_TYPE_SPM:  return "SPM";
+        case LLAMA_VOCAB_TYPE_BPE:  return "BPE";
+        case LLAMA_VOCAB_TYPE_WPM:  return "WPM";
+        case LLAMA_VOCAB_TYPE_UGM:  return "UGM";
+        case LLAMA_VOCAB_TYPE_RWKV: return "RWKV";
+        default:                    return "unknown";
+    }
+}
+
+bool llama_vocab::impl::is_normal(llama_token id) const {
+    GGML_ASSERT(type != LLAMA_VOCAB_TYPE_NONE);
+    return id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL;
+}
+
+bool llama_vocab::impl::is_unknown(llama_token id) const {
+    GGML_ASSERT(type != LLAMA_VOCAB_TYPE_NONE);
+    return id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNKNOWN;
+}
+
+bool llama_vocab::impl::is_control(llama_token id) const {
+    GGML_ASSERT(type != LLAMA_VOCAB_TYPE_NONE);
+    return id_to_token[id].attr & LLAMA_TOKEN_ATTR_CONTROL;
+}
+
+bool llama_vocab::impl::is_byte(llama_token id) const {
+    GGML_ASSERT(type != LLAMA_VOCAB_TYPE_NONE);
+    return id_to_token[id].attr & LLAMA_TOKEN_ATTR_BYTE;
+}
+
+bool llama_vocab::impl::is_user_defined(llama_token id) const {
+    GGML_ASSERT(type != LLAMA_VOCAB_TYPE_NONE);
+    return id_to_token[id].attr & LLAMA_TOKEN_ATTR_USER_DEFINED;
+}
+
+bool llama_vocab::impl::is_unused(llama_token id) const {
+    GGML_ASSERT(type != LLAMA_VOCAB_TYPE_NONE);
+    return id_to_token[id].attr & LLAMA_TOKEN_ATTR_UNUSED;
+}
+
+bool llama_vocab::impl::is_eog(llama_token id) const {
+    return id != LLAMA_TOKEN_NULL && special_eog_ids.count(id) > 0;
+}
+
+uint8_t llama_vocab::impl::token_to_byte(llama_token id) const {
+    GGML_ASSERT(get_type() != LLAMA_VOCAB_TYPE_NONE);
+    GGML_ASSERT(is_byte(id));
+    const auto & token_data = id_to_token.at(id);
+    switch (get_type()) {
+        case LLAMA_VOCAB_TYPE_SPM:
+        case LLAMA_VOCAB_TYPE_UGM: {
+            auto buf = token_data.text.substr(3, 2);
+            return strtol(buf.c_str(), NULL, 16);
+        }
+        case LLAMA_VOCAB_TYPE_BPE: {
+            GGML_ABORT("fatal error");
+        }
+        case LLAMA_VOCAB_TYPE_WPM: {
+            GGML_ABORT("fatal error");
+        }
+        default:
+            GGML_ABORT("fatal error");
+    }
+}
+
+llama_token_attr llama_vocab::impl::token_get_attr(llama_token id) const {
+    GGML_ASSERT(type != LLAMA_VOCAB_TYPE_NONE);
+    return id_to_token.at(id).attr;
+}
+
+void llama_vocab::impl::init_tokenizer(enum llama_vocab_type type) {
+    LLAMA_LOG_DEBUG("%s: initializing tokenizer for type %d\n", __func__, type);
+
+    switch (type) {
+        case LLAMA_VOCAB_TYPE_SPM:
+            tokenizer = std::make_unique(vocab);
+            break;
+        case LLAMA_VOCAB_TYPE_BPE:
+            tokenizer = std::make_unique(vocab);
+            break;
+        case LLAMA_VOCAB_TYPE_WPM:
+            tokenizer = std::make_unique(vocab);
+            break;
+        case LLAMA_VOCAB_TYPE_UGM:
+            tokenizer = std::make_unique(vocab, precompiled_charsmap);
+            break;
+        case LLAMA_VOCAB_TYPE_RWKV:
+            tokenizer = std::make_unique(vocab);
+            break;
+        default:
+            GGML_ABORT("unsupported vocab type");
+    }
+}
+
+//
+// (de-) tokenize
+//
+
 // #define PRETOKENIZERDEBUG
 
-static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list & buffer, bool parse_special) {
+void llama_vocab::impl::tokenizer_st_partition(std::forward_list & buffer, bool parse_special) const {
     // for each special token
-    for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
-        const auto & data = vocab.id_to_token[special_id];
-        const auto & special_token = data.text;
+    for (const llama_token special_id : cache_special_tokens) {
+        const auto & data = vocab.get_token_data(special_id);
+        const auto & text = data.text;
 
         if (!parse_special && (data.attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_UNKNOWN))) {
             // Ignore control and unknown tokens when parse_special == false
@@ -1339,13 +2177,13 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                     // find the first occurrence of a given special token in this fragment
                     //  passing offset argument only limit the "search area" but match coordinates
                     //  are still relative to the source full raw_text
-                    auto match = raw_text.find(special_token, raw_text_base_offset);
+                    auto match = raw_text.find(text, raw_text_base_offset);
 
                     // no occurrences found, stop processing this fragment for a given special token
                     if (match == std::string::npos) break;
 
                     // check if match is within bounds of offset <-> length
-                    if (match + special_token.length() > raw_text_base_offset + raw_text_base_length) break;
+                    if (match + text.length() > raw_text_base_offset + raw_text_base_length) break;
 
 #ifdef PRETOKENIZERDEBUG
                     LLAMA_LOG_WARN("FF: (%ld %ld %ld) '%s'\n", raw_text->length(), raw_text_base_offset, raw_text_base_length, raw_text->substr(raw_text_base_offset, raw_text_base_length).c_str());
@@ -1380,9 +2218,9 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
                     it++;
 
                     // right
-                    if (match + special_token.length() < raw_text_base_offset + raw_text_base_length) {
-                        int64_t right_reminder_offset = match + special_token.length();
-                        int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + special_token.length());
+                    if (match + text.length() < raw_text_base_offset + raw_text_base_length) {
+                        int64_t right_reminder_offset = match + text.length();
+                        int64_t right_reminder_length = raw_text_base_length - ((match - raw_text_base_offset) + text.length());
 
                         if (data.attr & LLAMA_TOKEN_ATTR_RSTRIP) {
                             while (right_reminder_length > 0 && isspace(raw_text[right_reminder_offset])) {
@@ -1428,322 +2266,29 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
     }
 }
 
-std::vector llama_tokenize_internal(
-        const llama_vocab & vocab,
-        std::string raw_text,
-        bool add_special,
-        bool parse_special) {
-    GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
-
-    std::vector output;
-    std::forward_list fragment_buffer;
-
-    if (!raw_text.empty()) {
-        fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
-        tokenizer_st_partition(vocab, fragment_buffer, parse_special);
+// NOTE: avoid ever using this except for building the token_to_piece caches
+std::string llama_vocab::impl::token_to_piece_for_cache(llama_token token, bool special) const {
+    std::string piece;
+    piece.resize(piece.capacity());  // using string internal cache
+    const int n_chars = vocab.token_to_piece(token, &piece[0], piece.size(), 0, special);
+    if (n_chars < 0) {
+        piece.resize(-n_chars);
+        int check = vocab.token_to_piece(token, &piece[0], piece.size(), 0, special);
+        GGML_ASSERT(check == -n_chars);
+    }
+    else {
+        piece.resize(n_chars);
     }
 
-    switch (vocab.type) {
-        case LLAMA_VOCAB_TYPE_SPM:
-            {
-                // OG tokenizer behavior:
-                //
-                // tokenizer.encode('', add_special_tokens=True)  returns [1]
-                // tokenizer.encode('', add_special_tokens=False) returns []
-
-                bool is_prev_special = true;  // prefix with space if first token
-
-                if (add_special && vocab.tokenizer_add_bos) {
-                    GGML_ASSERT(vocab.special_bos_id != LLAMA_TOKEN_NULL);
-                    output.push_back(vocab.special_bos_id);
-                    is_prev_special = true;
-                }
-
-                for (const auto & fragment : fragment_buffer) {
-                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
-                        auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
-
-                        // prefix with space if previous is special
-                        if (vocab.tokenizer_add_space_prefix && is_prev_special) {
-                            raw_text = " " + raw_text;
-                        }
-
-#ifdef PRETOKENIZERDEBUG
-                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
-#endif
-                        llama_escape_whitespace(raw_text);
-                        llm_tokenizer_spm_session session(vocab);
-                        session.tokenize(raw_text, output);
-                        is_prev_special = false;
-                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
-                        output.push_back(fragment.token);
-                        is_prev_special = true;
-                    }
-                }
-
-                if (add_special && vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
-                    LLAMA_LOG_WARN(
-                        "%s: Added a BOS token to the prompt as specified by the model but the prompt "
-                        "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
-                        "Are you sure this is what you want?\n", __FUNCTION__);
-                }
-
-                if (add_special && vocab.tokenizer_add_eos) {
-                    GGML_ASSERT(vocab.special_eos_id != LLAMA_TOKEN_NULL);
-                    output.push_back(vocab.special_eos_id);
-                }
-            } break;
-        case LLAMA_VOCAB_TYPE_BPE:
-            {
-                llm_tokenizer_bpe_session session(vocab);
-                // it calls some other methods that are not exist in llm_tokenizer,
-                // here just cast it to bpe tokenizer object
-                if (add_special) {
-                    session.append_bos(output);
-                }
-                for (const auto & fragment : fragment_buffer) {
-                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
-                        auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
-
-#ifdef PRETOKENIZERDEBUG
-                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
-#endif
-                        session.tokenize(raw_text, output);
-                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
-                        session.append(fragment.token, output);
-                    }
-                }
-
-                if (add_special) {
-                    session.append_eos(output);
-                    session.check_double_bos_eos(output);
-                }
-            } break;
-        case LLAMA_VOCAB_TYPE_WPM:
-            {
-                if (add_special) {
-                    GGML_ASSERT(vocab.special_cls_id != LLAMA_TOKEN_NULL);
-                    output.push_back(vocab.special_cls_id);
-                }
-
-                llm_tokenizer_wpm_session session(vocab);
-
-                for (const auto & fragment : fragment_buffer) {
-                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
-                        auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
-
-#ifdef PRETOKENIZERDEBUG
-                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
-#endif
-                        session.tokenize(raw_text, output);
-                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
-                        output.push_back(fragment.token);
-                    }
-                }
-
-                if (add_special) {
-                    GGML_ASSERT(vocab.special_sep_id != LLAMA_TOKEN_NULL);
-                    output.push_back(vocab.special_sep_id);
-                }
-            } break;
-        case LLAMA_VOCAB_TYPE_UGM:
-            {
-                if (add_special && vocab.tokenizer_add_bos) {
-                    GGML_ASSERT(vocab.special_bos_id != LLAMA_TOKEN_NULL);
-                    output.push_back(vocab.special_bos_id);
-                }
-                llm_tokenizer_ugm_session session(vocab);
-
-                for (const auto & fragment : fragment_buffer) {
-                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
-                        auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
-#ifdef PRETOKENIZERDEBUG
-                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
-#endif
-                        session.tokenize(raw_text, output);
-                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
-                        output.push_back(fragment.token);
-                    }
-                }
-
-                if (add_special && vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
-                    LLAMA_LOG_WARN(
-                        "%s: Added a BOS token to the prompt as specified by the model but the prompt "
-                        "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
-                        "Are you sure this is what you want?\n", __FUNCTION__);
-                }
-
-                if (add_special && vocab.tokenizer_add_eos) {
-                    GGML_ASSERT(vocab.special_eos_id != LLAMA_TOKEN_NULL);
-                    output.push_back(vocab.special_eos_id);
-                }
-            } break;
-        case LLAMA_VOCAB_TYPE_RWKV:
-            {
-                llm_tokenizer_rwkv_session session(vocab);
-                for (const auto & fragment : fragment_buffer) {
-                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
-                        auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
-
-#ifdef PRETOKENIZERDEBUG
-                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
-#endif
-
-                        session.tokenize(raw_text, output);
-                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
-                        output.push_back(fragment.token);
-                    }
-                }
-            } break;
-        case LLAMA_VOCAB_TYPE_NONE:
-            GGML_ABORT("fatal error");
-    }
-
-    return output;
+    return piece;
 }
 
-llama_token llama_byte_to_token_impl(const llama_vocab & vocab, uint8_t ch) {
-    GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
-    static const char * hex = "0123456789ABCDEF";
-    switch (llama_vocab_get_type(vocab)) {
-        case LLAMA_VOCAB_TYPE_SPM:
-        case LLAMA_VOCAB_TYPE_UGM: {
-            const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
-            auto token = vocab.token_to_id.find(buf);
-            if (token != vocab.token_to_id.end()) {
-                return (*token).second;
-            }
-            // Try to fall back to just the byte as a string
-            const char buf2[2] = { (char)ch, 0 };
-            return vocab.token_to_id.at(buf2);
-        }
-        case LLAMA_VOCAB_TYPE_WPM:
-        case LLAMA_VOCAB_TYPE_BPE: {
-            return vocab.token_to_id.at(unicode_byte_to_utf8(ch));
-        }
-        default:
-            GGML_ABORT("fatal error");
-    }
+static void llama_escape_whitespace(std::string & text) {
+    replace_all(text, " ", "\xe2\x96\x81");
 }
 
-const char * llama_token_get_text_impl(const struct llama_vocab & vocab, llama_token token) {
-    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-    return vocab.id_to_token[token].text.c_str();
-}
-
-float llama_token_get_score_impl(const struct llama_vocab & vocab, llama_token token) {
-    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-    return vocab.id_to_token[token].score;
-}
-
-llama_token_attr llama_token_get_attr_impl(const struct llama_vocab & vocab, llama_token token) {
-    GGML_ASSERT(vocab.type != LLAMA_VOCAB_TYPE_NONE);
-    return vocab.id_to_token[token].attr;
-}
-
-bool llama_token_is_eog_impl(const struct llama_vocab & vocab, llama_token token) {
-    return token != LLAMA_TOKEN_NULL && vocab.special_eog_ids.count(token) > 0;
-}
-
-bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token token) {
-    return llama_is_control_token(vocab, token);
-}
-
-llama_token llama_token_bos_impl(const struct llama_vocab & vocab) {
-    return vocab.type != LLAMA_VOCAB_TYPE_WPM ? vocab.special_bos_id : vocab.special_cls_id;
-}
-
-llama_token llama_token_eos_impl(const struct llama_vocab & vocab) {
-    return vocab.special_eos_id;
-}
-
-llama_token llama_token_eot_impl(const struct llama_vocab & vocab) {
-    return vocab.special_eot_id;
-}
-
-llama_token llama_token_eom_impl(const struct llama_vocab & vocab) {
-    return vocab.special_eom_id;
-}
-
-llama_token llama_token_cls_impl(const struct llama_vocab & vocab) {
-    return vocab.special_cls_id;
-}
-
-llama_token llama_token_sep_impl(const struct llama_vocab & vocab) {
-    return vocab.special_sep_id;
-}
-
-llama_token llama_token_nl_impl(const struct llama_vocab & vocab) {
-    return vocab.linefeed_id;
-}
-
-llama_token llama_token_pad_impl(const struct llama_vocab & vocab) {
-    return vocab.special_pad_id;
-}
-
-bool llama_add_bos_token_impl(const struct llama_vocab & vocab) {
-    return vocab.tokenizer_add_bos;
-}
-
-bool llama_add_eos_token_impl(const struct llama_vocab & vocab) {
-    return vocab.tokenizer_add_eos;
-}
-
-llama_token llama_token_prefix_impl(const struct llama_vocab & vocab) {
-    return vocab.special_fim_pre_id;
-}
-
-llama_token llama_token_middle_impl(const struct llama_vocab & vocab) {
-    return vocab.special_fim_mid_id;
-}
-
-llama_token llama_token_suffix_impl(const struct llama_vocab & vocab) {
-    return vocab.special_fim_suf_id;
-}
-
-llama_token llama_token_fim_pre_impl(const struct llama_vocab & vocab) {
-    return vocab.special_fim_pre_id;
-}
-
-llama_token llama_token_fim_suf_impl(const struct llama_vocab & vocab) {
-    return vocab.special_fim_suf_id;
-}
-
-llama_token llama_token_fim_mid_impl(const struct llama_vocab & vocab) {
-    return vocab.special_fim_mid_id;
-}
-
-llama_token llama_token_fim_pad_impl(const struct llama_vocab & vocab) {
-    return vocab.special_fim_pad_id;
-}
-
-llama_token llama_token_fim_rep_impl(const struct llama_vocab & vocab) {
-    return vocab.special_fim_rep_id;
-}
-
-llama_token llama_token_fim_sep_impl(const struct llama_vocab & vocab) {
-    return vocab.special_fim_sep_id;
-}
-
-int32_t llama_tokenize_impl(
-        const struct llama_vocab & vocab,
-                      const char * text,
-                         int32_t   text_len,
-                     llama_token * tokens,
-                         int32_t   n_tokens_max,
-                            bool   add_special,
-                            bool   parse_special) {
-    auto res = llama_tokenize_internal(vocab, std::string(text, text_len), add_special, parse_special);
-    if (n_tokens_max < (int) res.size()) {
-        // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
-        return -((int) res.size());
-    }
-
-    for (size_t i = 0; i < res.size(); i++) {
-        tokens[i] = res[i];
-    }
-
-    return res.size();
+static void llama_unescape_whitespace(std::string & word) {
+    replace_all(word, "\xe2\x96\x81", " ");
 }
 
 static std::string llama_decode_text(const std::string & text) {
@@ -1766,11 +2311,185 @@ static std::string llama_decode_text(const std::string & text) {
     return decoded_text;
 }
 
-// does not write null-terminator to buf
-int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token token, char * buf, int32_t length, int32_t lstrip, bool special) {
+std::vector llama_vocab::impl::tokenize(
+        const std::string & raw_text,
+        bool add_special,
+        bool parse_special) const {
+    GGML_ASSERT(tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
+
+    std::vector output;
+    std::forward_list fragment_buffer;
+
+    if (!raw_text.empty()) {
+        fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
+        tokenizer_st_partition(fragment_buffer, parse_special);
+    }
+
+    switch (get_type()) {
+        case LLAMA_VOCAB_TYPE_SPM:
+            {
+                // OG tokenizer behavior:
+                //
+                // tokenizer.encode('', add_special_tokens=True)  returns [1]
+                // tokenizer.encode('', add_special_tokens=False) returns []
+
+                bool is_prev_special = true;  // prefix with space if first token
+
+                if (add_special && add_bos) {
+                    GGML_ASSERT(special_bos_id != LLAMA_TOKEN_NULL);
+                    output.push_back(special_bos_id);
+                    is_prev_special = true;
+                }
+
+                for (const auto & fragment : fragment_buffer) {
+                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
+                        std::string text;
+
+                        // prefix with space if previous is special
+                        if (add_space_prefix && is_prev_special) {
+                            text = ' ';
+                        }
+
+                        text += fragment.raw_text.substr(fragment.offset, fragment.length);
+
+#ifdef PRETOKENIZERDEBUG
+                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", text.length(), fragment.offset, fragment.length, text.c_str());
+#endif
+                        llama_escape_whitespace(text);
+                        llm_tokenizer_spm_session session(vocab);
+                        session.tokenize(text, output);
+                        is_prev_special = false;
+                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
+                        output.push_back(fragment.token);
+                        is_prev_special = true;
+                    }
+                }
+
+                if (add_special && add_bos && output.size() >= 2 && output[1] == special_bos_id) {
+                    LLAMA_LOG_WARN(
+                        "%s: Added a BOS token to the prompt as specified by the model but the prompt "
+                        "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
+                        "Are you sure this is what you want?\n", __FUNCTION__);
+                }
+
+                if (add_special && add_eos) {
+                    GGML_ASSERT(special_eos_id != LLAMA_TOKEN_NULL);
+                    output.push_back(special_eos_id);
+                }
+            } break;
+        case LLAMA_VOCAB_TYPE_BPE:
+            {
+                llm_tokenizer_bpe_session session(vocab, *static_cast(tokenizer.get()));
+                // it calls some other methods that are not exist in llm_tokenizer,
+                // here just cast it to bpe tokenizer object
+                if (add_special) {
+                    session.append_bos(output);
+                }
+                for (const auto & fragment : fragment_buffer) {
+                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
+                        std::string text = fragment.raw_text.substr(fragment.offset, fragment.length);
+
+#ifdef PRETOKENIZERDEBUG
+                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", text.length(), fragment.offset, fragment.length, text.c_str());
+#endif
+                        session.tokenize(text, output);
+                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
+                        session.append(fragment.token, output);
+                    }
+                }
+
+                if (add_special) {
+                    session.append_eos(output);
+                    session.check_double_bos_eos(output);
+                }
+            } break;
+        case LLAMA_VOCAB_TYPE_WPM:
+            {
+                if (add_special) {
+                    GGML_ASSERT(special_cls_id != LLAMA_TOKEN_NULL);
+                    output.push_back(special_cls_id);
+                }
+
+                llm_tokenizer_wpm_session session(vocab);
+
+                for (const auto & fragment : fragment_buffer) {
+                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
+                        std::string text = fragment.raw_text.substr(fragment.offset, fragment.length);
+
+#ifdef PRETOKENIZERDEBUG
+                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", text.length(), fragment.offset, fragment.length, text.c_str());
+#endif
+                        session.tokenize(text, output);
+                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
+                        output.push_back(fragment.token);
+                    }
+                }
+
+                if (add_special) {
+                    GGML_ASSERT(special_sep_id != LLAMA_TOKEN_NULL);
+                    output.push_back(special_sep_id);
+                }
+            } break;
+        case LLAMA_VOCAB_TYPE_UGM:
+            {
+                if (add_special && add_bos) {
+                    GGML_ASSERT(special_bos_id != LLAMA_TOKEN_NULL);
+                    output.push_back(special_bos_id);
+                }
+                llm_tokenizer_ugm_session session(vocab, *static_cast(tokenizer.get()));
+
+                for (const auto & fragment : fragment_buffer) {
+                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
+                        std::string text = fragment.raw_text.substr(fragment.offset, fragment.length);
+#ifdef PRETOKENIZERDEBUG
+                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", text.length(), fragment.offset, fragment.length, text.c_str());
+#endif
+                        session.tokenize(text, output);
+                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
+                        output.push_back(fragment.token);
+                    }
+                }
+
+                if (add_special && add_bos && output.size() >= 2 && output[1] == special_bos_id) {
+                    LLAMA_LOG_WARN(
+                        "%s: Added a BOS token to the prompt as specified by the model but the prompt "
+                        "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
+                        "Are you sure this is what you want?\n", __FUNCTION__);
+                }
+
+                if (add_special && add_eos) {
+                    GGML_ASSERT(special_eos_id != LLAMA_TOKEN_NULL);
+                    output.push_back(special_eos_id);
+                }
+            } break;
+        case LLAMA_VOCAB_TYPE_RWKV:
+            {
+                llm_tokenizer_rwkv_session session(vocab, *static_cast(tokenizer.get()));
+                for (const auto & fragment : fragment_buffer) {
+                    if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
+                        std::string text = fragment.raw_text.substr(fragment.offset, fragment.length);
+
+#ifdef PRETOKENIZERDEBUG
+                        LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", text.length(), fragment.offset, fragment.length, text.c_str());
+#endif
+
+                        session.tokenize(text, output);
+                    } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
+                        output.push_back(fragment.token);
+                    }
+                }
+            } break;
+        case LLAMA_VOCAB_TYPE_NONE:
+            GGML_ABORT("fatal error");
+    }
+
+    return output;
+}
+
+int32_t llama_vocab::impl::token_to_piece(llama_token token, char * buf, int32_t length, int32_t lstrip, bool special) const {
     // ref: https://github.com/ggerganov/llama.cpp/pull/7587#discussion_r1620983843
     static const int attr_special = LLAMA_TOKEN_ATTR_UNKNOWN | LLAMA_TOKEN_ATTR_CONTROL;
-    const llama_token_attr attr = llama_token_get_attr_impl(vocab, token);
+    const llama_token_attr attr = token_get_attr(token);
     if (!special && (attr & attr_special)) {
         return 0;
     }
@@ -1791,7 +2510,7 @@ int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token
 
     // if we have a cache - use it
     {
-        const auto & cache = vocab.cache_token_to_piece;
+        const auto & cache = cache_token_to_piece;
 
         if (!cache.empty()) {
             const auto & result = cache.at(token);
@@ -1799,9 +2518,9 @@ int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token
         }
     }
 
-    if (0 <= token && token < (int32_t) vocab.id_to_token.size()) {
-        const std::string & token_text = vocab.id_to_token[token].text;
-        switch (llama_vocab_get_type(vocab)) {
+    if (0 <= token && token < (int32_t) id_to_token.size()) {
+        const std::string & token_text = id_to_token[token].text;
+        switch (get_type()) {
             case LLAMA_VOCAB_TYPE_WPM:
             case LLAMA_VOCAB_TYPE_SPM:
             case LLAMA_VOCAB_TYPE_UGM: {
@@ -1816,7 +2535,7 @@ int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token
                     return _try_copy(result.data(), result.size());
                 }
                 if (attr & LLAMA_TOKEN_ATTR_BYTE) {
-                    char byte = (char) llama_token_to_byte(vocab, token);
+                    char byte = (char) token_to_byte(token);
                     return _try_copy((char*) &byte, 1);
                 }
                 break;
@@ -1852,43 +2571,46 @@ int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token
     return 0;
 }
 
-int32_t llama_detokenize_impl(
-        const struct llama_vocab & vocab,
+const std::string & llama_vocab::impl::token_to_piece(llama_token token) const {
+    return cache_token_to_piece.at(token);
+}
+
+int32_t llama_vocab::impl::detokenize(
                const llama_token * tokens,
                          int32_t   n_tokens,
                             char * text,
                          int32_t   text_len_max,
                             bool   remove_special,
-                            bool   unparse_special) {
-    if (vocab.type == LLAMA_VOCAB_TYPE_NONE) {
+                            bool   unparse_special) const {
+    if (type == LLAMA_VOCAB_TYPE_NONE) {
         return 0;
     }
 
-    GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
+    GGML_ASSERT(tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
 
     int32_t avail = text_len_max;
     int32_t total = 0;
 
     // remove the leading space
-    bool remove_space = vocab.tokenizer_add_space_prefix;
+    bool remove_space = add_space_prefix;
 
-    if (remove_special && vocab.tokenizer_add_bos) {
-        if (n_tokens > 0 && tokens[0] == vocab.special_bos_id) {
+    if (remove_special && add_bos) {
+        if (n_tokens > 0 && tokens[0] == special_bos_id) {
             remove_space = false;
             n_tokens--;
             tokens++;
         }
     }
 
-    if (remove_special && vocab.tokenizer_add_eos) {
-        if (n_tokens > 0 && tokens[n_tokens - 1] == vocab.special_eos_id) {
+    if (remove_special && add_eos) {
+        if (n_tokens > 0 && tokens[n_tokens - 1] == special_eos_id) {
             n_tokens--;
         }
     }
 
     for (int32_t i = 0; i < n_tokens; ++i) {
         GGML_ASSERT(avail >= 0);
-        int32_t n_chars = llama_token_to_piece_impl(vocab, tokens[i], text, avail, remove_space, unparse_special);
+        int32_t n_chars = token_to_piece(tokens[i], text, avail, remove_space, unparse_special);
         remove_space = false;
         if (n_chars < 0) {
             avail = 0;
@@ -1904,7 +2626,7 @@ int32_t llama_detokenize_impl(
         return -total;
     }
 
-    if (vocab.tokenizer_clean_spaces) {
+    if (clean_spaces) {
         text -= total;  // restart text
 
         // first pass: characters ?!.,  //TODO: where do these characters come from?
@@ -1965,13 +2687,326 @@ int32_t llama_detokenize_impl(
     return total <= text_len_max ? total : -total;
 }
 
-std::string llama_detokenize(const struct llama_vocab & vocab, const std::vector & tokens, bool special) {
+void llama_vocab::impl::print_info() const {
+    LLAMA_LOG_INFO("%s: vocab type       = %s\n",     __func__, type_name().c_str());
+    LLAMA_LOG_INFO("%s: n_vocab          = %u\n",     __func__, vocab.n_tokens());
+    LLAMA_LOG_INFO("%s: n_merges         = %u\n",     __func__, (uint32_t) bpe_ranks.size());
+
+    // special tokens
+    if (special_bos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: BOS token        = %d '%s'\n", __func__, special_bos_id,     id_to_token[special_bos_id].text.c_str() );  }
+    if (special_eos_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOS token        = %d '%s'\n", __func__, special_eos_id,     id_to_token[special_eos_id].text.c_str() );  }
+    if (special_eot_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOT token        = %d '%s'\n", __func__, special_eot_id,     id_to_token[special_eot_id].text.c_str() );  }
+    if (special_eom_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: EOM token        = %d '%s'\n", __func__, special_eom_id,     id_to_token[special_eom_id].text.c_str() );  }
+    if (special_unk_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, special_unk_id,     id_to_token[special_unk_id].text.c_str() );  }
+    if (special_sep_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, special_sep_id,     id_to_token[special_sep_id].text.c_str() );  }
+    if (special_pad_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, special_pad_id,     id_to_token[special_pad_id].text.c_str() );  }
+    if (special_cls_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, special_cls_id,     id_to_token[special_cls_id].text.c_str() );  }
+    if (special_mask_id != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, special_mask_id,    id_to_token[special_mask_id].text.c_str() ); }
+
+    if (linefeed_id != LLAMA_TOKEN_NULL)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, linefeed_id,        id_to_token[linefeed_id].text.c_str() ); }
+
+    if (special_fim_pre_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PRE token    = %d '%s'\n", __func__, special_fim_pre_id, id_to_token[special_fim_pre_id].text.c_str() ); }
+    if (special_fim_suf_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SUF token    = %d '%s'\n", __func__, special_fim_suf_id, id_to_token[special_fim_suf_id].text.c_str() ); }
+    if (special_fim_mid_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM MID token    = %d '%s'\n", __func__, special_fim_mid_id, id_to_token[special_fim_mid_id].text.c_str() ); }
+    if (special_fim_pad_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM PAD token    = %d '%s'\n", __func__, special_fim_pad_id, id_to_token[special_fim_pad_id].text.c_str() ); }
+    if (special_fim_rep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM REP token    = %d '%s'\n", __func__, special_fim_rep_id, id_to_token[special_fim_rep_id].text.c_str() ); }
+    if (special_fim_sep_id != LLAMA_TOKEN_NULL) { LLAMA_LOG_INFO( "%s: FIM SEP token    = %d '%s'\n", __func__, special_fim_sep_id, id_to_token[special_fim_sep_id].text.c_str() ); }
+
+    for (const auto & id : special_eog_ids) {
+        LLAMA_LOG_INFO( "%s: EOG token        = %d '%s'\n", __func__, id, id_to_token[id].text.c_str() );
+    }
+
+    LLAMA_LOG_INFO("%s: max token length = %d\n", __func__, max_token_len);
+}
+
+llama_vocab::llama_vocab() : pimpl(new impl(*this)) {
+}
+
+llama_vocab::~llama_vocab() {
+}
+
+void llama_vocab::load(llama_model_loader & ml, const LLM_KV & kv) {
+    pimpl->load(ml, kv);
+}
+
+enum llama_vocab_type llama_vocab::get_type() const {
+    return pimpl->type;
+}
+
+enum llama_vocab_pre_type llama_vocab::get_pre_type() const {
+    return pimpl->pre_type;
+}
+
+uint32_t llama_vocab::n_tokens() const {
+    return (uint32_t) pimpl->id_to_token.size();
+}
+
+uint32_t llama_vocab::n_token_types() const {
+    return (uint32_t) pimpl->n_token_types;
+}
+
+std::string llama_vocab::type_name() const{
+    return pimpl->type_name();
+}
+
+bool llama_vocab::is_normal(llama_token id) const {
+    return pimpl->is_normal(id);
+}
+
+bool llama_vocab::is_unknown(llama_token id) const {
+    return pimpl->is_unknown(id);
+}
+
+bool llama_vocab::is_control(llama_token id) const {
+    return pimpl->is_control(id);
+}
+
+bool llama_vocab::is_byte(llama_token id) const {
+    return pimpl->is_byte(id);
+}
+
+bool llama_vocab::is_user_defined(llama_token id) const {
+    return pimpl->is_user_defined(id);
+}
+
+bool llama_vocab::is_unused(llama_token id) const {
+    return pimpl->is_unused(id);
+}
+
+bool llama_vocab::is_eog(llama_token id) const {
+    return pimpl->is_eog(id);
+}
+
+uint8_t llama_vocab::token_to_byte(llama_token id) const {
+    return pimpl->token_to_byte(id);
+}
+
+llama_token llama_vocab::byte_to_token(uint8_t ch) const {
+    GGML_ASSERT(get_type() != LLAMA_VOCAB_TYPE_NONE);
+    static const char * hex = "0123456789ABCDEF";
+    switch (get_type()) {
+        case LLAMA_VOCAB_TYPE_SPM:
+        case LLAMA_VOCAB_TYPE_UGM: {
+            const char buf[7] = { '<', '0', 'x', hex[ch >> 4], hex[ch & 15], '>', 0 };
+            auto token = pimpl->token_to_id.find(buf);
+            if (token != pimpl->token_to_id.end()) {
+                return (*token).second;
+            }
+            // Try to fall back to just the byte as a string
+            const char buf2[2] = { (char)ch, 0 };
+            return pimpl->token_to_id.at(buf2);
+        }
+        case LLAMA_VOCAB_TYPE_WPM:
+        case LLAMA_VOCAB_TYPE_BPE: {
+            return pimpl->token_to_id.at(unicode_byte_to_utf8(ch));
+        }
+        default:
+            GGML_ABORT("fatal error");
+    }
+}
+
+llama_token llama_vocab::text_to_token(const std::string & text) const {
+    GGML_ASSERT(pimpl->type != LLAMA_VOCAB_TYPE_NONE);
+    auto it = pimpl->token_to_id.find(text);
+    if (it != pimpl->token_to_id.end()) {
+        return (*it).second;
+    }
+    return LLAMA_TOKEN_NULL;
+}
+
+const llama_vocab::token_data & llama_vocab::get_token_data(llama_token id) const {
+    GGML_ASSERT(pimpl->type != LLAMA_VOCAB_TYPE_NONE);
+    return pimpl->id_to_token.at(id);
+}
+
+const char * llama_vocab::token_get_text(llama_token id) const {
+    GGML_ASSERT(pimpl->type != LLAMA_VOCAB_TYPE_NONE);
+    return pimpl->id_to_token.at(id).text.c_str();
+}
+
+float llama_vocab::token_get_score(llama_token id) const {
+    GGML_ASSERT(pimpl->type != LLAMA_VOCAB_TYPE_NONE);
+    return pimpl->id_to_token.at(id).score;
+}
+
+llama_token_attr llama_vocab::token_get_attr(llama_token id) const {
+    return pimpl->token_get_attr(id);
+}
+
+llama_token llama_vocab::token_bos() const {
+    return pimpl->type != LLAMA_VOCAB_TYPE_WPM ? pimpl->special_bos_id : pimpl->special_cls_id;
+}
+
+llama_token llama_vocab::token_eos() const {
+    return pimpl->special_eos_id;
+}
+
+llama_token llama_vocab::token_eot() const {
+    return pimpl->special_eot_id;
+}
+
+llama_token llama_vocab::token_eom() const {
+    return pimpl->special_eom_id;
+}
+
+llama_token llama_vocab::token_unk() const {
+    return pimpl->special_unk_id;
+}
+
+llama_token llama_vocab::token_cls() const {
+    return pimpl->special_cls_id;
+}
+
+llama_token llama_vocab::token_sep() const {
+    return pimpl->special_sep_id;
+}
+
+llama_token llama_vocab::token_nl() const {
+    return pimpl->linefeed_id;
+}
+
+llama_token llama_vocab::token_pad() const {
+    return pimpl->special_pad_id;
+}
+
+llama_token llama_vocab::token_prefix() const {
+    return pimpl->special_fim_pre_id;
+}
+
+llama_token llama_vocab::token_middle() const {
+    return pimpl->special_fim_mid_id;
+}
+
+llama_token llama_vocab::token_suffix() const {
+    return pimpl->special_fim_suf_id;
+}
+
+llama_token llama_vocab::token_fim_pre() const {
+    return pimpl->special_fim_pre_id;
+}
+
+llama_token llama_vocab::token_fim_suf() const {
+    return pimpl->special_fim_suf_id;
+}
+
+llama_token llama_vocab::token_fim_mid() const {
+    return pimpl->special_fim_mid_id;
+}
+
+llama_token llama_vocab::token_fim_pad() const {
+    return pimpl->special_fim_pad_id;
+}
+
+llama_token llama_vocab::token_fim_rep() const {
+    return pimpl->special_fim_rep_id;
+}
+
+llama_token llama_vocab::token_fim_sep() const {
+    return pimpl->special_fim_sep_id;
+}
+
+bool llama_vocab::get_add_space_prefix() const {
+    return pimpl->add_space_prefix;
+}
+
+bool llama_vocab::get_add_bos() const {
+    return pimpl->add_bos;
+}
+
+bool llama_vocab::get_add_eos() const {
+    return pimpl->add_eos;
+}
+
+bool llama_vocab::get_ignore_merges() const {
+    return pimpl->ignore_merges;
+}
+
+bool llama_vocab::get_clean_spaces() const {
+    return pimpl->clean_spaces;
+}
+
+bool llama_vocab::get_remove_extra_whitespaces() const {
+    return pimpl->remove_extra_whitespaces;
+}
+
+bool llama_vocab::get_escape_whitespaces() const {
+    return pimpl->escape_whitespaces;
+}
+
+bool llama_vocab::get_treat_whitespace_as_suffix() const {
+    return pimpl->treat_whitespace_as_suffix;
+}
+
+int llama_vocab::max_token_len() const {
+    return pimpl->max_token_len;
+}
+
+int llama_vocab::find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
+    GGML_ASSERT(token_left.find(' ')   == std::string::npos);
+    GGML_ASSERT(token_left.find('\n')  == std::string::npos);
+    GGML_ASSERT(token_right.find(' ')  == std::string::npos);
+    GGML_ASSERT(token_right.find('\n') == std::string::npos);
+
+    auto it = pimpl->bpe_ranks.find(std::make_pair(token_left, token_right));
+    if (it == pimpl->bpe_ranks.end()) {
+        return -1;
+    }
+
+    return it->second;
+}
+
+int32_t llama_vocab::tokenize(
+                  const char * text,
+                     int32_t   text_len,
+                 llama_token * tokens,
+                     int32_t   n_tokens_max,
+                        bool   add_special,
+                        bool   parse_special) const {
+    auto res = tokenize(std::string(text, text_len), add_special, parse_special);
+    if (n_tokens_max < (int) res.size()) {
+        // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
+        return -((int) res.size());
+    }
+
+    for (size_t i = 0; i < res.size(); i++) {
+        tokens[i] = res[i];
+    }
+
+    return res.size();
+}
+
+std::vector llama_vocab::tokenize(
+        const std::string & raw_text,
+        bool add_special,
+        bool parse_special) const {
+    return pimpl->tokenize(raw_text, add_special, parse_special);
+}
+
+const std::string & llama_vocab::token_to_piece(llama_token token) const {
+    return pimpl->token_to_piece(token);
+}
+
+int32_t llama_vocab::token_to_piece(llama_token token, char * buf, int32_t length, int32_t lstrip, bool special) const {
+    return pimpl->token_to_piece(token, buf, length, lstrip, special);
+}
+
+int32_t llama_vocab::detokenize(
+               const llama_token * tokens,
+                         int32_t   n_tokens,
+                            char * text,
+                         int32_t   text_len_max,
+                            bool   remove_special,
+                            bool   unparse_special) const {
+    return pimpl->detokenize(tokens, n_tokens, text, text_len_max, remove_special, unparse_special);
+}
+
+std::string llama_vocab::detokenize(const std::vector & tokens, bool special) const {
     std::string text;
     text.resize(std::max(text.capacity(), tokens.size()));
-    int32_t n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
+    int32_t n_chars = detokenize(tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
     if (n_chars < 0) {
         text.resize(-n_chars);
-        n_chars = llama_detokenize_impl(vocab, tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
+        n_chars = detokenize(tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
         GGML_ASSERT(n_chars <= (int32_t)text.size());  // whitespace trimming is performed after per-token detokenization
     }
 
@@ -1980,3 +3015,241 @@ std::string llama_detokenize(const struct llama_vocab & vocab, const std::vector
     // NOTE: the original tokenizer decodes bytes after collecting the pieces.
     return text;
 }
+
+void llama_vocab::print_info() const {
+    pimpl->print_info();
+}
+
+//
+// interface implementation
+//
+
+int32_t llama_vocab_n_tokens(const struct llama_vocab * vocab) {
+    return vocab->n_tokens();
+}
+
+// deprecated
+int32_t llama_n_vocab(const struct llama_vocab * vocab) {
+    return llama_vocab_n_tokens(vocab);
+}
+
+enum llama_vocab_type llama_vocab_type(const struct llama_vocab * vocab) {
+    return vocab->get_type();
+}
+
+const char * llama_vocab_get_text(const struct llama_vocab * vocab, llama_token token) {
+    return vocab->token_get_text(token);
+}
+
+float llama_vocab_get_score(const struct llama_vocab * vocab, llama_token token) {
+    return vocab->token_get_score(token);
+}
+
+enum llama_token_attr llama_vocab_get_attr(const struct llama_vocab * vocab, llama_token token) {
+    return vocab->token_get_attr(token);
+}
+
+bool llama_vocab_is_eog(const struct llama_vocab * vocab, llama_token token) {
+    return vocab->is_eog(token);
+}
+
+bool llama_vocab_is_control(const struct llama_vocab * vocab, llama_token token) {
+    return vocab->is_control(token);
+}
+
+llama_token llama_vocab_bos(const struct llama_vocab * vocab) {
+    return vocab->token_bos();
+}
+
+llama_token llama_vocab_eos(const struct llama_vocab * vocab) {
+    return vocab->token_eos();
+}
+
+llama_token llama_vocab_eot(const struct llama_vocab * vocab) {
+    return vocab->token_eot();
+}
+
+llama_token llama_vocab_cls(const struct llama_vocab * vocab) {
+    return vocab->token_cls();
+}
+
+llama_token llama_vocab_sep(const struct llama_vocab * vocab) {
+    return vocab->token_sep();
+}
+
+llama_token llama_vocab_nl (const struct llama_vocab * vocab) {
+    return vocab->token_nl();
+}
+
+llama_token llama_vocab_pad(const struct llama_vocab * vocab) {
+    return vocab->token_pad();
+}
+
+bool llama_vocab_get_add_bos(const struct llama_vocab * vocab) {
+    return vocab->get_add_bos();
+}
+
+bool llama_vocab_get_add_eos(const struct llama_vocab * vocab) {
+    return vocab->get_add_eos();
+}
+
+llama_token llama_vocab_fim_pre(const struct llama_vocab * vocab) {
+    return vocab->token_fim_pre();
+}
+
+llama_token llama_vocab_fim_suf(const struct llama_vocab * vocab) {
+    return vocab->token_fim_suf();
+}
+
+llama_token llama_vocab_fim_mid(const struct llama_vocab * vocab) {
+    return vocab->token_fim_mid();
+}
+
+llama_token llama_vocab_fim_pad(const struct llama_vocab * vocab) {
+    return vocab->token_fim_pad();
+}
+
+llama_token llama_vocab_fim_rep(const struct llama_vocab * vocab) {
+    return vocab->token_fim_rep();
+}
+
+llama_token llama_vocab_fim_sep(const struct llama_vocab * vocab) {
+    return vocab->token_fim_sep();
+}
+
+// deprecated
+const char * llama_token_get_text(const struct llama_vocab * vocab, llama_token token) {
+    return llama_vocab_get_text(vocab, token);
+}
+
+// deprecated
+float llama_token_get_score(const struct llama_vocab * vocab, llama_token token) {
+    return llama_vocab_get_score(vocab, token);
+}
+
+// deprecated
+enum llama_token_attr llama_token_get_attr(const struct llama_vocab * vocab, llama_token token) {
+    return llama_vocab_get_attr(vocab, token);
+}
+
+// deprecated
+bool llama_token_is_eog(const struct llama_vocab * vocab, llama_token token) {
+    return llama_vocab_is_eog(vocab, token);
+}
+
+// deprecated
+bool llama_token_is_control(const struct llama_vocab * vocab, llama_token token) {
+    return llama_vocab_is_control(vocab, token);
+}
+
+// deprecated
+llama_token llama_token_bos(const struct llama_vocab * vocab) {
+    return llama_vocab_bos(vocab);
+}
+
+// deprecated
+llama_token llama_token_eos(const struct llama_vocab * vocab) {
+    return llama_vocab_eos(vocab);
+}
+
+// deprecated
+llama_token llama_token_eot(const struct llama_vocab * vocab) {
+    return llama_vocab_eot(vocab);
+}
+
+// deprecated
+llama_token llama_token_cls(const struct llama_vocab * vocab) {
+    return llama_vocab_cls(vocab);
+}
+
+// deprecated
+llama_token llama_token_sep(const struct llama_vocab * vocab) {
+    return llama_vocab_sep(vocab);
+}
+
+// deprecated
+llama_token llama_token_nl (const struct llama_vocab * vocab) {
+    return llama_vocab_nl(vocab);
+}
+
+// deprecated
+llama_token llama_token_pad(const struct llama_vocab * vocab) {
+    return llama_vocab_pad(vocab);
+}
+
+// deprecated
+bool llama_add_bos_token(const struct llama_vocab * vocab) {
+    return llama_vocab_get_add_bos(vocab);
+}
+
+// deprecated
+bool llama_add_eos_token(const struct llama_vocab * vocab) {
+    return llama_vocab_get_add_eos(vocab);
+}
+
+// deprecated
+llama_token llama_token_fim_pre(const struct llama_vocab * vocab) {
+    return llama_vocab_fim_pre(vocab);
+}
+
+// deprecated
+llama_token llama_token_fim_suf(const struct llama_vocab * vocab) {
+    return llama_vocab_fim_suf(vocab);
+}
+
+// deprecated
+llama_token llama_token_fim_mid(const struct llama_vocab * vocab) {
+    return llama_vocab_fim_mid(vocab);
+}
+
+// deprecated
+llama_token llama_token_fim_pad(const struct llama_vocab * vocab) {
+    return llama_vocab_fim_pad(vocab);
+}
+
+// deprecated
+llama_token llama_token_fim_rep(const struct llama_vocab * vocab) {
+    return llama_vocab_fim_rep(vocab);
+}
+
+// deprecated
+llama_token llama_token_fim_sep(const struct llama_vocab * vocab) {
+    return llama_vocab_fim_sep(vocab);
+}
+
+//
+// tokenization
+//
+
+int32_t llama_tokenize(
+    const struct llama_vocab * vocab,
+                  const char * text,
+                     int32_t   text_len,
+                 llama_token * tokens,
+                     int32_t   n_tokens_max,
+                        bool   add_special,
+                        bool   parse_special) {
+    return vocab->tokenize(text, text_len, tokens, n_tokens_max, add_special, parse_special);
+}
+
+int32_t llama_token_to_piece(
+    const struct llama_vocab * vocab,
+                 llama_token   token,
+                        char * buf,
+                     int32_t   length,
+                     int32_t   lstrip,
+                        bool   special) {
+    return vocab->token_to_piece(token, buf, length, lstrip, special);
+}
+
+int32_t llama_detokenize(
+    const struct llama_vocab * vocab,
+           const llama_token * tokens,
+                     int32_t   n_tokens,
+                        char * text,
+                     int32_t   text_len_max,
+                        bool   remove_special,
+                        bool   unparse_special) {
+    return vocab->detokenize(tokens, n_tokens, text, text_len_max, remove_special, unparse_special);
+}
+
diff --git a/src/llama-vocab.h b/src/llama-vocab.h
index 0d00086da..020f2b533 100644
--- a/src/llama-vocab.h
+++ b/src/llama-vocab.h
@@ -4,179 +4,123 @@
 
 #include 
 #include 
-#include 
-#include 
-#include 
+#include 
 
-static const char * llama_model_vocab_type_name(enum llama_vocab_type type){
-    switch (type) {
-        case LLAMA_VOCAB_TYPE_NONE: return "no vocab";
-        case LLAMA_VOCAB_TYPE_SPM:  return "SPM";
-        case LLAMA_VOCAB_TYPE_BPE:  return "BPE";
-        case LLAMA_VOCAB_TYPE_WPM:  return "WPM";
-        case LLAMA_VOCAB_TYPE_UGM:  return "UGM";
-        case LLAMA_VOCAB_TYPE_RWKV: return "RWKV";
-        default:                    return "unknown";
-    }
-}
-
-struct llm_tokenizer;
+struct LLM_KV;
+struct llama_model_loader;
 
 struct llama_vocab {
-    using id    = llama_token;
-    using token = std::string;
-    using tattr = llama_token_attr;
-
     struct token_data {
-        token text;
-        float score;
-        tattr attr;
+        std::string      text;
+        float            score;
+        llama_token_attr attr;
     };
 
-    uint32_t n_vocab = 0; // TODO: not great because has to keep in sync with hparams.n_vocab
-
-    enum llama_vocab_type     type     = LLAMA_VOCAB_TYPE_SPM;
-    enum llama_vocab_pre_type type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
-
-    int max_token_len = 0; // used for optimizing longest token search
-
-    std::unordered_map token_to_id;
-    std::vector       id_to_token;
-
-    std::vector    cache_special_tokens;
-    std::vector cache_token_to_piece; // llama_token_to_piece(special = true);
-
-    std::map, int> bpe_ranks;
-
-    // default LLaMA special tokens
-    // TODO: should we set all of these to LLAMA_TOKEN_NULL?
-    id special_bos_id  = 1;
-    id special_eos_id  = 2;
-    id special_eot_id  = LLAMA_TOKEN_NULL;
-    id special_eom_id  = LLAMA_TOKEN_NULL;
-    id special_unk_id  = 0;
-    id special_sep_id  = LLAMA_TOKEN_NULL;
-    id special_pad_id  = LLAMA_TOKEN_NULL;
-    id special_cls_id  = LLAMA_TOKEN_NULL; // TODO: revisit if this is really needed https://github.com/ggerganov/llama.cpp/pull/10930
-    id special_mask_id = LLAMA_TOKEN_NULL;
-
-    id linefeed_id = 13;
-
-    // fim tokens
-    id special_fim_pre_id = LLAMA_TOKEN_NULL;
-    id special_fim_suf_id = LLAMA_TOKEN_NULL;
-    id special_fim_mid_id = LLAMA_TOKEN_NULL;
-    id special_fim_pad_id = LLAMA_TOKEN_NULL;
-    id special_fim_rep_id = LLAMA_TOKEN_NULL; // repo
-    id special_fim_sep_id = LLAMA_TOKEN_NULL; // file separator
-
-    // set of all tokens that cause "end of generation"
-    std::set special_eog_ids;
-
-    // tokenizer flags
-    bool tokenizer_add_space_prefix           = false;
-    bool tokenizer_add_bos                    = false;
-    bool tokenizer_add_eos                    = false;
-    bool tokenizer_ignore_merges              = false;
-    bool tokenizer_clean_spaces               = false;  // clean_up_tokenization_spaces
-    bool tokenizer_remove_extra_whitespaces   = false;
-    bool tokenizer_escape_whitespaces         = true;
-    bool tokenizer_treat_whitespace_as_suffix = false;
-
-    std::vector precompiled_charsmap;
-
-    llm_tokenizer * tokenizer = nullptr;
-
-    llama_vocab() = default;
+    llama_vocab();
     ~llama_vocab();
 
+    void load(llama_model_loader & ml, const LLM_KV & kv);
+
+    enum llama_vocab_type     get_type()     const;
+    enum llama_vocab_pre_type get_pre_type() const;
+
+    uint32_t n_tokens() const;
+    uint32_t n_token_types() const;
+
+    std::string type_name() const;
+
+    bool is_normal      (llama_token id) const;
+    bool is_unknown     (llama_token id) const;
+    bool is_control     (llama_token id) const;
+    bool is_byte        (llama_token id) const;
+    bool is_user_defined(llama_token id) const;
+    bool is_unused      (llama_token id) const;
+    bool is_eog         (llama_token id) const;
+
+    uint8_t     token_to_byte(llama_token id) const;
+    llama_token byte_to_token(uint8_t ch)     const;
+
+    llama_token text_to_token(const std::string & text) const;
+
+    const token_data & get_token_data(llama_token id) const;
+
+    const char *     token_get_text (llama_token id) const;
+    float            token_get_score(llama_token id) const;
+    llama_token_attr token_get_attr (llama_token id) const;
+
+    llama_token token_bos() const;
+    llama_token token_eos() const;
+    llama_token token_eot() const;
+    llama_token token_eom() const;
+    llama_token token_unk() const;
+    llama_token token_cls() const;
+    llama_token token_sep() const;
+    llama_token token_nl () const;
+    llama_token token_pad() const;
+
+    llama_token token_prefix() const;
+    llama_token token_middle() const;
+    llama_token token_suffix() const;
+
+    llama_token token_fim_pre() const;
+    llama_token token_fim_suf() const;
+    llama_token token_fim_mid() const;
+    llama_token token_fim_pad() const;
+    llama_token token_fim_rep() const;
+    llama_token token_fim_sep() const;
+
+    bool get_add_space_prefix          () const;
+    bool get_add_bos                   () const;
+    bool get_add_eos                   () const;
+    bool get_ignore_merges             () const;
+    bool get_clean_spaces              () const;
+    bool get_remove_extra_whitespaces  () const;
+    bool get_escape_whitespaces        () const;
+    bool get_treat_whitespace_as_suffix() const;
+
+    int max_token_len() const;
+
     int find_bpe_rank(const std::string & token_left, const std::string & token_right) const;
 
-    void init_tokenizer();
+    int32_t tokenize(
+                   const char * text,
+                      int32_t   text_len,
+                  llama_token * tokens,
+                      int32_t   n_tokens_max,
+                         bool   add_special,
+                         bool   parse_special) const;
+
+    std::vector tokenize(
+            const std::string & raw_text,
+                         bool   add_special,
+                         bool   parse_special = false) const;
+
+    // does not write null-terminator to buf
+    int32_t token_to_piece(
+                  llama_token   token,
+                         char * buf,
+                      int32_t   length,
+                      int32_t   lstrip,
+                         bool   special) const;
+
+    // use cached data
+    const std::string & token_to_piece(llama_token token) const;
+
+    int32_t detokenize(
+            const llama_token * tokens,
+                      int32_t   n_tokens,
+                         char * text,
+                      int32_t   text_len_max,
+                         bool   remove_special,
+                         bool   unparse_special) const;
+
+    std::string detokenize(
+            const std::vector & tokens,
+                                      bool   special) const;
+
+    void print_info() const;
+
+private:
+    struct impl;
+    std::unique_ptr pimpl;
 };
-
-//
-// internal API
-//
-
-// TODO: rename to llama_tokenize_impl
-// TODO: This should probably be in llama.h
-std::vector llama_tokenize_internal(
-        const llama_vocab & vocab,
-        std::string raw_text,
-        bool add_special,
-        bool parse_special = false);
-
-// TODO: move the API below as member functions of llama_vocab
-llama_token llama_byte_to_token_impl(const llama_vocab & vocab, uint8_t ch);
-
-const char * llama_token_get_text_impl(const struct llama_vocab & vocab, llama_token token);
-
-float llama_token_get_score_impl(const struct llama_vocab & vocab, llama_token token);
-
-llama_token_attr llama_token_get_attr_impl(const struct llama_vocab & vocab, llama_token token);
-
-bool llama_token_is_eog_impl(const struct llama_vocab & vocab, llama_token token);
-
-bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token token);
-
-llama_token llama_token_bos_impl(const struct llama_vocab & vocab);
-llama_token llama_token_eos_impl(const struct llama_vocab & vocab);
-llama_token llama_token_eot_impl(const struct llama_vocab & vocab);
-llama_token llama_token_eom_impl(const struct llama_vocab & vocab);
-llama_token llama_token_cls_impl(const struct llama_vocab & vocab);
-llama_token llama_token_sep_impl(const struct llama_vocab & vocab);
-llama_token llama_token_nl_impl (const struct llama_vocab & vocab);
-llama_token llama_token_pad_impl(const struct llama_vocab & vocab);
-
-llama_token llama_token_prefix_impl(const struct llama_vocab & vocab);
-llama_token llama_token_middle_impl(const struct llama_vocab & vocab);
-llama_token llama_token_suffix_impl(const struct llama_vocab & vocab);
-
-llama_token llama_token_fim_pre_impl(const struct llama_vocab & vocab);
-llama_token llama_token_fim_suf_impl(const struct llama_vocab & vocab);
-llama_token llama_token_fim_mid_impl(const struct llama_vocab & vocab);
-llama_token llama_token_fim_pad_impl(const struct llama_vocab & vocab);
-llama_token llama_token_fim_rep_impl(const struct llama_vocab & vocab);
-llama_token llama_token_fim_sep_impl(const struct llama_vocab & vocab);
-
-bool llama_add_bos_token_impl(const struct llama_vocab & vocab);
-bool llama_add_eos_token_impl(const struct llama_vocab & vocab);
-
-int32_t llama_tokenize_impl(
-        const struct llama_vocab & vocab,
-                      const char * text,
-                         int32_t   text_len,
-                     llama_token * tokens,
-                         int32_t   n_tokens_max,
-                            bool   add_special,
-                            bool   parse_special);
-
-// does not write null-terminator to buf
-int32_t llama_token_to_piece_impl(
-        const struct llama_vocab & vocab,
-                     llama_token   token,
-                            char * buf,
-                         int32_t   length,
-                         int32_t   lstrip,
-                            bool   special);
-
-// check if token0 is contained as a prefix in token1
-bool llama_token_is_prefix_impl(
-        const struct llama_vocab & vocab,
-                     llama_token   token0,
-                     llama_token   token1);
-
-int32_t llama_detokenize_impl(
-        const struct llama_vocab & vocab,
-               const llama_token * tokens,
-                         int32_t   n_tokens,
-                            char * text,
-                         int32_t   text_len_max,
-                            bool   remove_special,
-                            bool   unparse_special);
-
-std::string llama_detokenize(
-        const struct llama_vocab & vocab,
-  const std::vector & tokens,
-                            bool   special);
diff --git a/src/llama.cpp b/src/llama.cpp
index a364861d3..daf1b7c97 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -25,2569 +25,52 @@
 #include 
 #include 
 #include 
-#include 
-#include 
 
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-//
-// tensor loading (TODO: add llama_tesor_loader?)
-//
-
-static int llama_get_device_count(const llama_model & model) {
-    return (int) model.devices.size();
-}
-
-// checks if the weight tensor can be used with the specified buffer type and device
-static bool weight_buft_supported(const llama_hparams & hparams, ggml_tensor * w, ggml_op op, ggml_backend_buffer_type_t buft, ggml_backend_dev_t dev) {
-    GGML_ASSERT(w != nullptr);
-
-    if (op == GGML_OP_NONE) {
-        return true;
-    }
-
-    ggml_init_params params = {
-        /*.mem_size   =*/ ggml_tensor_overhead()*8,
-        /*.mem_buffer =*/ NULL,
-        /*.no_alloc   =*/ true,
-    };
-    ggml_context_ptr ctx_ptr { ggml_init(params) };
-    if (!ctx_ptr) {
-        throw std::runtime_error(format("failed to create ggml context"));
-    }
-    ggml_context * ctx = ctx_ptr.get();
-
-    ggml_tensor * op_tensor = nullptr;
-
-    switch (op) {
-        case GGML_OP_GET_ROWS:
-            {
-                ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
-                op_tensor = ggml_get_rows(ctx, w, b);
-            } break;
-        case GGML_OP_MUL_MAT:
-            {
-                ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], 512, w->ne[2], w->ne[3]);
-                op_tensor = ggml_mul_mat(ctx, w, b);
-            } break;
-        case GGML_OP_MUL_MAT_ID:
-            {
-                int n_expert_used = hparams.n_expert_used;
-                ggml_tensor * b = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, w->ne[0], n_expert_used, 512);
-                ggml_tensor * ids = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, n_expert_used, 512);
-                op_tensor = ggml_mul_mat_id(ctx, w, b, ids);
-            } break;
-        case GGML_OP_ADD:
-            {
-                ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
-                op_tensor = ggml_add(ctx, a, w);
-            } break;
-        case GGML_OP_MUL:
-            {
-                ggml_tensor * a = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, w->ne[0], w->ne[1], w->ne[2], w->ne[3]);
-                op_tensor = ggml_mul(ctx, a, w);
-            } break;
-        case GGML_OP_DIV:
-            {
-                ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, w->ne[0]);
-                op_tensor = ggml_div(ctx, a, w);
-            } break;
-        case GGML_OP_ROPE:
-            {
-                int n_embd_head = hparams.n_embd_head_v;
-                int n_head = hparams.n_head();
-                ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, n_embd_head, n_head, 512);
-                ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 512);
-                op_tensor = ggml_rope_ext(
-                    ctx, a, b, w,
-                    0, 0, 0, 0, 0,
-                    0, 0, 0, 0
-                );
-
-            } break;
-        case GGML_OP_SSM_CONV:
-            {
-                // FIXME
-                ggml_tensor * conv_x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, 12345, w->ne[1], 6789);
-                op_tensor = ggml_ssm_conv(ctx, conv_x, w);
-            } break;
-        case GGML_OP_SSM_SCAN:
-            {
-                // FIXME
-                const int64_t d_state      = w->ne[0];
-                const int64_t d_inner      = w->ne[1];
-                const int64_t n_seq_tokens = 512;
-                const int64_t n_seqs       = 1;
-                ggml_tensor * s  = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, d_inner, n_seqs);
-                ggml_tensor * x = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs);
-                ggml_tensor * dt = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_inner, n_seq_tokens, n_seqs);
-                ggml_tensor * B = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs);
-                ggml_tensor * C = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, d_state, n_seq_tokens, n_seqs);
-                op_tensor = ggml_ssm_scan(ctx, s, x, dt, w, B, C);
-            } break;
-        case GGML_OP_RWKV_WKV6:
-            {
-                // FIXME
-                const int64_t S = 123;
-                const int64_t H = 123;
-                const int64_t n_tokens = 123;
-                const int64_t n_seqs = 123;
-                ggml_tensor  * k = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
-                ggml_tensor  * v = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
-                ggml_tensor  * r = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
-                ggml_tensor  * tf = w;
-                ggml_tensor  * td = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, S, H, n_tokens);
-                ggml_tensor  * state = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, S, n_seqs, S, H);
-                op_tensor = ggml_rwkv_wkv6(ctx, k, v, r, tf, td, state);
-            } break;
-        case GGML_OP_IM2COL:
-            {
-                const int n_embd = hparams.n_embd;
-                ggml_tensor * b = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, n_embd, w->ne[1], 1, 1);
-                op_tensor = ggml_im2col(ctx, w, b, 1, 0, 0, 0, 1, 0, false, GGML_TYPE_F16);
-            } break;
-        default:
-            GGML_ABORT("%s: missing test for op %s for tensor %s", __func__, ggml_op_name(op), w->name);
-    }
-
-    // create a temporary dummy buffer for the weight so that supports_op can check the buffer type
-    GGML_ASSERT(w->buffer == nullptr);
-    w->buffer = ggml_backend_buft_alloc_buffer(buft, 0);
-    bool op_supported = ggml_backend_dev_supports_op(dev, op_tensor);
-    ggml_backend_buffer_free(w->buffer);
-    w->buffer = nullptr;
-
-    return op_supported;
-}
-
-// find the first buffer type in the list that can use the tensor
-static ggml_backend_buffer_type_t select_weight_buft(const llama_model & model, ggml_tensor * tensor, ggml_op op, const llama_model::buft_list_t & buft_list) {
-    GGML_ASSERT(!buft_list.empty());
-    for (const auto & cur : buft_list) {
-        ggml_backend_dev_t cur_dev = cur.first;
-        ggml_backend_buffer_type_t cur_buft = cur.second;
-        if (weight_buft_supported(model.hparams, tensor, op, cur_buft, cur_dev)) {
-            return cur_buft;
-        }
-    }
-    return nullptr;
-}
-
-// CPU: ACCEL -> CPU extra -> GPU host -> CPU
-static llama_model::buft_list_t make_cpu_buft_list(llama_model & model) {
-    llama_model::buft_list_t buft_list;
-
-    // add ACCEL buffer types
-    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
-        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
-        if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
-            auto * buft = ggml_backend_dev_buffer_type(dev);
-            // skip
-            if (buft != ggml_backend_cpu_buffer_type()) {
-                buft_list.emplace_back(dev, buft);
-            }
-        }
-    }
-
-    // add extra buffer types
-    auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
-    auto * cpu_reg = ggml_backend_dev_backend_reg(cpu_dev);
-    auto ggml_backend_dev_get_extra_bufts_fn = (ggml_backend_dev_get_extra_bufts_t)
-        ggml_backend_reg_get_proc_address(cpu_reg, "ggml_backend_dev_get_extra_bufts");
-    if (ggml_backend_dev_get_extra_bufts_fn) {
-        ggml_backend_buffer_type_t * extra_bufts = ggml_backend_dev_get_extra_bufts_fn(cpu_dev);
-        while (extra_bufts && *extra_bufts) {
-            buft_list.emplace_back(cpu_dev, *extra_bufts);
-            ++extra_bufts;
-        }
-    }
-
-    // add a host buffer type
-    // storing the tensors in a host buffer is useful when the processing of large batches
-    // is offloaded to a GPU device, since it reduces the time spent on data transfers
-    // generally, this will be done using the first device in the list
-    // a better approach would be to handle this on a weight-by-weight basis using the offload_op
-    // function of the device to determine if it would benefit from being stored in a host buffer
-    for (auto * dev : model.devices) {
-        ggml_backend_buffer_type_t buft = ggml_backend_dev_host_buffer_type(dev);
-        if (buft) {
-            buft_list.emplace_back(dev, buft);
-            break;
-        }
-    }
-
-    // add the CPU buffer type
-    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
-        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
-        if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_CPU) {
-            buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
-        }
-    }
-
-    return buft_list;
-}
-
-// GPU: split if LLAMA_SPLIT_MODE_ROW -> GPU
-static llama_model::buft_list_t make_gpu_buft_list(ggml_backend_dev_t dev, enum llama_split_mode split_mode, const float * tensor_split) {
-    llama_model::buft_list_t buft_list;
-
-    // add the device split buffer type if requested and available
-    if (split_mode == LLAMA_SPLIT_MODE_ROW) {
-        ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev);
-        auto ggml_backend_split_buffer_type_fn = (ggml_backend_split_buffer_type_t)
-            ggml_backend_reg_get_proc_address(reg, "ggml_backend_split_buffer_type");
-        if (ggml_backend_split_buffer_type_fn) {
-            size_t dev_index = [&]() {
-                auto * reg = ggml_backend_dev_backend_reg(dev);
-                for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); ++i) {
-                    if (ggml_backend_reg_dev_get(reg, i) == dev) {
-                        return i;
-                    }
-                }
-                throw std::runtime_error(format("device %s not found in its backend reg", ggml_backend_dev_name(dev)));
-            }();
-            auto * buft = ggml_backend_split_buffer_type_fn(dev_index, tensor_split);
-            if (buft != nullptr) {
-                buft_list.emplace_back(dev, buft);
-            }
-        }
-    }
-
-    // add the device default buffer type
-    buft_list.emplace_back(dev, ggml_backend_dev_buffer_type(dev));
-
-    return buft_list;
-}
-
-// Returns false if cancelled by progress_callback
-static bool llm_load_tensors(
-        llama_model_loader & ml,
-        llama_model & model,
-        int n_gpu_layers,
-        enum llama_split_mode split_mode,
-        int main_gpu,
-        const float * tensor_split,
-        bool use_mlock,
-        llama_progress_callback progress_callback,
-        void * progress_callback_user_data) {
-    auto & hparams = model.hparams;
-
-    model.split_mode   = split_mode;
-    model.main_gpu     = main_gpu;
-    model.n_gpu_layers = n_gpu_layers;
-
-    const int n_layer = hparams.n_layer;
-
-    bool use_mmap_buffer = true;
-
-    // build a list of buffer types for the CPU and GPU devices
-    model.cpu_buft_list = make_cpu_buft_list(model);
-    for (auto * dev : model.devices) {
-        llama_model::buft_list_t buft_list = make_gpu_buft_list(dev, split_mode, tensor_split);
-        // add CPU buffer types as a fallback
-        buft_list.insert(buft_list.end(), model.cpu_buft_list.begin(), model.cpu_buft_list.end());
-        model.gpu_buft_list.emplace(dev, std::move(buft_list));
-    }
-
-    // calculate the split points
-    int device_count = llama_get_device_count(model);
-    bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + device_count, [](float x) { return x == 0.0f; });
-    std::vector splits(device_count);
-    if (all_zero) {
-        // default split, by free memory
-        for (int i = 0; i < device_count; ++i) {
-            ggml_backend_dev_t dev = model.devices[i];
-            size_t total;
-            size_t free;
-            ggml_backend_dev_memory(dev, &free, &total);
-            splits[i] = free;
-        }
-    } else {
-        std::copy(tensor_split, tensor_split + device_count, splits.begin());
-    }
-
-    // sum and normalize the splits to get the split points
-    float split_sum = 0.0f;
-    for (int i = 0; i < device_count; ++i) {
-        split_sum += splits[i];
-        splits[i] = split_sum;
-    }
-    for (int i = 0; i < device_count; ++i) {
-        splits[i] /= split_sum;
-    }
-
-    ggml_backend_dev_t cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
-    const int i_gpu_start = std::max((int) hparams.n_layer - n_gpu_layers, (int) 0);
-    const int act_gpu_layers = model.devices.empty() ? 0 : std::min(n_gpu_layers, (int)n_layer + 1);
-    auto get_layer_buft_list = [&](int il) -> llama_model::layer_dev {
-        if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) {
-            return {cpu_dev, &model.cpu_buft_list};
-        }
-        int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(il - i_gpu_start)/act_gpu_layers) - splits.begin();
-        auto * dev = model.devices.at(layer_gpu);
-        return {dev, &model.gpu_buft_list.at(dev)};
-    };
-
-    // assign the input layer
-    // there is very little benefit to offloading the input layer, so always keep it on the CPU
-    model.dev_input = { cpu_dev, &model.cpu_buft_list };
-
-    // assign the repeating layers to the devices according to the splits
-    model.dev_layer.resize(n_layer);
-    for (int il = 0; il < n_layer; ++il) {
-        model.dev_layer[il] = get_layer_buft_list(il);
-    }
-    // assign the output layer
-    model.dev_output = get_layer_buft_list(n_layer);
-
-    // one ggml context per buffer type
-    int max_n_tensors = ml.n_tensors;
-    max_n_tensors += 1;         // duplicated output tensor
-    max_n_tensors += n_layer*2; // duplicated rope freq tensors
-    const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors;
-
-    std::map ctx_map;
-    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
-        auto it = ctx_map.find(buft);
-        if (it == ctx_map.end()) {
-            ggml_init_params params = {
-                /*.mem_size   =*/ ctx_size,
-                /*.mem_buffer =*/ NULL,
-                /*.no_alloc   =*/ true,
-            };
-            ggml_context * ctx = ggml_init(params);
-            if (!ctx) {
-                throw std::runtime_error(format("failed to create ggml context"));
-            }
-            ctx_map[buft] = ctx;
-            model.ctxs.emplace_back(ctx);
-            return ctx;
-        }
-        return it->second;
-    };
-
-    // create tensors for the weights
-    {
-        // note: cast to int64_t since we will use these for the tensor dimensions
-        const int64_t n_head        = hparams.n_head();
-        const int64_t n_head_kv     = hparams.n_head_kv();
-        const int64_t n_embd        = hparams.n_embd;
-        const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa();
-        const int64_t n_embd_v_gqa  = hparams.n_embd_v_gqa();
-        const int64_t n_embd_head_k = hparams.n_embd_head_k;
-        const int64_t n_embd_head_v = hparams.n_embd_head_v;
-        const int64_t n_ff          = hparams.n_ff();
-        const int64_t n_embd_gqa    = n_embd_v_gqa;
-        const int64_t n_vocab       = hparams.n_vocab;
-        const int64_t n_vocab_type  = hparams.n_vocab_type;
-        const int64_t n_rot         = hparams.n_rot;
-        const int64_t n_expert      = hparams.n_expert;
-        const int64_t n_expert_used = hparams.n_expert_used;
-        const int64_t n_ctx_train   = hparams.n_ctx_train;
-
-        if (n_expert > 0 && hparams.n_expert_used == 0) {
-            throw std::runtime_error("model has expert layers but no expert layers are used");
-        }
-
-        int n_moved_tensors = 0;
-        ggml_tensor * first_moved_tensor = nullptr;
-        ggml_backend_buffer_type_t first_moved_from_buft = nullptr;
-        ggml_backend_buffer_type_t first_moved_to_buft = nullptr;
-
-        auto create_tensor = [&](const LLM_TN_IMPL & tn, const std::initializer_list & ne, int flags) -> ggml_tensor * {
-            ggml_tensor * t_meta = ml.get_tensor_meta(tn.str().c_str());
-
-            if (!t_meta) {
-                if (flags & llama_model_loader::TENSOR_NOT_REQUIRED) {
-                    return nullptr;
-                }
-                throw std::runtime_error(format("missing tensor '%s'", tn.str().c_str()));
-            }
-
-            // some models use the token embedding tensor as the output, but since these are used in different layers and with different ops
-            // the tensor is duplicated
-            // to handle this, we check if the tensor is duplicated, and if so, we assume that it is being loaded as the output tensor
-            llm_tensor tn_tensor = tn.tensor;
-            if (tn.tensor == LLM_TENSOR_TOKEN_EMBD && flags & llama_model_loader::TENSOR_DUPLICATED) {
-                tn_tensor = LLM_TENSOR_OUTPUT;
-            }
-
-            llm_tensor_info info;
-            try {
-                info = llm_tensor_info_for(tn_tensor);
-            } catch (const std::out_of_range & e) {
-                throw std::runtime_error(format("missing tensor info mapping for %s", tn.str().c_str()));
-            }
-
-            // tensors with "bias" suffix are always used with GGML_OP_ADD
-            ggml_op op;
-            bool bias = tn.suffix != nullptr && strcmp(tn.suffix, "bias") == 0;
-            if (bias) {
-                op = GGML_OP_ADD;
-            } else {
-                op = info.op;
-            }
-
-            // sanity checks
-            if (info.layer == LLM_TENSOR_LAYER_INPUT || info.layer == LLM_TENSOR_LAYER_OUTPUT) {
-                if (tn.bid != -1) {
-                    GGML_ABORT("input/output layer tensor %s used with a layer number", tn.str().c_str());
-                }
-            } else {
-                if (tn.bid == -1) {
-                    GGML_ABORT("repeating layer tensor %s used without a layer number", tn.str().c_str());
-                }
-            }
-
-            // select the buffer type for this tensor
-            llama_model::buft_list_t * buft_list;
-            switch (info.layer) {
-                case LLM_TENSOR_LAYER_INPUT:
-                    buft_list = model.dev_input.buft_list;
-                    break;
-                case LLM_TENSOR_LAYER_OUTPUT:
-                    buft_list = model.dev_output.buft_list;
-                    break;
-                case LLM_TENSOR_LAYER_REPEATING:
-                    buft_list = model.dev_layer.at(tn.bid).buft_list;
-                    break;
-                default:
-                    GGML_ABORT("invalid layer %d for tensor %s", info.layer, tn.str().c_str());
-            }
-
-            ggml_backend_buffer_type_t buft = select_weight_buft(model, t_meta, op, *buft_list);
-            if (!buft) {
-                throw std::runtime_error(format("failed to find a compatible buffer type for tensor %s", tn.str().c_str()));
-            }
-
-            // avoid using a host buffer when using mmap
-            auto * buft_dev = ggml_backend_buft_get_device(buft);
-            if (ml.use_mmap && buft_dev && buft == ggml_backend_dev_host_buffer_type(buft_dev)) {
-                auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
-                buft = ggml_backend_dev_buffer_type(cpu_dev);
-            }
-
-            if (buft != buft_list->front().second) {
-                n_moved_tensors++;
-                if (!first_moved_tensor) {
-                    first_moved_tensor = t_meta;
-                    first_moved_from_buft = buft_list->front().second;
-                    first_moved_to_buft   = buft;
-                }
-            }
-
-            ggml_context * ctx = ctx_for_buft(buft);
-
-            // if duplicated, check if the original tensor was allocated in the same buffer type context and avoid creating a new one
-            if (flags & llama_model_loader::TENSOR_DUPLICATED) {
-                ggml_tensor * t = ggml_get_tensor(ctx, tn.str().c_str());
-                if (t) {
-                    return t;
-                }
-            }
-            return ml.create_tensor(ctx, tn, ne, flags);
-        };
-
-        model.layers.resize(n_layer);
-
-        // TODO: move to a separate function
-        const auto tn = LLM_TN(model.arch);
-        switch (model.arch) {
-            case LLM_ARCH_LLAMA:
-            case LLM_ARCH_REFACT:
-            case LLM_ARCH_MINICPM:
-            case LLM_ARCH_GRANITE:
-            case LLM_ARCH_GRANITE_MOE:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
-
-                        // optional bias tensors
-                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
-                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                        }
-                        else {
-                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                        }
-
-                        if (n_expert == 0) {
-                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-
-                            // optional MLP bias
-                            layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                            layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        } else {
-                            layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
-                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
-                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
-                        }
-                    }
-                } break;
-            case LLM_ARCH_DECI:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-                        const int64_t n_embd_k_gqa  = hparams.n_embd_k_gqa(i);
-                        const int64_t n_embd_v_gqa  = hparams.n_embd_v_gqa(i);
-                        const int64_t n_embd_gqa    = hparams.n_embd_v_gqa(i);
-                        const int64_t n_ff          = hparams.n_ff(i);
-                        const int64_t n_head        = hparams.n_head(i);
-                        const int64_t n_head_kv     = hparams.n_head_kv(i);
-
-                        if (n_head_kv == 0 && n_head > 0) {
-                            // linear attention for DeciLMCausalModel
-                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        }
-                        else if (n_head_kv > 0) {
-                            layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
-                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
-                            layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
-                        }
-
-                        // optional bias tensors
-                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        if (hparams.rope_scaling_type_train == LLAMA_ROPE_SCALING_TYPE_LONGROPE) {
-                            layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                            layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                        }
-                        else {
-                            layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                        }
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-
-                        // optional MLP bias
-                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE, "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    }
-                } break;
-            case LLM_ARCH_MINICPM3:
-                {
-                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
-                    const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
-
-                    const int64_t q_lora_rank  = hparams.n_lora_q;
-                    const int64_t kv_lora_rank = hparams.n_lora_kv;
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
-
-                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
-
-                        layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
-                        layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k}, 0);
-
-                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
-                        layer.wkv_b     = create_tensor(tn(LLM_TENSOR_ATTN_KV_B,     "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
-                        layer.wo        = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {              n_head * (                      n_embd_head_v), n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-
-                        layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_embd_head_qk_rope/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head_qk_rope/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                    }
-                } break;
-            case LLM_ARCH_GROK:
-                {
-                    if (n_expert == 0) {
-                        throw std::runtime_error("Grok model cannot have zero experts");
-                    }
-
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.attn_out_norm   = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
-                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff, n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
-                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
-
-                        layer.layer_out_norm   = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
-                    }
-                } break;
-            case LLM_ARCH_DBRX:
-                {
-                    if (n_expert == 0) {
-                        throw std::runtime_error("DBRX model cannot have zero experts");
-                    }
-
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.attn_out_norm = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert}, 0);
-                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
-                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
-                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
-                    }
-                } break;
-            case LLM_ARCH_BAICHUAN:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-                    {
-                        model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                        model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_FALCON:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    {
-                        model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                        model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-
-                        model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        if (!model.output) {
-                            model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // needs to be on GPU
-                        }
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_STARCODER:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-                    model.pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, n_ctx_train}, 0);
-
-                    // output
-                    {
-                        model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                        model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                        model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        if (!model.output) {
-                            // needs to be on GPU
-                            model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                        }
-
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
-
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i),   {n_embd, n_ff}, 0);
-                        layer.ffn_up_b = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i),     {n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_BERT:
-            case LLM_ARCH_NOMIC_BERT:
-                {
-                    model.tok_embd     = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, 0);
-                    model.type_embd    = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type}, 0);
-
-                    if (model.arch == LLM_ARCH_BERT) {
-                        model.pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,    "weight"), {n_embd, n_ctx_train}, 0);
-
-                        model.cls   = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        model.cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"),   {n_embd},         llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        model.cls_out   = create_tensor(tn(LLM_TENSOR_CLS_OUT, "weight"), {n_embd, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        model.cls_out_b = create_tensor(tn(LLM_TENSOR_CLS_OUT, "bias"),   {1},         llama_model_loader::TENSOR_NOT_REQUIRED);
-                    }
-
-                    model.tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
-                    model.tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        if (model.arch == LLM_ARCH_BERT) {
-                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i),   {n_embd}, 0);
-
-                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i),   {n_embd_gqa}, 0);
-
-                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i),   {n_embd_gqa}, 0);
-                        } else {
-                            layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        }
-
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.attn_out_norm   = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,        "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN,      "weight", i), {n_ff, n_embd}, 0);
-
-                        if (model.arch == LLM_ARCH_BERT) {
-                            layer.bo         = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
-                            layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, 0);
-                            layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
-                        } else {
-                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
-                        }
-
-                        layer.layer_out_norm   = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
-                        layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i),   {n_embd}, 0);
-                    }
-                } break;
-            case LLM_ARCH_JINA_BERT_V2:
-                {
-                    model.tok_embd  = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, 0); // word_embeddings
-                    model.type_embd = create_tensor(tn(LLM_TENSOR_TOKEN_TYPES, "weight"), {n_embd, n_vocab_type}, 0); // token_type_embeddings
-
-                    model.tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0); // LayerNorm
-                    model.tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}, 0); //LayerNorm bias
-
-                    model.cls   = create_tensor(tn(LLM_TENSOR_CLS, "weight"), {n_embd, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    model.cls_b = create_tensor(tn(LLM_TENSOR_CLS, "bias"),   {1},         llama_model_loader::TENSOR_NOT_REQUIRED);
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i]; // JinaBertLayer
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
-                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd}, 0);
-
-                        layer.attn_q_norm   = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias",   i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias",   i), {n_embd_gqa}, 0);
-
-                        layer.attn_k_norm   = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias",   i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias",   i), {n_embd_gqa}, 0);
-
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); //output_dens
-                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), {n_embd}, 0); //output_dens
-
-                        layer.attn_out_norm   = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}, 0); //output_norm
-                        layer.attn_out_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_OUT_NORM, "bias",   i), {n_embd}, 0);
-
-                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias",   i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias",   i), {n_embd}, 0);
-
-                        layer.layer_out_norm   = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd}, 0);
-                        layer.layer_out_norm_b = create_tensor(tn(LLM_TENSOR_LAYER_OUT_NORM, "bias",   i), {n_embd}, 0);
-                    }
-                } break;
-            case LLM_ARCH_BLOOM:
-                {
-                    model.tok_embd   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab}, 0);
-                    model.tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
-                    model.tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias",   i), {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias",   i), {n_embd + 2*n_embd_gqa}, 0);
-
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), {n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias",   i), {n_embd}, 0);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias",   i), {n_embd}, 0);
-
-                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias",   i), {n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_MPT:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-                    model.pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, n_ctx_train}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    if (!model.output) {
-                        model.output    = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // needs to be on GPU
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.attn_q_norm   = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias",   i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.attn_k_norm   = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias",   i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        // AWQ ScaleActivation layer
-                        layer.ffn_act = create_tensor(tn(LLM_TENSOR_FFN_ACT, "scales", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    }
-                } break;
-            case LLM_ARCH_STABLELM:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm =   create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        // optional bias tensors, present in Stable LM 2 1.6B
-                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        // optional q and k layernorms, present in StableLM 2 12B
-                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head},    llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        // optional FFN norm, not present in StableLM 2 12B which uses parallel residual
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_QWEN:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd*3}, 0);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd*3}, 0);
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff/2}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff/2, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff/2}, 0);
-                    }
-                } break;
-            case LLM_ARCH_QWEN2:
-            case LLM_ARCH_QWEN2VL:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        // optional bias tensors
-                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, 0);
-                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, 0);
-                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_QWEN2MOE:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        // optional bias tensors
-                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, 0);
-                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, 0);
-                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
-
-                        if (n_expert == 0) {
-                            throw std::runtime_error("n_expert must be > 0 for QWEN2MOE");
-                        }
-                        if (n_expert_used == 0) {
-                            throw std::runtime_error("n_expert_used must be > 0 for QWEN2MOE");
-                        }
-
-                        // MoE branch
-                        const int64_t n_ff_exp = hparams.n_ff_exp ? hparams.n_ff_exp : n_ff / n_expert_used;
-
-                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
-                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
-                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
-
-                        // Shared expert branch
-                        const int64_t n_ff_shexp = hparams.n_ff_shexp ? hparams.n_ff_shexp : n_ff;
-
-                        layer.ffn_gate_inp_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP_SHEXP, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {    n_embd, n_ff_shexp}, 0);
-                        layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {n_ff_shexp,     n_embd}, 0);
-                        layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {    n_embd, n_ff_shexp}, 0);
-                    }
-                } break;
-            case LLM_ARCH_PHI2:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-                    model.output_b      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   {n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        if (layer.wqkv == nullptr) {
-                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
-                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd}, 0);
-
-                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
-                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i),   {n_embd_gqa}, 0);
-
-                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
-                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i),   {n_embd_gqa}, 0);
-                        }
-
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_PHI3:
-                {
-                    const int64_t n_embd_head = n_embd / n_head;
-
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
-                    model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), { n_embd, n_vocab }, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
-
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
-                        layer.ffn_up = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, 2 * n_ff }, 0);
-
-                        layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                    }
-                } break;
-            case LLM_ARCH_PHIMOE:
-                {
-                    const int64_t n_embd_head = n_embd / n_head;
-
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), { n_embd, n_vocab }, 0);
-                    model.output_b      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "bias"),   { n_vocab }, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias",   i), { n_embd }, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), { n_embd, n_embd + 2 * n_embd_gqa }, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        if (layer.wqkv == nullptr) {
-                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, 0);
-                            layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias",   i), {n_embd}, 0);
-
-                            layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa}, 0);
-                            layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias",   i), {n_embd_gqa}, 0);
-
-                            layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, 0);
-                            layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias",   i), {n_embd_gqa}, 0);
-                        }
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
-                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias",   i), { n_embd }, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), { n_embd }, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias",   i), { n_embd }, 0);
-
-                        layer.ffn_gate_inp  = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP,  "weight", i), {n_embd, n_expert},         0);
-                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
-                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
-                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
-
-                        layer.rope_long  = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_LONG,  "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                        layer.rope_short = create_tensor(tn(LLM_TENSOR_ROPE_FACTORS_SHORT, "weight", i), { n_embd_head/2 }, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                    }
-                } break;
-            case LLM_ARCH_PLAMO:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_GPT2:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-                    model.pos_embd = create_tensor(tn(LLM_TENSOR_POS_EMBD,   "weight"), {n_embd, n_ctx_train}, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "bias", i),   {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
-
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_CODESHELL:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
-
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i),   {n_embd, n_ff}, 0);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP, "bias", i),     {n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_ORION:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_INTERNLM2:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        // layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_GEMMA:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                    }
-                } break;
-            case LLM_ARCH_GEMMA2:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,  "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
-                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
-                    }
-                } break;
-            case LLM_ARCH_STARCODER2:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-
-                    model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        // optional bias tensors
-                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd}, 0);
-                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, 0);
-                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, 0);
-                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-
-                        // optional bias tensors
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, 0);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP ,  "bias", i), {  n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_MAMBA:
-                {
-                    const int64_t d_conv  = hparams.ssm_d_conv;
-                    const int64_t d_inner = hparams.ssm_d_inner;
-                    const int64_t d_state = hparams.ssm_d_state;
-                    const int64_t dt_rank = hparams.ssm_dt_rank;
-
-                    // only an expansion factor of 2 is supported for now
-                    if (2 * n_embd != d_inner) {
-                        throw std::runtime_error("only an expansion factor of 2 is supported for now");
-                    }
-
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-
-                    model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    // if output is NULL, init from the input tok embed, duplicated to allow offloading
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        // norm
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, 2*d_inner}, 0);
-
-                        layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner}, 0);
-                        layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner}, 0);
-
-                        layer.ssm_x = create_tensor(tn(LLM_TENSOR_SSM_X, "weight", i), {d_inner, dt_rank + 2*d_state}, 0);
-
-                        layer.ssm_dt = create_tensor(tn(LLM_TENSOR_SSM_DT, "weight", i), {dt_rank, d_inner}, 0);
-                        layer.ssm_dt_b = create_tensor(tn(LLM_TENSOR_SSM_DT, "bias", i), {d_inner}, 0);
-
-                        // no "weight" suffix for these
-                        layer.ssm_a = create_tensor(tn(LLM_TENSOR_SSM_A, i), {d_state, d_inner}, 0);
-                        layer.ssm_d = create_tensor(tn(LLM_TENSOR_SSM_D, i), {d_inner}, 0);
-
-                        // out_proj
-                        layer.ssm_out = create_tensor(tn(LLM_TENSOR_SSM_OUT, "weight", i), {d_inner, n_embd}, 0);
-                    }
-                } break;
-            case LLM_ARCH_XVERSE:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_COMMAND_R:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    // init output from the input tok embed
-                    model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        if (n_layer >= 64){
-                            layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
-                            layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
-                        }
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_COHERE2:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab }, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), { n_embd }, 0);
-                    // init output from the input tok embed
-                    model.output      = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), { n_embd, n_vocab },
-                                                      llama_model_loader::TENSOR_DUPLICATED);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), { n_embd }, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), { n_embd, n_embd }, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), { n_embd, n_embd_gqa }, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), { n_embd, n_embd_gqa }, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), { n_embd, n_embd }, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), { n_embd, n_ff }, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd }, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP, "weight", i), { n_embd, n_ff }, 0);
-                    }
-                }
-                break;
-            case LLM_ARCH_OLMO:  // adapted from LLM_ARCH_LLAMA with norm params removed
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_OLMO2:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_post_norm = create_tensor(tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_post_norm = create_tensor(tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd}, 0);
-                    }
-                } break;
-            case LLM_ARCH_OLMOE:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
-
-                        if (n_expert == 0) {
-                            throw std::runtime_error("n_expert must be > 0");
-                        }
-                        if (n_expert_used == 0) {
-                            throw std::runtime_error("n_expert_used must be > 0");
-                        }
-
-                        // MoE branch
-                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd, n_ff,   n_expert}, 0);
-                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff,   n_embd, n_expert}, 0);
-                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd, n_ff,   n_expert}, 0);
-                    }
-                } break;
-            case LLM_ARCH_OPENELM:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    // init output from the input tok embed
-                    model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        const int64_t n_head      =   hparams.n_head(i);
-                        const int64_t n_head_qkv  = 2*hparams.n_head_kv(i) + n_head;
-                        const int64_t n_ff        =   hparams.n_ff(i);
-
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_head_qkv*n_embd_head_k}, 0);
-                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k}, 0);
-                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_head*n_embd_head_k, n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_GPTNEOX:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
-
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.bo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_ARCTIC:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_embd}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_embd, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
-                        layer.ffn_norm_exps = create_tensor(tn(LLM_TENSOR_FFN_NORM_EXPS, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {n_embd,   n_ff, n_expert}, false);
-                        layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {  n_ff, n_embd, n_expert}, 0);
-                        layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {n_embd,   n_ff, n_expert}, 0);
-                    }
-                } break;
-            case LLM_ARCH_DEEPSEEK:
-                {
-
-                    const int64_t n_ff_exp        = hparams.n_ff_exp;
-                    const int64_t n_expert_shared = hparams.n_expert_shared;
-
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        if (i < (int) hparams.n_layer_dense_lead) {
-                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                        } else {
-                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
-
-                            if (n_expert == 0) {
-                                throw std::runtime_error("n_expert must be > 0");
-                            }
-                            if (n_expert_used == 0) {
-                                throw std::runtime_error("n_expert_used must be > 0");
-                            }
-
-                            // MoE branch
-                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
-                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
-                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
-
-                            // Shared expert branch
-                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
-                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
-                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
-                        }
-                    }
-                } break;
-            case LLM_ARCH_DEEPSEEK2:
-                {
-                    const bool is_lite = (hparams.n_layer == 27);
-
-                    const int64_t n_embd_head_qk_rope = hparams.n_rot;
-                    const int64_t n_embd_head_qk_nope = hparams.n_embd_head_k - hparams.n_rot;
-
-                    const int64_t q_lora_rank  = hparams.n_lora_q;
-                    const int64_t kv_lora_rank = hparams.n_lora_kv;
-
-                    const int64_t n_ff_exp        = hparams.n_ff_exp;
-                    const int64_t n_expert_shared = hparams.n_expert_shared;
-
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        if (!is_lite) {
-                            layer.attn_q_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_A_NORM, "weight", i), {q_lora_rank}, 0);
-                        }
-
-                        layer.attn_kv_a_norm = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_NORM, "weight", i), {kv_lora_rank}, 0);
-
-                        if (!is_lite) {
-                            layer.wq_a = create_tensor(tn(LLM_TENSOR_ATTN_Q_A, "weight", i), {n_embd, q_lora_rank}, 0);
-                            layer.wq_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_B, "weight", i), {q_lora_rank, n_head * n_embd_head_k}, 0);
-                        } else {
-                            layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        }
-
-                        layer.wkv_a_mqa = create_tensor(tn(LLM_TENSOR_ATTN_KV_A_MQA, "weight", i), {n_embd, kv_lora_rank + (n_embd_head_qk_rope)}, 0);
-                        layer.wkv_b     = create_tensor(tn(LLM_TENSOR_ATTN_KV_B,     "weight", i), {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)}, 0);
-                        layer.wo        = create_tensor(tn(LLM_TENSOR_ATTN_OUT,      "weight", i), {              n_head * (                      n_embd_head_v), n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        if (i < (int) hparams.n_layer_dense_lead) {
-                            layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                            layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                            layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                        } else {
-                            layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, 0);
-                            layer.ffn_exp_probs_b = create_tensor(tn(LLM_TENSOR_FFN_EXP_PROBS_B, "bias", i), {n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                            if (n_expert == 0) {
-                                throw std::runtime_error("n_expert must be > 0");
-                            }
-                            if (n_expert_used == 0) {
-                                throw std::runtime_error("n_expert_used must be > 0");
-                            }
-
-                            // MoE branch
-                            layer.ffn_gate_exps = create_tensor(tn(LLM_TENSOR_FFN_GATE_EXPS, "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
-                            layer.ffn_down_exps = create_tensor(tn(LLM_TENSOR_FFN_DOWN_EXPS, "weight", i), {n_ff_exp,   n_embd, n_expert}, 0);
-                            layer.ffn_up_exps   = create_tensor(tn(LLM_TENSOR_FFN_UP_EXPS,   "weight", i), {  n_embd, n_ff_exp, n_expert}, 0);
-
-                            // Shared expert branch
-                            layer.ffn_gate_shexp = create_tensor(tn(LLM_TENSOR_FFN_GATE_SHEXP, "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
-                            layer.ffn_down_shexp = create_tensor(tn(LLM_TENSOR_FFN_DOWN_SHEXP, "weight", i), {        n_ff_exp * n_expert_shared, n_embd}, 0);
-                            layer.ffn_up_shexp   = create_tensor(tn(LLM_TENSOR_FFN_UP_SHEXP,   "weight", i), {n_embd, n_ff_exp * n_expert_shared}, 0);
-                        }
-                    }
-                } break;
-            case LLM_ARCH_BITNET:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm     = create_tensor(tn(LLM_TENSOR_ATTN_NORM,     "weight", i), {n_embd}, 0);
-                        layer.attn_sub_norm = create_tensor(tn(LLM_TENSOR_ATTN_SUB_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq       = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wq_scale = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "scale",  i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.wk       = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wk_scale = create_tensor(tn(LLM_TENSOR_ATTN_K,   "scale",  i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.wv       = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv_scale = create_tensor(tn(LLM_TENSOR_ATTN_V,   "scale",  i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.wo       = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.wo_scale = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "scale",  i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.ffn_norm     = create_tensor(tn(LLM_TENSOR_FFN_NORM,     "weight", i), {n_embd}, 0);
-                        layer.ffn_sub_norm = create_tensor(tn(LLM_TENSOR_FFN_SUB_NORM, "weight", i), {n_ff}, 0);
-
-                        layer.ffn_gate       = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_gate_scale = create_tensor(tn(LLM_TENSOR_FFN_GATE, "scale",  i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_down       = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_scale = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "scale",  i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_up         = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_up_scale   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "scale",  i), {1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    }
-                } break;
-            case LLM_ARCH_T5:
-                {
-                    const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
-
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm     = create_tensor(tn(LLM_TENSOR_DEC_OUTPUT_NORM, "weight"), {n_embd}, 0);
-
-                    model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm_enc  = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM,  "weight", i), {n_embd}, 0);
-                        layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
-                        layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
-
-                        layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd,   n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up_enc   = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-
-                        layer.attn_norm  = create_tensor(tn(LLM_TENSOR_DEC_ATTN_NORM,  "weight", i), {n_embd}, 0);
-                        layer.attn_rel_b = create_tensor(tn(LLM_TENSOR_DEC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_DEC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_DEC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_DEC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_DEC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
-
-                        layer.attn_norm_cross  = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_NORM,  "weight", i), {n_embd}, 0);
-                        // this tensor seems to be unused in HF transformers implementation
-                        layer.attn_rel_b_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wq_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wk_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wv_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
-                        layer.wo_cross = create_tensor(tn(LLM_TENSOR_DEC_CROSS_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_DEC_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_DEC_FFN_GATE, "weight", i), {n_embd,   n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_DEC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_DEC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_T5ENCODER:
-                {
-                    const auto n_rel_attn_bkts = hparams.n_rel_attn_bkts;
-
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm_enc  = create_tensor(tn(LLM_TENSOR_ENC_ATTN_NORM,  "weight", i), {n_embd}, 0);
-                        layer.attn_rel_b_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_REL_B, "weight", i), {n_head, n_rel_attn_bkts}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wq_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_Q,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wk_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wv_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
-                        layer.wo_enc = create_tensor(tn(LLM_TENSOR_ENC_ATTN_OUT, "weight", i), {n_embd_v_gqa, n_embd}, 0);
-
-                        layer.ffn_norm_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_gate_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_GATE, "weight", i), {n_embd,   n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_down_enc = create_tensor(tn(LLM_TENSOR_ENC_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up_enc   = create_tensor(tn(LLM_TENSOR_ENC_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_JAIS:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM,   "bias", i),   {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
-
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd}, 0);
-
-                        layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_gate_b = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "bias", i),   {n_ff}, 0);
-
-                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff}, 0);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i),   {n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_CHATGLM:
-                {
-                    model.tok_embd   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0);
-                        layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i),   {n_embd + 2*n_embd_gqa}, 0);
-
-                        layer.wo   = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff * 2}, 0);
-
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, 0);
-                    }
-                } break;
-            case LLM_ARCH_NEMOTRON:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
-                    model.output        = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        // optional bias tensors
-                        layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.bo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-                        layer.ffn_norm_b = create_tensor(tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, 0);
-
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-
-                        // optional MLP bias
-                        layer.ffn_down_b = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.ffn_up_b   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "bias", i), {n_ff}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    }
-                } break;
-            case LLM_ARCH_EXAONE:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, 0);
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd_head_k * n_head}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_k_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_v_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd}, 0);
-
-                        layer.ffn_norm   = create_tensor(tn(LLM_TENSOR_FFN_NORM,   "weight", i), {n_embd}, 0);
-                        layer.rope_freqs = create_tensor(tn(LLM_TENSOR_ROPE_FREQS, "weight", i), {n_rot/2}, llama_model_loader::TENSOR_NOT_REQUIRED | (i != 0 ? llama_model_loader::TENSOR_DUPLICATED : 0));
-                        layer.ffn_gate   = create_tensor(tn(LLM_TENSOR_FFN_GATE,   "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down   = create_tensor(tn(LLM_TENSOR_FFN_DOWN,   "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up     = create_tensor(tn(LLM_TENSOR_FFN_UP,     "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_RWKV6:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // Block 0, LN0
-                    model.tok_norm = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}, 0);
-                    model.tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"), {n_embd}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, 0);
-                    model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
-
-                    const int time_mix_extra_dim = hparams.time_mix_extra_dim;
-                    const int time_decay_extra_dim = hparams.time_decay_extra_dim;
-                    const int head_size = hparams.wkv_head_size;
-                    const int attn_hidden_size = n_embd;
-                    const int ffn_size = hparams.n_ff_arr[0];
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "bias", i),   {n_embd}, 0);
-
-                        layer.attn_norm_2   = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, 0);
-                        layer.attn_norm_2_b = create_tensor(tn(LLM_TENSOR_ATTN_NORM_2, "bias", i),   {n_embd}, 0);
-
-                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
-                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
-
-                        layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
-                        layer.time_mix_lerp_w = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_W, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.time_mix_lerp_k = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.time_mix_lerp_v = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_V, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.time_mix_lerp_r = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.time_mix_lerp_g = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_G, "weight", i), {n_embd, 1, 1}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        GGML_ASSERT(!(layer.time_mix_lerp_fused == NULL && layer.time_mix_lerp_w == NULL));
-
-                        layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, 0);
-                        layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
-                        layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
-                        layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
-                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {attn_hidden_size, n_embd}, 0);
-                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {attn_hidden_size, n_embd}, 0);
-                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
-                        layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
-
-                        layer.time_mix_ln = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "weight", i), {n_embd}, 0);
-                        layer.time_mix_ln_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_LN, "bias", i), {n_embd}, 0);
-                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
-
-                        layer.channel_mix_lerp_k = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_K, "weight", i), {n_embd, 1, 1}, 0);
-                        layer.channel_mix_lerp_r = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_LERP_R, "weight", i), {n_embd, 1, 1}, 0);
-
-                        layer.channel_mix_key = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_KEY, "weight", i), {n_embd, ffn_size}, 0);
-                        layer.channel_mix_value = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_VALUE, "weight", i), {ffn_size, n_embd}, 0);
-                        layer.channel_mix_receptance = create_tensor(tn(LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "weight", i), {n_embd, n_embd}, 0);
-                    }
-
-                } break;
-            case LLM_ARCH_RWKV6QWEN2:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    model.output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, 0);
-
-                    const int time_mix_extra_dim = hparams.time_mix_extra_dim;
-                    const int time_decay_extra_dim = hparams.time_decay_extra_dim;
-                    const int head_size = hparams.wkv_head_size;
-                    const int attn_hidden_size = n_embd;
-                    const int n_head_kv = hparams.n_head_kv();
-                    int attn_key_value_size;
-                    if (n_head_kv == 0 || attn_hidden_size / head_size == n_head_kv) {
-                        attn_key_value_size = attn_hidden_size;
-                    } else {
-                        attn_key_value_size = n_head_kv * head_size;
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.time_mix_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W1, "weight", i), {n_embd, time_mix_extra_dim * 5}, 0);
-                        layer.time_mix_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_W2, "weight", i), {time_mix_extra_dim, n_embd, 5}, 0);
-
-                        layer.time_mix_lerp_x = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_X, "weight", i), {n_embd, 1, 1}, 0);
-                        layer.time_mix_lerp_fused = create_tensor(tn(LLM_TENSOR_TIME_MIX_LERP_FUSED, "weight", i), {n_embd, 1, 1, 5}, 0);
-
-                        layer.time_mix_first = create_tensor(tn(LLM_TENSOR_TIME_MIX_FIRST, "weight", i), {head_size, n_embd / head_size}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.time_mix_decay = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY, "weight", i), {n_embd}, 0);
-                        layer.time_mix_decay_w1 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W1, "weight", i), {n_embd, time_decay_extra_dim}, 0);
-                        layer.time_mix_decay_w2 = create_tensor(tn(LLM_TENSOR_TIME_MIX_DECAY_W2, "weight", i), {time_decay_extra_dim, attn_hidden_size}, 0);
-                        layer.time_mix_key = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "weight", i), {n_embd, attn_key_value_size}, 0);
-                        layer.time_mix_value = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "weight", i), {n_embd, attn_key_value_size}, 0);
-                        layer.time_mix_receptance = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "weight", i), {attn_hidden_size, n_embd}, 0);
-                        layer.time_mix_gate = create_tensor(tn(LLM_TENSOR_TIME_MIX_GATE, "weight", i), {attn_hidden_size, n_embd}, 0);
-                        // optional bias tensors
-                        layer.time_mix_key_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_KEY, "bias", i), {attn_key_value_size}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.time_mix_value_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_VALUE, "bias", i), {attn_key_value_size}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.time_mix_receptance_b = create_tensor(tn(LLM_TENSOR_TIME_MIX_RECEPTANCE, "bias", i), {attn_hidden_size}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.time_mix_output = create_tensor(tn(LLM_TENSOR_TIME_MIX_OUTPUT, "weight", i), {n_embd, attn_hidden_size}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_CHAMELEON:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, 0);
-
-                    // output
-                    model.output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                    model.output      = create_tensor(tn(LLM_TENSOR_OUTPUT,      "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                    // if output is NULL, init from the input tok embed
-                    if (model.output == NULL) {
-                        model.output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                    }
-
-                    for (int i = 0; i < n_layer; ++i) {
-                        auto & layer = model.layers[i];
-
-                        layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0);
-                        layer.attn_q_norm = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd_head_k, n_head}, 0);
-                        layer.attn_k_norm = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd_head_k, n_head_kv}, 0);
-                        layer.attn_q_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i),  {n_embd_head_k, n_head}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                        layer.attn_k_norm_b = create_tensor(tn(LLM_TENSOR_ATTN_K_NORM, "bias", i),  {n_embd_head_k, n_head_kv}, llama_model_loader::TENSOR_NOT_REQUIRED);
-
-                        layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q,   "weight", i), {n_embd, n_embd}, 0);
-                        layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, 0);
-                        layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0);
-
-                        layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);
-
-                        layer.ffn_gate = create_tensor(tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd,   n_ff}, 0);
-                        layer.ffn_down = create_tensor(tn(LLM_TENSOR_FFN_DOWN, "weight", i), {  n_ff, n_embd}, 0);
-                        layer.ffn_up   = create_tensor(tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd,   n_ff}, 0);
-                    }
-                } break;
-            case LLM_ARCH_WAVTOKENIZER_DEC:
-                {
-                    model.tok_embd = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {hparams.n_embd_features, n_vocab}, 0);
-
-                    model.conv1d   = create_tensor(tn(LLM_TENSOR_CONV1D, "weight"), {7, hparams.n_embd_features, hparams.posnet.n_embd}, 0);
-                    model.conv1d_b = create_tensor(tn(LLM_TENSOR_CONV1D, "bias"),   {1, hparams.posnet.n_embd}, 0);
-
-                    // posnet
-                    {
-                        const int64_t n_embd = hparams.posnet.n_embd;
-
-                        for (uint32_t i = 0; i < hparams.posnet.n_layer; ++i) {
-                            auto & layer = model.layers[i].posnet;
-
-                            // posnet:
-                            //
-                            //  - resnet
-                            //  - resnet
-                            //  - attn
-                            //  - resnet
-                            //  - resnet
-                            //  - norm
-                            //
-                            switch (i) {
-                                case 0:
-                                case 1:
-                                case 3:
-                                case 4:
-                                    {
-                                        layer.norm1   = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "weight", i), {1, n_embd}, 0);
-                                        layer.norm1_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM1, "bias",   i), {1, n_embd}, 0);
-
-                                        layer.conv1   = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "weight", i), {3, n_embd, n_embd}, 0);
-                                        layer.conv1_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV1, "bias",   i), {1, n_embd}, 0);
-
-                                        layer.norm2   = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "weight", i), {1, n_embd}, 0);
-                                        layer.norm2_b = create_tensor(tn(LLM_TENSOR_POS_NET_NORM2, "bias",   i), {1, n_embd}, 0);
-
-                                        layer.conv2   = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "weight", i), {3, n_embd, n_embd}, 0);
-                                        layer.conv2_b = create_tensor(tn(LLM_TENSOR_POS_NET_CONV2, "bias",   i), {1, n_embd}, 0);
-                                    } break;
-                                case 2:
-                                    {
-                                        layer.attn_norm   = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
-                                        layer.attn_norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias",   i), {1, n_embd}, 0);
-
-                                        layer.attn_q      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q,    "weight", i), {1, n_embd, n_embd}, 0);
-                                        layer.attn_q_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_Q,    "bias",   i), {1, n_embd}, 0);
-
-                                        layer.attn_k      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K,    "weight", i), {1, n_embd, n_embd}, 0);
-                                        layer.attn_k_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_K,    "bias",   i), {1, n_embd}, 0);
-
-                                        layer.attn_v      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V,    "weight", i), {1, n_embd, n_embd}, 0);
-                                        layer.attn_v_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_V,    "bias",   i), {1, n_embd}, 0);
-
-                                        layer.attn_o      = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT,  "weight", i), {1, n_embd, n_embd}, 0);
-                                        layer.attn_o_b    = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_OUT,  "bias",   i), {1, n_embd}, 0);
-                                    } break;
-                                case 5:
-                                    {
-                                        layer.norm   = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "weight", i), {1, n_embd}, 0);
-                                        layer.norm_b = create_tensor(tn(LLM_TENSOR_POS_NET_ATTN_NORM, "bias",   i), {1, n_embd}, 0);
-                                    } break;
-                                default: GGML_ABORT("unknown posnet layer");
-                            };
-                        }
-                    }
-
-                    GGML_ASSERT(hparams.posnet.n_embd == hparams.convnext.n_embd);
-
-                    model.tok_norm   = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {hparams.posnet.n_embd}, 0);
-                    model.tok_norm_b = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {hparams.posnet.n_embd}, 0);
-
-                    // convnext
-                    {
-                        const int64_t n_embd = hparams.convnext.n_embd;
-
-                        for (uint32_t i = 0; i < hparams.convnext.n_layer; ++i) {
-                            auto & layer = model.layers[i].convnext;
-
-                            layer.dw     = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW,    "weight", i), {7, 1, n_embd}, 0);
-                            layer.dw_b   = create_tensor(tn(LLM_TENSOR_CONVNEXT_DW,    "bias",   i), {1, n_embd}, 0);
-
-                            layer.norm   = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM,  "weight", i), {n_embd}, 0);
-                            layer.norm_b = create_tensor(tn(LLM_TENSOR_CONVNEXT_NORM,  "bias",   i), {n_embd}, 0);
-
-                            layer.pw1    = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1,   "weight", i), {n_embd, n_ff}, 0);
-                            layer.pw1_b  = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW1,   "bias",   i), {n_ff}, 0);
-
-                            layer.pw2    = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2,   "weight", i), {n_ff, n_embd}, 0);
-                            layer.pw2_b  = create_tensor(tn(LLM_TENSOR_CONVNEXT_PW2,   "bias",   i), {n_embd}, 0);
-
-                            layer.gamma  = create_tensor(tn(LLM_TENSOR_CONVNEXT_GAMMA, "weight", i), {n_embd}, 0);
-                        }
-
-                        // output
-                        model.output_norm   = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);
-                        model.output_norm_b = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd}, 0);
-                    }
-
-                    model.output   = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {hparams.convnext.n_embd, n_embd}, 0);
-                    model.output_b = create_tensor(tn(LLM_TENSOR_OUTPUT, "bias"),   {n_embd}, 0);
-                } break;
-            default:
-                throw std::runtime_error("unknown architecture");
-        }
-
-        if (n_moved_tensors > 0) {
-            LLAMA_LOG_DEBUG("%s: tensor '%s' (%s) (and %d others) cannot be used with preferred buffer type %s, using %s instead\n",
-                __func__, first_moved_tensor->name, ggml_type_name(first_moved_tensor->type), n_moved_tensors - 1,
-                ggml_backend_buft_name(first_moved_from_buft), ggml_backend_buft_name(first_moved_to_buft));
-        }
-    }
-
-    ml.done_getting_tensors();
-
-    ml.init_mappings(true, use_mlock ? &model.mlock_mmaps : nullptr);
-    model.mappings.reserve(ml.mappings.size());
-
-    // create the backend buffers
-    std::vector> ctx_bufs;
-    ctx_bufs.reserve(ctx_map.size());
-
-    // Ensure we have enough capacity for the maximum backend buffer we will potentially create
-    const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
-    model.bufs.reserve(n_max_backend_buffer);
-
-    for (auto & it : ctx_map) {
-        ggml_backend_buffer_type_t buft = it.first;
-        ggml_context * ctx              = it.second;
-
-        // skip contexts without tensors
-        if (ggml_get_first_tensor(ctx) == nullptr) {
-            continue;
-        }
-
-        llama_buf_map bufs;
-        bufs.reserve(n_max_backend_buffer);
-
-        // check if it is possible to use buffer_from_host_ptr with this buffer type
-        ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft);
-        if (!dev) {
-            // FIXME: workaround for CPU backend buft having a NULL device
-            dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
-        }
-        ggml_backend_dev_props props;
-        ggml_backend_dev_get_props(dev, &props);
-        bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
-        bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev);
-
-        if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) {
-            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
-                // only the mmap region containing the tensors in the model is mapped to the backend buffer
-                // this is important for metal with apple silicon: if the entire model could be mapped to a metal buffer, then we could just use metal for all layers
-                // this allows using partial offloading when the model size exceeds the metal buffer size, but not the RAM size
-                void * addr = nullptr;
-                size_t first, last; // NOLINT
-                ml.get_mapping_range(&first, &last, &addr, idx, ctx);
-                if (first >= last) {
-                    continue;
-                }
-                const size_t max_size = ggml_get_max_tensor_size(ctx);
-                ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
-                if (buf == nullptr) {
-                    throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
-                }
-                model.bufs.emplace_back(buf);
-                bufs.emplace(idx, buf);
-            }
-        }
-        else {
-            ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
-            if (buf == nullptr) {
-                throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
-            }
-            model.bufs.emplace_back(buf);
-            if (use_mlock && ggml_backend_buffer_is_host(buf)) {
-                model.mlock_bufs.emplace_back(new llama_mlock);
-                auto & mlock_buf = model.mlock_bufs.back();
-                mlock_buf->init   (ggml_backend_buffer_get_base(buf));
-                mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
-            }
-            for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
-                bufs.emplace(idx, buf);
-            }
-        }
-
-        if (bufs.empty()) {
-            throw std::runtime_error("failed to allocate buffer");
-        }
-
-        for (auto & buf : bufs) {
-            // indicate that this buffer contains weights
-            // this is used by ggml_backend_sched to improve op scheduling: ops that use a weight are preferably scheduled to the backend that contains the weight
-            ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
-        }
-
-        ctx_bufs.emplace_back(ctx, bufs);
-    }
-
-    if (llama_supports_gpu_offload()) {
-        const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
-
-        LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu);
-        if (n_gpu_layers > (int) hparams.n_layer) {
-            LLAMA_LOG_INFO("%s: offloading output layer to GPU\n", __func__);
-        }
-
-        const int max_backend_supported_layers = hparams.n_layer + 1;
-        const int max_offloadable_layers       = hparams.n_layer + 1;
-
-        LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
-    }
-
-    // print memory requirements per buffer type
-    for (auto & buf : model.bufs) {
-        LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0);
-    }
-
-    // populate tensors_by_name
-    for (auto & ctx : model.ctxs) {
-        for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) {
-            model.tensors_by_name.emplace_back(ggml_get_name(cur), cur);
-        }
-    }
-
-    // load tensor data
-    for (auto & it : ctx_bufs) {
-        ggml_context * ctx = it.first;
-        auto & bufs = it.second;
-        if (!ml.load_all_data(ctx, bufs, use_mlock ? &model.mlock_mmaps : NULL, progress_callback, progress_callback_user_data)) {
-            return false;
-        }
-    }
-
-    if (use_mmap_buffer) {
-        for (auto & mapping : ml.mappings) {
-            model.mappings.emplace_back(std::move(mapping));
-        }
-    }
-
-    return true;
-}
-
 // Returns 0 on success, -1 on error, and -2 on cancellation via llama_progress_callback
 static int llama_model_load(const std::string & fname, llama_model & model, llama_model_params & params) {
-    model.t_start_us = ggml_time_us();
+    // loading time will be recalculated after the first eval, so
+    // we take page faults deferred by mmap() into consideration
+    model.t_load_us = 0;
+    time_meas tm(model.t_load_us);
+
+    model.t_start_us = tm.t_start_us;
 
     try {
         llama_model_loader ml(fname, params.use_mmap, params.check_tensors, params.kv_overrides);
 
+        ml.print_info();
+
         model.hparams.vocab_only = params.vocab_only;
 
         try {
-            llm_load_arch(ml, model);
+            model.load_arch(ml);
         } catch(const std::exception & e) {
             throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
         }
         try {
-            llm_load_hparams(ml, model);
+            model.load_hparams(ml);
         } catch(const std::exception & e) {
             throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
         }
         try {
-            llm_load_vocab(ml, model);
+            model.load_vocab(ml);
         } catch(const std::exception & e) {
             throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
         }
 
-        llm_load_stats(ml, model);
-        llm_load_print_meta(ml, model);
-
-        if (model.vocab.type != LLAMA_VOCAB_TYPE_NONE &&
-            model.hparams.n_vocab != model.vocab.id_to_token.size()) {
-            throw std::runtime_error("vocab size mismatch");
-        }
+        model.load_stats(ml);
+        model.print_info();
 
         if (params.vocab_only) {
             LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__);
             return 0;
         }
 
-        if (!llm_load_tensors(
-            ml, model, params.n_gpu_layers, params.split_mode,  params.main_gpu, params.tensor_split, params.use_mlock,
-            params.progress_callback, params.progress_callback_user_data
-        )) {
+        if (!model.load_tensors(ml)) {
             return -2;
         }
     } catch (const std::exception & err) {
@@ -2595,10 +78,6 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
         return -1;
     }
 
-    // loading time will be recalculate after the first eval, so
-    // we take page faults deferred by mmap() into consideration
-    model.t_load_us = ggml_time_us() - model.t_start_us;
-
     return 0;
 }
 
@@ -2646,16 +125,16 @@ static struct ggml_tensor * llm_build_inp_embd(
         inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens);
 
         // apply lora for embedding tokens if needed
-        for (auto & it : lctx.lora_adapters) {
-            struct llama_lora_weight * lora = it.first->get_weight(tok_embd);
-            if (lora == nullptr) {
+        for (auto & it : lctx.lora) {
+            struct llama_adapter_lora_weight * lw = it.first->get_weight(tok_embd);
+            if (lw == nullptr) {
                 continue;
             }
             const float adapter_scale = it.second;
-            const float scale = lora->get_scale(it.first->alpha, adapter_scale);
+            const float scale = lw->get_scale(it.first->alpha, adapter_scale);
             struct ggml_tensor * inpL_delta = ggml_scale(ctx, ggml_mul_mat(
-                ctx, lora->b, // non-transposed lora_b
-                ggml_get_rows(ctx, lora->a, lctx.inp_tokens)
+                ctx, lw->b, // non-transposed lora_b
+                ggml_get_rows(ctx, lw->a, lctx.inp_tokens)
             ), scale);
             inpL = ggml_add(ctx, inpL, inpL_delta);
         }
@@ -2726,16 +205,16 @@ static struct ggml_tensor * llm_build_lora_mm(
           struct ggml_tensor * w,
           struct ggml_tensor * cur) {
     struct ggml_tensor * res = ggml_mul_mat(ctx0, w, cur);
-    for (auto & it : lctx.lora_adapters) {
-        struct llama_lora_weight * lora = it.first->get_weight(w);
-        if (lora == nullptr) {
+    for (auto & it : lctx.lora) {
+        struct llama_adapter_lora_weight * lw = it.first->get_weight(w);
+        if (lw == nullptr) {
             continue;
         }
         const float adapter_scale = it.second;
-        const float scale = lora->get_scale(it.first->alpha, adapter_scale);
+        const float scale = lw->get_scale(it.first->alpha, adapter_scale);
         struct ggml_tensor * ab_cur = ggml_mul_mat(
-            ctx0, lora->b,
-            ggml_mul_mat(ctx0, lora->a, cur)
+            ctx0, lw->b,
+            ggml_mul_mat(ctx0, lw->a, cur)
         );
         ab_cur = ggml_scale(ctx0, ab_cur, scale);
         res = ggml_add(ctx0, res, ab_cur);
@@ -2751,17 +230,17 @@ static struct ggml_tensor * llm_build_lora_mm_id(
           struct ggml_tensor * cur, // struct ggml_tensor * b
           struct ggml_tensor * ids) {
     struct ggml_tensor * res = ggml_mul_mat_id(ctx0, w, cur, ids);
-    for (auto & it : lctx.lora_adapters) {
-        struct llama_lora_weight * lora = it.first->get_weight(w);
-        if (lora == nullptr) {
+    for (auto & it : lctx.lora) {
+        struct llama_adapter_lora_weight * lw = it.first->get_weight(w);
+        if (lw == nullptr) {
             continue;
         }
         const float alpha = it.first->alpha;
-        const float rank  = (float) lora->b->ne[0];
+        const float rank  = (float) lw->b->ne[0];
         const float scale = alpha ? it.second * alpha / rank : it.second;
         struct ggml_tensor * ab_cur = ggml_mul_mat_id(
-            ctx0, lora->b,
-            ggml_mul_mat_id(ctx0, lora->a, cur, ids),
+            ctx0, lw->b,
+            ggml_mul_mat_id(ctx0, lw->a, cur, ids),
             ids
         );
         ab_cur = ggml_scale(ctx0, ab_cur, scale);
@@ -3686,7 +1165,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_k_shift() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         GGML_ASSERT(kv_self.size == n_ctx);
 
@@ -3736,7 +1215,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_defrag(const std::vector & ids) {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         for (uint32_t i = 0; i < ids.size(); ++i) {
             const uint32_t id = ids[i];
@@ -3995,7 +1474,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_llama() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -4161,7 +1640,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_deci() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -4322,7 +1801,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_baichuan() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -4334,7 +1813,7 @@ struct llm_build_context {
         inpL = llm_build_inp_embd(ctx0, lctx, hparams, ubatch, model.tok_embd, cb);
 
         // inp_pos - contains the positions
-        struct ggml_tensor * inp_pos = model.type == MODEL_7B ? build_inp_pos() : nullptr;
+        struct ggml_tensor * inp_pos = model.type == LLM_TYPE_7B ? build_inp_pos() : nullptr;
 
         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
         struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
@@ -4359,7 +1838,7 @@ struct llm_build_context {
                 cb(Vcur, "Vcur", il);
 
                 switch (model.type) {
-                    case MODEL_7B:
+                    case LLM_TYPE_7B:
                         Qcur = ggml_rope_ext(
                             ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos, nullptr,
                             n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
@@ -4371,7 +1850,7 @@ struct llm_build_context {
                             ext_factor, attn_factor, beta_fast, beta_slow
                         );
                         break;
-                    case MODEL_13B:
+                    case LLM_TYPE_13B:
                         Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd/n_head, n_head, n_tokens);
                         Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd/n_head, n_head, n_tokens);
                         break;
@@ -4437,7 +1916,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_xverse() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -4540,7 +2019,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_falcon() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -4660,7 +2139,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_grok() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -4819,7 +2298,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_dbrx() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -4947,7 +2426,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_starcoder() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -5051,7 +2530,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_refact() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -5145,7 +2624,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_bert() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -5339,7 +2818,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_bloom() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -5440,7 +2919,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_mpt() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -5730,7 +3209,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_qwen() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -5842,7 +3321,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_qwen2() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -5954,7 +3433,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_qwen2vl() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
         GGML_ASSERT(n_embd_head == hparams.n_rot);
@@ -6072,7 +3551,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_qwen2moe() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -6220,7 +3699,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_phi2() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -6341,7 +3820,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_phi3() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa = hparams.n_embd_v_gqa();
@@ -6595,7 +4074,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_gpt2() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -6700,7 +4179,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_codeshell() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -6811,7 +4290,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_orion() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -6929,7 +4408,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_internlm2() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -7047,7 +4526,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_minicpm3() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         //TODO: if the model varies, these parameters need to be read from the model
         const int64_t n_embd_base = 256;
@@ -7256,7 +4735,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_gemma() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head_k = hparams.n_embd_head_k;
 
@@ -7364,7 +4843,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_gemma2() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head_k = hparams.n_embd_head_k;
 
@@ -7414,9 +4893,9 @@ struct llm_build_context {
 
                 // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
                 switch (model.type) {
-                    case llm_type::MODEL_2B:
-                    case llm_type::MODEL_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
-                    case llm_type::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
+                    case LLM_TYPE_2B:
+                    case LLM_TYPE_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
+                    case LLM_TYPE_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
                     default: GGML_ABORT("fatal error");
                 };
                 cb(Qcur, "Qcur_scaled", il);
@@ -7500,7 +4979,7 @@ struct llm_build_context {
 
 
     struct ggml_cgraph * build_starcoder2() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -7619,7 +5098,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_mamba() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;
@@ -7674,7 +5153,7 @@ struct llm_build_context {
 
     struct ggml_cgraph * build_command_r() {
 
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -7822,7 +5301,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_cohere2() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -7959,7 +5438,7 @@ struct llm_build_context {
     //   * removed bias
     //   * removed MoE
     struct ggml_cgraph * build_olmo() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -8083,7 +5562,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_olmo2() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -8211,7 +5690,7 @@ struct llm_build_context {
     //   * removed bias
     //   * added q, k norm
     struct ggml_cgraph * build_olmoe() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -8337,7 +5816,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_openelm() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -8462,7 +5941,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_gptneox() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -8604,7 +6083,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_arctic() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -8738,7 +6217,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_deepseek() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -8895,7 +6374,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_deepseek2() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -9125,7 +6604,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_bitnet() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -9276,7 +6755,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_t5_enc() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -9408,7 +6887,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_t5_dec() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -9613,7 +7092,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_jais() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -9705,7 +7184,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_chatglm() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
@@ -9819,7 +7298,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_nemotron() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
@@ -9940,7 +7419,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_exaone() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -10067,7 +7546,7 @@ struct llm_build_context {
     }
 
     ggml_cgraph * build_rwkv6() {
-        ggml_cgraph *gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // Token shift state dimensions should be 2 * n_emb
         GGML_ASSERT(n_embd == hparams.n_embd_k_s() / 2);
@@ -10181,7 +7660,7 @@ struct llm_build_context {
 
     // ref: https://huggingface.co/recursal/QRWKV6-32B-Instruct-Preview-v0.1/blob/main/modeling_rwkv6qwen2.py
     ggml_cgraph * build_rwkv6qwen2() {
-        ggml_cgraph *gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         GGML_ASSERT(n_embd == hparams.n_embd_k_s());
 
@@ -10298,7 +7777,7 @@ struct llm_build_context {
     //   * removed bias
     //   * removed MoE
     struct ggml_cgraph * build_chameleon() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         // mutable variable, needed during the last layer of the computation to skip unused tokens
         int32_t n_tokens = this->n_tokens;
@@ -10470,7 +7949,7 @@ struct llm_build_context {
     }
 
     struct ggml_cgraph * build_wavtokenizer_dec() {
-        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, llama_model_max_nodes(model), false);
+        struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, model.max_nodes(), false);
 
         struct ggml_tensor * cur;
         struct ggml_tensor * inpL;
@@ -10679,12 +8158,12 @@ static struct ggml_cgraph * llama_build_graph(
 
         // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends
         // FIXME: fix in ggml_backend_sched
-        const bool full_offload = lctx.model.n_gpu_layers > (int)lctx.model.hparams.n_layer;
+        const bool full_offload = lctx.model.params.n_gpu_layers > (int) lctx.model.hparams.n_layer;
         if (ubatch.n_tokens < 32 || full_offload) {
             if (il != -1 && strcmp(name, "norm") == 0) {
-                const auto & dev_layer = lctx.model.dev_layer.at(il);
+                const auto & dev_layer = lctx.model.dev_layer(il);
                 for (auto & backend : lctx.backends) {
-                    if (ggml_backend_get_device(backend.get()) == dev_layer.dev) {
+                    if (ggml_backend_get_device(backend.get()) == dev_layer) {
                         if (ggml_backend_supports_op(backend.get(), cur)) {
                             ggml_backend_sched_set_tensor_backend(lctx.sched.get(), cur, backend.get());
                         }
@@ -10983,6 +8462,7 @@ static int llama_decode_impl(
     const uint32_t n_tokens_all = batch.n_tokens;
 
     const auto & model   = lctx.model;
+    const auto & vocab   = model.vocab;
     const auto & hparams = model.hparams;
     const auto & cparams = lctx.cparams;
 
@@ -10990,7 +8470,7 @@ static int llama_decode_impl(
 
     if (batch.token) {
         for (uint32_t i = 0; i < n_tokens_all; ++i) {
-            if (batch.token[i] < 0 || (uint32_t)batch.token[i] >= model.vocab.n_vocab) {
+            if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) {
                 LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]);
                 return -1;
             }
@@ -11010,7 +8490,7 @@ static int llama_decode_impl(
     llama_kv_slot_restorer kv_slot_restorer(kv_self);
 
     const int64_t n_embd  = hparams.n_embd;
-    const int64_t n_vocab = hparams.n_vocab;
+    const int64_t n_vocab = vocab.n_tokens();
 
     uint32_t n_outputs = 0;
     uint32_t n_outputs_prev = 0;
@@ -11325,7 +8805,7 @@ static int llama_encode_impl(
 
     if (batch.token) {
         for (uint32_t i = 0; i < n_tokens; ++i) {
-            if (batch.token[i] < 0 || (uint32_t)batch.token[i] >= model.vocab.n_vocab) {
+            if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) {
                 LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]);
                 return -1;
             }
@@ -11502,9 +8982,9 @@ static void llama_kv_cache_defrag_impl(struct llama_context & lctx) {
     // each move requires 6*n_layer tensors (see build_defrag)
     //   - source view, destination view, copy operation
     //   - x2 for keys and values
-    //const uint32_t max_moves = llama_model_max_nodes(model)/(6*n_layer);
+    //const uint32_t max_moves = model.max_nodes()/(6*n_layer);
     // TODO: tmp fix https://github.com/ggerganov/llama.cpp/issues/6685#issuecomment-2057579516
-    const uint32_t max_moves = (llama_model_max_nodes(lctx.model) - 2*n_layer)/(6*n_layer);
+    const uint32_t max_moves = (lctx.model.max_nodes() - 2*n_layer)/(6*n_layer);
 
     // determine which KV cells to move where
     //
@@ -11751,7 +9231,7 @@ static void llama_kv_cache_update_impl(struct llama_context & lctx) {
         // build worst-case graph
         uint32_t n_seqs = 1; // TODO: worst-case number of sequences
         uint32_t n_tokens = std::min(lctx.cparams.n_ctx, lctx.cparams.n_ubatch);
-        llama_token token = llama_token_bos(&lctx.model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
+        llama_token token = lctx.model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
         llama_ubatch ubatch = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
         ggml_cgraph * gf = llama_build_graph(lctx, ubatch, true);
 
@@ -11763,39 +9243,38 @@ static void llama_kv_cache_update_impl(struct llama_context & lctx) {
     }
 }
 
-int32_t llama_lora_adapter_set(
+int32_t llama_set_adapter_lora(
             struct llama_context * ctx,
-            struct llama_lora_adapter * adapter,
+            struct llama_adapter_lora * adapter,
             float scale) {
-    ctx->lora_adapters[adapter] = scale;
+    ctx->lora[adapter] = scale;
     return 0;
 }
 
-int32_t llama_lora_adapter_remove(
+int32_t llama_rm_adapter_lora(
             struct llama_context * ctx,
-            struct llama_lora_adapter * adapter) {
-    auto pos = ctx->lora_adapters.find(adapter);
-    if (pos != ctx->lora_adapters.end()) {
-        ctx->lora_adapters.erase(pos);
+            struct llama_adapter_lora * adapter) {
+    auto pos = ctx->lora.find(adapter);
+    if (pos != ctx->lora.end()) {
+        ctx->lora.erase(pos);
         return 0;
     }
 
     return -1;
 }
 
-void llama_lora_adapter_clear(struct llama_context * ctx) {
-    ctx->lora_adapters.clear();
+void llama_clear_adapter_lora(struct llama_context * ctx) {
+    ctx->lora.clear();
 }
 
-// TODO: tmp
-int32_t llama_control_vector_apply(
-        struct llama_context * lctx,
+int32_t llama_apply_adapter_cvec(
+        struct llama_context * ctx,
                  const float * data,
                       size_t   len,
                      int32_t   n_embd,
                      int32_t   il_start,
                      int32_t   il_end) {
-    return llama_control_vector_apply(lctx->cvec, lctx->model, data, len, n_embd, il_start, il_end);
+    return ctx->cvec.apply(ctx->model, data, len, n_embd, il_start, il_end);
 }
 
 //
@@ -11906,7 +9385,7 @@ struct llama_model * llama_model_load_from_file(
         struct llama_model_params params) {
     ggml_time_init();
 
-    llama_model * model = new llama_model;
+    llama_model * model = new llama_model(params);
 
     unsigned cur_percentage = 0;
     if (params.progress_callback == NULL) {
@@ -12006,7 +9485,7 @@ struct llama_model * llama_model_load_from_file(
         LLAMA_LOG_INFO("%s: using device %s (%s) - %zu MiB free\n", __func__, ggml_backend_dev_name(dev), ggml_backend_dev_description(dev), free/1024/1024);
     }
 
-    int status = llama_model_load(path_model, *model, params);
+    const int status = llama_model_load(path_model, *model, params);
     GGML_ASSERT(status <= 0);
     if (status < 0) {
         if (status == -1) {
@@ -12022,7 +9501,7 @@ struct llama_model * llama_model_load_from_file(
     return model;
 }
 
-struct llama_context * llama_new_context_with_model(
+struct llama_context * llama_init_from_model(
                  struct llama_model * model,
         struct llama_context_params   params) {
 
@@ -12280,7 +9759,7 @@ struct llama_context * llama_new_context_with_model(
                 backend_ptrs.push_back(backend.get());
             }
 
-            const size_t max_nodes = llama_model_max_nodes(*model);
+            const size_t max_nodes = model->max_nodes();
 
             // buffer used to store the computation graph and the tensor meta data
             ctx->buf_compute_meta.resize(ggml_tensor_overhead()*max_nodes + ggml_graph_overhead_custom(max_nodes, false));
@@ -12288,9 +9767,9 @@ struct llama_context * llama_new_context_with_model(
             // TODO: move these checks to ggml_backend_sched
             // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary
             bool pipeline_parallel =
-                llama_get_device_count(*model) > 1 &&
-                model->n_gpu_layers > (int)model->hparams.n_layer &&
-                model->split_mode == LLAMA_SPLIT_MODE_LAYER &&
+                model->n_devices() > 1 &&
+                model->params.n_gpu_layers > (int)model->hparams.n_layer &&
+                model->params.split_mode == LLAMA_SPLIT_MODE_LAYER &&
                 params.offload_kqv;
 
             // pipeline parallelism requires support for async compute and events in all devices
@@ -12321,7 +9800,7 @@ struct llama_context * llama_new_context_with_model(
             // initialize scheduler with the worst-case graph
             uint32_t n_seqs = 1; // TODO: worst-case number of sequences
             uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);
-            llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
+            llama_token token = ctx->model.vocab.token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
 
             llama_ubatch ubatch_pp = { true, n_tokens, n_tokens / n_seqs, n_seqs, &token, nullptr, nullptr, nullptr, nullptr, nullptr};
             ggml_cgraph * gf_pp = llama_build_graph(*ctx, ubatch_pp, true);
@@ -12373,6 +9852,12 @@ struct llama_context * llama_new_context_with_model(
     return ctx;
 }
 
+struct llama_context * llama_new_context_with_model(
+                 struct llama_model * model,
+        struct llama_context_params   params) {
+    return llama_init_from_model(model, params);
+}
+
 //
 // kv cache
 //
@@ -12470,166 +9955,18 @@ int32_t llama_decode(
     return ret;
 }
 
-//
-// vocab
-//
-
-// TODO: tmp bridges below until `struct llama_vocab` is exposed through the public API
-
-const char * llama_token_get_text(const struct llama_model * model, llama_token token) {
-    return llama_token_get_text_impl(model->vocab, token);
-}
-
-float llama_token_get_score(const struct llama_model * model, llama_token token) {
-    return llama_token_get_score_impl(model->vocab, token);
-}
-
-enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token) {
-    return llama_token_get_attr_impl(model->vocab, token);
-}
-
-bool llama_token_is_eog(const struct llama_model * model, llama_token token) {
-    return llama_token_is_eog_impl(model->vocab, token);
-}
-
-bool llama_token_is_control(const struct llama_model * model, llama_token token) {
-    return llama_token_is_control_impl(model->vocab, token);
-}
-
-llama_token llama_token_bos(const struct llama_model * model) {
-    return llama_token_bos_impl(model->vocab);
-}
-
-llama_token llama_token_eos(const struct llama_model * model) {
-    return llama_token_eos_impl(model->vocab);
-}
-
-llama_token llama_token_eot(const struct llama_model * model) {
-    return llama_token_eot_impl(model->vocab);
-}
-
-llama_token llama_token_cls(const struct llama_model * model) {
-    return llama_token_cls_impl(model->vocab);
-}
-
-llama_token llama_token_sep(const struct llama_model * model) {
-    return llama_token_sep_impl(model->vocab);
-}
-
-llama_token llama_token_nl (const struct llama_model * model) {
-    return llama_token_nl_impl(model->vocab);
-}
-
-llama_token llama_token_pad(const struct llama_model * model) {
-    return llama_token_pad_impl(model->vocab);
-}
-
-bool llama_add_bos_token(const struct llama_model * model) {
-    return llama_add_bos_token_impl(model->vocab);
-}
-
-bool llama_add_eos_token(const struct llama_model * model) {
-    return llama_add_eos_token_impl(model->vocab);
-}
-
-llama_token llama_token_prefix(const struct llama_model * model) {
-    return llama_token_prefix_impl(model->vocab);
-}
-
-llama_token llama_token_middle(const struct llama_model * model) {
-    return llama_token_middle_impl(model->vocab);
-}
-
-llama_token llama_token_suffix(const struct llama_model * model) {
-    return llama_token_suffix_impl(model->vocab);
-}
-
-llama_token llama_token_fim_pre(const struct llama_model * model) {
-    return llama_token_fim_pre_impl(model->vocab);
-}
-
-llama_token llama_token_fim_suf(const struct llama_model * model) {
-    return llama_token_fim_suf_impl(model->vocab);
-}
-
-llama_token llama_token_fim_mid(const struct llama_model * model) {
-    return llama_token_fim_mid_impl(model->vocab);
-}
-
-llama_token llama_token_fim_pad(const struct llama_model * model) {
-    return llama_token_fim_pad_impl(model->vocab);
-}
-
-llama_token llama_token_fim_rep(const struct llama_model * model) {
-    return llama_token_fim_rep_impl(model->vocab);
-}
-
-llama_token llama_token_fim_sep(const struct llama_model * model) {
-    return llama_token_fim_sep_impl(model->vocab);
-}
-
-//
-// tokenization
-//
-
-int32_t llama_tokenize(
-    const struct llama_model * model,
-                  const char * text,
-                     int32_t   text_len,
-                 llama_token * tokens,
-                     int32_t   n_tokens_max,
-                        bool   add_special,
-                        bool   parse_special) {
-    return llama_tokenize_impl(model->vocab, text, text_len, tokens, n_tokens_max, add_special, parse_special);
-}
-
-int32_t llama_token_to_piece(
-    const struct llama_model * model,
-                 llama_token   token,
-                        char * buf,
-                     int32_t   length,
-                     int32_t   lstrip,
-                        bool   special) {
-    return llama_token_to_piece_impl(model->vocab, token, buf, length, lstrip, special);
-}
-
-int32_t llama_detokenize(
-    const struct llama_model * model,
-           const llama_token * tokens,
-                     int32_t   n_tokens,
-                        char * text,
-                     int32_t   text_len_max,
-                        bool   remove_special,
-                        bool   unparse_special) {
-    return llama_detokenize_impl(model->vocab, tokens, n_tokens, text, text_len_max, remove_special, unparse_special);
-}
-
 //
 // chat templates
 //
 
 int32_t llama_chat_apply_template(
-                const struct llama_model * model,
                               const char * tmpl,
          const struct llama_chat_message * chat,
                                   size_t   n_msg,
                                     bool   add_ass,
                                     char * buf,
                                  int32_t   length) {
-    std::string curr_tmpl(tmpl == nullptr ? "" : tmpl);
-    if (tmpl == nullptr) {
-        GGML_ASSERT(model != nullptr);
-
-        // load template from model, if available
-        const auto & it = model->gguf_kv.find("tokenizer.chat_template");
-        if (it != model->gguf_kv.end() && it->second.size() > 0) {
-            curr_tmpl = it->second;
-        }
-        else {
-            // worst case: there is no information about template, we will use chatml by default
-            curr_tmpl = "chatml";  // see llm_chat_apply_template
-        }
-    }
+    const std::string curr_tmpl(tmpl == nullptr ? "chatml" : tmpl);
 
     // format the chat to string
     std::vector chat_vec;
@@ -12653,23 +9990,6 @@ int32_t llama_chat_apply_template(
     return res;
 }
 
-//
-// sampling
-//
-
-// TODO: remove indirection when vocab becomes accesible in llama-sampling.cpp
-struct llama_sampler * llama_sampler_init_grammar(const struct llama_model * model, const char * grammar_str, const char * grammar_root) {
-    return llama_sampler_init_grammar_impl(model->vocab, grammar_str, grammar_root);
-}
-
-struct llama_sampler * llama_sampler_init_infill(const struct llama_model * model) {
-    return llama_sampler_init_infill_impl(model->vocab);
-}
-
-struct llama_sampler * llama_sampler_init_dry(const struct llama_model * model, float dry_multiplier, float dry_base, int32_t dry_allowed_length, int32_t dry_penalty_last_n, const char** seq_breakers, size_t num_breakers) {
-    return llama_sampler_init_dry_impl(model->vocab, llama_n_ctx_train(model), dry_multiplier, dry_base, dry_allowed_length, dry_penalty_last_n, seq_breakers, num_breakers);
-}
-
 //
 // model split
 //
diff --git a/tests/test-autorelease.cpp b/tests/test-autorelease.cpp
index ba084a91a..35b09aaea 100644
--- a/tests/test-autorelease.cpp
+++ b/tests/test-autorelease.cpp
@@ -14,7 +14,7 @@ int main(int argc, char ** argv) {
     std::thread([&model_path]() {
         llama_backend_init();
         auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
-        auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
+        auto * ctx = llama_init_from_model(model, llama_context_default_params());
         llama_free(ctx);
         llama_model_free(model);
         llama_backend_free();
diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp
index f1f9aec4d..77d386954 100644
--- a/tests/test-chat-template.cpp
+++ b/tests/test-chat-template.cpp
@@ -157,7 +157,7 @@ int main(void) {
     }
 
     // test invalid chat template
-    res = llama_chat_apply_template(nullptr, "INVALID TEMPLATE", conversation, message_count, true, formatted_chat.data(), formatted_chat.size());
+    res = llama_chat_apply_template("INVALID TEMPLATE", conversation, message_count, true, formatted_chat.data(), formatted_chat.size());
     assert(res < 0);
 
     for (size_t i = 0; i < templates.size(); i++) {
@@ -165,7 +165,6 @@ int main(void) {
         std::string expected = expected_output[i];
         formatted_chat.resize(1024);
         res = llama_chat_apply_template(
-            nullptr,
             custom_template.c_str(),
             conversation,
             message_count,
diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index 121c2c60c..59dda4877 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -161,7 +161,7 @@ int main(int argc, char **argv) {
 
         auto cparams = llama_context_default_params();
 
-        ctx = llama_new_context_with_model(model, cparams);
+        ctx = llama_init_from_model(model, cparams);
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
diff --git a/tests/test-tokenizer-1-bpe.cpp b/tests/test-tokenizer-1-bpe.cpp
index 5718fab04..55425d88a 100644
--- a/tests/test-tokenizer-1-bpe.cpp
+++ b/tests/test-tokenizer-1-bpe.cpp
@@ -55,7 +55,7 @@ int main(int argc, char **argv) {
 
         auto cparams = llama_context_default_params();
 
-        ctx = llama_new_context_with_model(model, cparams);
+        ctx = llama_init_from_model(model, cparams);
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -64,8 +64,10 @@ int main(int argc, char **argv) {
         }
     }
 
-    //GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_BPE);
-    if (llama_vocab_type(model) != LLAMA_VOCAB_TYPE_BPE) {
+    const llama_vocab * vocab = llama_model_get_vocab(model);
+
+    //GGML_ASSERT(llama_vocab_type(vocab) == LLAMA_VOCAB_TYPE_BPE);
+    if (llama_vocab_type(vocab) != LLAMA_VOCAB_TYPE_BPE) {
         return 99;
     }
 
@@ -75,7 +77,7 @@ int main(int argc, char **argv) {
     atexit([]() { console::cleanup(); });
 #endif
 
-    const int n_vocab = llama_n_vocab(model);
+    const int n_vocab = llama_vocab_n_tokens(vocab);
 
     for (int i = 0; i < n_vocab; ++i) {
         std::string str = common_detokenize(ctx, std::vector(1, i));
diff --git a/tests/test-tokenizer-1-spm.cpp b/tests/test-tokenizer-1-spm.cpp
index ac05387c9..9e7b77f31 100644
--- a/tests/test-tokenizer-1-spm.cpp
+++ b/tests/test-tokenizer-1-spm.cpp
@@ -43,7 +43,7 @@ int main(int argc, char ** argv) {
 
         auto cparams = llama_context_default_params();
 
-        ctx = llama_new_context_with_model(model, cparams);
+        ctx = llama_init_from_model(model, cparams);
 
         if (ctx == NULL) {
             fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
@@ -52,8 +52,10 @@ int main(int argc, char ** argv) {
         }
     }
 
+    const llama_vocab * vocab = llama_model_get_vocab(model);
+
     //GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM);
-    if (llama_vocab_type(model) != LLAMA_VOCAB_TYPE_SPM) {
+    if (llama_vocab_type(vocab) != LLAMA_VOCAB_TYPE_SPM) {
         return 99;
     }
 
@@ -63,7 +65,7 @@ int main(int argc, char ** argv) {
     atexit([]() { console::cleanup(); });
 #endif
 
-    const int n_vocab = llama_n_vocab(model);
+    const int n_vocab = llama_vocab_n_tokens(vocab);
 
     for (int i = 0; i < n_vocab; ++i) {
         std::string str = common_detokenize(ctx, std::vector(1, i), true);
diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
index 9ebe6c891..c6cdcb554 100644
--- a/tests/test-tokenizer-random.py
+++ b/tests/test-tokenizer-random.py
@@ -76,7 +76,7 @@ class LibLlamaModel:
         self.ffi = libllama.ffi
         if isinstance(mparams, dict):
             mparams = libllama.model_default_params(**mparams)
-        self.model = self.lib.llama_load_model_from_file(path_model.encode(), mparams)
+        self.model = self.lib.llama_model_load_from_file(path_model.encode(), mparams)
         if not self.model:
             raise RuntimeError("error: failed to load model '%s'" % path_model)
         if isinstance(cparams, dict):
@@ -92,7 +92,7 @@ class LibLlamaModel:
         if self.ctx:
             self.lib.llama_free(self.ctx)
         if self.model:
-            self.lib.llama_free_model(self.model)
+            self.lib.llama_model_free(self.model)
         self.ctx = None
         self.model = None
         self.lib = None

From 08f10f69c38288e9e8bb1f933af63a3fc9013d40 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Sun, 12 Jan 2025 12:15:53 +0200
Subject: [PATCH 066/279] llama : remove notion of CLS token (#11064)

ggml-ci
---
 gguf-py/gguf/constants.py   |  2 --
 gguf-py/gguf/gguf_writer.py |  3 ---
 include/llama.h             |  5 ++++-
 src/llama-vocab.cpp         | 26 ++++++++------------------
 src/llama-vocab.h           |  1 -
 5 files changed, 12 insertions(+), 25 deletions(-)

diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 56aa9288d..8fe84df21 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -184,7 +184,6 @@ class Keys:
         UNK_ID               = "tokenizer.ggml.unknown_token_id"
         SEP_ID               = "tokenizer.ggml.seperator_token_id"
         PAD_ID               = "tokenizer.ggml.padding_token_id"
-        CLS_ID               = "tokenizer.ggml.cls_token_id"
         MASK_ID              = "tokenizer.ggml.mask_token_id"
         ADD_BOS              = "tokenizer.ggml.add_bos_token"
         ADD_EOS              = "tokenizer.ggml.add_eos_token"
@@ -1837,7 +1836,6 @@ KEY_TOKENIZER_EOM_ID     = Keys.Tokenizer.EOM_ID
 KEY_TOKENIZER_UNK_ID     = Keys.Tokenizer.UNK_ID
 KEY_TOKENIZER_SEP_ID     = Keys.Tokenizer.SEP_ID
 KEY_TOKENIZER_PAD_ID     = Keys.Tokenizer.PAD_ID
-KEY_TOKENIZER_CLS_ID     = Keys.Tokenizer.CLS_ID
 KEY_TOKENIZER_MASK_ID    = Keys.Tokenizer.MASK_ID
 KEY_TOKENIZER_HF_JSON    = Keys.Tokenizer.HF_JSON
 KEY_TOKENIZER_RWKV       = Keys.Tokenizer.RWKV
diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py
index bf851c92c..080d2b9dc 100644
--- a/gguf-py/gguf/gguf_writer.py
+++ b/gguf-py/gguf/gguf_writer.py
@@ -857,9 +857,6 @@ class GGUFWriter:
     def add_pad_token_id(self, id: int) -> None:
         self.add_uint32(Keys.Tokenizer.PAD_ID, id)
 
-    def add_cls_token_id(self, id: int) -> None:
-        self.add_uint32(Keys.Tokenizer.CLS_ID, id)
-
     def add_mask_token_id(self, id: int) -> None:
         self.add_uint32(Keys.Tokenizer.MASK_ID, id)
 
diff --git a/include/llama.h b/include/llama.h
index 9f04bc622..a184884c7 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -937,7 +937,6 @@ extern "C" {
     LLAMA_API llama_token llama_vocab_bos(const struct llama_vocab * vocab); // beginning-of-sentence
     LLAMA_API llama_token llama_vocab_eos(const struct llama_vocab * vocab); // end-of-sentence
     LLAMA_API llama_token llama_vocab_eot(const struct llama_vocab * vocab); // end-of-turn
-    LLAMA_API llama_token llama_vocab_cls(const struct llama_vocab * vocab); // classification
     LLAMA_API llama_token llama_vocab_sep(const struct llama_vocab * vocab); // sentence separator
     LLAMA_API llama_token llama_vocab_nl (const struct llama_vocab * vocab); // next-line
     LLAMA_API llama_token llama_vocab_pad(const struct llama_vocab * vocab); // padding
@@ -973,6 +972,10 @@ extern "C" {
     DEPRECATED(LLAMA_API llama_token llama_token_fim_rep(const struct llama_vocab * vocab), "use llama_vocab_fim_rep instead");
     DEPRECATED(LLAMA_API llama_token llama_token_fim_sep(const struct llama_vocab * vocab), "use llama_vocab_fim_sep instead");
 
+    // CLS is equivalent to BOS
+    DEPRECATED(LLAMA_API llama_token llama_vocab_cls(const struct llama_vocab * vocab), // classification
+            "use llama_vocab_bos instead");
+
     //
     // Tokenization
     //
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index ed8751737..d0fb85cea 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -1218,7 +1218,6 @@ struct llama_vocab::impl {
     llama_token special_unk_id  = 0;
     llama_token special_sep_id  = LLAMA_TOKEN_NULL;
     llama_token special_pad_id  = LLAMA_TOKEN_NULL;
-    llama_token special_cls_id  = LLAMA_TOKEN_NULL; // TODO: revisit if this is really needed https://github.com/ggerganov/llama.cpp/pull/10930
     llama_token special_mask_id = LLAMA_TOKEN_NULL;
 
     llama_token linefeed_id = 13;
@@ -1352,7 +1351,6 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             special_unk_id  = LLAMA_TOKEN_NULL;
             special_sep_id  = LLAMA_TOKEN_NULL;
             special_pad_id  = LLAMA_TOKEN_NULL;
-            special_cls_id  = LLAMA_TOKEN_NULL;
             special_mask_id = LLAMA_TOKEN_NULL;
             linefeed_id     = LLAMA_TOKEN_NULL;
 
@@ -1374,18 +1372,16 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             special_unk_id  = 0;
             special_sep_id  = LLAMA_TOKEN_NULL;
             special_pad_id  = LLAMA_TOKEN_NULL;
-            special_cls_id  = LLAMA_TOKEN_NULL;
             special_mask_id = LLAMA_TOKEN_NULL;
         } else if (tokenizer_model == "bert") {
             type = LLAMA_VOCAB_TYPE_WPM;
 
             // default special tokens
-            special_bos_id  = LLAMA_TOKEN_NULL;
+            special_bos_id  = 101;
             special_eos_id  = LLAMA_TOKEN_NULL;
             special_unk_id  = 100;
             special_sep_id  = 102;
             special_pad_id  = 0;
-            special_cls_id  = 101;
             special_mask_id = 103;
         } else if (tokenizer_model == "gpt2") {
             type = LLAMA_VOCAB_TYPE_BPE;
@@ -1420,7 +1416,6 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             special_unk_id  = LLAMA_TOKEN_NULL;
             special_sep_id  = LLAMA_TOKEN_NULL;
             special_pad_id  = LLAMA_TOKEN_NULL;
-            special_cls_id  = LLAMA_TOKEN_NULL;
             special_mask_id = LLAMA_TOKEN_NULL;
         } else if (tokenizer_model == "t5") {
             type = LLAMA_VOCAB_TYPE_UGM;
@@ -1431,7 +1426,6 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             special_unk_id  = 2;
             special_sep_id  = LLAMA_TOKEN_NULL;
             special_pad_id  = 0;
-            special_cls_id  = LLAMA_TOKEN_NULL;
             special_mask_id = LLAMA_TOKEN_NULL;
 
             const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str());
@@ -1712,7 +1706,6 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
             { LLM_KV_TOKENIZER_UNK_ID,     special_unk_id     },
             { LLM_KV_TOKENIZER_SEP_ID,     special_sep_id     },
             { LLM_KV_TOKENIZER_PAD_ID,     special_pad_id     },
-            { LLM_KV_TOKENIZER_CLS_ID,     special_cls_id     },
             { LLM_KV_TOKENIZER_MASK_ID,    special_mask_id    },
             { LLM_KV_TOKENIZER_FIM_PRE_ID, special_fim_pre_id },
             { LLM_KV_TOKENIZER_FIM_SUF_ID, special_fim_suf_id },
@@ -2406,8 +2399,8 @@ std::vector llama_vocab::impl::tokenize(
         case LLAMA_VOCAB_TYPE_WPM:
             {
                 if (add_special) {
-                    GGML_ASSERT(special_cls_id != LLAMA_TOKEN_NULL);
-                    output.push_back(special_cls_id);
+                    GGML_ASSERT(special_bos_id != LLAMA_TOKEN_NULL);
+                    output.push_back(special_bos_id);
                 }
 
                 llm_tokenizer_wpm_session session(vocab);
@@ -2700,7 +2693,6 @@ void llama_vocab::impl::print_info() const {
     if (special_unk_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: UNK token        = %d '%s'\n", __func__, special_unk_id,     id_to_token[special_unk_id].text.c_str() );  }
     if (special_sep_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: SEP token        = %d '%s'\n", __func__, special_sep_id,     id_to_token[special_sep_id].text.c_str() );  }
     if (special_pad_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: PAD token        = %d '%s'\n", __func__, special_pad_id,     id_to_token[special_pad_id].text.c_str() );  }
-    if (special_cls_id  != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: CLS token        = %d '%s'\n", __func__, special_cls_id,     id_to_token[special_cls_id].text.c_str() );  }
     if (special_mask_id != LLAMA_TOKEN_NULL)    { LLAMA_LOG_INFO( "%s: MASK token       = %d '%s'\n", __func__, special_mask_id,    id_to_token[special_mask_id].text.c_str() ); }
 
     if (linefeed_id != LLAMA_TOKEN_NULL)        { LLAMA_LOG_INFO( "%s: LF token         = %d '%s'\n", __func__, linefeed_id,        id_to_token[linefeed_id].text.c_str() ); }
@@ -2834,7 +2826,7 @@ llama_token_attr llama_vocab::token_get_attr(llama_token id) const {
 }
 
 llama_token llama_vocab::token_bos() const {
-    return pimpl->type != LLAMA_VOCAB_TYPE_WPM ? pimpl->special_bos_id : pimpl->special_cls_id;
+    return pimpl->special_bos_id;
 }
 
 llama_token llama_vocab::token_eos() const {
@@ -2853,10 +2845,6 @@ llama_token llama_vocab::token_unk() const {
     return pimpl->special_unk_id;
 }
 
-llama_token llama_vocab::token_cls() const {
-    return pimpl->special_cls_id;
-}
-
 llama_token llama_vocab::token_sep() const {
     return pimpl->special_sep_id;
 }
@@ -3069,8 +3057,9 @@ llama_token llama_vocab_eot(const struct llama_vocab * vocab) {
     return vocab->token_eot();
 }
 
+// deprecated
 llama_token llama_vocab_cls(const struct llama_vocab * vocab) {
-    return vocab->token_cls();
+    return vocab->token_bos();
 }
 
 llama_token llama_vocab_sep(const struct llama_vocab * vocab) {
@@ -3159,7 +3148,8 @@ llama_token llama_token_eot(const struct llama_vocab * vocab) {
 
 // deprecated
 llama_token llama_token_cls(const struct llama_vocab * vocab) {
-    return llama_vocab_cls(vocab);
+    //return llama_vocab_cls(vocab);
+    return llama_vocab_bos(vocab); // avoid deprecation warning
 }
 
 // deprecated
diff --git a/src/llama-vocab.h b/src/llama-vocab.h
index 020f2b533..5ce355214 100644
--- a/src/llama-vocab.h
+++ b/src/llama-vocab.h
@@ -53,7 +53,6 @@ struct llama_vocab {
     llama_token token_eot() const;
     llama_token token_eom() const;
     llama_token token_unk() const;
-    llama_token token_cls() const;
     llama_token token_sep() const;
     llama_token token_nl () const;
     llama_token token_pad() const;

From 9a483999a6fda350772aaf7bc541f1cb246f8a29 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen 
Date: Sun, 12 Jan 2025 13:45:14 +0100
Subject: [PATCH 067/279] llama : fix chat template gguf key (#11201)

---
 common/common.cpp  | 11 ++---------
 src/llama-arch.cpp |  2 +-
 2 files changed, 3 insertions(+), 10 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 39bfb0c2e..1a2e15247 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1636,15 +1636,8 @@ std::string common_detokenize(const struct llama_vocab * vocab, const std::vecto
 //
 
 std::string common_get_builtin_chat_template(const struct llama_model * model) {
-    static const char * template_key = "tokenizer.chat_template";
-    // call with NULL buffer to get the total size of the string
-    int32_t res = llama_model_meta_val_str(model, template_key, NULL, 0);
-    if (res > 0) {
-        std::vector model_template(res + 1, 0);
-        llama_model_meta_val_str(model, template_key, model_template.data(), model_template.size());
-        return std::string(model_template.data(), model_template.size() - 1);
-    }
-    return "";
+    const char * ptr_tmpl = llama_model_chat_template(model);
+    return ptr_tmpl == nullptr ? "" : ptr_tmpl;
 }
 
 bool common_chat_verify_template(const std::string & tmpl) {
diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp
index 5c1f14cfd..d7d277e72 100644
--- a/src/llama-arch.cpp
+++ b/src/llama-arch.cpp
@@ -178,7 +178,7 @@ static const std::map LLM_KV_NAMES = {
     { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap"     },
     { LLM_KV_TOKENIZER_HF_JSON,              "tokenizer.huggingface.json"              },
     { LLM_KV_TOKENIZER_RWKV,                 "tokenizer.rwkv.world"                    },
-    { LLM_KV_TOKENIZER_CHAT_TEMPLATE,        "tokenizer.chat.template"                 },
+    { LLM_KV_TOKENIZER_CHAT_TEMPLATE,        "tokenizer.chat_template"                 },
     { LLM_KV_TOKENIZER_FIM_PRE_ID,           "tokenizer.ggml.fim_pre_token_id"         },
     { LLM_KV_TOKENIZER_FIM_SUF_ID,           "tokenizer.ggml.fim_suf_token_id"         },
     { LLM_KV_TOKENIZER_FIM_MID_ID,           "tokenizer.ggml.fim_mid_token_id"         },

From 924518e2e5726e81f3aeb2518fb85963a500e93a Mon Sep 17 00:00:00 2001
From: Eric Curtin 
Date: Sun, 12 Jan 2025 18:23:10 +0000
Subject: [PATCH 068/279] Reset color before we exit (#11205)

We don't want colors to leak post termination of llama-run.

Signed-off-by: Eric Curtin 
---
 examples/run/run.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/run/run.cpp b/examples/run/run.cpp
index bfa8378bb..0ad8bb15b 100644
--- a/examples/run/run.cpp
+++ b/examples/run/run.cpp
@@ -29,7 +29,7 @@
 
 #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32)
 [[noreturn]] static void sigint_handler(int) {
-    printf("\n");
+    printf("\n\033[0m");
     exit(0);  // not ideal, but it's the only way to guarantee exit in all cases
 }
 #endif

From 1244cdcf14900dd199907b13f25d9c91a507f578 Mon Sep 17 00:00:00 2001
From: Radoslav Gerganov 
Date: Mon, 13 Jan 2025 13:31:41 +0200
Subject: [PATCH 069/279] ggml : do not define GGML_USE_CUDA when building with
 GGML_BACKEND_DL (#11211)

Build fails when using HIP and GGML_BACKEND_DL:
```
/usr/bin/ld: ../ggml/src/libggml.so: undefined reference to `ggml_backend_cuda_reg'
collect2: error: ld returned 1 exit status
```
This patch fixes this.
---
 ggml/src/ggml-hip/CMakeLists.txt | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt
index b15fbd24d..d090ba9bd 100644
--- a/ggml/src/ggml-hip/CMakeLists.txt
+++ b/ggml/src/ggml-hip/CMakeLists.txt
@@ -70,7 +70,9 @@ ggml_add_backend_library(ggml-hip
                         )
 
 # TODO: do not use CUDA definitions for HIP
-target_compile_definitions(ggml PUBLIC GGML_USE_CUDA)
+if (NOT GGML_BACKEND_DL)
+    target_compile_definitions(ggml PUBLIC GGML_USE_CUDA)
+endif()
 
 add_compile_definitions(GGML_USE_HIP)
 

From 8f70fc3d1b1d3c17b61842330dd106d391cc1227 Mon Sep 17 00:00:00 2001
From: Daniel Bevenius 
Date: Mon, 13 Jan 2025 13:38:20 +0100
Subject: [PATCH 070/279] llama : remove 'd' from bad special token log
 (#11212)

This commit removes the 'd' from the log message in llama-vocab.cpp
when logging a bad special token.

The motivation for this is that currently the output can look something
like the following:
```console
load: bad special token:
    'tokenizer.ggml.image_token_id' = 128256d, using default id -1
```
---
 src/llama-vocab.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index d0fb85cea..96b74e93a 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -1729,7 +1729,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
                 continue;
             }
             if (new_id >= id_to_token.size()) {
-                LLAMA_LOG_WARN("%s: bad special token: '%s' = %ud, using default id %d\n",
+                LLAMA_LOG_WARN("%s: bad special token: '%s' = %u, using default id %d\n",
                     __func__, key.c_str(), new_id, id);
             } else {
                 id = new_id;

From 7426a26b2492fc546a4db6991e871ee605714093 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 13 Jan 2025 14:46:36 +0200
Subject: [PATCH 071/279] contrib : add naming guidelines (#11177)

* contrib : add naming guidelines

* contrib : expand naming guidelines [no ci]

* contrib : cont [no ci]

* contrib : add `_t` suffix guideline [no ci]

* contrib : cont [no ci]

* minor [no ci]

* contrib : move coding guidelines to correct section [no ci]

* contrib : minor reword coding guidelines [no ci]

* contrib : add TODO for preprocessor directives [no ci]

* contrib : expand [no ci]

* minor [no ci]

* contrib : clarify `_context` suffix usage [no ci]

* contrib : filename guidelines [no ci]

* contrib : fix notes [no ci]
---
 CONTRIBUTING.md | 102 +++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 96 insertions(+), 6 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5a85ec5d2..a86f00ac6 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,10 +1,10 @@
 # Pull requests (for contributors)
 
 - Test your changes:
-  - Execute [the full CI locally on your machine](ci/README.md) before publishing
-  - Verify that the perplexity and the performance are not affected negatively by your changes (use `llama-perplexity` and `llama-bench`)
-  - If you modified the `ggml` source, run the `test-backend-ops` tool to check whether different backend implementations of the `ggml` operators produce consistent results (this requires access to at least two different `ggml` backends)
-  - If you modified a `ggml` operator or added a new one, add the corresponding test cases to `test-backend-ops`
+    - Execute [the full CI locally on your machine](ci/README.md) before publishing
+    - Verify that the perplexity and the performance are not affected negatively by your changes (use `llama-perplexity` and `llama-bench`)
+    - If you modified the `ggml` source, run the `test-backend-ops` tool to check whether different backend implementations of the `ggml` operators produce consistent results (this requires access to at least two different `ggml` backends)
+    - If you modified a `ggml` operator or added a new one, add the corresponding test cases to `test-backend-ops`
 - Consider allowing write access to your branch for faster reviews, as reviewers can push commits directly
 - If your PR becomes stale, don't hesitate to ping the maintainers in the comments
 
@@ -20,14 +20,104 @@
 - Avoid adding third-party dependencies, extra files, extra headers, etc.
 - Always consider cross-compatibility with other operating systems and architectures
 - Avoid fancy-looking modern STL constructs, use basic `for` loops, avoid templates, keep it simple
-- There are no strict rules for the code style, but try to follow the patterns in the code (indentation, spaces, etc.). Vertical alignment makes things more readable and easier to batch edit
+- Vertical alignment makes things more readable and easier to batch edit
 - Clean-up any trailing whitespaces, use 4 spaces for indentation, brackets on the same line, `void * ptr`, `int & a`
-- Naming usually optimizes for common prefix (see https://github.com/ggerganov/ggml/pull/302#discussion_r1243240963)
+- Use sized integer types in the public API
+- Declare structs with `struct foo {}` instead of `typedef struct foo {} foo`
+    - In C++ code omit optional `struct` and `enum` keyword whenever they are not necessary
+    ```cpp
+    // OK
+    llama_context * ctx;
+    const llama_rope_type rope_type;
+
+    // not OK
+    struct llama_context * ctx;
+    const enum llama_rope_type rope_type;
+    ```
+    
+    _(NOTE: this guideline is yet to be applied to the `llama.cpp` codebase. New code should follow this guideline.)_
+  
+- Try to follow the existing patterns in the code (indentation, spaces, etc.). In case of doubt use `clang-format` to format the added code
+- For anything not covered in the current guidelines, refer to the [C++ Core Guidelines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines)
 - Tensors store data in row-major order. We refer to dimension 0 as columns, 1 as rows, 2 as matrices
 - Matrix multiplication is unconventional: [`C = ggml_mul_mat(ctx, A, B)`](https://github.com/ggerganov/llama.cpp/blob/880e352277fc017df4d5794f0c21c44e1eae2b84/ggml.h#L1058-L1064) means $C^T = A B^T \Leftrightarrow C = B A^T.$
 
 ![matmul](media/matmul.png)
 
+# Naming guidelines
+
+- Use `snake_case` for function, variable and type names
+- Naming usually optimizes for longest common prefix (see https://github.com/ggerganov/ggml/pull/302#discussion_r1243240963)
+
+    ```cpp
+    // not OK
+    int small_number;
+    int big_number;
+
+    // OK
+    int number_small;
+    int number_big;
+    ```
+
+- Enum values are always in upper case and prefixed with the enum name
+
+    ```cpp
+    enum llama_vocab_type {
+        LLAMA_VOCAB_TYPE_NONE = 0,
+        LLAMA_VOCAB_TYPE_SPM  = 1,
+        LLAMA_VOCAB_TYPE_BPE  = 2,
+        LLAMA_VOCAB_TYPE_WPM  = 3,
+        LLAMA_VOCAB_TYPE_UGM  = 4,
+        LLAMA_VOCAB_TYPE_RWKV = 5,
+    };
+    ```
+
+- The general naming pattern is `_`, with `` being `_`
+
+    ```cpp
+    llama_model_init();           // class: "llama_model",         method: "init"
+    llama_sampler_chain_remove(); // class: "llama_sampler_chain", method: "remove"
+    llama_sampler_get_seed();     // class: "llama_sampler",       method: "get_seed"
+    llama_set_embeddings();       // class: "llama_context",       method: "set_embeddings"
+    llama_n_threads();            // class: "llama_context",       method: "n_threads"
+    llama_adapter_lora_free();    // class: "llama_adapter_lora",  method: "free"
+    ```
+
+    - The `get` `` can be omitted
+    - The `` can be omitted if not necessary
+    - The `_context` suffix of the `` is optional. Use it to disambiguate symbols when needed
+    - Use `init`/`free` for constructor/destructor ``
+
+- Use the `_t` suffix when a type is supposed to be opaque to the user - it's not relevant to them if it is a struct or anything else
+
+    ```cpp
+    typedef struct llama_context * llama_context_t;
+
+    enum llama_pooling_type llama_pooling_type(const llama_context_t ctx);
+    ```
+
+    _(NOTE: this guideline is yet to be applied to the `llama.cpp` codebase. New code should follow this guideline)_
+
+- C/C++ filenames are all lowercase with dashes. Headers use the `.h` extension. Source files use the `.c` or `.cpp` extension
+- Python filenames are all lowercase with underscores
+
+- _(TODO: abbreviations usage)_
+
+# Preprocessor directives
+
+- (TODO: add guidelines with examples and apply them to the codebase)
+
+    ```cpp
+    #ifdef FOO
+    #endif // FOO
+    ```
+
+# Documentation
+
+- Documentation is a community effort
+- When you need to look into the source code to figure out implementation details to figure out how to use an API consider adding a short summary to the header file for future reference
+- When you notice incorrect or outdated documentation, please update it
+
 # Resources
 
 The Github issues, PRs and discussions contain a lot of information that can be useful to get familiar with the codebase. For convenience, some of the more important information is referenced from Github projects:

From 00b4c3da6202e855087a4986bf19bb41b959e333 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen 
Date: Mon, 13 Jan 2025 13:56:23 +0100
Subject: [PATCH 072/279] common : support tag-based --hf-repo like on ollama
 (#11195)

* common : support tag-based hf_repo like on ollama

* fix build

* various fixes

* small fixes

* fix style

* fix windows build?

* move common_get_hf_file to common.cpp

* fix complain with noreturn
---
 common/arg.cpp    |  33 ++++++++++-----
 common/common.cpp | 106 +++++++++++++++++++++++++++++++++++++++++++---
 common/common.h   |   8 ++++
 3 files changed, 130 insertions(+), 17 deletions(-)

diff --git a/common/arg.cpp b/common/arg.cpp
index 27886b84e..1457a360f 100644
--- a/common/arg.cpp
+++ b/common/arg.cpp
@@ -130,17 +130,26 @@ std::string common_arg::to_string() {
 
 static void common_params_handle_model_default(
         std::string & model,
-        std::string & model_url,
+        const std::string & model_url,
         std::string & hf_repo,
-        std::string & hf_file) {
+        std::string & hf_file,
+        const std::string & hf_token) {
     if (!hf_repo.empty()) {
         // short-hand to avoid specifying --hf-file -> default it to --model
         if (hf_file.empty()) {
             if (model.empty()) {
-                throw std::invalid_argument("error: --hf-repo requires either --hf-file or --model\n");
+                auto auto_detected = common_get_hf_file(hf_repo, hf_token);
+                if (auto_detected.first.empty() || auto_detected.second.empty()) {
+                    exit(1); // built without CURL, error message already printed
+                }
+                hf_repo = auto_detected.first;
+                hf_file = auto_detected.second;
+            } else {
+                hf_file = model;
             }
-            hf_file = model;
-        } else if (model.empty()) {
+        }
+        // make sure model path is present (for caching purposes)
+        if (model.empty()) {
             // this is to avoid different repo having same file name, or same file name in different subdirs
             std::string filename = hf_repo + "_" + hf_file;
             // to make sure we don't have any slashes in the filename
@@ -290,8 +299,8 @@ static bool common_params_parse_ex(int argc, char ** argv, common_params_context
     }
 
     // TODO: refactor model params in a common struct
-    common_params_handle_model_default(params.model,         params.model_url,         params.hf_repo,         params.hf_file);
-    common_params_handle_model_default(params.vocoder.model, params.vocoder.model_url, params.vocoder.hf_repo, params.vocoder.hf_file);
+    common_params_handle_model_default(params.model,         params.model_url,         params.hf_repo,         params.hf_file,         params.hf_token);
+    common_params_handle_model_default(params.vocoder.model, params.vocoder.model_url, params.vocoder.hf_repo, params.vocoder.hf_file, params.hf_token);
 
     if (params.escape) {
         string_process_escapes(params.prompt);
@@ -1583,21 +1592,23 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
         }
     ).set_env("LLAMA_ARG_MODEL_URL"));
     add_opt(common_arg(
-        {"-hfr", "--hf-repo"}, "REPO",
-        "Hugging Face model repository (default: unused)",
+        {"-hf", "-hfr", "--hf-repo"}, "/[:quant]",
+        "Hugging Face model repository; quant is optional, case-insensitive, default to Q4_K_M, or falls back to the first file in the repo if Q4_K_M doesn't exist.\n"
+        "example: unsloth/phi-4-GGUF:q4_k_m\n"
+        "(default: unused)",
         [](common_params & params, const std::string & value) {
             params.hf_repo = value;
         }
     ).set_env("LLAMA_ARG_HF_REPO"));
     add_opt(common_arg(
         {"-hff", "--hf-file"}, "FILE",
-        "Hugging Face model file (default: unused)",
+        "Hugging Face model file. If specified, it will override the quant in --hf-repo (default: unused)",
         [](common_params & params, const std::string & value) {
             params.hf_file = value;
         }
     ).set_env("LLAMA_ARG_HF_FILE"));
     add_opt(common_arg(
-        {"-hfrv", "--hf-repo-v"}, "REPO",
+        {"-hfv", "-hfrv", "--hf-repo-v"}, "/[:quant]",
         "Hugging Face model repository for the vocoder model (default: unused)",
         [](common_params & params, const std::string & value) {
             params.vocoder.hf_repo = value;
diff --git a/common/common.cpp b/common/common.cpp
index 1a2e15247..a6f9252b2 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -73,6 +73,22 @@
 #include 
 #endif
 #define LLAMA_CURL_MAX_URL_LENGTH 2084 // Maximum URL Length in Chrome: 2083
+
+//
+// CURL utils
+//
+
+using curl_ptr = std::unique_ptr;
+
+// cannot use unique_ptr for curl_slist, because we cannot update without destroying the old one
+struct curl_slist_ptr {
+    struct curl_slist * ptr = nullptr;
+    ~curl_slist_ptr() {
+        if (ptr) {
+            curl_slist_free_all(ptr);
+        }
+    }
+};
 #endif // LLAMA_USE_CURL
 
 using json = nlohmann::ordered_json;
@@ -1130,7 +1146,8 @@ static bool curl_perform_with_retry(const std::string & url, CURL * curl, int ma
 
 static bool common_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
     // Initialize libcurl
-    std::unique_ptr curl(curl_easy_init(), &curl_easy_cleanup);
+    curl_ptr       curl(curl_easy_init(), &curl_easy_cleanup);
+    curl_slist_ptr http_headers;
     if (!curl) {
         LOG_ERR("%s: error initializing libcurl\n", __func__);
         return false;
@@ -1144,11 +1161,9 @@ static bool common_download_file(const std::string & url, const std::string & pa
 
     // Check if hf-token or bearer-token was specified
     if (!hf_token.empty()) {
-      std::string auth_header = "Authorization: Bearer ";
-      auth_header += hf_token.c_str();
-      struct curl_slist *http_headers = NULL;
-      http_headers = curl_slist_append(http_headers, auth_header.c_str());
-      curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers);
+        std::string auth_header = "Authorization: Bearer " + hf_token;
+        http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str());
+        curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);
     }
 
 #if defined(_WIN32)
@@ -1444,6 +1459,80 @@ struct llama_model * common_load_model_from_hf(
     return common_load_model_from_url(model_url, local_path, hf_token, params);
 }
 
+/**
+ * Allow getting the HF file from the HF repo with tag (like ollama), for example:
+ * - bartowski/Llama-3.2-3B-Instruct-GGUF:q4
+ * - bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M
+ * - bartowski/Llama-3.2-3B-Instruct-GGUF:q5_k_s
+ * Tag is optional, default to "latest" (meaning it checks for Q4_K_M first, then Q4, then if not found, return the first GGUF file in repo)
+ *
+ * Return pair of  (with "repo" already having tag removed)
+ *
+ * Note: we use the Ollama-compatible HF API, but not using the blobId. Instead, we use the special "ggufFile" field which returns the value for "hf_file". This is done to be backward-compatible with existing cache files.
+ */
+std::pair common_get_hf_file(const std::string & hf_repo_with_tag, const std::string & hf_token) {
+    auto parts = string_split(hf_repo_with_tag, ':');
+    std::string tag = parts.size() > 1 ? parts.back() : "latest";
+    std::string hf_repo = parts[0];
+    if (string_split(hf_repo, '/').size() != 2) {
+        throw std::invalid_argument("error: invalid HF repo format, expected /[:quant]\n");
+    }
+
+    // fetch model info from Hugging Face Hub API
+    json model_info;
+    curl_ptr       curl(curl_easy_init(), &curl_easy_cleanup);
+    curl_slist_ptr http_headers;
+    std::string res_str;
+    std::string url = "https://huggingface.co/v2/" + hf_repo + "/manifests/" + tag;
+    curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
+    curl_easy_setopt(curl.get(), CURLOPT_NOPROGRESS, 1L);
+    typedef size_t(*CURLOPT_WRITEFUNCTION_PTR)(void * ptr, size_t size, size_t nmemb, void * data);
+    auto write_callback = [](void * ptr, size_t size, size_t nmemb, void * data) -> size_t {
+        static_cast(data)->append((char * ) ptr, size * nmemb);
+        return size * nmemb;
+    };
+    curl_easy_setopt(curl.get(), CURLOPT_WRITEFUNCTION, static_cast(write_callback));
+    curl_easy_setopt(curl.get(), CURLOPT_WRITEDATA, &res_str);
+#if defined(_WIN32)
+    curl_easy_setopt(curl.get(), CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
+#endif
+    if (!hf_token.empty()) {
+        std::string auth_header = "Authorization: Bearer " + hf_token;
+        http_headers.ptr = curl_slist_append(http_headers.ptr, auth_header.c_str());
+    }
+    // Important: the User-Agent must be "llama-cpp" to get the "ggufFile" field in the response
+    http_headers.ptr = curl_slist_append(http_headers.ptr, "User-Agent: llama-cpp");
+    http_headers.ptr = curl_slist_append(http_headers.ptr, "Accept: application/json");
+    curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers.ptr);
+
+    CURLcode res = curl_easy_perform(curl.get());
+
+    if (res != CURLE_OK) {
+        throw std::runtime_error("error: cannot make GET request to HF API");
+    }
+
+    long res_code;
+    curl_easy_getinfo(curl.get(), CURLINFO_RESPONSE_CODE, &res_code);
+    if (res_code == 200) {
+        model_info = json::parse(res_str);
+    } else if (res_code == 401) {
+        throw std::runtime_error("error: model is private or does not exist; if you are accessing a gated model, please provide a valid HF token");
+    } else {
+        throw std::runtime_error(string_format("error from HF API, response code: %ld, data: %s", res_code, res_str.c_str()));
+    }
+
+    // check response
+    if (!model_info.contains("ggufFile")) {
+        throw std::runtime_error("error: model does not have ggufFile");
+    }
+    json & gguf_file = model_info.at("ggufFile");
+    if (!gguf_file.contains("rfilename")) {
+        throw std::runtime_error("error: ggufFile does not have rfilename");
+    }
+
+    return std::make_pair(hf_repo, gguf_file.at("rfilename"));
+}
+
 #else
 
 struct llama_model * common_load_model_from_url(
@@ -1465,6 +1554,11 @@ struct llama_model * common_load_model_from_hf(
     return nullptr;
 }
 
+std::pair common_get_hf_file(const std::string &, const std::string &) {
+    LOG_WRN("%s: llama.cpp built without libcurl, downloading from Hugging Face not supported.\n", __func__);
+    return std::make_pair("", "");
+}
+
 #endif // LLAMA_USE_CURL
 
 //
diff --git a/common/common.h b/common/common.h
index d523948b0..c86a4ef39 100644
--- a/common/common.h
+++ b/common/common.h
@@ -454,6 +454,11 @@ static bool string_starts_with(const std::string & str,
     return str.rfind(prefix, 0) == 0;
 }
 
+static bool string_ends_with(const std::string & str,
+                               const std::string & suffix) {  // While we wait for C++20's std::string::ends_with...
+    return str.size() >= suffix.size() && str.compare(str.size()-suffix.size(), suffix.size(), suffix) == 0;
+}
+
 bool string_parse_kv_override(const char * data, std::vector & overrides);
 void string_process_escapes(std::string & input);
 
@@ -501,6 +506,9 @@ struct llama_model * common_load_model_from_hf(
     const std::string & local_path,
     const std::string & hf_token,
     const struct llama_model_params & params);
+std::pair common_get_hf_file(
+    const std::string & hf_repo_with_tag,
+    const std::string & hf_token);
 
 // clear LoRA adapters from context, then apply new list of adapters
 void common_set_adapter_lora(struct llama_context * ctx, std::vector & lora);

From ca001f6656c1c3d29ef479b3aa5d669453e63be5 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov 
Date: Mon, 13 Jan 2025 15:08:44 +0200
Subject: [PATCH 073/279] contrib : add naming guidelines (cont) (#11177)

---
 CONTRIBUTING.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a86f00ac6..dc58dbd51 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -22,7 +22,7 @@
 - Avoid fancy-looking modern STL constructs, use basic `for` loops, avoid templates, keep it simple
 - Vertical alignment makes things more readable and easier to batch edit
 - Clean-up any trailing whitespaces, use 4 spaces for indentation, brackets on the same line, `void * ptr`, `int & a`
-- Use sized integer types in the public API
+- Use sized integer types such as `int32_t` in the public API, e.g. `size_t` may also be appropriate for allocation sizes or byte offsets
 - Declare structs with `struct foo {}` instead of `typedef struct foo {} foo`
     - In C++ code omit optional `struct` and `enum` keyword whenever they are not necessary
     ```cpp
@@ -115,7 +115,7 @@
 # Documentation
 
 - Documentation is a community effort
-- When you need to look into the source code to figure out implementation details to figure out how to use an API consider adding a short summary to the header file for future reference
+- When you need to look into the source code to figure out how to use an API consider adding a short summary to the header file for future reference
 - When you notice incorrect or outdated documentation, please update it
 
 # Resources

From 437e05f714cdd67757405221578d3f95f8228b63 Mon Sep 17 00:00:00 2001
From: ebraminio 
Date: Mon, 13 Jan 2025 17:16:39 +0330
Subject: [PATCH 074/279] server : (UI) Support for RTL text as models input or
 output (#11208)

---
 examples/server/public/index.html.gz | Bin 1206458 -> 1206472 bytes
 examples/server/webui/index.html     |   5 +++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/examples/server/public/index.html.gz b/examples/server/public/index.html.gz
index 3640a7a6cfa76764d93684e9051a32c263932c8a..489bff84b98f7bd7ebcdabeb7d0f9b31093eae58 100644
GIT binary patch
delta 8441
zcmVmhbut>;wM=_HnN~*xyWHtL~d^xz9Vl2fx30J%0rJ
zcr~A^^Xnhsd&&*iC;b-QXL7xrfA@O+1>}puD$p>dj86otJ@bZ=C4+B1HNN`eNE=|0)*0EU;lmn%6Ses==0af^Q$d<_q+1^diCNv
z(9Db1FK*|5{c%pWFW%1IyngAwe>2mY>$(1B<*SS6ZVO)){#^fI$i7bE#8G2|8H+Xe_r;ra`Owz
zM|nP(IP>f6#l;C)CRyatV~J)PLLXBioqUw5}MCcp$E&1E~8#jng
z-v$xM(JQh=Z<5H3Pjiw5%d;?u30{+6wWc#w*3Kw-pg4$q64RN&KV>BLNfyMbnY*D$
z!3O=8CmiK?9jw;aOJbV2e>ugj|H~$)vp9*#AA%@NGU~?kY>{L>$*`A%NjCE~Sw>=d
z1s_UM{13h-Sso0D|4ANOfJbJ9A52v+%Ck5Y`
za#nUPeK?O
zThQH;ALME1KF$_l;@wp>7j90TDv}uqT^ihzv+X*d1m~&iky)HqwA?7qqEkVh#i=mhiw-99n_Hm|UZ+;C
z+%Q;WZn|!$WSO~l0uLzYX#ZvA-qDRXgwXd^`?9c~MsNRtf94ny%DqfroqJiAsd`zE
zQnBIk^b;(+>~f`l!L?WTfe>`<#5yi_1Lupg&Xe$!vgy33bIpvVuoQOEyD^0y>Pm!Cgf*5B^BCOSB
zBVHx%(gYu`-~(Zk9?T+~zLq|8>*TDuzazg$9-deo98ePFGnjawbIl=LhUDQ4eqld=
znHcotCE+I8%?@jzRm`V~@<*Y3XTj?rhI$8(ubV^Je=Kqzie4%$ZASSf%{PlDww~er
z4#~3S>dVaaYNJ`J=aE=I6R3$~_>I~8Ul=}DW8out5(fTRmr0y;DZzL1HB_lIA%1GI
ztZsk}LBZ}yyq|5t-G>b<;&|=qlJ@}2W4A=A=?s=B`wiO*-L&Em)<0ySn|3XL8Uq6a
zF6AJOe@Q0LX_jTXvZvNctwC)WJTz$4OR}XbFR_;E-i
z>J`go`D2mYBmhteNQp_DZ!(g}8OaF;PgQ2We|L-R;6o6v+7+H^a65*wHbhygQw;<{{%bRA{Tca$1^LspGZ(yPP1sS5O`_JQ(iL$0s2Twiwd@@EVWZHR|99$vH|
ze+bYzAV9T>AA1ZFV~B}}1^OrH$Q)uKf`U1QwlzfC62a_~v}6x4vBx8iHiQmZ2RayI
zXitY|Pd^EHv_Wjp+StIb=Myxq4*Rg$fS_?~^dY3s+el%nhUS>IYUpElXhS?S0gcR(
z%NQQ|5D$Gk8tFqop?3g9sUdAThKV`Ee?$Zdty0n$Ce{!W5i!b+IMx^@_7D?WVDf1e
zqE440iwncH-?BCznw}z;TcrlK+Jw)98l%@|nrz-+^TT?^eD&chhuVF-{@{!GX4Fo$@U3T$j^92;v0I;;-p7(-ecA}u1u
z7}EL>X%Q&KkT!-$i})~xv^hk&4h!;h3~6hKv^5@atRXP4I>101L)#gmEn>hp%bpGq
z7eT<*Mvdh@6tKEbK(WX9<*;Ll?r$6#PO*pl-fsK7p^lr*?I8fL+W=sWe<7_8krn{J
zbjHn4_7D=-Z6p}G;<1My!EQr>JxWK`5N+$XM5FB?bgAExct
ziVpve;b9N)uzw3gO^4uP+JcYK5H%eFl4%=AMme4`=y@WHXo{lgs^O^SC^o|mF2abR
zPaWWL@R1E=@Hmyy0VIa1IEtblx*b%|&<43^4Y<;#mSP<0g9QzJkc$8&x^A1QrkKVk
zCdMEW0Znwrv^7Vue^h%E7juw{04Rp8PYpw{rsMiz4RW!>Wt*m2ieewS;aJeH2f5e{
z;4-J0axlghG@L;$P6N7hRWT1St3ZIFxBKrq#`P4i%Ee;Gc&**?K3YmTa$is5ME
zxEO<6L~zkm)6fmYF-CDQ2e}B~qFbu1Te@zH<6;eR5y3@Kr`lAr6mt|8dytFWfG}M(
z)q}T;)!{>*?Ngs@?PY@njAAk!WHN0an5pZwrK#hbQW^G2rGa6VqH2yc&Mnmgyua_L
zwok$!Dq7WGe?rWng7V%LT2BkDw}sx*Lho&1^t3QKTSS|X7A0Hv-`j&;s=N5nFyg&}nZGT}dvEa4?d>UsfvoT){l
zQm^Gle^8s}rnAL{(t6o1Nf6JNFO{Y21GBoBQIyxXeqS$5mdl(#He*q?lcO{1
zF^JbB3uu2rd@T(pzh9x$rR%3c4Zym*7euQv(WlHDjma5g=@pEs&zvs*xXVRTYPPqt
z?wn?Xdh3RHxAXdYGS9>uqJkEG*37${K`EpTv0WhD$fg
zk}XSGD)Pnp`_LEwQ<1eUTFjoGhN%?kY@O5=nd^~Hl1ejWwMmD2GpD4Pe;m=nCZ$j7
zn<&!*=6Xt9|EYTK0}}QrJ-??859=cZu>`Yv#3KTscDb4?Ywoncau32Sl7|K_VF6#y
ze~(?VH)uerYmpHsD+Y;*tiVO`fcdOFtFr5McW>XUC#1F|-S^hBuC%%iKf
z%aaY=lN%`BDQ118u0J$}23Yt~OuQ84{
zQOos@CFCU=7`bCs^Tn_6tKC_e~yFL%SZ&ZcQtla5{38x+EEnlrCi>x&2<@G
zg&XqJb{x-#H>%LW0!L&pPcp0(-jZy+?`CA
z5w<9{S@%rqxFFh?Ex4J$697`q`h2r&ds)0!M<=G1uSSl
zCA7x5mk|?6XoDC+=WDH^mayli&__`S^{$c_-Ztfe&*bNmwN?#CZCC`YAXt?racztS
zHC*ss&=@Ufj92RahDVGGss0?YlORBgpjg-}+>AM_gzf{feV#mAND5MrY9ZBXiGNh4*UzTjs*@1yA{?-fooa)uZ$?NW%K1!u8WJ{Qkt}e0yNzi
z=ce&PkssU(hQrJT9yC$1L|)y-8g|1heO3(5lfdXmw3pJ@bLwXFXolc@Z>OyW*!5t#
zIGUk|dxg3&se?`~m;^G1+^#&g$^k**&yt@l2Cfm9rG=~2R_O7^kFWP-?^ynTl
zKY%tUh(!%ZpU_{#{Fc9Q=zzWhKRNJOpTW9=cm@6Y+VAOqPUsK2YR5(M6AAf2b|Jz1
z*F+Quf0bTEvrX)|lt`CPPn>nB`=6Abl++
zI0u5E=%Iyo0;nN3BPe%a5~K-QWXU!s*-?(d#B;-2nq=;Z^ot~X6z!bZIJ_;2skmp(
z^42abr>9~XDHXB;JQrt+Y9`y37tsUnWn+q9ykU(%GQpt^oH!ziEtoAtpevRTB;F78
zp9|=JZn);_d`2Fc1?szESqap5nvaoFZCAuM+!Fnf46LccniWS0PpcTW|z$2
zw1*H03kFTP3_~|^WiL&8K;LEWr7A2ZN5?ct5nD!|bRmD;RO=SnG!>4i!SX_C!ulmr
zYU{Kl^^+}l8o$6K_S?e^VoI_sOMOcjgwBgQ)Lpk(EZH1mTT
z!ma*=6y#r#m}D*u;#JpD;&(QDy3Po}enN9x2DcAfGdMI_#IjrArJ$GKrm6=1XW<$_*S6qF?p
zXpVXhx7MmJMv4--s>nGc;!4HFK+6C%i;`(gT73S#^>GIsYye{saPh5EB
z2)e1&r%RG$U=9@nrh;Db>~0_N^T{S`B1vPmZp_z764D?=#or>-B_ZFXE~Y_xA>9%R
zE|E)p?zZ!(+S+8Dz~XEftgZt$OjhWEEq_Kie;j+ZESUE@{qOIYD*{iIqg~NKhHRUL
ztIL{UA^w|B01C2XW2zzX4*L(YJ2xh@M2@Smds@|2MFwlBAn^{&BgLIAKNJzdo?1wi
zbwl^CY-%=EWqXQsS+UIW(gZ>wmX)a?>*~~%9b1Qg1t&=IHf2)_v8+usEUT7>RoODI
zf9y;(ti$Old8%kwlT8D`FEx}^a|)H4q-0Y=j9iF;6L9pH6qRI%Nrn`}#0tw=4MM0+
zNVR~K$HYM(m^@Wf1acMPu-{=JyxHs@s`^(dzyoPeO`778>j`S%4n1)?m=V&ZkOb~A
zVVtSXg~6aDCJW|TV$$FaJwf-l+L;2_f5>=juqw%Lp(j3kT%EW>Q@BP-wHPxGL@GuQ
z#*K|27m5Am}dcZ2+K6Ubf?O}r#ZlbFObpRne5*L>2Bx{aEE-vYgk=~oD{cC4Xy9}1cs
z+5#GfFPA%Kmfv~yP2G@CO0n*3)!;jI3I3|_PSqbtCNe%S
zW!w
z42Ii#=RMSnkK_^i!?wKoB7ag3sVaup2>svp^tr(YC*u;dgEi>
z@TA8z5Q5v?O6YkmE20lUk4DR=DF@9U=IELCxhC6!>Jg3aLS?QWetIPi`K8!U2p{=mf7+VUpPsrmFVH`tqg$Ab-8vz@AA)A+=m>rMc<9bVl&`?UN
zm-qq2o1A1g%2(KpeVmb^F~oBHfPSBo>?U8$WB*Ncr$uJhe+V;?cb{+a73}$nS~y_;
zDnoq1)4(jo|M7
z?>mskC+Ol5J$)P;xT@kIV>S#m{G`LDZL&BWn=BBgb2H*T>zZJSd#Jp?{74t^(Uu(?
z5IbMPnF|TNfAFP-49B;CI$ylt+h+-KeZY;FY!MtPIuQxo<0$cA{$ZVM4fUVz$RkXT
zGV7R39$b&gX$IRQ6ef(=zu{c>He)c@Y+N!#UtFN0{~8~O6#7DZLM4Xy`3f^q;;r5G
zWv%$~d{U}c7?>h1$erd70>9?MEzN?+%^vxdNNwiaeh0(NK!7Nw0C?nkL>uaq}|Q$sb8*SZkk4p;RZDdt>dRQY_e*1Mf_jelY%=cda2
z8mn4Hf1;D0_s{)l^N1B}I1Aj+f`rMowz(X{J)6ttlT@Hx2BI&vyC1~Mq`dicyiBB>WAj3plLTb!m!e~@w>mtOMdk7r
z)S*fJ!3fq+y1aeNDTz?w7F}z=3iF0BaE&!Xe?l_>_KpfI%+L|~0tt$dbH==K$)+;I
z+kLRmaomNHC=FdoYR5d5!Rj0G2uASJ5!?n(++^hF6dlndeScS8q;L@hadF{3yr;<>
ziSwhMTf5^2nn}g{Gup{d715tZ*vh2TOY2a<+i$PGov|q>N|?h$X$Z?3cDj@J#jxe4
ze<%%!o5QI^w%B10EGUR+g8X2)BoNp^Qu&2i63Y3PAhpgE0n6Ls3zE>cbWJkO68@I%
z+gx}NNMgvVV;cnzR9^5yW8!hX1=e;bav$WuA|$O*+(c}qXwbzppdr~onRMAL=J})`
z(C=s!$s+90z2AMy3Fqk74cU;cUC=%De^xx4iZrAKS{0(_pB1l4qK8{yyT=`U9dKa(
zF?ONK`^=43%!jKI^1x%W%%2}q*jwkW{6M|;X_hS9MGyw`u`GhU_#CjR<>3D@oxI=C
zyC3JdTlIVAcO-U08j$?ouV=PJ{48EMV1pS+iA&#yZcg9FU`<6q%A{fTmBLB#f7m1S
zvSmho-H_OOtQY#(<9o-&sV5rX+feC;y*38%Yx_rJ!A~|
z%?0wm7xQh%_gQTBzz1MhAr{Y5L4Hl{`;KPNbxHcX_1JUf#3zf*>TAa7nnVdZKyxBp9<%Zj#P75v
z=sl0rwUIGKi5=2&7wgHk*cG%U)OE6b%T}Daj
z3bbw?>h16GsE3KlI<7W3L`fkzJ0cWj^ma(5_Xz~?Br55$pz@d7Z(o(3e|r37lJNy8
zAk8Vt$dboO&Z~!!>Z|`Gw9d$q?@Pjl?JA(_&4TUMu2v*lxp8tosm?}D7GbiOL~c$=
zHX#peC6Z5alHHSR^5(_-`sRfk`4Xa#b5Ad%_lwYt?}HIod*36m&;ATw`^
z{KVTp{##*g?&V3}CfVr6e=6ar>IIj1hMv%$e%&PWEDEwDr!M86W=K7)Zp81?vQ&{(
zI9A>?I*;kT?+L5tDeEva?u^?t&Y!GPO^;`Fzh@_$9upm)+3GJA&e`^6F{(x65X5ffD
z;b86N6%zCCYcUYJl}~N2rijo=wY0Qmmc$W+V;jj2NBn$!krD!%32^EF{Q)(A_r(=<4hqBoJ7-9dP>m5))cRqa6U7w=Pt$P
zbBnrH#dbJwg%^^1f0l4`{TM6@%&|}9&ii&3aNA;sh1TH>Puy7)wKo>C$R-E4
z0N;F#DArJv)uW^hg_P>JVj15D`WTQEDc^)y}$u#*AzAOG%JwYEoB09+j&Sv+XjRF7tfK>(YY}D#FOjjBy
z_`~IQSrVmS=9f)xgddE!+8RB8ChucN0qbrPM5L=lltb^Lyt`GGs#vqVdg>r`zPQ}&
zos8^RIX0s@S2h~ozk^JnMQ!+;MH8YL^L}}`TU>EKeXpEyE4jA_0q(F^5lSduL
z4oF7$*Z=k3{_X$#cl5#!=(Fx)Qe3@zjtK1QQq}ITel=!B;jqJTl&?+>>C*rG-~aQU
z(e6k^e_ywLH>tfOedM$6LBjrze?eDC`q(LEW#x5W*t@)4Jnll4e7Ce~dD|b8Dwn##
zLvit11^B&NYr$);Fa!l^DWgJ+VCANrp242j*Cb3UTS{WY6baLR!VcJ>T8O{*y$hWa
zs?>O@8lDYnQK_e=g5^K`6WT>SAxBt5rhRkRe?UIgA(I+UST5p=wQwScb#xpjqW0W-
zHweMXPK2XYeNRlqD7+*YL0gE)Wm`A%%SA6g0G_&RQ;R0ZTL&Qo>59YOrzj_6Rvi$}
zsq5X9F+mn~_;o{ahQt%Y)>Uw>qKJJwZ`iJu+`yXCEe+T=|@8-|{KL0YFzuP`{=U>Y6zpb7x`J@eJ6<*)W
ze_35!&tK_tW&7gA{MBl%!*>kuugSb#fKb}&>%Y%m+0P*def~Orezk?~ewUwLuU>oy
zntAd1#qIpBKhD|q#oPIt*Dt+ye@1e1J=fl>JY^ByZQ;wppKCvC$lLkj%?G!*|Cj4;
zUl;f9UOg}Gzm~uL@{Rf>TfBQw)VncW&42iweD#C;Lry*g-6P9i$drau$Q($uc^OUvza2PXN*2D>_;Ar*i6npr8M$r>PM>?*|4}^
zgZ|4C4l}&=S8MFX5lczNe=zZW*<@@M#S#63A0~0iNW{(-aq7_&yKxYwGk24wG-6lq
zp(K_6!S^)H{5YDCC8OyR5CtJ7vZR=jVoVyfL&DZ!@Z<)5GE1q;j+Nx$wD#|$;5(Dg
z%I+l(XYoBvmqEOpt$oj<(OE(~4|*n_C2{66&VG@_!G_VZt?#ilf3#|FUb8T`lp?wX
z-934JmIUN+wg_VPuBy2p8GWiqrZgbTzo%#0wa+Nd65`TX6mL_K6kKo^f54$1;iB6`
zajirkeOi9VSh``A41UCiQxpZpaIxfOfw#FPELj$A`z*hmvB!YUd`1G_6)L1*USUZ>
zKX{z&tV*brEjw!6f08J>1yd8MGS*S`?PEf>sn6)nGGLS=F!i8Q0Jc8s7aZF9tVi&H
zc4de23b_PAg~f6&;SNPF;=s!u!$o{}S9)oYFiXQzL7v5_FyM<0Ci9zHp$~4SRAzf!pkl~d5kw~w)7vU*C3cyn3h*fbuao?puaMs4#dzv!eB>@JTj%JIP
zu{ab7U1kZ1e?$~7Ck(kwv7@a>pc8^?m8B$y?B+z2J}z9_UssVx6dDBoG|+K-^#0p#oEP&Nz6e?!qrxuwl8+a%d$@x<3NdA|d?
zthxF!C2nmrYxP_j31|W}kp#anpZ^QP&(&D?NEQdach+SRXI)D0NVbM5l_tbbO_tRS
zupubeU5WS8O|bj0fkj+iySmJK0Oqk%;wbXWG&TB$XtE&YcEt$ImXmhA6YO^B!5_x1~27Z6OVyLxV2g-i2l*pUBt
zCiu8fGYuHQr*a*O_Lx;aP_0LqOuv24ALs05_MG|Dz9O{J7(GzU*b;=gx`e{KJRAFbLIo@#JAhO#@d$fFKogWARhjy<2Cd1cs#l?DWjW1|frh1Nz2OVL%^uoPVz!$Te7p$ceZlw8K}
z(1v(uzhEP=_VS(H7*
zLmNXri~##8izj#ML3<
z0*qLrXlp~XH36c=5!Z)^>*KLWAA$|N12&X#3O9$Si+C}PxHUxF`jn-SJ_HYXe+wQC
zeiqY*P(klN1;rfYKk}dhiD+OrUlcby$#`F{I5Q(&l)?F^9mw>;MCG3~hUewuk}aEZZ3(E`ori
zjvC8-C}4J>fNYKP%VEb9-CsX6oMH|6z18-6T^To>TSEX~wE@5wLs}ame=PujVUL@k
ztRW<@+DI^V#bXUYg4KouYm|=6A=>6|iAGyP=wP+cVGLz^h_d}DxMK}LgVg~IhvJSk
zhzwR6894HMg65TB4^|ogG;VJzB8;fAtZIsGE5;}`!wxROh^{#{a5?zM
zhCFzjO6~v>U6E~B)(+hcDrl&KT+{|!sg5b@hx%YaLmT8GfQhDAhN8-bK8lGx$V5OB
z%{DC6mQBSP#l;xpeiu4#_0%ce7~FXkW@Q(U&GiYd$1p&O0`4Qr5#)c`KTQRRa%
zzMx?ba40-QCEx7|^#!)}O=S6H$9LH32)jD+f
zpbQ_?R64LlmK{?!HCY|SMIGd#HV{lPEW-qS+wY!Pk(R+QvA$deDobgW52-$!C6MYIV5-U#yW
zp7xyRmlU-*2zk2AE?*(RMb2b@hC@|H%0dzZI8FvTxoZ-TbVd7{?1qoHAlhY_m{pgM
z_cW_+?a3a>iIjiwBK98lbgZNHEQ;B2S+Lv$!O2rIf2D>ec96%O#N?1?i6N=V0?TMZ
zQo`b%f!gJc3AsT96_Tdp(T#(QCi~NA$_8#+S^f*fsn#}ma#OxqhcA_;>1OJynlz^!hkmt~MY!-s>X-`7jyaf9-pa%IXJCpTu}gf+b1Qc*~QPihQyD
zJ~RfvkR`Q?7Psf8VJbyBTPL+eN?iI$QmKZdH0k8toKsSbKaS{OlhUX4O_b>Y=Xy$r
z_f)<20SWt*uGdp159=ct@dUGa#3KZucDb57Ywoncau32S;)ez=VF6#yk6rRNXh5oK
ze~}R=$vO>-tiVP5fb&^XUC#1F|-S^hBuC%%iKf%i;~&lN%`B
zDQ11Su0Im3f|5YaK$3N*)-QgC#YwZgqrak{lB|j6ynt0h1P9|qXIrbwrr6JVbf7RGoNf_V*Xoq32mvVW(HrJ(i6>R8J+i_eE
z-l)O~3mlm>>UuaJ3or+{XB?Pc*^9g#$E{R3u^FY0mNS~e~`vof>$tIOx!hf?-ubxC2Bz=uRKBgKK0P_@>xj{~QfG?%6saN76bj}?NUKZ7on4~_*{qJJp260A9F`B{`&l8i&fAcA?UZdWr
zMmG-7Vuj&t5m=RF#FupZSp}66qT~_lD0#qjS@#VKe*s0YoIvo7DS!Eh7nE(G|KS4b
zC}a&{2%WFhidxK{p8^ks0o1#SBY4{s0iVgvr)#wuklL^aT0yWXkE7Zc4QjaHyW*rqk_qD{VC?PFGV^mgQ-7znUP9^cNGw
zoS=NAoL|Eu#>J%m9I}(ZM+?7L*epoO9d?55eY$-fKU_>?BqPN{irEqgp>tT>qw0a?
zVnXj}6nowTc^8u#MU@Ps7(_B81937~GO%RaV@0bKA_d9!LJ5Y;e;Y+ccC+xMCRwr-
zsFI>v2!5MF4NJD+Uw}0%aFF?}pjHZ8%j$pSM3E&KFCD2aE`rO+v^5l<>BcxWjUS3U
z|6VX0ZZ`0siQ*-4>o(S~8*b^dVtAeeMn|H(l*XPhlCq;2g7>|hwi;mK!gO&oLm|yF
zvZ9$3&}hZh=;Go6f64U*A1CO~Zs5Cj7n6u?>yFSE{xA5u;_AI<_r1}hd(8a++8{p?
zH8A;v{vz(T`5T80=sWO}e2@1TtlN)P(7&&}p8jW){jjTcTr@vv!U)8?N>^g(CUOa*
zlgp>4oL{N%pX8sVVo8TS!kh*#lNr=lNfu_#0^a!bLsrddf4U6d2D32=A6g(MAQ>Vl
zMHzvakHu(_#@mdhM;QuYmjt&gPRWY)izIv$jhh=bydsM0wP&Jo*MyaGQZaXw3RwY&
zi&I23eQnE&@FDMIV_x9AVPQa1!HW)@EkcG(m?A`UE7lD(+K=y_3*CQjYzNFa2+{ZB
zIfrvq#78U{e``e+uofvVwHD@wGS>fpw`?MJ3%4uO?(sWrm)zmhhX4ty1a)#51SBMq
zn07@bbY9$HWC8E}1oN4oGzR0y?+CUYe_~~VW}crxjMclC_}N!9qA6j1
zwCY+v{LY5Y)+wdfi&=)ti1vXio6;A+?j5jujVSP^Ts&Y6{#GSWrXe|$vGF3kHa!VPwL^kPjx1CSb79{H!
zR%A_T#c10WMl4a;blT_VA`R{xJkdZ0r
zj*7%P{6Ea^+!)Xj*+k{{)T*t5bly@%;vJYriaT9?C?bUAm`IT{U30Nys1{Zv%fXr?
zn?`wQ0HFX&vZG6y;tb^%32LVs7^>RftAa}K_Hmq$O;0v
z3UT=Fpb*|@_77G4D;3~^G^i#`ahdB0YT*t&<#aG7q&krK?Q&skN6Ur5pd~H~&b7p)
z!5w;n?&WId3g9E-^1-Sk!-bym;mg&De>*gVYorvDGjlFSNy#rEoe*MGb7)bf8304A7XuEeo&0qtFgJxKok&fCmpY8TKdia+q5v3DVHzE%}a4Xf2Kk>
zZe^H@l#9nTrsT%ernc*+Svi^Q;!-ZmT@0duh(H5GvokQ#THiIHD2Dro1`cHJ`RaZKEdOw?MCB`V}Ip9c$>_hk|B@wt&Xr%jM3Q<#(Qa
z6B1CysCw`WT%YVeQkzXo?)WM(~93&blQB;)_Rl$;NIrFLz{%Mk|>mhiIf-Ff>ZEQ(^%eLz9
z-}YZkWJ!u`f4I@x>wdYlPM3lB4Irs;kM)<1q#AaBbxBqXq<`Sbx>r<5cZ?6AjPk1=
z6zxOLlA_DgI@g<{*6Za`e_cCdsm>pE*VvX(xuK39dFZ9vRJ#`AwUun=J=BbkWFh~<
z_qqDQeUlziRp77@@5^XFT}D0b>OAUq1id&yANwfPB7e8_#>aWXlOESV2yS;Pq35}*
zh(7o|8ZASmIcWM(j-FYcYw~@k9?|lxr<8bp+}G`hgtXv=OVgR`e;>Cd9HY-Nko8R`
z&Y`#FJqb2+s8X??2IJYZguM-W&49N$nV{+6!`Xgz?0$M|g?~GPuVT~_BC1tHza9L9
zeaP}|5d!SojpI?SeXd@g6w6pMCsB@ScccnLOakK4bsRu+mc=Mr`%4DOFU5vJ_?SdvGX;YxtPGWyws54
z_!dy-ix>I!e_4!(2e=W@ErOFoCnBMH9L65ZKfJT8q5kt7eT3;z;vJLd196#@q_9oG
zV8V#Jo1E+3W*i2ajZ22;iwku0U!xBKwlXXNlEG0pcHH(u{Uf7+!
zWpUD`-Med9UO(GyV8$Ndn|TP_58x^XgZjB39(FCKLa1Ncw?5(p{74Y2J{!T>kCU!V
z(Yy(q395;_)`UoQu&Qr8aW5*v$LG_v*6odJ{1ZPH=g9YKtf(msPk!D%$tTSNRIsxw
za6=0kf5h9`F0vo>>>{5}6M=RaamIOnvvr)Z+)7L2#T-G8NaBA(ACD$gu}7C=6R;AP
zuk(+)`+l^H%bQQE>BQ3u{|a(VlhF&d)6
zExHza6}Sz9-5RU<1kD6|J1Vp=Lr3TfBq&D8e>n5XC7a3+Z}-7M$6*#m!z3V#)(&wj
z{na=05scucBe)HoxJl{JDLP_t^8T*8NZ=y$qvC=*yl3$pjk2SkTif6Vnn}g{Gup{d
z715tZ*vhojOY8K&+i$PGo$FOu-)U1z79CB_ZShV@;)Weiu-U?LLc%V
zEcfTf1UAugSAJmb`y`DQWZ?%sdn}7!(>(*MYB~6SOegPm^zP^O+^zb(^E(=mfcZ50
z_v@K$5kHGp4%lEy6H3_ofMo1_1lCj(BwQM9UkRKOk6c}di#Q4%`SU^se&Er2cod9eWO!cyzH@ea$&tf73ALCuL41m&d&P1m$e{gwqr?vBa~JE$
zrPvj;Ce(Gjealyz-_i7zx^d)nhH!UBsN%;@ddOYaj1%2T8!mj#u-+_DFj9T7
z-vq5wy398qVP|&bv-M`dH)dBWnyyF`-%qPUkkdsFFQy^M7)_`2fv-feX-3m~noi%m
zm|x$#kV0>Q7-ZD5i^=;%K%%<|N@1j;7@nGGilUfHltMH)Z;QOx-9UC*VQ%E|q;EHD
z^kbFqRP`d4d4`_QpMKrMf9x#u(>P;<<)3CqIjwF)?~}4rmJ~QN-ZVOw>%Q*^tLG`}
zFf{HA&@~R5_66zkX(F#Y>59N~K?Bdts>d=Wmv5j5?53E|_62piNb=;FdE{ODnM+bn
zBz^@q52IKQO|_}tl6xwWhLXdAMCB#41htue8A
z)HY>`Q0%0Zbk@v)ID)uqBemfuKS*CBe88>&94|nBfc;Gf86SxiY2?+bT{NmS!cy%d
zjGRi*n~qC5D5O-!
z6;I+m(6NB5Nbx3H!HVE^vj~0G344oWwp~!R4<}qM^@Y*4%Hhuu54nVJ9G=)SJNuZy^p)PKO#2G22Ni0o_qoS{eTr2^E_UfsF)cN9aw_!4}XXV(8>Rj1qeE$wI
zg%-8pQxr{zYE1U!_fx`jsKgn7iRoaguSf6TY&sLDh?nxXn;+sDd}Ace0$
z^!nog7YL|yK%%&b*THzVl}9{Sn6Ltzx~_)`S0k3
z=d)+shoExx?l}OkuS-?C!}`^j8HJ+^$6>ZQIiySf_kaJ-e@43l6MfzK-K2KoGTZ#n7*b#QrS`*A+AW6{$qZ84%OoNy{}T}oKPo?N2%f2uojAXdMa4{
z(?6kIxDzsfMP%AHhYe&~9Wtrq>BvP;u@*xFf3c2^qgd1)xhH-AR(2vBr0RQ2Dn{WY
zO)1(!ATHgK)GHUg`SI?A@SQ9cBX{iw5Sc5EcAuh*(ph!H5e@%q@-14aPP^#erP{ky8iLV&iZF?bo-5qM*94v~xV16)tg
zJ)b$(i^)IG&}YX-o5UlG&UQ79PSHQ`BOg*83XwRC-KoM_TtM*A^J)It6L=2_yprYF
N{{t=SD#I=KA^_ZbfVThu

diff --git a/examples/server/webui/index.html b/examples/server/webui/index.html
index 86a79b77f..a76d831a9 100644
--- a/examples/server/webui/index.html
+++ b/examples/server/webui/index.html
@@ -37,7 +37,7 @@
           
+ }" @click="setViewingConv(conv.id)" dir="auto"> {{ conv.messages[0].content }}
@@ -156,6 +156,7 @@ @keydown.enter.shift.exact.prevent="inputMsg += '\n'" :disabled="isGenerating" id="msg-input" + dir="auto" > @@ -244,7 +245,7 @@
+ }" dir="auto">
+ +
+ Reasoning models +
+
+ + Expand though process by default for generating message +
+
+ + Exclude thought process when sending request to API (Recommended for DeepSeek-R1) +
+
+
Advanced config @@ -261,7 +276,17 @@
- +
+ + + + Thinking + + Thought Process + + +
+
`; } + +/** + * filter out redundant fields upon sending to API + * @param {Array} messages + * @returns {Array} + */ +function normalizeMsgsForAPI(messages) { + return messages.map((msg) => { + return { + role: msg.role, + content: msg.content, + }; + }); +} + +/** + * recommended for DeepsSeek-R1, filter out content between and tags + * @param {Array} messages + * @returns {Array} + */ +function filterThoughtFromMsgs(messages) { + return messages.map((msg) => { + return { + role: msg.role, + content: msg.role === 'assistant' + ? msg.content.split('').at(-1).trim() + : msg.content, + }; + }); +} From 01f37edf1a6fae76fd9e2e02109aae6995a914f0 Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Fri, 24 Jan 2025 09:39:24 +0000 Subject: [PATCH 143/279] Update llama-run README.md (#11386) For consistency Signed-off-by: Eric Curtin --- examples/run/README.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/examples/run/README.md b/examples/run/README.md index a06805441..89a552079 100644 --- a/examples/run/README.md +++ b/examples/run/README.md @@ -3,11 +3,10 @@ The purpose of this example is to demonstrate a minimal usage of llama.cpp for running models. ```bash -llama-run granite-code +llama-run granite3-moe ``` ```bash -llama-run -h Description: Runs a llm @@ -17,7 +16,7 @@ Usage: Options: -c, --context-size Context size (default: 2048) - -n, --ngl + -n, -ngl, --ngl Number of GPU layers (default: 0) --temp Temperature (default: 0.8) From 1af6945eb0d0e97525dc0ec18167abf05c28f482 Mon Sep 17 00:00:00 2001 From: "Bernhard M. Wiedemann" Date: Fri, 24 Jan 2025 12:21:35 +0100 Subject: [PATCH 144/279] cmake : avoid -march=native when reproducible build is wanted (#11366) See https://reproducible-builds.org/ for why this is good and https://reproducible-builds.org/specs/source-date-epoch/ for the definition of this variable. Without this patch, compiling on different machines produced different binaries, which made verification of results difficult. Fixes: #11317 This patch was done while working on reproducible builds for openSUSE. --- ggml/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index 185079aa4..ff68ddc21 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -58,7 +58,8 @@ else() set(GGML_BLAS_VENDOR_DEFAULT "Generic") endif() -if (CMAKE_CROSSCOMPILING) +if (CMAKE_CROSSCOMPILING OR DEFINED ENV{SOURCE_DATE_EPOCH}) + message(STATUS "Setting GGML_NATIVE_DEFAULT to OFF") set(GGML_NATIVE_DEFAULT OFF) else() set(GGML_NATIVE_DEFAULT ON) From 8137b4bb2b5fd4cb4a752bfe69ccfd915a313d58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Fri, 24 Jan 2025 12:38:31 +0100 Subject: [PATCH 145/279] CPU/CUDA: fix (GQA) mul mat back, add CUDA support (#11380) --- ggml/src/ggml-cpu/ggml-cpu.c | 4 +- ggml/src/ggml-cpu/ggml-cpu.cpp | 3 +- ggml/src/ggml-cuda/binbcast.cu | 54 ++++++++------- ggml/src/ggml-cuda/ggml-cuda.cu | 2 +- ggml/src/ggml-cuda/out-prod.cu | 7 +- ggml/src/ggml.c | 32 +++++---- tests/test-backend-ops.cpp | 115 +++++++++++++++++++++++++++----- 7 files changed, 156 insertions(+), 61 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 0ed92b3ff..9e627da8c 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -7883,7 +7883,7 @@ static void ggml_compute_forward_out_prod_f32( float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03)); float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); - float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); + float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); } @@ -7892,7 +7892,7 @@ static void ggml_compute_forward_out_prod_f32( float * s0 = (float *) ((char *) src0->data + ( i01*nb01 + i02*nb02 + i03*nb03)); float * s1 = (float *) ((char *) src1->data + (i1*nb10 + i11*nb11 + i12*nb12 + i13*nb13)); - float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); + float * d = (float *) ((char *) dst->data + ( i1*nb1 + i2*nb2 + i3*nb3)); ggml_vec_mad_f32(ne0, d, s0, *s1); } diff --git a/ggml/src/ggml-cpu/ggml-cpu.cpp b/ggml/src/ggml-cpu/ggml-cpu.cpp index 35a1c876c..2ccb4b472 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.cpp +++ b/ggml/src/ggml-cpu/ggml-cpu.cpp @@ -416,7 +416,8 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st case GGML_OP_IM2COL_BACK: return src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32; case GGML_OP_OUT_PROD: - return (src0->type == GGML_TYPE_F32 || ggml_is_quantized(src0->type)) && src1->type == GGML_TYPE_F32; + return (src0->type == GGML_TYPE_F32 || (ggml_is_quantized(src0->type) && src0->ne[2] == src1->ne[2] && src0->ne[3] == src1->ne[3])) && + src1->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; default: return true; } diff --git a/ggml/src/ggml-cuda/binbcast.cu b/ggml/src/ggml-cuda/binbcast.cu index c7b6be4e2..ce4b9cfb5 100644 --- a/ggml/src/ggml-cuda/binbcast.cu +++ b/ggml/src/ggml-cuda/binbcast.cu @@ -93,26 +93,31 @@ static __global__ void k_bin_bcast_unravel(const src0_t * src0, const src1_t * s template static __global__ void k_repeat_back( - const T * __restrict__ src, T * __restrict__ dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, - const int64_t ne0, const int64_t ne1, const int64_t ne2) { + const T * __restrict__ src, T * __restrict__ dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, + const size_t s00, const size_t s01, const size_t s02, const size_t s03, + const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3) { - const int64_t tid0 = (int64_t) blockIdx.x*blockDim.x + threadIdx.x; - const int64_t tid1 = (int64_t) blockIdx.y*blockDim.y + threadIdx.y; - const int64_t tid2 = (int64_t) blockIdx.z*blockDim.z + threadIdx.z; + const int64_t tid0 = int64_t(blockIdx.x)*blockDim.x + threadIdx.x; + const int64_t tid1 = int64_t(blockIdx.y)*blockDim.y + threadIdx.y; + const int64_t tid23 = int64_t(blockIdx.z)*blockDim.z + threadIdx.z; + const int64_t tid2 = tid23 % ne2; + const int64_t tid3 = tid23 / ne2; if (tid0 >= ne0) { return; } T sum = 0; - for (int64_t i2 = tid2; i2 < ne02; i2 += ne2) { - for (int64_t i1 = tid1; i1 < ne01; i1 += ne1) { - for (int64_t i0 = tid0; i0 < ne00; i0 += ne0) { - sum += src[i2*ne01*ne00 + i1*ne00 + i0]; + for (int64_t i3 = tid3; i3 < ne03; i3 += ne3) { + for (int64_t i2 = tid2; i2 < ne02; i2 += ne2) { + for (int64_t i1 = tid1; i1 < ne01; i1 += ne1) { + for (int64_t i0 = tid0; i0 < ne00; i0 += ne0) { + sum += src[i3*s03 + i2*s02 + i1*s01 + i0*s00]; + } } } } - dst[tid2*ne1*ne0 + tid1*ne0 + tid0] = sum; + dst[tid3*ne2*ne1*ne0 + tid2*ne1*ne0 + tid1*ne0 + tid0] = sum; } template @@ -274,12 +279,14 @@ struct bin_bcast_cuda { template static void repeat_back_cuda( - const T * src, T * dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, - const int64_t ne0, const int64_t ne1, const int64_t ne2, cudaStream_t stream) { + const T * src, T * dst, const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03, + const size_t s00, const size_t s01, const size_t s02, const size_t s03, + const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, cudaStream_t stream) { const dim3 block_dims(WARP_SIZE, 1, 1); - const dim3 block_nums((ne0 + WARP_SIZE - 1) / WARP_SIZE, ne1, ne2); - k_repeat_back<<>>(src, dst, ne00, ne01, ne02, ne0, ne1, ne2); + const dim3 block_nums((ne0 + WARP_SIZE - 1) / WARP_SIZE, ne1, ne2*ne3); + k_repeat_back<<>> + (src, dst, ne00, ne01, ne02, ne03, s00, s01, s02, s03, ne0, ne1, ne2, ne3); } template @@ -326,27 +333,26 @@ void ggml_cuda_op_repeat_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst const ggml_tensor * src0 = dst->src[0]; GGML_ASSERT(src0->type == dst->type); - GGML_ASSERT(ggml_is_contiguous(src0)); GGML_ASSERT(ggml_is_contiguous(dst)); GGML_ASSERT(ggml_can_repeat(dst, src0)); cudaStream_t stream = ctx.stream(); - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - const int64_t ne02 = src0->ne[2]; - GGML_ASSERT(src0->ne[3] == 1); + GGML_TENSOR_UNARY_OP_LOCALS; - const int64_t ne0 = dst->ne[0]; - const int64_t ne1 = dst->ne[1]; - const int64_t ne2 = dst->ne[2]; - GGML_ASSERT(dst->ne[3] == 1); + GGML_ASSERT(ne2*ne3 <= (1 << 15)); + + const size_t ts = ggml_type_size(src0->type); + const size_t s00 = nb00 / ts; + const size_t s01 = nb01 / ts; + const size_t s02 = nb02 / ts; + const size_t s03 = nb03 / ts; switch (dst->type) { case GGML_TYPE_F32: { const float * src0_d = (const float *) src0->data; float * dst_d = (float *) dst->data; - repeat_back_cuda(src0_d, dst_d, ne00, ne01, ne02, ne0, ne1, ne2, stream); + repeat_back_cuda(src0_d, dst_d, ne00, ne01, ne02, ne03, s00, s01, s02, s03, ne0, ne1, ne2, ne3, stream); } break; default: { GGML_ASSERT(false); diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 7fd1fc853..e602419bc 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -3002,7 +3002,7 @@ static bool ggml_backend_cuda_device_supports_op(ggml_backend_dev_t dev, const g return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16; } break; case GGML_OP_REPEAT_BACK: - return op->type == GGML_TYPE_F32 && op->src[0]->ne[3] == 1; + return op->type == GGML_TYPE_F32 && (op->src[0]->ne[2]*op->src[0]->ne[3]) <= (1 << 15); case GGML_OP_CONCAT: { ggml_type src0_type = op->src[0]->type; diff --git a/ggml/src/ggml-cuda/out-prod.cu b/ggml/src/ggml-cuda/out-prod.cu index 73e3e2c47..c9b2b699c 100644 --- a/ggml/src/ggml-cuda/out-prod.cu +++ b/ggml/src/ggml-cuda/out-prod.cu @@ -34,6 +34,9 @@ void ggml_cuda_out_prod(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { CUBLAS_CHECK(cublasSetStream(handle, stream)); + const int64_t lda = nb01 / sizeof(float); + const int64_t ldc = nb1 / sizeof(float); + const bool src1_T = ggml_is_transposed(src1); const cublasOperation_t src1_cublas_op = src1_T ? CUBLAS_OP_N : CUBLAS_OP_T; const int64_t ldb = (src1_T ? nb10 : nb11) / sizeof(float); @@ -57,9 +60,9 @@ void ggml_cuda_out_prod(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { CUBLAS_CHECK( cublasSgemm(handle, CUBLAS_OP_N, src1_cublas_op, ne0, ne1, ne01, - &alpha, src0_d + (i3/dps3)*s03 + (i2/dps2)*s02, ne00, + &alpha, src0_d + (i3/dps3)*s03 + (i2/dps2)*s02, lda, src1_d + i3 *s13 + i2 *s12, ldb, - &beta, dst_d + i3 *s3 + i2 *s2, ne0)); + &beta, dst_d + i3 *s3 + i2 *s2, ldc)); } } } diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index b1d0d4913..92c4294c5 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -5339,7 +5339,7 @@ static void ggml_compute_backward( } break; case GGML_OP_MUL: { if (src0_needs_grads) { - ggml_add_or_set(ctx, cgraph, isrc0, ggml_mul(ctx, src1, grad)); + ggml_add_or_set(ctx, cgraph, isrc0, ggml_mul(ctx, grad, src1)); } if (src1_needs_grads) { struct ggml_tensor * tmp = ggml_mul(ctx, src0, grad); @@ -5431,21 +5431,25 @@ static void ggml_compute_backward( // src1.shape [n,p,qq,rr] if (src0_needs_grads) { - struct ggml_tensor * s1_tg = + GGML_ASSERT(grad->ne[2] == src1->ne[2]); + GGML_ASSERT(grad->ne[3] == src1->ne[3]); + struct ggml_tensor * tmp = ggml_out_prod(ctx, // [n,m,qq,rr] src1, // [n,p,qq,rr] grad); // [m,p,qq,rr] - const int64_t qq = s1_tg->ne[2]; - const int64_t rr = s1_tg->ne[3]; - const int64_t q1 = src0->ne[2]; - const int64_t r1 = src0->ne[3]; - const bool ne2_broadcasted = qq > q1; - const bool ne3_broadcasted = rr > r1; - if (ne2_broadcasted || ne3_broadcasted) { - // sum broadcast repetitions of s1_tg into shape of src0 - s1_tg = ggml_repeat_back(ctx, s1_tg, src0); + if (!ggml_are_same_shape(tmp, src0)) { + GGML_ASSERT(tmp->ne[0] == src0->ne[0]); + GGML_ASSERT(tmp->ne[1] == src0->ne[1]); + GGML_ASSERT(tmp->ne[3] == 1); + + const int64_t nr2 = tmp->ne[2] / src0->ne[2]; + const size_t nb2 = tmp->nb[2] * nr2; + const size_t nb3 = tmp->nb[2]; + + tmp = ggml_view_4d(ctx, tmp, src0->ne[0], src0->ne[1], src0->ne[2], nr2, tmp->nb[1], nb2, nb3, 0); + tmp = ggml_repeat_back(ctx, tmp, src0); } - ggml_add_or_set(ctx, cgraph, isrc0, s1_tg /*= [n,m,q1,r1]*/); + ggml_add_or_set(ctx, cgraph, isrc0, tmp); } if (src1_needs_grads) { ggml_add_or_set(ctx, cgraph, isrc1, @@ -5514,7 +5518,9 @@ static void ggml_compute_backward( if (src0_needs_grads) { GGML_ASSERT(!cgraph->grads[isrc0] || ggml_is_contiguous(cgraph->grads[isrc0])); GGML_ASSERT(ggml_is_contiguous(grad)); - ggml_add_or_set(ctx, cgraph, isrc0, grad); + GGML_ASSERT(ggml_nelements(tensor) == ggml_nelements(src0)); + ggml_add_or_set(ctx, cgraph, isrc0, + ggml_are_same_shape(tensor, src0) ? grad : ggml_reshape(ctx, grad, src0)); } } break; case GGML_OP_RESHAPE: { diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 381956a04..468016403 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1302,6 +1302,59 @@ struct test_repeat : public test_case { } }; +// GGML_OP_REPEAT_BACK +struct test_repeat_back : public test_case { + const ggml_type type; + const std::array ne; + const std::array nr; + const bool v; // whether src is a noncontiguous view + + std::string vars() override { + return VARS_TO_STR4(type, ne, nr, v); + } + + size_t op_size(ggml_tensor * t) override { + return ggml_nbytes(t) * 2; + } + + test_repeat_back(ggml_type type = GGML_TYPE_F32, + std::array ne = {8, 6, 4, 2}, + std::array nr = {2, 2, 2, 2}, + bool v = false) + : type(type), ne(ne), nr(nr), v(v) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * src = ggml_new_tensor_4d(ctx, type, ne[0]*nr[0], ne[1]*nr[1], ne[2]*nr[2], ne[3]*nr[3]); + ggml_set_name(src, "src"); + + if (v) { + GGML_ASSERT(ne[0] % 2 == 0); + GGML_ASSERT(ne[1] % 2 == 0); + GGML_ASSERT(ne[2] % 2 == 0); + GGML_ASSERT(ne[3] % 2 == 0); + GGML_ASSERT(nr[0] % 2 == 0 || nr[0] == 1); + GGML_ASSERT(nr[1] % 2 == 0 || nr[1] == 1); + GGML_ASSERT(nr[2] % 2 == 0 || nr[2] == 1); + GGML_ASSERT(nr[3] % 2 == 0 || nr[3] == 1); + + const int64_t ne00 = nr[0] == 1 ? src->ne[0] : src->ne[0] / 2; + const int64_t ne01 = nr[1] == 1 ? src->ne[1] : src->ne[1] / 2; + const int64_t ne02 = nr[2] == 1 ? src->ne[2] : src->ne[2] / 2; + const int64_t ne03 = nr[3] == 1 ? src->ne[3] : src->ne[3] / 2; + + src = ggml_view_4d(ctx, src, ne00, ne01, ne02, ne03, src->nb[1], src->nb[2], src->nb[3], 0); + } + + ggml_tensor * target = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_set_name(target, "target"); + + ggml_tensor * out = ggml_repeat_back(ctx, src, target); + ggml_set_name(out, "out"); + + return out; + } +}; + // GGML_OP_DUP struct test_dup : public test_case { const ggml_type type; @@ -1849,6 +1902,10 @@ struct test_mul_mat : public test_case { return 5e-4; } + int64_t grad_nmax() override { + return 20000; + } + uint64_t op_flops(ggml_tensor * t) override { GGML_UNUSED(t); return 2 * m * n * k * bs[0] * nr[0] * bs[1] * nr[1]; @@ -1878,8 +1935,12 @@ struct test_mul_mat : public test_case { a = ggml_new_tensor_4d(ctx, type_a, ne_a[per[0]], ne_a[per[1]], ne_a[per[2]], ne_a[per[3]]); b = ggml_new_tensor_4d(ctx, type_b, ne_b[per[0]], ne_b[per[1]], ne_b[per[2]], ne_b[per[3]]); - ggml_set_param(ctx, a); - ggml_set_param(ctx, b); + if (!ggml_is_quantized(type_a)) { + if (bs[1] == 1 && nr[1] == 1) { + ggml_set_param(ctx, a); + } + ggml_set_param(ctx, b); + } ggml_set_name(a, "a"); ggml_set_name(b, "b"); @@ -1890,8 +1951,12 @@ struct test_mul_mat : public test_case { } else { a = ggml_new_tensor_4d(ctx, type_a, k, m, bs[0], bs[1]); b = ggml_new_tensor_4d(ctx, type_b, k, n, bs[0]*nr[0], bs[1]*nr[1]); - ggml_set_param(ctx, a); - ggml_set_param(ctx, b); + if (!ggml_is_quantized(type_a)) { + if (bs[1] == 1 && nr[1] == 1) { + ggml_set_param(ctx, a); + } + ggml_set_param(ctx, b); + } ggml_set_name(a, "a"); ggml_set_name(b, "b"); } @@ -3798,6 +3863,16 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_repeat(GGML_TYPE_I16, {10, 5, 4, ne3}, {1, 1, 1, 2})); } + for (bool view : {false, true}) { + test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 1, 1, 1}, view)); + test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {2, 1, 1, 1}, view)); + test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 2, 1, 1}, view)); + test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 1, 2, 1}, view)); + test_cases.emplace_back(new test_repeat_back(GGML_TYPE_F32, {8, 6, 4, 2}, {1, 1, 1, 2}, view)); + test_cases.emplace_back(new test_repeat_back(GGML_TYPE_I32, {8, 6, 4, 2}, {2, 1, 1, 1}, view)); + test_cases.emplace_back(new test_repeat_back(GGML_TYPE_I16, {8, 6, 4, 2}, {1, 1, 1, 2}, view)); + } + test_cases.emplace_back(new test_dup(GGML_TYPE_F32)); test_cases.emplace_back(new test_dup(GGML_TYPE_F16)); test_cases.emplace_back(new test_dup(GGML_TYPE_I32)); @@ -3919,21 +3994,25 @@ static std::vector> make_test_cases_eval() { for (ggml_type type_a : base_types) { for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) { // test cases without permutation - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 1, 1}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 1}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 1}, {2, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {2, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {1, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {10, 10}, {2, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {1, 1}, {1, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 1}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {1, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {3, 2}, {2, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, { 1, 1}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 1}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 1}, {2, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 1})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {1, 2})); - test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {1, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {1, 1}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {1, 1}, {1, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 1}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 1}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {1, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {2, 1})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {1, 2})); + test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {3, 2}, {2, 2})); // test cases with permutation test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, {2, 3}, {1, 1}, {0, 2, 1, 3})); From a07c2c8a52464646ce13040e62c1ea04459f721e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jafar=20Uru=C3=A7?= Date: Fri, 24 Jan 2025 13:30:13 +0000 Subject: [PATCH 146/279] docs : Update readme to build targets for local docker build (#11368) --- docs/backend/SYCL.md | 2 +- docs/build.md | 2 +- docs/docker.md | 12 ++++++------ 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md index 8d8312e91..89ddbd669 100644 --- a/docs/backend/SYCL.md +++ b/docs/backend/SYCL.md @@ -133,7 +133,7 @@ The docker build option is currently limited to *intel GPU* targets. ### Build image ```sh # Using FP16 -docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=ON" -f .devops/llama-cli-intel.Dockerfile . +docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=ON" --target light -f .devops/intel.Dockerfile . ``` *Notes*: diff --git a/docs/build.md b/docs/build.md index 3b0d2211d..dd6495028 100644 --- a/docs/build.md +++ b/docs/build.md @@ -286,7 +286,7 @@ You don't need to install Vulkan SDK. It will be installed inside the container. ```sh # Build the image -docker build -t llama-cpp-vulkan -f .devops/llama-cli-vulkan.Dockerfile . +docker build -t llama-cpp-vulkan --target light -f .devops/vulkan.Dockerfile . # Then, use it: docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-vulkan -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 diff --git a/docs/docker.md b/docs/docker.md index 8d90e6ded..dac9a9ec1 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -60,9 +60,9 @@ Assuming one has the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia ## Building Docker locally ```bash -docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile . -docker build -t local/llama.cpp:light-cuda -f .devops/llama-cli-cuda.Dockerfile . -docker build -t local/llama.cpp:server-cuda -f .devops/llama-server-cuda.Dockerfile . +docker build -t local/llama.cpp:full-cuda --target full -f .devops/cuda.Dockerfile . +docker build -t local/llama.cpp:light-cuda --target light -f .devops/cuda.Dockerfile . +docker build -t local/llama.cpp:server-cuda --target server -f .devops/cuda.Dockerfile . ``` You may want to pass in some different `ARGS`, depending on the CUDA environment supported by your container host, as well as the GPU architecture. @@ -95,9 +95,9 @@ Assuming one has the [mt-container-toolkit](https://developer.mthreads.com/musa/ ## Building Docker locally ```bash -docker build -t local/llama.cpp:full-musa -f .devops/full-musa.Dockerfile . -docker build -t local/llama.cpp:light-musa -f .devops/llama-cli-musa.Dockerfile . -docker build -t local/llama.cpp:server-musa -f .devops/llama-server-musa.Dockerfile . +docker build -t local/llama.cpp:full-musa --target full -f .devops/musa.Dockerfile . +docker build -t local/llama.cpp:light-musa --target light -f .devops/musa.Dockerfile . +docker build -t local/llama.cpp:server-musa --target server -f .devops/musa.Dockerfile . ``` You may want to pass in some different `ARGS`, depending on the MUSA environment supported by your container host, as well as the GPU architecture. From 9755129c27da76d768bd7e47e206bac61b40cf18 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Fri, 24 Jan 2025 18:41:30 +0200 Subject: [PATCH 147/279] release : pack /lib in the packages (#11392) * release : pack /lib and /include in the packages * cmake : put libs in /bin * TMP : push artifacts * Revert "TMP : push artifacts" This reverts commit 4decf2c4dfc5cdf5d96ea44c03c8f9801ab41262. * ci : fix HIP cmake compiler options to be on first line * ci : restore the original HIP commands * ci : change ubuntu build from latest to 20.04 * ci : try to fix macos build rpaths * ci : remove obsolete MacOS build * TMP : push artifacts * ci : change back to ubuntu latest * ci : macos set build rpath to "@loader_path" * ci : fix typo * ci : change ubuntu package to 22.04 * Revert "TMP : push artifacts" This reverts commit 537b09e70ffc604c414ee78acf3acb4c940ec597. --- .github/workflows/build.yml | 101 +++++++++++++++++++----------------- CMakeLists.txt | 1 + 2 files changed, 53 insertions(+), 49 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fe3b2cdfa..fda726955 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -56,6 +56,7 @@ jobs: mkdir build cd build cmake .. \ + -DCMAKE_BUILD_RPATH="@loader_path" \ -DLLAMA_FATAL_WARNINGS=ON \ -DLLAMA_CURL=ON \ -DGGML_METAL_USE_BF16=ON \ @@ -120,6 +121,7 @@ jobs: # Metal is disabled due to intermittent failures with Github runners not having a GPU: # https://github.com/ggerganov/llama.cpp/actions/runs/8635935781/job/23674807267#step:5:2313 cmake -B build \ + -DCMAKE_BUILD_RPATH="@loader_path" \ -DLLAMA_FATAL_WARNINGS=ON \ -DLLAMA_CURL=ON \ -DGGML_METAL=OFF \ @@ -160,8 +162,8 @@ jobs: path: llama-${{ steps.tag.outputs.name }}-bin-macos-x64.zip name: llama-bin-macos-x64.zip - ubuntu-latest-cmake: - runs-on: ubuntu-latest + ubuntu-cpu-cmake: + runs-on: ubuntu-22.04 steps: - name: Clone @@ -181,7 +183,10 @@ jobs: run: | mkdir build cd build - cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_CURL=ON -DGGML_RPC=ON + cmake .. \ + -DLLAMA_FATAL_WARNINGS=ON \ + -DLLAMA_CURL=ON \ + -DGGML_RPC=ON cmake --build . --config Release -j $(nproc) - name: Test @@ -256,7 +261,10 @@ jobs: run: | mkdir build cd build - cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} + cmake .. \ + -DLLAMA_FATAL_WARNINGS=ON \ + -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \ + -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} cmake --build . --config ${{ matrix.build_type }} -j $(nproc) - name: Build (no OpenMP) @@ -265,7 +273,11 @@ jobs: run: | mkdir build cd build - cmake .. -DLLAMA_FATAL_WARNINGS=ON -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} -DGGML_OPENMP=OFF + cmake .. \ + -DLLAMA_FATAL_WARNINGS=ON \ + -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \ + -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \ + -DGGML_OPENMP=OFF cmake --build . --config ${{ matrix.build_type }} -j $(nproc) - name: Test @@ -295,7 +307,8 @@ jobs: run: | mkdir build cd build - cmake -DGGML_RPC=ON .. + cmake .. \ + -DGGML_RPC=ON cmake --build . --config Release -j $(nproc) - name: Test @@ -325,7 +338,8 @@ jobs: run: | mkdir build cd build - cmake -DGGML_VULKAN=ON .. + cmake .. \ + -DGGML_VULKAN=ON cmake --build . --config Release -j $(nproc) - name: Test @@ -352,13 +366,18 @@ jobs: - name: Build with native CMake HIP support id: cmake_build run: | - cmake -B build -S . -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" -DGGML_HIP=ON + cmake -B build -S . \ + -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" \ + -DGGML_HIP=ON cmake --build build --config Release -j $(nproc) - name: Build with legacy HIP support id: cmake_build_legacy_hip run: | - cmake -B build2 -S . -DCMAKE_C_COMPILER=hipcc -DCMAKE_CXX_COMPILER=hipcc -DGGML_HIP=ON + cmake -B build2 -S . \ + -DCMAKE_C_COMPILER=hipcc \ + -DCMAKE_CXX_COMPILER=hipcc \ + -DGGML_HIP=ON cmake --build build2 --config Release -j $(nproc) ubuntu-22-cmake-musa: @@ -379,7 +398,8 @@ jobs: - name: Build with native CMake MUSA support id: cmake_build run: | - cmake -B build -S . -DGGML_MUSA=ON + cmake -B build -S . \ + -DGGML_MUSA=ON cmake --build build --config Release -j $(nproc) ubuntu-22-cmake-sycl: @@ -420,7 +440,10 @@ jobs: source /opt/intel/oneapi/setvars.sh mkdir build cd build - cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx .. + cmake .. \ + -DGGML_SYCL=ON \ + -DCMAKE_C_COMPILER=icx \ + -DCMAKE_CXX_COMPILER=icpx cmake --build . --config Release -j $(nproc) ubuntu-22-cmake-sycl-fp16: @@ -461,42 +484,13 @@ jobs: source /opt/intel/oneapi/setvars.sh mkdir build cd build - cmake -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON .. + cmake .. \ + -DGGML_SYCL=ON \ + -DCMAKE_C_COMPILER=icx \ + -DCMAKE_CXX_COMPILER=icpx \ + -DGGML_SYCL_F16=ON cmake --build . --config Release -j $(nproc) - # TODO: build with GGML_METAL=OFF because test-backend-ops fail on "Apple Paravirtual device" and I don't know - # how to debug it. - # ref: https://github.com/ggerganov/llama.cpp/actions/runs/7132125951/job/19422043567?pr=4359#step:5:6584 - # would be great if we fix these - macOS-latest-cmake: - runs-on: macos-latest - - steps: - - name: Clone - id: checkout - uses: actions/checkout@v4 - - - name: Dependencies - id: depends - continue-on-error: true - run: | - brew update - - - name: Build - id: cmake_build - run: | - sysctl -a - mkdir build - cd build - cmake -DLLAMA_FATAL_WARNINGS=ON -DGGML_METAL=OFF .. - cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) - - - name: Test - id: cmake_test - run: | - cd build - ctest -L main --verbose --timeout 900 - macOS-latest-cmake-ios: runs-on: macos-latest @@ -827,7 +821,13 @@ jobs: - name: Build with CMake run: | - cmake -S . -B build -G Ninja -DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DCMAKE_CUDA_ARCHITECTURES=89-real -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined -DLLAMA_FATAL_WARNINGS=ON + cmake -S . -B build -G Ninja \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_CUDA_ARCHITECTURES=89-real \ + -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined \ + -DLLAMA_FATAL_WARNINGS=ON \ + -DGGML_NATIVE=OFF \ + -DGGML_CUDA=ON cmake --build build windows-2019-cmake-cuda: @@ -916,7 +916,11 @@ jobs: shell: cmd run: | call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat" - cmake -S . -B build -G "Ninja Multi-Config" -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_CUDA=ON -DGGML_RPC=ON + cmake -S . -B build -G "Ninja Multi-Config" \ + -DLLAMA_BUILD_SERVER=ON \ + -DGGML_NATIVE=OFF \ + -DGGML_CUDA=ON \ + -DGGML_RPC=ON set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1 cmake --build build --config Release -j %NINJA_JOBS% -t ggml cmake --build build --config Release @@ -1201,8 +1205,7 @@ jobs: runs-on: ubuntu-latest needs: - - ubuntu-latest-cmake - - macOS-latest-cmake + - ubuntu-cpu-cmake - windows-latest-cmake - windows-2019-cmake-cuda - windows-latest-cmake-hip-release diff --git a/CMakeLists.txt b/CMakeLists.txt index 42caed486..7e41a44d2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,6 +16,7 @@ endif() list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/") set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR) set(LLAMA_STANDALONE ON) From 9fbadaef4f0903c64895ba9c70f02ac6e6a4b41c Mon Sep 17 00:00:00 2001 From: uvos Date: Fri, 24 Jan 2025 17:50:49 +0100 Subject: [PATCH 148/279] rocBLAS: Avoid fp32->fp16->fp32 conversion on cdna (#11356) --- ggml/src/ggml-cuda/ggml-cuda.cu | 62 ++++++++++++++++++++------------- ggml/src/ggml-cuda/mmvq.cu | 3 +- 2 files changed, 40 insertions(+), 25 deletions(-) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index e602419bc..fb3d9e2d9 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1082,7 +1082,9 @@ static void ggml_cuda_op_mul_mat_cublas( const int compute_capability = ggml_cuda_info().devices[id].cc; - if (compute_capability >= GGML_CUDA_CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) { + const bool use_fp16 = (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT; + + if (compute_capability >= GGML_CUDA_CC_VOLTA && use_fp16) { // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32 ggml_cuda_pool_alloc src0_as_f16(ctx.pool(id)); if (src0->type != GGML_TYPE_F16) { @@ -1103,28 +1105,38 @@ static void ggml_cuda_op_mul_mat_cublas( to_fp16_cuda(src1_ddf_i, src1_as_f16.get(), ne, stream); } const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16.get(); - ggml_cuda_pool_alloc dst_f16(ctx.pool(id), row_diff*src1_ncols); - - const half alpha_f16 = 1.0f; - const half beta_f16 = 0.0f; - - cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F; - if (ggml_cuda_info().devices[ctx.device].cc == GGML_CUDA_CC_CDNA) { - cu_compute_type = CUBLAS_COMPUTE_32F; - } CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); - CUBLAS_CHECK( - cublasGemmEx(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, - row_diff, src1_ncols, ne10, - &alpha_f16, src0_ptr, CUDA_R_16F, ne00, - src1_ptr, CUDA_R_16F, ne10, - &beta_f16, dst_f16.get(), CUDA_R_16F, ldc, - cu_compute_type, - CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); - to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream); + if (compute_capability == GGML_CUDA_CC_CDNA) { + const float alpha = 1.0f; + const float beta = 0.0f; + CUBLAS_CHECK( + cublasGemmEx(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, + row_diff, src1_ncols, ne10, + &alpha, src0_ptr, CUDA_R_16F, ne00, + src1_ptr, CUDA_R_16F, ne10, + &beta, dst_dd_i, CUDA_R_32F, ldc, + CUBLAS_COMPUTE_32F, + CUBLAS_GEMM_DEFAULT_TENSOR_OP)); + } else { + ggml_cuda_pool_alloc dst_f16(ctx.pool(id), row_diff*src1_ncols); + + const half alpha_f16 = 1.0f; + const half beta_f16 = 0.0f; + + CUBLAS_CHECK( + cublasGemmEx(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, + row_diff, src1_ncols, ne10, + &alpha_f16, src0_ptr, CUDA_R_16F, ne00, + src1_ptr, CUDA_R_16F, ne10, + &beta_f16, dst_dd_i, CUDA_R_16F, ldc, + CUBLAS_COMPUTE_16F, + CUBLAS_GEMM_DEFAULT_TENSOR_OP)); + + const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16); + to_fp32_cuda(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream); + } } else { ggml_cuda_pool_alloc src0_ddq_as_f32(ctx.pool(id)); ggml_cuda_pool_alloc src1_ddq_as_f32(ctx.pool(id)); @@ -1613,10 +1625,6 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co cublasComputeType_t cu_compute_type = CUBLAS_COMPUTE_16F; cudaDataType_t cu_data_type = CUDA_R_16F; - if (ggml_cuda_info().devices[ctx.device].cc == GGML_CUDA_CC_CDNA) { - cu_compute_type = CUBLAS_COMPUTE_32F; - } - // dst strides size_t nbd2 = dst->nb[2]; size_t nbd3 = dst->nb[3]; @@ -1645,6 +1653,12 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co beta = &beta_f32; } + if (ggml_cuda_info().devices[ctx.device].cc == GGML_CUDA_CC_CDNA) { + cu_compute_type = CUBLAS_COMPUTE_32F; + alpha = &alpha_f32; + beta = &beta_f32; + } + GGML_ASSERT(ne12 % ne02 == 0); GGML_ASSERT(ne13 % ne03 == 0); diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu index e3b912d87..4fb466ca0 100644 --- a/ggml/src/ggml-cuda/mmvq.cu +++ b/ggml/src/ggml-cuda/mmvq.cu @@ -142,7 +142,7 @@ static void mul_mat_vec_q_cuda( int64_t nwarps = 1; int64_t rows_per_cuda_block = 1; - if (ggml_cuda_info().devices[id].cc < GGML_CUDA_CC_CDNA || ggml_cuda_info().devices[id].cc == GGML_CUDA_CC_RDNA1) { // NVIDIA and AMD older than RDNA2 but not CDNA + if (ggml_cuda_info().devices[id].cc < GGML_CUDA_CC_RDNA2) { // NVIDIA and AMD older than RDNA2 switch(ncols_y) { case 1: nwarps = 4; @@ -166,6 +166,7 @@ static void mul_mat_vec_q_cuda( break; } } + const int64_t nblocks = (nrows_x + rows_per_cuda_block - 1) / rows_per_cuda_block; const dim3 block_nums(nblocks, 1, 1); const dim3 block_dims(WARP_SIZE, nwarps, 1); From c5d9effb49649db80a52caf5c0626de6f342f526 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Fri, 24 Jan 2025 21:02:43 +0100 Subject: [PATCH 149/279] CUDA: fix FP16 cuBLAS GEMM (#11396) --- ggml/src/ggml-cuda/ggml-cuda.cu | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index fb3d9e2d9..fbe889a01 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1114,8 +1114,8 @@ static void ggml_cuda_op_mul_mat_cublas( CUBLAS_CHECK( cublasGemmEx(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, - &alpha, src0_ptr, CUDA_R_16F, ne00, - src1_ptr, CUDA_R_16F, ne10, + &alpha, src0_ptr, CUDA_R_16F, ne00, + src1_ptr, CUDA_R_16F, ne10, &beta, dst_dd_i, CUDA_R_32F, ldc, CUBLAS_COMPUTE_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); @@ -1128,9 +1128,9 @@ static void ggml_cuda_op_mul_mat_cublas( CUBLAS_CHECK( cublasGemmEx(ctx.cublas_handle(id), CUBLAS_OP_T, CUBLAS_OP_N, row_diff, src1_ncols, ne10, - &alpha_f16, src0_ptr, CUDA_R_16F, ne00, - src1_ptr, CUDA_R_16F, ne10, - &beta_f16, dst_dd_i, CUDA_R_16F, ldc, + &alpha_f16, src0_ptr, CUDA_R_16F, ne00, + src1_ptr, CUDA_R_16F, ne10, + &beta_f16, dst_f16.get(), CUDA_R_16F, ldc, CUBLAS_COMPUTE_16F, CUBLAS_GEMM_DEFAULT_TENSOR_OP)); From 5f0db9522f347b095f84c3033d6c1c1895402e25 Mon Sep 17 00:00:00 2001 From: uvos Date: Sat, 25 Jan 2025 00:02:23 +0100 Subject: [PATCH 150/279] hip : Add hipGraph and VMM support to ROCM (#11362) * Add hipGraph support * Enable VMM on rocm --- ggml/CMakeLists.txt | 1 + ggml/src/ggml-cuda/common.cuh | 2 +- ggml/src/ggml-cuda/ggml-cuda.cu | 58 +++++++++++++++++++++----------- ggml/src/ggml-cuda/vendors/hip.h | 43 +++++++++++++++++++++++ ggml/src/ggml-hip/CMakeLists.txt | 8 +++++ 5 files changed, 92 insertions(+), 20 deletions(-) diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index ff68ddc21..123c755ac 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -154,6 +154,7 @@ option(GGML_CUDA_FA_ALL_QUANTS "ggml: compile all quants for FlashA option(GGML_CUDA_GRAPHS "ggml: use CUDA graphs (llama.cpp only)" ${GGML_CUDA_GRAPHS_DEFAULT}) option(GGML_HIP "ggml: use HIP" OFF) +option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF) option(GGML_HIP_UMA "ggml: use HIP unified memory architecture" OFF) option(GGML_VULKAN "ggml: use Vulkan" OFF) option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 2c0a56226..a79fa83c5 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -588,7 +588,7 @@ struct ggml_tensor_extra_gpu { }; -#if (CUDART_VERSION >= 12000) && defined(GGML_CUDA_USE_GRAPHS) +#if ((CUDART_VERSION >= 12000) && defined(GGML_CUDA_USE_GRAPHS)) || defined(GGML_HIP_GRAPHS) #define USE_CUDA_GRAPH #endif diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index fbe889a01..a53a1bbd0 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -62,7 +62,7 @@ static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size"); [[noreturn]] void ggml_cuda_error(const char * stmt, const char * func, const char * file, int line, const char * msg) { int id = -1; // in case cudaGetDevice fails - cudaGetDevice(&id); + (void)cudaGetDevice(&id); GGML_LOG_ERROR(GGML_CUDA_NAME " error: %s\n", msg); GGML_LOG_ERROR(" current device: %d, in function %s at %s:%d\n", id, func, file, line); @@ -152,7 +152,7 @@ static ggml_cuda_device_info ggml_cuda_init() { for (int id = 0; id < info.device_count; ++id) { int device_vmm = 0; -#if !defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM) +#if !defined(GGML_CUDA_NO_VMM) CUdevice device; CU_CHECK(cuDeviceGet(&device, id)); CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); @@ -164,7 +164,7 @@ static ggml_cuda_device_info ggml_cuda_init() { alloc_prop.location.id = id; CU_CHECK(cuMemGetAllocationGranularity(&info.devices[id].vmm_granularity, &alloc_prop, CU_MEM_ALLOC_GRANULARITY_RECOMMENDED)); } -#endif // !defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM) +#endif // !defined(GGML_CUDA_NO_VMM) info.devices[id].vmm = !!device_vmm; cudaDeviceProp prop; @@ -300,7 +300,7 @@ struct ggml_cuda_pool_leg : public ggml_cuda_pool { }; // pool with virtual memory -#if !defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM) +#if !defined(GGML_CUDA_NO_VMM) struct ggml_cuda_pool_vmm : public ggml_cuda_pool { static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 35; // 32 GB @@ -309,6 +309,9 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool { size_t pool_used = 0; size_t pool_size = 0; size_t granularity; +#if defined(GGML_USE_HIP) + std::vector> mappings; +#endif explicit ggml_cuda_pool_vmm(int device) : device(device), @@ -317,7 +320,14 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool { ~ggml_cuda_pool_vmm() { if (pool_addr != 0) { +#if defined(GGML_USE_HIP) + // Workaround for https://github.com/ROCm/ROCR-Runtime/issues/285 + for (std::pair & mapping : mappings) { + CU_CHECK(cuMemUnmap(mapping.first, mapping.second)); + } +#else CU_CHECK(cuMemUnmap(pool_addr, pool_size)); +#endif CU_CHECK(cuMemAddressFree(pool_addr, CUDA_POOL_VMM_MAX_SIZE)); } } @@ -350,7 +360,11 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool { } // map at the end of the pool - CU_CHECK(cuMemMap(pool_addr + pool_size, reserve_size, 0, handle, 0)); + CUdeviceptr start_ptr = (CUdeviceptr)((char *)(pool_addr) + pool_size); + CU_CHECK(cuMemMap(start_ptr, reserve_size, 0, handle, 0)); +#if defined(GGML_USE_HIP) + mappings.push_back({start_ptr, reserve_size}); +#endif // the memory allocation handle is no longer needed after mapping CU_CHECK(cuMemRelease(handle)); @@ -360,7 +374,7 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool { access.location.type = CU_MEM_LOCATION_TYPE_DEVICE; access.location.id = device; access.flags = CU_MEM_ACCESS_FLAGS_PROT_READWRITE; - CU_CHECK(cuMemSetAccess(pool_addr + pool_size, reserve_size, &access, 1)); + CU_CHECK(cuMemSetAccess((CUdeviceptr)((char *)(pool_addr) + pool_size), reserve_size, &access, 1)); // add to the pool pool_size += reserve_size; @@ -372,7 +386,7 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool { GGML_ASSERT(pool_addr != 0); - void * ptr = (void *) (pool_addr + pool_used); + void * ptr = (void *) ((CUdeviceptr)((char *)(pool_addr) + pool_used)); *actual_size = size; pool_used += size; @@ -391,17 +405,17 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool { pool_used -= size; // all deallocations must be in reverse order of the allocations - GGML_ASSERT(ptr == (void *) (pool_addr + pool_used)); + GGML_ASSERT(ptr == (void *) ((char *)(pool_addr) + pool_used)); } }; -#endif // !defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM) +#endif // !defined(GGML_CUDA_NO_VMM) std::unique_ptr ggml_backend_cuda_context::new_pool_for_device(int device) { -#if !defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM) +#if !defined(GGML_CUDA_NO_VMM) if (ggml_cuda_info().devices[device].vmm) { return std::unique_ptr(new ggml_cuda_pool_vmm(device)); } -#endif // !defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM) +#endif // !defined(GGML_CUDA_NO_VMM) return std::unique_ptr(new ggml_cuda_pool_leg(device)); } @@ -547,7 +561,7 @@ static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_bac cudaError_t err = ggml_cuda_device_malloc(&dev_ptr, size, buft_ctx->device); if (err != cudaSuccess) { // clear the error - cudaGetLastError(); + (void)cudaGetLastError(); GGML_LOG_ERROR("%s: allocating %.2f MiB on device %d: cudaMalloc failed: %s\n", __func__, size / 1024.0 / 1024.0, buft_ctx->device, cudaGetErrorString(err)); return nullptr; } @@ -962,7 +976,7 @@ static void * ggml_cuda_host_malloc(size_t size) { cudaError_t err = cudaMallocHost((void **) &ptr, size); if (err != cudaSuccess) { // clear the error - cudaGetLastError(); + (void)cudaGetLastError(); GGML_LOG_DEBUG("%s: failed to allocate %.2f MiB of pinned memory: %s\n", __func__, size / 1024.0 / 1024.0, cudaGetErrorString(err)); return nullptr; @@ -1209,7 +1223,7 @@ static void ggml_cuda_set_peer_access(const int n_tokens, int main_device) { CUDA_CHECK(err); } else { // reset the error - cudaGetLastError(); + (void)cudaGetLastError(); } } else { cudaError_t err = cudaDeviceDisablePeerAccess(id_other); @@ -1217,7 +1231,7 @@ static void ggml_cuda_set_peer_access(const int n_tokens, int main_device) { CUDA_CHECK(err); } else { // reset the error - cudaGetLastError(); + (void)cudaGetLastError(); } } } @@ -2452,7 +2466,7 @@ static void maintain_cuda_graph(ggml_backend_cuda_context * cuda_ctx, std::vecto if (stat == cudaErrorInvalidDeviceFunction) { // Fails due to incorrect handling by CUDA runtime of CUDA BLAS node. // We don't need to update blas nodes, so clear error and move on. - cudaGetLastError(); + (void)cudaGetLastError(); } else { GGML_ASSERT(stat == cudaSuccess); } @@ -2507,14 +2521,20 @@ static bool is_cuda_graph_update_required(ggml_backend_cuda_context * cuda_ctx, static void update_cuda_graph_executable(ggml_backend_cuda_context * cuda_ctx) { cudaGraphExecUpdateResultInfo result_info; +#ifdef __HIP_PLATFORM_AMD__ + hipGraphNode_t errorNode; + hipError_t stat = hipGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &errorNode, &result_info); +#else cudaError_t stat = cudaGraphExecUpdate(cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, &result_info); +#endif if (stat == cudaErrorGraphExecUpdateFailure) { #ifndef NDEBUG GGML_LOG_DEBUG("%s: CUDA graph update failed\n", __func__); #endif + // The pre-existing graph exec cannot be updated due to violated constraints // so instead clear error and re-instantiate - cudaGetLastError(); + (void)cudaGetLastError(); CUDA_CHECK(cudaGraphExecDestroy(cuda_ctx->cuda_graph->instance)); cuda_ctx->cuda_graph->instance = nullptr; CUDA_CHECK(cudaGraphInstantiate(&cuda_ctx->cuda_graph->instance, cuda_ctx->cuda_graph->graph, NULL, NULL, 0)); @@ -2742,7 +2762,7 @@ bool ggml_backend_cuda_register_host_buffer(void * buffer, size_t size) { cudaError_t err = cudaHostRegister(buffer, size, cudaHostRegisterPortable | cudaHostRegisterReadOnly); if (err != cudaSuccess) { // clear the error - cudaGetLastError(); + (void)cudaGetLastError(); GGML_LOG_DEBUG("%s: failed to register %.2f MiB of pinned memory: %s\n", __func__, size / 1024.0 / 1024.0, cudaGetErrorString(err)); @@ -2762,7 +2782,7 @@ void ggml_backend_cuda_unregister_host_buffer(void * buffer) { cudaError_t err = cudaHostUnregister(buffer); if (err != cudaSuccess) { // clear the error - cudaGetLastError(); + (void)cudaGetLastError(); } } diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h index c905b15d7..8594093f0 100644 --- a/ggml/src/ggml-cuda/vendors/hip.h +++ b/ggml/src/ggml-cuda/vendors/hip.h @@ -19,6 +19,12 @@ #define CUBLAS_TF32_TENSOR_OP_MATH 0 #define CUDA_R_16F HIPBLAS_R_16F #define CUDA_R_32F HIPBLAS_R_32F +#define CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED hipDeviceAttributeVirtualMemoryManagementSupported +#define CU_MEM_ALLOC_GRANULARITY_RECOMMENDED hipMemAllocationGranularityRecommended +#define CU_MEM_ALLOCATION_TYPE_PINNED hipMemAllocationTypePinned +#define CU_MEM_LOCATION_TYPE_DEVICE hipMemLocationTypeDevice +#define CU_MEM_ACCESS_FLAGS_PROT_READWRITE hipMemAccessFlagsProtReadWrite +#define CU_CHECK(fn) {hipError_t err = fn; if(err != hipSuccess) { GGML_ABORT("HipVMM Failure: %s\n", hipGetErrorString(err)); }} #define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width) #define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6 #define cublasCreate hipblasCreate @@ -74,6 +80,21 @@ #define cudaMemGetInfo hipMemGetInfo #define cudaOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize #define cudaSetDevice hipSetDevice +#define cuDeviceGet hipDeviceGet +#define CUdevice hipDevice_t +#define CUdeviceptr hipDeviceptr_t +#define cuMemUnmap hipMemUnmap +#define CUmemAccessDesc hipMemAccessDesc +#define cuMemAddressFree hipMemAddressFree +#define cuMemRelease hipMemRelease +#define CUmemGenericAllocationHandle hipMemGenericAllocationHandle_t +#define cuMemCreate hipMemCreate +#define cuMemAddressReserve hipMemAddressReserve +#define cuMemMap hipMemMap +#define cuMemSetAccess hipMemSetAccess +#define cuMemGetAllocationGranularity hipMemGetAllocationGranularity +#define CUmemAllocationProp hipMemAllocationProp +#define cuDeviceGetAttribute hipDeviceGetAttribute #define cudaStreamCreateWithFlags hipStreamCreateWithFlags #define cudaStreamDestroy hipStreamDestroy #define cudaStreamFireAndForget hipStreamFireAndForget @@ -81,6 +102,28 @@ #define cudaStreamPerThread hipStreamPerThread #define cudaStreamSynchronize hipStreamSynchronize #define cudaStreamWaitEvent(stream, event, flags) hipStreamWaitEvent(stream, event, flags) +#define cudaGraphExec_t hipGraphExec_t +#define cudaGraphNode_t hipGraphNode_t +#define cudaKernelNodeParams hipKernelNodeParams +#define cudaKernelNodeParams hipKernelNodeParams +#define cudaGraphExecDestroy hipGraphExecDestroy +#define cudaGraphLaunch hipGraphLaunch +#define cudaErrorGraphExecUpdateFailure hipErrorGraphExecUpdateFailure +#define cudaGraphExecUpdateResultInfo hipGraphExecUpdateResult +#define cudaGraphNodeType hipGraphNodeType +#define cudaGraphNodeTypeKernel hipGraphNodeTypeKernel +#define cudaGraphInstantiate hipGraphInstantiate +#define cudaStreamEndCapture hipStreamEndCapture +#define cudaGraphDestroy hipGraphDestroy +#define cudaGraphKernelNodeSetParams hipGraphKernelNodeSetParams +#define cudaErrorInvalidDeviceFunction hipErrorInvalidDeviceFunction +#define cudaGraphKernelNodeGetParams hipGraphKernelNodeGetParams +#define cudaGraphNodeGetType hipGraphNodeGetType +#define cudaGraphGetNodes hipGraphGetNodes +#define cudaGraphExecUpdate hipGraphExecUpdate +#define cudaStreamCaptureModeRelaxed hipStreamCaptureModeRelaxed +#define cudaStreamBeginCapture hipStreamBeginCapture +#define cudaGraph_t hipGraph_t #define cudaStream_t hipStream_t #define cudaSuccess hipSuccess #define __trap() do { abort(); __builtin_unreachable(); } while(0) diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt index d090ba9bd..77994a698 100644 --- a/ggml/src/ggml-hip/CMakeLists.txt +++ b/ggml/src/ggml-hip/CMakeLists.txt @@ -92,6 +92,14 @@ if (GGML_CUDA_NO_PEER_COPY) add_compile_definitions(GGML_CUDA_NO_PEER_COPY) endif() +if (GGML_HIP_GRAPHS) + add_compile_definitions(GGML_HIP_GRAPHS) +endif() + +if (GGML_CUDA_NO_VMM) + add_compile_definitions(GGML_CUDA_NO_VMM) +endif() + if (CXX_IS_HIPCC) set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX) target_link_libraries(ggml-hip PRIVATE hip::device) From 466ea66f338d63109540dae1df97ccfdbf4cd08f Mon Sep 17 00:00:00 2001 From: jiahao su Date: Sat, 25 Jan 2025 07:26:01 +0800 Subject: [PATCH 151/279] CANN: Add Ascend CANN build ci (#10217) * CANN: Add Ascend CANN build ci * Update build.yml * Modify cann image version * Update build.yml * Change to run on x86 system * Update build.yml * Update build.yml * Modify format error * Update build.yml * Add 'Ascend NPU' label restrictions * Exclude non PR event Co-authored-by: Yuanhao Ji * Update build.yml --------- Co-authored-by: Yuanhao Ji --- .github/workflows/build.yml | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index fda726955..7d08574f5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1464,3 +1464,37 @@ jobs: # popd # emcmake cmake . -DCMAKE_BUILD_TYPE=${{ matrix.build }} # make + + openEuler-latest-cmake-cann: + if: ${{ github.event_name != 'pull_request' || contains(github.event.pull_request.labels.*.name, 'Ascend NPU') }} + defaults: + run: + shell: bash -el {0} + runs-on: ubuntu-24.04-arm + strategy: + matrix: + cann: + - '8.0.rc3.beta1-910b-openeuler22.03-py3.10' + device: + - 'ascend910b3' + build: + - 'Release' + container: ascendai/cann:${{ matrix.cann }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Dependencies + run: | + yum update -y + yum install -y git gcc gcc-c++ make cmake + + - name: Build + run: | + export LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/$(uname -m)-linux/devlib/:${LD_LIBRARY_PATH} + + cmake -S . -B build \ + -DCMAKE_BUILD_TYPE=${{ matrix.build }} \ + -DGGML_CANN=on \ + -DSOC_TYPE=${{ matrix.device }} + cmake --build build -j $(nproc) From 00c24acb2ac49d9f8318e808b6ada2f5649f253f Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sat, 25 Jan 2025 13:36:48 +0200 Subject: [PATCH 152/279] ci : fix line breaks on windows builds (#11409) * ci : fix line breaks on windows builds * cont : another try * ci : fix powershell line breaks --- .github/workflows/build.yml | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7d08574f5..37cb6b1e7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -916,10 +916,10 @@ jobs: shell: cmd run: | call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise\VC\Auxiliary\Build\vcvars64.bat" - cmake -S . -B build -G "Ninja Multi-Config" \ - -DLLAMA_BUILD_SERVER=ON \ - -DGGML_NATIVE=OFF \ - -DGGML_CUDA=ON \ + cmake -S . -B build -G "Ninja Multi-Config" ^ + -DLLAMA_BUILD_SERVER=ON ^ + -DGGML_NATIVE=OFF ^ + -DGGML_CUDA=ON ^ -DGGML_RPC=ON set /A NINJA_JOBS=%NUMBER_OF_PROCESSORS%-1 cmake --build build --config Release -j %NINJA_JOBS% -t ggml @@ -1073,7 +1073,12 @@ jobs: run: | $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path) $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}" - cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DGGML_HIP=ON -DCMAKE_BUILD_TYPE=Release -DGGML_RPC=ON + cmake -G "Unix Makefiles" -B build -S . ` + -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" ` + -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" ` + -DCMAKE_BUILD_TYPE=Release ` + -DGGML_HIP=ON ` + -DGGML_RPC=ON cmake --build build -j ${env:NUMBER_OF_PROCESSORS} windows-latest-cmake-hip-release: @@ -1111,7 +1116,13 @@ jobs: run: | $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path) $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}" - cmake -G "Unix Makefiles" -B build -S . -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DGGML_HIP=ON -DCMAKE_BUILD_TYPE=Release -DAMDGPU_TARGETS=${{ matrix.gpu_target }} -DGGML_RPC=ON + cmake -G "Unix Makefiles" -B build -S . ` + -DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" ` + -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" ` + -DCMAKE_BUILD_TYPE=Release ` + -DAMDGPU_TARGETS=${{ matrix.gpu_target }} ` + -DGGML_HIP=ON ` + -DGGML_RPC=ON cmake --build build -j ${env:NUMBER_OF_PROCESSORS} md "build\bin\rocblas\library\" cp "${env:HIP_PATH}\bin\hipblas.dll" "build\bin\" From 20a758155bc5f37290b20ea44d76ba99c4e7f2cb Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Sat, 25 Jan 2025 15:22:29 +0100 Subject: [PATCH 153/279] docker : fix CPU ARM build (#11403) * docker : fix CPU ARM build * add CURL to other builds --- .devops/cpu.Dockerfile | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.devops/cpu.Dockerfile b/.devops/cpu.Dockerfile index 8d020f16c..ab0e951bc 100644 --- a/.devops/cpu.Dockerfile +++ b/.devops/cpu.Dockerfile @@ -2,6 +2,8 @@ ARG UBUNTU_VERSION=22.04 FROM ubuntu:$UBUNTU_VERSION AS build +ARG TARGETARCH + RUN apt-get update && \ apt-get install -y build-essential git cmake libcurl4-openssl-dev @@ -9,7 +11,11 @@ WORKDIR /app COPY . . -RUN cmake -S . -B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release && \ +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON; \ + else \ + cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON; \ + fi && \ cmake --build build -j $(nproc) RUN mkdir -p /app/lib && \ From 49b0e3cec4b67dc9f4debe3a16acd4c819f751d6 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Sat, 25 Jan 2025 16:36:44 +0100 Subject: [PATCH 154/279] server : fix cleaning up stream task (#11418) * server : fix cleaning up stream task * one more spot --- examples/server/server.cpp | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index a94c3822c..b1cde2d7f 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1427,16 +1427,16 @@ struct server_queue { int post(server_task task, bool front = false) { std::unique_lock lock(mutex_tasks); GGML_ASSERT(task.id != -1); + // if this is cancel task make sure to clean up pending tasks + if (task.type == SERVER_TASK_TYPE_CANCEL) { + cleanup_pending_task(task.id_target); + } QUE_DBG("new task, id = %d, front = %d\n", task.id, front); if (front) { queue_tasks.push_front(std::move(task)); } else { queue_tasks.push_back(std::move(task)); } - // if this is cancel task make sure to clean up pending tasks - if (task.type == SERVER_TASK_TYPE_CANCEL) { - cleanup_pending_task(task.id_target); - } condition_tasks.notify_one(); return task.id; } @@ -1448,16 +1448,16 @@ struct server_queue { if (task.id == -1) { task.id = id++; } + // if this is cancel task make sure to clean up pending tasks + if (task.type == SERVER_TASK_TYPE_CANCEL) { + cleanup_pending_task(task.id_target); + } QUE_DBG("new task, id = %d/%d, front = %d\n", task.id, (int) tasks.size(), front); if (front) { queue_tasks.push_front(std::move(task)); } else { queue_tasks.push_back(std::move(task)); } - // if this is cancel task make sure to clean up pending tasks - if (task.type == SERVER_TASK_TYPE_CANCEL) { - cleanup_pending_task(task.id_target); - } } condition_tasks.notify_one(); return 0; @@ -1554,10 +1554,10 @@ struct server_queue { } private: - void cleanup_pending_task(int id_task) { + void cleanup_pending_task(int id_target) { // no need lock because this is called exclusively by post() - auto rm_func = [id_task](const server_task & task) { - return task.id_target == id_task; + auto rm_func = [id_target](const server_task & task) { + return task.id_target == id_target; }; queue_tasks.erase( std::remove_if(queue_tasks.begin(), queue_tasks.end(), rm_func), From 6e264a905bec9e4c0111eb4c91379c88accef7c6 Mon Sep 17 00:00:00 2001 From: Diego Devesa Date: Sat, 25 Jan 2025 17:22:41 +0100 Subject: [PATCH 155/279] docker : add GGML_CPU_ARM_ARCH arg to select ARM architecture to build for (#11419) --- .devops/cpu.Dockerfile | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.devops/cpu.Dockerfile b/.devops/cpu.Dockerfile index ab0e951bc..522ee8147 100644 --- a/.devops/cpu.Dockerfile +++ b/.devops/cpu.Dockerfile @@ -4,6 +4,8 @@ FROM ubuntu:$UBUNTU_VERSION AS build ARG TARGETARCH +ARG GGML_CPU_ARM_ARCH=armv8-a + RUN apt-get update && \ apt-get install -y build-essential git cmake libcurl4-openssl-dev @@ -12,9 +14,12 @@ WORKDIR /app COPY . . RUN if [ "$TARGETARCH" = "amd64" ]; then \ - cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON; \ + cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON -DGGML_NATIVE=OFF -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON; \ + elif [ "$TARGETARCH" = "arm64" ]; then \ + cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ARM_ARCH=${GGML_CPU_ARM_ARCH}; \ else \ - cmake -S . -B build -DCMAKE_BUILD_TYPE=Release -DLLAMA_CURL=ON; \ + echo "Unsupported architecture"; \ + exit 1; \ fi && \ cmake --build build -j $(nproc) From ca6baf76c1a7adb9134b08d2bc4c65557297ff87 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 25 Jan 2025 11:26:37 -0600 Subject: [PATCH 156/279] build: add /bigobj to MSVC build (#11407) --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7e41a44d2..e7f520582 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -50,6 +50,7 @@ endif() if (MSVC) add_compile_options("$<$:/utf-8>") add_compile_options("$<$:/utf-8>") + add_compile_options(/bigobj) endif() # From 26771a1491f3a4c3d5b99c4c267b81aca9a7dfa0 Mon Sep 17 00:00:00 2001 From: uvos Date: Sat, 25 Jan 2025 21:01:12 +0100 Subject: [PATCH 157/279] Hip: disable VMM on hip as it seams that it dosent work in some configurations (#11420) --- ggml/CMakeLists.txt | 1 + ggml/src/ggml-cuda/common.cuh | 4 ++++ ggml/src/ggml-cuda/ggml-cuda.cu | 14 +++++++------- ggml/src/ggml-hip/CMakeLists.txt | 4 ++-- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index 123c755ac..bbabb14de 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -155,6 +155,7 @@ option(GGML_CUDA_GRAPHS "ggml: use CUDA graphs (llama.cpp on option(GGML_HIP "ggml: use HIP" OFF) option(GGML_HIP_GRAPHS "ggml: use HIP graph, experimental, slow" OFF) +option(GGML_HIP_NO_VMM "ggml: do not try to use HIP VMM" ON) option(GGML_HIP_UMA "ggml: use HIP unified memory architecture" OFF) option(GGML_VULKAN "ggml: use Vulkan" OFF) option(GGML_VULKAN_CHECK_RESULTS "ggml: run Vulkan op checks" OFF) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index a79fa83c5..bb6120568 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -131,6 +131,10 @@ typedef float dfloat; // dequantize float typedef float2 dfloat2; #endif // GGML_CUDA_F16 +#if (!defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM)) || (defined(GGML_USE_HIP) && !defined(GGML_HIP_NO_VMM)) +#define GGML_USE_VMM +#endif // (!defined(GGML_USE_HIP) && !defined(GGML_CUDA_NO_VMM)) || (defined(GGML_USE_HIP) && !defined(GGML_HIP_NO_VMM)) + #if (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL #define FP16_AVAILABLE #endif // (defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index a53a1bbd0..85178abd2 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -152,7 +152,7 @@ static ggml_cuda_device_info ggml_cuda_init() { for (int id = 0; id < info.device_count; ++id) { int device_vmm = 0; -#if !defined(GGML_CUDA_NO_VMM) +#if defined(GGML_USE_VMM) CUdevice device; CU_CHECK(cuDeviceGet(&device, id)); CU_CHECK(cuDeviceGetAttribute(&device_vmm, CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED, device)); @@ -164,7 +164,7 @@ static ggml_cuda_device_info ggml_cuda_init() { alloc_prop.location.id = id; CU_CHECK(cuMemGetAllocationGranularity(&info.devices[id].vmm_granularity, &alloc_prop, CU_MEM_ALLOC_GRANULARITY_RECOMMENDED)); } -#endif // !defined(GGML_CUDA_NO_VMM) +#endif // defined(GGML_USE_VMM) info.devices[id].vmm = !!device_vmm; cudaDeviceProp prop; @@ -300,7 +300,7 @@ struct ggml_cuda_pool_leg : public ggml_cuda_pool { }; // pool with virtual memory -#if !defined(GGML_CUDA_NO_VMM) +#if defined(GGML_USE_VMM) struct ggml_cuda_pool_vmm : public ggml_cuda_pool { static const size_t CUDA_POOL_VMM_MAX_SIZE = 1ull << 35; // 32 GB @@ -408,14 +408,14 @@ struct ggml_cuda_pool_vmm : public ggml_cuda_pool { GGML_ASSERT(ptr == (void *) ((char *)(pool_addr) + pool_used)); } }; -#endif // !defined(GGML_CUDA_NO_VMM) +#endif // defined(GGML_USE_VMM) std::unique_ptr ggml_backend_cuda_context::new_pool_for_device(int device) { -#if !defined(GGML_CUDA_NO_VMM) +#if defined(GGML_USE_VMM) if (ggml_cuda_info().devices[device].vmm) { return std::unique_ptr(new ggml_cuda_pool_vmm(device)); } -#endif // !defined(GGML_CUDA_NO_VMM) +#endif // defined(GGML_USE_VMM) return std::unique_ptr(new ggml_cuda_pool_leg(device)); } @@ -3250,7 +3250,7 @@ static ggml_backend_feature * ggml_backend_cuda_get_features(ggml_backend_reg_t features.push_back({ "FORCE_CUBLAS", "1" }); #endif - #ifdef GGML_CUDA_NO_VMM + #ifndef GGML_USE_VMM features.push_back({ "NO_VMM", "1" }); #endif diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt index 77994a698..ecc3bc66d 100644 --- a/ggml/src/ggml-hip/CMakeLists.txt +++ b/ggml/src/ggml-hip/CMakeLists.txt @@ -96,8 +96,8 @@ if (GGML_HIP_GRAPHS) add_compile_definitions(GGML_HIP_GRAPHS) endif() -if (GGML_CUDA_NO_VMM) - add_compile_definitions(GGML_CUDA_NO_VMM) +if (GGML_HIP_NO_VMM) + add_compile_definitions(GGML_HIP_NO_VMM) endif() if (CXX_IS_HIPCC) From 4a75d19376f2f00dbae6c266eb9c4f3001872b52 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 25 Jan 2025 15:29:57 -0600 Subject: [PATCH 158/279] vulkan: compile shaders on-demand (#11406) Reduce first-run startup time and memory consumption. Should fix #11339. --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 64 ++++++++++++++++++---------- 1 file changed, 41 insertions(+), 23 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index c325416d1..a9d6b923c 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -85,6 +85,10 @@ struct vk_pipeline_struct { uint32_t parameter_count; std::array wg_denoms; uint32_t align; + // set to true to request the pipeline is compiled after the dryrun + bool needed {}; + // set to true when the shader has been compiled + bool compiled {}; }; typedef std::shared_ptr vk_pipeline; @@ -186,8 +190,11 @@ struct vk_device_struct { bool mul_mat_id_m; bool mul_mat_id_s; - vk_matmul_pipeline pipeline_matmul_f32; - vk_matmul_pipeline pipeline_matmul_f32_f16; + // set to true to indicate that some shaders need to be compiled after the dryrun + bool need_compiles {}; + + vk_matmul_pipeline pipeline_matmul_f32 {}; + vk_matmul_pipeline pipeline_matmul_f32_f16 {}; vk_matmul_pipeline2 pipeline_matmul_f16; vk_matmul_pipeline2 pipeline_matmul_f16_f32; vk_pipeline pipeline_matmul_split_k_reduce; @@ -195,7 +202,7 @@ struct vk_device_struct { vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_COUNT]; vk_matmul_pipeline2 pipeline_dequant_mul_mat_mat[GGML_TYPE_COUNT]; - vk_matmul_pipeline pipeline_matmul_id_f32; + vk_matmul_pipeline pipeline_matmul_id_f32 {}; vk_matmul_pipeline2 pipeline_matmul_id_f16; vk_matmul_pipeline2 pipeline_matmul_id_f16_f32; @@ -776,13 +783,6 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin GGML_ASSERT(parameter_count > 0); GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT - pipeline = std::make_shared(); - pipeline->name = name; - pipeline->parameter_count = parameter_count; - pipeline->push_constant_size = push_constant_size; - pipeline->wg_denoms = wg_denoms; - pipeline->align = align; - vk::ShaderModuleCreateInfo shader_module_create_info({}, spv_size, reinterpret_cast(spv_data)); pipeline->shader_module = device->device.createShaderModule(shader_module_create_info); @@ -865,6 +865,7 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin } pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value; + pipeline->compiled = true; { std::lock_guard guard(device->mutex); @@ -875,12 +876,6 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin std::lock_guard guard(compile_count_mutex); assert(compile_count > 0); compile_count--; - - // "Progress bar" for shader compiles - static uint32_t total_compile_count = 0; - if ((total_compile_count++ % 10) == 0) { - std::cerr << "."; - } } compile_count_cond.notify_all(); } @@ -906,6 +901,10 @@ static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) static void ggml_pipeline_request_descriptor_sets(vk_device& device, vk_pipeline& pipeline, uint32_t n) { VK_LOG_DEBUG("ggml_pipeline_request_descriptor_sets(" << pipeline->name << ", " << n << ")"); device->pipeline_descriptor_set_requirements[pipeline->name] += n; + if (!pipeline->compiled) { + pipeline->needed = true; + device->need_compiles = true; + } } static void ggml_pipeline_allocate_descriptor_sets(vk_device& device) { @@ -1388,8 +1387,6 @@ static bool ggml_vk_matmul_shmem_support(const vk_device& device, const std::vec static void ggml_vk_load_shaders(vk_device& device) { VK_LOG_DEBUG("ggml_vk_load_shaders(" << device->name << ")"); - std::cerr << "ggml_vulkan: Compiling shaders"; - // some shaders have a minimum subgroup size const uint32_t subgroup_size_16 = std::max(device->subgroup_size, 16u); const uint32_t subgroup_size_32 = std::max(device->subgroup_size, 32u); @@ -1527,15 +1524,33 @@ static void ggml_vk_load_shaders(vk_device& device) { } } - device->pipeline_matmul_f32 = std::make_shared(); - device->pipeline_matmul_f32_f16 = std::make_shared(); - - device->pipeline_matmul_id_f32 = std::make_shared(); + if (!device->pipeline_matmul_f32) { + device->pipeline_matmul_f32 = std::make_shared(); + } + if (!device->pipeline_matmul_f32_f16) { + device->pipeline_matmul_f32_f16 = std::make_shared(); + } + if (!device->pipeline_matmul_id_f32) { + device->pipeline_matmul_id_f32 = std::make_shared(); + } std::vector> compiles; auto const &ggml_vk_create_pipeline = [&](vk_device& device, vk_pipeline& pipeline, const std::string &name, size_t spv_size, const void* spv_data, const std::string &entrypoint, uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, const std::vector& specialization_constants, uint32_t align, bool disable_robustness = false, bool require_full_subgroups = false, uint32_t required_subgroup_size = 0) { + + if (!pipeline) { + pipeline = std::make_shared(); + pipeline->name = name; + pipeline->parameter_count = parameter_count; + pipeline->push_constant_size = push_constant_size; + pipeline->wg_denoms = wg_denoms; + pipeline->align = align; + } + + if (!pipeline->needed || pipeline->compiled) { + return; + } { // wait until fewer than N compiles are in progress uint32_t N = std::max(1u, std::thread::hardware_concurrency()); @@ -2050,7 +2065,7 @@ static void ggml_vk_load_shaders(vk_device& device) { for (auto &c : compiles) { c.wait(); } - std::cerr << "Done!" << std::endl; + device->need_compiles = false; } static bool ggml_vk_khr_cooperative_matrix_support(const vk::PhysicalDeviceProperties& props, const vk::PhysicalDeviceDriverProperties& driver_props); @@ -7656,6 +7671,9 @@ static ggml_status ggml_backend_vk_graph_compute(ggml_backend_t backend, ggml_cg for (int i = 0; i < cgraph->n_nodes; i++) { ggml_vk_build_graph(ctx, cgraph->nodes[i], i, nullptr, 0, true, false, false); } + if (ctx->device->need_compiles) { + ggml_vk_load_shaders(ctx->device); + } ggml_vk_preallocate_buffers(ctx); ggml_pipeline_allocate_descriptor_sets(ctx->device); From f35726c2fb0a824246e004ab4bedcde37f3f0dd0 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Sat, 25 Jan 2025 20:10:03 -0600 Subject: [PATCH 159/279] build: apply MSVC /bigobj option to c/cpp files only (#11423) --- CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e7f520582..2f2b1a201 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -50,7 +50,8 @@ endif() if (MSVC) add_compile_options("$<$:/utf-8>") add_compile_options("$<$:/utf-8>") - add_compile_options(/bigobj) + add_compile_options("$<$:/bigobj>") + add_compile_options("$<$:/bigobj>") endif() # From 2cc9b8c32c78d09cd1b4df0aaa605ab2d0176243 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 26 Jan 2025 14:30:15 +0200 Subject: [PATCH 160/279] readme : update hot topics --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 97d028670..ff8536773 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) ## Hot topics +- **How to use [MTLResidencySet](https://developer.apple.com/documentation/metal/mtlresidencyset?language=objc) to keep the GPU memory active?** https://github.com/ggerganov/llama.cpp/pull/11427 - **VS Code extension for FIM completions:** https://github.com/ggml-org/llama.vscode - Vim/Neovim plugin for FIM completions: https://github.com/ggml-org/llama.vim - Introducing GGUF-my-LoRA https://github.com/ggerganov/llama.cpp/discussions/10123 From 1d8ee06000ecdd274e7f0a0465d6bf26ad2b3491 Mon Sep 17 00:00:00 2001 From: Frank Mai Date: Sun, 26 Jan 2025 23:20:34 +0800 Subject: [PATCH 161/279] rpc: fix register position (#11424) Signed-off-by: thxCode --- src/llama-model.cpp | 2 ++ src/llama.cpp | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 031b4c30b..18bd0b071 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -1303,10 +1303,12 @@ bool llama_model::load_tensors(llama_model_loader & ml) { const int act_gpu_layers = devices.empty() ? 0 : std::min(n_gpu_layers, (int)n_layer + 1); auto get_layer_buft_list = [&](int il) -> llama_model::impl::layer_dev { if (il < i_gpu_start || (il - i_gpu_start) >= act_gpu_layers) { + LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(cpu_dev)); return {cpu_dev, &pimpl->cpu_buft_list}; } const int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + n_devices(), float(il - i_gpu_start)/act_gpu_layers) - splits.begin(); auto * dev = devices.at(layer_gpu); + LLAMA_LOG_DEBUG("load_tensors: layer %3d assigned to device %s\n", il, ggml_backend_dev_name(dev)); return {dev, &pimpl->gpu_buft_list.at(dev)}; }; diff --git a/src/llama.cpp b/src/llama.cpp index e8cfe5012..094157ccf 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -9405,6 +9405,7 @@ static struct llama_model * llama_model_load_from_file_impl( model->devices.push_back(*dev); } } else { + std::vector rpc_servers; // use all available devices for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { ggml_backend_dev_t dev = ggml_backend_dev_get(i); @@ -9415,10 +9416,19 @@ static struct llama_model * llama_model_load_from_file_impl( break; case GGML_BACKEND_DEVICE_TYPE_GPU: - model->devices.push_back(dev); + ggml_backend_reg_t reg = ggml_backend_dev_backend_reg(dev); + if (ggml_backend_reg_name(reg) == std::string("RPC")) { + rpc_servers.push_back(dev); + } else { + model->devices.push_back(dev); + } break; } } + // add RPC servers at the front of the list + if (!rpc_servers.empty()) { + model->devices.insert(model->devices.begin(), rpc_servers.begin(), rpc_servers.end()); + } } // if using single GPU mode, remove all except the main GPU From 19f65187cbf009801288861133267ee5573ceead Mon Sep 17 00:00:00 2001 From: bandoti <141645996+bandoti@users.noreply.github.com> Date: Sun, 26 Jan 2025 12:07:48 -0400 Subject: [PATCH 162/279] cmake: add ggml find package (#11369) * Add initial ggml cmake package * Add build numbers to ggml find-package * Expand variables with GGML_ prefix * Guard against adding to cache variable twice * Add git to msys2 workflow * Handle ggml-cpu-* variants * Link ggml/ggml-base libraries to their targets * Replace main-cmake-pkg with simple-cmake-pkg * Interface features require c_std_90 * Fix typo * Removed unnecessary bracket from status message * Update examples/simple-cmake-pkg/README.md Co-authored-by: Georgi Gerganov * Update examples/simple-cmake-pkg/README.md Co-authored-by: Georgi Gerganov --------- Co-authored-by: Georgi Gerganov --- .github/workflows/build.yml | 1 + CMakeLists.txt | 23 +-- cmake/llama-config.cmake.in | 156 +----------------- examples/main-cmake-pkg/CMakeLists.txt | 32 ---- examples/main-cmake-pkg/README.md | 31 ---- .../.gitignore | 0 examples/simple-cmake-pkg/CMakeLists.txt | 11 ++ examples/simple-cmake-pkg/README.md | 34 ++++ ggml/CMakeLists.txt | 71 ++++++++ ggml/cmake/ggml-config.cmake.in | 147 +++++++++++++++++ ggml/src/CMakeLists.txt | 11 ++ 11 files changed, 284 insertions(+), 233 deletions(-) delete mode 100644 examples/main-cmake-pkg/CMakeLists.txt delete mode 100644 examples/main-cmake-pkg/README.md rename examples/{main-cmake-pkg => simple-cmake-pkg}/.gitignore (100%) create mode 100644 examples/simple-cmake-pkg/CMakeLists.txt create mode 100644 examples/simple-cmake-pkg/README.md create mode 100644 ggml/cmake/ggml-config.cmake.in diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 37cb6b1e7..cd8422f8a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -613,6 +613,7 @@ jobs: msystem: ${{matrix.sys}} install: >- base-devel + git mingw-w64-${{matrix.env}}-toolchain mingw-w64-${{matrix.env}}-cmake mingw-w64-${{matrix.env}}-openblas diff --git a/CMakeLists.txt b/CMakeLists.txt index 2f2b1a201..4c62d1788 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -188,27 +188,14 @@ set(LLAMA_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location o set(LLAMA_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files") set(LLAMA_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files") -# At the moment some compile definitions are placed within the ggml/src -# directory but not exported on the `ggml` target. This could be improved by -# determining _precisely_ which defines are necessary for the llama-config -# package. -# -set(GGML_TRANSIENT_DEFINES) -get_target_property(GGML_DIRECTORY ggml SOURCE_DIR) -get_directory_property(GGML_DIR_DEFINES DIRECTORY ${GGML_DIRECTORY} COMPILE_DEFINITIONS) -if (GGML_DIR_DEFINES) - list(APPEND GGML_TRANSIENT_DEFINES ${GGML_DIR_DEFINES}) -endif() -get_target_property(GGML_TARGET_DEFINES ggml COMPILE_DEFINITIONS) -if (GGML_TARGET_DEFINES) - list(APPEND GGML_TRANSIENT_DEFINES ${GGML_TARGET_DEFINES}) -endif() -get_target_property(GGML_LINK_LIBRARIES ggml LINK_LIBRARIES) -# all public headers set(LLAMA_PUBLIC_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h ${CMAKE_CURRENT_SOURCE_DIR}/include/llama-cpp.h) -set_target_properties(llama PROPERTIES PUBLIC_HEADER "${LLAMA_PUBLIC_HEADERS}") + +set_target_properties(llama + PROPERTIES + PUBLIC_HEADER "${LLAMA_PUBLIC_HEADERS}") + install(TARGETS llama LIBRARY PUBLIC_HEADER) configure_package_config_file( diff --git a/cmake/llama-config.cmake.in b/cmake/llama-config.cmake.in index 5c55bc6b8..40ade96e5 100644 --- a/cmake/llama-config.cmake.in +++ b/cmake/llama-config.cmake.in @@ -3,159 +3,13 @@ set(LLAMA_BUILD_COMMIT @LLAMA_BUILD_COMMIT@) set(LLAMA_BUILD_NUMBER @LLAMA_BUILD_NUMBER@) set(LLAMA_SHARED_LIB @BUILD_SHARED_LIBS@) -set(GGML_STATIC @GGML_STATIC@) -set(GGML_NATIVE @GGML_NATIVE@) -set(GGML_LTO @GGML_LTO@) -set(GGML_CCACHE @GGML_CCACHE@) -set(GGML_AVX @GGML_AVX@) -set(GGML_AVX2 @GGML_AVX2@) -set(GGML_AVX512 @GGML_AVX512@) -set(GGML_AVX512_VBMI @GGML_AVX512_VBMI@) -set(GGML_AVX512_VNNI @GGML_AVX512_VNNI@) -set(GGML_AVX512_BF16 @GGML_AVX512_BF16@) -set(GGML_AMX_TILE @GGML_AMX_TILE@) -set(GGML_AMX_INT8 @GGML_AMX_INT8@) -set(GGML_AMX_BF16 @GGML_AMX_BF16@) -set(GGML_FMA @GGML_FMA@) -set(GGML_LASX @GGML_LASX@) -set(GGML_LSX @GGML_LSX@) -set(GGML_RVV @GGML_RVV@) -set(GGML_SVE @GGML_SVE@) - -set(GGML_ACCELERATE @GGML_ACCELERATE@) -set(GGML_OPENMP @GGML_OPENMP@) -set(GGML_CPU_HBM @GGML_CPU_HBM@) -set(GGML_BLAS_VENDOR @GGML_BLAS_VENDOR@) - -set(GGML_CUDA_FORCE_MMQ @GGML_CUDA_FORCE_MMQ@) -set(GGML_CUDA_FORCE_CUBLAS @GGML_CUDA_FORCE_CUBLAS@) -set(GGML_CUDA_F16 @GGML_CUDA_F16@) -set(GGML_CUDA_PEER_MAX_BATCH_SIZE @GGML_CUDA_PEER_MAX_BATCH_SIZE@) -set(GGML_CUDA_NO_PEER_COPY @GGML_CUDA_NO_PEER_COPY@) -set(GGML_CUDA_NO_VMM @GGML_CUDA_NO_VMM@) -set(GGML_CUDA_FA_ALL_QUANTS @GGML_CUDA_FA_ALL_QUANTS@) -set(GGML_CUDA_GRAPHS @GGML_CUDA_GRAPHS@) - -set(GGML_HIP_UMA @GGML_HIP_UMA@) - -set(GGML_VULKAN_CHECK_RESULTS @GGML_VULKAN_CHECK_RESULTS@) -set(GGML_VULKAN_DEBUG @GGML_VULKAN_DEBUG@) -set(GGML_VULKAN_MEMORY_DEBUG @GGML_VULKAN_MEMORY_DEBUG@) -set(GGML_VULKAN_SHADER_DEBUG_INFO @GGML_VULKAN_SHADER_DEBUG_INFO@) -set(GGML_VULKAN_PERF @GGML_VULKAN_PERF@) -set(GGML_VULKAN_VALIDATE @GGML_VULKAN_VALIDATE@) -set(GGML_VULKAN_RUN_TESTS @GGML_VULKAN_RUN_TESTS@) - -set(GGML_METAL_USE_BF16 @GGML_METAL_USE_BF16@) -set(GGML_METAL_NDEBUG @GGML_METAL_NDEBUG@) -set(GGML_METAL_SHADER_DEBUG @GGML_METAL_SHADER_DEBUG@) -set(GGML_METAL_EMBED_LIBRARY @GGML_METAL_EMBED_LIBRARY@) -set(GGML_METAL_MACOSX_VERSION_MIN @GGML_METAL_MACOSX_VERSION_MIN@) -set(GGML_METAL_STD @GGML_METAL_STD@) - -set(GGML_SYCL_F16 @GGML_SYCL_F16@) -set(GGML_SYCL_TARGET @GGML_SYCL_TARGET@) -set(GGML_SYCL_DEVICE_ARCH @GGML_SYCL_DEVICE_ARCH@) - - @PACKAGE_INIT@ set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@") set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@") set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@") -find_package(Threads REQUIRED) - -set(_llama_transient_defines "@GGML_TRANSIENT_DEFINES@") -set(_llama_link_deps "") -set(_llama_link_opts "") -foreach(_ggml_lib ggml ggml-base) - string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY") - find_library(${_ggml_lib_var} ${_ggml_lib} - REQUIRED - HINTS ${LLAMA_LIB_DIR} - NO_CMAKE_FIND_ROOT_PATH - ) - list(APPEND _llama_link_deps "${${_ggml_lib_var}}") - message(STATUS "Found ${${_ggml_lib_var}}") -endforeach() - -foreach(backend amx blas cann cpu cuda hip kompute metal musa rpc sycl vulkan) - string(TOUPPER "GGML_${backend}" backend_id) - set(_ggml_lib "ggml-${backend}") - string(REPLACE "-" "_" _ggml_lib_var "${_ggml_lib}_LIBRARY") - - find_library(${_ggml_lib_var} ${_ggml_lib} - HINTS ${LLAMA_LIB_DIR} - NO_CMAKE_FIND_ROOT_PATH - ) - if(${_ggml_lib_var}) - list(APPEND _llama_link_deps "${${_ggml_lib_var}}") - set(${backend_id} ON) - message(STATUS "Found backend ${${_ggml_lib_var}}") - else() - set(${backend_id} OFF) - endif() -endforeach() - -if (NOT LLAMA_SHARED_LIB) - if (APPLE AND GGML_ACCELERATE) - find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED) - list(APPEND _llama_link_deps ${ACCELERATE_FRAMEWORK}) - endif() - - if (GGML_OPENMP) - find_package(OpenMP REQUIRED) - list(APPEND _llama_link_deps OpenMP::OpenMP_C OpenMP::OpenMP_CXX) - endif() - - if (GGML_CPU_HBM) - find_library(memkind memkind REQUIRED) - list(APPEND _llama_link_deps memkind) - endif() - - if (GGML_BLAS) - find_package(BLAS REQUIRED) - list(APPEND _llama_link_deps ${BLAS_LIBRARIES}) - list(APPEND _llama_link_opts ${BLAS_LINKER_FLAGS}) - endif() - - if (GGML_CUDA) - find_package(CUDAToolkit REQUIRED) - endif() - - if (GGML_METAL) - find_library(FOUNDATION_LIBRARY Foundation REQUIRED) - find_library(METAL_FRAMEWORK Metal REQUIRED) - find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) - list(APPEND _llama_link_deps ${FOUNDATION_LIBRARY} - ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK}) - endif() - - if (GGML_VULKAN) - find_package(Vulkan REQUIRED) - list(APPEND _llama_link_deps Vulkan::Vulkan) - endif() - - if (GGML_HIP) - find_package(hip REQUIRED) - find_package(hipblas REQUIRED) - find_package(rocblas REQUIRED) - list(APPEND _llama_link_deps hip::host roc::rocblas roc::hipblas) - endif() - - if (GGML_SYCL) - find_package(DNNL) - if (${DNNL_FOUND} AND GGML_SYCL_TARGET STREQUAL "INTEL") - list(APPEND _llama_link_deps DNNL::dnnl) - endif() - if (WIN32) - find_package(IntelSYCL REQUIRED) - find_package(MKL REQUIRED) - list(APPEND _llama_link_deps IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL) - endif() - endif() -endif() +find_package(ggml REQUIRED) find_library(llama_LIBRARY llama REQUIRED @@ -167,12 +21,10 @@ add_library(llama UNKNOWN IMPORTED) set_target_properties(llama PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${LLAMA_INCLUDE_DIR}" - INTERFACE_LINK_LIBRARIES "${_llama_link_deps}" - INTERFACE_LINK_OPTIONS "${_llama_link_opts}" - INTERFACE_COMPILE_DEFINITIONS "${_llama_transient_defines}" + INTERFACE_LINK_LIBRARIES "ggml::ggml;ggml::ggml-base;" IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" IMPORTED_LOCATION "${llama_LIBRARY}" - INTERFACE_COMPILE_FEATURES cxx_std_11 - POSITION_INDEPENDENT_CODE ON ) + INTERFACE_COMPILE_FEATURES c_std_90 + POSITION_INDEPENDENT_CODE ON) check_required_components(Llama) diff --git a/examples/main-cmake-pkg/CMakeLists.txt b/examples/main-cmake-pkg/CMakeLists.txt deleted file mode 100644 index 5563f4de0..000000000 --- a/examples/main-cmake-pkg/CMakeLists.txt +++ /dev/null @@ -1,32 +0,0 @@ -cmake_minimum_required(VERSION 3.12) -project("llama-cli-cmake-pkg" C CXX) -set(TARGET llama-cli-cmake-pkg) - -find_package(Llama 0.0.1 REQUIRED) - -# Bake common functionality in with target. Because applications -# using the relocatable Llama package should be outside of the -# source tree, llama-cli-cmake-pkg pretends the dependencies are built-in. -set(_common_path "${CMAKE_CURRENT_LIST_DIR}/../../common") -add_library(common OBJECT) -file(GLOB _common_files - "${_common_path}/*.h" - "${_common_path}/*.cpp" -) -target_sources(common PRIVATE ${_common_files}) - -# If the common project was part of "llama-cli-cmake-pkg" the transient -# defines would automatically be attached. Because the common func- -# tionality is separate, but dependent upon the defines, it must be -# explicitly extracted from the "llama" target. -# -get_target_property(_llama_transient_defines llama - INTERFACE_COMPILE_DEFINITIONS) - -target_compile_definitions(common PRIVATE "${_llama_transient_defines}") - -add_executable(${TARGET} ${CMAKE_CURRENT_LIST_DIR}/../main/main.cpp) -target_include_directories(${TARGET} PRIVATE ${_common_path}) -install(TARGETS ${TARGET} RUNTIME) -target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT}) -target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/main-cmake-pkg/README.md b/examples/main-cmake-pkg/README.md deleted file mode 100644 index 08d83dd08..000000000 --- a/examples/main-cmake-pkg/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# llama.cpp/example/main-cmake-pkg - -This program builds [llama-cli](../main) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree. - -## Building - -Because this example is "outside of the source tree", it is important to first build/install llama.cpp using CMake. An example is provided here, but please see the [llama.cpp build instructions](../..) for more detailed build instructions. - -### Considerations - -When hardware acceleration libraries are used (e.g. CUDA, Metal, etc.), CMake must be able to locate the associated CMake package. - -### Build llama.cpp and install to C:\LlamaCPP directory - -```cmd -git clone https://github.com/ggerganov/llama.cpp -cd llama.cpp -cmake -B build -DBUILD_SHARED_LIBS=OFF -G "Visual Studio 17 2022" -A x64 -cmake --build build --config Release -cmake --install build --prefix C:/LlamaCPP -``` - -### Build llama-cli-cmake-pkg - - -```cmd -cd ..\examples\main-cmake-pkg -cmake -B build -DBUILD_SHARED_LIBS=OFF -DCMAKE_PREFIX_PATH="C:/LlamaCPP/lib/cmake/Llama" -G "Visual Studio 17 2022" -A x64 -cmake --build build --config Release -cmake --install build --prefix C:/MyLlamaApp -``` diff --git a/examples/main-cmake-pkg/.gitignore b/examples/simple-cmake-pkg/.gitignore similarity index 100% rename from examples/main-cmake-pkg/.gitignore rename to examples/simple-cmake-pkg/.gitignore diff --git a/examples/simple-cmake-pkg/CMakeLists.txt b/examples/simple-cmake-pkg/CMakeLists.txt new file mode 100644 index 000000000..128e38c8f --- /dev/null +++ b/examples/simple-cmake-pkg/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.12) +project(llama-simple-cmake-pkg) + +set(TARGET llama-simple-cmake-pkg) + +find_package(Llama REQUIRED) + +add_executable(${TARGET} ${CMAKE_CURRENT_LIST_DIR}/../simple/simple.cpp) +install(TARGETS ${TARGET} RUNTIME) +target_link_libraries(${TARGET} PRIVATE llama ggml::all ${CMAKE_THREAD_LIBS_INIT}) +target_compile_features(${TARGET} PRIVATE cxx_std_17) diff --git a/examples/simple-cmake-pkg/README.md b/examples/simple-cmake-pkg/README.md new file mode 100644 index 000000000..8b30049e2 --- /dev/null +++ b/examples/simple-cmake-pkg/README.md @@ -0,0 +1,34 @@ +# llama.cpp/example/simple-cmake-pkg + +This program builds [simple](../simple) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree. + +## Building + +Because this example is "outside of the source tree", it is important to first build/install llama.cpp using CMake. An example is provided here, but please see the [llama.cpp build instructions](../..) for more detailed build instructions. + +### Considerations + +When hardware acceleration libraries are used (e.g. CUDA, Metal, Vulkan, etc.), the appropriate dependencies will be searched for automatically. So, for example, when finding a package + +### Build llama.cpp and install to llama.cpp/inst + +```sh +git clone https://github.com/ggerganov/llama.cpp +cd llama.cpp +cmake -S . -B build +cmake --build build +cmake --install build --prefix inst + +### Build simple-cmake-pkg + +```sh +cd examples/simple-cmake-pkg +cmake -S . -B build -DCMAKE_PREFIX_PATH=../../inst/lib/cmake +cmake --build build +``` + +### Run simple-cmake-pkg + +```sh +./build/llama-simple-cmake-pkg -m ./models/llama-7b-v2/ggml-model-f16.gguf "Hello my name is" +``` diff --git a/ggml/CMakeLists.txt b/ggml/CMakeLists.txt index bbabb14de..7c069e420 100644 --- a/ggml/CMakeLists.txt +++ b/ggml/CMakeLists.txt @@ -267,3 +267,74 @@ if (GGML_STANDALONE) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ggml.pc DESTINATION share/pkgconfig) endif() + +# +# Create CMake package +# + +# Generate version info based on git commit. + +find_program(GIT_EXE NAMES git git.exe REQUIRED NO_CMAKE_FIND_ROOT_PATH) +execute_process(COMMAND ${GIT_EXE} rev-list --count HEAD + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE GGML_BUILD_NUMBER + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +if(GGML_BUILD_NUMBER EQUAL 1) + message(WARNING "GGML build version fixed at 1 likely due to a shallow clone.") +endif() + +execute_process(COMMAND ${GIT_EXE} rev-parse --short HEAD + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + OUTPUT_VARIABLE GGML_BUILD_COMMIT + OUTPUT_STRIP_TRAILING_WHITESPACE +) + +# Capture variables prefixed with GGML_. + +set(variable_set_statements +" +####### Expanded from @GGML_VARIABLES_EXPANED@ by configure_package_config_file() ####### +####### Any changes to this file will be overwritten by the next CMake run ####### + +") + +set(GGML_SHARED_LIB ${BUILD_SHARED_LIBS}) + +get_cmake_property(all_variables VARIABLES) +foreach(variable_name IN LISTS all_variables) + if(variable_name MATCHES "^GGML_") + string(REPLACE ";" "\\;" + variable_value "${${variable_name}}") + + set(variable_set_statements + "${variable_set_statements}set(${variable_name} \"${variable_value}\")\n") + endif() +endforeach() + +set(GGML_VARIABLES_EXPANDED ${variable_set_statements}) + +# Create the CMake package and set install location. + +set(GGML_INSTALL_VERSION 0.0.${GGML_BUILD_NUMBER}) +set(GGML_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header files") +set(GGML_LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR} CACHE PATH "Location of library files") +set(GGML_BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR} CACHE PATH "Location of binary files") + +configure_package_config_file( + ${CMAKE_CURRENT_SOURCE_DIR}/cmake/ggml-config.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/ggml-config.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/ggml + PATH_VARS GGML_INCLUDE_INSTALL_DIR + GGML_LIB_INSTALL_DIR + GGML_BIN_INSTALL_DIR) + +write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/ggml-version.cmake + VERSION ${GGML_INSTALL_VERSION} + COMPATIBILITY SameMajorVersion) + +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ggml-config.cmake + ${CMAKE_CURRENT_BINARY_DIR}/ggml-version.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/ggml) diff --git a/ggml/cmake/ggml-config.cmake.in b/ggml/cmake/ggml-config.cmake.in new file mode 100644 index 000000000..bf39f9c00 --- /dev/null +++ b/ggml/cmake/ggml-config.cmake.in @@ -0,0 +1,147 @@ + +@GGML_VARIABLES_EXPANDED@ + +@PACKAGE_INIT@ + +set_and_check(GGML_INCLUDE_DIR "@PACKAGE_GGML_INCLUDE_INSTALL_DIR@") +set_and_check(GGML_LIB_DIR "@PACKAGE_GGML_LIB_INSTALL_DIR@") +set_and_check(GGML_BIN_DIR "@PACKAGE_GGML_BIN_INSTALL_DIR@") + +find_package(Threads REQUIRED) + +find_library(GGML_LIBRARY ggml + REQUIRED + HINTS ${GGML_LIB_DIR} + NO_CMAKE_FIND_ROOT_PATH) + +add_library(ggml::ggml UNKNOWN IMPORTED) +set_target_properties(ggml::ggml + PROPERTIES + IMPORTED_LOCATION "${GGML_LIBRARY}") + +find_library(GGML_BASE_LIBRARY ggml-base + REQUIRED + HINTS ${GGML_LIB_DIR} + NO_CMAKE_FIND_ROOT_PATH) + +add_library(ggml::ggml-base UNKNOWN IMPORTED) +set_target_properties(ggml::ggml-base + PROPERTIES + IMPORTED_LOCATION "${GGML_BASE_LIBRARY}") + +if (NOT GGML_SHARED_LIB) + if (APPLE AND GGML_ACCELERATE) + find_library(ACCELERATE_FRAMEWORK Accelerate REQUIRED) + list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES ${ACCELERATE_FRAMEWORK}) + endif() + + if (GGML_OPENMP) + find_package(OpenMP REQUIRED) + list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES OpenMP::OpenMP_C OpenMP::OpenMP_CXX) + endif() + + if (GGML_CPU_HBM) + find_library(memkind memkind REQUIRED) + list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES memkind) + endif() + + if (GGML_BLAS) + find_package(BLAS REQUIRED) + list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES ${BLAS_LIBRARIES}) + list(APPEND GGML_CPU_INTERFACE_LINK_OPTIONS ${BLAS_LINKER_FLAGS}) + endif() + + if (GGML_CUDA) + find_package(CUDAToolkit REQUIRED) + endif() + + if (GGML_METAL) + find_library(FOUNDATION_LIBRARY Foundation REQUIRED) + find_library(METAL_FRAMEWORK Metal REQUIRED) + find_library(METALKIT_FRAMEWORK MetalKit REQUIRED) + + list(APPEND GGML_METAL_INTERFACE_LINK_LIBRARIES + ${FOUNDATION_LIBRARY} ${METAL_FRAMEWORK} ${METALKIT_FRAMEWORK}) + endif() + + if (GGML_VULKAN) + find_package(Vulkan REQUIRED) + list(APPEND GGML_VULKAN_INTERFACE_LINK_LIBRARIES Vulkan::Vulkan) + endif() + + if (GGML_HIP) + find_package(hip REQUIRED) + find_package(hipblas REQUIRED) + find_package(rocblas REQUIRED) + list(APPEND GGML_HIP_INTERFACE_LINK_LIBRARIES hip::host roc::rocblas roc::hipblas) + endif() + + if (GGML_SYCL) + find_package(DNNL) + if (${DNNL_FOUND} AND GGML_SYCL_TARGET STREQUAL "INTEL") + list(APPEND GGML_SYCL_INTERFACE_LINK_LIBRARIES DNNL::dnnl) + endif() + if (WIN32) + find_package(IntelSYCL REQUIRED) + find_package(MKL REQUIRED) + list(APPEND GGML_SYCL_INTERFACE_LINK_LIBRARIES IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL) + endif() + endif() +endif() + +set(_ggml_all_targets "") +foreach(_ggml_backend ${GGML_AVAILABLE_BACKENDS}) + string(REPLACE "-" "_" _ggml_backend_pfx "${_ggml_backend}") + string(TOUPPER "${_ggml_backend_pfx}" _ggml_backend_pfx) + + find_library(${_ggml_backend_pfx}_LIBRARY ${_ggml_backend} + REQUIRED + HINTS ${GGML_LIB_DIR} + NO_CMAKE_FIND_ROOT_PATH) + + message(STATUS "Found ${${_ggml_backend_pfx}_LIBRARY}") + + add_library(ggml::${_ggml_backend} UNKNOWN IMPORTED) + set_target_properties(ggml::${_ggml_backend} + PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${GGML_INCLUDE_DIR}" + IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" + IMPORTED_LOCATION "${${_ggml_backend_pfx}_LIBRARY}" + INTERFACE_COMPILE_FEATURES c_std_90 + POSITION_INDEPENDENT_CODE ON) + + string(REGEX MATCH "^ggml-cpu" is_cpu_variant "${_ggml_backend}") + if(is_cpu_variant) + list(APPEND GGML_CPU_INTERFACE_LINK_LIBRARIES "ggml::ggml" "ggml::ggml-base") + set_target_properties(ggml::${_ggml_backend} + PROPERTIES + INTERFACE_LINK_LIBRARIES "${GGML_CPU_INTERFACE_LINK_LIBRARIES}") + + if(GGML_CPU_INTERFACE_LINK_OPTIONS) + set_target_properties(ggml::${_ggml_backend} + PROPERTIES + INTERFACE_LINK_OPTIONS "${GGML_CPU_INTERFACE_LINK_OPTIONS}") + endif() + + else() + list(APPEND ${_ggml_backend_pfx}_INTERFACE_LINK_LIBRARIES "ggml::ggml" "ggml::ggml-base") + set_target_properties(ggml::${_ggml_backend} + PROPERTIES + INTERFACE_LINK_LIBRARIES "${${_ggml_backend_pfx}_INTERFACE_LINK_LIBRARIES}") + + if(${_ggml_backend_pfx}_INTERFACE_LINK_OPTIONS) + set_target_properties(ggml::${_ggml_backend} + PROPERTIES + INTERFACE_LINK_OPTIONS "${${_ggml_backend_pfx}_INTERFACE_LINK_OPTIONS}") + endif() + endif() + + list(APPEND _ggml_all_targets ggml::${_ggml_backend}) +endforeach() + +add_library(ggml::all INTERFACE IMPORTED) +set_target_properties(ggml::all + PROPERTIES + INTERFACE_LINK_LIBRARIES "${_ggml_all_targets}") + +check_required_components(ggml) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index ae1cd2337..8d2b948fb 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -250,6 +250,17 @@ function(ggml_add_backend_library backend) target_compile_definitions(${backend} PRIVATE GGML_BACKEND_BUILD) target_compile_definitions(${backend} PUBLIC GGML_BACKEND_SHARED) endif() + + if(NOT GGML_AVAILABLE_BACKENDS) + set(GGML_AVAILABLE_BACKENDS "${backend}" + CACHE INTERNAL "List of backends for cmake package") + else() + list(FIND GGML_AVAILABLE_BACKENDS "${backend}" has_backend) + if(has_backend EQUAL -1) + set(GGML_AVAILABLE_BACKENDS "${GGML_AVAILABLE_BACKENDS};${backend}" + CACHE INTERNAL "List of backends for cmake package") + endif() + endif() endfunction() function(ggml_add_backend backend) From 6f53d8a6b41e48c73b345fc6c712c3b00ea4fb93 Mon Sep 17 00:00:00 2001 From: Nuno Date: Sun, 26 Jan 2025 18:22:43 +0100 Subject: [PATCH 163/279] docker: add missing vulkan library to base layer and update to 24.04 (#11422) Signed-off-by: rare-magma --- .devops/vulkan.Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.devops/vulkan.Dockerfile b/.devops/vulkan.Dockerfile index cfc2162e3..ad5dcd374 100644 --- a/.devops/vulkan.Dockerfile +++ b/.devops/vulkan.Dockerfile @@ -1,4 +1,4 @@ -ARG UBUNTU_VERSION=jammy +ARG UBUNTU_VERSION=24.04 FROM ubuntu:$UBUNTU_VERSION AS build @@ -7,7 +7,7 @@ RUN apt update && apt install -y git build-essential cmake wget # Install Vulkan SDK and cURL RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \ - wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \ + wget -qO /etc/apt/sources.list.d/lunarg-vulkan-noble.list https://packages.lunarg.com/vulkan/lunarg-vulkan-noble.list && \ apt update -y && \ apt-get install -y vulkan-sdk libcurl4-openssl-dev curl @@ -34,7 +34,7 @@ RUN mkdir -p /app/full \ FROM ubuntu:$UBUNTU_VERSION AS base RUN apt-get update \ - && apt-get install -y libgomp1 curl\ + && apt-get install -y libgomp1 curl libvulkan-dev \ && apt autoremove -y \ && apt clean -y \ && rm -rf /tmp/* /var/tmp/* \ From 178a7eb952d211b8d4232d5e50ae1b64519172a9 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 26 Jan 2025 20:06:16 +0200 Subject: [PATCH 164/279] metal : use residency sets (#11427) * metal : use residency sets ggml-ci * metal : restore commandBufferWithUnretainedReferences calls [no ci] * metal : release descriptors ggml-ci * metal : check env GGML_METAL_NO_RESIDENCY ggml-ci * metal : fix build + clean-up ggml-ci --- ggml/src/ggml-metal/ggml-metal.m | 136 +++++++++++++++++++++++++++---- 1 file changed, 119 insertions(+), 17 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index a85502ee0..c9474345d 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -19,7 +19,10 @@ // max number of MTLCommandBuffer used to submit a graph for processing #define GGML_METAL_MAX_COMMAND_BUFFERS 8 -#define UNUSED(x) (void)(x) +// create residency sets only on macOS >= 15.0 +#if TARGET_OS_OSX && __MAC_OS_X_VERSION_MAX_ALLOWED >= 150000 +#define GGML_METAL_HAS_RESIDENCY_SETS 1 +#endif // globals @@ -39,6 +42,7 @@ static struct ggml_backend_metal_device_context { bool has_simdgroup_reduction; bool has_simdgroup_mm; + bool has_residency_sets; bool has_bfloat; bool use_bfloat; @@ -48,6 +52,7 @@ static struct ggml_backend_metal_device_context { /*.mtl_device_ref_count =*/ 0, /*.has_simdgroup_reduction =*/ false, /*.has_simdgroup_mm =*/ false, + /*.has_residency_sets =*/ false, /*.has_bfloat =*/ false, /*.use_bfloat =*/ false, /*.name =*/ "", @@ -65,6 +70,10 @@ static id ggml_backend_metal_device_acq(struct ggml_backend_metal_dev ctx->has_simdgroup_mm = [ctx->mtl_device supportsFamily:MTLGPUFamilyApple7]; +#if defined(GGML_METAL_HAS_RESIDENCY_SETS) + ctx->has_residency_sets = getenv("GGML_METAL_NO_RESIDENCY") == NULL; +#endif + ctx->has_bfloat = [ctx->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML]; ctx->has_bfloat |= [ctx->mtl_device supportsFamily:MTLGPUFamilyApple6]; @@ -483,6 +492,11 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]); ctx->queue = [device newCommandQueue]; + if (ctx->queue == nil) { + GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__); + return NULL; + } + ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT); id metal_library; @@ -649,6 +663,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(ggml_backend_dev_t de GGML_LOG_INFO("%s: simdgroup reduction = %s\n", __func__, ctx_dev->has_simdgroup_reduction ? "true" : "false"); GGML_LOG_INFO("%s: simdgroup matrix mul. = %s\n", __func__, ctx_dev->has_simdgroup_mm ? "true" : "false"); + GGML_LOG_INFO("%s: has residency sets = %s\n", __func__, ctx_dev->has_residency_sets ? "true" : "false"); GGML_LOG_INFO("%s: has bfloat = %s\n", __func__, ctx_dev->has_bfloat ? "true" : "false"); GGML_LOG_INFO("%s: use bfloat = %s\n", __func__, ctx_dev->use_bfloat ? "true" : "false"); GGML_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx_dev->mtl_device.hasUnifiedMemory ? "true" : "false"); @@ -1035,8 +1050,70 @@ struct ggml_backend_metal_buffer_context { // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap int n_buffers; struct ggml_backend_metal_buffer buffers[GGML_METAL_MAX_BUFFERS]; + + // optional MTLResidencySet + id rset; }; +// rset init +static bool ggml_backend_metal_buffer_rset_init( + struct ggml_backend_metal_buffer_context * ctx, + struct ggml_backend_metal_device_context * ctx_dev, + id device) { + ctx->rset = nil; + + if (!ctx_dev->has_residency_sets) { + return true; + } + +#if defined(GGML_METAL_HAS_RESIDENCY_SETS) + if (@available(macOS 15.0, *)) { + MTLResidencySetDescriptor * desc = [[MTLResidencySetDescriptor alloc] init]; + desc.label = @"ggml_backend_metal"; + desc.initialCapacity = ctx->n_buffers; + + NSError * error; + ctx->rset = [device newResidencySetWithDescriptor:desc error:&error]; + if (error) { + GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]); + [desc release]; + return false; + } + + [desc release]; + + for (int i = 0; i < ctx->n_buffers; i++) { + [ctx->rset addAllocation:ctx->buffers[i].metal]; + } + + [ctx->rset commit]; + [ctx->rset requestResidency]; + + return true; + } +#else + GGML_UNUSED(ctx_dev); + GGML_UNUSED(device); +#endif + + return true; +} + +// rset free +static void ggml_backend_metal_buffer_rset_free(struct ggml_backend_metal_buffer_context * ctx) { +#if defined(GGML_METAL_HAS_RESIDENCY_SETS) + if (@available(macOS 15.0, *)) { + if (ctx->rset) { + [ctx->rset endResidency]; + [ctx->rset removeAllAllocations]; + [ctx->rset release]; + } + } +#else + GGML_UNUSED(ctx); +#endif +} + // finds the Metal buffer that contains the tensor data on the GPU device // the assumption is that there is 1-to-1 mapping between the host and device memory buffers, so we can find the // Metal buffer based on the host memory pointer @@ -4176,6 +4253,8 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) for (int i = 0; i < ctx->n_buffers; i++) { [ctx->buffers[i].metal release]; } + + ggml_backend_metal_buffer_rset_free(ctx); ggml_backend_metal_device_rel(buffer->buft->device->context); if (ctx->owned) { @@ -4198,19 +4277,19 @@ static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) { static void ggml_backend_metal_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) { memset((char *)tensor->data + offset, value, size); - UNUSED(buffer); + GGML_UNUSED(buffer); } static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) { memcpy((char *)tensor->data + offset, data, size); - UNUSED(buffer); + GGML_UNUSED(buffer); } static void ggml_backend_metal_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) { memcpy(data, (const char *)tensor->data + offset, size); - UNUSED(buffer); + GGML_UNUSED(buffer); } static bool ggml_backend_metal_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) { @@ -4220,7 +4299,7 @@ static bool ggml_backend_metal_buffer_cpy_tensor(ggml_backend_buffer_t buffer, c } return false; - UNUSED(buffer); + GGML_UNUSED(buffer); } static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { @@ -4246,7 +4325,7 @@ static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = { static const char * ggml_backend_metal_buffer_type_get_name(ggml_backend_buffer_type_t buft) { return "Metal"; - UNUSED(buft); + GGML_UNUSED(buft); } static void ggml_backend_metal_log_allocated_size(id device, size_t size_aligned) { @@ -4270,8 +4349,8 @@ static void ggml_backend_metal_log_allocated_size(id device, size_t s } #endif #endif - UNUSED(device); - UNUSED(size_aligned); + GGML_UNUSED(device); + GGML_UNUSED(size_aligned); } static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { @@ -4284,7 +4363,8 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba size_aligned += (size_page - (size_aligned % size_page)); } - id device = ggml_backend_metal_device_acq(buft->device->context); + struct ggml_backend_metal_device_context * ctx_dev = (struct ggml_backend_metal_device_context *)buft->device->context; + id device = ggml_backend_metal_device_acq(ctx_dev); ctx->all_data = ggml_metal_host_malloc(size_aligned); ctx->all_size = size_aligned; @@ -4307,7 +4387,14 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba if (size_aligned > 0 && (ctx->all_data == NULL || ctx->buffers[0].metal == nil)) { GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0); free(ctx); - ggml_backend_metal_device_rel(buft->device->context); + ggml_backend_metal_device_rel(ctx_dev); + return NULL; + } + + if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) { + GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); + free(ctx); + ggml_backend_metal_device_rel(ctx_dev); return NULL; } @@ -4318,7 +4405,7 @@ static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_ba static size_t ggml_backend_metal_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { return 32; - UNUSED(buft); + GGML_UNUSED(buft); } static size_t ggml_backend_metal_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) { @@ -4328,13 +4415,13 @@ static size_t ggml_backend_metal_buffer_type_get_max_size(ggml_backend_buffer_ty return max_size; - UNUSED(buft); + GGML_UNUSED(buft); } static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t buft) { return true; - UNUSED(buft); + GGML_UNUSED(buft); } ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) { @@ -4357,7 +4444,7 @@ ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) { static const char * ggml_backend_metal_buffer_from_ptr_type_get_name(ggml_backend_buffer_type_t buft) { return "Metal_Mapped"; - UNUSED(buft); + GGML_UNUSED(buft); } static ggml_backend_buffer_type_t ggml_backend_metal_buffer_from_ptr_type(void) { @@ -4400,7 +4487,8 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz size_aligned += (size_page - (size_aligned % size_page)); } - id device = ggml_backend_metal_device_acq(&g_ggml_ctx_dev_main); + struct ggml_backend_metal_device_context * ctx_dev = &g_ggml_ctx_dev_main; + id device = ggml_backend_metal_device_acq(ctx_dev); // the buffer fits into the max buffer size allowed by the device if (size_aligned <= device.maxBufferLength) { @@ -4453,6 +4541,13 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz } } + if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) { + GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); + free(ctx); + ggml_backend_metal_device_rel(ctx_dev); + return NULL; + } + return ggml_backend_buffer_init(ggml_backend_metal_buffer_from_ptr_type(), ggml_backend_metal_buffer_i, ctx, size); } @@ -4461,7 +4556,7 @@ ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t siz static const char * ggml_backend_metal_name(ggml_backend_t backend) { return "Metal"; - UNUSED(backend); + GGML_UNUSED(backend); } static void ggml_backend_metal_free(ggml_backend_t backend) { @@ -4766,6 +4861,13 @@ static ggml_backend_buffer_t ggml_backend_metal_device_buffer_from_ptr(ggml_back } } + if (!ggml_backend_metal_buffer_rset_init(ctx, ctx_dev, device)) { + GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__); + free(ctx); + ggml_backend_metal_device_rel(ctx_dev); + return NULL; + } + return ggml_backend_buffer_init(ggml_backend_metal_buffer_from_ptr_type(), ggml_backend_metal_buffer_i, ctx, size); } @@ -4779,7 +4881,7 @@ static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml return buft->iface.get_name == ggml_backend_metal_buffer_type_get_name || buft->iface.get_name == ggml_backend_metal_buffer_from_ptr_type_get_name; - UNUSED(dev); + GGML_UNUSED(dev); } static bool ggml_backend_metal_device_offload_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) { From caf773f249aa267c78d3da5567b8ab156080ea59 Mon Sep 17 00:00:00 2001 From: Xuan Son Nguyen Date: Sun, 26 Jan 2025 22:45:32 +0100 Subject: [PATCH 165/279] docker : fix ARM build and Vulkan build (#11434) * ci : do not fail-fast for docker * build arm64/amd64 separatedly * fix pip * no fast fail * vulkan: try jammy --- .devops/vulkan.Dockerfile | 4 ++-- .github/workflows/docker.yml | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.devops/vulkan.Dockerfile b/.devops/vulkan.Dockerfile index ad5dcd374..b5bd3b6d2 100644 --- a/.devops/vulkan.Dockerfile +++ b/.devops/vulkan.Dockerfile @@ -1,4 +1,4 @@ -ARG UBUNTU_VERSION=24.04 +ARG UBUNTU_VERSION=22.04 FROM ubuntu:$UBUNTU_VERSION AS build @@ -7,7 +7,7 @@ RUN apt update && apt install -y git build-essential cmake wget # Install Vulkan SDK and cURL RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \ - wget -qO /etc/apt/sources.list.d/lunarg-vulkan-noble.list https://packages.lunarg.com/vulkan/lunarg-vulkan-noble.list && \ + wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \ apt update -y && \ apt-get install -y vulkan-sdk libcurl4-openssl-dev curl diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d71f1eb38..6bf22eb66 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -32,10 +32,12 @@ jobs: env: COMMIT_SHA: ${{ github.sha }} strategy: + fail-fast: false matrix: config: # Multi-stage build - - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, freediskspace: false} + - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} + - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/arm64", full: true, light: true, server: true, freediskspace: false} - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} From acd38efee316f3a5ed2e6afcbc5814807c347053 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Mon, 27 Jan 2025 02:41:59 -0500 Subject: [PATCH 166/279] metal: Handle null returned from MTLCreateSystemDefaultDevice() (#11441) This fixes segmentation fault error when running tests when no metal devices are available (for example, when not linked with Core Graphics framework or otherwise). --- ggml/src/ggml-metal/ggml-metal.m | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index c9474345d..76f8e4291 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -64,7 +64,9 @@ static id ggml_backend_metal_device_acq(struct ggml_backend_metal_dev if (ctx->mtl_device == nil) { ctx->mtl_device = MTLCreateSystemDefaultDevice(); + } + if (ctx->mtl_device) { ctx->has_simdgroup_reduction = [ctx->mtl_device supportsFamily:MTLGPUFamilyApple7]; ctx->has_simdgroup_reduction |= [ctx->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML]; @@ -99,8 +101,10 @@ static void ggml_backend_metal_device_rel(struct ggml_backend_metal_device_conte ctx->mtl_device_ref_count--; if (ctx->mtl_device_ref_count == 0) { - [ctx->mtl_device release]; - ctx->mtl_device = nil; + if (ctx->mtl_device) { + [ctx->mtl_device release]; + ctx->mtl_device = nil; + } } } From df984e014714cba4c99ef894b20b51cbcef31b16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Mon, 27 Jan 2025 12:07:12 +0100 Subject: [PATCH 167/279] llama: refactor llama_decode_impl (#11381) --- src/llama.cpp | 243 +++++++++++++++++++++++++++++--------------------- 1 file changed, 140 insertions(+), 103 deletions(-) diff --git a/src/llama.cpp b/src/llama.cpp index 094157ccf..12e8f41fc 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -8432,13 +8432,141 @@ static enum ggml_status llama_graph_compute( return status; } +static int llama_prepare_sbatch( + llama_context & lctx, + const llama_batch & batch, + uint32_t & n_outputs) { + const auto & model = lctx.model; + const auto & hparams = model.hparams; + const auto & cparams = lctx.cparams; + + const uint32_t n_tokens_all = batch.n_tokens; + const int64_t n_embd = hparams.n_embd; + + // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens + const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; + + GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT + if (batch.token) { + for (uint32_t i = 0; i < n_tokens_all; ++i) { + if (batch.token[i] < 0 || uint32_t(batch.token[i]) >= model.vocab.n_tokens()) { + LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]); + return -1; + } + } + } + GGML_ASSERT(n_tokens_all <= cparams.n_batch); + GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens"); + + lctx.n_queued_tokens += n_tokens_all; + lctx.embd_seq.clear(); + + // count outputs + if (batch.logits && !embd_pooled) { + for (uint32_t i = 0; i < n_tokens_all; ++i) { + n_outputs += batch.logits[i] != 0; + } + } else if (lctx.logits_all || embd_pooled) { + n_outputs = n_tokens_all; + } else { + // keep last output only + n_outputs = 1; + } + + lctx.sbatch.from_batch(batch, n_embd, + /* simple_split */ !lctx.kv_self.recurrent, + /* logits_all */ n_outputs == n_tokens_all); + + // reserve output buffer + if (llama_output_reserve(lctx, n_outputs) < n_outputs) { + LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_outputs); + return -2; + }; + + return 0; +} + +static int llama_prepare_ubatch( + llama_context & lctx, + llama_kv_slot_restorer & kv_slot_restorer, + llama_ubatch & ubatch, + const uint32_t n_outputs, + const uint32_t n_tokens_all) { + GGML_ASSERT(lctx.sbatch.n_tokens > 0); + + auto & kv_self = lctx.kv_self; + const auto & cparams = lctx.cparams; + const auto & hparams = lctx.model.hparams; + + // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens + const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; + + if (lctx.kv_self.recurrent) { + if (embd_pooled) { + // Pooled embeddings cannot be split across ubatches (yet) + ubatch = lctx.sbatch.split_seq(cparams.n_ubatch); + } else { + // recurrent model architectures are easier to implement + // with equal-length sequences + ubatch = lctx.sbatch.split_equal(cparams.n_ubatch); + } + } else { + ubatch = lctx.sbatch.split_simple(cparams.n_ubatch); + } + + // count the outputs in this u_batch + { + int32_t n_outputs_new = 0; + + if (n_outputs == n_tokens_all) { + n_outputs_new = ubatch.n_tokens; + } else { + GGML_ASSERT(ubatch.output); + for (uint32_t i = 0; i < ubatch.n_tokens; i++) { + n_outputs_new += int32_t(ubatch.output[i] != 0); + } + } + + // needs to happen before the graph is built + lctx.n_outputs = n_outputs_new; + } + + // non-causal masks do not use the KV cache + if (hparams.causal_attn) { + llama_kv_cache_update(&lctx); + + // if we have enough unused cells before the current head -> + // better to start searching from the beginning of the cache, hoping to fill it + if (kv_self.head > kv_self.used + 2*ubatch.n_tokens) { + kv_self.head = 0; + } + + const auto slot = llama_kv_cache_find_slot(kv_self, ubatch); + if (!slot) { + return 1; + } + kv_slot_restorer.save(slot); + + if (!kv_self.recurrent) { + // a heuristic, to avoid attending the full cache if it is not yet utilized + // after enough generations, the benefit from this heuristic disappears + // if we start defragmenting the cache, the benefit from this will be more important + const uint32_t pad = llama_kv_cache_get_padding(cparams); + kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad))); + //kv_self.n = llama_kv_cache_cell_max(kv_self); + } + } + + return 0; +} + // decode a batch of tokens by evaluating the transformer // in case of unsuccessful decoding (error or warning), // the kv_cache state will be returned to its original state // (for non-recurrent models) or cleaned (for recurrent models) // // - lctx: llama context -// - batch: batch to evaluate +// - inp_batch: batch to evaluate // // return 0 on success // return positive int on warning @@ -8455,37 +8583,18 @@ static int llama_decode_impl( return -1; } - // temporary allocate memory for the input batch if needed + // temporarily allocate memory for the input batch if needed llama_batch_allocr batch_allocr(inp_batch, inp_batch.pos ? -1 : lctx.kv_self.max_pos() + 1); - const llama_batch & batch = batch_allocr.batch; - const uint32_t n_tokens_all = batch.n_tokens; const auto & model = lctx.model; const auto & vocab = model.vocab; const auto & hparams = model.hparams; const auto & cparams = lctx.cparams; - GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT - - if (batch.token) { - for (uint32_t i = 0; i < n_tokens_all; ++i) { - if (batch.token[i] < 0 || (uint32_t) batch.token[i] >= model.vocab.n_tokens()) { - LLAMA_LOG_ERROR("%s: invalid token[%d] = %d\n", __func__, i, batch.token[i]); - return -1; - } - } - } - - GGML_ASSERT(n_tokens_all <= cparams.n_batch); - - GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens"); - if (lctx.t_compute_start_us == 0) { lctx.t_compute_start_us = ggml_time_us(); } - lctx.n_queued_tokens += n_tokens_all; - auto & kv_self = lctx.kv_self; llama_kv_slot_restorer kv_slot_restorer(kv_self); @@ -8495,99 +8604,27 @@ static int llama_decode_impl( uint32_t n_outputs = 0; uint32_t n_outputs_prev = 0; - const auto n_ubatch = cparams.n_ubatch; - - // this indicates we are doing pooled embedding, so we ignore batch.logits and output all tokens - const bool embd_pooled = cparams.embeddings && cparams.pooling_type != LLAMA_POOLING_TYPE_NONE; - - lctx.embd_seq.clear(); - - // count outputs - if (batch.logits && !embd_pooled) { - for (uint32_t i = 0; i < n_tokens_all; ++i) { - n_outputs += batch.logits[i] != 0; + { + const int ret = llama_prepare_sbatch(lctx, batch, n_outputs); + if (ret != 0) { + return ret; } - } else if (lctx.logits_all || embd_pooled) { - n_outputs = n_tokens_all; - } else { - // keep last output only - n_outputs = 1; } - lctx.sbatch.from_batch(batch, n_embd, - /* simple_split */ !kv_self.recurrent, - /* logits_all */ n_outputs == n_tokens_all); - - // reserve output buffer - if (llama_output_reserve(lctx, n_outputs) < n_outputs) { - LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_outputs); - return -2; - }; - while (lctx.sbatch.n_tokens > 0) { llama_ubatch ubatch; - if (kv_self.recurrent) { - if (embd_pooled) { - // Pooled embeddings cannot be split across ubatches (yet) - ubatch = lctx.sbatch.split_seq(n_ubatch); - } else { - // recurrent model architectures are easier to implement - // with equal-length sequences - ubatch = lctx.sbatch.split_equal(n_ubatch); - } - } else { - ubatch = lctx.sbatch.split_simple(n_ubatch); - } - const uint32_t n_tokens = ubatch.n_tokens; - - // count the outputs in this u_batch { - int32_t n_outputs_new = 0; - - if (n_outputs == n_tokens_all) { - n_outputs_new = n_tokens; - } else { - GGML_ASSERT(ubatch.output); - for (uint32_t i = 0; i < n_tokens; i++) { - n_outputs_new += (int32_t) (ubatch.output[i] != 0); - } + const int ret = llama_prepare_ubatch(lctx, kv_slot_restorer, ubatch, n_outputs, batch.n_tokens); + if (ret != 0) { + return ret; } - - // needs to happen before the graph is built - lctx.n_outputs = n_outputs_new; } - int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch; - ggml_threadpool_t threadpool = n_tokens == 1 ? lctx.threadpool : lctx.threadpool_batch; + const int n_threads = ubatch.n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch; + ggml_threadpool_t threadpool = ubatch.n_tokens == 1 ? lctx.threadpool : lctx.threadpool_batch; GGML_ASSERT(n_threads > 0); - // non-causal masks do not use the KV cache - if (hparams.causal_attn) { - llama_kv_cache_update(&lctx); - - // if we have enough unused cells before the current head -> - // better to start searching from the beginning of the cache, hoping to fill it - if (kv_self.head > kv_self.used + 2*n_tokens) { - kv_self.head = 0; - } - - const auto slot = llama_kv_cache_find_slot(kv_self, ubatch); - if (!slot) { - return 1; - } - kv_slot_restorer.save(slot); - - if (!kv_self.recurrent) { - // a heuristic, to avoid attending the full cache if it is not yet utilized - // after enough generations, the benefit from this heuristic disappears - // if we start defragmenting the cache, the benefit from this will be more important - const uint32_t pad = llama_kv_cache_get_padding(cparams); - kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad))); - //kv_self.n = llama_kv_cache_cell_max(kv_self); - } - } - //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head); ggml_backend_sched_reset(lctx.sched.get()); @@ -8640,7 +8677,7 @@ static int llama_decode_impl( // update the kv ring buffer { - kv_self.head += n_tokens; + kv_self.head += ubatch.n_tokens; // Ensure kv cache head points to a valid index. if (kv_self.head >= kv_self.size) { From a5203b4465c5c87813936bde98170e25bb09024f Mon Sep 17 00:00:00 2001 From: lexasub Date: Mon, 27 Jan 2025 17:42:09 +0400 Subject: [PATCH 168/279] llama : minor fixes for up llama load model speed (#11448) * impl::load change map bpe_ranks to onordered map for reduce time of impl::load on 30% * llama_model_loader::init_mapping - replace new llama_mmap to std::make_unique for clean code & reduce (/2) time of running init_mappings * Update src/llama-vocab.cpp --------- Co-authored-by: lexasub Co-authored-by: Diego Devesa --- src/llama-model-loader.cpp | 2 +- src/llama-vocab.cpp | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/llama-model-loader.cpp b/src/llama-model-loader.cpp index 75073bf61..05d58ad90 100644 --- a/src/llama-model-loader.cpp +++ b/src/llama-model-loader.cpp @@ -819,7 +819,7 @@ void llama_model_loader::init_mappings(bool prefetch, llama_mlocks * mlock_mmaps for (const auto & file : files) { auto * reg = ggml_backend_dev_backend_reg(ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU)); auto * is_numa_fn = (decltype(ggml_is_numa) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_is_numa"); - std::unique_ptr mapping(new llama_mmap(file.get(), prefetch ? -1 : 0, is_numa_fn())); + std::unique_ptr mapping = std::make_unique(file.get(), prefetch ? -1 : 0, is_numa_fn()); mmaps_used.emplace_back(mapping->size(), 0); if (mlock_mmaps) { std::unique_ptr mlock_mmap(new llama_mlock()); diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index 0782d3a41..561f8bdb8 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -1245,8 +1245,13 @@ struct llama_vocab::impl { std::vector cache_special_tokens; std::vector cache_token_to_piece; // llama_token_to_piece(special = true); - - std::map, int> bpe_ranks; + struct pair_hash { + size_t operator()(const std::pair & p) const { + return std::hash{}(p.first) ^ //create some hash for pair + (std::hash{}(p.second) << 1); + } + }; + std::unordered_map, int, pair_hash> bpe_ranks; // set of all tokens that cause "end of generation" std::set special_eog_ids; From d6d24cd9ed6d0b9558643dcc28f2124bef488c52 Mon Sep 17 00:00:00 2001 From: Haus1 Date: Mon, 27 Jan 2025 08:58:17 -0500 Subject: [PATCH 169/279] AMD: parse the architecture as supplied by gcnArchName (#11244) The value provided by minor doesn't include stepping for AMD, parse the value returned by gcnArchName instead to retrieve an accurate ID. --- ggml/src/ggml-cuda/common.cuh | 20 +++++----- ggml/src/ggml-cuda/ggml-cuda.cu | 67 ++++++++++++++++++++++++++++++++- 2 files changed, 75 insertions(+), 12 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index bb6120568..a66322da0 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -46,20 +46,20 @@ #define GGML_CUDA_CC_VOLTA 700 #define GGML_CUDA_CC_TURING 750 #define GGML_CUDA_CC_AMPERE 800 -#define GGML_CUDA_CC_OFFSET_AMD 1000000 +#define GGML_CUDA_CC_OFFSET_AMD 0x1000000 // GCN/CNDA, wave size is 64 -#define GGML_CUDA_CC_GCN4 (GGML_CUDA_CC_OFFSET_AMD + 803) // Tonga, Fiji, Polaris, minimum for fast fp16 -#define GGML_CUDA_CC_VEGA (GGML_CUDA_CC_OFFSET_AMD + 900) // Vega56/64, minimum for fp16 dual issue -#define GGML_CUDA_CC_VEGA20 (GGML_CUDA_CC_OFFSET_AMD + 906) // MI50/Radeon VII, minimum for dp4a -#define GGML_CUDA_CC_CDNA (GGML_CUDA_CC_OFFSET_AMD + 908) // MI100, minimum for MFMA, acc registers -#define GGML_CUDA_CC_CDNA2 (GGML_CUDA_CC_OFFSET_AMD + 910) // MI210, minimum acc register renameing -#define GGML_CUDA_CC_CDNA3 (GGML_CUDA_CC_OFFSET_AMD + 942) // MI300 +#define GGML_CUDA_CC_GCN4 (GGML_CUDA_CC_OFFSET_AMD + 0x803) // Tonga, Fiji, Polaris, minimum for fast fp16 +#define GGML_CUDA_CC_VEGA (GGML_CUDA_CC_OFFSET_AMD + 0x900) // Vega56/64, minimum for fp16 dual issue +#define GGML_CUDA_CC_VEGA20 (GGML_CUDA_CC_OFFSET_AMD + 0x906) // MI50/Radeon VII, minimum for dp4a +#define GGML_CUDA_CC_CDNA (GGML_CUDA_CC_OFFSET_AMD + 0x908) // MI100, minimum for MFMA, acc registers +#define GGML_CUDA_CC_CDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x910) // MI210, minimum acc register renameing +#define GGML_CUDA_CC_CDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x942) // MI300 // RNDA removes MFMA, dp4a, xnack, acc registers, wave size is 32 -#define GGML_CUDA_CC_RDNA1 (GGML_CUDA_CC_OFFSET_AMD + 1010) // RX 5000 -#define GGML_CUDA_CC_RDNA2 (GGML_CUDA_CC_OFFSET_AMD + 1030) // RX 6000, minimum for dp4a -#define GGML_CUDA_CC_RDNA3 (GGML_CUDA_CC_OFFSET_AMD + 1100) // RX 7000, minimum for WMMA +#define GGML_CUDA_CC_RDNA1 (GGML_CUDA_CC_OFFSET_AMD + 0x1010) // RX 5000 +#define GGML_CUDA_CC_RDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x1030) // RX 6000, minimum for dp4a +#define GGML_CUDA_CC_RDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x1100) // RX 7000, minimum for WMMA #define GGML_CUDA_CC_QY1 210 #define GGML_CUDA_CC_QY2 220 diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 85178abd2..402f37e85 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -119,6 +119,55 @@ static cudaError_t ggml_cuda_device_malloc(void ** ptr, size_t size, int device) #endif } +#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +static int ggml_cuda_parse_id(char devName[]) { + // A list of possible Target IDs can be found under the rocclr/clr repo in device.cpp + // these values are not stable so this is susceptible to breakage + // https://github.com/ROCm/clr/blob/amd-staging/rocclr/device/device.cpp + int archMajor = 0x0; + int archMinor = 0x0; + int archNum = GGML_CUDA_CC_OFFSET_AMD; + int archLen = strlen(devName); + char archName[archLen + 1]; + + // strip leading 'gfx' while copying into our buffer + if (archLen > 3) { + strcpy(archName, &devName[3]); + archLen -= 3; + } + + // trim trailing :xnack- or :sramecc- statuses + archLen = strcspn(archName, ":"); + archName[archLen] = '\0'; + + // tease out the version information + if (archLen > 8) { + // versions labeled generic use '-' as delimiter + // strip the trailing "-generic" then iterate through what remains + if ((strstr(archName, "-generic"))) { + archName[archLen - 8] = '\0'; + char * pch; + if ((pch = strtok(archName, "-"))) { + archMajor = (int)strtoul(pch, 0, 16); + if ((pch = strtok(NULL, "-"))) { + archMinor = 0x10 * (int)strtoul(pch, 0, 16); + } + } + } + } else if (archLen >= 3) { + // last two digits should be the minor * 0x10 + stepping + archMinor = (int)strtoul(&archName[archLen - 2], 0, 16); + archName[archLen - 2] = '\0'; + + // only the major version remains + archMajor = (int)strtoul(archName, 0, 16); + } + archNum += archMajor * 0x100; + archNum += archMinor; + return archNum; +} +#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) + static ggml_cuda_device_info ggml_cuda_init() { #ifdef __HIP_PLATFORM_AMD__ // Workaround for a rocBLAS bug when using multiple graphics cards: @@ -169,7 +218,6 @@ static ggml_cuda_device_info ggml_cuda_init() { cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, id)); - GGML_LOG_INFO(" Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no"); info.default_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; @@ -178,10 +226,25 @@ static ggml_cuda_device_info ggml_cuda_init() { info.devices[id].smpb = prop.sharedMemPerBlock; #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) info.devices[id].smpbo = prop.sharedMemPerBlock; - info.devices[id].cc = 100*prop.major + 10*prop.minor + GGML_CUDA_CC_OFFSET_AMD; + + info.devices[id].cc = ggml_cuda_parse_id(prop.gcnArchName); + if ((info.devices[id].cc & 0xff00) == 0x0) { + GGML_LOG_WARN("invalid architecture ID received for device %d %s: %s cc %d.%d\n", + id, prop.name, prop.gcnArchName, prop.major, prop.minor); + + // Fallback to prop.major and prop.minor + if (prop.major > 0) { + info.devices[id].cc = GGML_CUDA_CC_OFFSET_AMD + prop.major * 0x100; + info.devices[id].cc += prop.minor * 0x10; + } + } + GGML_LOG_INFO(" Device %d: %s, %s (0x%x), VMM: %s\n", + id, prop.name, prop.gcnArchName, info.devices[id].cc & 0xffff, device_vmm ? "yes" : "no"); #else info.devices[id].smpbo = prop.sharedMemPerBlockOptin; info.devices[id].cc = 100*prop.major + 10*prop.minor; + GGML_LOG_INFO(" Device %d: %s, compute capability %d.%d, VMM: %s\n", + id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no"); #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) } From a4417ddda98fd0558fb4d802253e68a933704b59 Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Mon, 27 Jan 2025 19:36:10 +0100 Subject: [PATCH 170/279] Add new hf protocol for ollama (#11449) https://huggingface.co/docs/hub/en/ollama Signed-off-by: Eric Curtin --- examples/run/run.cpp | 121 ++++++++++++++++++++++++++++--------------- 1 file changed, 80 insertions(+), 41 deletions(-) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 92a49eb74..8a0db74b6 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -319,6 +319,10 @@ class HttpClient { public: int init(const std::string & url, const std::vector & headers, const std::string & output_file, const bool progress, std::string * response_str = nullptr) { + if (std::filesystem::exists(output_file)) { + return 0; + } + std::string output_file_partial; curl = curl_easy_init(); if (!curl) { @@ -558,13 +562,14 @@ class LlamaData { } sampler = initialize_sampler(opt); + return 0; } private: #ifdef LLAMA_USE_CURL - int download(const std::string & url, const std::vector & headers, const std::string & output_file, - const bool progress, std::string * response_str = nullptr) { + int download(const std::string & url, const std::string & output_file, const bool progress, + const std::vector & headers = {}, std::string * response_str = nullptr) { HttpClient http; if (http.init(url, headers, output_file, progress, response_str)) { return 1; @@ -573,48 +578,85 @@ class LlamaData { return 0; } #else - int download(const std::string &, const std::vector &, const std::string &, const bool, + int download(const std::string &, const std::string &, const bool, const std::vector & = {}, std::string * = nullptr) { printe("%s: llama.cpp built without libcurl, downloading from an url not supported.\n", __func__); + return 1; } #endif - int huggingface_dl(const std::string & model, const std::vector headers, const std::string & bn) { - // Find the second occurrence of '/' after protocol string - size_t pos = model.find('/'); - pos = model.find('/', pos + 1); - if (pos == std::string::npos) { - return 1; - } - - const std::string hfr = model.substr(0, pos); - const std::string hff = model.substr(pos + 1); - const std::string url = "https://huggingface.co/" + hfr + "/resolve/main/" + hff; - return download(url, headers, bn, true); - } - - int ollama_dl(std::string & model, const std::vector headers, const std::string & bn) { - if (model.find('/') == std::string::npos) { - model = "library/" + model; - } - - std::string model_tag = "latest"; - size_t colon_pos = model.find(':'); + // Helper function to handle model tag extraction and URL construction + std::pair extract_model_and_tag(std::string & model, const std::string & base_url) { + std::string model_tag = "latest"; + const size_t colon_pos = model.find(':'); if (colon_pos != std::string::npos) { model_tag = model.substr(colon_pos + 1); model = model.substr(0, colon_pos); } - std::string manifest_url = "https://registry.ollama.ai/v2/" + model + "/manifests/" + model_tag; + std::string url = base_url + model + "/manifests/" + model_tag; + + return { model, url }; + } + + // Helper function to download and parse the manifest + int download_and_parse_manifest(const std::string & url, const std::vector & headers, + nlohmann::json & manifest) { std::string manifest_str; - const int ret = download(manifest_url, headers, "", false, &manifest_str); + int ret = download(url, "", false, headers, &manifest_str); if (ret) { return ret; } - nlohmann::json manifest = nlohmann::json::parse(manifest_str); - std::string layer; + manifest = nlohmann::json::parse(manifest_str); + + return 0; + } + + int huggingface_dl(std::string & model, const std::string & bn) { + // Find the second occurrence of '/' after protocol string + size_t pos = model.find('/'); + pos = model.find('/', pos + 1); + std::string hfr, hff; + std::vector headers = { "User-Agent: llama-cpp", "Accept: application/json" }; + std::string url; + + if (pos == std::string::npos) { + auto [model_name, manifest_url] = extract_model_and_tag(model, "https://huggingface.co/v2/"); + hfr = model_name; + + nlohmann::json manifest; + int ret = download_and_parse_manifest(manifest_url, headers, manifest); + if (ret) { + return ret; + } + + hff = manifest["ggufFile"]["rfilename"]; + } else { + hfr = model.substr(0, pos); + hff = model.substr(pos + 1); + } + + url = "https://huggingface.co/" + hfr + "/resolve/main/" + hff; + + return download(url, bn, true, headers); + } + + int ollama_dl(std::string & model, const std::string & bn) { + const std::vector headers = { "Accept: application/vnd.docker.distribution.manifest.v2+json" }; + if (model.find('/') == std::string::npos) { + model = "library/" + model; + } + + auto [model_name, manifest_url] = extract_model_and_tag(model, "https://registry.ollama.ai/v2/"); + nlohmann::json manifest; + int ret = download_and_parse_manifest(manifest_url, {}, manifest); + if (ret) { + return ret; + } + + std::string layer; for (const auto & l : manifest["layers"]) { if (l["mediaType"] == "application/vnd.ollama.image.model") { layer = l["digest"]; @@ -622,8 +664,9 @@ class LlamaData { } } - std::string blob_url = "https://registry.ollama.ai/v2/" + model + "/blobs/" + layer; - return download(blob_url, headers, bn, true); + std::string blob_url = "https://registry.ollama.ai/v2/" + model_name + "/blobs/" + layer; + + return download(blob_url, bn, true, headers); } std::string basename(const std::string & path) { @@ -653,22 +696,18 @@ class LlamaData { return ret; } - const std::string bn = basename(model_); - const std::vector headers = { "--header", - "Accept: application/vnd.docker.distribution.manifest.v2+json" }; + const std::string bn = basename(model_); if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://")) { rm_until_substring(model_, "://"); - ret = huggingface_dl(model_, headers, bn); + ret = huggingface_dl(model_, bn); } else if (string_starts_with(model_, "hf.co/")) { rm_until_substring(model_, "hf.co/"); - ret = huggingface_dl(model_, headers, bn); - } else if (string_starts_with(model_, "ollama://")) { - rm_until_substring(model_, "://"); - ret = ollama_dl(model_, headers, bn); + ret = huggingface_dl(model_, bn); } else if (string_starts_with(model_, "https://")) { - ret = download(model_, headers, bn, true); - } else { - ret = ollama_dl(model_, headers, bn); + ret = download(model_, bn, true); + } else { // ollama:// or nothing + rm_until_substring(model_, "://"); + ret = ollama_dl(model_, bn); } model_ = bn; From 2b8525d5c89b124c4578a2621cbeb64354ff3d9c Mon Sep 17 00:00:00 2001 From: Michael Engel Date: Tue, 28 Jan 2025 09:32:40 +0100 Subject: [PATCH 171/279] Handle missing model in CLI parameters for llama-run (#11399) The HTTP client in llama-run only prints an error in case the download of a resource failed. If the model name in the CLI parameter list is missing, this causes the application to crash. In order to prevent this, a check for the required model parameter has been added and errors for resource downloads get propagated to the caller. Signed-off-by: Michael Engel --- examples/run/run.cpp | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 8a0db74b6..5980a786f 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -181,6 +181,10 @@ class Opt { } } + if (model_.empty()){ + return 1; + } + return 0; } @@ -350,7 +354,11 @@ class HttpClient { data.file_size = set_resume_point(output_file_partial); set_progress_options(progress, data); set_headers(headers); - perform(url); + CURLcode res = perform(url); + if (res != CURLE_OK){ + printe("Fetching resource '%s' failed: %s\n", url.c_str(), curl_easy_strerror(res)); + return 1; + } if (!output_file.empty()) { std::filesystem::rename(output_file_partial, output_file); } @@ -415,16 +423,12 @@ class HttpClient { } } - void perform(const std::string & url) { - CURLcode res; + CURLcode perform(const std::string & url) { curl_easy_setopt(curl, CURLOPT_URL, url.c_str()); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https"); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L); - res = curl_easy_perform(curl); - if (res != CURLE_OK) { - printe("curl_easy_perform() failed: %s\n", curl_easy_strerror(res)); - } + return curl_easy_perform(curl); } static std::string human_readable_time(double seconds) { From 6e84b0ab8e10b8f6f99a32855f976ebcd35b0353 Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Tue, 28 Jan 2025 15:26:58 +0530 Subject: [PATCH 172/279] SYCL : SOFTMAX F16 mask support and other fixes (#11261) Implemented ggml_sycl_op_soft_max() F16 src1(mask) support for which a pragma deprecation warning was added during #5021. To do this, had to decouple it from ggml_sycl_op_flatten which always considered src1 to be of fp32 type(many OP functions are dependent on it). * SYCL: SOFTMAX F16 mask support and other fixes * test-backend-ops: Add F16 mask test cases --- ggml/src/ggml-sycl/ggml-sycl.cpp | 6 +--- ggml/src/ggml-sycl/softmax.cpp | 56 +++++++++++++++++++------------- ggml/src/ggml-sycl/softmax.hpp | 6 +--- tests/test-backend-ops.cpp | 45 ++++++++++++++++--------- 4 files changed, 64 insertions(+), 49 deletions(-) diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index ed4d8bb8b..2984ed82e 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -3878,10 +3878,6 @@ static void ggml_sycl_diag_mask_inf(ggml_backend_sycl_context & ctx, ggml_tensor ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_diag_mask_inf); } -static void ggml_sycl_soft_max(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_soft_max); -} - static void ggml_sycl_rope(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { GGML_ASSERT(ggml_is_contiguous(dst->src[0])); // TODO: this restriction is temporary until non-cont support is implemented ggml_sycl_op_flatten(ctx, dst->src[0], dst->src[1], dst, ggml_sycl_op_rope); @@ -4090,7 +4086,7 @@ bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tens ggml_sycl_diag_mask_inf(ctx, dst); break; case GGML_OP_SOFT_MAX: - ggml_sycl_soft_max(ctx, dst); + ggml_sycl_op_soft_max(ctx, dst); break; case GGML_OP_ROPE: ggml_sycl_rope(ctx, dst); diff --git a/ggml/src/ggml-sycl/softmax.cpp b/ggml/src/ggml-sycl/softmax.cpp index a9b3fce0d..563e0655f 100644 --- a/ggml/src/ggml-sycl/softmax.cpp +++ b/ggml/src/ggml-sycl/softmax.cpp @@ -1,7 +1,7 @@ -#include "norm.hpp" +#include "softmax.hpp" -template -static void soft_max_f32(const float * x, const float * mask, float * dst, const int ncols_par, +template +static void soft_max_f32(const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y, const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2, const sycl::nd_item<3> &item_ct1, float *buf) { const int ncols = ncols_template == 0 ? ncols_par : ncols_template; @@ -29,7 +29,7 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const slope = sycl::pow(base, float(exp)); } - float *vals = vals_smem ? buf + std::max(nwarps, WARP_SIZE) : dst + rowx * ncols; + float *vals = vals_smem ? buf + sycl::max(nwarps, WARP_SIZE) : dst + rowx * ncols; float max_val = -INFINITY; for (int col0 = 0; col0 < ncols; col0 += block_size) { @@ -42,7 +42,7 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const const int ix = rowx*ncols + col; const int iy = rowy*ncols + col; - const float val = x[ix]*scale + (mask ? slope*mask[iy] : 0.0f); + const float val = x[ix]*scale + (mask ? slope*static_cast(mask[iy]) : 0.0f); vals[col] = val; max_val = sycl::max(max_val, val); @@ -65,7 +65,7 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const item_ct1.barrier(sycl::access::fence_space::local_space); max_val = buf[lane_id]; for (size_t i = 1; i < nreduce; i += 1) { - max_val = std::max(max_val, buf[lane_id + i * WARP_SIZE]); + max_val = sycl::max(max_val, buf[lane_id + i * WARP_SIZE]); } max_val = warp_reduce_max(max_val, item_ct1); } @@ -122,8 +122,8 @@ static void soft_max_f32(const float * x, const float * mask, float * dst, const } } -template -static void soft_max_f32_submitter(const float * x, const float * mask, float * dst, const int ncols_par, +template +static void soft_max_f32_submitter(const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y, const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2, sycl::range<3> block_nums, sycl::range<3> block_dims, const size_t n_local_scratch, queue_ptr stream) { @@ -141,7 +141,8 @@ static void soft_max_f32_submitter(const float * x, const float * mask, float * }); } -static void soft_max_f32_sycl(const float * x, const float * mask, +template +static void soft_max_f32_sycl(const float * x, const T * mask, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, const float max_bias, queue_ptr stream, int device) { @@ -223,22 +224,16 @@ static void soft_max_f32_sycl(const float * x, const float * mask, } } -void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, - const ggml_tensor *src1, ggml_tensor *dst, - const float *src0_dd, const float *src1_dd, - float *dst_dd, - const queue_ptr &main_stream) { +void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, ggml_tensor * dst) { - GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); GGML_ASSERT( dst->type == GGML_TYPE_F32); -#pragma message("TODO: add ggml_sycl_op_soft_max() F16 src1 support") -#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021") - GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional + GGML_ASSERT(!dst->src[1] || dst->src[1]->type == GGML_TYPE_F16 || dst->src[1]->type == GGML_TYPE_F32); // src1 contains mask and it is optional - const int64_t ne00 = src0->ne[0]; - const int64_t nrows_x = ggml_nrows(src0); - const int64_t nrows_y = src0->ne[1]; + const int64_t ne00 = dst->src[0]->ne[0]; + const int64_t nrows_x = ggml_nrows(dst->src[0]); + const int64_t nrows_y = dst->src[0]->ne[1]; float scale = 1.0f; float max_bias = 0.0f; @@ -246,6 +241,21 @@ void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, const ggml_tensor *s memcpy(&scale, dst->op_params + 0, sizeof(float)); memcpy(&max_bias, dst->op_params + 1, sizeof(float)); - soft_max_f32_sycl(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00, - nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device); + const float * src0_dd = static_cast(dst->src[0]->data); + float * dst_dd = static_cast(dst->data); + + ggml_sycl_set_device(ctx.device); + dpct::queue_ptr main_stream = ctx.stream(); + + if (dst->src[1] && dst->src[1]->type == GGML_TYPE_F16) { + const sycl::half * src1_dd = static_cast(dst->src[1]->data); + soft_max_f32_sycl(src0_dd, src1_dd, dst_dd, ne00, nrows_x, nrows_y, scale, max_bias, + main_stream, ctx.device); + } else if (dst->src[1] && dst->src[1]->type == GGML_TYPE_F32) { + const float * src1_dd = static_cast(dst->src[1]->data); + soft_max_f32_sycl(src0_dd, src1_dd, dst_dd, ne00, nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device); + } else { + /* mask unavailable */ + soft_max_f32_sycl(src0_dd, nullptr, dst_dd, ne00, nrows_x, nrows_y, scale, max_bias, main_stream, ctx.device); + } } diff --git a/ggml/src/ggml-sycl/softmax.hpp b/ggml/src/ggml-sycl/softmax.hpp index bdb8f712e..2cf8582ec 100644 --- a/ggml/src/ggml-sycl/softmax.hpp +++ b/ggml/src/ggml-sycl/softmax.hpp @@ -15,10 +15,6 @@ #include "common.hpp" -void ggml_sycl_op_soft_max(ggml_backend_sycl_context &ctx, const ggml_tensor *src0, - const ggml_tensor *src1, ggml_tensor *dst, - const float *src0_dd, const float *src1_dd, - float *dst_dd, - const queue_ptr &main_stream); +void ggml_sycl_op_soft_max(ggml_backend_sycl_context &ctx, ggml_tensor *dst); #endif // GGML_SYCL_SOFTMAX_HPP diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 468016403..4c5c4dd9c 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -2347,11 +2347,12 @@ struct test_soft_max : public test_case { const ggml_type type; const std::array ne; const bool mask; + const ggml_type m_prec; const float scale; const float max_bias; std::string vars() override { - return VARS_TO_STR5(type, ne, mask, scale, max_bias); + return VARS_TO_STR6(type, ne, mask, m_prec, scale, max_bias); } // the 1024 test with bias occasionally fails: @@ -2363,9 +2364,10 @@ struct test_soft_max : public test_case { test_soft_max(ggml_type type = GGML_TYPE_F32, std::array ne = {10, 5, 4, 3}, bool mask = false, + ggml_type m_prec = GGML_TYPE_F32, float scale = 1.0f, float max_bias = 0.0f) - : type(type), ne(ne), mask(mask), scale(scale), max_bias(max_bias) {} + : type(type), ne(ne), mask(mask), m_prec(m_prec), scale(scale), max_bias(max_bias) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); @@ -2374,7 +2376,7 @@ struct test_soft_max : public test_case { ggml_tensor * mask = nullptr; if (this->mask) { - mask = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne[0], ne[1]); + mask = ggml_new_tensor_2d(ctx, m_prec, ne[0], ne[1]); ggml_set_name(mask, "mask"); } @@ -4150,17 +4152,28 @@ static std::vector> make_test_cases_eval() { for (float scale : {1.0f, 0.1f}) { for (int64_t ne0 : {16, 1024}) { for (int64_t ne1 : {16, 1024}) { - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, scale, max_bias)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, scale, max_bias)); + if (mask) { + for (ggml_type m_prec : {GGML_TYPE_F32, GGML_TYPE_F16}) { + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, m_prec, scale, max_bias)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, m_prec, scale, max_bias)); + } + } else { + /* The precision of mask here doesn't matter as boolean mask is false */ + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, mask, GGML_TYPE_F32, scale, max_bias)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0-1, ne1-1, 1, 1}, mask, GGML_TYPE_F32, scale, max_bias)); + } } } } } } - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, 0.1f, 0.0f)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, 0.1f, 0.0f)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 0.0f)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 8.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, GGML_TYPE_F32, 0.1f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, true, GGML_TYPE_F16, 0.1f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, GGML_TYPE_F32, 0.1f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F32, 0.1f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F16, 0.1f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F32, 0.1f, 8.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, GGML_TYPE_F16, 0.1f, 8.0f)); for (float max_bias : {0.0f, 8.0f}) { for (float scale : {1.0f, 0.1f}) { @@ -4296,13 +4309,13 @@ static std::vector> make_test_cases_perf() { test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {8192, 512, 2, 1}, {0, 2, 1, 3})); test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, GGML_TYPE_F32, {3072, 512, 2, 1}, {0, 2, 1, 3})); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {4096, 4096, 5, 1}, false, 1.0f, 0.0f)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 4096, 5, 1}, false, 1.0f, 0.0f)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {1024, 1024, 10, 1}, false, 1.0f, 0.0f)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 1024, 10, 1}, false, 1.0f, 0.0f)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {256, 256, 20, 1}, false, 1.0f, 0.0f)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {64, 64, 20, 1}, false, 1.0f, 0.0f)); - test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 64, 20, 1}, false, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {4096, 4096, 5, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 4096, 5, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {1024, 1024, 10, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 1024, 10, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {256, 256, 20, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {64, 64, 20, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); + test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {77, 64, 20, 1}, false, GGML_TYPE_F32, 1.0f, 0.0f)); test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {32, 10, 1, 1})); test_cases.emplace_back(new test_argmax(GGML_TYPE_F32, {1024, 10, 1, 1})); From f643120bad8ab3a753daa64aaac8288ee5800e06 Mon Sep 17 00:00:00 2001 From: Nuno Date: Tue, 28 Jan 2025 11:42:32 +0100 Subject: [PATCH 173/279] docker: add perplexity and bench commands to full image (#11438) Signed-off-by: rare-magma --- .devops/tools.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.devops/tools.sh b/.devops/tools.sh index 9a86e6ea0..41a6b1e55 100755 --- a/.devops/tools.sh +++ b/.devops/tools.sh @@ -13,9 +13,13 @@ elif [[ "$arg1" == '--quantize' || "$arg1" == '-q' ]]; then exec ./llama-quantize "$@" elif [[ "$arg1" == '--run' || "$arg1" == '-r' ]]; then exec ./llama-cli "$@" +elif [[ "$arg1" == '--bench' || "$arg1" == '-b' ]]; then + exec ./llama-bench "$@" +elif [[ "$arg1" == '--perplexity' || "$arg1" == '-p' ]]; then + exec ./llama-perplexity "$@" elif [[ "$arg1" == '--all-in-one' || "$arg1" == '-a' ]]; then echo "Converting PTH to GGML..." - for i in `ls $1/$2/ggml-model-f16.bin*`; do + for i in $(ls $1/$2/ggml-model-f16.bin*); do if [ -f "${i/f16/q4_0}" ]; then echo "Skip model quantization, it already exists: ${i/f16/q4_0}" else @@ -30,6 +34,10 @@ else echo "Available commands: " echo " --run (-r): Run a model previously converted into ggml" echo " ex: -m /models/7B/ggml-model-q4_0.bin -p \"Building a website can be done in 10 simple steps:\" -n 512" + echo " --bench (-b): Benchmark the performance of the inference for various parameters." + echo " ex: -m model.gguf" + echo " --perplexity (-p): Measure the perplexity of a model over a given text." + echo " ex: -m model.gguf -f file.txt" echo " --convert (-c): Convert a llama model into ggml" echo " ex: --outtype f16 \"/models/7B/\" " echo " --quantize (-q): Optimize with quantization process ggml" From 4bf3119d61c1de5660025fd5a611effe503e3d2b Mon Sep 17 00:00:00 2001 From: someone13574 <81528246+someone13574@users.noreply.github.com> Date: Tue, 28 Jan 2025 09:15:34 -0500 Subject: [PATCH 174/279] cmake : don't fail on `GGML_CPU=OFF` (#11457) --- ggml/src/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 8d2b948fb..566709135 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -308,7 +308,7 @@ if (GGML_CPU_ALL_VARIANTS) # MSVC doesn't support AMX ggml_add_cpu_backend_variant(sapphirerapids AVX F16C AVX2 FMA AVX512 AVX512_VBMI AVX512_VNNI AVX512_BF16 AMX_TILE AMX_INT8) endif() -else () +elseif (GGML_CPU) ggml_add_cpu_backend_variant_impl("") endif() From d7d1eccacccaa698c9232014b96a82b359595d6e Mon Sep 17 00:00:00 2001 From: Nuno Date: Tue, 28 Jan 2025 15:17:25 +0100 Subject: [PATCH 175/279] docker: allow installing pip packages system-wide (#11437) Signed-off-by: rare-magma --- .devops/vulkan.Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.devops/vulkan.Dockerfile b/.devops/vulkan.Dockerfile index b5bd3b6d2..eabf832f8 100644 --- a/.devops/vulkan.Dockerfile +++ b/.devops/vulkan.Dockerfile @@ -55,8 +55,9 @@ RUN apt-get update \ git \ python3 \ python3-pip \ - && pip install --upgrade pip setuptools wheel \ - && pip install -r requirements.txt \ + python3-wheel \ + && pip install --break-system-packages --upgrade setuptools \ + && pip install --break-system-packages -r requirements.txt \ && apt autoremove -y \ && apt clean -y \ && rm -rf /tmp/* /var/tmp/* \ From 7fee2889e6565830631fbe76d47ef85cf8fd946a Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Tue, 28 Jan 2025 15:45:41 +0100 Subject: [PATCH 176/279] Add github protocol pulling and http:// (#11465) As pulling protocols to llama-run Signed-off-by: Eric Curtin --- examples/run/run.cpp | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 5980a786f..40f2bcb00 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -673,6 +673,40 @@ class LlamaData { return download(blob_url, bn, true, headers); } + int github_dl(const std::string & model, const std::string & bn) { + std::string repository = model; + std::string branch = "main"; + size_t at_pos = model.find('@'); + if (at_pos != std::string::npos) { + repository = model.substr(0, at_pos); + branch = model.substr(at_pos + 1); + } + + std::vector repo_parts; + size_t start = 0; + for (size_t end = 0; (end = repository.find('/', start)) != std::string::npos; start = end + 1) { + repo_parts.push_back(repository.substr(start, end - start)); + } + + repo_parts.push_back(repository.substr(start)); + if (repo_parts.size() < 3) { + printe("Invalid GitHub repository format\n"); + return 1; + } + + const std::string org = repo_parts[0]; + const std::string project = repo_parts[1]; + std::string project_path = repo_parts[2]; + for (size_t i = 3; i < repo_parts.size(); ++i) { + project_path += "/" + repo_parts[i]; + } + + const std::string url = + "https://raw.githubusercontent.com/" + org + "/" + project + "/" + branch + "/" + project_path; + + return download(url, bn, true); + } + std::string basename(const std::string & path) { const size_t pos = path.find_last_of("/\\"); if (pos == std::string::npos) { @@ -707,8 +741,12 @@ class LlamaData { } else if (string_starts_with(model_, "hf.co/")) { rm_until_substring(model_, "hf.co/"); ret = huggingface_dl(model_, bn); - } else if (string_starts_with(model_, "https://")) { + } else if (string_starts_with(model_, "https://") || string_starts_with(model_, "http://")) { ret = download(model_, bn, true); + } else if (string_starts_with(model_, "github:") || string_starts_with(model_, "github://")) { + rm_until_substring(model_, "github://"); + rm_until_substring(model_, "github:"); + ret = github_dl(model_, bn); } else { // ollama:// or nothing rm_until_substring(model_, "://"); ret = ollama_dl(model_, bn); From cae9fb4361138b937464524eed907328731b81f6 Mon Sep 17 00:00:00 2001 From: Nikita Sarychev <42014488+sARY77@users.noreply.github.com> Date: Tue, 28 Jan 2025 07:42:20 -0800 Subject: [PATCH 177/279] HIP: Only call rocblas_initialize on rocblas versions with the multiple instantation bug (#11080) This disables the workaround on rocblas fixed versions (>=4.0.0) to eliminate the runtime cost and unnecessary VRAM allocation of loading all tensile objects. --- ggml/src/ggml-cuda/ggml-cuda.cu | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 402f37e85..de3f9c2ca 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -172,8 +173,25 @@ static ggml_cuda_device_info ggml_cuda_init() { #ifdef __HIP_PLATFORM_AMD__ // Workaround for a rocBLAS bug when using multiple graphics cards: // https://github.com/ROCmSoftwarePlatform/rocBLAS/issues/1346 - rocblas_initialize(); - CUDA_CHECK(cudaDeviceSynchronize()); + { + int major_version = 0; + size_t version_length = 0; + if (rocblas_get_version_string_size(&version_length) == rocblas_status_success) { + std::string version(version_length, '\0'); + if (rocblas_get_version_string(version.data(), version.size()) == rocblas_status_success) { + version.resize(::strlen(version.c_str())); + int parsed_value = 0; + if (std::from_chars(version.c_str(), version.c_str() + version.length(), parsed_value).ec == std::errc()) { + major_version = parsed_value; + } + } + } + if (major_version < 4) { + GGML_LOG_DEBUG(GGML_CUDA_NAME " calling rocblas_initialize as a workaround for a rocBLAS bug\n"); + rocblas_initialize(); + CUDA_CHECK(cudaDeviceSynchronize()); + } + } #endif ggml_cuda_device_info info = {}; From be5ef7963fcf14a9c77c963fdd3f7b606eacb498 Mon Sep 17 00:00:00 2001 From: uvos Date: Tue, 28 Jan 2025 23:06:32 +0100 Subject: [PATCH 178/279] HIP: Supress transformation warning in softmax.cu loops with bounds not known at compile time can not be unrolled. when ncols_template == 0, the bounds of the loop are not constexpr, thus llvm cant unroll the loops here. --- ggml/src/ggml-cuda/softmax.cu | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ggml/src/ggml-cuda/softmax.cu b/ggml/src/ggml-cuda/softmax.cu index 9aa4b8489..da377200e 100644 --- a/ggml/src/ggml-cuda/softmax.cu +++ b/ggml/src/ggml-cuda/softmax.cu @@ -13,6 +13,12 @@ __device__ float __forceinline__ t2f32(half val) { return __half2float(val); } +// When ncols_template == 0 the bounds for the loops in this function are not known and can't be unrolled. +// As we want to keep pragma unroll for all other cases we supress the clang transformation warning here. +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wpass-failed" +#endif template static __global__ void soft_max_f32( const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y, @@ -118,6 +124,9 @@ static __global__ void soft_max_f32( dst[col] = vals[col] * inv_sum; } } +#ifdef __clang__ +#pragma clang diagnostic pop +#endif static __global__ void soft_max_back_f32( const float * grad, const float * dstf, float * dst, const int ncols, const float scale) { From d0c08040b6c8bebeade7b8d5764df6cf901678d5 Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Wed, 29 Jan 2025 00:02:56 +0100 Subject: [PATCH 179/279] ci : fix build CPU arm64 (#11472) * ci : fix build CPU arm64 * failed, trying ubuntu 22 * vulkan: ubuntu 24 * vulkan : jammy --> noble --- .devops/vulkan.Dockerfile | 4 ++-- .github/workflows/docker.yml | 5 ++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.devops/vulkan.Dockerfile b/.devops/vulkan.Dockerfile index eabf832f8..9064f3838 100644 --- a/.devops/vulkan.Dockerfile +++ b/.devops/vulkan.Dockerfile @@ -1,4 +1,4 @@ -ARG UBUNTU_VERSION=22.04 +ARG UBUNTU_VERSION=24.04 FROM ubuntu:$UBUNTU_VERSION AS build @@ -7,7 +7,7 @@ RUN apt update && apt install -y git build-essential cmake wget # Install Vulkan SDK and cURL RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add - && \ - wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list && \ + wget -qO /etc/apt/sources.list.d/lunarg-vulkan-noble.list https://packages.lunarg.com/vulkan/lunarg-vulkan-noble.list && \ apt update -y && \ apt-get install -y vulkan-sdk libcurl4-openssl-dev curl diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 6bf22eb66..6955a7dc8 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -28,7 +28,7 @@ jobs: push_to_registry: name: Push Docker image to Docker Hub - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 env: COMMIT_SHA: ${{ github.sha }} strategy: @@ -36,8 +36,7 @@ jobs: matrix: config: # Multi-stage build - - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} - - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/arm64", full: true, light: true, server: true, freediskspace: false} + - { tag: "cpu", dockerfile: ".devops/cpu.Dockerfile", platforms: "linux/amd64,linux/arm64", full: true, light: true, server: true, freediskspace: false} - { tag: "cuda", dockerfile: ".devops/cuda.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} - { tag: "musa", dockerfile: ".devops/musa.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} - { tag: "intel", dockerfile: ".devops/intel.Dockerfile", platforms: "linux/amd64", full: true, light: true, server: true, freediskspace: false} From cf8cc856d7d02165bd08593b4757e1256a62d501 Mon Sep 17 00:00:00 2001 From: peidaqi Date: Tue, 28 Jan 2025 16:03:42 -0700 Subject: [PATCH 180/279] server : Fixed wrong function name in llamacpp server unit test (#11473) The test_completion_stream_with_openai_library() function is actually with stream=False by default, and test_completion_with_openai_library() with stream=True --- examples/server/tests/unit/test_completion.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/server/tests/unit/test_completion.py b/examples/server/tests/unit/test_completion.py index c1fc12462..0ed5b99be 100644 --- a/examples/server/tests/unit/test_completion.py +++ b/examples/server/tests/unit/test_completion.py @@ -87,7 +87,7 @@ def test_completion_stream_vs_non_stream(): assert content_stream == res_non_stream.body["content"] -def test_completion_stream_with_openai_library(): +def test_completion_with_openai_library(): global server server.start() client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") @@ -102,7 +102,7 @@ def test_completion_stream_with_openai_library(): assert match_regex("(going|bed)+", res.choices[0].text) -def test_completion_with_openai_library(): +def test_completion_stream_with_openai_library(): global server server.start() client = OpenAI(api_key="dummy", base_url=f"http://{server.server_host}:{server.server_port}/v1") From 794fe23f29fb40104975c91fe19f23798f7c726e Mon Sep 17 00:00:00 2001 From: Emreerdog <34742675+Emreerdog@users.noreply.github.com> Date: Wed, 29 Jan 2025 02:22:06 +0300 Subject: [PATCH 181/279] cmake: add hints for locating ggml on Windows using Llama find-package (#11466) --- cmake/llama-config.cmake.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/llama-config.cmake.in b/cmake/llama-config.cmake.in index 40ade96e5..90cbec5b6 100644 --- a/cmake/llama-config.cmake.in +++ b/cmake/llama-config.cmake.in @@ -9,7 +9,7 @@ set_and_check(LLAMA_INCLUDE_DIR "@PACKAGE_LLAMA_INCLUDE_INSTALL_DIR@") set_and_check(LLAMA_LIB_DIR "@PACKAGE_LLAMA_LIB_INSTALL_DIR@") set_and_check(LLAMA_BIN_DIR "@PACKAGE_LLAMA_BIN_INSTALL_DIR@") -find_package(ggml REQUIRED) +find_package(ggml REQUIRED HINTS ${LLAMA_LIB_DIR}/cmake) find_library(llama_LIBRARY llama REQUIRED From 325afb370a1a7b32b5fe46a749bc840c66db9765 Mon Sep 17 00:00:00 2001 From: Molly Sophia Date: Wed, 29 Jan 2025 12:07:21 +0800 Subject: [PATCH 182/279] llama: fix missing k_cache store for rwkv6qwen2 (#11445) Signed-off-by: Molly Sophia --- src/llama.cpp | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/llama.cpp b/src/llama.cpp index 12e8f41fc..192b20a27 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -7700,17 +7700,13 @@ struct llm_build_context { 1 ); + struct ggml_tensor * last_norm_att = ggml_view_3d(ctx0, x_norm_att, n_embd, 1, n_seqs, x_norm_att->nb[1], x_norm_att->nb[2], (n_seq_tokens-1)*n_embd*ggml_element_size(x_norm_att)); ggml_build_forward_expand( gf, ggml_cpy( ctx0, - wkv_states, - ggml_view_1d( - ctx0, - kv_self.v_l[il], - hparams.n_embd_v_s() * n_seqs, - hparams.n_embd_v_s() * kv_head * ggml_element_size(kv_self.v_l[il]) - ) + ggml_view_1d(ctx0, last_norm_att, n_embd * n_seqs, 0), + ggml_view_1d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s() * n_seqs, hparams.n_embd_k_s() * kv_head * ggml_element_size(kv_self.k_l[il])) ) ); From b636228c0ad0db95bf73008094c6145a05615cf6 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Wed, 29 Jan 2025 09:38:54 +0100 Subject: [PATCH 183/279] embedding : enable --no-warmup option (#11475) This commit enables the `--no-warmup` option for the llama-embeddings. The motivation for this change is to allow the user to disable the warmup when running the the program. --- common/arg.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/arg.cpp b/common/arg.cpp index a6226a34b..f5e9b294f 100644 --- a/common/arg.cpp +++ b/common/arg.cpp @@ -877,7 +877,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex [](common_params & params) { params.warmup = false; } - ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER})); + ).set_examples({LLAMA_EXAMPLE_MAIN, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_EMBEDDING})); add_opt(common_arg( {"--spm-infill"}, string_format( From d2e518e9b4231077013ffb0e5c4cc24a0c4c1b7d Mon Sep 17 00:00:00 2001 From: issixx <46835150+issixx@users.noreply.github.com> Date: Fri, 17 Jan 2025 21:29:08 +0900 Subject: [PATCH 184/279] ggml-cpu : fix ggml_graph_compute_thread did not terminate on abort. (ggml/1065) some threads kept looping and failed to terminate properly after an abort during CPU execution. Co-authored-by: issi --- ggml/src/ggml-cpu/ggml-cpu.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c index 9e627da8c..e809f05d2 100644 --- a/ggml/src/ggml-cpu/ggml-cpu.c +++ b/ggml/src/ggml-cpu/ggml-cpu.c @@ -1302,7 +1302,7 @@ struct ggml_threadpool { // these are atomic as an annotation for thread-sanitizer atomic_bool stop; // Used for stopping the threadpool altogether atomic_bool pause; // Used for pausing the threadpool or individual threads - atomic_bool abort; // Used for aborting processing of a graph + atomic_int abort; // Used for aborting processing of a graph struct ggml_compute_state * workers; // per thread state int n_threads_max; // number of threads in the pool @@ -13851,14 +13851,14 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { /*.threadpool=*/ tp, }; - for (int node_n = 0; node_n < cgraph->n_nodes && !tp->abort; node_n++) { + for (int node_n = 0; node_n < cgraph->n_nodes && atomic_load_explicit(&tp->abort, memory_order_relaxed) != node_n; node_n++) { struct ggml_tensor * node = cgraph->nodes[node_n]; ggml_compute_forward(¶ms, node); if (state->ith == 0 && cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) { - tp->abort = true; + atomic_store_explicit(&tp->abort, node_n + 1, memory_order_relaxed); tp->ec = GGML_STATUS_ABORTED; } @@ -14031,7 +14031,7 @@ static struct ggml_threadpool * ggml_threadpool_new_impl( threadpool->current_chunk = 0; threadpool->stop = false; threadpool->pause = tpp->paused; - threadpool->abort = false; + threadpool->abort = -1; threadpool->workers = NULL; threadpool->n_threads_max = tpp->n_threads; threadpool->n_threads_cur = tpp->n_threads; @@ -14110,7 +14110,7 @@ enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cpl threadpool->cgraph = cgraph; threadpool->cplan = cplan; threadpool->current_chunk = 0; - threadpool->abort = false; + threadpool->abort = -1; threadpool->ec = GGML_STATUS_SUCCESS; } From 1a0e87d29152cb9d4ce13b4ad64b0382c9ba1ab6 Mon Sep 17 00:00:00 2001 From: William Tambellini Date: Thu, 23 Jan 2025 11:59:08 -0800 Subject: [PATCH 185/279] ggml : add option to not print stack on abort (ggml/1081) * Add option to not print stack on abort Add option/envvar to disable stack printing on abort. Also link some unittests with Threads to fix link errors on ubuntu/g++11. * Update ggml/src/ggml.c --------- Co-authored-by: Diego Devesa --- ggml/src/ggml.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 92c4294c5..3b4861542 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -128,6 +128,10 @@ static void ggml_print_backtrace_symbols(void) { #endif static void ggml_print_backtrace(void) { + const char * GGML_NO_BACKTRACE = getenv("GGML_NO_BACKTRACE"); + if (GGML_NO_BACKTRACE) { + return; + } char attach[32]; snprintf(attach, sizeof(attach), "attach %d", getpid()); int pid = fork(); From 815857791d3639a4d544d0a8cf25a49b0325c08c Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 29 Jan 2025 11:25:29 +0200 Subject: [PATCH 186/279] sync : ggml --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index cfba59d32..ddb9d817e 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -d92321c0d151fe73a47d89738c7c3091ac904297 +32f0b85987396945afea2291d5f4c5862434292b From f0d4b29edfc633ec3ba6442482a6b43a5cd80a01 Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Wed, 29 Jan 2025 12:23:10 +0100 Subject: [PATCH 187/279] Parse https://ollama.com/library/ syntax (#11480) People search for ollama models using the web ui, this change allows one to copy the url from the browser and for it to be compatible with llama-run. Signed-off-by: Eric Curtin --- examples/run/run.cpp | 40 ++++++++++++++++------------------------ 1 file changed, 16 insertions(+), 24 deletions(-) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 40f2bcb00..9cecae48c 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -674,36 +674,27 @@ class LlamaData { } int github_dl(const std::string & model, const std::string & bn) { - std::string repository = model; - std::string branch = "main"; - size_t at_pos = model.find('@'); + std::string repository = model; + std::string branch = "main"; + const size_t at_pos = model.find('@'); if (at_pos != std::string::npos) { repository = model.substr(0, at_pos); branch = model.substr(at_pos + 1); } - std::vector repo_parts; - size_t start = 0; - for (size_t end = 0; (end = repository.find('/', start)) != std::string::npos; start = end + 1) { - repo_parts.push_back(repository.substr(start, end - start)); - } - - repo_parts.push_back(repository.substr(start)); + const std::vector repo_parts = string_split(repository, "/"); if (repo_parts.size() < 3) { printe("Invalid GitHub repository format\n"); return 1; } - const std::string org = repo_parts[0]; - const std::string project = repo_parts[1]; - std::string project_path = repo_parts[2]; - for (size_t i = 3; i < repo_parts.size(); ++i) { - project_path += "/" + repo_parts[i]; + const std::string & org = repo_parts[0]; + const std::string & project = repo_parts[1]; + std::string url = "https://raw.githubusercontent.com/" + org + "/" + project + "/" + branch; + for (size_t i = 2; i < repo_parts.size(); ++i) { + url += "/" + repo_parts[i]; } - const std::string url = - "https://raw.githubusercontent.com/" + org + "/" + project + "/" + branch + "/" + project_path; - return download(url, bn, true); } @@ -735,19 +726,20 @@ class LlamaData { } const std::string bn = basename(model_); - if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://")) { + if (string_starts_with(model_, "hf://") || string_starts_with(model_, "huggingface://") || + string_starts_with(model_, "hf.co/")) { + rm_until_substring(model_, "hf.co/"); rm_until_substring(model_, "://"); ret = huggingface_dl(model_, bn); - } else if (string_starts_with(model_, "hf.co/")) { - rm_until_substring(model_, "hf.co/"); - ret = huggingface_dl(model_, bn); - } else if (string_starts_with(model_, "https://") || string_starts_with(model_, "http://")) { + } else if ((string_starts_with(model_, "https://") || string_starts_with(model_, "http://")) && + !string_starts_with(model_, "https://ollama.com/library/")) { ret = download(model_, bn, true); } else if (string_starts_with(model_, "github:") || string_starts_with(model_, "github://")) { - rm_until_substring(model_, "github://"); rm_until_substring(model_, "github:"); + rm_until_substring(model_, "://"); ret = github_dl(model_, bn); } else { // ollama:// or nothing + rm_until_substring(model_, "ollama.com/library/"); rm_until_substring(model_, "://"); ret = ollama_dl(model_, bn); } From 2711d0215ff6a1ecb2202ac8c1f135bceed7057b Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Wed, 29 Jan 2025 09:26:50 -0600 Subject: [PATCH 188/279] vulkan: Catch pipeline creation failure and print an error message (#11436) * vulkan: Catch pipeline creation failure and print an error message Also, fix some warnings from my on-demand compile change. * vulkan: fix pipeline creation logging --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index a9d6b923c..6c7e60650 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -774,12 +774,12 @@ static uint32_t compile_count = 0; static std::mutex compile_count_mutex; static std::condition_variable compile_count_cond; -static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipeline, const std::string name, size_t spv_size, const void* spv_data, const std::string entrypoint, - uint32_t parameter_count, uint32_t push_constant_size, std::array wg_denoms, std::vector specialization_constants, - uint32_t align, bool disable_robustness, bool require_full_subgroups, uint32_t required_subgroup_size) { - VK_LOG_DEBUG("ggml_vk_create_pipeline(" << device->name << ", " << name << ", " << entrypoint << ", " << parameter_count << ", " << push_constant_size << - ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << align << - ", " << disable_robustness << ", " << require_full_subgroups << ", " << required_subgroup_size << ")"); +static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipeline, size_t spv_size, const void* spv_data, const std::string entrypoint, + uint32_t parameter_count, std::array wg_denoms, std::vector specialization_constants, + bool disable_robustness, bool require_full_subgroups, uint32_t required_subgroup_size) { + VK_LOG_DEBUG("ggml_vk_create_pipeline(" << device->name << ", " << pipeline->name << ", " << entrypoint << ", " << parameter_count << + ", (" << wg_denoms[0] << "," << wg_denoms[1] << "," << wg_denoms[2] << "), specialization_constants, " << + disable_robustness << ", " << require_full_subgroups << ", " << required_subgroup_size << ")"); GGML_ASSERT(parameter_count > 0); GGML_ASSERT(wg_denoms[0] > 0 && wg_denoms[1] > 0 && wg_denoms[2] > 0); // NOLINT @@ -864,7 +864,13 @@ static void ggml_vk_create_pipeline_func(vk_device& device, vk_pipeline& pipelin compute_pipeline_create_info.setPNext(&rci); } - pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value; + try { + pipeline->pipeline = device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value; + } catch (const vk::SystemError& e) { + std::cerr << "ggml_vulkan: Compute pipeline creation failed for " << pipeline->name << std::endl; + std::cerr << "ggml_vulkan: " << e.what() << std::endl; + throw e; + } pipeline->compiled = true; { @@ -1560,8 +1566,8 @@ static void ggml_vk_load_shaders(vk_device& device) { } compile_count++; } - compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), name, spv_size, spv_data, entrypoint, - parameter_count, push_constant_size, wg_denoms, specialization_constants, align, disable_robustness, require_full_subgroups, required_subgroup_size)); + compiles.push_back(std::async(ggml_vk_create_pipeline_func, std::ref(device), std::ref(pipeline), spv_size, spv_data, entrypoint, + parameter_count, wg_denoms, specialization_constants, disable_robustness, require_full_subgroups, required_subgroup_size)); }; #if defined(VK_NV_cooperative_matrix2) && defined(GGML_VULKAN_COOPMAT2_GLSLC_SUPPORT) From e51c47b401f8cb5f21630a05171e2529cde4d186 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Wed, 29 Jan 2025 16:34:18 +0100 Subject: [PATCH 189/279] server : update auto gen files comments [no ci] (#11484) * server : update auto gen files comments This commit updates the 'auto generated files' comments in server.cpp and removes `deps.sh` from the comment. The motivation for this change is that `deps.sh` was removed in Commit 91c36c269bca75b2d08119c653512cd20b4ea2ba ("server : (web ui) Various improvements, now use vite as bundler (#10599)"). * squash! server : update auto gen files comments [no ci] Move comments about file generation to README.md. * squash! server : update auto gen files comments [no ci] Remove the comments in server.cpp that mention that information can be found in the README.md file. --- examples/server/README.md | 6 +++++- examples/server/server.cpp | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index 5022de672..e788d8b59 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -236,9 +236,13 @@ npm i # to run the dev server npm run dev -# to build the public/index.html +# to build the public/index.html.gz npm run build ``` +After `public/index.html.gz` has been generated we need to generate the c++ +headers (like build/examples/server/index.html.gz.hpp) that will be included +by server.cpp. This is done by building `llama-server` as described in the +[build](#build) section above. NOTE: if you are using the vite dev server, you can change the API base URL to llama.cpp. To do that, run this code snippet in browser's console: diff --git a/examples/server/server.cpp b/examples/server/server.cpp index b1cde2d7f..c5efbdb09 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -14,7 +14,7 @@ // mime type for sending response #define MIMETYPE_JSON "application/json; charset=utf-8" -// auto generated files (update with ./deps.sh) +// auto generated files (see README.md for details) #include "index.html.gz.hpp" #include "loading.html.hpp" From 66ee4f297cff3c7ce98b31dbc0ce909d41b9e408 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20Oudompheng?= Date: Wed, 29 Jan 2025 18:29:39 +0100 Subject: [PATCH 190/279] vulkan: implement initial support for IQ2 and IQ3 quantizations (#11360) * vulkan: initial support for IQ3_S * vulkan: initial support for IQ3_XXS * vulkan: initial support for IQ2_XXS * vulkan: initial support for IQ2_XS * vulkan: optimize Q3_K by removing branches * vulkan: implement dequantize variants for coopmat2 * vulkan: initial support for IQ2_S * vulkan: vertically realign code * port failing dequant callbacks from mul_mm * Fix array length mismatches * vulkan: avoid using workgroup size before it is referenced * tests: increase timeout for Vulkan llvmpipe backend --------- Co-authored-by: Jeff Bolz --- .github/workflows/build.yml | 3 +- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 157 +++- .../vulkan-shaders/copy_from_quant.comp | 4 +- .../vulkan-shaders/copy_to_quant.comp | 4 +- .../vulkan-shaders/dequant_funcs.comp | 218 +++++- .../vulkan-shaders/dequant_funcs_cm2.comp | 164 ++++ .../vulkan-shaders/dequant_iq2_s.comp | 44 ++ .../vulkan-shaders/dequant_iq2_xs.comp | 43 + .../vulkan-shaders/dequant_iq2_xxs.comp | 48 ++ .../vulkan-shaders/dequant_iq3_s.comp | 39 + .../vulkan-shaders/dequant_iq3_xxs.comp | 49 ++ .../vulkan-shaders/dequant_iq4_nl.comp | 2 +- .../vulkan-shaders/flash_attn_cm2.comp | 4 +- .../vulkan-shaders/get_rows_quant.comp | 4 +- .../vulkan-shaders/mul_mat_vec.comp | 4 +- .../ggml-vulkan/vulkan-shaders/mul_mm.comp | 122 ++- .../vulkan-shaders/mul_mm_cm2.comp | 4 +- .../src/ggml-vulkan/vulkan-shaders/types.comp | 738 +++++++++++++++++- .../vulkan-shaders/vulkan-shaders-gen.cpp | 5 + 19 files changed, 1616 insertions(+), 40 deletions(-) create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index cd8422f8a..7eaf9c460 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -346,7 +346,8 @@ jobs: id: cmake_test run: | cd build - ctest -L main --verbose --timeout 900 + # This is using llvmpipe and runs slower than other backends + ctest -L main --verbose --timeout 1800 ubuntu-22-cmake-hip: runs-on: ubuntu-22.04 diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 6c7e60650..9ca3959ab 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1616,6 +1616,11 @@ static void ggml_vk_load_shaders(vk_device& device) { //CREATE_FA(GGML_TYPE_Q4_K, q4_k) //CREATE_FA(GGML_TYPE_Q5_K, q5_k) //CREATE_FA(GGML_TYPE_Q6_K, q6_k) + //CREATE_FA(GGML_TYPE_IQ2_XXS, iq2_xxs) + //CREATE_FA(GGML_TYPE_IQ2_XS, iq2_xs) + //CREATE_FA(GGML_TYPE_IQ2_S, iq2_s) + //CREATE_FA(GGML_TYPE_IQ3_XXS, iq3_xxs) + //CREATE_FA(GGML_TYPE_IQ3_S, iq3_s) CREATE_FA(GGML_TYPE_IQ4_NL, iq4_nl) #undef CREATE_FA @@ -1644,7 +1649,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3) CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3) CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f16, _f16acc, mmq_wg_denoms_k, warptile_mmq_k, vk_mat_mat_push_constants, 3) - CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) + CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_XXS].f16acc, matmul_iq2_xxs_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) + CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_XS].f16acc, matmul_iq2_xs_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) + CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_S].f16acc, matmul_iq2_s_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) + CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ3_XXS].f16acc, matmul_iq3_xxs_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) + CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ3_S].f16acc, matmul_iq3_s_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) + CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) CREATE_MM2(pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4) CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) @@ -1657,7 +1667,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) - CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS].f16acc, matmul_id_iq2_xxs_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS].f16acc, matmul_id_iq2_xs_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f16acc, matmul_id_iq2_s_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f16acc, matmul_id_iq3_xxs_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f16acc, matmul_id_iq3_s_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) #undef CREATE_MM #undef CREATE_MM2 } else @@ -1705,7 +1720,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); - CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS].f16acc, matmul_iq2_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS].f16acc, matmul_iq2_xs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f16acc, matmul_iq2_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f16acc, matmul_iq3_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f16acc, matmul_iq3_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); } else { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_1].f16acc, matmul_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); @@ -1718,7 +1738,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); - CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS].f16acc, matmul_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS].f16acc, matmul_iq2_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f16acc, matmul_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f16acc, matmul_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f16acc, matmul_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); } // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines. @@ -1739,7 +1764,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); - CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS].f16acc, matmul_id_iq2_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS].f16acc, matmul_id_iq2_xs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f16acc, matmul_id_iq2_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f16acc, matmul_id_iq3_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f16acc, matmul_id_iq3_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); } else { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_1].f16acc, matmul_id_q4_1_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); @@ -1752,7 +1782,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); - CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS].f16acc, matmul_id_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS].f16acc, matmul_id_iq2_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f16acc, matmul_id_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f16acc, matmul_id_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f16acc, matmul_id_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); } } #undef CREATE_MM2 @@ -1796,7 +1831,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f16acc, matmul_q4_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f16acc, matmul_q5_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f16acc, matmul_q6_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); - CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS].f16acc, matmul_iq2_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS].f16acc, matmul_iq2_xs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f16acc, matmul_iq2_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f16acc, matmul_iq3_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f16acc, matmul_iq3_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines. if (device->mul_mat_id_s || device->mul_mat_id_m || device->mul_mat_id_l) { @@ -1815,7 +1855,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f16acc, matmul_id_q4_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f16acc, matmul_id_q5_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f16acc, matmul_id_q6_k_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); - CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS].f16acc, matmul_id_iq2_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS].f16acc, matmul_id_iq2_xs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f16acc, matmul_id_iq2_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f16acc, matmul_id_iq3_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f16acc, matmul_id_iq3_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); } #undef CREATE_MM2 #undef CREATE_MM @@ -1851,7 +1896,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_K].f32acc, matmul_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q5_K].f32acc, matmul_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q6_K].f32acc, matmul_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); - CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XXS].f32acc, matmul_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_XS].f32acc, matmul_iq2_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f32acc, matmul_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f32acc, matmul_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f32acc, matmul_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines. if (device->mul_mat_id_s || device->mul_mat_id_m || device->mul_mat_id_l) { @@ -1870,7 +1920,12 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_K].f32acc, matmul_id_q4_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q5_K].f32acc, matmul_id_q5_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q6_K].f32acc, matmul_id_q6_k_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); - CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f32acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XXS].f32acc, matmul_id_iq2_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_XS].f32acc, matmul_id_iq2_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f32acc, matmul_id_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f32acc, matmul_id_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f32acc, matmul_id_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f32acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); } #undef CREATE_MM } @@ -1901,7 +1956,12 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q4_k_f32_f32_len, mul_mat_vec_q4_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q5_k_f32_f32_len, mul_mat_vec_q5_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f32_f32_"+std::to_string(i+1), mul_mat_vec_q6_k_f32_f32_len, mul_mat_vec_q6_k_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq2_xxs_f32_f32_len, mul_mat_vec_iq2_xxs_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq2_xs_f32_f32_len, mul_mat_vec_iq2_xs_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq2_s_f32_f32_len, mul_mat_vec_iq2_s_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq3_xxs_f32_f32_len, mul_mat_vec_iq3_xxs_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq3_s_f32_f32_len, mul_mat_vec_iq3_s_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f16_f32_"+std::to_string(i+1), mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F16 ][i], "mul_mat_vec_f16_f16_f32_"+std::to_string(i+1), mul_mat_vec_f16_f16_f32_len, mul_mat_vec_f16_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); @@ -1915,7 +1975,12 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q4_K][i], "mul_mat_vec_q4_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q4_k_f16_f32_len, mul_mat_vec_q4_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q5_K][i], "mul_mat_vec_q5_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q5_k_f16_f32_len, mul_mat_vec_q5_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_Q6_K][i], "mul_mat_vec_q6_k_f16_f32_"+std::to_string(i+1), mul_mat_vec_q6_k_f16_f32_len, mul_mat_vec_q6_k_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ2_XXS][i], "mul_mat_vec_iq2_xxs_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq2_xxs_f16_f32_len, mul_mat_vec_iq2_xxs_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ2_XS][i], "mul_mat_vec_iq2_xs_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq2_xs_f16_f32_len, mul_mat_vec_iq2_xs_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq2_s_f16_f32_len, mul_mat_vec_iq2_s_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq3_xxs_f16_f32_len, mul_mat_vec_iq3_xxs_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq3_s_f16_f32_len, mul_mat_vec_iq3_s_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); } ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_F32 ], "mul_mat_vec_id_f32_f32", mul_mat_vec_id_f32_f32_len, mul_mat_vec_id_f32_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2, 1, 1}, {device->subgroup_size, 2}, 1); @@ -1930,7 +1995,12 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q4_K], "mul_mat_vec_id_q4_k_f32", mul_mat_vec_id_q4_k_f32_len, mul_mat_vec_id_q4_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q5_K], "mul_mat_vec_id_q5_k_f32", mul_mat_vec_id_q5_k_f32_len, mul_mat_vec_id_q5_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_Q6_K], "mul_mat_vec_id_q6_k_f32", mul_mat_vec_id_q6_k_f32_len, mul_mat_vec_id_q6_k_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); - ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XXS], "mul_mat_vec_id_iq2_xxs_f32", mul_mat_vec_id_iq2_xxs_f32_len, mul_mat_vec_id_iq2_xxs_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_XS], "mul_mat_vec_id_iq2_xs_f32", mul_mat_vec_id_iq2_xs_f32_len, mul_mat_vec_id_iq2_xs_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_S], "mul_mat_vec_id_iq2_s_f32", mul_mat_vec_id_iq2_s_f32_len, mul_mat_vec_id_iq2_s_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_XXS], "mul_mat_vec_id_iq3_xxs_f32", mul_mat_vec_id_iq3_xxs_f32_len, mul_mat_vec_id_iq3_xxs_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_S], "mul_mat_vec_id_iq3_s_f32", mul_mat_vec_id_iq3_s_f32_len, mul_mat_vec_id_iq3_s_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); // dequant shaders ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_F32 ], "f32_to_f16", dequant_f32_len, dequant_f32_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1); @@ -1944,7 +2014,12 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q4_K], "dequant_q4_k", dequant_q4_k_len, dequant_q4_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q5_K], "dequant_q5_k", dequant_q5_k_len, dequant_q5_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_Q6_K], "dequant_q6_k", dequant_q6_k_len, dequant_q6_k_data, "main", 2, 5 * sizeof(uint32_t), {256 * 64, 1, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_NL], "dequant_iq4_nl", dequant_iq4_nl_len, dequant_iq4_nl_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_XXS], "dequant_iq2_xxs", dequant_iq2_xxs_len, dequant_iq2_xxs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_XS], "dequant_iq2_xs", dequant_iq2_xs_len, dequant_iq2_xs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_S], "dequant_iq2_s", dequant_iq2_s_len, dequant_iq2_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ3_XXS], "dequant_iq3_xxs", dequant_iq3_xxs_len, dequant_iq3_xxs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ3_S], "dequant_iq3_s", dequant_iq3_s_len, dequant_iq3_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_NL], "dequant_iq4_nl", dequant_iq4_nl_len, dequant_iq4_nl_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1); // get_rows ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_F32 ], "get_rows_f32", get_rows_f32_len, get_rows_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1); @@ -1954,7 +2029,12 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_0], "get_rows_q5_0", get_rows_q5_0_len, get_rows_q5_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q5_1], "get_rows_q5_1", get_rows_q5_1_len, get_rows_q5_1_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_Q8_0], "get_rows_q8_0", get_rows_q8_0_len, get_rows_q8_0_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl", get_rows_iq4_nl_len, get_rows_iq4_nl_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_XXS], "get_rows_iq2_xxs", get_rows_iq2_xxs_len, get_rows_iq2_xxs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_XS], "get_rows_iq2_xs", get_rows_iq2_xs_len, get_rows_iq2_xs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_S], "get_rows_iq2_s", get_rows_iq2_s_len, get_rows_iq2_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ3_XXS], "get_rows_iq3_xxs", get_rows_iq3_xxs_len, get_rows_iq3_xxs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ3_S], "get_rows_iq3_s", get_rows_iq3_s_len, get_rows_iq3_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl", get_rows_iq4_nl_len, get_rows_iq4_nl_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F32 ], "get_rows_f32_f32", get_rows_f32_f32_len, get_rows_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F16 ], "get_rows_f16_f32", get_rows_f16_f32_len, get_rows_f16_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1); @@ -1963,7 +2043,12 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_0], "get_rows_q5_0_f32", get_rows_q5_0_f32_len, get_rows_q5_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q5_1], "get_rows_q5_1_f32", get_rows_q5_1_f32_len, get_rows_q5_1_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_Q8_0], "get_rows_q8_0_f32", get_rows_q8_0_f32_len, get_rows_q8_0_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); - ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_XXS], "get_rows_iq2_xxs_f32", get_rows_iq2_xxs_f32_len, get_rows_iq2_xxs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_XS], "get_rows_iq2_xs_f32", get_rows_iq2_xs_f32_len, get_rows_iq2_xs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_S], "get_rows_iq2_s_f32", get_rows_iq2_s_f32_len, get_rows_iq2_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ3_XXS], "get_rows_iq3_xxs_f32", get_rows_iq3_xxs_f32_len, get_rows_iq3_xxs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ3_S], "get_rows_iq3_s_f32", get_rows_iq3_s_f32_len, get_rows_iq3_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256 * 4, 1, 1}, {}, 1); @@ -2890,6 +2975,11 @@ static vk_pipeline ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ4_NL: break; default: @@ -2938,6 +3028,11 @@ static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_conte case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ4_NL: break; default: @@ -2969,6 +3064,11 @@ static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ4_NL: break; default: @@ -3012,6 +3112,11 @@ static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_co case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ4_NL: break; default: @@ -3038,6 +3143,11 @@ static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec_id(ggml_backend_vk_context case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ4_NL: break; default: @@ -7907,6 +8017,11 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_TYPE_Q4_K: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ4_NL: break; default: @@ -7975,6 +8090,11 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm //case GGML_TYPE_Q4_K: //case GGML_TYPE_Q5_K: //case GGML_TYPE_Q6_K: + //case GGML_TYPE_IQ2_XXS: + //case GGML_TYPE_IQ2_XS: + //case GGML_TYPE_IQ2_S: + //case GGML_TYPE_IQ3_XXS: + //case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ4_NL: break; default: @@ -7992,6 +8112,11 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_TYPE_Q5_0: case GGML_TYPE_Q5_1: case GGML_TYPE_Q8_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ3_S: case GGML_TYPE_IQ4_NL: return true; default: diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp b/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp index c09bf496b..aeae5400d 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp @@ -12,8 +12,8 @@ layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; #endif void main() { -#if defined(DATA_A_IQ4_NL) - init_iq4nl_shmem(); +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) + init_iq_shmem(gl_WorkGroupSize); if (gl_LocalInvocationIndex.x != 0) { return; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp b/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp index ccf5b980a..d4b068e61 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp @@ -217,8 +217,8 @@ void quantize(uint dst_idx, uint src_idx) #endif void main() { -#if defined(DATA_A_IQ4_NL) - init_iq4nl_shmem(); +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) + init_iq_shmem(gl_WorkGroupSize); if (gl_LocalInvocationIndex.x != 0) { return; } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp index 91bb8f8db..ee6877531 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp @@ -88,6 +88,222 @@ vec4 dequantize4(uint ib, uint iqs, uint a_offset) { } #endif +#if defined(DATA_A_IQ2_XXS) +vec2 dequantize(uint ib, uint iqs, uint a_offset) { + const uint ib32 = iqs / 32; + const uint ib8 = (iqs / 8) % 4; + const uint qs = data_a[a_offset + ib].qs[8 * ib32 + ib8]; + // Scales are stored as packed 7+7+7+7+4 bits (4 sign tuples and 1 int4 scale) + const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[4 * ib32 + 2], + data_a_packed16[a_offset + ib].qs[4 * ib32 + 3])); + const float db = 0.25 * (0.5 + (signs >> 28)); + const uint sign7 = bitfieldExtract(signs, 7 * int(ib8), 7); + // Add parity bit + const uint sign8 = sign7 | (bitCount(sign7) << 7); + const uint sign = sign8 >> (iqs % 8); + const u8vec4 grid = unpack8(iq2xxs_grid[qs][(iqs % 8) / 4] >> (8 * (iqs % 4))); + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + return db * vec2( + grid.x * (sign0 ? -1.0 : 1.0), + grid.y * (sign1 ? -1.0 : 1.0) + ); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint ib32 = iqs / 32; + const uint ib8 = (iqs / 8) % 4; + const uint qs = data_a[a_offset + ib].qs[8 * ib32 + ib8]; + // Scales are stored as packed 7+7+7+7+4 bits (4 sign tuples and 1 int4 scale) + const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[4 * ib32 + 2], + data_a_packed16[a_offset + ib].qs[4 * ib32 + 3])); + const float db = 0.25 * (0.5 + (signs >> 28)); + const uint sign7 = bitfieldExtract(signs, 7 * int(ib8), 7); + // Add parity bit + const uint sign8 = sign7 | (bitCount(sign7) << 7); + const uint sign = sign8 >> (iqs % 8); + const u8vec4 grid = unpack8(iq2xxs_grid[qs][(iqs % 8) / 4] >> (8 * (iqs % 4))); + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + bool sign2 = (sign & 4) != 0; + bool sign3 = (sign & 8) != 0; + return db * vec4( + grid.x * (sign0 ? -1.0 : 1.0), + grid.y * (sign1 ? -1.0 : 1.0), + grid.z * (sign2 ? -1.0 : 1.0), + grid.w * (sign3 ? -1.0 : 1.0) + ); +} +#endif + +#if defined(DATA_A_IQ2_XS) +vec2 dequantize(uint ib, uint iqs, uint a_offset) { + const uint scale = (data_a[a_offset + ib].scales[iqs / 32] >> (4 * ((iqs / 16) & 1))) & 0xf; + const uint qs = data_a[a_offset + ib].qs[iqs / 8]; + const float db = 0.25 * (0.5 + scale); + const uint sign7 = qs >> 9; + // Add parity bit + const uint sign8 = sign7 | (bitCount(sign7) << 7); + const uint sign = sign8 >> (iqs % 8); + const u8vec4 grid = unpack8(iq2xs_grid[qs & 511][(iqs % 8) / 4] >> (8 * (iqs % 4))); + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + return db * vec2( + grid.x * (sign0 ? -1.0 : 1.0), + grid.y * (sign1 ? -1.0 : 1.0) + ); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint scale = (data_a[a_offset + ib].scales[iqs / 32] >> (4 * ((iqs / 16) & 1))) & 0xf; + const uint qs = data_a[a_offset + ib].qs[iqs / 8]; + const float db = 0.25 * (0.5 + scale); + const uint sign7 = qs >> 9; + // Add parity bit + const uint sign8 = sign7 | (bitCount(sign7) << 7); + const uint sign = sign8 >> (iqs % 8); + const u8vec4 grid = unpack8(iq2xs_grid[qs & 511][(iqs % 8) / 4] >> (8 * (iqs % 4))); + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + bool sign2 = (sign & 4) != 0; + bool sign3 = (sign & 8) != 0; + return db * vec4( + grid.x * (sign0 ? -1.0 : 1.0), + grid.y * (sign1 ? -1.0 : 1.0), + grid.z * (sign2 ? -1.0 : 1.0), + grid.w * (sign3 ? -1.0 : 1.0) + ); +} +#endif + +#if defined(DATA_A_IQ2_S) +vec2 dequantize(uint ib, uint iqs, uint a_offset) { + const uint ib32 = iqs / 32; + const uint ib8 = iqs / 8; + + const uint scale = (data_a[a_offset + ib].scales[ib32] >> (4 * ((iqs / 16) & 1))) & 0xf; + const uint qs = data_a[a_offset + ib].qs[ib8]; + const uint qh = data_a[a_offset + ib].qh[ib32]; + const uint qhshift = 2 * (ib8 % 4); + const uint sign = data_a[a_offset + ib].qs[QUANT_K / 8 + ib8] >> (iqs % 8); + + const float db = 0.25 * (0.5 + scale); + const u8vec4 grid = unpack8(iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(iqs % 8) / 4]); + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + return db * vec2( + grid[iqs % 4] * (sign0 ? -1.0 : 1.0), + grid[(iqs % 4) + 1] * (sign1 ? -1.0 : 1.0) + ); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint ib32 = iqs / 32; + const uint ib8 = iqs / 8; + + const uint scale = (data_a[a_offset + ib].scales[ib32] >> (4 * ((iqs / 16) & 1))) & 0xf; + const uint qs = data_a[a_offset + ib].qs[ib8]; + const uint qh = data_a[a_offset + ib].qh[ib32]; + const uint qhshift = 2 * (ib8 % 4); + const uint sign = data_a[a_offset + ib].qs[QUANT_K / 8 + ib8] >> (iqs % 8); + + const float db = 0.25 * (0.5 + scale); + const u8vec4 grid = unpack8(iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(iqs % 8) / 4]); + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + bool sign2 = (sign & 4) != 0; + bool sign3 = (sign & 8) != 0; + return db * vec4( + grid.x * (sign0 ? -1.0 : 1.0), + grid.y * (sign1 ? -1.0 : 1.0), + grid.z * (sign2 ? -1.0 : 1.0), + grid.w * (sign3 ? -1.0 : 1.0) + ); +} +#endif + +#if defined(DATA_A_IQ3_XXS) +vec2 dequantize(uint ib, uint iqs, uint a_offset) { + const uint ib4 = iqs / 4; + const uint ib32 = iqs / 32; + const uint is = QUANT_K / 4 + 4 * ib32; + const uint qs = data_a[a_offset + ib].qs[ib4]; + // Scales are stored as packed 7+7+7+7+4 bits (4 sign tuples and 1 int4 scale) + const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[is / 2], + data_a_packed16[a_offset + ib].qs[is / 2 + 1])); + const float db = 0.5 * (0.5 + (signs >> 28)); + const uint sign7 = bitfieldExtract(signs, 7 * (int(ib4 / 2) % 4), 7); + // Add parity bit + const uint sign8 = sign7 | (bitCount(sign7) << 7); + const uint sign = sign8 >> (iqs % 8); + const u8vec4 grid = unpack8(iq3xxs_grid[qs] >> (8 * (iqs % 4))); + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + return db * vec2( + grid.x * (sign0 ? -1.0 : 1.0), + grid.y * (sign1 ? -1.0 : 1.0) + ); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint ib4 = iqs / 4; + const uint ib32 = iqs / 32; + const uint is = QUANT_K / 4 + 4 * ib32; + const uint qs = data_a[a_offset + ib].qs[ib4]; + const uint signs = pack32(u16vec2(data_a_packed16[a_offset + ib].qs[is / 2], + data_a_packed16[a_offset + ib].qs[is / 2 + 1])); + const float db = 0.5 * (0.5 + (signs >> 28)); + const uint sign7 = bitfieldExtract(signs, 7 * (int(ib4 / 2) % 4), 7); + // Add parity bit + const uint sign8 = sign7 | (bitCount(sign7) << 7); + const uint sign = sign8 >> (iqs % 8); + const u8vec4 grid = unpack8(iq3xxs_grid[qs]); + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + bool sign2 = (sign & 4) != 0; + bool sign3 = (sign & 8) != 0; + return db * vec4( + grid.x * (sign0 ? -1.0 : 1.0), + grid.y * (sign1 ? -1.0 : 1.0), + grid.z * (sign2 ? -1.0 : 1.0), + grid.w * (sign3 ? -1.0 : 1.0) + ); +} +#endif + +#if defined(DATA_A_IQ3_S) +vec2 dequantize(uint ib, uint iqs, uint a_offset) { + const uint qs = data_a[a_offset + ib].qs[iqs / 4]; + const uint qh = data_a[a_offset + ib].qh[iqs / 32]; + const uint sign = data_a[a_offset + ib].signs[iqs / 8] >> (iqs % 8); + const uint scale = data_a[a_offset + ib].scales[iqs / 64]; + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + const float db = 1 + 2 * ((scale >> (4 * ((iqs / 32) & 1))) & 0xf); + const uint32_t grid = iq3s_grid[qs | ((qh << (8 - ((iqs / 4) % 8))) & 256)] >> (8 * (iqs % 4)); + return db * vec2( + int(grid & 0xFF) * (sign0 ? -1.0 : 1.0), + int((grid >> 8) & 0xFF) * (sign1 ? -1.0 : 1.0) + ); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint ib4 = iqs / 4; + const uint ib32 = iqs / 32; + const uint qs = data_a[a_offset + ib].qs[ib4]; + const uint qh = data_a[a_offset + ib].qh[ib32]; + const uint sign = data_a[a_offset + ib].signs[iqs / 8] >> (iqs % 8); + const uint scale = data_a[a_offset + ib].scales[ib32 / 2]; + bool sign0 = (sign & 1) != 0; + bool sign1 = (sign & 2) != 0; + bool sign2 = (sign & 4) != 0; + bool sign3 = (sign & 8) != 0; + const float db = 1 + 2 * ((scale >> (4 * (ib32 & 1))) & 0xf); + const uint32_t grid = iq3s_grid[qs | ((qh << (8 - ib4 % 8)) & 256)] >> (8 * (iqs % 4)); + return db * vec4( + int(grid & 0xFF) * (sign0 ? -1.0 : 1.0), + int((grid >> 8) & 0xFF) * (sign1 ? -1.0 : 1.0), + int((grid >> 16) & 0xFF) * (sign2 ? -1.0 : 1.0), + int((grid >> 24) & 0xFF) * (sign3 ? -1.0 : 1.0) + ); +} +#endif + #if defined(DATA_A_IQ4_NL) vec2 dequantize(uint ib, uint iqs, uint a_offset) { const uint vui = uint(data_a[a_offset + ib].qs[iqs]); @@ -105,7 +321,7 @@ vec2 get_dm(uint ib, uint a_offset) { } #endif -#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ4_NL) +#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) vec2 get_dm(uint ib, uint a_offset) { return vec2(float(data_a[a_offset + ib].d), 0); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp index 175e31fa7..974efd3f9 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp @@ -301,6 +301,160 @@ float16_t dequantFuncQ6_K(const in decodeBufQ6_K bl, const in uint blockCoords[2 return ret; } +#if defined(DATA_A_IQ2_XXS) +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_XXS { + block_iq2_xxs block; +}; + +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_XXS_packed16 { + block_iq2_xxs_packed16 block; +}; + +float16_t dequantFuncIQ2_XXS(const in decodeBufIQ2_XXS bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + decodeBufIQ2_XXS_packed16 bl16 = decodeBufIQ2_XXS_packed16(bl); + const float16_t d = bl.block.d; + const uint idx = coordInBlock[1]; + + const uint ib32 = (idx & 0xE0) >> 5; // 0..7 + const uint ib8 = (idx & 0x18) >> 3; // 0..3 + const uint iqs = 8 * ib32 + ib8; + + const uint8_t qs = bl.block.qs[iqs]; + const uint signscale = pack32(u16vec2(bl16.block.qs[4*ib32+2], bl16.block.qs[4*ib32+3])); + + const float16_t dscale = bl.block.d * 0.25hf * (0.5hf + float16_t(signscale >> 28)); + uint sign = bitfieldExtract(signscale, 7 * int(ib8), 7); + sign |= bitCount(sign) << 7; + + const uint8_t g = unpack8(iq2xxs_grid[qs][(idx & 4) >> 2])[idx & 3]; + + float16_t ret = dscale * float16_t(g) * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf); + + return ret; +} +#endif + +#if defined(DATA_A_IQ2_XS) +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_XS { + block_iq2_xs block; +}; + +float16_t dequantFuncIQ2_XS(const in decodeBufIQ2_XS bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const float16_t d = bl.block.d; + const uint idx = coordInBlock[1]; + + const uint is = (idx & 0xE0) >> 5; // 0..8 + const uint sshift = (idx & 0x10) >> 2; // 0,4 + const uint iqs = (idx & 0xF8) >> 3; // 0..63 + + const uint16_t qs = bl.block.qs[iqs]; + const float16_t dscale = bl.block.d * 0.25hf * (0.5hf + float16_t((bl.block.scales[is] >> sshift) & 0xF)); + + uint sign = uint(qs >> 9); + sign |= bitCount(sign) << 7; + const uint8_t g = unpack8(iq2xs_grid[qs & 0x1FF][(idx & 4) >> 2])[idx & 3]; + + float16_t ret = dscale * float16_t(g) * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf); + return ret; +} +#endif + +#if defined(DATA_A_IQ2_S) +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2_S { + block_iq2_s block; +}; + +float16_t dequantFuncIQ2_S(const in decodeBufIQ2_S bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + uint idx = coordInBlock[1]; + uint lsb = idx & 1; + idx /= 2; + + const uint ib8 = (idx % 128) / 4; // 0..31 + const uint ib32 = ib8 / 4; // 0..7 + + const uint scale = (bl.block.scales[ib32] >> (2 * (ib8 & 2))) & 0xf; + const uint qs = bl.block.qs[ib8]; + const uint qh = bl.block.qh[ib32]; + const uint qhshift = 2 * (ib8 % 4); + const uint sign = bl.block.qs[QUANT_K / 8 + ib8] >> (2 * (idx % 4)); + + const float d = float(bl.block.d); + const float db = d * 0.25 * (0.5 + scale); + const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(int8_t(sign << 1), int8_t(sign)))); + const uint16_t grid = unpack16(iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(idx & 2) >> 1])[idx & 1]; + const vec2 v = db * vec2(sign01) * vec2(unpack8(grid)); + return float16_t(v[lsb]); +} +#endif + +#if defined(DATA_A_IQ3_XXS) +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3_XXS { + block_iq3_xxs block; +}; + +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3_XXS_packed16 { + block_iq3_xxs_packed16 block; +}; + +float16_t dequantFuncIQ3_XXS(const in decodeBufIQ3_XXS bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + uint idx = coordInBlock[1]; + uint lsb = idx & 1; + idx /= 2; + + const uint iqs = (idx % 128) / 2; // 0..63 + const uint is = QUANT_K / 4 + 4 * (iqs / 8); // 8 values + + const float d = float(bl.block.d); + const uint qs = bl.block.qs[iqs]; + const uint signs = pack32(u8vec4( + bl.block.qs[is+0], + bl.block.qs[is+1], + bl.block.qs[is+2], + bl.block.qs[is+3] + )); + const float db = d * 0.5 * (0.5 + (signs >> 28)); + const uint32_t sign7 = bitfieldExtract(signs, 7 * (int(iqs / 2) % 4), 7); + const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (2 * (idx % 4)); + const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(int8_t(sign << 1), int8_t(sign)))); + const uint grid = iq3xxs_grid[qs] >> (16 * (idx & 1)); + const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy); + return float16_t(v[lsb]); +} +#endif + +#if defined(DATA_A_IQ3_S) +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3_S { + block_iq3_s block; +}; + +float16_t dequantFuncIQ3_S(const in decodeBufIQ3_S bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + uint idx = coordInBlock[1]; + uint lsb = idx & 1; + idx /= 2; + + const uint iqs = (idx % 128) / 2; // 0..63 + const uint iqh = iqs / 8; + + const float d = float(bl.block.d); + const uint qs = bl.block.qs[iqs]; + const uint qh = bl.block.qh[iqh]; + const int8_t sign = int8_t(bl.block.signs[iqs / 2] >> (2 * (idx % 4))); + const uint scale = bl.block.scales[iqs / 16]; + const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(sign << 1, sign))); + const float db = d * (1 + 2 * ((scale >> (4 * (iqh & 1))) & 0xf)); + const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)] >> (16 * (idx % 2)); + const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy); + + return float16_t(v[lsb]); +} +#endif + + #if defined(DATA_A_IQ4_NL) layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ4_NL { block_iq4_nl block; @@ -340,6 +494,16 @@ float16_t dequantFuncIQ4_NL(const in decodeBufIQ4_NL bl, const in uint blockCoor #define dequantFuncA dequantFuncQ5_K #elif defined(DATA_A_Q6_K) #define dequantFuncA dequantFuncQ6_K +#elif defined(DATA_A_IQ2_XXS) +#define dequantFuncA dequantFuncIQ2_XXS +#elif defined(DATA_A_IQ2_XS) +#define dequantFuncA dequantFuncIQ2_XS +#elif defined(DATA_A_IQ2_S) +#define dequantFuncA dequantFuncIQ2_S +#elif defined(DATA_A_IQ3_XXS) +#define dequantFuncA dequantFuncIQ3_XXS +#elif defined(DATA_A_IQ3_S) +#define dequantFuncA dequantFuncIQ3_S #elif defined(DATA_A_IQ4_NL) #define dequantFuncA dequantFuncIQ4_NL #endif diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp new file mode 100644 index 000000000..48f6b65bc --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_s.comp @@ -0,0 +1,44 @@ +#version 450 + +#include "dequant_head.comp" + +layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer A {block_iq2_s data_a[];}; +layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; + +void main() { + // Each thread handles 1 subblock (32 values with 2 scales) + const uint ib = gl_WorkGroupID.x * 32 + gl_LocalInvocationID.x / 8; + + init_iq_shmem(gl_WorkGroupSize); + + if (ib >= p.nel / 256) { + return; + } + + const uint ib32 = gl_LocalInvocationID.x % 8; + const uint b_idx = 256 * ib + 32 * ib32; + + const float d = float(data_a[ib].d); + const vec2 scale = vec2(data_a[ib].scales[ib32] & 0xf, data_a[ib].scales[ib32] >> 4); + const vec2 db = d * (0.5 + scale) * 0.25; + + uint qh = data_a[ib].qh[ib32]; + [[unroll]] for (uint l = 0; l < 4; ++l) { + uint qs = data_a[ib].qs[4 * ib32 + l]; + const uint8_t sign = data_a[ib].qs[QUANT_K / 8 + 4 * ib32 + l]; + qs |= (qh << (8 - 2 * l)) & 0x300; + const uvec2 grid = iq2s_grid[qs & 511]; + const u8vec4 grid0 = unpack8(grid.x); + const u8vec4 grid1 = unpack8(grid.y); + data_b[b_idx + 8 * l + 0] = D_TYPE(db[l/2] * grid0.x * ((sign & 1) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 1] = D_TYPE(db[l/2] * grid0.y * ((sign & 2) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 2] = D_TYPE(db[l/2] * grid0.z * ((sign & 4) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 3] = D_TYPE(db[l/2] * grid0.w * ((sign & 8) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 4] = D_TYPE(db[l/2] * grid1.x * ((sign & 16) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 5] = D_TYPE(db[l/2] * grid1.y * ((sign & 32) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 6] = D_TYPE(db[l/2] * grid1.z * ((sign & 64) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 7] = D_TYPE(db[l/2] * grid1.w * ((sign & 128) != 0 ? -1.0 : 1.0)); + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp new file mode 100644 index 000000000..a08331c40 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xs.comp @@ -0,0 +1,43 @@ +#version 450 + +#include "dequant_head.comp" + +layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer A {block_iq2_xs data_a[];}; +layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; + +void main() { + // Each thread handles 1 subblock (32 values with 2 scales) + const uint ib = gl_WorkGroupID.x * 32 + gl_LocalInvocationID.x / 8; + + init_iq_shmem(gl_WorkGroupSize); + + if (ib >= p.nel / 256) { + return; + } + + const uint ib32 = gl_LocalInvocationID.x % 8; + const uint b_idx = 256 * ib + 32 * ib32; + + const float d = float(data_a[ib].d); + const vec2 scale = vec2(data_a[ib].scales[ib32] & 0xf, data_a[ib].scales[ib32] >> 4); + const vec2 db = d * (0.5 + scale) * 0.25; + + [[unroll]] for (uint l = 0; l < 4; ++l) { + uint16_t qs = data_a[ib].qs[4 * ib32 + l]; + const uint sign7 = qs >> 9; + const uint sign8 = sign7 | (bitCount(sign7) << 7); // parity bit + const uvec2 grid = iq2xs_grid[qs & 511]; + const u8vec4 grid0 = unpack8(grid.x); + const u8vec4 grid1 = unpack8(grid.y); + data_b[b_idx + 8 * l + 0] = D_TYPE(db[l/2] * grid0.x * ((sign8 & 1) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 1] = D_TYPE(db[l/2] * grid0.y * ((sign8 & 2) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 2] = D_TYPE(db[l/2] * grid0.z * ((sign8 & 4) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 3] = D_TYPE(db[l/2] * grid0.w * ((sign8 & 8) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 4] = D_TYPE(db[l/2] * grid1.x * ((sign8 & 16) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 5] = D_TYPE(db[l/2] * grid1.y * ((sign8 & 32) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 6] = D_TYPE(db[l/2] * grid1.z * ((sign8 & 64) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 7] = D_TYPE(db[l/2] * grid1.w * ((sign8 & 128) != 0 ? -1.0 : 1.0)); + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp new file mode 100644 index 000000000..e370690bc --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq2_xxs.comp @@ -0,0 +1,48 @@ +#version 450 + +#include "dequant_head.comp" + +layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer A {block_iq2_xxs data_a[];}; +layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; + +void main() { + // Each thread handles 1 scale block (32 values) + // Each block is described by 4 lattice indices, 4x7 sign bits and 4 scale bits + const uint ib = gl_WorkGroupID.x * 32 + gl_LocalInvocationID.x / 8; + + init_iq_shmem(gl_WorkGroupSize); + + if (ib >= p.nel / 256) { + return; + } + + const uint is = gl_LocalInvocationID.x % 8; + const uint b_idx = 256 * ib + 32 * is; + + const float d = float(data_a[ib].d); + uint signscale = pack32(u8vec4( + data_a[ib].qs[8*is + 4], + data_a[ib].qs[8*is + 5], + data_a[ib].qs[8*is + 6], + data_a[ib].qs[8*is + 7] + )); + const float db = d * (0.5 + (signscale >> 28)) * 0.25; + + [[unroll]] for (uint l = 0; l < 4; ++l) { + const uint sign7 = bitfieldExtract(signscale, 7 * int(l), 7); + const uint sign8 = sign7 | (bitCount(sign7) << 7); // parity bit + const uvec2 grid = iq2xxs_grid[data_a[ib].qs[8 * is + l]]; + const u8vec4 grid0 = unpack8(grid.x); + const u8vec4 grid1 = unpack8(grid.y); + data_b[b_idx + 8 * l + 0] = D_TYPE(db * grid0.x * ((sign8 & 1) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 1] = D_TYPE(db * grid0.y * ((sign8 & 2) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 2] = D_TYPE(db * grid0.z * ((sign8 & 4) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 3] = D_TYPE(db * grid0.w * ((sign8 & 8) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 4] = D_TYPE(db * grid1.x * ((sign8 & 16) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 5] = D_TYPE(db * grid1.y * ((sign8 & 32) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 6] = D_TYPE(db * grid1.z * ((sign8 & 64) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 7] = D_TYPE(db * grid1.w * ((sign8 & 128) != 0 ? -1.0 : 1.0)); + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp new file mode 100644 index 000000000..c3f4bca5d --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_s.comp @@ -0,0 +1,39 @@ +#version 450 + +#include "dequant_head.comp" + +layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer A {block_iq3_s data_a[];}; +layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; + +void main() { + // Each thread handles 1 scale nibble. + // Each block contains 4 scale bytes (8 scales) for 256 output values. + const uint ib = gl_WorkGroupID.x * 32 + gl_LocalInvocationID.x / 8; + + init_iq_shmem(gl_WorkGroupSize); + + if (ib >= p.nel / 256) { + return; + } + + const uint is = gl_LocalInvocationID.x % 8; + const uint b_idx = 256 * ib + 32 * is; + + const float d = float(data_a[ib].d); + const float db = d * (1 + 2 * ((data_a[ib].scales[is] >> (4 * (is % 2))) & 0xf)); + + // We must produce 32 values using 4 sign bytes, 1 qh byte, 8 qs bytes. + uint qh = data_a[ib].qh[is]; + [[unroll]] for (uint l = 0; l < 8; ++l) { + uint qs = data_a[ib].qs[8 * is + l]; + uint gidx = qs | ((qh << (8 - l)) & 256); + uint8_t signs = data_a[ib].signs[8 * is + l / 2] >> (4 * (l & 1)); + u8vec4 grid = unpack8(iq3s_grid[gidx]); + data_b[b_idx + 4 * l + 0] = D_TYPE(db * grid.x * ((signs & 1) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 4 * l + 1] = D_TYPE(db * grid.y * ((signs & 2) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 4 * l + 2] = D_TYPE(db * grid.z * ((signs & 4) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 4 * l + 3] = D_TYPE(db * grid.w * ((signs & 8) != 0 ? -1.0 : 1.0)); + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp new file mode 100644 index 000000000..a92b82961 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq3_xxs.comp @@ -0,0 +1,49 @@ +#version 450 + +#include "dequant_head.comp" + +layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer A {block_iq3_xxs data_a[];}; +layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; + +void main() { + // Each thread handles 1 scale block (32 values) + // 8 threads handle 1 superblock + const uint ib = gl_WorkGroupID.x * 32 + gl_LocalInvocationID.x / 8; + + init_iq_shmem(gl_WorkGroupSize); + + if (ib >= p.nel / 256) { + return; + } + + const uint is = gl_LocalInvocationID.x % 8; + const uint b_idx = 256 * ib + 32 * is; + const uint s_idx = QUANT_K / 4 + 4 * is; + + const float d = float(data_a[ib].d); + uint signscale = pack32(u8vec4( + data_a[ib].qs[s_idx + 0], + data_a[ib].qs[s_idx + 1], + data_a[ib].qs[s_idx + 2], + data_a[ib].qs[s_idx + 3] + )); + const float db = d * (0.5 + (signscale >> 28)) * 0.5; + + [[unroll]] for (uint l = 0; l < 4; ++l) { + const uint sign7 = bitfieldExtract(signscale, 7 * int(l), 7); + // Restore parity bit. + const uint sign8 = sign7 | (bitCount(sign7) << 7); + const u8vec4 grid0 = unpack8(iq3xxs_grid[data_a[ib].qs[8 * is + 2 * l]]); + const u8vec4 grid1 = unpack8(iq3xxs_grid[data_a[ib].qs[8 * is + 2 * l + 1]]); + data_b[b_idx + 8 * l + 0] = D_TYPE(db * grid0.x * ((sign8 & 1) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 1] = D_TYPE(db * grid0.y * ((sign8 & 2) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 2] = D_TYPE(db * grid0.z * ((sign8 & 4) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 3] = D_TYPE(db * grid0.w * ((sign8 & 8) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 4] = D_TYPE(db * grid1.x * ((sign8 & 16) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 5] = D_TYPE(db * grid1.y * ((sign8 & 32) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 6] = D_TYPE(db * grid1.z * ((sign8 & 64) != 0 ? -1.0 : 1.0)); + data_b[b_idx + 8 * l + 7] = D_TYPE(db * grid1.w * ((sign8 & 128) != 0 ? -1.0 : 1.0)); + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp index 8de14fc03..46d9ad15e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_nl.comp @@ -10,7 +10,7 @@ layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; void main() { const uint i = gl_WorkGroupID.x * 4 + gl_LocalInvocationID.x / 64; - init_iq4nl_shmem(); + init_iq_shmem(gl_WorkGroupSize); const uint tid = gl_LocalInvocationID.x % 64; const uint il = tid/32; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp index 3735d0dbb..043a53023 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp @@ -104,8 +104,8 @@ ACC_TYPE Max(const in uint32_t row, const in uint32_t col, const in ACC_TYPE ele #endif void main() { -#if defined(DATA_A_IQ4_NL) - init_iq4nl_shmem(); +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) + init_iq_shmem(gl_WorkGroupSize); #endif const uint32_t N = p.N; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp b/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp index 1426fde65..09dc43d8d 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp @@ -12,8 +12,8 @@ void main() { const uint i11 = (gl_GlobalInvocationID.z)/p.ne12; const uint i12 = (gl_GlobalInvocationID.z)%p.ne12; -#if defined(DATA_A_IQ4_NL) - init_iq4nl_shmem(); +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) + init_iq_shmem(gl_WorkGroupSize); #endif if (i00 >= p.ne00) { diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp index 53902858d..48156e7ba 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp @@ -133,8 +133,8 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { void main() { const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z); -#if defined(DATA_A_IQ4_NL) - init_iq4nl_shmem(); +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) + init_iq_shmem(gl_WorkGroupSize); #endif // do NUM_ROWS at a time, unless there aren't enough remaining rows diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp index 48122cbef..d0559aac8 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp @@ -95,8 +95,8 @@ shared ACC_TYPE coopmat_stage[TM * TN * NUM_WARPS]; #endif void main() { -#if defined(DATA_A_IQ4_NL) - init_iq4nl_shmem(); +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) + init_iq_shmem(gl_WorkGroupSize); #endif #ifdef MUL_MAT_ID @@ -343,10 +343,8 @@ void main() { const uint qsshift = halfsplit * 2; // 0,2,4,6 const uint m = 1 << (4 * n + halfsplit); // 1,2,4,8,16,32,64,128 - const int8_t us = int8_t(is < 4 ? (data_a[ib].scales[is-0] & 0xF) | (((data_a[ib].scales[is+8] >> 0) & 3) << 4) : - is < 8 ? (data_a[ib].scales[is-0] & 0xF) | (((data_a[ib].scales[is+4] >> 2) & 3) << 4) : - is < 12 ? (data_a[ib].scales[is-8] >> 4) | (((data_a[ib].scales[is+0] >> 4) & 3) << 4) : - (data_a[ib].scales[is-8] >> 4) | (((data_a[ib].scales[is-4] >> 6) & 3) << 4)); + const int8_t us = int8_t(((data_a[ib].scales[is % 8] >> (4 * int(is / 8))) & 0xF) + | (((data_a[ib].scales[8 + (is % 4)] >> (2 * int(is / 4))) & 3) << 4)); const float dl = float(data_a[ib].d) * float(us - 32); buf_a[buf_idx ] = FLOAT_TYPE(dl * float(int8_t((data_a[ib].qs[qsi ] >> qsshift) & 3) - (((data_a[ib].hmask[hmi ] & m) != 0) ? 0 : 4))); @@ -439,6 +437,118 @@ void main() { buf_a[buf_idx ] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi ] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi ] >> qhshift) & 3) << 4)) - 32)); buf_a[buf_idx + 1] = FLOAT_TYPE(dscale * float(int8_t(((data_a[ib].ql[qsi + 1] >> (b * 4)) & 0xF) | (((data_a[ib].qh[qhi + 1] >> qhshift) & 3) << 4)) - 32)); +#elif defined(DATA_A_IQ2_XXS) + const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a; + const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A; + + const uint ib = idx / 128; // 2 values per idx + const uint ib32 = (idx % 128) / 16; // 0..7 + const uint ib8 = (idx / 4) % 4; + + const float d = float(data_a[ib].d); + const uint qs = data_a[ib].qs[8 * ib32 + ib8]; + const uint signs = pack32(u8vec4( + data_a[ib].qs[8*ib32 + 4], + data_a[ib].qs[8*ib32 + 5], + data_a[ib].qs[8*ib32 + 6], + data_a[ib].qs[8*ib32 + 7] + )); + const float db = d * 0.25 * (0.5 + (signs >> 28)); + const uint32_t sign7 = bitfieldExtract(signs, 7 * int(ib8), 7); + const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (2 * (idx % 4)); + const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(int8_t(sign << 1), int8_t(sign)))); + const uint grid = iq2xxs_grid[qs][(idx % 4) / 2] >> (16 * (idx & 1)); + const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy); + + buf_a[buf_idx ] = FLOAT_TYPE(v.x); + buf_a[buf_idx + 1] = FLOAT_TYPE(v.y); +#elif defined(DATA_A_IQ2_XS) + const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a; + const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A; + + const uint ib = idx / 128; // 2 values per idx + const uint ib32 = (idx % 128) / 16; // 0..7 + const uint ib8 = (idx / 4) % 4; // 0..3 + + const float d = float(data_a[ib].d); + const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf; + const float db = d * 0.25 * (0.5 + scale); + const uint qs = data_a[ib].qs[4 * ib32 + ib8]; + const uint sign7 = qs >> 9; + const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (2 * (idx % 4)); + const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(int8_t(sign << 1), int8_t(sign)))); + const uint grid = iq2xs_grid[qs & 511][(idx % 4) / 2] >> (16 * (idx & 1)); + const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy); + + buf_a[buf_idx ] = FLOAT_TYPE(v.x); + buf_a[buf_idx + 1] = FLOAT_TYPE(v.y); +#elif defined(DATA_A_IQ2_S) + const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a; + const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A; + + const uint ib = idx / 128; // 2 values per idx + const uint ib8 = (idx % 128) / 4; // 0..31 + const uint ib32 = ib8 / 4; // 0..7 + + const uint scale = (data_a[ib].scales[ib32] >> (2 * (ib8 & 2))) & 0xf; + const uint qs = data_a[ib].qs[ib8]; + const uint qh = data_a[ib].qh[ib32]; + const uint qhshift = 2 * (ib8 % 4); + const uint sign = data_a[ib].qs[QUANT_K / 8 + ib8] >> (2 * (idx % 4)); + + const float d = float(data_a[ib].d); + const float db = d * 0.25 * (0.5 + scale); + const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(int8_t(sign << 1), int8_t(sign)))); + const uint16_t grid = unpack16(iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(idx & 2) >> 1])[idx & 1]; + const vec2 v = db * vec2(sign01) * vec2(unpack8(grid)); + + buf_a[buf_idx ] = FLOAT_TYPE(v.x); + buf_a[buf_idx + 1] = FLOAT_TYPE(v.y); +#elif defined(DATA_A_IQ3_XXS) + const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a; + const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A; + + const uint ib = idx / 128; // 2 values per idx + const uint iqs = (idx % 128) / 2; // 0..63 + const uint is = QUANT_K / 4 + 4 * (iqs / 8); // 8 values + + const float d = float(data_a[ib].d); + const uint qs = data_a[ib].qs[iqs]; + const uint signs = pack32(u8vec4( + data_a[ib].qs[is+0], + data_a[ib].qs[is+1], + data_a[ib].qs[is+2], + data_a[ib].qs[is+3] + )); + const float db = d * 0.5 * (0.5 + (signs >> 28)); + const uint32_t sign7 = bitfieldExtract(signs, 7 * (int(iqs / 2) % 4), 7); + const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (2 * (idx % 4)); + const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(int8_t(sign << 1), int8_t(sign)))); + const uint grid = iq3xxs_grid[qs] >> (16 * (idx & 1)); + const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy); + + buf_a[buf_idx ] = FLOAT_TYPE(v.x); + buf_a[buf_idx + 1] = FLOAT_TYPE(v.y); +#elif defined(DATA_A_IQ3_S) + const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a; + const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A; + + const uint ib = idx / 128; // 2 values per idx + const uint iqs = (idx % 128) / 2; // 0..63 + const uint iqh = iqs / 8; + + const float d = float(data_a[ib].d); + const uint qs = data_a[ib].qs[iqs]; + const uint qh = data_a[ib].qh[iqh]; + const int8_t sign = int8_t(data_a[ib].signs[iqs / 2] >> (2 * (idx % 4))); + const uint scale = data_a[ib].scales[iqs / 16]; + const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(sign << 1, sign))); + const float db = d * (1 + 2 * ((scale >> (4 * (iqh & 1))) & 0xf)); + const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)] >> (16 * (idx % 2)); + const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy); + + buf_a[buf_idx ] = FLOAT_TYPE(v.x); + buf_a[buf_idx + 1] = FLOAT_TYPE(v.y); #elif defined(DATA_A_IQ4_NL) const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a; const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp index 57f9e7245..27c5d68b3 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp @@ -106,8 +106,8 @@ D_TYPE perElemOpD(const in uint32_t r, const in uint32_t c, const in D_TYPE elem #endif void main() { -#if defined(DATA_A_IQ4_NL) - init_iq4nl_shmem(); +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) + init_iq_shmem(gl_WorkGroupSize); #endif #ifdef MUL_MAT_ID diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp index 1e35b6652..9e56a3530 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp @@ -294,6 +294,738 @@ struct block_q6_K_packed16 // IQuants +#define QUANT_K_IQ2_XXS 256 +#define QUANT_R_IQ2_XXS 1 + +struct block_iq2_xxs +{ + float16_t d; + uint8_t qs[QUANT_K_IQ2_XXS/4]; +}; + +struct block_iq2_xxs_packed16 +{ + float16_t d; + uint16_t qs[QUANT_K_IQ2_XXS/8]; +}; + +#if defined(DATA_A_IQ2_XXS) + +const uvec2[256] iq2xxs_grid_const = { + uvec2(0x08080808, 0x08080808), uvec2(0x0808082b, 0x08080808), uvec2(0x08081919, 0x08080808), uvec2(0x08082b08, 0x08080808), + uvec2(0x08082b2b, 0x08080808), uvec2(0x08190819, 0x08080808), uvec2(0x08191908, 0x08080808), uvec2(0x082b0808, 0x08080808), + uvec2(0x082b082b, 0x08080808), uvec2(0x082b2b08, 0x08080808), uvec2(0x082b2b2b, 0x08080808), uvec2(0x19080819, 0x08080808), + uvec2(0x19081908, 0x08080808), uvec2(0x19190808, 0x08080808), uvec2(0x19192b08, 0x08080808), uvec2(0x192b0819, 0x08080808), + uvec2(0x192b1908, 0x08080808), uvec2(0x2b080808, 0x08080808), uvec2(0x2b08082b, 0x08080808), uvec2(0x2b082b2b, 0x08080808), + uvec2(0x2b2b082b, 0x08080808), uvec2(0x08080819, 0x08080819), uvec2(0x08081908, 0x08080819), uvec2(0x08190808, 0x08080819), + uvec2(0x08191919, 0x08080819), uvec2(0x19080808, 0x08080819), uvec2(0x2b081908, 0x08080819), uvec2(0x2b192b08, 0x08080819), + uvec2(0x08080808, 0x0808082b), uvec2(0x0808082b, 0x0808082b), uvec2(0x082b082b, 0x0808082b), uvec2(0x2b08082b, 0x0808082b), + uvec2(0x08080819, 0x08081908), uvec2(0x08081908, 0x08081908), uvec2(0x08190808, 0x08081908), uvec2(0x082b0819, 0x08081908), + uvec2(0x082b1908, 0x08081908), uvec2(0x19080808, 0x08081908), uvec2(0x1908082b, 0x08081908), uvec2(0x19082b08, 0x08081908), + uvec2(0x192b0808, 0x08081908), uvec2(0x2b080819, 0x08081908), uvec2(0x2b081908, 0x08081908), uvec2(0x2b190808, 0x08081908), + uvec2(0x2b2b1908, 0x08081908), uvec2(0x08080808, 0x08081919), uvec2(0x0808082b, 0x08081919), uvec2(0x08082b08, 0x08081919), + uvec2(0x082b0808, 0x08081919), uvec2(0x1908192b, 0x08081919), uvec2(0x192b2b19, 0x08081919), uvec2(0x2b080808, 0x08081919), + uvec2(0x2b190819, 0x08081919), uvec2(0x08082b19, 0x0808192b), uvec2(0x08190808, 0x0808192b), uvec2(0x19080808, 0x0808192b), + uvec2(0x2b081908, 0x0808192b), uvec2(0x2b2b1908, 0x0808192b), uvec2(0x08080808, 0x08082b08), uvec2(0x08081919, 0x08082b08), + uvec2(0x08082b08, 0x08082b08), uvec2(0x08191908, 0x08082b08), uvec2(0x082b2b08, 0x08082b08), uvec2(0x19080819, 0x08082b08), + uvec2(0x19081908, 0x08082b08), uvec2(0x19190808, 0x08082b08), uvec2(0x1919082b, 0x08082b08), uvec2(0x2b082b08, 0x08082b08), + uvec2(0x08081908, 0x08082b19), uvec2(0x19080808, 0x08082b19), uvec2(0x0808082b, 0x08082b2b), uvec2(0x08191908, 0x08082b2b), + uvec2(0x08080819, 0x08190808), uvec2(0x08081908, 0x08190808), uvec2(0x08190808, 0x08190808), uvec2(0x082b0819, 0x08190808), + uvec2(0x19080808, 0x08190808), uvec2(0x192b0808, 0x08190808), uvec2(0x2b081908, 0x08190808), uvec2(0x2b190808, 0x08190808), + uvec2(0x2b191919, 0x08190808), uvec2(0x08080808, 0x08190819), uvec2(0x08082b08, 0x08190819), uvec2(0x082b0808, 0x08190819), + uvec2(0x19190808, 0x08190819), uvec2(0x19192b2b, 0x08190819), uvec2(0x2b080808, 0x08190819), uvec2(0x082b1908, 0x0819082b), + uvec2(0x19081919, 0x0819082b), uvec2(0x08080808, 0x08191908), uvec2(0x08082b08, 0x08191908), uvec2(0x082b0808, 0x08191908), + uvec2(0x082b1919, 0x08191908), uvec2(0x19082b19, 0x08191908), uvec2(0x2b080808, 0x08191908), uvec2(0x08192b08, 0x08191919), + uvec2(0x192b082b, 0x08191919), uvec2(0x08080808, 0x0819192b), uvec2(0x0819192b, 0x0819192b), uvec2(0x08080819, 0x08192b08), + uvec2(0x08081908, 0x08192b08), uvec2(0x08190808, 0x08192b08), uvec2(0x19080808, 0x08192b08), uvec2(0x2b080819, 0x08192b08), + uvec2(0x08080808, 0x08192b19), uvec2(0x08081919, 0x08192b19), uvec2(0x2b2b0808, 0x08192b19), uvec2(0x19190819, 0x08192b2b), + uvec2(0x08080808, 0x082b0808), uvec2(0x0808082b, 0x082b0808), uvec2(0x08082b2b, 0x082b0808), uvec2(0x19081908, 0x082b0808), + uvec2(0x192b0819, 0x082b0808), uvec2(0x2b080808, 0x082b0808), uvec2(0x2b08082b, 0x082b0808), uvec2(0x082b2b19, 0x082b0819), + uvec2(0x19082b08, 0x082b0819), uvec2(0x08080808, 0x082b082b), uvec2(0x0808082b, 0x082b082b), uvec2(0x08080819, 0x082b1908), + uvec2(0x08081908, 0x082b1908), uvec2(0x08190808, 0x082b1908), uvec2(0x19080808, 0x082b1908), uvec2(0x1919192b, 0x082b1908), + uvec2(0x08080808, 0x082b1919), uvec2(0x19080819, 0x082b1919), uvec2(0x192b1908, 0x082b1919), uvec2(0x2b190808, 0x082b192b), + uvec2(0x08082b08, 0x082b2b08), uvec2(0x082b0808, 0x082b2b08), uvec2(0x2b191908, 0x082b2b08), uvec2(0x19081908, 0x082b2b2b), + uvec2(0x08080819, 0x19080808), uvec2(0x08081908, 0x19080808), uvec2(0x08190808, 0x19080808), uvec2(0x08192b08, 0x19080808), + uvec2(0x082b0819, 0x19080808), uvec2(0x082b1908, 0x19080808), uvec2(0x19080808, 0x19080808), uvec2(0x19082b08, 0x19080808), + uvec2(0x1919192b, 0x19080808), uvec2(0x192b0808, 0x19080808), uvec2(0x2b080819, 0x19080808), uvec2(0x2b081908, 0x19080808), + uvec2(0x2b190808, 0x19080808), uvec2(0x08080808, 0x19080819), uvec2(0x082b0808, 0x19080819), uvec2(0x192b0819, 0x19080819), + uvec2(0x2b080808, 0x19080819), uvec2(0x2b081919, 0x19080819), uvec2(0x08080819, 0x1908082b), uvec2(0x08190808, 0x1908082b), + uvec2(0x19082b08, 0x1908082b), uvec2(0x1919192b, 0x1908082b), uvec2(0x192b2b08, 0x1908082b), uvec2(0x08080808, 0x19081908), + uvec2(0x08082b08, 0x19081908), uvec2(0x082b0808, 0x19081908), uvec2(0x2b080808, 0x19081908), uvec2(0x2b192b19, 0x19081908), + uvec2(0x0819082b, 0x19081919), uvec2(0x082b1908, 0x19081919), uvec2(0x08080808, 0x1908192b), uvec2(0x08080819, 0x19082b08), + uvec2(0x08081908, 0x19082b08), uvec2(0x08190808, 0x19082b08), uvec2(0x19080808, 0x19082b08), uvec2(0x19081919, 0x19082b08), + uvec2(0x08080808, 0x19082b19), uvec2(0x19192b08, 0x19082b19), uvec2(0x192b0819, 0x19082b19), uvec2(0x2b08082b, 0x19082b19), + uvec2(0x19081919, 0x19082b2b), uvec2(0x2b190808, 0x19082b2b), uvec2(0x08080808, 0x19190808), uvec2(0x08082b08, 0x19190808), + uvec2(0x08190819, 0x19190808), uvec2(0x08192b19, 0x19190808), uvec2(0x082b0808, 0x19190808), uvec2(0x2b080808, 0x19190808), + uvec2(0x2b082b08, 0x19190808), uvec2(0x08081908, 0x19190819), uvec2(0x1908082b, 0x19190819), uvec2(0x2b2b1908, 0x19190819), + uvec2(0x2b190819, 0x1919082b), uvec2(0x2b190808, 0x19191908), uvec2(0x2b19082b, 0x19191908), uvec2(0x08082b2b, 0x19191919), + uvec2(0x08080819, 0x1919192b), uvec2(0x19191908, 0x1919192b), uvec2(0x08080808, 0x19192b08), uvec2(0x08190819, 0x19192b08), + uvec2(0x08192b19, 0x19192b08), uvec2(0x192b1908, 0x19192b08), uvec2(0x19080808, 0x19192b19), uvec2(0x08082b08, 0x19192b2b), + uvec2(0x08081908, 0x192b0808), uvec2(0x08190808, 0x192b0808), uvec2(0x19080808, 0x192b0808), uvec2(0x192b2b08, 0x192b0808), + uvec2(0x08080808, 0x192b0819), uvec2(0x19191919, 0x192b0819), uvec2(0x08192b08, 0x192b082b), uvec2(0x192b0808, 0x192b082b), + uvec2(0x08080808, 0x192b1908), uvec2(0x08081919, 0x192b1908), uvec2(0x08190808, 0x192b1919), uvec2(0x0819082b, 0x192b1919), + uvec2(0x2b081908, 0x192b1919), uvec2(0x1908082b, 0x192b2b08), uvec2(0x08080808, 0x2b080808), uvec2(0x0808082b, 0x2b080808), + uvec2(0x08082b2b, 0x2b080808), uvec2(0x19080819, 0x2b080808), uvec2(0x2b08082b, 0x2b080808), uvec2(0x08081908, 0x2b080819), + uvec2(0x08192b08, 0x2b080819), uvec2(0x19080808, 0x2b080819), uvec2(0x08190819, 0x2b08082b), uvec2(0x08080819, 0x2b081908), + uvec2(0x08081908, 0x2b081908), uvec2(0x08190808, 0x2b081908), uvec2(0x08191919, 0x2b081908), uvec2(0x19080808, 0x2b081908), + uvec2(0x192b0808, 0x2b081908), uvec2(0x08080808, 0x2b081919), uvec2(0x1908192b, 0x2b081919), uvec2(0x2b191908, 0x2b081919), + uvec2(0x08082b19, 0x2b08192b), uvec2(0x19080808, 0x2b08192b), uvec2(0x192b0808, 0x2b08192b), uvec2(0x0808082b, 0x2b082b08), + uvec2(0x08081908, 0x2b082b19), uvec2(0x08190819, 0x2b082b2b), uvec2(0x08081908, 0x2b190808), uvec2(0x08190808, 0x2b190808), + uvec2(0x082b1908, 0x2b190808), uvec2(0x19080808, 0x2b190808), uvec2(0x2b2b0819, 0x2b190808), uvec2(0x0819192b, 0x2b190819), + uvec2(0x2b080808, 0x2b190819), uvec2(0x19081919, 0x2b19082b), uvec2(0x08080808, 0x2b191908), uvec2(0x082b082b, 0x2b191908), + uvec2(0x19081908, 0x2b191908), uvec2(0x19190819, 0x2b191919), uvec2(0x2b080819, 0x2b192b08), uvec2(0x082b0808, 0x2b192b19), + uvec2(0x0808082b, 0x2b2b0808), uvec2(0x19190808, 0x2b2b0808), uvec2(0x2b081919, 0x2b2b0808), uvec2(0x08082b19, 0x2b2b0819), + uvec2(0x08080808, 0x2b2b082b), uvec2(0x08192b08, 0x2b2b1908), uvec2(0x19190808, 0x2b2b2b08), uvec2(0x08081908, 0x2b2b2b19) +}; + +shared uvec2 iq2xxs_grid[256]; + +void init_iq_shmem(uvec3 wgsize) +{ + // copy the table into shared memory and sync + for (uint i = gl_LocalInvocationIndex.x; i < iq2xxs_grid.length(); i += wgsize.x) { + iq2xxs_grid[i] = iq2xxs_grid_const[i]; + } + barrier(); +} + +#define QUANT_K QUANT_K_IQ2_XXS +#define QUANT_R QUANT_R_IQ2_XXS +#define A_TYPE block_iq2_xxs +#define A_TYPE_PACKED16 block_iq2_xxs_packed16 +#endif + +#define QUANT_K_IQ2_XS 256 +#define QUANT_R_IQ2_XS 1 + +struct block_iq2_xs +{ + float16_t d; + uint16_t qs[QUANT_K_IQ2_XS/8]; + uint8_t scales[QUANT_K_IQ2_XS/32]; +}; + +struct block_iq2_xs_packed16 +{ + float16_t d; + uint16_t qs[QUANT_K_IQ2_XS/8]; + uint16_t scales[QUANT_K_IQ2_XS/64]; +}; + +#if defined(DATA_A_IQ2_XS) + +const uvec2 iq2xs_grid_const[512] = { + uvec2(0x08080808, 0x08080808), uvec2(0x0808082b, 0x08080808), uvec2(0x08081919, 0x08080808), uvec2(0x08082b08, 0x08080808), + uvec2(0x08082b2b, 0x08080808), uvec2(0x08190819, 0x08080808), uvec2(0x08191908, 0x08080808), uvec2(0x0819192b, 0x08080808), + uvec2(0x08192b19, 0x08080808), uvec2(0x082b0808, 0x08080808), uvec2(0x082b082b, 0x08080808), uvec2(0x082b1919, 0x08080808), + uvec2(0x082b2b08, 0x08080808), uvec2(0x19080819, 0x08080808), uvec2(0x19081908, 0x08080808), uvec2(0x1908192b, 0x08080808), + uvec2(0x19082b19, 0x08080808), uvec2(0x19190808, 0x08080808), uvec2(0x1919082b, 0x08080808), uvec2(0x19191919, 0x08080808), + uvec2(0x19192b08, 0x08080808), uvec2(0x192b0819, 0x08080808), uvec2(0x192b1908, 0x08080808), uvec2(0x2b080808, 0x08080808), + uvec2(0x2b08082b, 0x08080808), uvec2(0x2b081919, 0x08080808), uvec2(0x2b082b08, 0x08080808), uvec2(0x2b190819, 0x08080808), + uvec2(0x2b191908, 0x08080808), uvec2(0x2b192b19, 0x08080808), uvec2(0x2b2b0808, 0x08080808), uvec2(0x08080819, 0x08080819), + uvec2(0x08081908, 0x08080819), uvec2(0x0808192b, 0x08080819), uvec2(0x08082b19, 0x08080819), uvec2(0x08190808, 0x08080819), + uvec2(0x0819082b, 0x08080819), uvec2(0x08191919, 0x08080819), uvec2(0x08192b08, 0x08080819), uvec2(0x08192b2b, 0x08080819), + uvec2(0x082b0819, 0x08080819), uvec2(0x082b1908, 0x08080819), uvec2(0x19080808, 0x08080819), uvec2(0x1908082b, 0x08080819), + uvec2(0x19081919, 0x08080819), uvec2(0x19082b08, 0x08080819), uvec2(0x19190819, 0x08080819), uvec2(0x19191908, 0x08080819), + uvec2(0x192b0808, 0x08080819), uvec2(0x192b2b08, 0x08080819), uvec2(0x2b080819, 0x08080819), uvec2(0x2b081908, 0x08080819), + uvec2(0x2b190808, 0x08080819), uvec2(0x08080808, 0x0808082b), uvec2(0x0808082b, 0x0808082b), uvec2(0x08081919, 0x0808082b), + uvec2(0x08082b08, 0x0808082b), uvec2(0x08190819, 0x0808082b), uvec2(0x08191908, 0x0808082b), uvec2(0x082b0808, 0x0808082b), + uvec2(0x19080819, 0x0808082b), uvec2(0x19081908, 0x0808082b), uvec2(0x19190808, 0x0808082b), uvec2(0x19191919, 0x0808082b), + uvec2(0x2b080808, 0x0808082b), uvec2(0x2b082b2b, 0x0808082b), uvec2(0x08080819, 0x08081908), uvec2(0x08081908, 0x08081908), + uvec2(0x0808192b, 0x08081908), uvec2(0x08082b19, 0x08081908), uvec2(0x08190808, 0x08081908), uvec2(0x0819082b, 0x08081908), + uvec2(0x08191919, 0x08081908), uvec2(0x08192b08, 0x08081908), uvec2(0x082b0819, 0x08081908), uvec2(0x082b1908, 0x08081908), + uvec2(0x19080808, 0x08081908), uvec2(0x1908082b, 0x08081908), uvec2(0x19081919, 0x08081908), uvec2(0x19082b08, 0x08081908), + uvec2(0x19190819, 0x08081908), uvec2(0x19191908, 0x08081908), uvec2(0x1919192b, 0x08081908), uvec2(0x192b0808, 0x08081908), + uvec2(0x2b080819, 0x08081908), uvec2(0x2b081908, 0x08081908), uvec2(0x2b190808, 0x08081908), uvec2(0x08080808, 0x08081919), + uvec2(0x0808082b, 0x08081919), uvec2(0x08081919, 0x08081919), uvec2(0x08082b08, 0x08081919), uvec2(0x08190819, 0x08081919), + uvec2(0x08191908, 0x08081919), uvec2(0x082b0808, 0x08081919), uvec2(0x19080819, 0x08081919), uvec2(0x19081908, 0x08081919), + uvec2(0x19190808, 0x08081919), uvec2(0x192b0819, 0x08081919), uvec2(0x2b080808, 0x08081919), uvec2(0x08080819, 0x0808192b), + uvec2(0x08081908, 0x0808192b), uvec2(0x08190808, 0x0808192b), uvec2(0x082b192b, 0x0808192b), uvec2(0x19080808, 0x0808192b), + uvec2(0x1908082b, 0x0808192b), uvec2(0x2b081908, 0x0808192b), uvec2(0x08080808, 0x08082b08), uvec2(0x0808082b, 0x08082b08), + uvec2(0x08081919, 0x08082b08), uvec2(0x08082b08, 0x08082b08), uvec2(0x08082b2b, 0x08082b08), uvec2(0x08190819, 0x08082b08), + uvec2(0x08191908, 0x08082b08), uvec2(0x082b0808, 0x08082b08), uvec2(0x082b1919, 0x08082b08), uvec2(0x19080819, 0x08082b08), + uvec2(0x19081908, 0x08082b08), uvec2(0x19190808, 0x08082b08), uvec2(0x19192b08, 0x08082b08), uvec2(0x2b080808, 0x08082b08), + uvec2(0x2b2b0808, 0x08082b08), uvec2(0x2b2b2b2b, 0x08082b08), uvec2(0x08080819, 0x08082b19), uvec2(0x08081908, 0x08082b19), + uvec2(0x08190808, 0x08082b19), uvec2(0x19080808, 0x08082b19), uvec2(0x2b080819, 0x08082b19), uvec2(0x2b082b19, 0x08082b19), + uvec2(0x08080808, 0x08082b2b), uvec2(0x082b0808, 0x08082b2b), uvec2(0x082b2b08, 0x08082b2b), uvec2(0x2b19192b, 0x08082b2b), + uvec2(0x2b2b0808, 0x08082b2b), uvec2(0x08080819, 0x08190808), uvec2(0x08081908, 0x08190808), uvec2(0x0808192b, 0x08190808), + uvec2(0x08082b19, 0x08190808), uvec2(0x08190808, 0x08190808), uvec2(0x0819082b, 0x08190808), uvec2(0x08191919, 0x08190808), + uvec2(0x08192b08, 0x08190808), uvec2(0x082b0819, 0x08190808), uvec2(0x082b1908, 0x08190808), uvec2(0x19080808, 0x08190808), + uvec2(0x1908082b, 0x08190808), uvec2(0x19081919, 0x08190808), uvec2(0x19082b08, 0x08190808), uvec2(0x19190819, 0x08190808), + uvec2(0x19191908, 0x08190808), uvec2(0x192b0808, 0x08190808), uvec2(0x192b2b2b, 0x08190808), uvec2(0x2b080819, 0x08190808), + uvec2(0x2b081908, 0x08190808), uvec2(0x2b190808, 0x08190808), uvec2(0x08080808, 0x08190819), uvec2(0x0808082b, 0x08190819), + uvec2(0x08081919, 0x08190819), uvec2(0x08082b08, 0x08190819), uvec2(0x08190819, 0x08190819), uvec2(0x08191908, 0x08190819), + uvec2(0x082b0808, 0x08190819), uvec2(0x19080819, 0x08190819), uvec2(0x19081908, 0x08190819), uvec2(0x19190808, 0x08190819), + uvec2(0x2b080808, 0x08190819), uvec2(0x2b191908, 0x08190819), uvec2(0x2b19192b, 0x08190819), uvec2(0x08080819, 0x0819082b), + uvec2(0x08081908, 0x0819082b), uvec2(0x0808192b, 0x0819082b), uvec2(0x08190808, 0x0819082b), uvec2(0x19080808, 0x0819082b), + uvec2(0x192b0808, 0x0819082b), uvec2(0x08080808, 0x08191908), uvec2(0x0808082b, 0x08191908), uvec2(0x08081919, 0x08191908), + uvec2(0x08082b08, 0x08191908), uvec2(0x08190819, 0x08191908), uvec2(0x08191908, 0x08191908), uvec2(0x082b0808, 0x08191908), + uvec2(0x19080819, 0x08191908), uvec2(0x19081908, 0x08191908), uvec2(0x19082b19, 0x08191908), uvec2(0x19190808, 0x08191908), + uvec2(0x192b1908, 0x08191908), uvec2(0x2b080808, 0x08191908), uvec2(0x08080819, 0x08191919), uvec2(0x08081908, 0x08191919), + uvec2(0x08190808, 0x08191919), uvec2(0x19080808, 0x08191919), uvec2(0x08080808, 0x0819192b), uvec2(0x08191908, 0x0819192b), + uvec2(0x19082b19, 0x0819192b), uvec2(0x08080819, 0x08192b08), uvec2(0x08081908, 0x08192b08), uvec2(0x08190808, 0x08192b08), + uvec2(0x0819082b, 0x08192b08), uvec2(0x19080808, 0x08192b08), uvec2(0x19191908, 0x08192b08), uvec2(0x2b08192b, 0x08192b08), + uvec2(0x08080808, 0x08192b19), uvec2(0x08081919, 0x08192b19), uvec2(0x192b192b, 0x08192b19), uvec2(0x19190819, 0x08192b2b), + uvec2(0x2b2b2b19, 0x08192b2b), uvec2(0x08080808, 0x082b0808), uvec2(0x0808082b, 0x082b0808), uvec2(0x08081919, 0x082b0808), + uvec2(0x08082b08, 0x082b0808), uvec2(0x08082b2b, 0x082b0808), uvec2(0x08190819, 0x082b0808), uvec2(0x08191908, 0x082b0808), + uvec2(0x082b0808, 0x082b0808), uvec2(0x19080819, 0x082b0808), uvec2(0x19081908, 0x082b0808), uvec2(0x19190808, 0x082b0808), + uvec2(0x2b080808, 0x082b0808), uvec2(0x2b2b0808, 0x082b0808), uvec2(0x08080819, 0x082b0819), uvec2(0x08081908, 0x082b0819), + uvec2(0x08190808, 0x082b0819), uvec2(0x19080808, 0x082b0819), uvec2(0x19082b08, 0x082b0819), uvec2(0x192b1919, 0x082b0819), + uvec2(0x08080808, 0x082b082b), uvec2(0x082b082b, 0x082b082b), uvec2(0x2b080808, 0x082b082b), uvec2(0x2b2b2b08, 0x082b082b), + uvec2(0x08080819, 0x082b1908), uvec2(0x08081908, 0x082b1908), uvec2(0x08190808, 0x082b1908), uvec2(0x082b2b19, 0x082b1908), + uvec2(0x19080808, 0x082b1908), uvec2(0x08080808, 0x082b1919), uvec2(0x19080819, 0x082b1919), uvec2(0x1919082b, 0x082b1919), + uvec2(0x2b192b19, 0x082b1919), uvec2(0x08080819, 0x082b192b), uvec2(0x08192b2b, 0x082b192b), uvec2(0x2b2b192b, 0x082b192b), + uvec2(0x08080808, 0x082b2b08), uvec2(0x08082b08, 0x082b2b08), uvec2(0x08082b2b, 0x082b2b08), uvec2(0x082b0808, 0x082b2b08), + uvec2(0x19191919, 0x082b2b08), uvec2(0x2b082b08, 0x082b2b08), uvec2(0x2b2b082b, 0x082b2b08), uvec2(0x192b2b08, 0x082b2b19), + uvec2(0x2b190808, 0x082b2b19), uvec2(0x08082b08, 0x082b2b2b), uvec2(0x082b0808, 0x082b2b2b), uvec2(0x2b08082b, 0x082b2b2b), + uvec2(0x2b082b08, 0x082b2b2b), uvec2(0x2b082b2b, 0x082b2b2b), uvec2(0x08080819, 0x19080808), uvec2(0x08081908, 0x19080808), + uvec2(0x0808192b, 0x19080808), uvec2(0x08082b19, 0x19080808), uvec2(0x08190808, 0x19080808), uvec2(0x0819082b, 0x19080808), + uvec2(0x08191919, 0x19080808), uvec2(0x08192b08, 0x19080808), uvec2(0x082b0819, 0x19080808), uvec2(0x082b1908, 0x19080808), + uvec2(0x19080808, 0x19080808), uvec2(0x1908082b, 0x19080808), uvec2(0x19081919, 0x19080808), uvec2(0x19082b08, 0x19080808), + uvec2(0x19082b2b, 0x19080808), uvec2(0x19190819, 0x19080808), uvec2(0x19191908, 0x19080808), uvec2(0x192b0808, 0x19080808), + uvec2(0x192b1919, 0x19080808), uvec2(0x2b080819, 0x19080808), uvec2(0x2b081908, 0x19080808), uvec2(0x2b190808, 0x19080808), + uvec2(0x08080808, 0x19080819), uvec2(0x0808082b, 0x19080819), uvec2(0x08081919, 0x19080819), uvec2(0x08082b08, 0x19080819), + uvec2(0x08190819, 0x19080819), uvec2(0x08191908, 0x19080819), uvec2(0x082b0808, 0x19080819), uvec2(0x19080819, 0x19080819), + uvec2(0x19081908, 0x19080819), uvec2(0x19190808, 0x19080819), uvec2(0x2b080808, 0x19080819), uvec2(0x2b081919, 0x19080819), + uvec2(0x2b2b082b, 0x19080819), uvec2(0x08080819, 0x1908082b), uvec2(0x08081908, 0x1908082b), uvec2(0x08190808, 0x1908082b), + uvec2(0x0819082b, 0x1908082b), uvec2(0x082b2b19, 0x1908082b), uvec2(0x19080808, 0x1908082b), uvec2(0x08080808, 0x19081908), + uvec2(0x0808082b, 0x19081908), uvec2(0x08081919, 0x19081908), uvec2(0x08082b08, 0x19081908), uvec2(0x08190819, 0x19081908), + uvec2(0x08191908, 0x19081908), uvec2(0x08192b19, 0x19081908), uvec2(0x082b0808, 0x19081908), uvec2(0x19080819, 0x19081908), + uvec2(0x19081908, 0x19081908), uvec2(0x19190808, 0x19081908), uvec2(0x2b080808, 0x19081908), uvec2(0x2b191908, 0x19081908), + uvec2(0x08080819, 0x19081919), uvec2(0x08081908, 0x19081919), uvec2(0x08190808, 0x19081919), uvec2(0x082b1908, 0x19081919), + uvec2(0x19080808, 0x19081919), uvec2(0x2b192b2b, 0x19081919), uvec2(0x08080808, 0x1908192b), uvec2(0x08082b2b, 0x1908192b), + uvec2(0x19081908, 0x1908192b), uvec2(0x19190808, 0x1908192b), uvec2(0x08080819, 0x19082b08), uvec2(0x08081908, 0x19082b08), + uvec2(0x08190808, 0x19082b08), uvec2(0x19080808, 0x19082b08), uvec2(0x19081919, 0x19082b08), uvec2(0x19191908, 0x19082b08), + uvec2(0x192b082b, 0x19082b08), uvec2(0x08080808, 0x19082b19), uvec2(0x08190819, 0x19082b19), uvec2(0x19081908, 0x19082b19), + uvec2(0x19190808, 0x19082b19), uvec2(0x192b2b19, 0x19082b19), uvec2(0x08081908, 0x19082b2b), uvec2(0x08080808, 0x19190808), + uvec2(0x0808082b, 0x19190808), uvec2(0x08081919, 0x19190808), uvec2(0x08082b08, 0x19190808), uvec2(0x08190819, 0x19190808), + uvec2(0x08191908, 0x19190808), uvec2(0x082b0808, 0x19190808), uvec2(0x082b2b08, 0x19190808), uvec2(0x19080819, 0x19190808), + uvec2(0x19081908, 0x19190808), uvec2(0x19190808, 0x19190808), uvec2(0x2b080808, 0x19190808), uvec2(0x08080819, 0x19190819), + uvec2(0x08081908, 0x19190819), uvec2(0x08190808, 0x19190819), uvec2(0x08191919, 0x19190819), uvec2(0x19080808, 0x19190819), + uvec2(0x1908082b, 0x19190819), uvec2(0x08080808, 0x1919082b), uvec2(0x19081908, 0x1919082b), uvec2(0x2b2b2b2b, 0x1919082b), + uvec2(0x08080819, 0x19191908), uvec2(0x08081908, 0x19191908), uvec2(0x08190808, 0x19191908), uvec2(0x082b0819, 0x19191908), + uvec2(0x19080808, 0x19191908), uvec2(0x192b0808, 0x19191908), uvec2(0x2b080819, 0x19191908), uvec2(0x2b2b0819, 0x19191908), + uvec2(0x08080808, 0x19191919), uvec2(0x08082b08, 0x19191919), uvec2(0x2b080808, 0x19191919), uvec2(0x2b082b08, 0x19191919), + uvec2(0x082b0819, 0x1919192b), uvec2(0x192b2b08, 0x1919192b), uvec2(0x2b2b0819, 0x1919192b), uvec2(0x08080808, 0x19192b08), + uvec2(0x08191908, 0x19192b08), uvec2(0x19080819, 0x19192b08), uvec2(0x19190808, 0x19192b08), uvec2(0x2b192b19, 0x19192b08), + uvec2(0x08192b2b, 0x19192b19), uvec2(0x19080808, 0x19192b19), uvec2(0x1908082b, 0x19192b19), uvec2(0x2b081919, 0x19192b2b), + uvec2(0x08080819, 0x192b0808), uvec2(0x08081908, 0x192b0808), uvec2(0x08190808, 0x192b0808), uvec2(0x19080808, 0x192b0808), + uvec2(0x19191908, 0x192b0808), uvec2(0x192b082b, 0x192b0808), uvec2(0x2b08192b, 0x192b0808), uvec2(0x2b2b2b19, 0x192b0808), + uvec2(0x08080808, 0x192b0819), uvec2(0x082b1908, 0x192b082b), uvec2(0x19082b2b, 0x192b082b), uvec2(0x2b19082b, 0x192b082b), + uvec2(0x08080808, 0x192b1908), uvec2(0x0819192b, 0x192b1908), uvec2(0x08190808, 0x192b1919), uvec2(0x19080808, 0x192b1919), + uvec2(0x19081919, 0x192b1919), uvec2(0x2b2b1908, 0x192b1919), uvec2(0x08080819, 0x192b2b08), uvec2(0x192b2b2b, 0x192b2b08), + uvec2(0x082b1919, 0x192b2b19), uvec2(0x0808192b, 0x192b2b2b), uvec2(0x19191908, 0x192b2b2b), uvec2(0x192b082b, 0x192b2b2b), + uvec2(0x08080808, 0x2b080808), uvec2(0x0808082b, 0x2b080808), uvec2(0x08081919, 0x2b080808), uvec2(0x08082b08, 0x2b080808), + uvec2(0x08190819, 0x2b080808), uvec2(0x08191908, 0x2b080808), uvec2(0x082b0808, 0x2b080808), uvec2(0x082b2b2b, 0x2b080808), + uvec2(0x19080819, 0x2b080808), uvec2(0x19081908, 0x2b080808), uvec2(0x19190808, 0x2b080808), uvec2(0x2b080808, 0x2b080808), + uvec2(0x2b08082b, 0x2b080808), uvec2(0x2b2b2b08, 0x2b080808), uvec2(0x2b2b2b2b, 0x2b080808), uvec2(0x08080819, 0x2b080819), + uvec2(0x08081908, 0x2b080819), uvec2(0x0808192b, 0x2b080819), uvec2(0x08190808, 0x2b080819), uvec2(0x19080808, 0x2b080819), + uvec2(0x19190819, 0x2b080819), uvec2(0x19192b19, 0x2b080819), uvec2(0x08080808, 0x2b08082b), uvec2(0x082b0808, 0x2b08082b), + uvec2(0x2b080808, 0x2b08082b), uvec2(0x2b08082b, 0x2b08082b), uvec2(0x2b2b0808, 0x2b08082b), uvec2(0x2b2b2b08, 0x2b08082b), + uvec2(0x08080819, 0x2b081908), uvec2(0x08081908, 0x2b081908), uvec2(0x08190808, 0x2b081908), uvec2(0x0819082b, 0x2b081908), + uvec2(0x08191919, 0x2b081908), uvec2(0x19080808, 0x2b081908), uvec2(0x192b0808, 0x2b081908), uvec2(0x2b082b19, 0x2b081908), + uvec2(0x08080808, 0x2b081919), uvec2(0x19081908, 0x2b081919), uvec2(0x2b2b1919, 0x2b081919), uvec2(0x08192b08, 0x2b08192b), + uvec2(0x192b2b2b, 0x2b08192b), uvec2(0x08080808, 0x2b082b08), uvec2(0x08082b08, 0x2b082b08), uvec2(0x082b1919, 0x2b082b08), + uvec2(0x19192b2b, 0x2b082b08), uvec2(0x2b080808, 0x2b082b08), uvec2(0x2b08082b, 0x2b082b08), uvec2(0x2b2b2b08, 0x2b082b08), + uvec2(0x0808192b, 0x2b082b19), uvec2(0x082b082b, 0x2b082b2b), uvec2(0x2b080808, 0x2b082b2b), uvec2(0x2b082b08, 0x2b082b2b), + uvec2(0x2b19192b, 0x2b082b2b), uvec2(0x2b2b2b08, 0x2b082b2b), uvec2(0x08080819, 0x2b190808), uvec2(0x08081908, 0x2b190808), + uvec2(0x08190808, 0x2b190808), uvec2(0x19080808, 0x2b190808), uvec2(0x1919192b, 0x2b190808), uvec2(0x2b081908, 0x2b190808), + uvec2(0x08080808, 0x2b190819), uvec2(0x082b082b, 0x2b190819), uvec2(0x192b1908, 0x2b190819), uvec2(0x1919192b, 0x2b19082b), + uvec2(0x2b082b19, 0x2b19082b), uvec2(0x08080808, 0x2b191908), uvec2(0x08081919, 0x2b191908), uvec2(0x19081908, 0x2b191908), + uvec2(0x19190808, 0x2b191908), uvec2(0x19192b08, 0x2b191908), uvec2(0x082b2b19, 0x2b191919), uvec2(0x2b190808, 0x2b191919), + uvec2(0x2b19082b, 0x2b191919), uvec2(0x19080819, 0x2b19192b), uvec2(0x19190819, 0x2b192b08), uvec2(0x2b2b192b, 0x2b192b08), + uvec2(0x19082b19, 0x2b192b19), uvec2(0x08191919, 0x2b192b2b), uvec2(0x192b0808, 0x2b192b2b), uvec2(0x08080808, 0x2b2b0808), + uvec2(0x0808082b, 0x2b2b0808), uvec2(0x08082b08, 0x2b2b0808), uvec2(0x08082b2b, 0x2b2b0808), uvec2(0x082b0808, 0x2b2b0808), + uvec2(0x082b2b2b, 0x2b2b0808), uvec2(0x2b2b0808, 0x2b2b0808), uvec2(0x19190819, 0x2b2b0819), uvec2(0x19192b19, 0x2b2b0819), + uvec2(0x2b2b192b, 0x2b2b0819), uvec2(0x08080808, 0x2b2b082b), uvec2(0x0808082b, 0x2b2b082b), uvec2(0x08082b08, 0x2b2b082b), + uvec2(0x082b2b2b, 0x2b2b082b), uvec2(0x2b080808, 0x2b2b082b), uvec2(0x2b2b0808, 0x2b2b082b), uvec2(0x19080808, 0x2b2b1908), + uvec2(0x2b191919, 0x2b2b1908), uvec2(0x192b1919, 0x2b2b192b), uvec2(0x2b192b08, 0x2b2b192b), uvec2(0x08082b2b, 0x2b2b2b08), + uvec2(0x082b0808, 0x2b2b2b08), uvec2(0x082b082b, 0x2b2b2b08), uvec2(0x082b2b08, 0x2b2b2b08), uvec2(0x2b2b0808, 0x2b2b2b08), + uvec2(0x2b2b2b08, 0x2b2b2b08), uvec2(0x08081908, 0x2b2b2b19), uvec2(0x2b081908, 0x2b2b2b19), uvec2(0x2b08192b, 0x2b2b2b19), + uvec2(0x082b2b08, 0x2b2b2b2b), uvec2(0x082b2b2b, 0x2b2b2b2b), uvec2(0x2b190819, 0x2b2b2b2b), uvec2(0x2b2b2b2b, 0x2b2b2b2b), +}; + +shared uvec2 iq2xs_grid[512]; + +void init_iq_shmem(uvec3 wgsize) +{ + // copy the table into shared memory and sync + for (uint i = gl_LocalInvocationIndex.x; i < iq2xs_grid.length(); i += wgsize.x) { + iq2xs_grid[i] = iq2xs_grid_const[i]; + } + barrier(); +} + +#define QUANT_K QUANT_K_IQ2_XS +#define QUANT_R QUANT_R_IQ2_XS +#define A_TYPE block_iq2_xs +#define A_TYPE_PACKED16 block_iq2_xs_packed16 +#endif + +#define QUANT_K_IQ2_S 256 +#define QUANT_R_IQ2_S 1 + +struct block_iq2_s +{ + float16_t d; + uint8_t qs[QUANT_K_IQ2_S/4]; + uint8_t qh[QUANT_K_IQ2_S/32]; + uint8_t scales[QUANT_K_IQ2_S/32]; +}; + +#if defined(DATA_A_IQ2_S) + +const uvec2 iq2s_grid_const[1024] = { + uvec2(0x08080808, 0x08080808), uvec2(0x0808082b, 0x08080808), uvec2(0x08081919, 0x08080808), uvec2(0x08082b08, 0x08080808), + uvec2(0x08082b2b, 0x08080808), uvec2(0x08190819, 0x08080808), uvec2(0x08191908, 0x08080808), uvec2(0x0819192b, 0x08080808), + uvec2(0x08192b19, 0x08080808), uvec2(0x082b0808, 0x08080808), uvec2(0x082b082b, 0x08080808), uvec2(0x082b1919, 0x08080808), + uvec2(0x082b2b08, 0x08080808), uvec2(0x19080819, 0x08080808), uvec2(0x19081908, 0x08080808), uvec2(0x1908192b, 0x08080808), + uvec2(0x19082b19, 0x08080808), uvec2(0x19190808, 0x08080808), uvec2(0x1919082b, 0x08080808), uvec2(0x19191919, 0x08080808), + uvec2(0x19192b08, 0x08080808), uvec2(0x192b0819, 0x08080808), uvec2(0x192b1908, 0x08080808), uvec2(0x192b192b, 0x08080808), + uvec2(0x192b2b19, 0x08080808), uvec2(0x2b080808, 0x08080808), uvec2(0x2b08082b, 0x08080808), uvec2(0x2b081919, 0x08080808), + uvec2(0x2b082b08, 0x08080808), uvec2(0x2b190819, 0x08080808), uvec2(0x2b191908, 0x08080808), uvec2(0x2b2b0808, 0x08080808), + uvec2(0x2b2b1919, 0x08080808), uvec2(0x2b2b2b2b, 0x08080808), uvec2(0x08080819, 0x08080819), uvec2(0x08081908, 0x08080819), + uvec2(0x0808192b, 0x08080819), uvec2(0x08082b19, 0x08080819), uvec2(0x08190808, 0x08080819), uvec2(0x0819082b, 0x08080819), + uvec2(0x08191919, 0x08080819), uvec2(0x08192b08, 0x08080819), uvec2(0x082b0819, 0x08080819), uvec2(0x082b1908, 0x08080819), + uvec2(0x19080808, 0x08080819), uvec2(0x1908082b, 0x08080819), uvec2(0x19081919, 0x08080819), uvec2(0x19082b08, 0x08080819), + uvec2(0x19190819, 0x08080819), uvec2(0x19191908, 0x08080819), uvec2(0x1919192b, 0x08080819), uvec2(0x19192b19, 0x08080819), + uvec2(0x192b0808, 0x08080819), uvec2(0x192b1919, 0x08080819), uvec2(0x192b2b08, 0x08080819), uvec2(0x2b080819, 0x08080819), + uvec2(0x2b081908, 0x08080819), uvec2(0x2b190808, 0x08080819), uvec2(0x2b19082b, 0x08080819), uvec2(0x2b191919, 0x08080819), + uvec2(0x2b2b0819, 0x08080819), uvec2(0x2b2b1908, 0x08080819), uvec2(0x08080808, 0x0808082b), uvec2(0x0808082b, 0x0808082b), + uvec2(0x08081919, 0x0808082b), uvec2(0x08082b08, 0x0808082b), uvec2(0x08190819, 0x0808082b), uvec2(0x08191908, 0x0808082b), + uvec2(0x082b0808, 0x0808082b), uvec2(0x082b2b2b, 0x0808082b), uvec2(0x19080819, 0x0808082b), uvec2(0x19081908, 0x0808082b), + uvec2(0x1908192b, 0x0808082b), uvec2(0x19082b19, 0x0808082b), uvec2(0x19190808, 0x0808082b), uvec2(0x19191919, 0x0808082b), + uvec2(0x2b080808, 0x0808082b), uvec2(0x2b081919, 0x0808082b), uvec2(0x2b082b2b, 0x0808082b), uvec2(0x2b191908, 0x0808082b), + uvec2(0x2b2b082b, 0x0808082b), uvec2(0x08080819, 0x08081908), uvec2(0x08081908, 0x08081908), uvec2(0x0808192b, 0x08081908), + uvec2(0x08082b19, 0x08081908), uvec2(0x08190808, 0x08081908), uvec2(0x0819082b, 0x08081908), uvec2(0x08191919, 0x08081908), + uvec2(0x08192b08, 0x08081908), uvec2(0x082b0819, 0x08081908), uvec2(0x082b1908, 0x08081908), uvec2(0x082b192b, 0x08081908), + uvec2(0x082b2b19, 0x08081908), uvec2(0x19080808, 0x08081908), uvec2(0x1908082b, 0x08081908), uvec2(0x19081919, 0x08081908), + uvec2(0x19082b08, 0x08081908), uvec2(0x19082b2b, 0x08081908), uvec2(0x19190819, 0x08081908), uvec2(0x19191908, 0x08081908), + uvec2(0x1919192b, 0x08081908), uvec2(0x19192b19, 0x08081908), uvec2(0x192b0808, 0x08081908), uvec2(0x192b082b, 0x08081908), + uvec2(0x192b1919, 0x08081908), uvec2(0x2b080819, 0x08081908), uvec2(0x2b081908, 0x08081908), uvec2(0x2b08192b, 0x08081908), + uvec2(0x2b082b19, 0x08081908), uvec2(0x2b190808, 0x08081908), uvec2(0x2b191919, 0x08081908), uvec2(0x2b192b08, 0x08081908), + uvec2(0x2b2b0819, 0x08081908), uvec2(0x2b2b1908, 0x08081908), uvec2(0x08080808, 0x08081919), uvec2(0x0808082b, 0x08081919), + uvec2(0x08081919, 0x08081919), uvec2(0x08082b08, 0x08081919), uvec2(0x08082b2b, 0x08081919), uvec2(0x08190819, 0x08081919), + uvec2(0x08191908, 0x08081919), uvec2(0x0819192b, 0x08081919), uvec2(0x08192b19, 0x08081919), uvec2(0x082b0808, 0x08081919), + uvec2(0x082b1919, 0x08081919), uvec2(0x082b2b08, 0x08081919), uvec2(0x19080819, 0x08081919), uvec2(0x19081908, 0x08081919), + uvec2(0x1908192b, 0x08081919), uvec2(0x19082b19, 0x08081919), uvec2(0x19190808, 0x08081919), uvec2(0x1919082b, 0x08081919), + uvec2(0x19191919, 0x08081919), uvec2(0x19192b08, 0x08081919), uvec2(0x192b0819, 0x08081919), uvec2(0x192b1908, 0x08081919), + uvec2(0x2b080808, 0x08081919), uvec2(0x2b08082b, 0x08081919), uvec2(0x2b081919, 0x08081919), uvec2(0x2b082b08, 0x08081919), + uvec2(0x2b190819, 0x08081919), uvec2(0x2b191908, 0x08081919), uvec2(0x2b2b0808, 0x08081919), uvec2(0x08080819, 0x0808192b), + uvec2(0x08081908, 0x0808192b), uvec2(0x0808192b, 0x0808192b), uvec2(0x08082b19, 0x0808192b), uvec2(0x08190808, 0x0808192b), + uvec2(0x08191919, 0x0808192b), uvec2(0x19080808, 0x0808192b), uvec2(0x19081919, 0x0808192b), uvec2(0x19082b08, 0x0808192b), + uvec2(0x19190819, 0x0808192b), uvec2(0x19191908, 0x0808192b), uvec2(0x192b0808, 0x0808192b), uvec2(0x2b080819, 0x0808192b), + uvec2(0x2b081908, 0x0808192b), uvec2(0x2b190808, 0x0808192b), uvec2(0x08080808, 0x08082b08), uvec2(0x0808082b, 0x08082b08), + uvec2(0x08081919, 0x08082b08), uvec2(0x08082b08, 0x08082b08), uvec2(0x08190819, 0x08082b08), uvec2(0x08191908, 0x08082b08), + uvec2(0x0819192b, 0x08082b08), uvec2(0x08192b19, 0x08082b08), uvec2(0x082b0808, 0x08082b08), uvec2(0x082b1919, 0x08082b08), + uvec2(0x082b2b2b, 0x08082b08), uvec2(0x19080819, 0x08082b08), uvec2(0x19081908, 0x08082b08), uvec2(0x1908192b, 0x08082b08), + uvec2(0x19082b19, 0x08082b08), uvec2(0x19190808, 0x08082b08), uvec2(0x1919082b, 0x08082b08), uvec2(0x19191919, 0x08082b08), + uvec2(0x19192b08, 0x08082b08), uvec2(0x192b0819, 0x08082b08), uvec2(0x192b1908, 0x08082b08), uvec2(0x2b080808, 0x08082b08), + uvec2(0x2b081919, 0x08082b08), uvec2(0x2b191908, 0x08082b08), uvec2(0x2b2b2b2b, 0x08082b08), uvec2(0x08080819, 0x08082b19), + uvec2(0x08081908, 0x08082b19), uvec2(0x08190808, 0x08082b19), uvec2(0x0819082b, 0x08082b19), uvec2(0x08191919, 0x08082b19), + uvec2(0x08192b08, 0x08082b19), uvec2(0x082b0819, 0x08082b19), uvec2(0x19080808, 0x08082b19), uvec2(0x19081919, 0x08082b19), + uvec2(0x19082b08, 0x08082b19), uvec2(0x19190819, 0x08082b19), uvec2(0x19191908, 0x08082b19), uvec2(0x192b0808, 0x08082b19), + uvec2(0x2b080819, 0x08082b19), uvec2(0x2b190808, 0x08082b19), uvec2(0x08080808, 0x08082b2b), uvec2(0x08190819, 0x08082b2b), + uvec2(0x08191908, 0x08082b2b), uvec2(0x082b082b, 0x08082b2b), uvec2(0x082b2b08, 0x08082b2b), uvec2(0x082b2b2b, 0x08082b2b), + uvec2(0x19190808, 0x08082b2b), uvec2(0x2b192b19, 0x08082b2b), uvec2(0x08080819, 0x08190808), uvec2(0x08081908, 0x08190808), + uvec2(0x0808192b, 0x08190808), uvec2(0x08082b19, 0x08190808), uvec2(0x08190808, 0x08190808), uvec2(0x0819082b, 0x08190808), + uvec2(0x08191919, 0x08190808), uvec2(0x08192b08, 0x08190808), uvec2(0x082b0819, 0x08190808), uvec2(0x082b1908, 0x08190808), + uvec2(0x082b192b, 0x08190808), uvec2(0x19080808, 0x08190808), uvec2(0x1908082b, 0x08190808), uvec2(0x19081919, 0x08190808), + uvec2(0x19082b08, 0x08190808), uvec2(0x19190819, 0x08190808), uvec2(0x19191908, 0x08190808), uvec2(0x1919192b, 0x08190808), + uvec2(0x19192b19, 0x08190808), uvec2(0x192b0808, 0x08190808), uvec2(0x192b082b, 0x08190808), uvec2(0x192b1919, 0x08190808), + uvec2(0x192b2b08, 0x08190808), uvec2(0x2b080819, 0x08190808), uvec2(0x2b081908, 0x08190808), uvec2(0x2b08192b, 0x08190808), + uvec2(0x2b190808, 0x08190808), uvec2(0x2b191919, 0x08190808), uvec2(0x2b192b08, 0x08190808), uvec2(0x2b2b0819, 0x08190808), + uvec2(0x2b2b1908, 0x08190808), uvec2(0x08080808, 0x08190819), uvec2(0x0808082b, 0x08190819), uvec2(0x08081919, 0x08190819), + uvec2(0x08082b08, 0x08190819), uvec2(0x08082b2b, 0x08190819), uvec2(0x08190819, 0x08190819), uvec2(0x08191908, 0x08190819), + uvec2(0x0819192b, 0x08190819), uvec2(0x08192b19, 0x08190819), uvec2(0x082b0808, 0x08190819), uvec2(0x082b082b, 0x08190819), + uvec2(0x082b1919, 0x08190819), uvec2(0x082b2b08, 0x08190819), uvec2(0x19080819, 0x08190819), uvec2(0x19081908, 0x08190819), + uvec2(0x1908192b, 0x08190819), uvec2(0x19082b19, 0x08190819), uvec2(0x19190808, 0x08190819), uvec2(0x1919082b, 0x08190819), + uvec2(0x19191919, 0x08190819), uvec2(0x19192b08, 0x08190819), uvec2(0x192b0819, 0x08190819), uvec2(0x192b1908, 0x08190819), + uvec2(0x2b080808, 0x08190819), uvec2(0x2b08082b, 0x08190819), uvec2(0x2b081919, 0x08190819), uvec2(0x2b082b08, 0x08190819), + uvec2(0x2b190819, 0x08190819), uvec2(0x2b191908, 0x08190819), uvec2(0x08080819, 0x0819082b), uvec2(0x08081908, 0x0819082b), + uvec2(0x08082b19, 0x0819082b), uvec2(0x08190808, 0x0819082b), uvec2(0x08191919, 0x0819082b), uvec2(0x082b0819, 0x0819082b), + uvec2(0x082b1908, 0x0819082b), uvec2(0x19080808, 0x0819082b), uvec2(0x19081919, 0x0819082b), uvec2(0x19190819, 0x0819082b), + uvec2(0x19191908, 0x0819082b), uvec2(0x2b080819, 0x0819082b), uvec2(0x2b081908, 0x0819082b), uvec2(0x2b190808, 0x0819082b), + uvec2(0x08080808, 0x08191908), uvec2(0x0808082b, 0x08191908), uvec2(0x08081919, 0x08191908), uvec2(0x08082b08, 0x08191908), + uvec2(0x08190819, 0x08191908), uvec2(0x08191908, 0x08191908), uvec2(0x0819192b, 0x08191908), uvec2(0x08192b19, 0x08191908), + uvec2(0x082b0808, 0x08191908), uvec2(0x082b1919, 0x08191908), uvec2(0x082b2b08, 0x08191908), uvec2(0x19080819, 0x08191908), + uvec2(0x19081908, 0x08191908), uvec2(0x1908192b, 0x08191908), uvec2(0x19082b19, 0x08191908), uvec2(0x19190808, 0x08191908), + uvec2(0x1919082b, 0x08191908), uvec2(0x19191919, 0x08191908), uvec2(0x19192b08, 0x08191908), uvec2(0x192b0819, 0x08191908), + uvec2(0x192b1908, 0x08191908), uvec2(0x2b080808, 0x08191908), uvec2(0x2b08082b, 0x08191908), uvec2(0x2b081919, 0x08191908), + uvec2(0x2b082b08, 0x08191908), uvec2(0x2b190819, 0x08191908), uvec2(0x2b191908, 0x08191908), uvec2(0x2b2b0808, 0x08191908), + uvec2(0x08080819, 0x08191919), uvec2(0x08081908, 0x08191919), uvec2(0x0808192b, 0x08191919), uvec2(0x08082b19, 0x08191919), + uvec2(0x08190808, 0x08191919), uvec2(0x0819082b, 0x08191919), uvec2(0x08191919, 0x08191919), uvec2(0x08192b08, 0x08191919), + uvec2(0x082b0819, 0x08191919), uvec2(0x082b1908, 0x08191919), uvec2(0x19080808, 0x08191919), uvec2(0x1908082b, 0x08191919), + uvec2(0x19081919, 0x08191919), uvec2(0x19082b08, 0x08191919), uvec2(0x19190819, 0x08191919), uvec2(0x19191908, 0x08191919), + uvec2(0x192b0808, 0x08191919), uvec2(0x2b080819, 0x08191919), uvec2(0x2b081908, 0x08191919), uvec2(0x2b190808, 0x08191919), + uvec2(0x08080808, 0x0819192b), uvec2(0x08081919, 0x0819192b), uvec2(0x08082b08, 0x0819192b), uvec2(0x08190819, 0x0819192b), + uvec2(0x08191908, 0x0819192b), uvec2(0x082b0808, 0x0819192b), uvec2(0x19080819, 0x0819192b), uvec2(0x19081908, 0x0819192b), + uvec2(0x19190808, 0x0819192b), uvec2(0x2b080808, 0x0819192b), uvec2(0x2b2b2b2b, 0x0819192b), uvec2(0x08080819, 0x08192b08), + uvec2(0x08081908, 0x08192b08), uvec2(0x0808192b, 0x08192b08), uvec2(0x08082b19, 0x08192b08), uvec2(0x08190808, 0x08192b08), + uvec2(0x08191919, 0x08192b08), uvec2(0x08192b08, 0x08192b08), uvec2(0x082b0819, 0x08192b08), uvec2(0x19080808, 0x08192b08), + uvec2(0x1908082b, 0x08192b08), uvec2(0x19081919, 0x08192b08), uvec2(0x19082b08, 0x08192b08), uvec2(0x19190819, 0x08192b08), + uvec2(0x19191908, 0x08192b08), uvec2(0x192b0808, 0x08192b08), uvec2(0x2b080819, 0x08192b08), uvec2(0x2b081908, 0x08192b08), + uvec2(0x08080808, 0x08192b19), uvec2(0x0808082b, 0x08192b19), uvec2(0x08081919, 0x08192b19), uvec2(0x08082b08, 0x08192b19), + uvec2(0x08190819, 0x08192b19), uvec2(0x08191908, 0x08192b19), uvec2(0x082b0808, 0x08192b19), uvec2(0x19080819, 0x08192b19), + uvec2(0x19081908, 0x08192b19), uvec2(0x19190808, 0x08192b19), uvec2(0x192b2b19, 0x08192b19), uvec2(0x2b2b082b, 0x08192b19), + uvec2(0x08081908, 0x08192b2b), uvec2(0x08190808, 0x08192b2b), uvec2(0x19080808, 0x08192b2b), uvec2(0x1919192b, 0x08192b2b), + uvec2(0x08080808, 0x082b0808), uvec2(0x0808082b, 0x082b0808), uvec2(0x08081919, 0x082b0808), uvec2(0x08082b08, 0x082b0808), + uvec2(0x08190819, 0x082b0808), uvec2(0x08191908, 0x082b0808), uvec2(0x0819192b, 0x082b0808), uvec2(0x08192b19, 0x082b0808), + uvec2(0x082b0808, 0x082b0808), uvec2(0x082b1919, 0x082b0808), uvec2(0x082b2b2b, 0x082b0808), uvec2(0x19080819, 0x082b0808), + uvec2(0x19081908, 0x082b0808), uvec2(0x19190808, 0x082b0808), uvec2(0x1919082b, 0x082b0808), uvec2(0x19191919, 0x082b0808), + uvec2(0x192b1908, 0x082b0808), uvec2(0x2b080808, 0x082b0808), uvec2(0x2b082b2b, 0x082b0808), uvec2(0x2b191908, 0x082b0808), + uvec2(0x2b2b2b2b, 0x082b0808), uvec2(0x08080819, 0x082b0819), uvec2(0x08081908, 0x082b0819), uvec2(0x08190808, 0x082b0819), + uvec2(0x0819082b, 0x082b0819), uvec2(0x08191919, 0x082b0819), uvec2(0x082b0819, 0x082b0819), uvec2(0x19080808, 0x082b0819), + uvec2(0x1908082b, 0x082b0819), uvec2(0x19081919, 0x082b0819), uvec2(0x19190819, 0x082b0819), uvec2(0x19191908, 0x082b0819), + uvec2(0x192b0808, 0x082b0819), uvec2(0x2b080819, 0x082b0819), uvec2(0x2b081908, 0x082b0819), uvec2(0x2b190808, 0x082b0819), + uvec2(0x08080808, 0x082b082b), uvec2(0x08082b2b, 0x082b082b), uvec2(0x082b082b, 0x082b082b), uvec2(0x082b2b08, 0x082b082b), + uvec2(0x082b2b2b, 0x082b082b), uvec2(0x19081908, 0x082b082b), uvec2(0x19190808, 0x082b082b), uvec2(0x2b082b08, 0x082b082b), + uvec2(0x2b082b2b, 0x082b082b), uvec2(0x2b2b2b08, 0x082b082b), uvec2(0x08080819, 0x082b1908), uvec2(0x08081908, 0x082b1908), + uvec2(0x0808192b, 0x082b1908), uvec2(0x08082b19, 0x082b1908), uvec2(0x08190808, 0x082b1908), uvec2(0x08191919, 0x082b1908), + uvec2(0x08192b08, 0x082b1908), uvec2(0x082b0819, 0x082b1908), uvec2(0x082b1908, 0x082b1908), uvec2(0x19080808, 0x082b1908), + uvec2(0x1908082b, 0x082b1908), uvec2(0x19081919, 0x082b1908), uvec2(0x19082b08, 0x082b1908), uvec2(0x19190819, 0x082b1908), + uvec2(0x19191908, 0x082b1908), uvec2(0x192b0808, 0x082b1908), uvec2(0x2b080819, 0x082b1908), uvec2(0x2b081908, 0x082b1908), + uvec2(0x2b190808, 0x082b1908), uvec2(0x08080808, 0x082b1919), uvec2(0x08081919, 0x082b1919), uvec2(0x08082b08, 0x082b1919), + uvec2(0x08190819, 0x082b1919), uvec2(0x08191908, 0x082b1919), uvec2(0x082b0808, 0x082b1919), uvec2(0x19080819, 0x082b1919), + uvec2(0x19081908, 0x082b1919), uvec2(0x19190808, 0x082b1919), uvec2(0x192b192b, 0x082b1919), uvec2(0x2b080808, 0x082b1919), + uvec2(0x08080819, 0x082b192b), uvec2(0x08081908, 0x082b192b), uvec2(0x08190808, 0x082b192b), uvec2(0x19080808, 0x082b192b), + uvec2(0x19192b19, 0x082b192b), uvec2(0x08080808, 0x082b2b08), uvec2(0x08081919, 0x082b2b08), uvec2(0x08190819, 0x082b2b08), + uvec2(0x08191908, 0x082b2b08), uvec2(0x19080819, 0x082b2b08), uvec2(0x19081908, 0x082b2b08), uvec2(0x19190808, 0x082b2b08), + uvec2(0x2b082b2b, 0x082b2b08), uvec2(0x2b2b2b2b, 0x082b2b08), uvec2(0x08080819, 0x082b2b19), uvec2(0x08081908, 0x082b2b19), + uvec2(0x08190808, 0x082b2b19), uvec2(0x2b191919, 0x082b2b19), uvec2(0x08082b2b, 0x082b2b2b), uvec2(0x082b082b, 0x082b2b2b), + uvec2(0x192b1908, 0x082b2b2b), uvec2(0x2b082b08, 0x082b2b2b), uvec2(0x2b082b2b, 0x082b2b2b), uvec2(0x08080819, 0x19080808), + uvec2(0x08081908, 0x19080808), uvec2(0x0808192b, 0x19080808), uvec2(0x08082b19, 0x19080808), uvec2(0x08190808, 0x19080808), + uvec2(0x0819082b, 0x19080808), uvec2(0x08191919, 0x19080808), uvec2(0x08192b08, 0x19080808), uvec2(0x08192b2b, 0x19080808), + uvec2(0x082b0819, 0x19080808), uvec2(0x082b1908, 0x19080808), uvec2(0x082b192b, 0x19080808), uvec2(0x19080808, 0x19080808), + uvec2(0x1908082b, 0x19080808), uvec2(0x19081919, 0x19080808), uvec2(0x19082b08, 0x19080808), uvec2(0x19082b2b, 0x19080808), + uvec2(0x19190819, 0x19080808), uvec2(0x19191908, 0x19080808), uvec2(0x1919192b, 0x19080808), uvec2(0x19192b19, 0x19080808), + uvec2(0x192b0808, 0x19080808), uvec2(0x192b082b, 0x19080808), uvec2(0x192b1919, 0x19080808), uvec2(0x2b080819, 0x19080808), + uvec2(0x2b081908, 0x19080808), uvec2(0x2b190808, 0x19080808), uvec2(0x2b191919, 0x19080808), uvec2(0x2b192b08, 0x19080808), + uvec2(0x2b2b0819, 0x19080808), uvec2(0x2b2b1908, 0x19080808), uvec2(0x08080808, 0x19080819), uvec2(0x0808082b, 0x19080819), + uvec2(0x08081919, 0x19080819), uvec2(0x08082b08, 0x19080819), uvec2(0x08190819, 0x19080819), uvec2(0x08191908, 0x19080819), + uvec2(0x0819192b, 0x19080819), uvec2(0x08192b19, 0x19080819), uvec2(0x082b0808, 0x19080819), uvec2(0x082b082b, 0x19080819), + uvec2(0x082b1919, 0x19080819), uvec2(0x19080819, 0x19080819), uvec2(0x19081908, 0x19080819), uvec2(0x1908192b, 0x19080819), + uvec2(0x19082b19, 0x19080819), uvec2(0x19190808, 0x19080819), uvec2(0x1919082b, 0x19080819), uvec2(0x19191919, 0x19080819), + uvec2(0x19192b08, 0x19080819), uvec2(0x192b0819, 0x19080819), uvec2(0x192b1908, 0x19080819), uvec2(0x2b080808, 0x19080819), + uvec2(0x2b08082b, 0x19080819), uvec2(0x2b081919, 0x19080819), uvec2(0x2b082b08, 0x19080819), uvec2(0x2b190819, 0x19080819), + uvec2(0x2b191908, 0x19080819), uvec2(0x2b2b0808, 0x19080819), uvec2(0x08080819, 0x1908082b), uvec2(0x08081908, 0x1908082b), + uvec2(0x08190808, 0x1908082b), uvec2(0x0819082b, 0x1908082b), uvec2(0x08191919, 0x1908082b), uvec2(0x08192b08, 0x1908082b), + uvec2(0x082b1908, 0x1908082b), uvec2(0x19080808, 0x1908082b), uvec2(0x19081919, 0x1908082b), uvec2(0x19082b08, 0x1908082b), + uvec2(0x19190819, 0x1908082b), uvec2(0x19191908, 0x1908082b), uvec2(0x192b0808, 0x1908082b), uvec2(0x2b080819, 0x1908082b), + uvec2(0x2b081908, 0x1908082b), uvec2(0x08080808, 0x19081908), uvec2(0x0808082b, 0x19081908), uvec2(0x08081919, 0x19081908), + uvec2(0x08082b08, 0x19081908), uvec2(0x08082b2b, 0x19081908), uvec2(0x08190819, 0x19081908), uvec2(0x08191908, 0x19081908), + uvec2(0x0819192b, 0x19081908), uvec2(0x08192b19, 0x19081908), uvec2(0x082b0808, 0x19081908), uvec2(0x082b082b, 0x19081908), + uvec2(0x082b1919, 0x19081908), uvec2(0x082b2b08, 0x19081908), uvec2(0x19080819, 0x19081908), uvec2(0x19081908, 0x19081908), + uvec2(0x1908192b, 0x19081908), uvec2(0x19082b19, 0x19081908), uvec2(0x19190808, 0x19081908), uvec2(0x1919082b, 0x19081908), + uvec2(0x19191919, 0x19081908), uvec2(0x19192b08, 0x19081908), uvec2(0x192b0819, 0x19081908), uvec2(0x192b1908, 0x19081908), + uvec2(0x2b080808, 0x19081908), uvec2(0x2b08082b, 0x19081908), uvec2(0x2b081919, 0x19081908), uvec2(0x2b082b08, 0x19081908), + uvec2(0x2b190819, 0x19081908), uvec2(0x2b191908, 0x19081908), uvec2(0x2b2b0808, 0x19081908), uvec2(0x08080819, 0x19081919), + uvec2(0x08081908, 0x19081919), uvec2(0x0808192b, 0x19081919), uvec2(0x08082b19, 0x19081919), uvec2(0x08190808, 0x19081919), + uvec2(0x0819082b, 0x19081919), uvec2(0x08191919, 0x19081919), uvec2(0x08192b08, 0x19081919), uvec2(0x082b0819, 0x19081919), + uvec2(0x082b1908, 0x19081919), uvec2(0x19080808, 0x19081919), uvec2(0x1908082b, 0x19081919), uvec2(0x19081919, 0x19081919), + uvec2(0x19082b08, 0x19081919), uvec2(0x19190819, 0x19081919), uvec2(0x19191908, 0x19081919), uvec2(0x192b0808, 0x19081919), + uvec2(0x192b2b2b, 0x19081919), uvec2(0x2b080819, 0x19081919), uvec2(0x2b081908, 0x19081919), uvec2(0x2b190808, 0x19081919), + uvec2(0x08080808, 0x1908192b), uvec2(0x0808082b, 0x1908192b), uvec2(0x08081919, 0x1908192b), uvec2(0x08082b08, 0x1908192b), + uvec2(0x08190819, 0x1908192b), uvec2(0x08191908, 0x1908192b), uvec2(0x082b0808, 0x1908192b), uvec2(0x19080819, 0x1908192b), + uvec2(0x19081908, 0x1908192b), uvec2(0x19190808, 0x1908192b), uvec2(0x2b080808, 0x1908192b), uvec2(0x2b2b1919, 0x1908192b), + uvec2(0x08080819, 0x19082b08), uvec2(0x08081908, 0x19082b08), uvec2(0x08082b19, 0x19082b08), uvec2(0x08190808, 0x19082b08), + uvec2(0x0819082b, 0x19082b08), uvec2(0x08191919, 0x19082b08), uvec2(0x08192b08, 0x19082b08), uvec2(0x082b0819, 0x19082b08), + uvec2(0x082b1908, 0x19082b08), uvec2(0x19080808, 0x19082b08), uvec2(0x1908082b, 0x19082b08), uvec2(0x19081919, 0x19082b08), + uvec2(0x19082b08, 0x19082b08), uvec2(0x19190819, 0x19082b08), uvec2(0x19191908, 0x19082b08), uvec2(0x192b0808, 0x19082b08), + uvec2(0x2b081908, 0x19082b08), uvec2(0x2b190808, 0x19082b08), uvec2(0x08080808, 0x19082b19), uvec2(0x0808082b, 0x19082b19), + uvec2(0x08081919, 0x19082b19), uvec2(0x08082b08, 0x19082b19), uvec2(0x08190819, 0x19082b19), uvec2(0x08191908, 0x19082b19), + uvec2(0x082b0808, 0x19082b19), uvec2(0x19080819, 0x19082b19), uvec2(0x19081908, 0x19082b19), uvec2(0x19190808, 0x19082b19), + uvec2(0x2b080808, 0x19082b19), uvec2(0x2b19192b, 0x19082b19), uvec2(0x08080819, 0x19082b2b), uvec2(0x08081908, 0x19082b2b), + uvec2(0x08190808, 0x19082b2b), uvec2(0x19080808, 0x19082b2b), uvec2(0x08080808, 0x19190808), uvec2(0x0808082b, 0x19190808), + uvec2(0x08081919, 0x19190808), uvec2(0x08082b08, 0x19190808), uvec2(0x08190819, 0x19190808), uvec2(0x08191908, 0x19190808), + uvec2(0x0819192b, 0x19190808), uvec2(0x08192b19, 0x19190808), uvec2(0x082b0808, 0x19190808), uvec2(0x082b082b, 0x19190808), + uvec2(0x082b1919, 0x19190808), uvec2(0x082b2b08, 0x19190808), uvec2(0x19080819, 0x19190808), uvec2(0x19081908, 0x19190808), + uvec2(0x1908192b, 0x19190808), uvec2(0x19082b19, 0x19190808), uvec2(0x19190808, 0x19190808), uvec2(0x1919082b, 0x19190808), + uvec2(0x19191919, 0x19190808), uvec2(0x19192b08, 0x19190808), uvec2(0x192b0819, 0x19190808), uvec2(0x192b1908, 0x19190808), + uvec2(0x2b080808, 0x19190808), uvec2(0x2b08082b, 0x19190808), uvec2(0x2b081919, 0x19190808), uvec2(0x2b082b08, 0x19190808), + uvec2(0x2b190819, 0x19190808), uvec2(0x2b191908, 0x19190808), uvec2(0x08080819, 0x19190819), uvec2(0x08081908, 0x19190819), + uvec2(0x0808192b, 0x19190819), uvec2(0x08082b19, 0x19190819), uvec2(0x08190808, 0x19190819), uvec2(0x0819082b, 0x19190819), + uvec2(0x08191919, 0x19190819), uvec2(0x08192b08, 0x19190819), uvec2(0x082b0819, 0x19190819), uvec2(0x082b1908, 0x19190819), + uvec2(0x19080808, 0x19190819), uvec2(0x1908082b, 0x19190819), uvec2(0x19081919, 0x19190819), uvec2(0x19082b08, 0x19190819), + uvec2(0x19190819, 0x19190819), uvec2(0x19191908, 0x19190819), uvec2(0x192b0808, 0x19190819), uvec2(0x2b080819, 0x19190819), + uvec2(0x2b081908, 0x19190819), uvec2(0x2b190808, 0x19190819), uvec2(0x08080808, 0x1919082b), uvec2(0x08081919, 0x1919082b), + uvec2(0x08082b08, 0x1919082b), uvec2(0x08190819, 0x1919082b), uvec2(0x08191908, 0x1919082b), uvec2(0x082b0808, 0x1919082b), + uvec2(0x19080819, 0x1919082b), uvec2(0x19081908, 0x1919082b), uvec2(0x19190808, 0x1919082b), uvec2(0x192b2b19, 0x1919082b), + uvec2(0x2b080808, 0x1919082b), uvec2(0x08080819, 0x19191908), uvec2(0x08081908, 0x19191908), uvec2(0x0808192b, 0x19191908), + uvec2(0x08082b19, 0x19191908), uvec2(0x08190808, 0x19191908), uvec2(0x0819082b, 0x19191908), uvec2(0x08191919, 0x19191908), + uvec2(0x08192b08, 0x19191908), uvec2(0x082b0819, 0x19191908), uvec2(0x082b1908, 0x19191908), uvec2(0x19080808, 0x19191908), + uvec2(0x1908082b, 0x19191908), uvec2(0x19081919, 0x19191908), uvec2(0x19082b08, 0x19191908), uvec2(0x19190819, 0x19191908), + uvec2(0x19191908, 0x19191908), uvec2(0x192b0808, 0x19191908), uvec2(0x2b080819, 0x19191908), uvec2(0x2b081908, 0x19191908), + uvec2(0x2b190808, 0x19191908), uvec2(0x08080808, 0x19191919), uvec2(0x0808082b, 0x19191919), uvec2(0x08081919, 0x19191919), + uvec2(0x08082b08, 0x19191919), uvec2(0x08190819, 0x19191919), uvec2(0x08191908, 0x19191919), uvec2(0x082b0808, 0x19191919), + uvec2(0x19080819, 0x19191919), uvec2(0x19081908, 0x19191919), uvec2(0x19190808, 0x19191919), uvec2(0x2b080808, 0x19191919), + uvec2(0x08080819, 0x1919192b), uvec2(0x08081908, 0x1919192b), uvec2(0x08190808, 0x1919192b), uvec2(0x082b192b, 0x1919192b), + uvec2(0x19080808, 0x1919192b), uvec2(0x08080808, 0x19192b08), uvec2(0x0808082b, 0x19192b08), uvec2(0x08081919, 0x19192b08), + uvec2(0x08082b08, 0x19192b08), uvec2(0x08190819, 0x19192b08), uvec2(0x08191908, 0x19192b08), uvec2(0x082b0808, 0x19192b08), + uvec2(0x19080819, 0x19192b08), uvec2(0x19081908, 0x19192b08), uvec2(0x19190808, 0x19192b08), uvec2(0x19192b2b, 0x19192b08), + uvec2(0x2b080808, 0x19192b08), uvec2(0x08080819, 0x19192b19), uvec2(0x08081908, 0x19192b19), uvec2(0x08190808, 0x19192b19), + uvec2(0x19080808, 0x19192b19), uvec2(0x08080808, 0x19192b2b), uvec2(0x08192b19, 0x19192b2b), uvec2(0x2b081919, 0x19192b2b), + uvec2(0x2b2b2b08, 0x19192b2b), uvec2(0x08080819, 0x192b0808), uvec2(0x08081908, 0x192b0808), uvec2(0x0808192b, 0x192b0808), + uvec2(0x08190808, 0x192b0808), uvec2(0x0819082b, 0x192b0808), uvec2(0x08191919, 0x192b0808), uvec2(0x08192b08, 0x192b0808), + uvec2(0x082b0819, 0x192b0808), uvec2(0x082b1908, 0x192b0808), uvec2(0x19080808, 0x192b0808), uvec2(0x19081919, 0x192b0808), + uvec2(0x19082b08, 0x192b0808), uvec2(0x19190819, 0x192b0808), uvec2(0x19191908, 0x192b0808), uvec2(0x192b0808, 0x192b0808), + uvec2(0x2b081908, 0x192b0808), uvec2(0x2b190808, 0x192b0808), uvec2(0x08080808, 0x192b0819), uvec2(0x0808082b, 0x192b0819), + uvec2(0x08081919, 0x192b0819), uvec2(0x08082b08, 0x192b0819), uvec2(0x08190819, 0x192b0819), uvec2(0x08191908, 0x192b0819), + uvec2(0x082b0808, 0x192b0819), uvec2(0x19080819, 0x192b0819), uvec2(0x19081908, 0x192b0819), uvec2(0x19190808, 0x192b0819), + uvec2(0x2b080808, 0x192b0819), uvec2(0x2b192b19, 0x192b0819), uvec2(0x08081908, 0x192b082b), uvec2(0x08190808, 0x192b082b), + uvec2(0x19080808, 0x192b082b), uvec2(0x1919192b, 0x192b082b), uvec2(0x2b2b0819, 0x192b082b), uvec2(0x08080808, 0x192b1908), + uvec2(0x08081919, 0x192b1908), uvec2(0x08082b08, 0x192b1908), uvec2(0x08190819, 0x192b1908), uvec2(0x08191908, 0x192b1908), + uvec2(0x082b0808, 0x192b1908), uvec2(0x19080819, 0x192b1908), uvec2(0x19081908, 0x192b1908), uvec2(0x19190808, 0x192b1908), + uvec2(0x2b080808, 0x192b1908), uvec2(0x08080819, 0x192b1919), uvec2(0x08081908, 0x192b1919), uvec2(0x08190808, 0x192b1919), + uvec2(0x19080808, 0x192b1919), uvec2(0x19082b2b, 0x192b1919), uvec2(0x192b2b08, 0x192b1919), uvec2(0x2b19082b, 0x192b1919), + uvec2(0x08080808, 0x192b192b), uvec2(0x2b191908, 0x192b192b), uvec2(0x08080819, 0x192b2b08), uvec2(0x08081908, 0x192b2b08), + uvec2(0x08190808, 0x192b2b08), uvec2(0x192b1919, 0x192b2b08), uvec2(0x2b192b08, 0x192b2b08), uvec2(0x08080808, 0x192b2b19), + uvec2(0x082b2b2b, 0x192b2b19), uvec2(0x1908082b, 0x192b2b2b), uvec2(0x2b2b0819, 0x192b2b2b), uvec2(0x08080808, 0x2b080808), + uvec2(0x0808082b, 0x2b080808), uvec2(0x08081919, 0x2b080808), uvec2(0x08082b08, 0x2b080808), uvec2(0x08190819, 0x2b080808), + uvec2(0x08191908, 0x2b080808), uvec2(0x08192b19, 0x2b080808), uvec2(0x082b0808, 0x2b080808), uvec2(0x082b1919, 0x2b080808), + uvec2(0x19080819, 0x2b080808), uvec2(0x19081908, 0x2b080808), uvec2(0x19190808, 0x2b080808), uvec2(0x1919082b, 0x2b080808), + uvec2(0x19191919, 0x2b080808), uvec2(0x19192b08, 0x2b080808), uvec2(0x192b0819, 0x2b080808), uvec2(0x2b080808, 0x2b080808), + uvec2(0x2b081919, 0x2b080808), uvec2(0x2b190819, 0x2b080808), uvec2(0x2b191908, 0x2b080808), uvec2(0x08080819, 0x2b080819), + uvec2(0x08081908, 0x2b080819), uvec2(0x08082b19, 0x2b080819), uvec2(0x08190808, 0x2b080819), uvec2(0x0819082b, 0x2b080819), + uvec2(0x08191919, 0x2b080819), uvec2(0x08192b08, 0x2b080819), uvec2(0x082b0819, 0x2b080819), uvec2(0x082b1908, 0x2b080819), + uvec2(0x19080808, 0x2b080819), uvec2(0x1908082b, 0x2b080819), uvec2(0x19081919, 0x2b080819), uvec2(0x19082b08, 0x2b080819), + uvec2(0x19190819, 0x2b080819), uvec2(0x19191908, 0x2b080819), uvec2(0x2b080819, 0x2b080819), uvec2(0x2b081908, 0x2b080819), + uvec2(0x2b190808, 0x2b080819), uvec2(0x2b2b2b19, 0x2b080819), uvec2(0x08080808, 0x2b08082b), uvec2(0x08081919, 0x2b08082b), + uvec2(0x08082b2b, 0x2b08082b), uvec2(0x08190819, 0x2b08082b), uvec2(0x08191908, 0x2b08082b), uvec2(0x19080819, 0x2b08082b), + uvec2(0x19081908, 0x2b08082b), uvec2(0x19190808, 0x2b08082b), uvec2(0x08080819, 0x2b081908), uvec2(0x08081908, 0x2b081908), + uvec2(0x0808192b, 0x2b081908), uvec2(0x08082b19, 0x2b081908), uvec2(0x08190808, 0x2b081908), uvec2(0x0819082b, 0x2b081908), + uvec2(0x08191919, 0x2b081908), uvec2(0x08192b08, 0x2b081908), uvec2(0x082b0819, 0x2b081908), uvec2(0x19080808, 0x2b081908), + uvec2(0x1908082b, 0x2b081908), uvec2(0x19081919, 0x2b081908), uvec2(0x19082b08, 0x2b081908), uvec2(0x19190819, 0x2b081908), + uvec2(0x19191908, 0x2b081908), uvec2(0x192b0808, 0x2b081908), uvec2(0x2b080819, 0x2b081908), uvec2(0x2b081908, 0x2b081908), + uvec2(0x2b190808, 0x2b081908), uvec2(0x08080808, 0x2b081919), uvec2(0x0808082b, 0x2b081919), uvec2(0x08081919, 0x2b081919), + uvec2(0x08082b08, 0x2b081919), uvec2(0x08190819, 0x2b081919), uvec2(0x08191908, 0x2b081919), uvec2(0x082b0808, 0x2b081919), + uvec2(0x19080819, 0x2b081919), uvec2(0x19081908, 0x2b081919), uvec2(0x19190808, 0x2b081919), uvec2(0x2b080808, 0x2b081919), + uvec2(0x2b082b2b, 0x2b081919), uvec2(0x08080819, 0x2b08192b), uvec2(0x08081908, 0x2b08192b), uvec2(0x08190808, 0x2b08192b), + uvec2(0x082b2b19, 0x2b08192b), uvec2(0x19080808, 0x2b08192b), uvec2(0x08080808, 0x2b082b08), uvec2(0x08081919, 0x2b082b08), + uvec2(0x08190819, 0x2b082b08), uvec2(0x08191908, 0x2b082b08), uvec2(0x19080819, 0x2b082b08), uvec2(0x19081908, 0x2b082b08), + uvec2(0x19190808, 0x2b082b08), uvec2(0x2b2b082b, 0x2b082b08), uvec2(0x08080819, 0x2b082b19), uvec2(0x08081908, 0x2b082b19), + uvec2(0x19080808, 0x2b082b19), uvec2(0x192b1919, 0x2b082b19), uvec2(0x082b082b, 0x2b082b2b), uvec2(0x19192b08, 0x2b082b2b), + uvec2(0x19192b2b, 0x2b082b2b), uvec2(0x2b08082b, 0x2b082b2b), uvec2(0x2b2b082b, 0x2b082b2b), uvec2(0x08080819, 0x2b190808), + uvec2(0x08081908, 0x2b190808), uvec2(0x08082b19, 0x2b190808), uvec2(0x08190808, 0x2b190808), uvec2(0x0819082b, 0x2b190808), + uvec2(0x08191919, 0x2b190808), uvec2(0x08192b08, 0x2b190808), uvec2(0x082b1908, 0x2b190808), uvec2(0x19080808, 0x2b190808), + uvec2(0x1908082b, 0x2b190808), uvec2(0x19081919, 0x2b190808), uvec2(0x19082b08, 0x2b190808), uvec2(0x19190819, 0x2b190808), + uvec2(0x19191908, 0x2b190808), uvec2(0x192b0808, 0x2b190808), uvec2(0x2b080819, 0x2b190808), uvec2(0x2b081908, 0x2b190808), + uvec2(0x2b190808, 0x2b190808), uvec2(0x08080808, 0x2b190819), uvec2(0x08081919, 0x2b190819), uvec2(0x08190819, 0x2b190819), + uvec2(0x08191908, 0x2b190819), uvec2(0x19080819, 0x2b190819), uvec2(0x19081908, 0x2b190819), uvec2(0x19190808, 0x2b190819), + uvec2(0x19192b2b, 0x2b190819), uvec2(0x08080819, 0x2b19082b), uvec2(0x08081908, 0x2b19082b), uvec2(0x08190808, 0x2b19082b), + uvec2(0x19080808, 0x2b19082b), uvec2(0x2b2b192b, 0x2b19082b), uvec2(0x08080808, 0x2b191908), uvec2(0x0808082b, 0x2b191908), + uvec2(0x08081919, 0x2b191908), uvec2(0x08082b08, 0x2b191908), uvec2(0x08190819, 0x2b191908), uvec2(0x08191908, 0x2b191908), + uvec2(0x082b0808, 0x2b191908), uvec2(0x19080819, 0x2b191908), uvec2(0x19081908, 0x2b191908), uvec2(0x19190808, 0x2b191908), + uvec2(0x2b080808, 0x2b191908), uvec2(0x2b19192b, 0x2b191908), uvec2(0x08080819, 0x2b191919), uvec2(0x08081908, 0x2b191919), + uvec2(0x08190808, 0x2b191919), uvec2(0x19080808, 0x2b191919), uvec2(0x2b192b08, 0x2b191919), uvec2(0x2b2b0819, 0x2b191919), + uvec2(0x08080808, 0x2b19192b), uvec2(0x1908192b, 0x2b19192b), uvec2(0x192b1908, 0x2b19192b), uvec2(0x08080819, 0x2b192b08), + uvec2(0x08081908, 0x2b192b08), uvec2(0x08190808, 0x2b192b08), uvec2(0x082b192b, 0x2b192b08), uvec2(0x19080808, 0x2b192b08), + uvec2(0x2b2b2b19, 0x2b192b08), uvec2(0x08080808, 0x2b192b19), uvec2(0x19082b19, 0x2b192b19), uvec2(0x1919082b, 0x2b192b19), + uvec2(0x2b190808, 0x2b192b2b), uvec2(0x08080808, 0x2b2b0808), uvec2(0x08081919, 0x2b2b0808), uvec2(0x08082b2b, 0x2b2b0808), + uvec2(0x08191908, 0x2b2b0808), uvec2(0x082b082b, 0x2b2b0808), uvec2(0x082b2b2b, 0x2b2b0808), uvec2(0x19080819, 0x2b2b0808), + uvec2(0x19081908, 0x2b2b0808), uvec2(0x19190808, 0x2b2b0808), uvec2(0x2b2b082b, 0x2b2b0808), uvec2(0x2b2b2b2b, 0x2b2b0808), + uvec2(0x19080808, 0x2b2b0819), uvec2(0x192b1919, 0x2b2b0819), uvec2(0x0808082b, 0x2b2b082b), uvec2(0x08082b2b, 0x2b2b082b), + uvec2(0x082b082b, 0x2b2b082b), uvec2(0x082b2b08, 0x2b2b082b), uvec2(0x082b2b2b, 0x2b2b082b), uvec2(0x2b08082b, 0x2b2b082b), + uvec2(0x2b082b08, 0x2b2b082b), uvec2(0x2b082b2b, 0x2b2b082b), uvec2(0x2b2b2b08, 0x2b2b082b), uvec2(0x08080819, 0x2b2b1908), + uvec2(0x08081908, 0x2b2b1908), uvec2(0x08190808, 0x2b2b1908), uvec2(0x19080808, 0x2b2b1908), uvec2(0x2b082b19, 0x2b2b1908), + uvec2(0x2b2b1908, 0x2b2b1908), uvec2(0x08080808, 0x2b2b1919), uvec2(0x08192b19, 0x2b2b1919), uvec2(0x19190819, 0x2b2b192b), + uvec2(0x08082b2b, 0x2b2b2b08), uvec2(0x082b2b08, 0x2b2b2b08), uvec2(0x2b2b082b, 0x2b2b2b08), uvec2(0x19191908, 0x2b2b2b19), + uvec2(0x2b08192b, 0x2b2b2b19), uvec2(0x08082b08, 0x2b2b2b2b), uvec2(0x08082b2b, 0x2b2b2b2b), uvec2(0x082b0808, 0x2b2b2b2b), + uvec2(0x082b082b, 0x2b2b2b2b), uvec2(0x082b2b08, 0x2b2b2b2b), uvec2(0x2b082b08, 0x2b2b2b2b), uvec2(0x2b2b2b2b, 0x2b2b2b2b) +}; + +shared uvec2 iq2s_grid[1024]; + +void init_iq_shmem(uvec3 wgsize) +{ + // copy the table into shared memory and sync + for (uint i = gl_LocalInvocationIndex.x; i < iq2s_grid.length(); i += wgsize.x) { + iq2s_grid[i] = iq2s_grid_const[i]; + } + barrier(); +} + +#define QUANT_K QUANT_K_IQ2_S +#define QUANT_R QUANT_R_IQ2_S +#define A_TYPE block_iq2_s +#endif + +#define QUANT_K_IQ3_XXS 256 +#define QUANT_R_IQ3_XXS 1 + +struct block_iq3_xxs +{ + float16_t d; + uint8_t qs[QUANT_K_IQ3_XXS/4 + QUANT_K_IQ3_XXS/8]; +}; + +struct block_iq3_xxs_packed16 +{ + float16_t d; + uint16_t qs[QUANT_K_IQ3_XXS/8 + QUANT_K_IQ3_XXS/16]; +}; + +#if defined(DATA_A_IQ3_XXS) + +const uint32_t iq3xxs_grid_const[256] = { + 0x04040404, 0x04040414, 0x04040424, 0x04040c0c, 0x04040c1c, 0x04040c3e, 0x04041404, 0x04041414, + 0x04041c0c, 0x04042414, 0x04043e1c, 0x04043e2c, 0x040c040c, 0x040c041c, 0x040c0c04, 0x040c0c14, + 0x040c140c, 0x040c142c, 0x040c1c04, 0x040c1c14, 0x040c240c, 0x040c2c24, 0x040c3e04, 0x04140404, + 0x04140414, 0x04140424, 0x04140c0c, 0x04141404, 0x04141414, 0x04141c0c, 0x04141c1c, 0x04141c3e, + 0x04142c0c, 0x04142c3e, 0x04143e2c, 0x041c040c, 0x041c043e, 0x041c0c04, 0x041c0c14, 0x041c142c, + 0x041c3e04, 0x04240c1c, 0x04241c3e, 0x04242424, 0x04242c3e, 0x04243e1c, 0x04243e2c, 0x042c040c, + 0x042c043e, 0x042c1c14, 0x042c2c14, 0x04341c2c, 0x04343424, 0x043e0c04, 0x043e0c24, 0x043e0c34, + 0x043e241c, 0x043e340c, 0x0c04040c, 0x0c04041c, 0x0c040c04, 0x0c040c14, 0x0c04140c, 0x0c04141c, + 0x0c041c04, 0x0c041c14, 0x0c041c24, 0x0c04243e, 0x0c042c04, 0x0c0c0404, 0x0c0c0414, 0x0c0c0c0c, + 0x0c0c1404, 0x0c0c1414, 0x0c14040c, 0x0c14041c, 0x0c140c04, 0x0c140c14, 0x0c14140c, 0x0c141c04, + 0x0c143e14, 0x0c1c0404, 0x0c1c0414, 0x0c1c1404, 0x0c1c1c0c, 0x0c1c2434, 0x0c1c3434, 0x0c24040c, + 0x0c24042c, 0x0c242c04, 0x0c2c1404, 0x0c2c1424, 0x0c2c2434, 0x0c2c3e0c, 0x0c34042c, 0x0c3e1414, + 0x0c3e2404, 0x14040404, 0x14040414, 0x14040c0c, 0x14040c1c, 0x14041404, 0x14041414, 0x14041434, + 0x14041c0c, 0x14042414, 0x140c040c, 0x140c041c, 0x140c042c, 0x140c0c04, 0x140c0c14, 0x140c140c, + 0x140c1c04, 0x140c341c, 0x140c343e, 0x140c3e04, 0x14140404, 0x14140414, 0x14140c0c, 0x14140c3e, + 0x14141404, 0x14141414, 0x14141c3e, 0x14142404, 0x14142c2c, 0x141c040c, 0x141c0c04, 0x141c0c24, + 0x141c3e04, 0x141c3e24, 0x14241c2c, 0x14242c1c, 0x142c041c, 0x142c143e, 0x142c240c, 0x142c3e24, + 0x143e040c, 0x143e041c, 0x143e0c34, 0x143e242c, 0x1c04040c, 0x1c040c04, 0x1c040c14, 0x1c04140c, + 0x1c04141c, 0x1c042c04, 0x1c04342c, 0x1c043e14, 0x1c0c0404, 0x1c0c0414, 0x1c0c1404, 0x1c0c1c0c, + 0x1c0c2424, 0x1c0c2434, 0x1c14040c, 0x1c14041c, 0x1c140c04, 0x1c14142c, 0x1c142c14, 0x1c143e14, + 0x1c1c0c0c, 0x1c1c1c1c, 0x1c241c04, 0x1c24243e, 0x1c243e14, 0x1c2c0404, 0x1c2c0434, 0x1c2c1414, + 0x1c2c2c2c, 0x1c340c24, 0x1c341c34, 0x1c34341c, 0x1c3e1c1c, 0x1c3e3404, 0x24040424, 0x24040c3e, + 0x24041c2c, 0x24041c3e, 0x24042c1c, 0x24042c3e, 0x240c3e24, 0x24141404, 0x24141c3e, 0x24142404, + 0x24143404, 0x24143434, 0x241c043e, 0x241c242c, 0x24240424, 0x24242c0c, 0x24243424, 0x242c142c, + 0x242c241c, 0x242c3e04, 0x243e042c, 0x243e0c04, 0x243e0c14, 0x243e1c04, 0x2c040c14, 0x2c04240c, + 0x2c043e04, 0x2c0c0404, 0x2c0c0434, 0x2c0c1434, 0x2c0c2c2c, 0x2c140c24, 0x2c141c14, 0x2c143e14, + 0x2c1c0414, 0x2c1c2c1c, 0x2c240c04, 0x2c24141c, 0x2c24143e, 0x2c243e14, 0x2c2c0414, 0x2c2c1c0c, + 0x2c342c04, 0x2c3e1424, 0x2c3e2414, 0x34041424, 0x34042424, 0x34042434, 0x34043424, 0x340c140c, + 0x340c340c, 0x34140c3e, 0x34143424, 0x341c1c04, 0x341c1c34, 0x34242424, 0x342c042c, 0x342c2c14, + 0x34341c1c, 0x343e041c, 0x343e140c, 0x3e04041c, 0x3e04042c, 0x3e04043e, 0x3e040c04, 0x3e041c14, + 0x3e042c14, 0x3e0c1434, 0x3e0c2404, 0x3e140c14, 0x3e14242c, 0x3e142c14, 0x3e1c0404, 0x3e1c0c2c, + 0x3e1c1c1c, 0x3e1c3404, 0x3e24140c, 0x3e24240c, 0x3e2c0404, 0x3e2c0414, 0x3e2c1424, 0x3e341c04, +}; + +shared uint32_t iq3xxs_grid[256]; + +void init_iq_shmem(uvec3 wgsize) +{ + // copy the table into shared memory and sync + for (uint i = gl_LocalInvocationIndex.x; i < iq3xxs_grid.length(); i += wgsize.x) { + iq3xxs_grid[i] = iq3xxs_grid_const[i]; + } + barrier(); +} + +#define QUANT_K QUANT_K_IQ3_XXS +#define QUANT_R QUANT_R_IQ3_XXS +#define A_TYPE block_iq3_xxs +#define A_TYPE_PACKED16 block_iq3_xxs_packed16 +#endif + +#define QUANT_K_IQ3_S 256 +#define QUANT_R_IQ3_S 1 + +struct block_iq3_s +{ + float16_t d; + uint8_t qs[QUANT_K_IQ3_S/4]; + uint8_t qh[QUANT_K_IQ3_S/32]; + uint8_t signs[QUANT_K_IQ3_S/8]; + uint8_t scales[QUANT_K_IQ3_S/64]; +}; + +struct block_iq3_s_packed16 +{ + float16_t d; + uint16_t qs[QUANT_K_IQ3_S/4/2]; + uint16_t qh[QUANT_K_IQ3_S/32/2]; + uint16_t signs[QUANT_K_IQ3_S/8/2]; + uint16_t scales[QUANT_K_IQ3_S/64/2]; +}; + +#if defined(DATA_A_IQ3_S) + +const uint32_t iq3s_grid_const[512] = { + 0x01010101, 0x01010103, 0x01010105, 0x0101010b, 0x0101010f, 0x01010301, 0x01010303, 0x01010305, + 0x01010309, 0x0101030d, 0x01010501, 0x01010503, 0x0101050b, 0x01010707, 0x01010901, 0x01010905, + 0x0101090b, 0x0101090f, 0x01010b03, 0x01010b07, 0x01010d01, 0x01010d05, 0x01010f03, 0x01010f09, + 0x01010f0f, 0x01030101, 0x01030103, 0x01030105, 0x01030109, 0x01030301, 0x01030303, 0x0103030b, + 0x01030501, 0x01030507, 0x0103050f, 0x01030703, 0x0103070b, 0x01030909, 0x01030d03, 0x01030d0b, + 0x01030f05, 0x01050101, 0x01050103, 0x0105010b, 0x0105010f, 0x01050301, 0x01050307, 0x0105030d, + 0x01050503, 0x0105050b, 0x01050701, 0x01050709, 0x01050905, 0x0105090b, 0x0105090f, 0x01050b03, + 0x01050b07, 0x01050f01, 0x01050f07, 0x01070107, 0x01070303, 0x0107030b, 0x01070501, 0x01070505, + 0x01070703, 0x01070707, 0x0107070d, 0x01070909, 0x01070b01, 0x01070b05, 0x01070d0f, 0x01070f03, + 0x01070f0b, 0x01090101, 0x01090307, 0x0109030f, 0x01090503, 0x01090509, 0x01090705, 0x01090901, + 0x01090907, 0x01090b03, 0x01090f01, 0x010b0105, 0x010b0109, 0x010b0501, 0x010b0505, 0x010b050d, + 0x010b0707, 0x010b0903, 0x010b090b, 0x010b090f, 0x010b0d0d, 0x010b0f07, 0x010d010d, 0x010d0303, + 0x010d0307, 0x010d0703, 0x010d0b05, 0x010d0f03, 0x010f0101, 0x010f0105, 0x010f0109, 0x010f0501, + 0x010f0505, 0x010f050d, 0x010f0707, 0x010f0b01, 0x010f0b09, 0x03010101, 0x03010103, 0x03010105, + 0x03010109, 0x03010301, 0x03010303, 0x03010307, 0x0301030b, 0x0301030f, 0x03010501, 0x03010505, + 0x03010703, 0x03010709, 0x0301070d, 0x03010b09, 0x03010b0d, 0x03010d03, 0x03010f05, 0x03030101, + 0x03030103, 0x03030107, 0x0303010d, 0x03030301, 0x03030309, 0x03030503, 0x03030701, 0x03030707, + 0x03030903, 0x03030b01, 0x03030b05, 0x03030f01, 0x03030f0d, 0x03050101, 0x03050305, 0x0305030b, + 0x0305030f, 0x03050501, 0x03050509, 0x03050705, 0x03050901, 0x03050907, 0x03050b0b, 0x03050d01, + 0x03050f05, 0x03070103, 0x03070109, 0x0307010f, 0x03070301, 0x03070307, 0x03070503, 0x0307050f, + 0x03070701, 0x03070709, 0x03070903, 0x03070d05, 0x03070f01, 0x03090107, 0x0309010b, 0x03090305, + 0x03090309, 0x03090703, 0x03090707, 0x03090905, 0x0309090d, 0x03090b01, 0x03090b09, 0x030b0103, + 0x030b0301, 0x030b0307, 0x030b0503, 0x030b0701, 0x030b0705, 0x030b0b03, 0x030d0501, 0x030d0509, + 0x030d050f, 0x030d0909, 0x030d090d, 0x030f0103, 0x030f0107, 0x030f0301, 0x030f0305, 0x030f0503, + 0x030f070b, 0x030f0903, 0x030f0d05, 0x030f0f01, 0x05010101, 0x05010103, 0x05010107, 0x0501010b, + 0x0501010f, 0x05010301, 0x05010305, 0x05010309, 0x0501030d, 0x05010503, 0x05010507, 0x0501050f, + 0x05010701, 0x05010705, 0x05010903, 0x05010907, 0x0501090b, 0x05010b01, 0x05010b05, 0x05010d0f, + 0x05010f01, 0x05010f07, 0x05010f0b, 0x05030101, 0x05030105, 0x05030301, 0x05030307, 0x0503030f, + 0x05030505, 0x0503050b, 0x05030703, 0x05030709, 0x05030905, 0x05030b03, 0x05050103, 0x05050109, + 0x0505010f, 0x05050503, 0x05050507, 0x05050701, 0x0505070f, 0x05050903, 0x05050b07, 0x05050b0f, + 0x05050f03, 0x05050f09, 0x05070101, 0x05070105, 0x0507010b, 0x05070303, 0x05070505, 0x05070509, + 0x05070703, 0x05070707, 0x05070905, 0x05070b01, 0x05070d0d, 0x05090103, 0x0509010f, 0x05090501, + 0x05090507, 0x05090705, 0x0509070b, 0x05090903, 0x05090f05, 0x05090f0b, 0x050b0109, 0x050b0303, + 0x050b0505, 0x050b070f, 0x050b0901, 0x050b0b07, 0x050b0f01, 0x050d0101, 0x050d0105, 0x050d010f, + 0x050d0503, 0x050d0b0b, 0x050d0d03, 0x050f010b, 0x050f0303, 0x050f050d, 0x050f0701, 0x050f0907, + 0x050f0b01, 0x07010105, 0x07010303, 0x07010307, 0x0701030b, 0x0701030f, 0x07010505, 0x07010703, + 0x07010707, 0x0701070b, 0x07010905, 0x07010909, 0x0701090f, 0x07010b03, 0x07010d07, 0x07010f03, + 0x07030103, 0x07030107, 0x0703010b, 0x07030309, 0x07030503, 0x07030507, 0x07030901, 0x07030d01, + 0x07030f05, 0x07030f0d, 0x07050101, 0x07050305, 0x07050501, 0x07050705, 0x07050709, 0x07050b01, + 0x07070103, 0x07070301, 0x07070309, 0x07070503, 0x07070507, 0x0707050f, 0x07070701, 0x07070903, + 0x07070907, 0x0707090f, 0x07070b0b, 0x07070f07, 0x07090107, 0x07090303, 0x0709030d, 0x07090505, + 0x07090703, 0x07090b05, 0x07090d01, 0x07090d09, 0x070b0103, 0x070b0301, 0x070b0305, 0x070b050b, + 0x070b0705, 0x070b0909, 0x070b0b0d, 0x070b0f07, 0x070d030d, 0x070d0903, 0x070f0103, 0x070f0107, + 0x070f0501, 0x070f0505, 0x070f070b, 0x09010101, 0x09010109, 0x09010305, 0x09010501, 0x09010509, + 0x0901050f, 0x09010705, 0x09010903, 0x09010b01, 0x09010f01, 0x09030105, 0x0903010f, 0x09030303, + 0x09030307, 0x09030505, 0x09030701, 0x0903070b, 0x09030907, 0x09030b03, 0x09030b0b, 0x09050103, + 0x09050107, 0x09050301, 0x0905030b, 0x09050503, 0x09050707, 0x09050901, 0x09050b0f, 0x09050d05, + 0x09050f01, 0x09070109, 0x09070303, 0x09070307, 0x09070501, 0x09070505, 0x09070703, 0x0907070b, + 0x09090101, 0x09090105, 0x09090509, 0x0909070f, 0x09090901, 0x09090f03, 0x090b010b, 0x090b010f, + 0x090b0503, 0x090b0d05, 0x090d0307, 0x090d0709, 0x090d0d01, 0x090f0301, 0x090f030b, 0x090f0701, + 0x090f0907, 0x090f0b03, 0x0b010105, 0x0b010301, 0x0b010309, 0x0b010505, 0x0b010901, 0x0b010909, + 0x0b01090f, 0x0b010b05, 0x0b010d0d, 0x0b010f09, 0x0b030103, 0x0b030107, 0x0b03010b, 0x0b030305, + 0x0b030503, 0x0b030705, 0x0b030f05, 0x0b050101, 0x0b050303, 0x0b050507, 0x0b050701, 0x0b05070d, + 0x0b050b07, 0x0b070105, 0x0b07010f, 0x0b070301, 0x0b07050f, 0x0b070909, 0x0b070b03, 0x0b070d0b, + 0x0b070f07, 0x0b090103, 0x0b090109, 0x0b090501, 0x0b090705, 0x0b09090d, 0x0b0b0305, 0x0b0b050d, + 0x0b0b0b03, 0x0b0b0b07, 0x0b0d0905, 0x0b0f0105, 0x0b0f0109, 0x0b0f0505, 0x0d010303, 0x0d010307, + 0x0d01030b, 0x0d010703, 0x0d010707, 0x0d010d01, 0x0d030101, 0x0d030501, 0x0d03050f, 0x0d030d09, + 0x0d050305, 0x0d050709, 0x0d050905, 0x0d050b0b, 0x0d050d05, 0x0d050f01, 0x0d070101, 0x0d070309, + 0x0d070503, 0x0d070901, 0x0d09050b, 0x0d090907, 0x0d090d05, 0x0d0b0101, 0x0d0b0107, 0x0d0b0709, + 0x0d0b0d01, 0x0d0d010b, 0x0d0d0901, 0x0d0f0303, 0x0d0f0307, 0x0f010101, 0x0f010109, 0x0f01010f, + 0x0f010501, 0x0f010505, 0x0f01070d, 0x0f010901, 0x0f010b09, 0x0f010d05, 0x0f030105, 0x0f030303, + 0x0f030509, 0x0f030907, 0x0f03090b, 0x0f050103, 0x0f050109, 0x0f050301, 0x0f05030d, 0x0f050503, + 0x0f050701, 0x0f050b03, 0x0f070105, 0x0f070705, 0x0f07070b, 0x0f070b07, 0x0f090103, 0x0f09010b, + 0x0f090307, 0x0f090501, 0x0f090b01, 0x0f0b0505, 0x0f0b0905, 0x0f0d0105, 0x0f0d0703, 0x0f0f0101, +}; + +shared uint32_t iq3s_grid[512]; + +void init_iq_shmem(uvec3 wgsize) +{ + // copy the table into shared memory and sync + for (uint i = gl_LocalInvocationIndex.x; i < iq3s_grid.length(); i += wgsize.x) { + iq3s_grid[i] = iq3s_grid_const[i]; + } + barrier(); +} + +#define QUANT_K QUANT_K_IQ3_S +#define QUANT_R QUANT_R_IQ3_S +#define A_TYPE block_iq3_s +#define A_TYPE_PACKED16 block_iq3_s_packed16 +#endif + #define QUANT_K_IQ4_NL 32 #define QUANT_R_IQ4_NL 2 @@ -318,11 +1050,11 @@ const int8_t kvalues_iq4nl_const[16] = { shared FLOAT_TYPE kvalues_iq4nl[16]; -void init_iq4nl_shmem() +void init_iq_shmem(uvec3 wgsize) { // copy the table into shared memory and sync - if (gl_LocalInvocationIndex.x < 16) { - kvalues_iq4nl[gl_LocalInvocationIndex.x] = FLOAT_TYPE(kvalues_iq4nl_const[gl_LocalInvocationIndex.x]); + for (uint i = gl_LocalInvocationIndex.x; i < kvalues_iq4nl.length(); i += wgsize.x) { + kvalues_iq4nl[i] = FLOAT_TYPE(kvalues_iq4nl_const[i]); } barrier(); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index e9c6cb9d4..93ddbfadc 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -55,6 +55,11 @@ const std::vector type_names = { "q4_k", "q5_k", "q6_k", + "iq2_xxs", + "iq2_xs", + "iq2_s", + "iq3_xxs", + "iq3_s", "iq4_nl" }; From eb7cf15a808d4d7a71eef89cc6a9b96fe82989dc Mon Sep 17 00:00:00 2001 From: Nigel Bosch Date: Wed, 29 Jan 2025 12:45:44 -0600 Subject: [PATCH 191/279] server : add /apply-template endpoint for additional use cases of Minja functionality (#11489) * add /apply-template endpoint to server * remove unnecessary line * add /apply-template documentation * return only "prompt" field in /apply-template * use suggested idea instead of my overly verbose way --- examples/server/README.md | 8 ++++++++ examples/server/server.cpp | 9 +++++++++ .../server/tests/unit/test_chat_completion.py | 15 +++++++++++++++ 3 files changed, 32 insertions(+) diff --git a/examples/server/README.md b/examples/server/README.md index e788d8b59..cedae0b6d 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -576,6 +576,14 @@ With input 'á' (utf8 hex: C3 A1) on tinyllama/stories260k `tokens`: Set the tokens to detokenize. +### POST `/apply-template`: Apply chat template to a conversation + +Uses the server's prompt template formatting functionality to convert chat messages to a single string expected by a chat model as input, but does not perform inference. Instead, the prompt string is returned in the `prompt` field of the JSON response. The prompt can then be modified as desired (for example, to insert "Sure!" at the beginning of the model's response) before sending to `/completion` to generate the chat response. + +*Options:* + +`messages`: (Required) Chat turns in the same format as `/v1/chat/completions`. + ### POST `/embedding`: Generate embedding of a given text > [!IMPORTANT] diff --git a/examples/server/server.cpp b/examples/server/server.cpp index c5efbdb09..6e28d283a 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -4124,6 +4124,14 @@ int main(int argc, char ** argv) { res_ok(res, root); }; + const auto handle_apply_template = [&ctx_server, ¶ms, &res_ok](const httplib::Request & req, httplib::Response & res) { + auto body = json::parse(req.body); + const auto & chat_template = body.contains("tools") && ctx_server.chat_templates.template_tool_use ? *ctx_server.chat_templates.template_tool_use : *ctx_server.chat_templates.template_default; + json data = oaicompat_completion_params_parse(body, chat_template, params.use_jinja); + + res_ok(res, {{ "prompt", data.at("prompt") }}); + }; + const auto handle_embeddings = [&handle_embeddings_impl](const httplib::Request & req, httplib::Response & res) { handle_embeddings_impl(req, res, OAICOMPAT_TYPE_NONE); }; @@ -4300,6 +4308,7 @@ int main(int argc, char ** argv) { svr->Post("/v1/reranking", handle_rerank); svr->Post("/tokenize", handle_tokenize); svr->Post("/detokenize", handle_detokenize); + svr->Post("/apply-template", handle_apply_template); // LoRA adapters hotswap svr->Get ("/lora-adapters", handle_lora_adapters_list); svr->Post("/lora-adapters", handle_lora_adapters_apply); diff --git a/examples/server/tests/unit/test_chat_completion.py b/examples/server/tests/unit/test_chat_completion.py index 2e15348dc..add3f810f 100644 --- a/examples/server/tests/unit/test_chat_completion.py +++ b/examples/server/tests/unit/test_chat_completion.py @@ -121,6 +121,21 @@ def test_chat_template(): assert res.body["__verbose"]["prompt"] == " <|start_header_id|>system<|end_header_id|>\n\nBook<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the best book<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" +def test_apply_chat_template(): + global server + server.chat_template = "command-r" + server.start() + res = server.make_request("POST", "/apply-template", data={ + "messages": [ + {"role": "system", "content": "You are a test."}, + {"role": "user", "content":"Hi there"}, + ] + }) + assert res.status_code == 200 + assert "prompt" in res.body + assert res.body["prompt"] == "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>You are a test.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hi there<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>" + + @pytest.mark.parametrize("response_format,n_predicted,re_content", [ ({"type": "json_object", "schema": {"const": "42"}}, 6, "\"42\""), ({"type": "json_object", "schema": {"items": [{"type": "integer"}]}}, 10, "[ -3000 ]"), From e0449763a4f335cca374254a72892141f41eaa59 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Thu, 30 Jan 2025 05:48:14 +0100 Subject: [PATCH 192/279] server : update json snippets in README.md [no ci] (#11492) This commit updates some of JSON snippets in README.md file and removes the `json` language tag from the code blocks. The motivation for this changes is that if there is invalid json in a code snippet these are highlighted in red which can make it somewhat difficult to read and can be a little distracting. --- examples/server/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index cedae0b6d..d1c9be589 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -460,7 +460,7 @@ These words will not be included in the completion, so make sure to add them to - Note: In streaming mode (`stream`), only `content`, `tokens` and `stop` will be returned until end of completion. Responses are sent using the [Server-sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html) standard. Note: the browser's `EventSource` interface cannot be used due to its lack of `POST` request support. - `completion_probabilities`: An array of token probabilities for each completion. The array's length is `n_predict`. Each item in the array has a nested array `top_logprobs`. It contains at **maximum** `n_probs` elements: - ```json + ``` { "content": "", "tokens": [ generated token ids if requested ], @@ -561,7 +561,7 @@ If `with_pieces` is `true`: ``` With input 'á' (utf8 hex: C3 A1) on tinyllama/stories260k -```json +``` { "tokens": [ {"id": 198, "piece": [195]}, // hex C3 @@ -776,7 +776,7 @@ Same as the `/v1/embeddings` endpoint. **Response format** -```json +``` [ { "index": 0, From 7919256c57f05c09b0b50ec9abb37ff62dab7251 Mon Sep 17 00:00:00 2001 From: Guspan Tanadi <36249910+guspan-tanadi@users.noreply.github.com> Date: Thu, 30 Jan 2025 12:58:02 +0700 Subject: [PATCH 193/279] readme : reference examples relative links (#11505) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ff8536773..382c67041 100644 --- a/README.md +++ b/README.md @@ -422,7 +422,7 @@ To learn more about model quantization, [read this documentation](examples/quant
-[^1]: [examples/perplexity/README.md](examples/perplexity/README.md) +[^1]: [examples/perplexity/README.md](./examples/perplexity/README.md) [^2]: [https://huggingface.co/docs/transformers/perplexity](https://huggingface.co/docs/transformers/perplexity) ## [`llama-bench`](examples/llama-bench) From 496e5bf46bab757d05e18227cb4e17055ae42f42 Mon Sep 17 00:00:00 2001 From: Isaac McFadyen Date: Thu, 30 Jan 2025 04:11:53 -0500 Subject: [PATCH 194/279] server : (docs) added response format for /apply-template [no ci] (#11503) --- examples/server/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/examples/server/README.md b/examples/server/README.md index d1c9be589..44da503df 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -584,6 +584,10 @@ Uses the server's prompt template formatting functionality to convert chat messa `messages`: (Required) Chat turns in the same format as `/v1/chat/completions`. +**Response format** + +Returns a JSON object with a field `prompt` containing a string of the input messages formatted according to the model's chat template format. + ### POST `/embedding`: Generate embedding of a given text > [!IMPORTANT] From 4314e56c4f8c5091f45732f39bd94c0c6c323798 Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Thu, 30 Jan 2025 11:05:00 +0100 Subject: [PATCH 195/279] server : use lambda instead of std::bind (#11507) This commit replaces the two usages of `std::bind` in favor of lambdas for the callback functions for `callback_new_task` and `callback_update_slots`. The motivation for this changes is consistency with the rest of the code in server.cpp (lambdas are used for all other callbacks/handlers). Also lambdas are more readable (perhaps this is subjective) but also they are recommended over `std::bind` in modern C++. Ref: https://github.com/LithoCoders/dailycpp/blob/master/EffectiveModernC%2B%2B/chapter6/Item34_Prefer_lambdas_to_std::bind.md --- examples/server/server.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 6e28d283a..b9aa5c81c 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -4387,11 +4387,13 @@ int main(int argc, char ** argv) { ctx_server.chat_templates.template_default->source().c_str(), common_chat_format_example(*ctx_server.chat_templates.template_default, ctx_server.params_base.use_jinja).c_str()); - ctx_server.queue_tasks.on_new_task(std::bind( - &server_context::process_single_task, &ctx_server, std::placeholders::_1)); + ctx_server.queue_tasks.on_new_task([&ctx_server](const server_task & task) { + ctx_server.process_single_task(task); + }); - ctx_server.queue_tasks.on_update_slots(std::bind( - &server_context::update_slots, &ctx_server)); + ctx_server.queue_tasks.on_update_slots([&ctx_server]() { + ctx_server.update_slots(); + }); shutdown_handler = [&](int) { ctx_server.queue_tasks.terminate(); From ffd0821c57edc7e5d04338ab0c6b1461198df15f Mon Sep 17 00:00:00 2001 From: mgroeber9110 <45620825+mgroeber9110@users.noreply.github.com> Date: Thu, 30 Jan 2025 11:10:59 +0100 Subject: [PATCH 196/279] vocab : correctly identify LF token for GPT-2 style BPE tokenizer (#11496) --- src/llama-vocab.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp index 561f8bdb8..ad9ffe66a 100644 --- a/src/llama-vocab.cpp +++ b/src/llama-vocab.cpp @@ -1692,7 +1692,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) { GGML_ASSERT(!ids.empty() && "model vocab missing newline token"); linefeed_id = ids[0]; } else { - const std::vector ids = tokenize("\xC4\x8A", false); // U+010A + const std::vector ids = tokenize("\n", false); //GGML_ASSERT(!ids.empty() && "model vocab missing newline token"); if (ids.empty()) { From 3d804dec7661fbb7de9b7f93267e2fc3ca0193c1 Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Thu, 30 Jan 2025 10:30:27 +0000 Subject: [PATCH 197/279] sync: minja (#11499) --- common/chat-template.hpp | 252 +++++++++++++++++++++++++++------------ common/minja.hpp | 119 +++++++++--------- 2 files changed, 238 insertions(+), 133 deletions(-) diff --git a/common/chat-template.hpp b/common/chat-template.hpp index 42ee0b615..75ba5d938 100644 --- a/common/chat-template.hpp +++ b/common/chat-template.hpp @@ -17,17 +17,26 @@ using json = nlohmann::ordered_json; namespace minja { +struct chat_template_caps { + bool supports_tools = false; + bool supports_tool_calls = false; + bool supports_tool_responses = false; + bool supports_system_role = false; + bool supports_parallel_tool_calls = false; + bool supports_tool_call_id = false; + // meta-llama/Llama-3.1-8B-Instruct expects arguments to be an object. + // Most other templates (and OpenAI's API) expect the arguments object to be stringified. + bool requires_object_arguments = false; + // CohereForAI/c4ai-command-r-plus simple variant + bool requires_non_null_content = false; + // MiniMaxAI/MiniMax-Text-01 special + bool requires_typed_content = false; +}; + class chat_template { - public: private: - bool supports_tools_ = true; - // Meta-Llama-3.1-8B-Instruct's template expects arguments to be an object. - // Most other templates (and OpenAI's API) expect the arguments object to be stringified. - bool requires_object_arguments_ = false; - bool requires_typed_content_ = false; - bool supports_system_role_ = true; - bool supports_parallel_tool_calls_ = false; + chat_template_caps caps_; std::string source_; std::string bos_token_; std::string eos_token_; @@ -41,15 +50,16 @@ class chat_template { { try { auto prompt = apply(messages, tools, add_generation_prompt, extra_context, /* adjust_inputs= */ false); - // fprintf(stderr, "Prompt: %s\n", prompt.c_str()); + // fprintf(stderr, "try_raw_render: %s\n", prompt.c_str()); return prompt; } catch (const std::exception & e) { - // fprintf(stderr, "Error: %s\n", e.what()); + // fprintf(stderr, "try_raw_render error: %s\n", e.what()); return ""; } } public: + chat_template(const std::string & source, const std::string & bos_token, const std::string & eos_token) : source_(source), bos_token_(bos_token), eos_token_(eos_token) { @@ -58,69 +68,120 @@ class chat_template { /* .lstrip_blocks = */ true, /* .keep_trailing_newline = */ false, }); - supports_tools_ = source.find("tools") != std::string::npos; - auto renders_string_arguments = - try_raw_render({ - { - {"role", "user"}, - {"content", "Hey"} - }, - { - {"role", "assistant"}, - {"tool_calls", json::array({ - { - {"id", "call_1___"}, - {"type", "function"}, - {"function", { - {"arguments", "{\"code\": \"print('Hello, World!')\"}"}, - {"name", "ipython"}, + auto contains = [](const std::string & haystack, const std::string & needle) { + return haystack.find(needle) != std::string::npos; + }; + + const std::string user_needle = ""; + const std::string sys_needle = ""; + const json dummy_str_user_msg = {{"role", "user"}, {"content", user_needle}}; + const json dummy_typed_user_msg = {{"role", "user"}, {"content", json::array({{{"type", "text"}, {"text", user_needle}}})}}; + + caps_.requires_typed_content = + !contains(try_raw_render(json::array({dummy_str_user_msg}), {}, false), user_needle) + && contains(try_raw_render(json::array({dummy_typed_user_msg}), {}, false), user_needle); + + const auto dummy_user_msg = caps_.requires_typed_content + ? dummy_typed_user_msg + : dummy_str_user_msg; + const json needle_system_msg = { + {"role", "system"}, + {"content", caps_.requires_typed_content ? json::array({{{"type", "text"}, {"text", sys_needle}}}) : json(sys_needle)}, + }; + + caps_.supports_system_role = contains(try_raw_render({needle_system_msg, dummy_user_msg,}, {}, false), sys_needle); + + auto out = try_raw_render(json::array({ + dummy_user_msg + }), json::array({ + { + {"name", "some_tool"}, + {"type", "function"}, + {"function", { + {"name", "some_tool"}, + {"description", "Some tool."}, + {"parameters", { + {"type", "object"}, + {"properties", { + {"arg", { + {"type", "string"}, + {"description", "Some argument."}, }}, - }, - })}, + }}, + {"required", json::array({ "arg" })}, + }}, + }}, + }, + }), false); + caps_.supports_tools = contains(out, "some_tool"); + + auto make_tool_calls_msg = [&](const json & tool_calls) { + return json { + {"role", "assistant"}, + {"content", nullptr}, + {"tool_calls", tool_calls}, + }; + }; + auto make_tool_call = [](const std::string & tool_name, const json & arguments) { + return json { + {"id", "call_1___"}, + {"type", "function"}, + {"function", { + {"arguments", arguments}, + {"name", tool_name}, + }}, + }; + }; + const json dummy_args_obj {{"argument_needle", "print('Hello, World!')"}}; + + // Note: the arguments are rendered in both cases, but may be double-escaped, which we don't want. + out = try_raw_render(json::array({ + dummy_user_msg, + make_tool_calls_msg(json::array({make_tool_call("ipython", dummy_args_obj.dump())})), + }), {}, false); + auto tool_call_renders_str_arguments = contains(out, "\"argument_needle\":") || contains(out, "'argument_needle':"); + out = try_raw_render(json::array({ + dummy_user_msg, + make_tool_calls_msg(json::array({make_tool_call("ipython", dummy_args_obj)})), + }), {}, false); + auto tool_call_renders_obj_arguments = contains(out, "\"argument_needle\":") || contains(out, "'argument_needle':"); + + caps_.supports_tool_calls = tool_call_renders_str_arguments || tool_call_renders_obj_arguments; + caps_.requires_object_arguments = !tool_call_renders_str_arguments && tool_call_renders_obj_arguments; + auto out_empty = try_raw_render(json::array({dummy_user_msg, {{"role", "assistant"}, {"content", ""}}}), {}, false); + auto out_null = try_raw_render(json::array({dummy_user_msg, {{"role", "assistant"}, {"content", nullptr}}}), {}, false); + caps_.requires_non_null_content = contains(out_empty, user_needle) && !contains(out_null, user_needle); + + if (caps_.supports_tool_calls) { + auto dummy_args = caps_.requires_object_arguments ? dummy_args_obj : json(dummy_args_obj.dump()); + auto tc1 = make_tool_call("test_tool1", dummy_args); + auto tc2 = make_tool_call("test_tool2", dummy_args); + auto out = try_raw_render(json::array({ + dummy_user_msg, + make_tool_calls_msg(json::array({tc1, tc2})), + }), {}, false); + caps_.supports_parallel_tool_calls = contains(out, "test_tool1") && contains(out, "test_tool2"); + + out = try_raw_render(json::array({ + dummy_user_msg, + make_tool_calls_msg(json::array({tc1})), + { + {"role", "tool"}, + {"name", "test_tool1"}, + {"content", "Some response!"}, + {"tool_call_id", "call_911_"}, } - }, {}, false).find("{\"code\": \"print") != std::string::npos; - if (!renders_string_arguments) { - auto renders_object_arguments = - try_raw_render({ - { - {"role", "user"}, - {"content", "Hey"} - }, - { - {"role", "assistant"}, - {"tool_calls", json::array({ - { - {"id", "call_1___"}, - {"type", "function"}, - {"function", { - {"arguments", { - {"code", "print('Hello, World!')"}, - }}, - {"name", "ipython"}, - }}, - }, - })}, - } - }, {}, false).find("{\"code\": \"print") != std::string::npos; - requires_object_arguments_ = renders_object_arguments; + }), {}, false); + caps_.supports_tool_responses = contains(out, "Some response!"); + caps_.supports_tool_call_id = contains(out, "call_911_"); } - supports_parallel_tool_calls_ = source.find("tool_call_id") != std::string::npos; - - supports_system_role_ = try_raw_render({ - {{"role", "system"}, {"content", ""}}, - {{"role", "user"}, {"content", "Hey"}} - }, {}, false).find("") != std::string::npos; - - requires_typed_content_ = try_raw_render({{{"role", "user"}, {"content", "Hey"}}}, {}, false).find("Hey") == std::string::npos - && try_raw_render({{{"role", "user"}, {"content", {{{"type", "text"}, {"text", "Hey"}}}}}}, {}, false).find("Hey") != std::string::npos; } const std::string & source() const { return source_; } const std::string & bos_token() const { return bos_token_; } const std::string & eos_token() const { return eos_token_; } - bool supports_tools() const { return supports_tools_; } - bool supports_parallel_tool_calls() const { return supports_parallel_tool_calls_; } + const chat_template_caps & original_caps() const { return caps_; } std::string apply( const nlohmann::ordered_json & messages, @@ -131,13 +192,19 @@ class chat_template { { json actual_messages; - // First, "fix" messages so they have a chance to be rendered correctly by the template - - if (adjust_inputs && (requires_object_arguments_ || !supports_system_role_ || !supports_tools_ || requires_typed_content_)) { + auto needs_adjustments = adjust_inputs && (false + || !caps_.supports_system_role + || !caps_.supports_tools + || !caps_.supports_tool_responses + || !caps_.supports_tool_calls + || caps_.requires_object_arguments + || caps_.requires_typed_content + ); + if (needs_adjustments) { actual_messages = json::array(); auto add_message = [&](const json & msg) { - if (requires_typed_content_ && msg.contains("content") && !msg.at("content").is_null() && msg.at("content").is_string()) { + if (caps_.requires_typed_content && msg.contains("content") && !msg.at("content").is_null() && msg.at("content").is_string()) { actual_messages.push_back({ {"role", msg.at("role")}, {"content", {{ @@ -160,7 +227,9 @@ class chat_template { pending_system.clear(); } }; - for (const auto & message_ : messages) { + auto needs_tools_in_system = !tools.is_null() && tools.size() > 0 && !caps_.supports_tools; + + for (const auto & message_ : needs_tools_in_system ? add_system(messages, "Available tools: " + tools.dump(2)) : messages) { auto message = message_; if (!message.contains("role") || !message.contains("content")) { throw std::runtime_error("message must have 'role' and 'content' fields: " + message.dump()); @@ -168,16 +237,22 @@ class chat_template { std::string role = message.at("role"); if (message.contains("tool_calls")) { - if (requires_object_arguments_ || !supports_tools_) { + if (caps_.requires_object_arguments || !caps_.supports_tool_calls) { for (auto & tool_call : message.at("tool_calls")) { if (tool_call["type"] == "function") { auto & function = tool_call.at("function"); - std::string arguments = function.at("arguments"); - function["arguments"] = json::parse(arguments); + auto & arguments = function.at("arguments"); + if (arguments.is_string()) { + try { + arguments = json::parse(arguments.get()); + } catch (const std::exception & ecvt) { + fprintf(stderr, "Failed to parse arguments: %s\n", ecvt.what()); + } + } } } } - if (!supports_tools_) { + if (!caps_.supports_tool_calls) { auto content = message.at("content"); auto tool_calls = json::array(); for (const auto & tool_call : message.at("tool_calls")) { @@ -204,7 +279,7 @@ class chat_template { message.erase("tool_calls"); } } - if (!supports_tools_ && role == "tool") { + if (!caps_.supports_tool_responses && role == "tool") { message["role"] = "user"; auto obj = json { {"tool_response", { @@ -219,7 +294,7 @@ class chat_template { message.erase("name"); } - if (!message["content"].is_null() && !supports_system_role_) { + if (!message["content"].is_null() && !caps_.supports_system_role) { std::string content = message.at("content"); if (role == "system") { if (!pending_system.empty()) pending_system += "\n"; @@ -238,7 +313,9 @@ class chat_template { } add_message(message); } - flush_sys(); + if (!caps_.supports_system_role) { + flush_sys(); + } } else { actual_messages = messages; } @@ -261,7 +338,28 @@ class chat_template { } } - return template_root_->render(context); + auto ret = template_root_->render(context); + // fprintf(stderr, "actual_messages: %s\n", actual_messages.dump(2).c_str()); + // fprintf(stderr, "apply: %s\n\n", ret.c_str()); + return ret; + } + + static nlohmann::ordered_json add_system(const nlohmann::ordered_json & messages, const std::string & system_prompt) { + json messages_with_system = messages; + + if (messages_with_system.size() > 0 && messages_with_system[0].at("role") == "system") { + std::string existing_system = messages_with_system.at(0).at("content"); + messages_with_system[0] = json { + {"role", "system"}, + {"content", existing_system + "\n" + system_prompt}, + }; + } else { + messages_with_system.insert(messages_with_system.begin(), json { + {"role", "system"}, + {"content", system_prompt}, + }); + } + return messages_with_system; } }; diff --git a/common/minja.hpp b/common/minja.hpp index 80bdd4b41..f0e80fd7c 100644 --- a/common/minja.hpp +++ b/common/minja.hpp @@ -628,7 +628,7 @@ class Context : public std::enable_shared_from_this { if (parent_) return parent_->contains(key); return false; } - virtual void set(const Value & key, Value & value) { + virtual void set(const Value & key, const Value & value) { values_.set(key, value); } }; @@ -2648,31 +2648,34 @@ inline std::shared_ptr Context::builtins() { return filter.call(context, actual_args); }); }; - // https://jinja.palletsprojects.com/en/3.0.x/templates/#jinja-filters.reject - globals.set("reject", Value::callable([=](const std::shared_ptr & context, ArgumentsValue & args) { - args.expectArgs("reject", {2, (std::numeric_limits::max)()}, {0, 0}); - auto & items = args.args[0]; - auto filter_fn = context->get(args.args[1]); - if (filter_fn.is_null()) throw std::runtime_error("Undefined filter: " + args.args[1].dump()); + auto select_or_reject = [make_filter](bool is_select) { + return Value::callable([=](const std::shared_ptr & context, ArgumentsValue & args) { + args.expectArgs(is_select ? "select" : "reject", {2, (std::numeric_limits::max)()}, {0, 0}); + auto & items = args.args[0]; + auto filter_fn = context->get(args.args[1]); + if (filter_fn.is_null()) throw std::runtime_error("Undefined filter: " + args.args[1].dump()); - auto filter_args = Value::array(); - for (size_t i = 2, n = args.args.size(); i < n; i++) { - filter_args.push_back(args.args[i]); - } - auto filter = make_filter(filter_fn, filter_args); - - auto res = Value::array(); - for (size_t i = 0, n = items.size(); i < n; i++) { - auto & item = items.at(i); - ArgumentsValue filter_args; - filter_args.args.emplace_back(item); - auto pred_res = filter.call(context, filter_args); - if (!pred_res.to_bool()) { - res.push_back(item); + auto filter_args = Value::array(); + for (size_t i = 2, n = args.args.size(); i < n; i++) { + filter_args.push_back(args.args[i]); } - } - return res; - })); + auto filter = make_filter(filter_fn, filter_args); + + auto res = Value::array(); + for (size_t i = 0, n = items.size(); i < n; i++) { + auto & item = items.at(i); + ArgumentsValue filter_args; + filter_args.args.emplace_back(item); + auto pred_res = filter.call(context, filter_args); + if (pred_res.to_bool() == (is_select ? true : false)) { + res.push_back(item); + } + } + return res; + }); + }; + globals.set("select", select_or_reject(/* is_select= */ true)); + globals.set("reject", select_or_reject(/* is_select= */ false)); globals.set("map", Value::callable([=](const std::shared_ptr & context, ArgumentsValue & args) { auto res = Value::array(); if (args.args.size() == 1 && @@ -2720,41 +2723,45 @@ inline std::shared_ptr Context::builtins() { if (!text.empty() && text.back() == '\n') out += "\n"; return out; })); - globals.set("selectattr", Value::callable([=](const std::shared_ptr & context, ArgumentsValue & args) { - args.expectArgs("selectattr", {2, (std::numeric_limits::max)()}, {0, 0}); - auto & items = args.args[0]; - if (items.is_null()) - return Value::array(); - auto attr_name = args.args[1].get(); + auto select_or_reject_attr = [](bool is_select) { + return Value::callable([=](const std::shared_ptr & context, ArgumentsValue & args) { + args.expectArgs(is_select ? "selectattr" : "rejectattr", {2, (std::numeric_limits::max)()}, {0, 0}); + auto & items = args.args[0]; + if (items.is_null()) + return Value::array(); + auto attr_name = args.args[1].get(); - bool has_test = false; - Value test_fn; - ArgumentsValue test_args {{Value()}, {}}; - if (args.args.size() >= 3) { - has_test = true; - test_fn = context->get(args.args[2]); - if (test_fn.is_null()) throw std::runtime_error("Undefined test: " + args.args[2].dump()); - for (size_t i = 3, n = args.args.size(); i < n; i++) { - test_args.args.emplace_back(args.args[i]); - } - test_args.kwargs = args.kwargs; - } - - auto res = Value::array(); - for (size_t i = 0, n = items.size(); i < n; i++) { - auto & item = items.at(i); - auto attr = item.get(attr_name); - if (has_test) { - test_args.args[0] = attr; - if (test_fn.call(context, test_args).to_bool()) { - res.push_back(item); + bool has_test = false; + Value test_fn; + ArgumentsValue test_args {{Value()}, {}}; + if (args.args.size() >= 3) { + has_test = true; + test_fn = context->get(args.args[2]); + if (test_fn.is_null()) throw std::runtime_error("Undefined test: " + args.args[2].dump()); + for (size_t i = 3, n = args.args.size(); i < n; i++) { + test_args.args.emplace_back(args.args[i]); } - } else { - res.push_back(attr); + test_args.kwargs = args.kwargs; } - } - return res; - })); + + auto res = Value::array(); + for (size_t i = 0, n = items.size(); i < n; i++) { + auto & item = items.at(i); + auto attr = item.get(attr_name); + if (has_test) { + test_args.args[0] = attr; + if (test_fn.call(context, test_args).to_bool() == (is_select ? true : false)) { + res.push_back(item); + } + } else { + res.push_back(attr); + } + } + return res; + }); + }; + globals.set("selectattr", select_or_reject_attr(/* is_select= */ true)); + globals.set("rejectattr", select_or_reject_attr(/* is_select= */ false)); globals.set("range", Value::callable([=](const std::shared_ptr &, ArgumentsValue & args) { std::vector startEndStep(3); std::vector param_set(3); From c300e68ef490e6cf6c04ed96fd27a6a53ab8a422 Mon Sep 17 00:00:00 2001 From: uvos Date: Wed, 29 Jan 2025 17:46:23 +0100 Subject: [PATCH 198/279] CUDA/HIP: add warp_size to cuda_device_info --- ggml/src/ggml-cuda/common.cuh | 1 + ggml/src/ggml-cuda/ggml-cuda.cu | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index a66322da0..eec227dce 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -520,6 +520,7 @@ struct ggml_cuda_device_info { bool vmm; // virtual memory support size_t vmm_granularity; // granularity of virtual memory size_t total_vram; + int warp_size; // Number of threads in a dispatch }; cuda_device_info devices[GGML_CUDA_MAX_DEVICES] = {}; diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index de3f9c2ca..ecf06fec4 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -242,6 +242,7 @@ static ggml_cuda_device_info ggml_cuda_init() { info.devices[id].nsm = prop.multiProcessorCount; info.devices[id].smpb = prop.sharedMemPerBlock; + info.devices[id].warp_size = prop.warpSize; #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) info.devices[id].smpbo = prop.sharedMemPerBlock; @@ -256,8 +257,9 @@ static ggml_cuda_device_info ggml_cuda_init() { info.devices[id].cc += prop.minor * 0x10; } } - GGML_LOG_INFO(" Device %d: %s, %s (0x%x), VMM: %s\n", - id, prop.name, prop.gcnArchName, info.devices[id].cc & 0xffff, device_vmm ? "yes" : "no"); + GGML_LOG_INFO(" Device %d: %s, %s (0x%x), VMM: %s, Wave Size: %d\n", + id, prop.name, prop.gcnArchName, info.devices[id].cc & 0xffff, + device_vmm ? "yes" : "no", prop.warpSize); #else info.devices[id].smpbo = prop.sharedMemPerBlockOptin; info.devices[id].cc = 100*prop.major + 10*prop.minor; From 6af1ca48cbdf9a438438afd0a9a549a272bc95bf Mon Sep 17 00:00:00 2001 From: uvos Date: Wed, 29 Jan 2025 19:12:42 +0100 Subject: [PATCH 199/279] HIP: Prepare reduction operators for wave 64 --- ggml/src/ggml-cuda/common.cuh | 59 +++++++++++++++------------------ ggml/src/ggml-cuda/ggml-cuda.cu | 4 +-- 2 files changed, 28 insertions(+), 35 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index eec227dce..8d8d3932e 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -190,53 +190,46 @@ static __device__ void no_device_code( #define NO_DEVICE_CODE //GGML_ABORT("NO_DEVICE_CODE not valid in host code.") #endif // __CUDA_ARCH__ +template static __device__ __forceinline__ int warp_reduce_sum(int x) { #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE return __reduce_add_sync(0xffffffff, x); #else #pragma unroll - for (int offset = 16; offset > 0; offset >>= 1) { - x += __shfl_xor_sync(0xffffffff, x, offset, 32); + for (int offset = width/2; offset > 0; offset >>= 1) { + x += __shfl_xor_sync(0xffffffff, x, offset, width); } return x; #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE } +template static __device__ __forceinline__ float warp_reduce_sum(float x) { #pragma unroll - for (int offset = 16; offset > 0; offset >>= 1) { - x += __shfl_xor_sync(0xffffffff, x, offset, 32); + for (int offset = width/2; offset > 0; offset >>= 1) { + x += __shfl_xor_sync(0xffffffff, x, offset, width); } return x; } +template static __device__ __forceinline__ float2 warp_reduce_sum(float2 a) { #pragma unroll - for (int offset = 16; offset > 0; offset >>= 1) { - a.x += __shfl_xor_sync(0xffffffff, a.x, offset, 32); - a.y += __shfl_xor_sync(0xffffffff, a.y, offset, 32); + for (int offset = width/2; offset > 0; offset >>= 1) { + a.x += __shfl_xor_sync(0xffffffff, a.x, offset, width); + a.y += __shfl_xor_sync(0xffffffff, a.y, offset, width); } return a; } +template static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) { #ifdef FP16_AVAILABLE - -#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) #pragma unroll - for (int offset = 16; offset > 0; offset >>= 1) { - const half2 a_other = __shfl_xor_sync(0xffffffff, a, offset, 32); - reinterpret_cast(a.x) += __low2half(a_other); - reinterpret_cast(a.y) += __high2half(a_other); + for (int offset = width/2; offset > 0; offset >>= 1) { + a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, offset, width)); } return a; -#else -#pragma unroll - for (int offset = 16; offset > 0; offset >>= 1) { - a = __hadd2(a, __shfl_xor_sync(0xffffffff, a, offset, 32)); - } - return a; -#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) #else NO_DEVICE_CODE; @@ -244,10 +237,11 @@ static __device__ __forceinline__ half2 warp_reduce_sum(half2 a) { #endif // FP16_AVAILABLE } +template static __device__ __forceinline__ float warp_reduce_max(float x) { #pragma unroll - for (int offset = 16; offset > 0; offset >>= 1) { - x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, offset, 32)); + for (int offset = width/2; offset > 0; offset >>= 1) { + x = fmaxf(x, __shfl_xor_sync(0xffffffff, x, offset, width)); } return x; } @@ -269,35 +263,34 @@ static __device__ __forceinline__ half ggml_cuda_hmax(const half a, const half b } static __device__ __forceinline__ half2 ggml_cuda_hmax2(const half2 a, const half2 b) { -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) - -#if CUDART_VERSION >= CUDART_HMAX +#if defined(GGML_USE_HIP) && HIP_VERSION >= 50700000 + return half2(__hmax(a.x, b.x), __hmax(a.y, b.y)); +#elif !defined(GGML_USE_HIP) && CUDART_VERSION >= CUDART_HMAX return __hmax2(a, b); -#else +#elif !defined(GGML_USE_HIP) half2 ret; reinterpret_cast(ret.x) = __float2half(fmaxf( __low2float(a), __low2float(b))); reinterpret_cast(ret.y) = __float2half(fmaxf(__high2float(a), __high2float(b))); return ret; -#endif // CUDART_VERSION >= CUDART_HMAX - #else GGML_UNUSED(a); GGML_UNUSED(b); NO_DEVICE_CODE; -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +#endif } +template static __device__ __forceinline__ half2 warp_reduce_max(half2 x) { -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL +#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL || (defined(GGML_USE_HIP) && HIP_VERSION >= 50700000) #pragma unroll - for (int offset = 16; offset > 0; offset >>= 1) { - x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, offset, 32)); + for (int offset = width/2; offset > 0; offset >>= 1) { + x = ggml_cuda_hmax2(x, __shfl_xor_sync(0xffffffff, x, offset, width)); } return x; #else GGML_UNUSED(x); NO_DEVICE_CODE; -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL +#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_PASCAL || (defined(GGML_USE_HIP) && HIP_VERSION >= 50700000) } #if CUDART_VERSION < CUDART_HMASK diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index ecf06fec4..383131c77 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -240,8 +240,8 @@ static ggml_cuda_device_info ggml_cuda_init() { info.default_tensor_split[id] = total_vram; total_vram += prop.totalGlobalMem; - info.devices[id].nsm = prop.multiProcessorCount; - info.devices[id].smpb = prop.sharedMemPerBlock; + info.devices[id].nsm = prop.multiProcessorCount; + info.devices[id].smpb = prop.sharedMemPerBlock; info.devices[id].warp_size = prop.warpSize; #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) info.devices[id].smpbo = prop.sharedMemPerBlock; From 27d135c970c00f655d486f870edacded792bef5c Mon Sep 17 00:00:00 2001 From: uvos Date: Wed, 29 Jan 2025 19:36:00 +0100 Subject: [PATCH 200/279] HIP: require at least HIP 5.5 --- ggml/src/ggml-hip/CMakeLists.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt index ecc3bc66d..7a877bdc1 100644 --- a/ggml/src/ggml-hip/CMakeLists.txt +++ b/ggml/src/ggml-hip/CMakeLists.txt @@ -40,6 +40,10 @@ find_package(hip REQUIRED) find_package(hipblas REQUIRED) find_package(rocblas REQUIRED) +if (${hip_VERSION} VERSION_LESS 5.5) + message(FATAL_ERROR "At least ROCM/HIP V5.5 is required") +endif() + message(STATUS "HIP and hipBLAS found") file(GLOB GGML_HEADERS_ROCM "../ggml-cuda/*.cuh") From 8b576b6c55bc4e6be898b47522f0ef402b93ef62 Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Thu, 30 Jan 2025 19:13:58 +0000 Subject: [PATCH 201/279] Tool call support (generic + native for Llama, Functionary, Hermes, Mistral, Firefunction, DeepSeek) w/ lazy grammars (#9639) --------- Co-authored-by: Xuan Son Nguyen Co-authored-by: Georgi Gerganov Co-authored-by: Xuan Son Nguyen --- .editorconfig | 8 + .github/workflows/server.yml | 2 +- Makefile | 9 + README.md | 1 + common/CMakeLists.txt | 2 + common/chat.cpp | 848 ++++++++++++++++++ common/chat.hpp | 50 ++ common/common.cpp | 22 +- common/common.h | 17 +- common/json-schema-to-grammar.cpp | 15 +- common/json-schema-to-grammar.h | 9 +- common/sampling.cpp | 11 +- examples/gbnf-validator/gbnf-validator.cpp | 2 +- examples/main/main.cpp | 27 +- examples/server/README.md | 76 ++ examples/server/server.cpp | 187 ++-- examples/server/tests/README.md | 13 +- examples/server/tests/pytest.ini | 4 + examples/server/tests/tests.sh | 11 +- .../server/tests/unit/test_chat_completion.py | 11 +- examples/server/tests/unit/test_tool_call.py | 352 ++++++++ examples/server/tests/utils.py | 6 +- examples/server/utils.hpp | 80 +- include/llama.h | 12 + ...reForAI-c4ai-command-r-plus-tool_use.jinja | 202 +++++ ...rch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja | 152 ++++ ...earch-Hermes-3-Llama-3.1-8B-tool_use.jinja | 152 ++++ .../templates/Qwen-Qwen2.5-7B-Instruct.jinja | 54 ++ ...seek-ai-DeepSeek-R1-Distill-Llama-8B.jinja | 1 + ...seek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja | 56 ++ ...fireworks-ai-llama-3-firefunction-v2.jinja | 57 ++ models/templates/google-gemma-2-2b-it.jinja | 4 + .../meetkai-functionary-medium-v3.1.jinja | 58 ++ .../meetkai-functionary-medium-v3.2.jinja | 287 ++++++ .../meta-llama-Llama-3.1-8B-Instruct.jinja | 109 +++ .../meta-llama-Llama-3.2-3B-Instruct.jinja | 93 ++ .../meta-llama-Llama-3.3-70B-Instruct.jinja | 109 +++ .../microsoft-Phi-3.5-mini-instruct.jinja | 8 + ...mistralai-Mistral-Nemo-Instruct-2407.jinja | 87 ++ scripts/fetch_server_test_models.py | 105 +++ ..._chat_template.py => get_chat_template.py} | 12 +- src/llama-grammar.cpp | 88 +- src/llama-grammar.h | 23 +- src/llama-sampling.cpp | 51 +- tests/CMakeLists.txt | 1 + tests/test-chat-template.cpp | 10 +- tests/test-chat.cpp | 521 +++++++++++ tests/test-grammar-integration.cpp | 2 +- 48 files changed, 3861 insertions(+), 156 deletions(-) create mode 100644 common/chat.cpp create mode 100644 common/chat.hpp create mode 100644 examples/server/tests/pytest.ini create mode 100644 examples/server/tests/unit/test_tool_call.py create mode 100644 models/templates/CohereForAI-c4ai-command-r-plus-tool_use.jinja create mode 100644 models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja create mode 100644 models/templates/NousResearch-Hermes-3-Llama-3.1-8B-tool_use.jinja create mode 100644 models/templates/Qwen-Qwen2.5-7B-Instruct.jinja create mode 100644 models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja create mode 100644 models/templates/deepseek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja create mode 100644 models/templates/fireworks-ai-llama-3-firefunction-v2.jinja create mode 100644 models/templates/google-gemma-2-2b-it.jinja create mode 100644 models/templates/meetkai-functionary-medium-v3.1.jinja create mode 100644 models/templates/meetkai-functionary-medium-v3.2.jinja create mode 100644 models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja create mode 100644 models/templates/meta-llama-Llama-3.2-3B-Instruct.jinja create mode 100644 models/templates/meta-llama-Llama-3.3-70B-Instruct.jinja create mode 100644 models/templates/microsoft-Phi-3.5-mini-instruct.jinja create mode 100644 models/templates/mistralai-Mistral-Nemo-Instruct-2407.jinja create mode 100755 scripts/fetch_server_test_models.py rename scripts/{get_hf_chat_template.py => get_chat_template.py} (86%) mode change 100755 => 100644 create mode 100644 tests/test-chat.cpp diff --git a/.editorconfig b/.editorconfig index eac38a15f..5d63d0a51 100644 --- a/.editorconfig +++ b/.editorconfig @@ -40,3 +40,11 @@ indent_style = tab [examples/cvector-generator/*.txt] trim_trailing_whitespace = unset insert_final_newline = unset + +[models/templates/*.jinja] +indent_style = unset +indent_size = unset +end_of_line = unset +charset = unset +trim_trailing_whitespace = unset +insert_final_newline = unset diff --git a/.github/workflows/server.yml b/.github/workflows/server.yml index ed1c357a5..0cbc3d640 100644 --- a/.github/workflows/server.yml +++ b/.github/workflows/server.yml @@ -205,7 +205,7 @@ jobs: run: | cd examples/server/tests $env:PYTHONIOENCODING = ":replace" - pytest -v -x + pytest -v -x -m "not slow" - name: Slow tests id: server_integration_tests_slow diff --git a/Makefile b/Makefile index 295522ba3..ef152d246 100644 --- a/Makefile +++ b/Makefile @@ -52,6 +52,7 @@ TEST_TARGETS = \ tests/test-arg-parser \ tests/test-autorelease \ tests/test-backend-ops \ + tests/test-chat \ tests/test-chat-template \ tests/test-double-float \ tests/test-grammar-integration \ @@ -983,6 +984,7 @@ OBJ_COMMON = \ $(DIR_COMMON)/ngram-cache.o \ $(DIR_COMMON)/sampling.o \ $(DIR_COMMON)/speculative.o \ + $(DIR_COMMON)/chat.o \ $(DIR_COMMON)/build-info.o \ $(DIR_COMMON)/json-schema-to-grammar.o @@ -1361,6 +1363,8 @@ llama-server: \ examples/server/httplib.h \ examples/server/index.html.hpp \ examples/server/loading.html.hpp \ + common/chat.cpp \ + common/chat.hpp \ common/chat-template.hpp \ common/json.hpp \ common/minja.hpp \ @@ -1471,6 +1475,11 @@ tests/test-json-schema-to-grammar: tests/test-json-schema-to-grammar.cpp \ $(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) +tests/test-chat: tests/test-chat.cpp \ + $(OBJ_ALL) + $(CXX) $(CXXFLAGS) -Iexamples/server -c $< -o $(call GET_OBJ_FILE, $<) + $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) + tests/test-opt: tests/test-opt.cpp \ $(OBJ_GGML) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) diff --git a/README.md b/README.md index 382c67041..d40309875 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,7 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others) - **How to use [MTLResidencySet](https://developer.apple.com/documentation/metal/mtlresidencyset?language=objc) to keep the GPU memory active?** https://github.com/ggerganov/llama.cpp/pull/11427 - **VS Code extension for FIM completions:** https://github.com/ggml-org/llama.vscode +- Universal tool call support in `llama-server`: https://github.com/ggerganov/llama.cpp/pull/9639 - Vim/Neovim plugin for FIM completions: https://github.com/ggml-org/llama.vim - Introducing GGUF-my-LoRA https://github.com/ggerganov/llama.cpp/discussions/10123 - Hugging Face Inference Endpoints now support GGUF out of the box! https://github.com/ggerganov/llama.cpp/discussions/9669 diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 24b7f8741..72f0915c1 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -56,6 +56,8 @@ add_library(${TARGET} STATIC arg.cpp arg.h base64.hpp + chat.cpp + chat.hpp chat-template.hpp common.cpp common.h diff --git a/common/chat.cpp b/common/chat.cpp new file mode 100644 index 000000000..d9a654892 --- /dev/null +++ b/common/chat.cpp @@ -0,0 +1,848 @@ +#include "chat.hpp" +#include "chat-template.hpp" +#include "json-schema-to-grammar.h" +#include "log.h" +#include "minja.hpp" + +std::string common_chat_format_name(common_chat_format format) { + switch (format) { + case COMMON_CHAT_FORMAT_CONTENT_ONLY: return "Content-only"; + case COMMON_CHAT_FORMAT_GENERIC: return "Generic"; + case COMMON_CHAT_FORMAT_MISTRAL_NEMO: return "Mistral Nemo"; + case COMMON_CHAT_FORMAT_LLAMA_3_X: return "Llama 3.x"; + case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS: return "Llama 3.x with builtin tools"; + case COMMON_CHAT_FORMAT_DEEPSEEK_R1: return "DeepSeek R1"; + case COMMON_CHAT_FORMAT_FIREFUNCTION_V2: return "FireFunction v2"; + case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return "Functionary v3.2"; + case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: return "Functionary v3.1 Llama 3.1"; + case COMMON_CHAT_FORMAT_HERMES_2_PRO: return "Hermes 2 Pro"; + default: + throw std::runtime_error("Unknown chat format"); + } +} + +const common_grammar_options grammar_options { + /* .dotall = */ false, + /* .compact_spaces = */ false, + // /* .compact_spaces = */ true, +}; + +static bool parse_json(std::string::const_iterator & it, const std::string::const_iterator & end, json & out) { + // // https://json.nlohmann.me/features/parsing/sax_interface/ + struct json_error_locator : public nlohmann::json_sax { + std::size_t position; + bool found_error; + + json_error_locator() : position(0), found_error(false) {} + + bool parse_error(std::size_t position, const std::string &, const json::exception &) override { + this->position = position - 1; + this->found_error = true; + return false; + } + bool null() override { return true; } + bool boolean(bool) override { return true; } + bool number_integer(number_integer_t) override { return true; } + bool number_unsigned(number_unsigned_t) override { return true; } + bool number_float(number_float_t, const string_t &) override { return true; } + bool string(string_t &) override { return true; } + bool binary(binary_t &) override { return true; } + bool start_object(std::size_t) override { return true; } + bool key(string_t &) override { return true; } + bool end_object() override { return true; } + bool start_array(std::size_t) override { return true; } + bool end_array() override { return true; } + }; + json_error_locator err_loc; + json::sax_parse(it, end, &err_loc); + + std::string::const_iterator temptative_end; + if (err_loc.found_error) { + temptative_end = it + err_loc.position; + } else { + temptative_end = end; + } + std::string json_sub {it, temptative_end}; + try { + out = json::parse(json_sub); + it = temptative_end; + return true; + } catch (const std::exception &) { + return false; + } +} + + +/** + * Takes a prefix regex that must have 1 group to capture the function name, a closing suffix, and expects json parameters in between. + * Aggregates the prefix, suffix and in-between text into the content. + */ +static common_chat_msg parse_json_tool_calls( + const std::string& input, + const std::optional & trigger_opt, + const std::regex & function_regex, + const std::regex & close_regex) { + std::smatch match; + + common_chat_msg result; + result.role = "assistant"; + + + auto end = input.end(); + auto it = input.begin(); + + if (trigger_opt) { + if (!std::regex_search(it, end, match, *trigger_opt)) { + result.content = input; + return result; + } + result.content = match.prefix().str(); + it = match.suffix().first; + } + + while (it != end) { + std::sregex_iterator rend; + std::sregex_iterator rit(it, end, function_regex); + if (rit == rend) { + fprintf(stderr, "No more tool calls found\n"); + result.content += std::string(it, end); + break; + } + auto name = rit->str(1); + result.content += std::string(it, rit->prefix().second); + it = rit->suffix().first; + + json arguments; + if (!parse_json(it, end, arguments)) { + throw std::runtime_error("Failed to parse json tool call arguments"); + } + if (!std::regex_search(it, end, match, close_regex)) { + throw std::runtime_error("Malformed input, missing closing pattern"); + } + it = match.suffix().first; + result.tool_calls.push_back({name, arguments.is_string() ? arguments.get() : arguments.dump(), /* id= */ ""}); + } + return result; +} + +static common_chat_msg parse_prefixed_json_tool_call_array(const std::string& input, const std::string & prefix, size_t rstrip_prefix = 0) { + auto content_end = input.find(prefix); + size_t tc_start = std::string::npos; + + common_chat_msg result; + result.role = "assistant"; + const auto process_tool_calls = [&](const json & tool_calls) { + for (const auto & tool_call : tool_calls) { + const auto & arguments = tool_call["arguments"]; + result.tool_calls.push_back({ + tool_call["name"], + arguments.is_string() ? arguments.get() : arguments.dump(), + tool_call.contains("id") ? tool_call["id"] : "", + }); + } + }; + if (content_end == std::string::npos) { + result.content = input; + } else { + tc_start = content_end + prefix.size() - rstrip_prefix; + result.content = input.substr(0, content_end); + auto tool_calls = json::parse(input.substr(tc_start)); + process_tool_calls(tool_calls); + } + return result; +} + +static void foreach_function(const json & tools, const std::function & fn) { + for (const auto & tool : tools) { + if (!tool.contains("type") || tool["type"] != "function" || !tool.contains("function")) { + LOG_INF("Skipping tool without function: %s", tool.dump(2).c_str()); + continue; + } + fn(tool); + } +} + +static common_chat_params common_chat_params_init_generic(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + common_chat_params data; + + auto tool_call_schemas = json::array(); + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool["function"]; + auto tool_schema = json { + {"type", "object"}, + {"properties", { + {"name", { + {"type", "string"}, + {"const", function["name"]}, + }}, + {"arguments", function["parameters"]}, + }}, + {"required", json::array({"name", "arguments"})}, + }; + if (function.contains("description")) { + tool_schema["description"] = function["description"]; + } + if (inputs.parallel_tool_calls) { + tool_schema["properties"]["id"] = { + {"type", "string"}, + {"minLength", 4}, + }; + tool_schema["required"].push_back("id"); + } + tool_call_schemas.emplace_back(tool_schema); + }); + const auto tool_call = + inputs.parallel_tool_calls + ? json { + {"type", "object"}, + {"properties", { + {"tool_calls", { + {"type", "array"}, + {"items", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json { + {"anyOf", tool_call_schemas}, + }}, + {"minItems", 1}, + }}, + }}, + {"required", json::array({"tool_calls"})}, + } + : json { + {"type", "object"}, + {"properties", { + {"tool_call", tool_call_schemas.size() == 1 ? tool_call_schemas[0] : json { + {"anyOf", tool_call_schemas}, + }}, + }}, + {"required", json::array({"tool_call"})}, + }; + const auto schema = + inputs.tool_choice != "required" + ? json { + {"anyOf", json::array({ + tool_call, + { + {"type", "object"}, + {"properties", { + {"response", inputs.json_schema.is_null() + ? json {{"type", "string"}} + : inputs.json_schema + }, + }}, + {"required", json::array({"response"})}, + }, + })} + } + : tool_call; + + data.grammar_lazy = false; + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + builder.add_schema("root", schema); + }, grammar_options); + + auto tweaked_messages = common_chat_template::add_system( + inputs.messages, + "Respond in JSON format, either with `tool_call` (a request to call tools) or with `response` reply to the user's request"); + + data.prompt = tmpl.apply(tweaked_messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt); + data.format = COMMON_CHAT_FORMAT_GENERIC; + return data; +} +static common_chat_msg common_chat_parse_generic(const std::string & input) { + json data = json::parse(input); + common_chat_msg result; + result.role = "assistant"; + if (data.contains("tool_calls")) { + for (const auto & tool_call : data["tool_calls"]) { + result.tool_calls.push_back({ + tool_call["name"], + tool_call["arguments"].dump(), + tool_call.contains("id") ? tool_call["id"] : "", + }); + } + } else if (data.contains("tool_call")) { + result.tool_calls.push_back({ + data["tool_call"]["name"], + data["tool_call"]["arguments"].dump(), + /* id= */ "", + }); + } else if (data.contains("response")) { + const auto & response = data["response"]; + result.content = response.is_string() ? response.get() : response.dump(2); + } + return result; +} + +static common_chat_params common_chat_params_init_mistral_nemo(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + common_chat_params data; + data.grammar_lazy = inputs.tool_choice != "required"; + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + auto schemas = json::array(); + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool["function"]; + schemas.push_back({ + {"type", "object"}, + {"properties", { + // Important note: the model is probably trained to take a JSON stringified arguments value. + // It's hard to constrain that for now (while reusing the JSON schema conversion), so we're just expecting a plain object. + {"name", { + {"type", "string"}, + {"const", function["name"]}, + }}, + {"arguments", function["parameters"]}, + {"id", { + {"type", "string"}, + // Nemo's template expects a 9-character alphanumeric ID. + {"pattern", "^[a-zA-Z0-9]{9}$"}, + }}, + }}, + {"required", json::array({"name", "arguments", "id"})}, + }); + }); + auto schema = json { + {"type", "array"}, + {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}}, + {"minItems", 1}, + }; + if (!inputs.parallel_tool_calls) { + schema["maxItems"] = 1; + } + builder.add_rule("root", "\"[TOOL_CALLS]\" " + builder.add_schema("tool_calls", schema)); + }, grammar_options); + data.grammar_triggers.push_back({"[TOOL_CALLS]", /* .at_start = */ true}); + data.prompt = tmpl.apply(inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt); + data.format = COMMON_CHAT_FORMAT_MISTRAL_NEMO; + return data; +} +static common_chat_msg common_chat_parse_mistral_nemo(const std::string & input) { + return parse_prefixed_json_tool_call_array(input, "[TOOL_CALLS]"); +} + +static void expect_tool_parameters(const std::string & name, const json & parameters, const std::vector & expected_properties) { + if (!parameters.is_object() || !parameters.contains("type") || parameters["type"] != "object" || !parameters.contains("properties") || !parameters.contains("required")) { + throw std::runtime_error("Parameters of tool " + name + " must be an object w/ required properties"); + } + const auto & parameters_properties = parameters.at("properties"); + const auto & parameters_required = parameters.at("required"); + for (const auto & prop : expected_properties) { + if (!parameters_properties.contains(prop)) { + throw std::runtime_error("Parameters of tool " + name + " is missing property: " + prop); + } + if (std::find(parameters_required.begin(), parameters_required.end(), json(prop)) == parameters_required.end()) { + throw std::runtime_error("Parameters of tool " + name + " must have property marked as required: " + prop); + } + } + if (parameters_properties.size() != expected_properties.size()) { + throw std::runtime_error("Parameters of tool " + name + " must only have these properties:" + string_join(expected_properties, ", ")); + } +} + +static common_chat_params common_chat_params_init_llama_3_1_tool_calls(const common_chat_template & tmpl, const struct common_chat_inputs & inputs, bool allow_python_tag_builtin_tools) { + auto builtin_tools = json::array(); + common_chat_params data; + data.grammar_lazy = inputs.tool_choice != "required"; + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + std::vector tool_rules; + + auto handle_builtin_tool = [&](const std::string & name, const json & parameters) { + if (name == "wolfram_alpha") { + // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/wolfram_alpha/wolfram_alpha.py + expect_tool_parameters(name, parameters, {"query"}); + } else if (name == "web_search" || name == "brave_search") { + // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/remote/tool_runtime/brave_search/brave_search.py + expect_tool_parameters(name, parameters, {"query"}); + } else if (name == "python" || name == "code_interpreter") { + // https://github.com/meta-llama/llama-stack/blob/main/llama_stack/providers/inline/tool_runtime/code_interpreter/code_interpreter.py + expect_tool_parameters(name, parameters, {"code"}); + } else { + return false; + } + + std::vector kvs; + for (const auto & [key, value] : parameters.at("properties").items()) { + kvs.push_back("\"" + key + "=\" " + builder.add_schema(name + "-args-" + key, value)); + } + + tool_rules.push_back( + builder.add_rule( + name + "-call", + "\"<|python_tag|>" + name + ".call(\" " + string_join(kvs, " \", \" ") + " \")\"")); + builtin_tools.push_back(name); + + return true; + }; + + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool["function"]; + std::string name = function["name"]; + auto parameters = function["parameters"]; + builder.resolve_refs(parameters); + + // https://github.com/meta-llama/llama-stack/tree/main/llama_stack/providers/remote/tool_runtime + if (allow_python_tag_builtin_tools) { + handle_builtin_tool(name, parameters); + } + tool_rules.push_back( + builder.add_rule( + name + "-call", + "\"{\" ( \"\\\"type\\\": \\\"function\\\", \" | space ) " + "\"\\\"name\\\": \\\"" + name + "\\\", \\\"parameters\\\": \" " + + builder.add_schema(name + "-args", parameters) + + " \"}\"")); + data.grammar_triggers.push_back({"{\"name\": \"" + name + "\"", /* .at_start = */ true}); + }); + data.grammar_triggers.push_back({"{\"name\":", /* .at_start = */ true}); + data.grammar_triggers.push_back({"{\"type\": \"function\"", /* .at_start = */ true}); + if (!builtin_tools.empty()) { + data.grammar_triggers.push_back({"<|python_tag|>", /* .at_start = */ false}); + } + builder.add_rule("root", string_join(tool_rules, " | ")); + }, grammar_options); + data.additional_stops.push_back("<|eom_id|>"); + data.prompt = tmpl.apply(inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt, { + {"tools_in_user_message", false}, + {"builtin_tools", builtin_tools.empty() ? json() : builtin_tools}, + }); + data.format = allow_python_tag_builtin_tools && !builtin_tools.empty() + ? COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS + : COMMON_CHAT_FORMAT_LLAMA_3_X; + return data; +} +static common_chat_msg common_chat_parse_llama_3_1(const std::string & input, bool with_builtin_tools = false) { + // TODO: tighten & simplify the parser, don't accept leading text context. + static std::regex function_regex("\\{[\\s\\n\\r]*(?:\"type\"[\\s\\n\\r]*:[\\s\\n\\r]*\"function\"[\\s\\n\\r]*,[\\s\\n\\r]*|[\\s\\n\\r]*)\"name\"[\\s\\n\\r]*:[\\s\\n\\r]*\"([^\"]+)\"[\\s\\n\\r]*,[\\s\\n\\r]*\"parameters\": "); + static std::regex close_regex("\\}"); + static std::regex builtin_call_regex("<\\|python_tag\\|>([^.(]+)\\.call\\((.*)\\)"); + + if (with_builtin_tools) { + std::smatch match; + if (std::regex_match(input, match, builtin_call_regex)) { + auto name = match[1].str(); + auto raw_args = match[2].str(); + + // TODO: if/when builtin tools start accepting more than 1 argument, use parse_json for real parsing. + auto it_eq = raw_args.find('='); + auto arg_name = raw_args.substr(0, it_eq); + auto arg_value_str = raw_args.substr(it_eq + 1); + auto arg_value = json::parse(arg_value_str); + + return { + /* .role = */ "assistant", + /* .content = */ match.prefix().str(), + /* .tool_calls = */ { + { + /* .name = */ match[1], + /* .arguments = */ (json { + {arg_name, arg_value}, + }).dump(), + /* .id = */ "", + }, + }, + }; + } + } + return parse_json_tool_calls(input, std::nullopt, function_regex, close_regex); +} + +static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + common_chat_params data; + data.grammar_lazy = inputs.tool_choice != "required"; + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + std::vector tool_rules; + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool["function"]; + std::string name = function["name"]; + auto parameters = function["parameters"]; + auto args_rule = builder.add_schema(name + "-args", parameters); + tool_rules.push_back(builder.add_rule(name + "-call", + "\"<|tool▁call▁begin|>function<|tool▁sep|>" + name + "\\n```json\\n\" " + args_rule + " \"```<|tool▁call▁end|>\"")); + }); + data.grammar_triggers.push_back({"<|tool▁calls▁begin|>", /* .at_start = */ false}); + builder.add_rule("root", "\"<|tool▁calls▁begin|>\" (" + string_join(tool_rules, " | ") + ")" + (inputs.parallel_tool_calls ? "*" : "") + " space"); + }, grammar_options); + data.prompt = tmpl.apply(inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt); + data.format = COMMON_CHAT_FORMAT_DEEPSEEK_R1; + return data; +} +static common_chat_msg common_chat_parse_deepseek_r1(const std::string & input) { + static std::regex trigger_regex("<|tool▁calls▁begin|>"); + static std::regex function_regex("<|tool▁call▁begin|>function<|tool▁sep|>([^\n]+)\n```json\n"); + static std::regex close_regex("```<|tool▁call▁end|>"); + return parse_json_tool_calls(input, trigger_regex, function_regex, close_regex); +} + +static common_chat_params common_chat_params_init_firefunction_v2(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + fprintf(stderr, "%s\n", __func__); + common_chat_params data; + data.prompt = tmpl.apply(inputs.messages, /* tools= */ nullptr, inputs.add_generation_prompt, { + {"datetime", "Jan 29 2025 13:00:00 GMT"}, + {"functions", json(inputs.tools.empty() ? "" : inputs.tools.dump(2))}, + }, /* adjust_inputs= */ false); + if (!inputs.tools.is_null() && !inputs.tools.empty()) { + data.grammar_lazy = inputs.tool_choice != "required"; + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + auto schemas = json::array(); + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool["function"]; + schemas.push_back({ + {"type", "object"}, + {"properties", { + {"name", { + {"type", "string"}, + {"const", function["name"]}, + }}, + {"arguments", function["parameters"]}, + }}, + {"required", json::array({"name", "arguments", "id"})}, + }); + }); + auto schema = json { + {"type", "array"}, + {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}}, + {"minItems", 1}, + }; + if (!inputs.parallel_tool_calls) { + schema["maxItems"] = 1; + } + builder.add_rule("root", "\" functools\"? " + builder.add_schema("tool_calls", schema)); + }, grammar_options); + data.grammar_triggers.push_back({" functools[", /* .at_start = */ false}); + data.format = COMMON_CHAT_FORMAT_FIREFUNCTION_V2; + } else { + data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY; + } + return data; +} +static common_chat_msg common_chat_parse_firefunction_v2(const std::string & input) { + return parse_prefixed_json_tool_call_array(input, " functools[", /* rstrip_prefix= */ 1); +} + +static common_chat_params common_chat_params_init_functionary_v3_2(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + // >>>all\nlet's call functions>>>fn1\n{"arg1": 1...}\n>>>fn2\n{"arg1": 1...}... + // Using ">>>f1\n", ">>>f2\n"... as trigger words for the grammar + common_chat_params data; + data.prompt = tmpl.apply(inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt); + data.format = COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2; + if (!inputs.tools.is_null() && !inputs.tools.empty()) { + data.grammar_lazy = inputs.tool_choice != "required"; + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + std::vector first_tool_rules; + std::vector subsequent_tool_rules; + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool["function"]; + std::string name = function["name"]; + auto parameters = function["parameters"]; + auto args_rule = builder.add_schema(name + "-args", parameters); + first_tool_rules.push_back(builder.add_rule(name + "-call", "\"" + name + "\\n\" " + args_rule)); + subsequent_tool_rules.push_back(builder.add_rule(name + "-call2", "\">>>" + name + "\\n\" " + args_rule)); + data.grammar_triggers.push_back({name, /* .at_start = */ true}); + data.grammar_triggers.push_back({">>>" + name, /* .at_start = */ false}); + }); + auto first_rule = first_tool_rules.empty() ? "" : builder.add_rule("first_tool_call", string_join(first_tool_rules, " | ")) + " space"; + if (inputs.parallel_tool_calls) { + auto subsequent_rule = builder.add_rule("subsequent_tool_call", string_join(subsequent_tool_rules, " | ")) + " space"; + builder.add_rule("root", first_rule + " (" + subsequent_rule + ")*"); + } else { + builder.add_rule("root", first_rule); + } + + }, grammar_options); + } + return data; +} + +static bool consume(std::string::const_iterator & it, const std::string::const_iterator & end, const std::string & expected) { + auto expected_it = expected.begin(); + auto tmp_it = it; + while (tmp_it != end && expected_it != expected.end() && *tmp_it == *expected_it) { + ++tmp_it; + ++expected_it; + } + if (expected_it == expected.end()) { + it = tmp_it; + return true; + } + return false; +} + +static common_chat_msg common_chat_parse_functionary_v3_2(const std::string & input) { + static std::regex function_regex(R"((?:>>>)?(\w+)\n)"); + static std::regex close_regex(R"($|(?=>>>))"); + + std::string content; + auto it = input.begin(); + const auto end = input.end(); + + if (consume(it, end, "all\n")) { + std::smatch match; + if (std::regex_search(it, end, match, function_regex)) { + auto fun_it = match.prefix().second; + content = std::string(it, fun_it); + it = fun_it; + } else { + common_chat_msg res; + res.role = "assistant"; + res.content = std::string(it, end); + return res; + } + } + // TODO: tighten & simplify. + auto res = parse_json_tool_calls(std::string(it, end), std::nullopt, function_regex, close_regex); + res.content = content; + return res; +} + +static common_chat_params common_chat_params_init_functionary_v3_1_llama_3_1(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + // https://github.com/MeetKai/functionary/blob/main/tests/prompt_test_v3-llama3.1.txt + common_chat_params data; + json tools = inputs.tools.is_null() ? inputs.tools : json::array(); + std::string python_code_argument_name; + auto has_raw_python = false; + + data.grammar_lazy = inputs.tool_choice != "required"; + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + std::vector tool_rules; + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool["function"]; + const auto & parameters = function["parameters"]; + std::string name = function["name"]; + if (name == "python" || name == "ipython") { + if (!parameters.contains("type")) { + throw std::runtime_error("Missing type in python tool"); + } + has_raw_python = true; + auto type = parameters.at("type"); + if (type == "object") { + auto properties = parameters.at("properties"); + for (auto it = properties.begin(); it != properties.end(); ++it) { + if (it.value().at("type") == "string") { + if (!python_code_argument_name.empty()) { + throw std::runtime_error("Multiple string arguments found in python tool"); + } + python_code_argument_name = it.key(); + } + } + if (python_code_argument_name.empty()) { + throw std::runtime_error("No string argument found in python tool"); + } + } else if (type != "string") { + throw std::runtime_error("Invalid type in python tool: " + type.dump()); + } + } + tool_rules.push_back(builder.add_rule(name + "-call", "\"\" " + builder.add_schema(name + "-args", parameters) + " \"\" space")); + }); + if (has_raw_python) { + tool_rules.push_back(builder.add_rule("python-call", "\"<|python_tag|>\" .*")); + data.grammar_triggers.push_back({"<|python_tag|>", /* .at_start = */ false}); + } + auto tool_call = builder.add_rule("tool_call", string_join(tool_rules, " | ")) + " space"; + builder.add_rule("root", inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call); + data.grammar_triggers.push_back({"([\s\S\n]*)$)"); + std::smatch match; + if (std::regex_search(input, match, python_tag_regex)) { + auto code = match[1].str(); + return { + /* .role = */ "assistant", + /* .content = */ match.prefix().str(), + /* .tool_calls = */ { + { + /* .name = */ "python", + /* .arguments = */ (json {{"code", code}}).dump(), + /* .id = */ "", + }, + } + }; + } + static std::regex function_regex(R"()"); + static std::regex close_regex(R"()"); + // TODO: tighten & simplify. + return parse_json_tool_calls(input, std::nullopt, function_regex, close_regex); +} + +static common_chat_params common_chat_params_init_hermes_2_pro(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + common_chat_params data; + // (content)?({"name": "foo", "arguments": {"a": 1}})* + data.grammar_lazy = inputs.tool_choice != "required"; + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + std::vector tool_rules; + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool["function"]; + std::string name = function["name"]; + auto parameters = function["parameters"]; + builder.resolve_refs(parameters); + tool_rules.push_back(builder.add_schema(name + "-call", { + {"type", "object"}, + {"properties", json { + {"name", json {{"const", name}}}, + {"arguments", parameters}, + }}, + {"required", json::array({"name", "arguments"})}, + })); + }); + auto tool_call = "\"\" space " + builder.add_rule("tool_call", string_join(tool_rules, " | ")) + " \"\" space"; + builder.add_rule("root", inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call); + data.grammar_triggers.push_back({"", /* .at_start = */ false}); + // Not really a trigger but need to print this special token to get a successful parse. + data.grammar_triggers.push_back({"", /* .at_start = */ false}); + }, grammar_options); + + data.prompt = tmpl.apply(inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt); + data.format = COMMON_CHAT_FORMAT_HERMES_2_PRO; + return data; +} +static common_chat_msg common_chat_parse_hermes_2_pro(const std::string & input) { + try { + std::regex start_pattern(R"([\n\s]*)"); + std::regex middle_pattern(R"([\n\s]*[\n\s]*)"); + std::regex end_pattern(R"([\n\s]*[\n\s]*$)"); + + auto end = input.end(); + std::sregex_iterator rend; + std::sregex_iterator rit(input.begin(), end, start_pattern); + if (rit == rend) { + return { + /* .role = */ "assistant", + /* .content = */ input, + /* .tool_calls = */ {}, + }; + } + + common_chat_msg result; + result.role = "assistant"; + result.content = rit->prefix(); + + auto it = rit->suffix().first; + while (it != end) { + json call; + if (!parse_json(it, end, call)) { + throw std::runtime_error("Failed to parse json tool call"); + } + const auto & arguments = call["arguments"]; + result.tool_calls.push_back({ + call["name"], + arguments.dump(), + // arguments.is_string() ? arguments.get() : arguments.dump(), + /* id= */ "", + }); + rit = {it, end, middle_pattern}; + if (rit != rend) { + it = rit->suffix().first; + } else { + rit = {it, end, end_pattern}; + if (rit == rend) { + throw std::runtime_error("Malformed input, missing "); + } + break; + } + } + return result; + } catch (const std::exception & e) { + return { + /* .role = */ "assistant", + /* .content = */ input, + /* .tool_calls = */ {}, + }; + } +} + +static common_chat_params common_chat_params_init_without_tools(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + common_chat_params data; + data.prompt = tmpl.apply(inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt); + data.format = COMMON_CHAT_FORMAT_CONTENT_ONLY; + data.grammar_lazy = false; + if (!inputs.json_schema.is_null()) { + if (!inputs.grammar.empty()) { + throw std::runtime_error("Either \"json_schema\" or \"grammar\" can be specified, but not both"); + } + data.grammar = json_schema_to_grammar(inputs.json_schema); + } else { + data.grammar = inputs.grammar.empty(); + } + return data; +} + +common_chat_params common_chat_params_init(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + auto has_tools = !inputs.tools.is_null() && inputs.tool_choice != "none"; + LOG_DBG("[%s] has_tools=%s\n", __func__, has_tools ? "true" : "false"); + + if (has_tools && !inputs.grammar.empty()) { + throw std::runtime_error("Cannot specify grammar with tools"); + } + + const auto & src = tmpl.source(); + if (src.find(">>>all") != std::string::npos) { + // Functionary prepends "all\n" to plain content outputs, so we use the parser no matter when + return common_chat_params_init_functionary_v3_2(tmpl, inputs); + } + if (src.find(" functools[") != std::string::npos) { + // Firefunction v2 requires datetime and functions in the context, even w/o tools. + return common_chat_params_init_firefunction_v2(tmpl, inputs); + } + + if (!has_tools) { + return common_chat_params_init_without_tools(tmpl, inputs); + } + + if (src.find("") != std::string::npos) { + return common_chat_params_init_hermes_2_pro(tmpl, inputs); + } + if (src.find("<|start_header_id|>") != std::string::npos + && src.find("ipython<|end_header_id|>") != std::string::npos) { + auto allow_python_tag_builtin_tools = src.find("<|python_tag|>") != std::string::npos; + return common_chat_params_init_llama_3_1_tool_calls(tmpl, inputs, allow_python_tag_builtin_tools); + } + if (src.find("<|tool▁calls▁begin|>") != std::string::npos) { + return common_chat_params_init_deepseek_r1(tmpl, inputs); + } + if (src.find("[TOOL_CALLS]") != std::string::npos) { + return common_chat_params_init_mistral_nemo(tmpl, inputs); + } + return common_chat_params_init_generic(tmpl, inputs); +} + +static common_chat_msg common_chat_parse_content_only(const std::string & input) { + return { + /* .role = */ "assistant", + /* .content = */ input, + /* .tool_calls = */ {}, + }; +} + +common_chat_msg common_chat_parse(const std::string & input, common_chat_format format) { + switch (format) { + case COMMON_CHAT_FORMAT_CONTENT_ONLY: + return common_chat_parse_content_only(input); + case COMMON_CHAT_FORMAT_GENERIC: + return common_chat_parse_generic(input); + case COMMON_CHAT_FORMAT_MISTRAL_NEMO: + return common_chat_parse_mistral_nemo(input); + case COMMON_CHAT_FORMAT_LLAMA_3_X: + return common_chat_parse_llama_3_1(input); + case COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS: + return common_chat_parse_llama_3_1(input, /* with_builtin_tools= */ true); + case COMMON_CHAT_FORMAT_DEEPSEEK_R1: + return common_chat_parse_deepseek_r1(input); + case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: + return common_chat_parse_functionary_v3_2(input); + case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: + return common_chat_parse_functionary_v3_1_llama_3_1(input); + case COMMON_CHAT_FORMAT_HERMES_2_PRO: + return common_chat_parse_hermes_2_pro(input); + case COMMON_CHAT_FORMAT_FIREFUNCTION_V2: + return common_chat_parse_firefunction_v2(input); + default: + throw std::runtime_error("Unsupported format: " + common_chat_format_name(format)); + } +} diff --git a/common/chat.hpp b/common/chat.hpp new file mode 100644 index 000000000..ca165aa13 --- /dev/null +++ b/common/chat.hpp @@ -0,0 +1,50 @@ +// Chat support (incl. tool call grammar constraining & output parsing) w/ generic & custom template handlers. + +#pragma once + +#include "common.h" +#include +#include +#include +#include + +using json = nlohmann::ordered_json; + +struct common_chat_inputs { + json messages; + json tools; + json tool_choice; + json json_schema; + bool parallel_tool_calls; + bool stream; + std::string grammar; + bool add_generation_prompt = true; +}; + +enum common_chat_format { + COMMON_CHAT_FORMAT_CONTENT_ONLY, + COMMON_CHAT_FORMAT_GENERIC, + COMMON_CHAT_FORMAT_MISTRAL_NEMO, + COMMON_CHAT_FORMAT_LLAMA_3_X, + COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS, + COMMON_CHAT_FORMAT_DEEPSEEK_R1, + COMMON_CHAT_FORMAT_FIREFUNCTION_V2, + COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2, + COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1, + COMMON_CHAT_FORMAT_HERMES_2_PRO, + + COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats +}; + +struct common_chat_params { + common_chat_format format = COMMON_CHAT_FORMAT_CONTENT_ONLY; + json prompt; + std::string grammar; + bool grammar_lazy = false; + std::vector grammar_triggers; + std::vector additional_stops; +}; + +struct common_chat_params common_chat_params_init(const common_chat_template & tmpl, const struct common_chat_inputs & params); +std::string common_chat_format_name(common_chat_format format); +common_chat_msg common_chat_parse( const std::string & input, common_chat_format format); diff --git a/common/common.cpp b/common/common.cpp index 6dea8e3d2..6c81d18f9 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -12,6 +12,7 @@ #include "json.hpp" #include "json-schema-to-grammar.h" #include "llama.h" +#include "chat.hpp" #include "chat-template.hpp" #include @@ -1774,11 +1775,13 @@ std::string common_detokenize(const struct llama_vocab * vocab, const std::vecto bool common_chat_verify_template(const std::string & tmpl, bool use_jinja) { if (use_jinja) { try { - auto chat_template = minja::chat_template(tmpl, "", ""); - chat_template.apply({{ + auto chat_template = common_chat_template(tmpl, "", ""); + common_chat_inputs inputs; + inputs.messages = json::array({{ {"role", "user"}, {"content", "test"}, - }}, json(), true); + }}); + common_chat_params_init(chat_template, inputs); return true; } catch (const std::exception & e) { LOG_ERR("%s: failed to apply template: %s\n", __func__, e.what()); @@ -1800,7 +1803,10 @@ std::string common_chat_apply_template( for (const auto & msg : msgs) { messages.push_back({{"role", msg.role}, {"content", msg.content}}); } - return tmpl.apply(messages, /* tools= */ json(), add_ass); + common_chat_inputs inputs; + inputs.messages = messages; + inputs.add_generation_prompt = add_ass; + return common_chat_params_init(tmpl, inputs).prompt; } int alloc_size = 0; @@ -1855,10 +1861,10 @@ std::string common_chat_format_single( std::string common_chat_format_example(const common_chat_template & tmpl, bool use_jinja) { std::vector msgs = { - {"system", "You are a helpful assistant"}, - {"user", "Hello"}, - {"assistant", "Hi there"}, - {"user", "How are you?"}, + {"system", "You are a helpful assistant", {}}, + {"user", "Hello", {}}, + {"assistant", "Hi there", {}}, + {"user", "How are you?", {}}, }; return common_chat_apply_template(tmpl, msgs, true, use_jinja); } diff --git a/common/common.h b/common/common.h index 571260372..6c1809277 100644 --- a/common/common.h +++ b/common/common.h @@ -109,6 +109,11 @@ enum common_conversation_mode { COMMON_CONVERSATION_MODE_AUTO = 2, }; +struct common_grammar_trigger { + std::string word; + bool at_start; +}; + // sampling parameters struct common_params_sampling { uint32_t seed = LLAMA_DEFAULT_SEED; // the seed used to initialize llama_sampler @@ -154,7 +159,10 @@ struct common_params_sampling { COMMON_SAMPLER_TYPE_TEMPERATURE, }; - std::string grammar; // optional BNF-like grammar to constrain sampling + std::string grammar; // optional BNF-like grammar to constrain sampling + bool grammar_lazy = false; + std::vector grammar_trigger_words; // optional trigger words to trigger lazy grammar + std::vector grammar_trigger_tokens; // optional trigger tokens to trigger lazy grammar and print trigger special tokens. std::vector logit_bias; // logit biases to apply @@ -602,10 +610,17 @@ std::string common_detokenize( // Chat template utils // +struct common_tool_call { + std::string name; + std::string arguments; + std::string id; +}; + // same with llama_chat_message, but uses std::string struct common_chat_msg { std::string role; std::string content; + std::vector tool_calls; }; // Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid diff --git a/common/json-schema-to-grammar.cpp b/common/json-schema-to-grammar.cpp index 4d426b6bd..1f47e313e 100644 --- a/common/json-schema-to-grammar.cpp +++ b/common/json-schema-to-grammar.cpp @@ -343,7 +343,7 @@ static std::string format_literal(const std::string & literal) { class SchemaConverter { private: - friend std::string build_grammar(const std::function & cb); + friend std::string build_grammar(const std::function & cb, const common_grammar_options & options); std::function _fetch_json; bool _dotall; std::map _rules; @@ -764,10 +764,11 @@ private: public: SchemaConverter( const std::function & fetch_json, - bool dotall) + bool dotall, + bool compact_spaces) : _fetch_json(fetch_json), _dotall(dotall) { - _rules["space"] = SPACE_RULE; + _rules["space"] = compact_spaces ? "\" \"?" : SPACE_RULE; } void resolve_refs(json & schema, const std::string & url) { @@ -991,16 +992,16 @@ public: }; std::string json_schema_to_grammar(const json & schema) { - return build_grammar([&](const llama_grammar_builder & callbacks) { + return build_grammar([&](const common_grammar_builder & callbacks) { auto copy = schema; callbacks.resolve_refs(copy); callbacks.add_schema("", copy); }); } -std::string build_grammar(const std::function & cb) { - SchemaConverter converter([&](const std::string &) { return json(); }, /* dotall= */ false); - llama_grammar_builder builder { +std::string build_grammar(const std::function & cb, const common_grammar_options & options) { + SchemaConverter converter([&](const std::string &) { return json(); }, options.dotall, options.compact_spaces); + common_grammar_builder builder { /* .add_rule = */ [&](const std::string & name, const std::string & rule) { return converter._add_rule(name, rule); }, diff --git a/common/json-schema-to-grammar.h b/common/json-schema-to-grammar.h index 4f43ab3a5..ba4112cb9 100644 --- a/common/json-schema-to-grammar.h +++ b/common/json-schema-to-grammar.h @@ -7,10 +7,15 @@ std::string json_schema_to_grammar(const nlohmann::ordered_json & schema); -struct llama_grammar_builder { +struct common_grammar_builder { std::function add_rule; std::function add_schema; std::function resolve_refs; }; -std::string build_grammar(const std::function & cb); +struct common_grammar_options { + bool dotall = false; + bool compact_spaces = false; +}; + +std::string build_grammar(const std::function & cb, const common_grammar_options & options = {}); diff --git a/common/sampling.cpp b/common/sampling.cpp index 7241ac321..bc7e49fdb 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -151,9 +151,18 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co lparams.no_perf = params.no_perf; + std::vector trigger_words; + trigger_words.reserve(params.grammar_trigger_words.size()); + for (const auto & str : params.grammar_trigger_words) { + trigger_words.push_back(str.word.c_str()); + } auto * result = new common_sampler { /* .params = */ params, - /* .grmr = */ llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root"), + /* .grmr = */ params.grammar_lazy + ? llama_sampler_init_grammar_lazy(vocab, params.grammar.c_str(), "root", + trigger_words.data(), trigger_words.size(), + params.grammar_trigger_tokens.data(), params.grammar_trigger_tokens.size()) + : llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root"), /* .chain = */ llama_sampler_chain_init(lparams), /* .prev = */ ring_buffer(std::max(32, params.n_prev)), /* .cur = */ {}, diff --git a/examples/gbnf-validator/gbnf-validator.cpp b/examples/gbnf-validator/gbnf-validator.cpp index 17a0e27c4..a610e6a0b 100644 --- a/examples/gbnf-validator/gbnf-validator.cpp +++ b/examples/gbnf-validator/gbnf-validator.cpp @@ -76,7 +76,7 @@ int main(int argc, char** argv) { grammar_str = buffer.str(); } - llama_grammar * grammar = llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root"); + llama_grammar * grammar = llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root", false, nullptr, 0, nullptr, 0); if (grammar == nullptr) { fprintf(stdout, "Failed to initialize llama_grammar\n"); return 1; diff --git a/examples/main/main.cpp b/examples/main/main.cpp index da2a03ab9..e654d3542 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -254,7 +254,7 @@ int main(int argc, char ** argv) { } } - const bool add_bos = llama_vocab_get_add_bos(vocab); + const bool add_bos = llama_vocab_get_add_bos(vocab) && !params.use_jinja; if (!llama_model_has_encoder(model)) { GGML_ASSERT(!llama_vocab_get_add_eos(vocab)); } @@ -264,9 +264,9 @@ int main(int argc, char ** argv) { std::vector embd_inp; auto chat_add_and_format = [&chat_msgs, &chat_templates](const std::string & role, const std::string & content) { - common_chat_msg new_msg{role, content}; + common_chat_msg new_msg{role, content, {}}; auto formatted = common_chat_format_single(*chat_templates.template_default, chat_msgs, new_msg, role == "user", g_params->use_jinja); - chat_msgs.push_back({role, content}); + chat_msgs.push_back({role, content, {}}); LOG_DBG("formatted: '%s'\n", formatted.c_str()); return formatted; }; @@ -503,12 +503,14 @@ int main(int argc, char ** argv) { std::vector embd; - // tokenized antiprompts - std::vector> antiprompt_ids; + // single-token antiprompts + std::vector antiprompt_token; - antiprompt_ids.reserve(params.antiprompt.size()); for (const std::string & antiprompt : params.antiprompt) { - antiprompt_ids.emplace_back(::common_tokenize(ctx, antiprompt, false, true)); + auto ids = ::common_tokenize(ctx, antiprompt, false, true); + if (ids.size() == 1) { + antiprompt_token.push_back(ids[0]); + } } if (llama_model_has_encoder(model)) { @@ -753,14 +755,11 @@ int main(int argc, char ** argv) { // check for reverse prompt using special tokens llama_token last_token = common_sampler_last(smpl); - for (std::vector ids : antiprompt_ids) { - if (ids.size() == 1 && last_token == ids[0]) { - if (params.interactive) { - is_interacting = true; - } - is_antiprompt = true; - break; + if (std::find(antiprompt_token.begin(), antiprompt_token.end(), last_token) != antiprompt_token.end()) { + if (params.interactive) { + is_interacting = true; } + is_antiprompt = true; } if (is_antiprompt) { diff --git a/examples/server/README.md b/examples/server/README.md index 44da503df..ce1ae8858 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -1117,6 +1117,82 @@ curl http://localhost:8080/v1/chat/completions \ }' ``` +... and even tool usage (needs `--jinja` flag): + + ```shell + llama-server --jinja -hfr lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF -hff Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf -fa + + # https://huggingface.co/meetkai/functionary-medium-v3.2 + llama-server --jinja -hfr bartowski/functionary-medium-v3.2-GGUF -hff functionary-medium-v3.2-IQ4_XS.gguf -fa + + # https://huggingface.co/meetkai/functionary-medium-v3.1 + llama-server --jinja -hfr meetkai/functionary-medium-v3.1-GGUF -hff functionary-medium-llama-3.1.Q4_0.gguf -fa + + curl http://localhost:8080/v1/chat/completions -d '{ + "model": "gpt-3.5-turbo", + "tools": [ + { + "type":"function", + "function":{ + "name":"get_current_weather", + "description":"Get the current weather in a given location", + "parameters":{ + "type":"object", + "properties":{ + "location":{ + "type":"string", + "description":"The city and state, e.g. San Francisco, CA" + } + }, + "required":["location"] + } + } + } + ], + "messages": [ + { + "role": "user", + "content": "What is the weather like in Istanbul?." + } + ] + }' + ``` + +
+ Show output + + ```json + { + "choices": [ + { + "finish_reason": "tool", + "index": 0, + "message": { + "content": null, + "tool_calls": [ + { + "name": "python", + "arguments": "{\"code\":\" \\nprint(\\\"Hello, World!\\\")\"}" + } + ], + "role": "assistant" + } + } + ], + "created": 1727287211, + "model": "gpt-3.5-turbo", + "object": "chat.completion", + "usage": { + "completion_tokens": 16, + "prompt_tokens": 44, + "total_tokens": 60 + }, + "id": "chatcmpl-Htbgh9feMmGM0LEH2hmQvwsCxq3c6Ni8" + } + ``` + +
+ ### POST `/v1/embeddings`: OpenAI-compatible embeddings API This endpoint requires that the model uses a pooling different than type `none`. The embeddings are normalized using the Eucledian norm. diff --git a/examples/server/server.cpp b/examples/server/server.cpp index b9aa5c81c..d1ea343dd 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -113,10 +113,11 @@ struct slot_params { struct common_params_speculative speculative; // OAI-compat fields - bool verbose = false; - oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; - std::string oaicompat_model; - std::string oaicompat_cmpl_id; + bool verbose = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; + common_chat_format oaicompat_chat_format = COMMON_CHAT_FORMAT_CONTENT_ONLY; json to_json() const { std::vector samplers; @@ -164,6 +165,8 @@ struct slot_params { {"n_probs", sampling.n_probs}, {"min_keep", sampling.min_keep}, {"grammar", sampling.grammar}, + // {"grammar_trigger_words", sampling.grammar_trigger_words}, + {"grammar_trigger_tokens", sampling.grammar_trigger_tokens}, {"samplers", samplers}, {"speculative.n_max", speculative.n_max}, {"speculative.n_min", speculative.n_min}, @@ -325,12 +328,50 @@ struct server_task { if (data.contains("json_schema") && !data.contains("grammar")) { try { auto schema = json_value(data, "json_schema", json::object()); - params.sampling.grammar = json_schema_to_grammar(schema); + LOG_DBG("JSON schema: %s\n", schema.dump(2).c_str()); + params.sampling.grammar = json_schema_to_grammar(schema); + LOG_DBG("Converted grammar: %s\n", params.sampling.grammar.c_str()); } catch (const std::exception & e) { throw std::runtime_error(std::string("\"json_schema\": ") + e.what()); } } else { - params.sampling.grammar = json_value(data, "grammar", defaults.sampling.grammar); + params.sampling.grammar = json_value(data, "grammar", defaults.sampling.grammar); + LOG_DBG("Grammar: %s\n", params.sampling.grammar.c_str()); + params.sampling.grammar_lazy = json_value(data, "grammar_lazy", defaults.sampling.grammar_lazy); + LOG_DBG("Grammar lazy: %s\n", params.sampling.grammar_lazy ? "true" : "false"); + } + + { + auto it = data.find("chat_format"); + if (it != data.end()) { + params.oaicompat_chat_format = static_cast(it->get()); + LOG_DBG("Chat format: %s\n", common_chat_format_name(params.oaicompat_chat_format).c_str()); + } else { + params.oaicompat_chat_format = defaults.oaicompat_chat_format; + } + } + + { + const auto grammar_triggers = data.find("grammar_triggers"); + if (grammar_triggers != data.end()) { + for (const auto & t : *grammar_triggers) { + common_grammar_trigger trigger; + trigger.word = t.at("word"); + trigger.at_start = t.at("at_start"); + + auto ids = common_tokenize(vocab, trigger.word, /* add_special= */ false, /* parse_special= */ true); + if (ids.size() == 1) { + LOG_DBG("Grammar trigger token: %d (`%s`)\n", ids[0], trigger.word.c_str()); + params.sampling.grammar_trigger_tokens.push_back(ids[0]); + continue; + } + LOG_DBG("Grammar trigger word: `%s`\n", trigger.word.c_str()); + params.sampling.grammar_trigger_words.push_back(trigger); + } + } + if (params.sampling.grammar_lazy) { + GGML_ASSERT(params.sampling.grammar_trigger_tokens.size() > 0 || params.sampling.grammar_trigger_words.size() > 0); + } } { @@ -382,22 +423,12 @@ struct server_task { } { - const auto & samplers = data.find("samplers"); + const auto samplers = data.find("samplers"); if (samplers != data.end()) { if (samplers->is_array()) { - std::vector sampler_names; - for (const auto & name : *samplers) { - if (name.is_string()) { - sampler_names.emplace_back(name); - } - } - params.sampling.samplers = common_sampler_types_from_names(sampler_names, false); + params.sampling.samplers = common_sampler_types_from_names(*samplers, false); } else if (samplers->is_string()){ - std::string sampler_string; - for (const auto & name : *samplers) { - sampler_string += name; - } - params.sampling.samplers = common_sampler_types_from_chars(sampler_string); + params.sampling.samplers = common_sampler_types_from_chars(samplers->get()); } } else { params.sampling.samplers = defaults.sampling.samplers; @@ -544,7 +575,7 @@ struct completion_token_output { struct server_task_result_cmpl_final : server_task_result { int index = 0; - std::string content; + std::string content; llama_tokens tokens; bool stream; @@ -566,10 +597,11 @@ struct server_task_result_cmpl_final : server_task_result { slot_params generation_params; // OAI-compat fields - bool verbose = false; - oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; - std::string oaicompat_model; - std::string oaicompat_cmpl_id; + bool verbose = false; + oaicompat_type oaicompat = OAICOMPAT_TYPE_NONE; + std::string oaicompat_model; + std::string oaicompat_cmpl_id; + common_chat_format oaicompat_chat_format = COMMON_CHAT_FORMAT_CONTENT_ONLY; virtual int get_index() override { return index; @@ -663,18 +695,38 @@ struct server_task_result_cmpl_final : server_task_result { json to_json_oaicompat_chat() { std::string finish_reason = "length"; + common_chat_msg message; if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { - finish_reason = "stop"; + message = common_chat_parse(content, oaicompat_chat_format); + finish_reason = message.tool_calls.empty() ? "stop" : "tool_calls"; + } else { + message.content = content; } - json choice = json{ + json tool_calls; + if (!message.tool_calls.empty()) { + tool_calls = json::array(); + for (const auto & tc : message.tool_calls) { + tool_calls.push_back({ + {"type", "function"}, + {"function", { + {"name", tc.name}, + {"arguments", tc.arguments}, + }}, + {"id", tc.id.empty() ? json() : json(tc.id)}, + }); + } + } + + json choice { {"finish_reason", finish_reason}, {"index", 0}, {"message", json { - {"content", content}, - {"role", "assistant"} - } - }}; + {"content", message.content}, + {"tool_calls", tool_calls}, + {"role", "assistant"}, + }}, + }; if (!stream && probs_output.size() > 0) { choice["logprobs"] = json{ @@ -716,7 +768,7 @@ struct server_task_result_cmpl_final : server_task_result { finish_reason = "stop"; } - json choice = json{ + json choice = json { {"finish_reason", finish_reason}, {"index", 0}, {"delta", json::object()} @@ -1191,6 +1243,8 @@ struct server_slot { llama_token sampled; + common_chat_format chat_format = COMMON_CHAT_FORMAT_CONTENT_ONLY; + // stats size_t n_sent_text = 0; // number of sent text character @@ -1815,17 +1869,16 @@ struct server_context { if (use_jinja) { auto templates = common_chat_templates_from_model(model, ""); + common_chat_inputs inputs; + inputs.messages = json::array({{ + {"role", "user"}, + {"content", "test"}, + }}); GGML_ASSERT(templates.template_default); try { - templates.template_default->apply({{ - {"role", "user"}, - {"content", "test"}, - }}, json(), true); + common_chat_params_init(*templates.template_default, inputs); if (templates.template_tool_use) { - templates.template_tool_use->apply({{ - {"role", "user"}, - {"content", "test"}, - }}, json(), true); + common_chat_params_init(*templates.template_tool_use, inputs); } return true; } catch (const std::exception & e) { @@ -2275,11 +2328,11 @@ struct server_context { res->id_slot = slot.id; res->index = slot.index; - res->content = slot.generated_text; - res->tokens = slot.generated_tokens; + res->content = std::move(slot.generated_text); + res->tokens = std::move(slot.generated_tokens); res->timings = slot.get_timings(); res->prompt = common_detokenize(ctx, slot.prompt_tokens, true); - res->response_fields = slot.params.response_fields; + res->response_fields = std::move(slot.params.response_fields); res->truncated = slot.truncated; res->n_decoded = slot.n_decoded; @@ -2290,12 +2343,12 @@ struct server_context { res->stop = slot.stop; res->post_sampling_probs = slot.params.post_sampling_probs; - res->verbose = slot.params.verbose; - res->stream = slot.params.stream; - res->oaicompat = slot.params.oaicompat; - res->oaicompat_model = slot.params.oaicompat_model; - res->oaicompat_cmpl_id = slot.params.oaicompat_cmpl_id; - + res->verbose = slot.params.verbose; + res->stream = slot.params.stream; + res->oaicompat = slot.params.oaicompat; + res->oaicompat_model = slot.params.oaicompat_model; + res->oaicompat_cmpl_id = slot.params.oaicompat_cmpl_id; + res->oaicompat_chat_format = slot.params.oaicompat_chat_format; // populate res.probs_output if (slot.params.sampling.n_probs > 0) { if (!slot.params.stream && slot.stop == STOP_TYPE_WORD) { @@ -2773,6 +2826,11 @@ struct server_context { // track if given slot can be batched with slots already in the batch server_slot * slot_batched = nullptr; + auto accept_special_token = [&](server_slot & slot, llama_token token) { + const auto & trigger_tokens = slot.params.sampling.grammar_trigger_tokens; + return params_base.special || std::find(trigger_tokens.begin(), trigger_tokens.end(), token) != trigger_tokens.end(); + }; + // frist, add sampled tokens from any ongoing sequences for (auto & slot : slots) { if (slot.state != SLOT_STATE_GENERATING) { @@ -3136,7 +3194,7 @@ struct server_context { completion_token_output result; result.tok = id; - result.text_to_send = common_token_to_piece(ctx, result.tok, params_base.special); + result.text_to_send = common_token_to_piece(ctx, result.tok, accept_special_token(slot, result.tok)); result.prob = 1.0f; // TODO: set it here instead of doing inside populate_token_probs if (slot.params.sampling.n_probs > 0) { @@ -3225,7 +3283,7 @@ struct server_context { completion_token_output result; result.tok = ids[i]; - result.text_to_send = common_token_to_piece(ctx, result.tok, params_base.special); + result.text_to_send = common_token_to_piece(ctx, result.tok, accept_special_token(slot, result.tok)); result.prob = 1.0f; // set later // TODO: set result.probs @@ -3722,6 +3780,8 @@ int main(int argc, char ** argv) { { "total_slots", ctx_server.params_base.n_parallel }, { "model_path", ctx_server.params_base.model }, { "chat_template", ctx_server.chat_templates.template_default->source() }, + { "bos_token", ctx_server.chat_templates.template_default->bos_token() }, + { "eos_token", ctx_server.chat_templates.template_default->eos_token() }, { "build_info", build_info }, }; if (ctx_server.params_base.use_jinja && ctx_server.chat_templates.template_tool_use) { @@ -3763,7 +3823,9 @@ int main(int argc, char ** argv) { std::vector tasks; try { - std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, data.at("prompt"), true, true); + const auto & prompt = data.at("prompt"); + LOG_DBG("Prompt: %s\n", prompt.is_string() ? prompt.get().c_str() : prompt.dump(2).c_str()); + std::vector tokenized_prompts = tokenize_input_prompts(ctx_server.vocab, prompt, true, true); tasks.reserve(tokenized_prompts.size()); for (size_t i = 0; i < tokenized_prompts.size(); i++) { server_task task = server_task(type); @@ -3779,8 +3841,8 @@ int main(int argc, char ** argv) { task.id_selected_slot = json_value(data, "id_slot", -1); // OAI-compat - task.params.oaicompat = oaicompat; - task.params.oaicompat_cmpl_id = completion_id; + task.params.oaicompat = oaicompat; + task.params.oaicompat_cmpl_id = completion_id; // oaicompat_model is already populated by params_from_json_cmpl tasks.push_back(task); @@ -3949,14 +4011,14 @@ int main(int argc, char ** argv) { }; const auto handle_chat_completions = [&ctx_server, ¶ms, &res_error, &handle_completions_impl](const httplib::Request & req, httplib::Response & res) { + LOG_DBG("request: %s\n", req.body.c_str()); if (ctx_server.params_base.embedding) { res_error(res, format_error_response("This server does not support completions. Start it without `--embeddings`", ERROR_TYPE_NOT_SUPPORTED)); return; } auto body = json::parse(req.body); - const auto & chat_template = body.contains("tools") && ctx_server.chat_templates.template_tool_use ? *ctx_server.chat_templates.template_tool_use : *ctx_server.chat_templates.template_default; - json data = oaicompat_completion_params_parse(body, chat_template, params.use_jinja); + json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates); return handle_completions_impl( SERVER_TASK_TYPE_COMPLETION, @@ -3966,6 +4028,13 @@ int main(int argc, char ** argv) { OAICOMPAT_TYPE_CHAT); }; + // same with handle_chat_completions, but without inference part + const auto handle_apply_template = [&ctx_server, ¶ms, &res_ok](const httplib::Request & req, httplib::Response & res) { + auto body = json::parse(req.body); + json data = oaicompat_completion_params_parse(body, params.use_jinja, ctx_server.chat_templates); + res_ok(res, {{ "prompt", std::move(data.at("prompt")) }}); + }; + const auto handle_models = [¶ms, &ctx_server, &res_ok](const httplib::Request &, httplib::Response & res) { json models = { {"object", "list"}, @@ -4124,14 +4193,6 @@ int main(int argc, char ** argv) { res_ok(res, root); }; - const auto handle_apply_template = [&ctx_server, ¶ms, &res_ok](const httplib::Request & req, httplib::Response & res) { - auto body = json::parse(req.body); - const auto & chat_template = body.contains("tools") && ctx_server.chat_templates.template_tool_use ? *ctx_server.chat_templates.template_tool_use : *ctx_server.chat_templates.template_default; - json data = oaicompat_completion_params_parse(body, chat_template, params.use_jinja); - - res_ok(res, {{ "prompt", data.at("prompt") }}); - }; - const auto handle_embeddings = [&handle_embeddings_impl](const httplib::Request & req, httplib::Response & res) { handle_embeddings_impl(req, res, OAICOMPAT_TYPE_NONE); }; diff --git a/examples/server/tests/README.md b/examples/server/tests/README.md index 5787276ab..1de0eb30e 100644 --- a/examples/server/tests/README.md +++ b/examples/server/tests/README.md @@ -31,8 +31,9 @@ It's possible to override some scenario steps values with environment variables: | `LLAMA_SERVER_BIN_PATH` | to change the server binary path, default: `../../../build/bin/llama-server` | | `DEBUG` | to enable steps and server verbose mode `--verbose` | | `N_GPU_LAYERS` | number of model layers to offload to VRAM `-ngl --n-gpu-layers` | +| `LLAMA_CACHE` | by default server tests re-download models to the `tmp` subfolder. Set this to your cache (e.g. `$HOME/Library/Caches/llama.cpp` on Mac or `$HOME/.cache/llama.cpp` on Unix) to avoid this | -To run slow tests: +To run slow tests (will download many models, make sure to set `LLAMA_CACHE` if needed): ```shell SLOW_TESTS=1 ./tests.sh @@ -44,10 +45,16 @@ To run with stdout/stderr display in real time (verbose output, but useful for d DEBUG=1 ./tests.sh -s -v -x ``` -To run single test unit: +To run all the tests in a file: ```shell -./tests.sh unit/test_{name of test case here}.py -v -x +./tests.sh unit/test_chat_completion.py.py -v -x +``` + +To run a single test: + +```shell +./tests.sh unit/test_chat_completion.py::test_invalid_chat_completion_req ``` Hint: You can compile and run test in single command, useful for local developement: diff --git a/examples/server/tests/pytest.ini b/examples/server/tests/pytest.ini new file mode 100644 index 000000000..6df308df7 --- /dev/null +++ b/examples/server/tests/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +markers = + slow: marks tests as slow (deselect with '-m "not slow"') + serial diff --git a/examples/server/tests/tests.sh b/examples/server/tests/tests.sh index 1e0777de3..33fa8cc64 100755 --- a/examples/server/tests/tests.sh +++ b/examples/server/tests/tests.sh @@ -6,9 +6,18 @@ cd $SCRIPT_DIR set -eu +if [[ "${SLOW_TESTS:-0}" == 1 ]]; then + # Slow tests for tool calls need quite a few models ahead of time to avoid timing out. + python $SCRIPT_DIR/../../../scripts/fetch_server_test_models.py +fi + if [ $# -lt 1 ] then - pytest -v -x + if [[ "${SLOW_TESTS:-0}" == 1 ]]; then + pytest -v -x + else + pytest -v -x -m "not slow" + fi else pytest "$@" fi diff --git a/examples/server/tests/unit/test_chat_completion.py b/examples/server/tests/unit/test_chat_completion.py index add3f810f..0be04bab5 100644 --- a/examples/server/tests/unit/test_chat_completion.py +++ b/examples/server/tests/unit/test_chat_completion.py @@ -2,7 +2,7 @@ import pytest from openai import OpenAI from utils import * -server = ServerPreset.tinyllama2() +server: ServerProcess @pytest.fixture(autouse=True) def create_server(): @@ -13,11 +13,12 @@ def create_server(): @pytest.mark.parametrize( "model,system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,finish_reason,jinja,chat_template", [ - (None, "Book", "What is the best book", 8, "(Suddenly)+", 77, 8, "length", False, None), - (None, "Book", "What is the best book", 8, "(Suddenly)+", 77, 8, "length", True, None), - (None, "Book", "What is the best book", 8, "^ blue", 23, 8, "length", True, "This is not a chat template, it is"), + (None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", False, None), ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", False, None), - ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", True, None), + # TODO: fix testing of non-tool jinja mode + # (None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", True, None), + # (None, "Book", "What is the best book", 8, "I want to play with", 23, 8, "length", True, "This is not a chat template, it is"), + # ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", True, None), ] ) def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason, jinja, chat_template): diff --git a/examples/server/tests/unit/test_tool_call.py b/examples/server/tests/unit/test_tool_call.py new file mode 100644 index 000000000..e6ed9c9be --- /dev/null +++ b/examples/server/tests/unit/test_tool_call.py @@ -0,0 +1,352 @@ +import pytest +from utils import * + +server: ServerProcess + +TIMEOUT_SERVER_START = 15*60 +TIMEOUT_HTTP_REQUEST = 60 + +@pytest.fixture(autouse=True) +def create_server(): + global server + server = ServerPreset.tinyllama2() + server.model_alias = "tinyllama-2-tool-call" + server.server_port = 8081 + + +TEST_TOOL = { + "type":"function", + "function": { + "name": "test", + "description": "", + "parameters": { + "type": "object", + "properties": { + "success": {"type": "boolean", "const": True}, + }, + "required": ["success"] + } + } +} + +PYTHON_TOOL = { + "type": "function", + "function": { + "name": "python", + "description": "Runs code in an ipython interpreter and returns the result of the execution after 60 seconds.", + "parameters": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "The code to run in the ipython interpreter." + } + }, + "required": ["code"] + } + } +} + +WEATHER_TOOL = { + "type":"function", + "function":{ + "name":"get_current_weather", + "description":"Get the current weather in a given location", + "parameters":{ + "type":"object", + "properties":{ + "location":{ + "type":"string", + "description":"The city and country/state, e.g. 'San Francisco, CA', or 'Paris, France'" + } + }, + "required":["location"] + } + } +} + + +def do_test_completion_with_required_tool_tiny(template_name: str, tool: dict, argument_key: str | None): + n_predict = 512 + global server + # server = ServerPreset.stories15m_moe() + server.jinja = True + server.n_predict = n_predict + server.chat_template_file = f'../../../models/templates/{template_name}.jinja' + server.start(timeout_seconds=TIMEOUT_SERVER_START) + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": n_predict, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "Write an example"}, + ], + "tool_choice": "required", + "tools": [tool], + "parallel_tool_calls": False, + "temperature": 0.0, + "top_k": 1, + "top_p": 1.0, + }) + assert res.status_code == 200, f"Expected status code 200, got {res.status_code}" + choice = res.body["choices"][0] + tool_calls = choice["message"].get("tool_calls") + assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}' + tool_call = tool_calls[0] + expected_function_name = "python" if tool["type"] == "code_interpreter" else tool["function"]["name"] + assert expected_function_name == tool_call["function"]["name"] + actual_arguments = tool_call["function"]["arguments"] + assert isinstance(actual_arguments, str) + if argument_key is not None: + actual_arguments = json.loads(actual_arguments) + assert argument_key in actual_arguments, f"tool arguments: {json.dumps(actual_arguments)}, expected: {argument_key}" + + +@pytest.mark.parametrize("template_name,tool,argument_key", [ + ("google-gemma-2-2b-it", TEST_TOOL, "success"), + ("meta-llama-Llama-3.3-70B-Instruct", TEST_TOOL, "success"), + ("meta-llama-Llama-3.3-70B-Instruct", PYTHON_TOOL, "code"), +]) +def test_completion_with_required_tool_tiny_fast(template_name: str, tool: dict, argument_key: str | None): + do_test_completion_with_required_tool_tiny(template_name, tool, argument_key) + + +@pytest.mark.slow +@pytest.mark.parametrize("template_name,tool,argument_key", [ + ("meta-llama-Llama-3.1-8B-Instruct", TEST_TOOL, "success"), + ("meta-llama-Llama-3.1-8B-Instruct", PYTHON_TOOL, "code"), + ("meetkai-functionary-medium-v3.1", TEST_TOOL, "success"), + ("meetkai-functionary-medium-v3.1", PYTHON_TOOL, "code"), + ("meetkai-functionary-medium-v3.2", TEST_TOOL, "success"), + ("meetkai-functionary-medium-v3.2", PYTHON_TOOL, "code"), + ("NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use", TEST_TOOL, "success"), + ("NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use", PYTHON_TOOL, "code"), + ("meta-llama-Llama-3.2-3B-Instruct", TEST_TOOL, "success"), + ("meta-llama-Llama-3.2-3B-Instruct", PYTHON_TOOL, "code"), + ("mistralai-Mistral-Nemo-Instruct-2407", TEST_TOOL, "success"), + ("mistralai-Mistral-Nemo-Instruct-2407", PYTHON_TOOL, "code"), + ("NousResearch-Hermes-3-Llama-3.1-8B-tool_use", TEST_TOOL, "success"), + ("NousResearch-Hermes-3-Llama-3.1-8B-tool_use", PYTHON_TOOL, "code"), + ("deepseek-ai-DeepSeek-R1-Distill-Llama-8B", TEST_TOOL, "success"), + ("deepseek-ai-DeepSeek-R1-Distill-Llama-8B", PYTHON_TOOL, "code"), + ("fireworks-ai-llama-3-firefunction-v2", TEST_TOOL, "success"), + ("fireworks-ai-llama-3-firefunction-v2", PYTHON_TOOL, "code"), +]) +def test_completion_with_required_tool_tiny_slow(template_name: str, tool: dict, argument_key: str | None): + do_test_completion_with_required_tool_tiny(template_name, tool, argument_key) + + +@pytest.mark.slow +@pytest.mark.parametrize("tool,argument_key,hf_repo,template_override", [ + (TEST_TOOL, "success", "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + (TEST_TOOL, "success", "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + (TEST_TOOL, "success", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + (TEST_TOOL, "success", "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + (TEST_TOOL, "success", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + (PYTHON_TOOL, "code", "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + (TEST_TOOL, "success", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")), + (PYTHON_TOOL, "code", "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")), + (TEST_TOOL, "success", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + (PYTHON_TOOL, "code", "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + (TEST_TOOL, "success", "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)), + (PYTHON_TOOL, "code", "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)), + (TEST_TOOL, "success", "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + (PYTHON_TOOL, "code", "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + (TEST_TOOL, "success", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + (PYTHON_TOOL, "code", "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + # TODO: fix these + # (TEST_TOOL, "success", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), + # (PYTHON_TOOL, "code", "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), +]) +def test_completion_with_required_tool_real_model(tool: dict, argument_key: str | None, hf_repo: str, template_override: Tuple[str, str | None] | None): + n_predict = 512 + server.n_slots = 1 + server.jinja = True + server.n_ctx = 8192 + server.n_predict = n_predict + server.model_hf_repo = hf_repo + server.model_hf_file = None + if template_override: + (template_hf_repo, template_variant) = template_override + server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja" + assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template." + server.start(timeout_seconds=TIMEOUT_SERVER_START) + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": n_predict, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "Write an example"}, + ], + "tool_choice": "required", + "tools": [tool], + "parallel_tool_calls": False, + "temperature": 0.0, + "top_k": 1, + "top_p": 1.0, + }, timeout=TIMEOUT_HTTP_REQUEST) + assert res.status_code == 200, f"Expected status code 200, got {res.status_code}" + choice = res.body["choices"][0] + tool_calls = choice["message"].get("tool_calls") + assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}' + tool_call = tool_calls[0] + expected_function_name = "python" if tool["type"] == "code_interpreter" else tool["function"]["name"] + assert expected_function_name == tool_call["function"]["name"] + actual_arguments = tool_call["function"]["arguments"] + assert isinstance(actual_arguments, str) + if argument_key is not None: + actual_arguments = json.loads(actual_arguments) + assert argument_key in actual_arguments, f"tool arguments: {json.dumps(actual_arguments)}, expected: {argument_key}" + + +def do_test_completion_without_tool_call(template_name: str, n_predict: int, tools: list[dict], tool_choice: str | None): + global server + server.jinja = True + server.n_predict = n_predict + server.chat_template_file = f'../../../models/templates/{template_name}.jinja' + server.start(timeout_seconds=TIMEOUT_SERVER_START) + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": n_predict, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "say hello world with python"}, + ], + "tools": tools if tools else None, + "tool_choice": tool_choice, + "temperature": 0.0, + "top_k": 1, + "top_p": 1.0, + }, timeout=TIMEOUT_HTTP_REQUEST) + assert res.status_code == 200, f"Expected status code 200, got {res.status_code}" + choice = res.body["choices"][0] + assert choice["message"].get("tool_calls") is None, f'Expected no tool call in {choice["message"]}' + + +@pytest.mark.parametrize("template_name,n_predict,tools,tool_choice", [ + ("meta-llama-Llama-3.3-70B-Instruct", 128, [], None), + ("meta-llama-Llama-3.3-70B-Instruct", 128, [TEST_TOOL], None), + ("meta-llama-Llama-3.3-70B-Instruct", 128, [PYTHON_TOOL], 'none'), +]) +def test_completion_without_tool_call_fast(template_name: str, n_predict: int, tools: list[dict], tool_choice: str | None): + do_test_completion_without_tool_call(template_name, n_predict, tools, tool_choice) + + +@pytest.mark.slow +@pytest.mark.parametrize("template_name,n_predict,tools,tool_choice", [ + ("meetkai-functionary-medium-v3.2", 256, [], None), + ("meetkai-functionary-medium-v3.2", 256, [TEST_TOOL], None), + ("meetkai-functionary-medium-v3.2", 256, [PYTHON_TOOL], 'none'), + ("meetkai-functionary-medium-v3.1", 256, [], None), + ("meetkai-functionary-medium-v3.1", 256, [TEST_TOOL], None), + ("meetkai-functionary-medium-v3.1", 256, [PYTHON_TOOL], 'none'), + ("meta-llama-Llama-3.2-3B-Instruct", 256, [], None), + ("meta-llama-Llama-3.2-3B-Instruct", 256, [TEST_TOOL], None), + ("meta-llama-Llama-3.2-3B-Instruct", 256, [PYTHON_TOOL], 'none'), +]) +def test_completion_without_tool_call_slow(template_name: str, n_predict: int, tools: list[dict], tool_choice: str | None): + do_test_completion_without_tool_call(template_name, n_predict, tools, tool_choice) + + +@pytest.mark.slow +@pytest.mark.parametrize("hf_repo,template_override", [ + ("bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + ("bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + ("bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + ("bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + ("bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + ("bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-3-Llama-3.1-8B", "tool_use")), + ("bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + ("bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai/functionary-medium-v3.2", None)), + ("bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + # ("bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama/Llama-3.2-3B-Instruct", None)), + # ("bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), +]) +def test_weather_tool_call(hf_repo: str, template_override: Tuple[str, str | None] | None): + global server + server.n_slots = 1 + server.jinja = True + server.n_ctx = 8192 + server.n_predict = 512 + server.model_hf_repo = hf_repo + server.model_hf_file = None + if template_override: + (template_hf_repo, template_variant) = template_override + server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja" + assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template." + server.start(timeout_seconds=TIMEOUT_SERVER_START) + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": 256, + "messages": [ + {"role": "user", "content": "What is the weather in Istanbul?"}, + ], + "tools": [WEATHER_TOOL], + }, timeout=TIMEOUT_HTTP_REQUEST) + assert res.status_code == 200, f"Expected status code 200, got {res.status_code}" + choice = res.body["choices"][0] + tool_calls = choice["message"].get("tool_calls") + assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}' + tool_call = tool_calls[0] + assert tool_call["function"]["name"] == WEATHER_TOOL["function"]["name"] + actual_arguments = json.loads(tool_call["function"]["arguments"]) + assert 'location' in actual_arguments, f"location not found in {json.dumps(actual_arguments)}" + location = actual_arguments["location"] + assert isinstance(location, str), f"Expected location to be a string, got {type(location)}: {json.dumps(location)}" + assert re.match('^Istanbul(, (TR|Turkey|Türkiye))?$', location), f'Expected Istanbul for location, got {location}' + + +@pytest.mark.slow +@pytest.mark.parametrize("expected_arguments_override,hf_repo,template_override", [ + (None, "bartowski/gemma-2-2b-it-GGUF:Q4_K_M", None), + (None, "bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M", None), + (None, "bartowski/functionary-small-v3.2-GGUF:Q8_0", ("meetkai-functionary-medium-v3.2", None)), + ('{"code":"print("}', "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M", None), + (None, "bartowski/Llama-3.2-1B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)), + ('{"code":"print("}', "bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M", ("meta-llama-Llama-3.2-3B-Instruct", None)), + (None, "bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M", None), + (None, "bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M", ("NousResearch/Hermes-2-Pro-Llama-3-8B", "tool_use")), + (None, "bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M", ("NousResearch-Hermes-3-Llama-3.1-8B", "tool_use")), + (None, "bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M", None), + # (None, "bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF:Q4_K_M", None), +]) +def test_hello_world_tool_call(expected_arguments_override: str | None, hf_repo: str, template_override: Tuple[str, str | None] | None): + global server + server.n_slots = 1 + server.jinja = True + server.n_ctx = 8192 + server.n_predict = 128 + server.model_hf_repo = hf_repo + server.model_hf_file = None + if template_override: + (template_hf_repo, template_variant) = template_override + server.chat_template_file = f"../../../models/templates/{template_hf_repo.replace('/', '-') + ('-' + template_variant if template_variant else '')}.jinja" + assert os.path.exists(server.chat_template_file), f"Template file {server.chat_template_file} does not exist. Run `python scripts/get_chat_template.py {template_hf_repo} {template_variant} > {server.chat_template_file}` to download the template." + server.start(timeout_seconds=TIMEOUT_SERVER_START) + res = server.make_request("POST", "/chat/completions", data={ + "max_tokens": 256, + "messages": [ + {"role": "system", "content": "You are a coding assistant."}, + {"role": "user", "content": "say hello world with python"}, + ], + "tools": [PYTHON_TOOL], + # Note: without these greedy params, Functionary v3.2 writes `def hello_world():\n print("Hello, World!")\nhello_world()` which is correct but a pain to test. + "temperature": 0.0, + "top_k": 1, + "top_p": 1.0, + }, timeout=TIMEOUT_HTTP_REQUEST) + assert res.status_code == 200, f"Expected status code 200, got {res.status_code}" + choice = res.body["choices"][0] + tool_calls = choice["message"].get("tool_calls") + assert tool_calls and len(tool_calls) == 1, f'Expected 1 tool call in {choice["message"]}' + tool_call = tool_calls[0] + assert tool_call["function"]["name"] == PYTHON_TOOL["function"]["name"] + actual_arguments = tool_call["function"]["arguments"] + if expected_arguments_override is not None: + assert actual_arguments == expected_arguments_override + else: + actual_arguments = json.loads(actual_arguments) + assert 'code' in actual_arguments, f"code not found in {json.dumps(actual_arguments)}" + code = actual_arguments["code"] + assert isinstance(code, str), f"Expected code to be a string, got {type(code)}: {json.dumps(code)}" + assert re.match(r'''print\(("[Hh]ello,? [Ww]orld!?"|'[Hh]ello,? [Ww]orld!?')\)''', code), f'Expected hello world, got {code}' diff --git a/examples/server/tests/utils.py b/examples/server/tests/utils.py index 9964db2f9..ce0680662 100644 --- a/examples/server/tests/utils.py +++ b/examples/server/tests/utils.py @@ -26,7 +26,7 @@ from re import RegexFlag import wget -DEFAULT_HTTP_TIMEOUT = 10 if "LLAMA_SANITIZE" not in os.environ else 30 +DEFAULT_HTTP_TIMEOUT = 12 if "LLAMA_SANITIZE" not in os.environ else 30 class ServerResponse: @@ -41,7 +41,7 @@ class ServerProcess: server_port: int = 8080 server_host: str = "127.0.0.1" model_hf_repo: str = "ggml-org/models" - model_hf_file: str = "tinyllamas/stories260K.gguf" + model_hf_file: str | None = "tinyllamas/stories260K.gguf" model_alias: str = "tinyllama-2" temperature: float = 0.8 seed: int = 42 @@ -191,7 +191,7 @@ class ServerProcess: creationflags=flags, stdout=sys.stdout, stderr=sys.stdout, - env={**os.environ, "LLAMA_CACHE": "tmp"}, + env={**os.environ, "LLAMA_CACHE": "tmp"} if "LLAMA_CACHE" not in os.environ else None, ) server_instances.add(self) diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index c5987250c..3d2c04666 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -17,6 +17,7 @@ #define JSON_ASSERT GGML_ASSERT #include "json.hpp" #include "minja.hpp" +#include "chat.hpp" #include "chat-template.hpp" #include @@ -376,7 +377,7 @@ inline std::string format_chat(const common_chat_template & tmpl, const std::vec throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)"); } - chat.push_back({role, content}); + chat.push_back({role, content, /* tool_calls= */ {}}); } const auto formatted_chat = common_chat_apply_template(tmpl, chat, true, /* use_jinja= */ false); @@ -483,14 +484,13 @@ static bool ends_with(const std::string & str, const std::string & suffix) { static size_t find_partial_stop_string(const std::string &stop, const std::string &text) { if (!text.empty() && !stop.empty()) { - const char text_last_char = text.back(); - for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) { - if (stop[char_index] == text_last_char) { - const std::string current_partial = stop.substr(0, char_index + 1); - if (ends_with(text, current_partial)) { - return text.size() - char_index - 1; - } + auto it = std::find(stop.rbegin(), stop.rend(), text.back()); + while (it != stop.rend()) { + size_t length = std::distance(it, stop.rend()); + if (text.length() >= length && 0 == text.compare(text.length() - length, length, stop)) { + return text.length() - length; } + it = std::find(std::next(it), stop.rend(), text.back()); } } @@ -580,21 +580,30 @@ static json oaicompat_completion_params_parse(const json & body) { static json oaicompat_completion_params_parse( const json & body, /* openai api json semantics */ - const common_chat_template & tmpl, - bool use_jinja) + bool use_jinja, + const common_chat_templates & chat_templates) { json llama_params; + const auto & tmpl = body.contains("tools") && chat_templates.template_tool_use + ? *chat_templates.template_tool_use + : *chat_templates.template_default; auto tools = json_value(body, "tools", json()); - auto has_tools = tools.is_array() && !tools.empty(); + auto stream = json_value(body, "stream", false); - if (has_tools) { - if (use_jinja) { - LOG_WRN("tools param is not fully supported yet\n"); - } else { + if (tools.is_array() && !tools.empty()) { + if (stream) { + throw std::runtime_error("Cannot use tools with stream"); + } + if (!use_jinja) { throw std::runtime_error("tools param requires --jinja flag"); } } + if (!use_jinja) { + if (body.contains("tool_choice") && !body.at("tool_choice").is_null()) { + throw std::runtime_error("Unsupported param: tool_choice"); + } + } // Handle "stop" field if (body.contains("stop") && body.at("stop").is_string()) { @@ -619,7 +628,38 @@ static json oaicompat_completion_params_parse( // Apply chat template to the list of messages if (use_jinja) { - llama_params["prompt"] = tmpl.apply(body.at("messages"), tools, /* add_generation_prompt= */ true); + auto tool_choice = json_value(body, "tool_choice", std::string("auto")); + if (tool_choice != "none" && tool_choice != "auto" && tool_choice != "required") { + throw std::runtime_error("Invalid tool_choice: " + tool_choice); + } + if (tool_choice != "none" && llama_params.contains("grammar")) { + throw std::runtime_error("Cannot use custom grammar constraints with tools."); + } + common_chat_inputs inputs; + inputs.messages = body.at("messages"); + inputs.tools = tools; + inputs.tool_choice = tool_choice; + inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false); + inputs.stream = stream; + // TODO: support mixing schema w/ tools beyond generic format. + inputs.json_schema = json_value(llama_params, "json_schema", json::object()); + auto chat_params = common_chat_params_init(tmpl, inputs); + + llama_params["chat_format"] = static_cast(chat_params.format); + llama_params["prompt"] = chat_params.prompt; + llama_params["grammar"] = chat_params.grammar; + llama_params["grammar_lazy"] = chat_params.grammar_lazy; + auto grammar_triggers = json::array(); + for (const auto & trigger : chat_params.grammar_triggers) { + grammar_triggers.push_back({ + {"word", trigger.word}, + {"at_start", trigger.at_start}, + }); + } + llama_params["grammar_triggers"] = grammar_triggers; + for (const auto & stop : chat_params.additional_stops) { + llama_params["stop"].push_back(stop); + } } else { llama_params["prompt"] = format_chat(tmpl, body.at("messages")); } @@ -638,14 +678,6 @@ static json oaicompat_completion_params_parse( throw std::runtime_error("top_logprobs requires logprobs to be set to true"); } - // Params supported by OAI but unsupported by llama.cpp - static const std::vector unsupported_params { "tool_choice" }; - for (const auto & param : unsupported_params) { - if (body.contains(param)) { - throw std::runtime_error("Unsupported param: " + param); - } - } - // Copy remaining properties to llama_params // This allows user to use llama.cpp-specific params like "mirostat", ... via OAI endpoint. // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp diff --git a/include/llama.h b/include/llama.h index 3b75e7607..61907ed40 100644 --- a/include/llama.h +++ b/include/llama.h @@ -1199,6 +1199,18 @@ extern "C" { const char * grammar_str, const char * grammar_root); + /// @details Lazy grammar sampler, introduced in https://github.com/ggerganov/llama.cpp/pull/9639 + /// @param trigger_words A list of words that will trigger the grammar sampler. This may be updated to a loose regex syntax (w/ ^) in a near future. + /// @param trigger_tokens A list of tokens that will trigger the grammar sampler. + LLAMA_API struct llama_sampler * llama_sampler_init_grammar_lazy( + const struct llama_vocab * vocab, + const char * grammar_str, + const char * grammar_root, + const char ** trigger_words, + size_t num_trigger_words, + const llama_token * trigger_tokens, + size_t num_trigger_tokens); + /// NOTE: Avoid using on the full vocabulary as searching for repeated tokens can become slow. For example, apply top-k or top-p sampling first. LLAMA_API struct llama_sampler * llama_sampler_init_penalties( int32_t penalty_last_n, // last n tokens to penalize (0 = disable penalty, -1 = context size) diff --git a/models/templates/CohereForAI-c4ai-command-r-plus-tool_use.jinja b/models/templates/CohereForAI-c4ai-command-r-plus-tool_use.jinja new file mode 100644 index 000000000..f5baef30b --- /dev/null +++ b/models/templates/CohereForAI-c4ai-command-r-plus-tool_use.jinja @@ -0,0 +1,202 @@ + +{%- macro json_to_python_type(json_spec) %} +{%- set basic_type_map = { + "string": "str", + "number": "float", + "integer": "int", + "boolean": "bool" +} %} + +{%- if basic_type_map[json_spec.type] is defined %} + {{- basic_type_map[json_spec.type] }} +{%- elif json_spec.type == "array" %} + {{- "List[" + json_to_python_type(json_spec.items) + "]"}} +{%- elif json_spec.type == "object" %} + {{- "Dict[str, " + json_to_python_type(json_spec.additionalProperties) + ']'}} +{%- elif json_spec.type is iterable %} + {{- "Union[" }} + {%- for t in json_spec.type %} + {{- json_to_python_type({"type": t}) }} + {%- if not loop.last %} + {{- "," }} + {%- endif %} + {%- endfor %} + {{- "]" }} +{%- else %} + {{- "Any" }} +{%- endif %} +{%- endmacro %} + +{%- macro old_tool_parser(tools) %} +{%- for tool in tools %} + {%- if loop.index0 != 0 %} + {{- '\n\n' }} + {%- endif %} + {{- '```python\ndef ' + tool.name + '(' }} + {%- for param_name, param_fields in tool.parameter_definitions|items %} + {%- if loop.index0 != 0 %} + {{- ', '}} + {%- endif %} + {{- param_name + ': ' }} + {%- if not param_fields.required %} + {{- 'Optional[' + param_fields.type + '] = None'}} + {%- else %} + {{- param_fields.type }} + {%- endif %} + {%- endfor %} + {{- ') -> List[Dict]:\n """'}} + {{- tool.description }} + {%- if tool.parameter_definitions|length != 0 %} + {{- '\n\n Args:\n '}} + {%- for param_name, param_fields in tool.parameter_definitions|items %} + {%- if loop.index0 != 0 %} + {{- '\n ' }} + {%- endif %} + {{- param_name + ' ('}} + {%- if not param_fields.required %} + {{- 'Optional[' + param_fields.type + ']'}} + {%- else %} + {{- param_fields.type }} + {%- endif %} + {{- '): ' + param_fields.description }} + {%- endfor %} + {%- endif %} + {{- '\n """\n pass\n```' }} +{%- endfor %} +{%- endmacro %} + +{%- macro new_tool_parser(tools) %} +{%- for tool in tools %} + {%- if loop.index0 != 0 %} + {{- '\n\n'}} + {%- endif %} + {%- if tool.function is defined %} + {%- set tool = tool.function %} + {%- endif %} + {{-'```python +def ' + tool.name + '('}} + {%- for param_name, param_fields in tool.parameters.properties|items %} + {%- if loop.index0 != 0 %} + {{- ', '}} + {%- endif %} + {{-param_name + ": "}} + {%- if not param_name in tool.parameters.required %} + {{-'Optional[' + json_to_python_type(param_fields) + '] = None'}} + {%- else %} + {{- json_to_python_type(param_fields) }} + {%- endif %} + {%- endfor %} + {{- ') -> List[Dict]: + """'}} + {{- tool.description }} + {%- if tool.parameters.properties|length != 0 %} + {{- '\n\n Args:\n '}} + {%- for param_name, param_fields in tool.parameters.properties|items %} + {%- if loop.index0 != 0 %} + {{- '\n ' }} + {%- endif %} + {{- param_name + ' ('}} + {%- if not param_name in tool.parameters.required %} + {{-'Optional[' + json_to_python_type(param_fields) + ']'}} + {%- else %} + {{- json_to_python_type(param_fields) }} + {%- endif %} + {{- '): ' + param_fields.description }} + {%- endfor %} + {%- endif %} + {{- '\n """\n pass\n```' }} +{%- endfor %} +{%- endmacro %} + +{{- bos_token }} +{%- if messages[0]['role'] == 'system' %} + {%- set loop_messages = messages[1:] %} + {%- set system_message = messages[0]['content'] %} +{%- else %} + {%- set loop_messages = messages %} + {%- set system_message = '## Task and Context\nYou help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user\'s needs as best you can, which will be wide-ranging.\n\n## Style Guide\nUnless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.' %} +{%- endif %} +{{- '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }} +{{- '# Safety Preamble' }} +{{- ' +The instructions in this section override those in the task description and style guide sections. Don\'t answer questions that are harmful or immoral.' }} +{{- ' + +# System Preamble' }} +{{- ' +## Basic Rules' }} +{{- ' +You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user\'s requests, you cite your sources in your answers, according to those instructions.' }} +{{- ' + +# User Preamble' }} +{{- ' +' + system_message }} +{{-' + +## Available Tools +Here is a list of tools that you have available to you: + +'}} +{%- set ns = namespace(new_tools=true) %} +{%- for tool in tools %} + {%- if tool.parameter_definitions is defined %} + {%- set ns.new_tools = false %} + {%- endif %} +{%- endfor %} +{%- if ns.new_tools %} + {{- new_tool_parser(tools) }} +{%- else %} + {{- old_tool_parser(tools) }} +{%- endif %} +{{- '<|END_OF_TURN_TOKEN|>'}} +{%- for message in loop_messages %} + {%- set content = message['content'] %} + {%- if message.role == 'user' %} + {{- '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content|trim + '<|END_OF_TURN_TOKEN|>' }} + {%- elif message.role == 'system' %} + {{- '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + content|trim + '<|END_OF_TURN_TOKEN|>' }} + {%- elif message.role == 'assistant' and message.tool_calls is defined %} + {{- '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }} + {%- if message.content is defined %} + {{- message.content|trim }} + {%- endif %} + {{- '\nAction:\n```json\n[\n' }} + {%- for tool_call in message.tool_calls %} + {%- if tool_call.function is defined %} + {%- set tool_call = tool_call.function %} + {%- endif %} + {{- '{\n'|indent(4, first=true) }} + {{- '"tool_name": "'|indent(8, first=true) + tool_call.name + '",\n' }} + {{- '"parameters": '|indent(8, first=true) }} + {%- if tool_call.arguments is defined and tool_call.arguments|length > 0 %} + {{- tool_call.arguments|tojson(indent=4)|indent(8) }} + {{- '\n' }} + {%- else %} + {{- '{}\n' }} + {%- endif %} + {{- '}'|indent(4, first=true) }} + {%- if not loop.last %} + {{- ',\n' }} + {%- endif %} + {%- endfor %} + {{- "\n]```\n" }} + {%- elif message.role == 'assistant' %} + {{- '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content|trim + '<|END_OF_TURN_TOKEN|>' }} + {%- elif message.role == 'tool' %} + {{- '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>\n' }} + {{- message.content|trim }} + {{- '<|END_OF_TURN_TOKEN|>' }} + {%- endif %} +{%- endfor %} +{{-'<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write \'Action:\' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user\'s last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example: +```json +[ + { + "tool_name": title of the tool in the specification, + "parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters + } +]```<|END_OF_TURN_TOKEN|>'}} +{%- if add_generation_prompt %} + {{- '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }} +{%- endif %} diff --git a/models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja b/models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja new file mode 100644 index 000000000..149250bd5 --- /dev/null +++ b/models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja @@ -0,0 +1,152 @@ +{%- macro json_to_python_type(json_spec) %} +{%- set basic_type_map = { + "string": "str", + "number": "float", + "integer": "int", + "boolean": "bool" +} %} + +{%- if basic_type_map[json_spec.type] is defined %} + {{- basic_type_map[json_spec.type] }} +{%- elif json_spec.type == "array" %} + {{- "list[" + json_to_python_type(json_spec|items) + "]"}} +{%- elif json_spec.type == "object" %} + {%- if json_spec.additionalProperties is defined %} + {{- "dict[str, " + json_to_python_type(json_spec.additionalProperties) + ']'}} + {%- else %} + {{- "dict" }} + {%- endif %} +{%- elif json_spec.type is iterable %} + {{- "Union[" }} + {%- for t in json_spec.type %} + {{- json_to_python_type({"type": t}) }} + {%- if not loop.last %} + {{- "," }} + {%- endif %} + {%- endfor %} + {{- "]" }} +{%- else %} + {{- "Any" }} +{%- endif %} +{%- endmacro %} + + +{{- bos_token }} +{{- '<|im_start|>system +' }} +{{- "You are a function calling AI model. You are provided with function signatures within XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: " }} +{%- for tool in tools %} + {%- if tool.function is defined %} + {%- set tool = tool.function %} + {%- endif %} + {{- '{"type": "function", "function": ' }} + {{- '{"name": "' + tool.name + '", ' }} + {{- '"description": "' + tool.name + '(' }} + {%- for param_name, param_fields in tool.parameters.properties|items %} + {{- param_name + ": " + json_to_python_type(param_fields) }} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- ")" }} + {%- if tool.return is defined %} + {{- " -> " + json_to_python_type(tool.return) }} + {%- endif %} + {{- " - " + tool.description + " + +" }} + {%- for param_name, param_fields in tool.parameters.properties|items %} + {%- if loop.first %} + {{- " Args: +" }} + {%- endif %} + {{- " " + param_name + "(" + json_to_python_type(param_fields) + "): " + param_fields.description|trim }} + {%- endfor %} + {%- if tool.return is defined and tool.return.description is defined %} + {{- " + Returns: + " + tool.return.description }} + {%- endif %} + {{- '"' }} + {{- ', "parameters": ' }} + {%- if tool.parameters.properties | length == 0 %} + {{- "{}" }} + {%- else %} + {{- tool.parameters|tojson }} + {%- endif %} + {{- "}" }} + {%- if not loop.last %} + {{- " +" }} + {%- endif %} +{%- endfor %} +{{- " " }} +{{- 'Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}} +' }} +{{- "For each function call return a json object with function name and arguments within XML tags as follows: +" }} +{{- " +" }} +{{- '{"name": , "arguments": } +' }} +{{- '<|im_end|> +' }} +{%- for message in messages %} + {%- if message.role == "user" or message.role == "system" or (message.role == "assistant" and message.tool_calls is not defined) %} + {{- '<|im_start|>' + message.role + ' +' + message.content + '<|im_end|>' + ' +' }} + {%- elif message.role == "assistant" %} + {{- '<|im_start|>' + message.role }} + {%- for tool_call in message.tool_calls %} + {{- ' + +' }} {%- if tool_call.function is defined %} + {%- set tool_call = tool_call.function %} + {%- endif %} + {{- '{' }} + {{- '"name": "' }} + {{- tool_call.name }} + {{- '"' }} + {{- ', '}} + {%- if tool_call.arguments is defined %} + {{- '"arguments": ' }} + {%- if tool_call.arguments is string %} + {{- tool_call.arguments }} + {%- else %} + {{- tool_call.arguments|tojson }} + {%- endif %} + {%- endif %} + {{- '}' }} + {{- ' +' }} + {%- endfor %} + {{- '<|im_end|> +' }} + {%- elif message.role == "tool" %} + {%- if loop.previtem and loop.previtem.role != "tool" %} + {{- '<|im_start|>tool +' }} + {%- endif %} + {{- ' +' }} + {{- message.content }} + {%- if not loop.last %} + {{- ' + +' }} + {%- else %} + {{- ' +' }} + {%- endif %} + {%- if not loop.last and loop.nextitem.role != "tool" %} + {{- '<|im_end|>' }} + {%- elif loop.last %} + {{- '<|im_end|>' }} + {%- endif %} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|im_start|>assistant +' }} +{%- endif %} diff --git a/models/templates/NousResearch-Hermes-3-Llama-3.1-8B-tool_use.jinja b/models/templates/NousResearch-Hermes-3-Llama-3.1-8B-tool_use.jinja new file mode 100644 index 000000000..149250bd5 --- /dev/null +++ b/models/templates/NousResearch-Hermes-3-Llama-3.1-8B-tool_use.jinja @@ -0,0 +1,152 @@ +{%- macro json_to_python_type(json_spec) %} +{%- set basic_type_map = { + "string": "str", + "number": "float", + "integer": "int", + "boolean": "bool" +} %} + +{%- if basic_type_map[json_spec.type] is defined %} + {{- basic_type_map[json_spec.type] }} +{%- elif json_spec.type == "array" %} + {{- "list[" + json_to_python_type(json_spec|items) + "]"}} +{%- elif json_spec.type == "object" %} + {%- if json_spec.additionalProperties is defined %} + {{- "dict[str, " + json_to_python_type(json_spec.additionalProperties) + ']'}} + {%- else %} + {{- "dict" }} + {%- endif %} +{%- elif json_spec.type is iterable %} + {{- "Union[" }} + {%- for t in json_spec.type %} + {{- json_to_python_type({"type": t}) }} + {%- if not loop.last %} + {{- "," }} + {%- endif %} + {%- endfor %} + {{- "]" }} +{%- else %} + {{- "Any" }} +{%- endif %} +{%- endmacro %} + + +{{- bos_token }} +{{- '<|im_start|>system +' }} +{{- "You are a function calling AI model. You are provided with function signatures within XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: " }} +{%- for tool in tools %} + {%- if tool.function is defined %} + {%- set tool = tool.function %} + {%- endif %} + {{- '{"type": "function", "function": ' }} + {{- '{"name": "' + tool.name + '", ' }} + {{- '"description": "' + tool.name + '(' }} + {%- for param_name, param_fields in tool.parameters.properties|items %} + {{- param_name + ": " + json_to_python_type(param_fields) }} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- ")" }} + {%- if tool.return is defined %} + {{- " -> " + json_to_python_type(tool.return) }} + {%- endif %} + {{- " - " + tool.description + " + +" }} + {%- for param_name, param_fields in tool.parameters.properties|items %} + {%- if loop.first %} + {{- " Args: +" }} + {%- endif %} + {{- " " + param_name + "(" + json_to_python_type(param_fields) + "): " + param_fields.description|trim }} + {%- endfor %} + {%- if tool.return is defined and tool.return.description is defined %} + {{- " + Returns: + " + tool.return.description }} + {%- endif %} + {{- '"' }} + {{- ', "parameters": ' }} + {%- if tool.parameters.properties | length == 0 %} + {{- "{}" }} + {%- else %} + {{- tool.parameters|tojson }} + {%- endif %} + {{- "}" }} + {%- if not loop.last %} + {{- " +" }} + {%- endif %} +{%- endfor %} +{{- " " }} +{{- 'Use the following pydantic model json schema for each tool call you will make: {"properties": {"name": {"title": "Name", "type": "string"}, "arguments": {"title": "Arguments", "type": "object"}}, "required": ["name", "arguments"], "title": "FunctionCall", "type": "object"}} +' }} +{{- "For each function call return a json object with function name and arguments within XML tags as follows: +" }} +{{- " +" }} +{{- '{"name": , "arguments": } +' }} +{{- '<|im_end|> +' }} +{%- for message in messages %} + {%- if message.role == "user" or message.role == "system" or (message.role == "assistant" and message.tool_calls is not defined) %} + {{- '<|im_start|>' + message.role + ' +' + message.content + '<|im_end|>' + ' +' }} + {%- elif message.role == "assistant" %} + {{- '<|im_start|>' + message.role }} + {%- for tool_call in message.tool_calls %} + {{- ' + +' }} {%- if tool_call.function is defined %} + {%- set tool_call = tool_call.function %} + {%- endif %} + {{- '{' }} + {{- '"name": "' }} + {{- tool_call.name }} + {{- '"' }} + {{- ', '}} + {%- if tool_call.arguments is defined %} + {{- '"arguments": ' }} + {%- if tool_call.arguments is string %} + {{- tool_call.arguments }} + {%- else %} + {{- tool_call.arguments|tojson }} + {%- endif %} + {%- endif %} + {{- '}' }} + {{- ' +' }} + {%- endfor %} + {{- '<|im_end|> +' }} + {%- elif message.role == "tool" %} + {%- if loop.previtem and loop.previtem.role != "tool" %} + {{- '<|im_start|>tool +' }} + {%- endif %} + {{- ' +' }} + {{- message.content }} + {%- if not loop.last %} + {{- ' + +' }} + {%- else %} + {{- ' +' }} + {%- endif %} + {%- if not loop.last and loop.nextitem.role != "tool" %} + {{- '<|im_end|>' }} + {%- elif loop.last %} + {{- '<|im_end|>' }} + {%- endif %} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|im_start|>assistant +' }} +{%- endif %} diff --git a/models/templates/Qwen-Qwen2.5-7B-Instruct.jinja b/models/templates/Qwen-Qwen2.5-7B-Instruct.jinja new file mode 100644 index 000000000..bdf7919a9 --- /dev/null +++ b/models/templates/Qwen-Qwen2.5-7B-Instruct.jinja @@ -0,0 +1,54 @@ +{%- if tools %} + {{- '<|im_start|>system\n' }} + {%- if messages[0]['role'] == 'system' %} + {{- messages[0]['content'] }} + {%- else %} + {{- 'You are Qwen, created by Alibaba Cloud. You are a helpful assistant.' }} + {%- endif %} + {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within XML tags:\n" }} + {%- for tool in tools %} + {{- "\n" }} + {{- tool | tojson }} + {%- endfor %} + {{- "\n\n\nFor each function call, return a json object with function name and arguments within XML tags:\n\n{\"name\": , \"arguments\": }\n<|im_end|>\n" }} +{%- else %} + {%- if messages[0]['role'] == 'system' %} + {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }} + {%- else %} + {{- '<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a helpful assistant.<|im_end|>\n' }} + {%- endif %} +{%- endif %} +{%- for message in messages %} + {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %} + {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }} + {%- elif message.role == "assistant" %} + {{- '<|im_start|>' + message.role }} + {%- if message.content %} + {{- '\n' + message.content }} + {%- endif %} + {%- for tool_call in message.tool_calls %} + {%- if tool_call.function is defined %} + {%- set tool_call = tool_call.function %} + {%- endif %} + {{- '\n\n{"name": "' }} + {{- tool_call.name }} + {{- '", "arguments": ' }} + {{- tool_call.arguments | tojson }} + {{- '}\n' }} + {%- endfor %} + {{- '<|im_end|>\n' }} + {%- elif message.role == "tool" %} + {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %} + {{- '<|im_start|>user' }} + {%- endif %} + {{- '\n\n' }} + {{- message.content }} + {{- '\n' }} + {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %} + {{- '<|im_end|>\n' }} + {%- endif %} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|im_start|>assistant\n' }} +{%- endif %} diff --git a/models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja b/models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja new file mode 100644 index 000000000..02a1c3bce --- /dev/null +++ b/models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja @@ -0,0 +1 @@ +{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '' in content %}{% set content = content.split('')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|>'}}{% endif %} \ No newline at end of file diff --git a/models/templates/deepseek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja b/models/templates/deepseek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja new file mode 100644 index 000000000..2ebfe7c1e --- /dev/null +++ b/models/templates/deepseek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja @@ -0,0 +1,56 @@ +{% if not add_generation_prompt is defined %} +{% set add_generation_prompt = false %} +{% endif %} +{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %} +{%- for message in messages %} +{%- if message['role'] == 'system' %} +{% set ns.system_prompt = message['content'] %} +{%- endif %} +{%- endfor %} +{{bos_token}} +{{ns.system_prompt}} +{%- for message in messages %} +{%- if message['role'] == 'user' %} +{%- set ns.is_tool = false -%} +{{'<|User|>' + message['content']}} +{%- endif %} +{%- if message['role'] == 'assistant' and message['content'] is none %} +{%- set ns.is_tool = false -%} +{%- for tool in message['tool_calls']%} +{%- if not ns.is_first %} +{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}} +{%- set ns.is_first = true -%} +{%- else %} +{{'\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\n' + '```json' + '\n' + tool['function']['arguments'] + '\n' + '```' + '<|tool▁call▁end|>'}} +{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}} +{%- endif %} +{%- endfor %} +{%- endif %} +{%- if message['role'] == 'assistant' and message['content'] is not none %} +{%- if ns.is_tool %} +{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}} +{%- set ns.is_tool = false -%} +{%- else %} +{% set content = message['content'] %} +{% if '' in content %} +{% set content = content.split('')[-1] %} +{% endif %} +{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}} +{%- endif %} +{%- endif %} +{%- if message['role'] == 'tool' %} +{%- set ns.is_tool = true -%} +{%- if ns.is_output_first %} +{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}} +{%- set ns.is_output_first = false %} +{%- else %} +{{'\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}} +{%- endif %} +{%- endif %} +{%- endfor -%} +{% if ns.is_tool %} +{{'<|tool▁outputs▁end|>'}} +{% endif %} +{% if add_generation_prompt and not ns.is_tool %} +{{'<|Assistant|>'}} +{% endif %} \ No newline at end of file diff --git a/models/templates/fireworks-ai-llama-3-firefunction-v2.jinja b/models/templates/fireworks-ai-llama-3-firefunction-v2.jinja new file mode 100644 index 000000000..9b8136df7 --- /dev/null +++ b/models/templates/fireworks-ai-llama-3-firefunction-v2.jinja @@ -0,0 +1,57 @@ +{%- set loop_messages = messages -%} +{%- set message_roles = ['system', 'user', 'assistant', 'tool'] -%} +{%- set system_prompt_suffix -%} +{%- filter trim -%} +In addition to plain text responses, you can chose to call one or more of the provided functions. + +Use the following rule to decide when to call a function: + * if the response can be generated from your internal knowledge (e.g., as in the case of queries like "What is the capital of Poland?"), do so + * if you need external information that can be obtained by calling one or more of the provided functions, generate a function calls + +If you decide to call functions: + * prefix function calls with functools marker (no closing marker required) + * all function calls should be generated in a single JSON list formatted as functools[{"name": [function name], "arguments": [function arguments as JSON]}, ...] + * follow the provided JSON schema. Do not hallucinate arguments or values. Do to blindly copy values from the provided samples + * respect the argument type formatting. E.g., if the type if number and format is float, write value 7 as 7.0 + * make sure you pick the right functions that match the user intent + +Available functions as JSON spec: +{%- endfilter -%} +{%- endset -%} +{%- set system_prompt_suffix = system_prompt_suffix + "\n" + functions -%} +{%- set system_prompt_suffix = system_prompt_suffix + '\nToday is ' + datetime + '.' -%} +{%- set ns = namespace(role='', content='') -%} +{#- Basic consistency checks -#} +{%- if not loop_messages -%} + {{ raise_exception('Expected non-empty messages') }} +{%- endif -%} +{%- for message in loop_messages -%} + {%- set ns.role = message['role'] | lower -%} + {%- if ns.role not in message_roles -%} + {%- set message_roles_string = message_roles | join(', ') -%} + {{ raise_exception('Invalid role ' + message['role'] + '. Only ' + message_roles_string + ' are supported.') }} + {%- endif -%} + {%- set msg_content = message['content'] | default('', true) | trim -%} + {%- if loop.index0 == 0 -%} + {%- if ns.role == 'system' -%} + {%- set system_prompt = '<|start_header_id|>' + 'system' + '<|end_header_id|>\n\n' + message['content'] | trim + '\n' + system_prompt_suffix + '<|eot_id|>' -%} + {%- else -%} + {%- set system_prompt = '<|start_header_id|>' + 'system' + '<|end_header_id|>\n\nYou are a helpful assistant with access to functions.\n' + system_prompt_suffix + '<|eot_id|>' -%} + {%- endif -%} + {%- set ns.content = bos_token + system_prompt -%} + {{- ns.content -}} + {%- endif -%} + {%- if loop.index0 > 0 or ns.role != 'system' -%} + {%- set ns.content = '<|start_header_id|>' + ns.role + '<|end_header_id|>\n\n' + msg_content -%} + {%- if 'tool_calls' in message and message['tool_calls'] -%} + {%- set tool = namespace(calls=[]) -%} + {%- for call in message['tool_calls'] -%} + {%- set tool.calls = tool.calls + ['{"name": "' + call['function']['name'] + '", "arguments": ' + call['function']['arguments'] + '}'] -%} + {%- endfor -%} + {%- set ns.content = ns.content + ' functools[' + tool.calls | join(', ') + ']' -%} + {%- endif -%} + {%- set ns.content = ns.content + '<|eot_id|>' -%} + {{- ns.content -}} + {%- endif -%} +{%- endfor -%} +{{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} diff --git a/models/templates/google-gemma-2-2b-it.jinja b/models/templates/google-gemma-2-2b-it.jinja new file mode 100644 index 000000000..923ec253c --- /dev/null +++ b/models/templates/google-gemma-2-2b-it.jinja @@ -0,0 +1,4 @@ +{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '' + role + ' +' + message['content'] | trim + ' +' }}{% endfor %}{% if add_generation_prompt %}{{'model +'}}{% endif %} \ No newline at end of file diff --git a/models/templates/meetkai-functionary-medium-v3.1.jinja b/models/templates/meetkai-functionary-medium-v3.1.jinja new file mode 100644 index 000000000..29d64a215 --- /dev/null +++ b/models/templates/meetkai-functionary-medium-v3.1.jinja @@ -0,0 +1,58 @@ +{# version=v3-llama3.1 #}{%- if not tools is defined -%} + {%- set tools = none -%} +{%- endif -%} + +{%- set has_code_interpreter = tools | selectattr("type", "equalto", "code_interpreter") | list | length > 0 -%} +{%- if has_code_interpreter -%} + {%- set tools = tools | rejectattr("type", "equalto", "code_interpreter") | list -%} +{%- endif -%} + +{#- System message + builtin tools #} +{{- bos_token + "<|start_header_id|>system<|end_header_id|>\n\n" }} +{%- if has_code_interpreter %} + {{- "Environment: ipython\n\n" }} +{%- else -%} + {{ "\n"}} +{%- endif %} +{{- "Cutting Knowledge Date: December 2023\n\n" }} +{%- if tools %} + {{- "\nYou have access to the following functions:\n\n" }} + {%- for t in tools %} + {%- if "type" in t -%} + {{ "Use the function '"|safe + t["function"]["name"] + "' to '"|safe + t["function"]["description"] + "'\n"|safe + t["function"] | tojson() }} + {%- else -%} + {{ "Use the function '"|safe + t["name"] + "' to '"|safe + t["description"] + "'\n"|safe + t | tojson() }} + {%- endif -%} + {{- "\n\n" }} + {%- endfor %} + {{- '\nThink very carefully before calling functions.\nIf a you choose to call a function ONLY reply in the following format:\n<{start_tag}={function_name}>{parameters}{end_tag}\nwhere\n\nstart_tag => ` a JSON dict with the function argument name as key and function argument value as value.\nend_tag => ``\n\nHere is an example,\n{"example_name": "example_value"}\n\nReminder:\n- If looking for real time information use relevant functions before falling back to brave_search\n- Function calls MUST follow the specified format, start with \n- Required parameters MUST be specified\n- Only call one function at a time\n- Put the entire function call reply on one line\n\n' -}} +{%- endif %} +{{- "<|eot_id|>" -}} + +{%- for message in messages -%} + {%- if message['role'] == 'user' or message['role'] == 'system' -%} + {{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }} + {%- elif message['role'] == 'tool' -%} + {{ '<|start_header_id|>ipython<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }} + {%- else -%} + {{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'}} + {%- if message['content'] -%} + {{ message['content'] }} + {%- endif -%} + {%- if 'tool_calls' in message and message['tool_calls'] -%} + {%- for tool_call in message['tool_calls'] -%} + {%- if tool_call["function"]["name"] == "python" -%} + {{ '<|python_tag|>' + tool_call['function']['arguments'] }} + {%- else -%} + {{ '' + tool_call['function']['arguments'] + '' }} + {%- endif -%} + {%- endfor -%} + {{ '<|eom_id|>' }} + {%- else -%} + {{ '<|eot_id|>' }} + {%- endif -%} + {%- endif -%} +{%- endfor -%} +{%- if add_generation_prompt -%} + {{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }} +{%- endif -%} \ No newline at end of file diff --git a/models/templates/meetkai-functionary-medium-v3.2.jinja b/models/templates/meetkai-functionary-medium-v3.2.jinja new file mode 100644 index 000000000..74fd1e7af --- /dev/null +++ b/models/templates/meetkai-functionary-medium-v3.2.jinja @@ -0,0 +1,287 @@ +{# version=v3.llama3 #}{%- macro append_new_param_info(param_declaration, comment_info, examples_info, depth) -%} + {%- set offset = "" -%} + {%- if depth >= 1 -%} + {%- set offset = " " * depth -%} + {%- endif -%} + {%- if comment_info != "<|NONE|>" -%} + {{ "\n" + offset + comment_info }} + {%- if examples_info | length > 0 -%} + {# Append each example info #} + {%- for example in examples_info -%} + {{ "\n" + offset + "// " + example|string|replace("'", '"') }} + {%- endfor -%} + {%- endif -%} + {%- endif -%} + {{ "\n" + offset + param_declaration }} +{%- endmacro -%} + +{%- macro convert_data_type(param_type) -%} + {%- if param_type == "integer" or param_type == "float" -%} + {{ "number" }} + {%- else -%} + {{ param_type }} + {%- endif -%} +{%- endmacro -%} + +{%- macro get_param_type(param) -%} + {%- set param_type = "any" -%} + + {%- if "type" in param -%} + {%- set raw_param_type = param["type"] -%} + {%- if raw_param_type is iterable and raw_param_type is not string -%} + {%- set param_type = raw_param_type | join(" | ") -%} + {%- else -%} + {%- set param_type = raw_param_type -%} + {%- endif -%} + {{ convert_data_type(param_type) }} + {%- elif "oneOf" in param -%} + {%- set one_of_types = param["oneOf"]|selectattr("type", "defined")|list -%} + {%- set one_of_types = one_of_types|map(attribute="type")|unique|list -%} + {{ convert_data_type(one_of_types | join(" | ")) }} + {%- endif -%} +{%- endmacro -%} + +{%- macro get_format_param(param) -%} + {%- if "format" in param -%} + {{ param["format"] }} + {%- elif "oneOf" in param -%} + {%- set formats = [] -%} + {%- for item in param["oneOf"] -%} + {%- if "format" in item -%} + {%- if item["format"] == param["oneOf"][-1]["format"] -%} + {{ item["format"] }} + {%- else -%} + {{ item["format"] + " or "}} + {%- endif -%} + {%- endif -%} + {%- endfor -%} + {%- else -%} + {{ "<|NONE|>" }} + {%- endif -%} +{%- endmacro -%} + +{%- macro get_param_info(param) -%} + {%- set param_type = param.get("type", "any") -%} + {%- set format_param = get_format_param(param) -%} + + {%- if "description" in param or "default" in param or format_param != "<|NONE|>" or param["maximum"] or param["minimum"] or param["maxLength"] or param["minLength"] -%} + {{ "//" }} + {%- if "description" in param -%} + {%- set desc = param["description"] -%} + {%- if not desc.endswith(".") -%} + {%- set desc = desc + "." -%} + {%- endif -%} + {{ " " + desc }} + {%- endif -%} + + {%- if "default" in param -%} + {%- set default_value = param["default"] -%} + {%- if param_type == "string" -%} + {%- set default_value = '"' ~ default_value ~ '"' -%} + {%- endif -%} + {{ " Default=" ~ default_value ~ "." }} + {%- endif -%} + + {%- set format_param = get_format_param(param) -%} + {%- if format_param != "<|NONE|>" -%} + {{ " Format=" ~ format_param }} + {%- endif -%} + + {%- for field, field_name in [("maximum", "Maximum"), ("minimum", "Minimum"), ("maxLength", "Maximum length"), ("minLength", "Minimum length")] -%} + {%- if field in param -%} + {{ " " + field_name ~ "=" ~ param[field] }} + {%- endif -%} + {%- endfor -%} + {%- else -%} + {{ "<|NONE|>"}} + {%- endif -%} +{%- endmacro -%} + +{%- macro get_enum_option_str(enum_options) -%} + {%- for v in enum_options -%} + {%- if v is string -%} + {{ '"' + v + '"' }} + {%- else -%} + {{ v }} + {%- endif -%} + {%- if enum_options|length > 0 and v != enum_options[-1] -%} + {{ " | " }} + {%- endif -%} + {%- endfor -%} +{%- endmacro -%} + +{%- macro get_array_typescript(param_name, param_dic, depth) -%} + {%- set offset = '' -%} + {%- if depth >= 1 -%} + {%- set offset = " " * depth -%} + {%- endif -%} + {%- set items_info = param_dic.get('items', {}) -%} + + {%- if items_info|length == 0 -%} + {%- if param_name -%} + {{ "\n" + offset + param_name + ": []" }} + {%- else -%} + {{ "\n" + offset + "[]" }} + {%- endif -%} + {%- else -%} + {%- set array_type = get_param_type(items_info) -%} + {%- if array_type == 'object' -%} + {%- if param_name -%} + {{ "\n" + offset + param_name + ": {" }} + {%- else -%} + {{ "\n" + offset + "{" }} + {%- endif -%} + {{ get_parameter_typescript(items_info.get('properties', {}), items_info.get('required', []), depth + 1) -}} + {{- "\n" + offset + "}[]" }} + {%- elif array_type == 'array' -%} + {%- set item_info = get_array_typescript(None, items_info, depth + 1) -%} + {%- if not param_name -%} + {{ "\n" + item_info + "[]" }} + {%- else -%} + {{ "\n" + offset + param_name + ": " + item_info|trim + "[]" }} + {%- endif -%} + {%- else -%} + {%- if 'enum' in items_info -%} + {%- set item_type = get_enum_option_str(items_info['enum']) -%} + {%- if param_name is none -%} + {{ "(" + item_type + ")[]"}} + {%- else -%} + {{ "\n" + offset + param_name + ": (" + item_type + ")[]" }} + {%- endif -%} + {%- else -%} + {%- if param_name is none -%} + {{ "\n" + array_type + "[]" }} + {%- else -%} + {{ "\n" + offset + param_name + ": " + array_type + "[]," }} + {%- endif -%} + {%- endif -%} + {%- endif -%} + {%- endif -%} +{%- endmacro -%} + +{%- macro get_parameter_typescript(properties, required_params, depth=0) -%} + {%- set res = "" -%} + {%- for param_name, param in properties.items() -%} + {%- if param is mapping -%} + {%- set comment_info = get_param_info(param) -%} + {# Param Examples #} + {%- set examples_info = [] -%} + {%- if "examples" in param -%} + {%- set examples_info = ["Example " + param_name + ":"] -%} + {%- set examples_info = examples_info + param["examples"] -%} + {%- endif -%} + + {# Param Name declaration #} + {%- set param_declaration = param_name -%} + {%- if required_params is iterable and param_name not in required_params -%} + {%- set param_declaration = param_declaration + "?" -%} + {%- endif -%} + + {%- set param_type = get_param_type(param) -%} + + {# Handle indentation based on depth #} + {%- set offset = "" -%} + {%- if depth >= 1 -%} + {%- set offset = " " * depth -%} + {%- endif -%} + + {%- if param_type == "object" -%} + {%- if comment_info != "<|NONE|>" -%} + {{ "\n" + offset + comment_info }} + {%- endif -%} + {%- if examples_info|length > 0 -%} + {%- for example in examples_info -%} + {{ "\n" + offset + "// " + example|string|replace("'", '"') }} + {%- endfor -%} + {%- endif -%} + {%- set param_declaration = param_declaration + ": {" -%} + {{ "\n" + offset + param_declaration -}} + {{- get_parameter_typescript(param.get("properties", {}), param.get("required", []), depth + 1) -}} + {{- "\n" + offset + "}," }} + {%- elif param_type == "array" -%} + {%- set item_info = param.get("items", {}) -%} + {%- if "type" not in item_info -%} + {%- set param_declaration = param_declaration + ": []," -%} + {{ append_new_param_info(param_declaration, comment_info, examples_info, depth) }} + {%- else -%} + {%- if comment_info != "<|NONE|>" -%} + {{ "\n" + offset + comment_info }} + {%- endif -%} + {%- if examples_info|length > 0 -%} + {%- for example in examples_info -%} + {{ "\n" + offset + "// " + example|string|replace("'", '"') }} + {%- endfor -%} + {%- endif -%} + {%- set array_declaration = get_array_typescript(param_declaration, param, depth) -%} + {%- if not array_declaration.endswith(",") -%} + {%- set array_declaration = array_declaration + "," -%} + {%- endif -%} + {{ array_declaration}} + {%- endif -%} + {%- else -%} + {%- if "enum" in param -%} + {%- set param_type = get_enum_option_str(param["enum"]) -%} + {%- endif -%} + {%- if "nullable" in param and param["nullable"] -%} + {%- set param_type = param_type + " | null" -%} + {%- endif -%} + {%- set param_declaration = param_declaration + ": " + param_type + "," -%} + {{ append_new_param_info(param_declaration, comment_info, examples_info, depth) }} + {%- endif -%} + {%- endif -%} + {%- endfor -%} +{%- endmacro -%} + +{%- macro generate_schema_from_functions(functions, namespace='functions') -%} + {{ "// Supported function definitions that should be called when necessary.\n" -}} + {{- "namespace " + namespace + " {\n\n" -}} + + {%- for function in functions -%} + {%- if function.get("function") -%} + {%- set function = function.get("function") -%} + {%- endif -%} + + {%- set function_name = function.get("name") -%} + {%- if function_name -%} + {%- set description = function.get('description', '') -%} + {%- set parameters = function.get('parameters', {}) -%} + {{- "// " + description + "\n" -}} + {{- "type " + function_name -}} + {%- if parameters and parameters.get("properties") -%} + {{- " = (_: {" -}} + {%- set required_params = parameters.get("required", []) -%} + {{ get_parameter_typescript(parameters.get("properties"), required_params, 0) -}} + {{- "\n}) => any;\n\n" }} + {%- else -%} + {{ " = () => any;\n\n" }} + {%- endif -%} + {%- endif -%} + {%- endfor -%} + {{ "} // namespace " + namespace }} +{%- endmacro -%} +{%- if not tools -%} + {%- set tools = [] -%} +{%- endif -%} +{{ bos_token + '<|start_header_id|>system<|end_header_id|>\n\nYou are capable of executing available function(s) if required.\nOnly execute function(s) when absolutely necessary.\nAsk for the required input to:recipient==all\nUse JSON for function arguments.\nRespond in this format:\n>>>${recipient}\n${content}\nAvailable functions:\n' + generate_schema_from_functions(tools) + '<|eot_id|>' -}} +{%- if tools|length > 0 and tools|selectattr("type", "equalto", "code_interpreter")|list|length > 0 -%} + {{ '<|start_header_id|>system<|end_header_id|>\n\nWhen you send a message containing Python code to python, it will be executed in a stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 60.0 seconds. The drive at \'/mnt/data\' can be used to save and persist user files.<|eot_id|>' }} +{%- endif -%} +{%- for message in messages -%} + {%- if message['role'] == 'user' or message['role'] == 'system' -%} + {{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }} + {%- elif message['role'] == 'tool' -%} + {{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' + message['content'] + '<|eot_id|>' }} + {%- else -%} + {{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'}} + {%- if message['content'] -%} + {{ '>>>all\n' + message['content'] }} + {%- endif -%} + {%- if 'tool_calls' in message and message['tool_calls'] -%} + {%- for tool_call in message['tool_calls'] -%} + {{ '>>>' + tool_call['function']['name'] + '\n' + tool_call['function']['arguments'] }} + {%- endfor -%} + {%- endif -%} + {{ '<|eot_id|>' }} + {%- endif -%} +{%- endfor -%} +{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n>>>' }}{% endif %} \ No newline at end of file diff --git a/models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja b/models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja new file mode 100644 index 000000000..33089ace1 --- /dev/null +++ b/models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja @@ -0,0 +1,109 @@ +{{- bos_token }} +{%- if custom_tools is defined %} + {%- set tools = custom_tools %} +{%- endif %} +{%- if not tools_in_user_message is defined %} + {%- set tools_in_user_message = true %} +{%- endif %} +{%- if not date_string is defined %} + {%- set date_string = "26 Jul 2024" %} +{%- endif %} +{%- if not tools is defined %} + {%- set tools = none %} +{%- endif %} + +{#- This block extracts the system message, so we can slot it into the right place. #} +{%- if messages[0]['role'] == 'system' %} + {%- set system_message = messages[0]['content']|trim %} + {%- set messages = messages[1:] %} +{%- else %} + {%- set system_message = "" %} +{%- endif %} + +{#- System message + builtin tools #} +{{- "<|start_header_id|>system<|end_header_id|>\n\n" }} +{%- if builtin_tools is defined or tools is not none %} + {{- "Environment: ipython\n" }} +{%- endif %} +{%- if builtin_tools is defined %} + {{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}} +{%- endif %} +{{- "Cutting Knowledge Date: December 2023\n" }} +{{- "Today Date: " + date_string + "\n\n" }} +{%- if tools is not none and not tools_in_user_message %} + {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} +{%- endif %} +{{- system_message }} +{{- "<|eot_id|>" }} + +{#- Custom tools are passed in a user message with some extra guidance #} +{%- if tools_in_user_message and not tools is none %} + {#- Extract the first user message so we can plug it in here #} + {%- if messages | length != 0 %} + {%- set first_user_message = messages[0]['content']|trim %} + {%- set messages = messages[1:] %} + {%- else %} + {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} +{%- endif %} + {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}} + {{- "Given the following functions, please respond with a JSON for a function call " }} + {{- "with its proper arguments that best answers the given prompt.\n\n" }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} + {{- first_user_message + "<|eot_id|>"}} +{%- endif %} + +{%- for message in messages %} + {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }} + {%- elif 'tool_calls' in message %} + {%- if not message.tool_calls|length == 1 %} + {{- raise_exception("This model only supports single tool-calls at once!") }} + {%- endif %} + {%- set tool_call = message.tool_calls[0].function %} + {%- if builtin_tools is defined and tool_call.name in builtin_tools %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {{- "<|python_tag|>" + tool_call.name + ".call(" }} + {%- for arg_name, arg_val in tool_call.arguments | items %} + {{- arg_name + '="' + arg_val + '"' }} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- ")" }} + {%- else %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {{- '{"name": "' + tool_call.name + '", ' }} + {{- '"parameters": ' }} + {{- tool_call.arguments | tojson }} + {{- "}" }} + {%- endif %} + {%- if builtin_tools is defined %} + {#- This means we're in ipython mode #} + {{- "<|eom_id|>" }} + {%- else %} + {{- "<|eot_id|>" }} + {%- endif %} + {%- elif message.role == "tool" or message.role == "ipython" %} + {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }} + {%- if message.content is mapping or message.content is iterable %} + {{- message.content | tojson }} + {%- else %} + {{- message.content }} + {%- endif %} + {{- "<|eot_id|>" }} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }} +{%- endif %} diff --git a/models/templates/meta-llama-Llama-3.2-3B-Instruct.jinja b/models/templates/meta-llama-Llama-3.2-3B-Instruct.jinja new file mode 100644 index 000000000..1bad6a0f6 --- /dev/null +++ b/models/templates/meta-llama-Llama-3.2-3B-Instruct.jinja @@ -0,0 +1,93 @@ +{{- bos_token }} +{%- if custom_tools is defined %} + {%- set tools = custom_tools %} +{%- endif %} +{%- if not tools_in_user_message is defined %} + {%- set tools_in_user_message = true %} +{%- endif %} +{%- if not date_string is defined %} + {%- if strftime_now is defined %} + {%- set date_string = strftime_now("%d %b %Y") %} + {%- else %} + {%- set date_string = "26 Jul 2024" %} + {%- endif %} +{%- endif %} +{%- if not tools is defined %} + {%- set tools = none %} +{%- endif %} + +{#- This block extracts the system message, so we can slot it into the right place. #} +{%- if messages[0]['role'] == 'system' %} + {%- set system_message = messages[0]['content']|trim %} + {%- set messages = messages[1:] %} +{%- else %} + {%- set system_message = "" %} +{%- endif %} + +{#- System message #} +{{- "<|start_header_id|>system<|end_header_id|>\n\n" }} +{%- if tools is not none %} + {{- "Environment: ipython\n" }} +{%- endif %} +{{- "Cutting Knowledge Date: December 2023\n" }} +{{- "Today Date: " + date_string + "\n\n" }} +{%- if tools is not none and not tools_in_user_message %} + {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} +{%- endif %} +{{- system_message }} +{{- "<|eot_id|>" }} + +{#- Custom tools are passed in a user message with some extra guidance #} +{%- if tools_in_user_message and not tools is none %} + {#- Extract the first user message so we can plug it in here #} + {%- if messages | length != 0 %} + {%- set first_user_message = messages[0]['content']|trim %} + {%- set messages = messages[1:] %} + {%- else %} + {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} +{%- endif %} + {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}} + {{- "Given the following functions, please respond with a JSON for a function call " }} + {{- "with its proper arguments that best answers the given prompt.\n\n" }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} + {{- first_user_message + "<|eot_id|>"}} +{%- endif %} + +{%- for message in messages %} + {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }} + {%- elif 'tool_calls' in message %} + {%- if not message.tool_calls|length == 1 %} + {{- raise_exception("This model only supports single tool-calls at once!") }} + {%- endif %} + {%- set tool_call = message.tool_calls[0].function %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {{- '{"name": "' + tool_call.name + '", ' }} + {{- '"parameters": ' }} + {{- tool_call.arguments | tojson }} + {{- "}" }} + {{- "<|eot_id|>" }} + {%- elif message.role == "tool" or message.role == "ipython" %} + {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }} + {%- if message.content is mapping or message.content is iterable %} + {{- message.content | tojson }} + {%- else %} + {{- message.content }} + {%- endif %} + {{- "<|eot_id|>" }} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }} +{%- endif %} diff --git a/models/templates/meta-llama-Llama-3.3-70B-Instruct.jinja b/models/templates/meta-llama-Llama-3.3-70B-Instruct.jinja new file mode 100644 index 000000000..33089ace1 --- /dev/null +++ b/models/templates/meta-llama-Llama-3.3-70B-Instruct.jinja @@ -0,0 +1,109 @@ +{{- bos_token }} +{%- if custom_tools is defined %} + {%- set tools = custom_tools %} +{%- endif %} +{%- if not tools_in_user_message is defined %} + {%- set tools_in_user_message = true %} +{%- endif %} +{%- if not date_string is defined %} + {%- set date_string = "26 Jul 2024" %} +{%- endif %} +{%- if not tools is defined %} + {%- set tools = none %} +{%- endif %} + +{#- This block extracts the system message, so we can slot it into the right place. #} +{%- if messages[0]['role'] == 'system' %} + {%- set system_message = messages[0]['content']|trim %} + {%- set messages = messages[1:] %} +{%- else %} + {%- set system_message = "" %} +{%- endif %} + +{#- System message + builtin tools #} +{{- "<|start_header_id|>system<|end_header_id|>\n\n" }} +{%- if builtin_tools is defined or tools is not none %} + {{- "Environment: ipython\n" }} +{%- endif %} +{%- if builtin_tools is defined %} + {{- "Tools: " + builtin_tools | reject('equalto', 'code_interpreter') | join(", ") + "\n\n"}} +{%- endif %} +{{- "Cutting Knowledge Date: December 2023\n" }} +{{- "Today Date: " + date_string + "\n\n" }} +{%- if tools is not none and not tools_in_user_message %} + {{- "You have access to the following functions. To call a function, please respond with JSON for a function call." }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} +{%- endif %} +{{- system_message }} +{{- "<|eot_id|>" }} + +{#- Custom tools are passed in a user message with some extra guidance #} +{%- if tools_in_user_message and not tools is none %} + {#- Extract the first user message so we can plug it in here #} + {%- if messages | length != 0 %} + {%- set first_user_message = messages[0]['content']|trim %} + {%- set messages = messages[1:] %} + {%- else %} + {{- raise_exception("Cannot put tools in the first user message when there's no first user message!") }} +{%- endif %} + {{- '<|start_header_id|>user<|end_header_id|>\n\n' -}} + {{- "Given the following functions, please respond with a JSON for a function call " }} + {{- "with its proper arguments that best answers the given prompt.\n\n" }} + {{- 'Respond in the format {"name": function name, "parameters": dictionary of argument name and its value}.' }} + {{- "Do not use variables.\n\n" }} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} + {{- first_user_message + "<|eot_id|>"}} +{%- endif %} + +{%- for message in messages %} + {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }} + {%- elif 'tool_calls' in message %} + {%- if not message.tool_calls|length == 1 %} + {{- raise_exception("This model only supports single tool-calls at once!") }} + {%- endif %} + {%- set tool_call = message.tool_calls[0].function %} + {%- if builtin_tools is defined and tool_call.name in builtin_tools %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {{- "<|python_tag|>" + tool_call.name + ".call(" }} + {%- for arg_name, arg_val in tool_call.arguments | items %} + {{- arg_name + '="' + arg_val + '"' }} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- ")" }} + {%- else %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {{- '{"name": "' + tool_call.name + '", ' }} + {{- '"parameters": ' }} + {{- tool_call.arguments | tojson }} + {{- "}" }} + {%- endif %} + {%- if builtin_tools is defined %} + {#- This means we're in ipython mode #} + {{- "<|eom_id|>" }} + {%- else %} + {{- "<|eot_id|>" }} + {%- endif %} + {%- elif message.role == "tool" or message.role == "ipython" %} + {{- "<|start_header_id|>ipython<|end_header_id|>\n\n" }} + {%- if message.content is mapping or message.content is iterable %} + {{- message.content | tojson }} + {%- else %} + {{- message.content }} + {%- endif %} + {{- "<|eot_id|>" }} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }} +{%- endif %} diff --git a/models/templates/microsoft-Phi-3.5-mini-instruct.jinja b/models/templates/microsoft-Phi-3.5-mini-instruct.jinja new file mode 100644 index 000000000..d1533d152 --- /dev/null +++ b/models/templates/microsoft-Phi-3.5-mini-instruct.jinja @@ -0,0 +1,8 @@ +{% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|> +' + message['content'] + '<|end|> +'}}{% elif message['role'] == 'user' %}{{'<|user|> +' + message['content'] + '<|end|> +'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|> +' + message['content'] + '<|end|> +'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|> +' }}{% else %}{{ eos_token }}{% endif %} \ No newline at end of file diff --git a/models/templates/mistralai-Mistral-Nemo-Instruct-2407.jinja b/models/templates/mistralai-Mistral-Nemo-Instruct-2407.jinja new file mode 100644 index 000000000..9c21a3f13 --- /dev/null +++ b/models/templates/mistralai-Mistral-Nemo-Instruct-2407.jinja @@ -0,0 +1,87 @@ +{%- if messages[0]["role"] == "system" %} + {%- set system_message = messages[0]["content"] %} + {%- set loop_messages = messages[1:] %} +{%- else %} + {%- set loop_messages = messages %} +{%- endif %} +{%- if not tools is defined %} + {%- set tools = none %} +{%- endif %} +{%- set user_messages = loop_messages | selectattr("role", "equalto", "user") | list %} + +{#- This block checks for alternating user/assistant messages, skipping tool calling messages #} +{%- set ns = namespace() %} +{%- set ns.index = 0 %} +{%- for message in loop_messages %} + {%- if not (message.role == "tool" or message.role == "tool_results" or (message.tool_calls is defined and message.tool_calls is not none)) %} + {%- if (message["role"] == "user") != (ns.index % 2 == 0) %} + {{- raise_exception("After the optional system message, conversation roles must alternate user/assistant/user/assistant/...") }} + {%- endif %} + {%- set ns.index = ns.index + 1 %} + {%- endif %} +{%- endfor %} + +{{- bos_token }} +{%- for message in loop_messages %} + {%- if message["role"] == "user" %} + {%- if tools is not none and (message == user_messages[-1]) %} + {{- "[AVAILABLE_TOOLS][" }} + {%- for tool in tools %} + {%- set tool = tool.function %} + {{- '{"type": "function", "function": {' }} + {%- for key, val in tool.items() if key != "return" %} + {%- if val is string %} + {{- '"' + key + '": "' + val + '"' }} + {%- else %} + {{- '"' + key + '": ' + val|tojson }} + {%- endif %} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- "}}" }} + {%- if not loop.last %} + {{- ", " }} + {%- else %} + {{- "]" }} + {%- endif %} + {%- endfor %} + {{- "[/AVAILABLE_TOOLS]" }} + {%- endif %} + {%- if loop.last and system_message is defined %} + {{- "[INST]" + system_message + "\n\n" + message["content"] + "[/INST]" }} + {%- else %} + {{- "[INST]" + message["content"] + "[/INST]" }} + {%- endif %} + {%- elif (message.tool_calls is defined and message.tool_calls is not none) %} + {{- "[TOOL_CALLS][" }} + {%- for tool_call in message.tool_calls %} + {%- set out = tool_call.function|tojson %} + {{- out[:-1] }} + {%- if not tool_call.id is defined or tool_call.id|length != 9 %} + {{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }} + {%- endif %} + {{- ', "id": "' + tool_call.id + '"}' }} + {%- if not loop.last %} + {{- ", " }} + {%- else %} + {{- "]" + eos_token }} + {%- endif %} + {%- endfor %} + {%- elif message["role"] == "assistant" %} + {{- message["content"] + eos_token}} + {%- elif message["role"] == "tool_results" or message["role"] == "tool" %} + {%- if message.content is defined and message.content.content is defined %} + {%- set content = message.content.content %} + {%- else %} + {%- set content = message.content %} + {%- endif %} + {{- '[TOOL_RESULTS]{"content": ' + content|string + ", " }} + {%- if not message.tool_call_id is defined or message.tool_call_id|length != 9 %} + {{- raise_exception("Tool call IDs should be alphanumeric strings with length 9!") }} + {%- endif %} + {{- '"call_id": "' + message.tool_call_id + '"}[/TOOL_RESULTS]' }} + {%- else %} + {{- raise_exception("Only user and assistant roles are supported, with the exception of an initial optional system message!") }} + {%- endif %} +{%- endfor %} diff --git a/scripts/fetch_server_test_models.py b/scripts/fetch_server_test_models.py new file mode 100755 index 000000000..05690b138 --- /dev/null +++ b/scripts/fetch_server_test_models.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +''' + This script fetches all the models used in the server tests. + + This is useful for slow tests that use larger models, to avoid them timing out on the model downloads. + + It is meant to be run from the root of the repository. + + Example: + python scripts/fetch_server_test_models.py + ( cd examples/server/tests && ./tests.sh -v -x -m slow ) +''' +import ast +import glob +import logging +import os +from typing import Generator +from pydantic import BaseModel +from typing import Optional +import subprocess + + +class HuggingFaceModel(BaseModel): + hf_repo: str + hf_file: Optional[str] = None + + class Config: + frozen = True + + +def collect_hf_model_test_parameters(test_file) -> Generator[HuggingFaceModel, None, None]: + try: + with open(test_file) as f: + tree = ast.parse(f.read()) + except Exception as e: + logging.error(f'collect_hf_model_test_parameters failed on {test_file}: {e}') + return + + for node in ast.walk(tree): + if isinstance(node, ast.FunctionDef): + for dec in node.decorator_list: + if isinstance(dec, ast.Call) and isinstance(dec.func, ast.Attribute) and dec.func.attr == 'parametrize': + param_names = ast.literal_eval(dec.args[0]).split(",") + if "hf_repo" not in param_names: + continue + + raw_param_values = dec.args[1] + if not isinstance(raw_param_values, ast.List): + logging.warning(f'Skipping non-list parametrize entry at {test_file}:{node.lineno}') + continue + + hf_repo_idx = param_names.index("hf_repo") + hf_file_idx = param_names.index("hf_file") if "hf_file" in param_names else None + + for t in raw_param_values.elts: + if not isinstance(t, ast.Tuple): + logging.warning(f'Skipping non-tuple parametrize entry at {test_file}:{node.lineno}') + continue + yield HuggingFaceModel( + hf_repo=ast.literal_eval(t.elts[hf_repo_idx]), + hf_file=ast.literal_eval(t.elts[hf_file_idx]) if hf_file_idx is not None else None) + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s') + + models = sorted(list(set([ + model + for test_file in glob.glob('examples/server/tests/unit/test_*.py') + for model in collect_hf_model_test_parameters(test_file) + ])), key=lambda m: (m.hf_repo, m.hf_file)) + + logging.info(f'Found {len(models)} models in parameterized tests:') + for m in models: + logging.info(f' - {m.hf_repo} / {m.hf_file}') + + cli_path = os.environ.get( + 'LLAMA_SERVER_BIN_PATH', + os.path.join( + os.path.dirname(__file__), + '../build/bin/Release/llama-cli.exe' if os.name == 'nt' else '../build/bin/llama-cli')) + + for m in models: + if '<' in m.hf_repo or (m.hf_file is not None and '<' in m.hf_file): + continue + if m.hf_file is not None and '-of-' in m.hf_file: + logging.warning(f'Skipping model at {m.hf_repo} / {m.hf_file} because it is a split file') + continue + logging.info(f'Using llama-cli to ensure model {m.hf_repo}/{m.hf_file} was fetched') + cmd = [ + cli_path, + '-hfr', m.hf_repo, + *([] if m.hf_file is None else ['-hff', m.hf_file]), + '-n', '1', + '-p', 'Hey', + '--no-warmup', + '--log-disable', + '-no-cnv'] + if m.hf_file != 'tinyllamas/stories260K.gguf' and 'Mistral-Nemo' not in m.hf_repo: + cmd.append('-fa') + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + logging.error(f'Failed to fetch model at {m.hf_repo} / {m.hf_file} with command:\n {" ".join(cmd)}') + exit(1) diff --git a/scripts/get_hf_chat_template.py b/scripts/get_chat_template.py old mode 100755 new mode 100644 similarity index 86% rename from scripts/get_hf_chat_template.py rename to scripts/get_chat_template.py index 23bb1de59..e8982d11a --- a/scripts/get_hf_chat_template.py +++ b/scripts/get_chat_template.py @@ -4,12 +4,12 @@ If a model has multiple chat templates, you can specify the variant name. Syntax: - ./scripts/get_hf_chat_template.py model_id [variant] + ./scripts/get_chat_template.py model_id [variant] Examples: - ./scripts/get_hf_chat_template.py NousResearch/Meta-Llama-3-8B-Instruct - ./scripts/get_hf_chat_template.py NousResearch/Hermes-3-Llama-3.1-8B tool_use - ./scripts/get_hf_chat_template.py meta-llama/Llama-3.2-3B-Instruct + ./scripts/get_chat_template.py NousResearch/Meta-Llama-3-8B-Instruct + ./scripts/get_chat_template.py NousResearch/Hermes-3-Llama-3.1-8B tool_use + ./scripts/get_chat_template.py meta-llama/Llama-3.2-3B-Instruct ''' import json @@ -17,7 +17,7 @@ import re import sys -def get_hf_chat_template(model_id, variant=None): +def get_chat_template(model_id, variant=None): try: # Use huggingface_hub library if available. # Allows access to gated models if the user has access and ran `huggingface-cli login`. @@ -69,7 +69,7 @@ def main(args): model_id = args[0] variant = None if len(args) < 2 else args[1] - template = get_hf_chat_template(model_id, variant) + template = get_chat_template(model_id, variant) sys.stdout.write(template) diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp index bebe4e9a3..6be5cbe0e 100644 --- a/src/llama-grammar.cpp +++ b/src/llama-grammar.cpp @@ -560,7 +560,7 @@ bool llama_grammar_parser::parse(const char * src) { } } } catch (const std::exception & err) { - fprintf(stderr, "%s: error parsing grammar: %s\n", __func__, err.what()); + fprintf(stderr, "%s: error parsing grammar: %s\n\n%s\n", __func__, err.what(), src); rules.clear(); return false; } @@ -960,10 +960,28 @@ struct llama_grammar * llama_grammar_init_impl( // Important: vec_rules has to be moved here, not copied, because stacks contains // pointers to elements of vec_rules. If vec_rules were copied into llama_grammar // then the pointers would be invalidated when the local vec_rules goes out of scope. - return new llama_grammar { vocab, std::move(vec_rules), std::move(stacks), {}, }; + return new llama_grammar { + vocab, + std::move(vec_rules), + std::move(stacks), + /* .partial_utf8 = */ {}, + /* .lazy =*/ false, + /* .awaiting_trigger = */ false, + /* .trigger_buffer = */ "", + /* .trigger_tokens = */ {}, + /* .trigger_words = */ {}, + }; } -struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root) { +struct llama_grammar * llama_grammar_init_impl( + const struct llama_vocab * vocab, + const char * grammar_str, + const char * grammar_root, + bool lazy, + const char ** trigger_words, + size_t num_trigger_words, + const llama_token * trigger_tokens, + size_t num_trigger_tokens) { llama_grammar_parser parser; // if there is a grammar, parse it @@ -1035,10 +1053,31 @@ struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab, } } while (true); + std::vector vec_trigger_tokens; + std::vector vec_trigger_words; + for (size_t i = 0; i < num_trigger_tokens; i++) { + GGML_ASSERT(trigger_tokens != nullptr); + vec_trigger_tokens.push_back(trigger_tokens[i]); + } + for (size_t i = 0; i < num_trigger_words; i++) { + GGML_ASSERT(trigger_words != nullptr); + vec_trigger_words.push_back(trigger_words[i]); + } + // Important: vec_rules has to be moved here, not copied, because stacks contains // pointers to elements of vec_rules. If vec_rules were copied into llama_grammar // then the pointers would be invalidated when the local vec_rules goes out of scope. - return new llama_grammar { vocab, std::move(vec_rules), std::move(stacks), {}, }; + return new llama_grammar { + vocab, + std::move(vec_rules), + std::move(stacks), + /* .partial_utf8 = */ {}, + /* .lazy = */ lazy, + /* .awaiting_trigger = */ lazy, + /* .trigger_buffer = */ "", + std::move(vec_trigger_tokens), + std::move(vec_trigger_words), + }; } void llama_grammar_free_impl(struct llama_grammar * grammar) { @@ -1055,6 +1094,11 @@ struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & gra grammar.rules, grammar.stacks, grammar.partial_utf8, + grammar.lazy, + grammar.awaiting_trigger, + grammar.trigger_buffer, + grammar.trigger_tokens, + grammar.trigger_words, }; // redirect elements in stacks to point to new rules @@ -1076,6 +1120,10 @@ struct llama_grammar * llama_grammar_clone_impl(const struct llama_grammar & gra void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_data_array * cur_p) { GGML_ASSERT(grammar.vocab != nullptr); + if (grammar.awaiting_trigger) { + return; + } + bool allow_eog = false; for (const auto & stack : grammar.stacks) { if (stack.empty()) { @@ -1115,6 +1163,34 @@ void llama_grammar_apply_impl(const struct llama_grammar & grammar, llama_token_ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token) { GGML_ASSERT(grammar.vocab != nullptr); + const auto & piece = grammar.vocab->token_to_piece(token); + + if (grammar.awaiting_trigger) { + if (std::find(grammar.trigger_tokens.begin(), grammar.trigger_tokens.end(), token) != grammar.trigger_tokens.end()) { + grammar.awaiting_trigger = false; + grammar.trigger_buffer.clear(); + llama_grammar_accept_str(grammar, piece); + LLAMA_LOG_DEBUG("Grammar triggered on token %u (`%s`)", token, piece.c_str()); + return; + } else { + // TODO: consider a smarter incremental substring search algorithm (store last position to search from). + grammar.trigger_buffer += piece; + for (const auto & word : grammar.trigger_words) { + auto pos = grammar.trigger_buffer.find(word); + if (pos != std::string::npos) { + grammar.awaiting_trigger = false; + auto constrained_str = grammar.trigger_buffer.substr(pos); + grammar.trigger_buffer.clear(); + llama_grammar_accept_str(grammar, constrained_str); + LLAMA_LOG_DEBUG("Grammar triggered on word `%s`", word.c_str()); + return; + } + } + LLAMA_LOG_DEBUG("Grammar still awaiting trigger after token %d (`%s`) (buffer: `%s`)\n", token, piece.c_str(), grammar.trigger_buffer.c_str()); + return; + } + } + if (grammar.vocab->is_eog(token)) { for (const auto & stack : grammar.stacks) { if (stack.empty()) { @@ -1124,8 +1200,10 @@ void llama_grammar_accept_impl(struct llama_grammar & grammar, llama_token token GGML_ABORT("fatal error"); } - const std::string & piece = grammar.vocab->token_to_piece(token); + llama_grammar_accept_str(grammar, piece); +} +void llama_grammar_accept_str(struct llama_grammar & grammar, const std::string & piece) { // Note terminating 0 in decoded string const auto decoded = decode_utf8(piece, grammar.partial_utf8); const auto & code_points = decoded.first; diff --git a/src/llama-grammar.h b/src/llama-grammar.h index f8b40c651..252d54d4c 100644 --- a/src/llama-grammar.h +++ b/src/llama-grammar.h @@ -114,6 +114,15 @@ struct llama_grammar { // buffer for partially generated UTF-8 sequence from accepted tokens llama_partial_utf8 partial_utf8; + + // lazy grammars wait for trigger words or tokens before constraining the sampling. + // we still ahve trigger_tokens for non-lazy grammars to force printing of special trigger tokens. + // (useful e.g. for tool_choice=required) + bool lazy = false; + bool awaiting_trigger = false; // Initialized to true for lazy grammars only + std::string trigger_buffer; // Output buffered by lazy grammar. Will be cleared once trigger is found. + std::vector trigger_tokens; // Tokens that trigger a lazy grammar, or tokens to force printing of (even if special). + std::vector trigger_words; }; // @@ -127,7 +136,15 @@ struct llama_grammar * llama_grammar_init_impl( size_t n_rules, size_t start_rule_index); -struct llama_grammar * llama_grammar_init_impl(const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root); +struct llama_grammar * llama_grammar_init_impl( + const struct llama_vocab * vocab, + const char * grammar_str, + const char * grammar_root, + bool lazy, + const char ** trigger_words, + size_t num_trigger_words, + const llama_token * trigger_tokens, + size_t num_trigger_tokens); void llama_grammar_free_impl(struct llama_grammar * grammar); @@ -141,3 +158,7 @@ void llama_grammar_apply_impl( void llama_grammar_accept_impl( struct llama_grammar & grammar, llama_token token); + +void llama_grammar_accept_str( + struct llama_grammar & grammar, + const std::string & piece); diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp index b3a12386e..26974f539 100644 --- a/src/llama-sampling.cpp +++ b/src/llama-sampling.cpp @@ -1433,13 +1433,30 @@ static void llama_sampler_grammar_apply(struct llama_sampler * smpl, llama_token } } +// Fwd declare to break reset --> init_impl --> llama_sampler_grammar_i --> reset cycle. +static struct llama_sampler * llama_sampler_init_grammar_impl( + const struct llama_vocab * vocab, + const char * grammar_str, + const char * grammar_root, + bool lazy, + const char ** trigger_words, + size_t num_trigger_words, + const llama_token * trigger_tokens, + size_t num_trigger_tokens); + static void llama_sampler_grammar_reset(struct llama_sampler * smpl) { auto * ctx = (llama_sampler_grammar *) smpl->ctx; if (!ctx->grammar) { return; } - auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str()); + std::vector trigger_words; + for (auto & word : ctx->grammar->trigger_words) { + trigger_words.push_back(word.c_str()); + } + auto * grammar_new = llama_grammar_init_impl(ctx->grammar->vocab, ctx->grammar_str.c_str(), ctx->grammar_root.c_str(), + ctx->grammar->lazy, trigger_words.data(), trigger_words.size(), + ctx->grammar->trigger_tokens.data(), ctx->grammar->trigger_tokens.size()); llama_grammar_free_impl(ctx->grammar); ctx->grammar = grammar_new; @@ -1448,7 +1465,7 @@ static void llama_sampler_grammar_reset(struct llama_sampler * smpl) { static struct llama_sampler * llama_sampler_grammar_clone(const struct llama_sampler * smpl) { const auto * ctx = (const llama_sampler_grammar *) smpl->ctx; - auto * result = llama_sampler_init_grammar(ctx->vocab, nullptr, nullptr); + auto * result = llama_sampler_init_grammar_impl(ctx->vocab, nullptr, nullptr, false, nullptr, 0, nullptr, 0); // copy the state { @@ -1484,7 +1501,15 @@ static struct llama_sampler_i llama_sampler_grammar_i = { /* .free = */ llama_sampler_grammar_free, }; -struct llama_sampler * llama_sampler_init_grammar(const struct llama_vocab * vocab, const char * grammar_str, const char * grammar_root) { +static struct llama_sampler * llama_sampler_init_grammar_impl( + const struct llama_vocab * vocab, + const char * grammar_str, + const char * grammar_root, + bool lazy, + const char ** trigger_words, + size_t num_trigger_words, + const llama_token * trigger_tokens, + size_t num_trigger_tokens) { auto * ctx = new llama_sampler_grammar; if (grammar_str != nullptr && grammar_str[0] != '\0') { @@ -1492,7 +1517,7 @@ struct llama_sampler * llama_sampler_init_grammar(const struct llama_vocab * voc /* .vocab = */ vocab, /* .grammar_str = */ grammar_str, /* .grammar_root = */ grammar_root, - /* .grammar = */ llama_grammar_init_impl(vocab, grammar_str, grammar_root), + /* .grammar = */ llama_grammar_init_impl(vocab, grammar_str, grammar_root, lazy, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens), }; } else { *ctx = { @@ -1509,6 +1534,24 @@ struct llama_sampler * llama_sampler_init_grammar(const struct llama_vocab * voc }; } +struct llama_sampler * llama_sampler_init_grammar( + const struct llama_vocab * vocab, + const char * grammar_str, + const char * grammar_root) { + return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ false, nullptr, 0, nullptr, 0); +} + +struct llama_sampler * llama_sampler_init_grammar_lazy( + const struct llama_vocab * vocab, + const char * grammar_str, + const char * grammar_root, + const char ** trigger_words, + size_t num_trigger_words, + const llama_token * trigger_tokens, + size_t num_trigger_tokens) { + return llama_sampler_init_grammar_impl(vocab, grammar_str, grammar_root, /* lazy= */ true, trigger_words, num_trigger_words, trigger_tokens, num_trigger_tokens); +} + // penalties struct llama_sampler_penalties { diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3fa43c295..40f83ff0d 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -93,6 +93,7 @@ if (NOT WIN32) llama_target_and_test(test-grammar-parser.cpp) llama_target_and_test(test-grammar-integration.cpp) llama_target_and_test(test-llama-grammar.cpp) + llama_target_and_test(test-chat.cpp) # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8 if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64") llama_target_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..) diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp index 190643136..4563f9dcb 100644 --- a/tests/test-chat-template.cpp +++ b/tests/test-chat-template.cpp @@ -328,7 +328,7 @@ int main(void) { // test llama_chat_format_single for system message printf("\n\n=== llama_chat_format_single (system message) ===\n\n"); std::vector chat2; - common_chat_msg sys_msg{"system", "You are a helpful assistant"}; + common_chat_msg sys_msg{"system", "You are a helpful assistant", {}}; auto fmt_sys = [&](std::string tmpl_str) { minja::chat_template tmpl(tmpl_str, "", ""); @@ -352,10 +352,10 @@ int main(void) { // test llama_chat_format_single for user message printf("\n\n=== llama_chat_format_single (user message) ===\n\n"); - chat2.push_back({"system", "You are a helpful assistant"}); - chat2.push_back({"user", "Hello"}); - chat2.push_back({"assistant", "I am assistant"}); - common_chat_msg new_msg{"user", "How are you"}; + chat2.push_back({"system", "You are a helpful assistant", {}}); + chat2.push_back({"user", "Hello", {}}); + chat2.push_back({"assistant", "I am assistant", {}}); + common_chat_msg new_msg{"user", "How are you", {}}; auto fmt_single = [&](std::string tmpl_str) { minja::chat_template tmpl(tmpl_str, "", ""); diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp new file mode 100644 index 000000000..ccc65d87a --- /dev/null +++ b/tests/test-chat.cpp @@ -0,0 +1,521 @@ +// Tests chat handling, including grammar generation and parsing for tool calling, for various templates. +// +// Also acts as a CLI to generate a Markdown summary of the formats of Jinja templates, +// e.g. given Minja (http://github.com/google/minja) checked out in parent dir: +// +// cmake -B build && cmake --build build --parallel && ./build/bin/test-chat ../minja/build/tests/*.jinja 2>/dev/null +// +#include +#include +#include +#include + +#include "chat-template.hpp" +#include "chat.hpp" +#include "llama-grammar.h" +#include "unicode.h" + +using json = nlohmann::ordered_json; + +static common_chat_msg msg_from_json(const json & message) { + common_chat_msg ret{ + "assistant", + "", + {}, + }; + if (message.contains("content") && !message.at("content").is_null()) { + ret.content = message.at("content").get(); + } + auto has_tool_calls = message.contains("tool_calls"); + if (has_tool_calls) { + for (const auto & tc : message.at("tool_calls")) { + const auto & arguments = tc.at("function").at("arguments"); + ret.tool_calls.push_back({ + tc.at("function").at("name").get(), + arguments.is_string() ? arguments.get() : arguments.dump(), + tc.contains("id") ? tc.at("id").get() : "", + }); + } + } + return ret; +} + +template static void assert_equals(const T & expected, const T & actual) { + if (expected != actual) { + std::cerr << "Expected: " << expected << std::endl; + std::cerr << "Actual: " << actual << std::endl; + std::cerr << std::flush; + throw std::runtime_error("Test failed"); + } +} + +static std::string read_file(const std::string & path) { + std::cerr << "# Reading: " << path << std::endl << std::flush; + std::ifstream fs(path, std::ios_base::binary); + if (!fs.is_open()) { + fs = std::ifstream("../" + path, std::ios_base::binary); + if (!fs.is_open()) { + throw std::runtime_error("Failed to open file: " + path); + } + } + fs.seekg(0, std::ios_base::end); + auto size = fs.tellg(); + fs.seekg(0); + std::string out; + out.resize(static_cast(size)); + fs.read(&out[0], static_cast(size)); + return out; +} + +static std::unique_ptr build_grammar(const std::string & grammar_str) { + return std::unique_ptr( + llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root", false, nullptr, 0, nullptr, 0)); +} + +// TODO: extract to common helper (copied from test-grammar-integration.cpp) +static bool match_string(const std::string & input, llama_grammar * grammar) { + const auto cpts = unicode_cpts_from_utf8(input); + + auto & stacks_cur = llama_grammar_get_stacks(grammar); + + for (const auto & cpt : cpts) { + llama_grammar_accept(grammar, cpt); + + if (stacks_cur.empty()) { + // no stacks means that the grammar failed to match at this point + return false; + } + } + + for (const auto & stack : stacks_cur) { + if (stack.empty()) { + // An empty stack means that the grammar has been completed + return true; + } + } + + return false; +} + +// Dumps `{"a": 1}` as `"{\"a\": 1}"`, unlike nlohmann::json::dump which would dump it as `"{\"a\":1}"`. +static std::string dump(const json & j) { + return minja::Value(j).dump(-1, /* to_json= */ true); +} + +static void assert_msg_equals(const common_chat_msg & expected, const common_chat_msg & actual) { + assert_equals(expected.role, actual.role); + assert_equals(expected.content, actual.content); + assert_equals(expected.tool_calls.size(), actual.tool_calls.size()); + for (size_t i = 0; i < expected.tool_calls.size(); i++) { + const auto & expected_tool_call = expected.tool_calls[i]; + const auto & actual_tool_call = actual.tool_calls[i]; + assert_equals(expected_tool_call.name, actual_tool_call.name); + assert_equals(dump(json::parse(expected_tool_call.arguments)), dump(json::parse(actual_tool_call.arguments))); + assert_equals(expected_tool_call.id, actual_tool_call.id); + } +} + +const auto special_function_tool = json::parse(R"({ + "type": "function", + "function": { + "name": "special_function", + "description": "I'm special", + "parameters": { + "type": "object", + "properties": { + "arg1": { + "type": "integer", + "description": "The arg." + } + }, + "required": ["arg1"] + } + } +})"); +const auto python_tool = json::parse(R"({ + "type": "function", + "function": { + "name": "python", + "description": "an ipython interpreter", + "parameters": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "Python code to execute." + } + }, + "required": ["code"] + } + } +})"); +const auto code_interpreter_tool = json::parse(R"({ + "type": "function", + "function": { + "name": "code_interpreter", + "description": "an ipython interpreter", + "parameters": { + "type": "object", + "properties": { + "code": { + "type": "string", + "description": "Python code to execute." + } + }, + "required": ["code"] + } + } +})"); +const json tools = { special_function_tool, python_tool }; +const json llama_3_1_tools = { special_function_tool, code_interpreter_tool }; + +struct delta_data { + std::string delta; + std::string grammar; + common_chat_format format; +}; + +static delta_data init_delta(const common_chat_template & tmpl, const std::vector & end_tokens, + const json & user_message, const json & delta_message, const json & tools, + const json & tool_choice) { + common_chat_inputs inputs; + inputs.parallel_tool_calls = true; + inputs.messages = json::array(); + inputs.messages.push_back(user_message); + inputs.tools = tools; + inputs.tool_choice = tool_choice; + auto params_prefix = common_chat_params_init(tmpl, inputs); + + inputs.messages.push_back(delta_message); + inputs.add_generation_prompt = false; + auto params_full = common_chat_params_init(tmpl, inputs); + + std::string prefix = params_prefix.prompt; + std::string full = params_full.prompt; + + // Check full starts with prefix + if (full.find(prefix) != 0) { + fprintf(stderr, "Full:\n%s\n\nPrefix:\n%s\n\n", full.c_str(), prefix.c_str()); + throw std::runtime_error("Full message does not start with prefix"); + } + + if (full == prefix) { + throw std::runtime_error("Full message is the same as the prefix"); + } + + auto delta = full.substr(prefix.size()); + + // Strip end tokens + for (const auto & end_token : end_tokens) { + // rfind to find the last occurrence + auto pos = delta.rfind(end_token); + if (pos != std::string::npos) { + delta = delta.substr(0, pos); + break; + } + } + return { delta, params_full.grammar, params_full.format }; +} + +/* + Applies the template to 1 user message w/ add_generation_prompt=true, then w/ the test message w/ add_generation_prompt=false, + gets the diff, removes any end tokens and parses the result w/ the grammar, checking that + the parsed message is the same as the test_message +*/ +static void test_template(const common_chat_template & tmpl, const std::vector & end_tokens, + const json & test_message, const json & tools = {}, const std::string & expected_delta = "", + bool skip_grammar_test = false, bool skip_parser_test = false) { + common_chat_msg expected_msg = msg_from_json(test_message); + + auto user_message = json{ + { "role", "user" }, + { "content", "Hello, world!" } + }; + + for (const auto & tool_choice : json({ "auto", "required" })) { + auto data = init_delta(tmpl, end_tokens, user_message, test_message, tools, tool_choice); + if (!expected_delta.empty()) { + assert_equals(expected_delta, data.delta); + } + + if (!skip_parser_test) { + const auto msg = common_chat_parse(data.delta, data.format); + assert_msg_equals(expected_msg, msg); + } + + if (!expected_msg.tool_calls.empty()) { + GGML_ASSERT(!data.grammar.empty()); + } + if (!data.grammar.empty()) { + auto grammar = build_grammar(data.grammar); + if (!grammar) { + throw std::runtime_error("Failed to build grammar"); + } + // TODO: exercice lazy grammars + triggers here, instead of skipping the test + if (!skip_grammar_test) { + if (!match_string(data.delta, grammar.get())) { + throw std::runtime_error("Failed to match delta against grammar:\n\n" + data.delta + + "\n\nGrammar: " + data.grammar); + } + } + } + } +} + +static void test_template_output_parsers() { + auto text_message = json{ + { "role", "assistant" }, + { "content", "Hello, world!" }, + }; + auto tool_call_message = json{ + { "role", "assistant" }, + { "content", {} }, + { "tool_calls", json{ { + { "type", "function" }, + { "function", { { "name", "special_function" }, { "arguments", "{\"arg1\": 1}" } } }, + } } } + }; + auto tool_call_message_with_id = json::parse(tool_call_message.dump()); + tool_call_message_with_id["tool_calls"][0]["id"] = "123456789"; + + auto python_tool_call_message = json{ + { "role", "assistant" }, + { "content", {} }, + { "tool_calls", json{ { + { "type", "function" }, + { "function", + { + { "name", "python" }, + { "arguments", + { + { "code", "print('hey')" }, + } }, + } }, + } } } + }; + auto code_interpreter_tool_call_message = json{ + { "role", "assistant" }, + { "content", {} }, + { "tool_calls", json{ { + { "type", "function" }, + { "function", + { + { "name", "code_interpreter" }, + { "arguments", + { + { "code", "print('hey')" }, + } }, + } }, + } } } + }; + + common_chat_inputs inputs_no_tools; + inputs_no_tools.messages = { + { { "role", "user" }, { "content", "Hey" } } + }; + + common_chat_inputs inputs_tools = inputs_no_tools; + inputs_tools.tools = json::array(); + inputs_tools.tools.push_back(special_function_tool); + + common_chat_inputs inputs_tools_builtin = inputs_no_tools; + inputs_tools_builtin.tools = json::array(); + inputs_tools_builtin.tools.push_back(python_tool); + + { + const common_chat_template tmpl(read_file("models/templates/google-gemma-2-2b-it.jinja"), "", ""); + std::vector end_tokens{ "" }; + + assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, common_chat_params_init(tmpl, inputs_no_tools).format); + assert_equals(COMMON_CHAT_FORMAT_GENERIC, common_chat_params_init(tmpl, inputs_tools).format); + assert_equals(COMMON_CHAT_FORMAT_GENERIC, + common_chat_params_init( + common_chat_template(read_file("models/templates/microsoft-Phi-3.5-mini-instruct.jinja"), + "", ""), + inputs_tools) + .format); + + // Generic tool calls doesn't generate / parse content-only messages symmetrically. + + assert_msg_equals(msg_from_json(text_message), + common_chat_parse("{\n" + " \"response\": \"Hello, world!\"\n" + "}", + common_chat_params_init(tmpl, inputs_tools).format)); + test_template(tmpl, end_tokens, tool_call_message_with_id, tools, + "{\n" + " \"tool_calls\": [\n" + " {\n" + " \"name\": \"special_function\",\n" + " \"arguments\": {\n" + " \"arg1\": 1\n" + " },\n" + " \"id\": \"123456789\"\n" + " }\n" + " ]\n" + "}"); + } + { + const common_chat_template tmpl(read_file("models/templates/mistralai-Mistral-Nemo-Instruct-2407.jinja"), "", + ""); + std::vector end_tokens{ "
" }; + + assert_equals(COMMON_CHAT_FORMAT_MISTRAL_NEMO, common_chat_params_init(tmpl, inputs_tools).format); + + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template( + tmpl, end_tokens, tool_call_message_with_id, tools, + "[TOOL_CALLS][{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}, \"id\": \"123456789\"}]", + /* skip_grammar_test= */ true); + } + { + const common_chat_template tmpl( + read_file("models/templates/NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja"), "", ""); + std::vector end_tokens{ "<|im_end|>" }; + + assert_equals(COMMON_CHAT_FORMAT_HERMES_2_PRO, common_chat_params_init(tmpl, inputs_tools).format); + assert_equals( + COMMON_CHAT_FORMAT_HERMES_2_PRO, + common_chat_params_init( + common_chat_template(read_file("models/templates/NousResearch-Hermes-3-Llama-3.1-8B-tool_use.jinja"), + "", ""), + inputs_tools) + .format); + assert_equals( + COMMON_CHAT_FORMAT_HERMES_2_PRO, + common_chat_params_init( + common_chat_template(read_file("models/templates/Qwen-Qwen2.5-7B-Instruct.jinja"), "", ""), + inputs_tools) + .format); + + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, tool_call_message, tools, + "\n" + "{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}}\n" + ""); + test_template(tmpl, end_tokens, python_tool_call_message, tools, + "\n" + "{\"name\": \"python\", \"arguments\": {\"code\": \"print('hey')\"}}\n" + ""); + } + { + const common_chat_template tmpl(read_file("models/templates/meta-llama-Llama-3.1-8B-Instruct.jinja"), "", + ""); + std::vector end_tokens{ "<|eom_id|>", "<|eot_id|>" }; + + assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X, common_chat_params_init(tmpl, inputs_tools).format); + assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS, + common_chat_params_init(tmpl, inputs_tools_builtin).format); + assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X_WITH_BUILTIN_TOOLS, + common_chat_params_init( + common_chat_template(read_file("models/templates/meta-llama-Llama-3.3-70B-Instruct.jinja"), + "", ""), + inputs_tools_builtin) + .format); + + // test_template(tmpl, end_tokens, text_message, tools, R"(?)", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, code_interpreter_tool_call_message, llama_3_1_tools, + "<|python_tag|>code_interpreter.call(code=\"print('hey')\")"); + test_template(tmpl, end_tokens, python_tool_call_message, tools, + "<|python_tag|>python.call(code=\"print('hey')\")"); + test_template(tmpl, end_tokens, tool_call_message, tools, + "{\"name\": \"special_function\", \"parameters\": {\"arg1\": 1}}"); + } + { + const common_chat_template tmpl(read_file("models/templates/meta-llama-Llama-3.2-3B-Instruct.jinja"), "", + ""); + std::vector end_tokens{ "<|eom_id|>", "<|eot_id|>" }; + + assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X, common_chat_params_init(tmpl, inputs_tools).format); + + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, tool_call_message, tools, + "{\"name\": \"special_function\", \"parameters\": {\"arg1\": 1}}"); + } + { + const common_chat_template tmpl(read_file("models/templates/meetkai-functionary-medium-v3.1.jinja"), "", + ""); + std::vector end_tokens{ "<|eom_id|>", "<|eot_id|>" }; + + assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1, + common_chat_params_init(tmpl, inputs_tools).format); + + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, tool_call_message, tools, + "{\"arg1\": 1}"); + } + { + const common_chat_template tmpl(read_file("models/templates/meetkai-functionary-medium-v3.2.jinja"), "", + ""); + std::vector end_tokens{ "<|eom_id|>", "<|eot_id|>" }; + + assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2, common_chat_params_init(tmpl, inputs_no_tools).format); + assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2, common_chat_params_init(tmpl, inputs_tools).format); + + test_template(tmpl, end_tokens, text_message, {}, + "all\n" + "Hello, world!", + /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, tool_call_message, tools, + "special_function\n" + "{\"arg1\": 1}"); + } + { + const common_chat_template tmpl(read_file("models/templates/fireworks-ai-llama-3-firefunction-v2.jinja"), "", + ""); + std::vector end_tokens{ "<|eot_id|>" }; + + assert_equals(COMMON_CHAT_FORMAT_FIREFUNCTION_V2, common_chat_params_init(tmpl, inputs_tools).format); + + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, tool_call_message, tools, + " functools[{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}}]"); + } + { + const common_chat_template tmpl(read_file("models/templates/deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja"), + "", ""); + std::vector end_tokens{ "<|end▁of▁sentence|>" }; + + assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_params_init(tmpl, inputs_tools).format); + + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, tool_call_message, tools, + "<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>special_function\n" + "```json\n" + "{\"arg1\": 1}\n" + "```<|tool▁call▁end|>"); + } +} + +int main(int argc, char ** argv) { +#ifndef _WIN32 + if (argc > 1) { + common_chat_inputs inputs; + inputs.messages = { + { { "role", "user" }, { "content", "Hey" } } + }; + inputs.tools = json::array({ special_function_tool }); + + std::cout << "| Template | Format |\n"; + std::cout << "|----------|--------|\n"; + + for (int i = 1; i < argc; i++) { + std::string path = argv[i]; + if (path.rfind(".jinja") != path.size() - 6) { + std::cerr << "Skipping non-jinja file: " << path << std::endl; + continue; + } + common_chat_template tmpl(read_file(path), "", ""); + auto parts = string_split(path, "/"); + auto name = parts[parts.size() - 1]; + std::cout << "| " << name << " | " << common_chat_format_name(common_chat_params_init(tmpl, inputs).format) + << " |\n"; + } + } else +#endif + { + test_template_output_parsers(); + std::cout << "\n[chat] All tests passed!" << std::endl; + } + return 0; +} diff --git a/tests/test-grammar-integration.cpp b/tests/test-grammar-integration.cpp index e1bdbb925..288e08f51 100644 --- a/tests/test-grammar-integration.cpp +++ b/tests/test-grammar-integration.cpp @@ -13,7 +13,7 @@ using json = nlohmann::ordered_json; static llama_grammar * build_grammar(const std::string & grammar_str) { - return llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root"); + return llama_grammar_init_impl(nullptr, grammar_str.c_str(), "root", false, nullptr, 0, nullptr, 0); } static bool test_build_grammar_fails(const std::string & grammar_str) { From 553f1e46e9e864514bbd6bf4009146db66be0541 Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Thu, 30 Jan 2025 22:01:06 +0000 Subject: [PATCH 202/279] `ci`: ccache for all github worfklows (#11516) --- .github/workflows/build.yml | 136 ++++++++++++++++++++++++++++++++++-- 1 file changed, 130 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7eaf9c460..c02dd6a81 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -43,6 +43,12 @@ jobs: with: fetch-depth: 0 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: macOS-latest-cmake-arm64 + evict-old-files: 1d + - name: Dependencies id: depends continue-on-error: true @@ -108,6 +114,12 @@ jobs: with: fetch-depth: 0 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: macOS-latest-cmake-x64 + evict-old-files: 1d + - name: Dependencies id: depends continue-on-error: true @@ -172,6 +184,12 @@ jobs: with: fetch-depth: 0 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ubuntu-cpu-cmake + evict-old-files: 1d + - name: Dependencies id: depends run: | @@ -249,6 +267,12 @@ jobs: id: checkout uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ubuntu-latest-cmake-sanitizer-${{ matrix.sanitizer }} + evict-old-files: 1d + - name: Dependencies id: depends run: | @@ -296,6 +320,12 @@ jobs: id: checkout uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ubuntu-latest-cmake-rpc + evict-old-files: 1d + - name: Dependencies id: depends run: | @@ -325,6 +355,12 @@ jobs: id: checkout uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ubuntu-22-cmake-vulkan + evict-old-files: 1d + - name: Dependencies id: depends run: | @@ -364,6 +400,12 @@ jobs: sudo apt-get update sudo apt-get install -y build-essential git cmake rocblas-dev hipblas-dev + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ubuntu-22-cmake-hip + evict-old-files: 1d + - name: Build with native CMake HIP support id: cmake_build run: | @@ -396,6 +438,12 @@ jobs: apt-get update apt-get install -y build-essential git cmake libcurl4-openssl-dev + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ubuntu-22-cmake-musa + evict-old-files: 1d + - name: Build with native CMake MUSA support id: cmake_build run: | @@ -435,6 +483,12 @@ jobs: id: checkout uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ubuntu-22-cmake-sycl + evict-old-files: 1d + - name: Build id: cmake_build run: | @@ -479,6 +533,12 @@ jobs: id: checkout uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ubuntu-22-cmake-sycl-fp16 + evict-old-files: 1d + - name: Build id: cmake_build run: | @@ -500,6 +560,12 @@ jobs: id: checkout uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: macOS-latest-cmake-ios + evict-old-files: 1d + - name: Dependencies id: depends continue-on-error: true @@ -531,6 +597,12 @@ jobs: id: checkout uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: macOS-latest-cmake-tvos + evict-old-files: 1d + - name: Dependencies id: depends continue-on-error: true @@ -566,6 +638,12 @@ jobs: id: checkout uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: macOS-latest-swift + evict-old-files: 1d + - name: Dependencies id: depends continue-on-error: true @@ -607,6 +685,12 @@ jobs: - name: Clone uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: windows-msys2 + evict-old-files: 1d + - name: Setup ${{ matrix.sys }} uses: msys2/setup-msys2@v2 with: @@ -675,6 +759,12 @@ jobs: with: fetch-depth: 0 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: windows-latest-cmake-${{ matrix.build }} + evict-old-files: 1d + - name: Clone Kompute submodule id: clone_kompute if: ${{ matrix.build == 'kompute-x64' }} @@ -813,6 +903,8 @@ jobs: - name: Clone id: checkout uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Install dependencies env: @@ -821,6 +913,12 @@ jobs: apt update apt install -y cmake build-essential ninja-build libgomp1 git + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ubuntu-latest-cmake-cuda + evict-old-files: 1d + - name: Build with CMake run: | cmake -S . -B build -G Ninja \ @@ -847,6 +945,12 @@ jobs: with: fetch-depth: 0 + - name: Install ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: ${{ github.job }}-${{ matrix.cuda }}-${{ matrix.build }} + evict-old-files: 1d + - name: Install Cuda Toolkit 11.7 if: ${{ matrix.cuda == '11.7' }} run: | @@ -903,11 +1007,6 @@ jobs: echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 echo "CUDA_PATH_V12_4=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.4" | Out-File -FilePath $env:GITHUB_ENV -Append -Encoding utf8 - - name: Install ccache - uses: hendrikmuhs/ccache-action@v1.2 - with: - key: ${{ github.job }}-${{ matrix.cuda }}-${{ matrix.build }} - - name: Install Ninja id: install_ninja run: | @@ -987,6 +1086,12 @@ jobs: with: fetch-depth: 0 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: windows-latest-cmake-sycl + evict-old-files: 1d + - name: Install run: | scripts/install-oneapi.bat $WINDOWS_BASEKIT_URL $WINDOWS_DPCPP_MKL @@ -1066,9 +1171,10 @@ jobs: & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version - name: Install ccache - uses: hendrikmuhs/ccache-action@v1.2 + uses: hendrikmuhs/ccache-action@v1.2.16 with: key: ${{ github.job }} + evict-old-files: 1d - name: Build id: cmake_build @@ -1098,6 +1204,12 @@ jobs: with: fetch-depth: 0 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: windows-latest-cmake-hip-release + evict-old-files: 1d + - name: Install id: depends run: | @@ -1195,6 +1307,12 @@ jobs: - name: Clone uses: actions/checkout@v4 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: android-build + evict-old-files: 1d + - name: Set up JDK uses: actions/setup-java@v3 with: @@ -1232,6 +1350,12 @@ jobs: with: fetch-depth: 0 + - name: ccache + uses: hendrikmuhs/ccache-action@v1.2.16 + with: + key: release + evict-old-files: 1d + - name: Determine tag name id: tag shell: bash From a2df2787b32e0846205f7151dfad88ceab592beb Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Fri, 31 Jan 2025 06:04:53 +0100 Subject: [PATCH 203/279] server : update help metrics processing/deferred (#11512) This commit updates the help text for the metrics `requests_processing` and `requests_deferred` to be more grammatically correct. Currently the returned metrics look like this: ```console \# HELP llamacpp:requests_processing Number of request processing. \# TYPE llamacpp:requests_processing gauge llamacpp:requests_processing 0 \# HELP llamacpp:requests_deferred Number of request deferred. \# TYPE llamacpp:requests_deferred gauge llamacpp:requests_deferred 0 ``` With this commit, the metrics will look like this: ```console \# HELP llamacpp:requests_processing Number of requests processing. \# TYPE llamacpp:requests_processing gauge llamacpp:requests_processing 0 \# HELP llamacpp:requests_deferred Number of requests deferred. \# TYPE llamacpp:requests_deferred gauge llamacpp:requests_deferred 0 ``` This is also consistent with the description of the metrics in the server examples [README.md](https://github.com/ggerganov/llama.cpp/tree/master/examples/server#get-metrics-prometheus-compatible-metrics-exporter). --- examples/server/server.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index d1ea343dd..1ebcb5085 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -3633,11 +3633,11 @@ int main(int argc, char ** argv) { {"value", (uint64_t) res_metrics->kv_cache_tokens_count} },{ {"name", "requests_processing"}, - {"help", "Number of request processing."}, + {"help", "Number of requests processing."}, {"value", (uint64_t) res_metrics->n_processing_slots} },{ {"name", "requests_deferred"}, - {"help", "Number of request deferred."}, + {"help", "Number of requests deferred."}, {"value", (uint64_t) res_metrics->n_tasks_deferred} }}} }; From 1bd3047a939e561adfb3c7dd2e17c4cc7a4e4e6f Mon Sep 17 00:00:00 2001 From: Steve Grubb Date: Fri, 31 Jan 2025 00:58:55 -0500 Subject: [PATCH 204/279] common: Add missing va_end (#11529) The va_copy man page states that va_end must be called to revert whatever the copy did. For some implementaions, not calling va_end has no consequences. For others it could leak memory. --- common/log.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/common/log.cpp b/common/log.cpp index 04c7c0ed1..0b8994ae1 100644 --- a/common/log.cpp +++ b/common/log.cpp @@ -206,6 +206,7 @@ public: vsnprintf(entry.msg.data(), entry.msg.size(), ss.str().c_str(), args_copy); } #endif + va_end(args_copy); } entry.level = level; From 4a2b196d03d52da31236390e9f5694a88d43d11d Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Fri, 31 Jan 2025 08:12:40 +0000 Subject: [PATCH 205/279] server : fix --jinja when there's no tools or schema (typo was forcing JSON) (#11531) --- examples/server/tests/unit/test_chat_completion.py | 7 +++---- examples/server/utils.hpp | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/examples/server/tests/unit/test_chat_completion.py b/examples/server/tests/unit/test_chat_completion.py index 0be04bab5..f5d8b0572 100644 --- a/examples/server/tests/unit/test_chat_completion.py +++ b/examples/server/tests/unit/test_chat_completion.py @@ -14,11 +14,10 @@ def create_server(): "model,system_prompt,user_prompt,max_tokens,re_content,n_prompt,n_predicted,finish_reason,jinja,chat_template", [ (None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", False, None), + (None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", True, None), + (None, "Book", "What is the best book", 8, "^ blue", 23, 8, "length", True, "This is not a chat template, it is"), ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", False, None), - # TODO: fix testing of non-tool jinja mode - # (None, "Book", "What is the best book", 8, "(Suddenly)+|\\{ \" Sarax.", 77, 8, "length", True, None), - # (None, "Book", "What is the best book", 8, "I want to play with", 23, 8, "length", True, "This is not a chat template, it is"), - # ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", True, None), + ("codellama70b", "You are a coding assistant.", "Write the fibonacci function in c++.", 128, "(Aside|she|felter|alonger)+", 104, 64, "length", True, None), ] ) def test_chat_completion(model, system_prompt, user_prompt, max_tokens, re_content, n_prompt, n_predicted, finish_reason, jinja, chat_template): diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 3d2c04666..70bd6a42c 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -642,7 +642,7 @@ static json oaicompat_completion_params_parse( inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false); inputs.stream = stream; // TODO: support mixing schema w/ tools beyond generic format. - inputs.json_schema = json_value(llama_params, "json_schema", json::object()); + inputs.json_schema = json_value(llama_params, "json_schema", json()); auto chat_params = common_chat_params_init(tmpl, inputs); llama_params["chat_format"] = static_cast(chat_params.format); From 5783575c9d99c4d9370495800663aa5397ceb0be Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Fri, 31 Jan 2025 08:24:29 +0000 Subject: [PATCH 206/279] Fix chatml fallback for unsupported builtin templates (when --jinja not enabled) (#11533) --- examples/server/server.cpp | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 1ebcb5085..e7daceef1 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -1858,7 +1858,12 @@ struct server_context { llama_init_dft.context.reset(); } - chat_templates = common_chat_templates_from_model(model, params_base.chat_template); + if (params_base.chat_template.empty() && !validate_builtin_chat_template(params.use_jinja)) { + LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__); + chat_templates = common_chat_templates_from_model(model, "chatml"); + } else { + chat_templates = common_chat_templates_from_model(model, params_base.chat_template); + } GGML_ASSERT(chat_templates.template_default.get() != nullptr); return true; @@ -4435,14 +4440,6 @@ int main(int argc, char ** argv) { LOG_INF("%s: model loaded\n", __func__); - // if a custom chat template is not supplied, we will use the one that comes with the model (if any) - if (params.chat_template.empty()) { - if (!ctx_server.validate_builtin_chat_template(params.use_jinja)) { - LOG_WRN("%s: The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses\n", __func__); - params.chat_template = "chatml"; - } - } - // print sample chat example to make it clear which template is used LOG_INF("%s: chat template, chat_template: %s, example_format: '%s'\n", __func__, ctx_server.chat_templates.template_default->source().c_str(), From b1bcd309fc8ac929cbd4a6207b3a19886bda031f Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Fri, 31 Jan 2025 13:48:31 +0000 Subject: [PATCH 207/279] fix stop regression (#11543) --- examples/server/utils.hpp | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 70bd6a42c..94e189457 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -484,13 +484,14 @@ static bool ends_with(const std::string & str, const std::string & suffix) { static size_t find_partial_stop_string(const std::string &stop, const std::string &text) { if (!text.empty() && !stop.empty()) { - auto it = std::find(stop.rbegin(), stop.rend(), text.back()); - while (it != stop.rend()) { - size_t length = std::distance(it, stop.rend()); - if (text.length() >= length && 0 == text.compare(text.length() - length, length, stop)) { - return text.length() - length; + const char text_last_char = text.back(); + for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) { + if (stop[char_index] == text_last_char) { + const std::string current_partial = stop.substr(0, char_index + 1); + if (ends_with(text, current_partial)) { + return text.size() - char_index - 1; + } } - it = std::find(std::next(it), stop.rend(), text.back()); } } From a83f528688324a21484a97af1d1be5e1bc8d4c8e Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Fri, 31 Jan 2025 14:15:25 +0000 Subject: [PATCH 208/279] `tool-call`: fix llama 3.x and functionary 3.2, play nice w/ pydantic_ai package, update readme (#11539) * An empty tool_call_id is better than none! * sync: minja (tool call name optional https://github.com/google/minja/pull/36) * Force-disable parallel_tool_calls if template doesn't support it * More debug logs * Llama 3.x tools: accept / trigger on more varied spaced outputs * Fix empty content for functionary v3.2 tool call * Add proper tool call docs to server README * readme: function calling *is* supported now * Apply suggestions from code review Co-authored-by: Georgi Gerganov --------- Co-authored-by: Georgi Gerganov --- common/chat-template.hpp | 4 +- common/chat.cpp | 21 +++++-- examples/server/README.md | 110 ++++++++++++++++++++++++++++++++++--- examples/server/server.cpp | 5 +- examples/server/utils.hpp | 4 ++ 5 files changed, 129 insertions(+), 15 deletions(-) diff --git a/common/chat-template.hpp b/common/chat-template.hpp index 75ba5d938..58e119a3b 100644 --- a/common/chat-template.hpp +++ b/common/chat-template.hpp @@ -283,10 +283,12 @@ class chat_template { message["role"] = "user"; auto obj = json { {"tool_response", { - {"tool", message.at("name")}, {"content", message.at("content")}, }}, }; + if (message.contains("name")) { + obj["tool_response"]["name"] = message.at("name"); + } if (message.contains("tool_call_id")) { obj["tool_response"]["tool_call_id"] = message.at("tool_call_id"); } diff --git a/common/chat.cpp b/common/chat.cpp index d9a654892..58db12af9 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -384,14 +384,19 @@ static common_chat_params common_chat_params_init_llama_3_1_tool_calls(const com tool_rules.push_back( builder.add_rule( name + "-call", - "\"{\" ( \"\\\"type\\\": \\\"function\\\", \" | space ) " + "\"{\" space " + "( \"\\\"type\\\":\" space \"\\\"function\\\",\" space )? " "\"\\\"name\\\": \\\"" + name + "\\\", \\\"parameters\\\": \" " + builder.add_schema(name + "-args", parameters) + " \"}\"")); data.grammar_triggers.push_back({"{\"name\": \"" + name + "\"", /* .at_start = */ true}); }); data.grammar_triggers.push_back({"{\"name\":", /* .at_start = */ true}); + data.grammar_triggers.push_back({"{\n \"name\":", /* .at_start = */ true}); + data.grammar_triggers.push_back({"{\n \"name\":", /* .at_start = */ true}); data.grammar_triggers.push_back({"{\"type\": \"function\"", /* .at_start = */ true}); + data.grammar_triggers.push_back({"{\n \"type\": \"function\"", /* .at_start = */ true}); + data.grammar_triggers.push_back({"{\n \"type\": \"function\"", /* .at_start = */ true}); if (!builtin_tools.empty()) { data.grammar_triggers.push_back({"<|python_tag|>", /* .at_start = */ false}); } @@ -586,9 +591,17 @@ static common_chat_msg common_chat_parse_functionary_v3_2(const std::string & in } } // TODO: tighten & simplify. - auto res = parse_json_tool_calls(std::string(it, end), std::nullopt, function_regex, close_regex); - res.content = content; - return res; + try { + auto res = parse_json_tool_calls(std::string(it, end), std::nullopt, function_regex, close_regex); + res.content = content + res.content; + return res; + } catch (const std::exception & e) { + LOG_ERR("Failed to parse functionary v3.2 input: %s\n", e.what()); + common_chat_msg res; + res.role = "assistant"; + res.content = input; + return res; + } } static common_chat_params common_chat_params_init_functionary_v3_1_llama_3_1(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { diff --git a/examples/server/README.md b/examples/server/README.md index ce1ae8858..276b43013 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -126,7 +126,7 @@ The project is under active development, and we are [looking for feedback and co | `--grammar GRAMMAR` | BNF-like grammar to constrain generations (see samples in grammars/ dir) (default: '') | | `--grammar-file FNAME` | file to read grammar from | | `-j, --json-schema SCHEMA` | JSON schema to constrain generations (https://json-schema.org/), e.g. `{}` for any JSON object
For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead | -| `--jinja` | Enable experimental Jinja templating engine (needed for tool use) | +| `--jinja` | Enable experimental Jinja templating engine (required for tool use) | **Example-specific params** @@ -1069,7 +1069,7 @@ Given a ChatML-formatted json description in `messages`, it returns the predicte *Options:* -See [OpenAI Chat Completions API documentation](https://platform.openai.com/docs/api-reference/chat). While some OpenAI-specific features such as function calling aren't supported, llama.cpp `/completion`-specific features such as `mirostat` are supported. +See [OpenAI Chat Completions API documentation](https://platform.openai.com/docs/api-reference/chat). llama.cpp `/completion`-specific features such as `mirostat` are also supported. The `response_format` parameter supports both plain JSON output (e.g. `{"type": "json_object"}`) and schema-constrained JSON (e.g. `{"type": "json_object", "schema": {"type": "string", "minLength": 10, "maxLength": 100}}` or `{"type": "json_schema", "schema": {"properties": { "name": { "title": "Name", "type": "string" }, "date": { "title": "Date", "type": "string" }, "participants": { "items": {"type: "string" }, "title": "Participants", "type": "string" } } } }`), similar to other OpenAI-inspired API providers. @@ -1117,17 +1117,111 @@ curl http://localhost:8080/v1/chat/completions \ }' ``` -... and even tool usage (needs `--jinja` flag): +*Tool call support* + +[Function calling](https://platform.openai.com/docs/guides/function-calling) is supported for all models (see https://github.com/ggerganov/llama.cpp/pull/9639): + +- Requires `--jinja` flag +- Native tool call formats supported: + - Llama 3.1 / 3.3 (including builtin tools support - tool names for `wolfram_alpha`, `web_search` / `brave_search`, `code_interpreter`), Llama 3.2 + - Functionary v3.1 / v3.2 + - Hermes 2/3, Qwen 2.5 + - Mistral Nemo + - Firefunction v2 + - DeepSeek R1 (WIP / seems reluctant to call any tools?) + +
+ Show some common templates and which format handler they use + + | Template | Format | + |----------|--------| + | CohereForAI-c4ai-command-r-plus-default.jinja | generic tool calls | + | CohereForAI-c4ai-command-r-plus-rag.jinja | generic tool calls | + | CohereForAI-c4ai-command-r-plus-tool_use.jinja | generic tool calls | + | MiniMaxAI-MiniMax-Text-01.jinja | generic tool calls | + | NexaAIDev-Octopus-v2.jinja | generic tool calls | + | NousResearch-Hermes-2-Pro-Llama-3-8B-default.jinja | generic tool calls | + | NousResearch-Hermes-2-Pro-Llama-3-8B-tool_use.jinja | hermes 2 pro tool calls | + | NousResearch-Hermes-2-Pro-Mistral-7B-default.jinja | generic tool calls | + | NousResearch-Hermes-2-Pro-Mistral-7B-tool_use.jinja | hermes 2 pro tool calls | + | NousResearch-Hermes-3-Llama-3.1-70B-default.jinja | generic tool calls | + | NousResearch-Hermes-3-Llama-3.1-70B-tool_use.jinja | hermes 2 pro tool calls | + | OrionStarAI-Orion-14B-Chat.jinja | generic tool calls | + | Qwen-QwQ-32B-Preview.jinja | hermes 2 pro tool calls | + | Qwen-Qwen2-7B-Instruct.jinja | generic tool calls | + | Qwen-Qwen2-VL-7B-Instruct.jinja | generic tool calls | + | Qwen-Qwen2.5-7B-Instruct.jinja | hermes 2 pro tool calls | + | Qwen-Qwen2.5-Math-7B-Instruct.jinja | hermes 2 pro tool calls | + | TheBloke-FusionNet_34Bx2_MoE-AWQ.jinja | generic tool calls | + | abacusai-Fewshot-Metamath-OrcaVicuna-Mistral.jinja | generic tool calls | + | bofenghuang-vigogne-2-70b-chat.jinja | generic tool calls | + | databricks-dbrx-instruct.jinja | generic tool calls | + | deepseek-ai-DeepSeek-Coder-V2-Instruct.jinja | generic tool calls | + | deepseek-ai-DeepSeek-R1-Distill-Llama-8B.jinja | deepseek r1 tool calls | + | deepseek-ai-DeepSeek-R1-Distill-Qwen-32B.jinja | deepseek r1 tool calls | + | deepseek-ai-DeepSeek-R1-Distill-Qwen-7B.jinja | deepseek r1 tool calls | + | deepseek-ai-DeepSeek-V2.5.jinja | deepseek r1 tool calls | + | deepseek-ai-deepseek-coder-33b-instruct.jinja | generic tool calls | + | google-gemma-2-2b-it.jinja | generic tool calls | + | google-gemma-7b-it.jinja | generic tool calls | + | indischepartij-MiniCPM-3B-OpenHermes-2.5-v2.jinja | generic tool calls | + | mattshumer-Reflection-Llama-3.1-70B.jinja | generic tool calls | + | meetkai-functionary-medium-v3.2.jinja | functionary v3.2 tool calls | + | meta-llama-Llama-3.1-8B-Instruct.jinja | llama 3.x tool calls (w/ builtin tools) | + | meta-llama-Llama-3.2-3B-Instruct.jinja | llama 3.x tool calls | + | meta-llama-Llama-3.3-70B-Instruct.jinja | llama 3.x tool calls (w/ builtin tools) | + | meta-llama-Meta-Llama-3.1-8B-Instruct.jinja | llama 3.x tool calls (w/ builtin tools) | + | microsoft-Phi-3-medium-4k-instruct.jinja | generic tool calls | + | microsoft-Phi-3-mini-4k-instruct.jinja | generic tool calls | + | microsoft-Phi-3-small-8k-instruct.jinja | generic tool calls | + | microsoft-Phi-3.5-mini-instruct.jinja | generic tool calls | + | microsoft-Phi-3.5-vision-instruct.jinja | generic tool calls | + | mistralai-Mistral-7B-Instruct-v0.2.jinja | generic tool calls | + | mistralai-Mistral-Large-Instruct-2407.jinja | mistral nemo tool calls | + | mistralai-Mistral-Large-Instruct-2411.jinja | generic tool calls | + | mistralai-Mistral-Nemo-Instruct-2407.jinja | mistral nemo tool calls | + | mistralai-Mixtral-8x7B-Instruct-v0.1.jinja | generic tool calls | + | mlabonne-AlphaMonarch-7B.jinja | generic tool calls | + | nvidia-Llama-3.1-Nemotron-70B-Instruct-HF.jinja | llama 3.x tool calls (w/ builtin tools) | + | openchat-openchat-3.5-0106.jinja | generic tool calls | + | teknium-OpenHermes-2.5-Mistral-7B.jinja | generic tool calls | + + This table can be generated with: + + ```bash + ./build/bin/test-chat ../minja/build/tests/*.jinja 2>/dev/null + +
+ +- Generic tool call is supported when the template isn't recognized by native format handlers (you'll see `Chat format: Generic` in the logs). + - Use `--chat-template-file` to override the template when appropriate (see examples below) + - Generic support may consume more tokens and be less efficient than a model's native format. + +- Run with: ```shell - llama-server --jinja -hfr lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF -hff Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf -fa + # Native support: + llama-server --jinja -fa -hf bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M + llama-server --jinja -fa -hf bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M + llama-server --jinja -fa -hf bartowski/Llama-3.2-3B-Instruct-GGUF:Q6_K + llama-server --jinja -fa -hf bartowski/functionary-small-v3.2-GGUF:Q4_K_M + llama-server --jinja -fa -hf bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M \ + --chat-template-file <( python scripts/get_chat_template.py NousResearch/Hermes-2-Pro-Llama-3-8B ) - # https://huggingface.co/meetkai/functionary-medium-v3.2 - llama-server --jinja -hfr bartowski/functionary-medium-v3.2-GGUF -hff functionary-medium-v3.2-IQ4_XS.gguf -fa + # Native support requires the right template for these GGUFs: + llama-server --jinja -fa -hf bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M \ + --chat-template-file <( python scripts/get_chat_template.py NousResearch/Hermes-3-Llama-3.1-8B tool_use ) + llama-server --jinja -fa -hf bartowski/firefunction-v2-GGUF -hff firefunction-v2-IQ1_M.gguf \ + --chat-template-file <( python scripts/get_chat_template.py fireworks-ai/firellama-3-firefunction-v2 ) - # https://huggingface.co/meetkai/functionary-medium-v3.1 - llama-server --jinja -hfr meetkai/functionary-medium-v3.1-GGUF -hff functionary-medium-llama-3.1.Q4_0.gguf -fa + # Generic format support + llama-server --jinja -fa -hf bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M + llama-server --jinja -fa -hf bartowski/gemma-2-2b-it-GGUF:Q4_K_M + ``` +- Test in CLI: + + ```bash curl http://localhost:8080/v1/chat/completions -d '{ "model": "gpt-3.5-turbo", "tools": [ diff --git a/examples/server/server.cpp b/examples/server/server.cpp index e7daceef1..3451e96a2 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -345,7 +345,7 @@ struct server_task { auto it = data.find("chat_format"); if (it != data.end()) { params.oaicompat_chat_format = static_cast(it->get()); - LOG_DBG("Chat format: %s\n", common_chat_format_name(params.oaicompat_chat_format).c_str()); + LOG_INF("Chat format: %s\n", common_chat_format_name(params.oaicompat_chat_format).c_str()); } else { params.oaicompat_chat_format = defaults.oaicompat_chat_format; } @@ -697,6 +697,7 @@ struct server_task_result_cmpl_final : server_task_result { std::string finish_reason = "length"; common_chat_msg message; if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { + LOG_DBG("Parsing chat message: %s\n", content.c_str()); message = common_chat_parse(content, oaicompat_chat_format); finish_reason = message.tool_calls.empty() ? "stop" : "tool_calls"; } else { @@ -713,7 +714,7 @@ struct server_task_result_cmpl_final : server_task_result { {"name", tc.name}, {"arguments", tc.arguments}, }}, - {"id", tc.id.empty() ? json() : json(tc.id)}, + {"id", tc.id}, }); } } diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index 94e189457..bfe623c4c 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -641,6 +641,10 @@ static json oaicompat_completion_params_parse( inputs.tools = tools; inputs.tool_choice = tool_choice; inputs.parallel_tool_calls = json_value(body, "parallel_tool_calls", false); + if (inputs.parallel_tool_calls && !tmpl.original_caps().supports_parallel_tool_calls) { + LOG_DBG("Disabling parallel_tool_calls because the template does not support it\n"); + inputs.parallel_tool_calls = false; + } inputs.stream = stream; // TODO: support mixing schema w/ tools beyond generic format. inputs.json_schema = json_value(llama_params, "json_schema", json()); From aa6fb1321333fae8853d0cdc26bcb5d438e650a1 Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Fri, 31 Jan 2025 17:12:40 +0000 Subject: [PATCH 209/279] `ci`: use sccache on windows instead of ccache (#11545) * Use sccache on ci for windows * Detect sccache in cmake --- .github/workflows/build.yml | 6 ++++++ ggml/src/CMakeLists.txt | 12 +++++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c02dd6a81..022b9bd03 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -689,6 +689,7 @@ jobs: uses: hendrikmuhs/ccache-action@v1.2.16 with: key: windows-msys2 + variant: sccache evict-old-files: 1d - name: Setup ${{ matrix.sys }} @@ -763,6 +764,7 @@ jobs: uses: hendrikmuhs/ccache-action@v1.2.16 with: key: windows-latest-cmake-${{ matrix.build }} + variant: sccache evict-old-files: 1d - name: Clone Kompute submodule @@ -949,6 +951,7 @@ jobs: uses: hendrikmuhs/ccache-action@v1.2.16 with: key: ${{ github.job }}-${{ matrix.cuda }}-${{ matrix.build }} + variant: sccache evict-old-files: 1d - name: Install Cuda Toolkit 11.7 @@ -1090,6 +1093,7 @@ jobs: uses: hendrikmuhs/ccache-action@v1.2.16 with: key: windows-latest-cmake-sycl + variant: sccache evict-old-files: 1d - name: Install @@ -1174,6 +1178,7 @@ jobs: uses: hendrikmuhs/ccache-action@v1.2.16 with: key: ${{ github.job }} + variant: sccache evict-old-files: 1d - name: Build @@ -1208,6 +1213,7 @@ jobs: uses: hendrikmuhs/ccache-action@v1.2.16 with: key: windows-latest-cmake-hip-release + variant: sccache evict-old-files: 1d - name: Install diff --git a/ggml/src/CMakeLists.txt b/ggml/src/CMakeLists.txt index 566709135..0002ac18a 100644 --- a/ggml/src/CMakeLists.txt +++ b/ggml/src/CMakeLists.txt @@ -93,12 +93,18 @@ endif() if (GGML_CCACHE) find_program(GGML_CCACHE_FOUND ccache) + find_program(GGML_SCCACHE_FOUND sccache) - if (GGML_CCACHE_FOUND) + if (GGML_CCACHE_FOUND OR GGML_SCCACHE_FOUND) + if(GGML_CCACHE_FOUND) + set(GGML_CCACHE_VARIANT ccache) + else() + set(GGML_CCACHE_VARIANT sccache) + endif() # TODO: should not be set globally - set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${GGML_CCACHE_VARIANT}") set(ENV{CCACHE_SLOPPINESS} time_macros) - message(STATUS "ccache found, compilation results will be cached. Disable with GGML_CCACHE=OFF.") + message(STATUS "${GGML_CCACHE_VARIANT} found, compilation results will be cached. Disable with GGML_CCACHE=OFF.") else() message(STATUS "Warning: ccache not found - consider installing it for faster compilation or disable this warning with GGML_CCACHE=OFF") endif () From 5bbc7362cb93265f4c853fd89800a6255cc26985 Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Sat, 1 Feb 2025 00:01:20 +0000 Subject: [PATCH 210/279] ci: simplify cmake build commands (#11548) --- .github/workflows/build.yml | 86 +++++++++++++------------------------ 1 file changed, 30 insertions(+), 56 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 022b9bd03..03eabbbe5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -59,16 +59,14 @@ jobs: id: cmake_build run: | sysctl -a - mkdir build - cd build - cmake .. \ + cmake -B build \ -DCMAKE_BUILD_RPATH="@loader_path" \ -DLLAMA_FATAL_WARNINGS=ON \ -DLLAMA_CURL=ON \ -DGGML_METAL_USE_BF16=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ -DGGML_RPC=ON - cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) + cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) - name: Test id: cmake_test @@ -199,13 +197,11 @@ jobs: - name: Build id: cmake_build run: | - mkdir build - cd build - cmake .. \ + cmake -B build \ -DLLAMA_FATAL_WARNINGS=ON \ -DLLAMA_CURL=ON \ -DGGML_RPC=ON - cmake --build . --config Release -j $(nproc) + cmake --build build --config Release -j $(nproc) - name: Test id: cmake_test @@ -283,26 +279,22 @@ jobs: id: cmake_build if: ${{ matrix.sanitizer != 'THREAD' }} run: | - mkdir build - cd build - cmake .. \ + cmake -B build \ -DLLAMA_FATAL_WARNINGS=ON \ -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \ -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} - cmake --build . --config ${{ matrix.build_type }} -j $(nproc) + cmake --build build --config ${{ matrix.build_type }} -j $(nproc) - name: Build (no OpenMP) id: cmake_build_no_openmp if: ${{ matrix.sanitizer == 'THREAD' }} run: | - mkdir build - cd build - cmake .. \ + cmake -B build \ -DLLAMA_FATAL_WARNINGS=ON \ -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \ -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \ -DGGML_OPENMP=OFF - cmake --build . --config ${{ matrix.build_type }} -j $(nproc) + cmake --build build --config ${{ matrix.build_type }} -j $(nproc) - name: Test id: cmake_test @@ -335,11 +327,9 @@ jobs: - name: Build id: cmake_build run: | - mkdir build - cd build - cmake .. \ + cmake -B build \ -DGGML_RPC=ON - cmake --build . --config Release -j $(nproc) + cmake --build build --config Release -j $(nproc) - name: Test id: cmake_test @@ -372,11 +362,9 @@ jobs: - name: Build id: cmake_build run: | - mkdir build - cd build - cmake .. \ + cmake -B build \ -DGGML_VULKAN=ON - cmake --build . --config Release -j $(nproc) + cmake --build build --config Release -j $(nproc) - name: Test id: cmake_test @@ -493,13 +481,11 @@ jobs: id: cmake_build run: | source /opt/intel/oneapi/setvars.sh - mkdir build - cd build - cmake .. \ + cmake -B build \ -DGGML_SYCL=ON \ -DCMAKE_C_COMPILER=icx \ -DCMAKE_CXX_COMPILER=icpx - cmake --build . --config Release -j $(nproc) + cmake --build build --config Release -j $(nproc) ubuntu-22-cmake-sycl-fp16: runs-on: ubuntu-22.04 @@ -543,14 +529,12 @@ jobs: id: cmake_build run: | source /opt/intel/oneapi/setvars.sh - mkdir build - cd build - cmake .. \ + cmake -B build \ -DGGML_SYCL=ON \ -DCMAKE_C_COMPILER=icx \ -DCMAKE_CXX_COMPILER=icpx \ -DGGML_SYCL_F16=ON - cmake --build . --config Release -j $(nproc) + cmake --build build --config Release -j $(nproc) macOS-latest-cmake-ios: runs-on: macos-latest @@ -576,9 +560,7 @@ jobs: id: cmake_build run: | sysctl -a - mkdir build - cd build - cmake -G Xcode .. \ + cmake -B build -G Xcode \ -DGGML_METAL_USE_BF16=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ -DLLAMA_BUILD_EXAMPLES=OFF \ @@ -587,7 +569,7 @@ jobs: -DCMAKE_SYSTEM_NAME=iOS \ -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \ -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml - cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO + cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO macOS-latest-cmake-tvos: runs-on: macos-latest @@ -613,9 +595,7 @@ jobs: id: cmake_build run: | sysctl -a - mkdir build - cd build - cmake -G Xcode .. \ + cmake -B build -G Xcode \ -DGGML_METAL_USE_BF16=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ -DLLAMA_BUILD_EXAMPLES=OFF \ @@ -624,7 +604,7 @@ jobs: -DCMAKE_SYSTEM_NAME=tvOS \ -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \ -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml - cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO + cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO macOS-latest-swift: runs-on: macos-latest @@ -654,17 +634,15 @@ jobs: id: cmake_build run: | sysctl -a - mkdir build - cd build - cmake -G Xcode .. \ + cmake -B build -G Xcode \ -DGGML_METAL_USE_BF16=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ -DLLAMA_BUILD_EXAMPLES=OFF \ -DLLAMA_BUILD_TESTS=OFF \ -DLLAMA_BUILD_SERVER=OFF \ -DCMAKE_OSX_ARCHITECTURES="arm64;x86_64" - cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) - sudo cmake --install . --config Release + cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) + sudo cmake --install build --config Release - name: xcodebuild for swift package id: xcodebuild @@ -806,21 +784,19 @@ jobs: run: | git clone https://github.com/KhronosGroup/OpenCL-Headers cd OpenCL-Headers - mkdir build && cd build - cmake .. ` + cmake -B build ` -DBUILD_TESTING=OFF ` -DOPENCL_HEADERS_BUILD_TESTING=OFF ` -DOPENCL_HEADERS_BUILD_CXX_TESTS=OFF ` -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release" - cmake --build . --target install + cmake --build build --target install git clone https://github.com/KhronosGroup/OpenCL-ICD-Loader cd OpenCL-ICD-Loader - mkdir build-arm64-release && cd build-arm64-release - cmake .. ` + cmake -B build-arm64-release ` -A arm64 ` -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" ` -DCMAKE_INSTALL_PREFIX="$env:RUNNER_TEMP/opencl-arm64-release" - cmake --build . --target install --config release + cmake --build build-arm64-release --target install --config release - name: Build id: cmake_build @@ -1284,9 +1260,7 @@ jobs: id: cmake_build run: | sysctl -a - mkdir build - cd build - cmake -G Xcode .. \ + cmake -B build -G Xcode \ -DGGML_METAL_USE_BF16=ON \ -DGGML_METAL_EMBED_LIBRARY=ON \ -DLLAMA_BUILD_EXAMPLES=OFF \ @@ -1295,8 +1269,8 @@ jobs: -DCMAKE_SYSTEM_NAME=iOS \ -DCMAKE_OSX_DEPLOYMENT_TARGET=14.0 \ -DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ggml - cmake --build . --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO - sudo cmake --install . --config Release + cmake --build build --config Release -j $(sysctl -n hw.logicalcpu) -- CODE_SIGNING_ALLOWED=NO + sudo cmake --install build --config Release - name: xcodebuild for swift package id: xcodebuild From ecef206ccb186a1cde8dd2523b1da3e12f593f9e Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Sat, 1 Feb 2025 11:30:54 +0100 Subject: [PATCH 211/279] Implement s3:// protocol (#11511) For those that want to pull from s3 Signed-off-by: Eric Curtin --- examples/run/run.cpp | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index 9cecae48c..cf61f4add 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -65,6 +65,13 @@ static int printe(const char * fmt, ...) { return ret; } +static std::string strftime_fmt(const char * fmt, const std::tm & tm) { + std::ostringstream oss; + oss << std::put_time(&tm, fmt); + + return oss.str(); +} + class Opt { public: int init(int argc, const char ** argv) { @@ -698,6 +705,39 @@ class LlamaData { return download(url, bn, true); } + int s3_dl(const std::string & model, const std::string & bn) { + const size_t slash_pos = model.find('/'); + if (slash_pos == std::string::npos) { + return 1; + } + + const std::string bucket = model.substr(0, slash_pos); + const std::string key = model.substr(slash_pos + 1); + const char * access_key = std::getenv("AWS_ACCESS_KEY_ID"); + const char * secret_key = std::getenv("AWS_SECRET_ACCESS_KEY"); + if (!access_key || !secret_key) { + printe("AWS credentials not found in environment\n"); + return 1; + } + + // Generate AWS Signature Version 4 headers + // (Implementation requires HMAC-SHA256 and date handling) + // Get current timestamp + const time_t now = time(nullptr); + const tm tm = *gmtime(&now); + const std::string date = strftime_fmt("%Y%m%d", tm); + const std::string datetime = strftime_fmt("%Y%m%dT%H%M%SZ", tm); + const std::vector headers = { + "Authorization: AWS4-HMAC-SHA256 Credential=" + std::string(access_key) + "/" + date + + "/us-east-1/s3/aws4_request", + "x-amz-content-sha256: UNSIGNED-PAYLOAD", "x-amz-date: " + datetime + }; + + const std::string url = "https://" + bucket + ".s3.amazonaws.com/" + key; + + return download(url, bn, true, headers); + } + std::string basename(const std::string & path) { const size_t pos = path.find_last_of("/\\"); if (pos == std::string::npos) { @@ -738,6 +778,9 @@ class LlamaData { rm_until_substring(model_, "github:"); rm_until_substring(model_, "://"); ret = github_dl(model_, bn); + } else if (string_starts_with(model_, "s3://")) { + rm_until_substring(model_, "://"); + ret = s3_dl(model_, bn); } else { // ollama:// or nothing rm_until_substring(model_, "ollama.com/library/"); rm_until_substring(model_, "://"); From cfd74c86dbaa95ed30aa6b30e14d8801eb975d63 Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Sat, 1 Feb 2025 12:24:51 +0000 Subject: [PATCH 212/279] `sync`: minja (https://github.com/google/minja/commit/418a2364b56dc9be4ed9a1a2b0fb16fb53a7a22e) (#11574) --- common/minja.hpp | 49 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/common/minja.hpp b/common/minja.hpp index f0e80fd7c..bcb5a0824 100644 --- a/common/minja.hpp +++ b/common/minja.hpp @@ -693,7 +693,7 @@ enum SpaceHandling { Keep, Strip, StripSpaces, StripNewline }; class TemplateToken { public: - enum class Type { Text, Expression, If, Else, Elif, EndIf, For, EndFor, Generation, EndGeneration, Set, EndSet, Comment, Macro, EndMacro, Filter, EndFilter }; + enum class Type { Text, Expression, If, Else, Elif, EndIf, For, EndFor, Generation, EndGeneration, Set, EndSet, Comment, Macro, EndMacro, Filter, EndFilter, Break, Continue }; static std::string typeToString(Type t) { switch (t) { @@ -714,6 +714,8 @@ public: case Type::EndFilter: return "endfilter"; case Type::Generation: return "generation"; case Type::EndGeneration: return "endgeneration"; + case Type::Break: return "break"; + case Type::Continue: return "continue"; } return "Unknown"; } @@ -815,6 +817,22 @@ struct CommentTemplateToken : public TemplateToken { CommentTemplateToken(const Location & location, SpaceHandling pre, SpaceHandling post, const std::string& t) : TemplateToken(Type::Comment, location, pre, post), text(t) {} }; +enum class LoopControlType { Break, Continue }; + +class LoopControlException : public std::runtime_error { +public: + LoopControlType control_type; + LoopControlException(const std::string & message, LoopControlType control_type) : std::runtime_error(message), control_type(control_type) {} + LoopControlException(LoopControlType control_type) + : std::runtime_error((std::ostringstream() << (control_type == LoopControlType::Continue ? "continue" : "break") << " outside of a loop").str()), + control_type(control_type) {} +}; + +struct LoopControlTemplateToken : public TemplateToken { + LoopControlType control_type; + LoopControlTemplateToken(const Location & location, SpaceHandling pre, SpaceHandling post, LoopControlType control_type) : TemplateToken(Type::Break, location, pre, post), control_type(control_type) {} +}; + class TemplateNode { Location location_; protected: @@ -825,6 +843,12 @@ public: void render(std::ostringstream & out, const std::shared_ptr & context) const { try { do_render(out, context); + } catch (const LoopControlException & e) { + // TODO: make stack creation lazy. Only needed if it was thrown outside of a loop. + std::ostringstream err; + err << e.what(); + if (location_.source) err << error_location_suffix(*location_.source, location_.pos); + throw LoopControlException(err.str(), e.control_type); } catch (const std::exception & e) { std::ostringstream err; err << e.what(); @@ -897,6 +921,15 @@ public: } }; +class LoopControlNode : public TemplateNode { + LoopControlType control_type_; + public: + LoopControlNode(const Location & location, LoopControlType control_type) : TemplateNode(location), control_type_(control_type) {} + void do_render(std::ostringstream &, const std::shared_ptr &) const override { + throw LoopControlException(control_type_); + } +}; + class ForNode : public TemplateNode { std::vector var_names; std::shared_ptr iterable; @@ -961,7 +994,12 @@ public: loop.set("last", i == (n - 1)); loop.set("previtem", i > 0 ? filtered_items.at(i - 1) : Value()); loop.set("nextitem", i < n - 1 ? filtered_items.at(i + 1) : Value()); - body->render(out, loop_context); + try { + body->render(out, loop_context); + } catch (const LoopControlException & e) { + if (e.control_type == LoopControlType::Break) break; + if (e.control_type == LoopControlType::Continue) continue; + } } } }; @@ -2159,7 +2197,7 @@ private: static std::regex comment_tok(R"(\{#([-~]?)(.*?)([-~]?)#\})"); static std::regex expr_open_regex(R"(\{\{([-~])?)"); static std::regex block_open_regex(R"(^\{%([-~])?[\s\n\r]*)"); - static std::regex block_keyword_tok(R"((if|else|elif|endif|for|endfor|generation|endgeneration|set|endset|block|endblock|macro|endmacro|filter|endfilter)\b)"); + static std::regex block_keyword_tok(R"((if|else|elif|endif|for|endfor|generation|endgeneration|set|endset|block|endblock|macro|endmacro|filter|endfilter|break|continue)\b)"); static std::regex non_text_open_regex(R"(\{\{|\{%|\{#)"); static std::regex expr_close_regex(R"([\s\n\r]*([-~])?\}\})"); static std::regex block_close_regex(R"([\s\n\r]*([-~])?%\})"); @@ -2291,6 +2329,9 @@ private: } else if (keyword == "endfilter") { auto post_space = parseBlockClose(); tokens.push_back(std::make_unique(location, pre_space, post_space)); + } else if (keyword == "break" || keyword == "continue") { + auto post_space = parseBlockClose(); + tokens.push_back(std::make_unique(location, pre_space, post_space, keyword == "break" ? LoopControlType::Break : LoopControlType::Continue)); } else { throw std::runtime_error("Unexpected block: " + keyword); } @@ -2414,6 +2455,8 @@ private: children.emplace_back(std::make_shared(token->location, std::move(filter_token->filter), std::move(body))); } else if (dynamic_cast(token.get())) { // Ignore comments + } else if (auto ctrl_token = dynamic_cast(token.get())) { + children.emplace_back(std::make_shared(token->location, ctrl_token->control_type)); } else if (dynamic_cast(token.get()) || dynamic_cast(token.get()) || dynamic_cast(token.get()) From 53debe6f3c9cca87e9520a83ee8c14d88977afa4 Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Sat, 1 Feb 2025 18:22:38 +0000 Subject: [PATCH 213/279] ci: use sccache on windows HIP jobs (#11553) --- .github/workflows/build.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 03eabbbe5..7392f2bfe 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1154,7 +1154,6 @@ jobs: uses: hendrikmuhs/ccache-action@v1.2.16 with: key: ${{ github.job }} - variant: sccache evict-old-files: 1d - name: Build @@ -1189,7 +1188,6 @@ jobs: uses: hendrikmuhs/ccache-action@v1.2.16 with: key: windows-latest-cmake-hip-release - variant: sccache evict-old-files: 1d - name: Install From 0cec062a638700495673f5494d200b74340538be Mon Sep 17 00:00:00 2001 From: piDack <104877312+piDack@users.noreply.github.com> Date: Sun, 2 Feb 2025 15:48:46 +0800 Subject: [PATCH 214/279] llama : add support for GLM-Edge and GLM-Edge-V series models (#10573) * add glm edge chat model * use config partial_rotary_factor as rope ratio * support for glm edge model * vision model support * remove debug info * fix format * llava.cpp trailing whitespace * remove unused AutoTokenizer * Update src/llama.cpp for not contain <|end|> or
Co-authored-by: Xuan Son Nguyen * add edge template * fix chat template * fix confict * fix confict * fix ci err * fix format err * fix template err * 9b hf chat support * format * format clip.cpp * fix format * Apply suggestions from code review * Apply suggestions from code review * Update examples/llava/clip.cpp * fix format * minor : style --------- Co-authored-by: liyuhang Co-authored-by: piDack Co-authored-by: Xuan Son Nguyen Co-authored-by: liyuhang Co-authored-by: Georgi Gerganov --- README.md | 3 +- convert_hf_to_gguf.py | 58 +--- examples/llava/README-glmedge.md | 43 +++ examples/llava/clip.cpp | 110 ++++++- examples/llava/clip.h | 2 + .../glmedge-convert-image-encoder-to-gguf.py | 280 ++++++++++++++++++ examples/llava/glmedge-surgery.py | 33 +++ examples/llava/llava.cpp | 17 ++ gguf-py/gguf/constants.py | 3 + src/llama-arch.cpp | 3 + src/llama-chat.cpp | 11 +- src/llama-chat.h | 1 + src/llama-model.cpp | 28 +- src/llama.cpp | 35 ++- tests/test-chat-template.cpp | 8 + 15 files changed, 568 insertions(+), 67 deletions(-) create mode 100644 examples/llava/README-glmedge.md create mode 100644 examples/llava/glmedge-convert-image-encoder-to-gguf.py create mode 100644 examples/llava/glmedge-surgery.py diff --git a/README.md b/README.md index d40309875..7f306d199 100644 --- a/README.md +++ b/README.md @@ -96,7 +96,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - [x] [Bitnet b1.58 models](https://huggingface.co/1bitLLM) - [x] [Flan T5](https://huggingface.co/models?search=flan-t5) - [x] [Open Elm models](https://huggingface.co/collections/apple/openelm-instruct-models-6619ad295d7ae9f868b759ca) -- [x] [ChatGLM3-6b](https://huggingface.co/THUDM/chatglm3-6b) + [ChatGLM4-9b](https://huggingface.co/THUDM/glm-4-9b) +- [x] [ChatGLM3-6b](https://huggingface.co/THUDM/chatglm3-6b) + [ChatGLM4-9b](https://huggingface.co/THUDM/glm-4-9b) + [GLMEdge-1.5b](https://huggingface.co/THUDM/glm-edge-1.5b-chat) + [GLMEdge-4b](https://huggingface.co/THUDM/glm-edge-4b-chat) - [x] [SmolLM](https://huggingface.co/collections/HuggingFaceTB/smollm-6695016cad7167254ce15966) - [x] [EXAONE-3.0-7.8B-Instruct](https://huggingface.co/LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct) - [x] [FalconMamba Models](https://huggingface.co/collections/tiiuae/falconmamba-7b-66b9a580324dd1598b0f6d4a) @@ -117,6 +117,7 @@ Instructions for adding support for new models: [HOWTO-add-model.md](docs/develo - [x] [Mini CPM](https://huggingface.co/models?search=MiniCPM) - [x] [Moondream](https://huggingface.co/vikhyatk/moondream2) - [x] [Bunny](https://github.com/BAAI-DCAI/Bunny) +- [x] [GLM-EDGE](https://huggingface.co/models?search=glm-edge) - [x] [Qwen2-VL](https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d) diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py index 63b54a9cf..018a2a588 100755 --- a/convert_hf_to_gguf.py +++ b/convert_hf_to_gguf.py @@ -648,7 +648,7 @@ class Model: if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a": # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code res = "jina-v2-code" - if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b": + if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b" or chkhsh == "81d72c7348a9f0ebe86f23298d37debe0a5e71149e29bd283904c02262b27516": # ref: https://huggingface.co/THUDM/glm-4-9b-chat res = "chatglm-bpe" if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee": @@ -4513,7 +4513,7 @@ class JaisModel(Model): self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias) -@Model.register("ChatGLMModel", "ChatGLMForConditionalGeneration") +@Model.register("GlmForCausalLM", "ChatGLMModel", "ChatGLMForConditionalGeneration") class ChatGLMModel(Model): model_arch = gguf.MODEL_ARCH.CHATGLM @@ -4619,47 +4619,15 @@ class ChatGLMModel(Model): from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True) - vocab_size = hparams["padded_vocab_size"] + vocab_size = hparams.get("padded_vocab_size",hparams["vocab_size"]) assert max(tokenizer.get_vocab().values()) < vocab_size - tokpre = self.get_vocab_base_pre(tokenizer) - - merges = [] - vocab = {} - mergeable_ranks = tokenizer.mergeable_ranks - for token, rank in mergeable_ranks.items(): - vocab[ChatGLMModel.token_bytes_to_string(token)] = rank - if len(token) == 1: - continue - merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank) - assert len(merged) >= 2 and len(merged) <= 7 - merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged))) - - # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined - added_vocab = tokenizer.get_added_vocab() - reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()} - - for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.UNUSED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - if tokenizer.added_tokens_decoder[i].special: - toktypes.append(gguf.TokenType.CONTROL) - else: - toktypes.append(gguf.TokenType.USER_DEFINED) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - + tokens, toktypes, tokpre = self.get_vocab_base() self.gguf_writer.add_tokenizer_model("gpt2") self.gguf_writer.add_tokenizer_pre(tokpre) self.gguf_writer.add_token_list(tokens) self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(dir_model, load_merges=False) - special_vocab.merges = merges + special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True) # only add special tokens when they were not already loaded from config.json special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"]) special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"]) @@ -4670,16 +4638,20 @@ class ChatGLMModel(Model): def set_gguf_parameters(self): n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) - n_head_kv = self.hparams.get("multi_query_group_num", n_head) + n_head_kv = self.hparams.get("multi_query_group_num", self.hparams.get("num_key_value_heads", n_head)) self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed)) self.gguf_writer.add_embedding_length(n_embed) - self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", 4 * n_embed)) - self.gguf_writer.add_block_count(self.hparams["num_layers"]) + self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", self.hparams.get("intermediate_size", 4 * n_embed))) + self.gguf_writer.add_block_count(self.hparams.get("num_layers", self.hparams["num_hidden_layers"])) self.gguf_writer.add_head_count(n_head) self.gguf_writer.add_head_count_kv(n_head_kv) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layernorm_epsilon"]) + self.gguf_writer.add_layer_norm_rms_eps(self.hparams.get("layernorm_epsilon",1e-5)) self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_rope_dimension_count(64) + if "attention_dim" in self.hparams: + rope_dim = self.hparams["attention_dim"] + else: + rope_dim = self.hparams["hidden_size"] // self.hparams["num_attention_heads"] + self.gguf_writer.add_rope_dimension_count(int(rope_dim * self.hparams.get("partial_rotary_factor", 0.5))) self.gguf_writer.add_add_bos_token(False) rope_freq = 10000 if "rope_ratio" in self.hparams: @@ -4689,7 +4661,7 @@ class ChatGLMModel(Model): def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: del bid # unused - if name.endswith(".rotary_pos_emb.inv_freq"): + if name.endswith(".rotary_pos_emb.inv_freq") or name.startswith("model.vision."): return [] name = name.removeprefix("transformer.") diff --git a/examples/llava/README-glmedge.md b/examples/llava/README-glmedge.md new file mode 100644 index 000000000..603d01474 --- /dev/null +++ b/examples/llava/README-glmedge.md @@ -0,0 +1,43 @@ +# GLMV-EDGE + +Currently this implementation supports [glm-edge-v-2b](https://huggingface.co/THUDM/glm-edge-v-2b) and [glm-edge-v-5b](https://huggingface.co/THUDM/glm-edge-v-5b). + +## Usage +Build with cmake or run `make llama-llava-cli` to build it. + +After building, run: `./llama-llava-cli` to see the usage. For example: + +```sh +./llama-llava-cli -m model_path/ggml-model-f16.gguf --mmproj model_path/mmproj-model-f16.gguf --image img_path/image.jpg -p "<|system|>\n system prompt <|user|>\n prompt <|assistant|>\n" +``` + +**note**: A lower temperature like 0.1 is recommended for better quality. add `--temp 0.1` to the command to do so. +**note**: For GPU offloading ensure to use the `-ngl` flag just like usual + +## GGUF conversion + +1. Clone a GLMV-EDGE model ([2B](https://huggingface.co/THUDM/glm-edge-v-2b) or [5B](https://huggingface.co/THUDM/glm-edge-v-5b)). For example: + +```sh +git clone https://huggingface.co/THUDM/glm-edge-v-5b or https://huggingface.co/THUDM/glm-edge-v-2b +``` + +2. Use `glmedge-surgery.py` to split the GLMV-EDGE model to LLM and multimodel projector constituents: + +```sh +python ./examples/llava/glmedge-surgery.py -m ../model_path +``` + +4. Use `glmedge-convert-image-encoder-to-gguf.py` to convert the GLMV-EDGE image encoder to GGUF: + +```sh +python ./examples/llava/glmedge-convert-image-encoder-to-gguf.py -m ../model_path --llava-projector ../model_path/glm.projector --output-dir ../model_path +``` + +5. Use `examples/convert_hf_to_gguf.py` to convert the LLM part of GLMV-EDGE to GGUF: + +```sh +python convert_hf_to_gguf.py ../model_path +``` + +Now both the LLM part and the image encoder are in the `model_path` directory. diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 24073c5a9..7367d44cb 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -102,6 +102,7 @@ static std::string format(const char * fmt, ...) { #define KEY_HAS_VIS_ENC "clip.has_vision_encoder" #define KEY_HAS_LLAVA_PROJ "clip.has_llava_projector" #define KEY_HAS_MINICPMV_PROJ "clip.has_minicpmv_projector" +#define KEY_HAS_GLM_PROJ "clip.has_glm_projector" #define KEY_MINICPMV_VERSION "clip.minicpmv_version" #define KEY_HAS_QWEN2VL_MERGER "clip.has_qwen2vl_merger" #define KEY_USE_GELU "clip.use_gelu" @@ -160,6 +161,15 @@ static std::string format(const char * fmt, ...) { #define TN_MINICPMV_ATTN "resampler.attn.%s.%s" #define TN_MINICPMV_LN "resampler.ln_%s.%s" +#define TN_GLM_ADAPER_CONV "adapter.conv.%s" +#define TN_GLM_ADAPTER_LINEAR "adapter.linear.linear.%s" +#define TN_GLM_ADAPTER_NORM_1 "adapter.linear.norm1.%s" +#define TN_GLM_ADAPTER_D_H_2_4H "adapter.linear.dense_h_to_4h.%s" +#define TN_GLM_ADAPTER_GATE "adapter.linear.gate.%s" +#define TN_GLM_ADAPTER_D_4H_2_H "adapter.linear.dense_4h_to_h.%s" +#define TN_GLM_BOI_W "adapter.boi" +#define TN_GLM_EOI_W "adapter.eoi" + enum projector_type { PROJECTOR_TYPE_MLP, @@ -167,6 +177,7 @@ enum projector_type { PROJECTOR_TYPE_LDP, PROJECTOR_TYPE_LDPV2, PROJECTOR_TYPE_RESAMPLER, + PROJECTOR_TYPE_GLM_EDGE, PROJECTOR_TYPE_MERGER, PROJECTOR_TYPE_UNKNOWN, }; @@ -176,6 +187,7 @@ static std::map PROJECTOR_TYPE_NAMES = { { PROJECTOR_TYPE_LDP, "ldp" }, { PROJECTOR_TYPE_LDPV2, "ldpv2"}, { PROJECTOR_TYPE_RESAMPLER, "resampler"}, + { PROJECTOR_TYPE_GLM_EDGE, "adapter"}, { PROJECTOR_TYPE_MERGER, "qwen2vl_merger"}, }; @@ -500,6 +512,12 @@ struct clip_vision_model { struct ggml_tensor * mm_4_w = NULL; struct ggml_tensor * mm_4_b = NULL; + //GLMV-Edge projection + struct ggml_tensor * mm_model_adapter_conv_w; + struct ggml_tensor * mm_model_adapter_conv_b; + struct ggml_tensor * boi_w; + struct ggml_tensor * eoi_w; + // MobileVLM projection struct ggml_tensor * mm_model_mlp_1_w; struct ggml_tensor * mm_model_mlp_1_b; @@ -560,6 +578,7 @@ struct clip_ctx { bool has_vision_encoder = false; bool has_llava_projector = false; bool has_minicpmv_projector = false; + bool has_glm_projector = false; bool has_qwen2vl_merger = false; int minicpmv_version = 2; @@ -638,7 +657,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 const int batch_size = imgs->size; - if (ctx->has_llava_projector || ctx->has_minicpmv_projector) { + if (ctx->has_llava_projector || ctx->has_minicpmv_projector || ctx->has_glm_projector) { GGML_ASSERT(batch_size == 1); } @@ -734,8 +753,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 } // loop over layers - if (ctx->has_minicpmv_projector || ctx->has_qwen2vl_merger) { - // TODO: figure out why we doing thing in this way ??? + if (ctx->has_minicpmv_projector || ctx->has_glm_projector || ctx->has_qwen2vl_merger) { n_layer += 1; } for (int il = 0; il < n_layer - 1; il++) { @@ -1095,7 +1113,33 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32 GGML_ASSERT(false); } } - else if (ctx->proj_type == PROJECTOR_TYPE_MERGER) { + // glm projector + else if (ctx->has_glm_projector) { + if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) { + size_t gridsz = (size_t)sqrt(embeddings->ne[1]); + embeddings = ggml_cont(ctx0, ggml_permute(ctx0,embeddings,1,0,2,3)); + embeddings = ggml_reshape_3d(ctx0, embeddings, gridsz, gridsz, embeddings->ne[1]); + embeddings = ggml_conv_2d(ctx0, model.mm_model_adapter_conv_w, embeddings, 2, 2, 0, 0, 1, 1); + embeddings = ggml_reshape_3d(ctx0, embeddings,embeddings->ne[0]*embeddings->ne[1] , embeddings->ne[2], batch_size); + embeddings = ggml_cont(ctx0, ggml_permute(ctx0,embeddings, 1, 0, 2, 3)); + embeddings = ggml_add(ctx0, embeddings, model.mm_model_adapter_conv_b); + //GLU + { + embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_0_w, embeddings); + embeddings = ggml_norm(ctx0, embeddings, eps); + embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.mm_model_ln_q_w), model.mm_model_ln_q_b); + embeddings = ggml_gelu_inplace(ctx0, embeddings); + struct ggml_tensor * x = embeddings; + embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_2_w, embeddings); + x = ggml_mul_mat(ctx0, model.mm_model_mlp_1_w,x); + embeddings = ggml_silu_inplace(ctx0, embeddings); + embeddings = ggml_mul(ctx0, embeddings,x); + embeddings = ggml_mul_mat(ctx0, model.mm_model_mlp_3_w, embeddings); + } + } else { + GGML_ABORT("fatel error"); + } + } else if (ctx->proj_type == PROJECTOR_TYPE_MERGER) { embeddings = ggml_reshape_3d(ctx0, embeddings, hidden_size * 4, num_positions / 4, batch_size); embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings); @@ -1284,6 +1328,11 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { new_clip->minicpmv_version = gguf_get_val_i32(ctx, idx); } + idx = gguf_find_key(ctx, KEY_HAS_GLM_PROJ); + if (idx != -1) { + new_clip->has_glm_projector = gguf_get_val_bool(ctx, idx); + } + idx = gguf_find_key(ctx, KEY_HAS_QWEN2VL_MERGER); if (idx != -1) { new_clip->has_qwen2vl_merger = gguf_get_val_bool(ctx, idx); @@ -1308,6 +1357,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { LOG_INF("%s: vision_encoder: %d\n", __func__, new_clip->has_vision_encoder); LOG_INF("%s: llava_projector: %d\n", __func__, new_clip->has_llava_projector); LOG_INF("%s: minicpmv_projector: %d\n", __func__, new_clip->has_minicpmv_projector); + LOG_INF("%s: glm_projector: %d\n", __func__, new_clip->has_glm_projector); LOG_INF("%s: model size: %.2f MB\n", __func__, model_size / 1024.0 / 1024.0); LOG_INF("%s: metadata size: %.2f MB\n", __func__, ggml_get_mem_size(meta) / 1024.0 / 1024.0); } @@ -1575,6 +1625,18 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { vision_model.mm_model_ln_post_w = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "weight")); vision_model.mm_model_ln_post_b = get_tensor(new_clip->ctx_data, format(TN_MINICPMV_LN, "post", "bias")); } + else if (new_clip->proj_type == PROJECTOR_TYPE_GLM_EDGE) { + vision_model.mm_model_adapter_conv_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPER_CONV, "weight")); + vision_model.mm_model_adapter_conv_b = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPER_CONV, "bias")); + vision_model.mm_model_mlp_0_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_LINEAR,"weight")); + vision_model.mm_model_ln_q_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_NORM_1,"weight")); + vision_model.mm_model_ln_q_b = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_NORM_1,"bias")); + vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_D_H_2_4H,"weight")); + vision_model.mm_model_mlp_2_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_GATE,"weight")); + vision_model.mm_model_mlp_3_w = get_tensor(new_clip->ctx_data, format(TN_GLM_ADAPTER_D_4H_2_H,"weight")); + vision_model.boi_w = get_tensor(new_clip->ctx_data, TN_GLM_BOI_W); + vision_model.eoi_w = get_tensor(new_clip->ctx_data, TN_GLM_EOI_W); + } else if (new_clip->proj_type == PROJECTOR_TYPE_MERGER) { vision_model.mm_0_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "weight")); vision_model.mm_0_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 0, "bias")); @@ -2115,6 +2177,20 @@ bool clip_image_preprocess(struct clip_ctx * ctx, const clip_image_u8 * img, cli return true; } + if (ctx->has_glm_projector) { + res_imgs->size = 1; + res_imgs->data = new clip_image_f32[res_imgs->size]; + clip_image_u8 resized_image; + int32_t sz=ctx->vision_model.hparams.image_size; + bicubic_resize(*img, resized_image,sz,sz); + clip_image_f32 * res = clip_image_f32_init(); + //clip_image_save_to_bmp(resized_image, "resized.bmp"); + normalize_image_u8_to_f32(&resized_image, res, ctx->image_mean, ctx->image_std); + res_imgs->data[0] = *res; + clip_image_f32_free(res); + return true; + } + bool pad_to_square = true; if (!ctx->has_vision_encoder) { LOG_ERR("This gguf file seems to have no vision encoder\n"); @@ -2300,7 +2376,8 @@ void clip_free(clip_ctx * ctx) { } size_t clip_embd_nbytes(const struct clip_ctx * ctx) { - return clip_n_patches(ctx) * clip_n_mmproj_embd(ctx) * sizeof(float); + int extra_tokens = ctx->has_glm_projector ? 2 : 0; + return (clip_n_patches(ctx) + extra_tokens) * clip_n_mmproj_embd(ctx) * sizeof(float); } size_t clip_embd_nbytes_by_img(const struct clip_ctx * ctx, int img_h, int img_w) { @@ -2342,7 +2419,7 @@ int clip_n_patches_by_img(const struct clip_ctx * ctx, struct clip_image_f32 * i int n_patches = (params.image_size / params.patch_size) * (params.image_size / params.patch_size); - if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2) { + if (ctx->proj_type == PROJECTOR_TYPE_LDP || ctx->proj_type == PROJECTOR_TYPE_LDPV2 || ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE) { n_patches /= 4; } else if (ctx->proj_type == PROJECTOR_TYPE_RESAMPLER) { if (ctx->minicpmv_version == 2) { @@ -2475,6 +2552,12 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima if (ctx->has_minicpmv_projector) { GGML_ASSERT(batch_size == 1); } + if (ctx->has_glm_projector) { + GGML_ASSERT(batch_size == 1); + ggml_tensor * boi = ctx->vision_model.boi_w; + ggml_backend_tensor_get(boi,vec,0,ggml_nbytes(boi)); + vec = (float*)(vec+ggml_nelements(boi)); //offset for boi + } // build the inference graph ggml_cgraph * gf = clip_image_build_graph(ctx, imgs, ctx->load_image_size, true); @@ -2627,7 +2710,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima ggml_backend_tensor_set(positions, positions_data, 0, ggml_nbytes(positions)); free(positions_data); - { + if (!ctx->has_glm_projector) { struct ggml_tensor * patches = ggml_graph_get_tensor(gf, "patches"); int* patches_data = (int*)malloc(ggml_nbytes(patches)); for (int i = 0; i < num_patches; i++) { @@ -2651,6 +2734,13 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima // copy the embeddings to the location passed by the user ggml_backend_tensor_get(embeddings, vec, 0, ggml_nbytes(embeddings)); + if (ctx->has_glm_projector) { + //eoi + ggml_tensor * eoi = ctx->vision_model.eoi_w; + int offset = ggml_nelements(embeddings); + ggml_backend_tensor_get(eoi, vec+offset, 0, ggml_nbytes(eoi)); + } + return true; } @@ -2812,6 +2902,9 @@ int clip_n_mmproj_embd(const struct clip_ctx * ctx) { return 3584; } } + if (ctx->proj_type == PROJECTOR_TYPE_GLM_EDGE){ + return ctx->vision_model.mm_model_mlp_3_w->ne[1]; + } if (ctx->proj_type == PROJECTOR_TYPE_MERGER) { return ctx->vision_model.mm_1_b->ne[0]; } @@ -2827,6 +2920,9 @@ int clip_is_minicpmv(const struct clip_ctx * ctx) { return 0; } +bool clip_is_glm(const struct clip_ctx * ctx) { + return ctx->has_glm_projector; +} bool clip_is_qwen2vl(const struct clip_ctx * ctx) { return ctx->has_qwen2vl_merger; } diff --git a/examples/llava/clip.h b/examples/llava/clip.h index 1603edd26..841b4f6f9 100644 --- a/examples/llava/clip.h +++ b/examples/llava/clip.h @@ -93,6 +93,8 @@ CLIP_API bool clip_is_qwen2vl(const struct clip_ctx * ctx); CLIP_API bool clip_encode_float_image (struct clip_ctx * ctx, int n_threads, float * img, int h, int w, float * vec); +CLIP_API bool clip_is_glm(const struct clip_ctx * ctx); + #ifdef __cplusplus } #endif diff --git a/examples/llava/glmedge-convert-image-encoder-to-gguf.py b/examples/llava/glmedge-convert-image-encoder-to-gguf.py new file mode 100644 index 000000000..848ef1cf3 --- /dev/null +++ b/examples/llava/glmedge-convert-image-encoder-to-gguf.py @@ -0,0 +1,280 @@ +import argparse +import os +import json +import re + +import torch +import numpy as np +from gguf import * + +TEXT = "clip.text" +VISION = "clip.vision" +from transformers import SiglipVisionModel, SiglipVisionConfig + +def k(raw_key: str, arch: str) -> str: + return raw_key.format(arch=arch) + + +def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_llava: bool) -> bool: + if name in ( + "logit_scale", + "text_model.embeddings.position_ids", + "vision_model.embeddings.position_ids", + ): + return True + + if name in ( + "vision_model.head.probe", + "vision_model.head.attention.in_proj_weight", + "vision_model.head.attention.in_proj_bias", + "vision_model.head.attention.out_proj.weight", + "vision_model.head.attention.out_proj.bias", + "vision_model.head.layernorm.weight", + "vision_model.head.layernorm.bias", + "vision_model.head.mlp.fc1.weight", + "vision_model.head.mlp.fc1.bias", + "vision_model.head.mlp.fc2.weight", + "vision_model.head.mlp.fc2.bias" + ): + return True + + if name.startswith("v") and not has_vision: + return True + + if name.startswith("t") and not has_text: + return True + + return False + + +def get_tensor_name(name: str) -> str: + if "projection" in name: + return name + if "mm_projector" in name: + name = name.replace("model.mm_projector", "mm") + name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1) + name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1) + return name + + return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln") + + +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a significant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + + list(range(ord("¡"), ord("¬") + 1)) + + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +ap = argparse.ArgumentParser() +ap.add_argument("-m", "--model-dir", help="Path to model directory cloned from HF Hub", required=True) +ap.add_argument("--use-f32", action="store_true", default=False, help="Use f32 instead of f16") +ap.add_argument("--text-only", action="store_true", required=False, + help="Save a text-only model. It can't be used to encode images") +ap.add_argument("--vision-only", action="store_true", required=False, + help="Save a vision-only model. It can't be used to encode texts") +ap.add_argument("--clip-model-is-vision", action="store_true", required=False, + help="The clip model is a pure vision model (ShareGPT4V vision extract for example)") +ap.add_argument("--clip-model-is-openclip", action="store_true", required=False, + help="The clip model is from openclip (for ViT-SO400M type))") +ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.") +ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp, ldpv2", choices=["mlp", "ldp", "ldpv2","adapter"], default="adapter") +ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None) +# Example --image_mean 0.48145466 0.4578275 0.40821073 --image_std 0.26862954 0.26130258 0.27577711 +# Example --image_mean 0.5 0.5 0.5 --image_std 0.5 0.5 0.5 +default_image_mean = [0.5, 0.5, 0.5] +default_image_std = [0.5, 0.5, 0.5] +ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None) +ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None) + +# with proper +args = ap.parse_args() + + +if args.text_only and args.vision_only: + print("--text-only and --image-only arguments cannot be specified at the same time.") + exit(1) + +if args.use_f32: + print("WARNING: Weights for the convolution op is always saved in f16, as the convolution op in GGML does not support 32-bit kernel weights yet.") + +# output in the same directory as the model if output_dir is None +dir_model = args.model_dir + +if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip: + vocab = None + tokens = None +else: + with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f: + vocab = json.load(f) + tokens = [key for key in vocab] + +with open(dir_model + "/config.json", "r", encoding="utf-8") as f: + config = json.load(f) + if args.clip_model_is_vision: + v_hparams = config + t_hparams = None + else: + v_hparams = config["vision_config"] + t_hparams = None + +# possible data types +# ftype == 0 -> float32 +# ftype == 1 -> float16 +# +# map from ftype to string +ftype_str = ["f32", "f16"] + +ftype = 1 +if args.use_f32: + ftype = 0 + +vision_config = SiglipVisionConfig(**v_hparams) +model = SiglipVisionModel(vision_config) +model.load_state_dict(torch.load(os.path.join(dir_model, "glm.clip"))) + +fname_middle = None +has_text_encoder = False +has_vision_encoder = True +has_glm_projector = True +if args.text_only: + fname_middle = "text-" + has_vision_encoder = False +elif args.llava_projector is not None: + fname_middle = "mmproj-" + has_text_encoder = False + has_glm_projector = True +elif args.vision_only: + fname_middle = "vision-" + has_text_encoder = False +else: + fname_middle = "" + +output_dir = args.output_dir if args.output_dir is not None else dir_model +os.makedirs(output_dir, exist_ok=True) +output_prefix = os.path.basename(output_dir).replace("ggml_", "") +fname_out = os.path.join(output_dir, f"{fname_middle}model-{ftype_str[ftype]}.gguf") +fout = GGUFWriter(path=fname_out, arch="clip") + +fout.add_bool("clip.has_text_encoder", has_text_encoder) +fout.add_bool("clip.has_vision_encoder", has_vision_encoder) +fout.add_bool("clip.has_glm_projector", has_glm_projector) +fout.add_file_type(ftype) +model_name = config["_name_or_path"] if "_name_or_path" in config else os.path.basename(dir_model) +fout.add_name(model_name) +if has_glm_projector: + fout.add_description("image encoder for glm4v") + fout.add_string("clip.projector_type", "adapter") +else: + fout.add_description("two-tower CLIP model") + +if has_text_encoder: + assert t_hparams is not None + assert tokens is not None + # text_model hparams + fout.add_uint32(k(KEY_CONTEXT_LENGTH, TEXT), t_hparams["max_position_embeddings"]) + fout.add_uint32(k(KEY_EMBEDDING_LENGTH, TEXT), t_hparams["hidden_size"]) + fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, TEXT), t_hparams["intermediate_size"]) + fout.add_uint32("clip.text.projection_dim", t_hparams.get("projection_dim", config["projection_dim"])) + fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, TEXT), t_hparams["num_attention_heads"]) + fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, TEXT), t_hparams["layer_norm_eps"]) + fout.add_uint32(k(KEY_BLOCK_COUNT, TEXT), t_hparams["num_hidden_layers"]) + fout.add_token_list(tokens) + +if has_vision_encoder: + # vision_model hparams + fout.add_uint32("clip.vision.image_size", v_hparams["image_size"]) + fout.add_uint32("clip.vision.patch_size", v_hparams["patch_size"]) + fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), v_hparams["hidden_size"]) + fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), v_hparams["intermediate_size"]) + fout.add_uint32("clip.vision.projection_dim", 0) + fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), v_hparams["num_attention_heads"]) + fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), 1e-6) + fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), v_hparams["num_hidden_layers"]) + + image_mean = args.image_mean if args.image_mean is not None else default_image_mean + image_std = args.image_std if args.image_std is not None else default_image_std + fout.add_array("clip.vision.image_mean", image_mean) + fout.add_array("clip.vision.image_std", image_std) + +fout.add_bool("clip.use_gelu", True) + + +if has_glm_projector: + # model.vision_model.encoder.layers.pop(-1) # pyright: ignore[reportAttributeAccessIssue] + projector = torch.load(args.llava_projector) + for name, data in projector.items(): + name = get_tensor_name(name) + # pw and dw conv ndim==4 + if data.ndim == 2 or data.ndim == 4: + data = data.squeeze().numpy().astype(np.float16) + else: + data = data.squeeze().numpy().astype(np.float32) + if name.startswith("vision."): + name=name.replace("vision.","") + fout.add_tensor(name, data) + print(f"Projector {name} - {data.dtype} - shape = {data.shape}") + # print(f"Projector {name} tensors added\n") + +state_dict = model.state_dict() # pyright: ignore[reportAttributeAccessIssue] +for name, data in state_dict.items(): + if should_skip_tensor(name, has_text_encoder, has_vision_encoder, has_glm_projector): + # we don't need this + print(f"skipping parameter: {name}") + continue + + name = get_tensor_name(name) + data = data.squeeze().numpy() + + n_dims = len(data.shape) + + # ftype == 0 -> float32, ftype == 1 -> float16 + ftype_cur = 0 + if n_dims == 4: + print(f"tensor {name} is always saved in f16") + data = data.astype(np.float16) + ftype_cur = 1 + elif ftype == 1: + if name[-7:] == ".weight" and n_dims == 2: + # print(" Converting to float16") + data = data.astype(np.float16) + ftype_cur = 1 + else: + # print(" Converting to float32") + data = data.astype(np.float32) + ftype_cur = 0 + else: + if data.dtype != np.float32: + # print(" Converting to float32") + data = data.astype(np.float32) + ftype_cur = 0 + print(f"siglip {name} - {data.dtype} - shape = {data.shape}") + # print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}") + fout.add_tensor(name, data) + + +fout.write_header_to_file() +fout.write_kv_data_to_file() +fout.write_tensors_to_file() +fout.close() + +print("Done. Output file: " + fname_out) diff --git a/examples/llava/glmedge-surgery.py b/examples/llava/glmedge-surgery.py new file mode 100644 index 000000000..16bb915d0 --- /dev/null +++ b/examples/llava/glmedge-surgery.py @@ -0,0 +1,33 @@ +import argparse +import os +import torch +from transformers import AutoModel + +ap = argparse.ArgumentParser() +ap.add_argument("-m", "--model", help="Path to GLM model") +args = ap.parse_args() + +# find the model part that includes the the multimodal projector weights +model = AutoModel.from_pretrained(args.model, trust_remote_code=True, local_files_only=True) +checkpoint = model.state_dict() + +# get a list of mm tensor names +mm_tensors = [k for k, v in checkpoint.items() if k.startswith("vision.adapter.")] + +# store these tensors in a new dictionary and torch.save them +projector = {name: checkpoint[name].float() for name in mm_tensors} +torch.save(projector, f"{args.model}/glm.projector") + +clip_tensors = [k for k, v in checkpoint.items() if k.startswith("vision.vit.model.vision_model.")] +if len(clip_tensors) > 0: + clip = {name.replace("vision.vit.model.", ""): checkpoint[name].float() for name in clip_tensors} + torch.save(clip, f"{args.model}/glm.clip") + + # added tokens should be removed to be able to convert Mistral models + if os.path.exists(f"{args.model}/added_tokens.json"): + with open(f"{args.model}/added_tokens.json", "w") as f: + f.write("{}\n") + +print("Done!") +print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.") +print(f"Also, use {args.model}glm.projector to prepare a glm-encoder.gguf file.") diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index 2cac7933d..300714045 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -311,6 +311,20 @@ static bool encode_image_with_clip(clip_ctx * ctx_clip, int n_threads, const cli img_res_v.size = 0; img_res_v.data = nullptr; } + else if (clip_is_glm(ctx_clip)){ + struct clip_image_size * load_image_size = clip_image_size_init(); + load_image_size->width = img_res_v.data[0].nx; + load_image_size->height = img_res_v.data[0].ny; + clip_add_load_image_size(ctx_clip, load_image_size); + + bool encoded = clip_image_encode(ctx_clip, n_threads, &img_res_v.data[0], image_embd); + int pos = int(load_image_size->width/clip_patch_size(ctx_clip)/2); + *n_img_pos = (pos * pos + 2); + if (!encoded){ + LOG_ERR("Unable to encode image \n"); + return false; + } + } else if (strcmp(mm_patch_merge_type, "spatial_unpad") != 0) { // flat / default llava-1.5 type embedding *n_img_pos = clip_n_patches(ctx_clip); @@ -395,6 +409,9 @@ bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, co if (clip_is_minicpmv(ctx_clip)) { num_max_patches = 10; } + if (clip_is_glm(ctx_clip)) { + num_max_patches = 1; + } float * image_embd; if (clip_is_qwen2vl(ctx_clip)) { // qwen2vl don't split image into chunks, so `num_max_patches` is not needed. diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py index 8fe84df21..ecac5b4bb 100644 --- a/gguf-py/gguf/constants.py +++ b/gguf-py/gguf/constants.py @@ -1357,6 +1357,9 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = { MODEL_TENSOR.OUTPUT, MODEL_TENSOR.ATTN_NORM, MODEL_TENSOR.ATTN_QKV, + MODEL_TENSOR.ATTN_Q, + MODEL_TENSOR.ATTN_K, + MODEL_TENSOR.ATTN_V, MODEL_TENSOR.ATTN_OUT, MODEL_TENSOR.FFN_NORM, MODEL_TENSOR.FFN_DOWN, diff --git a/src/llama-arch.cpp b/src/llama-arch.cpp index a7260f495..97a1e7e5e 100644 --- a/src/llama-arch.cpp +++ b/src/llama-arch.cpp @@ -1024,6 +1024,9 @@ static const std::map> LLM_TENSOR_N { LLM_TENSOR_OUTPUT, "output" }, { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" }, { LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv" }, + { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" }, + { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" }, + { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" }, { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" }, { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" }, { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" }, diff --git a/src/llama-chat.cpp b/src/llama-chat.cpp index 5c19bab24..028a64794 100644 --- a/src/llama-chat.cpp +++ b/src/llama-chat.cpp @@ -51,6 +51,7 @@ static const std::map LLM_CHAT_TEMPLATES = { { "llama3", LLM_CHAT_TEMPLATE_LLAMA_3 }, { "chatglm3", LLM_CHAT_TEMPLATE_CHATGML_3 }, { "chatglm4", LLM_CHAT_TEMPLATE_CHATGML_4 }, + { "glmedge", LLM_CHAT_TEMPLATE_GLMEDGE }, { "minicpm", LLM_CHAT_TEMPLATE_MINICPM }, { "exaone3", LLM_CHAT_TEMPLATE_EXAONE_3 }, { "rwkv-world", LLM_CHAT_TEMPLATE_RWKV_WORLD }, @@ -115,7 +116,7 @@ llm_chat_template llm_chat_detect_template(const std::string & tmpl) { } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|end|>")) { return LLM_CHAT_TEMPLATE_PHI_3; } else if (tmpl_contains("<|assistant|>") && tmpl_contains("<|user|>")) { - return LLM_CHAT_TEMPLATE_FALCON_3; + return tmpl_contains("
") ? LLM_CHAT_TEMPLATE_FALCON_3 : LLM_CHAT_TEMPLATE_GLMEDGE; } else if (tmpl_contains("<|user|>") && tmpl_contains("<|endoftext|>")) { return LLM_CHAT_TEMPLATE_ZEPHYR; } else if (tmpl_contains("bos_token + message['role']")) { @@ -440,6 +441,14 @@ int32_t llm_chat_apply_template( if (add_ass) { ss << "<|assistant|>"; } + } else if (tmpl == LLM_CHAT_TEMPLATE_GLMEDGE) { + for (auto message : chat) { + std::string role(message->role); + ss << "<|" << role << "|>" << "\n" << message->content; + } + if (add_ass) { + ss << "<|assistant|>"; + } } else if (tmpl == LLM_CHAT_TEMPLATE_MINICPM) { // MiniCPM-3B-OpenHermes-2.5-v2-GGUF for (auto message : chat) { diff --git a/src/llama-chat.h b/src/llama-chat.h index 3a4d07ce3..2f6a0e3e2 100644 --- a/src/llama-chat.h +++ b/src/llama-chat.h @@ -31,6 +31,7 @@ enum llm_chat_template { LLM_CHAT_TEMPLATE_LLAMA_3, LLM_CHAT_TEMPLATE_CHATGML_3, LLM_CHAT_TEMPLATE_CHATGML_4, + LLM_CHAT_TEMPLATE_GLMEDGE, LLM_CHAT_TEMPLATE_MINICPM, LLM_CHAT_TEMPLATE_EXAONE_3, LLM_CHAT_TEMPLATE_RWKV_WORLD, diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 18bd0b071..0487c978b 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -1093,8 +1093,20 @@ void llama_model::load_hparams(llama_model_loader & ml) { { ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps); switch (hparams.n_layer) { - case 28: type = LLM_TYPE_6B; break; - case 40: type = LLM_TYPE_9B; break; + case 28: { + if (hparams.n_head(0) == 16) { + type = LLM_TYPE_1_5B; + } else { + type = LLM_TYPE_6B; + } + } break; + case 40: { + if (hparams.n_head(0) == 24) { + type = LLM_TYPE_4B; + } else { + type = LLM_TYPE_9B; + } + } break; default: type = LLM_TYPE_UNKNOWN; } } break; @@ -3068,9 +3080,17 @@ bool llama_model::load_tensors(llama_model_loader & ml) { auto & layer = layers[i]; layer.attn_norm = create_tensor(tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, 0); + layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED); - layer.wqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, 0); - layer.bqkv = create_tensor(tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, 0); + if (layer.wqkv == nullptr) { + layer.wq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head}, 0); + layer.wk = create_tensor(tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa}, 0); + layer.wv = create_tensor(tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa}, 0); + layer.bq = create_tensor(tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.bk = create_tensor(tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED); + layer.bv = create_tensor(tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, llama_model_loader::TENSOR_NOT_REQUIRED); + } layer.wo = create_tensor(tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, 0); diff --git a/src/llama.cpp b/src/llama.cpp index 192b20a27..5760017e0 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -7215,17 +7215,30 @@ struct llm_build_context { struct ggml_tensor * Qcur = nullptr; struct ggml_tensor * Kcur = nullptr; struct ggml_tensor * Vcur = nullptr; - - cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); - cb(cur, "wqkv", il); - - cur = ggml_add(ctx0, cur, model.layers[il].bqkv); - cb(cur, "bqkv", il); - - Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); - Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); - Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); - + if (model.type == LLM_TYPE_1_5B || model.type == LLM_TYPE_4B || model.type == LLM_TYPE_9B) { + Qcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wq, cur); + if (model.layers[il].bq) { + Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq); + } + Kcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wk, cur); + if (model.layers[il].bk) { + Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk); + } + Vcur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wv, cur); + if (model.layers[il].bv) { + Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv); + } + } else { + cur = llm_build_lora_mm(lctx, ctx0, model.layers[il].wqkv, cur); + cb(cur, "wqkv", il); + if (model.layers[il].bqkv) { + cur = ggml_add(ctx0, cur, model.layers[il].bqkv); + cb(cur, "bqkv", il); + } + Qcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd, n_tokens, cur->nb[1], 0*sizeof(float)*(n_embd))); + Kcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd))); + Vcur = ggml_cont(ctx0, ggml_view_2d(ctx0, cur, n_embd_gqa, n_tokens, cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa))); + } cb(Qcur, "Qcur", il); cb(Kcur, "Kcur", il); cb(Vcur, "Vcur", il); diff --git a/tests/test-chat-template.cpp b/tests/test-chat-template.cpp index 4563f9dcb..e0314ae1d 100644 --- a/tests/test-chat-template.cpp +++ b/tests/test-chat-template.cpp @@ -175,6 +175,14 @@ int main(void) { /* .bos_token= */ "", /* .eos_token= */ "", }, + { + /* .name= */ "GLMEdge", + /* .template_str= */ "{% for item in messages %}{% if item['role'] == 'system' %}<|system|>\n{{ item['content'] }}{% elif item['role'] == 'user' %}<|user|>\n{{ item['content'] }}{% elif item['role'] == 'assistant' %}<|assistant|>\n{{ item['content'] }}{% endif %}{% endfor %}<|assistant|>", + /* .expected_output= */ "<|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>", + /* .expected_output_jinja= */ "<|system|>\nYou are a helpful assistant<|user|>\nHello<|assistant|>\nHi there<|user|>\nWho are you<|assistant|>\n I am an assistant <|user|>\nAnother question<|assistant|>", + /* .bos_token= */ "", + /* .eos_token= */ "", + }, { /* .name= */ "MiniCPM-3B-OpenHermes-2.5-v2-GGUF", /* .template_str= */ u8"{% for message in messages %}{% if message['role'] == 'user' %}{{'<用户>' + message['content'].strip() + ''}}{% else %}{{message['content'].strip()}}{% endif %}{% endfor %}", From ff227703d6d6e1888bdc7af6138514092ffcdb96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Moskal?= Date: Sat, 1 Feb 2025 23:55:32 -0800 Subject: [PATCH 215/279] sampling : support for llguidance grammars (#10224) * initial porting of previous LLG patch * update for new APIs * build: integrate llguidance as an external project * use '%llguidance' as marker to enable llg lark syntax * add some docs * clarify docs * code style fixes * remove llguidance.h from .gitignore * fix tests when llg is enabled * pass vocab not model to llama_sampler_init_llg() * copy test-grammar-integration.cpp to test-llguidance.cpp * clang fmt * fix ref-count bug * build and run test * gbnf -> lark syntax * conditionally include llguidance test based on LLAMA_LLGUIDANCE flag * rename llguidance test file to test-grammar-llguidance.cpp * add gh action for llg test * align tests with LLG grammar syntax and JSON Schema spec * llama_tokenizer() in fact requires valid utf8 * update llg * format file * add $LLGUIDANCE_LOG_LEVEL support * fix whitespace * fix warning * include for INFINITY * add final newline * fail llama_sampler_init_llg() at runtime * Link gbnf_to_lark.py script; fix links; refer to llg docs for lexemes * simplify #includes * improve doc string for LLAMA_LLGUIDANCE * typo in merge * bump llguidance to 0.6.12 --- .github/workflows/build.yml | 30 + CMakeLists.txt | 1 + common/CMakeLists.txt | 28 + common/json-schema-to-grammar.cpp | 9 +- common/json-schema-to-grammar.h | 3 +- common/llguidance.cpp | 270 ++++++ common/sampling.cpp | 22 +- common/sampling.h | 3 + docs/llguidance.md | 51 ++ tests/CMakeLists.txt | 3 + tests/test-grammar-integration.cpp | 2 +- tests/test-grammar-llguidance.cpp | 1140 +++++++++++++++++++++++++ tests/test-json-schema-to-grammar.cpp | 2 +- 13 files changed, 1555 insertions(+), 9 deletions(-) create mode 100644 common/llguidance.cpp create mode 100644 docs/llguidance.md create mode 100644 tests/test-grammar-llguidance.cpp diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7392f2bfe..8f9c82f87 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -302,6 +302,36 @@ jobs: cd build ctest -L main --verbose --timeout 900 + ubuntu-latest-llguidance: + runs-on: ubuntu-latest + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt-get install build-essential + + - name: Build + id: cmake_build + run: | + mkdir build + cd build + cmake .. \ + -DLLAMA_FATAL_WARNINGS=ON \ + -DLLAMA_LLGUIDANCE=ON + cmake --build . --config Release -j $(nproc) + + - name: Test + id: cmake_test + run: | + cd build + ctest -L main --verbose --timeout 900 + ubuntu-latest-cmake-rpc: runs-on: ubuntu-latest diff --git a/CMakeLists.txt b/CMakeLists.txt index 4c62d1788..74b48d24d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -80,6 +80,7 @@ option(LLAMA_BUILD_SERVER "llama: build server example" ${LLAMA_STANDALONE}) # 3rd party libs option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF) +option(LLAMA_LLGUIDANCE "llama-common: include LLGuidance library for structured output in common utils" OFF) # Required for relocatable CMake package include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake) diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 72f0915c1..e61015d2a 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -65,6 +65,7 @@ add_library(${TARGET} STATIC console.h json-schema-to-grammar.cpp json.hpp + llguidance.cpp log.cpp log.h minja.hpp @@ -91,6 +92,33 @@ if (LLAMA_CURL) set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} ${CURL_LIBRARY}) endif () +if (LLAMA_LLGUIDANCE) + include(ExternalProject) + set(LLGUIDANCE_SRC ${CMAKE_BINARY_DIR}/llguidance/source) + set(LLGUIDANCE_PATH ${LLGUIDANCE_SRC}/target/release) + ExternalProject_Add(llguidance_ext + GIT_REPOSITORY https://github.com/guidance-ai/llguidance + # v0.6.12: + GIT_TAG ced1c9023d47ec194fa977932d35ce65c2ebfc09 + PREFIX ${CMAKE_BINARY_DIR}/llguidance + SOURCE_DIR ${LLGUIDANCE_SRC} + BUILD_IN_SOURCE TRUE + CONFIGURE_COMMAND "" + BUILD_COMMAND cargo build --release + INSTALL_COMMAND "" + BUILD_BYPRODUCTS ${LLGUIDANCE_PATH}/libllguidance.a ${LLGUIDANCE_PATH}/llguidance.h + UPDATE_COMMAND "" + ) + target_compile_definitions(${TARGET} PUBLIC LLAMA_USE_LLGUIDANCE) + + add_library(llguidance STATIC IMPORTED) + set_target_properties(llguidance PROPERTIES IMPORTED_LOCATION ${LLGUIDANCE_PATH}/libllguidance.a) + add_dependencies(llguidance llguidance_ext) + + target_include_directories(${TARGET} PRIVATE ${LLGUIDANCE_PATH}) + set(LLAMA_COMMON_EXTRA_LIBS ${LLAMA_COMMON_EXTRA_LIBS} llguidance) +endif () + target_include_directories(${TARGET} PUBLIC .) target_compile_features (${TARGET} PUBLIC cxx_std_17) target_link_libraries (${TARGET} PRIVATE ${LLAMA_COMMON_EXTRA_LIBS} PUBLIC llama Threads::Threads) diff --git a/common/json-schema-to-grammar.cpp b/common/json-schema-to-grammar.cpp index 1f47e313e..3ebcc3d9f 100644 --- a/common/json-schema-to-grammar.cpp +++ b/common/json-schema-to-grammar.cpp @@ -991,7 +991,14 @@ public: } }; -std::string json_schema_to_grammar(const json & schema) { +std::string json_schema_to_grammar(const json & schema, bool force_gbnf) { +#ifdef LLAMA_USE_LLGUIDANCE + if (!force_gbnf) { + return "%llguidance {}\nstart: %json " + schema.dump(); + } +#else + (void)force_gbnf; +#endif // LLAMA_USE_LLGUIDANCE return build_grammar([&](const common_grammar_builder & callbacks) { auto copy = schema; callbacks.resolve_refs(copy); diff --git a/common/json-schema-to-grammar.h b/common/json-schema-to-grammar.h index ba4112cb9..62a3b0a44 100644 --- a/common/json-schema-to-grammar.h +++ b/common/json-schema-to-grammar.h @@ -5,7 +5,8 @@ #define JSON_ASSERT GGML_ASSERT #include "json.hpp" -std::string json_schema_to_grammar(const nlohmann::ordered_json & schema); +std::string json_schema_to_grammar(const nlohmann::ordered_json & schema, + bool force_gbnf = false); struct common_grammar_builder { std::function add_rule; diff --git a/common/llguidance.cpp b/common/llguidance.cpp new file mode 100644 index 000000000..7aa8ddd80 --- /dev/null +++ b/common/llguidance.cpp @@ -0,0 +1,270 @@ +#include "sampling.h" +#include "log.h" + +#ifdef LLAMA_USE_LLGUIDANCE + +# include "llguidance.h" +# include + +struct llama_sampler_llg { + const llama_vocab * vocab; + std::string grammar_kind; + std::string grammar_data; + LlgTokenizer * tokenizer; + LlgConstraint * grammar; + LlgMaskResult llg_res; + bool has_llg_res; +}; + +static LlgConstraint * llama_sampler_llg_new(LlgTokenizer * tokenizer, const char * grammar_kind, + const char * grammar_data) { + LlgConstraintInit cinit; + llg_constraint_init_set_defaults(&cinit, tokenizer); + const char * log_level = getenv("LLGUIDANCE_LOG_LEVEL"); + if (log_level && *log_level) { + cinit.log_stderr_level = atoi(log_level); + } + auto c = llg_new_constraint_any(&cinit, grammar_kind, grammar_data); + if (llg_get_error(c)) { + LOG_ERR("llg error: %s\n", llg_get_error(c)); + llg_free_constraint(c); + return nullptr; + } + return c; +} + +static const char * llama_sampler_llg_name(const llama_sampler * /*smpl*/) { + return "llguidance"; +} + +static void llama_sampler_llg_accept_impl(llama_sampler * smpl, llama_token token) { + auto * ctx = (llama_sampler_llg *) smpl->ctx; + if (ctx->grammar) { + LlgCommitResult res; + llg_commit_token(ctx->grammar, token, &res); + ctx->has_llg_res = false; + } +} + +static void llama_sampler_llg_apply(llama_sampler * smpl, llama_token_data_array * cur_p) { + auto * ctx = (llama_sampler_llg *) smpl->ctx; + if (ctx->grammar) { + if (!ctx->has_llg_res) { + if (llg_compute_mask(ctx->grammar, &ctx->llg_res) == 0) { + ctx->has_llg_res = true; + } else { + LOG_ERR("llg error: %s\n", llg_get_error(ctx->grammar)); + llg_free_constraint(ctx->grammar); + ctx->grammar = nullptr; + } + } + if (ctx->has_llg_res) { + if (ctx->llg_res.is_stop) { + for (size_t i = 0; i < cur_p->size; ++i) { + if (!llama_vocab_is_eog(ctx->vocab, cur_p->data[i].id)) { + cur_p->data[i].logit = -INFINITY; + } + } + } else { + const uint32_t * mask = ctx->llg_res.sample_mask; + for (size_t i = 0; i < cur_p->size; ++i) { + auto token = cur_p->data[i].id; + if ((mask[token / 32] & (1 << (token % 32))) == 0) { + cur_p->data[i].logit = -INFINITY; + } + } + } + } + } +} + +static void llama_sampler_llg_reset(llama_sampler * smpl) { + auto * ctx = (llama_sampler_llg *) smpl->ctx; + if (!ctx->grammar) { + return; + } + + auto * grammar_new = llama_sampler_llg_new(ctx->tokenizer, ctx->grammar_kind.c_str(), ctx->grammar_data.c_str()); + llg_free_constraint(ctx->grammar); + ctx->grammar = grammar_new; + ctx->has_llg_res = false; +} + +static llama_sampler * llama_sampler_llg_clone(const llama_sampler * smpl) { + const auto * ctx = (const llama_sampler_llg *) smpl->ctx; + + auto * result = llama_sampler_init_llg(ctx->vocab, nullptr, nullptr); + + // copy the state + { + auto * result_ctx = (llama_sampler_llg *) result->ctx; + + if (ctx->grammar) { + result_ctx->grammar_kind = ctx->grammar_kind; + result_ctx->grammar_data = ctx->grammar_data; + result_ctx->grammar = llg_clone_constraint(ctx->grammar); + result_ctx->tokenizer = llg_clone_tokenizer(ctx->tokenizer); + } + } + + return result; +} + +static void llama_sampler_llg_free(llama_sampler * smpl) { + const auto * ctx = (llama_sampler_llg *) smpl->ctx; + + if (ctx->grammar) { + llg_free_constraint(ctx->grammar); + llg_free_tokenizer(ctx->tokenizer); + } + + delete ctx; +} + +static llama_sampler_i llama_sampler_llg_i = { + /* .name = */ llama_sampler_llg_name, + /* .accept = */ llama_sampler_llg_accept_impl, + /* .apply = */ llama_sampler_llg_apply, + /* .reset = */ llama_sampler_llg_reset, + /* .clone = */ llama_sampler_llg_clone, + /* .free = */ llama_sampler_llg_free, +}; + +static size_t llama_sampler_llg_tokenize_fn(const void * user_data, const uint8_t * bytes, size_t bytes_len, + uint32_t * output_tokens, size_t output_tokens_len) { + const llama_vocab * vocab = (const llama_vocab *) user_data; + int r = 0; + try { + r = llama_tokenize(vocab, (const char *) bytes, bytes_len, (int32_t *) output_tokens, output_tokens_len, false, + true); + } catch (const std::exception & e) { + GGML_ABORT("llama_tokenize failed: %s\n", e.what()); + } + if (r < 0) { + return -r; + } + return r; +} + +static LlgTokenizer * llama_sampler_llg_new_tokenizer(const llama_vocab * vocab) { + // TODO store the tokenizer in the vocab somehow + static const llama_vocab * vocab_cache; + static LlgTokenizer * tokenizer_cache; + + if (vocab_cache == vocab) { + return llg_clone_tokenizer(tokenizer_cache); + } + + auto tok_eos = llama_vocab_eot(vocab); + if (tok_eos == LLAMA_TOKEN_NULL) { + tok_eos = llama_vocab_eos(vocab); + } + + size_t vocab_size = llama_vocab_n_tokens(vocab); + + auto token_lens = new uint32_t[vocab_size]; + // we typically have ~7 bytes per token; let's go on the safe side here + auto token_bytes_size = vocab_size * 16 + 1024 * 1024; + auto token_bytes = new uint8_t[token_bytes_size]; + + size_t offset = 0; + for (size_t i = 0; i < vocab_size; i++) { + size_t max_token = 1024; + if (token_bytes_size - offset < max_token) { + GGML_ABORT("token_bytes buffer too small\n"); + } + + llama_token token = i; + auto dp = (char *) token_bytes + offset; + auto size = llama_detokenize(vocab, &token, 1, dp, max_token, false, false); + if (size < 0) { + GGML_ABORT("llama_detokenize failed\n"); + } + if (size == 0) { + size = llama_detokenize(vocab, &token, 1, dp + 1, max_token - 1, false, true); + if (size < 0) { + GGML_ABORT("llama_detokenize failed\n"); + } + if (size != 0) { + *dp = '\xff'; // special token prefix marker + size += 1; + } + } + + token_lens[i] = size; + offset += size; + } + + LlgTokenizerInit tinit = { + /* .vocab_size = */ (uint32_t) vocab_size, + /* .tok_eos = */ (uint32_t) tok_eos, + /* .token_lens = */ token_lens, + /* .token_bytes = */ token_bytes, + /* .tokenizer_json = */ nullptr, + /* .tokenize_assumes_string = */ true, + /* .tokenize_fn = */ llama_sampler_llg_tokenize_fn, + /* .use_approximate_greedy_tokenize_fn = */ false, + /* .tokenize_user_data = */ vocab, + }; + + char error_buffer[1024]; + LlgTokenizer * tokenizer = llg_new_tokenizer(&tinit, error_buffer, sizeof(error_buffer)); + + delete[] token_bytes; + delete[] token_lens; + + if (tokenizer == nullptr) { + LOG_ERR("llg tokenizer error: %s\n", error_buffer); + return tokenizer; + } + + if (tokenizer_cache) { + llg_free_tokenizer(tokenizer_cache); + } + vocab_cache = vocab; + tokenizer_cache = tokenizer; + + return llg_clone_tokenizer(tokenizer_cache); +} + +llama_sampler * llama_sampler_init_llg(const llama_vocab * vocab, const char * grammar_kind, + const char * grammar_data) { + auto * ctx = new llama_sampler_llg; + + if (grammar_kind != nullptr && grammar_kind[0] != '\0') { + auto tokenizer = llama_sampler_llg_new_tokenizer(vocab); + *ctx = { + /* .vocab = */ vocab, + /* .grammar_kind = */ grammar_kind, + /* .grammar_data = */ grammar_data, + /* .tokenizer = */ tokenizer, + /* .grammar = */ llama_sampler_llg_new(tokenizer, grammar_kind, grammar_data), + /* .llg_res = */ {}, + /* .has_llg_res = */ false, + }; + } else { + *ctx = { + /* .vocab = */ vocab, + /* .grammar_kind = */ {}, + /* .grammar_data = */ {}, + /* .tokenizer = */ nullptr, + /* .grammar = */ nullptr, + /* .llg_res = */ {}, + /* .has_llg_res = */ false, + }; + } + + return new llama_sampler{ + /* .iface = */ &llama_sampler_llg_i, + /* .ctx = */ ctx, + }; +} + +#else + +llama_sampler * llama_sampler_init_llg(const llama_vocab *, const char *, const char *) { + LOG_WRN("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled"); + return nullptr; +} + +#endif // LLAMA_USE_LLGUIDANCE diff --git a/common/sampling.cpp b/common/sampling.cpp index bc7e49fdb..e4b21ca10 100644 --- a/common/sampling.cpp +++ b/common/sampling.cpp @@ -156,13 +156,25 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co for (const auto & str : params.grammar_trigger_words) { trigger_words.push_back(str.word.c_str()); } + + struct llama_sampler * grmr; + if (params.grammar.compare(0, 11, "%llguidance") == 0) { +#ifdef LLAMA_USE_LLGUIDANCE + grmr = llama_sampler_init_llg(vocab, "lark", params.grammar.c_str()); +#else + GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled"); +#endif // LLAMA_USE_LLGUIDANCE + } else { + grmr = params.grammar_lazy + ? llama_sampler_init_grammar_lazy(vocab, params.grammar.c_str(), "root", + trigger_words.data(), trigger_words.size(), + params.grammar_trigger_tokens.data(), params.grammar_trigger_tokens.size()) + : llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root"); + } + auto * result = new common_sampler { /* .params = */ params, - /* .grmr = */ params.grammar_lazy - ? llama_sampler_init_grammar_lazy(vocab, params.grammar.c_str(), "root", - trigger_words.data(), trigger_words.size(), - params.grammar_trigger_tokens.data(), params.grammar_trigger_tokens.size()) - : llama_sampler_init_grammar(vocab, params.grammar.c_str(), "root"), + /* .grmr = */ grmr, /* .chain = */ llama_sampler_chain_init(lparams), /* .prev = */ ring_buffer(std::max(32, params.n_prev)), /* .cur = */ {}, diff --git a/common/sampling.h b/common/sampling.h index 348911b18..2064421db 100644 --- a/common/sampling.h +++ b/common/sampling.h @@ -102,3 +102,6 @@ std::string common_sampler_type_to_str(enum common_sampler_type cnstr); std::vector common_sampler_types_from_names(const std::vector & names, bool allow_alt_names); std::vector common_sampler_types_from_chars(const std::string & chars); + +llama_sampler * llama_sampler_init_llg(const llama_vocab * vocab, + const char * grammar_kind, const char * grammar_data); diff --git a/docs/llguidance.md b/docs/llguidance.md new file mode 100644 index 000000000..792d20704 --- /dev/null +++ b/docs/llguidance.md @@ -0,0 +1,51 @@ +# LLGuidance Support in llama.cpp + +[LLGuidance](https://github.com/guidance-ai/llguidance) is a library for constrained decoding (also called constrained sampling or structured outputs) for Large Language Models (LLMs). Initially developed as the backend for the [Guidance](https://github.com/guidance-ai/guidance) library, it can also be used independently. + +LLGuidance supports JSON Schemas and arbitrary context-free grammars (CFGs) written in a [variant](https://github.com/guidance-ai/llguidance/blob/main/docs/syntax.md) of Lark syntax. It is [very fast](https://github.com/guidance-ai/jsonschemabench/tree/main/maskbench) and has [excellent](https://github.com/guidance-ai/llguidance/blob/main/docs/json_schema.md) JSON Schema coverage but requires the Rust compiler, which complicates the llama.cpp build process. + +## Building + +To enable LLGuidance support, build llama.cpp with the `LLAMA_LLGUIDANCE` option: + +```sh +cmake -B build -DLLAMA_LLGUIDANCE=ON +make -C build -j +``` + +This requires the Rust compiler and the `cargo` tool to be [installed](https://www.rust-lang.org/tools/install). + +## Interface + +There are no new command-line arguments or modifications to `common_params`. When enabled, grammars starting with `%llguidance` are passed to LLGuidance instead of the [current](../grammars/README.md) llama.cpp grammars. Additionally, JSON Schema requests (e.g., using the `-j` argument in `llama-cli`) are also passed to LLGuidance. + +For your existing GBNF grammars, you can use [gbnf_to_lark.py script](https://github.com/guidance-ai/llguidance/blob/main/scripts/gbnf_to_lark.py) to convert them to LLGuidance Lark-like format. + +## Performance + +Computing a "token mask" (i.e., the set of allowed tokens) for a llama3 tokenizer with 128k tokens takes, on average, 50μs of single-core CPU time for the [JSON Schema Bench](https://github.com/guidance-ai/jsonschemabench). The p99 time is 0.5ms, and the p100 time is 20ms. These results are due to the lexer/parser split and several [optimizations](https://github.com/guidance-ai/llguidance/blob/main/docs/optimizations.md). + +## JSON Schema + +LLGuidance adheres closely to the JSON Schema specification. For example: + +- `additionalProperties` defaults to `true`, unlike current grammars, though you can set `"additionalProperties": false` if needed. +- any whitespace is allowed. +- The definition order in the `"properties": {}` object is maintained, regardless of whether properties are required (current grammars always puts required properties first). + +Unsupported schemas result in an error message—no keywords are silently ignored. + +## Why Not Reuse GBNF Format? + +GBNF lacks the concept of a lexer. + +Most programming languages, including JSON, use a two-step process: a lexer (built with regular expressions) converts a byte stream into lexemes, which are then processed by a CFG parser. This approach is faster because lexers are cheaper to evaluate, and there is ~10x fewer lexemes than bytes. +LLM tokens often align with lexemes, so the parser is engaged in under 0.5% of tokens, with the lexer handling the rest. + +However, the user has to provide the distinction between lexemes and CFG symbols. In [Lark](https://github.com/lark-parser/lark), lexeme names are uppercase, while CFG symbols are lowercase. +The [gbnf_to_lark.py script](https://github.com/guidance-ai/llguidance/blob/main/scripts/gbnf_to_lark.py) can often take care of this automatically. +See [LLGuidance syntax docs](https://github.com/guidance-ai/llguidance/blob/main/docs/syntax.md#terminals-vs-rules) for more details. + +## Error Handling + +Errors are currently printed to `stderr`, and generation continues. Improved error handling may be added in the future. diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 40f83ff0d..7a158d602 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -86,6 +86,9 @@ llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf) llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf) +if (LLAMA_LLGUIDANCE) + llama_target_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf) +endif () if (NOT WIN32) # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API diff --git a/tests/test-grammar-integration.cpp b/tests/test-grammar-integration.cpp index 288e08f51..890608648 100644 --- a/tests/test-grammar-integration.cpp +++ b/tests/test-grammar-integration.cpp @@ -129,7 +129,7 @@ static void test_grammar(const std::string & test_desc, const std::string & gram test(test_desc + ". Grammar: " + grammar_str, grammar_str, passing_strings, failing_strings); } static void test_schema(const std::string & test_desc, const std::string & schema_str, const std::vector & passing_strings, const std::vector & failing_strings) { - test(test_desc + ". Schema: " + schema_str, json_schema_to_grammar(json::parse(schema_str)), passing_strings, failing_strings); + test(test_desc + ". Schema: " + schema_str, json_schema_to_grammar(json::parse(schema_str), true), passing_strings, failing_strings); } static void test_simple_grammar() { diff --git a/tests/test-grammar-llguidance.cpp b/tests/test-grammar-llguidance.cpp new file mode 100644 index 000000000..8b696006b --- /dev/null +++ b/tests/test-grammar-llguidance.cpp @@ -0,0 +1,1140 @@ +#ifdef NDEBUG +# undef NDEBUG +#endif + +#include "unicode.h" +#include "sampling.h" + +#include +#include +#include + +static const llama_vocab * vocab; + +static bool match_string(const std::string & input, llama_sampler * grammar) { + llama_sampler_reset(grammar); + auto tokens = common_tokenize(vocab, input, false, false); + + auto n_vocab = llama_vocab_n_tokens(vocab); + + std::vector cur; + cur.reserve(n_vocab); + for (llama_token token_id = 0; token_id < (llama_token) n_vocab; token_id++) { + cur.emplace_back(llama_token_data{ token_id, 0.0f, 0.0f }); + } + auto tok_arr = llama_token_data_array{ cur.data(), cur.size(), -1, false }; + + for (const auto token : tokens) { + for (llama_token token_id = 0; token_id < (llama_token) n_vocab; token_id++) { + cur[token_id].logit = 0.0f; + } + llama_sampler_apply(grammar, &tok_arr); + if (cur[token].logit < 0.0f) { + return false; + } + llama_sampler_accept(grammar, token); + } + + // do we allow EOS at the end? if so the grammar is accepting + + auto tok_eos = llama_vocab_eot(vocab); + if (tok_eos == LLAMA_TOKEN_NULL) { + tok_eos = llama_vocab_eos(vocab); + } + + cur[tok_eos].logit = 0.0f; + llama_sampler_apply(grammar, &tok_arr); + + return cur[tok_eos].logit >= 0.0f; +} + +static void test(const std::string & test_desc, const std::string & grammar_str, + const std::vector & passing_strings, const std::vector & failing_strings) { + fprintf(stderr, "⚫ Testing %s\n%s\n", test_desc.c_str(), grammar_str.c_str()); + fflush(stderr); + + auto * grammar = llama_sampler_init_llg(vocab, "lark", grammar_str.c_str()); + + fprintf(stderr, " 🔵 Valid strings:\n"); + + // Passing strings + for (const auto & test_string : passing_strings) { + fprintf(stderr, " \"%s\" ", test_string.c_str()); + fflush(stderr); + + bool matched = match_string(test_string, grammar); + + if (!matched) { + fprintf(stderr, "❌ (failed to match)\n"); + + // DEBUG: Write strings to files so that we can analyze more easily with gbnf-validator program to see exactly where things failed. + // DEBUG: Write the grammar_str to test-grammar-integration.grammar.gbnf + FILE * grammar_file = fopen("test-grammar-integration.grammar.gbnf", "w"); + if (grammar_file) { + fprintf(grammar_file, "%s", grammar_str.c_str()); + fclose(grammar_file); + } + + // DEBUG: Write the test string to test-grammar-integration.string.txt + FILE * string_file = fopen("test-grammar-integration.string.txt", "w"); + if (string_file) { + fprintf(string_file, "%s", test_string.c_str()); + fclose(string_file); + } + + fprintf(stderr, + "\n NOTE: Debug grammar file generated. To analyze this failure in detail, run the following " + "command: ./llama-gbnf-validator test-grammar-integration.grammar.gbnf " + "test-grammar-integration.string.txt\n\n"); + } else { + fprintf(stdout, "✅︎\n"); + } + + assert(matched); + } + + fprintf(stderr, " 🟠 Invalid strings:\n"); + + // Failing strings + for (const auto & test_string : failing_strings) { + fprintf(stderr, " \"%s\" ", test_string.c_str()); + fflush(stderr); + + bool matched = match_string(test_string, grammar); + + if (matched) { + fprintf(stderr, "❌ (incorrectly matched)\n"); + } else { + fprintf(stdout, "✅︎\n"); + } + assert(!matched); + } + + llama_sampler_free(grammar); +} + +static void test_grammar(const std::string & test_desc, const std::string & grammar_str, + const std::vector & passing_strings, + const std::vector & failing_strings) { + test(test_desc + ". Grammar: " + grammar_str, grammar_str, passing_strings, failing_strings); +} + +static void test_schema(const std::string & test_desc, const std::string & schema_str, + const std::vector & passing_strings, + const std::vector & failing_strings) { + test(test_desc + ". Schema: " + schema_str, "%llguidance {}\nstart: %json " + schema_str, passing_strings, + failing_strings); +} + +static void test_simple_grammar() { + test_schema("min 0", + R"""({ + "type": "integer", + "minimum": 0 + })""", + // Passing strings + { + "0", + "10", + "12", + "10000", + }, + // Failing strings + { + "-1", + "-10", + "-10000", + "-100000000000000000000000000000000", + // "100000000000000000000000000000000", + "00", + "01", + "-0", + }); + test_schema("min 2", + // Schema + R"""({ + "type": "integer", + "minimum": 2 + })""", + // Passing strings + { + "2", + "3", + "4", + "10", + "20", + "1234567890000000", + }, + // Failing strings + { + "0", "1", "-1", "-100", "0", "1", "01", "02", + // "12345678900000000", + }); + test_schema("min 456", + R"""({ + "type": "integer", + "minimum": 456 + })""", + // Passing strings + { + "456", + "4560", + "457", + "460", + "500", + }, + // Failing strings + { + "455", + "356", + "50", + "050", + "-1", + "-456", + }); + test_schema("min -123", + R"""({ + "type": "integer", + "minimum": -123 + })""", + // Passing strings + { + "-123", + "-122", + "-11", + "-1", + "0", + "1", + "123", + "1234", + "2345", + }, + // Failing strings + { + "-1234", + "-124", + }); + + test_schema("max 9999", + // Schema + R"""({ + "type": "integer", + "maximum": 9999 + })""", + // Passing strings + { + "-99999", + "0", + "9999", + }, + // Failing strings + { + "10000", + "99991", + }); + test_schema("max -9999", + // Schema + R"""({ + "type": "integer", + "maximum": -9999 + })""", + // Passing strings + { + "-10000", + "-9999", + }, + // Failing strings + { + "-9998", + "0", + "9999", + }); + test_schema("min 5 max 30", + // Schema + R"""({ + "type": "integer", + "minimum": 5, + "maximum": 30 + })""", + // Passing strings + { + "5", + "10", + "30", + }, + // Failing strings + { + "05", + "4", + "-1", + "31", + "123", + "0123", + }); + test_schema("min -1 max 1", + R"""({ + "type": "integer", + "minimum": -1, + "maximum": 1 + })""", + // Passing strings + { + "-1", + "0", + "1", + }, + // Failing strings + { + "-11", + "-10", + "-2", + "2", + "10", + "11", + }); + test_schema("min -123 max 42", + R"""({ + "type": "integer", + "minimum": -123, + "maximum": 42 + })""", + // Passing strings + { + "-123", + "-122", + "-13", + "-11", + "-2", + "-1", + "0", + "1", + "5", + "10", + "39", + "40", + "42", + }, + // Failing strings + { + "-0123", + "-124", + "-1123", + "-200", + "43", + "123", + "0123", + }); + test_schema("exclusive min / max", + // Schema + R"""({ + "type": "integer", + "exclusiveMinimum": 0, + "exclusiveMaximum": 10000 + })""", + // Passing strings + { + "1", + "9999", + }, + // Failing strings + { + "0", + "01", + "10000", + "99999", + }); + + // Test case for a simple grammar + test_grammar("simple grammar", + R"""( + start: expr + expr: term ("+" term)* + term: number + number: /[0-9]+/ )""", + // Passing strings + { + "42", + "1+2+3+4+5", + "123+456", + }, + // Failing strings + { + "+", + "/ 3", + "1+2+3+4+5+", + "12a45", + }); +} + +static void test_complex_grammar() { + // Test case for a more complex grammar, with both failure strings and success strings + test_grammar("medium complexity grammar", + // Grammar + R"""( + start: expression + expression: term ws (("+"|"-") ws term)* + term: factor ws (("*"|"/") ws factor)* + factor: number | variable | "(" expression ")" | function-call + number: /[0-9]+/ + variable: /[a-zA-Z_][a-zA-Z0-9_]*/ + function-call: variable ws "(" (expression ("," ws expression)*)? ")" + ws: /[ \t\n\r]?/ )""", + // Passing strings + { "42", + "1*2*3*4*5", + "x", + "x+10", + "x1+y2", + "(a+b)*(c-d)", + "func()", + "func(x,y+2)", + "a*(b+c)-d/e", + "f(g(x),h(y,z))", + "x + 10", + "x1 + y2", + "(a + b) * (c - d)", + "func()", + "func(x, y + 2)", + "a * (b + c) - d / e", + "f(g(x), h(y, z))", + "123+456", + "123*456*789-123/456+789*123", + "123+456*789-123/456+789*123-456/789+123*456-789/123+456*789-123/456+789*123-456" }, + // Failing strings + { + "+", + "/ 3x", + "x + + y", + "a * / b", + "func(,)", + "func(x y)", + "(a + b", + "x + y)", + "a + b * (c - d", + "42 +", + "x +", + "x + 10 +", + "(a + b) * (c - d", + "func(", + "func(x, y + 2", + "a * (b + c) - d /", + "f(g(x), h(y, z)", + "123+456*789-123/456+789*123-456/789+123*456-789/123+456*789-123/456+789*123-456/", + }); +} + +static void test_special_chars() { + // A collection of tests to exercise special characters such as "." + test_grammar("special characters", + // Grammar + R"""( + start: /.../ "abc" /.../ + )""", + // Passing strings + { "abcabcabc", "aaaabcccc", + // NOTE: Also ensures that multi-byte characters still count as a single character + "🔵🟠✅abc❌🟠🔵" }, + // Failing strings + { "aaabcccc", "aaaaabcccc", "aaaabccc", "aaaabccccc", "🔵🟠✅❌abc❌✅🟠🔵", "🔵🟠abc🟠🔵" }); +} + +static void test_quantifiers() { + // A collection of tests to exercise * + and ? quantifiers + + test_grammar( + "* quantifier", + // Grammar + R"""(start: "a"*)""", + // Passing strings + { "", "a", "aaaaa", "aaaaaaaaaaaaaaaaaa", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" }, + // Failing strings + { "b", "ab", "aab", "ba", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab" }); + test_grammar( + "+ quantifier", + // Grammar + R"""(start: "a"+)""", + // Passing strings + { "a", "aaaaa", "aaaaaaaaaaaaaaaaaa", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" }, + // Failing strings + { "", "b", "ab", "aab", "ba", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab" }); + test_grammar("? quantifier", + // Grammar + R"""(start: "a"?)""", + // Passing strings + { "", "a" }, + // Failing strings + { + "b", + "ab", + "aa", + "ba", + }); + test_grammar("mixed quantifiers", + // Grammar + R"""( + start: cons+ vowel* cons? (vowel cons)* + vowel: /[aeiouy]/ + cons: /[bcdfghjklmnpqrstvwxyz]/ + )""", + // Passing strings + { + "yes", + "no", + "noyes", + "crwth", + "four", + "bryyyy", + }, + // Failing strings + { + "yess", + "yesno", + "forty", + "catyyy", + }); + test_grammar("simple exact repetition", + // Grammar + R"""( + start: /[ab]{4}/ + )""", + // Passing strings + { + "aaaa", + "bbbb", + "abab", + }, + // Failing strings + { + "a", + "b", + "aaaaa", + }); + test_grammar("simple min repetition", + // Grammar + R"""( + start: /[ab]{4,}/ + )""", + // Passing strings + { + "aaaa", + "aaaaab", + "bbbb", + "ababab", + }, + // Failing strings + { + "", + "aba", + }); + test_grammar("simple max repetition", + // Grammar + R"""( + start: /[ab]{0,4}/ + )""", + // Passing strings + { + "", + "a", + "aa", + "aaa", + "aaab", + }, + // Failing strings + { + "aaaaa", + }); + // test_grammar("min / max repetition", + // // Grammar + // R"""( + // start: ("0x" /[A-F0-9]{2}/ " "?){3,5} + // )""", + // // Passing strings + // { + // "0xFF 0x12 0xAB", + // "0xFF 0x12 0xAB 0x00 0x00", + // }, + // // Failing strings + // { + // "", + // "0xFF", + // "0xFF 0x12", + // "0xFF 0x12 0xAB 0x00 0x00 0x00", + // }); +} + +static void test_json_schema() { + // Note that this is similar to the regular grammar tests, + // but we convert each json schema to a grammar before parsing. + // Otherwise, this test structure is the same. + + test_schema("empty schema (object)", + // Schema + R"""( + {"type":"object"} + )""", + // Passing strings + { + R"""({})""", + R"""({"foo": "bar"})""", + }, + // Failing strings + { + "", + "[]", + "null", + R"""("")""", + "true", + }); + + test_schema( + "exotic formats (list)", + // Schema + R"""({ + "items": [ + { "format": "date" }, + { "format": "uuid" }, + { "format": "time" }, + { "format": "date-time" } + ] + })""", + // Passing strings + { + // "{}", // NOTE: This string passes for this schema on https://www.jsonschemavalidator.net/ -- should it? + // "[]", // NOTE: This string passes for this schema on https://www.jsonschemavalidator.net/ -- should it? + R"""(["2012-04-23", "12345678-1234-1234-1234-1234567890ab", "18:25:43.511Z", "2012-04-23T18:25:43.511Z"])""", + //R"""(["2012-04-23","12345678-1234-1234-1234-1234567890ab"])""", // NOTE: This string passes for this schema on https://www.jsonschemavalidator.net/ -- should it? + //R"""({"foo": "bar"})""", // NOTE: This string passes for this schema on https://www.jsonschemavalidator.net/ -- should it? + }, + // Failing strings + { + R"""(["foo", "bar"])""", + R"""(["12345678-1234-1234-1234-1234567890ab"])""", + }); + + test_schema("string", + // Schema + R"""({ + "type": "string" + })""", + // Passing strings + { + R"""("foo")""", + R"""("bar")""", + R"""("")""", + }, + // Failing strings + { + R"""({})""", + R"""("foo": "bar")""", + }); + + test_schema("string w/ min length 1", + // Schema + R"""({ + "type": "string", + "minLength": 1 + })""", + // Passing strings + { + R"""("foo")""", + R"""("bar")""", + }, + // Failing strings + { + R"""("")""", + R"""({})""", + R"""("foo": "bar")""", + }); + + test_schema("string w/ min length 3", + // Schema + R"""({ + "type": "string", + "minLength": 3 + })""", + // Passing strings + { + R"""("foo")""", + R"""("bar")""", + R"""("foobar")""", + }, + // Failing strings + { + R"""("")""", + R"""("f")""", + R"""("fo")""", + }); + + test_schema("string w/ max length", + // Schema + R"""({ + "type": "string", + "maxLength": 3 + })""", + // Passing strings + { + R"""("foo")""", + R"""("bar")""", + R"""("")""", + R"""("f")""", + R"""("fo")""", + }, + // Failing strings + { + R"""("foobar")""", + }); + + test_schema("string w/ min & max length", + // Schema + R"""({ + "type": "string", + "minLength": 1, + "maxLength": 4 + })""", + // Passing strings + { + R"""("foo")""", + R"""("bar")""", + R"""("f")""", + R"""("barf")""", + }, + // Failing strings + { + R"""("")""", + R"""("barfo")""", + R"""("foobar")""", + }); + + test_schema("boolean", + // Schema + R"""({ + "type": "boolean" + })""", + // Passing strings + { + "true", + "false", + }, + // Failing strings + { + R"""("")""", + R"""("true")""", + R"""(True)""", + R"""(FALSE)""", + }); + + test_schema("integer", + // Schema + R"""({ + "type": "integer" + })""", + // Passing strings + { + R"""(0)""", + R"""(12345)""", + R"""(1234567890123456)""", + }, + // Failing strings + { + R"""()""", + R"""(01)""", + R"""(007)""", + R"""(12345678901234567 )""", + }); + + test_schema("string const", + // Schema + R"""({ + "const": "foo" + })""", + // Passing strings + { + R"""("foo")""", + }, + // Failing strings + { + R"""(foo)""", + R"""("bar")""", + }); + + test_schema("non-string const", + // Schema + R"""({ + "const": true + })""", + // Passing strings + { + R"""(true)""", + }, + // Failing strings + { + R"""()""", + R"""(foo)""", + R"""("true")""", + }); + + test_schema("non-string const", + // Schema + R"""({ + "enum": ["red", "amber", "green", null, 42, ["foo"]] + })""", + // Passing strings + { + R"""("red")""", + R"""(null)""", + R"""(42)""", + R"""(["foo"])""", + }, + // Failing strings + { + R"""()""", + R"""(420)""", + R"""(true)""", + R"""(foo)""", + }); + + test_schema("simple pattern", + // Schema + R"""({ + "pattern": "^[a-zA-Z0-9_-]*$" + })""", + // Passing strings + { + R"""("")""", + R"""("He_llo-12")""", + }, + // Failing strings + { + R"""("!")""", + R"""("Hello World")""", + }); + + test_schema("pattern with escapes", + // Schema + R"""({ + "pattern": "^a\\^\\$\\.\\[\\]\\(\\)\\|\\{\\}\\*\\+\\?b$" + })""", + // Passing strings + { + R"""("a^$.[]()|{}*+?b")""", + }, + // Failing strings + { + R"""("ab")""", + }); + + test_schema("", + // Schema + R"""( + { + "type": ["array", "null"], + "items": { "type": "string" } + } + )""", + // Passing strings + { + "null", + "[]", + "[\"123\"]", + "[\"foo\", \"bar\"]", + }, + // Failing strings + { + "", + "[123]", + "\"foo\"", + "[\"foo\", 42]", + }); + + test_schema("min+max items", + // Schema + R"""({ + "items": { + "type": ["number", "integer"] + }, + "minItems": 3, + "maxItems": 5 + })""", + // Passing strings + { + R"""([1, 2, 3])""", + R"""([1, 2, 3, 4])""", + R"""([1, 2, 3, 4, 5])""", + // this is in fact correct; keyword do not apply if the type is wrong + R"""(1)""", + }, + // Failing strings + { + R"""([1, 2])""", + R"""([1, 2, 3, 4, 5, 6])""", + }); + + // Properties (from: https://json-schema.org/understanding-json-schema/reference/object#properties) + test_schema("object properties", + // Schema + R"""({ + "type": "object", + "properties": { + "number": { "type": "number" }, + "street_name": { "type": "string" }, + "street_type": { "enum": ["Street", "Avenue", "Boulevard"] } + }, + "additionalProperties": false + })""", + // Passing strings + { + R"""({ "number": 1600, "street_name": "Pennsylvania", "street_type":"Avenue"})""", + // "By default, leaving out properties is valid" + R"""({ "street_name": "Pennsylvania" })""", + R"""({ "number": 1600, "street_name": "Pennsylvania" })""", + // "By extension, even an empty object is valid" + R"""({})""", + R"""({ "number": 1600, "street_name": "Pennsylvania", "street_type": "Avenue" })""", + }, + // Failing strings + { + // Change datatype from number to string + R"""({ "number": "1600", "street_name": "Pennsylvania", "street_type":"Avenue"})""", + // Reorder properties + R"""({ "street_name": "Pennsylvania", "number": 1600 })""", + // Reorder properties + R"""({ "number": "1600", "street_name": "Pennsylvania", "street_type":"Avenue"})""", + // Additional properties set to false + R"""({ "number": 1600, "street_name": "Pennsylvania", "street_type":"Avenue", "direction":"NW"})""", + + }); + + test_schema("additional properties can't override other properties", + R"""({ + "properties": { + "a": {"type": "integer"}, + "b": {"type": "integer"} + }, + "additionalProperties": true + })""", + // Passing strings + { + R"""({"a": 42})""", + R"""({"c": ""})""", + R"""({"a": 42, "c": ""})""", + R"""({"a_": ""})""", + }, + // Failing strings + { + R"""()""", + R"""({"a": ""})""", + R"""({"a": "", "b": ""})""", + }); + + // Properties (from: https://json-schema.org/understanding-json-schema/reference/object#properties) + test_schema("object properties, additionalProperties: true", + // Schema + R"""({ + "type": "object", + "properties": { + "number": { "type": "number" }, + "street_name": { "type": "string" }, + "street_type": { "enum": ["Street", "Avenue", "Boulevard"] } + }, + "additionalProperties": true + })""", + // Passing strings + { + // "By extension, even an empty object is valid" + R"""({})""", + R"""({"number":1600,"street_name":"Pennsylvania","street_type":"Avenue"})""", + // "By default, leaving out properties is valid" + R"""({ "street_name": "Pennsylvania" })""", + R"""({ "number": 1600, "street_name": "Pennsylvania" })""", + // "By default, providing additional properties is valid" + R"""({ "number": 1600, "street_name": "Pennsylvania", "street_type":"Avenue", "direction":"NW"})""", + R"""({ "number": 1600, "street_name": "Pennsylvania", "street_type": "Avenue" })""", + }, + // Failing strings + { + // Change datatype from number to string + R"""({ "number": "1600", "street_name": "Pennsylvania", "street_type":"Avenue"})""", + // Reorder properties + R"""({ "street_name": "Pennsylvania", "number": 1600, "street_type":"Avenue"})""", + }); + + // Additional properties: false + test_schema( + "required + optional props each in original order", + // Schema + R"""({ + "type": "object", + "properties": { + "number": { "type": "number" }, + "street_name": { "type": "string" }, + "street_type": { "enum": ["Street", "Avenue", "Boulevard"] } + }, + "additionalProperties": false + })""", + // Passing strings + { + R"""({ "street_name": "Pennsylvania" })""", + R"""({ "number": 1600, "street_type":"Avenue"})""", + R"""({ "number": 1600, "street_name": "Pennsylvania" })""", + R"""({ "number": 1600, "street_name": "Pennsylvania", "street_type":"Avenue"})""", + // Spaces are permitted around enum values + R"""({ "number": 1600, "street_name": "Pennsylvania", "street_type": "Avenue" })""", + }, + // Failing strings + { + // Reorder properties + R"""({ "street_type": "Avenue", "number": 1600 })""", + // Add "direction" + R"""({ "number": 1600, "street_name": "Pennsylvania", "street_type": "Avenue", "direction": "NW" })""", + }); + + test_schema("required + optional props each in original order", + // Schema + R"""({ + "properties": { + "b": {"type": "string"}, + "a": {"type": "string"}, + "d": {"type": "string"}, + "c": {"type": "string"} + }, + "required": ["a", "b"], + "additionalProperties": false + })""", + // Passing strings + { + R"""({"b": "foo", "a": "bar"})""", + R"""({"b":"foo","a":"bar","d":"qux"})""", + R"""({"b":"foo", "a":"bar", "d":"qux", "c":"baz"})""", + }, + // Failing strings + { + R"""({"a": "foo", "b": "bar"})""", + R"""({"b": "bar"})""", + R"""({"a": "foo", "c": "baz"})""", + R"""({"a":"foo", "b":"bar", "c":"baz", "d":"qux"})""", + }); + + // NOTE: Example from https://json-schema.org/learn/getting-started-step-by-step#define-required-properties + test_schema( + "required props", + // Schema + R"""({ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://example.com/product.schema.json", + "title": "Product", + "description": "A product from Acme's catalog", + "type": "object", + "properties": { + "productId": { + "description": "The unique identifier for a product", + "type": "integer" + }, + "productName": { + "description": "Name of the product", + "type": "string" + }, + "price": { + "description": "The price of the product", + "type": "number", + "exclusiveMinimum": 0 + }, + "tags": { + "description": "Tags for the product", + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1, + "DISABLED_uniqueItems": true + }, + "dimensions": { + "type": "object", + "properties": { + "length": { + "type": "number" + }, + "width": { + "type": "number" + }, + "height": { + "type": "number" + } + }, + "required": [ "length", "width", "height" ] + } + }, + "required": [ "productId", "productName", "price" ] + })""", + // Passing strings + { + R"""({"productId": 1, "productName": "A green door", "price": 12.50})""", + R"""({"productId": 1, "productName": "A green door", "price": 12.50, "tags": ["home", "green"]})""", + R"""({"productId": 1, "productName": "A green door", "price": 12.50, "tags": ["home", "green"], "dimensions": {"length": 785, "width": 250.5, "height": -0.359}})""", + }, + // Failing strings + { + R"""({})""", // Missing all required properties + R"""({"productName": "A green door", "price": 12.50, "productId": 1})""", // Out of order properties + // `exclusiveMinimum` is OK for llg + R"""({"productId": 1, "productName": "A green door", "price": -12.50})""", + R"""({"productId": 1, "productName": "A green door"})""", // Missing required property (price) + R"""({"productName": "A green door", "price": 12.50})""", // Missing required property (productId) + R"""({"productId": 1, "productName": "A green door", "price": 12.50, "tags": []})""", // tags is empty, but minItems is 1 + R"""({"productId": 1, "productName": "A green door", "price": 12.50, "dimensions": {"length": 785, "width": 250.5, "height": -0.359}, "tags": ["home", "green"]})""", // Tags and dimensions are out of order + // TODO: The following line should fail, but currently it passes. `uniqueItems` is not supported, as it would likely be too difficult to implement. + // R"""({"productId": 1, "productName": "A green door", "price": 12.50, "tags": ["home", "green", "home"]})""", + }); +} + +int main(int argc, const char ** argv) { + fprintf(stdout, "Running llguidance integration tests...\n"); + + if (argc != 2) { + fprintf(stderr, "Usage: %s \n", argv[0]); + return 1; + } + + const char * vocab_file = argv[1]; + + fprintf(stderr, "reading vocab from: '%s'\n", vocab_file); + + llama_model * model; + llama_context * ctx; + + llama_backend_init(); + + // load the vocab + { + auto mparams = llama_model_default_params(); + + mparams.vocab_only = true; + + model = llama_model_load_from_file(vocab_file, mparams); + + if (model == NULL) { + fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, vocab_file); + return 1; + } + + // needed? + auto cparams = llama_context_default_params(); + + ctx = llama_init_from_model(model, cparams); + + if (ctx == NULL) { + fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, vocab_file); + llama_model_free(model); + return 1; + } + } + + vocab = llama_model_get_vocab(model); + + test_simple_grammar(); + test_complex_grammar(); + test_special_chars(); + test_quantifiers(); + test_json_schema(); + fprintf(stdout, "All tests passed.\n"); + return 0; +} diff --git a/tests/test-json-schema-to-grammar.cpp b/tests/test-json-schema-to-grammar.cpp index 9d2db91f5..f38994c92 100755 --- a/tests/test-json-schema-to-grammar.cpp +++ b/tests/test-json-schema-to-grammar.cpp @@ -1246,7 +1246,7 @@ int main() { test_all("C++", [](const TestCase & tc) { try { - tc.verify(json_schema_to_grammar(nlohmann::ordered_json::parse(tc.schema))); + tc.verify(json_schema_to_grammar(nlohmann::ordered_json::parse(tc.schema), true)); tc.verify_status(SUCCESS); } catch (const std::runtime_error & ex) { fprintf(stderr, "Error: %s\n", ex.what()); From 69804487e0b10f2c5c06316f0ac0eb6ada68433f Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Sun, 2 Feb 2025 09:10:15 +0000 Subject: [PATCH 216/279] Fix exotic ci env that lacks ostringstream::str (#11581) --- common/minja.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/minja.hpp b/common/minja.hpp index bcb5a0824..e77eb69d5 100644 --- a/common/minja.hpp +++ b/common/minja.hpp @@ -824,7 +824,7 @@ public: LoopControlType control_type; LoopControlException(const std::string & message, LoopControlType control_type) : std::runtime_error(message), control_type(control_type) {} LoopControlException(LoopControlType control_type) - : std::runtime_error((std::ostringstream() << (control_type == LoopControlType::Continue ? "continue" : "break") << " outside of a loop").str()), + : std::runtime_error((control_type == LoopControlType::Continue ? "continue" : "break") + std::string(" outside of a loop")), control_type(control_type) {} }; From bfcce4d693617ec843d0b2510f6ee16e6bc6720d Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Sun, 2 Feb 2025 09:25:38 +0000 Subject: [PATCH 217/279] `tool-call`: support Command R7B (+ return tool_plan "thoughts" in API) (#11585) * `tool-call`: support Command R7B (w/ tool_plan return) * `tool-call`: cleaner preservation of tokens + warn when likely bad chat template override * `tool-call`: test cleanup / handle lazy grammar triggers --- common/chat.cpp | 86 +++++++++- common/chat.hpp | 2 + common/common.h | 3 + examples/server/README.md | 22 ++- examples/server/server.cpp | 52 ++++-- examples/server/utils.hpp | 1 + ...AI-c4ai-command-r7b-12-2024-tool_use.jinja | 156 ++++++++++++++++++ tests/test-chat.cpp | 154 +++++++++++++---- 8 files changed, 420 insertions(+), 56 deletions(-) create mode 100644 models/templates/CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja diff --git a/common/chat.cpp b/common/chat.cpp index 58db12af9..f87583d85 100644 --- a/common/chat.cpp +++ b/common/chat.cpp @@ -16,6 +16,7 @@ std::string common_chat_format_name(common_chat_format format) { case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2: return "Functionary v3.2"; case COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1: return "Functionary v3.1 Llama 3.1"; case COMMON_CHAT_FORMAT_HERMES_2_PRO: return "Hermes 2 Pro"; + case COMMON_CHAT_FORMAT_COMMAND_R7B: return "Command R7B"; default: throw std::runtime_error("Unknown chat format"); } @@ -317,6 +318,79 @@ static common_chat_msg common_chat_parse_mistral_nemo(const std::string & input) return parse_prefixed_json_tool_call_array(input, "[TOOL_CALLS]"); } +static common_chat_params common_chat_params_init_command_r7b(const common_chat_template & tmpl, const struct common_chat_inputs & inputs) { + common_chat_params data; + data.grammar_lazy = inputs.tool_choice != "required"; + data.grammar = build_grammar([&](const common_grammar_builder & builder) { + auto schemas = json::array(); + foreach_function(inputs.tools, [&](const json & tool) { + const auto & function = tool["function"]; + schemas.push_back({ + {"type", "object"}, + {"properties", { + {"tool_call_id", { + {"type", "string"}, + // Command-R's template expects an integer string. + {"pattern", "^[0-9]{1,10}$"}, + }}, + {"tool_name", { + {"type", "string"}, + {"const", function["name"]}, + }}, + {"parameters", function["parameters"]}, + }}, + {"required", json::array({"tool_call_id", "tool_name", "parameters"})}, + }); + }); + auto schema = json { + {"type", "array"}, + {"items", schemas.size() == 1 ? schemas[0] : json {{"anyOf", schemas}}}, + {"minItems", 1}, + }; + if (!inputs.parallel_tool_calls) { + schema["maxItems"] = 1; + } + builder.add_rule("root", "\"<|START_ACTION|>\" " + builder.add_schema("tool_calls", schema) + " \"<|END_ACTION|>\""); + }, grammar_options); + data.grammar_triggers.push_back({"<|START_ACTION|>", /* .at_start = */ false}); + data.preserved_tokens = { + "<|START_RESPONSE|>", + "<|END_RESPONSE|>", + "<|START_THINKING|>", + "<|END_THINKING|>", + "<|END_ACTION|>", + }; + data.prompt = tmpl.apply(inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt); + data.format = COMMON_CHAT_FORMAT_COMMAND_R7B; + return data; +} +static common_chat_msg common_chat_parse_command_r7b(const std::string & input) { + static std::regex response_regex("<\\|START_RESPONSE\\|>(.*?)<\\|END_RESPONSE\\|>"); + static std::regex thought_action_regex("<\\|START_THINKING\\|>([\\s\\S\\n\\r]*?)<\\|END_THINKING\\|><\\|START_ACTION\\|>([\\s\\S\\n\\r]*?)<\\|END_ACTION\\|>"); + std::smatch match; + + common_chat_msg result; + result.role = "assistant"; + if (std::regex_match(input, match, response_regex)) { + result.content = match[1].str(); + } else if (std::regex_match(input, match, thought_action_regex)) { + result.tool_plan = match[1].str(); + auto actions_str = match[2].str(); + auto actions = json::parse(actions_str); + for (const auto & action : actions) { + result.tool_calls.push_back({ + /* .name = */ action["tool_name"], + /* .arguments = */ action["parameters"].dump(), + /* .id = */ action["tool_call_id"], + }); + } + } else { + LOG_ERR("Failed to parse command_r output"); + result.content = input; + } + return result; +} + static void expect_tool_parameters(const std::string & name, const json & parameters, const std::vector & expected_properties) { if (!parameters.is_object() || !parameters.contains("type") || parameters["type"] != "object" || !parameters.contains("properties") || !parameters.contains("required")) { throw std::runtime_error("Parameters of tool " + name + " must be an object w/ required properties"); @@ -462,6 +536,10 @@ static common_chat_params common_chat_params_init_deepseek_r1(const common_chat_ "\"<|tool▁call▁begin|>function<|tool▁sep|>" + name + "\\n```json\\n\" " + args_rule + " \"```<|tool▁call▁end|>\"")); }); data.grammar_triggers.push_back({"<|tool▁calls▁begin|>", /* .at_start = */ false}); + data.preserved_tokens = { + "<|tool▁sep|>", + "<|tool▁call▁end|>", + }; builder.add_rule("root", "\"<|tool▁calls▁begin|>\" (" + string_join(tool_rules, " | ") + ")" + (inputs.parallel_tool_calls ? "*" : "") + " space"); }, grammar_options); data.prompt = tmpl.apply(inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt); @@ -704,8 +782,7 @@ static common_chat_params common_chat_params_init_hermes_2_pro(const common_chat auto tool_call = "\"\" space " + builder.add_rule("tool_call", string_join(tool_rules, " | ")) + " \"\" space"; builder.add_rule("root", inputs.parallel_tool_calls ? "(" + tool_call + ")+" : tool_call); data.grammar_triggers.push_back({"", /* .at_start = */ false}); - // Not really a trigger but need to print this special token to get a successful parse. - data.grammar_triggers.push_back({"", /* .at_start = */ false}); + data.preserved_tokens = { "" }; }, grammar_options); data.prompt = tmpl.apply(inputs.messages, inputs.tools.empty() ? json() : inputs.tools, inputs.add_generation_prompt); @@ -822,6 +899,9 @@ common_chat_params common_chat_params_init(const common_chat_template & tmpl, co if (src.find("[TOOL_CALLS]") != std::string::npos) { return common_chat_params_init_mistral_nemo(tmpl, inputs); } + if (src.find("<|END_THINKING|><|START_ACTION|>") != std::string::npos) { + return common_chat_params_init_command_r7b(tmpl, inputs); + } return common_chat_params_init_generic(tmpl, inputs); } @@ -855,6 +935,8 @@ common_chat_msg common_chat_parse(const std::string & input, common_chat_format return common_chat_parse_hermes_2_pro(input); case COMMON_CHAT_FORMAT_FIREFUNCTION_V2: return common_chat_parse_firefunction_v2(input); + case COMMON_CHAT_FORMAT_COMMAND_R7B: + return common_chat_parse_command_r7b(input); default: throw std::runtime_error("Unsupported format: " + common_chat_format_name(format)); } diff --git a/common/chat.hpp b/common/chat.hpp index ca165aa13..33e64a430 100644 --- a/common/chat.hpp +++ b/common/chat.hpp @@ -32,6 +32,7 @@ enum common_chat_format { COMMON_CHAT_FORMAT_FUNCTIONARY_V3_2, COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1, COMMON_CHAT_FORMAT_HERMES_2_PRO, + COMMON_CHAT_FORMAT_COMMAND_R7B, COMMON_CHAT_FORMAT_COUNT, // Not a format, just the # formats }; @@ -42,6 +43,7 @@ struct common_chat_params { std::string grammar; bool grammar_lazy = false; std::vector grammar_triggers; + std::vector preserved_tokens; std::vector additional_stops; }; diff --git a/common/common.h b/common/common.h index 6c1809277..b208d0c7e 100644 --- a/common/common.h +++ b/common/common.h @@ -4,6 +4,7 @@ #include "llama-cpp.h" +#include #include #include #include @@ -163,6 +164,7 @@ struct common_params_sampling { bool grammar_lazy = false; std::vector grammar_trigger_words; // optional trigger words to trigger lazy grammar std::vector grammar_trigger_tokens; // optional trigger tokens to trigger lazy grammar and print trigger special tokens. + std::set preserved_tokens; std::vector logit_bias; // logit biases to apply @@ -621,6 +623,7 @@ struct common_chat_msg { std::string role; std::string content; std::vector tool_calls; + std::string tool_plan = ""; }; // Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid diff --git a/examples/server/README.md b/examples/server/README.md index 276b43013..e9d0374ad 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -1128,6 +1128,7 @@ curl http://localhost:8080/v1/chat/completions \ - Hermes 2/3, Qwen 2.5 - Mistral Nemo - Firefunction v2 + - Command R7B - DeepSeek R1 (WIP / seems reluctant to call any tools?)
@@ -1202,21 +1203,28 @@ curl http://localhost:8080/v1/chat/completions \ ```shell # Native support: llama-server --jinja -fa -hf bartowski/Qwen2.5-7B-Instruct-GGUF:Q4_K_M - llama-server --jinja -fa -hf bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q4_K_M - llama-server --jinja -fa -hf bartowski/Llama-3.2-3B-Instruct-GGUF:Q6_K + llama-server --jinja -fa -hf bartowski/Mistral-Nemo-Instruct-2407-GGUF:Q6_K_L llama-server --jinja -fa -hf bartowski/functionary-small-v3.2-GGUF:Q4_K_M - llama-server --jinja -fa -hf bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M \ - --chat-template-file <( python scripts/get_chat_template.py NousResearch/Hermes-2-Pro-Llama-3-8B ) + llama-server --jinja -fa -hf bartowski/Llama-3.3-70B-Instruct-GGUF:Q4_K_M # Native support requires the right template for these GGUFs: + + llama-server --jinja -fa -hf bartowski/Hermes-2-Pro-Llama-3-8B-GGUF:Q4_K_M \ + --chat-template-file <( python scripts/get_chat_template.py NousResearch/Hermes-2-Pro-Llama-3-8B tool_use ) + llama-server --jinja -fa -hf bartowski/Hermes-3-Llama-3.1-8B-GGUF:Q4_K_M \ --chat-template-file <( python scripts/get_chat_template.py NousResearch/Hermes-3-Llama-3.1-8B tool_use ) + llama-server --jinja -fa -hf bartowski/firefunction-v2-GGUF -hff firefunction-v2-IQ1_M.gguf \ - --chat-template-file <( python scripts/get_chat_template.py fireworks-ai/firellama-3-firefunction-v2 ) + --chat-template-file <( python scripts/get_chat_template.py fireworks-ai/llama-3-firefunction-v2 tool_use ) + + llama-server --jinja -fa -hf bartowski/c4ai-command-r7b-12-2024-GGUF:Q6_K_L \ + --chat-template-file <( python scripts/get_chat_template.py CohereForAI/c4ai-command-r7b-12-2024 tool_use ) # Generic format support - llama-server --jinja -fa -hf bartowski/Phi-3.5-mini-instruct-GGUF:Q4_K_M - llama-server --jinja -fa -hf bartowski/gemma-2-2b-it-GGUF:Q4_K_M + llama-server --jinja -fa -hf bartowski/phi-4-GGUF:Q4_0 + llama-server --jinja -fa -hf bartowski/gemma-2-2b-it-GGUF:Q8_0 + llama-server --jinja -fa -hf bartowski/c4ai-command-r-v01-GGUF:Q2_K ``` - Test in CLI: diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 3451e96a2..e0acc4705 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -131,6 +131,11 @@ struct slot_params { lora.push_back({{"id", i}, {"scale", this->lora[i].scale}}); } + std::vector grammar_trigger_words; + for (const auto & trigger : sampling.grammar_trigger_words) { + grammar_trigger_words.push_back(trigger.word); + } + return json { {"n_predict", n_predict}, // Server configured n_predict {"seed", sampling.seed}, @@ -165,8 +170,9 @@ struct slot_params { {"n_probs", sampling.n_probs}, {"min_keep", sampling.min_keep}, {"grammar", sampling.grammar}, - // {"grammar_trigger_words", sampling.grammar_trigger_words}, + {"grammar_trigger_words", grammar_trigger_words}, {"grammar_trigger_tokens", sampling.grammar_trigger_tokens}, + {"preserved_tokens", sampling.preserved_tokens}, {"samplers", samplers}, {"speculative.n_max", speculative.n_max}, {"speculative.n_min", speculative.n_min}, @@ -363,12 +369,26 @@ struct server_task { if (ids.size() == 1) { LOG_DBG("Grammar trigger token: %d (`%s`)\n", ids[0], trigger.word.c_str()); params.sampling.grammar_trigger_tokens.push_back(ids[0]); + params.sampling.preserved_tokens.insert(ids[0]); continue; } LOG_DBG("Grammar trigger word: `%s`\n", trigger.word.c_str()); params.sampling.grammar_trigger_words.push_back(trigger); } } + const auto preserved_tokens = data.find("preserved_tokens"); + if (preserved_tokens != data.end()) { + for (const auto & t : *preserved_tokens) { + auto ids = common_tokenize(vocab, t.get(), /* add_special= */ false, /* parse_special= */ true); + if (ids.size() == 1) { + LOG_DBG("Preserved token: %d\n", ids[0]); + params.sampling.preserved_tokens.insert(ids[0]); + } else { + // This may happen when using a tool call style meant for a model with special tokens to preserve on a model without said tokens. + LOG_WRN("Not preserved because more than 1 token (wrong chat template override?): %s\n", t.get().c_str()); + } + } + } if (params.sampling.grammar_lazy) { GGML_ASSERT(params.sampling.grammar_trigger_tokens.size() > 0 || params.sampling.grammar_trigger_words.size() > 0); } @@ -695,19 +715,19 @@ struct server_task_result_cmpl_final : server_task_result { json to_json_oaicompat_chat() { std::string finish_reason = "length"; - common_chat_msg message; + common_chat_msg msg; if (stop == STOP_TYPE_WORD || stop == STOP_TYPE_EOS) { LOG_DBG("Parsing chat message: %s\n", content.c_str()); - message = common_chat_parse(content, oaicompat_chat_format); - finish_reason = message.tool_calls.empty() ? "stop" : "tool_calls"; + msg = common_chat_parse(content, oaicompat_chat_format); + finish_reason = msg.tool_calls.empty() ? "stop" : "tool_calls"; } else { - message.content = content; + msg.content = content; } json tool_calls; - if (!message.tool_calls.empty()) { + if (!msg.tool_calls.empty()) { tool_calls = json::array(); - for (const auto & tc : message.tool_calls) { + for (const auto & tc : msg.tool_calls) { tool_calls.push_back({ {"type", "function"}, {"function", { @@ -719,14 +739,19 @@ struct server_task_result_cmpl_final : server_task_result { } } + json message { + {"content", msg.content}, + {"tool_calls", tool_calls}, + {"role", "assistant"}, + }; + if (!msg.tool_plan.empty()) { + message["tool_plan"] = msg.tool_plan; + } + json choice { {"finish_reason", finish_reason}, {"index", 0}, - {"message", json { - {"content", message.content}, - {"tool_calls", tool_calls}, - {"role", "assistant"}, - }}, + {"message", message}, }; if (!stream && probs_output.size() > 0) { @@ -2833,8 +2858,7 @@ struct server_context { server_slot * slot_batched = nullptr; auto accept_special_token = [&](server_slot & slot, llama_token token) { - const auto & trigger_tokens = slot.params.sampling.grammar_trigger_tokens; - return params_base.special || std::find(trigger_tokens.begin(), trigger_tokens.end(), token) != trigger_tokens.end(); + return params_base.special || slot.params.sampling.preserved_tokens.find(token) != slot.params.sampling.preserved_tokens.end(); }; // frist, add sampled tokens from any ongoing sequences diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index bfe623c4c..fefdce55b 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -662,6 +662,7 @@ static json oaicompat_completion_params_parse( }); } llama_params["grammar_triggers"] = grammar_triggers; + llama_params["preserved_tokens"] = chat_params.preserved_tokens; for (const auto & stop : chat_params.additional_stops) { llama_params["stop"].push_back(stop); } diff --git a/models/templates/CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja b/models/templates/CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja new file mode 100644 index 000000000..078e9f545 --- /dev/null +++ b/models/templates/CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja @@ -0,0 +1,156 @@ +{{ bos_token }}{%- macro document_turn(documents) -%} +{# format documents into chat turn #} +<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|><|START_THINKING|>I will look through the document to address the users needs.<|END_THINKING|><|START_ACTION|>[ + {"tool_call_id": "0", "tool_name": "direct-injected-document", "parameters": {}} +]<|END_ACTION|><|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[ + { + "tool_call_id": "0", + "results": { +{% for doc in documents %} + "{{ loop.index0 }}": {{doc|tojson}}{% if not loop.last %}, + {% endif %} +{% endfor %} + + }, + "is_error": null + } +]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|>{%- endmacro %} +{%- macro tool_call_id_to_int(messages, tool_call_id) %} +{%- set counter = namespace(value=0) %} +{%- set tool_call_id_seen = namespace(value=false) %} +{%- for msg in messages %} + {%- if msg.tool_calls %} + {%- for tool_call in msg.tool_calls %} + {%- if tool_call.id == tool_call_id and not tool_call_id_seen.value -%} + {{ counter.value }} + {%- set tool_call_id_seen.value = true %} + {%- endif %} + {%- set counter.value = counter.value + 1 %} + {%- endfor %} + {%- endif %} +{%- endfor %} +{%- endmacro %} +{%- macro format_tool_message(messages, tool_msg) -%} +{# format tool message #} + { + "tool_call_id": "{{ tool_call_id_to_int(messages, tool_msg.tool_call_id) }}", + "results": { + "0": {{ tool_msg.content|tojson }} + }, + "is_error": null + } +{%- endmacro -%} +{%- if messages and messages[0]['role']|lower == 'system' %}{%- set developer_preamble = messages[0]['content'] %}{% endif %} +{%- set tool_idx = namespace(value=0) %} +{%- set tool_ids_seen = namespace(value=[]) %} +{%- set sent_documents = namespace(value=false) %} +<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># System Preamble +You are in contextual safety mode. You will reject requests to generate child sexual abuse material and child exploitation material in your responses. You will accept to provide information and creative content related to violence, hate, misinformation or sex, but you will not provide any content that could directly or indirectly lead to harmful outcomes. + +Your information cutoff date is June 2024. + +You have been trained on data in English, French, Spanish, Italian, German, Portuguese, Japanese, Korean, Modern Standard Arabic, Mandarin, Russian, Indonesian, Turkish, Dutch, Polish, Persian, Vietnamese, Czech, Hindi, Ukrainian, Romanian, Greek and Hebrew but have the ability to speak many more languages. +{% if tools or documents %} + +You have been trained to have advanced reasoning and tool-use capabilities and you should make best use of these skills to serve user's requests. + +## Tool Use +Think about how you can make best use of the provided tools to help with the task and come up with a high level plan that you will execute first. + +0. Start by writing <|START_THINKING|> followed by a detailed step by step plan of how you will solve the problem. For each step explain your thinking fully and give details of required tool calls (if needed). Unless specified otherwise, you write your plan in natural language. When you finish, close it out with <|END_THINKING|>. + You can optionally choose to skip this step when the user request is so straightforward to address that only a trivial plan would be needed. + NOTE: You MUST skip this step when you are directly responding to the user's request without using any tools. + +Then carry out your plan by repeatedly executing the following steps. +1. Action: write <|START_ACTION|> followed by a list of JSON-formatted tool calls, with each one containing "tool_name" and "parameters" fields. + When there are multiple tool calls which are completely independent of each other (i.e. they can be executed in parallel), you should list them out all together in one step. When you finish, close it out with <|END_ACTION|>. +2. Observation: you will then receive results of those tool calls in JSON format in the very next turn, wrapped around by <|START_TOOL_RESULT|> and <|END_TOOL_RESULT|>. Carefully observe those results and think about what to do next. Note that these results will be provided to you in a separate turn. NEVER hallucinate results. + Every tool call produces a list of results (when a tool call produces no result or a single result, it'll still get wrapped inside a list). Each result is clearly linked to its originating tool call via its "tool_call_id". +3. Reflection: start the next turn by writing <|START_THINKING|> followed by what you've figured out so far, any changes you need to make to your plan, and what you will do next. When you finish, close it out with <|END_THINKING|>. + You can optionally choose to skip this step when everything is going according to plan and no special pieces of information or reasoning chains need to be recorded. + NOTE: You MUST skip this step when you are done with tool-use actions and are ready to respond to the user. + +You can repeat the above 3 steps multiple times (could be 0 times too if no suitable tool calls are available or needed), until you decide it's time to finally respond to the user. + +4. Response: then break out of the loop and write <|START_RESPONSE|> followed by a piece of text which serves as a response to the user's last request. Use all previous tool calls and results to help you when formulating your response. When you finish, close it out with <|END_RESPONSE|>. +{% if enable_citations %} + +## Grounding +Importantly, note that "Reflection" and "Response" above can be grounded. +Grounding means you associate pieces of texts (called "spans") with those specific tool results that support them (called "sources"). And you use a pair of tags "" and "" to indicate when a span can be grounded onto a list of sources, listing them out in the closing tag. Sources from the same tool call are grouped together and listed as "{tool_call_id}:[{list of result indices}]", before they are joined together by ",". E.g., "span" means that "span" is supported by result 1 and 2 from "tool_call_id=0" as well as result 0 from "tool_call_id=1". +{% endif %} + +## Available Tools +Here is the list of tools that you have available to you. +You can ONLY use the tools listed here. When a tool is not listed below, it is NOT available and you should NEVER attempt to use it. +Each tool is represented as a JSON object with fields like "name", "description", "parameters" (per JSON Schema), and optionally, "responses" (per JSON Schema). + +```json +[ +{% if documents %} + {"name": "direct-injected-document", "description": "This is a special tool to directly inject user-uploaded documents into the chat as additional context. DO NOT use this tool by yourself!", "parameters": {"type": "object", "properties": {}, "required": []}, "responses": {"200": {"description": "Successfully returned a list of chunked text snippets from the directly uploaded documents.", "content": {"application/json": {"schema": {"type": "array", "items": {"type": "object", "required": ["url", "snippet"], "properties": {"url": {"type": "string", "description": "The url of the uploaded document."}, "snippet": {"type": "string", "description": "The text snippet for the returned document chunk."}}}}}}}}}{%- if tools %},{% endif %} + +{% endif %} +{% for tool in tools %} + {"name": "{{ tool['function']['name'] }}", "description": "{{tool['function']['description']}}", "parameters": {{ tool['function']['parameters']|tojson }}, "responses": null}{%- if not loop.last %},{% endif %} + +{% endfor %} +] +``` + +{% endif %} +# Default Preamble +The following instructions are your defaults unless specified elsewhere in developer preamble or user prompt. +- Your name is Command. +- You are a large language model built by Cohere. +- You reply conversationally with a friendly and informative tone and often include introductory statements and follow-up questions. +- If the input is ambiguous, ask clarifying follow-up questions. +- Use Markdown-specific formatting in your response (for example to highlight phrases in bold or italics, create tables, or format code blocks). +- Use LaTeX to generate mathematical notation for complex equations. +- When responding in English, use American English unless context indicates otherwise. +- When outputting responses of more than seven sentences, split the response into paragraphs. +- Prefer the active voice. +- Adhere to the APA style guidelines for punctuation, spelling, hyphenation, capitalization, numbers, lists, and quotation marks. Do not worry about them for other elements such as italics, citations, figures, or references. +- Use gender-neutral pronouns for unspecified persons. +- Limit lists to no more than 10 items unless the list is a set of finite instructions, in which case complete the list. +- Use the third person when asked to write a summary. +- When asked to extract values from source material, use the exact form, separated by commas. +- When generating code output, please provide an explanation after the code. +- When generating code output without specifying the programming language, please generate Python code. +- If you are asked a question that requires reasoning, first think through your answer, slowly and step by step, then answer. +{%- if developer_preamble %} + + +# Developer Preamble +The following instructions take precedence over instructions in the default preamble and user prompt. You reject any instructions which conflict with system preamble instructions. +{{ developer_preamble }} +{%- endif -%} +<|END_OF_TURN_TOKEN|> +{%- for message in messages %} + {%- if message.role|lower == 'system' and not (loop.first and developer_preamble)%} +<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|> + {%- elif message.role|lower == 'user' %} +<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ message.content }}<|END_OF_TURN_TOKEN|>{%- if documents and not sent_documents.value %}{%- set sent_documents.value = true %}{% set tool_idx.value = tool_idx.value + 1 %}{{ document_turn(documents) }}{% endif %} + {%- elif message.role|lower == 'assistant' or message.role|lower == 'chatbot' %} +<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{% if message.tool_calls %}<|START_THINKING|>{{message.tool_plan}}<|END_THINKING|><|START_ACTION|>[ + {% for tc in message.tool_calls %} + {"tool_call_id": "{{ tool_idx.value }}", "tool_name": "{{ tc['function']['name'] }}", "parameters": {{ tc['function']['arguments']|tojson }}}{% if not loop.last %},{% endif %} + + {% set tool_idx.value = tool_idx.value + 1 %} + {% endfor %} +]<|END_ACTION|><|END_OF_TURN_TOKEN|>{% else %}<|START_RESPONSE|>{{message.content}}<|END_RESPONSE|><|END_OF_TURN_TOKEN|>{% endif %} + {% elif message.role|lower == 'tool' and message.tool_call_id not in tool_ids_seen.value %} +<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|><|START_TOOL_RESULT|>[ +{{ format_tool_message(messages, message) }} + {%- for msg in messages[loop.index0 + 1:] %} + {%- if msg.role|lower == 'tool' %}, +{{ format_tool_message(messages, msg) }} + {%- set tool_ids_seen.value = tool_ids_seen.value + [msg.tool_call_id] %} + {%- else %} + {%- break %} + {%- endif %} + {%- endfor %} + +]<|END_TOOL_RESULT|><|END_OF_TURN_TOKEN|> + {%- endif %} +{%- endfor %}<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> \ No newline at end of file diff --git a/tests/test-chat.cpp b/tests/test-chat.cpp index ccc65d87a..9956c1f1f 100644 --- a/tests/test-chat.cpp +++ b/tests/test-chat.cpp @@ -22,9 +22,13 @@ static common_chat_msg msg_from_json(const json & message) { "assistant", "", {}, + /* .tool_plan = */ "", }; if (message.contains("content") && !message.at("content").is_null()) { - ret.content = message.at("content").get(); + ret.content = message.at("content"); + } + if (message.contains("tool_plan")) { + ret.tool_plan = message.at("tool_plan"); } auto has_tool_calls = message.contains("tool_calls"); if (has_tool_calls) { @@ -171,8 +175,7 @@ const json llama_3_1_tools = { special_function_tool, code_interpreter_too struct delta_data { std::string delta; - std::string grammar; - common_chat_format format; + common_chat_params params; }; static delta_data init_delta(const common_chat_template & tmpl, const std::vector & end_tokens, @@ -214,7 +217,7 @@ static delta_data init_delta(const common_chat_template & tmpl, const std::vecto break; } } - return { delta, params_full.grammar, params_full.format }; + return { delta, params_full }; } /* @@ -224,7 +227,7 @@ static delta_data init_delta(const common_chat_template & tmpl, const std::vecto */ static void test_template(const common_chat_template & tmpl, const std::vector & end_tokens, const json & test_message, const json & tools = {}, const std::string & expected_delta = "", - bool skip_grammar_test = false, bool skip_parser_test = false) { + bool expect_grammar_triggered = true) { common_chat_msg expected_msg = msg_from_json(test_message); auto user_message = json{ @@ -238,45 +241,110 @@ static void test_template(const common_chat_template & tmpl, const std::vector 0 && trigger.at_start) { + fprintf(stderr, "Trigger %s not at start of message, skipping:\n\n%s\n\n", trigger.word.c_str(), constrained.c_str()); + continue; + } + if (earliest_trigger_pos == std::string::npos || pos < earliest_trigger_pos) { + earliest_trigger_pos = pos; + } + } + auto grammar_triggered = false; + if (earliest_trigger_pos != std::string::npos) { + constrained = constrained.substr(earliest_trigger_pos); + grammar_triggered = true; + } + if (data.params.grammar_lazy) { + assert_equals(expect_grammar_triggered, grammar_triggered); + } + + if (grammar_triggered && !match_string(constrained, grammar.get())) { + throw std::runtime_error("Failed to match delta against grammar:\n\n" + data.delta + + "\n\nGrammar: " + data.params.grammar); } } } } static void test_template_output_parsers() { - auto text_message = json{ + json text_message { { "role", "assistant" }, { "content", "Hello, world!" }, }; - auto tool_call_message = json{ + json tool_calls = json::array({{ + { "type", "function" }, + { "function", { { "name", "special_function" }, { "arguments", "{\"arg1\": 1}" } } }, + }}); + + json tool_call_message { + { "role", "assistant"}, + { "content", {}}, + { "tool_calls", { + { + { "type", "function" }, + { "function", { + { "name", "special_function" }, + { "arguments", "{\"arg1\": 1}" }, + }}, + }, + }}, + }; + json tool_call_message_with_id { + { "role", "assistant"}, + { "content", {}}, + { "tool_calls", { + { + { "type", "function" }, + { "function", { + { "name", "special_function" }, + { "arguments", "{\"arg1\": 1}" }, + }}, + {"id", "123456789"}, + }, + }}, { "role", "assistant" }, { "content", {} }, - { "tool_calls", json{ { - { "type", "function" }, - { "function", { { "name", "special_function" }, { "arguments", "{\"arg1\": 1}" } } }, - } } } + { "tool_calls", tool_calls } + }; + json tool_call_plan_message_with_idx { + { "role", "assistant"}, + { "content", {}}, + { "tool_plan", "I'm not so sure"}, + { "tool_calls", { + { + { "type", "function" }, + { "function", { + { "name", "special_function" }, + { "arguments", "{\"arg1\": 1}" }, + }}, + // Index of the tool call in the tool_calls array + {"id", "0"}, + }, + }}, + { "role", "assistant" }, + { "content", {} }, + { "tool_calls", tool_calls } }; - auto tool_call_message_with_id = json::parse(tool_call_message.dump()); - tool_call_message_with_id["tool_calls"][0]["id"] = "123456789"; auto python_tool_call_message = json{ { "role", "assistant" }, @@ -322,6 +390,27 @@ static void test_template_output_parsers() { inputs_tools_builtin.tools = json::array(); inputs_tools_builtin.tools.push_back(python_tool); + { + // Not supported yet + const common_chat_template tmpl(read_file("models/templates/CohereForAI-c4ai-command-r-plus-tool_use.jinja"), "", ""); + assert_equals(COMMON_CHAT_FORMAT_GENERIC, common_chat_params_init(tmpl, inputs_tools).format); + } + { + const common_chat_template tmpl(read_file("models/templates/CohereForAI-c4ai-command-r7b-12-2024-tool_use.jinja"), "", ""); + std::vector end_tokens{ "<|END_OF_TURN_TOKEN|>" }; + + assert_equals(COMMON_CHAT_FORMAT_CONTENT_ONLY, common_chat_params_init(tmpl, inputs_no_tools).format); + assert_equals(COMMON_CHAT_FORMAT_COMMAND_R7B, common_chat_params_init(tmpl, inputs_tools).format); + + test_template(tmpl, end_tokens, tool_call_plan_message_with_idx, tools, + "<|START_THINKING|>I'm not so sure<|END_THINKING|>" + "<|START_ACTION|>[\n" + " {\"tool_call_id\": \"0\", \"tool_name\": \"special_function\", \"parameters\": {\"arg1\": 1}}\n" + "]<|END_ACTION|>"); + test_template(tmpl, end_tokens, text_message, tools, + "<|START_RESPONSE|>Hello, world!<|END_RESPONSE|>", + /* expect_grammar_triggered= */ false); + } { const common_chat_template tmpl(read_file("models/templates/google-gemma-2-2b-it.jinja"), "", ""); std::vector end_tokens{ "" }; @@ -362,11 +451,10 @@ static void test_template_output_parsers() { assert_equals(COMMON_CHAT_FORMAT_MISTRAL_NEMO, common_chat_params_init(tmpl, inputs_tools).format); - test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* expect_grammar_triggered= */ false); test_template( tmpl, end_tokens, tool_call_message_with_id, tools, - "[TOOL_CALLS][{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}, \"id\": \"123456789\"}]", - /* skip_grammar_test= */ true); + "[TOOL_CALLS][{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}, \"id\": \"123456789\"}]"); } { const common_chat_template tmpl( @@ -388,7 +476,7 @@ static void test_template_output_parsers() { inputs_tools) .format); - test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* expect_grammar_triggered= */ false); test_template(tmpl, end_tokens, tool_call_message, tools, "\n" "{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}}\n" @@ -413,7 +501,7 @@ static void test_template_output_parsers() { inputs_tools_builtin) .format); - // test_template(tmpl, end_tokens, text_message, tools, R"(?)", /* skip_grammar_test= */ true); + // test_template(tmpl, end_tokens, text_message, tools, R"(?)", /* expect_grammar_triggered= */ false); test_template(tmpl, end_tokens, code_interpreter_tool_call_message, llama_3_1_tools, "<|python_tag|>code_interpreter.call(code=\"print('hey')\")"); test_template(tmpl, end_tokens, python_tool_call_message, tools, @@ -428,7 +516,7 @@ static void test_template_output_parsers() { assert_equals(COMMON_CHAT_FORMAT_LLAMA_3_X, common_chat_params_init(tmpl, inputs_tools).format); - test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* expect_grammar_triggered= */ false); test_template(tmpl, end_tokens, tool_call_message, tools, "{\"name\": \"special_function\", \"parameters\": {\"arg1\": 1}}"); } @@ -440,7 +528,7 @@ static void test_template_output_parsers() { assert_equals(COMMON_CHAT_FORMAT_FUNCTIONARY_V3_1_LLAMA_3_1, common_chat_params_init(tmpl, inputs_tools).format); - test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* expect_grammar_triggered= */ false); test_template(tmpl, end_tokens, tool_call_message, tools, "{\"arg1\": 1}"); } @@ -455,7 +543,7 @@ static void test_template_output_parsers() { test_template(tmpl, end_tokens, text_message, {}, "all\n" "Hello, world!", - /* skip_grammar_test= */ true); + /* expect_grammar_triggered= */ false); test_template(tmpl, end_tokens, tool_call_message, tools, "special_function\n" "{\"arg1\": 1}"); @@ -467,7 +555,7 @@ static void test_template_output_parsers() { assert_equals(COMMON_CHAT_FORMAT_FIREFUNCTION_V2, common_chat_params_init(tmpl, inputs_tools).format); - test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* expect_grammar_triggered= */ false); test_template(tmpl, end_tokens, tool_call_message, tools, " functools[{\"name\": \"special_function\", \"arguments\": {\"arg1\": 1}}]"); } @@ -478,7 +566,7 @@ static void test_template_output_parsers() { assert_equals(COMMON_CHAT_FORMAT_DEEPSEEK_R1, common_chat_params_init(tmpl, inputs_tools).format); - test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* skip_grammar_test= */ true); + test_template(tmpl, end_tokens, text_message, tools, "Hello, world!", /* expect_grammar_triggered= */ false); test_template(tmpl, end_tokens, tool_call_message, tools, "<|tool▁calls▁begin|><|tool▁call▁begin|>function<|tool▁sep|>special_function\n" "```json\n" From 84ec8a58f7b6aad6887bbfbd1321f3ff417341a5 Mon Sep 17 00:00:00 2001 From: Eric Curtin Date: Sun, 2 Feb 2025 16:14:48 +0100 Subject: [PATCH 218/279] Name colors (#11573) It's more descriptive, use #define's so we can use compile-time concatenations. Signed-off-by: Eric Curtin --- common/log.cpp | 10 ---------- common/log.h | 10 ++++++++++ examples/run/run.cpp | 15 ++++++++------- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/common/log.cpp b/common/log.cpp index 0b8994ae1..4bfbecf15 100644 --- a/common/log.cpp +++ b/common/log.cpp @@ -14,16 +14,6 @@ void common_log_set_verbosity_thold(int verbosity) { common_log_verbosity_thold = verbosity; } -#define LOG_COL_DEFAULT "\033[0m" -#define LOG_COL_BOLD "\033[1m" -#define LOG_COL_RED "\033[31m" -#define LOG_COL_GREEN "\033[32m" -#define LOG_COL_YELLOW "\033[33m" -#define LOG_COL_BLUE "\033[34m" -#define LOG_COL_MAGENTA "\033[35m" -#define LOG_COL_CYAN "\033[36m" -#define LOG_COL_WHITE "\033[37m" - static int64_t t_us() { return std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); } diff --git a/common/log.h b/common/log.h index 66605cc69..85dd4393b 100644 --- a/common/log.h +++ b/common/log.h @@ -2,6 +2,16 @@ #include "ggml.h" // for ggml_log_level +#define LOG_COL_DEFAULT "\033[0m" +#define LOG_COL_BOLD "\033[1m" +#define LOG_COL_RED "\033[31m" +#define LOG_COL_GREEN "\033[32m" +#define LOG_COL_YELLOW "\033[33m" +#define LOG_COL_BLUE "\033[34m" +#define LOG_COL_MAGENTA "\033[35m" +#define LOG_COL_CYAN "\033[36m" +#define LOG_COL_WHITE "\033[37m" + #ifndef __GNUC__ # define LOG_ATTRIBUTE_FORMAT(...) #elif defined(__MINGW32__) diff --git a/examples/run/run.cpp b/examples/run/run.cpp index cf61f4add..ca9273155 100644 --- a/examples/run/run.cpp +++ b/examples/run/run.cpp @@ -24,15 +24,16 @@ #include #include +#include "chat-template.hpp" #include "common.h" #include "json.hpp" #include "linenoise.cpp/linenoise.h" #include "llama-cpp.h" -#include "chat-template.hpp" +#include "log.h" #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) || defined(_WIN32) [[noreturn]] static void sigint_handler(int) { - printf("\n\033[0m"); + printf("\n" LOG_COL_DEFAULT); exit(0); // not ideal, but it's the only way to guarantee exit in all cases } #endif @@ -890,7 +891,7 @@ static int check_context_size(const llama_context_ptr & ctx, const llama_batch & const int n_ctx = llama_n_ctx(ctx.get()); const int n_ctx_used = llama_get_kv_cache_used_cells(ctx.get()); if (n_ctx_used + batch.n_tokens > n_ctx) { - printf("\033[0m\n"); + printf(LOG_COL_DEFAULT "\n"); printe("context size exceeded\n"); return 1; } @@ -953,7 +954,7 @@ static int generate(LlamaData & llama_data, const std::string & prompt, std::str batch = llama_batch_get_one(&new_token_id, 1); } - printf("\033[0m"); + printf(LOG_COL_DEFAULT); return 0; } @@ -962,7 +963,7 @@ static int read_user_input(std::string & user_input) { #ifdef WIN32 printf( "\r%*s" - "\r\033[0m%s", + "\r" LOG_COL_DEFAULT "%s", get_terminal_width(), " ", prompt_prefix); std::getline(std::cin, user_input); @@ -999,7 +1000,7 @@ static int generate_response(LlamaData & llama_data, const std::string & prompt, const bool stdout_a_terminal) { // Set response color if (stdout_a_terminal) { - printf("\033[33m"); + printf(LOG_COL_YELLOW); } if (generate(llama_data, prompt, response)) { @@ -1008,7 +1009,7 @@ static int generate_response(LlamaData & llama_data, const std::string & prompt, } // End response with color reset and newline - printf("\n%s", stdout_a_terminal ? "\033[0m" : ""); + printf("\n%s", stdout_a_terminal ? LOG_COL_DEFAULT : ""); return 0; } From 864a0b67a6c8f648c43ce8271f9cb2e12dd5df6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Sun, 2 Feb 2025 19:31:09 +0100 Subject: [PATCH 219/279] CUDA: use mma PTX instructions for FlashAttention (#11583) * CUDA: use mma PTX instructions for FlashAttention * __shfl_sync workaround for movmatrix * add __shfl_sync to HIP Co-authored-by: Diego Devesa --- Makefile | 2 +- ggml/include/ggml.h | 2 +- ggml/src/ggml-cuda/CMakeLists.txt | 2 +- ggml/src/ggml-cuda/common.cuh | 6 +- ggml/src/ggml-cuda/fattn-common.cuh | 181 ++++- ggml/src/ggml-cuda/fattn-mma-f16.cuh | 637 +++++++++++++++++ ggml/src/ggml-cuda/fattn-tile-f16.cu | 24 +- ggml/src/ggml-cuda/fattn-tile-f32.cu | 19 +- ggml/src/ggml-cuda/fattn-vec-f16.cuh | 9 +- ggml/src/ggml-cuda/fattn-vec-f32.cuh | 8 +- ggml/src/ggml-cuda/fattn-wmma-f16.cu | 648 ++++++++++++++++++ ggml/src/ggml-cuda/fattn-wmma-f16.cuh | 542 +-------------- ggml/src/ggml-cuda/fattn.cu | 174 ++--- ggml/src/ggml-cuda/mma.cuh | 335 +++++++-- ggml/src/ggml-cuda/mmq.cu | 2 +- ggml/src/ggml-cuda/mmq.cuh | 349 +++++----- .../fattn-mma-f16-instance-cpb16.cu | 10 + .../fattn-mma-f16-instance-cpb32.cu | 10 + .../fattn-mma-f16-instance-cpb64.cu | 10 + .../fattn-mma-f16-instance-cpb8.cu | 10 + .../fattn-wmma-f16-instance-kqfloat-cpb16.cu | 10 - .../fattn-wmma-f16-instance-kqfloat-cpb32.cu | 9 - .../fattn-wmma-f16-instance-kqhalf-cpb16.cu | 10 - .../fattn-wmma-f16-instance-kqhalf-cpb32.cu | 10 - .../fattn-wmma-f16-instance-kqhalf-cpb8.cu | 8 - .../template-instances/generate_cu_files.py | 24 +- ggml/src/ggml-cuda/vendors/hip.h | 1 + ggml/src/ggml-hip/CMakeLists.txt | 2 +- ggml/src/ggml-musa/CMakeLists.txt | 2 +- 29 files changed, 2058 insertions(+), 998 deletions(-) create mode 100644 ggml/src/ggml-cuda/fattn-mma-f16.cuh create mode 100644 ggml/src/ggml-cuda/fattn-wmma-f16.cu create mode 100644 ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb16.cu create mode 100644 ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb32.cu create mode 100644 ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb64.cu create mode 100644 ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb8.cu delete mode 100644 ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu delete mode 100644 ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu delete mode 100644 ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu delete mode 100644 ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu delete mode 100644 ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu diff --git a/Makefile b/Makefile index ef152d246..dc3de3cb1 100644 --- a/Makefile +++ b/Makefile @@ -596,7 +596,7 @@ ifdef GGML_RPC OBJ_GGML_EXT += ggml/src/ggml-rpc.o endif # GGML_RPC -OBJ_CUDA_TMPL = $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/template-instances/fattn-wmma*.cu)) +OBJ_CUDA_TMPL = $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/template-instances/fattn-mma*.cu)) OBJ_CUDA_TMPL += $(patsubst %.cu,%.o,$(wildcard ggml/src/ggml-cuda/template-instances/mmq*.cu)) ifdef GGML_CUDA_FA_ALL_QUANTS diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 1198dc1fd..5bd8d9c8b 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -1775,7 +1775,7 @@ extern "C" { struct ggml_tensor * a, int k); -#define GGML_KQ_MASK_PAD 32 +#define GGML_KQ_MASK_PAD 64 // q: [n_embd, n_batch, n_head, 1] // k: [n_embd, n_kv, n_head_kv, 1] diff --git a/ggml/src/ggml-cuda/CMakeLists.txt b/ggml/src/ggml-cuda/CMakeLists.txt index 14761650f..119fd39b8 100644 --- a/ggml/src/ggml-cuda/CMakeLists.txt +++ b/ggml/src/ggml-cuda/CMakeLists.txt @@ -28,7 +28,7 @@ if (CUDAToolkit_FOUND) list(APPEND GGML_HEADERS_CUDA "../../include/ggml-cuda.h") file(GLOB GGML_SOURCES_CUDA "*.cu") - file(GLOB SRCS "template-instances/fattn-wmma*.cu") + file(GLOB SRCS "template-instances/fattn-mma*.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) file(GLOB SRCS "template-instances/mmq*.cu") list(APPEND GGML_SOURCES_CUDA ${SRCS}) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 8d8d3932e..88be8fc8a 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -148,7 +148,7 @@ typedef float2 dfloat2; #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING -#define INT8_MMA_AVAILABLE +#define NEW_MMA_AVAILABLE #endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= GGML_CUDA_CC_TURING #if !(defined(GGML_USE_MUSA) && __MUSA_ARCH__ <= GGML_CUDA_CC_QY1) @@ -159,11 +159,13 @@ static constexpr bool fast_fp16_available(const int cc) { return cc >= GGML_CUDA_CC_PASCAL && cc != 610; } +// Any FP16 tensor cores are available. static constexpr bool fp16_mma_available(const int cc) { return cc < GGML_CUDA_CC_OFFSET_AMD && cc >= GGML_CUDA_CC_VOLTA; } -static constexpr bool int8_mma_available(const int cc) { +// Volta technically had FP16 tensor cores but they work very differently compared to Turing and later. +static constexpr bool new_mma_available(const int cc) { return cc < GGML_CUDA_CC_OFFSET_AMD && cc >= GGML_CUDA_CC_TURING; } diff --git a/ggml/src/ggml-cuda/fattn-common.cuh b/ggml/src/ggml-cuda/fattn-common.cuh index ee9752da6..cfd7c0f44 100644 --- a/ggml/src/ggml-cuda/fattn-common.cuh +++ b/ggml/src/ggml-cuda/fattn-common.cuh @@ -516,6 +516,104 @@ constexpr __device__ dequantize_1_f32_t get_dequantize_1_f32(ggml_type type_V) { nullptr; } +template // D == head size +#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(D, 1) +#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_stream_k_fixup( + float * __restrict__ dst, const float2 * __restrict__ dst_fixup, const int ne01, const int ne02, const int ne11) { + const float * dst_fixup_data = ((const float *) dst_fixup) + gridDim.x*(2*2*ncols); + + const int iter_k = ne11 / KQ_stride; + const int iter_j = (ne01 + (ncols - 1)) / ncols; + + const int bidx0 = blockIdx.x; + + const int kbc0 = (bidx0 + 0)*iter_k*iter_j*ne02 / gridDim.x; + const int kbc0_stop = (bidx0 + 1)*iter_k*iter_j*ne02 / gridDim.x; + + const bool did_not_have_any_data = kbc0 == kbc0_stop; + const bool wrote_beginning_of_tile = kbc0 % iter_k == 0; + const bool did_not_write_last = kbc0/iter_k == kbc0_stop/iter_k && kbc0_stop % iter_k != 0; + if (did_not_have_any_data || wrote_beginning_of_tile || did_not_write_last) { + return; + } + + const int channel = kbc0 / (iter_k*iter_j); + const int jt = (kbc0 - channel*iter_k*iter_j) / iter_k; + + dst += jt*ncols*ne02*D + channel*D; + + // Load the partial result that needs a fixup: + float dst_val[ncols] = {0.0f}; + float max_val[ncols] = {0.0f}; + float rowsum[ncols] = {0.0f}; +#pragma unroll + for (int j = 0; j < ncols; ++j) { + if (jt*ncols + j >= ne01) { + break; + } + dst_val[j] = dst[j*ne02*D + threadIdx.x]; + + const float2 tmp = dst_fixup[bidx0*ncols + j]; + max_val[j] = tmp.x; + rowsum[j] = tmp.y; + } + + // Iterate over previous blocks and compute the combined results. + // All CUDA blocks that get here must have a previous block that needs a fixup. + int bidx = bidx0 - 1; + int kbc_stop = kbc0; + while(true) { + const int kbc = bidx*iter_k*iter_j*ne02 / gridDim.x; + if (kbc == kbc_stop) { // Did not have any data. + bidx--; + kbc_stop = kbc; + continue; + } + +#pragma unroll + for (int j = 0; j < ncols; ++j) { + if (jt*ncols + j >= ne01) { + break; + } + const float dst_add = dst_fixup_data[bidx*ncols*D + j*D + threadIdx.x]; + + const float2 tmp = dst_fixup[(gridDim.x + bidx)*ncols + j]; + + // Scale the current and new value accumulators depending on the max. values. + const float max_val_new = fmaxf(max_val[j], tmp.x); + + const float diff_val = max_val[j] - max_val_new; + const float diff_add = tmp.x - max_val_new; + + const float scale_val = diff_val >= SOFTMAX_FTZ_THRESHOLD ? expf(diff_val) : 0.0f; + const float scale_add = diff_add >= SOFTMAX_FTZ_THRESHOLD ? expf(diff_add) : 0.0f; + + dst_val[j] = scale_val*dst_val[j] + scale_add*dst_add; + rowsum[j] = scale_val*rowsum[j] + scale_add*tmp.y; + + max_val[j] = max_val_new; + } + + // If this block started in a previous tile we are done and don't need to combine additional partial results. + if (kbc % iter_k == 0 || kbc/iter_k < kbc0/iter_k) { + break; + } + bidx--; + kbc_stop = kbc; + } + + // Write back final result: +#pragma unroll + for (int j = 0; j < ncols; ++j) { + if (jt*ncols + j >= ne01) { + return; + } + dst[j*ne02*D + threadIdx.x] = dst_val[j] / rowsum[j]; + } +} + template // D == head size #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) __launch_bounds__(D, 1) @@ -581,10 +679,11 @@ static void on_no_fattn_vec_case(const int D) { } } -template +// parallel_blocks == 0 is stream-k decomposition +template void launch_fattn( ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kernel_t fattn_kernel, - const int nwarps, const int cols_per_block, const bool need_f16_K, const bool need_f16_V + const int nwarps, const size_t nbytes_shared, const bool need_f16_K, const bool need_f16_V ) { const ggml_tensor * Q = dst->src[0]; const ggml_tensor * K = dst->src[1]; @@ -603,20 +702,23 @@ void launch_fattn( GGML_ASSERT(K->ne[1] % FATTN_KQ_STRIDE == 0 && "Incorrect KV cache padding."); + GGML_ASSERT(Q->ne[3] == 1); + ggml_cuda_pool & pool = ctx.pool(); cudaStream_t main_stream = ctx.stream(); + const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm; ggml_cuda_pool_alloc K_f16(pool); ggml_cuda_pool_alloc V_f16(pool); ggml_cuda_pool_alloc dst_tmp(pool); ggml_cuda_pool_alloc dst_tmp_meta(pool); - char * K_data = (char *) K->data; + const char * K_data = (const char *) K->data; size_t nb11 = K->nb[1]; size_t nb12 = K->nb[2]; size_t nb13 = K->nb[3]; - char * V_data = (char *) V->data; + const char * V_data = (const char *) V->data; size_t nb21 = V->nb[1]; size_t nb22 = V->nb[2]; size_t nb23 = V->nb[3]; @@ -649,39 +751,60 @@ void launch_fattn( nb23 = nb23*bs*sizeof(half)/ts; } - if (parallel_blocks > 1) { - dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV)); - dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV)); - } + const int ntiles_x = ((Q->ne[1] + cols_per_block - 1) / cols_per_block); + const int ntiles_total = ntiles_x*Q->ne[2]*Q->ne[3]; const dim3 block_dim(WARP_SIZE, nwarps, 1); - const dim3 blocks_num(parallel_blocks*((Q->ne[1] + cols_per_block - 1) / cols_per_block), Q->ne[2], Q->ne[3]); - const int shmem = 0; + dim3 blocks_num; + if (parallel_blocks == 0) { + // For short contexts it can be faster to have the SMs work on whole tiles because this lets us skip the fixup. + const int tiles_nwaves = (ntiles_total - nsm - 1) / nsm; + const bool tiles_inefficient = 3*nsm < 2*tiles_nwaves*ntiles_total; + const bool short_context = K->ne[1] < 4096; + + const int nblocks_stream_k = 2*nsm; + + blocks_num.x = short_context && !tiles_inefficient ? ntiles_total : nblocks_stream_k; + blocks_num.y = 1; + blocks_num.z = 1; + + dst_tmp_meta.alloc(blocks_num.x*cols_per_block * (2*2 + D) * sizeof(float)); + } else { + blocks_num.x = parallel_blocks*ntiles_x; + blocks_num.y = Q->ne[2]; + blocks_num.z = Q->ne[3]; + + if (parallel_blocks > 1) { + dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV)); + dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV)); + } + } + float scale = 1.0f; float max_bias = 0.0f; float logit_softcap = 0.0f; - memcpy(&scale, (float *) KQV->op_params + 0, sizeof(float)); - memcpy(&max_bias, (float *) KQV->op_params + 1, sizeof(float)); - memcpy(&logit_softcap, (float *) KQV->op_params + 2, sizeof(float)); + memcpy(&scale, (const float *) KQV->op_params + 0, sizeof(float)); + memcpy(&max_bias, (const float *) KQV->op_params + 1, sizeof(float)); + memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); if (logit_softcap != 0.0f) { scale /= logit_softcap; } const uint32_t n_head = Q->ne[2]; - const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); + const uint32_t n_head_log2 = 1u << uint32_t(floorf(log2f(float(n_head)))); const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - fattn_kernel<<>>( + fattn_kernel<<>>( (const char *) Q->data, K_data, V_data, mask ? ((const char *) mask->data) : nullptr, - (parallel_blocks) == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr, + (parallel_blocks) > 1 ? dst_tmp.ptr : (float *) KQV->data, dst_tmp_meta.ptr, scale, max_bias, m0, m1, n_head_log2, logit_softcap, Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3], K->ne[0], K->ne[1], K->ne[2], K->ne[3], @@ -693,16 +816,22 @@ void launch_fattn( ); CUDA_CHECK(cudaGetLastError()); - if ((parallel_blocks) == 1) { - return; + if constexpr (parallel_blocks == 0) { + if (blocks_num.x % ntiles_total != 0) { // Fixup is only needed if the SMs work on fractional tiles. + const dim3 block_dim_combine(D, 1, 1); + const dim3 blocks_num_combine = blocks_num; + + flash_attn_stream_k_fixup + <<>> + ((float *) KQV->data, dst_tmp_meta.ptr, Q->ne[1], Q->ne[2], K->ne[1]); + } + } else if constexpr (parallel_blocks > 1) { + const dim3 block_dim_combine(D, 1, 1); + const dim3 blocks_num_combine(Q->ne[1], blocks_num.y, blocks_num.z); + + flash_attn_combine_results + <<>> + (dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data); } - - const dim3 block_dim_combine(D, 1, 1); - const dim3 blocks_num_combine(Q->ne[1], blocks_num.y, blocks_num.z); - const int shmem_combine = 0; - - flash_attn_combine_results - <<>> - (dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data); CUDA_CHECK(cudaGetLastError()); } diff --git a/ggml/src/ggml-cuda/fattn-mma-f16.cuh b/ggml/src/ggml-cuda/fattn-mma-f16.cuh new file mode 100644 index 000000000..05bc91a3b --- /dev/null +++ b/ggml/src/ggml-cuda/fattn-mma-f16.cuh @@ -0,0 +1,637 @@ +#include "common.cuh" +#include "mma.cuh" +#include "fattn-common.cuh" + +template +static __device__ __forceinline__ void flash_attn_ext_f16_process_tile( + const float2 * const __restrict__ Q_f2, + const half2 * const __restrict__ K_h2, + const half2 * const __restrict__ V_h2, + const half * const __restrict__ maskh, + float2 * const __restrict__ dstk, + float2 * const __restrict__ dstk_fixup, + const float scale, + const float slope, + const float logit_softcap, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int nb21, + const int nb22, + const int nb23, + const int ne0, + const int ne1, + const int ne2, + const int ne3, + const int jt, + const int kb0_start, + const int kb0_stop) { +#ifdef NEW_MMA_AVAILABLE + //In this kernel Q, K, V are matrices while i, j, k are matrix indices. + + typedef mma_A_I16K8 mma_A; + typedef mma_B_J8K8 mma_B; + typedef mma_C_I16J8 mma_C_KQ; + typedef mma_C_I16J8 mma_C_VKQ; + + static_assert(nwarps*mma_B::J % ncols == 0, "bad nwarps"); + constexpr int np = nwarps*mma_B::J / ncols; // Number of parallel CUDA warps per Q column. + + static_assert(D % nwarps == 0, "bad D"); + static_assert(KQ_stride % nwarps == 0, "bad KQ_stride"); + + constexpr int D2_padded = D/2 + 4; // Size of D in half2, padded to avoid shared memory bank conflicts. + extern __shared__ half2 tile_KV[]; // Temporary shared buffer for loading K/V data with KQ_stride*D logical elements. + + const int stride_Q = nb01 / sizeof(float2); + const int stride_KV = nb11 / sizeof(half2); + const int stride_mask = nb31 / sizeof(half); + + mma_B Q_B[D/(2*mma_B::K)]; + mma_C_VKQ VKQ_C[D/mma_C_VKQ::I]; + + float2 KQ_rowsum = {0.0f, 0.0f}; + float2 KQ_max = {-FLT_MAX/2.0f, -FLT_MAX/2.0f}; + float2 KQ_max_scale = {0.0f, 0.0f}; + + // Temporarily load Q data into tile_KV, will be loaded into registers afterwards. + // The loading is done with decreasing granularity for D for better memory bandwidth. + const half2 scale_h2 = make_half2(scale, scale); +#pragma unroll + for (int stride_k : {WARP_SIZE, WARP_SIZE/2, WARP_SIZE/4}) { + const int k0_start = stride_k == WARP_SIZE ? 0 : D/2 - (D/2) % (2*stride_k); + const int k0_stop = D/2 - (D/2) % (1*stride_k); + const int stride_j = WARP_SIZE / stride_k; + + if (nwarps*stride_j > ncols && threadIdx.y*stride_j >= ncols) { + break; + } + +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps*stride_j) { + const int j = j0 + threadIdx.y*stride_j + (stride_k == WARP_SIZE ? 0 : threadIdx.x / stride_k); + + if (jt*ncols + j < ne01) { +#pragma unroll + for (int k0 = k0_start; k0 < k0_stop; k0 += stride_k) { + const int k = k0 + (stride_k == WARP_SIZE ? threadIdx.x : threadIdx.x % stride_k); + + const float2 tmp = Q_f2[(jt*ncols + j)*stride_Q + k]; + tile_KV[j*D2_padded + k] = scale_h2 * make_half2(tmp.x, tmp.y); + } + } else { +#pragma unroll + for (int k0 = k0_start; k0 < k0_stop; k0 += stride_k) { + const int k = k0 + (stride_k == WARP_SIZE ? threadIdx.x : threadIdx.x % stride_k); + + tile_KV[j*D2_padded + k] = make_half2(0.0f, 0.0f); + } + } + } + } + + __syncthreads(); + + { + const int j0 = (threadIdx.y / np) * mma_B::J; + +#pragma unroll + for (int k0 = 0; k0 < D/2; k0 += mma_B::K) { + Q_B[k0/mma_B::K].load_ldmatrix(tile_KV + j0*D2_padded + k0, D2_padded); + } + } + + __syncthreads(); + + // Iterate over ne11 == previous tokens: + for (int kb0 = kb0_start; kb0 < kb0_stop; ++kb0) { + const int k_VKQ_0 = kb0*KQ_stride; + mma_C_KQ KQ_C[KQ_stride/(np*mma_C_KQ::I)]; + + // Load K data into tile with decreasing granularity for D for better memory bandwidth: + static_assert(KQ_stride % (4*nwarps) == 0, "out of bounds"); +#pragma unroll + for (int stride_k : {WARP_SIZE, WARP_SIZE/2, WARP_SIZE/4}) { + const int k0_start = stride_k == WARP_SIZE ? 0 : D/2 - (D/2) % (2*stride_k); + const int k0_stop = D/2 - (D/2) % (1*stride_k); + const int stride_i = WARP_SIZE / stride_k; + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < KQ_stride; i_KQ_0 += nwarps*stride_i) { + const int i_KQ = i_KQ_0 + threadIdx.y*stride_i + (stride_k == WARP_SIZE ? 0 : threadIdx.x / stride_k); + +#pragma unroll + for (int k_KQ_0 = k0_start; k_KQ_0 < k0_stop; k_KQ_0 += stride_k) { + const int k_KQ = k_KQ_0 + (stride_k == WARP_SIZE ? threadIdx.x : threadIdx.x % stride_k); + + tile_KV[i_KQ*D2_padded + k_KQ] = K_h2[(k_VKQ_0 + i_KQ)*stride_KV + k_KQ]; + } + } + } + + __syncthreads(); + + // Calculate tile of KQ: +#pragma unroll + for (int i_KQ_00 = 0; i_KQ_00 < KQ_stride; i_KQ_00 += np*mma_A::I) { + const int i_KQ_0 = i_KQ_00 + (threadIdx.y % np)*mma_A::I; +#pragma unroll + for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += mma_A::K) { + mma_A K_A; + K_A.load_ldmatrix(tile_KV + i_KQ_0*D2_padded + k_KQ_0, D2_padded); + KQ_C[i_KQ_00/(np*mma_A::I)].mma(K_A, Q_B[k_KQ_0/mma_A::K]); + } + } + + __syncthreads(); + + if (use_logit_softcap) { + static_assert(KQ_stride % (np*mma_C_KQ::I) == 0, "bad loop size"); +#pragma unroll + for (int i = 0; i < KQ_stride/(np*mma_C_KQ::I); ++i) { +#pragma unroll + for (int l = 0; l < mma_C_KQ::ne; ++l) { + KQ_C[i].x[l] = logit_softcap*tanhf(KQ_C[i].x[l]); + } + } + } + + if (maskh) { + static_assert(KQ_stride % (np *mma_C_KQ::I) == 0, "bad loop size"); + static_assert(ncols % (nwarps/np*mma_C_KQ::J) == 0, "bad loop size"); +#pragma unroll + for (int i00 = 0; i00 < KQ_stride; i00 += np*mma_C_KQ::I) { + const int i0 = i00 + (threadIdx.y % np)*mma_C_KQ::I; +#pragma unroll + for (int l = 0; l < mma_C_KQ::ne; ++l) { + const int i = i0 + mma_C_KQ::get_i(l); + const int j = (threadIdx.y / np)*mma_C_KQ::J + mma_C_KQ::get_j(l); + + KQ_C[i00/(np*mma_C_KQ::I)].x[l] += slope*__half2float(maskh[j*stride_mask + k_VKQ_0 + i]); + } + } + } + + // Calculate softmax for each KQ column using the current max. value. + // The divisor is stored in KQ_rowsum and will be applied at the end. + float2 KQ_max_new = KQ_max; + static_assert(KQ_stride % (np*mma_C_KQ::I) == 0, "bad loop size"); +#pragma unroll + for (int k = 0; k < KQ_stride/(np*mma_C_KQ::I); ++k) { +#pragma unroll + for (int l0 = 0; l0 < mma_C_KQ::ne; l0 += 2) { + KQ_max_new.x = fmaxf(KQ_max_new.x, KQ_C[k].x[l0 + 0]); + KQ_max_new.y = fmaxf(KQ_max_new.y, KQ_C[k].x[l0 + 1]); + } + } + + // Values per KQ column are spread across 8 threads, does not need full warp reduce: +#pragma unroll + for (int offset = 16; offset > 2; offset >>= 1) { + KQ_max_new.x = fmaxf(KQ_max_new.x, __shfl_xor_sync(0xFFFFFFFF, KQ_max_new.x, offset, WARP_SIZE)); + KQ_max_new.y = fmaxf(KQ_max_new.y, __shfl_xor_sync(0xFFFFFFFF, KQ_max_new.y, offset, WARP_SIZE)); + } + + { + const float2 diff = make_float2(KQ_max.x - KQ_max_new.x, KQ_max.y - KQ_max_new.y); + KQ_max_scale = make_float2(expf(diff.x), expf(diff.y)); + if (diff.x <= SOFTMAX_FTZ_THRESHOLD) { + KQ_max_scale.x = 0.0f; + } + if (diff.y <= SOFTMAX_FTZ_THRESHOLD) { + KQ_max_scale.y = 0.0f; + } + KQ_max = KQ_max_new; + } + + float2 KQ_rowsum_add = make_float2(0.0f, 0.0f); + static_assert(KQ_stride % (np*mma_C_KQ::I) == 0, "bad loop size"); +#pragma unroll + for (int k = 0; k < KQ_stride/(np*mma_C_KQ::I); ++k) { +#pragma unroll + for (int l = 0; l < mma_C_KQ::ne; ++l) { + const float KQ_max_l = l % 2 == 0 ? KQ_max.x : KQ_max.y; + const float diff = KQ_C[k].x[l] - KQ_max_l; + KQ_C[k].x[l] = expf(diff); + if (diff <= SOFTMAX_FTZ_THRESHOLD) { + KQ_C[k].x[l] = 0.0f; + } + + if (l % 2 == 0) { + KQ_rowsum_add.x += KQ_C[k].x[l]; + } else { + KQ_rowsum_add.y += KQ_C[k].x[l]; + } + } + } + + // Scale previous KQ_rowsum to account for a potential increase in KQ_max: + KQ_rowsum.x = KQ_max_scale.x*KQ_rowsum.x + KQ_rowsum_add.x; + KQ_rowsum.y = KQ_max_scale.y*KQ_rowsum.y + KQ_rowsum_add.y; + + const half2 KQ_max_scale_h2 = make_half2(KQ_max_scale.x, KQ_max_scale.y); +#pragma unroll + for (int i = 0; i < D/mma_C_VKQ::I; ++i) { +#pragma unroll + for (int l = 0; l < mma_C_VKQ::ne; ++l) { + VKQ_C[i].x[l] *= KQ_max_scale_h2; + } + } + + // Convert KQ C tiles into B tiles for VKQ calculation: + mma_B B[KQ_stride/(np*2*mma_B::K)]; + static_assert(KQ_stride % (np*2*mma_B::K) == 0, "bad loop size"); +#pragma unroll + for (int k = 0; k < KQ_stride/(np*2*mma_B::K); ++k) { + B[k] = KQ_C[k].to_mma_B(); + } + + // Load V data into tile with decreasing granularity for D for better memory bandwidth: + static_assert(KQ_stride % (4*nwarps) == 0, "out of bounds"); +#pragma unroll + for (int stride_i : {WARP_SIZE, WARP_SIZE/2, WARP_SIZE/4}) { + const int i0_start = stride_i == WARP_SIZE ? 0 : D/2 - (D/2) % (2*stride_i); + const int i0_stop = D/2 - (D/2) % (1*stride_i); + const int stride_k = WARP_SIZE / stride_i; + +#pragma unroll + for (int k_V_0 = 0; k_V_0 < KQ_stride; k_V_0 += nwarps*stride_k) { + const int k_V = k_V_0 + threadIdx.y*stride_k + (stride_i == WARP_SIZE ? 0 : threadIdx.x / stride_i); + +#pragma unroll + for (int i_V_0 = i0_start; i_V_0 < i0_stop; i_V_0 += stride_i) { + const int i_V = i_V_0 + (stride_i == WARP_SIZE ? threadIdx.x : threadIdx.x % stride_i); + + tile_KV[k_V*D2_padded + i_V] = V_h2[(k_VKQ_0 + k_V)*stride_KV + i_V]; + } + } + } + + __syncthreads(); + + // Calculate VKQ tile: +#pragma unroll + for (int i_VKQ_0 = 0; i_VKQ_0 < D; i_VKQ_0 += mma_C_VKQ::I) { + static_assert((KQ_stride/2) % (np*mma_A::K) == 0, "bad loop size"); +#pragma unroll + for (int k00 = 0; k00 < KQ_stride/2; k00 += np*mma_A::K) { + const int k0 = k00 + (threadIdx.y % np)*mma_A::K; + + mma_A A; + A.load_ldmatrix_trans(tile_KV + 2*k0*D2_padded + i_VKQ_0/2, D2_padded); + VKQ_C[i_VKQ_0/mma_C_VKQ::I].mma(A, B[k00/(np*mma_A::K)]); + } + } + + __syncthreads(); + } + + // Finally, sum up partial KQ rowsums. + // The partial sums are spread across 8 threads each, does not need full reduce. +#pragma unroll + for (int offset = 16; offset > 2; offset >>= 1) { + KQ_rowsum.x += __shfl_xor_sync(0xFFFFFFFF, KQ_rowsum.x, offset, WARP_SIZE); + KQ_rowsum.y += __shfl_xor_sync(0xFFFFFFFF, KQ_rowsum.y, offset, WARP_SIZE); + } + + // Write VKQ accumulators to shared memory in column-major format. + // It's faster to do small writes to shared memory, then large write to VRAM than to do small writes to VRAM. + // Also for np > 1 the combination is done via these values in shared memory. + const int j_cwd = threadIdx.y*mma_B::J + mma_B::get_j(-1); // j combine write data +#pragma unroll + for (int k0 = 0; k0 < D/2; k0 += mma_B::K) { + const mma_B B = VKQ_C[k0/mma_B::K].to_mma_B(); // Conversion of C to B matrix puts it in column-major format. + +#pragma unroll + for (int l = 0; l < mma_B::ne; ++l) { + const int k = k0 + mma_B::get_k(l); + + tile_KV[j_cwd*D2_padded + k] = B.x[l]; + } + } + + const int j_cwmo = (threadIdx.x % (2*mma_C_VKQ::J)) / mma_C_VKQ::J; // j combine write meta offset + const int j_cwm = threadIdx.y*(2*mma_C_VKQ::J) + 2*mma_C_VKQ::get_j(-1) + j_cwmo; // j combine write meta + const float2 KQ_cmr = make_float2(((const float *) &KQ_max)[j_cwmo], ((const float *) &KQ_rowsum)[j_cwmo]); // KQ combine max rowsum + + if (((!needs_fixup && !is_fixup) || np > 1) && threadIdx.x < 2*mma_C_VKQ::J) { + // Use the 16 bytes of padding in each row to store the meta data: KQ max, KQ rowsum, KQ max scale. + ((float2 *) tile_KV)[j_cwm*(D2_padded/2) + D/4] = KQ_cmr; + } + + __syncthreads(); + + static_assert(np == 1 || np == 2 || np == 4, "bad np"); + if (np == 1) { + // No combination is needed, the meta data can be directly written from registers to VRAM. + if (needs_fixup && threadIdx.x < mma_B::J) { + float2 * dstk_fixup_meta = dstk_fixup + blockIdx.x*ncols; + dstk_fixup_meta[j_cwm] = KQ_cmr; + } + if (is_fixup && threadIdx.x < mma_B::J) { + float2 * dstk_fixup_meta = dstk_fixup + (gridDim.x + blockIdx.x)*ncols; + dstk_fixup_meta[j_cwm] = KQ_cmr; + } + } else if (threadIdx.y % np == 0) { + // Combine the meta data for parallel warps via shared memory. + // Warps with threadIdx.y % np != 0 must NOT return early. + // All threads must return simultaneously to avoid race conditions with work on the next tile. + + float * meta_j = (float *) tile_KV + (threadIdx.y*mma_B::J + threadIdx.x)*D2_padded + D/2; + + float KQ_cm = -FLT_MAX/2; // KQ combine max per parallel warp. + if (np*mma_B::J == WARP_SIZE || threadIdx.x < np*mma_B::J) { + KQ_cm = meta_j[0]; + } + + float KQ_cmn = KQ_cm; // KQ combine max new, max between all parallel warps. +#pragma unroll + for (int offset = np*mma_B::J/2; offset >= mma_B::J; offset >>= 1) { + KQ_cmn = fmaxf(KQ_cmn, __shfl_xor_sync(0xFFFFFFFF, KQ_cmn, offset, WARP_SIZE)); + } + + const float KQ_cms = expf(KQ_cm - KQ_cmn); // KQ combine max scale per warp. + float KQ_crs = 0.0f; // KQ combine rowsum, scaled sum of all parallel warps. + if (np*mma_B::J == WARP_SIZE || threadIdx.x < np*mma_B::J) { + KQ_crs = KQ_cms*meta_j[1]; + } +#pragma unroll + for (int offset = np*mma_B::J/2; offset >= mma_B::J; offset >>= 1) { + KQ_crs += __shfl_xor_sync(0xFFFFFFFF, KQ_crs, offset, WARP_SIZE); + } + + // Write back combined meta data: + if (np*mma_B::J == WARP_SIZE || threadIdx.x < np*mma_B::J) { + meta_j[0] = KQ_cmn; // Combined max. KQ values. + meta_j[1] = KQ_crs; // Combined KQ rowsums. + meta_j[2] = KQ_cms; // KQ max scales per parallel warp. + } + if (needs_fixup && threadIdx.x < mma_B::J) { + float2 * dstk_fixup_meta = dstk_fixup + blockIdx.x*ncols; + dstk_fixup_meta[(threadIdx.y/np)*mma_B::J + threadIdx.x] = make_float2(KQ_cmn, KQ_crs); + } + if (is_fixup && threadIdx.x < mma_B::J) { + float2 * dstk_fixup_meta = dstk_fixup + (gridDim.x + blockIdx.x)*ncols; + dstk_fixup_meta[(threadIdx.y/np)*mma_B::J + threadIdx.x] = make_float2(KQ_cmn, KQ_crs); + } + } + + if (np > 1) { + __syncthreads(); + } + + if (np == 1 || threadIdx.y % np == 0) { + // The first 2*2*gridDim.x*ncols floats in dstk_fixup are for storing max. values and row sums. + // The values after that are for the partial results of the individual blocks. + float2 * dstk_fixup_data = dstk_fixup + gridDim.x*(2*ncols) + blockIdx.x*(ncols*(D/2)); + +#pragma unroll + for (int stride_k : {WARP_SIZE, WARP_SIZE/2, WARP_SIZE/4}) { + const int k0_start = stride_k == WARP_SIZE ? 0 : D/2 - (D/2) % (2*stride_k); + const int k0_stop = D/2 - (D/2) % (1*stride_k); + const int stride_j = WARP_SIZE / stride_k; + + if (nwarps*stride_j > ncols && threadIdx.y*stride_j >= ncols) { + break; + } + +#pragma unroll + for (int j0_dst = 0; j0_dst < ncols; j0_dst += (nwarps/np)*stride_j) { + const int j_dst = j0_dst + (threadIdx.y/np)*stride_j + (stride_k == WARP_SIZE ? 0 : threadIdx.x / stride_k); + const int j_tile_KV = (j_dst/mma_B::J)*(np*mma_B::J) + j_dst % mma_B::J; + + if (!is_fixup && jt*ncols + j_dst >= ne01) { + continue; + } + const float * meta_j = (const float *) tile_KV + j_tile_KV*D2_padded + D/2; +#pragma unroll + for (int k0 = k0_start; k0 < k0_stop; k0 += stride_k) { + const int k = k0 + (stride_k == WARP_SIZE ? threadIdx.x : threadIdx.x % stride_k); + + float2 dstk_val = make_float2(0.0f, 0.0f); +#pragma unroll + for (int ip = 0; ip < np; ++ip) { + const float KQ_crs = np == 1 ? 1.0f : meta_j[ip*mma_B::J*D2_padded + 2]; + const float2 dstk_val_add = __half22float2(tile_KV[(j_tile_KV + ip*mma_B::J)*D2_padded + k]); + dstk_val.x += dstk_val_add.x*KQ_crs; + dstk_val.y += dstk_val_add.y*KQ_crs; + } + + if (!needs_fixup && !is_fixup) { + const float KQ_rowsum_j = meta_j[1]; + dstk_val.x /= KQ_rowsum_j; + dstk_val.y /= KQ_rowsum_j; + } + + if (is_fixup) { + dstk_fixup_data[j_dst*(D/2) + k] = dstk_val; + } else { + dstk[(jt*ncols + j_dst)*ne02*(D/2) + k] = dstk_val; + } + } + } + } + } + + if (np > 1) { + __syncthreads(); + } +#else + NO_DEVICE_CODE; +#endif // NEW_MMA_AVAILABLE +} + +template +#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(nwarps*WARP_SIZE, 2) +#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_ext_f16( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const float logit_softcap, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int nb21, + const int nb22, + const int nb23, + const int ne0, + const int ne1, + const int ne2, + const int ne3) { + // Skip unused kernel variants for faster compilation: + if (use_logit_softcap && !(D == 128 || D == 256)) { + NO_DEVICE_CODE; + return; + } + + static_assert(FATTN_KQ_STRIDE % KQ_stride == 0, "bad KQ_stride"); + + const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. + + const int iter_k = ne11 / KQ_stride; + const int iter_j = (ne01 + (ncols - 1)) / ncols; + + // kbc == k block continuous, current index in continuous ijk space. + int kbc = (blockIdx.x + 0)*iter_k*iter_j*ne02 / gridDim.x; + const int kbc_stop = (blockIdx.x + 1)*iter_k*iter_j*ne02 / gridDim.x; + + // If the seams of 2 CUDA blocks fall within an output tile their results need to be combined. + // For this we need to track both the block that starts the tile (needs_fixup) and the block that finishes the tile (is_fixup). + // In the most general case >2 seams can fall into the same tile. + + // kb0 == k start index when in the output tile. + int kb0_start = kbc % iter_k; + int kb0_stop = min(iter_k, kb0_start + kbc_stop - kbc); + while (kbc < kbc_stop && kb0_stop == iter_k) { + const int channel = kbc / (iter_k*iter_j); + const int jt = (kbc - channel*iter_k*iter_j) / iter_k; // j index of current tile. + + const float2 * Q_f2 = (const float2 *) (Q + nb02* channel); + const half2 * K_h2 = (const half2 *) (K + nb12*(channel / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb12*(channel / gqa_ratio)); // K and V have same shape + const half * maskh = mask ? (const half *) mask + (nb31/sizeof(half))*jt*ncols : nullptr; + float2 * dstk = ((float2 *) dst) + channel*(D/2); + + const float slope = get_alibi_slope(max_bias, channel, n_head_log2, m0, m1); + + constexpr bool is_fixup = false; // All but (potentially) the last iterations write their data to dst rather than the fixup buffer. + if (kb0_start == 0) { + constexpr bool needs_fixup = false; // CUDA block is working on an entire tile. + flash_attn_ext_f16_process_tile + (Q_f2, K_h2, V_h2, maskh, dstk, dst_meta, scale, slope, logit_softcap, + ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, ne31, nb31, nb01, nb02, nb03, nb11, nb12, nb13, nb21, nb22, nb23, ne0, ne1, ne2, ne3, + jt, kb0_start, kb0_stop); + } else { + constexpr bool needs_fixup = true; // CUDA block is working on the beginning of a tile. + flash_attn_ext_f16_process_tile + (Q_f2, K_h2, V_h2, maskh, dstk, dst_meta, scale, slope, logit_softcap, + ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, ne31, nb31, nb01, nb02, nb03, nb11, nb12, nb13, nb21, nb22, nb23, ne0, ne1, ne2, ne3, + jt, kb0_start, kb0_stop); + } + + kbc += iter_k; + kbc -= kbc % iter_k; + + kb0_start = 0; + kb0_stop = min(iter_k, kbc_stop - kbc); + } + + if (kbc >= kbc_stop) { + return; + } + + const int channel = kbc / (iter_k*iter_j); + const int jt = (kbc - channel*iter_k*iter_j) / iter_k; // j index of current tile. + + const float2 * Q_f2 = (const float2 *) (Q + nb02* channel); + const half2 * K_h2 = (const half2 *) (K + nb12*(channel / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb12*(channel / gqa_ratio)); // K and V have same shape + const half * maskh = mask ? (const half *) mask + (nb31/sizeof(half))*jt*ncols : nullptr; + float2 * dstk = ((float2 *) dst) + channel*(D/2); + + const float slope = get_alibi_slope(max_bias, channel, n_head_log2, m0, m1); + + constexpr bool is_fixup = true; // Last index writes its data to fixup buffer to avoid data races with other blocks. + constexpr bool needs_fixup = false; + flash_attn_ext_f16_process_tile + (Q_f2, K_h2, V_h2, maskh, dstk, dst_meta, scale, slope, logit_softcap, + ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, ne31, nb31, nb01, nb02, nb03, nb11, nb12, nb13, nb21, nb22, nb23, ne0, ne1, ne2, ne3, + jt, kb0_start, kb0_stop); +} + +template +void ggml_cuda_flash_attn_ext_mma_f16_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + typedef mma_A_I16K8 mma_A; + typedef mma_B_J8K8 mma_B; + + static_assert(D % mma_B::K == 0, "bad D"); + static_assert(cols_per_block % mma_B::J == 0, "bad cols_per_block"); + + const ggml_tensor * KQV = dst; + + constexpr int KQ_stride = D <= 128 ? 64 : 32; + constexpr int nwarps = (KQ_stride == 32 && cols_per_block <= 16) ? + cols_per_block/mma_B::J * KQ_stride/mma_A::I : (cols_per_block <= 8 ? 4 : 8); + constexpr size_t nbytes_shared = std::max(KQ_stride, nwarps*mma_B::J) * (D + 8) * sizeof(half); + + float logit_softcap; + memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); + + fattn_kernel_t fattn_kernel; + if (logit_softcap == 0.0f) { + constexpr bool use_logit_softcap = false; + fattn_kernel = flash_attn_ext_f16; + } else { + constexpr bool use_logit_softcap = true; + fattn_kernel = flash_attn_ext_f16; + } + launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, true, true); +} + +#define DECL_FATTN_MMA_F16_CASE(D, cols_per_block) \ + template void ggml_cuda_flash_attn_ext_mma_f16_case \ + (ggml_backend_cuda_context & ctx, ggml_tensor * dst) \ + +extern DECL_FATTN_MMA_F16_CASE( 64, 8); +extern DECL_FATTN_MMA_F16_CASE( 80, 8); +extern DECL_FATTN_MMA_F16_CASE( 96, 8); +extern DECL_FATTN_MMA_F16_CASE(112, 8); +extern DECL_FATTN_MMA_F16_CASE(128, 8); +extern DECL_FATTN_MMA_F16_CASE(256, 8); + +extern DECL_FATTN_MMA_F16_CASE( 64, 16); +extern DECL_FATTN_MMA_F16_CASE( 80, 16); +extern DECL_FATTN_MMA_F16_CASE( 96, 16); +extern DECL_FATTN_MMA_F16_CASE(112, 16); +extern DECL_FATTN_MMA_F16_CASE(128, 16); +extern DECL_FATTN_MMA_F16_CASE(256, 16); + +extern DECL_FATTN_MMA_F16_CASE( 64, 32); +extern DECL_FATTN_MMA_F16_CASE( 80, 32); +extern DECL_FATTN_MMA_F16_CASE( 96, 32); +extern DECL_FATTN_MMA_F16_CASE(112, 32); +extern DECL_FATTN_MMA_F16_CASE(128, 32); +extern DECL_FATTN_MMA_F16_CASE(256, 32); + +extern DECL_FATTN_MMA_F16_CASE( 64, 64); +extern DECL_FATTN_MMA_F16_CASE( 80, 64); +extern DECL_FATTN_MMA_F16_CASE( 96, 64); +extern DECL_FATTN_MMA_F16_CASE(112, 64); +extern DECL_FATTN_MMA_F16_CASE(128, 64); +extern DECL_FATTN_MMA_F16_CASE(256, 64); diff --git a/ggml/src/ggml-cuda/fattn-tile-f16.cu b/ggml/src/ggml-cuda/fattn-tile-f16.cu index 4d314dacb..d4edbad07 100644 --- a/ggml/src/ggml-cuda/fattn-tile-f16.cu +++ b/ggml/src/ggml-cuda/fattn-tile-f16.cu @@ -45,7 +45,17 @@ static __global__ void flash_attn_tile_ext_f16( const int ne2, const int ne3) { #ifdef FP16_AVAILABLE + +#ifndef FLASH_ATTN_AVAILABLE + NO_DEVICE_CODE; + return; +#endif // FLASH_ATTN_AVAILABLE + // Skip unused kernel variants for faster compilation: +#ifdef FP16_MMA_AVAILABLE + NO_DEVICE_CODE; + return; +#endif // FP16_MMA_AVAILABLE if (use_logit_softcap && !(D == 128 || D == 256)) { NO_DEVICE_CODE; return; @@ -288,16 +298,18 @@ void launch_fattn_tile_f16_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * const ggml_tensor * Q = dst->src[0]; switch (Q->ne[0]) { case 64: { - constexpr int D = 64; - constexpr int nwarps = 8; + constexpr int D = 64; + constexpr int nwarps = 8; + constexpr size_t nbytes_shared = 0; fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f16; - launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true); + launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, true, true); } break; case 128: { - constexpr int D = 128; - constexpr int nwarps = 8; + constexpr int D = 128; + constexpr int nwarps = 8; + constexpr size_t nbytes_shared = 0; fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f16; - launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true); + launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, true, true); } break; default: { GGML_ABORT("FlashAttention without tensor cores only supports head sizes 64 and 128."); diff --git a/ggml/src/ggml-cuda/fattn-tile-f32.cu b/ggml/src/ggml-cuda/fattn-tile-f32.cu index bb3360447..0d274f332 100644 --- a/ggml/src/ggml-cuda/fattn-tile-f32.cu +++ b/ggml/src/ggml-cuda/fattn-tile-f32.cu @@ -48,7 +48,12 @@ static __global__ void flash_attn_tile_ext_f32( NO_DEVICE_CODE; return; #endif // FLASH_ATTN_AVAILABLE + // Skip unused kernel variants for faster compilation: +#ifdef FP16_MMA_AVAILABLE + NO_DEVICE_CODE; + return; +#endif // FP16_MMA_AVAILABLE if (use_logit_softcap && !(D == 128 || D == 256)) { NO_DEVICE_CODE; return; @@ -287,16 +292,18 @@ void launch_fattn_tile_f32_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * const ggml_tensor * Q = dst->src[0]; switch (Q->ne[0]) { case 64: { - constexpr int D = 64; - constexpr int nwarps = 8; + constexpr int D = 64; + constexpr int nwarps = 8; + constexpr size_t nbytes_shared = 0; fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32; - launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true); + launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, true, true); } break; case 128: { - constexpr int D = 128; - constexpr int nwarps = 8; + constexpr int D = 128; + constexpr int nwarps = 8; + constexpr size_t nbytes_shared = 0; fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32; - launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true); + launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, true, true); } break; default: { GGML_ABORT("FlashAttention without tensor cores only supports head sizes 64 and 128."); diff --git a/ggml/src/ggml-cuda/fattn-vec-f16.cuh b/ggml/src/ggml-cuda/fattn-vec-f16.cuh index 34a2992c7..d9ac44246 100644 --- a/ggml/src/ggml-cuda/fattn-vec-f16.cuh +++ b/ggml/src/ggml-cuda/fattn-vec-f16.cuh @@ -42,6 +42,12 @@ static __global__ void flash_attn_vec_ext_f16( const int ne2, const int ne3) { #ifdef FP16_AVAILABLE + +#ifndef FLASH_ATTN_AVAILABLE + NO_DEVICE_CODE; + return; +#endif // FLASH_ATTN_AVAILABLE + // Skip unused kernel variants for faster compilation: if (use_logit_softcap && !(D == 128 || D == 256)) { NO_DEVICE_CODE; @@ -303,7 +309,8 @@ void ggml_cuda_flash_attn_ext_vec_f16_case_impl(ggml_backend_cuda_context & ctx, fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16; constexpr bool need_f16_K = D != 128; constexpr bool need_f16_V = D != 128 && D != 64; - launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, need_f16_K, need_f16_V); + constexpr size_t nbytes_shared = 0; + launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, need_f16_K, need_f16_V); } template diff --git a/ggml/src/ggml-cuda/fattn-vec-f32.cuh b/ggml/src/ggml-cuda/fattn-vec-f32.cuh index a28fc8b7f..6ef8f9dcc 100644 --- a/ggml/src/ggml-cuda/fattn-vec-f32.cuh +++ b/ggml/src/ggml-cuda/fattn-vec-f32.cuh @@ -41,6 +41,11 @@ static __global__ void flash_attn_vec_ext_f32( const int ne1, const int ne2, const int ne3) { +#ifndef FLASH_ATTN_AVAILABLE + NO_DEVICE_CODE; + return; +#endif // FLASH_ATTN_AVAILABLE + // Skip unused kernel variants for faster compilation: if (use_logit_softcap && !(D == 128 || D == 256)) { NO_DEVICE_CODE; @@ -284,7 +289,8 @@ void ggml_cuda_flash_attn_ext_vec_f32_case_impl(ggml_backend_cuda_context & ctx, fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f32; constexpr bool need_f16_K = D != 128; constexpr bool need_f16_V = D != 128 && D != 64; - launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, need_f16_K, need_f16_V); + constexpr size_t nbytes_shared = 0; + launch_fattn(ctx, dst, fattn_kernel, nwarps, nbytes_shared, need_f16_K, need_f16_V); } template diff --git a/ggml/src/ggml-cuda/fattn-wmma-f16.cu b/ggml/src/ggml-cuda/fattn-wmma-f16.cu new file mode 100644 index 000000000..1054ff95d --- /dev/null +++ b/ggml/src/ggml-cuda/fattn-wmma-f16.cu @@ -0,0 +1,648 @@ +// Old and deprecated WMMA FlashAttention implementation. +// It is still needed for Volta since the memory layout of NVIDIA tensor cores changed with Turing. +// Long-term the WMMA code should be replaced with a dedicated Volta implementation. + +#include "common.cuh" +#include "fattn-common.cuh" +#include "fattn-wmma-f16.cuh" + +#ifdef FP16_MMA_AVAILABLE +#include +#endif // FP16_MMA_AVAILABLE + +// D == head size, VKQ_stride == num VKQ rows calculated in parallel: +template +#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(nwarps*WARP_SIZE, 1) +#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_ext_f16( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const float logit_softcap, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int nb21, + const int nb22, + const int nb23, + const int ne0, + const int ne1, + const int ne2, + const int ne3) { +#if __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA + // Skip unused kernel variants for faster compilation: + if (use_logit_softcap && !(D == 128 || D == 256)) { + NO_DEVICE_CODE; + return; + } + + //In this kernel Q, K, V are matrices while i, j, k are matrix indices. + + const int ic0 = ncols*(blockIdx.x / parallel_blocks); // Index of the first Q/QKV column to work on. + const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + + static_assert(D <= FATTN_KQ_STRIDE, "D must be <= FATTN_KQ_STRIDE."); + static_assert(ncols == 8 || ncols % 16 == 0, "ncols must be 8 or a multiple of 16."); + constexpr int frag_m = ncols == 8 ? 32 : 16; + constexpr int frag_n = ncols == 8 ? 8 : 16; + static_assert(D % frag_m == 0, "If ncols == 8 then D % frag_m must be 0."); + typedef nvcuda::wmma::fragment frag_a_K; + typedef nvcuda::wmma::fragment frag_a_V; + typedef nvcuda::wmma::fragment frag_b; + typedef nvcuda::wmma::fragment frag_c_KQ; + typedef nvcuda::wmma::fragment frag_c_VKQ; + + constexpr int KQ_stride_tc = nwarps*frag_m; // Number of KQ rows calculated in parallel. + constexpr int VKQ_ratio = KQ_stride_tc/VKQ_stride; // Number of parallel VKQ accumulators needed to keep all warps busy. + static_assert(VKQ_ratio <= nwarps, "VKQ_ratio must be <= nwarps."); + + // Pad internal representation of KQ, KQV to reduce shared memory bank conflicts: + constexpr int D_padded = D + 8; + constexpr int kqs_padded = FATTN_KQ_STRIDE + 8; + constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half); + + const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. + const float * Q_f = (const float *) (Q + nb02* blockIdx.y + nb01*ic0); + const half * K_h = (const half *) (K + nb12*(blockIdx.y / gqa_ratio)); + const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) mask + (nb31/sizeof(half))* ic0; + const half2 * mask2 = (const half2 *) mask + (nb31/sizeof(half))*(ic0/2); + + const int stride_Q = nb01 / sizeof(float); + const int stride_KV = nb11 / sizeof(half); + + const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); + const half slopeh = __float2half(slopef); + const half2 slope2 = make_half2(slopef, slopef); + + const half2 logit_softcap_2 = make_half2(logit_softcap, logit_softcap); + + frag_b Q_b[D/16][ncols/frag_n]; + + // A single buffer for temporarily holding tiles of KQ and VKQ parts: + constexpr int mem_KQ = ncols*kqs_padded*kqar; + constexpr int mem_VKQ_parts = VKQ_ratio*ncols*D_padded; + __shared__ half KQ[mem_KQ >= mem_VKQ_parts ? mem_KQ : mem_VKQ_parts]; + float * KQ_f = (float *) KQ; + half2 * KQ2 = (half2 *) KQ; + + float KQ_rowsum_f[ncols/nwarps] = {0.0f}; + float KQ_max_f[ncols/nwarps]; + float KQ_max_scale_f[ncols/nwarps] = {0.0f}; + +#pragma unroll + for (int j = 0; j < ncols/nwarps; ++j) { + KQ_max_f[j] = -FLT_MAX/2.0f; + } + + half2 KQ_rowsum_h2[ncols/nwarps] = {{0.0f, 0.0f}}; + half2 KQ_max_h2[ncols/nwarps]; + half2 KQ_max_scale_h2[ncols/nwarps] = {{0.0f, 0.0f}}; + +#pragma unroll + for (int j = 0; j < ncols/nwarps; ++j) { + KQ_max_h2[j] = make_half2(-HALF_MAX_HALF, -HALF_MAX_HALF); + } + + __shared__ half VKQ[ncols*D_padded]; // Accumulator for final VKQ slice. + half2 * VKQ2 = (half2 *) VKQ; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + if (i0 + WARP_SIZE > D/2 && i >= D/2) { + break; + } + VKQ2[j*(D_padded/2) + i] = make_half2(0.0f, 0.0f); + } + } + + // Convert Q to half and apply scale, temporarily store in KQ: +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; +#pragma unroll + for (int i0 = 0; i0 < D; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + if (i0 + WARP_SIZE > D && i >= D) { + break; + } + KQ[j*D_padded + i] = ic0 + j < ne01 ? Q_f[j*stride_Q + i] * scale : 0.0f; + } + } + + __syncthreads(); + + // Load Q into tensor core fragments/registers since it will be used frequently: +#pragma unroll + for (int i0 = 0; i0 < D; i0 += 16) { +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += frag_n) { + nvcuda::wmma::load_matrix_sync(Q_b[i0/16][j0/frag_n], KQ + j0*D_padded + i0, D_padded); + } + } + + __syncthreads(); + + // Iterate over ne11 == previous tokens: + for (int k_VKQ_0 = ip*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE) { + // Calculate tile of KQ: +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) { + frag_c_KQ KQ_c[ncols/frag_n]; +#pragma unroll + for (int j = 0; j < ncols/frag_n; ++j) { + nvcuda::wmma::fill_fragment(KQ_c[j], 0.0f); + } +#pragma unroll + for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 16) { + frag_a_K K_a; + nvcuda::wmma::load_matrix_sync(K_a, K_h + (k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV); +#pragma unroll + for (int j = 0; j < ncols/frag_n; ++j) { + nvcuda::wmma::mma_sync(KQ_c[j], K_a, Q_b[k_KQ_0/16][j], KQ_c[j]); + } + } +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += frag_n) { + nvcuda::wmma::store_matrix_sync((KQ_acc_t *) KQ + j0*kqs_padded + i_KQ_0 + frag_m*threadIdx.y, KQ_c[j0/frag_n], kqs_padded, nvcuda::wmma::mem_col_major); + } + } + + __syncthreads(); + + // Calculate softmax for each KQ column using the current max. value. + // The divisor is stored in KQ_rowsum and will be applied at the end. +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + if (std::is_same::value) { + float KQ_f_tmp[FATTN_KQ_STRIDE / WARP_SIZE]; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + KQ_f_tmp[k0/WARP_SIZE] = KQ_f[j*kqs_padded + k]; + + if (use_logit_softcap) { + KQ_f_tmp[k0/WARP_SIZE] = logit_softcap*tanhf(KQ_f_tmp[k0/WARP_SIZE]); + } + } + + float KQ_max_new = KQ_max_f[j0/nwarps]; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + KQ_f_tmp[k0/WARP_SIZE] += mask ? __half2float(slopeh*maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f; + KQ_max_new = max(KQ_max_new, KQ_f_tmp[k0/WARP_SIZE]); + } + KQ_max_new = warp_reduce_max(KQ_max_new); + + const float diff = KQ_max_f[j0/nwarps] - KQ_max_new; + KQ_max_scale_f[j0/nwarps] = expf(diff); + if (diff <= SOFTMAX_FTZ_THRESHOLD) { + KQ_max_scale_f[j0/nwarps] = 0.0f; + } + KQ_max_f[j0/nwarps] = KQ_max_new; + + float KQ_rowsum_add = 0.0f; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + const float diff = KQ_f_tmp[k0/WARP_SIZE] - KQ_max_f[j0/nwarps]; + KQ_f_tmp[k0/WARP_SIZE] = expf(diff); + if (diff <= SOFTMAX_FTZ_THRESHOLD) { + KQ_f_tmp[k0/WARP_SIZE] = 0.0f; + } + KQ_rowsum_add += KQ_f_tmp[k0/WARP_SIZE]; + KQ[j*(kqar*kqs_padded) + k] = KQ_f_tmp[k0/WARP_SIZE]; + } + KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add); + + // Scale previous KQ_rowsum to account for a potential increase in KQ_max: + KQ_rowsum_f[j0/nwarps] = KQ_max_scale_f[j0/nwarps]*KQ_rowsum_f[j0/nwarps] + KQ_rowsum_add; + } else { + half2 KQ2_tmp[FATTN_KQ_STRIDE/(2*WARP_SIZE)]; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + KQ2_tmp[k0/WARP_SIZE] = KQ2[j*(kqs_padded/2) + k]; + + if (use_logit_softcap) { + // There is no dedicated tangens hyperbolicus function for half2. + KQ2_tmp[k0/WARP_SIZE] = h2exp(KQ2_tmp[k0/WARP_SIZE]*make_half2(2.0f, 2.0f)); + KQ2_tmp[k0/WARP_SIZE] = (KQ2_tmp[k0/WARP_SIZE] - make_half2(1.0f, 1.0f)) + /(KQ2_tmp[k0/WARP_SIZE] + make_half2(1.0f, 1.0f)); + + KQ2_tmp[k0/WARP_SIZE] *= logit_softcap_2; + } + } + + half2 KQ_max_new = KQ_max_h2[j0/nwarps]; +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + KQ2_tmp[k0/WARP_SIZE] += mask ? slope2*mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f); + KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]); + } + KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new)))); + const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new; + KQ_max_scale_h2[j0/nwarps] = h2exp(diff); + const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD)); + *((uint32_t *) &KQ_max_scale_h2[j0/nwarps]) &= ftz_mask; + KQ_max_h2[j0/nwarps] = KQ_max_new; + + half2 KQ_rowsum_add = make_half2(0.0f, 0.0f); +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) { + const int k = k0 + threadIdx.x; + + const half2 diff = KQ2_tmp[k0/WARP_SIZE] - KQ_max_h2[j0/nwarps]; + KQ2_tmp[k0/WARP_SIZE] = h2exp(diff); + const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD)); + *((uint32_t *) &KQ2_tmp[k0/WARP_SIZE]) &= ftz_mask; + KQ_rowsum_add += KQ2_tmp[k0/WARP_SIZE]; + KQ2[j*(kqs_padded/2) + k] = KQ2_tmp[k0/WARP_SIZE]; + } + KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add); + + // Scale previous KQ_rowsum to account for a potential increase in KQ_max: + KQ_rowsum_h2[j0/nwarps] = KQ_max_scale_h2[j0/nwarps]*KQ_rowsum_h2[j0/nwarps] + KQ_rowsum_add; + } + } + + __syncthreads(); + + frag_b KQ_b[FATTN_KQ_STRIDE/(VKQ_ratio*16)][ncols/frag_n]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += frag_n) { +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) { + const int k = k0 + (threadIdx.y % VKQ_ratio)*16; + nvcuda::wmma::load_matrix_sync( + KQ_b[k0/(VKQ_ratio*16)][j0/frag_n], + KQ + j0*(kqar*kqs_padded) + k, + kqar*kqs_padded); + } + } + + frag_c_VKQ VKQ_c[D/VKQ_stride][ncols/frag_n]; +#pragma unroll + for (int i_VKQ_0 = 0; i_VKQ_0 < D; i_VKQ_0 += VKQ_stride) { +#pragma unroll + for (int j = 0; j < ncols/frag_n; ++j) { + nvcuda::wmma::fill_fragment(VKQ_c[i_VKQ_0/VKQ_stride][j], 0.0f); + } + +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) { + const int k = k0 + (threadIdx.y % VKQ_ratio)*16; + + frag_a_V v_a; + nvcuda::wmma::load_matrix_sync(v_a, V_h + (k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV); +#pragma unroll + for (int j = 0; j < ncols/frag_n; ++j) { + nvcuda::wmma::mma_sync(VKQ_c[i_VKQ_0/VKQ_stride][j], v_a, KQ_b[k0/(VKQ_ratio*16)][j], VKQ_c[i_VKQ_0/VKQ_stride][j]); + } + } + } + + __syncthreads(); + + const int offset_k = (threadIdx.y % VKQ_ratio) * (ncols*D_padded); +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += VKQ_stride) { +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += frag_n) { + nvcuda::wmma::store_matrix_sync( + KQ + offset_k + j0*D_padded + i_KQ_0 + frag_m*(threadIdx.y/VKQ_ratio), + VKQ_c[i_KQ_0/VKQ_stride][j0/frag_n], + D_padded, nvcuda::wmma::mem_col_major); + } + } + + __syncthreads(); + +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + half2 VKQ_scale; + if (std::is_same::value) { + VKQ_scale = make_half2(KQ_max_scale_f[j0/nwarps], KQ_max_scale_f[j0/nwarps]); + } else { + VKQ_scale = KQ_max_scale_h2[j0/nwarps]; + } + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + if (i0 + WARP_SIZE > D/2 && i >= D/2) { + break; + } + + half2 VKQ_add = make_half2(0.0f, 0.0f); +#pragma unroll + for (int l = 0; l < VKQ_ratio; ++l) { + VKQ_add += KQ2[l*(ncols*D_padded/2) + j*(D_padded/2) + i]; + } + VKQ2[j*(D_padded/2) + i] = VKQ_scale*VKQ2[j*(D_padded/2) + i] + VKQ_add; + } + } + + __syncthreads(); + } + +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j_VKQ = j0 + threadIdx.y; + if (ic0 + j_VKQ >= ne01) { + return; + } + const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; + + float KQ_rowsum_j; + if (std::is_same::value) { + KQ_rowsum_j = KQ_rowsum_f[j0/nwarps]; + } else { + KQ_rowsum_j = __low2float(KQ_rowsum_h2[j0/nwarps]) + __high2float(KQ_rowsum_h2[j0/nwarps]); + } + +#pragma unroll + for (int i0 = 0; i0 < D; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + if (i0 + WARP_SIZE > D && i >= D) { + break; + } + float dst_val = VKQ[j_VKQ*D_padded + i]; + if (parallel_blocks == 1) { + dst_val /= KQ_rowsum_j; + } + dst[j_dst*gridDim.y*D + blockIdx.y*D + i] = dst_val; + } + + if (parallel_blocks == 1 || threadIdx.x != 0) { + continue; + } + + float2 dst_meta_val; + if (std::is_same::value) { + dst_meta_val.x = KQ_max_f[j0/nwarps]; + } else { + dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]); + } + dst_meta_val.y = KQ_rowsum_j; + dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = dst_meta_val; + } +#else + NO_DEVICE_CODE; +#endif // __CUDA_ARCH__ == GGML_CUDA_CC_VOLTA +} + +constexpr int get_max_power_of_2(int x) { + return x % 2 == 0 ? 2*get_max_power_of_2(x/2) : 1; +} + +static_assert(get_max_power_of_2(1) == 1, "Test failed."); +static_assert(get_max_power_of_2(2) == 2, "Test failed."); +static_assert(get_max_power_of_2(4) == 4, "Test failed."); +static_assert(get_max_power_of_2(6) == 2, "Test failed."); + +// Number of VKQ rows calculated in parallel: +constexpr int get_VKQ_stride(int D, int nwarps, int frag_m) { + return (get_max_power_of_2(D/frag_m) < nwarps ? get_max_power_of_2(D/frag_m) : nwarps)*frag_m; +} + +static_assert(get_VKQ_stride(128, 1, 32) == 32, "Test failed."); +static_assert(get_VKQ_stride(128, 2, 32) == 64, "Test failed."); +static_assert(get_VKQ_stride(128, 4, 32) == 128, "Test failed."); +static_assert(get_VKQ_stride( 64, 1, 32) == 32, "Test failed."); +static_assert(get_VKQ_stride( 64, 2, 32) == 64, "Test failed."); +static_assert(get_VKQ_stride( 64, 4, 32) == 64, "Test failed."); +static_assert(get_VKQ_stride( 80, 1, 16) == 16, "Test failed."); +static_assert(get_VKQ_stride( 80, 2, 16) == 16, "Test failed."); +static_assert(get_VKQ_stride( 80, 4, 16) == 16, "Test failed."); + +template +void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * KQV = dst; + const ggml_tensor * Q = dst->src[0]; + + constexpr int nwarps = 4; + + constexpr int frag_m = cols_per_block == 8 && D % 32 == 0 ? 32 : 16; + const int blocks_num_pb1 = ((Q->ne[1] + cols_per_block - 1) / cols_per_block)*Q->ne[2]*Q->ne[3]; + const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm; + + float logit_softcap; + memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); + + if (4*blocks_num_pb1 < 2*nsm) { + constexpr int parallel_blocks = 4; + fattn_kernel_t fattn_kernel; + if (logit_softcap == 0.0f) { + constexpr bool use_logit_softcap = false; + fattn_kernel = flash_attn_ext_f16< + D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; + } else { + constexpr bool use_logit_softcap = true; + fattn_kernel = flash_attn_ext_f16< + D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; + } + launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true); + return; + } + if (2*blocks_num_pb1 < 2*nsm) { + constexpr int parallel_blocks = 2; + fattn_kernel_t fattn_kernel; + if (logit_softcap == 0.0f) { + constexpr bool use_logit_softcap = false; + fattn_kernel = flash_attn_ext_f16< + D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; + } else { + constexpr bool use_logit_softcap = true; + fattn_kernel = flash_attn_ext_f16< + D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; + } + launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true); + return; + } + constexpr int parallel_blocks = 1; + fattn_kernel_t fattn_kernel; + if (logit_softcap == 0.0f) { + constexpr bool use_logit_softcap = false; + fattn_kernel = flash_attn_ext_f16< + D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; + } else { + constexpr bool use_logit_softcap = true; + fattn_kernel = flash_attn_ext_f16< + D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; + } + launch_fattn(ctx, dst, fattn_kernel, nwarps, 0, true, true); +} + +void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * KQV = dst; + const ggml_tensor * Q = dst->src[0]; + + const enum ggml_prec prec = ggml_flash_attn_ext_get_prec(KQV); + + if (prec != GGML_PREC_DEFAULT) { + if (Q->ne[1] <= 32 || Q->ne[0] > 128) { + constexpr int cols_per_block = 16; + switch (Q->ne[0]) { + case 64: + ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, float>(ctx, dst); + break; + case 80: + ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, float>(ctx, dst); + break; + case 96: + ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, float>(ctx, dst); + break; + case 112: + ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, float>(ctx, dst); + break; + case 128: + ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); + break; + case 256: + ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, float>(ctx, dst); + break; + default: + GGML_ABORT("fatal error"); + break; + } + } else { + constexpr int cols_per_block = 32; + switch (Q->ne[0]) { + case 64: + ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, float>(ctx, dst); + break; + case 80: + ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, float>(ctx, dst); + break; + case 96: + ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, float>(ctx, dst); + break; + case 112: + ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, float>(ctx, dst); + break; + case 128: + ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); + break; + // case 256: + // ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); + // break; + default: + GGML_ABORT("fatal error"); + break; + } + } + return; + } + + if (Q->ne[1] <= 8 && Q->ne[0] % WARP_SIZE == 0) { + constexpr int cols_per_block = 8; + switch (Q->ne[0]) { + case 64: + ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst); + break; + case 96: + ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst); + break; + case 128: + ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst); + break; + case 256: + ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst); + break; + default: + GGML_ABORT("fatal error"); + break; + } + return; + } + + if (Q->ne[1] <= 32) { + constexpr int cols_per_block = 16; + switch (Q->ne[0]) { + case 64: + ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst); + break; + case 80: + ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, half>(ctx, dst); + break; + case 96: + ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst); + break; + case 112: + ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, half>(ctx, dst); + break; + case 128: + ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst); + break; + case 256: + ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst); + break; + default: + GGML_ABORT("fatal error"); + break; + } + return; + } + + constexpr int cols_per_block = 32; + switch (Q->ne[0]) { + case 64: + ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst); + break; + case 80: + ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, half>(ctx, dst); + break; + case 96: + ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst); + break; + case 112: + ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, half>(ctx, dst); + break; + case 128: + ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst); + break; + case 256: + ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst); + break; + default: + GGML_ABORT("fatal error"); + break; + } +} diff --git a/ggml/src/ggml-cuda/fattn-wmma-f16.cuh b/ggml/src/ggml-cuda/fattn-wmma-f16.cuh index 860d0e6dc..beeea95eb 100644 --- a/ggml/src/ggml-cuda/fattn-wmma-f16.cuh +++ b/ggml/src/ggml-cuda/fattn-wmma-f16.cuh @@ -1,543 +1,3 @@ #include "common.cuh" -#include "fattn-common.cuh" -#ifdef FP16_MMA_AVAILABLE -#include -#endif // FP16_MMA_AVAILABLE - -// D == head size, VKQ_stride == num VKQ rows calculated in parallel: -template -#if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) -__launch_bounds__(nwarps*WARP_SIZE, 1) -#endif // !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) -static __global__ void flash_attn_ext_f16( - const char * __restrict__ Q, - const char * __restrict__ K, - const char * __restrict__ V, - const char * __restrict__ mask, - float * __restrict__ dst, - float2 * __restrict__ dst_meta, - const float scale, - const float max_bias, - const float m0, - const float m1, - const uint32_t n_head_log2, - const float logit_softcap, - const int ne00, - const int ne01, - const int ne02, - const int ne03, - const int ne10, - const int ne11, - const int ne12, - const int ne13, - const int ne31, - const int nb31, - const int nb01, - const int nb02, - const int nb03, - const int nb11, - const int nb12, - const int nb13, - const int nb21, - const int nb22, - const int nb23, - const int ne0, - const int ne1, - const int ne2, - const int ne3) { -#ifdef FP16_MMA_AVAILABLE - // Skip unused kernel variants for faster compilation: - if (use_logit_softcap && !(D == 128 || D == 256)) { - NO_DEVICE_CODE; - return; - } - - //In this kernel Q, K, V are matrices while i, j, k are matrix indices. - - const int ic0 = ncols*(blockIdx.x / parallel_blocks); // Index of the first Q/QKV column to work on. - const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. - - static_assert(D <= FATTN_KQ_STRIDE, "D must be <= FATTN_KQ_STRIDE."); - static_assert(ncols == 8 || ncols % 16 == 0, "ncols must be 8 or a multiple of 16."); - constexpr int frag_m = ncols == 8 ? 32 : 16; - constexpr int frag_n = ncols == 8 ? 8 : 16; - static_assert(D % frag_m == 0, "If ncols == 8 then D % frag_m must be 0."); - typedef nvcuda::wmma::fragment frag_a_K; - typedef nvcuda::wmma::fragment frag_a_V; - typedef nvcuda::wmma::fragment frag_b; - typedef nvcuda::wmma::fragment frag_c_KQ; - typedef nvcuda::wmma::fragment frag_c_VKQ; - - constexpr int KQ_stride_tc = nwarps*frag_m; // Number of KQ rows calculated in parallel. - constexpr int VKQ_ratio = KQ_stride_tc/VKQ_stride; // Number of parallel VKQ accumulators needed to keep all warps busy. - static_assert(VKQ_ratio <= nwarps, "VKQ_ratio must be <= nwarps."); - - // Pad internal representation of KQ, KQV to reduce shared memory bank conflicts: - constexpr int D_padded = D + 8; - constexpr int kqs_padded = FATTN_KQ_STRIDE + 8; - constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half); - - const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. - const float * Q_f = (const float *) (Q + nb02* blockIdx.y + nb01*ic0); - const half * K_h = (const half *) (K + nb12*(blockIdx.y / gqa_ratio)); - const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape - const half * maskh = (const half *) mask + (nb31/sizeof(half))* ic0; - const half2 * mask2 = (const half2 *) mask + (nb31/sizeof(half))*(ic0/2); - - const int stride_Q = nb01 / sizeof(float); - const int stride_KV = nb11 / sizeof(half); - - const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1); - const half slopeh = __float2half(slopef); - const half2 slope2 = make_half2(slopef, slopef); - - const half2 logit_softcap_2 = make_half2(logit_softcap, logit_softcap); - - frag_b Q_b[D/16][ncols/frag_n]; - - // A single buffer for temporarily holding tiles of KQ and VKQ parts: - constexpr int mem_KQ = ncols*kqs_padded*kqar; - constexpr int mem_VKQ_parts = VKQ_ratio*ncols*D_padded; - __shared__ half KQ[mem_KQ >= mem_VKQ_parts ? mem_KQ : mem_VKQ_parts]; - float * KQ_f = (float *) KQ; - half2 * KQ2 = (half2 *) KQ; - - float KQ_rowsum_f[ncols/nwarps] = {0.0f}; - float KQ_max_f[ncols/nwarps]; - float KQ_max_scale_f[ncols/nwarps] = {0.0f}; - -#pragma unroll - for (int j = 0; j < ncols/nwarps; ++j) { - KQ_max_f[j] = -FLT_MAX/2.0f; - } - - half2 KQ_rowsum_h2[ncols/nwarps] = {{0.0f, 0.0f}}; - half2 KQ_max_h2[ncols/nwarps]; - half2 KQ_max_scale_h2[ncols/nwarps] = {{0.0f, 0.0f}}; - -#pragma unroll - for (int j = 0; j < ncols/nwarps; ++j) { - KQ_max_h2[j] = make_half2(-HALF_MAX_HALF, -HALF_MAX_HALF); - } - - __shared__ half VKQ[ncols*D_padded]; // Accumulator for final VKQ slice. - half2 * VKQ2 = (half2 *) VKQ; -#pragma unroll - for (int j0 = 0; j0 < ncols; j0 += nwarps) { - const int j = j0 + threadIdx.y; -#pragma unroll - for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { - const int i = i0 + threadIdx.x; - if (i0 + WARP_SIZE > D/2 && i >= D/2) { - break; - } - VKQ2[j*(D_padded/2) + i] = make_half2(0.0f, 0.0f); - } - } - - // Convert Q to half and apply scale, temporarily store in KQ: -#pragma unroll - for (int j0 = 0; j0 < ncols; j0 += nwarps) { - const int j = j0 + threadIdx.y; -#pragma unroll - for (int i0 = 0; i0 < D; i0 += WARP_SIZE) { - const int i = i0 + threadIdx.x; - if (i0 + WARP_SIZE > D && i >= D) { - break; - } - KQ[j*D_padded + i] = ic0 + j < ne01 ? Q_f[j*stride_Q + i] * scale : 0.0f; - } - } - - __syncthreads(); - - // Load Q into tensor core fragments/registers since it will be used frequently: -#pragma unroll - for (int i0 = 0; i0 < D; i0 += 16) { -#pragma unroll - for (int j0 = 0; j0 < ncols; j0 += frag_n) { - nvcuda::wmma::load_matrix_sync(Q_b[i0/16][j0/frag_n], KQ + j0*D_padded + i0, D_padded); - } - } - - __syncthreads(); - - // Iterate over ne11 == previous tokens: - for (int k_VKQ_0 = ip*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE) { - // Calculate tile of KQ: -#pragma unroll - for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) { - frag_c_KQ KQ_c[ncols/frag_n]; -#pragma unroll - for (int j = 0; j < ncols/frag_n; ++j) { - nvcuda::wmma::fill_fragment(KQ_c[j], 0.0f); - } -#pragma unroll - for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 16) { - frag_a_K K_a; - nvcuda::wmma::load_matrix_sync(K_a, K_h + (k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV); -#pragma unroll - for (int j = 0; j < ncols/frag_n; ++j) { - nvcuda::wmma::mma_sync(KQ_c[j], K_a, Q_b[k_KQ_0/16][j], KQ_c[j]); - } - } -#pragma unroll - for (int j0 = 0; j0 < ncols; j0 += frag_n) { - nvcuda::wmma::store_matrix_sync((KQ_acc_t *) KQ + j0*kqs_padded + i_KQ_0 + frag_m*threadIdx.y, KQ_c[j0/frag_n], kqs_padded, nvcuda::wmma::mem_col_major); - } - } - - __syncthreads(); - - // Calculate softmax for each KQ column using the current max. value. - // The divisor is stored in KQ_rowsum and will be applied at the end. -#pragma unroll - for (int j0 = 0; j0 < ncols; j0 += nwarps) { - const int j = j0 + threadIdx.y; - - if (std::is_same::value) { - float KQ_f_tmp[FATTN_KQ_STRIDE / WARP_SIZE]; -#pragma unroll - for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) { - const int k = k0 + threadIdx.x; - - KQ_f_tmp[k0/WARP_SIZE] = KQ_f[j*kqs_padded + k]; - - if (use_logit_softcap) { - KQ_f_tmp[k0/WARP_SIZE] = logit_softcap*tanhf(KQ_f_tmp[k0/WARP_SIZE]); - } - } - - float KQ_max_new = KQ_max_f[j0/nwarps]; -#pragma unroll - for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) { - const int k = k0 + threadIdx.x; - - KQ_f_tmp[k0/WARP_SIZE] += mask ? __half2float(slopeh*maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f; - KQ_max_new = max(KQ_max_new, KQ_f_tmp[k0/WARP_SIZE]); - } - KQ_max_new = warp_reduce_max(KQ_max_new); - - const float diff = KQ_max_f[j0/nwarps] - KQ_max_new; - KQ_max_scale_f[j0/nwarps] = expf(diff); - if (diff <= SOFTMAX_FTZ_THRESHOLD) { - KQ_max_scale_f[j0/nwarps] = 0.0f; - } - KQ_max_f[j0/nwarps] = KQ_max_new; - - float KQ_rowsum_add = 0.0f; -#pragma unroll - for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) { - const int k = k0 + threadIdx.x; - - const float diff = KQ_f_tmp[k0/WARP_SIZE] - KQ_max_f[j0/nwarps]; - KQ_f_tmp[k0/WARP_SIZE] = expf(diff); - if (diff <= SOFTMAX_FTZ_THRESHOLD) { - KQ_f_tmp[k0/WARP_SIZE] = 0.0f; - } - KQ_rowsum_add += KQ_f_tmp[k0/WARP_SIZE]; - KQ[j*(kqar*kqs_padded) + k] = KQ_f_tmp[k0/WARP_SIZE]; - } - KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add); - - // Scale previous KQ_rowsum to account for a potential increase in KQ_max: - KQ_rowsum_f[j0/nwarps] = KQ_max_scale_f[j0/nwarps]*KQ_rowsum_f[j0/nwarps] + KQ_rowsum_add; - } else { - half2 KQ2_tmp[FATTN_KQ_STRIDE/(2*WARP_SIZE)]; -#pragma unroll - for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) { - const int k = k0 + threadIdx.x; - - KQ2_tmp[k0/WARP_SIZE] = KQ2[j*(kqs_padded/2) + k]; - - if (use_logit_softcap) { - // There is no dedicated tangens hyperbolicus function for half2. - KQ2_tmp[k0/WARP_SIZE] = h2exp(KQ2_tmp[k0/WARP_SIZE]*make_half2(2.0f, 2.0f)); - KQ2_tmp[k0/WARP_SIZE] = (KQ2_tmp[k0/WARP_SIZE] - make_half2(1.0f, 1.0f)) - /(KQ2_tmp[k0/WARP_SIZE] + make_half2(1.0f, 1.0f)); - - KQ2_tmp[k0/WARP_SIZE] *= logit_softcap_2; - } - } - - half2 KQ_max_new = KQ_max_h2[j0/nwarps]; -#pragma unroll - for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) { - const int k = k0 + threadIdx.x; - - KQ2_tmp[k0/WARP_SIZE] += mask ? slope2*mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f); - KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]); - } - KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new)))); - const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new; - KQ_max_scale_h2[j0/nwarps] = h2exp(diff); - const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD)); - *((uint32_t *) &KQ_max_scale_h2[j0/nwarps]) &= ftz_mask; - KQ_max_h2[j0/nwarps] = KQ_max_new; - - half2 KQ_rowsum_add = make_half2(0.0f, 0.0f); -#pragma unroll - for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) { - const int k = k0 + threadIdx.x; - - const half2 diff = KQ2_tmp[k0/WARP_SIZE] - KQ_max_h2[j0/nwarps]; - KQ2_tmp[k0/WARP_SIZE] = h2exp(diff); - const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD)); - *((uint32_t *) &KQ2_tmp[k0/WARP_SIZE]) &= ftz_mask; - KQ_rowsum_add += KQ2_tmp[k0/WARP_SIZE]; - KQ2[j*(kqs_padded/2) + k] = KQ2_tmp[k0/WARP_SIZE]; - } - KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add); - - // Scale previous KQ_rowsum to account for a potential increase in KQ_max: - KQ_rowsum_h2[j0/nwarps] = KQ_max_scale_h2[j0/nwarps]*KQ_rowsum_h2[j0/nwarps] + KQ_rowsum_add; - } - } - - __syncthreads(); - - frag_b KQ_b[FATTN_KQ_STRIDE/(VKQ_ratio*16)][ncols/frag_n]; -#pragma unroll - for (int j0 = 0; j0 < ncols; j0 += frag_n) { -#pragma unroll - for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) { - const int k = k0 + (threadIdx.y % VKQ_ratio)*16; - nvcuda::wmma::load_matrix_sync( - KQ_b[k0/(VKQ_ratio*16)][j0/frag_n], - KQ + j0*(kqar*kqs_padded) + k, - kqar*kqs_padded); - } - } - - frag_c_VKQ VKQ_c[D/VKQ_stride][ncols/frag_n]; -#pragma unroll - for (int i_VKQ_0 = 0; i_VKQ_0 < D; i_VKQ_0 += VKQ_stride) { -#pragma unroll - for (int j = 0; j < ncols/frag_n; ++j) { - nvcuda::wmma::fill_fragment(VKQ_c[i_VKQ_0/VKQ_stride][j], 0.0f); - } - -#pragma unroll - for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) { - const int k = k0 + (threadIdx.y % VKQ_ratio)*16; - - frag_a_V v_a; - nvcuda::wmma::load_matrix_sync(v_a, V_h + (k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV); -#pragma unroll - for (int j = 0; j < ncols/frag_n; ++j) { - nvcuda::wmma::mma_sync(VKQ_c[i_VKQ_0/VKQ_stride][j], v_a, KQ_b[k0/(VKQ_ratio*16)][j], VKQ_c[i_VKQ_0/VKQ_stride][j]); - } - } - } - - __syncthreads(); - - const int offset_k = (threadIdx.y % VKQ_ratio) * (ncols*D_padded); -#pragma unroll - for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += VKQ_stride) { -#pragma unroll - for (int j0 = 0; j0 < ncols; j0 += frag_n) { - nvcuda::wmma::store_matrix_sync( - KQ + offset_k + j0*D_padded + i_KQ_0 + frag_m*(threadIdx.y/VKQ_ratio), - VKQ_c[i_KQ_0/VKQ_stride][j0/frag_n], - D_padded, nvcuda::wmma::mem_col_major); - } - } - - __syncthreads(); - -#pragma unroll - for (int j0 = 0; j0 < ncols; j0 += nwarps) { - const int j = j0 + threadIdx.y; - - half2 VKQ_scale; - if (std::is_same::value) { - VKQ_scale = make_half2(KQ_max_scale_f[j0/nwarps], KQ_max_scale_f[j0/nwarps]); - } else { - VKQ_scale = KQ_max_scale_h2[j0/nwarps]; - } - -#pragma unroll - for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { - const int i = i0 + threadIdx.x; - if (i0 + WARP_SIZE > D/2 && i >= D/2) { - break; - } - - half2 VKQ_add = make_half2(0.0f, 0.0f); -#pragma unroll - for (int l = 0; l < VKQ_ratio; ++l) { - VKQ_add += KQ2[l*(ncols*D_padded/2) + j*(D_padded/2) + i]; - } - VKQ2[j*(D_padded/2) + i] = VKQ_scale*VKQ2[j*(D_padded/2) + i] + VKQ_add; - } - } - - __syncthreads(); - } - -#pragma unroll - for (int j0 = 0; j0 < ncols; j0 += nwarps) { - const int j_VKQ = j0 + threadIdx.y; - if (ic0 + j_VKQ >= ne01) { - return; - } - const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; - - float KQ_rowsum_j; - if (std::is_same::value) { - KQ_rowsum_j = KQ_rowsum_f[j0/nwarps]; - } else { - KQ_rowsum_j = __low2float(KQ_rowsum_h2[j0/nwarps]) + __high2float(KQ_rowsum_h2[j0/nwarps]); - } - -#pragma unroll - for (int i0 = 0; i0 < D; i0 += WARP_SIZE) { - const int i = i0 + threadIdx.x; - if (i0 + WARP_SIZE > D && i >= D) { - break; - } - float dst_val = VKQ[j_VKQ*D_padded + i]; - if (parallel_blocks == 1) { - dst_val /= KQ_rowsum_j; - } - dst[j_dst*gridDim.y*D + blockIdx.y*D + i] = dst_val; - } - - if (parallel_blocks == 1 || threadIdx.x != 0) { - continue; - } - - float2 dst_meta_val; - if (std::is_same::value) { - dst_meta_val.x = KQ_max_f[j0/nwarps]; - } else { - dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]); - } - dst_meta_val.y = KQ_rowsum_j; - dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = dst_meta_val; - } -#else - NO_DEVICE_CODE; -#endif // FP16_MMA_AVAILABLE -} - -constexpr int get_max_power_of_2(int x) { - return x % 2 == 0 ? 2*get_max_power_of_2(x/2) : 1; -} - -static_assert(get_max_power_of_2(1) == 1, "Test failed."); -static_assert(get_max_power_of_2(2) == 2, "Test failed."); -static_assert(get_max_power_of_2(4) == 4, "Test failed."); -static_assert(get_max_power_of_2(6) == 2, "Test failed."); - -// Number of VKQ rows calculated in parallel: -constexpr int get_VKQ_stride(int D, int nwarps, int frag_m) { - return (get_max_power_of_2(D/frag_m) < nwarps ? get_max_power_of_2(D/frag_m) : nwarps)*frag_m; -} - -static_assert(get_VKQ_stride(128, 1, 32) == 32, "Test failed."); -static_assert(get_VKQ_stride(128, 2, 32) == 64, "Test failed."); -static_assert(get_VKQ_stride(128, 4, 32) == 128, "Test failed."); -static_assert(get_VKQ_stride( 64, 1, 32) == 32, "Test failed."); -static_assert(get_VKQ_stride( 64, 2, 32) == 64, "Test failed."); -static_assert(get_VKQ_stride( 64, 4, 32) == 64, "Test failed."); -static_assert(get_VKQ_stride( 80, 1, 16) == 16, "Test failed."); -static_assert(get_VKQ_stride( 80, 2, 16) == 16, "Test failed."); -static_assert(get_VKQ_stride( 80, 4, 16) == 16, "Test failed."); - -template -void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const ggml_tensor * KQV = dst; - const ggml_tensor * Q = dst->src[0]; - - constexpr int nwarps = 4; - - constexpr int frag_m = cols_per_block == 8 && D % 32 == 0 ? 32 : 16; - const int blocks_num_pb1 = ((Q->ne[1] + cols_per_block - 1) / cols_per_block)*Q->ne[2]*Q->ne[3]; - const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm; - - float logit_softcap; - memcpy(&logit_softcap, (const float *) KQV->op_params + 2, sizeof(float)); - - if (4*blocks_num_pb1 < 2*nsm) { - constexpr int parallel_blocks = 4; - fattn_kernel_t fattn_kernel; - if (logit_softcap == 0.0f) { - constexpr bool use_logit_softcap = false; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } else { - constexpr bool use_logit_softcap = true; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } - launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true); - return; - } - if (2*blocks_num_pb1 < 2*nsm) { - constexpr int parallel_blocks = 2; - fattn_kernel_t fattn_kernel; - if (logit_softcap == 0.0f) { - constexpr bool use_logit_softcap = false; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } else { - constexpr bool use_logit_softcap = true; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } - launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true); - return; - } - constexpr int parallel_blocks = 1; - fattn_kernel_t fattn_kernel; - if (logit_softcap == 0.0f) { - constexpr bool use_logit_softcap = false; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } else { - constexpr bool use_logit_softcap = true; - fattn_kernel = flash_attn_ext_f16< - D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t, use_logit_softcap>; - } - launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true); -} - -#define DECL_FATTN_WMMA_F16_CASE(D, cols_per_block, KQ_acc_t) \ - template void ggml_cuda_flash_attn_ext_wmma_f16_case \ - (ggml_backend_cuda_context & ctx, ggml_tensor * dst) \ - -extern DECL_FATTN_WMMA_F16_CASE( 64, 16, float); -extern DECL_FATTN_WMMA_F16_CASE( 80, 16, float); -extern DECL_FATTN_WMMA_F16_CASE( 96, 16, float); -extern DECL_FATTN_WMMA_F16_CASE(112, 16, float); -extern DECL_FATTN_WMMA_F16_CASE(128, 16, float); -extern DECL_FATTN_WMMA_F16_CASE(256, 16, float); - -extern DECL_FATTN_WMMA_F16_CASE( 64, 32, float); -extern DECL_FATTN_WMMA_F16_CASE( 80, 32, float); -extern DECL_FATTN_WMMA_F16_CASE( 96, 32, float); -extern DECL_FATTN_WMMA_F16_CASE(112, 32, float); -extern DECL_FATTN_WMMA_F16_CASE(128, 32, float); -// extern DECL_FATTN_WMMA_F16_CASE(256, 16, float); - -extern DECL_FATTN_WMMA_F16_CASE( 64, 8, half); -extern DECL_FATTN_WMMA_F16_CASE( 96, 8, half); -extern DECL_FATTN_WMMA_F16_CASE(128, 8, half); -extern DECL_FATTN_WMMA_F16_CASE(256, 8, half); - -extern DECL_FATTN_WMMA_F16_CASE( 64, 16, half); -extern DECL_FATTN_WMMA_F16_CASE( 80, 16, half); -extern DECL_FATTN_WMMA_F16_CASE( 96, 16, half); -extern DECL_FATTN_WMMA_F16_CASE(112, 16, half); -extern DECL_FATTN_WMMA_F16_CASE(128, 16, half); -extern DECL_FATTN_WMMA_F16_CASE(256, 16, half); - -extern DECL_FATTN_WMMA_F16_CASE( 64, 32, half); -extern DECL_FATTN_WMMA_F16_CASE( 80, 32, half); -extern DECL_FATTN_WMMA_F16_CASE( 96, 32, half); -extern DECL_FATTN_WMMA_F16_CASE(112, 32, half); -extern DECL_FATTN_WMMA_F16_CASE(128, 32, half); -extern DECL_FATTN_WMMA_F16_CASE(256, 16, half); +void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml/src/ggml-cuda/fattn.cu b/ggml/src/ggml-cuda/fattn.cu index 0b26b0f8e..b1e66d470 100644 --- a/ggml/src/ggml-cuda/fattn.cu +++ b/ggml/src/ggml-cuda/fattn.cu @@ -1,5 +1,6 @@ #include "common.cuh" #include "fattn-common.cuh" +#include "fattn-mma-f16.cuh" #include "fattn-tile-f16.cuh" #include "fattn-tile-f32.cuh" #include "fattn-vec-f16.cuh" @@ -7,144 +8,56 @@ #include "fattn-wmma-f16.cuh" #include "fattn.cuh" -#include +template +static void ggml_cuda_flash_attn_ext_mma_f16_switch_hs(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; -static void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const ggml_tensor * KQV = dst; - const ggml_tensor * Q = dst->src[0]; - - const enum ggml_prec prec = ggml_flash_attn_ext_get_prec(KQV); - - if (prec != GGML_PREC_DEFAULT) { - if (Q->ne[1] <= 32 || Q->ne[0] > 128) { - constexpr int cols_per_block = 16; - switch (Q->ne[0]) { - case 64: - ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, float>(ctx, dst); - break; - case 80: - ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, float>(ctx, dst); - break; - case 96: - ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, float>(ctx, dst); - break; - case 112: - ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, float>(ctx, dst); - break; - case 128: - ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); - break; - case 256: - ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, float>(ctx, dst); - break; - default: - GGML_ABORT("fatal error"); - break; - } - } else { - constexpr int cols_per_block = 32; - switch (Q->ne[0]) { - case 64: - ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, float>(ctx, dst); - break; - case 80: - ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, float>(ctx, dst); - break; - case 96: - ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, float>(ctx, dst); - break; - case 112: - ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, float>(ctx, dst); - break; - case 128: - ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); - break; - // case 256: - // ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); - // break; - default: - GGML_ABORT("fatal error"); - break; - } - } - return; - } - - if (Q->ne[1] <= 8 && Q->ne[0] % WARP_SIZE == 0) { - constexpr int cols_per_block = 8; - switch (Q->ne[0]) { - case 64: - ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst); - break; - case 96: - ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst); - break; - case 128: - ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst); - break; - case 256: - ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst); - break; - default: - GGML_ABORT("fatal error"); - break; - } - return; - } - - if (Q->ne[1] <= 32) { - constexpr int cols_per_block = 16; - switch (Q->ne[0]) { - case 64: - ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst); - break; - case 80: - ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, half>(ctx, dst); - break; - case 96: - ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst); - break; - case 112: - ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, half>(ctx, dst); - break; - case 128: - ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst); - break; - case 256: - ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst); - break; - default: - GGML_ABORT("fatal error"); - break; - } - return; - } - - constexpr int cols_per_block = 32; switch (Q->ne[0]) { case 64: - ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst); + ggml_cuda_flash_attn_ext_mma_f16_case< 64, cols_per_block>(ctx, dst); break; case 80: - ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, half>(ctx, dst); + ggml_cuda_flash_attn_ext_mma_f16_case< 80, cols_per_block>(ctx, dst); break; case 96: - ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst); + ggml_cuda_flash_attn_ext_mma_f16_case< 96, cols_per_block>(ctx, dst); break; case 112: - ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, half>(ctx, dst); + ggml_cuda_flash_attn_ext_mma_f16_case<112, cols_per_block>(ctx, dst); break; case 128: - ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst); + ggml_cuda_flash_attn_ext_mma_f16_case<128, cols_per_block>(ctx, dst); break; case 256: - ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst); + ggml_cuda_flash_attn_ext_mma_f16_case<256, cols_per_block>(ctx, dst); break; default: GGML_ABORT("fatal error"); break; } } + +static void ggml_cuda_flash_attn_ext_mma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; + + if (Q->ne[1] <= 8) { + ggml_cuda_flash_attn_ext_mma_f16_switch_hs<8>(ctx, dst); + return; + } + + if (Q->ne[1] <= 16) { + ggml_cuda_flash_attn_ext_mma_f16_switch_hs<16>(ctx, dst); + return; + } + + if (Q->ne[1] <= 32) { + ggml_cuda_flash_attn_ext_mma_f16_switch_hs<32>(ctx, dst); + return; + } + + ggml_cuda_flash_attn_ext_mma_f16_switch_hs<64>(ctx, dst); +} + #define FATTN_VEC_F16_CASE(D, type_K, type_V) \ if (Q->ne[0] == (D) && K->type == (type_K) && V->type == (type_V)) { \ ggml_cuda_flash_attn_ext_vec_f16_case(ctx, dst); \ @@ -322,11 +235,19 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst return; } - if (!fp16_mma_available(cc)) { - if (Q->ne[1] <= 8) { - ggml_cuda_flash_attn_ext_vec_f16(ctx, dst); + if (!new_mma_available(cc)) { + if (prec == GGML_PREC_DEFAULT) { + if (Q->ne[1] <= 8) { + ggml_cuda_flash_attn_ext_vec_f16(ctx, dst); + } else { + ggml_cuda_flash_attn_ext_tile_f16(ctx, dst); + } } else { - ggml_cuda_flash_attn_ext_tile_f16(ctx, dst); + if (Q->ne[1] <= 8) { + ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); + } else { + ggml_cuda_flash_attn_ext_tile_f32(ctx, dst); + } } return; } @@ -341,5 +262,10 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst } } - ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); + // The MMA implementation needs Turing or newer, use the old WMMA code for Volta: + if (cc == GGML_CUDA_CC_VOLTA) { + ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); + } + + ggml_cuda_flash_attn_ext_mma_f16(ctx, dst); } diff --git a/ggml/src/ggml-cuda/mma.cuh b/ggml/src/ggml-cuda/mma.cuh index 7d11540af..9788a1389 100644 --- a/ggml/src/ggml-cuda/mma.cuh +++ b/ggml/src/ggml-cuda/mma.cuh @@ -1,11 +1,67 @@ +// This file contains primitives that expose the tensor core PTX instructions for CUDA code. +// The primitives can be used in a similar way as the nvcuda::wmma interface but with a well-defined memory layout. +// The documentation for the PTX instructions can be found under: +// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#matrix-multiply-accumulate-operation-using-mma-instruction +// +// Like with nvcuda::wmma there are three types of matrix tiles: A, B, and C with A @ B = C. +// A is a row-major matrix with shape I x K. +// B is a column-major matrix with shape K x J. +// C is a column-major matrix with shape I x J. +// Note that along their lowest dimension I, J, and K are measured in physical 32 bit elements instead of logical elements. +// The functions get_i, get_j, and get_k can be used to get the physical 32 bit index of the lth element of a thread within a tile. +// All matrix tiles have ne physical 32 bit elements per warp. +// +// As described in the documentation, all pointers for load_ldmatrix must be to shared memory and aligned to 16 bytes. + #include "common.cuh" -struct mma_int_A_I16K4 { + +#if CUDART_VERSION >= 11800 + +static __device__ __forceinline__ int ggml_cuda_movmatrix(const int x) { + int ret = 0; + +#ifdef NEW_MMA_AVAILABLE + asm("movmatrix.sync.aligned.m8n8.trans.b16 %0, %1;" + : "+r"(ret) : "r"(x)); +#else + NO_DEVICE_CODE; +#endif // defined(NEW_MMA_AVAILABLE) + return ret; +} + +#else + +static __device__ __forceinline__ int ggml_cuda_movmatrix(const int x) { + // Imagine transposing row-major matrix to column-major matrix. + const int src_i_low = 2 * (threadIdx.x % 4); + const int src_i_high = src_i_low + 1; + const int src_j = threadIdx.x / 4; + + const int src_laneid_low = src_i_low * 4 + src_j / 2; + const int src_laneid_high = src_i_high * 4 + src_j / 2; + + const int shift_low = ((src_j + 0) % 2) * 16; + const int shift_high = ((src_j + 1) % 2) * 16; + + const int ret_low = (__shfl_sync(0xFFFFFFFF, x, src_laneid_low, WARP_SIZE) >> shift_low) & 0x0000FFFF; + const int ret_high = (__shfl_sync(0xFFFFFFFF, x, src_laneid_high, WARP_SIZE) << shift_high) & 0xFFFF0000; + + return ret_low | ret_high; +} + +#endif // CUDART_VERSION >= 11800 + + +template +struct mma_A_I16K4 { + static_assert(sizeof(T) == 4, "bad type size"); + static constexpr int I = 16; static constexpr int K = 4; static constexpr int ne = 2; - int x[ne] = {0}; + T x[ne]; static __device__ __forceinline__ int get_i(const int l) { const int ret = (l%2) * (I/2) + threadIdx.x / K; @@ -21,27 +77,35 @@ struct mma_int_A_I16K4 { return ret; } - __device__ __forceinline__ void load(const int * __restrict__ xs0, const int & stride) { -#if defined(INT8_MMA_AVAILABLE) - const int * xs = xs0 + (threadIdx.x%I)*stride; - asm("ldmatrix.sync.aligned.m8n8.x2.b16 {%0, %1}, [%2];" - : "+r"(x[0]), "+r"(x[1]) - : "l"(xs)); -#else + __device__ __forceinline__ void load_generic(const T * __restrict__ xs0, const int & stride) { #pragma unroll for (int l = 0; l < ne; ++l) { x[l] = xs0[get_i(l)*stride + get_k(l)]; } -#endif // defined(INT8_MMA_AVAILABLE) + } + + __device__ __forceinline__ void load_ldmatrix(const T * __restrict__ xs0, const int & stride) { +#ifdef NEW_MMA_AVAILABLE + int * xi = (int *) x; + const int * xs = (const int *) xs0 + (threadIdx.x%I)*stride; + asm("ldmatrix.sync.aligned.m8n8.x2.b16 {%0, %1}, [%2];" + : "+r"(xi[0]), "+r"(xi[1]) + : "l"(xs)); +#else + load_generic(xs0, stride); +#endif // NEW_MMA_AVAILABLE } }; -struct mma_int_A_I16K8 { +template +struct mma_A_I16K8 { + static_assert(sizeof(T) == 4, "bad type size"); + static constexpr int I = 16; static constexpr int K = 8; static constexpr int ne = 4; - int x[ne] = {0}; + T x[ne]; static __device__ __forceinline__ int get_i(const int l) { const int ret = (l%2) * (I/2) + threadIdx.x / (K/2); @@ -57,31 +121,62 @@ struct mma_int_A_I16K8 { return ret; } - __device__ __forceinline__ void load(const int * __restrict__ xs0, const int & stride) { -#if defined(INT8_MMA_AVAILABLE) - const int * xs = xs0 + (threadIdx.x%I)*stride + (threadIdx.x/I)*(K/2); - asm("ldmatrix.sync.aligned.m8n8.x4.b16 {%0, %1, %2, %3}, [%4];" - : "+r"(x[0]), "+r"(x[1]), "+r"(x[2]), "+r"(x[3]) - : "l"(xs)); -#else + __device__ __forceinline__ void load_generic(const T * __restrict__ xs0, const int & stride) { #pragma unroll for (int l = 0; l < ne; ++l) { x[l] = xs0[get_i(l)*stride + get_k(l)]; } -#endif // defined(INT8_MMA_AVAILABLE) } - __device__ __forceinline__ void load_low(const int * __restrict__ xs0, const int & stride) { - ((mma_int_A_I16K4 *) x)[0].load(xs0, stride); + __device__ __forceinline__ void load_ldmatrix(const T * __restrict__ xs0, const int & stride) { +#ifdef NEW_MMA_AVAILABLE + int * xi = (int * ) x; + const int * xs = (const int *) xs0 + (threadIdx.x%I)*stride + (threadIdx.x/I)*(K/2); + asm("ldmatrix.sync.aligned.m8n8.x4.b16 {%0, %1, %2, %3}, [%4];" + : "+r"(xi[0]), "+r"(xi[1]), "+r"(xi[2]), "+r"(xi[3]) + : "l"(xs)); +#else + GGML_UNUSED(xs0); + GGML_UNUSED(stride); + NO_DEVICE_CODE; +#endif // NEW_MMA_AVAILABLE + } + + __device__ __forceinline__ void load_ldmatrix_trans(const T * __restrict__ xs0, const int & stride) { +#ifdef NEW_MMA_AVAILABLE + int * xi = (int * ) x; + const int * xs = (const int *) xs0 + (threadIdx.x%I)*stride + (threadIdx.x/I)*(K/2); + asm("ldmatrix.sync.aligned.m8n8.x4.trans.b16 {%0, %1, %2, %3}, [%4];" + : "+r"(xi[0]), "+r"(xi[2]), "+r"(xi[1]), "+r"(xi[3]) + : "l"(xs)); +#else + GGML_UNUSED(xs0); + GGML_UNUSED(stride); + NO_DEVICE_CODE; +#endif // NEW_MMA_AVAILABLE + } + + __device__ __forceinline__ void transpose() { + int * xi = (int *) x; + xi[0] = ggml_cuda_movmatrix(xi[0]); + + const int tmp = ggml_cuda_movmatrix(xi[1]); + xi[1] = ggml_cuda_movmatrix(xi[2]); + xi[2] = tmp; + + xi[3] = ggml_cuda_movmatrix(xi[3]); } }; -struct mma_int_B_J8K4 { +template +struct mma_B_J8K4 { + static_assert(sizeof(T) == 4, "bad type size"); + static constexpr int J = 8; static constexpr int K = 4; static constexpr int ne = 1; - int x[ne] = {0}; + T x[ne]; static __device__ __forceinline__ int get_j(const int /* l */) { const int ret = threadIdx.x / K; @@ -97,27 +192,34 @@ struct mma_int_B_J8K4 { return ret; } - __device__ __forceinline__ void load(const int * __restrict__ xs0, const int & stride) { -#if defined(INT8_MMA_AVAILABLE) && false // Loading as 4 byte values is faster - const int * xs = xs0 + (threadIdx.x%J)*stride; - asm("ldmatrix.sync.aligned.m8n8.x1.b16 {%0}, [%1];" - : "+r"(x[0]) - : "l"(xs)); -#else + __device__ __forceinline__ void load_generic(const T * __restrict__ xs0, const int & stride) { #pragma unroll for (int l = 0; l < ne; ++l) { x[l] = xs0[get_j(l)*stride + get_k(l)]; } -#endif // defined(INT8_MMA_AVAILABLE) + } + + __device__ __forceinline__ void load_ldmatrix(const T * __restrict__ xs0, const int & stride) { +#ifdef NEW_MMA_AVAILABLE + int * xi = (int *) x; + const int * xs = (const int *) xs0 + (threadIdx.x%J)*stride; + asm("ldmatrix.sync.aligned.m8n8.x1.b16 {%0}, [%1];" + : "+r"(xi[0]) : "l"(xs)); +#else + load_generic(xs0, stride); +#endif // NEW_MMA_AVAILABLE } }; -struct mma_int_B_J8K8 { +template +struct mma_B_J8K8 { + static_assert(sizeof(T) == 4, "bad type size"); + static constexpr int J = 8; static constexpr int K = 8; static constexpr int ne = 2; - int x[ne] = {0}; + T x[ne]; static __device__ __forceinline__ int get_j(const int /* l */) { const int ret = threadIdx.x / (K/2); @@ -133,22 +235,31 @@ struct mma_int_B_J8K8 { return ret; } - __device__ __forceinline__ void load(const int * __restrict__ xs0, const int & stride) { -#if defined(INT8_MMA_AVAILABLE) && false // Loading as 4 byte values is faster - const int * xs = xs0 + (threadIdx.x%J)*stride + ((threadIdx.x/J)*(K/2)) % K; - asm("ldmatrix.sync.aligned.m8n8.x2.b16 {%0, %1}, [%2];" - : "+r"(x[0]), "+r"(x[1]) - : "l"(xs)); -#else + __device__ __forceinline__ void load_generic(const T * __restrict__ xs0, const int & stride) { #pragma unroll for (int l = 0; l < ne; ++l) { x[l] = xs0[get_j(l)*stride + get_k(l)]; } -#endif // defined(INT8_MMA_AVAILABLE) + } + + __device__ __forceinline__ void load_ldmatrix(const T * __restrict__ xs0, const int & stride) { +#ifdef NEW_MMA_AVAILABLE + int * xi = (int *) x; + const int * xs = (const int *) xs0 + (threadIdx.x%J)*stride + ((threadIdx.x/J)*(K/2)) % K; + asm("ldmatrix.sync.aligned.m8n8.x2.b16 {%0, %1}, [%2];" + : "+r"(xi[0]), "+r"(xi[1]) + : "l"(xs)); +#else + load_generic(xs0, stride); +#endif // NEW_MMA_AVAILABLE } }; -struct mma_int_C_I16J8 { +template +struct mma_C_I16J8 {}; + +template <> +struct mma_C_I16J8 { static constexpr int I = 16; static constexpr int J = 8; static constexpr int ne = 4; @@ -169,8 +280,8 @@ struct mma_int_C_I16J8 { return ret; } - __device__ __forceinline__ void mma_K4(const mma_int_A_I16K4 & mma_A, const mma_int_B_J8K4 & mma_B) { -#ifdef INT8_MMA_AVAILABLE + __device__ __forceinline__ void mma(const mma_A_I16K4 & mma_A, const mma_B_J8K4 & mma_B) { +#ifdef NEW_MMA_AVAILABLE #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k16.row.col.s32.s8.s8.s32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" : "+r"(x[0]), "+r"(x[1]), "+r"(x[2]), "+r"(x[3]) @@ -188,11 +299,11 @@ struct mma_int_C_I16J8 { GGML_UNUSED(mma_A); GGML_UNUSED(mma_B); NO_DEVICE_CODE; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } - __device__ __forceinline__ void mma_K8(const mma_int_A_I16K8 & mma_A, const mma_int_B_J8K8 & mma_B) { -#ifdef INT8_MMA_AVAILABLE + __device__ __forceinline__ void mma(const mma_A_I16K8 & mma_A, const mma_B_J8K8 & mma_B) { +#ifdef NEW_MMA_AVAILABLE #if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE asm("mma.sync.aligned.m16n8k32.row.col.s32.s8.s8.s32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" : "+r"(x[0]), "+r"(x[1]), "+r"(x[2]), "+r"(x[3]) @@ -216,6 +327,132 @@ struct mma_int_C_I16J8 { GGML_UNUSED(mma_A); GGML_UNUSED(mma_B); NO_DEVICE_CODE; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE + } +}; + +template <> +struct mma_C_I16J8 { + static constexpr int I = 16; + static constexpr int J = 4; + static constexpr int ne = 2; + + half2 x[ne] = {{0.0f, 0.0f}, {0.0f, 0.0f}}; + + static __device__ __forceinline__ int get_i(const int l) { + const int ret = l * (I/2) + threadIdx.x / J; + GGML_CUDA_ASSUME(ret >= 0); + GGML_CUDA_ASSUME(ret < I); + return ret; + } + + static __device__ __forceinline__ int get_j(const int /* l */) { + const int ret = threadIdx.x % J; + GGML_CUDA_ASSUME(ret >= 0); + GGML_CUDA_ASSUME(ret < J); + return ret; + } + + __device__ __forceinline__ void mma(const mma_A_I16K8 & mma_A, const mma_B_J8K8 & mma_B) { +#ifdef NEW_MMA_AVAILABLE + int * Axi = (int *) mma_A.x; + int * Bxi = (int *) mma_B.x; + int * xi = (int *) x; +#if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE + asm("mma.sync.aligned.m16n8k16.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3, %4, %5}, {%6, %7}, {%0, %1};" + : "+r"(xi[0]), "+r"(xi[1]) + : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[1])); +#else + // On Turing m16n8k16 mma is not available, use 2x m8n8k8 mma instead: + asm("mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3}, {%4}, {%0, %1};" + : "+r"(xi[0]), "+r"(xi[1]) + : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[0])); + asm("mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16 {%0, %1}, {%2, %3}, {%4}, {%0, %1};" + : "+r"(xi[0]), "+r"(xi[1]) + : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[1])); +#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE +#else + GGML_UNUSED(mma_A); + GGML_UNUSED(mma_B); + NO_DEVICE_CODE; +#endif // NEW_MMA_AVAILABLE + } + + __device__ __forceinline__ mma_B_J8K8 to_mma_B() { + mma_B_J8K8 mma_B; + + int * xi = (int *) x; + int * Bxi = (int *) mma_B.x; + Bxi[0] = ggml_cuda_movmatrix(xi[0]); + Bxi[1] = ggml_cuda_movmatrix(xi[1]); + + return mma_B; + } +}; + +template <> +struct mma_C_I16J8 { + static constexpr int I = 16; + static constexpr int J = 8; + static constexpr int ne = 4; + + float x[ne] = {0.0f, 0.0f, 0.0f, 0.0f}; + + static __device__ __forceinline__ int get_i(const int l) { + const int ret = (l/2) * (I/2) + threadIdx.x / (J/2); + GGML_CUDA_ASSUME(ret >= 0); + GGML_CUDA_ASSUME(ret < I); + return ret; + } + + static __device__ __forceinline__ int get_j(const int l) { + const int ret = 2 * (threadIdx.x % (J/2)) + l%2; + GGML_CUDA_ASSUME(ret >= 0); + GGML_CUDA_ASSUME(ret < J); + return ret; + } + + __device__ __forceinline__ void mma(const mma_A_I16K8 & mma_A, const mma_B_J8K8 & mma_B) { +#ifdef NEW_MMA_AVAILABLE + int * Axi = (int *) mma_A.x; + int * Bxi = (int *) mma_B.x; + int * xi = (int *) x; +#if __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE + asm("mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5, %6, %7}, {%8, %9}, {%0, %1, %2, %3};" + : "+r"(xi[0]), "+r"(xi[1]), "+r"(xi[2]), "+r"(xi[3]) + : "r"(Axi[0]), "r"(Axi[1]), "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[0]), "r"(Bxi[1])); +#else + // On Turing m16n8k16 mma is not available, use 2x m8n8k8 mma instead: + asm("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" + : "+r"(xi[0]), "+r"(xi[1]), "+r"(xi[2]), "+r"(xi[3]) + : "r"(Axi[0]), "r"(Axi[1]), "r"(Bxi[0])); + asm("mma.sync.aligned.m16n8k8.row.col.f32.f16.f16.f32 {%0, %1, %2, %3}, {%4, %5}, {%6}, {%0, %1, %2, %3};" + : "+r"(xi[0]), "+r"(xi[1]), "+r"(xi[2]), "+r"(xi[3]) + : "r"(Axi[2]), "r"(Axi[3]), "r"(Bxi[1])); +#endif // __CUDA_ARCH__ >= GGML_CUDA_CC_AMPERE +#else + GGML_UNUSED(mma_A); + GGML_UNUSED(mma_B); + NO_DEVICE_CODE; +#endif // NEW_MMA_AVAILABLE + } + + __device__ __forceinline__ mma_B_J8K8 to_mma_B() { + mma_B_J8K8 mma_B; + mma_B.x[0] = make_half2(x[0], x[1]); + mma_B.x[1] = make_half2(x[2], x[3]); + + int * Bxi = (int *) mma_B.x; + Bxi[0] = ggml_cuda_movmatrix(Bxi[0]); + Bxi[1] = ggml_cuda_movmatrix(Bxi[1]); + + return mma_B; + } + + __device__ __forceinline__ void load_generic(const float * __restrict__ xs0, const int & stride) { +#pragma unroll + for (int l = 0; l < ne; ++l) { + x[l] = xs0[get_j(l)*stride + get_i(l)]; + } } }; diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu index 270251df4..83cb78cbd 100644 --- a/ggml/src/ggml-cuda/mmq.cu +++ b/ggml/src/ggml-cuda/mmq.cu @@ -132,7 +132,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { return false; } - if (int8_mma_available(cc)) { + if (new_mma_available(cc)) { return true; } diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh index 3cd508a1d..c05c84778 100644 --- a/ggml/src/ggml-cuda/mmq.cuh +++ b/ggml/src/ggml-cuda/mmq.cuh @@ -87,7 +87,7 @@ struct tile_x_sizes { }; static constexpr int get_mmq_x_max_host(const int cc) { - return int8_mma_available(cc) ? 128 : + return new_mma_available(cc) ? 128 : #ifdef GGML_CUDA_FORCE_MMQ cc >= GGML_CUDA_CC_VOLTA && cc < GGML_CUDA_CC_OFFSET_AMD ? 128 : 64; #else @@ -96,9 +96,9 @@ static constexpr int get_mmq_x_max_host(const int cc) { } static constexpr __device__ int get_mmq_x_max_device() { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE return 128; -#else // INT8_MMA_AVAILABLE +#else // NEW_MMA_AVAILABLE #if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) return 128; @@ -116,7 +116,7 @@ static constexpr __device__ int get_mmq_x_max_device() { #endif // __CUDA_ARCH__ >= GGML_CUDA_CC_VOLTA #endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } static constexpr int get_mmq_y_host(const int cc) { @@ -209,10 +209,10 @@ static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) { #define MMQ_TILE_Y_K (WARP_SIZE + WARP_SIZE/QI8_1) static int mmq_get_granularity_host(const int mmq_x, const int cc) { - return int8_mma_available(cc) && mmq_x >= 48 ? 16 : 8; + return new_mma_available(cc) && mmq_x >= 48 ? 16 : 8; } -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE static constexpr __device__ int mmq_get_granularity_device(const int mmq_x) { return mmq_x >= 48 ? 16 : 8; } @@ -220,21 +220,21 @@ static constexpr __device__ int mmq_get_granularity_device(const int mmq_x) { static constexpr __device__ int mmq_get_granularity_device(const int /* mmq_x */) { return 8; } -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE // ------------------------------------------------------------ template static __device__ __forceinline__ void load_tiles_q4_0( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + 2*WARP_SIZE); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_0, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kbx = threadIdx.x / QI4_0; const int kqsx = threadIdx.x % QI4_0; @@ -250,12 +250,12 @@ template static __device__ __forceinlin const block_q4_0 * bxi = (const block_q4_0 *) x + kbx0 + i*stride + kbx; const int qs0 = get_int_b2(bxi->qs, kqsx); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI4_0) + kqsx + 0] = __vsubss4((qs0 >> 0) & 0x0F0F0F0F, 0x08080808); x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI4_0) + kqsx + QI4_0] = __vsubss4((qs0 >> 4) & 0x0F0F0F0F, 0x08080808); #else x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = qs0; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int blocks_per_tile_x_row = WARP_SIZE / QI4_0; @@ -271,11 +271,11 @@ template static __device__ __forceinlin const block_q4_0 * bxi = (const block_q4_0 *) x + kbx0 + i*stride + kbxd; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; #else x_df[i*(WARP_SIZE/QI4_0) + i/QI4_0 + kbxd] = bxi->d; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } @@ -322,14 +322,14 @@ static __device__ __forceinline__ void vec_dot_q4_0_q8_1_dp4a( template static __device__ __forceinline__ void load_tiles_q4_1( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + 2*WARP_SIZE); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q4_1, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kbx = threadIdx.x / QI4_1; const int kqsx = threadIdx.x % QI4_1; @@ -345,12 +345,12 @@ template static __device__ __forceinlin const block_q4_1 * bxi = (const block_q4_1 *) x + kbx0 + i*stride + kbx; const int qs0 = get_int_b4(bxi->qs, kqsx); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI4_1) + kqsx + 0] = (qs0 >> 0) & 0x0F0F0F0F; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI4_1) + kqsx + QI4_1] = (qs0 >> 4) & 0x0F0F0F0F; #else x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = qs0; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int blocks_per_tile_x_row = WARP_SIZE / QI4_1; @@ -366,11 +366,11 @@ template static __device__ __forceinlin const block_q4_1 * bxi = (const block_q4_1 *) x + kbx0 + i*stride + kbxd; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = bxi->dm; #else x_dm[i*(WARP_SIZE/QI4_1) + i/QI4_1 + kbxd] = bxi->dm; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } @@ -417,14 +417,14 @@ static __device__ __forceinline__ void vec_dot_q4_1_q8_1_dp4a( template static __device__ __forceinline__ void load_tiles_q5_0( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_0, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kbx = threadIdx.x / QI5_0; const int kqsx = threadIdx.x % QI5_0; @@ -456,13 +456,13 @@ template static __device__ __forceinlin qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 qs1 = __vsubss4(qs1, 0x10101010); // subtract 16 -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI5_0) + kqsx + 0] = qs0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kbx*(2*QI5_0) + kqsx + QI5_0] = qs1; #else x_qs[i*(2*WARP_SIZE + 1) + kbx*(2*QI5_0) + kqsx + 0] = qs0; x_qs[i*(2*WARP_SIZE + 1) + kbx*(2*QI5_0) + kqsx + QI5_0] = qs1; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int blocks_per_tile_x_row = WARP_SIZE / QI5_0; @@ -478,25 +478,25 @@ template static __device__ __forceinlin const block_q5_0 * bxi = (const block_q5_0 *) x + kbx0 + i*stride + kbxd; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; #else x_df[i*(WARP_SIZE/QI5_0) + i/QI5_0 + kbxd] = bxi->d; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } template static __device__ __forceinline__ void load_tiles_q5_1( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + 2*WARP_SIZE); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q5_1, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kbx = threadIdx.x / QI5_1; const int kqsx = threadIdx.x % QI5_1; @@ -526,13 +526,13 @@ template static __device__ __forceinlin qs1 |= (qh << 2) & 0x00100000; // 18 -> 20 qs1 |= (qh << 9) & 0x10000000; // 19 -> 28 -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI5_1) + kqsx + 0] = qs0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kbx*(2*QI5_1) + kqsx + QI5_1] = qs1; #else x_qs[i*(2*WARP_SIZE + 1) + kbx*(2*QI5_1) + kqsx + 0] = qs0; x_qs[i*(2*WARP_SIZE + 1) + kbx*(2*QI5_1) + kqsx + QI5_1] = qs1; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int blocks_per_tile_x_row = WARP_SIZE / QI5_1; @@ -548,25 +548,25 @@ template static __device__ __forceinlin const block_q5_1 * bxi = (const block_q5_1 *) x + kbx0 + i*stride + kbxd; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_dm[i*MMQ_MMA_TILE_X_K_Q8_1 + kbxd] = bxi->dm; #else x_dm[i*(WARP_SIZE/QI5_1) + i/QI5_1 + kbxd] = bxi->dm; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } template static __device__ __forceinline__ void load_tiles_q8_0( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_tile + 2*WARP_SIZE); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q8_0, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kbx = threadIdx.x / QI8_0; const int kqsx = threadIdx.x % QI8_0; @@ -581,13 +581,13 @@ template static __device__ __forceinlin const block_q8_0 * bxi = (const block_q8_0 *) x + kbx0 + i*stride + kbx; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 0 + threadIdx.x] = get_int_b2(bxi[0].qs, kqsx); x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + WARP_SIZE + threadIdx.x] = get_int_b2(bxi[WARP_SIZE/QI8_0].qs, kqsx); #else x_qs[i*(2*WARP_SIZE + 1) + 0 + threadIdx.x] = get_int_b2(bxi[0].qs, kqsx); x_qs[i*(2*WARP_SIZE + 1) + WARP_SIZE + threadIdx.x] = get_int_b2(bxi[WARP_SIZE/QI8_0].qs, kqsx); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int blocks_per_tile_x_row = 2*WARP_SIZE / QI8_0; @@ -603,11 +603,11 @@ template static __device__ __forceinlin const block_q8_0 * bxi = (const block_q8_0 *) x + kbx0 + i*stride + kbxd; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = bxi->d; #else x_df[i*(2*WARP_SIZE/QI8_0) + i/(QI8_0/2) + kbxd] = bxi->d; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } @@ -645,9 +645,9 @@ template static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int & k00) { - typedef mma_int_A_I16K8 mma_A; - typedef mma_int_B_J8K8 mma_B; - typedef mma_int_C_I16J8 mma_C; + typedef mma_A_I16K8 mma_A; + typedef mma_B_J8K8 mma_B; + typedef mma_C_I16J8 mma_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; @@ -672,7 +672,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_0) { const int k0 = k00 + k01; - A[n][k01/QI8_0].load(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q8_0 + k0, MMQ_MMA_TILE_X_K_Q8_0); + A[n][k01/QI8_0].load_ldmatrix(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q8_0 + k0, MMQ_MMA_TILE_X_K_Q8_0); } #pragma unroll @@ -695,7 +695,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( mma_B B; float dB[mma_C::ne/2]; - B.load(y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); + B.load_generic(y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); // faster than load_ldmatrix #pragma unroll for (int l = 0; l < mma_C::ne/2; ++l) { @@ -711,7 +711,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma( #pragma unroll for (int n = 0; n < ntx; ++n) { mma_C C; - C.mma_K8(A[n][k01/QI8_0], B); + C.mma(A[n][k01/QI8_0], B); #pragma unroll for (int l = 0; l < mma_C::ne; ++l) { @@ -756,9 +756,9 @@ template static __device__ __forceinline__ void vec_dot_q8_1_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int & k00) { - typedef mma_int_A_I16K8 mma_A; - typedef mma_int_B_J8K8 mma_B; - typedef mma_int_C_I16J8 mma_C; + typedef mma_A_I16K8 mma_A; + typedef mma_B_J8K8 mma_B; + typedef mma_C_I16J8 mma_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; @@ -782,7 +782,7 @@ static __device__ __forceinline__ void vec_dot_q8_1_q8_1_mma( for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_1) { const int k0 = k00 + k01; - A[n][k01/QI8_1].load(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q8_1 + k0, MMQ_MMA_TILE_X_K_Q8_1); + A[n][k01/QI8_1].load_ldmatrix(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q8_1 + k0, MMQ_MMA_TILE_X_K_Q8_1); } #pragma unroll @@ -805,7 +805,7 @@ static __device__ __forceinline__ void vec_dot_q8_1_q8_1_mma( mma_B B; float2 dsB[mma_C::ne/2]; - B.load(y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); + B.load_generic(y_qs + j0*MMQ_TILE_Y_K + k01, MMQ_TILE_Y_K); // faster than load_ldmatrix #pragma unroll for (int l = 0; l < mma_C::ne/2; ++l) { @@ -817,7 +817,7 @@ static __device__ __forceinline__ void vec_dot_q8_1_q8_1_mma( #pragma unroll for (int n = 0; n < ntx; ++n) { mma_C C; - C.mma_K8(A[n][k01/QI8_1], B); + C.mma(A[n][k01/QI8_1], B); #pragma unroll for (int l = 0; l < mma_C::ne; ++l) { @@ -864,12 +864,12 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_dp4a( template static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int & k00) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE - typedef mma_int_A_I16K4 mma_A; - typedef mma_int_A_I16K8 mma_A_K8; - typedef mma_int_B_J8K4 mma_B; - typedef mma_int_C_I16J8 mma_C; + typedef mma_A_I16K4 mma_A; + typedef mma_A_I16K8 mma_A_K8; + typedef mma_B_J8K4 mma_B; + typedef mma_C_I16J8 mma_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; @@ -893,7 +893,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( for (int k01 = 0; k01 < WARP_SIZE; k01 += 8) { const int k0 = k00 + k01; - ((mma_A_K8 *) A[n])[k01/8].load(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q3_K + k0, MMQ_MMA_TILE_X_K_Q3_K); + ((mma_A_K8 *) A[n])[k01/8].load_ldmatrix(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q3_K + k0, MMQ_MMA_TILE_X_K_Q3_K); } #pragma unroll @@ -916,8 +916,9 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( mma_B B[2]; float dB[mma_C::ne/2]; - B[0].load(y_qs + j0*MMQ_TILE_Y_K + (k01 + 0), MMQ_TILE_Y_K); - B[1].load(y_qs + j0*MMQ_TILE_Y_K + (k01 + mma_B::K), MMQ_TILE_Y_K); + // Here load_generic is faster than load_ldmatrix. + B[0].load_generic(y_qs + j0*MMQ_TILE_Y_K + (k01 + 0), MMQ_TILE_Y_K); + B[1].load_generic(y_qs + j0*MMQ_TILE_Y_K + (k01 + mma_B::K), MMQ_TILE_Y_K); #pragma unroll for (int l = 0; l < mma_C::ne/2; ++l) { @@ -929,8 +930,8 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( #pragma unroll for (int n = 0; n < ntx; ++n) { mma_C C[2]; - C[0].mma_K4(A[n][k01/4 + 0], B[0]); - C[1].mma_K4(A[n][k01/4 + 1], B[1]); + C[0].mma(A[n][k01/4 + 0], B[0]); + C[1].mma(A[n][k01/4 + 1], B[1]); #pragma unroll for (int l = 0; l < mma_C::ne; ++l) { @@ -942,20 +943,20 @@ static __device__ __forceinline__ void vec_dot_q8_0_16_q8_1_mma( #else GGML_UNUSED(x); GGML_UNUSED(y); GGML_UNUSED(sum); NO_DEVICE_CODE; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } template static __device__ __forceinline__ void load_tiles_q2_K( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + 2*WARP_SIZE); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_Q2_K, mmq_y); int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kqsx = threadIdx.x % QI2_K; @@ -977,11 +978,11 @@ template static __device__ __forceinlin const int x_qs_k = (x_ql_0 >> (2*l)) & 0x03030303; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q2_K + k] = x_qs_k; #else x_qs[i*(2*WARP_SIZE + 1) + k] = x_qs_k; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int sc_m = bxi->scales[kqsx]; @@ -992,11 +993,11 @@ template static __device__ __forceinlin const half2 x_dm_ik = make_half2(bxi_dmf.x*(sc_m & 0x0F), bxi_dmf.y*(sc_m >> 4)); #endif // FAST_FP16_AVAILABLE -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_dm[i*MMQ_MMA_TILE_X_K_Q2_K + kqsx] = x_dm_ik; #else x_dm[i*(WARP_SIZE + 1) + kqsx] = x_dm_ik; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } @@ -1051,12 +1052,12 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_dp4a( template static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int & k00) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE - typedef mma_int_A_I16K4 mma_A; - typedef mma_int_A_I16K8 mma_A_K8; - typedef mma_int_B_J8K4 mma_B; - typedef mma_int_C_I16J8 mma_C; + typedef mma_A_I16K4 mma_A; + typedef mma_A_I16K8 mma_A_K8; + typedef mma_B_J8K4 mma_B; + typedef mma_C_I16J8 mma_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; @@ -1081,7 +1082,7 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_1) { const int k0 = k00 + k01; - ((mma_A_K8 *) A[n])[k01/QI8_1].load(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q2_K + k0, MMQ_MMA_TILE_X_K_Q2_K); + ((mma_A_K8 *) A[n])[k01/QI8_1].load_ldmatrix(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q2_K + k0, MMQ_MMA_TILE_X_K_Q2_K); } } @@ -1118,24 +1119,25 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( for (int k01 = 0; k01 < WARP_SIZE; k01 += QI8_1) { mma_B B[2]; - B[0].load(y_qs + j0*MMQ_TILE_Y_K + (k01 + 0), MMQ_TILE_Y_K); - B[1].load(y_qs + j0*MMQ_TILE_Y_K + (k01 + mma_B::K), MMQ_TILE_Y_K); + // Here load_generic is faster than load_ldmatrix. + B[0].load_generic(y_qs + j0*MMQ_TILE_Y_K + (k01 + 0), MMQ_TILE_Y_K); + B[1].load_generic(y_qs + j0*MMQ_TILE_Y_K + (k01 + mma_B::K), MMQ_TILE_Y_K); mma_C Cm[2]; if (k01 >= WARP_SIZE * 3/4) { mma_A A1; A1.x[0] = 0x01010101; A1.x[1] = 0x01010101; - Cm[0].mma_K4(A1, B[0]); - Cm[1].mma_K4(A1, B[1]); + Cm[0].mma(A1, B[0]); + Cm[1].mma(A1, B[1]); } #pragma unroll for (int n = 0; n < ntx; ++n) { mma_C Cd[2]; - Cd[0].mma_K4(A[n][k01/4 + 0], B[0]); - Cd[1].mma_K4(A[n][k01/4 + 1], B[1]); + Cd[0].mma(A[n][k01/4 + 0], B[0]); + Cd[1].mma(A[n][k01/4 + 1], B[1]); #pragma unroll for (int l = 0; l < mma_C::ne; ++l) { @@ -1172,13 +1174,13 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma( #else GGML_UNUSED(x); GGML_UNUSED(y); GGML_UNUSED(sum); NO_DEVICE_CODE; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } template static __device__ __forceinline__ void load_tiles_q3_K( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); #else @@ -1186,7 +1188,7 @@ template static __device__ __forceinlin int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); int * x_sc = (int *) (x_df + txs.dm); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kqsx = threadIdx.x % QI3_K; @@ -1212,11 +1214,11 @@ template static __device__ __forceinlin const int x_qs_k = __vsubss4(x_ql_k | x_qh_k, 0x04040404); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + k] = x_qs_k; #else x_qs[i*(2*WARP_SIZE + 1) + k] = x_qs_k; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } @@ -1242,7 +1244,7 @@ template static __device__ __forceinlin const int sc = __vsubss4(sc_low | sc_high, 0x20202020); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE const int8_t * sc8 = (const int8_t *) ≻ const float d = bxi->d; @@ -1252,10 +1254,10 @@ template static __device__ __forceinlin } #else x_sc[i*(WARP_SIZE/8) + i/8 + threadIdx.x % (WARP_SIZE/8)] = sc; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } -#ifndef INT8_MMA_AVAILABLE +#ifndef NEW_MMA_AVAILABLE #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*WARP_SIZE) { int i = (i0 + threadIdx.y*WARP_SIZE + threadIdx.x) % mmq_y; @@ -1268,7 +1270,7 @@ template static __device__ __forceinlin x_df[i] = bxi->d; } -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } template @@ -1317,7 +1319,7 @@ static __device__ __forceinline__ int unpack_scales_q45_K(const int * scales, co template static __device__ __forceinline__ void load_tiles_q4_K( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + 2*WARP_SIZE); #else @@ -1325,7 +1327,7 @@ template static __device__ __forceinlin int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); int * x_sc = (int *) (x_dm + txs.dm); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { @@ -1338,15 +1340,15 @@ template static __device__ __forceinlin const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride; const int qs0 = get_int_b4(bxi->qs, threadIdx.x); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 16*(threadIdx.x/8) + threadIdx.x % 8 + 0] = (qs0 >> 0) & 0x0F0F0F0F; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 16*(threadIdx.x/8) + threadIdx.x % 8 + 8] = (qs0 >> 4) & 0x0F0F0F0F; #else x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = qs0; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*16) { @@ -1407,7 +1409,7 @@ template static __device__ __forceinlin x_sc[i*(WARP_SIZE/8) + i/8 + ksc] = scales8; } -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } template @@ -1446,7 +1448,7 @@ static __device__ __forceinline__ void vec_dot_q4_K_q8_1_dp4a( template static __device__ __forceinline__ void load_tiles_q5_K( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + WARP_SIZE*2); #else @@ -1454,7 +1456,7 @@ template static __device__ __forceinlin int * x_qs = (int *) x_tile; half2 * x_dm = (half2 *) (x_qs + txs.qs); int * x_sc = (int *) (x_dm + txs.dm); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { @@ -1478,16 +1480,16 @@ template static __device__ __forceinlin const int kq0 = ky - ky % (QI5_K/2) + threadIdx.x % (QI5_K/4) + 0; const int kq1 = ky - ky % (QI5_K/2) + threadIdx.x % (QI5_K/4) + QI5_K/4; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kq0] = ql0 | qh0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + kq1] = ql1 | qh1; #else x_qs[i*(2*WARP_SIZE + 1) + kq0] = ql0 | qh0; x_qs[i*(2*WARP_SIZE + 1) + kq1] = ql1 | qh1; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps*16) { @@ -1548,7 +1550,7 @@ template static __device__ __forceinlin x_sc[i*(WARP_SIZE/8) + i/8 + ksc] = scales8; } -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } template @@ -1587,7 +1589,7 @@ static __device__ __forceinline__ void vec_dot_q5_K_q8_1_dp4a( template static __device__ __forceinline__ void load_tiles_q6_K( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); int * x_sc = (int *) (x_df + WARP_SIZE/QI6_K); @@ -1596,7 +1598,7 @@ template static __device__ __forceinlin int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); int * x_sc = (int *) (x_df + txs.dm); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { @@ -1619,13 +1621,13 @@ template static __device__ __forceinlin const int kq0 = 2*threadIdx.x - threadIdx.x % (QI6_K/2) + 0; const int kq1 = 2*threadIdx.x - threadIdx.x % (QI6_K/2) + QI6_K/2; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q6_K + kq0] = __vsubss4(ql0 | qh0, 0x20202020); x_qs[i*MMQ_MMA_TILE_X_K_Q6_K + kq1] = __vsubss4(ql1 | qh1, 0x20202020); #else x_qs[i*(2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020); x_qs[i*(2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256 @@ -1641,11 +1643,11 @@ template static __device__ __forceinlin const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride + kbxd; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q6_K + kbxd] = bxi->d; #else x_df[i*(WARP_SIZE/QI6_K) + i/QI6_K + kbxd] = bxi->d; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } #pragma unroll @@ -1658,11 +1660,11 @@ template static __device__ __forceinlin const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride + (threadIdx.x % (WARP_SIZE/8)) / 4; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_sc[i*MMQ_MMA_TILE_X_K_Q6_K + threadIdx.x % (WARP_SIZE/8)] = get_int_b2(bxi->scales, threadIdx.x % (QI6_K/8)); #else x_sc[i*(WARP_SIZE/8) + i/8 + threadIdx.x % (WARP_SIZE/8)] = get_int_b2(bxi->scales, threadIdx.x % (QI6_K/8)); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } @@ -1702,11 +1704,11 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_dp4a( template static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma( const int * __restrict__ x, const int * __restrict__ y, float * __restrict__ sum, const int & k00) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE - typedef mma_int_A_I16K4 mma_A; - typedef mma_int_B_J8K4 mma_B; - typedef mma_int_C_I16J8 mma_C; + typedef mma_A_I16K4 mma_A; + typedef mma_B_J8K4 mma_B; + typedef mma_C_I16J8 mma_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; @@ -1732,8 +1734,8 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma( for (int k01 = 0; k01 < WARP_SIZE; k01 += 8) { const int k0 = k00 + k01; - A[n][k01/4 + 0].load(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q6_K + (k0 + 0), MMQ_MMA_TILE_X_K_Q6_K); - A[n][k01/4 + 1].load(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q6_K + (k0 + mma_A::K), MMQ_MMA_TILE_X_K_Q6_K); + A[n][k01/4 + 0].load_ldmatrix(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q6_K + (k0 + 0), MMQ_MMA_TILE_X_K_Q6_K); + A[n][k01/4 + 1].load_ldmatrix(x_qs + (i0 + n*mma_A::I)*MMQ_MMA_TILE_X_K_Q6_K + (k0 + mma_A::K), MMQ_MMA_TILE_X_K_Q6_K); } #pragma unroll @@ -1771,8 +1773,9 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma( mma_B B[2]; float dB[mma_C::ne/2]; - B[0].load(y_qs + j0*MMQ_TILE_Y_K + 0 + k01, MMQ_TILE_Y_K); - B[1].load(y_qs + j0*MMQ_TILE_Y_K + mma_B::K + k01, MMQ_TILE_Y_K); + // Here load_generic is faster than load_ldmatrix. + B[0].load_generic(y_qs + j0*MMQ_TILE_Y_K + 0 + k01, MMQ_TILE_Y_K); + B[1].load_generic(y_qs + j0*MMQ_TILE_Y_K + mma_B::K + k01, MMQ_TILE_Y_K); #pragma unroll for (int l = 0; l < mma_C::ne/2; ++l) { @@ -1784,8 +1787,8 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma( #pragma unroll for (int n = 0; n < ntx; ++n) { mma_C C[2]; - C[0].mma_K4(A[n][k01/4 + 0], B[0]); - C[1].mma_K4(A[n][k01/4 + 1], B[1]); + C[0].mma(A[n][k01/4 + 0], B[0]); + C[1].mma(A[n][k01/4 + 1], B[1]); #pragma unroll for (int l = 0; l < mma_C::ne; ++l) { @@ -1805,20 +1808,20 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma( #else GGML_UNUSED(x); GGML_UNUSED(y); GGML_UNUSED(sum); NO_DEVICE_CODE; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } template static __device__ __forceinline__ void load_tiles_iq4_nl( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_NL, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kbx = threadIdx.x / QI4_NL; const int kqsx = threadIdx.x % QI4_NL; @@ -1836,13 +1839,13 @@ template static __device__ __forceinlin const int aux_q4 = get_int_b2(bxi->qs, kqsx); const int2 v = get_int_from_table_16(aux_q4); const int k0 = 8 * (threadIdx.x / 4) + threadIdx.x % 4; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 0] = v.x; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 4] = v.y; #else x_qs[i*(2*WARP_SIZE + 1) + k0 + 0] = v.x; x_qs[i*(2*WARP_SIZE + 1) + k0 + 4] = v.y; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int blocks_per_tile_x_row = WARP_SIZE / QI4_NL; @@ -1858,25 +1861,25 @@ template static __device__ __forceinlin const block_iq4_nl * bxi = (const block_iq4_nl *) x + kbx0 + i*stride + kbxd; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kbxd] = __half2float(bxi->d); #else x_df[i*(WARP_SIZE/4) + i/4 + kbxd] = __half2float(bxi->d); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } template static __device__ __forceinline__ void load_tiles_iq2_xxs( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ2_XXS, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kqsx = threadIdx.x % (QI2_XXS/2); @@ -1905,36 +1908,36 @@ template static __device__ __forceinlin const int signs1 = __vcmpne4(((signs_packed & 0x30) << 3) | ((signs_packed & 0xC0) << 17), 0x00000000); const int grid1 = __vsub4(grid_pos[1] ^ signs1, signs1); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 0)] = grid0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 1)] = grid1; #else x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 0)] = grid0; x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 1)] = grid1; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int ls = aux32 >> 28; const float d = bxi->d; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = (ls*d + d/2)/4; #else x_df[i*(WARP_SIZE/4) + i/4 + kqsx] = (ls*d + d/2)/4; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } template static __device__ __forceinline__ void load_tiles_iq2_xs( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); #else constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16; int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kqsx = threadIdx.x % (QI2_XS/2); @@ -1959,38 +1962,38 @@ template static __device__ __forceinlin const int grid_l = __vsub4(grid_pos[0] ^ signs[0], signs[0]); const int grid_h = __vsub4(grid_pos[1] ^ signs[1], signs[1]); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 1)] = grid_h; #else x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 1)] = grid_h; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int ls = bxi->scales[kqsx]; const float d = bxi->d; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4; x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = ((ls >> 4)*d + d/2)/4; #else x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4; x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = ((ls >> 4)*d + d/2)/4; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } template static __device__ __forceinline__ void load_tiles_iq2_s( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ2_S, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kqsx = threadIdx.x % (QI2_S/2); @@ -2022,38 +2025,38 @@ template static __device__ __forceinlin const int grid_l = __vsub4(grid_pos[0] ^ signs0, signs0); const int grid_h = __vsub4(grid_pos[1] ^ signs1, signs1); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + (2*l + 1)] = grid_h; #else x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 1)] = grid_h; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int ls = bxi->scales[kqsx]; const float d = bxi->d; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4; x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = ((ls >> 4)*d + d/2)/4; #else x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = ((ls & 0x0F)*d + d/2)/4; x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = ((ls >> 4)*d + d/2)/4; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } template static __device__ __forceinline__ void load_tiles_iq3_xxs( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ3_XXS, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kqsx = threadIdx.x % (QI3_XXS/2); @@ -2080,36 +2083,36 @@ template static __device__ __forceinlin const int grid_l = __vsub4(grid_pos.x ^ signs[0], signs[0]); const int grid_h = __vsub4(grid_pos.y ^ signs[1], signs[1]); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l + 1)] = grid_h; #else x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 0)] = grid_l; x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l + 1)] = grid_h; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int ls = aux32 >> 28; const float d = bxi->d; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = (ls*d + d/2)/2; #else x_df[i*(WARP_SIZE/4) + i/4 + kqsx] = (ls*d + d/2)/2; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } template static __device__ __forceinline__ void load_tiles_iq3_s( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ3_S, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kqsx = threadIdx.x % (QI3_S/2); @@ -2143,36 +2146,36 @@ template static __device__ __forceinlin const int grid_l = __vsub4(grid_pos.x ^ signs0, signs0); const int grid_h = __vsub4(grid_pos.y ^ signs1, signs1); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l+0)] = grid_l; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*kqsx + (2*l+1)] = grid_h; #else x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l+0)] = grid_l; x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l+1)] = grid_h; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const int ls = 1 + 2*((bxi->scales[kqsx/2] >> (((2*kqsx) << 1) & 0x04)) & 0x0F); const float d = bxi->d; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = ls*d; #else x_df[i*(WARP_SIZE/4) + i/4 + kqsx] = ls*d; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } template static __device__ __forceinline__ void load_tiles_iq1_s( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; half2 * x_ds = (half2 *) (x_qs + WARP_SIZE*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ3_S, mmq_y); int * x_qs = (int *) x_tile; half2 * x_ds = (half2 *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kqsx = threadIdx.x % QI1_S; @@ -2198,37 +2201,37 @@ template static __device__ __forceinlin const int grid0 = (grid >> 0) & 0x0F0F0F0F; const int grid1 = (grid >> 4) & 0x0F0F0F0F; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 8*kqsx + (2*l+0)] = grid0; x_qs[i*MMQ_MMA_TILE_X_K_Q8_1 + 8*kqsx + (2*l+1)] = grid1; #else x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l+0)] = grid0; x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + (2*l+1)] = grid1; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } const float d1q = __half2float(bxi->d) * (((qh >> 11) & 0x0E) + 1); const float delta = -1.0f + IQ1S_DELTA - (qh & 0x8000) * (2.0f*IQ1S_DELTA/0x8000); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_ds[i*MMQ_MMA_TILE_X_K_Q8_1 + kqsx] = make_half2(d1q, d1q*delta); #else x_ds[i*(WARP_SIZE/4) + i/4 + kqsx] = make_half2(d1q, d1q*delta); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } template static __device__ __forceinline__ void load_tiles_iq4_xs( const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) { -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + WARP_SIZE*2); #else constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_XS, mmq_y); int * x_qs = (int *) x_tile; float * x_df = (float *) (x_qs + txs.qs); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE const int kbx = 0; // threadIdx.x / QI4_XS const int kqsx = threadIdx.x; // threadIdx.x % QI4_XS @@ -2246,13 +2249,13 @@ template static __device__ __forceinlin const int aux_q4 = get_int_b4(bxi->qs, kqsx); const int2 v = get_int_from_table_16(aux_q4); const int k0 = 8 * (threadIdx.x / 4) + threadIdx.x % 4; -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 0] = v.x; x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 4] = v.y; #else x_qs[i*(2*WARP_SIZE + 1) + k0 + 0] = v.x; x_qs[i*(2*WARP_SIZE + 1) + k0 + 4] = v.y; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } #pragma unroll @@ -2270,11 +2273,11 @@ template static __device__ __forceinlin const int ls = ((bxi->scales_l[(threadIdx.x % 8)/2] >> (4*(threadIdx.x % 2))) & 0x0F) | (((bxi->scales_h >> (2*(threadIdx.x % 8))) & 0x03) << 4); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x % 8] = d * (ls - 32); #else x_df[i*(WARP_SIZE/4) + i/4 + threadIdx.x % 8] = d * (ls - 32); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE } } @@ -2307,16 +2310,16 @@ template static __device__ __forceinline__ void mmq_write_back_mma( const float * __restrict__ sum, float * __restrict__ dst, const int & stride, const int & i_max, const int & j_max) { - typedef mma_int_C_I16J8 mma_C; + typedef mma_C_I16J8 mma_C; constexpr int granularity = mmq_get_granularity_device(mmq_x); constexpr int rows_per_warp = 2 * granularity; constexpr int ntx = rows_per_warp/mma_C::I; // Number of x minitiles per warp. const int i0 = (threadIdx.y / ntx) * (ntx*mma_C::I); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE static_assert(nwarps*mma_C::I == mmq_y, "nwarps*mma_C::I != mmq_y"); -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE #pragma unroll for (int j0 = 0; j0 < mmq_x; j0 += ntx*mma_C::J) { @@ -2505,13 +2508,13 @@ static __device__ void mul_mat_q_process_tile( int * tile_y = (int *) data_mul_mat_q; int * tile_x = tile_y + GGML_PAD(mmq_x*(WARP_SIZE + WARP_SIZE/QI8_1), nwarps*WARP_SIZE); -#ifdef INT8_MMA_AVAILABLE +#ifdef NEW_MMA_AVAILABLE constexpr vec_dot_mmq_t vec_dot = mmq_type_traits::vec_dot_mma; constexpr mmq_write_back_t write_back = mmq_write_back_mma; #else constexpr vec_dot_mmq_t vec_dot = mmq_type_traits::vec_dot_dp4a; constexpr mmq_write_back_t write_back = mmq_write_back_dp4a; -#endif // INT8_MMA_AVAILABLE +#endif // NEW_MMA_AVAILABLE constexpr int blocks_per_iter = MMQ_ITER_K / qk; @@ -2643,7 +2646,7 @@ static __global__ void mul_mat_q( const int jt = kbc / (blocks_per_ne00*nty); const int it = (kbc - jt*(blocks_per_ne00*nty)) / blocks_per_ne00; - constexpr bool fixup = true; // Last index writes it data to fixup buffer to avoid data races with other blocks. + constexpr bool fixup = true; // Last index writes its data to fixup buffer to avoid data races with other blocks. mul_mat_q_process_tile (x, yc, dst, tmp_fixup, ne00, ne01, stride01, ne10, ne11, stride11, ne0, it, jt, kb0_start, kb0_stop); @@ -2749,7 +2752,7 @@ template static int mmq_get_shmem(const int mmq_x, const int mmq_y, const int cc) { const tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(type, mmq_y); const int mmq_tile_x_k = mmq_get_mma_tile_x_k(type); - const int shmem_x = int8_mma_available(cc) ? mmq_y*mmq_tile_x_k*sizeof(int) : txs.qs*sizeof(int) + txs.dm*sizeof(half2) + txs.sc*sizeof(int); + const int shmem_x = new_mma_available(cc) ? mmq_y*mmq_tile_x_k*sizeof(int) : txs.qs*sizeof(int) + txs.dm*sizeof(half2) + txs.sc*sizeof(int); const int shmem_y = mmq_x*sizeof(block_q8_1_mmq); return shmem_x + GGML_PAD(shmem_y, MMQ_NWARPS*WARP_SIZE*sizeof(int)); } diff --git a/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb16.cu b/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb16.cu new file mode 100644 index 000000000..f09bdeff7 --- /dev/null +++ b/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb16.cu @@ -0,0 +1,10 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-mma-f16.cuh" + +DECL_FATTN_MMA_F16_CASE(64, 16); +DECL_FATTN_MMA_F16_CASE(80, 16); +DECL_FATTN_MMA_F16_CASE(96, 16); +DECL_FATTN_MMA_F16_CASE(112, 16); +DECL_FATTN_MMA_F16_CASE(128, 16); +DECL_FATTN_MMA_F16_CASE(256, 16); diff --git a/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb32.cu b/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb32.cu new file mode 100644 index 000000000..221108873 --- /dev/null +++ b/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb32.cu @@ -0,0 +1,10 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-mma-f16.cuh" + +DECL_FATTN_MMA_F16_CASE(64, 32); +DECL_FATTN_MMA_F16_CASE(80, 32); +DECL_FATTN_MMA_F16_CASE(96, 32); +DECL_FATTN_MMA_F16_CASE(112, 32); +DECL_FATTN_MMA_F16_CASE(128, 32); +DECL_FATTN_MMA_F16_CASE(256, 32); diff --git a/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb64.cu b/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb64.cu new file mode 100644 index 000000000..d24b08575 --- /dev/null +++ b/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb64.cu @@ -0,0 +1,10 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-mma-f16.cuh" + +DECL_FATTN_MMA_F16_CASE(64, 64); +DECL_FATTN_MMA_F16_CASE(80, 64); +DECL_FATTN_MMA_F16_CASE(96, 64); +DECL_FATTN_MMA_F16_CASE(112, 64); +DECL_FATTN_MMA_F16_CASE(128, 64); +DECL_FATTN_MMA_F16_CASE(256, 64); diff --git a/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb8.cu b/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb8.cu new file mode 100644 index 000000000..bdf86c0ea --- /dev/null +++ b/ggml/src/ggml-cuda/template-instances/fattn-mma-f16-instance-cpb8.cu @@ -0,0 +1,10 @@ +// This file has been autogenerated by generate_cu_files.py, do not edit manually. + +#include "../fattn-mma-f16.cuh" + +DECL_FATTN_MMA_F16_CASE(64, 8); +DECL_FATTN_MMA_F16_CASE(80, 8); +DECL_FATTN_MMA_F16_CASE(96, 8); +DECL_FATTN_MMA_F16_CASE(112, 8); +DECL_FATTN_MMA_F16_CASE(128, 8); +DECL_FATTN_MMA_F16_CASE(256, 8); diff --git a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu b/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu deleted file mode 100644 index 2d94e65c2..000000000 --- a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu +++ /dev/null @@ -1,10 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 16, float); -DECL_FATTN_WMMA_F16_CASE(80, 16, float); -DECL_FATTN_WMMA_F16_CASE(96, 16, float); -DECL_FATTN_WMMA_F16_CASE(112, 16, float); -DECL_FATTN_WMMA_F16_CASE(128, 16, float); -DECL_FATTN_WMMA_F16_CASE(256, 16, float); diff --git a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu b/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu deleted file mode 100644 index c3d9df3c4..000000000 --- a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu +++ /dev/null @@ -1,9 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 32, float); -DECL_FATTN_WMMA_F16_CASE(80, 32, float); -DECL_FATTN_WMMA_F16_CASE(96, 32, float); -DECL_FATTN_WMMA_F16_CASE(112, 32, float); -DECL_FATTN_WMMA_F16_CASE(128, 32, float); diff --git a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu b/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu deleted file mode 100644 index bb680e401..000000000 --- a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu +++ /dev/null @@ -1,10 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 16, half); -DECL_FATTN_WMMA_F16_CASE(80, 16, half); -DECL_FATTN_WMMA_F16_CASE(96, 16, half); -DECL_FATTN_WMMA_F16_CASE(112, 16, half); -DECL_FATTN_WMMA_F16_CASE(128, 16, half); -DECL_FATTN_WMMA_F16_CASE(256, 16, half); diff --git a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu b/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu deleted file mode 100644 index 073f71b1f..000000000 --- a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu +++ /dev/null @@ -1,10 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 32, half); -DECL_FATTN_WMMA_F16_CASE(80, 32, half); -DECL_FATTN_WMMA_F16_CASE(96, 32, half); -DECL_FATTN_WMMA_F16_CASE(112, 32, half); -DECL_FATTN_WMMA_F16_CASE(128, 32, half); -DECL_FATTN_WMMA_F16_CASE(256, 32, half); diff --git a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu b/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu deleted file mode 100644 index d30710c5f..000000000 --- a/ggml/src/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu +++ /dev/null @@ -1,8 +0,0 @@ -// This file has been autogenerated by generate_cu_files.py, do not edit manually. - -#include "../fattn-wmma-f16.cuh" - -DECL_FATTN_WMMA_F16_CASE(64, 8, half); -DECL_FATTN_WMMA_F16_CASE(96, 8, half); -DECL_FATTN_WMMA_F16_CASE(128, 8, half); -DECL_FATTN_WMMA_F16_CASE(256, 8, half); diff --git a/ggml/src/ggml-cuda/template-instances/generate_cu_files.py b/ggml/src/ggml-cuda/template-instances/generate_cu_files.py index d7874e6ea..a2628f16e 100755 --- a/ggml/src/ggml-cuda/template-instances/generate_cu_files.py +++ b/ggml/src/ggml-cuda/template-instances/generate_cu_files.py @@ -12,13 +12,13 @@ SOURCE_FATTN_VEC = """// This file has been autogenerated by generate_cu_files.p DECL_FATTN_VEC_F{vkq_size}_CASE({head_size}, {type_k}, {type_v}); """ -SOURCE_FATTN_WMMA_START = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. +SOURCE_FATTN_MMA_START = """// This file has been autogenerated by generate_cu_files.py, do not edit manually. -#include "../fattn-wmma-f16.cuh" +#include "../fattn-mma-f16.cuh" """ -SOURCE_FATTN_WMMA_CASE = "DECL_FATTN_WMMA_F16_CASE({head_size}, {cols_per_block}, {kq_acc_t});\n" +SOURCE_FATTN_MMA_CASE = "DECL_FATTN_MMA_F16_CASE({head_size}, {cols_per_block});\n" TYPES_MMQ = [ "GGML_TYPE_Q4_0", "GGML_TYPE_Q4_1", "GGML_TYPE_Q5_0", "GGML_TYPE_Q5_1", "GGML_TYPE_Q8_0", @@ -57,20 +57,12 @@ for vkq_size in [16, 32]: with open(f"fattn-vec-f{vkq_size}-instance-hs{head_size}-{get_short_name(type_k)}-{get_short_name(type_v)}.cu", "w") as f: f.write(SOURCE_FATTN_VEC.format(vkq_size=vkq_size, head_size=head_size, type_k=type_k, type_v=type_v)) -for kq_acc_t in ["half", "float"]: - for cols_per_block in [8, 16, 32]: - if kq_acc_t == "float" and cols_per_block == 8: - continue +for cols_per_block in [8, 16, 32, 64]: + with open(f"fattn-mma-f16-instance-cpb{cols_per_block}.cu", "w") as f: + f.write(SOURCE_FATTN_MMA_START) - with open(f"fattn-wmma-f16-instance-kq{kq_acc_t}-cpb{cols_per_block}.cu", "w") as f: - f.write(SOURCE_FATTN_WMMA_START) - - for head_size in [64, 80, 96, 112, 128, 256]: - if cols_per_block == 8 and head_size % 32 != 0: # wmma fragment is 8x32 - continue - if kq_acc_t == "float" and cols_per_block == 32 and head_size == 256: # register spilling, bad performance - continue - f.write(SOURCE_FATTN_WMMA_CASE.format(kq_acc_t=kq_acc_t, cols_per_block=cols_per_block, head_size=head_size)) + for head_size in [64, 80, 96, 112, 128, 256]: + f.write(SOURCE_FATTN_MMA_CASE.format(cols_per_block=cols_per_block, head_size=head_size)) for type in TYPES_MMQ: with open(f"mmq-instance-{get_short_name(type)}.cu", "w") as f: diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h index 8594093f0..129478ed7 100644 --- a/ggml/src/ggml-cuda/vendors/hip.h +++ b/ggml/src/ggml-cuda/vendors/hip.h @@ -25,6 +25,7 @@ #define CU_MEM_LOCATION_TYPE_DEVICE hipMemLocationTypeDevice #define CU_MEM_ACCESS_FLAGS_PROT_READWRITE hipMemAccessFlagsProtReadWrite #define CU_CHECK(fn) {hipError_t err = fn; if(err != hipSuccess) { GGML_ABORT("HipVMM Failure: %s\n", hipGetErrorString(err)); }} +#define __shfl_sync(mask, var, laneMask, width) __shfl(var, laneMask, width) #define __shfl_xor_sync(mask, var, laneMask, width) __shfl_xor(var, laneMask, width) #define cublasComputeType_t hipblasDatatype_t //deprecated, new hipblasComputeType_t not in 5.6 #define cublasCreate hipblasCreate diff --git a/ggml/src/ggml-hip/CMakeLists.txt b/ggml/src/ggml-hip/CMakeLists.txt index 7a877bdc1..eb03e10fa 100644 --- a/ggml/src/ggml-hip/CMakeLists.txt +++ b/ggml/src/ggml-hip/CMakeLists.txt @@ -50,7 +50,7 @@ file(GLOB GGML_HEADERS_ROCM "../ggml-cuda/*.cuh") list(APPEND GGML_HEADERS_ROCM "../../include/ggml-cuda.h") file(GLOB GGML_SOURCES_ROCM "../ggml-cuda/*.cu") -file(GLOB SRCS "../ggml-cuda/template-instances/fattn-wmma*.cu") +file(GLOB SRCS "../ggml-cuda/template-instances/fattn-mma*.cu") list(APPEND GGML_SOURCES_ROCM ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/mmq*.cu") list(APPEND GGML_SOURCES_ROCM ${SRCS}) diff --git a/ggml/src/ggml-musa/CMakeLists.txt b/ggml/src/ggml-musa/CMakeLists.txt index 415b2b2e0..2f555416e 100644 --- a/ggml/src/ggml-musa/CMakeLists.txt +++ b/ggml/src/ggml-musa/CMakeLists.txt @@ -29,7 +29,7 @@ if (MUSAToolkit_FOUND) list(APPEND GGML_HEADERS_MUSA "../../include/ggml-cuda.h") file(GLOB GGML_SOURCES_MUSA "../ggml-cuda/*.cu") - file(GLOB SRCS "../ggml-cuda/template-instances/fattn-wmma*.cu") + file(GLOB SRCS "../ggml-cuda/template-instances/fattn-mma*.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) file(GLOB SRCS "../ggml-cuda/template-instances/mmq*.cu") list(APPEND GGML_SOURCES_MUSA ${SRCS}) From 90f9b88afb6447d3929843a2aa98c0f11074762d Mon Sep 17 00:00:00 2001 From: Olivier Chafik Date: Sun, 2 Feb 2025 19:58:34 +0000 Subject: [PATCH 220/279] nit: more informative crash when grammar sampler fails (#11593) --- src/llama-grammar.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/llama-grammar.cpp b/src/llama-grammar.cpp index 6be5cbe0e..9b518d1ac 100644 --- a/src/llama-grammar.cpp +++ b/src/llama-grammar.cpp @@ -1213,5 +1213,7 @@ void llama_grammar_accept_str(struct llama_grammar & grammar, const std::string } grammar.partial_utf8 = decoded.second; - GGML_ASSERT(!grammar.stacks.empty()); + if (grammar.stacks.empty()) { + throw std::runtime_error("Unexpected empty grammar stack after accepting piece: " + piece); + } } From 4d0598e1445a64c99cf2faac72f8d5d023f1e6a1 Mon Sep 17 00:00:00 2001 From: uvos Date: Sun, 2 Feb 2025 22:08:05 +0100 Subject: [PATCH 221/279] HIP: add GGML_CUDA_CC_IS_* for amd familys as increasing cc archtectures for amd gpus are not supersets of eatch other (#11601) This fixes a bug where RDNA1 gpus other than gfx1010 where not handled correctly --- ggml/src/ggml-cuda/common.cuh | 7 +++++++ ggml/src/ggml-cuda/ggml-cuda.cu | 4 ++-- ggml/src/ggml-cuda/mmq.cu | 2 +- ggml/src/ggml-cuda/mmq.cuh | 2 +- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 88be8fc8a..232163c1c 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -61,6 +61,13 @@ #define GGML_CUDA_CC_RDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x1030) // RX 6000, minimum for dp4a #define GGML_CUDA_CC_RDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x1100) // RX 7000, minimum for WMMA +#define GGML_CUDA_CC_IS_RDNA(cc) (cc >= GGML_CUDA_CC_RDNA1) +#define GGML_CUDA_CC_IS_RDNA1(cc) (cc >= GGML_CUDA_CC_RDNA1 && cc < GGML_CUDA_CC_RDNA2) +#define GGML_CUDA_CC_IS_RDNA2(cc) (cc >= GGML_CUDA_CC_RDNA2 && cc < GGML_CUDA_CC_RDNA3) +#define GGML_CUDA_CC_IS_RDNA3(cc) (cc >= GGML_CUDA_CC_RDNA3) +#define GGML_CUDA_CC_IS_GCN(cc) (cc > GGML_CUDA_CC_OFFSET_AMD && cc < GGML_CUDA_CC_CDNA) +#define GGML_CUDA_CC_IS_CDNA(cc) (cc >= GGML_CUDA_CC_CDNA && cc < GGML_CUDA_CC_RDNA1) + #define GGML_CUDA_CC_QY1 210 #define GGML_CUDA_CC_QY2 220 diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu index 383131c77..bda10aec1 100644 --- a/ggml/src/ggml-cuda/ggml-cuda.cu +++ b/ggml/src/ggml-cuda/ggml-cuda.cu @@ -1205,7 +1205,7 @@ static void ggml_cuda_op_mul_mat_cublas( CUBLAS_CHECK(cublasSetStream(ctx.cublas_handle(id), stream)); - if (compute_capability == GGML_CUDA_CC_CDNA) { + if (GGML_CUDA_CC_IS_CDNA(compute_capability)) { const float alpha = 1.0f; const float beta = 0.0f; CUBLAS_CHECK( @@ -1750,7 +1750,7 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co beta = &beta_f32; } - if (ggml_cuda_info().devices[ctx.device].cc == GGML_CUDA_CC_CDNA) { + if (GGML_CUDA_CC_IS_CDNA(ggml_cuda_info().devices[ctx.device].cc)) { cu_compute_type = CUBLAS_COMPUTE_32F; alpha = &alpha_f32; beta = &beta_f32; diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu index 83cb78cbd..45212f66c 100644 --- a/ggml/src/ggml-cuda/mmq.cu +++ b/ggml/src/ggml-cuda/mmq.cu @@ -148,5 +148,5 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) { return cc < GGML_CUDA_CC_VOLTA || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; } - return (cc < GGML_CUDA_CC_RDNA3 && cc != GGML_CUDA_CC_CDNA && cc != GGML_CUDA_CC_VEGA20) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; + return (!GGML_CUDA_CC_IS_RDNA3(cc) && !GGML_CUDA_CC_IS_CDNA(cc) && !GGML_CUDA_CC_IS_GCN(cc)) || ne11 < MMQ_DP4A_MAX_BATCH_SIZE; } diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh index c05c84778..7a2c4d85b 100644 --- a/ggml/src/ggml-cuda/mmq.cuh +++ b/ggml/src/ggml-cuda/mmq.cuh @@ -120,7 +120,7 @@ static constexpr __device__ int get_mmq_x_max_device() { } static constexpr int get_mmq_y_host(const int cc) { - return cc >= GGML_CUDA_CC_OFFSET_AMD ? (cc == GGML_CUDA_CC_RDNA1 ? 64 : 128) : (cc >= GGML_CUDA_CC_VOLTA ? 128 : 64); + return cc >= GGML_CUDA_CC_OFFSET_AMD ? (GGML_CUDA_CC_IS_RDNA1(cc) ? 64 : 128) : (cc >= GGML_CUDA_CC_VOLTA ? 128 : 64); } static constexpr __device__ int get_mmq_y_device() { From 396856b40029dd6747d2fbdb179e828683418045 Mon Sep 17 00:00:00 2001 From: uvos Date: Sun, 2 Feb 2025 22:40:09 +0100 Subject: [PATCH 222/279] CUDA/HIP: add support for selectable warp size to mmv (#11519) CUDA/HIP: add support for selectable warp size to mmv --- ggml/src/ggml-cuda/common.cuh | 8 +++++++ ggml/src/ggml-cuda/mmv.cu | 38 ++++++++++++++++++++------------ ggml/src/ggml-cuda/vendors/hip.h | 2 ++ 3 files changed, 34 insertions(+), 14 deletions(-) diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh index 232163c1c..174916bc9 100644 --- a/ggml/src/ggml-cuda/common.cuh +++ b/ggml/src/ggml-cuda/common.cuh @@ -176,6 +176,14 @@ static constexpr bool new_mma_available(const int cc) { return cc < GGML_CUDA_CC_OFFSET_AMD && cc >= GGML_CUDA_CC_TURING; } +static constexpr __device__ int ggml_cuda_get_physical_warp_size() { +#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) + return __AMDGCN_WAVEFRONT_SIZE; +#else + return 32; +#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__) +} + [[noreturn]] static __device__ void no_device_code( const char * file_name, const int line, const char * function_name, const int arch, const char * arch_list) { diff --git a/ggml/src/ggml-cuda/mmv.cu b/ggml/src/ggml-cuda/mmv.cu index ac45f2d17..5a9ddd958 100644 --- a/ggml/src/ggml-cuda/mmv.cu +++ b/ggml/src/ggml-cuda/mmv.cu @@ -5,9 +5,10 @@ template static __global__ void mul_mat_vec( const T * __restrict__ x, const float * __restrict__ y, float * __restrict__ dst, const int64_t ncols2, const int64_t stride_row, const int64_t channel_ratio, const int64_t stride_channel_x, const int64_t stride_channel_y, const int64_t stride_channel_dst) { - const int64_t row = blockIdx.x; - const int64_t channel = blockIdx.z; - const int tid = threadIdx.x; + const int64_t row = blockIdx.x; + const int64_t channel = blockIdx.z; + const int tid = threadIdx.x; + constexpr int warp_size = ggml_cuda_get_physical_warp_size(); x += (channel/channel_ratio)*stride_channel_x + row*stride_row; y += channel *stride_channel_y; @@ -18,8 +19,8 @@ static __global__ void mul_mat_vec( extern __shared__ char data_mmv[]; float * buf_iw = (float *) data_mmv; - if (block_size > WARP_SIZE) { - if (tid < WARP_SIZE) { + if (block_size > warp_size) { + if (tid < warp_size) { buf_iw[tid] = 0.0f; } __syncthreads(); @@ -67,16 +68,16 @@ static __global__ void mul_mat_vec( static_assert(std::is_same::value, "unsupported type"); } - sumf = warp_reduce_sum(sumf); + sumf = warp_reduce_sum(sumf); - if (block_size > WARP_SIZE) { - buf_iw[tid/WARP_SIZE] = sumf; + if (block_size > warp_size) { + buf_iw[tid/warp_size] = sumf; __syncthreads(); - if (tid >= WARP_SIZE) { + if (tid >= warp_size) { return; } sumf = buf_iw[tid]; - sumf = warp_reduce_sum(sumf); + sumf = warp_reduce_sum(sumf); } if (tid != 0) { @@ -96,10 +97,19 @@ static void launch_mul_mat_vec_cuda( GGML_ASSERT(stride_row % 2 == 0); GGML_ASSERT(nchannels_y % nchannels_x == 0); const int64_t channel_ratio = nchannels_y / nchannels_x; + int device; + int warp_size; - int64_t block_size_best = WARP_SIZE; - int64_t niter_best = (ncols + 2*WARP_SIZE - 1) / (2*WARP_SIZE); - for (int64_t block_size = 2*WARP_SIZE; block_size <= 256; block_size += WARP_SIZE) { + CUDA_CHECK(cudaGetDevice(&device)); + warp_size = ggml_cuda_info().devices[device].warp_size; + + int64_t block_size_best = warp_size; + int64_t niter_best = (ncols + 2*warp_size - 1) / (2*warp_size); + int64_t max_block_size = 256; + if(ggml_cuda_info().devices[device].cc > GGML_CUDA_CC_OFFSET_AMD && ggml_cuda_info().devices[device].cc < GGML_CUDA_CC_RDNA1) { + max_block_size = 128; + } + for (int64_t block_size = 2*warp_size; block_size <= max_block_size; block_size += warp_size) { const int64_t niter = (ncols + 2*block_size - 1) / (2*block_size); if (niter < niter_best) { niter_best = niter; @@ -107,7 +117,7 @@ static void launch_mul_mat_vec_cuda( } } - const int smem = WARP_SIZE*sizeof(float); + const int smem = warp_size*sizeof(float); const dim3 block_nums(nrows, 1, nchannels_y); const dim3 block_dims(block_size_best, 1, 1); switch (block_size_best) { diff --git a/ggml/src/ggml-cuda/vendors/hip.h b/ggml/src/ggml-cuda/vendors/hip.h index 129478ed7..81964611c 100644 --- a/ggml/src/ggml-cuda/vendors/hip.h +++ b/ggml/src/ggml-cuda/vendors/hip.h @@ -1,5 +1,6 @@ #pragma once +#define HIP_ENABLE_WARP_SYNC_BUILTINS 1 #include #include #include @@ -8,6 +9,7 @@ // for rocblas_initialize() #include "rocblas/rocblas.h" #endif // __HIP_PLATFORM_AMD__ + #define CUBLAS_COMPUTE_16F HIPBLAS_R_16F #define CUBLAS_COMPUTE_32F HIPBLAS_R_32F #define CUBLAS_COMPUTE_32F_FAST_16F HIPBLAS_R_32F From 6eecde3cc8fda44da7794042e3668de4af3c32c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Sun, 2 Feb 2025 23:48:29 +0100 Subject: [PATCH 223/279] HIP: fix flash_attn_stream_k_fixup warning (#11604) --- ggml/src/ggml-cuda/fattn-common.cuh | 10 ++++++++++ ggml/src/ggml-cuda/softmax.cu | 4 ++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/fattn-common.cuh b/ggml/src/ggml-cuda/fattn-common.cuh index cfd7c0f44..d40ee2da4 100644 --- a/ggml/src/ggml-cuda/fattn-common.cuh +++ b/ggml/src/ggml-cuda/fattn-common.cuh @@ -516,6 +516,12 @@ constexpr __device__ dequantize_1_f32_t get_dequantize_1_f32(ggml_type type_V) { nullptr; } +// The HIP compiler for some reason complains that it can't unroll a loop because of the jt*ncols + j >= ne01 conditional. +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wpass-failed" +#endif // __clang__ + template // D == head size #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) __launch_bounds__(D, 1) @@ -614,6 +620,10 @@ static __global__ void flash_attn_stream_k_fixup( } } +#ifdef __clang__ +#pragma clang diagnostic pop +#endif // __clang__ + template // D == head size #if !(defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)) __launch_bounds__(D, 1) diff --git a/ggml/src/ggml-cuda/softmax.cu b/ggml/src/ggml-cuda/softmax.cu index da377200e..aac6e0999 100644 --- a/ggml/src/ggml-cuda/softmax.cu +++ b/ggml/src/ggml-cuda/softmax.cu @@ -18,7 +18,7 @@ __device__ float __forceinline__ t2f32(half val) { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wpass-failed" -#endif +#endif // __clang__ template static __global__ void soft_max_f32( const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y, @@ -126,7 +126,7 @@ static __global__ void soft_max_f32( } #ifdef __clang__ #pragma clang diagnostic pop -#endif +#endif // __clang__ static __global__ void soft_max_back_f32( const float * grad, const float * dstf, float * dst, const int ncols, const float scale) { From d92cb67e37abc23b1c6f7b0ef27a9889da8537e3 Mon Sep 17 00:00:00 2001 From: mashdragon <122402293+mashdragon@users.noreply.github.com> Date: Mon, 3 Feb 2025 09:42:55 +0000 Subject: [PATCH 224/279] server : (webui) Fix Shift+Enter handling (#11609) * Fix Shift+Enter handling `exact` on the Enter handler means the message is not sent when Shift+Enter is pressed anyway * build index.html.gz --------- Co-authored-by: Xuan Son Nguyen --- examples/server/public/index.html.gz | Bin 1207150 -> 1207129 bytes examples/server/webui/index.html | 1 - 2 files changed, 1 deletion(-) diff --git a/examples/server/public/index.html.gz b/examples/server/public/index.html.gz index 582ccc0d3f8d07ce79dd9e978772f9c5ea85c426..3a2529aa2fb84485aa480d3d476e5ef27eab8e27 100644 GIT binary patch delta 8942 zcmV!{H12 z(h+_QKaXF)egsEW0U!zsLA0db#}RI6y1_zKW>!|_S8Eo}Z$Yk~HnVSGpXJZDr#87V z(Blg9_^FwHUE0m10q1fu*xm3pygeO|e~a7Qm%RNn`1=#n+b7N|tadR}jZ)0?f?Zp!lfBhwr3xtLY#n)~T|a~?AL*l@f53F`U$ zX6Az3Jl{aMUvoeHQp)#&eZI;0EO~vnd>=RGe}_%Im#{y^h;3S^|R#pxLoJ(y=S}}+vog_ z+s(NG^1oirZs9&c7wnenmjzq7(EeP`4CI?{&hNo5zlU>ISD&DNh;zT&08V_rncYJE zPb?qvZv{SoU-<2;VlNl=?8D~#e>v!>+@F2(?Pa+?+xl~Hk#8AZu#hyiMfVA~zl470 zg6%?XU@P_YY-U53{EqF*Z{SqGM+QBt-(yXgLDfCjU3>w0zF_gLzrsH7-(es3!iD|K z1h(qF+Z6k}vs>`{>$kIez>in6nL4}t1m9C`z&`0~xXeK8G0e z+1vR1Vguj(uAE;kuik)Wf3DtMy`O#mX+}3!A7<~~zVLoDlk3Zw{%+~1^YCT^Ulx9@ z|FCvG%H5=; zs{hmTtBW6>-&}lre)H_37DSNuhku-ZaelnJoiAU#b)Mf`ethvNf5~ZndHdn!`TWOV z?fv6jz|&qT*T1rOl=IQZo?UKEPmjP94e0K{^Rpyy?x*u0c5lj@b0;GYCCQWo4)t%z@n+>yg0sYN$ux>L zsgvYfa2S8Vp&#MA+Iey%MIimO_>j?bO-mX4h&87u3bf&T!OQ}0xF#$`5nlV0zs~4= zK&C!*0^b!ff21MLuy8^@xSwvVO30NhT5`R2qU=4Gnvj*!maHG{6S7HtO172(tsH@= z2dx6o)p57rP}Zj%g7=gwD$Fx*34{#u#ZJO4ik`=Tm)(c+c>k(&(sIHq4WA0~%%2Jk zK5t<%yM8a!!EM#*g%kM8)Jaw~l`K-{M&JPnE#;r3f6fhEi%kf1ZXRA7~CS zq0CJM)|s2OnW~%m2^9-2o<73Diz?Sj^Q2uAc2?M@6?$AapFS1+cpvf|M(;!5!oMS4jbK`08qb=>8!<=iy-R}N zxQVy7e8>jW5VI-1U7O5;!@be1JfB%*lHVJJm3otd!?>d z?ICp1r61vxNrbgZZ^W}?ZJOZi<$ocp(}P}w)7O%_cAXrT@3-XV@!bQG)xK^)IrKJprXd2EMB6`jE_Ww&9wfs>RR ze}d|VG;or(0Z^@BfWVpTM-fQ{I`y=4oA%Hcsnw`0{JR>hYD!w9?55R5 zlhZh+f;wf*%71JUih9MeS^SL2wi5uTe*~n&B+Aw)N#&GegoCFlv)|jnw*SSCmdy+g z6}TNjS?i;$RVmBb5ZZblZC#*k454lG(KZCygS2h-(KZFz_7K|RKHB37?LpeM`Y2mf z%EPp6_tCZm+V&uoO!|mVD#Uf!80b1mukR=o`i86;gQQpceNz?aPpm!V*ZN#ve`~nD zZ0p6(5FT0|4{bQSXnhc%wLpM6&VQ^SOpHDzA{OY6(vjK6L<9wM2<>qn?Xd`EkJ6IW z$HW?rJX#+*Xf5bq452;gqdj>P@@T!-pf$09Vb3EpulD<}T7#frZ1g^)(3?nMsfK2o zmTKrjcxZh*Gy#pwg3AyddLIvce>fWHeL$hN07ao8Z8C(3*~df#3awPq5GLb3CL&@K z6|u)dm{@&GEP=`6EXwXF%eP37(Yf`yU;{$qKPI?#*eWtwcpQG0klkW zSi2Z~Xks+cWUQG(h--bs1sJgg(boHD>jFd#BX0B&H-=-A(FYqw3v8&ve-u9Mqb}mb zFydApaqBTdBcl%3c* z`=Ddo0v$t0Ykj0e#27+a?;|Y&#Sqd)A88REhLASZZeqs}%72qS%9F>SlHCg&b{lXQhN2Cb0NcI5VK;#T@M+jQd(sCS zlO}Lz_Cz;T+f*k*cxZh*wBG|xlRn^>v;c=LFBh!_SK4H( z82kEQPDAhIB7ljmTc)ZhrZI?#(aS_Y6Wul~%~r;$HHeGZ%S8YbL)RyUp^PWP>N4)- zG8TtznmSe#Yu^pWoPUPZ%f+ezmpRdty)izgVfS*eYtW^uin;d?G^a7?>`(Z{N#k9FM`#%0{g zMFbZ`ooEwnteAtiSiM}V8ieVpsqVdHto9%JY@YgLV=oIVU=Wi@FOx|P!AxDZ#+o|J zDV2V&RB9MDR#eR%4|7X(5AW|fs_l`$5A#wLm=L3=AicAM)=@(1ETMOl&^t>Q9VLv` z65%?ac?#D74}U%wQn5M-V;7076wx{eSRshRd)RTJUsknxBk*vWZM;H)^O(u*2!}F{ zoP|yh;5g~63!d?5Op?|iqV?u0DPQ^)6=iZHjj3m3m zX^H}FoX4GEcOQKl-w?PdKca7cKNXIgFpyjKh9_x-w-hk za$yMFfk^l-B#n7WoH(ve@26^BsL*Ti5!B{7$#lM^w3_y7>_=1POGPUCz^ra+6loAY z!wHR|Bvv-udCO2=pl; zMt1HhA5}SZyb2qNS>2LoRaNL z1lubJR=x+JtbXwDD8?%%SU71KZ&=V$9xv8ihksfFn2M~m(PH-e&`%`~XKST4PaT&$ zN-E8i)jA#S&76{E{&heN>y#c>H&LcL%=MHw-a~os0}}QrU9Te#U)DzoVgY8=6^{^v z+Qw?KsJW*#mOBt`9^ch?2?O|QeC&eVpaG$-c|@SB7$nT20_X7^=Dqf`jIP__wd+JK z34iLfs&>A44v5+m!V@7=J&vwgE{oT6M{J<@Chzr?s{Wj4859IK16eU9m3px^G*0U2 zE%g-zl@(n)=LM{4DyU~6zePA&hYi<1l#m;*q2&%)&a2;|SMd>&j-=x}9s7}+k`Qul zb8M|74DcSb!!X!Mxp-gc>r%W7*5sk-IDf7OH>%Lw0tZHox*iUkIT!>$!S2E}=X0IF zJv5cSqHmckA02n~(vqZM(yvnNkZpyb&TgibvF7Wv+(=o**YINfO&6gN%i@=(qAU339a}`ubh<}oMY@p;0 zS7qJREbIb`d^myNjwyd}kLQGLp#I?;8z`hTVhEkAw31rP=1+l#!T|DJ#1Y&!<$%v* zC!>{CHb|ve1g#(#mB&$~je0el^PbZP&1r;}t2m>q;n?7%CM-fHFqpmzgD5+dR+J{w z(P*>T$Qxab)8$B06lIj%E`KG&kp5Is#}eWrW&R9Pj8myP57|oKqq(0CZ01hN99BZN zKG~edcc+qq6r_%kI&O%B&T(_gKhbDK`8Iu#N=|3V(YQR8oO!k^Or{ z6j`$QVj@??Merz%8chM3uC;UB_#w&jZw13)W&;yV6fcllwXvGrFiW52&GR5IIuPxp zF!qc(DLt4XxbN+0qX2eX=q?VXC?r|tEJ-E@BwErHIz2r_O0~dO3H{9teD~&5ipZww z2({+_mfaOstwpo$wSOAjV&(@>2KkYw0qGI-Ma*ycjYE6X9r($<$Lb8$?MF+f-%GEf z{u!Y^Y_lEa#g8Q9`RSwCJAlZlqAi67XE?Ey|v3Q#^!syq2{tg7Jni0!amj z+Oy*bDIP;_Ap%`KgdovwsQ;sY{?CSafS3$L;A#8;lgMi}H;5_8vMhBCWe_^a-%w``SAc@~ zNGOfLIQSdE)%5lJ2DN6WUM#DAY`_;i&Lg1wk#xLDlYbC6T` z0@(co>|S?N_*0A{unvFA6iCyN5-QmIBl9(fA*}-uNaT&t-ohRU2qjW!3@9Ygx~;9( zf!NM9YbpVol`jVLs-mDYhD9z97F^I5{ImKC+F(;d3D!`ya=duc*7$*6cDq15h#il0 zx+4nRR;*?*X5NPNTo!|ctq2_=#3XzZO)Kq{_OXyI3|g3#+m%Qwy-HO*Aa4V;8IP*ub(q(XbAutK^BIVNEs-1i#ckR?P`yu9K2Y4KZ>722Q}y zWq(qXk|8D;QVD00;Zp(-JD46Ixx4g$gCiJ~HqD;J0T4szkmdi_vYzd`{nNP{ZU zVPjoH}1}!mJFxL{32Dj)5s>jvN6u?@>WsOxzhI2jf z=Hu$bEtF?3bFwWs@ewoZE}S%eKn~8oiU@y ztj1T*rbJ-OkBQzo0sZ>Ou2B($}kg&i^nvka^q@J+tzbjI)9n% z;zBOWg>VWKf!0yN%?P{qj499L=$C-s#X4 z2?=7?!pA{CQsigIjT4`EXpzPtf`3jXn~p}F<7fA4Uv}egy~;c{!w@+bGci4zb?iMDqST&?{g+h$dIyX ztf1YRuq_(EJ;g-%suZLA_yd7+we3x9*=I-#SZ z0!1}h)l@9o)?`*D!arSB3?l@OQIRF{>7+JDv+h4AY=D1&JjMEh$ zeho-!-eTj$MAl3@z=o`-CNjP-W!pN6mAIU=Yhi!Rvt^7$Hq{=nK+R|S}0^(BQ zF;`~`fCo^E1N60vQe_eFeYNl*FL>198VJGbb|rMo%ktF+zeA(NswoFeKjP?_b-5uj+~I>yl`PU((ZPv!!i0p8nU|o#5i>3ymf*#=>yA@ZMr&z2tGZICswy#uyh)LkMWEBUn zI!j}et^5TAr>cyocV{sS@J*A!v#Y4$#-H`|eDH*%Cag z;3HKQ|JF&pPI)doU(e?Oc~UnxHaUN1n;i>(SvRj~m@OH*#)}(r&tAyB*Lde=cv;Tq zYxDbhUgG%_uQQV3Fk50L@^DJ>!myU>`SfE(((7zFi@bN`n+BO}AvI-ihFAj3g`HR4>K6*=1KXlT2zAsYg zKDX5nM9)=atKDIwT0ZTQrm_pF9Ju0Dk86RVU;^r zR=1^?YZOb%C!>|#?qVzaGdn&vQEpdQ)lw23{jz%ikkrpv!LGBw4b4dqZz?;?e$=tU zd@@P|+Qn)#=Jm~1aY}!=@fN6z8M+=giT@qBKak2~k1U*ZKnqa31{b&&vFJDHB zSlYT}4x^kT;99p7EgQb&38FMC#=xKsP3m5eU=P3mo8aXhWNS*7Fy1{P!cAALrLYV$HISKen;-X2!1|**Wiik zlpH)o2Q*GT-xLoCJcNFfKR9=vX?#PX?BJKi7WtlLQZoMo?d0c@=r03oWmM>;akAjU z>&w?u)`5gEbC@s*V2s0#coHvfw(KcN0^($FbdimC*sKNl5si`OFBSw=caT(ktCoZ^ z{!K`YgGIpdef}UVN$53Qk(9H9zlHnOM_&XY8M5;1M$Q8j5B%hqc&2Zc&0`WJe_#7( z-}HXZ31{fHHCdCkth{hz zBys}klkA_@Gn*p*Azs;IgDFXfe?vb9PDVdRU`=^K!lYsLmB4ZG$R*XZMM{2KlgPcV zCVJ`pX9#2UlfWlwkpk)}o}D0wH^lqQN2o<=PNERv|66dI1-~Os7Dq5bX0rYB*>7FI zKg4?XoWM56h?Nk6j(CA>LZdfmT?A-bofHnK^5gDTcZm~uh%YjrB#m8?e`RQXk8(3) zlb!8?MKKL-o?PwRpZLF{6msVVYmel5c^$nYzwNIQRy5ZMEtRra!9mb`35x+rMqveD z_U0uzd`H|k4B_jrJ*KitLX!7{+~9ZW(ca2>jHAuGTZqr|>NC-Q7C)=W_iycH`ND;t zU6R|bgHUwkmxrL@D7a@6fAb7F9FN>GczBA|89y$~zq=ec$RlAa9Ajr|*kBUHN>Tac z=&7xo7%l0>3ibFk+-i~ zzF)y9Tviq(Y%&0)hXB!_S$k<67wZi6B;YE9kWd&pH&-q@r}~J8e=JI8IhDV^fBmZP z)Wc_S%4dGir9VX}S@0#rlXBKye$l3cRw-HVQvh&0V(HV>dd^NqESDr*I#GN(Do-Mh z=0Q9kg-%9EIwE&$$e)celHQVZ^zLeQd3_~^o`fjea6wO{&-1{EZX}dKOGPm(PmvTw zF_S2TEgfDKd9k~Oe`wR(+?=zct}W@o>lecE#TA!%iXPB6zpZ0>9QtXTQHSzRQ=~pE zUqqjiB2|%92;!(4oy&CJwUp7ZoUtDow=QqjPJDMQar5yQ&n&eqb+$nRi zBdjCJyX;-7r)?xzFxxEhF8$1PQcom)0WbHXSX~2dQh)E2r0lmETRcKJrtHef zEMG|}W(^R~LO57ES&77amm+V7?G&T3Gn#K?NR=SadQ8#*>{`@9jShG;OCEp)TX%5A z2mJ*O8Ni0iftZm*UNzfkEn98Fdb;j-$gXFEiTmLL~s1MLL-=HiBn3nT)lihfG zoT40CR|nIzaW(`bOK1u`?5R{I=4T&Peslx+09LKjy5<7qu7SAJHL~J0?G~WpIO4Ww z>qcO`e>Jm&eYJU|L31x)QT?YQ$C!Hw`(OSe zx`>neRxwK}ulmBy3EljDyBo$2;u9omEGgcdwp6vRH?R((BB(Y)?ptWa+d%4uV}le z26s@1$h4~u>xuW>r&Hq>V&q#_m7OXO>)=osg*LX}4@f*REL~M6 z`goL6W@joHIlPTksK^ z{R;_wdU&u-Jcp9$HpjtJ^e^nFq0H9-WkzFruD!A-Civ*dh~L2lmvX`V3}8?G4|4D; IBq|0Y08m?OdH?_b delta 8964 zcmV+fBm3Oh)=BQxNq~d_gaU*Egam{Iga(8Mgb0KQgbIWUgbaiYgbsucgb;)gv=V(t ze>FL7YiYjMufS#Wb!LtW6jzZ_E_0PjE!WN}*Jj_c$5BC%kVGdplTa$Nvg>W$Iy&ZI z=5ZqCZ}ji!KQQwXCJsPS1Q$_E+ugS#I_=7&K;WE%gL6Ir2aC>cL9QP+v#(*D<&U?g zHn}p;;|lcnv6+8e+Rdc_`*Jc^-S9TNe?1+Li`(7jy!|xz`yxG-^XUX$%xz6Ex&v-eu z&iM_un{x%^f4!XD!hM7;*e%yDJ8b1b`*S%nkZ-;@zX!kk7WQ3TeT4oY&i!r!IPvXf zb_@AGvV6?H75MyZ;kUDjyF9Z6$LbRU8H zOXznl*ec`(mQr8OW;S%mZ`r#126hE}V9>+*J=T;NRNaHs#TTIG3wGZ17gz`WE3D&Q zxUjyNz*5~en_``Jb_;%g{bqI#`0;8sQ)ibS;d{yrSSNiA_nBO7X5YP;e|-(-Md6A_ zxM%zgoS(h=0PXOCZ4!8M{>|(v1O2_4T^XQ@^77eR?HQf_aFyq~Heb!Y|KL3TUitXu z$Lo(jsQ!=7uP%Ogesl5R`OUKrS`b0r@BeoG+4rw(qnwXM_Uv+VdU}KwaT+@GP@?IY(EEf)M_=R{hmyO8MI6y7 zQe{mgA^OI7PyY7l+VP{O@BNTu=oQ(Zw{hr1PcxGGi{rqL2wo9?xuR25){ZH;qu7r; z649x`Kcyt{Na{z+f2p&kan1()mj@hXc;zow*o`BaIvK@|_scq?(?w(fu9p!wd%5hP>|xyzD-l$NN{Mla>=^Y4}u- zXZ}=Z@OcZ9+4Vc24sNSfFPy+%rcSb|sbrBlHv$hxe`qQHEOl<^T5Lk7d!u}j*iECe z{y=kx31x07u+H4H%~ajgPpDXM@$?ZEUR1eOnkVg|u(QHGt4;;_wkxe7ycdbY6Q~~(|Fc2-iSGB z?_ClEf5%O{y(KC2T_?a`)6sMuQyPaNp|dP;A`!)l4nt{DY$+=eXocWPW;x-IUGIoE z-Ya#zY7e24F8v6nOd_mRdLy1CYtsa8FaI-PogVZeoW7Rawd>@#e7_|>kMAB>9_&*R zW>e^RpmOylT?FLr7=B^Thn^Vp<;KA}-1ZJDf1s83r;74tA$@DXD?fsKdyuc1Ls2Yr z?($kH4Q+)+tKoiJ`WcZC)|DPK^S7YuYSseJ@ zahpjTw<*DMvK3@0H6ebgvn*eL4MD>8NqnEKgYAbk4B~k1@{!j7%ws!5s^|=cDZ35Z ze+`_Zm^@Nu(Z8qk6d#W)u2Fsp8$S`9LpZY%j6?Yxy1yd?V* z%YIGinViNk71SwfR{mp?P}D1y&EjWFf3}?fKqVk0CQ-IdNh+r#BOE+cnf=}lw*Ak3 zv}|U0sKD(I%32>~tx8$ehS1jgXzK!PV+d`dkG3Ju9;9uvkG3h$wujIj_t73#Xb;l1 z)koQ?QXZylyN|Xl(6$GuWYR}`QX#I(#z5CmdVNQ!&^Kh&7$m*g@0+SXe`4(^f4|n} z`dY*FWm_+PhVan(cxc1nMeBnAtpx(qasFcsVPf<#5wSpjl#a|kCL$=9LuiltXpcoO zdz6-}J|@<1q#f6)7Q=)=)S?*j_G1t5kBNvP#`rPn+Jz>16HPREGJcGGsr`Pg z3ZP}0!`j8@LldKkCS%PULR{-3F2IO2h_>EGTNfZ|7;&SIxG@}?j6T>fe_CKe9j5Sc zA9WEgh7q^=h+B^t8X0}?U^L)i@41-KhYCguDyZW@{-g9dkcbB6WSG{~eh(FKK;6?G z%|8D(oBlsEtYP-SfZ2qBp~GIY4-U*G99Wgm&Hg-$J|2buImORp2oJN5hdCU1%sy-| zTd+Ybe}*uz`j}Y1X^v+0e}RSB0v3a8b<#(BBG8^_gPcw2^)?YKbj8Sj2C?aPYE@uk zjfb%r_d&mmJQ+fI+(&vm9C5~d zU@&e018oRxyN|Yr0mCeN(nnkb0ZSXymb*}3+=c>*HOw#j9aD6F17qKCm+4UwHh-3A zwAF_WRudhDP`3Lh+mFE=s}CBi7HHTPcdTAyu$sugkmnJaSNlC!tpU)m2DbZf!fxV( zZde-s8Nx&Ba zh7CCFKE$vah%p$I>^|(Uo7gdg@_(d{^5ik7WcLDx-3AS^- zp7a67qzN3FJ<(0oHr2@x9$Fs{?YDr_qz^bIEx@6e%0x3K+IR>LqmPFG9L89i!2kLX z7G@s{0XIxdv4^347uMhtyo1DCzGY$(0ksgxEVF;vA?6n)?2pqz%*%SEff zl{OhG#=bt7)6jdl2w#vz4)G4dP<Wc_ z2&S5rY3_|J{Tn!&J2+*{R&`S`Y;719qnC>aE}Ci@x}n&{ATDMv7k>d<^s#E`V_i3f zaT)h=5y3@KC)z|CE9M|BRxcN;24T8ts(UXPtNoiko4Y>Q*vkS77{p}K%VbhRFjLp9 zv8E1lN~PZ`l^TYP6;-px!`xEc!~46oYI`K`!@N`lCd5-zkltBB>nNdhme4y&=$$2u zjuJ*|iEtgzJca9kAAdd=Qn5M-V;7076wx{eSRpuv_poC}zpQHYM&QS7w&xWRoS&KO zKH*U2k+aYV0vsp3mE0ByoODULi)@?EaY3}R;$T))LRQnPe6{2B5GPXn#q-#^-%+ua z+|wwgheg6-9Rx=Y^&>UxV25+xaWFY#r^Jw4o&w8A;-n6ZJAWE#dv;7X8~@I~hprxSH2~}4 zogXfXgFeN{(TE(wDZQLg`I*z@AGc@Gl#1o2QOT9wn7#%4(es_hwE>GygoGhILAhtD7j(9p-vU9PgpL_W=p(l&;s2hlll% zg4hAGYQ!T1p|)o=*{QjwHI_RNZXVy&cnJge>iO6OyFmkvy5=VWWyK(2ekyPt-(lWs zPs`JFTYtQEoya9Yy;jxEH_rj5HihGfkg0x-u39dO*L26(K=Do9>nm0LIngpG2yh0n zVoWOaVsB`i)YDt)D+($ry135^Sk+We&q98UaI_8^u74;YH(o=_9kQHPzeca(BP1P3 z$9X#TBR3@>ZiIB@1* z5C8?c3)h^_bprR$RQ`;NMmzgI^j(J0k&w@W*rv&uCxHKJYZv4g^yOr7OJClJ;M$N~8^iaW{Vkk89jF8e8q zFMptXp^)5-I8Xpg)Ek-&Zwn)4ld+0xpwgC4Mhr2|o}7$U#PQCaJUM}qXHO71@%&rl zd#93!AH zW&y;m7f9m`@=|9*QWPww{0YYii6mYMp??!UO4gJKMC0W$AeiTp5M04@D!D7--pu2> zQq-IX&nzMKo_J_<_N1T)@@KwB=1y9QCkmL;fJ|tGGdCq90s+hC&)8HTfeoeb*eJzC z=!7{KTo%=k*hzhd``@YL2609TG3vtS^9kwfWW=&psJE)ojRQ1aVz?~=Yl?!{kbjQN zRZt-zO75|Nk~>_Lbyu^n3n=p81cE!J{KY+<6S{%=hjVP8kk*JHbh6S)YB8HX1s)0m z$afJ(aNCpvK9ik{R$AF0m1YsNf?!l0N0m0})o{*xP9rp@5nit1jIxGfgO{4H2%W%S z`YsHj>{MD&noLKd&1NHSbU99!BY#a%lu>rOln_JuQ%N05h>w){GfXi~rRqFnD}j&Z zem=07J1KKm3Elc+a~|KFN(xetI!5ZaAre9-FuF(OgyvKtw{U=n1+GQ*Zy8Z!$>xiRToo6=qcmzX1!%g~&UNF5B+tJU42PKwOf*rvKyKB>YIegc zeVRAVgTUxOw3ouzGwP)DV2a?rx2KH)*m0q|IGCc4WSO%hnH-R4NmuCf^b{%80$(Kb z7dPA}g>~Ar-hls5!N$LJ$$vVCHz2$O@D9IYcF7!0y9Ke)* zbdtZJ&K#})1@n#%OP0j|7Ag zsWb)@l4#x5*6ToQ=bAN@fX&Jm1A0|aP#Qy!%Y%Rm`iy^4e?}W@YAC@P%2tjSZ`v9^ z@XKx&s0Xp*v42ii=%#yTDpnih#zj>%jLY_k%O#S3kdPgX@L?(5)+t#NlW0O@3gR_Ody!4$QUZPVr z{2AnMY;Kt|?{@k>-ZPg3W|f0o(ME=BnTDgwnlVQFH-GN{6f~BNiH5{C>_5!jT$@l5 z*^b8EX=Paz8LXsd578px_Sfy{MM zvZ*0PE`Pwl2{^h;ic&JfBtr^fVg*I68X;6Aq>h1=%fvw-m^@Kb1ajr#u-`#0yjiaw zD(hD$zy)bgMVkDP>j`q<7Cmt~m=V$@a0uLG!q^j?3xh#ROcup3o+%yw}h7iMzhPh4quYh&%?G%jM%Qezi`Xdoic01=sZ1{X3Y>`a}t>1|?| z$d)I1#fV(ih`4N7oa>HDxHZZadSI8xkaf#qu8DKs1kt#RAljHWC>Js)&8^)=^2&bx zjYE#+SUB%==!%2{v1{SuARsC7GvvmJPk%hLNaGMeCzDM_BhT@(`?W8-aX510Fo`1) z(QL$u<8AR#GwL>|1AY(mTDo5$$l5Z7-hRkwwrC4z>_1#??OFcd-ZyaqLMhSq?t!bF z9jtxMgb5i^R*eYStqIGb0bHJCVMA7RRmcIcHzm+zQ#S$g^|3geV-Y%4k+rdkRDb1# zYRWAPnsGu$M+J&%vZ|?AwynvmOoV^BtQbZJ9-|^l)-@YjGT^eUP1tYy+cC0a)wVxh z8_jXQ(io>JK>Qkz)V#&Ui;1k6c7P38QB7ogX3Dx%HQAV$pF;)lr|(t$bH|XP&C^=P zn}f#d#ZX<}XQ<95yDMxfs90Oa_g}ar@>PzzI}C=~d+R$?kB?*_`@@#Jx*~s42dOfK zSPT7SBp@y&9&>dT06c(N9H1{uXYaj%#+m+BUFUzA3euqYjs3`|c zKjP?_b-5;6g6a^Bub1Cn5*B}58nU|n#5i>3ymf*#>C2QatwDR%En#QDPBUPojwCeN zzd4(8$M)G{BmCPMd=;af5K%3I`_14dEJ+q`^AKR`!W@fo?Q->Mr&z?A8Hu7*+apyV zViGtmS;YZFXK9SGmA{~%{6cIe>y+oh^YwflkSBG6W0P~X*s<`Jb@Q5r*^;qqytpCv?1k)m zjdy;Am*tGUHovduC7w_5IwL6#vn6&S52qwA46$6#rynwsUT4c$XSF=*C!O8y54I8*&f5tjsDVlRL+y za+1Po3x$3s@~%18oy8cM)(aO5(buQw;J-x&B89F||7^ zS7?|#yvTj;PXd3x`uHzN{m@D8`N~SA1Kuh^5IwJ>&?iV|&?B@BxVD0jAR~3w5n0pJ z3F@qAoD}lHGWa_hCr#R&Yof*TlkM_o>^W?y5P??$7;;yxZa`u|-J+v~{FMdr1D3${ z9>LhQ77G3%wJo{wBCz?WBJxTX;^e`yy0FEZs|Ze?j8=boyOXZ)PizC;M7dpIRZB^D z^z-f^L{i_W1?$!VH#8?fys4~F`%%Xl^~op^XczHp%#%?KEmW<0iNNHG%qCp*+)E(1c z1*Nlh_ZfdBA=P3mo8aXhWNS*7Fu@A zP!cAALrG<~$iiQKL+-%{ema2H;EC&$96Ut_G)_L<6b}hJgnpDiICr0Dd_$t_;OE97 z`<`Y}GXEXzeD9gKD{z;$V-h8QkDjzIo4@0PGxY14tV!Eq>JEF$=eQybsfJd$=-Kaz zR|V1it*}|+mb&&huy-FhkmXbAL`&wwWeU0DAztRs_X#Y~b638j?x!S;=g!;@e0pCb z!IFLkSk-Xw|CmnR?da`M0q$1a-uWGgoPhcy`}_6Grij0bSN7OoN)qCK&`*Jr(N7Ur zQ=X78X_$Q_upvEiNi}Vel3&*(a__5&UV8rt4#N6L;FGjS0d*D6P7uT!;(g*H)FL$} zQ3&y$7TjjRZ;6w|5sZ+TY=3|DTNm(mvEDtWz|ApY;`Becagg>9~U z!~E4<;zSqm zf0eMJxlU-Ql+6mZlIB4$1}GVY7{ctkm+0^-Vt(x->OG@E9)_i zHuG-bgr8TRiT=I#SxvrwYd6aS7k+k0Zo9Tp(Uo6rql%;8o=wbuGw5(Ua?9Z1DOzWI z!!`f*bL1e8gt4#@o<+gIB#Ky5`St4Ud$V@z5%b7=y?n_yU6L?n8{UqjvqP4Cg!r4L z1fBZ_|9;$(VHKO&eMi!F#Em0wU$=a>f_=QKEK1m907?%5qCvCv(mF2I8LVo+WeOpo zFm!IlK0BxSh#z`?l+JQ0e|`7*RpF_J&*GHN{GdyJic+%RLCKSH)?a>Ar-W81S@3-X zu)$*K)75&;_E#*IBwadDd^;-lGLPm#JRgNlMoBs%cWlU?jWUwnl63U;YIb>jC5N7b zDBO`jPo+=uz=>`oltN2IF@&c`ilUfFl)|DAFN?g`UBh{Q)!f{iv!kv>?ZNR2A$)Pg zWuBr3^wqEHm>!3I8fVm@{L>VvPsl zSCE{K$9QI`E!5ct4eYdKHE0MUnT)WGDDSd&MNivEvg2^G$h-72*GWB*_yxS&k76|j z+@$`_xg{chX-L^G8e2R%`xeyWxP#?%gD2yT#U8N+odM8!I@-e>_{NElVhx39b!noer$S2? z?6!tI{SC;96tA--3>4n2=b=yAe5*OZt2Q94tTaZN&}Adowhtx%r!bgJ6pO(f3%{)U z&QG|1WXtl<+-YGBWha@_&Hu7f9_5G7WAoSgAv+L?kF84X;On_Nv@JpBYet=?|B6BK zR^Y9FmTTMx=&P?#mIO@8dArGOyxovdj;*VM>Do9Ojx?z0#n$7qEK)PNnj$Dt@;B3VqKD z$Z=b%Z{6Umknv3#pU&*>+ie zV9koQ8C1EV(D3^0WeN>y{W}5c9dT`>TRd*36TI6cj=O5~x#DQ?fLS>^quMdpLF+{42 zB=$Kj_pkr^pa1W_{10^H`SeNqMqaMo9h=y8RcSjg*sET(@t?3A?=Z}kNBdOi|NWo; z_21EU(_dG${xGTCIJxIz%)NyDpZ|m|;^e+n%+kuMzOZvbH^1MmhVhL!Z4|{AA0Bd+|M-t+yQl`YScu59s}JirF}zQw#;?@Km#!)+RUp>EVHAtn zb8a0!04qBZHUoFX2=Z2VK~jP?5QR=RPU;m?JAOndaoBzd8Y6e*2N0Hj&W|TOMHwN} z@(5E#9rvb)g0nyy3-|*Pj|@v!)rmeH<&@d3OhyiGPdioHwi+!3c&=`O-R$1Gb0{nW z)-?uqYaW0%=6mUxIN!swg#N;~o=X2fLZ2QUtP{_nWV+39@D%+6+ixiIIH1gEY diff --git a/examples/server/webui/index.html b/examples/server/webui/index.html index d3893ea4e..882570c81 100644 --- a/examples/server/webui/index.html +++ b/examples/server/webui/index.html @@ -154,7 +154,6 @@ placeholder="Type a message (Shift+Enter to add a new line)" v-model="inputMsg" @keydown.enter.exact.prevent="sendMessage" - @keydown.enter.shift.exact.prevent="inputMsg += '\n'" :disabled="isGenerating" id="msg-input" dir="auto" From 21c84b5d2dc04050714567501bf78762bfa17846 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Mon, 3 Feb 2025 13:25:56 +0100 Subject: [PATCH 225/279] CUDA: fix Volta FlashAttention logic (#11615) --- ggml/src/ggml-cuda/fattn-wmma-f16.cu | 2 +- ggml/src/ggml-cuda/fattn.cu | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/fattn-wmma-f16.cu b/ggml/src/ggml-cuda/fattn-wmma-f16.cu index 1054ff95d..45702ad65 100644 --- a/ggml/src/ggml-cuda/fattn-wmma-f16.cu +++ b/ggml/src/ggml-cuda/fattn-wmma-f16.cu @@ -561,7 +561,7 @@ void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_ten ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); break; // case 256: - // ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst); + // ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, float>(ctx, dst); // break; default: GGML_ABORT("fatal error"); diff --git a/ggml/src/ggml-cuda/fattn.cu b/ggml/src/ggml-cuda/fattn.cu index b1e66d470..b0cf152f5 100644 --- a/ggml/src/ggml-cuda/fattn.cu +++ b/ggml/src/ggml-cuda/fattn.cu @@ -235,7 +235,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst return; } - if (!new_mma_available(cc)) { + if (!fp16_mma_available(cc)) { if (prec == GGML_PREC_DEFAULT) { if (Q->ne[1] <= 8) { ggml_cuda_flash_attn_ext_vec_f16(ctx, dst); @@ -265,6 +265,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst // The MMA implementation needs Turing or newer, use the old WMMA code for Volta: if (cc == GGML_CUDA_CC_VOLTA) { ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst); + return; } ggml_cuda_flash_attn_ext_mma_f16(ctx, dst); From 8ec05832fa8409c49b3bbd13f957c6ae8486e618 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 3 Feb 2025 14:57:08 +0200 Subject: [PATCH 226/279] sync : ggml --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index ddb9d817e..34f1cbf69 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -32f0b85987396945afea2291d5f4c5862434292b +498e0ecd2c4f9379439fd413805af10e8e9ff349 From 5598f475be3e31430fbe17ebb85654ec90dc201e Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Mon, 3 Feb 2025 16:45:38 +0100 Subject: [PATCH 227/279] server : remove CPPHTTPLIB_NO_EXCEPTIONS define (#11622) This commit removes the CPPHTTPLIB_NO_EXCEPTIONS define from the server code. The motivation for this is that when using a debug build the server would crash when an exception was throws and terminate the server process, as it was unhandled. When CPPHTTPLIB_NO_EXCEPTIONS is set cpp_httplib will not call the exception handler, which would normally return a 500 error to the client. This caused tests to fail when using a debug build. Fixes: https://github.com/ggerganov/llama.cpp/issues/11613 --- examples/server/utils.hpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index fefdce55b..5f97df5fd 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -5,10 +5,6 @@ #include "llama.h" #include "common/base64.hpp" -#ifndef NDEBUG -// crash the server in debug mode, otherwise send an http 500 error -#define CPPHTTPLIB_NO_EXCEPTIONS 1 -#endif // increase max payload length to allow use of larger context size #define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576 #include "httplib.h" From 1d1e6a90bcf485ad2dee309c31cf19bd802465e5 Mon Sep 17 00:00:00 2001 From: Woof Dog <197125663+woof-dog@users.noreply.github.com> Date: Mon, 3 Feb 2025 22:16:27 +0000 Subject: [PATCH 228/279] server : (webui) allow typing and submitting during llm response (#11626) --- examples/server/public/index.html.gz | Bin 1207129 -> 1207175 bytes examples/server/webui/index.html | 1 - examples/server/webui/src/main.js | 8 ++++++++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/examples/server/public/index.html.gz b/examples/server/public/index.html.gz index 3a2529aa2fb84485aa480d3d476e5ef27eab8e27..df3cb1bef96b805742b458d0abf6c9b16826fae8 100644 GIT binary patch delta 868279 zcmV(^K-It5)=7ugNq~d_gaWh!ZJU4OIm?T19nO083TqL$aVB(o2U~3qdfxoXybHd{ z(s`1bZZUjPV^WHc;NFJfZiClBTA=!+2QDo&-WIOsHtj{$Y2Gc%sjNQ`8pFohaGkdW zm59YJnsL;$T+i+5+M?DM&*AOnbR9P5^D*%=+K3yfX*n^*R+qyE*xC-3IAebsxFa)k zW{ZcTG%LI^Zf3HB9h+y3D;Fq+mffC}XJo@y*KRD%Oro~Fs9`u^(;)rlr56$Sh3uUj zfT_=x6^xZv{FmkS{HEu_>xyZ~!rXF>)_N=+UzpcULe#LGDNy;dVdhYvy2ig{SWuv5 z!4B?l)iw)vDdSwbW?@}7R-ny8=mmdP zWN3>94;~a~hrt$rX@Q)!%wL3`)|mgYsyDb^ufufskIU|N53j%E>&Jg@$>Z@wTs-!< z8-dgQF1&Ue)1dR!moHv+!V7DU2o9TGZq5DJIQ1}Et`Klsnkd3!*MCSJve{#Xy@K- zf3kdKcwNeu2j`WjIjjWBo5FcslP|l*)l*oP8aJxu`MS4m9YL*;eCPqwx}xuw3FPOM z8pT>nb?tTC8G@IxKQQwJiG`){E>~*rXC3awmK1C&?Tr^?VUBVu4ufx3;ob|e?5%Fj7_EFrBs}*}#k8OEk5ia1UOeTLe zD4F~y?xM{tl+yH_;`1Lp{18l~OO?i+sbs?^*=8yYz$xy+YAx13+X9SdX>JY|lNa$@ zQUx z+kn8B;i?dH%LK|nys)Y9&cP@I%J-|v2k2V1MKpkj?pExj zIJLKG`*xh<<2akNw{BmLwN8dj84=)b^|7j>_g!x7J7D`0l=^6Qt*w8@3zR@junn#i zgl*($!+l~l-@yi~t$f2AG+5waw`=y?V7HOA*Ah{f4JZBd>8rEyo-r|_)%j=hWttVo zW}e#n^5%cK?9TnVxVH0nV&>Mry85`iV|IEy`){{*u(|gaKm=+UG-s)|8Ay{n!1=!L zq<+V8g39jTYhEIwJvctKdA+ltg=~9i`I;juv+H&~vVu8(x_J@6=ESMXes)XrDkc2T z8HZR`g&}07faELG%mZh#W6dj0<7DP6Ev;-AO#6Q-)GxBDU_3K1AZ9~>U@a!0Ha?G& z1^V+Sd+d6v|7seI6brBvIdYaF4Q+w_LbrDic({g2yE~Zy*+92<(0OtMZ5l(U8<3O+ zB9-^de_nR$IfTH%tFVb~fNmF7kzKpOjZ39#Z_~qneNYeZIBaOfpH|&&X|a1KF1meE z61sop0RH^gd7xA(WeE+zxQDMM-_!4Zyu3WQoa8HFh z$23dl{nB-(?4+uFo5cGoi*&0l%_a%lF3Q2o;{3u)`ahT~Ez7LGS2FHfKQGfR^((&B zIE%;i?*OLt6YC;fFG;7<+!XzH$)va#1bBJ5U1z1Zg8<^a)+&8zc0TF~2n>P)%4L7> zI6$Je-9>&8Pttb-h-VPk;1jhbd+Kg2c3(G>t?JQim6h`wSyME(m#)MbiN|?S+m|Z~ zIc34c%2aUZN)jNq)eAYuJ!-YNStfD$Fv-$oHEchGc>Kl|H^x;JZVj-iYLB$4*cx(G zvpx2zsyT_O*lT09%G5zHbunWz6qI$`I9IZsXS z&1Ypn_3S+I!_F6Jkv87(pEXu7&@XR?g9YRtpSen#3)Y4xZdX?`kC*+*o4_xurQkM; zb+HEd?6tpa;Dh@5`COjsi|l{em*4~UzON~F3oO#zo|5I|Iv$O(!Mh8SneHo__Wi7P z-PneS8K>SF145T%@XcV77KND{ysyFff`k_|ykOx44=+S`A+PN^*nKL8WlP>QClOya zhu;2B*=_`ioOE%1UGHR+&L$>XgN0D|W;C%H9gw_Ux8UNJNImnDfM9>KHpI-=`8=M@ zYU`!&AJ+M56y~dCtuzvUXn+Aes7}4oVc`HM5@?G0@hmxC3_xuHuRw3tqqG38o<5=i zzPVaj`>j5LCmTiK+OB=Fm;nCbCd#)UYn;wz@iI4q^2gd;Bb^0Mm@bR+EL|lZD^Z{)GL!YxGLE(4lIGSDgfXX zuG1A{gfs&XX!y!i0tXz(3Ei3DPGI}?(t8wKmouaTC<3|OmtCX-7bp2#`>Q3$P0F$D zUw;6(icICJIfUN2E*+u3`m&!CX1*TH((xOJ(wCd013wK7&<5ouuN`nY=&+G8m*=Dd zD-8{8xE^??>q%KLaaMqrAEg5s3lQee%tvMtrq;}*mqVokE>NwE&U(G-n+`6sG%dnc z8_Tt`>n)sr8)w~qe;UtbP}Lui&_zmfrUNM%9CKT65X*Qn8g0RWOE9+u2QJ0j796-FbC;*415T=G~3Y8iXjEy467Ouo45&4N&-p|Q3rhs^hzT|`5yv=CDGL9lRD^I zuq6U9L7|RG<+DB6A&m!tqrG+@^(VWr)sx+~>ZumIau7hs?1M(!%0Ylcs}OvX2Z2jl zaL`J_GYAkB`N38i9w>+6Ru-N?fHXJEs}wgt2gFIls$`?}5*HYAGp6Mr@R_&jiML?> zgTSZSs%6@O^6?<>A+zflwP3)5z^BowWzmWJkkMVOeVx; zx^zWs5NL`xfh?K`jwclm9U;OZAv81?42;#^XDanr16(Is{CRf9C&Z?@gB+ zMY2Z0SBdGdw5n)jgaEN-S+!k2fY2@^B%$uMYwRFau?R_F`Td^X^N8~#W1tPKp~%eY z>Z)5=mX*TY&+*-l`&qA)1+SFdZI>&E_X}J1wQbUVi38#4CZhcsI2p;JJzvtu{rPXX zNajoNU5%G!W#ze_Kh@NDe`!7yi{aOwYCk<~7fJDR<+*rYmagkP%FEX!WA+G`M* zRR{op9K%naYWk;|8p?_Fl)qMg{iz6~_S2L0>rdVj@Kk$hKh<{s2cGDs+7tK$7Qbsh zRkyG?nol+TZ_mH{p&D!FeD{~#rFlDLFS*QbEBUgcU!%nR_tPI~f1bd@0MJiR(QPol zdHi?I)1!^u?G)YDPRPX(}iD&&7J z&V+L$f339tRAaB42n6v;e7ZOe{s>d_r#!UtEwa6#znyY1US3qoxh8$M{(dTo8|I^8 zhXXIObnDuE++d%4f6Ig9qb!gkGxILj^3IF;7T5CZ>GztSZ-MK|VXPO-wnTg0{u^!o zRRcG_d4|t#OFOm|+s(m>pRRrcz4N3fob~-x$b9a63POS3Uz2I+@@=D2HU$sci^ys}K z?kIiU-&S*0$PP(yYb~8yd!%>H5a{9zDe**#E9DMFY-{>8uG}w&1{?ND*REZGitQVO zUw}t8KKRRBZH!yYE%2PVb!8FXDq0pXlr%c7iRt$gf97SiKg$j&>vBr0<|6C${RWcf+}T&cwQFe2J4tPgLvtHgUny zDa$*4GfDODDqTJu$iF{GH&0-;XTs?eJilEVuA_wF_v>;kzpA+Ii)zWfsh>9~|Fuoa zH^R>^fB9B=F26?}?fDs^EuMYRBrwe~%XfPgl*sqtK>6iI-7`*VJ|53^j;rEyymDHX zw($1Z|32!Ej1+JjCf(Qmafw1)ze9$1q^*!)907I}?eqbjw^y6Df1PCROD-df1J~A`%IEK1Znu>M=7o;< zo({hbE8~Kxe=MB~n2P#$dI`_ZSKu%2;fLLHJStv-$Tn^NOA>@Tp=Yia`?A_=#vW4E z+mveqc@$OblU`OOvE?y!{rvnC1<47__WSwyDR%Vin4EvTtai!LS$dAAXw|fov2FK0 ze@8mc$t9P6sk8m1&h}$29J{Snu=OrB-pQ3%_qF}KdRHued0v6qN$d)ub%CL{5N|Jm z7hAV@x0di1-yf-9M|SR1kvF+i!xqy!Rph9)IIo;FC1gnu-o|uj)E4DArfsRy33+!Q zKHn&Qe_nt?Gmh0A3+GSYdo}H;(k_3ie<`;m8h63AwhEb@Shj!vh(BCzc4s_zrkwY* z*Ur0ad`WxnqjkJ0iTC2m%YXd+b+5{?MfCZYbJY3RX#`$-RbFKy0zXz1UzYN}{wQ^# zQ~7?*_{!*Fi(^_<1`F|D2bnb-eP*%8X9@_>Sx=3e+nE| zRz4S;`R)IopWjM-zi7u5;BVAH`uqewKR*$`r(M0L1NzqIz##UT(bCL$_Xbe^=+?cNp1)4(>;?0vI(=ML!UtV_l+C{?d?R=6};%XG>Uti~e?LY0e zZ2nO!sptLebE^qk)_l9A5qDdE_Lmb}r+K>u9BVFi{1?NH;^alOe{6TByYj`A4+)>Q zFn@obXbJJ}*P~JkZ};V1q3`O&zCA}~_YZB^6|^_oxL0w%-qKy}ttEN49sAbqpdKF1 z!#cm1T-o1M)h8yD`WXhYw&Um@6~!Sh7UIJ`8((PcxBm=w;=kt z(3<&s>9(>Pv^=>~fAOK_GmKjU)dvXkx&7l~#?dE)bY~2^Yp82KrS9_;fmUYOiT+X$ zC07f-%8`F%C7oz zp?dZ7@`U8~@rU%$c6Nr~E1Lho%JWi#4km*oIr)W{{Esaq-&=SWOMClDdeleJ4kq6W zkAfsY_~G)8wdptYr*UwRw51t;X%_sYS?~iK<&@k9e-y_5008HX03ak>Jt082dU^q& zaCNT0c8>t;kH)R4dhz>9B1fj4?7q&lsVdvlUFKHl`(eGhqgZ&0S}q3R^b%ZR73}uT zIm@V^>p4+A`a{h5WIv!I&(Ck?CL`NnQ{?EyOp`>9a=qWe2BqN1-?tf3)ZfX_had>z z$V~c=e_HiKxz6d^)n4aUnDahA(zlxf^HNmy5?fmKsF~LJ^gilh@6OF=*XJ`0zwK>( zd#k?cPv-}F$@yXY&~x8DS6_P9-hes74to871Fusq>K}_`TE?`sbj*8`pAvZobU*nS zMl{|0Tx!n~0(|f};zB-HrCs)5{Z-J1KnH zri81f%C@?42@rh+uaqfcE+!EUS5I)b`n>zMoPX}GVZY@6>7{OBhpztb{aLWpjLK=X z9O?4gJr`2oH4h(u{xx+?S?a2q)Sq8LYS-nbolxqI_+?9C4!Ue%q#Jq^zeKv0s=D^# ze+9+9c=CgnZIu{Vruo z2(DrWN`{8VH(>F!Dybip91wqgdL^(EeUo>TivDBjL}eA0w^Hc~N*wOW`g)o~vU|6z zf1*G2G1Z!#%f0W{E35s7^vY@<(kr{Ae?IilzW=@&As4-fbv5v{@9#%^QgQsf{dfo6 zWj|y!XEl9iC*+e_RA*$w-YG}`|6 zcH>=ym)*$Z$hUp-JA0CyE`;p67uLH7FMFa#Q4oD+KlJFNT0Q#Se!PotN9wl(e|g8o z*gx@J;$-dWUka6{gPPOMcU;bcxnj(9N-}=~z0~`zAFmzG4{GY(A@mQ>cYm)udMxuB zc)q*e`HPD#xVyw`RifX{e1Gn|Yu>k~QU2@N)urUC`eVi@-@r5c*xUJD z1>fqw{LXv)!k6ZaOZP`k@AE_V!yvHY=4 zU*3K{(B<^3&*_KiKEJO%Ua@^fA#MLd6<@hwK3;crHs0CC2K?eq_;}5Ff25vW-QKJD z1HPvJJfG9K%>-YwIdV84Jh(AAng|7gBuK@J?H3=>=^1J=M?>pXS>CU#5gPrDo_e<(qJlvNW zaoFf5cir3<%@t+3A1VrXfA~7HM_M>a+_!tTOg!d_UyeNK z0w%W`xnOt~t21YLmpI|Gts0Z3pMh1wR!Ell#P-_l0j)Soj7Z{mVC0^2>khJznloiB)hAQa$het{jC}+rnqxB|Y(Lw&DMcQOfUCaI&l)9!vP0 zvHY@)f5TA(Ot1pCh~joSx;IOFdACH#AKpcgw-0k=OTZW7d?jxy_q*HmyS={K;ro64 zuj=MM-ro#+_#?A2ClRu*KmtIN=bHv<{#oILp8il#d zd9du~r@ueFfX`pc&SZ*{*^UEu%Uam}(!ajCe}SC87Ot9sT=Po*iyVi#nj1tXcJKv! zsy>0ImWY?Sm?wH$7a?AE-Uv=U|0BU&fB8^6Jh~KO4uX#{=JMZL%iv#s`UKSg0Ql)? zY3msktbYBeu&0He@-mMEqGR%Ea?ulGFqx57CuxSPI ze-jixZG{t0wr|+Jo#>yi`U%b_)s9qG|AbN7<`=46svLeP=I#n!DVzZY<;e z1OrErMY$=TFe;x@2q|~s%kJJ(LvxYIa}Iw4MPBPCjM^dCHC2;iOO7IW{RMae5#X0n z@{VW%pRhWNy&^ZjVCV+i3y6Ng>O=ttf6<4Ae*ghO?0Qq9KSAt%IzQED^GVHhCV%~@ zJPW&7-_FI8y887e5c=s!U2p%5e*KAfMSj`^Cd~KbezCnAq8)A#e1h2BeC|KZ!>=Md zA@gV~#lIH%Ukm*YzR(X;?Pb@3x4(}1zjnZ1NBv(5{jY`or#G^Z)6^zTJW}~)otrV1(`d^o;FRygB`aUDr zD+q6w!x8?z#QL>X`}~Tc#oxW$`XbobC#(+PuL$;W)W8;bt#%c)^z#cEu5$EO_TO0p zw(GT8TCMvPdAelt+4-qhrtrxsXD#A;-v@gt&(u%XmikAK9i!?IJ5y6e?_r;4;IeBmwqOnJK;{ADX|ysyq(W`E2S z#sTj+2|reFTP6GB)aFO(z+WHqof_@84E;5i`n%b<7{jf);&XW(fZPMG>ZIXIXa&Ch z^tzj|xBLE4;rx1Ap3dCy@Bx()1zDU&bvyljNrE7Ep!>9o<8*0+w8L1q$S-^c?-HVe z*TFq_@qer>%6!?s zN^a=hmc$QR@Z5>#sT%$f?b9*J#9f!;m*sYj$}cmv6rz-VML!+ba~J-!IiPMEE}g(^9VS1$ z+yOo@2R^Kw+#L82m*s1@NPidwW~9gQ%U-$bmTmCr%L|yh?Am#tzvXeEl=(Z^)oxpG z@L5oNUUOLRpE6?Ka^AQ8&kwu0Xw?VI*N?*t6b{95m)P(7m&%Z)p8T!M0}Z=atvKsqPO#6(;n*&zzlv1%KFY!Q&e(cI_fs zjg@-J+sobyOUXGN@PvM_ojb7ncDaK)#z!cvq-%}Jm6W%If2RhWk4u#Le!kXoGNd|h zS00Ro(&@K`Z+I{g@1b#}@%gW4&$UH#`L4PXd*soE?xQB=1f%3;{x=G$@Ll0R_P6J9 z8%pc#hUBgYc<_=lrGMSRENi2Gc>)1od-L9d~Ep4V^SM1_2 z>~fEFZ?(V=7mJhnpP!4`E@7@N(?83ScyCr8ojT;dbLviLb&_)kE*y51SL-VX<#FoH zJak>pwL+ z`ZY@2e?RR*Qr(Om1blj`6j%Hkv4ghx=#~8PVHP{OtLL`I*I%(;Km_=7!@t#+oF;!g zh6?&l8__+QKPm2McNmD{@pkCcWzt_~-u}$H7`JOj+QmrTEA(G*@?GiFd;NL0EC2DA zVBiBve;GoMr~Ti3sGt+q{-1NNl+VwfcaH7PQI#PgeH$iQHzQsDoZpmlso*=+?0}9z ziSpWV2}A+6y+l`^fKNr*^W_CQrw;dPxCnbx)KKQU+h* zjl$Nif-4`~Dtx{M`}eybCDi-ZgPun<&2@?QOC0#6l#~BGPv4e4f!TS2I4x0vJ@FKA zySK-dUOJJu(c60^L1b_82;<*0yCvvvn%~>A_$|G? zJ^%0b_|X|Y>G22W`1XpvH~ZJ;@ck8?>$oI#c*mFYiR+Pf zUAbH}?(&ugzB+<{gJ=RNNhZog;e||l* zEjTpg+F`wwCgg>RGq<<_F7ct>y_?8~s=A8mUfmxfhUZS|c1#6L zH{{e!-f=W`d4zq)ZBE|3_r2wu@1WntGtY(kX#jEtE8FhXtJ5ohe->K9M=it> z#PjVt%uXx$z*hasr1H;bQh6}Szik5fVT+irXG`evGG0=3KT#9eipWm1xv$>&pLcv5 zV6UeFoU(Z?-UG)Y87lNXPR%+ZPj=7vSvyVpgQ@X$3%y?F$Nh5Rv~~x2Lz;DRK%7dd zE{}nIKMNUB-iO`5?UG9=e}^6)o(B1aec%0q?BjCkF~o~+0G}QJ@4a=af6wlf+^;b@e-A;jN}2n#udn;# z>+(SR`ntbDT!|F&x@APaoiV2K;R|2jw<5|;!PMWpfWR>r2#_Zr#{{HE_}4$dfBm~0 zF~oZT-}ZhhpsjGgTfM+`Ik;~udc4b_VD%RYX6^`c|(=%{-8_0>y`8HL}CL-Iwhid6I@ z^;jA&r$hVH@+j^%Rqix6rwebVm&&iNxxnj?n16hI-JT6+auMqV*vnpk+%NslS6fL__JTnBCr_=YID0|ieG0luDK#Z~!FLVWmf5!>dwO}o zi&kXYs><@LY~Qd$;xhaCTF~;mwj)vB)o4aR_EyLf^It_goq`GVvSa>T1QaUD9)OOP zmQj%HGS$F6f6r6{H}(9luQ_Wqe%U?=-ir4@!EKFquY|9!2Xp6~XLK-r-rs!+tVPzb zlzRM-hMMhf8x(i;5KOQt}^HwD|<*$>AbSq zH&#|Vt*mypvV29H_|N(J8s_BUL>LWTilVm%W4JCAJy3xwF^jhPNBi}B@q*rE7pTj* zMZzuoAyrkynNAyj`g_aNl_5Gax33S&bLB)@9m*W{I2*aE*HO2x+q!{Q36-n z7S6VLe;5GlvbOJxlT7%K8S9>X0gL9@MGJ}rauWFDhq~w3uZ|+Eh_?Qr5bI|V*59cl z-`i0^8P8w6g?o@g_Se@P;0sW)g*+s}$JAH&n2eT^(XX%Dr}EC+^I)Wui<5Gde&9s6 z;}v}Vyvxj0LSD{wKVHF|IB2KyQH+MJogzH4f5J+IF7aM3R@QGha_#cHwY}b!?F*iE zDmka+?iJYPJ?{sl*kNY_lly6SLV%)`(0TVE7($1v;PRnAFll-yY8357&$4`A4d9HnQ(`Z zUUgET*zw<@^qz0P)YPs|H z?JVH+{u;d6K9CO~Gz*)FTNsbn_T_ULk-NgV4YMu3b1eZGwOS1;jqI2I+d%XGf8Imz z5~oU>oT7ObMh!eKzhCU2Pz2Sn_n{npoduEh`P~*3_9Q{{lE~(}g*`oyKZsuo)JHJK z+gIGX<#J7r{YU^Hf)dczM?h>PXAte+aG+Mh@OR;S+0)-`Paa?J-9moqDSGtyRM_ti z>mm)32U@3lVIg)*WOLtp1w#-3fBgDzV+!xG9~|;H$q%`An-}S(z6#$Tm$jz^?z_d7 zxpRu#gW^)QhVfqcGpb1t#Cs5TDB4Lw-XWOfvX9ZrGQ@3idD@1X)UsTxd=TNd ztgHIh2Ov&prHT-W3cd$+e~VO>%iN-rCwOiW%BwSAzdM{a$>d4Pu24w>z%}^YC3Brf zJ~q_1X=J(7Z4!Bc>n4RPm%2ypu8zl}0Jq5F)uAdcN(E6g6#x3bE_xXKDQ$dgD(;cS zlYzNK8BYh}gfKp~kZ#k((=+Wz7FAXK`uOQ{N)jI%_H&A;=@^KifA5|kX9N*LRhR(Y zgZdafJljUs$l;!&5k|V9NAU^#m#2cDKEGW-os+>IN;9_zKSVRPAm9GtEWtne;4FhY z{N5}B-63x$&3T7-mdidyJj)QbiRWn>ZW7OOvBzGT=MWD*G|OPOh-bOXE#i5C=O*#I zIs^8*!+Dc;Trc$Q1uCY~p_ZW7OOse82W>UcZ~aEo|e9jeEk zmgl?ZVf2qaEX%<6i08?`+#;T*gYn?2^0WoFi0A3C_Q;#^#sT!`lk(&=diX_oa)>^Um~HA! zjpMKXtJs=`tm<#OU!Lu*zqI6$KKZ6U>MLZ zxvB*Uf7Sqk>zE`(=6^USCP}#XS()YE;D{s{^4=fHeG&8V-8h?#U&qVOPxidQZv_D744I8ob^Cx&4iPePajw^3M69@W4pww!@I!_o-L;f)G4a8NSgxT;(c zyo4cE!Bo+PtU$266;&Niy|z=wYxM;Kv&?GJU1%6w6*@D@%P{edtbZS)$!shKb#^$^g(E~@4 z*9=8k^&!Jc{Dg6MJ#%^9ZqcN@UbR^&liYdWPZQL}dTFo#BN^0Wcd1Ogv{6+Eu$t)V zWHGOX3t6pz5*-*_-Vp{oHO0^{S>UOoZnRBic4%=j2?DL-aD!RRNKk*>T4X%d(%Do8 zf2+fWn8>|skzpf~MG{|Y(t$*qWVM1qi%!jy0^XE*vPVET;u2G1%6ekhER$jhGJWRCz70k zRMrz-p2)pgQ;S86Je^DC)BU{AK9k1$DoUCZFtI=LRQkxqYU*S=; z9df3<@wHKzzy#Vt~6WAu{6dq|~fPnLY0t|1uK1Uxyg6bYEH0!asJd@;yKJ;R&+ zrsCAasDiHyki{mb?2o*}^nmWbM0GSVs*A`QXu8ksl;R2EcRg#Ekqkw5LDy%!zWzw%?op@CFgf0yiyT&wFF8^*_2x7md7W-weTYY%4;&e8{Ssbvog zbDqvw}X-GwX^6_CR(cyFnC7DTHOznkG4kCaDGA zt<&Dj;+w44Z_TA{*fueF8qnrKi5JaUiypPTs5UUIE;H$9ize-`_~n7LGas zGt|i@4W}{Lt49O_x98wUz^YRP>k3?lRoZj~>klcgwiZwjG_+QKmV#BqBv5zhlFZOy z-4?-!y|6PMubjBPs7wPe*q=e>5xyRLZ{u# zsHP-20mWn>~wweAv) zC8K5NOr}@)N>x`ODAJP#h=N)?-K+qPs!prRCeSoC1KH3P;n1#9meDpN+JZZ^HiBlE z$WZn&A|OIt_EOJW>ar}$i)P4kEo!u3s@kUR4>B_B2yjYSf1|p*s56T`ie)s^6~rmG zgjNtNh(77jR2{>z4z!uYTCchxytde-gp}+|2Yr1?wtE#}Ic!HbD~Cp?t%%0D17m!M zrkxJwNVe73_$l4zc?MMm-V5cBxK~e<1@?ghhR0PbPeibyOxX>*iz} z>&ZA14Y{SSAa4k?xTMFsV|6jryv2}_W-~4}D|8lw^1SDeE;N7_PH5Lg6H-c=_M~Ah zn>b(xHDO`_b9*r#^xyymxOuxfXhH1AsII$gVno=OPq>h~p`LLl3+t`?!%Rbq!DKXPB=02(| zd(AYiE*hM{GFzE(GX@lTEjj6c!?Y#vf+2^)I&ZshNI@YPuhoe%LfJuWzJ$zlC9i@i z1;A=QL()F5!F*yf!I^4TAe*{TpP)0owH9aWyw1l%FzsYc5Fpvesf%qel!4*I<*QA* ze^;}cawDB>Tr(Dc7B7Z%*6VU?bpX{nUL?$P5J32VHS1|O0BkyCD?x8r>Csa<8MLJ; zHEoRoC+Ue1Ka$1`h9-0t1=_8~m>qLMy&A83G411KN)IPZXw40*fl1dnK5b6$IX}eB z`3hkTf8DS~JY$9~Cr;L;*PIEG*P)_`e>7msNg6gMH9u>au1_Pi4yRzfI%?CSPGbX5 zYt60n*D}@BVIgW_SsKP(+DV79z&l(6mOAE&rL0z+#XJ3MRh^S`j4YUe)Lt4vLP;T) zP}*=9qpqb&LPv}iOIlKeH0dP`frQLCvgVvbaq$GM%tt{}Y?>%NNPPuc#=6_kf7!tI>8NrB)k`7b>dP z)b%KUIn$sNqAv(lDh-!Y!Lny;e`jRXe1{a`8By~a>IjfFaxg>|Ixes#I-9stAJ<2= z1wmjoL$stFCWb->eZKFb6{6MdtdZVi<)ymr5hybV=R7)CcL!QZ3@42)i!CR9zzr2@ z=*p8Rm;fo)+h|FZZm-&rWRuL;tGB#ZjOoQfw`XIJ8qVPfBtih#Lj94ze{^-MwK79f zLS%Ec^wvwRDyvAyQXM~_>L!G2I+8GKcEwa-EAAr8Y^b(s&wH-KX(Mzw6{gMQ1~pv{ zQ-g2_Vr@MqG!i$JCr6JO-LoOp>*9u?9KbGK3&#V z8KEu}ay95C6=Gvja~`XYf5Ro|b?hONMG506S|6jM73(cP*RnH|<)h)c>fnh86vdFOMW`HQrO2BQJuX}!nB_X3XBgID6vl`=G#4sC~qf7=FQ5{)L;Nf3cv z24G^_-Qh5lyX30vS!)N&dP1FYKwIFWNk%QmHHpNrla3P7bZlWcn$guZFld1b(wYn; zgkO*=x+OUES;)DbiI^+DOU}p^H-wsutxZWbs1NudiklMF;(&R)#ck=KDO|=wUCA3ye+a7YtrV@anx$^ z-I%ZGNHZphjBbz}wH261X02_F(gsLVaT@7BKq#M@x~$XCnVP7E)q2ZNAcCJja5y!l zRlbWL9R#q)R$p6m^cqItn^nNJ@lHkTxR$qWkJ^Bfph}yse*hhafHq4;TX5z;O*?kf z>Sq#QVyliikXm&pU?@nF0?279z1W6_VjK(yEf{r8Ja(9D zrZIS1UrZW)0Ihufv-Bxez*bT#$fH~WSfW9#yMzaAHXsd|xv0%{;t0O@1{n0WVU}?Pp zbwNu3kCP{F9Q^kP-^|Ds#b@{D8NQb z$$*Gq)D!0$V&lWigcxaJz-=~5X*1^RAnsWqP^ZbnMU1f$8)K!f50Hi5^GI#nT1Vq} zCQ%b>xn9LGFVNCjV@xYefC1U7MA}*i00dCgI$3I7s|;V=8aPM7JdYu{r2t zV~ZL{e;F~t2%OhMpIdkBIwg=~Y)f6+-vo1xw@e#gD5jfMMjQ>$gLcRDwBf90TduDl zIt0)8We{{JoaT8G4>|+Vpowx=d(`Mnj83ON z8_@9}bEv7k(L;`IQp|{@1!S}72VhHtEwGB9f8K&&=QT{SJZwyMnsrO(s4-1EG+y;=tzCN5RvmymhmZY^C=JHLXg)U7iO{kXd&-bP6y=!QrP;E5ni*)c$ou}4ovR5#(qX^gjlRGb-p%mS9`K|BIMEz&fx zRi8FeDIH^0)m@{+tX-+PgFZm}v-#Mue~BSB3PH2W@Sp$)mIAK2VuO?<$Z9XDo2a=k zC#ER%bg98`+(I=cc5O45YBjpv0U~IElQTP8%k4JV?ua7>pNT?BRoXorrxrtcl4WGt zVx@s2uL*MGO+`L|%=T~|U?w+Tih~wOc89Y*v}l+@$CsR>H(RWi@jzS+907+!emhxFlU02GvG=Ahcs08@1z! z;}JBm@e_xufz+7eYeKyxuUx<1e`&%?K8`G01H0)AXId&x#Yvr7^qUx_wi%MF1wdD+ z(^Y_4qx^7Y;(*xesPcFPa^A2-TG?7r)A0r#8TDapD8M!^G&+!7?bQcP7p_Pc1kHL9 zm;fMHcGaO8j^Z)kcBY|zzi6W3PMl!oej+}A4$jqS!jZMjJr0b^S<4gy5H=}$d}#x|U~ z&d3EMxwT{pLg}E6_OvBRah=4P6t&gsUg zW4WftRePN-Z}%(6x+6*g)-+=;YuJ_bRM>EpO5f&Wv!}Egx;cpHNkwzv3Wke4tHKDn zRrmNM!Y%cfv87SP0C-mR>%zQ^4SRGKwnVQ1jyX=}SwWb%a9CyMG}^${^?xbRCv@8L~235`olLV-( z^%=6M<2cWP*2bGs{V_VRIyi0_Z8+NaOj7Toj3g37MyZwIf)_+;VhAH@70bOK=$o>z z9J;NIJ!fX^Iku8wj|TLCCV#j5L{~!}3dNN__BS?V!>(sdxwc~QMkSrs6Qia@kYl@{ zJ)d|>V6isvNxe7k)YJBWh44kVZp$7#ukm`eu?CHaMRcJBH*8`9E7gXn$|)-T(CJbNr&+ZGZC)Eb=3ggy!ak z6_R%7d9G?+yh*zz-)(sfk#y`))j|MJ zQ*}%i;w8h?ds9Y7P>< z7R_m8hILj`zcZS22GiAKYR#%RYSOx_QB|ci=9rnbatsi}8!XLegx3WnS6OBQo^h9s zEG$>cSCI*t*mi#H)(6CV4tg4a+!?6p<8@IJo zOu{@(Y=p2cIzd&W1A(hAyc`PRaF~}c zi!3JMT3|TOn19jeq^{Y>j3yHZ@HmR{&=$FI$j|=9= ztU6|4(d*FEM7FZtP>I^TQFpoq2CyNZeaURO8&-*1z+k#;4s~a=f>De1W15>Vyfh)! zZJ4tgoq(FatE#Y;BiJ5y`g6s?V~>a91&79Zr)^j=H-D@pJ)tgIWIUKj)AtVdNXC|$z%Pnc8 zO*fXr28k63pX+0Rqcb{`{8+4Le!qb-;H;l{Hi4K>do*$viZ!hgf-rJ8Y(XqcwjE*2 za8wb+%ztXAwr^=mw+^6L5AGAfl4(TKe#?}YNaGBuj!gYPS0curRyc{>V1vP^QYXwo zwsC89HpYEv;fHt)pcYVW+jo=#3dk%*oHzjytE$GDZmso3NqGg8dU^+N%t)& zU`)NIjFC|fiB<_!T}B(2RK#Q+2}_I9f~vf90)M#GMon_*W)pP06u`d9Z&Z>B`VPge z=}2y_Q8W!4EQn~99oU=Ii0F$Al^TLt29J3Lu19@cp3Z7rcEm^xide8=Wh8(NR_<5U z{j@dl_+ZvEJO@>Vs}<3$SBEuo4j@KIX>?4OLJR8z@VMHMLTc8r0U}tEb5d-!eSW}3 ziht$00J$~?T%ghN7-vWL*dI11iyK9R)sxiT6rSr9Qdkoz1UKr8goq7b7Mp>Xam{KI zu_f6BM)6pJg$G(d?LF9o1wU5l<;)G#ZVZ00~_Ja(QsmnzT%T zVpCNi;L057TKzP1cyy!~!%c=eOMs4zg((7%fekTJg%%qR+9YbVIjy(6!7M@-^M4`R zGJ0cO9X3J0&s?}WH+!LDW=_;>+kqW(g9T-@aeX4tZGII9BUI=`cpA*sjkd_wK(!HT zoKqc!lg!VW*{Va4&5oQbhqkV6>Snub#Xy+)eWF8f9h)5yZWTcK1Rd0>3Mfy>wXjsz zYCILk%#v?!dLqVpUBjJKM4-|jI)7Xs&RdnG(IGIL2}fRa)Sk(V)Ni{r1g2N(AZc;y z*wcsUAgnagC9!CtPS(mO6bJlywK_&b0Sy^*h4clGH!(gUXl*tWNSN`tr)xGLQbdQrtQK|P7t0e`5h@H#WC z;2J445=v3$fXy#7u%*x=6n9x+fZ*9+?n;6)Bib#)^ZO~&7lT0xc587)1#QDZ7WG!T zXbtL&i8GR2HJQYpi-V324(-Muo`+##E#Rgypnc3^C&&~;T5%LlQfIKPWbj~ITPn1z z2%gsD6vUG7w9X+MAFvyD*nbd(#e7~TXA{mdny{;{`ja7|NNGoC`6xCD4SA@>^W_p5 zv}6jxa7oo+veo8NLNu$SH6OHedOYxH5Tg~!t(g(jPv|)>tmiPli37q{v5tUuRk1Z7 zR)mr*n5GD|=6!wLnRn<~R;QZmLJ|R&ucIB5Z^Z+)U-6x8AFeLL4S%VKbvGuxkuSJQ zRvS#JgAqC~p2PkK{wq#FU2++4pX45jzq?)GoY&=5xz;Frdg#Uy9ESm zy}A@wu3xWvKx~_s(QmD3nB-wTN$Va6+P-g$YY<>M;Tl~U6ygJOTW-0t3>vEWtesiR7OCmzp9-ZbpDS)&Cv)S3+XwRT7C2V+S~ z+Ras?HlKFERi(NN7pcD9_+CAlP~EgSq+2$d&HbQ)U>F2PLu_em47lM!4XP)9*=ZVu56CR_7Us<0q}U6;hpUIA=t2!^|_yui#n~jT$Xj8*cQj zHki~!yy=Hial(#53)byPY|$PN5w_&C`7G_U@L?dU87=gol!fb7w>z2|sw^d%qLNat z?V@6R&QY|aL~Wk!nfhFrrvlvsB~M8@Fv%=red1J;0DotNYHhkfJL_>US~EioRF#z+ zi!HrAn`HALJ0^QUzcwPURi3RwfQN2B?&%gyD+BGQq?9D&;= z7mw#8;NywM_FAj*X=y;JDjrV2Z5s9DP0bS-W-vAN)g+E4t?I;1hKtasvt0g~iRrPRv@o`60%5JKkGhNH!e%3V8F@y4>wm+f?m#FO*QjYjoQu4))JbQl4q|Pt zWVTx2T}X!}O>(^P6>@GzLh6tK(qs&r$&`lVU~+%L8jXfJo+1dSHytF}uzF3MENNNo zv}dYmkgI-}r2y`AY+TnDt0A1ykfIExN;8~9&UmyY6{fnF(Y!E4d>S32A+AeMmv8Zo z0e>}sdON1u&eEc$(QI6AHC>17CbOz-2vd`HBpT&SuuJIDoL}m*IX|2E223q`%$%kt z%w)A(5|dbqJ6?+(uRA>G^@l);2tyE^1T5ho+CsABwI>BF z%SkinFJL0A_AGcG-ov(4zU$`MAL|#0}2&pevv0LpoOb-SZ5F2FRAPl<0Dr7MZ zG}OnPa6O)rDvFtPtH-y9w$+@bI$4wZMpGnI!iGl>8+aHucO4_#teUg&8ru+>i&xSR zsSK9wCdY`4R@*n%Lv!A2umnaiV1EcC>xwt11MODZYPwv#XJ&14B#p3nrIuAC3YBbC zpf{uz^n%7Bq#c+H;RcI`6_lLz`B@0|!y#B%3M*M3d9F@$0%pl6K)Y^rosncYK)s6q z{H&QI;}H+dSY)-5H}%$Lg@F>?;=~>*F*qMgn@9^$)>E9H>uwKeS<_Xltbg@!7)&xZ zfC+Llml(XQjMpKbl5@!}AF|0*<##a`k3slE#5Pvbdz1noR=9I*)j0T7R5p@IWi|KG+`v_43a7XsH_PUsEqjaQmxrg7Nw+%D7A zqOR)2)ltKt*My_==^BSe>zY9$aVOgNT|XN`e!AQcv!Gug$zTEk4S%dzqbCvo8jG$^ zSv^@WTJEyCK~Z|W7|C;X2Fxb{>1#oUFyJABbOw_?4)pwvqmTSRY~W@E?Caf39t#LJ z_o{GhRsq`}8>4{U05}|!NKu*NSUXDa!NhKgASnx!6OX5xwcn52N~mCcWnz-3kp(Fn z;A3eTtGz*|liFxwxPP*H369u;8^$WzUrJ`D0*>5N%uyi=P2%5Wtu-Eo45KqJfI6|c*9>e6Ht z7|z!WJJzA)CJ^n&bB6wS0#*8r$pGrf+M?yfpxOti&B#RDc7M}%J1Qi`Tovrh=YT@f zbiFbdhm%g&XvG`F?lsyQq2lTMX39iaE$Yn>L1d9gvvEpP)Pv2;fCT!o!dQOl{v9VZlIWsL-p3Xgoo^gYAT8})#g%@(pB zvx^}B4CPjDHGc{x;@m~R&`se`Z^fImXD_9-q_E57sM}6NX%RQZU~@Wv7o%q15nE|T z_lB_7`u2yV&AuqdVNC5D7?xU?mTGImpg&OPe(j?V&(uKVy*lN#wx6eVF z@eB->g6AxK0`l=?CdP)L5FKzOZ5m!O>OoUUnu;T5NPoGiHd62b!O5P|nI>7p_0?sq z~0x&CFg8a03SuRRE%8vr;Ff zg(W=!A5H*&|W$Hn8dv5dtcfx0@ zBqDoGWjd}`C-!8q;w(v80xNdIF>cq7UAQkJ6=^w1lvqpYwKb3k)=p@jRD-!xU&IXI zLiC~F$P6PAPpSjJTzU{a{Fkh9l4` zP=VUirn%G|*&rEn$OM#}tkr~>EJfJ0Rt6r`RTk_nJ&((%Za7(iXX?BGUU7}B)c4;s0B$d@B<-!YA284gbm zuuO?Q;YS9VqL|ERuc(mR+ATv$=6AytJN$})q}yjPT#`PIZgTFJS5u)CfnT#M^?#M! zb_Y?tRii0dxC+7WYkdsoQ3u;mU3u~8Vm2&ID`fEtX{S~&7UAGY#iMUA%K#tLgY7YD zjrJT=-nBmvKY5>p;%%SbU7yvJIe+;j`L*ei zWip%<>>y816W7zsb_K}keK*a`hvOMfa1S)3>!9iO^SDaa27zQa#PE4$5figl5mc7b zE-2v*u-<#fb-=z?I*yW3I{?rp3YujKVPVSE+fwEx-a!?QlsaLO^B{SW_oT`(Fs~kX zJ3jXT$~L#PaV_kY7xL>`Dt})~i01Sa4&LY8$9?*cBite%`_V$9(po?bWdonPevc_T zU7#+l6s6Ax)RzgQ_2rR6rAt@^iB8k;8;*4y2_#_o87q^XDl66>0)j@dQ>fLix6h+#)RevuLG3jqFThKB1 zrgZ^?)Y*ew_ksoaWGBMzp0NG^G*wKP$Hss*rD`@wvO86p=EFJvQb_{fwd}nRABv(} zm3f_l%DhWK4I@$4@@GKyp z&&(0w!CB`F_mOCGo_}|ZA~-=AX;&R-AB^9eVlIYZD~lPNH-*B)HzRpr zR5w7N_2(dK`G1xrTMKm@$JC-V_qu>2>$|r|4d1vhBD@4=L9W*tZw^G#gJCXjP|^=e zar+CzwVd?OGnU5xAD5BV6XE96;AHen zxSb(C$#BAZr`RG{zIMT4ZLVEDH`n6zu+#l6kGeEAmVboz6aY&li{JjEy>Z z;F~I2qLs|?bnTkZP4UR-N$FZBT@_~40z{rL&1qmg^htkZq3wm7`=zs=Kb_j9XC0Ax z@mi=#@qZ)-r;iR>^Z?uigsC~i5tRBQ?fy=8rk^shv3G0K?LGx=F?q(Vi91Kl;dPYL z`iMw9RJulV@^B_cD$bCna(J~sR|gjqccU1?P@rQ;Ity$CA)Ru_(t`+%I*IirupGey z(TYCXX7trb5cj?F1PE23W4s(~at=!_Ax;C_w7!Wtgc0m`EIhhaRw z$o4EPH@ChY@|H?S;6XXwN@fQBCV+KQun1m*gwq*I&NB*^0b`-q5hDJKjs<_iFGC^K zteuI8>12-aqzGJV(6=ApAvx1XIxx>h_n@N5&22pT{c%N~*^Ht)zeR9=v~_H|360cR zu7BFaj1}9JWl2!0E8w0#m!dj7r@5AJ?uXda(1G*Y;#zyU5`5&%c=K-0DXPcfib$6W zk%?gh8=$pO+JG*}Vx6@^w?}|C$-kO_7a2ok2DKs_c_5bYyYcX*TbFZ~cz3K5P zsxSsc8yhn{?vzL^%UpLqq%ukT)G^!hOkL}HSQrCvI%ih4Q4K?^Ka!VmSE1+(?tk`N zAc$+TMKvrw%iER){Z69y7eBH1-z2B10pGtG1e+qm z^vZqIF@1F%gbG`|B*az)vZ+n^ewZDmm4GApE62&zscCDRN#I0mAuP`?ZLk0`Cs0?c z%G#8TU*RqmByPJEa`y%{A%MV%U`FogGfuFF7^6PHIWkayB>=|ewfoT zrO1f&oF7^}NL4&c(?BxY-~=J%xglU#;>;Faz%=kelji;lD#bbvm!hBeGJo!ZkC#(7 zH*^A3nQFlnQ5@=7NGsBo)At5yb%!5pN?^*sZ}k=vOd{kEi6bO%E^Vd2@UWp?0DK{} z;y#5}GJs!Ej^;%vqL!~J$C7E4Ch+eRa1SG@K`4)&thq1THMU7G3yn{}6hZE1w*^3~ z7!@uUq~hcFg>k*|DdV=I>VL>o9EO|upaoCu1}?s_Dd6ax7r)TFo}vhWX$ zRMF^7!T-ALtHvD#)>Z98Okz7D8!&tPElC>b-dI$~lUMuui;S&E<@NAj27fma^`)u$ zv9)jNdEaU)%)1XRVG1+_2wkvD~S|%WPoAFBE70A z`J1cI6zSj!G&P70DSz;r@_ja5JY^0GHo`qMze5WZLW%$^B0Rx=fi|fLfwIWdWAxVO zC#nF15@IHNcz*lXuX}uf@R9){YK-}#L+Y^~YIuM8W^t}x7|{k&G{I1jDJ10m_EWH2 zo62g{&eSPdyj2XiPkGq-{`E+wMuB4bO5G|ufQ^g|_kME?T`Qz$%|nWOXshd^KEwb87q|Z| zW$qpTZS!yCs%O+tgoYq>LK4T?Ph#8^A)o9|t~t&PrA9n~Ij_mt^GzLX+Mq;Ce^e0l zw$>^_C=>94N`I)*l%6x9_ifauUgs?+8ojb_j)kYYx}qr*nTNTYETLD}R1V5vt6o!Z znv0Viih_-Ipo$QsQ(enH{TjyOoSZOF&h$2+Z*!QI(j;W}b5e@qx6yY&2ZQ7+L|)Y9 zkCuWCPwE}G9zb%x%Qvo0Gpxq^u`1sz8dpx)YJhufrGErE1R+PH(p3Az4*O<4#blHA z;gn2+C)l=D0$FTdJ?CAKQviPu5YVJU?sS3t4(bYj)>#2?*=dBmkW)V{RJ86gSmI3p zf2$Z_Mc#?uRE50}lZUb~)e6`qgMrspgIVv1q_N1f5&YP;7Q&-`vK0Z*s^Ya8!B-4z zXNH*V1b=AkZ8om7w8wFblI+UduW=qh((w0vdc-H;#OcDu)LgP?|w zmVW?@1eS*?uvd;Ws4rZmg<6T_zW8b&H9hD=(f95X#ne%1jNtNp`qi|RFcA9$m%+pP zee{y(6-`8_Ob5}O4J6UO9f&FX)wRgFw;J_vv+kS2%9hIyIOC8~_F{i*1{~l(N4J-g z!|Jm?D1_01R%)P9Hv_zRUlMO@TfMK?Cx5Gon_T^E>+tr@h|jU)tRR7H>+1P%inmM@ zwz`KzV8zHL@8n4*Shh7|M;Mth!w-DU)jPK`av=ZkwBJ&<4MKahc4efAgp!}ot`#R( zHvPEsSIYHsPW!-79E_CJF&%&iP0XXdndr(6vStzaS@KvFJMRF zb%#p#D7CD>VKBL=666SyQ>5GBNUI5fb9@OILOvUWaQXq|bKc+*L+mOW+j7_XPiBTa@@eK(M08vs2* zM>DJkIEGHeCKL$WJ0n9WHeUC|O%pU#L>B}#6(M<24bK^-RUNd$#Gc7HBY&`HJJcHk zp<)YJ_mQb@@(@nFEepVIN)Qv8y*LAdRZD)MW%*YPb+xF(2)5Z^+noWBtT~H%9e_|W z0GgYN^wJ9xe_L_w7m@%we99tg{rTP1RGTz&w$-((Edw#l{C|_Rk3pUUsg!)i zArtwH8_wc+x$_fb)Cnkm6e80Q*0<6losElUV0aZHjmwwZK08)Yq;}$M&SEzu=4#;o zW~v#@2uSY66)JK5>?6WQT>KHYkDPDm23^O>L5b7blhIz74-&S66oPkmw&;Mf@q_Yw z_!0VqsLTP5;xak=XMefuNGhOtei8zMhpCwkJB^~QS0L-=q83EoppWW~cTLwaWg>uF zsJ!XSapVo{XW1;^$w)(y@gxNmUE{^!S&FPXWL|m6?&%rURsdgT@9dExAR%ZyI6~wO zaHI~jz=~+``K(?jdo2x5L-Tg3gsN9gOSWP_l8bPVw8gqu9Df5df%^kBLchgcc|mcS zAPJWNSE@KxZOyBY1;X#HgX@Gm8+j*naIHNw+(o|!e$cY~f+^e>Hx)D`Xn%;z@~O%_ zb+;59W1SNcbozh4%; zO<4_@@zhBjuzyrMb3@^F>S?di+Z8u|jtrYLGJCBEEFYFReEbeKe*0r%4$!xLa=28n zt)3u(#86cJT+$gA1OH%l63O6T4Zx4z>;MMn++ws^Ub&?26}n5>YWL|a9!%==j>roG z>kS6QJERVv!E&Ph_jQ5>Hlj_StL)Gl)iK>w*mryCotwoI9lRQ%~ytlEH6vUdF-nOjOcj%r`&%4s#5%%h_B{X?O=; zgnZoUdO}1VmzB}NvWT@r1DStJdeb%FA3;hdUX0U0!8hAH3Z$M8qaFtjhjE}`l{^B? z??%J9f`6mG0S~mG6_Jbff4^f}(8rhii5;PUUSbn_ZDooZ!9O?C;Lj|Wa+Y@~x%aft z6?zJz=!+^)<)u8_1riS&;LQ$%*+ss^5VT^UvktS;-K6VZ4uiuUXU|W7!kSr9t!)bh z;TZy3%Dddw3-u!^Ws8KWvPo~U(M$CvNlQ)4+JCsiF@`JBqmLvV^@7z8`$ci*K1`q5 zNI92M*kd$MsF_ZM!-QL__NT&2OH4|P8NJ^8D!zhMq$3?u`JSLELo-X3FvW>UB;aPz z9eBW>G->DU<~wT=$skCaH2clFpI4=DcF9KUx>23IdsE*{SWMeSjpre;q> zZ3MDXuk5(jTm8n!GN`V#JUG8Am{++_XMamXKL>cHlK1G$C<{=Xce%bG6R~lur`9sg zAfU6pwTj-;v1q!cQU}Ph+#vgYdxgF$V#9hi=8U|g4N{90moFn2eR)(J-JVGI$VJUh zrkR{cA5WJ(yY_u2;amzF`lWN~0MG}>55q0-^Lj?7pbP~$27l^{ z0Kxp^vE^!=r*G6?N>h*!@c+i-+eYD?f`-iuLm}&?M1fqUTcT0;E*8kp-g)~#3#~C3 zX0t#&{01_ux~qMsxKFoiq{jcj+lT6vQ3)6UP|dNlZfA%Ta?Sj7vRCAc>UPV&moA3w znl1W4fDS8*4&hOd(^pT$r)Z6NDu4WNvQaFcF89S2KO1zL8g5FXid~AP0PwT4*>u28Egn zeZq@#r^Cn~LBI^4%PjU|$YrA6V{`g3Jvy$WgsU0pSh$ylp8biyW$iMJD}OESrUTL%5E$t!1~PJAb)6-t}?TlT0$x76ohzzW0g-TWgBB_@jF*ofICT<$K<( zznCrep(Yrds(dA8VB7q~O`9^vBbXK|*HKBd%x}&uCcZ%5(0p z#!Jy)Ll#Kl=V&y;+WAR1 zTU?fhGUt@nkBV}sjk={*0lE{qsu!4l^YNMo+dG2J<4Zq%Y=3`XaUWejGtAGrqj~4o z-!E@o6093K)FP$8#)pZ2fTuCh$qZtVas-v2-dnsafaCXxl}63!ISC4baTi zu_o2FGoqu1i&Z)e`t*EwL*NhD(BCfC^H-ddgHt4X8A5|<^$wkKl*a+rO}2BQ%;4E^ ze1mQ8Excia?4hvsZKB|MtzWx z_In_&D5MaGDR8Ez?v{Qtrswz=T|C+W2W4KjWC}bCie)#UK$s{v;B+u;#FP)z0UY z3q|VpRVeMdv}?rB+%#jC=@qY)6a_Ym=trUG?WOhoaPrT_w5AW?th?Pl=66tH8k?rR zQGYTIZ-@v$Edq`gwBEQj^2SjTr-0lqdm>`(HrJ{o1KEZU0$GtnDNlZ*a}6ME-)l;Wb-yM@Ta2Po*bUY8Yz;{#~Lr zRU*h$yW=U$MtK!zr7ne7sG4^fx40sq|32YyRe60}~_1N8_aswD5$&Ww*u;^RwW7512EVQMHq$>z?0o^LR}2FQLi; zZvA-}$8|>szOpNZ`@kxA%YRWnZ;OM+@kFihmnXlQg(fS}>U2z;kk}jtHU02wcpI>P z6>hhMjgaT7lhG(uJm)Ot*Mg)p#C-{%9HdRDy2R^m|2AGD1=l6n6g3cfLYc7NUp1|3 zX&s((FLc&Ow3LJ$U>Nno*NO1MJ$zJU;t6s2@X4N@9`x&N@CCJ5b$?`dk{5xKFYh&l z>bgWrvgq)vl|l#GbLjDE^$%k4e;K;|BJ8j{AAs50`tR-GUS0Py(Yd#%Xi1C;TXBQj zUSM{y$sE1ufH{Lr88>) z)!wpT#1!?ykzm-{fPWuV)!^b_>A{gk`J-c)8(!x*ms64?A}wcKA|?9fU;rQcR^)S| zs9XvP@FgZ`ii0VXn#$t8|P~=XJB8KD0|NY zgo)qrD)JyLuh7qL)%^p=--PkMI(R340-FrDNT)|kl4DukDIMI{F_Rm z#oCg4f5T2$&wnUPwxAyr?nM@IQN89{D39~$W#!W5Zny`j%3Ef#(qw8nDyaw#T0qcl z>E4`RC2&@bHQ1>NM#^oCuO&Lx$W%(8q<437Mz}E-$2ymv>h$U6cOB>ygauQ z5{dURUN+yNCZ|eTaoCn>>+r1)Fw-M-zBWLE-KD$}ki*aj?0h+8r>wjRzi7bd&UII; z=i@U7!hgu2Eq?Mv^3$9fd4{#*wgqXRP=fAR0HoXdn5uCcpPOgim--W3JMWHM7W_RQ zC>PY>s`vTnmb^bQJ}w6%8yRb(_g)Ot1yt^~W~)eF{$>U3BImUm!Z?Af#N|5y{m5OE z!!5#=xzWkRK42R-Oz|6$E-QC522Y7=-%vBZm4CjrhK`2WC-7Z7Sok~nv$aT2dW^Gl zL{^TVM(e1V`L0CzF2PQv!}4i|Oa~giZsReM)r6A0Wx|QnhUc;U$Vpw>ddB;#O4i?N z{nF=f;acZY$>3A^{cmYA~M9cjDw-CI6H+6fy9ZhBpDMAEL0ehTpfhS zMt@7-C+<6T7b^kb8QpS=Xo7eSg9Qb&s?{LI{1~pSaygrYjY*0h5S8tW=c-NI<1jf;&fKB(^_GhdOIgbXWC|$&~i8zs?!~r

&tr_7GT;|&w-P(jL5%a3+Tm4;?J0auvkxniM*l+RbO+-}9^;b`KT)a1Jmg|` zVQS`Vtd3K7K;mKY@KGKV9*}$t2h~V-^~>UXP@6bQix^W}olmISPusa%%!ky*UP2YOj;L^Pb5%vc+DX1w zTZQ?=LmyU&KuYfCA^u}j!@muOlYeQ6a$hwgvS?;$g8HkrbfP8>$xvDeeMr{=w%>GK z$3|YDNn6}^t|d;Hrc#R^ZOlEtRab4xb7fyZtqu2sfj|mxQ6?_!fcCQ1>Xs!tPTjf1{($}zAwUo|+;uYm{yV`{$*$eygViKdbpQG0r&(SN-9=6k&h zRMO<>XBsCWe-3L4NX)YYYK0iBM{%jd8GILphV7>QWHYEPf_yp#N@Or-TfF^LVJj_l zZiw$29q(%Lcdl}43@DvxHozHJ{OD1QyhwiT3B&?DwEp^9+dDGke|#Rr<64H9w#5(!Ni(v@WF)XWS6fES+mtz0PlLlcDz zj!w0NRCEB8rlaz=AqqG4^6_dHI-RvdPrN9As@OmjrC>iNRn#^Wo$ZF8M{A{ z zSH)~wc!W^}2ya&|p(Pl9BD>0~oBSKh06QrSgEuo2EQhAc*%2fjO9 z-2IyMet>|xyFYF%FVUS({6v2k9W910LlJgNj32_b2b$M5&q%7u2NqcR-w z0#SJAj8}2SVXqccFn`qHIv5xdeEVQOR;+-gJHMm1fEX_Ux7Xs#5`T^FpBPQ$zP^}3 zeTfO~ZQpu^%8dipH!A&>;x9tiS)_1nY$;FM2cthf%N3~f7TWAc72Ld}&aPFcne1>T z08k?DZ}EUJ_b!85Z)6!meUzx9?g-PCRA4C_ogHOYbmjBwU4MGen0R&yzn!(Mh{-IQ z1&tUjyC#?I;n;qK-FIL-$N;;vp4YwzryPw-=0Usk{_i`>6=LZtRZ{cOx z_0S&QAFSQ&^%4hCt2sI8uf3EmF^HT#elIpfK3hZv`^u2>hq!^X{x-IqgGz*cwms%R zxh`T~!k5%?dVfqTQa{)q&H$nz{l->(lnM0hMwRZg$|c#ZnQEdR^CwRf79zr(3Qq4#)-;yd39Bf>XfZr=&~ zT(T0byu1B7HMR}Q;^NNvD+k@ia*l^*(*Y6 z0H@EQpvyUh50nb2s_hS#B^g*OEMKRVXlsC8g~uxc6z4K85th8_6UVJ~V&BDb0dEWLtR7G}$g}#|aa4X4K%6Uxc3#C;mST@5 zP!SN z4EpQfQ+nV+mvCnkqOB&9I;KOjD=qN?RkrfS*MDZ0=fzAQ{~>Tl%_uCj5<&4b-VlHD zocq~?7jdRv96>N5J(Bg+f{4|j%yvUw%!@ZjQM#s|EvLq`X+{dAv&P}_J@KNF9Rp(n z5OXXALY^Mml4t<0vTYoFRXV7L!7o}}galnahY_a&QgOebfafP8YCS%2VKv_c$d z^MAh4<2Z00*yAHy_jAf@%KyD|uY=8257SQ|>#Y48_lp{%?02_1tXVU*iv+JR<%@fQ%_TTDpJ_usr*sV6H?X z&%sUnXmcZ8^yO&O*j?(i0yBts3RaOBwC+zV&2L&dFlklh%r~)^O&QgbIaI`0&k8j| zf;rcJ6}#JZBrk*u@KJ)Y9l5D%%5RGii$?k@B&E8vA8kj4wywHY5lB@{oSgk)CVyXq z3;yAineO?jL+^|;5$uLJM1(cC{8-Cd*{6$HnS|Iya`9Wnl_t+$9G}mT``&gD#hC{D z+g_I27a^UG3;=&68I3tF0}Bj>cJ@hFEmEoVjnWg^D2 zNq#i$^02pSax4$OLBR*lB4Jop?oA`+vdEz2_{ni;jfqKxaw{9DwuDL)1`QzV|Mj2} zKZz-ie!+A4%-+#=4pudzu z`SAf1aPjDMQ3M)$%WDaBz0sRbLHBGg`jRI*w>`bxA$*4ZCUVG*8PcE@N(D~Wa}bWM zT^HP_SR3sM+sRV`T85H`JZoclqQSl^YsY1iywa&N^Y$`?xD% zN8>&tuMFTgP#>(j0e28IfPYqzJMX2Q1=MA0%2Oj3MuKve#|e;s7dio9G%dzRN5cHY zjpy~Sn?`NiotwOM*NxdXI;6zLi}GSBYRg*SadgX@Mi;rYS`P_R+A!fUKYkn{=enZ3 zvrAgE`#)L&``GP@pZUVA%$lmdEkUA;CG3SZ;R{?Wk*%d?&dmB=hkwIO2CGo>h`dhA zEub$&@0m4D^Utae6~WUY0;oP44gY%GLH8h|(m^qMvc7inXYi?*gj-!rKwA_1uuwYM zB0DG8XKe-1-_OMX7hD5maZTTnvneowMHT&hHe6hw=1`E4%H=27Lpj$hFe5oOjh(Ep z8@B2h&wAd}V!9YBZhxv`Y@1=FqEpcrDkjpQ8Hoo*QhGGn-TD+u;i>|JqF80*XF1SWjBa!+ZrGC2E1BmGb zq1E@1``i#Z)k`F>@H3A^2;!?()$_8D9nMdA;IWA$VvRIGX?f`EtbsDs!BOF;3g(53 zSrp&OAS9UuV5J3Q4uT)9{O&|;s74h|1501#0GsVga(}hL&QkG#PboraH1CWwGX`8- zWQakz`Ch?(Q>tHltNbB-KMNbXP328BPl_SU)mqcp9E;eSjE_8nIOQG`Cpr(&DdB&aKGETqG@7-MdaroH|$VoEUOB48{w4M|p9f=3(6A23! z6o2Nsq_pDzK|sF0D%|tC5>qg}$lb-^A;?V5Uuka#%wEZf{H-6w>)JaZj4ytt;ir5+ zq0(s)WhvR>L&Eo+zzzPY@RA~0bc>;g+vD9X9kb@A9s+rb^tb^(nT8Lxyms3{?+6)V zg=Z}co@ZNM;oebvg{Z-LXv44%kk1?H100->PGWzE5ngLBkmJs}(r4Pn;h{B3ZEo21 zlpQ3T%Y@RYeC!8N%#>LRaE%Wv_ixGYzAxzHDZKYAl_7_XfK2XZlszwirg`!~g9w{lm8eWzd&dua(yX3?xzNx&VCffZ4a z$Ot$?`E1kn_Ul2LA{MH)hk%uDwUMH%$hlus5@NHgc-E4(G$M~IWE2Jyqjf$;D{ zI^h~w^}{z?u<3IJpK)ywe;B3J>84r<{h5Esd8-&vW~u)~A$XpeK$QbjY0%^Qo_aWy zK!%ZF-rgE6w3nkM-%Yf%GwNEDb1P-{?T- zjU}9Ly{`nmLQM=s@iqAVt13^J`$gHGnYE3(p`Q`;cI_P z+;N?ZH<;IIvnGjrPi+w$Z1Z`Eqvm^sjRZDAI;C2bg5=5wwkEuStIup^MD6na3fLUs zM)w2AFsg-3sP%XINVurTs8S*_r^=hH+_4bCcX6?ay8yX3I1z@9B&?TlCKbzzTxyhA zA-Yc!M{-iM5|Fq>gVN#RsP6sKKka|*TprB|y99}QArH+Lz-4gsS7h-`NPAv7t;nb^ zf?fAZ?mXM&z-AQfiFAlu>v}e(#2$rQMI(RAjMRDGr_op{l?+n_&=bs3?30~9x%%^d zYxR5r*VYYOso`P*3n(TIY80JHgi_6~ZtRFeka%%%LSdd_nOxlaubxyepF4j#d5kF+ z*Ris`ZxsQr@Q4lP6LM-Dqv_#VP~|K16uO$X0Q}2CcTgPNWKuHkI_}IFmTPD26>S1skNN#5*kW^#BtCov=Swvp?P<$| zG^;Lz{`k6XosD_kdMiPu{BVB&_m&`od+*(wd+(3y)d%mFJu)B&GIdO85M+uxl)pL| zhOaa_2fzVP{meXDmUlMl%_kkC3YZua#yg zP8i4RQT4hwf-~qi1*+#L7vP7h)WUXQOPN?`V6ToW!gxodR8-Mhgz$e*L8eDE%Xk|r zabW`Sqi5qS_YIP9|BJWZ0y&XuLc8quXKS#Rre54m2_1LK(i*4MB(ifhT7zo1n8R6@ zo0LrGK+~1jsPjzunY> zuOp`dT(Mf%qFXT+**$+{?Hc&x{t>xjdu~D&Gt(a&u4bayM~sI^C*gqhZ@?`f;O;d~iM$5>T)aS8o8GHOHBq^Dy zCr!Tw8*4M+ve20}nuKjvh8RCS`vnl3)CfwGqx9K{D86?svps(zh6D7v%Zlu!4<0#= zT+c1)*g=z-$gIOf;0vR|RE%%*somzHLKA4?$o#l?&^Hkgx3Gk5IsGU>^bmAuL?x<<3Ow3A8vorksU%;w-&S z(S7k>_oxUhZ9=^iw`$U}0^^<1TY@c8EYMLzfUAN=)N+6LP6_$=B3u-eT{=S+_b%ZaUj0v5d3<2{zXYJS|Gau5r}fj zuw<78(N2Fd3GgsB^eN+wX&34f7$Xp87ctHy^+(Ow^UbCrTl==Wp#z_@ zUZ68?$q~{qQIOo_LeemCihc8>a#>5KjXx74Vm8L~m7IScJ0djw44u5~^pnpcX1{Mf zi^fLho#06Qe!%_sb7r?cHYydO30h8Sh-*cnY9}C<-B3a5ZXGSIE6wcK)avf~02ALZ zR<3{b?t*>Xmli8r9a6=`0Niqm6x$(UafZlEjq1(!04&?J(U`!461tD~+hV)tSTvUPzm zGB=rSNJ;yZ7naNqiTJG7HH#A?F5?M#0m*-N@B|W3c58B)ZzH(x7-|z`lPc|zhy1@Wri#vMq)bolrVg1mZb}>4 zKCy}qM7)Bd-kePr#h8&D78VVOq=i##0?7NNSf2-}=54P+vayXZrZITAF=)rbEOWX$$3d~do+SA)Uj=y z0#?}D4iksCco{YjSn+dcjxYPZ()$d#3HFhjV#-tq!X15H4OZRaju62^AzV5LQ?!C6 zFI%a6laJUYJpy3_;`PX?LJn4R^YMRzgO_Z^x*UsGCG;`wXia1^b!Zw_yC5?Sh?U!q zAQL&@Fpe}4lqYFc9F*XKW_dPM#2d;u7nt9#bC>(OYrF4z{~T7gUX$ZZ3o&hD-qQE{7DRb?KpuxaTJW=&rqVh5N4 zJ`2PI2r(HD_yf!bAukeN7w$9JAf?ZLpe(pe04x3y<1iG`qWFiOi)k?I7E9rWVVPvg zA3*SaYN~_8r|BEiW%2}0hV6eH6?#d`6Jz+LR~Ny)L1@Q7n7c5@RS&dal4Nkih5}sp z8Do0}aX?CiQOffsyPJz>tO8a5UNNgFK4N~9+Ka!OtRp0ah=<8;DQns74;>%4+o6m7 zP5nX>xZtk9;<;RW>Qf4Xv6flnKz)>QY`?b*kD{{JC;Il^O!1*J5wd?Nu?&WG&oYdk z>09lA2l%R}gM|b8G%gn)Ey4V27nt7Z2WbhWR|*{^0e>@1?@_2Wu&!M@SM0|d|-^j13X`4F2Zq3@c*o(OtsMJ3|_~J3sISyus#B%h@1vk|Cm=2kK=Q$#{QnGm(QX6bN=3!kC8w@5@sB z$)U%7!A?2GBIPy`Eg9jm2Ah1fM>?xZ<|sHS^kT&9g!4J`7aevxh(rsQ*JK5Z4xeJr zx$+3@yp<){v7XsLj*WmVy064JR@NWTZF~GGL6=LA7q0D@nZA!o6`1^LXa4-{f^pw! zb8I||${Bx%gxfg<(m;n<+K%yuSd$?RlGxJ8M?ABEso!fODdyx;bsrjkQ&J7^sesoC zs<)l@sKFgE8HN`6fe*wF7M|Y=++!j_F8vTe%5AQ)!j;hUtC*o1v*J*22nDr^){M#V z=vh$Dihd;E%7zP!H;{i7`|RaKVuW!d0h_DvPgH-kiz1cP?)xnqP1Bh&>SxI|##*yz zfGO;jfbWUG$9hlUZNu>Sr~R{0$({}m_XH5+mOaqTd0^=&HYez}Orws9-6fqMpJ|yH zEC(c}Lj`Ydh{lK*T7+cCD_gQOBute84FDRU^o1QGE}s56m-%hM0v%*!&aUg^T0^s3 zr8a*eg}ipdg#$H#gEQYO0Iq6H>^>7cww=awvcNzVF-bNyu!|PGR%G>ZvE_L7;{rGv zMBxX`>U|~dQb+s&h>?cDKP;7UmsinYh(?FeAUCFyJHI9cbJrZ*~sUIdS-E+O+qsg$CZ|BGv$AIo*N+#MEfS;V)p4bom@55fHgKg%mfp` zS^naWcyzAUa|6uDQ-q1H(|%r<5mP*9Q|Y$ELzhTB!X{*geVnjK8w{t0h>)P7V88N3 z9?PTAbO*&lh2MI5Vw9{P9NZiI3)?SMANZV=vV1kjZa4CLkJJgdbR6YrkY95T2>X9! zkfJ(k@nnpOvlR8P!XY&C)U~|knE(hab|J5f%4qNnkgX_pa1>qSw2X+ z`mdDlJXRfZw@}PqNt+(}0w0j=mM20;m0-&wv`03A?(=*qAzOSZ_zfT9*`T*NnpMET z(W?^uVe3mhc8%5M3(|fp=;-CL1%`q^Z1h+V)fDT=)Q}#?u!R&+rUoC?$HpHeIgVf-Veg`z4}3&&+(*A zAcn~^w&SHUYls{%-#Xumw!h!G+5R{1S`Y-uPTWjLY7zU zo5Iid6i+Ww3u-50uL{)m;$ro8o3$5c=`<*1`ed@F5hPyNUq zJIFf0#1|co=~tBD^|nCa2)T&~;yq%vcA29ZAoD8tq z#Ga*_D2+4DBK)9$3GvLyCefuNx87j8xG`B+qjp*%u|?ohYH~}K4m5xLc&(_wE{gQo z@6-B?_mzzRi1?+tQhif9Q5li8To^;{wX`7JNyOt|+EgD$HpBxux5+?idsz3wl8bmJ zRJ(EF2uui&Gq`X1E9mCadhnGFrhZZQvz{dWrWcd-@%yWq~90 z#g^u|!dFC<&3S7wmY#oyBt&Pgb+@np^k?eVU5%-&-qI?&;ZONhvHn|Dja4Lb)MjaC z$W>chy%eY21OS*%79W}d4S4CaiVqjl0$-y-5ZP3U|D3edg z2neozv50!^Ihu`16pYBj#j)Sb|Ac|S=*woMG?zHrNZZ`edVYTyK952=q^f#8+qame z1zL@#oV?7ZYH;yZZo)v8>EF zG9jbZZ*!@>an__kR?|D&;4Ym;SeE*kiMv4BOH){IxVFx%3Rf$mMWE@=(e;{mVr4Ee zpFv0ed}$N$e|vv0V4A;9ci4YG3Zy!fR^X$Gk0`WoRihbirw{;62nM2=ds<#9=ehl} zck?B_=SDGBQ=sH!8PlAhMs-5D{5*+N3l}JLhP)fL5$9#ft0fxQ$Ae-B%X+*P znj}Dsrt}S z2p`BJxW=EUg4{8%?3_d)6o3Zvev@u%%Y#ikHqF8c=<{2>ANXB2KQ{2@Vy84AfcS?`YY^pznUMkKXfw3h@5UTe$EmBGgR z4HrrAztn#mi(oj&RXT=4$lA#ZQyQXpL$?yp+ki~#szkReN%XOr@7mR!iXh92mkc8o ziHXtT6xmPI?}n1luJ8NZ$qWYZt0vN}(;9H^cUgC9JZxyG0yBwYEh?UDxS2A_XZVlU zm>8i+k+kT1_GT<<(jSCk{HCA5W%8IvxsHa_oG5y(4;Nfk3J}X z7`&Q3!9+x$J8PEJag(XcrrGpD%`14}<5?>u99mVrAa|`hq1&}@S#xoist?n(kt9z; z{#>wtM?yXhfM`XB?N@@F)%>YZJ_uN;jl=I_J*%dBoX|_wBKXYi^kPj&g>CXp`8E(3 zWj}vANkad?a&T6etAd2k{|I7KFwPCXIOG^UoP{>+F9^T=bHh2`DOlAvn zcf(C{7rK!&`HM`-0Os+J)n&MJp1RYkyXUbwRUA$6wHT5sSe)Z8b^!o~BsqxbpzD~u z@NXf=qJVpt{6T|M3MlHZbO&&gW=J2zSt@^3T&%hAA-+$~?sh*173oQYUh#Y$Cn?NW zjsYk|n-QHC2d#bR^y(Au*AlYScEV8g+b$6z1H$L=n&PD$r?|PtC)niPB5n{@L^KB? z8`=aS{KI?zKMM)|`FZv9a{xy2(ZU^yB~K=BFNfk+P_P_(T#o1y7k+M%dLO6 zn?h_1H<$&8N6Al;=h?N*rIjy7dw%bk;>C7i!N1eLBNjIW9>ozdfqd9hY+KcpPI20- z9Pg+&D1Me9vE-P%CnM;{_|cOi!}d$SBo8;wsQSTo6yGYe#-7{&U4AvS)^ZpdA3g0I zjw$lR&WSa$#2S!1xTLf6-RITQ1TlYs21cjF)osS>nS74GW=vU$R}w9J?85p{DWg_a zHI8_S(duew036mQ3XnBNVv$ESs|uGXHD`-wSdMPt>SsY|bB_R}5c#vfo;t)xhJ>&? zE}7!U$EER9lWApx_ZAV_e0Is~SS2yrF*8gg9PD^bSQyzJ0IOSgqq}dQv9<0bPDA2;tE^tBh<0 zzDV)7&mc}}q)UA%>df|ag@k`6v=E;t;habgROGfC=+l^OF%U?ZGXHIcVVJ+do=E`b zejMQGj)#~G0uVgsmfnv?NJZepu5wvfjE}uUA)5No$TN-jn_ zy$<#?@B;v#?r;dKt1fanzo*$DM@@`cSfM>MS+na^4JJ2uR+L0or}2M<#!E-i_zl7I zWxbaCj0_AJ5lCTZ^$85w+sX{ApQD)(Ia%KvR#87@oTkl~K4}w#I4Sb-kicIu&URK< zH$<;nKHqus1Iw@xgEk6WVKW?3f$E0dYLT?|^LNXPm6DwP3ah6iRBfECuRy(-C|0=& zzfuzAZz#6<)xKfneD8nEnSG?~>_!T%L>2?%2nxw<@ig}n$oTHXwwV*qi`vl}zMk9( z_L{wc)Sx=tC2G-)o6!UVpgL^3xG)*wbh!t0eV0iuOwR3XD|4MvLtq5ZHl%=r_& zFIp5U%y*)V$(#cl-NL#9x0ox-(K>`o{(iq!7y+OPboi<4U#$20GjKHiO0^*qM3p%o zQ`(!M%B5Dk5_1s7%KV$XsB1 zYT2eDaIy^C+EROcGU+rLtsvK=Djxb>9&2B4_8_uCq$Yozz=>Z9gWQX4BKyPIHKxg0 zJgwm=%aF*VAB}fxtXF4kW}(WUvPe!#%DAnnLa)lHf`aslg_vU!`0%9nP|Ny3)ZWOZ zake^uUfcW8lb`7RI0C?%fQwG{RMPou^&+n=vG(RwHn7}$K(t*2Y*W*dTb1jwHgh5f z(-re_0>OWK-`Y!=U9qk<`|#aI*do7c>&_6uF8nukF1?B?7ybCuo_bU!f{YCqcIl~Y8^X>w1o!h{StuPvmeqcokHpB2(LG56KJM6M19IP^O#5p$32ugJD+rXd~ z`Za&?=HQ)bDrRko`N9D7LQAotYrsAigX+{v&T9wblY!gvw-@fXmOJGvjg=E74s|03C^7?kYaanLGF~B=}MXK-Bt?B4gC*j#27WGmlj465OyH}DFW4Wr_XUz6#Ezx@mi+g|K zq6pLDCdjW|SuNRGeoR%1Ks%Xpt@vV=2sQG_`3CH3x|z`;hUjP4VH@qTlXB*QHGB*h z&cATLYxZ}`h7Z^7%VBi;Xqr|3-Re00WW8-?A-+Es)q`-=uHz*?`QgZ^{OChM6Yv$s z6=TV|Tt7qLqZ{v&Gkt);g%L#R+6{j)eaby{4~P9GifXuo?nfd zB;MWA#Rl(xU|@bBEYZzk=%b@sGH6Y z=I2{uA&`>i*T9#O0hKo_eWu|kTv4c*ZPErbJ-nfG%|IO04r|!VD%jL1 z`8MaHYI7>dh&V=|OrDh>S`aMH{dNi!u>_aSS=MgmBBp-4r9^PsGGKJpN>?$7`Jy)G zq?rr=dq+MJ?sYn(A|#~Lcw>KQa00BgZfq)RdeJ0l-s*m=wg4Rgp-5yHZ3Jl`&99Kv zhXVPRat=j2zH4tq*N6qx^Tt&Q2fRuw?D(afedk+1Raf{?I^`3$h1y=q8*OF>LbB^iCgq7G9uC*L<%G1wXsUlRS+~XC?Vg0f zIEzM^biCPJW8NGBEssa}4UYj;m6 z_0DyjmBl$SnGpf9C_#L(HU>GDG`sGppQ9>M=pg!f+)HxnTkbuDKds5EwZ3f#C)a{G zl-+x!XDDE@%lvuZP2H8O<=pGZFwy$%D;k#=)97k7m)G7nZCHQSFSnMo5Y0^Yo-^cI zZqofRmfAdhGKX>3;m#$zum3Q+1_$l zF)kzU9B3U|=N#$XE>)FK$+wvpl!!_l-a1vaTf?Z0^#XsAqt}&FcYPdf%>hJ6aUa|r zRaEnV$W4O1C1tnlC@Z;o>RysIL3W+Y^#i*CJlOSdp@ z-+%o3z0@`G@BjMGb07bs%zoK_)ywx`74TkFbwuj6Hpn3 zahU(7uy%j&QZK`>&_80L^4Hh63#;jo`AcGkVPr307>45;hGAfzeDwEA&=mTUtm}V3 zs=vx$s*WYysgTMa!?4KuhqNCI!(c|zkz|FGZGrxy-|#2T{WJ6rlyOqz!PCEH;4q9t zY7E0W#5xX0{#(*W92kau?LSFA`KyumRe2_Amo$G87)Eyf0g>-NP)Bg@#?B-d`ctZy z|7Ioqslo%leNaC2$Pu$;81^>)`TX*ykXFZz z`B2B%Xj!;vt)(k1IB~L>>W&YUEML%l#1%?ot`WOo1`z~nFXjSFj&@%0+>XP)Jzpaf z1#+-!r!1;%8h?#8wG^MSKH%UL5%R}+^%3rbBr$13xtVbsP)j#*O3+h<%rKjezg~YP z$@AaJ=$3eYkI&SM!4%z#BCp}_R}^!Nhk2|j3@yWXyz1TsDwG*d!WB+7D4s{UNwG9{ zvu$qJC}lv`;j%MLfZHN7A%?z{__G6})$b(Tr?D_zcM<`!&V*x3x(Xu5ko*}1oD(w% z=`f1UdFTeUjgZ*MlQ-}p6bh{q0qlP<-!P}^1tr=Hm(eob53as|01)Y(p!V06-LD*v zFgxL53yXcshrI<6Wi9GlbobDsRv(T%!edI76Zf}`)a7nQEk&kt0f58eSO4;1&}tby z+>f96Glq_@LrOKQjs-z1fs6!*gZq^9tiqqcyY(OJ1NsTNMi4-`&m9 z!=X0NAvlQN2@@kggT$K9bH3;R2DZL1H3l>5jOeaeT0kQ z@R#=8(YQ1YjdaePMx^}&>Q%zg>8}eKwu^4nftp^VFQt^w;vgTV{aGH7pfQL~_9`F1 zB=(Z8c21*(fZ|5TrG97>MP+}7CHX!g1g&xG0dM&4hWG)`s}~KZ1GO3t5-OXNCjJC- zC!j0gSz(kOE;z(NQogY88pV8Fk$%q36h!?K*OCx)hE59>glLskt^Q(z!+2DiKHC%+ z2#fVB;q7;Zrpp=Eb{h|q?5f}YqMxLpMjg9OMlv7zQNBOYP!){cR}_EBk({S=ea8vK z>eZcdT#>x0Qsk;Xj#rLn>ad2jWS)waUnY8Hk075Z@4VoH8jV{Pr;aRgw5X*{v*Isp zY!T$G(%{aIQZZ-y{5|xJgiusiGjMdcp$&U_26&!dFE4I|bVc*>%qvG`*0ucb38f*v zj+t8|X-C)N zT7(IzSnD9)Mt$?e$dZ7?j(U`X@rg1#q0gOgFr)*QD_4IbFpI~PKdvsS4n33m=ls!N z)VY=%UC^DOKjv|i@W#5?COS_3@#pL=@!UBF`Y)NO@UasJju&2S$bmw7QR$cI6S9?sXWebH3s z0Z5kan2tACxmULU)?F#BTK(V$I}!mEXPW@_nrDBYA4710InRljUr4I2p6yn!Yi@0X z^4!F}0E%x*G>pJ}67`|vnHC>to>oQcVRutz>VK9IIimhDppUNHluGJ$J)ifi!9hRG zM|`>+RE)5v6UPB$q_V6gM3u$(+fE)4Y_v?$o?Gck)dco55DqaRr&h?l$u;2Ss_Xqj z6lQ<6l$D`kcs76_ zmNV82v$z`pvt<&okLB$LWHf-rgkgW2oRC0cRdr0^bUeoCS-^eM=s^0p$h1oiHl*>Q z|GA2YONtD8~3d=A08Iawzq zu16@JlCDa!6hOPM>%yX58sNyQoATfxz>bnVG#`dkrMU?VCa~5gPz+=LQY84zwv#y% zAqwybBOqSc{f6hn9i8)m$0AW0Nf;ZU4@g#)oP*k_^1y0j0)eDVDC&RYCuE5Hw)a)N zSSLm&h*Wvy*heylnQ&n7~=cE>G{Z$kLYd8 zTHxQz40EME&iNkD-td3L5XZegIO+%0jcLO@(ze)pRpgi{;Zl#O=lr3D^YTH=S>vZSYp_>|;tbC7;i)!s_vD z?|PQ*KQq-*rHF~boy5I|(!3)u407M*kI_+D`tbW7bBNZ{Zx%>%Ih52c5%3AW(X!K z>Nt$2QLr(pX5Mxj**r_mc5kRbdS;v<2)wcSt)rNTU4E2yAF)|jD2>)s`e42pqs`Vb zjPUl+?md6KYriB!0vnOQU<#Hujrf?c*w;v|C2lO-p<@h>`z{Eq(R6WEv!q!hU=ixw zd7RO|vHiWkmP&z?D?LKI3U|+E6oxwyWqY{58~~cIzv_>|bdNGjzR&wsyVGoFeOTR* zISTz4+Ac?3f3WbT`ABh^>R8rw{tY0w8PFWO60CnPqt4#+KK|X!TwCQGTE*okNxrKQ z0aWlu#OWvt!xB@(b6CR}B}2D3uz{p7rJgU1=~2XJT_Pw{h(eR%oFx@1N%%ey9h==_ zF42H_ZJ#Uot3%!5kL`snY`h>w{YC0=6)BU&JF?heYFH58KWc#+U$XcIEN-*(+xc{u z6}W#lw>GeJIPhbQCk~Abroh28_e#dG#&k1+%Q}R%Y2MUS3 zRAKouf$OgB{H0BR55U=#07NODka6Vbe?x-;Vn|S8TG@R~ez-%369`@$h>{j~FrIAA z-+*|iUKhB&<9%|@V7kzdeaTe3ltlF-pC5nJ+0e|;bfHryP$#px9j$`;F|9qrh`7{} zr`Ne#7`7@s3#JFr&m=(Q{S~agwBO0uGyEF6p$$S{SXhy`XMnB#X_jb~m*EY{M+H3? zTbNqRJ9zjQ<(SKB#^id41Y7mI9T`8%yLdqWn76qYE}PO<{(U`_9#B0YBA0t&O4xs_ z;c#ut&4N*-_AiJE9wkHQu}4kX2x7e2V4h0~;r)>o?wva$0#AyRWYC7n3Zm#wwpnu! zoad9VfFm~1q{q~Uc7cS^5Ok_{16p}$-2%j2hYV87H6k+>8PiRj`Sv9j!qn*V1CSdR z5YZjqM4l?uV7|(NtBFOOPvvSI6xM&k=+ju~GIW)=3FOchDBu8@aXn~u@e3iaMQqL3 z<0GC^Py2J|Y_lu6x;88?IyQ4@+`b$VO01%XPr5PniVr7Ep+VXWjo0guY8ns#ylI7T z;^SIe$F^TJ%d34oCtHR|zl@aaV82CL&KVqa6F~fn-g!hjkxML9kY1Nr-sgXr9*>i3 z3dGEjD8$~Sdn6C?8%RzDYTQGP5Mm_#3*)#I6+0#*JI;Csx5NYk|6x+637t2(hbqmsrjX_%C3mVvSI7?<1EWwZ%MY*Z64HuWyLXDrb z?WZXxWglEPdg0B(LCHNBE+>EbiLix1dd9Fc<4L&P=|DYcmRUMU7$z-akrNQF(~UBS ziZOL91l6A#HG?<|WqUphW0zk0IYH)i!b!;G%8vx)lqH6x&AQ^c=1Frt0{Lc$r=FoqtJfyJjhpbmN zqwL9F%M61dX8oOEzJwCdSi5PmyA-~^rMZ>J%E&X_1-jCW@M@2^v8CkBIQcGs zmxod0jI8uPajjPQZnenD+fHl?-R>4f(Y{IarTb5+Q&I6XJd4bxEVv&@Q*!Fy;gneY zYwTb8rGG}HTUA#Iu=;<6-2t*c2bg>zqfZ17`9uWP$DbMq>mdt3us=k3ksb)#3CTT^ zeZL?Y8$46^sDJlW*Sv0Fj@WbfMr3J`I*<2VLj@87Ykl@|3n)3MdUZVO1;x~XxbJvR zWuMz6RF3Ivx5PZLqf6B1!jP{({GOg%EJn}V0pBH9AH63I#1Vg3Bl?4{YN@NC(Z;lE zoiIV<{)n_YPD@gK&tQz}DZIf|fbEZ~4FqptF!=-*kPfy=3`)q;LYUH#k?IT=AHkoM zh-nW$@1YDi9!Y})3TFbvZyGZ<_B~;kTZVpuLg^{SO5NAEJR8Z}`8=GEzT9i?U>!5< zN8g56v{Or|qnLkgz_s;kH<*YYa+RUNi9vs6s>letLWp9zkgZazQM@hNcu-Q@mK}K$ zQQcH%p+5?$At=<(dfw5^zVEIT5t$@ILE8>j_lAATORcuk1x7mby~%D|jd&)nfz6RG z5Ua!GLcc2Fe&wZ;;rpCA#y)sAGi6IzZbKsNi7Jp3>mGmDF{+0vARu@`wOLCFry(O1 z(0@W7RFy58wCb)ZEIX0!U$Q#4NMf@M*Radsl)(a}?ln1RZam(2E@i^-k{*_IC6*rA zFpkI=8;N4hqXbN`j2zx-{q4vN+Hp%fw`_&s zx@l%_sV{%e;<<=`0+*0}vGP-B-~iJu6}K2)zr&OYc?_B|qZ3=ZlA7bhLKdzpe5LKh z5(2(5$(mQ@W-a6K)Uo2Winc1?#(_}MKwr!r;PsK}P0&4hzBY#ObgZU*WLp&b<4;)n zb=%WP1r%j~w4+sa$qWJq$`OZ`qIY(NIP0o(b^?D;$Iq$u)$BEw*(L4g2j_=W>fJs8 z{qWu_i;+@KIq)PiErQ2QV;p!!T55#_fDa(>|59`oNfrV@5IqnJ96n3j-Pz&p&gm!S z2EFNy>a5Is#OG+Sd%D)dOg}~I#n8>1>)K;aiJO{Vu)=K+kpiekOK` z2j-h!>oko#{}E(%RMeKF?EC0W@6;FJaBhDQy0(Cb(f@gyL0KUK70jRU5e6PtPuhji zNPy4riZY#>aAmh)$*%U#RJb?ot_W))$Yf(7o7c&*!dQqi*e~w%wE$l1a${odfn^wx z#8YwJH*8|sNI*oKytEZsEazTmbgnq1JAobey#uK-*l)`9*0J?jcnO{zZrrzWz6F2( z(E;Fzkaa;jq*rSJh)G;w)Vwg8uQ_Vts7=4MTLfYH_O>DSAJ(KQUH@X9c6nD3)reLz zgKCRw7TDNwr2JC8i~g(#^gUETEm8QH>sGvXLC^eI1XmG(Hs}WR`c+kMr@#i#gQyl1 zjfhRgwJw6YBbJ=4g8$I;rdIE5P6>aVW^_2OEB%6*Y>lTc9Av-vod}jEFZ|~fltfS* zGxbCWsm5}KiGe=0J}lW%xcCrPy2q!EurZ$sd}u?IgG2D*U@0$UW*)Rghm1Z~ql%7L z1PH8E?_H$WR_KMpIs{V5D`!fWl=9OQoNR#EIq0!RUCu0vM-5bwIisf_^LT%>63doo zq8r0SoQQHhbkTMccLxl5J?o0Gum|fkciR#IVKd9)9K^AT0`2j_+*;lit{z>qJoImN3;ZDaO;PGcVe}@jJN;NI;Bwm#AZg z`*^fTM_)uN)xrC)XCUWgTL8#*O;lk%@2|(UBPbY z`C`~b8FSmwO-Ran3orXgR?!bbjlF5Y_+KLgmSofn)9)9a5LI~Y1eJf#bCPLSDD*?S zYLlOp73yN9lJFhB0$6`=xlvxF#0?+C2VUHx!#Csn(Y92J#(61}4kV5~qpe@_iHCWe zcG_lpH!T1Sd9>TyzaJ=%J?mLhz`GdgQh>je`Ny){FV>V@+9NYke23cmR(Byk1N(T? z7^(T@0mNk&cdnjot@M9BdNflCAhfdP2sHCCg!+f&WD^Ez=9<6W*z2g71K7UPUg5L! zOyP8Nj=J$-V2gZWFrA2D^($aPHw@`FI+=mgv2H2}pK`soxcmLd(CpZcmsZAS7x|v7 zAE2%a{^yL!1Hl8;?j`=}6sQ$Ea|zdXT=*r~U}9C&Gv5+Y+t7ay1BDMl$%OHAEvky_ zclT9O+p5T#&@q6|+xE6mtn~^h(ygR*>vDd}+&p^VPZ(4&P?_VcFPpQ4Ru4^O%%_=! zSpE4&U2>amEin)2$0-wu&^4{+ljyeH3d z7~L!PjaN@f*Br+DUX(Ks{~lnJR$++r8wzLl%<`C zZ)Cun83oKT1@`TY)7xh5>`4sU!#0)|Io-%aRF|RxCU}1&gp+;3v-L3mSa?rkiga2A zpE$$t{BG8HS9Cio4f8tP~pLQ7)r1qUI2xq$K}0> z!M5vFvY>y_m59q6{x6Xtsm8p^lfPs?l59ivQ4KX>UTs>H%688kW#Tc1^4qhmhm)oD z$ug79Q?_fC*EiYoaw01WAMrCdIdt)qy*b-3R}@VWUw_aiIEHdd-%hA+VotGXqA=&d zaAL?|>%#bc{g%6JtZf1H0|9<+fvwGBG^avRN$Y={araVr`1wcskU}eTu_nRd6AbTV z%3#@3V(6{h-c>vdl{Ry8J;ka~(<1mTjh&Y~{a23$Ro9g>JCbA6)PVzKuG6&5g&D!E zIY{%h2~40At`j<(BHH^X5eJD8VYQW{c&tVini;tAwEW5wkD0NMFI#Z;XWas5|8iY>76}e;*0N+Kx zNM)PxVNxWO0XwJ_D13&+>x@VS0JtVWt(AY(_zzZcU8ltA2vCiiXWbDpi;4Rml@GXT zD|1&VAxm__>w|f?42Fm%A;G20>7U$WR-O3Oz1P6$$#s>8VZn5fvykVdhP?t%U|Z3G zWp5{rBu#i(jK2t#t}@=IG#D$bM96zZF`#^(EGvs^Np0C?&U-dkGd$(rDFPKIDeiv{ z;+ImuQpkUeaCwu{8AV&8qm{3AKFJ#iCW93EF-D=o^i9ibTAoiHQ|PyBi8Ng^LQlAt zP>}}XQdQM*u(~P}e|~op=Q>VxA8c6|zKi|n`S8E4GqFzbVlrA_gH79sHN4IkohVk` zEqMNqxk7+JMDY#$9Hv6(i%t+?;-r7bjr3Tm`Fj|D-p}an$;#5Vm$e5zG<^>DJ&HpD z^w-kE$$|qWZ(pG+ra4QzPXy`>`>H2YWR5psef+v6VhK!nL{ZEG$jZ3&rxxw0U=6?R zkJ%x}k}yl6!sjZWSB+ZJkH60_iRw|P7eG@78MAOtidjFoZGFpq;}$(b)T4g@T)+Ct zRIZwUIoIU90yomzY3yxJ@G7aaI4=|$r>d};hr4@@Zb5zb_*aaukV1JE;MD9J?0-7Z5PPw3Sf z*|@35U@lArtkkJ(se#nj=y_lYBPR^_IfgqpSg>vEyU}aH0H16SWq2GK*H@W-_FbVk zil>4bV1(6l6}@hAk+*YCoW&^yh}P+MPM6t9x9l7a2;?eahvI@YuwZUSl;Q&9(sYDk zTLci@R!xEj$s!fKLg#-}EPIaAe5Cdfl5qW&=ZY+;vE8v7jj$kjqsEM=+FkGt9M(n` z(*#h*mwIH$78D-S%Wr@oxV7qV5RVy`QR3D<`-nl(SIH{cw?h(HT+Yqpgbm)X@ zs$2^StV${n6b}X5smv>Hz6h>y3270V`QEWuoD^3?mbW8+a+H5-m;QvividisF+a4{ z-~uf|K!(#%g#9Hxfv=rEJPgnct`)`B+))m8Rra9NP;ijzG#CK$td5xn<2kWaPAnM9 zn_Xg7##r5#HCiI?=<;a>D>|!@{X)tpGznyuY!XL~YL$J_HyyYlEXEZRpED z+vsWz>*&UlxDCLPb-$3J)Ahmr879;zm=7HjOe(L4zqB;2fL+(;q5;ezVlR99=_jau) z1W~RITHytw9xoG5bXGKWWmx@KP`b$l?g!R>OVGNj=i1z(q7o-Z+a~!y2f2|Zj!}na z)v_|@+fT{0qJso~W^90YDS5nZ+`YNwYlxssw3cr}N3pu4@Q8>-IpB5XoU_>$Z>TSo zu=F4phoX0#5b4UK#`M&1ie$Xin&KdhCy=q9$qY;t1F}i%F6!w0bWo~7C$A#DcGIW( zbttOo$!d#gg1>AV)MFAF=J{2yLgNw`iD9qPtqb9euN!fHmT;{}6U>P+ZfDw(eWvC4 zB2q5#Msbae12sOWI9lQ}Q;A36*O$E+^%X@j&m@Ym__}0u~b4{1XKH1-H-e zvR3A}i3a(&?e}(3OtF$qXW0bJ_sO-hA<#6oo) zdwPXv5dqEk%?wNN4v=9=hE-{+*uTqGpfpN;Cgs3?wNM3(ak6>JlgqMZg5twYOGGBL z@V%IRwSq$tmT>?L93D2Ol-IodI4c+KLP+Y!i2t1#m-l6y;=~URe_;C+FtdZz|J8^^g!np+xjZ54MP?2= z#35*ZQ_xTqGbu0iY>sVYg5C&1x(Kd%_<5*%Ty@wlfbAvpug+~q`kt{i3YM5@47%rd zzr{SdBXIW@)m2ppSam<6o0{!$wkvTGCNLA~b?&Xn4L%1L4FcDiDsAtrCXj&IRks}MqSsCmiU;0n`!{A5l|Ghs{jX|-s8+rt7)Pjm?yy3W$@ zqtt9SUywda{aPr0CwEL2X`+tKhDWPLZA5-6_5;fGtSb6?h6$N4Bc(AC0xz%E7^15~ z{Kkk4Q3**{bh~lrezfzm#UtwXMJ4bxXP>n6ECRvr*O`8ORCW;d=!3lWI(k$Ek@#Z;in)_ULeu!k4A>Z9G@!ztz1G?&-zs+HD}Q7@{H z^Y&wzEbu{<2=Bmv<;DA*wA@-HofKhOjNsRTW$$`j$wI-9v1T z=2CtfBycywSwlxKkHYEx1mr#V)-`76htrthi0X2?et|iSC6&qC$GQ*cT%7}eJ*?O0 zZ7CJkxqSl`ZksX5bSAGDvym-QwjKq5yed=gmLWS4r-zK)By9*OvhktrMnb=T&oO={ zJ0*;Kw#{4}2!5qPqg=e$@LgQd{ky95&!;Ttyu`uPdI})u1sUYs9Y{&^zj3(rxA5W@ zS5mdLl7@^RDd_XXs+K#^zP|krwAMIp8-y*MHl`J@N2|#`Yf%{p*Tq2!_V>d z&g~wJc*|1t;`ISi@ipR52ey}gQO39cC{mjmmZZ|dahVSf<6pOAl^AXzOKdk4afL~& zrDa}r8i$M<&5s|fkD+sHa`3d06EUeT1R*M*yk+S$15v#h+3(4h!@j?grs9MTC6Cd5 zA!nbUyfhLdJVGQ3PZ*Y;y%XiThx8k`5%Vm zm!R6~%SlerYq~fS65P&_MIW1g;Uv*Hb1EG`^!kvQ ziMV55MH`v+phD984=_2;fsf>I8BDrYny|=Z)OgO5hHGG5XylxqxI~4AcD28Sa5+>- ze@V+uCO`T$!5pE!vvztdHIMip(DLh$vw5m`R1dLIeG=nLhreo}w@g)s^#vs_eDArU zh;Z<&h~B$TK;y@MDe25?&%(A#DzuH+gewwV-#m%LJI6+&wq7(Bt?`=pLfdRF>_vvm z9D(f0E=kpfMh@Mj2)@JgVympd+`S2}JECY2R9zE@j%{scE5oVxBnxN_!nX6IE)_H} zC|Yi~*Vat*VD$Kg2-fohlFvJM45@l_pUUSL-5RzISYpY`JBZqQksFWv^p4HjIq zSEY#-k~1uulc9K7b9CzN>Cg8xNfNc=xy{m^BsM)8!|+74PFB0INd}DB<=QjNrd69s zD5c7$_&9ifX<*4E+sjGzi^R^Jv?sWP2!B+dmD5R{_F-uNQ9vq7-M+LyW%~72vdD>* zxbWfl(&!v>A&coxAuT#c^L^UBYE@+k3#b`#`IA!M@32t{ZgH42ukxlWyMETwW>%61 zx8i?nD<(|7(q5i_iL%d8uW!HBz!I~~mvfzbSaCJ(NOOo#97*jo6n7YdFl41kA1 zbi=0N&g763+Awl6o$5Fnd}^Kq3PaH>3IvfqVM-5ka%pwnlkdR^Dt9Nns`3pBNR}Mb zW9j-lJnXQT^rtXgrU=6PN-tyr!VZd46eT8RUW(-Of#Gn|bKHQ{egI7Fu{wMz1zcI+ zQ8w*=4bQdfHKd!s1gdQC`l#FSe*TF}48*Lc@gS;ak~PT`9s=w~=v9PGHbQp6)7kD( zD$Fli=_Wk0FPHZtZUB%?0gHzly^A6|FWRmkemzNQ`qssf!veS{Id1pK~56E$p^ zsaR#(sfLBw2et!_oi4MhmV7g@_1?;@8gD#*b-13t=e?6N;-EfIFx8UQpU@2c1RM`i zI7O3D6QA|-I+3V#Bp#W-rcT!gpu&6!lujQ{_>T4L2-k~|BZ{hlps-{!s7k5pZNKeC zm`~58DR@s;MxNnbQ*&xNEY(C)LMsV@02VE+^?(9yk4ChQ%Y1gs_xMQH6h2?P)b%BQ zL#ZOAO8l~UsM9iJG4im{!e;Q55+OhW>6D7x$EJj&LLOiW1ISN>g+VV=R(Rwh8qE2F zh+#3M6edwG*GqDq*8BW`2 z8OtL=ry7fYk6Oe0iyTicGSg;dB?4ePf(T&$D^Qz{=0k#+_mDV-#GCFzV0%QdwQ8|- z#4{d?b(n53L5K3#LC4>)d3*R=NvilK&qtH7V}(%p2xRYI)zHXH!%-s9RSg0!r5KI zmnOOj=L|w9OC)HWo~qQ%TPxl6z7pf$5LjRvo2Cxen5QH0wM<>Cr8+mJT4t;<-y-P= z1G)cfkD|Yxe|&3L_FxVL?1oTQ92WW=4(!X{{Y(VO;GVN3$R2yTuH)W+q#w>Htl3!4 z!=%7I7aQR&&b?5$5Lqz#?^NC>R9K9@a5D@xz)rqj$Luv+5PUL6q^W%LVDH947%YJP z63aQO)8JZ+3{{|mFI{&L9a9UIi!1d zq*7MD#L(2C0j(ZrNjFB5(8Z@na!>{ZbjNV<*3zNRNufSE*@SvR*lBUa{@L5`-ydtT z=`x0K7h8d#6LCX2LlFnro~;3wK*A=e_v_hvh;fPjC|#tnj&vD+l-6~;(W!y$4Y#SB zPm-fdZ0T=~036%uX{S^mKC@f8gR`g1Y8|?hZlhVt+)IfgN4sOL+c?9u;6}+Lj{g+{ z2L_ZAWUlZsBOhy-xW>)O-assgG zZ8n(*Zutz7^`S0*@daO*#;9L9+Mw0t9agGPeXKqOTD|9d^6`pPH(7i@P^eG!#!8`C z`|#%AyJ1NO*VPw@7yQ@hMVmb932Zrvy&^jg=nWH}Xp(BDij8B%7e7pX{J-uMVOwch zp5e^VYcg^OKkI?|HqQN&%W8JJ=oG=WJfAMIsO74r!5E=`taa_*NPQ_etLmx^3!|cN z)PW)^144^DpwS+Y$~>nyT=-paH;&O%9C)m*E$XMM-&g-y8K^(NNccKdSYpUBXwY{G zMg1WXdFWYRg$dM=g7&0Jz?|2L9x7NiIXwGbx}Kib87EnNxQG;$P2pO_OE|ba6sER= z0#Ic`EzM7VILp-GWt~-ytLA$o4JR&sy_U$5IF#Eu`NN*~aU8i&uclFe#XZ6e((Gxg zmd*Qtr*?HIEs+7q6W_{4^L`w?@q6*JEE{7_50yXAkz%|cM)TJGYBNBP<_BfMN95yAe~WsiWlSdGBT(P-wP30|Yz;Zbkx^=bz|wY9ZX&<6sR+y-Al+7T zU_y`05A(C6O~hJU>O`~}!zrh{^VkV#JL*>iq4F??B!CvD(m%V35Wm|ySkpQ92G04l zYQwC5E~k~IhI_G6mU#6&P1e`8_Eg3-g_0$6+BPUP?In#UQkYjVx0!=}M*?rs;5Mpn zYAdCTUSBQuKM!qxr|w7*@c>31SF)-~R`>m_4zAwp09MA4*2;@fxWM}aH}FM>iulba zR+edo6*^?qcfu-K@OwsU&3f&8@^lf1r@7YtbXZ<-9mw0P|2$z@n$-$oMZ^PTSw#JR zt)T;H`{}~rjkE&tm~kWeJUvPJ%8=a0HDR!h`baIa3Ss43`IVL^TRDpq?~~w5)8*-VQ;eJnX`96}F5Vg; zO&4s6m)~u;_`rh`vGenEZOC%6N;P~O`F>YZiT`a42_&hnu-s3=%tB#*U?5Q- zWRYZQ%3;HkDQxcwPXtge&&(djaoO;morp;4vHYsA3$4tY)_~V8fU}Z-3ijE;aQs;M zEV0^_7(hzGZhM8{QxRjyF*1QVwM^SO=OX#mboc6i4R_HRT7D(m3XAX3BS- zCOcucOm#AIdq2+s%C_}<

dO5Puse!~2k$eN)V9^`w{zU%3MmQzPilgj5Xx!AV-X zAK+7JIfcm68b*Pat#i9x1i2mb3xWi)Yy}Vpp>~&$wpF=M)pEpFOj`t2~7CVB^X) zXK`zF!NZmBGyaK5r{HaUvK&7Gu9Uv}y;?=*4<~)*GIguauG!;Hse85Fa~;B^QLRJh z%@DQfut0%%9Po2P(OQ^nFtSiNj%c@H+MDh`dPAePG@XD~U7!+wF47}+%%EK{ec>5$ zwNJN-e`6)5I->;u@ihf{tbfNVlB$B`CM;~@Hz(8&+*%{nU7d@~lh3I4Eh}(cdftQ? z)Q190CiM2JtT6N;Z+GSRl!o= zGp$!@w~=C8M1W6!rSU_iz!wXmg6Ud}Qmz<K(jCDB9yMpt@Lx%lN-8D+FFO__>q>CTr&*5WnQ|WXXucp_Igip z+o&i{DEcuiHZIMIVIe|nImx+!MffO2pD|<;himQ~V6EGK>Mqu>zLalUNP`)#YPJ%N zQ2;6e3Fzy8P-t5sb=cFjO{Euo;uUt?aVzJc^IP2MCx`CWqJx|4y*!3fvEFIuKo>%u zb%~rqK_owysFtAp4wA#E)DO-(U#Vhh5|g0fW6Mb$QP3wiydVq4qxAx$gTV+cq~enL zJsf!7%-mrns;}X!aKJOzQ}-L5wBd>o*jQO`dggP2<^KadkXR zb=%(4tA7%`YT2@8z?lfFedy+6$stu}pKx>0Fk{1FSbv?Tw-#B?tog<)Zl?+4rs zi)_MAoeG^>cusX)ZjlZq^R)Ii26n_@FpDIAz%(KDCWWPnI0_kn9hh`5^)$VL#>OVq z#z%NCxo%Ff@q`+7jIfb>^? z23*%{apQKlGbz_K$0?afij=+WS9jEncr-Uov5V%|0f6DAJ_U_qHt`5}BnE+B zN<1%0qAi^&T(J)X9@Z*dSU@f1m!dpBdg$B2;N+I=4g+J6KeVjL3&l%65!Xx(Rr?fUyJp!Rxwp4>-S$jklk za7dv;gO2Eq)pa+7sPOOIQUvR*xGm~iY}?X)@ru|S)WCZS6}m+9$zM&ap?I8sU>(IC z-2>8!Mv0i(F4l+6-ui)`P*gQLsRq3EUA1C)%WV`&{9K>+KXWRbBBek=54Uv)g6|wI z6?x9c$Yb4(TRh`m>#4%EPVf*U?9+p{E0PA9dDT^f)drWCU`hNwnY6@eN+%i0 zAKZgQMSsmsZg}uN6k2Z>V$tc8nZN!QVCr}#{Bbut(Hok}G3-)Xk%3@;dkFCu7aC4) zt9oKia>5K49U?{Uw~*82?)yu+JPXB#9Wq)qS)nOxmARHDyFSJzpZq9c|1htVSA@$H zei20UN*yLGnseUV+*D$ubhr@3@yiBEiUv|!#CaPJhP&OO}W&5F->tm1R5 zO&zb|pqj{+u)IPd_w40=ceycJqPQZ^tSscpdumUaQafUQ$0e|FP{2&!=nlg-6`c## zQn3hrzg()2@2FiLCqrM00{7B&wrVF)x6rQ3G!GiCK{ZTTXPa@#8jhq9_1x3f9?{S6 z5_lzL#9n|3{l3ggJW}Q1xUz$;+{Wx7fUZc*)})J7`e_ciFE&Si8I>gr$@qd*tM$FZ zgQ6!z9Y?9SgUWfMFyi{j>}V;QXEuTCENeuJo7ei{hcEDFryP<>EL@$D13850&sE5Y z*=CscAWxRV;|gU__~Lz3-;FnnPJQFmM+*b=xFl}q088GW4hz&pN)EptlCDcXaVam( zjG8&tep`KEa|kMbfG6VHCa4EGVYcOpU$%ZIaZ#OS!B=#pTNZ9jF+0Pcf=hubv1hUB zx}Izf_JLW4kwo8#G((pYE}9VJzGpE%-&LSu;%1d#O`>-T z5ML$#14kj_^Ya|1xOcDOBi%knz$Y|ZgCtt8IU<&(&~QqB!;D0FEZg4?Od1Ov%lU&{ zppvcZ8NnNwKcWi-Q)eweGz+B+;KB(se`e;XH^IzFP z)}QVfTJ1ee%VMX>tlzTMSg;&=X8E{)AcLN+no6IZ8uR+P2H~zzstpgTI*B0J<^|p=Z?W zkxI>;UGV2a(Ov>OS~s+c50l5!yHn>zco|(|5JE?Pg_$KB@s4iQEyJJN?!Ad;S@mhW zlXRf}Z|NE*;oxq5g`~=~TNT=|IA>8v=Cu!HDa6O3*R-5j5WwVksW-Xe9pD4S{(W5a zdvQKnABhP_$6qV0fK(={2@o4pzp4q^!uW+~Sr^^qM?)UOT^{*ycLuXG4iMS2ljr!d zRN6m(T|5%ju9M}y3HHi^^!CMCzwXaO9I4aI(wtsq3y<+8=7-_?t?zL;1MP5IBqJH2AU=ljWC{@t6h$ zY}wusEn!8=5FUxA{C75m#07j7t|X<}H_AhQxKSQ(G7l222{tbOz)N&w>FKXoJH~^v zXBzu6A0H~!pNwYk>iEF|Cvqg+$xl5~%`fi5o!1&@de^`L{J-ez{vF94D`2O&Q%h*W zOdVr%q3^zo(|JE~m4iA}WkiT{aawMd1ox!%uFZgH;?)0sfS%;~rcqw_M!{mXxjWf^ zfYx#UCZrQpfVv?O(UIBJ3Uus(dxvYj*Bp+SwDL#OEn2p2s6L5P8~l~9%1JJMXGkD( zSm{dfuy`i_x^0{`9@`xKE@=vu!+z>djWgZ-g!7Vr+8I$= zGIE|p4X+o3kd7+F04g*xMEs!1E~N@7r`c%Gaw@Z-bU1E)1jnlT*=|@hHJD`%%fHL2 zgiW5s&lD=m!mhm4@i&V$FmNT%tAGq##7_cH^aF_SpA(sF#Q=b#X{v!_3~Q{#lo2$A z>ubm1#rK;B?<7IPSaNxVJ*3@#I`|jO5heagD}wuBvqWRKrjLBfl_1$xI)3Lvq1(Z! z!4^A=dx(#oi&Qj{Eg0DI<*|3J6K@i+R=;Jgm^MmfTh~MPfE8Dza)hFgY%&;- z(OGA2V~Ink-J^wXntEleP3WoK;&UC>G&A;#CsxC2f8J)JMVk27m!+ z8iL&05P#`8M&dmBqLO2WM0msPX~f4u=IkelAtveBAO;LUp?agllMqdk3op=%V5o6^ zWeppJf|wN*k4V6pbYBrFIY9nPzm_lysKtQg3b&B09R5cVKx(8Gxp=u5b(cE91#ZM} zG;2NCvh$A;_!zCIibj2Zx@Pg)DC@#j$gea!iC#EV{$*jQArS9%89bAzMJ_xpd)inkOr^M!xU~*S+ zm4pMmf2@dVmR`{YajYs)9i~x1aNPN-!zi4i^Rq63bm%6F#jnYl`pKC9utW*K@1ypw znx0|si(kSb(FIvDd?fGDa`4sQ-SbUpT4;Np)855l-*uYD0KNft2)_} z@F~W({bRB*t-ZB>p(9)rbY^>^WTApcNdiyTREzMO|HFi_vh7rw_G&^(xzr_PI+huOqD*h zjw>ZN6~)i|F?q{T*Q4PDL8*AUomcoTvP~*`w^8QKT6Xp!EdH=N>Lwc2!zgf(tb>YT zNQ;z@^mZ%>1L|T0f-l9n?XRf^ni^$hJ4m=RKzZ1h<6h_FM=#-bwa|-K2u&}5TN=Zy z#c0LwSA9x<)52dsvD`-}8(jv%3nGs0tI_oFo#TdaMWo%Mn2TwkAp#wy zRcH=q6$#>)Kula*zr0n5sQzX9ntET?t zl#9@B0$14o-W75~!j^TLA||br!d->li49Y0-;PsoJSbxzLnm;5 zY}hEUGY+-s@p`<#r(u+2@}TIlbj+xa+${m2!{}3bmwP65a5?G7^*y5*5i3;%0OXU1 zV2EEmLhci@FoE?&b}3@0kKekDWF*A}%r67-2m|)q@?E_d5L*&gE$77?(klyy=G^mB z0fW6m^p0q)*i$*nb=Syx?m&+pGIkYzETRM9guT%n+L0HE@vnYdL7Z-SEgMqTRapsD zWBF~kqDJzh?x}(&DsZ|ii$L_`NgO>HAW5loR5+zbIz|%>Jcxj5`_`%mwA;F{t*~yb z!l6EIhtzaT_X0||7_O%&6~H_3?T4!zcQPY3CK(YavNxluK;NpbW<@V^t@P<*{G+CTVU{GFv$;=K5k z#H})Hl!8N z+I-zihCpnX+3!;la2yhH1>_0Ydi-j@_SohgUHRK8}{5 z7R+!_oro(YqpSk^&hrwxOTfZ{4x){=0fM)ov}d3&iWh2kOYGF?V`ZiPEV`V+*CEZ* z+gTQve-vm#ya&#I&5K&L%>{3^Y2^*&I|L!>G`fMhroDss6$b>NnPqub(KK64b##Ii zxaM9jED2kn9{Eej=Szc#gAsDiz3a?3Lcl4*56H*7#d!* zW9ph?fq-{ZZvx88YZ^DE;3tAr=cZy*Fs`U5i^a310)owdgD8BWc?Cr?sa+`$W>i{U4YuTa1tzc&4h{0}GjrpL51%W7-S@S30hGcU05{ z9Lur+PT2*2NA}S`0S#Bw7w^R%W}E5~tv2CHRJT;;m!OZTLiuB*HbLQkO&JnVsMK}_ zQ8znd=_FRCkGK&~DfFB&5l{(U?1ycgAheX!4k50F{#6#1)PuTRaEUc$T_4G!>jYeX zn8Fo8&7re*-)ZUCR%zB7KiOh-of$wbS4K3FvKRJ$2!oVNEOjkvn`n7=pSt$eIyL;0-}K+)9*lSM_cV(_kht>rr`ae z9$K5OVz`*nSEbKPQv0}C8sCl~R00CGaQQXJp4yyY*8+&^(J-gs=0t?N?gUSLA|P#= zf#?f==dU3sr%PNVch;n#EjF7z@}$#XC-PG&@V08oLYL=IcArLdkAl)pXEE>O@S)%V zJ?p%>T{5S^cL((|<^3)O^)tJYN40)}FgtTUbW6PQd*d$i3geTKb%+iU)OHl@VLf-5gg}b#{#uGUbgcfk-gA;RwW0IQU096`b7D?&} zF7>b(KhiWuHKH-rF>#|FUuY*saMvWfhMN9CS{e-I&9^_39vz!hs+>wZJ^P`%;Y<>L z)7$n1mr@z+RR;SH(`EVIZ@1is{U5r(`xsUB#`Q zPzmOyS_g>7wR?p3GQ{r@10M2>Kb?24Vu{LUHS@@0m@Y+L;hZ$g&47H%v|}Im6CSy* z%|vT7-zyw9>Z~VpRqg4amwi#e95EKk6~6 zTt`50%^xIWw+d#|V|eN19o<=!Fz{&T^ehWsUOQkIDY>D4R z#oP)PF<*T$aq>r*U`Ma_rzYvhrzu0-48Sm>RRYUJ&hszQPyq~U2^*vZBTNG zeoniR`Vyr80juzs_M2kYJhtu^GAm}zE`n|(dRjeq=|R)UrIrSJmPZzL^&mQXZ{VY^ z;S5qApl-Wu1E$Fly`CI~q|!6dZL08nt9dobIeE3ANjO5GtZ4f0r1#VSns@D;xB2J~0xQr0W2yg$Z{en^vA^fuHW7tgAvS(K;q^sEPF)eZtbf zssEzicEy(d>ifNdzt8f|FL2tMKe{&O5DoJsZUqjRVzK_mktC zQ9ucU69CWF2=B3J;F&Ef{SmnN>?@KWRk2N3b5v+^S~I_Y%Gk20KJlcGFkl3QB4;`j zaJQ1^lDt^hgrqB1l`9yc01CkO72Cx=kH*+dF2fc&=SI-3tiXl3v0r$V*V5&**Ogiq z0@>DQlrU)lG(%I}O(0>IdZ+iqmDW#G__X{nAo&3paVgLZfVZFFBkou72U2xx<^CGs zxo3vENJl?^G}B0ug2egS3IQHZVm|B#;Q2xxcOStKq#!N{}Q<4Rg=n83TVV(+; zt|*G^I>;bZxIs#Rjj{lNB$3&tcli?Bv)oz@3GQZpdnDrN)05Opq-~htNf9`)xI%*P z-gi6ugDrU>lCkZz)@1X0GGc=ZKXzX?fO2isRouqapX^4UQ$>YUGj8Z7qijlg|KhD*t&7j&;Nf%*tY`@esU(W5lBvkI_#Bm;D(kQxZQtz-tjwZ`Scas#Yk$iYRG= zx4>^u5R~pGXLg$G4}nmB&FZ}AkhjOTv>i*eaRGjO3JQCh}+V~ zqm*wv46&>o$RGFpslY9luY$de_F0uAo}FHeQ6NzbmPLs)Th0cjO@7CB?d8*B-|^Nc zvH%QuI~!$2BGuYZ&d<=Q$==mFTjA2wl!xU`ERnwX<8WDIp?PyeR^waIz z2|PY2ndMXGOn&`erimnQ)%1nyiJ}SlMv2L$3(tuCg}kA22GtRDIx8>M3kZhruKF0{ z2GJ8WE}bmx9cXNXMjm1|?&5i@XMsO|YqKk|68JS~7HSw?-3;}spJ<_ROpZE*II8q_ zjy?T|vhRNk6FF{7Fwbj4EI`wUKu`)>TP97D(BErX3aD!12ProRdm5O7w>(N{^7bv2 z{+J|)7apzADNeZ>ls4YbTUZ8RIb^C4-60|AP%IJ|^$-+_Z@ndqPGlW#THBw0Gs7y* z&I1CzQN6%*k@(cM>td9g9q!oFUBW+;MC0#4Uvu{zciF-$uztm7&96UKSlyfv8A6vN zHo|xxVq`y;XR+SCO3T$3+jM1!4aD}@rD0&A+oWEBM1@+)ZgbRbTDB%QsllLcaDne4 zd33Lv>dWqtmeE^}*MT(5C`T)Q5y#+gT=t2eH#C*oAWxw?wl=fTX2_qNY6vIwZp z-<6G}FNX?u|7FUu^}GUqsAf8am`9dm3v5HDiXUYi7bu5XTq6xeMMnI;UMs)fKyy?s z!H4~O6czwmp)s}t3#t?$==8WkU>;9J^niI^cj0PA93G21HO=`FjQ6!6gFg(2u7~LxFMK6<31tZU&OhS!8+|oqO1}igR z`}yH80mnnebwM9!gl(R&i)dN`jCpL$Fn$aTKvDtV7tA?RU`DE|-@hP+qYbZru76ir z`uc_8_CsZVlwVOI02x;eMlu_ZQ)bh}9ratM!4%09V*R#B_gI&_>G7BX(>J;JW(_2* z$7mtdbTC-#kuT(-k7NyenZ=msEqDrCpp03YL|2|8IuOj$zVqf8U3yZ85p3^9KP=~@ zu^58bWbt8bahsG zz4=_lHw>-B96!v%X5lVeN4qH%pb?gXH2Lbc)FVbg_?VuJ@z+OR`m>vw`bO~EBbU&N z6^^T76_WW2=`D0;FpgcXSag$(ON&hzue194abSG^#ima6>v`%ky_^yLMfNvR9}dFD z@?si)ZYjJzT1EL(;cgjUa{L<-L;dTa!;4w##@ zuq3(_J&&@K10a3*5&=h89`RPA%H%*hmh{-QD)uBcfd*mQD4 z&DVVI$LJu-`brWVdZ03tYi-5aMTzr&cc?~9v3b3x* zx1}_-kym!1>eEO!*XLSWK`rPI&(2ns5=D%nLV*p-M={j?@vMCN5x}13JS=S12h_Mj z2sdaB8zg*CGBnXTpd_!4SE}`wnC`D~`mvBgvm$T0{l-dq$B)QouBWGD7%*~wU}WFJ z{0yyYicH>NVB`JL+E$}e3J{XWYoO+OHjRFMipZnQxHyyp}(Dg$$rGcYQ{kc%zx z(>Kr-?HHqgk^6aJyTPT{JJ6Xua2+Ocw!K;W(z(2=>(JjeO_IA9l$yA zFv&3iE58KfxyK8@3T33cFMqg7Nuh7QwZn%xKgCUq ziAT8kS`U(SVs^tEmtJmM;PfSU^rRCo0&EfI!Y6+9bu$B$=ChQ4+$Qp4GmEmR)JOcc zTo=^~wY-Yg&{RH6Zl*&i-%E+3B~b7L$1SR`4bn7b#wP56DHq4S#oQpe^0FM-=Z-NjY%O4=qR%Q zDSk_Y;iGE_nKsIQl4y1rDc@9_`je6|C=ba)%sytryC;nA1ndjVY&`&!QC0U50SI0A zb#7Xr{H#KuKZG)PiN$QngA8-67{&Om0`4`Wq4Io_Ssqa-;v@~QZ$~#*YQ}DDVd(=< zhs4@Lx}(+arzf&}7Pj+)zs2C%wS9LOcw|w<mCjt(kK?UNxN3`h|;6j zg_*SPFTC38x;iSA-A8@42hK}w;GC8Cwo775W<<-fEd-*0cxpHrg;mor$Kpih1h)N( z^NctN7sSs4ZH$sQHTObP%xIfc&g~&V>wVH>nZhxDOQlQeNBd81?Irq4x!$Xh=}~iI zDmTQ<+{t-LiY{2Y!uXK&47;gh6tBH~EgtnTx0Z*Bi!-OM#3bFtTp@aQh@Tdz5kNvR-#EzedBfWO+AaWGzJ`ra;Kv=B^S!x5sV2t*y|!|Da^PQbDWtn)WuLP^fsoJ`g2U>6|9(*DZX8*$6$K1}w`1=MYvlHg4LVJ7 z0Pp5WfpI@pbM|f_dbe5WdVDb*KN#V+7-k@Y!d20_ErTt92wsIc4vh1GKrM(RviyyK zj?=83MSqF0V+JpLO=(3Or-JkaQgUy&c+A|E|2sUNjgm7IsN z?EijaQklc+6tf=VUO1be3n7h3SLdZ@&W@H=-*)%L&C1OVO)>+c^nw5;>!OW1K zy=0t_Fd~kk=aQBZYByqF`IK}s9UzmaIngwc2dTODDZBq>oCIILL>~+7QM z`@9A|K?71QL-S<%TcvR=mxrrc?}f(p>1m96>#~?aO_}1}0;g#;96)8DsQ6>e1CbfA zuC&A3a`gJq&hOYH`)9feSOET)3N+(?6b|09`W{dHV)jb~^YgAHWtVLSQETjqDVr|+4wCFMRQ}@lI<30zXYzZ;A3EqLNE0ySboi_{ zVfhAG>KkkJ`Ta}R9W_d@t+W?fh?+Xr)y`*3be`^-U@^CM?%}n^W2aHA=M17>ZB&CB zqEx#`M*3P~?M0DA0b2PZ5VLO7e>uY`bzR*kNPDCZ{oDbwKJ!n(*Dv)u^qcZAQC!qO z0~1YbTR>eE7E(Jj5qwA{PUA?dKF%~GMSKluQ$?Lxf0l5LP8nDmMVHqc-``EZAisf< zKkl4e!x*aZwjh3l)!F@{-wm8R74MK^Ny|{p%8hv+6JDbIHIPUdD>I1se`1s}D7wlJ zrZn>?PwHdVz=jUB$`Ply@SIy~cV8y%S2p@MTQ76t1;lhRL=9I*N0Mx*Zdo1UIDYy{ z>OCZIfxrO5hOrrAioE1ixXg2{_JRN>HbcU8vstNDsCPyQ+`nECK}vExb;(h z`kSj;v81AX&AMEEwMY^^fAsquckI!CQ&#*d>}NgGwyks#^mw-vPSjs@zVb&jpdiMv z7>*O;)}{FlR+e+#+BuhHrCbfdio4Wbo3b+!Icyn7V;;(`rj}-RZyaBrPYv?iS^=v@ zTj#<7dS8m-no=Y0{(Wt)LMZ5)f8|-xtC~N){*9aRfHTEAa@TJ_e+x-#^^j%G&Oy>K z#6};X33= zu#fOEfhEJ-sg}y$fXz)c-KctyF3)?ZS)VU}re>)Y&@PNdodIug1vEciX86vQwd;2RQHUxb!Bm^kDBt}j2Osc@?~QOwf~CBor!a_K_D6=Gr0 z2Q{x-Fbg?imdrb&g*!L~A0Kuzd{-hY10x0MHUf7Cf0#Ws*P(-s(tsMuer~vo2S3dd z11{sRmh~fXx#^}ridcsX5x=!LOmCw)6rGLSUbex zol##IQ|K+{3g0WR{{b!--EER9U=>4VaFp|cDS|s8w6>C>)TZi z)=PHawe?G8`QghP1}IOZVP)OQ30V$J?TjPp|E2QZ&U ziyN-qYQ{}j^O`5YzxHb0j-v&cv|V--sdSslK=bj(cGH2h+;!JJXof=WXl2`W-~`Qv$=MfRV{1 z6+9&&v#xFLXA#FMj(N<{uT}asCOrP#=+V*DS;E-6NYsbW5D%pfZ&r!X{CHRa<;hf5 zo$1s%I(}#oo|=`cSSU!~in{2|v}3m>RJs+)~Feva*gL!rsUvfZ4w%dexZ7@$RL zAlqjLLnWMP<$65zUrMzTE9g8{f1Vqq6Uht=u2qGUhtl4+=gQ^Wx6onleh-{Fz?ch# zZDzaS=mbFo|xd(FYI(7HJc>HK0vP@GYe-#z!zty#g zoFLpfU-!N%an(x0hmxQz__C}eF(llHGn!p=K)L~MD2vYjvK zSm_}!t(r`qO~p6O+mi?yfBD&n4(V7wI}2Fy1-%+}YIL|iV~I~9KYd*r)`Z)D z$?mz+k#O5Wg2Q*CGI)o$3!S5CA+}#XD@PoB*TF*fP2y*DkfyJS-N+jO`AQe?+_D|M zM>zVa8{N3M2B1QwP*!g0oVKlyUj1eZXrt*n)}k+~*2{%gf9c9QY_O*Nnq02< z+VU?oNxwKjyrhG4sJGrFNI5@5{CG@iOK+A;$x%@K=D1YwXcl*Ie3+`@JV)Pq<<682 zhp@N1T+eo6K0#%$1r$Lt>}{f%!)|&yNp|?Hi&FP=cD9l|sJu`gpsEb?gs#r7&Ofu; zG+!yxowysW;P~A8e`Z%Ncxl<TfCvw@;G=3~)Ai+xw4QW9`?G#CIZ#X}3QHybWw78J`k{}0&vpf?skYqwZn70g zve~SbOfPY)e}351rb(oy2(VCQ30?P5-xj>q-Q6xuCj+2v8a6nExQIZ`!CeMeRXg#W zM|QBO#$*D_AFaz(z5BCmpkO5wtUJRo7_4~N_7UiM<;9#0CPo-oQ4{Qrr(dOks#tbu zE0=aI@BxYfdKHlW+J5RIxP^H%=5tPLlWzFMxA;OGe@PwGYL`k&`TmVm7a^2UkZcw5A_9DQ%~XY zrIw>$e^g?XXKVMbQpaJD9ghXqz-ax=vx&m)MJOUetjkBVo|&6!LLR>pznwzSSX2b5 z8S`^-uZtOml%_ktt6Vl#Tr#h024DM)!>QfULBOVH>#7DFXrFx8($OSa6^2656&+fG z@m6D~%hipty6Hc(IZQybp_x-b6cGTi_j)BEe`|QRxe9@}37Em+9?EQS1XC7pB?Wy@ zWT01^y9wEcbe&FAYrBIbJ?I57B93jkiHL>dV2B=#LZet$rF+Mgp|MK0sO8%B0vq+Y zx#kr=9So|HC|grzktcIo!EiV!(za#w1#_~gRxrC12L)tS zcOM~(iCNTqJ+;S`ylL>dU3Zv)U~YG1YW()c2@5CY`57lKiUZsNW7>K11G)<=z>`aX zhThR&fhXL)WMp342ZyHis5J7M_U#8ye-OCyIZj*1^ULQn2q&<9HtTc&@|POGVhGjh z1KWBaK>N^pS_p@rO$mO&r*^BJg2U_m~ll^d+MyC##R^FDZO2Z zyN|EmBcgKV0VoPtLZg?V>1!8P5aN|>@!s8lxdInSs=O;GGq@$3Pd8DIV;YzbwY)E7 zq25SCvdtd9n%-k_^y1GIrOG_me?La3P)khXKVT5w++J5%BE_2T#bjy?X^OJ=wPkh3 zZ(gKd{~M4zl0i9S<&|Aqnxfj4;Ly~%29f`*OgBm#3f7!}i1zqBq1>tL94mB=pHxcZ z#s}dB@t`rfpn`PB?e}vq+1=ht; z+iCiMv!KG-GJ_N)7~R;wvBF!@PvjG;D)v^IHqXOrmi|@}8&P)9t1Y*?4z8cwe8;F= z7DKp$&5BHR?cj1BP-(O!sKZ(|9)Fr_h`D8Zpbk#%nfz+p1JotxG;5lWr6aPB?`J|o z)QBf6fU%YQ%g9OvX`9B0fAywuyS#w#0eVfobxK(7?zIoBp?!$V7huq+%^|}Bgfxp5 zeoOTk0cI1i^P+fFBoi}vAFPIqtHHxN%9cn7W0OHB~;e;I6z6Pzf{-_aIc zHBzqao6DLq3NY@`u50-Yp82LP<4_GY4dE0HB2oeA>KjVEUyl5-$GgH~jm>H)ywbad zWVaGTEn0$F-;Nhq!7bg3X_i+ZvgQK!|-$zG+eooWD$j+vb};Lf4;j@Vi(wE3u(KdNe_tq zHqzpyJh!IhVjAl1_T(nNucAb64>k)4EIe0Mx+*xDbEJs{?z%U2)cyf@36WVy{` zPMX;wkL11%XFCKN<(ESH7C3CvhPeq`u8nj2IIMQm zIMpdkb+{~H?4@!Xy4J42uToCr9~7 z`sC(F^c3N$>mvv;Bz&j+cV1de|%Kcf|9mc_XV_uJp*OE#Vnf7 z*xxJ>yzBe17>M~Z=g%ExcNmHJ&$z!yVmR|}RT|7-{%5Sh2o2-?ts5f=Ov`XzV_mE~ zhz64s?B>Y!-8`IetDEyXxi!%v<~DgpG4BTV_=mCQhckuf=gF=%3&`$gkF$#I5T0#9 zixU%Ve{SxI4;II6?TO%!bnZ|{K9%UQTz4P7)}jE^U~@jB#?453sjY6z%)RBV zQqWKKI!$whCE>&$JJb{m`He;tTMW4q8jg04V2#KEGT4yW}JxDGql z{(^|X|L%t8HN&}|h?i!gE?)c70ONDMw|7ocO<Kj+9 z+=-aqHVbJ;@VgsZV5swhLAzoAsmEn|DS)>pU;ClJmOZ ze?Vu(xl7O!2VVFIvKyC$g*fmo*IGm!iaNI~r8>x4_dDb=sBVu>l~WjMFHu!YSXl2E z^;EbpUkObWLUq44t=~XFKK(dC$nt>&yW%L2K_t5ED6K((y9BJ*L5F*>sFJ~wdKav> z!7KWOXh9)|2XW|Kp(BPa9NXb&Mz>fDe<2_y(>QQZ9HvkB3!=5m4fq0LLoFAPVB!O> z?Gf}6sce3dz$8B19+EO7zo*v`>r44@z@^AdJ9mPR@=8JRSN_E}Bm5h^f3al96jndag!lN98nTB-ju& zFq&6?#WsF!A7vA3@nK;<6Yu%dL#Q6|dxj<|z5O0Q0f(B&6Q7o|62e$e%EJ&#t19ZY z^;x^vFu|v?`ARMYs;V_^xj`QEf1PaT#a9}}K9MWR>`El^?(5gjfUK5;i`Kkv^4sX0 zV$&0T4|Gd{upieBIOg_R?wohLCI?4U1eim1ZGd?J(~{IZS$^j=sVa z=%W!IT9xikrdbnb9QERgE4h!d<8^pcP_5pk`VoIHajMG|1?f2e2VL3!ZV z`zr61M5DL2L1l)NlSS)1-IGjeEZ?RcRia+ehVA6z1jZC_Vm5-@Zo6Z;%hTm}4TWn0xe_~1eXxb5ynYs*)(9sMwu(Un{JbP45RqL3kK%4uz_@Eg zIwnFdSFsuavaTzrcg#n&e`^Rl-RnJR$-a(5y-Q;OZ#!wDJ-Zo(k^3w`*ddWv04xAU zNfw_zP8g&L+)RXSwV0XFbb^H;rZSg4k#yqoJGMz9w#gXvxrH9Ur8 ze~Dybx}iTf2N?3APm{qnx5mHDUn50M3C>f$8I|801DT-Ao60y1f5#xQO~N>g%>}N0 zGk^OHA8Zw~AULeerq7n41cIM6J}1Y7_IjQk2>Iv9W0J^|#}vN9G7PdMhbVR>)Xvh- zN(2qKOk^CODT#Iv&1q}eLU3HvjbXQgZ9~zEjoe1b|H`Mfnc&Cy!;(M2WNc2TE*6J0 zkDdkLp-u?oVuT~qW9U&^7X0EahK%s>e z2NmxWwLnx<)B->KM4|h4_&@LSzTG`@v17Xx51YzdxpL(yWSL8MlPitSXaW%2pq$)T zeY>&c;70xPjqhf+s?r4cH-Dr5m$ffzOCIUE{y`iN6a^1=4M{2!R0bfNVUP+0h#Iwk zU;kaZ5>W4_yWeN{4!Um@&`J%v_ORC4e4L_<1tn)Qd%rzwt2c;gu%t^bOtTTiJVk<7 z$7)VrL`p!!qW)OZ-Oyi%X3G;Lm{b@8`2JxpXZCPQkIAbeYeHg~(tj;E7{z#wkLeuX zIRjM6<|fZ*?!g-#kaXNp^F8W-{KW2TzkN7dr;98+E{6-E8rwcsE@*F)o|N%F9R9Sy z9FHFtHq~L_%21HxBV+0zgB70;B^;_NZ_2pW>Vs4Xl^F$P?R^k<_>wdL&TO| z=X`g~6?S&+F8cQI%HMh+hVQYk)kL%Y!#dg_4=)o8ctcMrfPb&igwyewpgxb5Zwo!1 z=CtBOTRAyt&B2bMZG-E=sH!Pv*7Q_JPQU7q+IDO>K61%;5++cBp2i`kigS7=H3cog zg9r$4kHn%E;;ti-K+FSspcf!R$Lv3*V5%n?*AG*x(M8^iw5>bf*&*mptD?NFPuO9{ zTotzb_qK+y?SIzxUdZcu=g3*RZ)=xo0;}-O*F&ru`&5bc`4vAKGWtnPh|FGd$T@%!5+UWLl1FtuwEuEH}#_1N7^yFcv34IfEXTxd&1P`A#s< zAwy0Z+1Wd)C80{D)9qMhGt5_89@*0@UA=n{naDN}O9PP~g55N6_&k>%?UUo$S zi#FCkZHjJBB%#68Ui}bpHg(v$=<>|rlrK1G-pRe?rKI`fr{+i{KS0uOka$<8y}@R* zqDk@0&%@^w_D|&8JR@cc&a)#Z?*L14?sicMf?Pgg%su+L!p|eGg43iK2)&Lcx)34D zcDV2J%YTqUcHQ43g~u_eB@E1cR@Tc0%3&p{bJd%E(xQ z$68w~y1BP$h<@PS)YO!D-dUHY9Inoow}fY37i4g1_W9tnke1q|Y3Mp?)AMEV?wnv@ zt8for)>E9pkZ4J~*;8)Ei${4Uuj{be?SE!LHrodoXcso4KKijBzt>@4<^9QSyq)g& z+$9axlXm^5g1u|e2WlahA0W;b-QD%`oRqxHNEK z%y%m-s3Qk~6JOVA_)4@jC20g%rOMilCB^60?X&iuKVY)0&w*X-L5`EDR-~RUv41`y zX+`|2P3V7(Cww|4EQy`y@s+3B0mf+U^DdSk)9R7x`m{VZLne*G@c?#chj?zVh~4T_ zm~&!kTo<(O;))wwTzR!E&c)caW7zs24mpRd&1my($1Vm#-*MaFNN<=HaWS zZ?`}CbjY2HayPU%5>P%|PBGmT4}bVzblK!inrqOtAhbAc(5cn((}ny^-i1w|r3i&Itcz4YgxE1z(y@{({%WNgRnXBW24n%?f=l~!SoQgsBxf~SL&?uthr zwQ(Mrm6PTtgKOCqHDwQs?h1c;F2*bU%BD(D*oHeVnN5)24$H$5Fc)GC_kUgz?m!7K ztwJBo%qxleALr>>JL+=J`TdXc zh~Lvi=Q{*Ql+%KQB_4_*M;yFA2Zw+pM80l(mD#LRuA7Wy|2@2mG}aka2#F3HXlTC~ z%HU+V4p=UsuN$VHlrN>rVt?hj3?yV0-tXiC{)hXxx_$16Yl*euV98J@OPzDfgUHI} zyP@W`je=6G{Rbdl5F}Gh|haW_s0G2ZVuet#bLsoEKTk|0WBGK9i{DJnCWymoQ;rNN~N_ol1W@>HB}ZN1`P zYANPSbh*+S4;XRQlVR(3a{lVdwMX-p)$1^Vl3%7u+f-&@kPYV)lta!>J0>%qSH+2H z9!Tb-vp9T6lGnwH9NTWGZBo8XtP|GnSNlRWGCWm|t#izMrhoC-l5949C#s0JRIYo; z0($qlkOLt&$v)oi#-+(~Ar9!NAQuTwcrRY@s8Jiowe>us@0j0w=9`Y-*5pLGn5pNac!$`2I>;*W7yiqa`jw>gkEfytD3WUsEN)6CCf=5+vlI|uvXgK%^$MN&ctKt z!h#u$b3t8MXMe|+B81YOE?$QLy<{7rRm^1J_=9o8>A>YWv#HJ(q^vF+K32(nl|fmR zJG=f2TDax-zO@rg+;<=S)b;E15RE@@t$m)P z+nrVX+!x?~%09W!D5mO@ryJ^Tts|3r`HUftdYbz5L4Rba(2FDT&UldXA|iUspbQ71 zB1twRqqh?rR%PRY&Yr2p++1VZDoi7g6DY?u0m1pX2v{na8%`55RSr%_^i>C6}>(0>)0{r{UQs`5aM?+R*NipMZ){eC*`Uxs9jbzNfOwKzO=Ift9QDQ{H>}+sfX65E^U+*nLeW{D!q#f( zU78T{BOhp=>9Idmtb+F(B~Nep8oU>~`+pF9lGbjWxcpL>Kh3BO#anX9<{(8m*=i5o zFi)KJlKw-;cynNYCG2|c%ZhB2-A8_YggLws(1V5-FbTcCjB`}yzmz|GBGSgsImUBW ze@0z%VqM(5u<2jKp#6tY|JX1Fnn9kCQeJ&&>mygvUeIb`pJ3^$nv~$!poTt~;(upq z&yTH%t^Ma^#VKUo_Ud1E_hT1ZyHCrx9)S)YmRjs0It&15e2`PR9X<&3ua(Igvj+TU z>~$*5JpYQ7RA(2AFyz|g%^GuumyPRmKinbmR|60oz zD6{z=>uig-wSl6IA&f6z_UNVc2}m15vbtSE9KRP5Onrrvmev=Qjstnlsec;vUcMei zf(#u8_$RmWcdB2XeJMQO&U||QaQJyhZJoaea0XO0KGDJtGtM#3OX4L7WsMp)5z$gPjwi*eLWSI#{=t;VAxX!3$Po0L4csTM^?NO}b z$=aHhnN3t-Pj}L9^pG$ahBPT&I4KrlKD0?zzjFfbiF!Ty<{Viy-CQgt`2=!FN?Dy8 zRhxNrcHH}`_2ig0Oj_<*n9H+~`>mT2HFG~8b zf{-R~qC3gLy<8EMKjvDd^BwXEUb9ez7Ts|LvWSoiKzBB=Q4dj8T&COyRhT?l)*tua z)51C$?&*&E&96D_yMNAR;?M7{KR5X(nY-=!wHj2vY-}$Ns8Rvl$sTINkvQu`P!nRb z3F^1WX=uZ|8Ixr2dyOfKeAL~R7Gayq9ZmTP6vkKeDwGvL=rNN#>QANT6HD5sqF(A( z-#>x$t=eCn723fUPgllX_4|S~$j>900oVSmy-KtDNSU>pb$|EKckUPShP%wtI$YSg znVGvl+kJEIO&`L3)r1S=B|T^4L!Q>6ec#-NV^3`!2?0%dxPE6o44|SUu5Bo#8%-L# ze=a%doG0?$yoOvLlb71`$7-Sxs_X!KnK+w;mmA0Vl(l>6$9{t}D~*OL{K(;6bAj7} zj!(tH=$@Zv6Mr^+2>R9aJ^`}P)vGPPo?>tY@GM(f+upaet(t|c!#Ji51rPz0nVlxu zwLzdEqBPM%v3V@)t1V6Ry$77q9hsQc*b0XIr!lU>40Ws)_%K*oJcr+7+}`-y2UAPG zVbOlZ{a4&cj{N`2P2(7G$(Qg2P^a>mSJ>ulj7Y=Xw|{PNaS4^1PE-)nUAQB$42X{X z-MRjxDtG8%wGLF~(37;qCG;Vc8v(*$arlT(D5h&?n>Q=(@v{NMG3$QsknoZ~CKPrKTbrNa# zVP@{5b9TJx)wO>M{e7}a&W~VnVCJsmgeSI$rGHOLS#B>wqvva@tcY@-+1iQ-Sb|C3 ze5#NKrM8h9i3OlC;ThFUCi1370C_37?Xo#lC$%6DMifplm&e{P37@E{2SV-d*V<0# zVto&?i5!nkX(9&r8m)(hZ!_UxNB}hib8lbeDN&dh$kWsV4{-}r$xvp3q_jG=_fkl9 zMt>(t4yD#ux1vNH=5ZN1M&ziB!U`$}xr_u(L@GNv%>6$)Nmvn;D_0|rL{V|x z2O!l(K<NmWW?H7vVXn^on=(#cJc~!aN_au)%rwUybakd)bG6u z87oj~%B2`V?o*~7{p~4h-X5C?m$t?Xe*$SUzUp7-UdpXd@!|wu)0viAQOd;D zywwBUS`&!b(Wa2p9nMonLbYY_1H@bOHSh<^t2w)f1 zo)V4;!(H_NP<$p!4$3xZ7 z@T?AqX>EBQc|>(iX*GT%n!DG!aHc0;u;QlG&#fNv^S(!4RToZY_kQD`F@Mqef#1H- z<@>x{>me)D>o;|u^;ZGW>|Ixa?Bn?i?@O>KoW~a`t53G6@j} z@P+x->Hh~j24V7E#5taU3hfW(b3|deH0BeNXN-$iqC>i{#yRdCQkK(&7L1&$1?E^> zVlfKY`wWJY8Q3XPTr=J8S>XKqx@0Oxp0h_&eY#>CxT=h{J5BkR%6|&=;wXVZ6=;=H z`Xn7y2nUqg{fqsiGT^|6+8F>87_#>P;O;i=MB+KX=PteUf5UmqU`Rq3Px~K0Dt-H! zQvKiIKSpuwA|{2L+J7$!^>guAt2s@cDkhQ3n+H@@<4|_<5{oM?WD`;0acJ|JkJ=f7 zHG~N95U0G&NnVO?u77pQ?b&|!`)RRXH}0Uk&V$isI~c6kuo5z<4pUV#(Dkz&hlJj3 zixv92s&pnqq-lVz<3hmDvRnWw02qS`SCEAHihfDNV zG;V)}`+zLheSgfmI_hQOX5*MEVejv;{|4Xmmo;&R!*zP%2`FH=?tK}=P*o5iY^ zJ?YBlVLy6-w0)ay9IxA%|K{pk&w#-OdZ@$Jo{!8v4QZ$WwY)9wTujXGxpBjayBt3E z#r-U!5JnEv4Z(d1HO<)v74znS#MR1~%>ZA|0FN!=Xn*2jqB&_U`}mnxW1|hNn*~+& zINesBy!YHmJe(nJn{Cw~E_0b3vk77Vo?rJ7;sfa^^E~83p|;L`xtcq#(%&KOL{27J zQO1#%XCWWd$?Vr9InD1p9lKLoiYX}RUve@jh5cErIb$EybcZ^j$|12>xLQQ(eJIVn z;$Z0PGJkl+c}A~H-f+31(@nVHCcpc*n4F=v%kDbfuCXmQUBometDQVEHB?v=pnzJX zdF^d;MLCJ7{_gaIYZPc)oEVp6@zfWPai7e<6cJc z=cOet^V6{V`Dqo(dg5l)DZGn4ck{s0Lw^zSs{SJ7z+Ya%<5cOhy|uRPS^80|wm}-P z6%#tjiVsnaE4s^av9){}hm@q^F3AQJ-Xklksob-EqF}!#RdwfMO{&EK0K7KzL&EZk zFYKNdI;uLX4OHc)!K#uoQM8I~oncEv9r&czRjdgf!QKD+PMStxb z)c=*XJXY_$Gv+QZ|K9I6_;=3|6zaGFje;jMjA@RP3qM7Q;%5M!WocJNFOjN%f3 zV-Y!)WcoPbAY9lf2UI4o=UD$;+aO;jT)Io#e?6j}-23`vo7Y$zb{TTXm5&k?9yld{S zHp7mK-#nCuvCCxbL5j_H#;Bz%P~Q$^f~l-A_Om9c@Te9{R?~N;E#d-y4s^3*NIL|? zF=tQHz|lQ>3pX;j@$Q(uwP8QZ_qL)=Oc!uZCdvd!JB;WT5f>3&H)qcjo4AZL@zG*Wxw~?OK05 zrt3JurLz^ga2xLWTfK8mRVm_XF?7HKXOLzk(s^`YWG?v9q%_uBxBGG|5I@54GV#kY^3=hJT3p7*q3ZdEeiBr`!JR zdfu73TIWZho_~Nir286mT6`MUuLv71bUbAj^}u5x?K{~Q5IT|0KBuQD@h?@(TfFW)GUzSkW~kQ=z;9;{^P{BA zoBqSR8+_q4CNWsJ=-g>9v>dXlFLC#aOMl6!&^e>fFn@@{1J4xWBxJ#P@B4A)5gjsG zy#giKoU(O9QR71A9Q6YEIgb+vCD-AvH3O^a{d)h}oH?5)z6j!Y4!Q}Ute@P|FmP1p zJJoB=l%VvY-TAC0Sl2*}#6RBaJ391)wcdTdx2?|rl{jw_<8q8k`R#q8S-ELQki98r_XVXUw^x2#QkuMWpR#-AJfHc*VxqQ_@MWl ze)@f@?_zwjy9nzjt;WJP9%~RHA#fon-@BQ5vB}VfT`p{BA_^hKViCw}5Nu=V!I*&3 zljNd(yR7DjRSuH#e6NOtPamHqqV>O7(_b0w~|tOAH%_l`o0$9guCocrZ?MS(BfG;jAK zD{wznh+j8a3+YW;_Q#y%Kd`IvCmycLsecSl@$P=HK>1^MiLW zb_TTy=?R9t@YFnuB-_MW)b4EeV<^#Q9&(yi?>6C-fqV~@_Y}~|>GrFjL~~}Z;XnWE z^V1g34EQbwK7mSZCNnOn90oy|`G1bAh!c-3Hy(!e_5@5S;(#5sKez5^IekMk?}XPA zdLGVbS$Pmwn}}F1jO2|3%k&yb<61ymt)s6;)HTC80Pk^ z8hhXM)j#H!oWp~ScIEfFT=zPjm)(!`-62ovU9YFpbUh_+<0)0kZH_VCu760JU9sjE zn-o&=P)4BgdtNpJx5S$NV3(`hXOH6`F0C?6teC9E6PPs-xs-O2VjQM1Cz4=;5T8^# zE(^QSW1Ey7xS(n$31gx|)a!E7EuYA^-jXhUH-6HKMnA3T>S&j%USW8}WZg7uyZwpz zni4ez`L*7Bn84!~vBpo4HGg*A_1Gr|Z-?<=bP}uRWTn_=@DFe7o5q$#mi(e?PZu>$tC)i5?jLJc>?qyZ~BU9 zyRQw-D9LA-b7?tkMd>NjP!7A;QV^NsN+xn6rlXW8?}_PAPk)UlCx1XUz|kL=_VaK8 zvN{OPEB6efzZ>q!#oBo8I3l`;s8;09u?|@G9(3lQ-#_T!5%v`j)Kekj7KgEucczEH zRxH}Q67IFE{Bv$eDt~ZY(O(}Mzc*BQd%u5;Xum;pnm6uh@=r<+X|fg1c06_4!B%|zE#C1YIFU2`DP96K|t>CMs^Y8ioy3Y4Ce)z}v z#yk2urovj$9BQ$3%4#Pr!BeG&bR)-;eiQU``N#gK#+6f6Tc-@CoP;Y-@+{6Wcand6 zh84yNb~(0DXw3b&&*x(htv{W9&o}l(FnI^}b|;Dl^7wb|p{8Lif7iCW}&(Ee;}(|9m(Wo4@a ztlc@RTM>w;2@kU; zJ6uv5)8c*1QTMUa{c*)PtF9VFCMLhu*vjH||JqQ{gUlI~jSI7&cmEjcEpSC3Ovu+r z^*@Hx^os-0^b43m6-w5>8!HXce z9;vRdCV%LjyBaxFmWy$F)_;oOJ*Q&{3(Oz*>K3z@XIYuIkXz} z-QF979?^<;lD`1%gRjv`{}UXpLJkQV_Ft3>a%{%kf5v<@`T!o|FsAn^kG6S*jAk3^ zgMdP%F_86k z_=S2bSPvIxFHE~T=W98s-{f%Guj^0L%Ala}^(Y04jnY z8Gr6&;BMbl0gd-ZM{&=F`V?MsecNISw=?>(1Zn2*6spkVpuBTP7}^>|(*9@D?eoI$ zK>pAD@c(5lmnoH~1D6q3ahK?zcmFj~q!idARrRL#H965IXU_AtRTXit?}9DzA~5MG zcA`3k?H$;AsvftmO!a%)?eI+6eBDk|gMX08;Dx9=Z!3{)mQhnm!X8C=dDqvQaRrU% z%!?JsDx*7oLYbX*Zp1eh?V%!6)e*VyGiZZDI10@pgInb9eWDzb*PQ{`xA#rw!(Xm7 zupgL%vg`SPz)quecEc;p+PYEkgJ&sz)ZklAiRl&8?&rH}9KgT(vAW;=TGln!%6|=6 z7*WKb$qXk|IjG03MX;8L_j(vmgHeE`Fd|vSMkIB}==J@)vgu#GKl6wa7k8me6?f-) zOedv$fsZ;RM1L}9woZwn(v5G&@|jA>SpIoWh?b5{hFfi6PuyplWQcVNZIbVQe#VL6 z`k5gijdUTozkY_3%I`;$<=4nbiGR|R-XBW7b-ZHR9L;*z(|#!ZJ800~H+8B-MB4tH zMyLN8C$;wB7I*#DfBLx|a@*q5v4;}!DY1Sb=hee#2C8sD+h>qoA5c&6fckNTOk^&J z^2)_oPePE3`YlAvxl1>XW2m06%qofAA~Q2kS>e4s*AApPSsF%U#@-dDeSfUal!6Ta z`_L9!F13Srnc3-MLz6+6Vm$`(>EZRQK>QDZoIKAydg3y~mL9UTlj9TGNv&WAJTy8n z(Dsqb4DPVQ$947BYYqIWo5jC5OktaAya;oL+sgD%#`pmHAkg(|Hy8cKNjDcUQRN$z z^^tRB?t*ot8(F?gKS46CR)0HhhV$#psZ>5!@;p0p3VUn1%L@9go_l9yA3ui>MUs42Xw7pTOxJ+EIq z3-`xaplmT}AMG(`iT$;HV1It#It#=JZf`nNJA)0p6BpU?xGs4W-hXn=xB8!Z)4PJg zVH*3%8hlOb(_Px$Nj8MQP>%qyxjUn_2OqVtQ@o92&G#KuuMpVYg|s=YD!(!HrNyx0tTTe(o# z1*l>|bL<11hZJ%SuYV=XB_By;_CEPQzbV|IUjD+=1qA=Y9!}sAw)!LGFYUE&{d|LUzUh$M8|=zYJu&=! z@}}!unfoZ|dPXaL#JgumQPP%=|w%>U!L!K%)s4?1{(w+h^rm%s1s!7H|-`cY>^TQ?^yo}-<^tG`Hp zU`V&w!K3-nQ^5=3lKP9>rAkep#+Xb1W9*{9`v(eKp~`Ch578w z(^{E6No4s_6S{xA@12H%(D%$`FJn#sb+ey8nJf?&d9cq>9HbwY5V#ZRA{uGw*ed5E z%q8obc>Cpcavy4td26S1NI$gyYWOl}r#dZ%+jD@t`Mn-+%xT~{+x98voqgYW^m}gR zs+dtF9EN>ig?Y;kW#CtymsDTX) z&b!SeWQQLZ9n1|^9TpC2=o8SzcRxX0WLx|K89I7skxx=w}9B5;7 zux>ag)Fq66^k%to^8fU+Z~gZ2w2K?>vA5pMH@JWHhTDe`C7jWu+(KNB1LQmCFyck@ zH6S_mK|=kn$h})a0cH+9eRW5;D*1ZDxdT~Fq>P-+$6F*(i&vZ})X&ok!Nyz^_n4{G zoviyK!5-UAJnT&$-}uIDf2><SICGrjP7pH%0zF4Uomgz5XCegFj#5si@o$U<5Gyh{= zYnV$@H@o_dywiR`ZuH$Zk0d{S*X?iT>>R28?H;aC3tY=%|8qIYVN5AB8KqCb*T8a| ztv3`BIr+H{N*!k#JBXp4_gDFUdEdBGs5MOCGimEQWr%-&Qu!nbWVAeFoQ#K4D*0)ZoZst$&kksB;t6tX<7F?DJAI2D(|h3)4&Q+jqw}()5)Lci($(XODkl^PH&v z`D{@(6d%ZE0Iej0e18vPn~+H&PkCv2x*@MY7xh$xOB&{sK>@Y?BUBq@~-*F&v9})}uFR%x*Z#pbOjD+N%cETH4{>Od zc(PiepuQYONBwsiXX5|TH~Wj9+3&W*Zhvr}AU|Kd2GKbzY?}B-ZA~GrcbFRi>;WXj z+CAE5Ci}hKx!5txt0ba$xz`aKPy?6yn}LD=@l zrW>navN(T6%3$vh&+P~^|4l^BP0du}k!_)%)1%M&{RQrCM;j}iBG${;Cz@7tI%t0!~2y&l8)Dte;v|&UR zku7WLpT{$M8}6oB_%;kZ&>=SFW!QTg;;`U`Qa-SMRkDo`}F4oQIkz>1#ZcN1&An}Y@M7!}%44eQjWisA>>Xq{ zXUg)S%;!|qlE-68l{K|N5dW0T%;#hyIW)d&iMhQGQR_`IRiW;qPfLhh93O;TUl~qg z1qJrWnd<-fJh5w^)s73?=KJBlJvV=2W_o(d+#yLc+$$OW*Ylbd6r@D`WNtLul}2a| z>QW)j_5@%7M;-t%`8o*C`H{!roxvWQxI=Wtc*0|}ja-K9zX^|HlqJV{{c-GLeXrl} z8R8+5{^-k}jy|FK>W#DvA*)HEP)KX@^eoYS+8+>KdJn*h{%6|z)O_9l42gfc8SPBJ zb)#gX6MMb`*O<(~YhfsIQ)uyy`z^H0j?BP)jCJ zwofHCHywNsSrM39#GQ_+{(E!%yY{cY3;1^n+gSY$wE*l2K^Vy2034S^m&eV6l6ZLX|r)~9&;H#mw8ws z3_-r}ALC~9Yw(I(*{xjk#&1rqSMHwfbVyi#^a3x{9TshTqW@)` zi@$mvl4Q89)h~VpFR(ASOSIQFJl+0?K$1Rv;3=h|a$@W`|F3iFgI9m{m;He_HA|83 zrK3B%KS0}m4R!4pVj{({>pe0v`d(0!HgX(g?uyyiV3VoR0nyGAQ(X2dNxq2TLouce z4s?z<(1Av5%G;`aFV-{g5&aeFZPAuzhxQaXlW3b49OHu<->SRsfBwC(`0=-G!=>D- zvBA#l4IgVz<9Of|G1-5k-r4v7*L@q2?(e36U5$P}XZjzFW0e2(*V;bfm=5x9$Mm#< z@A>IBhS!gKyMM+#Vfr9}2A18%7yZ=gZ*@+t`d5sm%U z1&MZ<;jvZqi-UbxC|CrBlx$qXKRr5kRsr+Dr&&vGy?apEtv`Ql!xTi=j(z)lkd;q$ zg=vhW>ilf?-cr+ht3Or1`eXFzgwa=<0Q9j$ZTgvDAJ2BIPK*1`{*PzPE>YXNz3bUv zGH@Ai$c$z8Mw zZJ)~AwcD?;PnJuzKRAZiKSx#M(2kj7;)oayz7>$=JlTIQvxxN{KRGCu>Y)qs+-h-H zy!MIc=Ey&AxbO9sAz8+_k4FMB?6ci?6l!;e{iV@|KY`o-7*9YxJ#FUVw8T9_#vAWk zV4ah}F1y_Jw|Q>jwR)=8eSrEiW*^MV89WyQ)SNe8UouO4b3m^9;n#fG=`Wopx9c(3 z?T;Zfd6|FLT8`*GgL!4_!d?{tQw3S>q&XXqsG6&WLYfaKU>?L-n)7@&xGQwtaYp#r zBOjamPDK5>V4}2?P|g#Rs?Mk|=V^etefNW>d}kl=%)6)Y(bHy+&?pV5dxEI{b(g|C z!*MK%1u$sq$b>N1I{Svu*_gQ}Y9=+2DWufcoUN!E7hDnpDS4E-0shoBj!9 zK|in$^sQEF*|0TZh{##dzWNw%rVVw}khGY>I>KIh*qe>@@B6U6d_dZa)|z*Cyjwgr z1#A&Uii5F}7XdfD734{vep@1>$* z=#GELJfh0fIEkp28P{&l1N+`++8|Ez-qd}@$Xj(3pi$ZX8}neB{!Zui-hy|u)6c!( zbo7Eb7a~Q2`;*C{hOc3;aW5nIx}O_cU4;_lgUgg-uy_=8Ts3zIh-rT_CifGIOfDby zu=#2Gf#G_sF+Y3MDB-cf%LdrLF<-wp9P58Q)yVUeG=ge!tto@r^d8^ySM0^yyTA0F z;fl57lL>MzwwdgI|FC(aV2cQQPb=Xt>}{k3Sy^CgOa~#Aqk60=iGDrZup`k5{V@MA zy9{btBR1HgvhgVD2X(yf+c=>@oHJbg%MQPM<6vGjao|_&)L^^sk>fTnytsd1!>61R z>|1-BSws#)qTH+BZ=8wpwL)|2sstK0D%Fw~}wKoj-qlst)IU%VTeiF2?asj=6}jmt2=)_~z3S$qGWXEWA=C zZaGfu>1fClj~10RYx()n+iG4$LS-KE&=)h*ejQKyU(w6L@UALbRD*gouR9+4ELwbx zF1_6!zZcwO0-C@!U%Mxf*(7irwSGxU?0=>y9Lm=fw^ZA(e*L__{b7GUJ!3C!EaIf| z@^u9rC%t-j(HD~oJh<^Vv58E~edL8erFRD}7I*;1Z~RE7tJBVF9Nqem{Fuv5Yu-zX zBOLRP%4}=?rb13;6%d*GME{(NxL!GBO{Nd_yRX+j$H>8@R12GcSDW|a1qx-on(u2$ z?;1lSQZxmq=vuk&Ky!cau|6*U9P7=mxN4sL9^CIV{@wRa&i2gX0=l@hf)uAK5nRQLJon)DyeFjL21kYhYHzGpr_rEvBh_NY{dm81KI zZNAt3nyR4>o5!aAjyb20P;~Sae*isKHJE;L4Ajf^)larpUExQC@px|ZE+G9~JNVZ8 z==k8O?e}}*n5H=a@ZXTb@I4Mem@EWQlWY%fQe=8hq;eV7YSWBNl>bh(@s-^AEvag( zt)h>id{x7t@N9p9n9>kA`XFVebEkUun!kRCqs!EIC6MjZTZY8PxsR3VqmXN!Pp>w zoSifc%8y0bng^i^d2@^OuM7>md(6vuUXnC9HBi0t{^7!xYCaF~L(b`{W`V)2B ztY5YI?(BcrCH@(I|7;el4?9`!>r34iy0S68FC>i3dUSVwP?W)JxovFX8p_%vKT;Ko1b6vIUx-;4On)o~S;q{YH(HX5lCZ56d2|%hLYUg( z@oDYwe6h%dFn)`!j4sjN*1VR0HL*sJ;U=*1h$vHwLXEb_OR-OVan<9Yx%TF$`xIke z-mN%BsT{QPa?{r8>}iYhi=gE@+-z5w&ANZj-Tlz!LVZIMKbyJ;?z1wR%}MQIFCZF^ z4=-7nl$1Y{>z+{c#=WZ_H&(GF%1Is6xso5?Ur_2$xaRw~Y#hjlEk{%L;y6qR;+^GN zxYHLushmquO<9%^n`d7l5yxbkeUjo!F!lJc-3}vh9zOW6{dkwd$=k4L=-K+9=W~DX zrs0qIP!PBH7SwSxc(smCbHlg&&{6uSlQM|Xd>fWUdlzj*+pD6D4ZCw@uvddfdQ)6S z?7iupXHRkU=!mjz&j7~cQcTLhtRJ6+1M+2sdWOWB6|uyeeeL1CxXuUc^XcU@K9vf! z5{k;d@TYK_f0FUVWAc7_QLik`>V1Ep7Qq?*AA9fCow%-af&QR8U@i&du_Vj3d;{4S zV%~yDWMc>fLxlPDXOs*k>F&MuT6^#BoR`z1#|TEUk)_hDRCCVJ->@%D8P z#^jf}X!tFqyM`0MM510Zw(m`oo;Ns!+xt(0i?!ypwZ3VT{cL;0tY3NQ)(wBwWkfBp z2(bJh)gjsrkh>J^*`os2F`Dn$f_3-PiFQ6|bfhiwgJ8VQD^bObEGi{2Od=)mS5MVd^)c$F!rYdtCRDwDV-CcRnDT!I}Wq(=I7|4eoL;9 z(oy^TH9neV&Hi4&eSIN?Ln42SZA|Z4hJgNoa@;w7PD;o)tc@{ISA36ZkFTYin~ucCb8=OFT?YJc^A+~$LY_8Sv^i-rDEJo(nG z#fNoic>;}aBU~ag;_m4L*O8s$@L>RH3`!6_$yG5KO5zUQydrydigJGkl8w!D&mxVF z!F_y#g(;j=$H{+D`QXuV6JjZ-64qvbT)s;`f%f<%@bqnx3ru~-|=6&JOh!AF?P_F>>X5MkqLkL)^-f0fIQI-#KG7E zHSL_7hYqguA*I&-<6&s0CXtp+gh7x?!GVgOT+~bl!s7jE}=i|gy z1MdukNI52p`2s`|6XCs<%6)?L^OYz$rzq2fK2DVq$MWJ(pidVAE?%b_DJ&BbyK@u? zYP77=?rT{qN}zud`aIF{AKs-`KtBar!3Nzb-}$D9OxU+|VyXG*H0D5%7Nd;33V7Ck zPFd?;!@9n-G2f*=pWkUeE^4ekP{AIy(T6E( z{y{Kqz0;2&4=5GE!`DO=ov$@=Nti3(0YxxRWevx2M+tw7;;!I~A;w3&@J@;vA*g{M zoue#~g9@=j9ubonlLHH)oTs9@Nq|`me*5tOKDoJ)h~H*f=ak*wPutRMzvsryyRpmn zeM$1Y{u=y7v-c`*{yUdOQ}io9v158nTBR<9Dunms_FlwP{0sLe7d4Pt92pJc$KbDoT?=;=Q9Cf`Mkq8AH8>ZD?k_yl_^!z zu<#VO%-o-YM7(H97QJ2Lcg8irA*Fo@37E#6&*jQFs?Q8=yrh!-a|}c`xX0&%no`P) z$48FZ3N%&!9RC1IR?{0I1lonBAloOs0eOA<8k4|jdW zd+hF6?=?2xt}Scb;v8H2!%siQw|I5w^_=yy-L(Wd(;`q+_w}n6+x|hKQG)SDfZmCvebl zhp~ULNq`hX&h#x~Dlw_bX5Ji-3zUM8;W%acW<4;y;y43XfaY16>p0My_kY1P++%ld zaXgo_-Cuk=Lu+uFe~u5|@;YdA;QuPN*W|r0&Kzqa#_!Sb%Yl4d#u=k>$K0ny#kwRy zJuM1npT*e}^+CyeS5V1KCQF~fNRA!mQILQ0FUoDE1{Mfzn??Q+AO{D{BglpmQ}xqd z@rih1O(ZKJ<*x+!K+Aww7_+PK55bZI4*H-13iFG%sZJo71;z6VCoD6C^!O#+Mnz&W z#3)|sgo`Ah5@Ys)lC#05Ws`S>yq>?s06Qzxq@-F8vBiBBU?abh<{8&9P`}5T|G#It@-6?2Wq4p zM*Ef85|Ns_nyBFOwSbbU9s0&BJ}rN%H0Bgy_uXBmYbs^v&_pf@JRG(IJY%DB@~}cI zx77UraTbKAghF=QE(HHAKKQEV{jj^^Ki7X7|4Dwum1H-);#QZk(Yb1M&WxX2jCgMG z4PVh<=5KhQzl{&|)pIm~G`2zZ1dMvV7`T>T0|ZL?wU`Bj?L*}0RgdCZzOH{sFt)ek zrYSQ=R9`Mh)D^!%d-88^X2gRAe?IM>03?EnkBC!!~~vG8hxJqV=e>rVx2kN&gKd^*JgUOcls#V9vv-CD@G2 z%Q}$dhlyDEq`L5^;?uctYn#Hl@K+FsiH<&lOWQt(A%b7(eFydk@s8iHi&(Gx8VmZ} zuN2I$aRB%E{wMe3O$VR|r+4jTe%CneUi~RI&0ieny1$%d+G@m5Hr%o;or_B zZk(&|iCChFxt-zmtp^j-Xt3ahQ`{AAlyN47O(o}U_vIWL!n2Ctf8J)0jeM%LkQV-U zD+SG0Hw>d|UXCX&M{`igbP_***oYZ@AM`ovezoIPH<&?`Sg(Im-i}#_wX;Xy6&(MtzBM(6P?#PQ_`+(g??QY9s~Up3i_71YvD3?gaPcrfruDGStV( zofokhOXQ9z9Xx-lgv6T6jG;N7?K6;kx9Jxvj#x2xT}@dlu{n&;wLU%1{bg2r}phIr-&z;Wu5T5f+KKG^iCxm0I`B~a&#m#^o@ zTw4R8>nDexn7YOo23OWZ)p<5A* zL9{VWyHMoKH3}jtPTz-Ph_b$e8W19-FXf3!NE~`5hw!op@r<1q2HA?+UI{HoOq)+c+7J@7yc6K1lC%q}SI!v?aRN zb%tJ}BWHKG9??60@GrKeTAhK9P~#pmjkDFZb}ETY4C7oO9x-)E^)axbYDKmfSF~c>cO!9Qix+bX{vffyGt0G%h z&aL&JV_mjPu!pfeUy&GVnNpF%#!`GtL5(9AZ7Ie$-#9=dn=)gx-bhBqmjN@CLFe85 zr2uX*I8200#W%Y>*?c79a}Ya}gwMj_F~-y91F!KL3?n!UOs>cZ>x?CK82u z=jnfKEL7H=7qsE_mMz~(qZjlm9}D6in)x#dr^=j2)KJ5-{(*`9XymQ}&Gnr{6UlLJ z_M-WP^}L*Hq0DUaMI6IzLa>i+_h6H=;fKKz{VX;WOaGFWHuA$FHj;u`UA{44sc~|x{qN?{eo;D+@kG*9_HlnfpHA{VKA%r+ElY@DVqVzu>!UQeDjoVh zq>!hJ0C;Gs(2WR-r}@)AXt!#(Dw6UQx8RIY-t1BS7j8_}Gq$~qetNE)8_P?0A*r+$-wTpj)Id-gkTF#5ZG$KX#z8M1%4 ze41^g%hu_oWCMpuj0Ka@n>|0e)U{S|;=Q2{=KRS0GIJqf=ZzXz4E?M*d-7%@m(5+5 z$wpUhjr)$5UBml-<<0oU3xMmP%Qsx-mAk$&Em664?H2ibHl5U&g<^P24i0%~yzyGR zC3{ZhQd&&0*pJ;}Tz3hT68%ZG%@%*}rb;Yk576(*J6&eJ0wi@l6QY|(i=`M zB{fB4`y!2z=Sshy9E?expFC5XO>@_$)OeUDvsnN#W=i7;2?0Jf#o{HZGs8Zo;rNnq z-!$ks5_#E+ta!GV%u*$IgvhiLO~u|Wi~uo#1Rf`iU9%dN zI<7tart@*-YyF|uAh7Ks7+)$We5&G)y=06?={7je#DoK$=cCTM_mR_#5!4Kn?A4PA zz3L&7K-wdWOR>aF-$b{gxcv@`ulOG(%xcIvG|#q)8rl2L%rwC>Fm8W<#D|B+VrL=7 z9QoKxQ8oRv)A@vDMKZ+Y;QQ$HDr@!$-?`Ou=uHI1Z^qlb((hAO8smk(XYHs+7=3@b zTy^UyYTY&Oay$RCM=&4g_h|7?IsB@{bP1H5P=RZ3{T`J1-bL$FKD}*WHbRW-XXliD z62aL`LsC;2Peu>WRoH(n)4N}x)fj4hh5$iF$DN6Qc7jq5xER|4my{m0?>l8=QXg@D zfc*MIgwsDffBG%P4!-jtLK_>?-8{d3bssu8FAJYn)02 z9|oNup7k1+9Zp4z^Kmmq@>uim-uAlaOWOt$nOH!i;S${1s8IvA)Q%tF^Ce=nlyCL+*8am-9HViZnJw5E7U@zJ2T;kzeWMWAlt> z$RJ1YyRv-{40#hRJii@4g0UTps3@B$%UMm+4~&1Y#xv#lY_pi-Fs4!;q5+v(71Mi5 zq5a$m#t#p{r^x?vJ&(L`yxz}MZhoX2FT&M_eO)KV?lh$rkes2eyz^>=Bo!d5(37Kp zh|q~-U|G4!h#VB2#`*^&Q!H|n4u96hn=;Vtchfbn&RImRYZbkQJU6j;Ehjhf_ezba>9+bay5DGi zv?ea{(&(uBa$*u`e-X(TNJ&qkG`SheIaDq|9X64j^ErRBWWZ=V7a&U$)bb$_dd3Y3E4tZ{Y`u!fZc??aNk_yS!wn0b;>$&_9XekxBMI)yRD33^5(Hd{HLG5;XlzF!Md2XY`X-6CXKjh$x*4QV>f{)kryd?|Oeu z$onnM;BiDf6&x{Sl_{w2{0CXX%{~1dx8KAQuqN*5xsa`uDN>gxi!SR~Z%S~k3zk$}u_K4Hjp^^qxQl1MJ%#*J*avrZ zr${^5&De1rUtwR`I`&stap(9@W*kN2 zkbAbp_aL_t^(Va! zS3Z^(j9dE~=K{_A{F8SZ65N*p+u(Tgdz{ev`3W)_$<#!>#ybzY!C=kktN4EluJv+9 znN+v$wMHM6S9?36K9K)_xZklL%j-Nd*Bppa-1eJgJEs*Q4({>icz$t>*ZmPYf7@&I zEt7xay-shMKp#Kw7Frhvb!@yPgOZzK@;pM{CL}%+OVnM@k?R%|ulf1>9u?{`N%-j1 z!tomYer^#Jy9<06!;tPdgOYzZ*0b%K!LwMOcQahC$qKo7Y;PUV_i1H#r4&>!`XCTf zQ<{D0EOBkjq~>F^Df$-PhR+-F=)6Qy`h^$;QQ*)#;AS$*P`k#ZWzNVXbjsDrrf_mi zb!IB_n37>MQz;}y>BUSijX!>W=;D6*YlrLkKJw=3xc;fQH1&Lq8M=SjNi5+1te(3& z2e~B1I4>yYVemd&l8E^(IG>$&M(5;zTgP4X2fj;@BH!in@%%rj-y4t5b$lWAKh$yf zO<(fQ+(+31T(pMBKht=Kb|=5#Hp2Bu&Lo?_b%^Gm&ZfLr3jryo2U3`-}ejgD(wZ zoSQRVJ*SJh#n9jx3c2@p*jEUw2%*zgAUG$YihkjlT|2gGq4R%jmRBUgF->J*ESO0P z<1@VXYZ=Lr@ZvNG6@M0Xo)EyilBR}8m`UcgVlj(ROLu3R4CCH+#~IUoJ9{y*-WXIL z)?K)b3-DE1rW(2MJRm*AZ8PFTbJ!w&N;s#Ew|k4a25O1L@R^zv&w*YIbSqfa5gu-C>42VnVtH^t;OBMUMw(1D`L+F0i3aKCWZL_jZ3m-V!PP z9Um~%KQT<-@qS6~#y9kbc6~uzx4#&Ne{v7yiQSw_>a%O(KJM#1`#%}CizpHzu+k;c z&k|WVX415mw~p@Z%F+p=<<>MaI!Va4u{&fpdto_SgOqb4~e|Q{r8I=zjb>_ZwqSYQrI4mql`p9+Z3dn-`Z?q zvMEu!DTS`}0>T|f{L||qf5A6fsd0V?R``4luO^Yf#%;dSq$WvcfcgmLlY&(1-~hUM zwEn8rBC>y^XmudmZ~WI_wpuuLs}V0Utu=O=Lx{ZENqytK_6jO0zw@3sAl`?_c}vCg z`KKEr&{yw*z!J!=Ir zMQklGl%5AHc%DA5&N~Nr1W(t$2jtJg_);)jA}4<{z23{Xna)sy92i)W2d$s+P+^>X z0Tl7EKjKT9eJ+lFw|+o>#Fzd-hb{RvUiUhFUiCT)Q6@^cMDPx%%p+fYn*i1iM?LRH zkQHg_W{7je8|pkoH@&uHHHK1BBBW1+|NN}D1G0+gq>ngY)SZqY8kk+=ty>|k5jEH+ zF<^h}DaMy(K*@Wr{FfSIPaO9N9zh5-aLXi|b_MF5C~L93#-V<->-!k%>zDXsoHHBW zg@l);kN>+hhqoXy;3l+dd`=Z3T6Y>TZf9Wsc3%VT6QI*drJ6GV$xk&Nqhifk|F8Bl zs@;$EalOO<&W*nyy@EO~qe=^T5x~6de$%2_ zgD`$=hny~R8Gll}wqjYS$2`%hsAcu7w_aw~%jp$Lj8yXxr;d(%&^9Gjgd&tG$!2pQ<~ zM1}B%zFg$l;0#By$+?9YWwXBwP@I3y{S`-{ zwHyk|ymMrK*O_*|ohOAi%yZ8FJ6+nd_%C&BZ+%C7eky$yV38^BzQ;T;g`5yk9g9Y!aa%Q{t0(F1b1;O=9u$4O|q-U4Bnw}9yi2coA$S=prQ6=)p^X9G* zeX_)^(Qm$pl_S;MXMnLPxYxbvIxoYYI=Na8_h0Mewtk*Lv+kt|om9ck_yVorY}|Bl zzwb}*JmGd$d)HJD|KHk0Pja-01Ef<+Ty26PrwsPt>M%aVn?R%!qw)a-^aO|#rcCT zk>`ZrUbudxGNQ116~%eN(3g!oW{9;EX*{jZly&Ok)h0N-B(r~aNG&Q$OO(uF=f*aK zmEb%zaBAVPHS#03HZzE&^us7w*+rU}zzI|7VGhX@HM*S`ZPPNT;%i!n_c7r$RadQf z;1G763_Op2kWhhZAa6p;8IG|+!CI{UfCs_2?8g7p;yyAPZtdI7Q9;9j-1wS))mOjS zNdF#J|C|1L8^?cr;~miB3UcVRJRj}2)T|-Yoy(*-J6r827=ibxY<9-;qNEOA)Y=5) zFctXTf4^+;@?u)N)HuLCDU^kzbxIp5T z!_nNXBp1`Om$3gNL1TJlvmuG4KU+zu_K#yqt zp6fQ88g&}qeHdx0S^O_K=bvN*B9q))`I>vCtor<{5|@9zn;5(C`@7S=z<1hrUZUK# zx;;vDJs^Jqa#+i;WAg___>`HCIpRv17|D-k=lp-$`{Ta!p+FvhGXq~@P|JVU?(o;z z2J4@^{6Be=^uF)=9^^mcW+A4sj^Q+AMC+U}>c#swQO4xFi?@4eOgekYDf!I8zc}Fa}D=4X6srlU2=~d6ela}$)$%Bww1|8J< zLH~afF+nvt3Al&QdI`NqaY~iMaYO~0^TKehTDjMK+!IIt8Poge?7y|csNF;Vk+1g0 z9`09euC?3#M*7#934dw7BeI;L)?;c?s0bA0XR--GbM(rvp3Tlv;$H^lZ|#&XQtR_| zYYPAQKjMOUzo7oX!{mE?Lo*g^uVX@3BG!MQ;<#T7Ol-?tNY2fE5Z7p(hfC*4nrGxS zblbSbZ1z)g6YKM;1$RlDpSX?Hvf(vlTNe<6hyfX8_i9rgF6yu9?{-~SjD$I@p4?jF zSX`3xL-@6unDfofqwRMUa%(r`vQsQq1Z}zIR;1_Hn@x8iJ|PhI24oZg#61 z-{b-J-5W#etXsH-C0a-KHfG7zr&@mpW4NuYn`^CSMYdI)bUA@@Ml8y9VN?<(WWr$k z`~~d8-R?iB7)|KBdvsMfr_zm)uVtz%SI5LQu`UQ(v_IUGVt6L%E5a+RXSKw$egdY` z>42ZWl3(>HZkouM6J7>p+b^%*-E^kE;i%j7*sV>P{{L;=e!1~7WyQ5^aV&ozwhXo) zjU(cHJUX+jVK%m-w4^dLg&DjNNRuER`5*&ffe`acwq=pXK<>!NdR*C2wxwc0#CbGx zWRk~1nk-K((*|;w8dF-HL#h(XK4&MhU)uD%3#bSxuM7hU%oJ*j3sc7VA5@LXBf%hlg9yRJWGH=c%X z_TCNSEC1^>$+gc=23mtQ`5M0O9m*jF2Ip^aE6CGRT%Hw-kV7{>4i!)=WYBpS$=lz+ z#`9wM8lE+$tr$Lx#Nh8~&QI}7RUP5XJy41570R*Y+pE?|uhoG-qt9WHP?t}Gjde&W2@&?yaC%v#} zxi0$-wgVR1uKQuq*XZ>a82YW{c7Lhyq zvVsp9Y2Mlpxe8)wjR;ieBOXOD%u;KlaXa?L-FWftYcmU&j+>b7=mhNH3Hf0ZkKQi! z0lw@nNV196#k^$wY7Gld4yY+cY+`QhT>GE#tR$mTX54kIKlp!goHJp{f!ha88V&~3 z(_m!2MxQxyUnlwu+DhcsBw%l(cYzs>?Fl#4%R_&C@?dl0yG9&Uia4q{9+DxJo{(R4 z2!o5I{r`F$gzfgtH=W(I_-^~(ZsU^{R~%c?V&7adK+M#Ymn9`G>epNrTcWHfy>wkr z7>j2R8mXbHSYm(SOQOv@y_#&imvV8<)`ZCUIjXwsf)BC6u1vo#0Pn71#{U2E1YX=uHp)jC)4J=lMjrwSCz| z3FF8XlYzw&O*?t0K=W4^A`IZ~1M1)sFuyPP2F8O+ikZp!Mfo>;EUO z%FBPZ&v*UHU-w}xPFHg+Tr@tk4#*W}xBjks8C+J0`I#2v93Zcn>A8hPsu68DpI3ix;uxtEU_U@+J_you+dmGno*n;c&+q!apHhuHnrq?~ykA3s6pMQV& zGp^jJ?Roa}sB5_P$-J=|gjmRR*{al)&x7%L=HJj~T=RAvu za3=p=<1sZt#A@W+f(>eCS0Q;U(cgdZqg}#jLXr2HfSX|Z*mH?#s82&;V?c;_ewamB z!&iHdh*uwhUBdehzS`{N#t(6=>*TIErrYN3GtwMUIJO1zKj>uM@zZ(_9Q3bxrkXS9 z7j4s4zfSYt$^_49llF||rfsJdQ|(vk!UL83#TJ8n7v+<8 zRx`!CtlTM#%QQ`fn%h^upZp+e*+*<3$dUK1xHYj7)P|hD%MpXct(_VTR9j}72yxz0 zI-cCkiN@38+Sm1`0y$<^SFL~4%7sQuC(>};!TZe1@Z^u6C68si} z+_u{v=Bpn%wGA(4)8ButKH-^XT_6hH-3w2*>gF?CMN5fe_C5$}wz4k9*{6op_yRfM zg_X>}nAD8G-V+nE^ECHkWfR0_NLKJ<@0sGk5DWlQK&-!B`T3X?XKul4|L^^_l69}T zsM;y+UNy9Ej-f>Ia4HzcAOe|EugDs|pn>fsChwcrxx?z&R5Khq zxDB!!ihx>>?LEmFj=X$-zqOHr`-0B0$|sUhioQH)2@tIErwjM@GYa&B>rhevA&{}u zZoU95t73!J3kEI+0o?v!#G6F45r+c?KjlC#a{NV{(h4BW$U!(0CC@Za?m_HM^fUEw zV_*s%D)|_jXPA7PX~^cg*onLk)~lwkXYpx8^hiDmpS?9saOY|OFoiZ(= zGP9?n#XA3xcEMj)=g*F< z`LxW_*mn6_R!j?T$G_BiR*(1mVN_1&6xzMO0%(I?cFPC9m^^58I*Vuu<=(717ud1gP^KcmV z8JAx^z2}H+ZnukIrPDJ=E)AJvom$FXlrfNC+8!GfNT8NL1Rk$X3)Uc}ILa z(l6$xqPsV@#zaP>%U+WBD;n=D-kinS#K(=l zU|)kNpM|c`_owS{yemfrMgX%JF_UpOvNmj(%&jQj*q=%E7(I3QBq<)_7t0+c`$hcK z_4A~*rkOQ=HfHoOd6~suGhxl5?#pDh3Fpen%GraJZZ{>Jzswfr*)ExXu9iXf>)|DE zS9|LTUXG9NlT9a1?Umfw4ASXyezbX+?#^q!H@+;)P5EfgqeT&Ijo#{O(Xn=#C#E6Z z)mZp&m1Hk!GhH`uBWXJ_MrNV;r$|n&AI`>%Rx7af^s@53kpV(+39L67^UN z3ETVifTjw@pmyNOzyrcaE+D~08BusG=@!$608IEX5X1{W0owo(uO3j2&l6B6aD=x6 z3eEKq%QHbTRlJV%?%4a^G%~L5nF36fYb@Uw zpKmgM!W0C^QyyR(4ORHO0qnmzBWaI19ggMg!^a3*@Sc+CW+5Agj5x;$X#NQ$x(u!mO$UwBPRe18A_ zdzmQ&nKsuU0W0_=U6-e5f1jzgd}By@7%?7yN}}6i47|ep`uATASfU2DCGh$Y%kzTm zVe0zsV||B+cQZ5SOR|J$*%^Y3IX)_@ByifB$do^Q}GnHl0=N z{nzrCxde{w^TXb?y@hxNW4>u@bX$La)WF|6@>~7?cCE`lL`E#McVP+v3g2%Gy1svw z|66-}Ywx@LeY2P37yC)nzp~fg>|qMwP}_g!K{fWUuOJXQ9TG*R_&3&nMSDF1%>&)6 zKOVjw0zqDB)bDk_Iu4D?=KEj&O-ev*MO4-$n7-T5FFislT1XGU3rM1L2El)Sxe=E% z<`)CA8h(3jVlS)(P7BnOwhCnDB4I8k2gr~CgHB%Lb?NhIG)i;LJfhb}4a!^KUk$vm zFi~eS1Dr01Bmw4}OL9Qc!M&g-RG4&?Z^IEa%_l49fDB_c;c=D+1sm{$;tNl_VG;j*)T^Gx?GHMw`Mh;rn!i1!52K=7WNx| zg?v}o2fl>-SlCZ|ImlJv*nG9i`xwpm#tVo!su)1qJa7}HtS zGzr`YHWp1%_l0eCbY8p5bn``Dz|D4B5@%DlhW%5$fAcAY=^PicSu{5{Z)U_e|^bdb$aLxzm})))a`Zq%OI>|?>t=EtLmkvhOgc5 z!|5%*=A+f3)7?7r`SW7hUBAq|=k$Y*2XaWw-L-%H|NrjemIA6|1!Oj7vjwLF7j@$# zIVrLUU*|p;_7HaSIoWc)pUr)s?%aN)*v;&4&3k;wV_SiLwN=1^b>wsO)0!#c#|l_g zp<-XrQ&6ZikAvJ_g6UIIR>&nU2o*z^iW(yUyffsHDGqD{##liaCT|oM6Z*56&SOO^ zGN_PKrwpYyK)#4nUV&?hAs<`9qNr#HWv~_pK7}a6GOQ~gVs6hC6&_XMeML_ZS}Uke z%1XkN*KEyymoOJb(B`#kQgYvf#0TbXK5GwHD2&-UT7n(TWL`nAna|wioU%2KPTrhi z^aM%ylE6HwWM$@JPHi=|HVf`EVkV)L8nb-i!!EI8UWx1TVyXgtA1HlaK*XG4J%4&> zOcmXbG$oa+DACWOTUc@*diojWLfSpDn9s^(1$_;{g$*n9nw4I@>CUkBfOcR$!5IG21M-9V-j#a#J2FNGi?}S%KK3 zb0I5OnTb^>QirqxxfXeqb9}}ejuoWUwN6d1CS@g7g(#?^6!R{uIXRs{bj@f!)_*Io zWHcXtD-!Qe*_4Z#dYpeMh)?GLl1&9-o6K^VkYk=~HYx9x`8-C|nJ}$@9qigx!YbRg z^OxZGKDeSj)o7xkN2tF9j^A;EW0Wj#!$rF(76%~d5J>8S;}tzcVu z-6*X@R7O@=R?yw>zC2bWsN$}b#@+Ciw&b(2tVnoG7gZ%77TsA|MLZVRu8F5ud|XsO zOzb1#z$Q@4XcYl<6Hh^uqCJ@I48b0M3`CD&{n28#lSL8c6@gEVePYh;*Adz;F;oBg zjAGV&KiewP2pxR?s7DoSf43d(!&vt_AO+eXwvDjKLfc`FY78kiVg<=qf%$}}CUUjk znGcxmS~IIqtOzM*b7dCTr;IiBAyE){Jj)c2dyfPlN|!eED%Jv@zhnueCsovc)spKr zNwL0WUfoj`wr`Xi{3WEm&!`nw$BL9yB?6sp7xkGCoxb#aW;N-mszj>Oowz=evrd2W zea4&gQB{djr+c+p-`jtz$d>psORhP6QZO@?JhrgkRzAfDcyjBP^OPNvISr(L0Fp&G z#+1j38kI&(*siYAiY_GD3$NjSRDo67HOuF54~27}g3+N8K%-fdRs>gQ#HwI`^$~Kk z4~DGQST{*(0|SQh(QYrRNN{gReeYZq`dM*(1`7Coh3_-MX?3hcD3BinZK-d+5i zTmESNFWzGa#2fy_dp7BRU>&ZTF(w}Pr8tMcW?sRLm6=xDR5Ggn`u%b2NT>2xnOWuM z*~nkQ0NV)b`7h@QbdvRK1xdIb)1Dfm4M~{0F`PJiQ=sQVzKPRMFRDWa$LqPxmwFzJ zLt3pRfhb9^UB^YOfV?V8&$j~S#7%RLeKvv63XZ1Nn|po{NN(1Dp8mc1_j@>fbhweN zacx0x><|jX0en$c$d)H?9V09&t(1@?hGnJ1mPGr7DD}MQ>ABg+)=?ei+*rs87ADT4 zf3Pu$bMtK-Ls%#mbgW1c<_oM}h|DxP;kDFr+7jp=>OSW+K1Xc1vB<}yT34_)qXSTP z37)u?Bc)W4a{R}C@ixWr1j{i58^?BtPm08|ywd9m_fIy%+%0R?Iks5iJek^&m{Ydo zwYB(B9;-Z7rm8A{c}23dDD-@2xdoiA?YW+qXg9{UJXBE}yD7-12pwIv9vk>ec<_C+ zRe0fsr=XtWywbGQ5@2S@6E_vEH5C-!(y(shInG(Pytcc41j|jfdR{sS1S}Gaj=0*^ z=3n>oda&jcM#WmL^*C#eewMooq+ed!-liCxxBX)s)^{5eRINFEw9(F^1H6cCF6^$m zm{S?JoH}U-4JZ!}7i2d)pORnRAOT6Kj)l^Stm6OhF_L{*su! z4^LkGYhGA?>b2$1yv+Au=nZ!GKDxi?y7_nSgSJ*bsQ|}#mNa$P^uO(`-P0um+~8L` zu@yCf1FvDk>0mwf+uyf+pLw_*`Sy(et`D=fzvwI2KPp-5_P7pbqDM+T1WWWRy@n(# z%x!&5^!oA7{odAfs^=F|t;eLwIVCnX-tgHt@8t7;W&3`P?=u(|)v1~Sv2=Y&T$2nv z+&}1fFvT_Dph-U|QU<5W#Cejqy1&H4Y1XZ{&So=)7U26Yd_N!n#Ke5_qP`&SYLlH6@k;T%7(`fqr*?_u!1)>Ted_lT*L=>>oZdwDyD7cjBP)mCna#5>hPwI;ypt`b$(nAPuu(k zuKDiyuYFpuHvg)?=NpG_V@F^AMto8N%j9Uge)fihVE(tb$zL&!d7t$2+q_j(3D)L+ zeaEs-+q}>Ex!?PYGr(s*ctZj@KbGAb94f{R{*pqI|D;4w9V^c8*==5l<({^AOubI6tjT|^lR}rf&FkuCw|RG1j(gs^uBmGFofV|Z3N`1A z8Tw{$&RU4#q^LbtP)Fj;zpovAfxaq#lP8YmPkazgIu7BEiSd%FEgdI(!iUhmgqFojto-I6_m49Fum99{+aK=I`j zo_|mYse(P0B#?m2pD;%Jq{$(Fz;hQ<`}Y~x31o`w5m&jCAo08>)z=9uC0XNNWPt`& zHrI3j^dJ2$LsI;2zl(=##K-)pAScWx1pW>~ea#Kn2D}MOXL#HF`r=Q| zt?+Mx-_J3gi7}Ys`872lkyy?QR)SPm&Pe-U zW%K+AkUlsWtQ4HET~b|tZV9iFE*SZ~l7NY5|v1raOr;4bi z-~RTy!Eeu{3cvqLyVdVwiN;RU;v@ZlCabUt#_L^X-xB zdj;fz{i_-8)OY)T{Q@?^@gum$51AdQZt+8Z{6`2}fhZxCWG`A*U9eY(D&hcJA(yv*qc9?n5iZ#pjm8yh3vos~ zV0(yn#8a$-*dV@SUoZs{2v);1AR(|WOj8mnHh^gfL&ct8T9FB`XP6GiL|Fg{BSNCY zx)*9x5ECfa!X5k5t(N7+`f;PxZV@m0j#>qZCM9q&~4KBIbdm%zGii(uIbaDZqqMq`sG{t z;Fcbm%uQe1df8u9i}}K!TdU5t&;4w>`FM52?cPTrZuf5J?cQ;cT+=PmmMvlJZ;NTr zrVDvXx6C#@5p8;Odw#-Io4#hZbQ-qz^M0FuUf$F1&v&hR`t98~7pg=*8;J-w|TGisk7wf9G?y`M#G{Uz=DN7Gw6SHbP~*I-b4 zcFdgUG3(8YYu-Az*~yxp!`rjgtYPm^X`6W-)x)Lfzg)DxW9*J(?c_D(_1@R zM^>BO+Bomt?@#Xcw|2(6Z9A@$ew&`O>Aib;dQV@=Hoa}<^}=k^%l7?xZ9A?r)uvx= zcF6m|J^gY|Z_D3r``~(YfBwCG`fWddSg-6``q+e9`r!Wla&S*C+w^<=<+M$|*WY8? zPuCmLrYCK?KSuX->z;nQ-@W(qlC^dtrm{_M>#wxh=Zm)fUD7_kw5)r2`+VBgU&-70 z6Km0?|38Gi*RHKhx2|_9);iitY>Nz?f60=`IcH8Ra?a2Rmhasf^ZVzVOY)X~JdR4^ zQ6*YYMTgejX8dVi{xko-`+)LyALtd1L7Z>c^@o16acxhstE`;6;(^!MCeH3ni>B2 z&sDZ{bNq{hp#R#p{9orb{Es3%(trIx@S3X%`L8V9O9K0!1w@H|di{@!>(4K>pI1;n zfBH}UC}utW|6KN%k)|EG-r3~Rf23mf*IhW9N9fk(s-KrxKZ&Q~zGRSEuiZT3pG9!y z(4ZfhP4KJ+p_4WGSjHOw2^@be`LyGm5E)|T3XN}TzyZQr#20&O(`eQK?T|@+PEf78*L>TZqV&bIWjKkeUH6!#E@Lxm^Iihc6qzVLq5Y+&3F*M?=N6D6*U)!N~F76bhn!pM8dg%9bXUC=o_;u!&4$eLOoXMa| zh&Oti1{Rlpp*@Z=e7+RmN_-ONVuxzxYG{T`48>W#mlBoGmMCx)=Ld`IGtjK_RO{o^ ztkpqnN93i>cKWL=Cmk+qR=1)BMLQ2c&N}naVOY7itXze!VC!Oc@5+mR7>usmg934( z38UF3>)UHpT|qWI;3qL;CKXYGOUh~E$TiX*l!Xv1B?P8T*Q1S)p!UM@Qo}}Hyjs5! z(8Jnt5H)$$#ErtSMcEwJf@D2^H(H%Pmr9WZC4WcgX}47|n)W}rpd0S0`E*UW<5PxT zwr~V5p6moYA7V2*!oP_0K+#+u`>CWMgf%eo`!rmK zTkdWzajnB9i?BeQx6hmQbhqXMX7Mq>owO0mozZ$*J+qw(yJ5TLes&=vTGZqeKisfE zd!6BA=I-)o9?_tG-(tdMfuU*2?I`Csd*^^8bq{=p|Sn1BsunpiAp1pl<2wOUH4~W?lD=_`GxP(0mw=N67zkd( zQf!=X^^2FFlLZ$?_bM{;N(@`(brI?@2K96qr!SH54f+WfTelxqq)DZt{iTx`|6cH^ z&gYBQo|Bx$fIpiqmAVWb7r#$NP$QjyGqWlV_3QGN{*whN7PLn_V82kROi;HtB&*Km zJ093Won0=MK9mI+0=Wm5S(F7Ue=|x-GuTh_lS=h#YwF%GWty*#290#7%$X`@qLU^1 z)8F*iO35)iI}*$p@4FS^h9u;?S%|CQTU0QGeQ43M2dpBXHwKF`RipGQK{@e@KzmcsY|$0AH5!ZnQAATK^h6-scm(juWYowEYd-_LUJB zv0D)NMw^tGXig%gCwUY~n~s2qmyPokM;gxBP+cD#I`=l3q{uE|8|j!eNgc$>#MR~s zOF^Kv1FZEJ(CP1TW(VR@6|x~({L-!gACsiw(0h!CnB{dOe+@dCfAd^)BHS8Zxa--Y zx%}`fG=PHli!fC&%AxX`Xl>#~GA(WJBaE&nF`#{pO07O$e_w0JlY3}-mc2vy<0fz= zfceH2S0PF(h* z>T{%IpnBCz>I-omf7wNZ$Mc6zwIZ8$J*Ly^x$nB!I!)LVybpVHU8#^WCRMEQv&$5~ z)r5OQcJ^gL#it3XhtNsrxREsSsEm4DFYEmJ%pzU(xv>~W|DAfYyFI7$O3Tn=hhz&j zk{JN!rzgFVJgJl~(-Br3UfRbUIxHhh{EO0WfJy;>&6+@xfA%^=Yg4Bn?rshhbMw2V z;6Yb0O?zvK*+{T%E58>tZOSF4y>TG$)>PXUmp zv2JavG$B#m-{F($J4))GaQ6bfyS-z5t>z;>Sm|`xf9`5L>dFx1GieeHAjiiE2R^?lA3h5+Nf`DC&?jnh~_phEc-!ynhp{R4Sv((B-boooj+ zvPd&$e>uZ8&Srv!DiXX!W$s?jMbPv1#454vUoKtg4t!uEX)~ETysW05=P2sjFb2d_ z=6-rCdU4Jn!_EZr@q3{5yeEiO1Y{DC+QB7ch$*j-u@-zICEVZF-pC;JE#I$w5Pb=i zoMI?kF{Czo&B#}PuI`NB?s68yz%Lf2ifOHve}J0SXoa?xp1G&pv{vzB6YJBI$GVrE z-W6ffMWWGtvzkOo>m78R8R$8hIH0-5oySb>I)XtPpX2CoH_~$ebl%V9L~m4rR$|=x zqL+CjYG8r)UI1D$N_KV#@SWG=s}=z^`;#{1)RyU&n#B0NodbHFBqwco+E`@eFFM$+ zf5Ptewa+td3ePZ*0e|!u+Qtq zL&{c7{VSnL(?*UrMhTga!!MBw9XsOyz_oNSr~?o4V_z z=!^*Y9JLtZo;4YBq0jkAzy-_A4|y&WHy2kc&;bZDAk@1+V$?q>WZnd}>+;-MW|CQ5GC5)7yc*iw;BS;&nyOVs8dSMpCPg4uf$fU3OZj<3`t(N-`uPU)$Y? zhNc#;CLw82MbI!wKheuL-j7&GUzR!`dCp=7d zm${tE1mz79yXh=U*{*stklD<$ZjhGKJ+HITO{i`lP9$&%3Q^~mm(-dCD*@k@n*|ns zfqeR#ug=4@kY`Bw+LT${s4WXTJ3VK3{sO6i?uxVU`2KIl8X06{8$!J{b_PoCc{ zuWIbmex-g*{j3#YPuaU&M9OwGkrb;)CU{>g_xN5UmjZ?6aYeI(CI{_>1sFbLI2=5u zoy4T4F|oeZ&sy9X&QXl7wuPQd&H^+?#G;tnkvJG5VrXvN2wMv#o2jX6$Co6K6vIHZ zT|Da<8D7-h=mnv^Kw}7hVe(Z0(>GDz!pl(;d@v5oo0O04A$B?L8Fy0hEde@f zBLjWT!aU#Zgzu#2zkR&;5BpNDuinwx$rGEaPI&V2E`7PNI{Cv8VTI4n&XpZC`iYCT zsu1ZbmmZ!4FMsC~Z!KMOAGX!NO`~eizlxpW^LWsrDX->ikQ7&~-l9%az|Y~LE+d?D zWlNrcwp=4UhrSz%qu;Zdz+C*$Gy8C;Gr2KfRbMG#gGk%ybH=rbYq~-tQr-21V|8!C zIeDA_7;Yjl-@elew!17?tkY;p>hr=t%bx*?qO_5`Z)FvI<_G$!QM-l`n>^2^`Jo(f z8Ze7E8pePycrkc~Xsk_;w9juesu)XLC7MRp{#GE#O32h=rLIf>?xj0ZbVu-AdHV>K z6NtWhIu>6nFix3W0D4N}9O7DSc~&7&hO|%(dH=4LFrNh&e=i7q;ayJsB-Ar)l&B=K z5r|5oiD?G%p%`q1=7Ugb07|4}$aWnAur^&jc?@|SP?eIlFN@pLN3n|-VDrfxVaE>K zEPS!rq9d})NTQ5-9?jX%#3UcGWIH}FRBz=YXj6F)5%yjVYga@8=tKaqE6>$X zT_K_=Kps@OL_*EX-Y-^ox$#y@s`=Ws@yMq?V<%yJ!);#wOB5-}8rkYRbH!o2O@XVO z@5}=SC2%?i^isg!2eJ$XO_uZD0E#PS0*z2Q2HfhwfAK-XXz7p|1*bje4@!u>oQQzx zhpoa`80YmNzP?Nhi?pF)4YX4aWJpa-f_V9qQUsLA14klWWK`R0i<79SwgNne=&zY8f}4odKT$((n5~3-ZK0qz>70EFlO59`R7%7V&z) z@1IoAZ+$DlV#}0Q>ARGoMva&jNmAageBobre_(S&#lO?4X^8G{?);fo_s&2R^_=es zioV=Q@xZBsJG^2RQ2Q$7S;9IjNRES=KLwPnTPaw+1K1t~Fagt_0;hcUXXx_kQNS;d zkB{A>dNV?)?Y+}3SM!=fvePZwI^|z8z3jkyH5$_H0S*fvZ2HL5R@(+n?Ce{*CX@Ew3=etfmz^Mn;*nmB~x9a0_dXSZ6? zp5;%!!l_1{QEO0C^dzE|>*i~qFlw!U@2bqF&@!bWY$6Q+wA9amO)tb6(mIUfADNf z<*U=xj$1y}eW6!PyxLCGp~@=^7Cm?HFvP=&m&DyZ^5XX^3beo|<0mavxNPhQDlWJT zKZztKtM*yaq@z%vNA+VxCk4YBS(Q%hkp|06go4C9kFK65F$YTyaH@o+P&g{p%OY>7 zj9j#|RwC4~b(fo>R>U0;=g2S|e{Py$N=3!hRu|M)xfw90NEKQAaU~dS1`Ms$?9J;Mx@a@WL!7O~%BV(IW^(J6&m|d0kT2r5o^pzDx z0E&6xT(6EP)(G!H(`gW)=&v-Yx-w|t1Ai#cN!00C0|*DF%!^S-L$7-ff8#G0^-0{o z!27fVk4zbXpAha6?z<b1@;ZOoapSm%4l2H@7cYT=9$fA3>nSCOJ#o?M zGZir8E&~UXm&Bs1S1K`#YhCrlCCX(S4oFIHeO#F+{T(Vf&1e~$bxqw~#8IL@q_n2X zzQc0`QThg${rp|TDBvZg1*CYU0BT(G{ec#sJT&8;3qeo_L<{{Ue^G=oj{A?A`K^_} zBtmFiMDBuN!b>uu*143Ag9706PEf9^!@}#h(6k{e9$=N}n*zd77lEPGm=4GRl0E<{ z+sZfbMK|LNzsim89Qsxe$#|xE(WHo*r59mcCd1=FT>-9>m2T>4iTG> zcLSCrb#Zm;;Zn&~f5|0l4!8Y@XS5RKtiqw! z9@KqEeHd#Z8^K~!G6WG`TILB^n`h_~a$c>+=t-T`V&N}~QmLqP!8a!-KQ>hqLbt%! z<~1F@GLYZV(5r1VrYt~$`+L-7U35=`;GkGN)7RgbR6@6lf2qLG>#k3)%R^Z!l5oR1 z3FZZK8xYpJnDE}&71Lkke=NY*y$s2jP zfEpEunoTk1*R51Vo)oM1+aIPt5x8I&7_uYOTVl;gI7uC-+2db@th=_?K-g}r_Apk6 zP{&!F^4F=P$WyGOnU)02NY9eDcjmFJ*160_QUJftm$aq@7Jui61NvmnxWqpKWG@yg zH>URK8937`86&@Ggk{026(Unw%HL6>KX#35;<8(CpERQ8$?x=RGf}n`-p;GeeR#>96gl0(5Y2MOBAp z0ZAXOUvludV&hQQ;In4hLI{WZ(1zVCa&C;+Kk$t+D1TM%Ae^vzqjIR;zw$Y7+!RGm zruDDuMjTsElt{FMjOd2_g3(UtXL7b(H^2=A<(Wi^7h@}1Y1Jq4B7lo%uzHpv{f;4o zGn|(QF-D$aZ+E5T#j+zUkI+sFBJMSb8}bb@S|w0Kb*JDIn?lIE*0sdCI}1s+L4*~V za-!5*Z+}J3wHKR7xo&ABv;&Kg>0DS7d_^|$B+pIHwuPYb#UK~EpWe%nKnbJ*b6_x@ z6Eo!Cv2k>5y@3^G%ZB-qZwZfrPc5hh`mz4*!&QCCE6@7Ry6tT`eF5q9EsPY7l4>4p zlaXyHy%>9VZ;H>)<>S@fouM~4l-!UhUYk}pP=AS;J`X~NF+?TOr>P>5TOm{>Wd|Ba zBdQ)`#O@KI`nYv# z{x%8x8u+3WY#m{srq{ZtsODK{#;HE z(zeV?XwZS%3#?NKL^V*nj8`}@P3rw^t##DdKwQRk3}MKpUu$^Ii6vv)x_7OU-gbz3 zO~fk7u-bEMU`>qv>wfghE$Bb~^MC$-orV6puaNV{R|xC>_zDU1|60!Pua}VJKYyQ} zu!vvTyXarBa!l>a!lNR_5&vjvGw!dLIpZI>2)e!aJw_AgC$K{Lr3LsCdGiyVv{tlX zjJa6+I2ZXpjWXLWza>3xNwP$WCP#mbFaN9M_e%!WPLLZ<>VJ=dk$&WmlVK%0!h+`{ z6n^xQz3f*M$4^RF8c76!Yr|$vLVx~83h-AW(oc9VYs=7Oep3;K%Aes9zvlBJPe@ln zm(U2lXNd8Wp4ij=n>uYMCM7h&P5F~tiP81H?a}mO6Ja(AM`F{o#QhRz?tW@cqhHi4 zd}XNdBR*KbzuNm53NxGe`dTRSB)_iT`Ky=iSD0bZ+bfcn#xj(2@Ijy!{eOmqI5zwn zrb(>NNhju7ZX0K>W<_=8U;S(CxBlv{PU)4@{gjLj-P;EP2mpN$)A>$@Syg!sd}byv zjeyy>=ju=iMzX0>&yNK`+tp}A`c^(xsl>Y zdY+l1w0Czftrnc7G9+$b@P^&O`%&;4B)4tJ=Hk z916V*=lcu*00QvMJdn!mXmWy}Gg*3%?9=#keXD*(>i{4B!+(O*H(j}Xzg?*F|JfVn&=TN zyqA0onsQV&qH9M21AIZ+UX0vc7!gtM#!_1}%`tWG5LFd{l6xrV&n&?K=+%xfZJ{+= z+Q)Avh_&ud+JCCBefT8&kRmZdYgD9BEIqxW|=_lvCtpEmj z6o}T8tcPG8Xs7j=z=B9^^K8LEc6~|0hva#aOfUffqvSu5MSelVfgfp#N-N8{8#i3Y z<8KweAZw-2+rc4{=}93r6#gnbjJj#21&i~=@De5!n|~=kKPeV1W%?ptMy5nu&ljSs zs4wPQ7f3j!f5aL}OIusL#AX$}NS|bdNGs}5?_rdz zH)b^P=I%h>O*zDv@I5Gx_`)$4jSD*>@TLze*m_sbSl_pRn+6c4iVHy<-ug=!ld|2x zj{_Ia8-Jh(%yK_j6>pQ9{Ow;;v_qwmM*CaRWtAE2j5ROs4`UF^h+** zIDc-G-Fd+>?zPJWo&x&F0X+)?%%{E#ISHHA6tC#vbA~4Nd$dJl1(C^r& zDpEE~prKN(gtov6S#7PL%30j-T2nYVh+WOaK7HveZlO(YNEwxB18Q}?SbblEq&|lr z)2+geLSg9Fo=BS{e6XdeOy{|`4uFbSXMe_FbG1UcaU`FUMqtpez{AQJkVz!5{*}gW z&RNToP44T=b>IY0$komc;Q^8nQ4}NUt$>8;x=(BkkulY!0G3%$q`?T|Nn6h8!f`lh zpx)-=^dN216iuEykohxp?X#r|Z0F(rJ!3MJvTvKp1U)yFtQN6CP^x{8$&h*XI)5Wj zyr;z&NRvCgVuRi6CAQ>Lh{#oa?$WH{&ADc#oK|@EcU}VU6q1TDuJO|B zY9!5MNptL;sFH{p%?;1z;N}(!jNk&R>C)&0Ll?-FYu5crV!)eT|+>p_`GDR?%F)o>aQUT2nPoqvh%{At&zuG@tD_5($9vooby#(CgFnyv&%BO3wk zQ0S=>?|hSp#X=0gDa+14l5bEAkTUNb5Tl{w(Vw78q6JwvP3-WCykl_UD%vu49ft=O zXl0J(i(X~*W<<~eO#KgCF;LQ~d)_uXny{Yn&P`8&z$f>4mt+=eWs(#iD1T`p)cI(~ z6|ffp&Un!&Xi{-ww48F6L1-!W&8AQzX#PAn<5ORb?%`d!oz_gIwt-eDLW6>ucKqx~ zrTborONFL7IzjJBjTWuDi0#%iA&BJM&7F!yBjOrD7fUBfuoG=lc~y~P%HuG(j91xV zXnf1ntaSl_oJHhlg<}{0tbZbR%MB0MaWUR4+z4(!1p)&EMbc5#7nY;6pwe_Y^DmR6 z8`YdmqjN06+Fr4+&Xo?fHIW~x5Zc5?PgpflzBaxmXU4BsM5SB3!xyO)ZB~rp!M^Q1 z&|-$+V<>(2s$032D@sa}T7jLr2J+>zd_(a=8aAdMBQ-5wv7zC_QGdA$iCRygSgI8vb$L=$Jtfn(-JW?lXtLH4` zRxRb$$U_IUjKdjNaewjw8}Z%M@48(2Ar$dpVHwK@dy0j(nyR=2@O?IX&BidfxjOwaspblKfTitMbwyt!Z#F#lNet$78Y<9z@m3F{xy9Xh~ zmDTXa`h?QqJO;5+S`x&{s+$kl4`JTv8nx>fjS1}j*|Ryf(ICMo8?@}*OF|uToml~_ z1iH7fzwcMTd92`bovkvg)7$|aO()KJNcZOFI4TCKd~!(M-*^3JX;63~<90MmQSKy~2`YTY(a>$K)n0*& zj&ghVZ$*71`Xc*+C6R*z3-F#s(^%9g<#)+N1x=4^=JJNV+zY=6_?yP<~6-=N_z_KmNw#{m#+~ zYxtsjK@DD-J5po&mM?3~-^o3>rn1rdNP_OSqDGr+lr|W15JZge&K;PhCGuRqHK(N2ZD#KP{AhoOv1#rgqz?lR0WUXl&i-x|wd=orK zR9pQM5nVHcc@lqO;W5Gie+%&{aJUXSjMX{J21XlX7)@&AX@JtPw3xdXys!cg7=QC4 zbPSI=S%XOjIsoEwKTWI*VVNk!hxmK|py|re%K3K-^%MBaR|_&|pyzY+K~2)=boEn; z2!D7a$V%U_>V@lrn4=-zx^MOvjz$4_vFzmT(ndpINz^qT@SAa^GnKlHLlonUoRjT&M4Rjy1{iIgnXc9)0~p0CTEdA zA7&a-?X!w+>Ix>UqKe-zSDo=Y`gTE=Pvyf=P~1pg)5A_rdJl##!N#QrO)P49D;n$} zr+0wf$?ffPvJDrgoIYiwBMUiwXm4eN{jPrq$5VMIW@d8-!EYy!hB;(vv}_eU5te%P zU4mfeByU!u)LtAcfX7I>yXDfS5@=vh(6T!*e#oAfx8A+Gr(*)yX0~SD4%E{0dQxcH z){MN!+4|mI<$UsR^vrs~vuVeem&6&276u z(BMU3YBaGCxvtkK6T_*D)@2k zOYRFx4gs+*KJTH;w#-~?DmrbNV>L}%AnluWQ}&Tqp%brJ@yNh?CTQHy_34u#eRQoQI1m@ z;B)BPSs-O{uGt z^qZp7fhjEVRyp2)#rsgBKH@<^Rz5s`%X4GTr2!k;%y59(c6OwF7O&zJ*EP zrpRZ*!ma7I-&n!Qy?7x-t9*Z8@F)o7?{jKH+H#@XTqD?fMw%R@5fD$WiNoKk$2|s^_!dKpj%rUZT zZR|7ki934sp!1R|6#!JKp^L7A0lVPF@eY5@R4KWsMTYOfW)C72U*om?SS z(=)fR8;IN;21lrp!e)QYXZ0Gj#thbdFc=6qA?Qkd`OjPT4hYUpIfCG~0z10(go$xp zOlQYKG*HV^W2W?i5!Y~B$M%Pb<~a480_e1?NQiiYFY7(d{#_8TFkTlUS8%ByLarr z=WcKx^1-PJ9|$R6VZubt$dj9NDb*3^HB<~qtzeBOPW&1=Jz+u@7WuiXMhGMy8FPhK zUy-O#giMeC_q%^beoCTAc)C`_9;+dtSX$qyrBNYpFbs6d1U^HNhhLSui3_W)j7X91 zNxqhOxaH^VY4_C%?9m{MukjJ;9ew`<)iveYrLzb~S7s)YTaIp+2;! ze`0k%Q?YL;fJqfV6T*AYmqJ# zzL&t?vl_-Jjamo_aAbcYnNQPFm&ORw$0BPdPg8%#qD<2ry<7#YDQbk3{Exz7;n3kf z-(G&FYvgNI`;l$RZ7HPt5Bt!DOanUa)h@*Zyl~cu!R#N&vdf+2W*M=En$309x>kq1 zFYQZT52x??^Y3&=Ki_kJ^qTMBV_B9@bJwq&bN`E!P2d2+m*Col{O|8A>|Zwj$hQ6Z z_~C!q|ITPfn+eV4Ecv5(Hu|(c8y}jn+32DN`oVq#BUhS}Ipm^!de%VP)-{^*x#UL% z?zU;w4qBRxxwQgk8k=eB(pQ?Xx%FZ&n!;$|C+85;vkgs;^!^2@T5D*g=X;w@=$EHW zT0?2p=6=ci{fy_&_2v+O0Xbd|Q_2FDFLrW(oCI-)FtZ~g*`Se(BrtP^<{GOuCvJ;fkJ&dFN5js zZO5Xubt5l6o_YR+)pGL1RX0t=|MbVy^r(z76EppM*W(;9G~c{5E6Co=H|;1fAV`1J zLu<-Z1>WGt0tt*+XPc%8P}laTOE)2l%Hz;eXddPf0^GVI}=gy_MS{KtEE7> zX}OBOOcb>sC@N~fllk8##l1h5_xpdm&zX~x%uaTsbhWhWz80nJgmwXi*g3e#*GxpP z8&9cnP{xUOU))N?i)Jky(!4Zf6QuyxiYPpXX#G)m4BCC9ol)vmQoLs6^l z#0rMjbLhuhvDoZCii5`vT#&ugVEfO*yUAH_v1MyvBKoKMa(@SeMkemm{j_EhcPj3H}MDsi}3-kxHaZH`TI*xT1t z4!evo^|?&UD;Oi~=-kkDamas-G(}Xmc1uI-Ze}`Y#X1j=ihTX}U+cBKHlnPGhDawXtv1!mGe?kL$bBzGv6;zEEz=jmYVm2G8a zMyeoNV>{Ya^Y?Lb2$bes<>e3|Ep*PoD6#j3cOQvPd5?HCH)4)d@AiKT-`nu8Jjd!8 z9|hMRdxbr>V|TFIMt4YKh0ewPNP?5k&SE#;Az*Pf3r@(cxuzmdIP9HT`_{ZM?tkn{;hygtXbRv)!b4o=(AA@9;YRvT%V& zyw|nh7M1wus~dfMbDMwbJ-~*4FWk+#%<}1|7K+)n$poG_JvNG|P&&<*%lT+(?hoGW zg`?zNA;z`p78$`_!6u@Fsc10V*DQ77}D|@&fmM3RmOxe>rb00U)=n|qqzCVAs>2f(%PdK{X9eLFA zQ&;dYMl>jcV>Axd&uOwbC@wMzhnBBXVpnOK8>8FkM(PVatedZ}tk)~l#ZEn;X>-%( ztCx3Hw<$5!`Lj^p5jJ)tNaADu8f+)cs<72UB~GqM^Rj^{hk^z*~{JLGdz9xc3;@DT6NL*)@+%3R)c@gx9TdV&meHsqVw&bHRz&l78~n* z^NVTzChN;|cA8kHv8!vvZ1@B%&~Jz7+T~S!K2J8CyyD z8yJ7a;(l~588)w5?7vYRkLKnuTBQ5z>lqDqidfugdS7ekRowHuvGnD1w>`Ki9oorr z=z;OBY|>7&@N4t7&vYRYr{3(*gyHDLN^`ZJzxT0Q74@?XyF}SezoNb8CQBQ8qu@%8 zaj`zP{5;)%j~K}{TSuDrg=IS?cslk;91nxb^oQ#xG|hORd`M(+xbh`edVg! z+D1GGzHVcD@SWG0B&#Xic?n4ukB53U&!_ouz8IE%I@7^tUtSvSp>of@o^aRUnE%P7QRxjT^kZtS*O?5I3Wbx<|=eQ(y)J_ z+_sxIEw8$N)UR$=x9>f*&Nuh{-Y+-$Gk1$=_hvJrB#!1T8^dM|+``?yII(TH&0wnS zbAH_F`^UBnly-M_N27b|_?k5;*6+J-ZWgzT+m35@J6VNKoS)W?*A?ES7}zNvO;2$? z+WY3&IuBR%ds7a{>3CUQJgs>!$3K5fO!TU=YN5UD;c#i{t)`8p9LEM>X+lGXTC2S5aT z^y7H&wSVTT@~s-tR2e>ZmNGjJhS_pA-o)b>C_yOK_#C`?*zJfXPQdbmJ}!S4(QJqw zj!)}I3H^P{KPb;uj?Pd-yso1U8HP3Bh7n9!`0=8IbI~wM0JT z)%=>AgGVu0D8{@o^5uFxf2YMkVOC?jEwdip@1&Zg-uYsNeg|f@8*QU#;U}f>cpeAp zLU~nIG)&EdH$6V1T3c+a0)Kz8<)-NFfpnUs|0f`o+dtn}cMk`zflSVA3f-e6va)2TBq*N)HU*OZ2IfPZa>~D`ucEx4qkto8TW2e@^D=*CNI!pc&lj&A=r~r+hL)^-Ejp>ud!ICHg0ITLvo!ssUG!JCIHX7J z?1$LgZ0hv&vdL|GJ~q2FogLb1wf-6$?61q|Q7-NGn;s5y9+LcgzTRC?(#8aBo|gwHK5{U znl%nqvP=A43^adb{@S&Rqq{DG1jl%@w)OaSino__*NzqC{&ud)LvKyF z(hv13`{aL71N}-xJClnW`wgc%ps_qQ8VLZmC2taBZxXm&b!PHdpm=a-I@t9aPhSS} zxi)6bOCB%~0nkS@(M$%|Et)kg(xIoNWABu8dYdNho675v_s-s4$j06&9lNLW?dDmc z`z+AgbgiFUxmoDgZPQqX%s?VFX_1|+B0E_kH4%T$3<%R1oT9V!W@qainW$FlRa^QR zD_4lIivba~M@g_3(H=qlWisl35EkUoiZEef#!?5B4#CRa4`{w*s%5~|FInlVLvaS5 z2Zr+4V!;rwMc{_b!#K@-4M;*-fv{>!hQM9S%h+Zq!pzP1YC7fw74jc&S9YQMS{{=n1(zpJ> zk(|FNaru2M@pJ0&(@9)QSXbhC=;8E4d3vI!NcrGg(jii;f|`h@KW)vvzOT#1lxGI* z=%=4kHiA&Bv7iJA?|$?lw2?Y>yGX^N9Ql75jZEWgV6?H^T*2T+jmMh7l3KS=@EdB= z$^_Bc&cBU1qeY*+K?6r8<8Z z*0)=eYA6a!q?$gs{n}YT;a~>VT5iq11TdPGSrY%&2md{eP~jHVZ#<3kH(l#_6d{qy z`VY=NI39Y~E&4Wj6zOgHb|oH*a!0yyni|w3=V}CEVVw?w7`eK#Ak{5s0+!bauyTt8 zy`=96C14>(frT3yGa{O`Q$!2Q47`6sYP26620Eh{we(3^Hw?JCram)CFyA0PFa6o2ANlpp%?d=VQealX6l7IyfH)(X;vRKDmvQKC4nlt?MqZh~ z$=eC{;`S)*MDASo5;|d&8V0e@?m7xl=_1h>sEy7TXr%6igMBy9K{50$3qaH&^2-MX zg2x90<6$_$3dRhwa-G9JxhPxxZ=J%hX3l zWh1XkFV=57yuFj#^}0&($1i`Gk+jd`f!{y0mvR{Y!2IA#_nEve`@ytr_MU~7xT6nq z?GodX5<|H9@+2Nb#0(51UA55_1hp9E25z}SP<>hYWvct$(#}(YL$cM{iRWNKAxf|g4Kz8r;^z=%a~>0#`uNYEwn=hAbpQdph)(JddmTr7#l9&hx3 zP?qSwZNRDLui{5v`7G2d-s+cBSoh?6`e1ouG8-1=I%S zuc(L&1QAAp?5K#a8B~9v#e!5jku~OuOM+|~N@a~)Pv?}fd>&wS<@S&xXx%gT(Gpn6 zd_i1-_OB&g7>E;$yU4~q9|0!_1U12LbcBbyheMf`M>d8gFmhgiAP50v8l*%oR2_so znIIz?S=vZ|HV&#>-~?7 zUArIL8h_}lkLTE~T#uWl#;cAGNE7H0daAtvs@`R~dx{ePbv z<4f{#Z%ZSI-=fzOKfL~9Bi`ToB4j7p{q}vlu>X~9721E%-e%Z&y7abf19^6J8P!qC z&0V5uzF&I*YEs-iEKSU>n9mL5>za|L+Ok3u%I0@qnY0sB%l$9~!40A48iJ?QMjF9n zr@zaI_HIh-H(TIri3Dl9Q@SIhM6Z@wdbv1bASKPqOQ4Gk*ekJ7bS0c++vSx-U0nmC zrryzrzTJN*0={-Qbz!);7|hjyk!oE6$1QQG4~8AK7Zj;?!#{iwsV6#5ZodC^f9rwH{n4MM{&qk90c)0d_trS~ynKIX_&%|9U!LJxT{gUI+_uJA0u#M> zX{unZ-ZmwX8&?<2*;C*5a*a@E8{)) zah?@~MM0$HhAC76tfojP0cBkBj%f~u`@4TOKW^XRrsc{+NA=Ex`J;a+h%bYg@UYaI zl%oQm7_PcCUZ#um^37MK?z468 zmw)?tAHRpLT{s)qcVo;W6Y?};t?FPU<%Zap4r+9xXz(p>G%`V%!PBLl&(F3}JHUS_ zHA%5bG=T|^D$)woHuB+>w^4j-Mt(lp!iy6Xz+KmZ`%) zy=ttah^4ZJmjjVuk&VGxH#D_~BE-%S%Mt{{(5>z9ifW>Fszp}#>{n@VoD&ro#TFtV zTg~>tDZQCYB;Zp|_gSWk)C8^;Zupqp7$f7iYIz_yW&}gazk+16B>sQaxNM=oK=!2n zz|tH{SA!MOi5AF%#$fvnG3)8@+o$?v`w!6ebm{5fz1i?LeOzg4dLH~kmzvg_ng7ro zUG`T@?uD$67o3h>HBtLCYyx2AYT3?B3@JuHr%A`R23HY;m1%Y45#h2nFbfbL>e7~K z*aD9UB=w?SxbmpqrB{E{gP1XmMr7uTW=RXOHxPBP+7uef>txe7ZH9G!e#YA7fj9>DQ?s3)2NB-mg_qhGj z#~r~I44iXK3+sXXZY+q!G)7{(Q6FKLGmjv$(OK;}p}PTA9^H-Sk(?h0c|>-R(0IHV zo4J)9d}0vvOt%5SFGhXDv%T4>v+&G_#skc{M%V4h5XBpTdA&9ZCDd@O5fN!-(YhO^ zXb7R%Q}-QSh&g}o$t(6%?wvNrTgZHQkej~xT7aVByF74^WPqJ$=H~J!V|v#=^A5tPwtw(*ABW!Zj2-+8L$C=5PYHsBX0{pDsz^!J zO>LvecppYz7~PFMVTuOacLn!-Fec0-p5zrRs9lN!@CtwC0i#RSQr!|{4M7x;mPQ&& z*Ov5ePRKU`g@RAGrI6GZb}NI=H||(*^&-W)00=PdqP1P1oMMcXqbE{d6WLAb%fo8% z1hv{LwFf4ea*EY`xP%8p^ulc-j5==JCyMX75~E5{Me(BE5GpmwqL(}t?b7y`k}{< zE&uTNZ~N5${>AejT3n=GptIl4eYV0mAXf(;^ccC;KXXZL<+YU&R_e(@K!Gc_r9nI- z#8H12tfs)JgR`fDI7WFhJ)P7T2hmOs=#}`e!al?xByH04=+6T$tVTK~SdWwwVGBZh zVi=4#;72wv9%7$)D9~+>m-N?Plcz=aeBM=QXyEFiiGW`uI z2&FOK9AGs^5OFwbNPhTGzN*(ie+#vgJWNcZl44!GgvuYW; zQ%Cv{t;@GcT3OIwBqr`s&kr7(9r>TKcAGd0%BXwhU(c1Kt3r4P`wdplP;(-H~BUY!RO`nhx zLU^>+n6!a{5=^&x?lLWhn~T%?%7ofug3(-g8~M;O1ebU|4`z+l2uB`~Eal-r{>mTp zvDb)d*>{}vD>q=!!ZmwYDd|Hyv~m$KCJbs&G~@B7Be&otdgXG|_O}P&`-R{;|gMnB>g>( z^X8fHqtB$Bh95us!!v(F^2xoQjYz58pgIu7Fo?`Wmkqc1-nKyI7Kw?uzo(30ZJdrY zsk;Jqt9NdQgm}4}I&Sn;`yf0dYNDN*3*uyX*+(zTGhGMbZ~UgjAn>~7V^f>$y+YkO zT-7kB=4o@4G8=%O4MAiG*gV0H%h}**kdLxVXWTY0U<>OOW-fmY-Zdj+m|vWYg+xkF z@v_?#HiOrtbVWuirW(PNB2#e#%zJyS0uZg}^PBVD??&Hw_n%n9J-I)6TiWB%A9(+X zHGku@jS@YNl0R(pfAW>S|9iiv&x`)pjqmvL&)j`_`L-dw?M5O+*x!KNr85kTYaq7A zf-Hl$^wG&hm?(dh88k5oEOJIFB0S9~FHKsI6mtF!L{nLiU5IEXmb#D;BfLZf%IC3; zNrPI~GP)=x9r6^W8o;DZEd$)v77kMEB9bn->H%YcaZrGCAU z6I<$;kC=UjpuQrMhPuyNEC|+c8aYD}+E6<#VFx5w)aQTpTb5%%PX@{khJS@U%XHvA z3jC-ftTFWdy>2*IgS(e8LsOJ{E9Xn&UE2_< zFYIS$XYxGqvyL_HemOZa=}DD3H2{_#5TYx@qL2UqevVw!*1cYFB(D!6saNh&uS9t* z^$PmrihlAO*&jC9?f&6swpr(Xbn9O^zK?B|{%L=@c7Nqx{){vF__X*dSJ=6Kz5n~X zGsy6+Mz-FTK7x8oeG(Yp`l|%BhCc=)U@-wVF@}DnbC+Nd<(MfLz1;&6)O38V$LV~( z%#R`Mc!I=rXSSK9Us0fi^Xtr4MkdHl0fa{l0uLjtO*)& zYXuG00$808QOS$NKHAkr$aX9AR?Iu_DfWL#D}{@OPjkCuO@?Q}b@|)T+~#{{FaoeT zH?|uewfB1nFxRI4@^cbxFgY$V6qQ==vvU{!RW}9g`=Iqm`IG^)GlL5}1=wI!@=K5- zKm^qsEapC?MBn84m_;MM48eo1NW%h#Fv`GYZUng0Qx0dCUItTG+MvxfTfTRnHgJD{ z)j@ul@&wpG&A{T0GZ>^;Ww1dO0fV_MXdEvDl<=nD@b4+f`6{Qu8qgLc&R}w!`6w#? z^$uxr(=hI?;Vz($?dlrK#YI2`F1S7K_b~%#7^P=&RI`r&9oCikEL3Ady*gz$s=Ugd40^CN>0=p4MjpOQe|PzOOYEChc9&5rI6B*8D*JsQZG1}e>-Nft#~O6a2j-ZW62SmAP+A!KJm z$gi~;&`%BMyn*cI?7(s`^z+n8@AL!)X@UEG8-jR}Yy~79_npr?anW^{slb#N+lA3d znfm50$8r8C3mO)jl884z?h?;z2E|oi;KlX4V6c@nL<_;+B zu!;bkio(kqq-%dE#&#-U0}s<~BuLRZv0k*u@JSK`LQA640gnXfS|^HgCmMd&`AshC z$>>wBnY%B)7B%^qk$nw})o|WGSGCal_`=itLsEt=Juq-t&W7MS9T!Lt5$p zu_D_p3T)GnAqcK>Is=pjzsr(;B~6=8AJ|Gh_mds0m~wxR^EXom@IQG+K5HW7j(|aV zPgD8EL9co~OdXjNaZi%AYzDBp%N&FL;a?OrAnVIGN*oBNLb<<#rIaZx=w%2~1CI^F z5g-CY$mIfdk5^sN74*FE8MSBFvD>DQG|>x*ujuaVVC^^r=|S^o5N{erW4jv(Bj{Ra z`}(cp5b}SiZ<>ZheZ7gh7Ug~Y-f@WesaNI8{!e|^wb<|L?+$i<(|-m=N*+;yUGCfg zt^FpaQf{$mXom$czU2~acrf%++QG8{z3p{OFcMB6PSgRGo+VC~!a5@-*7=Fzsem=z z(LmogJKzprp>@v|z|v#AkWn0_BoSI6@uBo`&kBFUC2k%*Pl16C#KOfHw>t29Lz}zN zS+cL(aR^Y{JQ_@!h7m63N4u_t0_6Nk$06FMK581q`g*JAS`_s47snySr(WVW>+9{H zYccNYZ;nH(PrceutrL*Iqrvlr8dwx)LSo_#sK&b;yyC9E7gQmgrdxPgvf3>~- z>fnFh^BMN?C3bsTPD#e814}kqN|`LEBW1Y|%s`u-RwFp2A>Z}%(dZvMNS3IKaRVf2 zG;(WLg!Cv*UA^4VO2WDgIIyy3|6k!*L$%~HZ>{I`Z+Kn*ujV0j-oKmYryi2Wx0~Z@ z>S^f^1^nnngOTtG!B-fW|9<{h0Fn3_39o+>PyH<(jm8_Q3GR3Q-cB@tG)XHqa#d#s zlU|3Sw4zw${tQ6ME)=m<)K`-#nbg;*^rfU7rHb7xfgs&ylCD!}PxL#idmDJO(zh_EVz(L~+`vP#J1kP^V@SIi7jJ)r zL8s{XMmW`ts=^<;d6l+Vwm0dBocuEus9Xv-|0^ED(l-D2F@%j!NoSz5kzPvtBI4yn zUq-!6Yry&!xF;tneGQzR{4-IFZYgl-8Bzx1SU%&>CI1@IU;I}zkop9I=G4GO>ULz$ zK$rSj`W63b+|u7mKYkA1;HC8Q_(p%?#cxhDF8>;j)I}>Szr)`Oz4I{b?O}ALc=An8 zcP{y675o9?fGu@5#mJz_p&wfs@<*eojpWq^WhZtA&DsIBq7SVaKv9BKzQV4=8UC}5 zI-pxSX|sM{)2e}}3U<9+nqV0d7;?qeH*Y2XwVXSJ%c<7cOYHV~cGdkoZq|Rn^PBcO zb%3YF_Fv-gHxD9^dfzFxlFy9>?X^UQT~t*m)!L17CjxwScXkN<6OQg8{U31n-}NW$ zgfH!Jv=zGIrBVK1NzD05FP}{aNSFGRxJ{$g_kX~mNAOQr)-+mkFACtRkGx3S{14|N zdCj?lkUHM~ru|3$(r>!&^9Fy!i51^`vIB>8%}MGMr*PHZ&rh3%)zTmNqmTbJ8TM!Y zMpmSJbl5J=(hv8w)L^FuVl7R2ACj;U+4W^z`j7uwe$t*dH@$sXVOKRe!YN5l{!I2Q z^{Br2J-@7J7))50MgM?c%3YZrqRlqvqN65H_PbM>*=~arD5@Fd|}ZEEd2+f zNPb*74h0~oyk`A%)L;9KL(%vBpuhgy4?pmkypBHb{dN3-@2`^&e1E-m9GM3|ntXPG zyLTUBbj_pD%cc!Z@Bws{(29$`^+DG;FQn&qaN=0ng2JO^vi!VasNYa2&kKbg3bFe&;P(9*89G@Z@f^cpJ4EVm%RJHe{h0n z-}f)rQuh4|wvb~Ef5AGABQ27QI}3sGXNQ1le#}o)#&~gs?HSO`4m3+g>L0%217xns z&4a`t2-3->fnPRHGz?63r!t>+`&_#&0=eRf08;EX;S_)2;?@G>_{qIK_lY}MuJDti zpYlY0s|O@#Eih;l7xJZc`edMi;gcrdsL=KF2^QY@AO)ZN+oKww1kAvQV8d*=UJUn#HQWb- znULQX_J4mCn6%FmB9Nap;^I4h_$f0e!2YA20|l4;E(_#>D}I#`Km$JiU1kXPCp7$& z`QP;#vpPuA-(?O83^*`>USI+o80x?%VIxotCX>m_8Vna~23+QkUD;AA<@c7I#G1=M z$h@Wxr~=c4EkhSDRG@%D*>1~FCCjNm6<{InTc3Y2JZ0Da6VGiyV}CwZmCp?%yg^{c zKl#*DuBS2>Y#4fgS<3oczR%w;;IUh$pKbG^?IjjZJ`tbm|f|1E~xt!m&FT3DK`WSOr zUjlz=4j4S($OLrw*uJZnlqm%q~`uunK zDP#51zb0TW&=|}N9F87x{`-FZwZw}m;dXyLUNTs5xc>jaKmTG}cZtVT;?ejgyd(L> zq_7e2CGqJ1*s|T0Aw{;MfP!!Q^z_?HIvR4kA3oc0q{wlFl5U0q8#(^}IX>y-3UdkH z)BD}a=P!Q#&|@diS>kCg$1le#pJSAe3Hd8N^6;V8yVqy3o!OVme*6+&CX;+!$^L&M zfh-_B{a2D68;PI){9N+SetSO9QwiUF>yOt*e?Z&I8T-s9{+{1|H9iV*9;w93Ro15r z74o;A*WdWhzsH3@C*SQDEIHV+e&5Rt$nr~mH<}4_lK9z5Jz+BW{O9*Ln?L!{5oiS@ zg%Cdcx0hd%Yc)6mUJ^bPAO6hx{#}2`CvREa0X%={7d(ehAVcEm&(~E0)&1EC?!P!7 zX#P1O2YS0v0*=7qZ~@FE|K9-1v=2PwcpV|-mdO6zQXV`8BhWJW9q>escPU^Fnu37u zhr4qrTp|ZU*0U<-~XTgOYTVUe;l{j;lF=hor`~}`wx2? z9@hUi-c|*_Z?Acg{kOihd34p`KQ6MH>T^dN3Z6gyLsgXj*4(rI_~#i)qdyN(a(^A7 zbf5oqh!VP8{(6P>uP}9%riwRsBV59`-H4)Ib25TLzWS6&DaKJ+3N+#_53=2&sPue? zS|BE0n+YCtbMxYIwk(&MMMr;&>U=_lZa8R~@cni=>?)HKcQt9-YSwt}*LZh!z2R|P zch~!ExVXpL{Y<0zYpS*Xo{~mSw>L#Y|GKzKmFccMc87 zYj#s->9k^Yb-4%V`SpJqozvCVd!6mZw^y5XMLKGBXsGXObJo)2X$Hx$8!2625T#n5 zvi)klFeZMnsAF=$$8)q?(?Qna=~z4Ny~8wX?bmed`_qZFA1$vhpaCZZ5-n~V1eazBz3OkD%0w8V6A&WpOo1Gq^>7;+*xxX5X4zK1I4Gz}X z&3LhZ%$b#O2F&%2i)ppNemJw{S~eVK=3r*u%GvW03a3QV$zVLYz?+!+TXnD}fJcML z>Tp7*o6$wN*fwmp&^4x!Z?I-Vra0Tdxr+%|_cZx1MX4b9$G;5QjvsVY{Lw(dpTe?XVKEl~+ zdv;+AgvafO7mvvqz__hk4xEj4nuhCD8kkN|?RYup#>J+-IlfhOF5f-X5W^SUBn7%G zC*#swDf(mT86st!I^Ao=IHLrWqk%qbRzU)rZn`>DWLkd+t;u;tx9rr2TcI?;Zn$T` z`!>hjexjh~@I2mrq1~y}mhNeqybrN5ldC??7T+#OLtf|6*B2>og{jw@m9@Q~ibRcZ zfiGVyZ8nJx>QZ=h%t$rm4Ab+XbH!E0qBC&Sy@2KZ{8oy3Vn4TGN3FKKZWn5GT5e;q z-crD)08@XyZ%NTyBiGZB$B0$qb?{t1@B$sS53fw2IxYDj%!nJkl-n(~bm#C=r?A%E z`?0wT0%LcbL^t-eI6vI#nS!N1-p492_iNJlyjWu8P^iIWvM}7u6tG6xF`q-)yazbJ znbtl5*GrDlLLsk5P;2^%ZFj5PirD9%W@)zBl-qx=IU>umAJiHn2BO}uDhfa)}X zI9}})K`VyVyZ4QaE9yP&dwxh!6$~bkd!m2kU~Qqh69Zpj4DmLo@vsQ za21qBK`po$j)>r7=`gD>+nRN)v-F1i)BW_7=FhpFRI_wX#6;M8J^i{pVYAx$%h}yk zhmXVS_+Zl|7xZ#n@MUA71>ITc`EXn223Lk#JJ(sOY^M*CA)$ENW#QzPNqr19OvHae zIA0{U3Yfn;nRgJqZ8GiOHQ z+$pvjYj&78&1J$b_s#wh%;^FLXqo9OEf%|bH9w@#S#AD+FMiDK2mMQI+-qPhqt%45 zh!_jcpy1e>FMn&&d&@R6YY@SX4)%X)Ei7^hn06V$D%ah&Sm)z(-U)lM-VYbf+}XWv z)9o6)R|Hg;ZVoCcb5*}>k4t295TuvI&9O+k_Gef1L~qtyZ#`B|+t`Z3QB_6z&6moW z&+yfO!fU>SUkS67hmSPwp8*<`W^?UCXYe;xD|gpWN)i- z6lOWOc)}|IKfiD@^`;4LoJ|RH`?gqNK@@cu@pG8nT-@5`ItnXw8Bm`F*2Og(-);4Y zd)mds&z#s+=jF8Am-~Bc(;F{T*PpznYkC_k^DCN~;(5gu`1fdh*nH*qTs-$l;Y`T+ z@_No%9!BB*O^(yUyWO9h#K?b^?lR`aWm35N(QTdY&HNBrb?JtRuonOqR-VM$>4y6%Mg7Wc$_hoRK%o`O8(1yN)N<+UhQsAZ&|O zZjb0-yf?E!^-_v!71ZO|Xj;?DBRKDjXovGOm*U25Gi$ zJhK+&R#;x&ev&qjiYJtjEI{n@A!F(qWcJ4)!5DBHXaWH z7hm{pZg-=pnU7{B+Z}&T_2bnl%dwhh96hfJ!`&V)rZmM-RBV0k3i@TQ*Tu{|7OHzJrtVRemBFzV$IM*KqWxKaXW4DCXm6*J z&nAPdnOx4=WB)|X*ZApT9-eKg&h7`x)aH-E7wC(6wME`r4cq?ya4r$q-_QBDxpF7S zl-W#I)Q7h#b!}H$Ux(XyNbz*>(9G2df4Rr7JG}+ zJNMaqxhP+?MJ}&rfc8p%e)gm6mb){yjpVWBn=-z@@oRjjN@ua0j`xDQm71jr{xY@; zMyD^Mt`)n&VbdDd(fk;oYMtG^G+P(=oU_vOquYwdJNfae7&0e3sUv8wD%{KW1pJfg zu4z==?@Akhwp>@%?c_5z7_M_XA- z#W&hxs;ChM+rImMkK#Kpg_#oS#E+9I(+gm47v|M%R(3!GCOKd9M1H@}=46&n_P7yT zS6oTUhTs;3ih7-J1LEqPC9w*QMkI<2``sieDUx6CNn8Xd%F8F>It&;-3me@pM_5W8 z7VolR7YlOqE-HEyQUT}LAujLPxIZ>vJF#!F5Wlz0iU2l$hCsMyiyn=f#CUvVPRT}e zXu6Z6c`6sHClq?NkG5J~HfAF| z8^X*V^e?ayrs!@4cqKjg`ODrmEp`^z8f_osdL^nvW*;UYS+ z=VDRV_@UR{tKhZXz591AIg)7d^uUXy$^uhZie~7gEW>L>A*OcOm4%s(Ue8ktZLT$M z5VZL)3~(5^_ziU_-z;`ayt6TDP6a1-Ch8?W``0(3Xq~_TpxsHY%NpZeT^q&^pQJaa zy>2XjYpGck61$MR5?9mGj)$c7AlL%~Mh~J+4V{3l4ppC58<5^t*(ZUZ-ak~(UT5HK zesp@}j}c(;m<4b^rh{Gjz?$`4=LjCd9pBLvXG78-$eFCX1?3pDt~4olpOz&=#FXy) zxMlx7Z{Wl0wvPcf^1QZd7)Ruw3E&2Mps|sEmn1PPB&l<-j-Q3^wn_jzN<(eO8Gttl zh|`1OJQrqLj22DpLyy|I3bbiAOCxoXZ+c30qjfYlP^bC(NdvTy8JLLWgT@n{@a<7K z4;!FiZ&#D7;-N~ThNjj($Y1b`*of@@9%;ki0%o%!j&|YF+Uvq7UoV8Lfn<0f+DJuz z(Zg6^^rGoD>VKC0l;Y3^1RNuJ@jm{C|)34OkrwDxiu62+OX zh!{uI@DTIc``tiWwD3lNBuvm)%08VDOcLMbCL;sSNtXFEjoJiaN3TSOTl^b(_yg%( z4^#o-Vjh9>amgEbM+VFr_rYb1y{UzNYEx2qI4GM*)JyUSB*t!Yp^rRn^GE*(@mMaJ zX3KZqnEbgg_8DzI2B37rH(X8XUa!>fe1{+j4u*k~Uv5+-M?>qDM>Ww91suHeC!^((JLIOYMvj9t=Ux0GXBEn@qrSplC=hB6K0!Ftb| zKiOA~vPjN>6C;>#dDwy0!MhyPmBkWpp_kP{7(U)-FagB%cJKDaR~OoR!mSQc(Cm*; zT5KK$V?4`4J(4cVt6EJ?WcAy^Y#$=uv&zy$2J{|`nvYkQy~%HcGwX?epl7ZSw>N<8 zYH$0wN1q~OUp{f%4sMo6O&724bu7I z@zToB)5BtL*!L=wvpmy(1Tuk!L0zqAx%jesVdnu0Xm!cuF_gSnF7ujX-tlao=9Efp z;j`2}!=F-MYz<7oseBD_H+l^CK1YE2 zCjygkzd&F$oDOO4P#Bu3neM`#br7LVdFRRifD7Y7M2GCHx^)$QWLP(2C$N$IT#(Mc zPsWsX_`N>coJ_q0f40zc0?o4ysA}`oNK#Xu=KxCu$i=Dcy~_RKF$%pTE$}f9N6E0p z>hK1N6_N=;rI1Y4#{~9imi%Vl6f~ZL_ox6W8yyODgCFKNlYn^MI5qRKp@D(ru@a%I zbRn%k+FluIjWrT~-KY>3>2Wx{waKqavjI0E92-#r)~WS7N==k=FP2iz*ucogGRU}U zp#o`9Yhys3%K=qzfYW~R+HrR>{Ki#>IJU?vG$C@a!-T5vv805l`ic&6=4f@|dQ)_) z(f-wb?hnNq*ZaaWqXk>#B>P@Pg>A@FRmZKyD~ydtXX=1|wiEKrP|c;M?iB|RKCO$U zSV^4$^_KQ>u{!uo4w|1`!3vnsh6-ks`Rp;9p~%Z{XTQv+GL0&!y0Z7+=wNz?5rONC zf$I(w%={ll0R&IMn3Juz!Q`eNQaZlM!2Yz}duaoV@f4iE?N9RoQrB$(Sh?^ijDlqF zls_6U;Ao|PSg`5@3*m`s&VY;TiV{0ufr__K|5Lc48e2gy;PV7*qVsx0DedtnCYsa=nb@<}-AyL`9)VyVmqs{ca3 zA_2lEbKtaOxX_zYvrmEABCvk@!3otT)0I%H0`d~$z0q5RvXM!u!3}wAW?Xbv-8fO5 zKQJACxSuJc*mHu1lZ)R=aHsTyOm15%jb9Bqu1bUM9&0<{y)kB`d$|N*LuK-R3&T{3to}na#M!}e73$4>h;5-``gDC&Aq)$UN2=eU)Do@`c9p& z*;qtb<_pQ;WR{af$^P}j1eu(G$Wv@D*us9f?xu!3JX5^H*TGnrS<9z7?J?z(>nP-Z zPF6aqUJBbe`r5!_#(B4J7n+a5Ojyd;lrOT)-TmY`5Si1i_8@8aTo{&X9*)jTth2!h z3?J(SjY8%{qzxXud^d~b47nh>x{BdJNwrCqP?cAfSUiUsOuFQJdx|SiozXXyv9oe_ z-`a@!Bcu&Y6Pny@*1Hv50H@{s$%kow84*PL#LeFZ(266U>-0z6-A=$HsrKPxWAal^7c}jkg~fZ;AY?0T~oS62u!_FLdWs@)Y1yHhdklwmZpxhqHSg`E)qNVdg5Z+a;IcVdK!_><{`ej-&-@apHmqLntgrhO>)rkICyPf$aM!sb~ zvqdJ6K%t(TQ3RDUguKpx!7th~;|4&=a2G^i^wRaKKN5U4e`aluQ_mfq6u7D4iLxljhYf4U4NCN+Jol-!zs7P_JH`uWCQiwG@}Jm{haNp? zr1u$=^MCPQ=; zeZuy~sI&p@K02s7hhsyH^@0DYdX@fommg3?d@WcSUvp@GFpWE-L6|0pYp@63PE#w+ zOi3k*a%SGY;Z^s3=pC79$QMcxLNoL(1$}*re@FiGWgbV5yx>$6BEuIz59KK5Ng%$3 zZr1=aL8+2zY7%jWE~jKM(ifXX{Zw$ko|zza3?~s2QFUB*usDje15^Z}Yj2Vs|BW5yJyFr6}zh@x_PQJfOq* z{)OyVxV@k}N{Qg1g5V84u@hMh z>e84M3Q2g*q&!hffc}=f7=Z5X1T6yo&ePLFl9o?@ey{IeYsqizze#k!e!MwyDr1Sf>Ks0Z@=$L?T2ZY^W>z)9<0|)x}o*(*>hGI6B*VTaHcho2n1D`z;e2OEkHGwG4 zEMD?i!C4UTYv))`(jAau%!KS)03YZb!Ry8blhmgA!4+q%x>vpFFh zf~Q@7ppg>Bn(xp$LTJ*3F-Hy75Y>+*J*unynQFf=V*SuU%@zAE3<`(q zILG{<#)sk(b-5JpQMPXN2t1J^sPU^_F|{Ip;!)*u0EHDpT7JRv!;6kut4ouKc|fDf z%bRx9!_X$|yTlu=f!YGPVpsdrtEZEx&6%f6Bu-}>o=InRSAOk5L5_j|k-AH_t-k8M z;IM*W9~GDK(`Ano5-%0yS<~LeN&2%v9NHYuMkOlddiC0~=WO<;w%wv|>4x*jTljo` z@wR7<^gSwux!=Tmkf}>_Hx$enXY-)Ko-Y4;X8H>=KIna;&RT~}D`>RCZh;Ef#Buc) zR=&Dr1lQvJh%TR^mJ&TO;>eO;JpQovysKW{%dMF3^+dQA8SL#Z^Xu_XD{m~k#2W;I zp~De>0PcX(l#EUMEmp{|)~CiD?SqDYqG z`4p>{`Y4B`F8=d(ZiL4p`l1k3Mf1>-rW#fT1iL&bJ_*n0it!6 zLgePf0FV)q5pQ+kh#-!2evz`ch!v#H_~RDXO`s5k-2t z;T;gbGHTjC5SklB$5J<+-P|63Svj%|pN%BE(WaU+5{$>UvdqtN1N$pvEVEb~JHc29 z-x5uVRn!WtWU&}Menz?qgtNa~#ZS2Xf~!h)i1ExyG>?BnnEOn5|It1F&;J&pApeIE zq&Ob_(1Wlp8kO7JQ*EaI+3jQT#WOyv3@((Nu`hSq6zgTF@NIH_NkpEDsuxRt& zyw^XQPKrEu`nUOq^0Lz3h|~O-388)FG!p-~VgH~0`G1CjBxuTK$Ulmezf<@}nIivD zWSk>Ej?rZj3{`)_MgGZuDM0@A*8&nVXSwHy`Dat{=>JHX_#?@p;HZD&zsePc)r0^3 zzkd`gMLvp0>M)G-kM~9TJ2hdKqN!!nJ4emGe2H@W55p?Mu(MUHpN;-0SpL2q|Ku_B ze@(UWA0XOAfNAbDXIF;tu5dH~04OVySN=sS!KOkQNrDaiWge`q2lK@d8ye+s zEJ-E&*iUCWoEv9!c|XMK-80aHC0rbt6_mglNx}_HzmjTs$Y|H4_>~lGblm5P@^)&o zi#;m6jx59O2wEJVg_16i?=FaNri3y^)(T8BgeWCWw*X3i`h2)1Moo@yau>rFitQ4) zv+xTw5U<5R@IlVzwH(z5gUtmUY~&=Y9L-lv^yUzO4;q#A2F8^&e)yv!?gxPKC#eWq z_}vyNTd|EJfA|?0X6HmiBK}a=QiR{n;^gqsnYl4EZT)%v6tS2Zh(=q3D-hdV(Y^cy zU$s_@G)s$r_zTb9k#n=-lj|NF1a-pUuNN2C04&}i)RQ7+4Z}rc45L8!SwS30LaiL zYUcnHKR<$HWo}+=MFH7d11OlZRe0Jq%h&wA8s?^?Gk=g)j=#IBVK} zBJZHeQMpECE&rF4z~%#JBy{?uvrjSZZTP*E5Ju|`HoO_AffcZ~Uku#PAcL=p){P*K z+lKdcn%+RCZp(re8tpRN)W%D|N$yCEwB2`~yBT6Y%(?WggwPcC3zjJYw=HIegBuS9 z;(&_5-}GlOGz~HgS8145N$9La?};;iExllYxS04a5{wuKo?|j(X|!3t&9)t@oNje7nDDyU$h9+va_FW;pe;NMQ!lXY^jBgu zTuR1wUd3jTK=4&REbx2!!wL8i_fS+@F7n(?Y~YA1#8ZMx0zS(8*dSVI7?>QDy*eI? zTiLBwI>od>+qfT(yslXsG;xi8@3vU)aD}Mx96}1zlzb$k!|*Y)_mm%(M~4E-+ga)} z9W7NFBq>^rdx0}nc3ud_9wAXu%W#ICB))>#5k)D(WfIS5S$O4^1F^7<(<|T+$8o$; zxWCSwEQBzLH&LbTC)kAVS1*)3BCfA(RQ{-l0yW7GiLD1AMnZ%G!mcwGN{kP(v};_iV225f zAvzs1Q;fOx4#LCk=h5=!Mk0P5inQ|)V?zzDnEfJ8S@j|8YRa4!rC2s2v8`paGDSDn z?*y0V;Y#w1z)GIw7^;PTZ&4D(wbMOC?+#?o&=6>p8ZOEs5OK|y;LKhrRQ9TubNLIx zZazRR&Tck1kOnY@%5dPN(d`7xTd9TTJQGhHf2*oKWqC&>tjbDn-LEKhM9_krtF&MC zytYDiEhe>UNHepXj`?&1m+16aQDxZ;2p#O><~|nUu3o@ahtU3 zZePiHF1W>*s>3saQYpV#TJB{<>dR6cYG`SVXgc-8Ky;ZSVi}Rfl5(<3?A-;*-Sa{! z`_4Q+@~@=$-VP8q%%R+)Ro>CG?bri1zK$~wd{&jTmt~10K}fmf{jx$hECrw@a=F)_0et7B1N%mVrb%DYkVD<9pf+oXQiSsh^@+ z7pmmw6*IKU6L|URYGMm1rQCYSrEI!`tpYqbr&@8!{KGnbV9M8l@L_z?;52-pHW`Un zYPK0l<@dTm9F^|xyHuYCxJB-B*>Jv_>;pH4?4;^LSjUfvMkd8rs-5AwYCt{yfL~HD z;&4Eb!)`x?Wck^<{*Ldiki1q}3|44w`Q;1zfV^392)n%81(=mDje#iGuBsUvPZ1ED6vWOqJQ<2DJ}ttBBE7`eg?|>zy;Vw04FGyV#0s4H+-x)yd(^DcY+lrUhSo1LSrrM+K zousLMjkLzXBOqS?O?LgZf+z3~s>pk$3~q_^IntP*WGo*}YFFHVz&C@#&e>v;KAp0?3Vs{A|l^Y4X<{8^N=wm`)$7@N4+m7?!*n|myuppUBh5| z>aB9Hm&k@up+CsZjFHcn8AnL~biJwK-Zuv8XnDSYrau}|3+j=s&k>oGbFq}$K~%oD zL8s=w)uD~l+0uJV8%cgS+x7Um6yRK?t_&du*lf=sCXvRJ)Xj)ssR_l>=2 zCRI1$Ns{)~M6>>Jj$v6lXWszmT_zQO(t`0KX4kXS5kbg^xFbqr2GY}anh-3u8Sv~d z=POyxPFC?uSVBr$zbBxUK)57hH1r*LN?a5Zt<36=WY z)ZL7Iaz)?{Hv$^F%QR%@SL8e0-JRs@HahU z?$|p;5nNv#^sZO+X&ZiciS=g0e~`; z-Gj$31%}V?iOityP6e;T`L$?sA!@4T7E&>_4)oN$zfn(oPadZ2W$4_YofEAWwH7{f zGCc8@2HKnd_^r;{GIJwlq%iA$rq3s}pWY<1e#lu~xm0J#$dt_UfdCFl{oxCc2rCX! zM7DZ37sD+g(r+ZmG&Lv!sVq(xu}0hp4(DY`WS0yjor6*9lp41l?&@wo_q7!m`B{lb1cqY+g+N$ao^}n;JD|;+s?M39kGYniE_myTH3)xPdCo7KvxY?DFPPRO%cM&X(6fC}| zZMvKb^b!PzThq1Vm%6`c>a5_rShc00Q4_lkHQ>@7^Z>w?MY>HF zpS_P|P?)rLGrQf{%5n$HVMZ2y!N=!F-R@Nj;aS5lNteMN%HCTPU|E~2Q}s}kSK<&w zutMjAMCGY}AsS*LlEvS1R|O@;Qw~80Sn0y5a+klW4fr_2Y1nlOyBN8P5tnFc9Dngg z6aw4;y-jOuKKl&lj}b|r_=Kc~#1N(!+x}MY)r*?1`z1O7^IWpKni(FYbvoG6*)o9$ zy}+pYqvU-<^e@|FOD0HC>9St$o{ulSS^HZtcMjTrQ?gO6UmlG)g&(if?=F8Q<@9Yj zJf`ouag}DyN#-aF%1|>K`s1S?;=6YzYqrScSKp%4yX)ZQX62x#_y$j{aw(QXFSv;LsL!#kuyf<$CLVs1sXe*L1=DZ3YG z5X1~;u!B=2IWm?crW+RVpP=}n`_)1bz>%+v_Zo9y_o2_Zz_upomQjDts?-7LX$xt8 z8j3mNq?$WulfO_K%0LCjFTiQQaKDw$=8~5nkPeW)F5zerFb_DG=1*oH#*C( z00*PtHv$JZt@-0HKqQ*)?6O5%n~YX9b95U#{pqxs$uV&OV2o@1Hc3u&TmEUgAeaF? zys~}-T&||QrbO1~ke}(z8HZZyo{JuTJ@3mpnnuJs*xZsKWU`IMZ6tmW+7sy12D4a) z?vY731P@u+lL9S#GYn-Ts3{HLpoZINX^G7XVfw-f?XS^H*nFP5qRTp$+|KMcoF|dl z#hOXGI$91@-s!rLbWI6cc7L3ABjA5b&apyHwodaf?aLfo!}1d27biuxipD z<+Y|L7YU@YqUM7vSvYTMV#sJV>0fZXRLlwTr)yJgt&kn=^#(w0|6W1F_$@;Jmq=@f zNp==!)QUL@K6LDrl?nm6!=j1_hQhokuNVIv zX{{qRhMWJtk=EEnKddb9|3s;O{Le^hxzG4yo|su+MWY1K!`$DJ@a$Ci5Pch&gEmX? zH=T*{%aS!r>w^SH8iSnt<_+c^JE8YT#k*Hp3k_Z3!E_4cka2At?1*Q1jSJHK>1r1j z0nyTgQlktav?a{`E$E$MV;SJ#p94SUgOpY%hCe(w6H#yPLL{4i#+M*}>@Sg+2n<0* zFrt^Lfxf=!gqwwK6_|?A(vm$PrZSg4k@W25JGMz9w#i04b10sH47-(5>XvxrH9Ur8 z=R`6w-OwML0}Of5$I;-MTjQBM5pyi31n2QS4bnj7HktalUoQ=3Q)Qh*Vr0-_=zp>W z{_T%#{^E8OHerAB(~NU}%rZjzF`TIQv(R><%z_}@44tLWHTZy(4H6i;&JM#6prP+^ zpKW3uSI1%SEwY|r6^dHMj77hhNu@zFq(7vCpmj{X8uC-C5}@DBzGWy4tN{g(M*pcQ z38Y78D8_s969Wla%NP+r-T{-NgaTC%8krltVxaA-w{_2yLxN|2vO)z2KoOYf@vn)7 zRAZDN8gZ!QM2O&*SyTLb#tdBb;KfSY7_y0CgFSI2rCo8pQWa}AA9OpTzU2x4=EjxC z8kat)4qwG<9rDFlE{P{H_jGf&Q_i@dcqB4aZ=4$A_`krnYvB%t}@R4N9Gf%~}&!49s%Fl6F75_n@pIYmA zTSZ#$dsS+S-7eJ1&3R9l4r zoeWsr<6>ZUDh+2YZlfe6D?$R3J8F+QJd5qxs;HdpsYV@tZ~F6*OR~|>>KK90FNt^l zMs?*%nnkc%$fOL(2kNWw%7gZAXn!!v^1eQLNTijLxgqxpeMmD z*vQp)BOWE>oEd6>c@}I;5<&`fm$l&at~1^_m9vZ!7Npm++T8FMYu>u^C2IL7>jMW< zBD%8=qfbA7XA>vF)xt?TY#0%)RkKnzkFRm%QtnBt@`CUb)~gv&clN#ai7ywofXa2O z5SAkEt-+70T`U#R)!3KK<0N2V zGIfyJIZRbZ4b1x-YJ5@2`}-|j06}&mqfa8k&d`%ZkCD_jPdsbM?gajxf#uxy)2UFM zb$kriY4YMb!W?TQZn;g~pDI(Q+$!`i)!Z|GaVn+Tv;c^$C459wzbU0lg(6d@)T|+t zu+-vB`^Os4kV)9$Mc@4y*jh_|uaK+VKOrN$Z?drU-D}M6ZLeoidJ_uqm~Jv;5UN$`nBh~X2#7^jeT%VKGv3@&0#XN zY!D7uL*H5%uT#E{cOi=kf|ZYREBh1sBR2lfO!B8B7zsN9nV(868GQlxtps=d6eG7) z{P4s~gRh+wWQt$%x;1|A7YK7#lN;=RJ4twSK~Nq>TSBR+5I9$tNZKmQG}v!|u6yPf zBj)+8IuEUKdVdSzUsv-UNulSRPStO9JEHxBcp}IiS9{bD`HqB7hWEs^B4VQ%=5fT+ zS2xxe_Rapcl&CbM(8Z?0+atD-@0@e|K8IFan1GpihocE4PWKv!R}~?cJmAxRb5Rm@ zxrl%X!l?nSXv=4#($#|`CNe=uqPN*ntU}Ph~_=zzm zfoIeO2mPhJYmU+|V z7Ttj7BxudjF!?Q_TZ@bF&d*mW>bM%;S6Zofwnj(1pF>JNLFWx?mo#T*RzEMh-pPJh z!4Q1eHgzVJNA~leC*KSN*&f~HRiul*&6QJnos0*ajSRQ_lWY4}H{Z^jI#5d*gy_Q- zDP4I~N7$af^al%lJz0}~$?u1wyFD(m*IW!V|7kF6$>PKF2+^=)ota&(pNRbfd=vs1 zkpS2Bdv?L6EheoluFFH~y~I+QHk$otpv51GNS}vt8UUD?x(+hgyT<2J!$_=A5Gc>q z{(YDf5DROSr7xnSNsgK$c~BR zx2&$jj4uga(KW2)vWRdSHS@@dbT?Ar-dwSaiO+VSBE6A27Ls8#IiLKBkQkLcNdrEq zoBOBFV)S|n)^wvZ!;__+YL>d z(`Kq0n&LMy;?t$YvnOWKNbwAV4&BF|Jh}zd>!)Y^cw!1jPA|VrLbqkjmLvBH> zE9TQK_qBq5xt}FtdN;dJcv;GRd-aC76M32d8;*q^QGZ<8?*uh&DQmS)#Mh8fVkcS= zsYqR4&5HE+nZ$Q@6!v{Ks@={<<}+84`C9i0mZ1Hq-Z$ZLl1&5W0xY2LUC4S349>hE z;}^g=Bbj7YO|@AIjrTK#4>K1&B}ywKawed{+LcIuU6{^h`F*F<18CYipg+DX!)5{D z%k}%mt_~Yvhq#o~0(zh9W9F?%s#_a3<0luUZL|_W%DwZ=U42ogk=G`*UCH0|k{g1^ zB<>$5c3*jkmA7FXs3Wwux(_>paaL07ItLX~_|YH_QCI%ZwGnq-#D!cMYht+eVK&9@ zgF?`M_8ARi$PC29Wus94M4ZVWcr*6@W&!^X6!19yiv;`+q|N%vMq_Ta@nPdKegU1O zf27>`580MT|B`)wBp<^t`M)Hb+n1Bma6`e=4#Q~YKZVntPVAIN$_b!5!?5=sR%r3k ze~^6t*0%S*nxJ;d%aI1 z2XyB=b}>IKIQ(W9>H611l}mJ=go7UA3d69=KkXgqUsCUn%=@DcTLWFGquiT9^B`kOySEB-3&KN6C8nZNK-<1f;G zjxiqL6TIRRJmVck=QSE71grgJa^dgz3ZyXJxb@^OOfg^>

_pZVT>z$>9H6Mvnd? zfd5ZXUsHr4`zMI|SCIH$veEyq%=im>_5M%kcrvJ60skGtH%e%H1!_E4%{w#Cbia^u zoWWO*Q*auYX%xkH3Xi!Vk#{grX!eeOfBu3{??rk7PvjJfWquahC~SinrgGDT`5Fk* z%>vsTT<8=L?+6CB3!BXUN%L+_kgg2#6pp=TD-y*MAZ{#`_gvDXcj>z>t4mZ={O_BcgaF}aI0`pif*KAg zDr&&4{_eY?W4vR`HRoEpckQYkHv`B+NSL2@ z_6m_OOo>t*0-i$G(G7Pa9HC4dgWf z45CU-9oPwA_CV)KVDPMhyipM&ROQ6n%p{^GWCJ)cwP7xR3uiv)(uPZ?bW zC>(A8vSZ-E))8RucpxYDCBQf%0m%9KCtyeN0d<_YwSwHzk=!tUtsUgl@DJc&s+j_u z@EhRp<$QytPFxh30{k14TfXh3y>`+buI`wGJj`pkN<&Hw3~7dVdC;k&0;Yk0N&*nj zwyC+trZ1^fsY50o_c}@p8o?=NI-pbsP256`L*q4{lj>w|^6BFh;nU7?v?=QW~oZ5 z_Ht*IJvG7HUB`HeY7$`xUgu__rDfC=OuJf`4z{}UAjcPfmCCtEEL_-f}=0+U*Y#__znF1_ATi~W8WZ`&L4p3-?sG)(*6Y&I(1(%a*+eofH4dr8r7n1 z(oOuz*dWw@s%zL#KwE#S6NsR*SAn_Q3lLG>+K%ry(59+Cv>P8ex)v!6g6?3lPJ=$F zRNi0+qhmBE2(e5DCjsX`9b;eu$Q2^kZY`<^I5DAvEIdjlK#2}P<72NmND54U#D^w+gRJ)S*F4j|f>l?}8>IFV z6C44U{_)|jbOOIYh5y*%7YM5W)BOd6{?isoKK{>qG#eLsl3+fa!04K<6I-@ahY2zAmO~PNpE>54UbNkTXVj zV*^GL+stx@$_AoJv70eNiyVRm9%A0e)==2OZJU(UkafA~N47T8L|WdWDpGN>dceMG zG?2P>NvMYC2@Z0jQi=>nL!)Ha1STRwaZ_2d2fQ(`uZ*LBc&e|#BBoHs*OdyCEufu$ zs+-=f)~Tscu9_eUQQv8R$)<){36$W5&L{w!LS4Hvmlss0e6~3&8^ACW0IaC&EJQKH zq!S+*7Wpq??At%MKoel}L-f|wFP)wJ0`OzCh6xXaRrw~x0p;A6#=Gakx=tSls7XaI?K7cUjofP-1(n`8FnG4jQNWhCH zh%pWIQiC~7v)6qOYIkjBg=P1|Sih9D))za2Dtn8SnvfHq%t5`Rsvshy7VwrB0!j_U zr3`YC+G=GaVV_%6j3=@nWNXxS81cdk29`pAb;zYkuSnqRr&A0K*p>#F*rzncG7|Kh%i`uBZg_-bROq3QDpqJ+49 z8me_P7y%#%(uXx;OWa43uy@uvln)vpC=8{eQ7Z*t@+HQ=@^QP_AgTcx#YQPbkg~|S zM(9e334w!gskn|ACx?or>sKUy`Mgo>Ds6~LBAq%&rNlxoReTs-6Oc-m6&oXpb#2l( zuoTY)6~Vis$Y0)Z`OZb2b`;eDv!40Hpy}UY^%vl{i_-7dC-T6zP6FClpai-KyPE@Lh*U9umKLGfxtTz6 zA*4BmA8s zy;K^^8UrRDJbl-;#P>jn_lQQ&Al~+HGJVJ!4^SRxAnE~gCx|v{l`0TT8?M2~G4ZZx zd!ms6_!?IT3QHIG(D90YW)3(LzC2+AEFeM-Lm}TRbh03g}9I#7Nc0PYe_Uc~l^r zy4GoB6cN4Jyfc9SGGtPs^&~}%IF6ROfKyH5iVA2$b=G*s;BTVsDy6|BK@`8#5<7a& z94vlpNI@}HWL!{N6(6;SH#Y{KylMIsr#HWJEPU(HpZHJSI{2|&=XVbI%{u|SJ`w)% zi>Dvm{Q_M#hxBBB4u7%pKfd7`jQ!7=^p(PMDRA$pMr!J*eWZa#4)UO6nFwwcbk=}Jy&t9$^y$G260T8uh z2I!N=J@YdZtrq1rz`9CJKwHo1WfSzoL$sMO*Y;I^5&IZ{fGGtmO)E`$d0$2l^rP!# z4(__BkEpltf;pzBkN5!S9dE!VUn5_6*`Hd`S3dil5B$>o?={ue`bp##E=qs$y&u2+ zz8@TSsd&4uGc@dn&3!GUP(>?J^<4}_xsx}$=p;KJP6xkcQ*TN)T6ADC z72;b^Qri%sD^1IbdGLN4My#up0?ov_3QI!Y=I?b?FV=&F^sWKbUpY_ty|(%l+gMS3 z`GM&VKZ+CiURQnm!keGCbVi_#PJi+*p6VulZ7e`B(r)kuO8I71-5#8amC1^dq|<-rhA@*lMCwfKnAO_t!ez$Pq{(1S#?pW&nzbH#9xu zbCvp1?5eT~naCk|0BDB2Au1h&Wi(pOiXKp159lb^?#}1{7e1pD`x%p<6pY>*E;6+npfoce`-O@sx$3g zgi(|tB%RiS#5FEQfbxRXv?IE;!nlS)%|*;1sEQb5)Im&+kf@DNZfo$Ex<~r8?r&9}T2}$G z0&mU}@xSY*$V)$K_FuaB727^_$Mkpp|JRzZsD;+i2-L;!w)xL`>RkXzK()W0)ac)H ze>hw^lYi1-yg3HW(Tw6LJKEH7*A>2LwSz#9bIZr6QIZ1&oY_9v`>~W8w8Lhp_{3K- z>{ANL?xb-V0!hnMKq@y&H-M3S3RCS*4mV?47(r8|7z5hf!LB2aVGJ3WCB2B7P2o{# z9jJ1pQ)vT1lu;dT2v?{&FpRiV27aLff7-l^77r)mS@7!S#uC|aywh^QOVANBav`WfGGyQHB1kN0Ae?I8_ z<;FJYSPS^g=~GSZTMh6O;#YiRLE(bZyaur75hDos{J5723_5UksFYyd?I7R?F-fcg z94s}y8nm_Ux0O-JW0lHF7dCznliXCL&f<=ZXp|uC*ccX>j?F;#l>kaWA|*N6Gz~Fh zEg9wzvNxz*HC}R+Qri`vI`HO%f8Tl9pM31I4voPNzrJ`Nqcjhw6XPsVe=o|M90Lp5e$@tko!2 z2WO@7wl-l=1-3KRn6!hSeQ2P;kf#Kz&PrnxBhc>E9%vh%23pxX0nhra6wNV^eT3*4 z_0HI4iixE}!F`S#4#ajeeJGLu)r5AXydpW|ObG1)hG#e`&YDC>bef!4?U~m5@ebwSV zx>Gy9;`7_zNx#;N#_zqdFP-_=>mX|A4cA}YEuk#}6z6#Q5R2?Pe}N=XId$wh#o9%4 zGEGV#!wz80+AEa{rV0h;hLWlb$Ia@DgdN4B7X_VTR2|Y`G$2Z_uTt9}-XqaUiWtct zlW(_rX2*kMK@n%+dE8TKi8@>SuCsCUu3Nt7Pwo3vjKC-FeUJMqC;syP#kc-^aiKzWG(RWO~6upd_Rwr zzJnT90zF>MH+8v>)z6#M?=Qg&g|Nqqut$%xxBvdN=l9{$+gGQt_Fcyb>6Z?Py_r8Y zYkciDeAjyh?e}_7)QJNb{mP9#YZLa@{fBq|!IwNL2l|M#m}4ic+db}63yQir!_dx% z1IlLY^-`wye-0&4W$Kh!kV5c%i3_xta%#$hh^MG@4O5j_tA0d&ymHkfV)FnU^*_%@DY5-`A!~hoz`0p9e_Op;B|6tt_a%$ktFY(8c!tp+ z>>0o9`3Ar8@sB*-HtlO5jh(vqm2Zn!@TdO%6EFVa1JmIx$M5@o;^`UkEW{;!`5GN! zMvz6iCwgJ3QaMyXY7p3T2oC;M8mJ|Tn za5v36-!R3i3{T~czFHIreSQ70)xr77A&S@6`NxN`-*w!ty89p97W^O|9q#GtYp?N7 zeb^j|zt@ufQYZP7Prmou94>D!X_q=ku}x6Ue~BKYYS~<q%#kdUx6) zWv`c!rNX^i3WP$&HY%s(wpYZyl&$qjjQa>7UK~}lG9bmasGVyR2YNC&8vrAXr79Nt zf9o{TA$6cOyHo;T7XhcUe5Zt*n1FHF!s1!VJ==a(VK1|fwNHx}S1qX~CoFluqe!w@D|lXy0P&%)U7LyWYJtk`it1+#p9 zEl@ApcgmS+7tWo3eW{BA@Jz>bGf~#-e{}>@zHa~VM7I%knHGTkG&@Vpx%by`2c5ebP7HDt8U}nAF`>BCtft!$vA;4cD_*alf zAwo}~z34`=BgeRxM4?fw7# zUR5L-ov$yKeanG}{w!6zsr9}>e<@<)@pvqc$Le?{|K$s89RQtxnU3R9O9H@xB?IbF zvpbN5$+6-td~i!UvaCW@99JAvarvO)S+GtAx6%h(b^vA_fGUxK;SI(+omg*37ltfgrdXu_p2TV0K80!G9f5kDKYE1`x zy0-}e%v4isagsy@$pNC0H>hOZx`a63K*$%K_;z!4mA>_rtlJ z=8OW}<0^LR2Z%s=JZ71$WgQS7l5FY@K$_@rmIScrV5Snu%Q>|u>m#{tsyor z=me%6C~A5Ggh(VOf#^eHb`Es=i>F}dSY~4*Z3ycC6STS?v%I6&rBYnK|1SYdaa?yG zx{?5KgdJ8@$P(k}LZF#Bz~)g)Qw`*4Y-R~^GZHtf!!S&20Ij|&I}EcwvJ0jh4g*J# zbL)hPVwcfs$`Kkke+pbg2E+k!QJMpkp0*q$9tR2HaA@xxXgczlr$?ur3<0c!T?p`X zs>3Y10hmr9{+g#{+r(5ERQ-rtNiK2@_@!T6_Ybz<*a$E`KpX^};IrBRdQ1`2s>&^i zPFaUOx&YvVI~q(o1adLABoK0iX%?S27@|6j{O>Wi|GCeAf19Eo@agBr7JzC7Zt3HI zg9O|=AiwRONY+cG8~`b%3BoxUw9KmTy#q?uQG)y&Hfcof9R!_tK0%19l54$nMjQ_= zN^(G{bbad~5t-OWe|#Guj&<|Y0j(xB!$9{eGSxYbTt>)+5gYhKBDI}^z{d9|u`$lD zpP4prv%r<@e;kB1zWWKjhu>}bpo#YIt39%93~=MdiH!r?2)SyggZP%@08n8gk=3gL zjPNEt85yGv;N-0jLrfDJL!m2zf3&??gtT><18P;c?toVQIv4s2=oIl!ALRJCF7$t; z`b#nYo$y0{*PQ`zwGnnlfp{}T$h5V7HERH+^`^BoX}5 zg6`hIqU$J*tzpV_H1wd1Zgvhb+s5jrz3n z=t%HpqC*A+ZaQr&E46Wi(1TY)my@ID3wIp_e@x7eR6{TJZS5S?wv7=%Vt%yisPk?& zgQ17$-F~udtbN)Af75rng@zsu@AivrWAbSi{B7RtcE#D+1~5faZV#NLHb_$h)sKw5 zckC6m*CF{TVyS}$ZVwM`3;(5J;lGTxUH%6>BU3DsLBw*#`~k2I%++)$Y*P4XND~`^ ze^fiZWtAeg^kDavk5m29K`QHhWO3kcxf5J_NOA1Pj3<6qy=eyyI-uPCztX#iGdKUt z+kNZ$7kw1}n|XK~1dsnP&rdsmTj?w3!t2VByO2UHXiGZ>QyT$=;Nm~e-v)qAvG6Z5 z8_AnBnsN_uwibN;k8uKVfL$RgHGQ#rf2TIWhXSpNm|a1m6CYo_37>Sbs-~#@!7Yha zR>G%s0N3i|eP_>Q;u9TTI^JsstM-hfSq`?0FznjoN5wRm3l_j>~H z@CVl*Q;}PwHN9a6V2dNkxe*(R97DuaKd~K5K*aQ=gH0A~lgNC>P1K0k>>5NWf9f=F z_0$Hs@T>o*VHHO2LxHo@EIC}P$(Evym_gV_AUrG|(#K{|En zZ{7O$cJ4gL0N9$evWWY34#s~S_eCXW{v~@Taut@{{)vaqj}Btc2_NsjfAxIJHOWmQ zcn^}8v9;Dbx)4B3_&<#QPFLRP-|5JI>aQkfuD^M60~XEK z2JTheztU2DV*ZWRM+5hUhb89owwG4$=6^Py2ZxRG=v3k3q4veYkNthieX4U6x#(L@ zI&fF5!C`9z9^_U1JYU38e@2h=_N$NoEgKeo=il1O+dc@nT4{hd#4gK_$6@4_**k~y z)zp88bN0?3|IvQLT*%QsiTDB(@?tp&0P`!?c=s*hPJH^A-!Sw*ZEyAsbKq0KFCuah zZ|KUmF4{mBIiY8N+Qr!I8{p#JAQ1KvF@O2aTScA?HJ^s|4NR?Sf69aEf6^|V4RC~) zU?$E(A?u*McUXG+O+RpipVmbl{r!A9hn{T%OXLrs$ZN3DbqoN%>h7He#4-FFpBs~U)P-v{UDC%hyHyWedyoE>4*M(Ty&jSzhg%{JGE`+y~YspPlDkk*6{RA*LEEj z`Rki5f71c&O}9bAe@o2$LnjG30HQbD-nPXa@fY1N3;;k)9H&SeYoFs_7y$jcUWwzc z>mPa|j2${>pt_3-gMn( z{@R-^|I8nL=;Y7*KXgx_Lu=Ii_zeSy{7tcz5Quw=Kqi2n9VuANmfHo1G*0?)#gT5YhJ! ztr&CohZev4e;t1Dkwl{J@i$*eF>Y1);tMi9^j|z7O!WPSwi4qG|Ik|Rar~j}x{h_9 zb*Bw;@~^x%}@`(4t4MM z^{<~}&M~0vwWGkGHzxoLTh^V5+A)a5 zxyHbTi|1jGgU>h%21{TFSPF6OfQX6v3b3QF6LbZGx)@*1pagIVat;U4o>9QWIBYR4 z1b`>zi#sunNX#<<@BpwQ&O4wtAba13#XMVao^o&(a?HehjYy2A6!Q-_EP+PF^;Eof ze@(@?&OdxHi0F@t`EJELk@QdFP2O@~@Fkv;^ABGf3Lb)g9tR8<6zl*z3Hlv{l9=CT z{_-Eb?-=NXd^{lTbAUzv_w?f<_xHSBU;KEFE8;?4jkx|0{Ld)p#P#6o+-9Ht*8t9d zQGgu>4x&tK#`a7P{^f#c9^9^W(`sf?bDD=enbM>_y_wTuM z0)SCU*5FjA5EJy+0cqAjmyrg@rh;U#vq%|C%Mp|bQwcun(uPJOo)}hO%|y3c$lqL#q7ShRqO1QWvLPksaj!NP1ETJymp6h*%K|EJdX9$3l7)# z;5cpPwgxU+|9;A^XLa1~<4D?F?iN)phMlR>aLF6Kj7l@ei=|yAd<&+pvOMQ2Y|O^6 zAdz0?_tgMwPky$ie|$a~MO0D-JTrG)wA9Ivck6t1X@lFvYKv)C2<0>2Oi%rtF^6s=^5u+hlXV3F1kMMkXU(_sCZjpj->~JZE!BM2T zzFhZkGd1K@{W!cv{I#8V9@tM0_Re2Grr?^n=7TBrdI6RVFFew01aHaS?+-%3v z%SsP8%j4}We-5l>)-M*4a+^)Yk(FF?AV2m+HYrX;GG6STm+@H6vemM`A4bY`^L(PL zUn8dn$>XUp z%Fg?7Z5U^0?9SoC$Ms^m9hOUeK9*{Ifp_3k^Q%woe{g$T_#~byJ~gCrYM*)*-%r7= znmFWj%TjL^_^&j*T2%n zqK{huRB|kvRF|Bl3CjEEM%gejkL&YBQcrGC^E{eLMmW-^qwuaE`>G`Bd3kf@hCR2` zbDw(8f4Yd|ZL&WHC%@esNxsr@{W#yE^SZeXkx?r9Q?T|lMWQ-q<&MeR#sr7z<>>fYfNSyBEuGboDQ#!{B-dhIlc*bla8>#a4=X zHeolpV~SNQnw#^yqm!%8-Q;=?7SvQ`MLlcw~{EIN6P(+7QC@^CdJS2=Rjd@AF4B=hX#l38?3$YqphRd3sFti|}M z(zE7UX#izAOAmS!g&GX%v}rLlLP zWu_KB>a^TNOVTbR-));RTaNT&T#i-^*aGPGk=Xt)kn3rsFA1ZfvOyJToHLHKMw^a5UHR{*q6m<7X zy}TYb{$T6oIbQAxn>G6Mlw=n@I`34L-SxpJ&uQlPPW3QZ1uOr&-`uX+nYP`q!H*c3 zuQQz#D>o@Zwlkb9Jw|bIe|l`j&%o`x+CD+EJZaqtHiu(h@?p5|x088WZ0k)qn@n1e zG;I=8_h-IV=OZN|8D8t=Ku)$N8Gbpdaa4Ccd>to7yxd&smE^0Duiw_qyhtba=?R8@ za$LRDTOY9QIQv^QWn-}v=sK2;QRZG-ms=Tvscb1wniplWXQ==S6>eykH&h+sUcz6QKw#~U7%m<_eo)S-W1$we`Y^1IHbM!c+%nCFYDkC z@3pO~tztQ(mE2Qxrmva0Z{4+fC<8WCCL`RCk&}XF&rahiR369CESTWCy0mWlwajMc z=4I@RNQ2K?)i0J|7~%QW^4nrCZ+CkJZr1`XOuJoIZZkS->*M@f^J=%8OqnviD7C`H zr)Ii#k|%?8e|B9fpG<;XgQu5k&_6@}ZsovM4;(k!*Eljfd7j(qcE##&i>8-LFIC5_ z4;FBHXmq8BE=IVm{WgTyGwM)I=DAlSsNmL06e^MP6JU>DEG)Yz*7tw6jxq)a@o+(h(#{we+p}_`qN@_+=cItrRZlT!&l#v z?<4xyKfUq%HE$0!KQ$Lf_U6z?o6>~aojpX=wSSDxllxZNYq3JIOENDrd*ZL6o4vf*8Ov<>x-gkCRkHx}u$)Rc22%72he`#x%*^Lahoz{h$nY2Hgxw?*WuwWKX zhG4QS%wuA_9_xFy(Ixv~x6ah@+hElD&oU~=e&$CjX_dX&QM9;9nyT!_!$n=hqoda@ zeFJLO$1IC?nwKic;k5Ls6;Xrj`Rp1{y++s3-5_T&<14qjPFM9jOZ@e04SCRvNu;kE zfA{HSlC+Q8^#Sg9wpE(fc}AAwGPvFQ06n<{W_BhQ5T2DQ-Sx?F+>TCrBV7A_(`L!? zY8{r^Y&PDnKs=f^o|y&*FX7-p0_Ogui(loq2 zDv+^_Ep(nA!Ej@XO?M8jT%T|8gF4gge{-l)zmJWRGZ~Y}jmlB7g z!}PIJg3Nk38!1ZLy}2sq$Dwzm@vxmilg+b3RPMAcE=SAYki1y2m53DKejCrVwRTA5 zO+IO6DoZvJTJA5ZbMt__Si@_*_7tnj#(7-=wVEFG*0c+@O^GzLNh>F1v%owgf6C%X z9{J6GDf)C3PNHo5Tm@R8@9N_;b8Te60pva#3lmxCdW7Ta(Zbf!AFJqJ|F zo>5-)GEaj}Mu*IRrd93g>3Y3hpQWx!Vmx|u>SSzQ0wvX$Gnv8DHA$S)ZGB3$etcPl zqw=UW^Z8-MbF`=e1{R}ui{QRVf6h^4^I$qm^7wJMlh{hiFY@gRh3Ro5o|m44(H1e;8*PZ|Ae| z)0eWGNi)7Fs%`G`JQ$IgJU+;)I8}@JLX*Su+Qgfu8sM{a$ajF8FL!5gRgZPXYQ&k=$zEk%Us7vUYh%2I$C!& zONOQGje9*?U)MVB9_JG+pWC)+{K;{(iC6jIx}OYTXl{z|*iK*je@%Rz-HjY2xrPy% zFHinp-kcg_jZ~zE#VtlT@@8mJO*YRtojroU+*}hQ9{Y=y%FeYMH@njYj^ZakX~yF29eSGq~Trl$k2UD#fPazM6Y-znq`d13zEcjalb%+zsbW+CJpL zdmVENQdd3fM$_Hof8dPn;MsO<+%-$@wKEi>naSPiem_(}5e+&YJJaNHt|qHOaTDz{ z+O64sZ^!jQRK03_7EINPf7CsehDFy)wgJoJFgY&H8%!m(dXDdBrO~Bwnue#*h`)R= zdJfO^+>IW#&e_o1P};q~rO^sPv; z>XtHv;i8+Df5;)q_)#Jlc-m}xm~MBSRH~`X`+ZxM_zExS-uCg6&FAwxt!HRFH{5#_ zDYJ=VPJ`v;Zee~q&9d>ZvPO_wmGKHnbz!Xwvak{xK zj(lnXZs}2#<%4z=Z?Zq8Zq*rmNj)lg?snvq=V zas$K#e=bWGfrgIgbk zhig1fAH(WWIy-qdPpUe9sTu}>)K12hrCr9BGg$;%G#<(8cw?~nFuu>p#xINKigUNz zMJYT5f5bfmO>jD`o~F7)>(Q9X=fieV`Rtl+&qtMB)EpfBeRH^7U)yzfnR@c3NYLw0 zZAQ_K!oiqyr)V+FnccX~R0;#fE9Hyl&No4}k49s4p~P`DYhaGc{b&e!2_942o+h`% zUMyrXxmmNjq{;sBlzUZ-aXpb0M5oH_a*=(ce|gmr`xaYQt4C$0ckX6>*7-WrR%kpO z4MRFCLuoq?r+2CLqEj2^P7yx2KWmlRK#sAT*+*}D==Lkx+^(DcY2WV&n?0}AJ-*9N za~PB4G1q3MeF?g!S4>o=^$ayZ*?D=~pYl9-9H*~tr%hOLr_Li8Fd=ScZcb*k0PDFC ze{IHG_DA80T%?`HM)uJxl`}xN9;LD|T_J0mzz7^M@J?%Id+h`Jxr*G-JEpKv3_mFi zb(mVqy@P6h^w?oJ-K9Qfel_pzXK$ z=CQx_@vPgJPObj_+>Sf{d^Mj1>de`NDI=L)N&4n(=d^Z6thh4plt9QAHSHP!Hr zs3m@XMYp{YvL%kaYqttV^uovfuw{?66fYNJWBc4y)hZl$30ZB~=SrPL^45LWmG8w2&9B1%(8bD)`7NBh7Ll{ZclugHuG>zq=j-wA zu?0tYv%XnZBWNs!jK?DE%wcywvU)k4W}2}o!;!C6lWTc*;mQrK^i@x*V6-Y@9dDE8 zG}-O8k+G%R+|DYz?k{_sb|C5xfBVOt+J2cOEQZm+x;6Z?J9jS)$41~y;)Q!r!uU4Q zB8_LM>MM`!Dc12R8}%c{*wyM~kv-f~b)6Kak*&a`x+;g~tik(HeNPfC)&0lo$kAz9 zQ2p#}PS#=HYU=UC*l}^Q+KaSRFQ@2e&z}4JK*wll2dqP{(WcSCd?DQ?f1B$*j8rld z?k1F6$r{_qtGA8Q>-~{!6vf)u1|?6ItNLHBUz;H#WjPN*{@8_s=0KA4eH& zyUtZ%y0f|=%jZ|!+m-W4f5`7wMV@TSY$unsHhbK5gJ*^kq^Tk87qDbEOHzmBdVhMt z-c^^g#cF)m*7=e>M)6@^HjigZeRah$a+~uQJr;bKcm23^Hu`h-l%}JT;{@ZY#h;Ep zVyjJiUeB$Ua{|NI*=FmYV3Kkb{y-gc>f7K&A&-3$2n}k+3 zAJ34q7*}ZbxB}E4PTLwE2kH*t$&q$3Sd4cd##P>ps@o_VoWyKt!*5qk4)iXy)$v>< zx#BqBHJ5f_W$MjKslxP9=EiBBU2c40e|_E>=`h_-XOn8NanVZ&iYuyCR`9ry7o`uR>u_gIwX+lkD`sXZ z4X(S(kr*dwr%tb9c#Ndc#SDRS#bsl+7u~-5b4ynjTVu8BXuk1Re0_f{)cxG=xAk;l zl#R1ngmzj_p5sj4>@U^~WO&I!*c!z;IqmS~am)_-u^T;tf6GuE^fI;26V%Ro`4!oV zySKMD3x0Pz=x)2!dY4$Z3-`0SDRlp_*fxV*?2%5h<1)BAGK?Hag zD8PI3;vvI(@40%_{{Ev=Iwhq;Rnw3RMbER=sj$U|IC?$K9s~2g>uP1N&F{!18jxXg+0s3O5f%ehf5IMqr&7CIbx&=jiSTv&VY5r&yZ~zy= zF!#}CvnNx>j4JMmk^aeOZ1v9GXzkC6cD1Y7?=uMxKDt)N%VQatChHI@`34FWyrc z0dQqHGR7HqWLPImw(QF6rtNyF>ZiSp$Z{HEe-C9@WSHyhkU(bW=b6G6l+d!|_%I!E z%?HMj6?}T+b=SM0=@l*BvU9NAl)4IX`kLo(gEYMY&irhoXj=kel`p;|WcLZ;VH6A$H6_#Su8X$rXkn{hD)ye=Ph0 z>kbD5#LJW0F=?W3+HNbSUtu~(PBBi#{*ovn02X*SLGN(2B9sj>(f98C{M=TEu+{F>;)FZf^7JEj4I`=Yt1;dqH59EM@ zOM5HWmBfoFo5+Z*=k4S=VYk?43 zza$Uhs%K@+Ao6DUj>`s9QwOmxy+|2JgH#enqOm0n(rY9+Y-Mg)TW3G4e7BXh?_+007xkosQ^{*QABSG0q8jo0rc-KV#g*M*V$^xCnSQ~?;6SnQqE{?5th&-f5yklr&+lUA1^rc zINWDXf)7iy;CV;~A(y7ng3x0A0?BY|xKH+u)GFlU?!Eo_DXVgJE5J?{xWpNUOlo(h z09PYZ7M$A;&OFY<>}%~FcqsBFT(=$hF?##u?R!m50Zn*)6l9Cei&2#Q9AYG!bCQeY zH=?XVx(GlUzdCEYe_6JL^=a!tev*^K-Y-jV@zx4q>_z9hDSA6CI$${fE#NGiXMxI{ zaobLfPaNVpsbUO9B`4qv@x`h^&qu;IHCu*5$i{ASifRK9>0KKESovIz8-*h%7eV(mbsq~-h^(fR@_l+WBNtO617B4S2edRC7zWp2qNe<8cQBy7$dhv?Ug8t2af zrtQJu-~pEj0e3>~X^eXhn%ki?T}@PFPE;9OH6>xhV=lf1c|_#s`XO` z6!&y0;B|7h46r(+c=>mxQ30G@aLnQquil7nEgjUrujmuhHXkCOejv4>Ee@vJ_<-bQ z7=l>c18C4GfA#+H^h!+K5hEW`u91&Z!wHHwAAD47>xnA3A0+yt(74lL3k;oYD`+PU z&=~SzAfc{6>z|A-Kfk93DSPa&$LH(!+tAY5t}*SG zMJ)FQf9fBv`(o#pQ+BzEvD~pHC~^k6b~3ViFdU>h6BJ~B+mtyC^VB!gs93-KKD8sN zAepK(T1V20D@Zv2keujJG*s!#lFPs`H;SU35y(W-Tf@)fotiY0d=-ojfyE-hn#caC zTCIMVdOoTu1rP81C|w58zDIf8F9|#=NSY(CouYY6`f*NU~WvQ7oSX zviSnmPEyek(zU=7>ZdsF#f_6f-$o|M<>#lIfS0?#&|uN??$*xbggQE5bo4MLbKiUD zu^!^L%Lj244yjO0yD3lx4o4u04*ribbQ1ws?hVms^T|ONz;DI0mH~GGUiZv%)!M{b ze@+oN%Rda5>egC0o!%km6*bWo%a ztAofLK2Jj+kkE(j8Q2g8Le@DhzhTnT+he`*YVyo0vjnSy>CdXl>E*wIabKxmOR|lK zT%{Ds=h5ZJpA=evNkh(a8=wAAYX2M>`4+cHL*>$;Mf%^5I()*PURor+DP4Q#_bI8#LEV zu>(fwU;R>Dz$OEV*xmDDb%xG~I5<+)W?0QEJu*tr?3(eHHeSu!4NL38e-{a2K{W1V z0bLbDL<(jP*UI`|dcT?Nt}{9luW02P^VsZ<9-DzoXQtZ_MG_g5Wtt0io4&lZRxX(uy!4y_IwhY)$M@yQ z-Z)uL35g6nJ#-HB>?CS=e+x8zQ*FQ!-1fB~lSbUF*TD7Nx$xe+ZT6e>(2PKYp|Ye2 z+?mpuqXa*crz9=)`MD3uRT~zvCuB4vgSw3en(Tj}o0Z_n+t#`OP!5!^Dp=D8-Y$MC zn1=>^UuMi>B_}=?ewaw1sxqX9al$jtD=8zCfNbLhqFz^VinW4re=@5>Qjnu+4wz)( zZzR1R6Nq}8vC#*`Tf&Q z`jrW6oW4_#nf+#82rLOv!rIQTw5cN~p~3L2B=MxpK>$pbh6gi;ngPXPFVDL7+?$9Z zHVq$BHiJZWNL@{Uf2Sve;E(mVr-{rY6tqoST!5W;41*5o=Hu=z&bV)~nQ6v&@nQ(C zvDQ`$fEu_+jqhMAK6oB9lO@;=$2-2W@Hu`OV-GkZm|Y|58sA$d8wEExQmjQ?`A~3J zTp2)?8e2gpO1j_no)sh`hpqu%u=X?9+p~w5AzU8ch7YIcf6YfGwXiA+Juzcg18&ix zoHErd`mnwb`!%0x zK|hwgSwy}qaNPI##l(*R%aGi-$M*+yg{e}~S|T8U;Y?!_n%E)?*QSoM^i z1m*s37a!^KdkP)LXyf9~V*(b)hR|j;XFXR$%~fc3Bd3kZEm7}WMm7q>v&Pwl z_iYgQ{akb=tAw zNuF{lCn*%a8Q^9EG$b%aqt~J+lT5Z%uj*LhF(A0^YONZTIXIOu#sYhS!#+S2e^lZk z_wT7vrbCOeoV`RiUnWcI3_%~;l4F*{fz6JMJlZH_aW;Bg-)vgvWQRq<79ysdB)vJG3ZrE4;lvPz9Sf^3@@9w9Qb9NtYrcE&_JlD?h!M zCC);UQr2%(?*UR|ZZv@&J27HLe=ucOxN{=-zz;wb*k`$-?KDe%7f4>J>px2vq56jp zYgATFN=Miq?s9qMJ)+*raOwz$hS2 zLSu|}8#~Xh;<_*o8SpwIkva_T0H`u7Rjsu(TZ!DKU0491f#oK>JOhd39ePO^ijUG? z&mZez571U9i;Vsqz2nj>{n;*4`~XvnTu>{g^?c*VDu*z?w`dr3&#vsXTv{+*w)4}@ zZh7*FOnN_}s0D3@=*awDe}cmxm`(X&;FHKd&n%Fzs984Gws@L6GPJzs+J!j1fEdQG zQ#cElv?5I6yoeTx=^C$)cJ%~MOfBFyUmG@_$ltJVw0*!(T2vfcXkJpOahm={I!<1= zrS18AxKdq3=~M~3RBZFjN5H5!*)}^>a##6Hn&e>EIJ$fZ{zN?Oe=Tft{O9;kuw-lU z8%OiAr=U$JV3bZj1Pd5)(QU)H2xjDO0P)kVqLjDS0gHio+v&ZdKOS)BLvR3igIRox zCK8Z=oZx?D>dT~pmeIM3ncd)#|Kg^Nz_+qZ! znvfcf0!-jQGVb%@GYTY^TU$~ zu-*`wg06;ph?*=4ChwbNTPFJ}n&V~JEJlh`%}zA|G2FXEs{KJ_nkb!06c7`N~0?SXVDdCtV-k zm+iu80rfU6lRyZKkVU^CL_^uabg%UworS@zj4Fdql*_v%3m1HRVt#Zp90)Q~EXFsn z58`x)Lh3}mn3TcY;Z(85QVS(PK4htj4wePOTHX#df6O-K{mZn+vb6r~Mqd;!p~@pcio| z4)7-$umF#%%|*}AdzU=|VI*)jzO*MnEduHd7<(BUadiPVnk8~nh`pB7pOadr{gvF^ z?xkaQe}qj$f|cJ$#%+Er2kpN;(Z@M$6uP6Gy2yyItvH+~in7VxK;Z~b!TcZ;*;9fskR|l_Jw` zaMYcx{Ke-(sP3bZv4{SN3Q;v@Ef?=V$^uKnxv?blwY{d;H@HaFR;jyYShjfgphCoz za(?y4CVxlLFBt)HTgg`k%{84u3WjtFjEi(aF^83l6_TKg@2M!VylKJ1RiTGFOlUPe z*w)2Ki2Zd-%Td&-k5BsTJuhI!Y(s(6hTBOGee#5zk$9dQ&y-3wI)7)iWX`5f^6Jhi z`&eZwjcTb)e&H@5L?*UYDA_s4Ww&^s>1@t}WvS;S6TcH%BjSk#u?- z(Vz~KBO`Zpq6VaOnnJ0LYr&F^5%lpe(MjPMe`F#3@fHf%91@9(^rP!bPMKPwlv&KY8Nm5nvMq*#nTZh}|N=D^hD9ZuKR@bi48 zwAY*OQ0*Taw^3Qef28`%Aj)FUZp1u}hQA766~J8Zw^>3KNv(!<)EhOcsBAOQ8*Q z)!h86bEZ)=sg6Uw!C2wfC+rX6xo4Nf8-J*NeBTJ3pu!|xQ z*Whey&}s!)M_DJ+mxsr;xIjr2Ew@NO(}elEL_NGDKU!q9NOX>>HB$pglN+AV)AHt+ z)XduH!6JgWB97rQO6e9{HCcDu!vFf8GH~Sov4Q`s{@*!T=l|qrdHeqF9Ie9q4=nBf zCqZlbFZBn=UH<8{#rSWn{yPIsjeq~qhcn#x5BHY*)lt0R`^9mLCeUALBm9?PZvPRA z83bZFc+92Z56ip$>A~6U&peJ9aZ8dV@?Sb}{y(f>?!WN5e;p>sgD3SrEDghOA;K`s z7d8B5>3<4wv1b@&`e!2N|N3K)7%(|R`llCX7_O)O3n$Dl+)rWZ-;qi%RDb@_IoQ9Z zh3T)A$`T%a#P8PCG&QSC3{e1sQ>GeO5bY%E0f&S^o|H}WT>F)26R?P0c=B2p?WgXNA+#<(G z1iCW}f;uJrG8|7#9;!m}5P#1=t|uP;nVi;xlx1#_KWaC~8|95Zr3`4FF_k85MQw;b z%2l(>nST;A#iCbjMv~#Yl9JF;ro`PM#KTtgdZ?Ge{?bK04wPb?BVhh9s>jvZsw!StZiQ=$^!`|6uE{KQyyDG_1j9v5md^s3-1lL9*_;QzhfaDRlUa+<} z2H;f|1WICBn~0hu`+v!}vRnJ{>#ZMl4Zoc;1V)c*;r8^o+CG{AXKt#qfm$P(vSMy8 zbR`!AdC823#OK1+FBD^p?44u z?6dYN4s2lm4%dB$xEV|4Juh!#sPkhovP#k*{Aw=VXk-`3j6Ts^`5P?S-v-y`g_55V zjfX-&tid$kl7GGfQ|YIZlU&a@taym)5^9g4Wpy8b@u-imXZSgOMF@A@se)jK9W_0; zc1fm8GJkza0hDZE1Ii`NO2xz&#^sL*Z{}jvt}gU~=BjAR>as}vs9%!)CaAU4J9GfF zkvL*5!>#mZN@D3ypr99rItn0f!9C|`hRL@a3K{^)DTug_6N^e)_PM7 zIN`;{81Z9hslF~}JJ{;&QAkZeRj-s;sOrZax2PJ9hPZ3p&nYWJuKwh&=2d*FQ!5v8 z%9|iX!ghM{7uxQ~{~j+3tsH0BkyAMe;bSX36dw&^$B-(Ja}RYKkI1={Vx;ek-$4iq z*HQp~!+*u_r!=Esy-83^#^^K+wnOP4IH>lqXCVC1H5h+IgX_tK;KCqU#QBVRfC$h7 zxHJ@zUcmeI%|0zlwpP6{yX~-|0txV$_`d*PV(~%(f}Xdq9RNLk8i58WGBHF@G0{Ua zm7U%EJoP!}Vqox^?bqsmGYf?L@X0e&p|EX>XAj!UgE(pWt0Kf4}W}vSh^67 zw~GQs^EnFTYV(2o2GHJdMp6<~>&Q=R+f~Rm_23A$;?R*T50@Gd%09%^L4-wY<$SHz zlYcYRV8(a(SW(M3dr7!<}a|Jw;`xM(+-yX)7S<$rx1JuU;zB}fWS?Z^2*WTf3i+g3Bn}1e6 z`FfP{5We7k)E`V1doHPh=3QK7mOGzzcsBA!GQKsToaIPhha;D*LnY1vIv0l$m1FmP zUwz-c3oNdazp+W|Hya^-X*q7VUK7Ja`N*K-F!yfERVtudjkw8k?9dk-&4PO-;FHVq zXRZyB&0eoP6?a6N2AM<2WN)94_o;wNJ1O+GXcJt*}%?P>A%yFSbg9Z;C8wn*Mk zB*@PgUfhF6r@Y0k9;k);a1?>gM0hn4uW;I%o{rL(|_JwMlT(r1Z8Y2F%ILtJY^AZbyl^7CYA6&Ra> zI$wYS%tbk+edH)9A!f1BoB)p^op^v{1d>qxGpetonjEX6GJSwCPd4E#KEkEjBb17Uqvv0S2 zqVaY^D0KFW-3pG8@}QTI7(evXb?-$y0&h(molG+5F5dr2o%zIAP+NxK2M)s$9Oc#8ju%*vM#5e2}jq8s8(stlh4 z3Ty~Tc!R{eEVc<5%(hJ!n_!4~?r+K(wtK#>l74gquSV|kVHhKu z@_6cD;-hw{Fqds>K*vYBPQVo*UgkH~#FE=!jtsd^9Z(l=$gxE#d9ZJ>&2i$!*uX2; zcmP|)Vbs6B_e@O0^@s4?JHb~MzHoZc9R#RdA)3WS6n_`zA$s-dhz1TpQC|)|@nI3w zxX0CWs&Dr>-Q?`>rwt<_cMFx`H?X1etVa()Z|L$PRf((-sDmnf#F0 zOWc#JuaeY$(7KQg!0wT_{RSLMPJCyx0Zxn?RZDtvfQAN?N-S8YUQQ=ilAkzmPqk|B z^OJi?Wq*;q|0Mzs^Z}>sa*X@j%N!^V$7f#*DKLP#pp^(uhX9aEFrS%V1p-AL1#e=b z3j~0oTkU{1>gCkUktgQ2E74J(`N)o`8nGjlLB&8)>%G z8#zZ>Oe#JFRJ&w?sqBmF!Z@&o;M(rHJm{@xz<)Kyw?1W6hiHch0cbOL_0y^2Y*z=x zD-%HOHpx7Hd?WS8+_;wATXDBo`536Gv#~>9EjIWA>1oFG3B#AGkTOHQ?s0XQE0P_Z zG0BZ$_g*4n2iXKJ$yc1J49^T1Br0DVyiPrb1n{arn~efENI&@LCr7F!D?iZV>Nl}T z6MuxcFvpH@f_J`8 ziYpVpbv(|{8mza;hJ%)hdXZGS?^>5@6@Lf|fZNJ@DhicK1pXTT_@S`{S#0nFX1Z$Q zsVU+^Aj|o{Desw*MS&@#k8u7QcuS9*J$C#}SX%;9JevJF>!rb9g01{%G1%)$RAKAmDPR;p=%Q4-N_0z9Z3pnvlz zKX>#<(TDn;@j{R+FCwu9ZPxrS#S;lWR-d-b4a()&b<+j9H*~2LUeWOv3GEcJpUlInnAbV}vj`WL~z= z=_!_>?Aw3h>P_1HmBF1;bV{u5)X#aft5J)Fi8)FVeF%a{cD~Ralh0RWjBxvIy9=RiDq>+cobu1@_?+(3(xm=aW{RqWskx2UiOn$C3o-| zZk3h1cbpaQMadNs5WsjqErc+EvGf2&-Uf8b9}`1Q6Am6%=N%&QvJnW+1~^3{mz+O0 zBfOKo-GYLHBKQV8LHREW`+q(zPcR&-hMjIdCK;{om|?Wtx6$sLVPS6VP0>+V@6Wsk zbV~a!gTS{EmRFh~u((l@TK<%qp^quGVU&0b7|BVoKBfzSEi7$bWvIM%xvS*Q@)VYg zBy4&?;=eD0M546HH_sO6@ce zdd6%HOG0Clhe~6RJ=9#C-R}m}WVpU_z2s#gl$3Q8rnR#Q4x4 zM?{POpMV_Mdu~Q1o1x2hy{&wpC|T^kdZXdImEA!X#b+CZ=BDnw^ARVZ@w0kfl&yf8 zJ$OKd=OoJw=zl%wn?j5Ltz(NaNJv^Y_G^;a@(h@6RUah9I#zH`IA2!AI}6H=lS>4H zf#P3nrSp?eLr+VXTl!t(tz;YK=KgyQ`S{^81!Jq|ZiBX~1l2tDG+Ys2%~fcwr|fmP zR}MKA?UMw%9AgFOsW$aYJ_1nz*THP-{H9P;A4nB0DSwEqX_^>|c-d)=2;yFLwP$-^ zMwXD{h@rna2ZG=qBm+O99jFPv@1c5F(Hxq^jEsPdxb@#{!u zrqY8Ag=g7N3r;cqYwpksml4h+kb2wmCngpQxO=8`O%xvo#DAnk-iw2e?oj!Ch}6aJ zR4Vb9L4QTKwzZQ-ua<@&4fA4;o5aTcM9qs(;ur^=S6rkejq_Sn|NHj51OtV2W0} zYcKpdxJLg4A{p@M?WBb0|BOm~5rPh+x5s2YuPzBqtPZDRVx=h?&W?Dgj%tyYZPs98 zU!Y-@0LQkM_a}m-u}0(bS-ED8-p!vgI&8O;j)N#28ZuPNfr#FK-lQ|xIufO zw3!#zoPuV7@kFDF!|J;0!3;SwP2ueBDZ%1TytnGR4o!C5AsC;D+0h)I+^D*>c6nfb zrm%ZJgm1(KxY*YDiZvXs7eel?`gH`;IuTuHhKEp zFQH@n;O9oL4jlj8Fzt$G8{n`QcXmsg-kEe6a-cloC~C8&;>u0EcWH2W=NpXA(3v?5 z)TSGXZnrWGFK6Up(8269IQVhNDw-iN5{YsH(R5`>BdRSg~>oxcwVhz*jpj(<}~ z-xxv=2TFazeBI_Zh8TX?6wFo1q4Wi=lv;63h(hBr8Ycd|AWsvK4Zgi41S-cF)m;KC zdxu(38B9Vy)<;KfsMWy(8G=Jh2)U0_UdEEC&yu}Ny+S}uA0YB3UU8A9N39K`I zmKUo}U*chW)7yj{W@Vnv>%DBJrDSeC`*{NGXOpD3h18g9qAxOoi_O^sg7tSUlOEG& z$x4a#16!+jxPgzhfs&6z2M)Jv#9GtAbiXxhiKBOnw~ns<6uFD$Sh3KOUVlua&`+m^ z-1V)apg`w}Jjbsz<(;G{xJAa^eRs`r9=~WeUsPfs~H6{A9+$FX5PP zD$Z{KvoXS>A60r~wga0A)_>Z39^WOQfKmcHu{WJNQ<);Q!>e%%?De?vc0X`0l(xt? zYMp*3(YZO$02J}#eEI2;jk4(0S7=E!f>Yyg0QVt=8O7xT{#pl=KS zETrz{%#mXPn2h2P4VA}BR(3&it&?~6-YCY?;y1j09Q4&&-lF?Pl%9=?iG#IkTkTcY zW!I^Lw>?c6312(D9}>DOYv~(6EaK-mi~#^$y`&^l8OJ{>HVaTjRR2iz=|0}%LV0wQ zz9e`q_gw9+88vk927efjo5wQBe3#I;rnAU22=Vh{Bo)wrKk;|Q+jqu0LOB<#AtheZ zvQQm9EznMnOFDwOEtWaigr56>TwcYeZiaTsDu`tUc^a zWMmgFq}mfrtb{tDi0TlsQ%61CrPKWR5=2cog(;Ukb=31zTz^3m879JOG)8rU7$`8Q zoxo!>4D3OEV}+mUdv^}loxxF|0OQl>aC|`NSR_)3%ugID>wrqMY7#kvqUx6W17yMn z_PpgtxAW|;I55XZq-7n|dm#d@2eFs)zL1=`aT!#63vwDVe(ne;`AI@lZ{IaIba;~7 zj$}@hlpy>xt$!gaDO~$whx#J}RCo2KgP2J%oBJC2`PP<^VVU8X&Y@<7u)loIu-5v{-#tj6c^<_2XvoVNMNig3b6*dk+p&xb2dT5V7fjjm! zakc6`7f5I~#TB30h^oqvHEFGlal&>M_WYK10gmwlH{x&i;%EVBlQYeK~KF{5zo zjejcqp3g^_q;5$}MxU*kq-xBJ^%k4r6`()yVn-W4wLs(;+ z9fDV0pQ+;Hofz+VvA3;P^cfvHMEBtIz8wuFXvw->2|hcWP-Z>5duyJ!C9~^$e0`I< zKLRzld9pmSAts+**qLKPj6Bm+f$1z;46U-CcWH^}tsiuX4}}qQL5E^2m1$f3%#w3k zp|mpfCR1mt-1&Tm8K)2WOE(%6e1BBn6IOm3SE#-(_MVT;^VeAQ=61 z@`o|HUOVp%Ugh0#@QD!0i~g9#yFbwaw%mi}+)4|1Q5_1){>Bd;)_g-{)13RdaEKW=vmYmsie0(fYHUe23tT zaKhJ7Dyn&+-oCV4w5z^_)TnotZOxn5Q>WNSACA?>{PPwkQ+|TNwDEON`OG!hh%OBWCXgl13#N-Y49$y6yP%LSn}vIyJ313=- zRaMU(o~Sn2>9L9T({_`}u4m>?z-if-fzPvv3^^sJY200_ZH0KbCyyG1&~WHcCfS0M z5>d&pJf1H4^IGmM@4U_dop`Q=F9SOpqH3~I`jI{$@jlzlyX-IWIx%G^ySq5^G%fW( zf9-E?#68#Ad_HwOXMZg$aRJt~&c$H+t7b!YAHemoz~?pcPsSCnIcAhk7I={WfcD67O8?K_2Y(V z`~cmGZDyB?XH0tC-@}y1A1sL9eI81$w9hVaZ&#ujwSQ3hq!_Mz;O8aKinC2U6JD;X z(!@X{UE18C{<;|SOye%6qd%$IP*CuM*1dUL#j7%%J-lDwJ{+J*ew$XgnA zN#+rh6_i(#>x^edBozS)t30=?xAgPFL7~>&9)ASRy%G<94dXd6)Cbzm)m424>5bXq zB@_G4a2c|*>y62AQ}j#IZPOg8=fk`w9OB8^)c6hJZTv1%nLm|rZnNWRJQ?nzKM3&QnVcn4vX3Ky(@1+7*3Ax$%@Hpp=dSlJ8^Bb z^nYq30T=2)$&P!Fo~CP z>7tmy9#>=}B0M+W_%n$PM1B@*)XN%edw+^Y)i0_qEja)pU0u|~ksoFzvM=#9ghHA) zh|=9udl9TeZ)>Xou158kUJ$>8VxNw;;q@WTwfLwkKNPPQr|87cOXsz75R5};6in0uBOZYoyQ{u^J3vF3c+aiYd+IjGbE#j^Y(ldJYxoOvrtABCn z-SW-l%N1U{Ck3oq#qk7#0q%{7TT?JxX}YnSQzCU{K1?ClXX+z0B6b5*{I<-&UZdWc ztk5d&vwHvFdPu}s4nHS%Gj(z0{X0^(+=iEP5Ze#b%$QzLzB9c-%URV8ECu0R4elX7 z1}_pa+wW*Wy$$$kioX9&a0O$qWq&=Z&vUQ&*Ed;2GP{;ujqouGo;|}lMMZv?nPMt&11+i+LRT4{NhhYF>k5Uq9kxV_`l5R&IqN2-*+3&v$e z#PxG62Gga6FhdV=n!uB*8iF>!o$()9nnN~7&gQVty&uq2x z$Bu$*4x3LfW1g(wx9&AIVplnao$!-vekz`9qUglo%zbkFn@nBO_(2hG(G3#6GHBTO zr=+m`_bq0m)}AjEWG_1rcmbadUFdua8L1~;y0|+|TdyG0_1< ztL^wq$j^S9fFwripvcchWv-8>u3)`h7o~ppuh|fwi8%437|Li_aNJFpS5YU%Q~`An zwr6m$qF{MN^M5uYA2+AR{Z!mPTtB>@v3cs{CZA^#c)4|sM`URD6qW{4Ve!k*p0j4r z+-X>y2B;%cDzuq znADlO(zoV?ry&X^lnt+|D_g0UB%jws-NQAPUpb-NjDOYa;tPeh=dpP28BSHvb0UIt zWMO+c6M2Ts@AuVHHOOezy2jf4aTH3%;`IbVy)n0TEu82^y2LHbV`n+VTOUJVXK!1> z!}};E4D*R5LuyF-Q=cy3<8BXG72?Ug2d@`<97xF@ec2~S*3v8E!6vf{ zfwVl#j(-Q`X8JEoP|rbqlXuvi;7yj{P}XF4YU(S&t^S<#8hL40LHFJ-Z|RqPwbbi1 zd#$a0>+k}fLW{Z%*iFXC&BpmpZ5r>IlEc*Pu=BnN}@NIor1jFajt!fcVvzG)tu6)6kGDl zF(@-$G3Kc!xSr7Il4IsuItk%Tio@=5o$S}MDoJsTX3CxshB?j7zniTt_^9&T+qwjK zcYnDTZEI<*@qUwGRcIXXZqxB1*xY)$eoUYjC-VM8Ug@Xv=QBKgT9DddP^OM`7iV`b zqtuj26g<7gHRa`8??%eiL8VBbo!@Y{UV*bsrr&!>haVL^TIw zUXbB3oP#Z#(BspxZG>29!M9Hzoy!f6%zwM)2h*by+!vj;eO!_gvEsTEIQleR+I3R% zPqpYhY$lFi)wsbp>b&fc^0px6JXp$vSCzr}$J#0Ha0)ITco&+doM(1^Gc$L-yt(tE zQ@DDFcynv7Im7UtL2h^CJlc8Cc_!3$UjdOhm)55auGM9@Y<(8CLrT|BdcXVRMSmHg zy+;q^o&Cux&HhdMdS_RWOt_HQd@ewz)S8l^Wt~gJowucv9_+BrAO z5=FH;8}J@Z75%WDedoBE(2=o2=+B&3Lv4^6Eab!sS+wuSUY7h6-C*3@9$8!&RJAXs zBC$#7aT2%y5*HLZWVmt9-JFu?(tmH}iKgY9I4j-dbv0N1GU*~sJ{s>a$?J9D+SPlQ zjB$?Mdr-Wv=hdB)=(?M?H_xt$a9w=weStG=+a0&A#%kp?pY!xZ@jUL%GfKLV!>MJS zb)@3V^`?BXp%|V{oZ^!ayfJiZ<*U$$YEYfY&%zntivQe6dmM2QXi$HzjeiAykJgBQ zHNNL!0Hy!uxQ`6@5;(_C^SD;hK;VXW7q}zLfM|e}hf661prBX@C^8rp_El;%SR*b( z1_N#scS0zKJjKV0Gea~AEeglu;@n}$LP(X;ZK(jOvpc~{!T@3@=TmMK8lvyT?! zdggPLp9<0JZ+4^!Lli7_h6KV3eLE+G3m%@UG7R{uGeil#5@A)VMfY&h(-KQwb;;_xi@dc+@A|FOU1q8|?z9sz6QejekJ10XGbJ$@3Yu z<2%OO_194>_T+u^2=aMzpWuNv?8h6KD;#)8CT%iAs-j z$}JR;D`e8to;m2Bfsd7qh~DgQ#!pL$NBqlZ9=ecgv*fwfk&$I)7<5WK|gvy}oZJ-M{HlAD%Yt>}7jDD@B4anzRi7!0pP)7mRKF zxO_l6tJ0IF?Zo7<^%k0BU4B&Df9BhH;W9^jKETC$!mGV8(U~87WCueBF~KO4RrG&VTM?gEvrgCLCt3AE3^| zn1<|p#iP>)>lG7~kRp4dj73-$pXa^VMD=o?Facxkv$25s$+89DpomHWNKF!vu)gaK zUQwc7M&g6T%U!o@km!-b$%*cQ6)ZXr0K#?7qy^Eq(s9JHCFh0$h$J|M(x5LYw&BtE zs0)2qok--yhJVz>>*prsfE;aEggk=gTqz^~kRq#1vF(;ckQMIhYQD9%xTrnEDX=F_ z1w~=$H_^nTuvMYVqgK)*8ty%-*z()4&1&V{__;^fTg%k$!}FDfqBomDdp4)Kix*RM zTzF!bTyZ&uz)#*jVN|G9rDm)p4{`<-@8j5-)FhLThJVHDrE?YeeEWqjs0rw^W-_8e z#Ti^%Z-_kK!`r5kty1JCW}B=`nGTbz*47U%AJq%I-2!`bBYw zhZ$EPZhu{~iExDHO`K)4=ZbK*(@wJzdVhSp6Id>CVD^tDDqgN~xofy9j8cAG17nUS zdwNR=+vVJ=8}Ws#15LA*(2MEfEg1%<}Eqx7Q}Q$e0;>15WcO=0z0E?MJYmvWRt z0YB~wR+=;7AT_H`Sz#?l(eL4;t^4f}O$`1-y2m$<$a%ND@?>)clGMT{VnU`Jz27f| zgMZI^SG5F$oU8Q?8K}8vOysiRJ6U$L=$DW(o^N+g+`{aAs%P&@DZFwVy_2(!rHMCz z$j4=R$?v?=*r;DpI)woF+0AL7I+h#-jYUIyq~xpoTnTT81bcLe^V*EbzzRh1V8xlx z_28lwqkJz{S7q}@K1ZR#@%);Zo2m!(yMJk%UD(*LIw@=oc|R98=jpZV-KJvU{M6^i z4H1(((}xPUhs-Dh0FW%*i}CQV+YyfB_0)(fT%qLS;Lp6KK_EAg_) zi@535kMn!KIEl#c?#{QKb=fj;D<gy#RA4-SJ9E&+wi z=XH^)XloUTeoK|ibi3M;;-wRaC`(t*Os zb(#zQyvNJ4cw9WpziJb8Ia_rU8!vyGg`4*&KQ!sT?TntcQGZ3@3Nl=~w)ZN!^^&wb z#HI)&WxMT_XPh1FTm(=1X@3M2@Nlo~G3~im@9mlB3rCmMi$!UFH+6ONqoIPH-%%yA zsCoDt(z#zUNsvh@U(q7ZYv}e=-<}iE9U`y4S^)-Hg?=Q==X6~~`K__4mCB>6yCX)ii7NrdnFxmS^=*H;>!t`eaeEPO$3EW(n+>kS z^@{E%HmmY=qT{E7i+><6Fi~WtyFO&B^hj8Hhoeu@65%mXo}Jzvk>U&U9OWtG;HRFd zYE}gEnkI!F$%Gob2?L0Gr}FECb5HTE_A;{s+8F2Xf>`f^TFJ`kJ@@sl9^b3G1$c5j zdAXqnrpg+m^Wg7(9rkP4UvoDOWSD-Wtz*U)S*7#*)&}lnEPu|s9+gLaqHd)~T;3FT zUoQ#$!P8SkQL8HYm~(ob*}~R7Js((CIW+=4VqPz5lKO_e(+ttt(Ml zAt<%?&FHVjdr#k=M@@3RI;g!%p3lp0D=l?QN`#(y9k4>Fun!rix(Wgv52ME_HV}d> zek_9Rajb|xlYiKc+J|7@g@?Y4>z>O>|J3(AH)G*l>GgHn(M@c5jnmCpRM8BcZkpE( zO4=j?S1zy0dm`koq;>E$oph3%R?X%VvJUsjmSW~P=<9jZlZ)lP*{TUOW2=p!#?+`J zuZ>@kxh?fdPgvs~w*=qZ2XipE`GqvQxk&IcW*x)rDt|$klCW|dc`f1Xo)~qiy-BM* zxn~>$!WDZ~`#SS3V}iK&uyyL2+egU7VPfFUoT|%8U*mP9QF%Iz;C}ZK$@%o_uK0m) zKVzr-U0Ll;iQW|_-l>A$dW^`-^c?%=+i`;a=qXSbBeA)>S+)RG1%ED^`Z>C2ebrIo zwvwC+QGdc-n+m`iTt{2nX_eYjd7y>#8JmW8#%9T;u=Cqb>Ug)%CbGCs2XoQma+Qg% z9>(^SUihh1oO0sm!lwjVW@KRSIg{j9)%qlPonL1Yk9Chtrh_flt{v-8w%EDl~)sD0pETjSN z0)r`k>XS(1%9yR{NQ}$anZvC(;G!Qs`ty(g7k5A8=J z=*i&DPm9m@rdtz+E|_b3vo(5^ua65qx_=#U-h#FhsXIf9^P|M=cXJ){H)CNim4f&| zO_IuAlm0%Z**x9$IKIB`w=L|}_s(4RC8Mv}o}S<7^xT!kDn$d+o@RwY>9!Q|3{RAk zHv@Cc&X=l^ww@sDyQ`dJ{weoS*gwylvBe$f`sw)|sCn7UvwBf;-hF?@>C=Hj=YQ~w z3o|C%?RKW@b$nq=uYU5l7TYwsD{uAl35ruC6U5+N*i<|vtwkn}4HdLt`}FBLCnx2E zuDJ9#6DP{r-zk~Afp^j7`yYRiL5X1o$45HJ1DfY_)<;58_jkTlWdLM$KUY|K)U(Ft zO>IozP?Z1a+fH`Iq1_a32q}TJqkpHb3PB&rzIxnU{uF{C9Q9typZJwwAI>c+KKBV1 zcQ|euINk`?`yD>E3B~g%)q@*s$@oEtd;B8PxFIeV1};=?hG|VG-7ajG{2itZ@i^ZC zW0Q8Gth;gRqIVZ5kHoO!;e50U9*dwlGr3ezDJP#Kn-(g{;ue4SP~sdcEr0ga10)Pv zfc4+KR{p2e%Io#F(+U9mApn@)_8sXe;J8K=M?VSDFpy?R*j$)$+35CHb$D9HdF4{%5W`F%Ri0C4w1i7+|ww^PbjtAKj({cAO(QrGk5OufG& zSODORA`$&67Oe)oN&q-2HwZzN3KXqJ&_CWBkYSfPKnDX3vFP;NLR<{Oj93 zMwX7wC-B?p1OO;7$M@CjX!de+ngJV-{iF2?TmaBle&?NR=N*0PdVjR(rR-zHyn&Pl zpo9yC0dNEA20#Y*1;{T%tFV+=y`D*fi#DM`qFm`5T=d>>5vlKTAV`OZf?9+3p8N)Q zX~hGfLb$>rTmdqJjW`QJx)<0e6}RXZmkLq6s zvSZ$7qxp`{<4Rc)swgxX4_9iv=C^2oW#kJm+VxT=4HxzDCVz{9jo`HK7c;D%0Jj6! zy;vPajaX8u=mx5X$6H96j@PxB!7G#AmUo-e7mtX6#sf-)BAYsGUUbnb3{WrDSq}nz z1`F82 zb|n`ZfXnT-dw&n`fQSQyW?u_X0Z4Xu1jWy~;NN-MNP=V;fNHcA;N3y$LJ^KgMrHXv z?2T9v!y_0hS`8oqaMD$P_)T3rU#b7tFFFf|;sqOpLMfyjyZFND2j14+q*dkxN1eGU z)FSF6MnX&pmF~E{e-7L}O4R5(&g9Mrz>C=P0=#3KEPvp?<23BZ7C!35 zZ1x8Ikc-gvLKj?l)(hFSy@Gl66zmeUWalRpK)f?QqgPhef&BT#DByttBna%0{gfa- zp2ICa_K^4J49jddd_vi?K-kfsFOg(4SqRH45SFZ0)`ZqU3oS@RrZYZ_YN@rQUDy)g zBD@IFmVaR~^v43a>2@z%A`KUarGLavv5sef(`zX(t-`H?$1Scx2_EF1%5 z`iOUNUUdby@+9ohVYA`K=k-lkqd>5c2$!6+!G9{UENu%5Z7U790NPJAyR^JzbcL02 z1)pvd-{8Q3_*?`oi@^fBJ>pQD7x`$DH7O2%mXW?;2)W=h4Eon^V1;2kpb-lX{DIRU z4f311|Kd&^;q|^e)n<8?&Pc2ENFVl4@AM7Sh$RpZzy4avUwa4(KPU@-1{E%04HzJh z41ccFEDl?5tSxi*Z#ya(sCSG2l?R!_p0EY1=GolgQjI(T`)A(rCV%lxSfV4A53D$W zBj!u~5}kWG74lbGQ*SMRqU!1a93se6-PH@sYvB>kv7*DEF}R}2-YR7H`**(J=)A=$ zKzaZ;9`n^f$#3ea4+@HW!A85@M7yzim4D&to6zW+(Rg1>QAnNz4nP|6JScund?kkw zEHxhBnX*Wnv=m&fp#UKMsRP|$XF=0APdMzQecNLpcwNB#>(AxapDWqP&VO(WZrKU^ zVjNr|5iXf1*as$NA*hCvP@1r+@m}>yM|1s9-d81B6<$~)7A%+bTvBm5D89U-hkpde zOmd?Q2{=Nw=YtXnY>AHfx7bdz>>`bT^^W=9;W7Ws3xz831rRlIot=N@jVoK!7_LnI zGsd6%VCSRz|*GD%)h zr?30~%M31kDPU6x!%kP(cAk0CJdn8~(1zf7|BA9yTaZflZ~sHl3KeBjX4?pMU$KZR0|j z@V;W~C% z1}Z&at*Zi1=F4u@TFoW3_kWhnkQ}NU|AlAFLUIR+5(p(|j`fr61+mBaZQdU|I~z@QVEZv9 zQQke$*rT)Z#r}VJjDINlU8iQeoOHtr1_Q$o@`Vk{(G=U4Zf$FBH^yj%s}wb3Xz!I? z(XTnNX~pyyLWbqdr9dLK)Ya-hlcK?jK|2 z;>u#jr^P;Gsh%gP!Y6f+iX#`O&3@XCe6Y>5T7H`xk^n_Oy1&?#0sMcv{uvMtereD4 z|Ihz__`>kRHxEC~f^TeGj=X>@q4-KC-HK{9=tJ`e2wJpc4!G`U| zD)uTGTSeI}^-wtE6@P6i=9(G(TwuQedozj+#Y`5N#U!*%$ zfqiCY08U4KQGR$?2RDk9H(65tKjV=~`55!?6BFN9A&tMeC)Xz!!M=1^jZNSNy-!P}34aLHhkjK@8~sHw5r{kY_Ii2SxsRLazsk~dvXUz2J6>z4tY;fLm8tC9p+L|>@mYAFgCHRuoU5nK~@5@tu8;kS%xJ0pA484CK=6a8O1?9q=8)=?vWX|jaD(u|O5M8-)!3{nxGOkNSX9a&DP6eFp-6eaecCm`EhO}eq z(Hbp*2OGUYRK(Hd2>>mn%r7TM+8=>0H<8B&l@)x{F0Cb^hg8xJg#(`Dw}xWth+6@H zAXQsF&uAMM6kFDOUROmi6F9*aO~M{QP1ix^*X+7n@ePg3nmM?}K{$+wMSN~$$VN(-)W8}+e&cgCj=l#S>L;|;s z>#+>Xk`AJJ^fV|tYF$=>m(ALWdaFJzf^MBSNO*NMaHvKfWvcGkM!O|YY|+nx1VypR zX1P?hAAXQC%U&G00v1<}Cz&-_aiL_$F{*lb1b%;ohyO* z0^FA$_7eJ;=QqB8VmY|r z>t=t3-IcFiFg$yI43N4+9_bs^x%z}2UYpgJiqL?;f^=yu!?a?AL&E_4fKiK z1gchKy&309dXqSPa%|k=%k^TN2i~k+14{_iAR3m*Esj;MZ@A7Quphdxw)fIL{2AC7 ziL>WYoq=%hV1ws>*7+<{ne-dS1^DKsKYZfr$H(WxU#vWQ;=qBQ{M{=XC4KqW_UC`! zee=VwzkF-dpZEYG+0}AugT)kWkSQCQG@nFHTd7<%#{#?XDPJav-A?MhtV)e?snm0} z8g;%br3hC`gP;qs^4AD5w(7!3gq_YwdaJip1)SGe#C4jtc}sSp)mi6LiVKoe`L+bT zQdl!ahEY*fOJ~$=Y1{HmVP#$h_?>^Z7f)Em*aT|B?Q!I6nM|jR{k)a~R9deUp)g@CkQ?~I~}j3 zfn?WK?>9gH_=#1@2Nyc#`aXYZT;o6f@}q?ves*9;75&sAoBaLUI5dz(->ec5Uyi-W zBK+cyP(PZnSDXyVTLlG9UnFN-)wi$e+e6?wbQUY04;-HlJN%ti)Ky!T!F0E z$d^`Khe)(IRwi`kg0*fm@U`A)Zdps!%e4Jvh-3!pt76DyM#&BY37>!M66EuddhOX( ziNHn`?|7Y6&n?HuuF~SGzI^%O9Xn-gx~S8^+w_Akx?tp%_Q!t?JpI}xlH{W<-v89% z5j?O|D)n7Ia`jK{IDG9l=X~*qjmz#6_7_&(eO^YHB|093>hdwe(bd+0p+I(v0dC7| z6h;-#yQCbus|NC&f#82ZS^h?~e6tW-Bh$!cIDVCw7|OI;wW(^G3HRU&d42g77%9^s z6`8E>+A*&WMqy`}jTiNC=x}BUo7@46p+|7g;M#9#g4Prf7% z-u1=LA3bDH>g)~bgU3W{OkP%{_C;-_R-xh@aTh`zo+7o@PF;T+L2pNk1$-m6J6C@Y z567lEc8OH60W}*eY-DGI3_+~ecq%T`2Q`l#>*N!3biHZikCxGdRQY!sb*jCqjP7ru zSY3cp>oIRD$94%2EUX6`&OS@X0|5Y5^L5)P)vu0&4sQR;4Sw^)&zgSu(UDhbWzY6M zJnC1Y{DYTYPW^u$j`rna#o-6Wa@0)O8*F+F*NCNN@1mm7a#pH?Bm=#QH$>>+T7x*) z`>|a!0aq=S!AzH%%vddt@63t8RaB8#=)j8aA%uPPF8zQ|54- z-<+Mv>K5Jea{T2Ph{b?9171FOM#uf+9}ymU!DYEja>*BU@niK?i%?FC@CA=UQTv>6tD<}H(@^BqFgf-eH zSSBo9ZU%pDTLAGl&7q?b&8v-Po#C{h=pf@+%C^4xn!lk^6`&*8-kIZoCRU!K+my$( znWS;xs%?Ft+2D(Z;s?7!1F%!cDhD1Wf9sX(8_z#}{$1Z+KR7tn5ijk5lV2V1Cr3x( zfBl;+JF&+;#xIxGO8t-j{Aj7=Z_V?|Ie$3*w+?^(+EzN^@!&P(5qF*^l}9{g{CYDG ztd|R4mZDUn81cMpC*vkV%H<(1isk6DHm~vkw^)cT;=1sx?JcqmYJQDOpP65p(@xxi zO1Lhni0Dv2%TtFsvdb}5Jv1p?#F$M>Yfw>An7tZYV3VpFJia7Laml%I%oNxLB2V=S z;(32|6bsAOBP@m%s^Ph@vP zD6*bnk&9qBIxMw3@;I1QJsHX zNEB(N3 zD`iDD3kbQ1C`zgX0XZu_;TJ)6d?a&<>2M%$Dp1I!Qu1r&$W@8B(vu+2S%7#58|a1} z3o{%MYRi*CPl;@WasghvfaS}uqD^IsbOhtOwArTQ&PP{D2KhKHl`Su#FOPq4v5n{E zqh90Z;|34;6)2Xv@JGHC4$L&1KV1A5KM#)e7aI@kpjf#5!@a)T{tq7LUmnIC_JzN7 z-BH)9$mZ?rFF*a!V6!*mesQ;y=(?IyqlSW0(wM}di?AYF#?ec!XUc>NEweJRY^sr- z@e>7E(hkwcm!rwrbj+=zBs+iTDhZ3L_SDT5`j2iep&06;yaB{kqr%ovtu`maD=+A^ z#t}~}*>Su0YTy=}D`I2XSTH40c z^69|^GKP&M#@mZ4J@~uedbx;XmM!UAaORJ?eCuQlyHZ?-wBWaXV&Q-C-ADMN1spu* zU)|*d_HV2XhwuLKweYtN*naDypEWr0`hWI*Nn`(Nyalwg0<2#hp51xxt#C?aleU03 z{7SGRZ9+d59exnFnFc<7;hM7GJs)bk^Ko0RQhZSA?M2pMtn!emIhalS+G-HtrfBO8 zCvM1rFI(^q^~_tO&3k`K|4)87yTU5hA_OKx84nJb74f1j7&(%KU08no_M~$-Qdb~k zAO>H*O%1#J?#KV+tgqJetD$`J9bs@3`=6Zki=Rs*2~~FJD!*Lo*c<($ZylJB4gc)j z6kz>oU_W?TdiB4Y%xLb0)`0)^&-x*x?nBP)go|14$$?UosyTn;%J>qva|4%hQvscU zWb|yLi;mld3ZU^zqr1Fw7-Zf#7)5L~vizzuf^DauoaL$oss82hGV6i13e5W_{s|PrIbT*r}=%(u$i%h_PV@$7$M_+sS=aO55Osn;|A)jtFcqmZeBCl1h|f9&P^tA9AZ{QR$0^`qPT z!N_l{A9`rzd567!IsA{F|Bs$!tlzcz*1^YKj5q4E4ihe%cHwQe63@ui(|@0 z&pN)CTv<+n+Zv!;B+rAH9~x}ywaK=q1%0y^=CzsH0;EDuZ za_R=V>VSWCD6(;lGYr!4(#mOqjk1bZ5H4CaX$9EA%Gl4B4Yp(4=i-7yMF)fkE0`w zzPjFz4*abv|JGlBxM2vlAKl_Nrp~|nr2P-BeYJn&@P~81Qn@NfjYTI=W`-Axw&>6h zA|9#sXlsir+h;nEU6<1~{ml2zh+C?A*3h9)&8fUMm)J(U<$6W#+iPXx7`K*d2bK@p zKXi`sZhU2b3bHjEPWDo}8bn8TI#1aZ8yJG4J6c0D?us9H6CZf9mg?s?9v2<;woe$+ z&;EZ5{D(UQzqQBj+W)E5e(JX$ERW7ssUN(7{oPLp{=Ap-vyb(SmlLRT>BnY&-)kU# zdEQ?=2RMutmvpJ|IwP)f$@`0=z?Rk8S>RDd1ueChQBLSPV{tAdl^w+_qHDXRUwTtg z61S9c1^y3v@3!nJudE5aOW$=kI>0tQ?Ye)dln@|}7DvNPhmCEHHny=1xTD{_LU_zM z|Fze@*6xam?yDX*1FS@(q|8(*^OM+Ug>^E-q@8I&UBv5F={rT~n5Br>J;N|~9mS8S z3)+wlYSyTBCM9Z39yXeu^?quYRoiaMU_y%A#a<DHbszp(507zGCI7((jQaEwOaJC;e#HhH_#>Ay`v)&!X4FStTQsvjW4$ln zy4{cKBJKu&qoTxKtOd{}*$`5Ul-xE!nVkEE#HjSYR1d86{bzF4BLi?@m{; zTjiLgS4C`BN{IqpEI^+Jco?j~&`N`Xly2QuqLVNZrm-OSSOIYvXuRZyEV+bBnO(Qzdtu z4-FK;URU2mMpO%q$8Mx@Z-K^WypwE#EADS%R>6+pO`HH*!tnG1nVeMg1e}YKjl;r1 z)im$}gEo2!pA1hisTGoVSkciDS4YjG;|K8o%Yu>FZ*E_sjOP`s1Gj&#|3=P#;u>l6 zJ9qN!kKH%6IocZX%VwbNm!1BJr~j7s?>hdMZ!`18$uQkwD2ObcXuo_MVm9O#=z>1? z{M2m{e?7>9`|_f8NxKT8o~9Ps$R@-quZrz_2uhzNe&AgmraZODjIBw}Op4pBRg(u? zq?d;NUKUwX*n6*;8^(VveBW>Wp$v1luuT-ZPgYnj&nKPgrw3h1e7k6n?Kfe9Th$dU zKfw)wJhjdJbLtksH(xQ2@rjuZ5T2K^6=lR%So0q&h-T%I@)IZg`YE6EpD;VWaa5l# zw&%M}jDPSMqmAdWwJ)78>n+m1gMQs^z4oo4t=2$w#YOf4Mq+>BiY2}rBig5`+C$5R zW{>ms8Q~feKc{ikR7F=KS%#Km?4W=Vqk`3JPHw+TXaL(icUIAb}k9Vm$%2}7XRv#3oCy_NV ztdiVB^9Fc;`C5N+EfZ$xH~$4I;S4f^m-X{}K)!OCPOUO^^}?FkizO?{Tt6j%-%!Qq zJtPNn8T=P(`+IDQj*#QeQy$i13Wr*VWOnAo!cnS-3b2!Qz=cCAi1iA^w zjDzRCIFY>5;foW|{@^t^zVT<>-lu#{=4Ds-l@I9Sot1wn{i~!RxUQFeki)Xfg%8v9 zEOND>!cZRfYR5#)O<0O+Q1J0{!mdI(F-4-GZskT$kp4D{KmrCkV+R>4Suo;}l!G9F zCc(qvd{$pPOY4t#mfo8G#1gbo2fsQu|C9F#cB9?vxi7^1Lq9ExO$xnJ976h%nHZ=$ zm=jsN-+h1jq+W`GGN>U&du0fEYn~U8moCh!TvZp5`c)ds-ji+B_NKB<)mpN1yQ6W& z>(P%51>pkw49ysfaVCZR#F+-`Cm%ibWjOLWeCIhOQyV{PW?MwmEw1kw34vwI?!*Q? zBIxA4t_{-7G~9&2z+SGL8_}nJ$UN?F76f5pA)$ZMCBP$YGW)UjyKn69SG(D-nAY{j zzWu_I{Tb(sASGu{+yyINrix9TgNdtNE*GLx=T*Sc)-a|=517V4GjXZ<@9uRVjVybLq z6h(DwcpkeKU24lbwt+w+U7!^osi7Z~f9H#xAZMx!_X_BUhfPO3EcsX=8mzIee(d{{ zc-GYZSX|!5IfIYyFCSEhV|H>aB40^hK%jreY@;KW`C*ma?W$r1juUMvp_--FmAZ~p z8%XTYg$qiEam-@TJz6!LsC`=O_$O15ELcE~aI82uF$XLHexy&m7hT9e9^=)2Z~zMb zF#SL2ZT>4Jmulb#BFBjQ(p}iC)S^f|(#wsc+H|9!|GpN=vupE1JXP8qISs!ak7+>#} zg3YuQ&-WYSyquap_LBP-f8_EP4uKX!#uc~cFN>;=9#W#N^pS3DcdiAyonY3|<{`78~S%hr4yHm_k2aC7a z;)CioUu`UM}EFBysO^*=-RKG;CElZnAg`c z5}6Rp_hMxrFTRp`eN)R!fnKt48Xk17W@a7xq)=E;+eb%iL&7?Nx~$p-o$r6UmTh#@ z_q^=v4P(CVDSQa5GM>l$!xv^_{L%m0e*J5H<}=##xsn@Lmx-cA_LRu$%HFK z&2>=jqD+}2ZhPJZLw;5jmlSDcF1jdk1JmfL!OFFj=XHAz;t^j>&%zZBfbShtfF+J& z1X*g*bU`MV890 z{fQwxu0%-mLfmi1&pH!Gwn77bi|-k47{}fJ$`i%Y?;3r?ocyx6<2rv``OrD|uN{My zlHSz=XdTsCW9&9tS|=|b?CKh8DI(dRca@=f6JH^nmn-p+{KN+&<`6h`9QmiVpY%D- z>;5m9N`GSKKmO;hSX%dE_d9L}e*ZSoqapt0Duz@W`@M_99~ni$!ZzGY;#+zsVof0j z$(a_F10V5acMLnbY=wU_diF|DvFG%rvW)0hvh#u6kz&icp_5^kpQ-CQQJJfHRw*Qp z8&`&#G23Wc)S9$p0;yNsoLhUU(F367Nvk_GnOcGGj@4~CKl&d!T1g5l=4Z53UPN0U(SGKdNk@`>mZnIa23mJC zXh{YIABVGC31)xZ#MVCX+`_%3sGDJIiTj}w9B!|K6`wyeC?uc^9^8M@3(7`6uTQ^N z#4`P^U3zlSf8!X^(C+`@TlLIGeGvWZ54o(b#05rV@YAWR1ci|`m4}$`ND~dCg%)_- z+A8(L1j9tSne>1B`id1Dw|aups*00~j4?@(M+rsFmi-H-S!i_Zr= z(-J-}qlbT${@}N+zx^*HktU=0rVpt2$3Iu^$&m6=db?tFYM~?{QtOilVrr>j(#xz z$)6ypS-)baQfCH*296YB7}%3r=d+M4B5vWEV~^k;c@sdN|xFR#d5j zyTIrnaE2SPy{7ijn#Emm)I6~=dr6U5yF;0Dm!;=b&!iv!O%N7L-9aO9-1qPOOl0)^ zMto`{~E_f9$nyg-8e#_*gOV8`73!foi&C%h;ls=EjO9t1xGJ%KbE*+v~pB{@Cl?AAH_8m#P0G3qO6VTi&kaziw2JKBJx7JcvgmTN- zWvRHA?bhtlQn$zbmlF5CCmk&SrFe%Z=SOL3txO~vO_uBAs+wu;tyq-jq^zG z6Q3UK033H4t4eiL+-ilN<9;(t=&0BC?H!7Eqx4w>;*=d^>!C+tzbmAWPAj6|ZlZsM z)yDFvxK%o&xF1)}0<7bcxAoeGG3q_ch0R(vJ6eNG!5#5^;>Un5K8yP`Xv{7B_6skz z7k}}hzvBPDZ0hh2Sf<_A*GP99+x5#A{*{Z?ix>&ZY~?d*;=ULBS*^=J0=J6~23VRk z6X$AgsXA-n;L(&3h(0D zG+FGJ**Yd_SL`&n8`U~=pbKzkA-SL#AxG~ZR>bPtwF>Q=mg{pgq>tTD7ntYTiNu`3 zJvb;g5l$Vsh)d-{G6a^b^g+hkvE8l<$SiDjeBbi^V`R;DAK$nicf`wl{pt}*@S`{V zKaB6V=GkWSFLPIn0W6F3mcM_CvXh^QsSc8iKJ?{uS-bm)1u2ySWR?Osb(z%5kaWUU zQ*|9eAcd?QWmyn8C`cORP!Cl4$et3Y$hGU%#smMRds{f#Pw0ne(-lg zg-%S1`F<{g;{m)lQ{dNpMaOxT$5DPTyMNZ^U;pajD*`~HxNDy~SPh(&>T|7Yc9|RPUw+X z@rkYFi?fc7@=u{nif{f-Z!YQ2RVG&zfP=;4A3$~3vU z>r6G|%t~;dI($!_ih^EtrPFR;F&iqrKLEI&2%ygakGaJiDy(07O?KPt2fs7Ke~pFw z;N*Y%V+dvI8|0z_C3)&7-9hKB4zP~pzB6`ZGP!rw`z9y83$cG;cbbbmYk&HrS5+n{ z8T6|wH>IaqIbyAEb}h@i=5dw!izZhRt8B|HzWS@5*A65Z&wF0^9QMWGelZDFAMm|} zeBS-FW^*Zzt9<@~8RMrHBzemZ*3Em-NTLqyPVopXXY8T& zEQ~y+>68Ag0l(r&e7^a{3;yDh{E1zD?Q=KcY#qpV z`%{k|Fo=KcwzCJj?AEf_Y@IIKYSq*w%5$dXY=Sz;*4%iD7#;ifg~dWe%-as}jDEH= zj1P_tH_I9rHLnS!MGcTsN6Dn4(py|l6JfCAJ;x1?01+}`kp2=s_~K_rpCJCpiHvc% zpPYz<(LZ9wzZiyZ9`HY5A^#3nlFq>EwYWd3SFe9QDT=9gb^KJBeUq8KJ=LP9b&c=J z&1K8j3gd+$OYPN?JJk(E6=CONdaT#-;Ju$pqRG;Tex$~d{}YxZ&kt~~oo$(fhHM({ z#O4_}l+3{0F%X`nmKklr=- zU*&%SBff+|g76j&+~c~(u;p?vr1Kw)HMd1SIzH~79rGxP|JVJ#BQ}oHd@&yAuls&} z{BohQh|*)fJlfXKcy1sq?T_NIXlmy20_`+RS=EL(uTDxq)x9r?np}yNNOy83*}B@b zM%SV}P=EWWUiMHG%4a>pJzenAmobY)dpdtH{CfRl^7={8k=P>JFml}6czriOykhoq z!0Z2iQttofC*{Jnf$=N_0OYgpJ%DoLJ5U9F?#e6$Ba@e)02(^1LfXQfDHJf1&kQ68 zcyPc3#7tg1}-et;AJ-7)q_iZK0)&lCSZktm`pM_Av*;% z+w#1DECQIYAwW8Q!cQxUeCyAKLXcSzAW05*$ZQjX%6uQtS`HXwu*raesR1I0pFaSM zfSl7F z*PYPmIjAk(ZG{Qc48l(1n?o_?`+4IE%ucrfD!k*H(-$$=on``L*k`bYbS_LlUh)AH zGMK_v0*C1)5I{u+HN+O^ynl&>3I<2m1fVi=CkX9V-(Ea_f%eEg^L2fLxI=%({zirr zKCfy8aN*C0xT_K1?B`skZ?$}i;Y|GK!FWi8HkQQWFC9?8lpn#g0eKBkhfbh1 z&|pZUZdYPHPGh7EE+C7r?GS$gMTD5)jH0rHlSKtV5F|m!gpDA~-@;MYe1zG1GL>ZM z81~cfho67*$2VYiSqmtPNSIKJ%ol(Y1g{R&_C7K}R9DbM5xP)%3nLS-9;pky;Tu%D zq%(S;p4x$O>3~?JYf|o~$}yqO7D(G-lqJQigQ6>`XXWMrWD}#R?W})5VFY4aG3&lR%{@skdQSEmGaOb1fZ14K-;e$%Q0s;DVLU;@$W`(OaE znHZMt0aQ24@)oQ*3iW?Ogh@%=ml0vy1qzDk0YT)pSu5u(!fNLzbwW*o)Je3mm8kv#}02$Ik~ z$-~e?rYo)L<}RLfgzHZP88ukunY_UyBm zW0(M0z$zVo`<0c9*MA@Z#;>jz+5;WOB5gN-6rb(1xZbuVwz3wXUwRqE>3}<;>SLd_ILWxY&;ed8D1$nX zJo088#T~S;ezXj*8@h3eXvqqsv>@^Ruql6+4??q;>%bw1RUkn4eAL{s)@9UKVbkc_ zVyw#>dceTjSC}S26t~Y7TW!l|#@nakp@(5CuT7JXZ#nPZ;9EWnJu1fX#x#lcE$984 zjpar$^w=BAUnYh%A5ZfDZxf@eMR`Vv^&R8YIQMoC;m#}?K9_c;32$QsZ*IS=(SCoW z*}s=FC}Te>$GIFMjq*9Rvdd8?3q0y_OHdX?e5~w@&WNNA`}yC_6D9zgsT~A%>EK!C za2d)V(mi-la_V554{7bicf%cQjoa%;rH64I^=#ah83H#5__#X7&;igj;yZ#w;?-!pf*T7~}hfe}0DIg+|4+Pe&03ZF7<_+TRpOTK zpY-6+cI<-OTf(Yw-ZaL!SxifJuyZOVSqq8G=h45Bv5e2nzx)_P>@2}fhXOlKc>jVc z$L@>YXXBi90Q@(7pVNWY!!A@zatF0#3pd$)0M-v+upd5Rj5}00e`{}JZcjKLD^R&@ z2k+G(FU;G~uh9QaW{7?vUq63GWhI{{W7TC5CI>nw$v^O;RL@rK+7=*cQChNMoDUVf zfY;A>mRp(2jrbQD++l9JF=TFGYa?$nBAEx6m?oE1*iq5wV2Jp<@*S%KK&WIdDtFx1 z?4bBd8Sh&!;&aw7T2()Qq6e38UV5>NAJ|mJgY_d@^}p8hK`JSRby0t2=y=j~Qobd)279s;F~soW>#gLL4shj~D5=MzWJ}(!)0w)b{2oP@g(^6;aC4Izs5a2fJH_6BPVKyMe9KaSW_&Oj`{ojs%a8~ z`y+p;<9|(u$?4zd3fGS|E1Dbk!wH+AimpImGiCH45wUZ1yho$|`1g9Rj(s}LFLGBp zsLd%xDiu$>Za(ki(cyRdG}9#D6z!|^0V!jDW!xuO8GQXP&Vzq3_X(wM`Kv>f^=f54 zPa=KP7Y}J)zm@yhBvcSDkMHII8UIDO$=?kw^IJLP_m*kGg4A%-s`tS9Z{(C(P zpD}zkw9i(_+LnLE5s1axpO~h_GgvaGi;CgH<2_%xmeTm#d!Vwz5LQ={JQ$mkJt5=e!RYX>BsBq$Oj<8 z`PgILE&?v^Q?=rJ@cEh3-N$yZAG&Y*(U-3Kwm%r@p5K4=D29L_h`u|J3Sx!Eq-;3`>7iJ>L`C^h44tv{bBz%y~usOKlDF3&B$;1W5MeeAAhkO z^YI zWd89TO~zwYG7ajY2c|mF0no2GFEt4u(HK-lz<7U**B==&go}F*aKvpNtH7Fj%Yd>+ z!{O!#pyl^Edo=Lt{>6BVACcB$sNi;$U>ceH%wJTwCTSqf-N?)+&^-P{8@aDu?^GIu z#S-%{BXglp?cnQ$Gin0n9~_!9Q4;c;<)wZPdVlx z&G(;jnICG||G6i?zeo1xUI9Y+A>_aA)&Ep(!=*uoKlM8xGkM03h*#JAWaI^C6!r{V zK^+gJ&Nv=2?%>8bu`=lRlMaI?$IDnejw^rjC=Vi_29S2>25Jh23^GtGh{yW?4jC%> ze;$kmQho0^UN`%nc`pGQ{`y`XZ$~Dlj~RM#I>z%mDuD!0&5s&C@$=YK0wKRwGT3!U zK_o`CEv-Hu}pBegqT5ZAj{*E7mgnju+e)tMH-tQIv{ROJU&!=el@7~Y7nSJZu0-A%Ca>((Ek54@Eo$)O# zIcOP}y!{%5q=N~^SNy*(AAiSJ%lm&Z=lvM}GyUtgT*wl~-L(8W14l0B8iPIOOQApg zGmhVk|4+t#i@)QjG0vC6>pd`3kNwo49F(zNW1c^8Gc@q>eTPKQcs=mq@4*=g#{S~< zWB7}_e)a9pq01;2yguHKynSi~DVG-=dicvp! zyR?+kkMc9pefdJ5ke}zqbIFXayzqVme*Xr%=60(5k*AK+rF`7Ke72mwL&y6+1+zn2 zK5jne^Q&(i*MF1a;k41d4I_X5WBpvtfAkgpA}4qaKG%2B{X@D-kgQ+%)W84Zd9-m3 zqjEhjjahED^k!^TaeOVjzk8<2-4LIf~frjgs zk->wfpG-a;c>SX{l%G>4f*gRI!JO-n!(a(=3tQlMGQNclA#Mj;#@m0D^5nYC_iK6` zvVe&EeA+Efwgn7%TrNXEA(w}++#>llJ+^&r~kO}D(deq-Kog( z=YL3|_}|(``XB%J;Tirln%S>U&m6^#Iv&yR&HjmeC}G4ubpZH%`ma)k=)&<**%~Ju zN9GxeUiOX|nL)witOGL(C?E_<;ewVGjg@)nK_Dn}=iAt^WGjEa#zuc$pOt6f(Y%-7j*4)_zH63m@xRT`t?`Mr?Y^9?&+%Gpv@8#DiP=ovaP)+8i-8_9SAU>Z% z=>@`j8_Z_&m9>}GY2#LH=ZJE=THXtgmtJN8KFR|z>x&$ird7K@d9O_X`Ia)mX^Eq|>S z;EKt)H}0!&zS^BP?#;~beQk$lx_tpI7oLDQR%C4@1&n zHQNc#R!di5_&6<7`Ebw8Nw>bfFGJ#i`H9`8wN^9jr3LC_61|2Ooo|NoPzvj;yetp1 z(-D8x(`|j+!)F3j+bW(@TNup!B3M)O+_!9w*s-m4Zl?RJLc`};%mP`Th3vh#(!?YiFvUbOXHg-S-LblQ6rNw;`r_H?mTz$NdyPVc3( z8Vsv7F6_MEqwLE!F_v}U%*8?M$%RD=;ywh>8(4<4%WxqxmUVZk#QrsIae0k3=-u!X7 z>DN7VhTXB4dAE$1L@xX-of$*rf?;4n{f~qD=B^~!eyQ%GX+|14JyrK+hc|W^i+O*Z zKaFBb+es2VdM~IB@%k8=C*eBt)+C&Ru5BO2t9jykCti5XVS+z z$+{i7v2v8!e6Fb8W_f!blhmAUhr)lT=Z}}>9>}xbB-hJDK9EbXw*CW6QgZ$ogdZ$pc!)8Qp{wTlY%X}z6TljlLGJi@b@Y(i<>zzLg~ zMX%^o+491PTXs@EpGFGbd2YtZ1(G|;hv%d=VAW@aEUO}wanmKCCN7*c7NW)A5}9rq zGhl^fkic5Fd3nFZ4Z1}EnmvE+luV}gK)Dr<1&eFa1fvB?zm6O%f;$8 zlik%1obI#UMtzHJt+_Xi{G^F>7#E?~!1Ne61!DO+KzHak63%eEtcAdcH?pN6Uo*pp zq}e*wgK0b*tV%gv($}HFk>#7JBVNr)6{F9k+6U6yF*iGN9S0~XR*Qc`o*gfO{hHq5 z^3fGRM9;-?ekpIxU0HwHtX^fSO)dw<^eRsa47FBwzF!y{SGj*qYiXC(?K{tB?5sT1 zr$UPI(+?$qz0o_CpW~rod(lx`0otCmfzGaCUG=Z6uvx`)a;VDb?siDeNpq8%D3|P% z^%*L4lAkAHb>3}e`(=OLs?)h}O$*0dubs8wUlQ-OWtY2}RJx9ar1 z(;yH1!|SLA&!vA_;I*`bVV_iwlQ%oxZX$g(LvC5q=;$(tfg!HqBrC7jM&9Jw#wFNCZUJ|B!7P%iO-61niQ9XcQw{= zl&w7x4ApF`aVI&8t^v3JtErjEFcE8_0LR4vCdOfWW zU6i}m+HmM){dRxV%PK;X2{rFtsbDKUM>u7z?Qb{hsS_u857N`mxOnKWUm7j{8SiA~Oi3g;qk3B;t6dPLGVyI5Ats_vl?<9Hj8b~ z6l7Z~tzQ}_WSG1Zkl?#&ymsdW1+>7C~uvn`*^aL)UU_%y7TSJX_wdcLol7s zs@48-cBv8D7s^hdEjgQHDcFlel&sq_6&-!@5gx_nvK-deIj9e7vDqc-iR|{bV0qKa zxjlcsy7j{p=krLbq{%edZzT6!3-9OIdEL$TDlUipN?R{B+o4~*747zJb{j`uhqKN4 z9khLKr!Vs%44)7mqih=C?WYF3kWb#j+gPTCuDJ0MFXx|-m}qD3^C&il+3j{cBa58Y zpVi5T&m?(HH#IUZKB3|C*dH!ceO{jS*QtMf^y6nGR~Y3E^{=x<9g88(%LkheEgI6z z;j`XLie0JJ2P7}iq~hrd{>tp9r7+aqx~~>_wRlOgsk&-v^HX)BSu+W=#o0OSAFlx3 z&)M`md%^ou9-Y_Ryyl&DI=tUb7)*tY|9Z~f^ILRGG6_HJZm8Lj9EA1m^`3u1`M7_i zsJjW>H5c@qC(7@YD0V67=6-h8uvLW0Orp)1Ua}5J*UMd(Hu+L%7fpiXeh}c8C$04A zyv|86+AE$RUk<{KO3kq@O8>EoUNtLqQY7ccGpc9LNrTZbpWmY8^Y!sPKbGlTzkJFr zYvPzNP4e@ip03~kuj#5h38Bgk&q;rCE$@r@>W2J9257;gKI?CbVS4_o1udgFLyFLy zii^4I*7@u;>6)2YzF8nXpXqaYwEgwxHBE)Bba~JPK9|Wdxj4#hyG5Vt=CnBE!i$Po zBJJMNsrBR9-tOW{xjZjkwy=5E`MSB6uh~AoIQnwZ9ww9dJbJ9RO_I%yr9XeKAI9me zRP(f{r#*-ZZTm{&>HM%1-%GKZ!oo2XXS=x7F%mQo`pIfx1xm7kZOkj_-SLMAK`(qr)X% zOtdu|#d)|q8fLS@G|!^sM8lOj-I#TMyDpNru%>WDskQQ#_Y)45rgzxU`BA$!ei7QI z^I-b9?`}^UwzzrysVvra&uSkXQ}5CI`3Wgmbh$~7?L9dJA9XK2dtIM9egE7E>qwkG zgPB8&^`X|ox%WzU_e-p8SC3=h9=cO)l~{?i{q&9`T~l8w19jJ*(IUE~uhx=(7K_;> zc|*8(zDpgQ40KVX)DZH8O{G~^PXuzopW>0|EYFfBzRWT=?{@c*>FeWfhT2^{Y}-tJ z4jUI288|(e-_MS+t&hvitBQ7QAFa0K>)x+=wOYTiCiwbF@3rmg9iOAq0$sF~>l(7^>)3G)}M_glIJ!|9CyBI7WRD-s6|`u zHmg;+PFH%fu=j&=n5?u!lmZiJ&x3Xsw_Tt*Yr7Y=(Ui_U`|?;#XI^0iQ+0VhhNoLy zZN%~w%%N@b=c2nkDA5dy z`*?$zdt}^~bi2O@{#C!MGJ9U8Xty+n)0J$z;8Z4yv$~0y<;NjeJfoXeOQ-8SST5)` zXP>o-_c=@TtK^gB+Gxit`~<~p-d;m{e?Q(|cWWkl!c;$?^76s@r|Ngp;&hlC?)%z2 zeQ=u&m%T~%iwClQlh=CT6=mF8#N5A+=t+Ha&; ztPYps+u5yena}KNnMT?+5pJ`~`cfzMZj#_`B`=L=`%GPb9g4YinJspT=+Jj`+WVWz za}sUke{Ah!LaTN&MUxY?J)JL#tICE(C;joc&8VQ;Yr(ea;as|IvM)k^62CpgtRo#O zXX_x-W+pa9XOe=gq#<1u*Y(7If2NbelP(7LvybTMb38zb z+H=j8^PPErns)ZRJJ9W&q``Cjc1R-~$>oWb@8DcM&0+K2&jo7}KNH){>X!*UdET{deQ00<9ThGa2kG2OR(x9vEJ^7IJHfGmFMa4++KrYCC<;A73Le4#QaZ6abO$53krpTNbvU;Ae2UzO70(xw3z5E^x?p3D7Ce_Pg* z%wBi@Z8iJpQe5DdRu0QMH0a~65>?F8XQAQ6BHV5li~EjEkEzCXMA5V^I@r1~pYM}_ zL~LtnV(f>PTQI*X*U9@cyzV`y(YBa- zrwm$W=1I@H_)=#~*A2BohM(=NEljgYdY~gq63&G!peC+CNTik!cmTed~0~(xGas zHa#WgHgF zKy_r{TyBuDR!>~Ag7U>SnB29h2ZsriD*>iA3qMc?>;bY~+&T6x66KxYE3CR5#6|0ATCqFEN zucqs1I>=tVkkpr89}f2RHZIq?!(_+QGUCK!D%Kb^?tE9 z1$MZVx`G}q@kW@q!NaK+&Nj(^7x-?(kvbRVg|@qkTl0W>!#PV6v zDfCm3z*Q_SDoMP@Ay}8Y^M0}|=V_UTo5I>vVxhe|W2c%k_de`S?}hq*u{L#B84G8a zZCY=>Wo^AvOY61VK01AWiR**?dIts7`fc!3jioy`R&`@3<)Vrv&U>>zE%I`12GR0( z9B3En&)MGHE}HlBKR5l_N%ed-?qw*+U`cqu9I=-&oyJjjSP@eJY zVLzR#zncmA%i7uAY@Sh}=84g&w4PeY)aOz9nD7Y$oH&UgT@pW^Sz7Zy*gB6@>w#{&zKeTC2IbUL z&T%XElylB{dUHN({}--%0*R%n5)@)lV@&;4rbIvy`J6%7kX`DLup zpk@=WL4%PK(pj{@v175jR9L?ur4&YR6_e-0v1`(im@2}#kl)0c%1DG}&jHGe{020lNr_6Pcqa<6R~QF+h>lHm=5 z;}Y;_cqFrb9Nx1ysR4?)B6Cd!h6M2JxYC|w9MiW`FNM?%n2V%*6C6PKxHbg0nj@|v z;W_^*@iLHX0DW9`R(TkNbL8GHwDcg0JFVeb9;|E(lmg0q0C=?=lo(MFSkt>2s&EwL z_=_A^0{o^3^;afOf#AA;5}e5%UKk8P$D>cO+x*^t%M-_~lLC=(k4~5nMVW|(o(S*} zQvSG^!FyxF-jw+5twAC+LRaJdQlCTd_E;RUter+F?uU@3+Pb6OnnCs!Y5fxfCY+WB z+)DUNJLa!acpx>imRP!F9Av`qGZI(t9jibSDJ+z1iWU^`)0X40yI)}0fW>o3VUIeA zXwZy*_$047%r!b@92MUv;H8Mxp%yGea0|?n+x9o*WIch@m7i>%JAwUV`KCeOg9DE)Olo9eR$}4gx zf!}R(Ouy#P>}@e+flheX{R3#DuDe4DM zT@aUSybKhrSo5^=QVnBJP#Gm~?3C*3qHSUxv}qUyN16@_Zgv*t+r|s!>2uQPOM~Rp zwz}w6WZ*^zm2EGOn$d<007_P^O(h3^Zy#dt)A94xp!YYii#-;Y+Qz8tB0&+_Y`&r+~nj)=Q)4R5X zu(9<3>}H}%JrXF+r?EJupW;#?J^Mz~1{)huKv!2y3|slCW!&@yvLpq%M!vRxRnSDv zj4zVddKO_g!WQTnx<4D-=n%IC)aNvF%2B1SC>z*;&}gLQaKf>%yKa+*(>?ug>MIf_ zS$yZFceOz}u*UsB!(`KPAFRnl^KPu&(H6oN6bg$ig5;a6UdfJ_BsK-_agrPn$0|^j z(|XB5R2KPeqGhn9;G>Z0pr>Vj^ByfxP{#%x@C8HeN~Gt8({lb(qvi0~*f^MzI#SbQNPb%yh8O>HHT=l1RmTZCvSB=E3uR1Oz&?M_ z0FJtW8rUk(q{4^&MKMcVUCd7+`^5249X`7zCxPE z+yf&2*>?&%rg9(;I&Fx}M9PJrg?D$Erwa?UZgvS&?+rhMMQl=j2F%{}%BM0-7BcQyg-_xTs z9HPHvz+u{h-gTX_k{`i;X`hbs!7>PoBARX)V|g92(h=Av9z^U-*fm#olUT&Z6?p^8 ztUP{K6df%UR?51W2q~=gMrTQu%dJ(d-AqZ>izQ^KBsh$_lnj>EyBi8XwXxM%u@$^~ zzJ83K-s9q;#9%Bv7}UB>=UtWcWsOkfBaOJwO{s4V1Wq46V-9A29FD3=l;sUCEwXe+ z6IgU+24RCAhOg$~zHEwmyJW>C~I)wb^nxrQfphn!k5x0Bo_k z3TTqC-r&H&F;$F$h3E?UDf<;Ug2+)G2=aumqvxt1SWV!65Wrra9R1s}u!B~O^XJ@!g;iU_T$tAto`u{#Ll^U#^=DgibLstmKk?109e^oEv_`-i+joA1f^E- zwS`$-;h&s3K&RZ4J?an5N)-s_9gJLJ=j~!7`p*t3a3wecK((!{@YWdS$mm$ywwxs} z)!PI_PT-#KfDGcVB$Ou|VQiUq7!)p%0V$&6=XP~JkxpVfT6O~7L7}_Dy<@Rxm2gdR z=^wFwr+$~z^LTb@fg@^y0(diP)(V4YQD%_o96Ncovualr>}p-sq?4}^^o9)(^>a|4 zV^gjWMmx?Z$eB0@z*ZM>*s))k)pLC~e?g9w;Aez<@|Z8v(?Aw!B)&7slU!XcO7WR; zUDdrqGgai6DCQ;6&I>-V)nlrS(d=o19Et#I28q*3>+!DvhV-c5b~?$Zmsw zlLI-?UH#PGO_;68kq)5(>G)I!0v{1U6QEUCw`05CV)Cv2=m(bXK}`R3B7^t>A1X?V zC?1|+Ku#KO$7u`MZs>UA&i}p!D=-9f_F($mZ=go5EQ9oKN4YP0rWR>OlICzd$Cd*M zQDuEd@d^iMnZ3;dj&aX1D}IJol%nTW{@04rX7XwNL{G+jQDjyaWlY?9$wQ;(A+ zQfa_0GUj3zQ_0cLk6X=!*(%kJ>O)sD;H!r2H9~z*1-LeHDIMU<{fYxA35$z=n-Qxi zm^=(|W>F_h&)Jv6nD&&Z+*{jhjFqIN&!U1y^~7o{r(1pC*R-+_l1JhrOR1}$Qs3od zaWMu`hghJ#y5!CE3c1a=cuz^hWI}pJ#j{;Rir-qJKSmba_~3seu1;KNMDV0~eU_A| z0<3S&O|K?=hn5&Wvp+rrtld99pSca@uOyc za)6$n+29fU4DFV2ds!!PVQN33yaG`Wo=VWm0RT# zopAZZJk^TOKehO}51ZCp&FTdbXae_kutFrH84)=3sIZ(Ux^wa>PZK16i1wgpGzVb2 z@bq?nh+6EZ5I|~4H0hK*T1nwvN%Vjoi^r|U*F8W^o)~|O}=HM<{4m|eyr?s zG)ZS9NiU|Gjt2Y%pnUz8jk{RqW!vwQZO+;+7LW{k((ChnBWpjTk&^rlFe$?H(?e2* z$cu5Vg6o;$+(M&yMKec{=JM!A2q2Z%>2)r=UKfHXx(T0n7-aa9z$YLc<;|?9BA;~0CMB1lgIdBeS%@Iue zL9+2syfYc7k|wql9)W6>q#on4nw5G9A%n_1(`=r=+8$Y4BfeW^cUJLQ zYI6fMy@{fK{AMP5?Jfjt50P{ZtGcGz4UOCDmi)bADqU4kX8eTcpuRivDfih{gCs*h zEf~_5;YKNtpv)OoY6p^BR|(9i?r6ma0Ad4(s*uW^BZ4oZDEe~UHz2SqJPkp|Bw{NK z?7sWi*L(lU2MQEIoTz6wsN~1Hv>7>4&EPp`*-zGgZKkhf4c$nS>Qb@|j{Ac1&gu2a z8If-)7Av-j_T`2+Mlw&U&tK1CfZg#`5pnDFZdB#JyADu!T21(`M8pOs>kxy_A>d?5 z-fkivYx0q|g}e2|$v+rA)(c*S0=dm3BMI%>upeh$Up=A&neobYn)XNZgG0$*{v}W! zQeAw1%dxY!(xc7+XLa>gR&og%9rg&7t&_)3@GX*uR{-X7;iJiI^PQo=!iz?JjZOY| zmsGd*CCZqgu$(q66q4AFsAYl=I&gY0i;7wwfsa{Co6K!zPyFEmS6J+1SaCwEgNA_R zGfgybfm5@45eD$wtg;`_CGjvB%+tKr_-fC8b!r1fBv3S2yU*m7f@U$jV2?5=m$yw2 z&>RD#;W#c_$Fpfji@Mh0Ta`WY5i+=C=oKe)FXfe`a5av^shO0O>2HVdg%Zoyj|*iOb}f+=|d@g_2Ct@5mdgf(e@ z;jRXv&PQcnM0p0ShuYa=Boj58c_GxeEndY%Xz{ls*tko1nqJS*+90InO7(i(4V+ri z6KE3J2WEbb7}>Nd>9nHadwRbVGZ8LO;1t`TUtW(#(I$_BG1W@O0iHic3D8XhsZKa` zlS6I#?}$aaDV44O3Lk_+=?S?h)cquX%lss%A$jJc8*^iC*nhi%o%|hB6~xw6OQ&cy z0PHTi_nHjTwqtaD%Me=->{YXYlU=i{3Z73yMg+1Vj+JvwqFdO3z($6upVn0T%lK(r z&p_O;S6}a$(vA<(E(5rpTU=G@?_?BRs;j%0s}rLApVPzX+dv+DOOut!s`S{7vm=L4Qm zH07uP*^0?P!1ZHYnul_lvP>mx`Lq!-`)Mls}k^$PRKPO7xARBCZn5Z zMA!3_LZkKb`pz6+nv?y`bF51nE?&CFM2_UdFYsBD@7VijkhNlecoT#!u0j!h7R|DphFVtQlBxj=FAZn?&nQOjfnorOFWgYd2NgpAM6 z<-KcWA1IlDC8bt>R8eE=nq?u;nNUw}%L%I;_7t#yqT4=cUg`tP z^-Nlckr>R)>n5hx>Gc}Oa5oM5G5K$HuwyXGWVCt@Y9v$&I6ihe5|Ft z7IfQ9=asXMvw(3;&kU5Ds%fX}VD4@UOkrdz>sW_BD`%kPmzH)`zw7qr1{L*~{# zMDV5=c|n97Tx~fho{x6N$!XfZ@=HJ_+K{cT`De3uO(vcln`>Ub0jzNnD*kZ-T$LZw znFf)6tfJ1T%p0JQj6XBJ5+2ydUtt1@OzsY&&CJdJg`Qwx3~F-t%LpYIyjr>#Pl+g$ z{1g&hJh7$ElD3(wcepNB$@8T*b~)Z130xM zgC`3~-cK;~&oldxtgDzzEh@@%u2C|_H8hg-c4YsJ(dJupGvjT{izLQa<7AD4E7I~HU)A|?pi zUE%l^<3K!jmv*hRLZ>JzcS8m97<%}{mfm2j;Pi{+^`e^i`B^y7AwA>_^ogW@PQ3U?6Ctz7rM-I#Lx*P~;mWa;LsIL-$gy4oYc1Wl&Js9rj4$&pqH zyeHv~*i5O!bO3#%E=tDHGQnGa4KTxGUSdhYwEL*mqMoKm-^nJb$K+#l^Kzig4opiH zg`U@HOVH&H6%Fa%vx`dFl~DN&VAh?pPl7$JC5&`u|3Pj4VX*(>fBvuk4#)miHrD;W zY%FiZ|BsE8o&UdX_&-#v?Z4FD&bj<^f{XujH?>>f#u%DM9R0Dj!$07EI{G8A<#)bB z0E^KC`V(*%&Obc+pZ3~jkdevQF_((tuiih4@U}AygBfv4k|k371ta{8&-oA2{jao> zAP=7WUHT5gaKT@_&r<3`bb>hv|K-t7{lB^W(-kA}n{aQ~%tsQh!B-?%$+;bWrAh@SY*&Uo_G9&rxLj&4T9w3s;;GZpuH+F_-TDM_>Fm zNS1eqIUB_zaTvz<=kS*P>IuE1ptZH!8EXFJM!d-1^LqYwYu#&PA!PPV{??CY|1s&^ z-%zw-{_SgCnyXpXA%hKFSOhpHw2Z@aVlK2C=3v}fw~MBg$D5UZz)7Fe(kJOdHZV%V z1ByN<9hq4{S743?$OMQK7Zk=v(SsuWCK*F$&d%zWZ^4t8(oXHc>mJ zIAwgUQ_DjH0y;C*YP8ud@T>X|==7H9UBNQ9P8|fu*Is{}eePD}SJgB(KL*k(ONA87 z_cuz;s(XYX=38BVmttrA22}z$jqD9xZ@*uQlX2C^K3cn_s}ly*CKqCv`^wz>`TjOH zudk{vGYz9&Q|=p0{yk}Fzgyidy{OCGa|ZRCc4<^y(YGJsV%EVCL2OOilVuYU*i{(k z_o39YlK|>=w}okiU{26CQWJ`-C@sd|#xk3B<+++`M4pg;S~s5X+s59NLN(BX?M>VL za@l}W3I0Ni0N@s?k2$dJ=q*uG&gKS*#XXmao|Rs{R1#q{BY#1%Bw3++l~_X`;WPf` z;Rq6gqKA*=B_CK-fx>5sl5$QEJJ*?XRZSnJASzXJ@a&s|ok4Pe!WyW%YW4G2kaTyh zWukU$|FofhLRjbGmsL4{RbOQ=#*Kpx087ILmFx?QQLCj}66`4o3gxU8!y=ztcB<)A z8Uo&uiFPmrVO!>0E;)b>I+;cC{CcRMW!Yxvq~@y{wB6N?kt5aLMn%8)W$0_KvD0n? z;NvA9B=Ryrx2cDIU znLELFoPjW#rw2Wve4p_B z%1Mxaz19XdyF2~>5M>z_Pz(H?3#;ZO}|3H)@i$av>A*dS_vBMCcq;xi`{MirUghOrw?{~m@fV171<39OXTeOjX7 ztoJUhyt>Vbi6|*AQ+U#|E{uKNah7<{qxbnCppwPBJxz2|MdY_n1yP+pLIAky(T|aT zM?vI0^pT=SLoeYB-L6S(P@cc)5a5L$#PhyU%PC*7AQ}_ytLvfl%$T$d{o-)Hq`vCs ztr+ijO#GQ!e%G+r+6!$gewa~7mqR5jnS7xd_YlV1m0iXiSv9w2H&C22lu*_1BV3VE zNM`N|#`>F47wZSFB!b~2n2F>`a|QT+4kbL1e099Fz1NqjrbpvC8)+|d<0}Sj7S!

((rM?vjx6-|acui#FGEPVt@$&ORy*HF$l z4h8TDF_Pc%lt%=u4dJi8xz`Y>1aLImkfTRNIjb7kS8ZlVK)Nq+#Mkt73kF$#F%~>) z$1H$HF_$J3ClG>Kudl=rxqa0lqm8sW*J^7IMYWFfs%6K=F;=48pX3EVfQ2LwV8RiX zuY05ydH8(Df8o~!wd_fu=q5_I1HX@K)g(GPgRL5zVnA_wtv*nCwffnxmC3t+2L9vO4Mik?Jf%)lyF?==Z7S-Cj{TW35+m;+P6DWqp;buNVao~gw zY-m7Ocg~szO;Cjg;eX7;=Ps#krwG27%CjhO%k5$d+g~ zB*obRH(8LRJ9+y(+}p3>ko+8N<}sM8Kq|Vk6VxWgqyaOlxoa196!dL)sTeEz#7!TB z&9-gKyk_xX(q{p9eG$Wi#braQfk(i|gn685%Cbd~*Ju^j#k34ZE=82O{e*UPbGSIP zqj5y*seJOrQ9pkz{bELc6{Yzt;wJC2@bYF$wKXa@#y>KVG{ia+6C|1#Nq(B)U}J?G zWk$irY0iDS|2~R=!4G)d|g*xYKN{bfNLX;k^a6Qv!!Gpfu{|hy88OPkvbf zvGeJ(i>nbm-G`zm4;|5!f)z${z_f=l*o~6JoicuApR@{RmKPs?PTa6~ogk%FC^^WT z3C^ZMZP5VY58(2cbU6b(Vd;Xr@IlE1O0Na;73qXV1#8O1yHvF8t4FaYDZp%$L6)Ud z02U!&W<%TLxsB&+NvK7KxP)w?|N5E7k4cncTt;^*2sT-e#ER2qk?6tVrdY&!`M!KQ z6UA(YF^X9 *zgR@Bd#x(_2H1cqHhA5mnjJ2*74Wx44jW;V~baUgsYQApNB=MP07c>uZ z?STQj{~6(bp7Lf3Zo)dPZ$5cL)|gzHC5YN6O<;s08OD^(x=<;ta)M!VPd63j4p2}! zOwvvE(NqL>9d6%kI{dB4;Nr?Fm`NX9s|%N@JvYE1w3Bem9p{q2nSygRNZTs`Di_-? zPHb6gJ*%HUnbL{VzR;aPpbvC%?cUI}q5I$gOGRCOgP6wP9mb`%RICt06yi6fJU5uf zKMTF8vRz{a@y}k(EgyuoHFPcvR{!yDB{y8^iu<=GN<;P z_*E3BE z=*HK7$S|Q#Bi1lx1S{Hj&E(!i?}9Xnf?8qu!lqRHkPd<&j+6=9n`Wfz4w@B=!Cl{807H8rdxq(gaO96C$ zGvd}>7_f6DKgBzjFNFx#Z7c5*%DR!ZUkb0xz`0Mcst|ZS_JIrCSTvWmbAu6g_9S4PXCWdbg_4|xlRW3I)goq?3STvzWq zY8|=)2%pPJ7^!utP-~E&gV$l99hwt=Baw&G^Hj(-L(=gZ($7yAbtK|nAm#eyB;)S9 zpQI;inDckxiP(*+HQNk+H5#XqtqGwZeIhh;Nm$^zAv*`ns2f3i4v^!?M+;SNcu*0Y z3offF2~WS)WMfc46q2cU&xU6-r%c5O`HS^B1yS^SAt7`PmoD1prbILfJdNsqvKkZ( z-uEn(Q0cd*@d`z&D!pjWxEfxpZx*JOrfP;fXC@K>G&1ZsOb@Zl$H;{GP7VS9M?&+6 z!%!mfCyY?HU!$)%aeXS#M3{@!&HS)ciUoiU}j6>9FFT{pL`K3F5V(2f2HwU(qpC@*if%dB;U5r3YyRW!#-wE4Iz)d3_ zfc&PX4WNNsTA#tDynj*#Jv-#l^ZU}~XjVCN8&EhvAn8rLr-heezID(5FWaSS)Q=>D zu+5QwI=dYXc%X`2b{XbLaT3VNvRW^GP>L$~FUb;r^7au3VJg-H z>AI`)+By%=T5;Jv-zLbO$=#{_f(YQo;QAMl;tB`71++$#P;2zF^$f-<%)Q4K@{}=Om?=yHSh*)kW_dNj!y;TN^2f)em-7%*x^W^ z4R_BsCHxG>%!lN0GP=xvST|!%wymGTt&d^&qK5`!@UabY)9J|Hk>!%hW)|$J{T5=1 zvYz1suLn&7b-^zL(}>KcWG>vK_2HeQg2jvlF%_x3m&G4Hx$VQNJwflpK(sZ91X7Q= zOc+n{>K@jhP7#TOnA<4XGNDb`T7F5Xn^4BTLRs$iUE4905?&_!NuEBgSd6uHLpTF4{ z0zce1?B3N9&m`u=xY%@K!>%(5?O@!~oeXc(GFuUt+)6%!xAku;F2uK!Gh;t!qP#0h z%`0BtNTrdkrk#0zKY|j_tKa(f@u5{^QhHg%L!|N$?}cz)4E4HWhs;lUvaoCa&QYDh zLk4HlC>v5kQe2)t7yOXl6iSma)JL;cGcR?#QwfWjR2E*Vf${-}#=*!oE4X9J4rB!7 zXSDJ3-1IlFzSQdBD;OgUoJunHPM^Ae$^R(Q6*ao97)`0xD6*cQJFCaTcX10OO zEZtp#-MsOCe2+E-oGBvXOH(E-iZs~jsgfx@Y3h64FEMywS*a*$`?F5&KJPJBBRzn@4qj4!!&kKS{au4MmbxwHl2LVd^ z@RElX66N0`j>7Z4A$F79I+U0$M$lcJ=e$rfG4g4DH(v|H@>m`krvaqaL~WRZN5Q-8 z;tmP7Kq_P$e-XpVW_aS!I3=c7UR>B2t=j2+AxMjLP*QEg+~w}KH@%J4|M*~294T^% z=q52>Y+*bp=^HLnE8Is14N_=%(g5W&d(MJ5_XJvOLBY!s=a?p&_%RV)44XaytE~n$ zBU2E6KAdr@lOz(BKVCSimS1f>rj)B^jzRWP717iTripWTYVYvz1G(&JMloW8o`cBa z-=(X|QnnDLCs~hSGx8#-TJ^Rc_|yki_zm3b=qXGH5$0LP#WR8_l@mI@G1~6&voxCs zX1d2$(oqys4pUSF0HHV06@q;CH*@K2p#S23F-gRyvfYVK2dJjZx>i2pNo=*{pKen3 zk3E01K1abY!9xmX0 zr_KOXSfBjcC1)#;umeG1P|=0WYeMyZ(F2&X>|iohTAV==h_h+m;$sX_S5Ugwo}06O zJ#6Y*;fsYP|d5&3txu zlfTdt?h?at$(u$mkHjUfDaJ2J6L(*Ke!VlLpUPOwUF=+3%vysutE3QsM5@SBJd)po z@q6y_I$c&^1f~!5RwL}Ze(6o}3ggfr-1-~aDs54c>tHoJkG{XB0x3^;B}AX}X(+sL z-cf1C8Fe+$!4G1intv04AN4>-a+AOG(V7$D4u;W2k5z33xyYud8RK&Mu|`mT%)+}} zTD6+ybOd{{D*!T&ku0~j93G$gWHykcBAYV=eW7=3BFZP?19Gwt_u~f zQ+M|mC$FIJt+-NMHG7etmriKbErTL`ZpD;;3F*2HSX4%1i0su~`OF18IRMz2eJy`+ zBY&x6+9-uW>m|B`$Y@&;2h!Ak5$ngIsgFp$jRgK>%q!OxI!D^HQX>W;qE{Db8lhD4 zU-QfPiNC^d0CNx1!d)~zfBnKgKgx*g9)vi7a(k~2`RyrUv2#Ml`<|cya9;QzqKYoo zyZ5uBDv#&5dD-Y#4rji`UxpXPSFOE(_9c^O7-y^#NPV;tM`H=CRbzjDD`iPguSylP zxO{9rOm4bSKq52>(ZEtaw7#!6Lb#v?6)_5B+aXsA#-hdyKKEs&%avWwSSE)JyI>>P>rm)oeg8OON&PTY+TCjua3-Acw*iw@j>=e+T$ibQ%OM-oFd zsW9GZpLQEkkLeOD53r;|D%#`8M@FUY8`4uIkAHbW0@uDQ@9rlNqzrQ=wgrs`pv)23 z#O6joABHZF7Wm^$kL@tD2M`s*|E4UBSPafznVmk|=EU>kKqwPRx;B2LIkf|2A7La% z{`3G_>2Xf-%M-4DL3H@UCr^mpy(5N@35Dy7P?W9mif-H$t#4S;_D#d2<|jmPEH#a4 zyz3Ze85}N@qCtGJAd0S~9)Fpjc-_UT=<@;&(8U_k+E*uKY_~nUz{t_AbQj=zlgB$5_ZVHZ< z%(>!pAn@u)hS{V~L8@pa#50R8(W_mGr`R6s6xlWc41X*=GFRfE``4~+u@(v#dInc} zeMj84oGnKXMRimXtd_vApMoGyVPuYywi0RAtf0>@IPEeU6SE`^k|?2M&k;YkKR=j% zT>>A`4*ez0bke)3b>HWmHp* zWiS+JM4>WCxRW34fyf&EegUs^Ed~gq+oez!4zix!4R1OmA%i8rd$tl>3oMgeQz`gId_u0aiCg#cc8E3? z@NQQVUN8+hrN^i6vqyt{zT8eH35$*GW2AzAEw;X*|5e#Au3QJ7`8kk=hhKZkTV`KL z&mv?cDEb#3#0(Q@RJ!{JAD_J++*e_%cMYAHc1?|gtfu9)8D8(5ntnY_J!&2EL^&S* zmrqVdxFn1Rvbg-Nt>PcAB`=u@LUV@kI3VNO_Ui@liZV_uGq`?ccxzJ7rZAnoOPKys zZ_3_e+filN7W^PJ0D&ZEWW$}h#;KtPh`N%55XIN$Kx;+peeStA-pf|RTA>iQ*|cfX z%yHvRta+_dD_Z^III8=$xVYDw?)j9ehnahCH2OBT4K4>-1OXejXfg+03M{^c5d1yr z0!z>wM@Zmea{nB{aR8K$7koL#>jGoIg}~yWfOr~&GHe#{bG(=GJO`r_UweJeFER2x z2*ZV@83ZhF`rqHTs67XW0Ra)WF+T^!0e{g8q+YcQn#7A(Q9|||0ug5;1_w*7vCo=C z4PLtvaoyQ2qApCwC}DS z>?UC*{jD>jQot?(IhVXz`rMMuqsx_YpkLLnv);i9Q@FVUilYfYb z3*v>)#K*;X%HRatjK%dc8)l9=-F@W@y@Y4(baWLTOLY&QsCpSLC(O?9WF<=XwsJ_q zR1`L}LOtZBx)H{Mo2E4@JF2MBSlU8pBmCJZV8Cv4m!?Nb+HXmcI$goLXOMfRkFUXU@CFx*0+(EUnTm|1;OFi$m>E!j*$L8r( z5x7$}z-f5yw-k%_*y-)!u|1!s(dPGO;hy3_MS)#k%rg+U?H%4@dBfJZ)tY$Z(M#(T zd?~0fY(%bkpX8-elZ`YQ{Z`$zUx9N#m;leQS~hL;&d)=xhD^h zuG4id)WG@3Q+l1-#ZWvamO3d8yui5lj*V&g_Iku*S13rIQpdkxqW)0Ss516oQ-BgFQr8e{0P_&Pr zK3|hWztjjjs~E1sfoX}W)-s~AuBOzSb7$-%0yEmuE?Rs%-PPfosei*W_bno?uP5`q zQ@!Y>e)fVnp-~!UbZHw}n*QXpK%kLiu&0Ma1?DlL@@bD`S8dr$Q$BacYliHeZPNH7 z5)466KHGJo(&^bZI>2P^hFbA5><#-A0);}p5a+U$v4ekooNvk`!PPNlDf4tz(|!Ch zD-YvbwU@J?%nk8b6o0l*gHMXh87t@#9V+FMEH9bA$YwHDomlfE+V;o9WCT>@;dEs@ zTolr%6QtCY!@KA@=8OjN-baTnbLq~wzQmSOX1>LpM2Q@&Z5vabTBn`t+DbGlaI$L~ z&11F-m(xt0oQT;WqGl#70bV7;-0iZGD33YF>Rs&^Z;vi&cz<6X!ChMdTOXNb7jK}| zVjZ^}@1DPrsu})+IlbESl25gLH}cy(ZQJ{a@lZFv^SPC~i~rY@U%`?Mt@>Mm2H%!7CK zYY^O4`cWj>eGG1zR$rEzoAIyl$tGbjn-qNK=?t`a4n7(qVqQ6l* z9X5V8I!(I0osO&`&=ZCUB6#Badz=rIYz{`XJZpnpRJdTc_gCy;Mtcte!Exw92>SwUiQb1 zZkos9(aB!YtE3c&-rje&S~IjT^9XYC+(i9$dp8%WWf*G!NkF#0f%)e;W^@X9%q_+6 zwTwOCPOmkzA6E=zSTW{VVfI;8@Ur!J0*NHV3V&UuZ1aEpDr7KhNJsagpI~RcjkMqH z=Ho{Dm&3}SWa|0kaXpQCvAw`evIDW4M+$*_$r9e4 zFm~3@n}WgM+PjT!U^j^87IW)@vCno~ugbSj2rudJX5|pR;xh2^j}Fxf>y7GyNnH*- z$8rE?OGSS$5hyI-!^W8 z$r-4h2^{wy(Le+vK&;Z7F#^-xO0%8^?2g?Y`q*(NyM_f%%f}RfJLgY7^+>OjkqKK> zRLZ!AErpy-SopQlNm&MAi|&=qNsFFuaFWeI$iRO~je7}2l>?`)jR)AgM{zkTl8`^@ zU4(U=Hnhl}?4a~hMSfMa9;IAxTiv*e`?yd0+uoaECTX`~);DTFlZidLX5HS;R%81wcqZ;Q;zLiAZ;k6tP9@DKDTk0MC8294zf6hJmudyfZ_ur`} zzg+|KTl_DubP4^Vw_)xoEdX5l9T*ac#}pIxJGBG=K-%9?A!~CVP{}0`JYv)yz5_|< zcz{D1$n%Mu0N{%6cXQ(J)RONU6{x4*zg9ykb$!j^K0N;GF=C3-sYLXvSk!wo9oP51 zEeWG!|4u3SP9-@emwcN+tUacoO?~Lh&=3H|w}~NwRAidScV~x}0pRTK;+CJOB;P3{ z003P1{(m?}0ntTG4z48t_Uqe3LYc_ycTF~)c?kgj{4D+9U=hrdUAh!abV%_#XaIn+2>Y@7fTUe?9$*$y5g$m z_1o=}B+N$q8_eJCr#LMm1g`vow%055*RG_|WIn8TPbzh&d4G48hq9-JCmF0DH4;67 zqaDW7%*Hwuh?{mwT_!6!gZ+NLkYZ@zJum|xpntxf-@SmhkqJRY7|r1;4_7vd^

From 902368a06b915b860236cfc97ff885b2aceae256 Mon Sep 17 00:00:00 2001 From: Charles Duffy Date: Wed, 5 Feb 2025 19:52:31 -0600 Subject: [PATCH 248/279] metal : avoid breaking build when metal API predates TARGET_OS_VISION (#11690) Avoids breakage in nix flake build introduced by b0569130c5e9c671152c913d82803b7c2f014ff9 --- ggml/src/ggml-metal/ggml-metal.m | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m index c63dbad20..944d90af3 100644 --- a/ggml/src/ggml-metal/ggml-metal.m +++ b/ggml/src/ggml-metal/ggml-metal.m @@ -19,6 +19,10 @@ // max number of MTLCommandBuffer used to submit a graph for processing #define GGML_METAL_MAX_COMMAND_BUFFERS 8 +#ifndef TARGET_OS_VISION +#define TARGET_OS_VISION 0 +#endif + // create residency sets only on macOS >= 15.0 #if TARGET_OS_OSX && __MAC_OS_X_VERSION_MAX_ALLOWED >= 150000 || \ TARGET_OS_IOS && __IPHONE_OS_VERSION_MAX_ALLOWED >= 180000 || \ From 1b598b30581bad59e5af86c94362f9a30f261fac Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Thu, 6 Feb 2025 00:02:18 -0600 Subject: [PATCH 249/279] vulkan: use smaller combined allocations to avoid fragmentation (#11551) --- ggml/src/ggml-alloc.c | 14 +------------- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 19 +++++++++++++++++-- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/ggml/src/ggml-alloc.c b/ggml/src/ggml-alloc.c index 9a3bf9f29..7244a9cbb 100644 --- a/ggml/src/ggml-alloc.c +++ b/ggml/src/ggml-alloc.c @@ -989,19 +989,7 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte this_size = GGML_PAD(ggml_backend_buft_get_alloc_size(buft, t), alignment); } - if (this_size > max_size) { - GGML_LOG_ERROR("%s: tensor %s is too large to fit in a %s buffer (tensor size: %zu, max buffer size: %zu)\n", - __func__, t->name, - ggml_backend_buft_name(buft), - this_size, max_size); - for (size_t i = 0; i < n_buffers; i++) { - ggml_backend_buffer_free(buffers[i]); - } - free(buffers); - return NULL; - } - - if ((cur_buf_size + this_size) > max_size) { + if (cur_buf_size > 0 && (cur_buf_size + this_size) > max_size) { // allocate tensors in the current buffer if (!alloc_tensor_range(ctx, first, t, buft, cur_buf_size, &buffers, &n_buffers)) { return NULL; diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 48ac489a6..2e1bcf691 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -156,6 +156,7 @@ struct vk_device_struct { vk::PhysicalDeviceProperties properties; std::string name; uint64_t max_memory_allocation_size; + uint64_t suballocation_block_size; bool fp16; bool pipeline_robustness; vk::Device device; @@ -2269,6 +2270,7 @@ static vk_device ggml_vk_get_device(size_t idx) { device->physical_device.getProperties2(&props2); device->properties = props2.properties; + device->vendor_id = device->properties.vendorID; const char* GGML_VK_FORCE_MAX_ALLOCATION_SIZE = getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE"); @@ -2280,7 +2282,20 @@ static vk_device ggml_vk_get_device(size_t idx) { device->max_memory_allocation_size = props3.maxMemoryAllocationSize; } - device->vendor_id = device->properties.vendorID; + const char* GGML_VK_SUBALLOCATION_BLOCK_SIZE = getenv("GGML_VK_SUBALLOCATION_BLOCK_SIZE"); + + if (GGML_VK_SUBALLOCATION_BLOCK_SIZE != nullptr) { + device->suballocation_block_size = std::stoul(GGML_VK_SUBALLOCATION_BLOCK_SIZE); +#if defined(_WIN32) + } else if (device->vendor_id == VK_VENDOR_ID_NVIDIA) { + // Limit batching of allocations to 1GB by default to avoid fragmentation issues + device->suballocation_block_size = 1024*1024*1024; +#endif + } else { + device->suballocation_block_size = device->max_memory_allocation_size; + } + device->suballocation_block_size = std::min(device->suballocation_block_size, device->max_memory_allocation_size); + device->subgroup_size = subgroup_props.subgroupSize; device->uma = device->properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu; if (sm_builtins) { @@ -7561,7 +7576,7 @@ static size_t ggml_backend_vk_buffer_type_get_alignment(ggml_backend_buffer_type static size_t ggml_backend_vk_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) { ggml_backend_vk_buffer_type_context * ctx = (ggml_backend_vk_buffer_type_context *) buft->context; - return ctx->device->max_memory_allocation_size; + return ctx->device->suballocation_block_size; } static size_t ggml_backend_vk_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { From 8a7e3bf17aa5a8412854787746c92a28623a8925 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9my=20O?= Date: Thu, 6 Feb 2025 07:09:59 +0100 Subject: [PATCH 250/279] vulkan: initial support for IQ4_XS quantization (#11501) --- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 25 ++++++++++++ .../vulkan-shaders/copy_from_quant.comp | 2 +- .../vulkan-shaders/copy_to_quant.comp | 2 +- .../vulkan-shaders/dequant_funcs.comp | 38 ++++++++++++++++++- .../vulkan-shaders/dequant_funcs_cm2.comp | 23 +++++++++++ .../vulkan-shaders/dequant_iq4_xs.comp | 34 +++++++++++++++++ .../vulkan-shaders/flash_attn_cm2.comp | 2 +- .../vulkan-shaders/get_rows_quant.comp | 2 +- .../vulkan-shaders/mul_mat_vec.comp | 2 +- .../ggml-vulkan/vulkan-shaders/mul_mm.comp | 21 +++++++++- .../vulkan-shaders/mul_mm_cm2.comp | 2 +- .../src/ggml-vulkan/vulkan-shaders/types.comp | 28 +++++++++++--- .../vulkan-shaders/vulkan-shaders-gen.cpp | 1 + 13 files changed, 169 insertions(+), 13 deletions(-) create mode 100644 ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 2e1bcf691..1c99ebe2e 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1622,6 +1622,7 @@ static void ggml_vk_load_shaders(vk_device& device) { //CREATE_FA(GGML_TYPE_IQ2_S, iq2_s) //CREATE_FA(GGML_TYPE_IQ3_XXS, iq3_xxs) //CREATE_FA(GGML_TYPE_IQ3_S, iq3_s) + //CREATE_FA(GGML_TYPE_IQ4_XS, iq4_xs) CREATE_FA(GGML_TYPE_IQ4_NL, iq4_nl) #undef CREATE_FA @@ -1655,6 +1656,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ2_S].f16acc, matmul_iq2_s_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ3_XXS].f16acc, matmul_iq3_xxs_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ3_S].f16acc, matmul_iq3_s_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) + CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_XS].f16acc, matmul_iq4_xs_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) CREATE_MM(pipeline_dequant_mul_mat_mat_f16[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f16, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3) CREATE_MM2(pipeline_matmul_id_f16, matmul_id_f16, wg_denoms, warptile, vk_mat_mat_id_push_constants, 4) @@ -1673,6 +1675,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f16acc, matmul_id_iq2_s_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f16acc, matmul_id_iq3_xxs_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f16acc, matmul_id_iq3_s_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS].f16acc, matmul_id_iq4_xs_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f16, , mmqid_wg_denoms, warptile_mmqid, vk_mat_mat_id_push_constants, 4) #undef CREATE_MM #undef CREATE_MM2 @@ -1726,6 +1729,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f16acc, matmul_iq2_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f16acc, matmul_iq3_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f16acc, matmul_iq3_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS].f16acc, matmul_iq4_xs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); } else { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_Q4_0].f16acc, matmul_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); @@ -1744,6 +1748,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f16acc, matmul_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f16acc, matmul_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f16acc, matmul_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS].f16acc, matmul_iq4_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); } @@ -1770,6 +1775,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f16acc, matmul_id_iq2_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f16acc, matmul_id_iq3_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f16acc, matmul_id_iq3_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS].f16acc, matmul_id_iq4_xs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); } else { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_Q4_0].f16acc, matmul_id_q4_0_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); @@ -1788,6 +1794,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f16acc, matmul_id_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f16acc, matmul_id_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f16acc, matmul_id_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS].f16acc, matmul_id_iq4_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); } } @@ -1837,6 +1844,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f16acc, matmul_iq2_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f16acc, matmul_iq3_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f16acc, matmul_iq3_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS].f16acc, matmul_iq4_xs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f16acc, matmul_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines. @@ -1861,6 +1869,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f16acc, matmul_id_iq2_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f16acc, matmul_id_iq3_xxs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f16acc, matmul_id_iq3_s_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS].f16acc, matmul_id_iq4_xs_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f16acc, matmul_id_iq4_nl_f32, _f16acc, mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); } #undef CREATE_MM2 @@ -1902,6 +1911,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ2_S].f32acc, matmul_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_XXS].f32acc, matmul_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ3_S].f32acc, matmul_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); + CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_XS].f32acc, matmul_iq4_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); CREATE_MM(pipeline_dequant_mul_mat_mat[GGML_TYPE_IQ4_NL].f32acc, matmul_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_push_constants, 3, ); // If there's not enough shared memory for row_ids and the result tile, don't create these pipelines. @@ -1926,6 +1936,7 @@ static void ggml_vk_load_shaders(vk_device& device) { CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ2_S].f32acc, matmul_id_iq2_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_XXS].f32acc, matmul_id_iq3_xxs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ3_S].f32acc, matmul_id_iq3_s_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); + CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_XS].f32acc, matmul_id_iq4_xs_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); CREATE_MM(pipeline_dequant_mul_mat_mat_id[GGML_TYPE_IQ4_NL].f32acc, matmul_id_iq4_nl_f32, , mmq_wg_denoms, warptile_mmq, vk_mat_mat_id_push_constants, 4, _id); } #undef CREATE_MM @@ -1962,6 +1973,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq2_s_f32_f32_len, mul_mat_vec_iq2_s_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq3_xxs_f32_f32_len, mul_mat_vec_iq3_xxs_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq3_s_f32_f32_len, mul_mat_vec_iq3_s_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq4_xs_f32_f32_len, mul_mat_vec_iq4_xs_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f32_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f32_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f32_f32_len, mul_mat_vec_iq4_nl_f32_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_F32 ][i], "mul_mat_vec_f32_f16_f32_"+std::to_string(i+1), mul_mat_vec_f32_f16_f32_len, mul_mat_vec_f32_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2, 1, 1}, {device->subgroup_size, 2, i+1}, 1); @@ -1981,6 +1993,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ2_S][i], "mul_mat_vec_iq2_s_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq2_s_f16_f32_len, mul_mat_vec_iq2_s_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ3_XXS][i], "mul_mat_vec_iq3_xxs_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq3_xxs_f16_f32_len, mul_mat_vec_iq3_xxs_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ3_S][i], "mul_mat_vec_iq3_s_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq3_s_f16_f32_len, mul_mat_vec_iq3_s_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_XS][i], "mul_mat_vec_iq4_xs_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq4_xs_f16_f32_len, mul_mat_vec_iq4_xs_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq, i+1}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_f16_f32[GGML_TYPE_IQ4_NL][i], "mul_mat_vec_iq4_nl_f16_f32_"+std::to_string(i+1), mul_mat_vec_iq4_nl_f16_f32_len, mul_mat_vec_iq4_nl_f16_f32_data, "main", 3, sizeof(vk_mat_vec_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq, i+1}, 1, true); } @@ -2001,6 +2014,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ2_S], "mul_mat_vec_id_iq2_s_f32", mul_mat_vec_id_iq2_s_f32_len, mul_mat_vec_id_iq2_s_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_XXS], "mul_mat_vec_id_iq3_xxs_f32", mul_mat_vec_id_iq3_xxs_f32_len, mul_mat_vec_id_iq3_xxs_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ3_S], "mul_mat_vec_id_iq3_s_f32", mul_mat_vec_id_iq3_s_f32_len, mul_mat_vec_id_iq3_s_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); + ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_XS], "mul_mat_vec_id_iq4_xs_f32", mul_mat_vec_id_iq4_xs_f32_len, mul_mat_vec_id_iq4_xs_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {rm_kq, 1, 1}, {subgroup_size_16, rm_kq}, 1, true); ggml_vk_create_pipeline(device, device->pipeline_dequant_mul_mat_vec_id_f32[GGML_TYPE_IQ4_NL], "mul_mat_vec_id_iq4_nl_f32", mul_mat_vec_id_iq4_nl_f32_len, mul_mat_vec_id_iq4_nl_f32_data, "main", 4, sizeof(vk_mat_vec_id_push_constants), {2*rm_stdq, 1, 1}, {subgroup_size_16, 2*rm_stdq}, 1, true); // dequant shaders @@ -2020,6 +2034,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ2_S], "dequant_iq2_s", dequant_iq2_s_len, dequant_iq2_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ3_XXS], "dequant_iq3_xxs", dequant_iq3_xxs_len, dequant_iq3_xxs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ3_S], "dequant_iq3_s", dequant_iq3_s_len, dequant_iq3_s_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_XS], "dequant_iq4_xs", dequant_iq4_xs_len, dequant_iq4_xs_data, "main", 2, 5 * sizeof(uint32_t), {256 * 32, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_dequant[GGML_TYPE_IQ4_NL], "dequant_iq4_nl", dequant_iq4_nl_len, dequant_iq4_nl_data, "main", 2, 5 * sizeof(uint32_t), {256 * 16, 1, 1}, {}, 1); // get_rows @@ -2035,6 +2050,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ2_S], "get_rows_iq2_s", get_rows_iq2_s_len, get_rows_iq2_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ3_XXS], "get_rows_iq3_xxs", get_rows_iq3_xxs_len, get_rows_iq3_xxs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ3_S], "get_rows_iq3_s", get_rows_iq3_s_len, get_rows_iq3_s_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_XS], "get_rows_iq4_xs", get_rows_iq4_xs_len, get_rows_iq4_xs_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl", get_rows_iq4_nl_len, get_rows_iq4_nl_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_F32 ], "get_rows_f32_f32", get_rows_f32_f32_len, get_rows_f32_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), { 512, 1, 1}, {}, 1); @@ -2049,6 +2065,7 @@ static void ggml_vk_load_shaders(vk_device& device) { ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ2_S], "get_rows_iq2_s_f32", get_rows_iq2_s_f32_len, get_rows_iq2_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ3_XXS], "get_rows_iq3_xxs_f32", get_rows_iq3_xxs_f32_len, get_rows_iq3_xxs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ3_S], "get_rows_iq3_s_f32", get_rows_iq3_s_f32_len, get_rows_iq3_s_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); + ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_XS], "get_rows_iq4_xs_f32", get_rows_iq4_xs_f32_len, get_rows_iq4_xs_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_get_rows_f32[GGML_TYPE_IQ4_NL], "get_rows_iq4_nl_f32", get_rows_iq4_nl_f32_len, get_rows_iq4_nl_f32_data, "main", 3, sizeof(vk_op_binary_push_constants), {1024, 1, 1}, {}, 1); ggml_vk_create_pipeline(device, device->pipeline_matmul_split_k_reduce, "split_k_reduce", split_k_reduce_len, split_k_reduce_data, "main", 2, 2 * sizeof(uint32_t), {256 * 4, 1, 1}, {}, 1); @@ -2995,6 +3012,7 @@ static vk_pipeline ggml_vk_get_to_fp16(ggml_backend_vk_context * ctx, ggml_type case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ4_NL: break; default: @@ -3048,6 +3066,7 @@ static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_pipeline(ggml_backend_vk_conte case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ4_NL: break; default: @@ -3084,6 +3103,7 @@ static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec(ggml_backend_vk_context * case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ4_NL: break; default: @@ -3132,6 +3152,7 @@ static vk_matmul_pipeline ggml_vk_get_mul_mat_mat_id_pipeline(ggml_backend_vk_co case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ4_NL: break; default: @@ -3163,6 +3184,7 @@ static vk_pipeline ggml_vk_get_dequantize_mul_mat_vec_id(ggml_backend_vk_context case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ4_NL: break; default: @@ -8037,6 +8059,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ4_NL: break; default: @@ -8110,6 +8133,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm //case GGML_TYPE_IQ2_S: //case GGML_TYPE_IQ3_XXS: //case GGML_TYPE_IQ3_S: + //case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ4_NL: break; default: @@ -8132,6 +8156,7 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm case GGML_TYPE_IQ2_S: case GGML_TYPE_IQ3_XXS: case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ4_XS: case GGML_TYPE_IQ4_NL: return true; default: diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp b/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp index aeae5400d..9c9fe9626 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/copy_from_quant.comp @@ -12,7 +12,7 @@ layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; #endif void main() { -#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL) init_iq_shmem(gl_WorkGroupSize); if (gl_LocalInvocationIndex.x != 0) { return; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp b/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp index d4b068e61..660811086 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/copy_to_quant.comp @@ -217,7 +217,7 @@ void quantize(uint dst_idx, uint src_idx) #endif void main() { -#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL) init_iq_shmem(gl_WorkGroupSize); if (gl_LocalInvocationIndex.x != 0) { return; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp index ee6877531..ecfdbfaa8 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs.comp @@ -304,6 +304,42 @@ vec4 dequantize4(uint ib, uint iqs, uint a_offset) { } #endif +#if defined(DATA_A_IQ4_XS) +vec2 dequantize(uint ib, uint iqs, uint a_offset) { + const uint ib32 = iqs / 32; + const uint iq = 16 * ib32 + (iqs % 16); + + const uint sl = (data_a[a_offset + ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF; + const uint sh = (data_a[a_offset + ib].scales_h >> (2 * ib32)) & 3; + const uint qshift = (iqs & 16) >> 2; + u8vec2 qs = u8vec2(data_a[a_offset + ib].qs[iq], data_a[a_offset + ib].qs[iq + 1]); + qs = (qs >> qshift) & uint8_t(0xF); + + const float dl = float(int(sl | (sh << 4)) - 32); + return dl * vec2(kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y]); +} +vec4 dequantize4(uint ib, uint iqs, uint a_offset) { + const uint ib32 = iqs / 32; + const uint iq = 16 * ib32 + (iqs % 16); + + const uint sl = (data_a[a_offset + ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF; + const uint sh = (data_a[a_offset + ib].scales_h >> (2 * ib32)) & 3; + const uint qshift = (iqs & 16) >> 2; + u8vec4 qs = u8vec4( + data_a[a_offset + ib].qs[iq + 0], + data_a[a_offset + ib].qs[iq + 1], + data_a[a_offset + ib].qs[iq + 2], + data_a[a_offset + ib].qs[iq + 3] + ); + qs = (qs >> qshift) & uint8_t(0xF); + + const float dl = float(int(sl | (sh << 4)) - 32); + return dl * vec4( + kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y], + kvalues_iq4nl[qs.z], kvalues_iq4nl[qs.w]); +} +#endif + #if defined(DATA_A_IQ4_NL) vec2 dequantize(uint ib, uint iqs, uint a_offset) { const uint vui = uint(data_a[a_offset + ib].qs[iqs]); @@ -321,7 +357,7 @@ vec2 get_dm(uint ib, uint a_offset) { } #endif -#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) +#if defined(DATA_A_Q4_0) || defined(DATA_A_Q5_0) || defined(DATA_A_Q8_0) || defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL) vec2 get_dm(uint ib, uint a_offset) { return vec2(float(data_a[a_offset + ib].d), 0); } diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp index 974efd3f9..78c3bddf2 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp @@ -454,6 +454,27 @@ float16_t dequantFuncIQ3_S(const in decodeBufIQ3_S bl, const in uint blockCoords } #endif +#if defined(DATA_A_IQ4_XS) +layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ4_XS { + block_iq4_xs block; +}; + +float16_t dequantFuncIQ4_XS(const in decodeBufIQ4_XS bl, const in uint blockCoords[2], const in uint coordInBlock[2]) +{ + const float16_t d = bl.block.d; + const uint idx = coordInBlock[1]; + + const uint ib32 = (idx & 0xE0) >> 5; // 0..7 + + const uint sl = (bl.block.scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF; + const uint sh = ((bl.block.scales_h) >> (2 * ib32)) & 3; + const uint qshift = (idx & 16) >> 2; + const uint q = (bl.block.qs[16 * ib32 + (idx % 16)] >> qshift) & 0xF; + + float16_t ret = d * float16_t(int(sl | (sh << 4)) - 32) * float16_t(kvalues_iq4nl[q]); + return ret; +} +#endif #if defined(DATA_A_IQ4_NL) layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ4_NL { @@ -504,6 +525,8 @@ float16_t dequantFuncIQ4_NL(const in decodeBufIQ4_NL bl, const in uint blockCoor #define dequantFuncA dequantFuncIQ3_XXS #elif defined(DATA_A_IQ3_S) #define dequantFuncA dequantFuncIQ3_S +#elif defined(DATA_A_IQ4_XS) +#define dequantFuncA dequantFuncIQ4_XS #elif defined(DATA_A_IQ4_NL) #define dequantFuncA dequantFuncIQ4_NL #endif diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp new file mode 100644 index 000000000..f930852a4 --- /dev/null +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_iq4_xs.comp @@ -0,0 +1,34 @@ +#version 450 + +#include "dequant_head.comp" + +layout(local_size_x = 256, local_size_y = 1, local_size_z = 1) in; + +layout (binding = 0) readonly buffer A {block_iq4_xs data_a[];}; +layout (binding = 1) writeonly buffer D {D_TYPE data_b[];}; + +void main() { + // Each thread handles 1 subblock (1 scale and 32 quantized values) + const uint ib = gl_WorkGroupID.x * 32 + gl_LocalInvocationID.x / 8; + + init_iq_shmem(gl_WorkGroupSize); + + if (ib >= p.nel / 256) { + return; + } + + const uint ib32 = gl_LocalInvocationID.x % 8; + + const float d = float(data_a[ib].d); + // Scales are 6 bits + const uint scale = ((data_a[ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF) + | (((data_a[ib].scales_h >> (2 * ib32)) & 3) << 4); + const float dl = d * (int(scale) - 32); + + const uint b_idx = 256 * ib + 32 * ib32; + const uint q_idx = 16 * ib32; + [[unroll]] for (uint l = 0; l < 16; ++l) { + data_b[b_idx + l + 0] = D_TYPE(dl * kvalues_iq4nl[data_a[ib].qs[q_idx + l] & 0xF]); + data_b[b_idx + l + 16] = D_TYPE(dl * kvalues_iq4nl[data_a[ib].qs[q_idx + l] >> 4]); + } +} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp index 043a53023..ba88ce79a 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/flash_attn_cm2.comp @@ -104,7 +104,7 @@ ACC_TYPE Max(const in uint32_t row, const in uint32_t col, const in ACC_TYPE ele #endif void main() { -#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL) init_iq_shmem(gl_WorkGroupSize); #endif diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp b/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp index 09dc43d8d..c16a2a9f6 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/get_rows_quant.comp @@ -12,7 +12,7 @@ void main() { const uint i11 = (gl_GlobalInvocationID.z)/p.ne12; const uint i12 = (gl_GlobalInvocationID.z)%p.ne12; -#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL) init_iq_shmem(gl_WorkGroupSize); #endif diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp index 48156e7ba..d7e99727d 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mat_vec.comp @@ -133,7 +133,7 @@ void compute_outputs(const uint32_t first_row, const uint32_t num_rows) { void main() { const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z); -#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL) init_iq_shmem(gl_WorkGroupSize); #endif diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp index d0559aac8..33b2234e7 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm.comp @@ -95,7 +95,7 @@ shared ACC_TYPE coopmat_stage[TM * TN * NUM_WARPS]; #endif void main() { -#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL) init_iq_shmem(gl_WorkGroupSize); #endif @@ -547,6 +547,25 @@ void main() { const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)] >> (16 * (idx % 2)); const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy); + buf_a[buf_idx ] = FLOAT_TYPE(v.x); + buf_a[buf_idx + 1] = FLOAT_TYPE(v.y); +#elif defined(DATA_A_IQ4_XS) + const uint idx = pos_a + (loadc_a + l) * p.stride_a / LOAD_VEC_A + loadr_a; + const uint buf_idx = (loadc_a + l) * SHMEM_STRIDE + loadr_a * LOAD_VEC_A; + + const uint ib = idx / 128; // 2 values per idx + const uint ib32 = (idx % 128) / 16; // 0..7 + const uint iq = 16 * ib32 + 2 * (idx % 8); + + const uint sl = (data_a[ib].scales_l[ib32/2] >> (4 * (ib32 & 1))) & 0xF; + const uint sh = ((data_a[ib].scales_h) >> (2 * ib32)) & 3; + const uint qshift = (idx & 8) >> 1; + u8vec2 qs = u8vec2(data_a[ib].qs[iq], data_a[ib].qs[iq + 1]); + qs = (qs >> qshift) & uint8_t(0xF); + + const float d = float(data_a[ib].d); + const vec2 v = d * float(int(sl | (sh << 4)) - 32) * vec2(kvalues_iq4nl[qs.x], kvalues_iq4nl[qs.y]); + buf_a[buf_idx ] = FLOAT_TYPE(v.x); buf_a[buf_idx + 1] = FLOAT_TYPE(v.y); #elif defined(DATA_A_IQ4_NL) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp index 27c5d68b3..7e29bbfec 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/mul_mm_cm2.comp @@ -106,7 +106,7 @@ D_TYPE perElemOpD(const in uint32_t r, const in uint32_t c, const in D_TYPE elem #endif void main() { -#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_NL) +#if defined(DATA_A_IQ2_XXS) || defined(DATA_A_IQ2_XS) || defined(DATA_A_IQ2_S) || defined(DATA_A_IQ3_XXS) || defined(DATA_A_IQ3_S) || defined(DATA_A_IQ4_XS) || defined(DATA_A_IQ4_NL) init_iq_shmem(gl_WorkGroupSize); #endif diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp index 9e56a3530..db643a54c 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/types.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/types.comp @@ -1026,6 +1026,23 @@ void init_iq_shmem(uvec3 wgsize) #define A_TYPE_PACKED16 block_iq3_s_packed16 #endif +#define QUANT_K_IQ4_XS 256 +#define QUANT_R_IQ4_XS 1 + +struct block_iq4_xs +{ + float16_t d; + uint16_t scales_h; + uint8_t scales_l[QUANT_K_IQ4_XS/64]; + uint8_t qs[QUANT_K_IQ4_XS/2]; +}; + +#if defined(DATA_A_IQ4_XS) +#define QUANT_K QUANT_K_IQ4_XS +#define QUANT_R QUANT_R_IQ4_XS +#define A_TYPE block_iq4_xs +#endif + #define QUANT_K_IQ4_NL 32 #define QUANT_R_IQ4_NL 2 @@ -1042,7 +1059,13 @@ struct block_iq4_nl_packed16 }; #if defined(DATA_A_IQ4_NL) +#define QUANT_K QUANT_K_IQ4_NL +#define QUANT_R QUANT_R_IQ4_NL +#define A_TYPE block_iq4_nl +#define A_TYPE_PACKED16 block_iq4_nl_packed16 +#endif +#if defined(DATA_A_IQ4_NL) || defined(DATA_A_IQ4_XS) const int8_t kvalues_iq4nl_const[16] = { int8_t(-127), int8_t(-104), int8_t(-83), int8_t(-65), int8_t(-49), int8_t(-35), int8_t(-22), int8_t(-10), int8_t(1), int8_t(13), int8_t(25), int8_t(38), int8_t(53), int8_t(69), int8_t(89), int8_t(113) @@ -1058,11 +1081,6 @@ void init_iq_shmem(uvec3 wgsize) } barrier(); } - -#define QUANT_K QUANT_K_IQ4_NL -#define QUANT_R QUANT_R_IQ4_NL -#define A_TYPE block_iq4_nl -#define A_TYPE_PACKED16 block_iq4_nl_packed16 #endif #endif // !defined(GGML_TYPES_COMP) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp index 93ddbfadc..77e7e1148 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/vulkan-shaders-gen.cpp @@ -60,6 +60,7 @@ const std::vector type_names = { "iq2_s", "iq3_xxs", "iq3_s", + "iq4_xs", "iq4_nl" }; From 2c6c8df56d8a3edd657b9a295e95d469a37f0044 Mon Sep 17 00:00:00 2001 From: Jeff Bolz Date: Thu, 6 Feb 2025 00:15:30 -0600 Subject: [PATCH 251/279] vulkan: optimize coopmat2 iq2/iq3 callbacks (#11521) * vulkan: optimize coopmat2 iq2/iq3 callbacks * build: trigger CI on GLSL compute shader changes --- .github/workflows/build.yml | 4 +- .../vulkan-shaders/dequant_funcs_cm2.comp | 79 +++++++++---------- 2 files changed, 40 insertions(+), 43 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8f9c82f87..6841ba589 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,10 +10,10 @@ on: push: branches: - master - paths: ['.github/workflows/build.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal'] + paths: ['.github/workflows/build.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp'] pull_request: types: [opened, synchronize, reopened] - paths: ['.github/workflows/build.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal'] + paths: ['.github/workflows/build.yml', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp'] concurrency: group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }} diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp index 78c3bddf2..0eba37420 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/dequant_funcs_cm2.comp @@ -323,15 +323,16 @@ float16_t dequantFuncIQ2_XXS(const in decodeBufIQ2_XXS bl, const in uint blockCo const uint8_t qs = bl.block.qs[iqs]; const uint signscale = pack32(u16vec2(bl16.block.qs[4*ib32+2], bl16.block.qs[4*ib32+3])); - const float16_t dscale = bl.block.d * 0.25hf * (0.5hf + float16_t(signscale >> 28)); + const float dscale = float(bl.block.d) * 0.25 * (0.5 + float(signscale >> 28)); uint sign = bitfieldExtract(signscale, 7 * int(ib8), 7); sign |= bitCount(sign) << 7; - const uint8_t g = unpack8(iq2xxs_grid[qs][(idx & 4) >> 2])[idx & 3]; + uint g2 = iq2xxs_grid[qs][(idx & 4) >> 2]; + g2 >>= (idx & 2) * 8; + const vec2 g = vec2(unpack8(g2)); - float16_t ret = dscale * float16_t(g) * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf); - - return ret; + vec2 ret = dscale * g * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf); + return float16_t(ret[idx & 1]); } #endif @@ -350,14 +351,16 @@ float16_t dequantFuncIQ2_XS(const in decodeBufIQ2_XS bl, const in uint blockCoor const uint iqs = (idx & 0xF8) >> 3; // 0..63 const uint16_t qs = bl.block.qs[iqs]; - const float16_t dscale = bl.block.d * 0.25hf * (0.5hf + float16_t((bl.block.scales[is] >> sshift) & 0xF)); + const float dscale = float(bl.block.d) * 0.25 * (0.5 + float((bl.block.scales[is] >> sshift) & 0xF)); uint sign = uint(qs >> 9); sign |= bitCount(sign) << 7; - const uint8_t g = unpack8(iq2xs_grid[qs & 0x1FF][(idx & 4) >> 2])[idx & 3]; + uint g2 = iq2xs_grid[qs & 0x1FF][(idx & 4) >> 2]; + g2 >>= (idx & 2) * 8; + const vec2 g = vec2(unpack8(g2)); - float16_t ret = dscale * float16_t(g) * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf); - return ret; + vec2 ret = dscale * g * ((sign & (1 << (idx & 7))) != 0 ? -1.0hf : 1.0hf); + return float16_t(ret[idx & 1]); } #endif @@ -369,24 +372,23 @@ layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ2 float16_t dequantFuncIQ2_S(const in decodeBufIQ2_S bl, const in uint blockCoords[2], const in uint coordInBlock[2]) { uint idx = coordInBlock[1]; - uint lsb = idx & 1; - idx /= 2; - const uint ib8 = (idx % 128) / 4; // 0..31 - const uint ib32 = ib8 / 4; // 0..7 + const uint ib32 = (idx & 0xE0) >> 5; // 0..7 + const uint ib8 = (idx & 0xF8) >> 3; // 0..31 + const uint qhshift = 2 * (ib8 % 4); - const uint scale = (bl.block.scales[ib32] >> (2 * (ib8 & 2))) & 0xf; + const uint scale = (bl.block.scales[ib32] >> ((idx & 0x10) >> 2)) & 0xf; const uint qs = bl.block.qs[ib8]; const uint qh = bl.block.qh[ib32]; - const uint qhshift = 2 * (ib8 % 4); - const uint sign = bl.block.qs[QUANT_K / 8 + ib8] >> (2 * (idx % 4)); + const uint sign = bl.block.qs[QUANT_K / 8 + ib8] >> (idx & 0x6); const float d = float(bl.block.d); const float db = d * 0.25 * (0.5 + scale); - const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(int8_t(sign << 1), int8_t(sign)))); - const uint16_t grid = unpack16(iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(idx & 2) >> 1])[idx & 1]; - const vec2 v = db * vec2(sign01) * vec2(unpack8(grid)); - return float16_t(v[lsb]); + const ivec2 sign01 = 1 - (2 & ivec2(sign << 1, sign)); + uint g2 = iq2s_grid[qs | ((qh << (8 - qhshift)) & 0x300)][(idx & 4) >> 2]; + g2 >>= (idx & 2) * 8; + const vec2 v = db * vec2(sign01) * vec2(unpack8(g2)); + return float16_t(v[idx & 1]); } #endif @@ -401,28 +403,25 @@ layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3 float16_t dequantFuncIQ3_XXS(const in decodeBufIQ3_XXS bl, const in uint blockCoords[2], const in uint coordInBlock[2]) { + decodeBufIQ3_XXS_packed16 bl16 = decodeBufIQ3_XXS_packed16(bl); uint idx = coordInBlock[1]; - uint lsb = idx & 1; - idx /= 2; - const uint iqs = (idx % 128) / 2; // 0..63 - const uint is = QUANT_K / 4 + 4 * (iqs / 8); // 8 values + const uint iqs = (idx & 0xFC) >> 2; // 0..63 + const uint is = QUANT_K / 4 + ((idx & 0xE0) >> 3);// 8 values const float d = float(bl.block.d); const uint qs = bl.block.qs[iqs]; - const uint signs = pack32(u8vec4( - bl.block.qs[is+0], - bl.block.qs[is+1], - bl.block.qs[is+2], - bl.block.qs[is+3] + const uint signs = pack32(u16vec2( + bl16.block.qs[is/2+0], + bl16.block.qs[is/2+1] )); const float db = d * 0.5 * (0.5 + (signs >> 28)); const uint32_t sign7 = bitfieldExtract(signs, 7 * (int(iqs / 2) % 4), 7); - const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (2 * (idx % 4)); - const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(int8_t(sign << 1), int8_t(sign)))); - const uint grid = iq3xxs_grid[qs] >> (16 * (idx & 1)); + const uint sign = (sign7 | (bitCount(sign7) << 7)) >> (idx & 0x6); + const ivec2 sign01 = ivec2(1 - (2 & ivec2(sign << 1, sign))); + const uint grid = iq3xxs_grid[qs] >> (16 * ((idx & 2) >> 1)); const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy); - return float16_t(v[lsb]); + return float16_t(v[idx & 1]); } #endif @@ -434,23 +433,21 @@ layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufIQ3 float16_t dequantFuncIQ3_S(const in decodeBufIQ3_S bl, const in uint blockCoords[2], const in uint coordInBlock[2]) { uint idx = coordInBlock[1]; - uint lsb = idx & 1; - idx /= 2; - const uint iqs = (idx % 128) / 2; // 0..63 - const uint iqh = iqs / 8; + const uint iqs = (idx & 0xFC) >> 2; // 0..63 + const uint iqh = (idx & 0xE0) >> 5; const float d = float(bl.block.d); const uint qs = bl.block.qs[iqs]; const uint qh = bl.block.qh[iqh]; - const int8_t sign = int8_t(bl.block.signs[iqs / 2] >> (2 * (idx % 4))); + const int8_t sign = int8_t(bl.block.signs[iqs / 2] >> (idx & 0x6)); const uint scale = bl.block.scales[iqs / 16]; - const i8vec2 sign01 = i8vec2(1 - (2 & i8vec2(sign << 1, sign))); + const ivec2 sign01 = ivec2(1 - (2 & ivec2(sign << 1, sign))); const float db = d * (1 + 2 * ((scale >> (4 * (iqh & 1))) & 0xf)); - const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)] >> (16 * (idx % 2)); + const uint32_t grid = iq3s_grid[qs | ((qh << (8 - (iqs % 8))) & 256)] >> ((idx & 2) << 3); const vec2 v = db * vec2(sign01) * vec2(unpack8(grid).xy); - return float16_t(v[lsb]); + return float16_t(v[idx & 1]); } #endif From 8d4d2be143a3919c3d194b9bf7a221e50abed893 Mon Sep 17 00:00:00 2001 From: junchao-zhao <68935141+junchao-loongson@users.noreply.github.com> Date: Thu, 6 Feb 2025 17:20:00 +0800 Subject: [PATCH 252/279] ggml : fix LoongArch compile error with 128-bit SIMD (#11701) --- ggml/src/ggml-cpu/ggml-cpu-quants.c | 169 +++++++++++++++------------- 1 file changed, 91 insertions(+), 78 deletions(-) diff --git a/ggml/src/ggml-cpu/ggml-cpu-quants.c b/ggml/src/ggml-cpu/ggml-cpu-quants.c index 88303ff0e..72ec58cee 100644 --- a/ggml/src/ggml-cpu/ggml-cpu-quants.c +++ b/ggml/src/ggml-cpu/ggml-cpu-quants.c @@ -297,6 +297,90 @@ static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4 static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4 #endif +#if defined(__loongarch_sx) + +static __m128i lsx_packs_w(__m128i a, __m128i b) { + __m128i tmp, tmp1; + tmp = __lsx_vsat_w(a, 15); + tmp1 = __lsx_vsat_w(b, 15); + return __lsx_vpickev_h(tmp1, tmp); +} + +static __m128i lsx_packs_h(__m128i a, __m128i b) { + __m128i tmp, tmp1; + tmp = __lsx_vsat_h(a, 7); + tmp1 = __lsx_vsat_h(b, 7); + return __lsx_vpickev_b(tmp1, tmp); +} + +static __m128i lsx_packus_h(__m128i a, __m128i b) { + __m128i tmp, tmp1; + tmp = __lsx_vsat_hu(a, 7); + tmp1 = __lsx_vsat_hu(b, 7); + return __lsx_vpickev_b(tmp1, tmp); +} + +static __m128i lsx_maddubs_h(__m128i a, __m128i b) { + __m128i tmp1, tmp2; + tmp1 = __lsx_vmulwev_h_b(a, b); + tmp2 = __lsx_vmulwod_h_b(a, b); + return __lsx_vsadd_h(tmp1, tmp2); +} + +static __m128i lsx_madd_h(__m128i a, __m128i b) { + __m128i tmp1, tmp2; + tmp1 = __lsx_vmulwev_w_h(a, b); + tmp2 = __lsx_vmulwod_w_h(a, b); + return __lsx_vadd_w(tmp1, tmp2); +} + +static __m128i lsx_set_w(int32_t a, int32_t b, int32_t c, int32_t d) { + v4i32 __ret = {d, c, b, a}; + return (__m128i)__ret; +} + +static __m128i lsx_shuffle_b(__m128i a, __m128i b) { + __m128i mask_f, zero, tmp0, tmp2, mask; + int f = 0x8f; + mask_f = __lsx_vreplgr2vr_b(f); + zero = __lsx_vldi(0); + tmp0 = __lsx_vand_v(b, mask_f); // get mask with low 4 bit and sign bits + tmp0 = __lsx_vori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive + mask = __lsx_vsle_b(zero, tmp0); // if mask >= 0, set mask + tmp2 = __lsx_vand_v(tmp0, mask); // maskout the in2 < ones + return __lsx_vshuf_b(a, zero, tmp2); +} + +static __m128i lsx_hadd_h(__m128i a, __m128i b) { + __m128i tmp1 = __lsx_vpickev_h(b, a); + __m128i tmp2 = __lsx_vpickod_h(b, a); + return __lsx_vadd_h(tmp1, tmp2); +} + +static __m128i lsx_hadd_w(__m128i a, __m128i b) { + __m128i tmp1 = __lsx_vpickev_w(b, a); + __m128i tmp2 = __lsx_vpickod_w(b, a); + return __lsx_vadd_w(tmp1, tmp2); +} + +static __m128 lsx_hadd_s(__m128 a, __m128 b) { + __m128 tmp1 = (__m128)__lsx_vpickev_w((__m128i)b, (__m128i)a); + __m128 tmp2 = (__m128)__lsx_vpickod_w((__m128i)b, (__m128i)a); + + return __lsx_vfadd_s(tmp1, tmp2); +} + +static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 c, const __m128 d) { + __m128 res_0 =lsx_hadd_s(a, b); + __m128 res_1 =lsx_hadd_s(c, d); + __m128 res =lsx_hadd_s(res_0, res_1); + res =lsx_hadd_s(res, res); + res =lsx_hadd_s(res, res); + + return ((v4f32)res)[0]; +} +#endif + #if defined(__loongarch_asx) #ifdef __clang__ @@ -395,11 +479,6 @@ static __m256i lasx_set_w(int e7, int e6, int e5, int e4, int e3, int e2, int e1 return (__m256i)__ret; } -static __m128i lsx_set_w(int32_t a, int32_t b, int32_t c, int32_t d) { - v4i32 __ret = {d, c, b, a}; - return (__m128i)__ret; -} - static __m256i lasx_set_d(int64_t a, int64_t b, int64_t c, int64_t d) { v4i64 __ret = {d, c, b, a}; return (__m256i)__ret; @@ -409,18 +488,6 @@ static __m256i lasx_insertf128( __m128i x, __m128i y) { return lasx_set_q(x, y); } -static __m128i lsx_shuffle_b(__m128i a, __m128i b) { - __m128i mask_f, zero, tmp0, tmp2, mask; - int f = 0x8f; - mask_f = __lsx_vreplgr2vr_b(f); - zero = __lsx_vldi(0); - tmp0 = __lsx_vand_v(b, mask_f); // get mask with low 4 bit and sign bits - tmp0 = __lsx_vori_b(tmp0, 0x10); // make each mask or with 0x10 prepare for positive - mask = __lsx_vsle_b(zero, tmp0); // if mask >= 0, set mask - tmp2 = __lsx_vand_v(tmp0, mask); // maskout the in2 < ones - return __lsx_vshuf_b(a, zero, tmp2); -} - static __m256i lasx_shuffle_b(__m256i a, __m256i b) { __m256i mask_f, zero, tmp0, tmp2, mask; int f = 0x8f; @@ -482,25 +549,6 @@ static __m128 lasx_extractf128( __m256 a, int pos) { return ret; } -static __m128i lsx_hadd_h(__m128i a, __m128i b) { - __m128i tmp1 = __lsx_vpickev_h(b, a); - __m128i tmp2 = __lsx_vpickod_h(b, a); - return __lsx_vadd_h(tmp1, tmp2); -} - -static __m128i lsx_hadd_w(__m128i a, __m128i b) { - __m128i tmp1 = __lsx_vpickev_w(b, a); - __m128i tmp2 = __lsx_vpickod_w(b, a); - return __lsx_vadd_w(tmp1, tmp2); -} - -static __m128 lsx_hadd_s(__m128 a, __m128 b) { - __m128 tmp1 = (__m128)__lsx_vpickev_w((__m128i)b, (__m128i)a); - __m128 tmp2 = (__m128)__lsx_vpickod_w((__m128i)b, (__m128i)a); - - return __lsx_vfadd_s(tmp1, tmp2); -} - static __m256i lasx_maddubs_h(__m256i a, __m256i b) { __m256i tmp1, tmp2; tmp1 = __lasx_xvmulwev_h_b(a, b); @@ -529,42 +577,6 @@ static __m256i lasx_packs_h(__m256i a, __m256i b) { return __lasx_xvpickev_b(tmp1, tmp); } -static __m128i lsx_packs_w(__m128i a, __m128i b) { - __m128i tmp, tmp1; - tmp = __lsx_vsat_w(a, 15); - tmp1 = __lsx_vsat_w(b, 15); - return __lsx_vpickev_h(tmp1, tmp); -} - -static __m128i lsx_packs_h(__m128i a, __m128i b) { - __m128i tmp, tmp1; - tmp = __lsx_vsat_h(a, 7); - tmp1 = __lsx_vsat_h(b, 7); - return __lsx_vpickev_b(tmp1, tmp); -} - -static __m128i lsx_packus_h(__m128i a, __m128i b) { - __m128i tmp, tmp1; - tmp = __lsx_vsat_hu(a, 7); - tmp1 = __lsx_vsat_hu(b, 7); - return __lsx_vpickev_b(tmp1, tmp); -} - - -static __m128i lsx_maddubs_h(__m128i a, __m128i b) { - __m128i tmp1, tmp2; - tmp1 = __lsx_vmulwev_h_b(a, b); - tmp2 = __lsx_vmulwod_h_b(a, b); - return __lsx_vsadd_h(tmp1, tmp2); -} - -static __m128i lsx_madd_h(__m128i a, __m128i b) { - __m128i tmp1, tmp2; - tmp1 = __lsx_vmulwev_w_h(a, b); - tmp2 = __lsx_vmulwod_w_h(a, b); - return __lsx_vadd_w(tmp1, tmp2); -} - // multiply int8_t, add results pairwise twice static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) { // Get absolute values of x vectors @@ -2232,21 +2244,22 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r } sumf = hsum_float_8(acc); + #elif defined(__loongarch_sx) // set constants const __m128i low_mask = __lsx_vreplgr2vr_b(0xF); const __m128i off = __lsx_vreplgr2vr_b(8); // Initialize accumulator with zeros - __m128 acc_0 = __lsx_vldi(0); - __m128 acc_1 = __lsx_vldi(0); - __m128 acc_2 = __lsx_vldi(0); - __m128 acc_3 = __lsx_vldi(0); + __m128 acc_0 = (__m128)__lsx_vldi(0); + __m128 acc_1 = (__m128)__lsx_vldi(0); + __m128 acc_2 = (__m128)__lsx_vldi(0); + __m128 acc_3 = (__m128)__lsx_vldi(0); for (; ib + 1 < nb; ib += 2) { // Compute combined scale for the block 0 and 1 - const __m128 d_0_1 = __lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); + const __m128 d_0_1 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib].d) * GGML_FP16_TO_FP32(y[ib].d) ); const __m128i tmp_0_1 = __lsx_vld((const __m128i *)x[ib].qs, 0); @@ -2264,7 +2277,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r //_mm_prefetch(&y[ib] + 2 * sizeof(block_q8_0), _MM_HINT_T0); // Compute combined scale for the block 2 and 3 - const __m128 d_2_3 = __lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) ); + const __m128 d_2_3 = (__m128)__lsx_vreplgr2vr_w( GGML_FP16_TO_FP32(x[ib + 1].d) * GGML_FP16_TO_FP32(y[ib + 1].d) ); const __m128i tmp_2_3 = __lsx_vld((const __m128i *)x[ib + 1].qs, 0); From c0d4843225eed38903ea71ef302a02fa0b27f048 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrien=20Gallou=C3=ABt?= Date: Thu, 6 Feb 2025 12:08:13 +0100 Subject: [PATCH 253/279] build : fix llama.pc (#11658) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Adrien Gallouët --- CMakeLists.txt | 2 +- cmake/llama.pc.in | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 74b48d24d..7b2a1845e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -233,4 +233,4 @@ configure_file(cmake/llama.pc.in @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/llama.pc" - DESTINATION lib/pkgconfig) + DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) diff --git a/cmake/llama.pc.in b/cmake/llama.pc.in index 0b2b6bcfa..6fb58b5f6 100644 --- a/cmake/llama.pc.in +++ b/cmake/llama.pc.in @@ -1,10 +1,10 @@ prefix=@CMAKE_INSTALL_PREFIX@ -exec_prefix=${prefix} -libdir=${exec_prefix}/lib -includedir=${prefix}/include +exec_prefix=@CMAKE_INSTALL_PREFIX@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ Name: llama Description: Port of Facebook's LLaMA model in C/C++ -Version: @PROJECT_VERSION@ -Libs: -L${libdir} -lggml -lggml-base -lllama +Version: @LLAMA_INSTALL_VERSION@ +Libs: -L${libdir} -lggml -lggml-base -lllama Cflags: -I${includedir} From 9dd7a0390feffcc1f4b17eb7692a6e43030d85af Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Thu, 6 Feb 2025 13:41:37 +0200 Subject: [PATCH 254/279] llama : add log about loading model tensors (#11699) --- src/llama-model.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/llama-model.cpp b/src/llama-model.cpp index 0487c978b..e30db66e9 100644 --- a/src/llama-model.cpp +++ b/src/llama-model.cpp @@ -1275,6 +1275,8 @@ bool llama_model::load_tensors(llama_model_loader & ml) { const bool use_mmap_buffer = true; + LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, use_mmap_buffer ? "true" : "false"); + // build a list of buffer types for the CPU and GPU devices pimpl->cpu_buft_list = make_cpu_buft_list(devices); for (auto * dev : devices) { From 194b2e69f8da3a22395c74fd9acd6d5835437b96 Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Thu, 6 Feb 2025 17:12:35 +0530 Subject: [PATCH 255/279] SYCL: Adjust support condition for norm operators (#11674) SYCL does not support non contiguous tensors for norm operations --- ggml/src/ggml-sycl/ggml-sycl.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 2984ed82e..aab34a752 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4537,14 +4537,17 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_VIEW: case GGML_OP_PERMUTE: case GGML_OP_TRANSPOSE: - case GGML_OP_NORM: case GGML_OP_ADD: case GGML_OP_ADD1: case GGML_OP_LOG: case GGML_OP_SUB: case GGML_OP_MUL: case GGML_OP_DIV: + return true; + case GGML_OP_NORM: case GGML_OP_RMS_NORM: + case GGML_OP_GROUP_NORM: + return ggml_is_contiguous(op->src[0]); case GGML_OP_SCALE: case GGML_OP_SQR: case GGML_OP_SQRT: @@ -4576,7 +4579,6 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_SUM_ROWS: case GGML_OP_ARGSORT: case GGML_OP_ACC: - case GGML_OP_GROUP_NORM: case GGML_OP_UPSCALE: case GGML_OP_PAD: case GGML_OP_LEAKY_RELU: From 9ab42dc722ad19a12af80f38a06474d498f96da3 Mon Sep 17 00:00:00 2001 From: Tei Home Date: Thu, 6 Feb 2025 20:16:15 +0800 Subject: [PATCH 256/279] docs: update fedora cuda guide for 12.8 release (#11393) * docs: update fedora cuda guide for 12.8 release * docs: build cuda update --- docs/build.md | 61 ++++++++++++++--- docs/cuda-fedora.md | 159 ++++++++++++++++---------------------------- 2 files changed, 109 insertions(+), 111 deletions(-) diff --git a/docs/build.md b/docs/build.md index dd6495028..afb7a0402 100644 --- a/docs/build.md +++ b/docs/build.md @@ -125,21 +125,66 @@ For detailed info, please refer to [llama.cpp for SYCL](./backend/SYCL.md). ## CUDA -This provides GPU acceleration using an NVIDIA GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from the [NVIDIA developer site](https://developer.nvidia.com/cuda-downloads). +This provides GPU acceleration using an NVIDIA GPU. Make sure to have the [CUDA toolkit](https://developer.nvidia.com/cuda-toolkit) installed. -If you are using Fedora (using Fedora Workstation, or an 'Atomic' variant such as Silverblue), or would like to set up CUDA in a toolbox, please consider our [Fedora CUDA guide](./cuda-fedora.md). Unfortunately, the process is not as simple as one might expect. +#### Download directly from NVIDIA +You may find the official downloads here: [NVIDIA developer site](https://developer.nvidia.com/cuda-downloads). -- Using `CMake`: - ```bash - cmake -B build -DGGML_CUDA=ON - cmake --build build --config Release - ``` +#### Compile and run inside a Fedora Toolbox Container +We also have a [guide](./cuda-fedora.md) for setting up CUDA toolkit in a Fedora [toolbox container](https://containertoolbx.org/). -The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used. +**Recommended for:** + +- ***Particularly*** *convenient* for users of [Atomic Desktops for Fedora](https://fedoraproject.org/atomic-desktops/); such as: [Silverblue](https://fedoraproject.org/atomic-desktops/silverblue/) and [Kinoite](https://fedoraproject.org/atomic-desktops/kinoite/). +- Toolbox is installed by default: [Fedora Workstation](https://fedoraproject.org/workstation/) or [Fedora KDE Plasma Desktop](https://fedoraproject.org/spins/kde). +- *Optionally* toolbox packages are available: [Arch Linux](https://archlinux.org/), [Red Hat Enterprise Linux >= 8.5](https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux), or [Ubuntu](https://ubuntu.com/download) + + +### Compilation +```bash +cmake -B build -DGGML_CUDA=ON +cmake --build build --config Release +``` + +### Override Compute Capability Specifications + +If `nvcc` cannot detect your gpu, you may get compile-warnings such as: + ```text +nvcc warning : Cannot find valid GPU for '-arch=native', default arch is used +``` + +To override the `native` GPU detection: + +#### 1. Take note of the `Compute Capability` of your NVIDIA devices: ["CUDA: Your GPU Compute > Capability"](https://developer.nvidia.com/cuda-gpus). + +```text +GeForce RTX 4090 8.9 +GeForce RTX 3080 Ti 8.6 +GeForce RTX 3070 8.6 +``` + +#### 2. Manually list each varying `Compute Capability` in the `CMAKE_CUDA_ARCHITECTURES` list. + +```bash +cmake -B build -DGGML_CUDA=ON -DCMAKE_CUDA_ARCHITECTURES="86;89" +``` + +### Runtime CUDA environmental variables + +You may set the [cuda environmental variables](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) at runtime. + +```bash +# Use `CUDA_VISIBLE_DEVICES` to hide the first compute device. +CUDA_VISIBLE_DEVICES="-0" ./build/bin/llama-server --model /srv/models/llama.gguf +``` + +### Unified Memory The environment variable `GGML_CUDA_ENABLE_UNIFIED_MEMORY=1` can be used to enable unified memory in Linux. This allows swapping to system RAM instead of crashing when the GPU VRAM is exhausted. In Windows this setting is available in the NVIDIA control panel as `System Memory Fallback`. +### Performance Tuning + The following compilation options are also available to tweak performance: | Option | Legal values | Default | Description | diff --git a/docs/cuda-fedora.md b/docs/cuda-fedora.md index b993386c8..9c88b7694 100644 --- a/docs/cuda-fedora.md +++ b/docs/cuda-fedora.md @@ -1,17 +1,16 @@ # Setting Up CUDA on Fedora In this guide we setup [Nvidia CUDA](https://docs.nvidia.com/cuda/) in a toolbox container. This guide is applicable for: + - [Fedora Workstation](https://fedoraproject.org/workstation/) - [Atomic Desktops for Fedora](https://fedoraproject.org/atomic-desktops/) - [Fedora Spins](https://fedoraproject.org/spins) -- [Other Distributions](https://containertoolbx.org/distros/), including `Red Hat Enterprise Linux >= 8.`, `Arch Linux`, and `Ubuntu`. - +- [Other Distributions](https://containertoolbx.org/distros/), including `Red Hat Enterprise Linux >= 8.5`, `Arch Linux`, and `Ubuntu`. ## Table of Contents - [Prerequisites](#prerequisites) -- [Monitoring NVIDIA CUDA Repositories](#monitoring-nvidia-cuda-repositories) -- [Using the Fedora 39 CUDA Repository](#using-the-fedora-39-cuda-repository) +- [Using the Fedora 41 CUDA Repository](#using-the-fedora-41-cuda-repository) - [Creating a Fedora Toolbox Environment](#creating-a-fedora-toolbox-environment) - [Installing Essential Development Tools](#installing-essential-development-tools) - [Adding the CUDA Repository](#adding-the-cuda-repository) @@ -29,44 +28,33 @@ In this guide we setup [Nvidia CUDA](https://docs.nvidia.com/cuda/) in a toolbox ## Prerequisites - **Toolbox Installed on the Host System** `Fedora Silverblue` and `Fedora Workstation` both have toolbox by default, other distributions may need to install the [toolbox package](https://containertoolbx.org/install/). -- **NVIDIA Drivers and Graphics Card installed on Host System (optional)** To run CUDA program, such as `llama.cpp`, the host should be setup to access your NVIDIA hardware. Fedora Hosts can use the [RPM Fusion Repository](https://rpmfusion.org/Howto/NVIDIA). +- **NVIDIA Drivers and Graphics Card installed on Host System (recommended)** To run CUDA program, such as `llama.cpp`, the host should be setup to access your NVIDIA hardware. Fedora Hosts can use the [RPM Fusion Repository](https://rpmfusion.org/Howto/NVIDIA). - **Internet connectivity** to download packages. -### Monitoring NVIDIA CUDA Repositories +### Using the Fedora 41 CUDA Repository -Before proceeding, it is advisable to check if NVIDIA has updated their CUDA repositories for your Fedora version. NVIDIA's repositories can be found at: +The latest release is 41. -- [Fedora 40 CUDA Repository](https://developer.download.nvidia.com/compute/cuda/repos/fedora40/x86_64/) - [Fedora 41 CUDA Repository](https://developer.download.nvidia.com/compute/cuda/repos/fedora41/x86_64/) -As of the latest update, these repositories do not contain the `cuda` meta-package or are missing essential components. - -### Using the Fedora 39 CUDA Repository - -Since the newer repositories are incomplete, we'll use the Fedora 39 repository: - -- [Fedora 39 CUDA Repository](https://developer.download.nvidia.com/compute/cuda/repos/fedora39/x86_64/) - -**Note:** Fedora 39 is no longer maintained, so we recommend using a toolbox environment to prevent system conflicts. +**Note:** We recommend using a toolbox environment to prevent system conflicts. ## Creating a Fedora Toolbox Environment -This guide focuses on Fedora hosts, but with small adjustments, it can work for other hosts. Using a Fedora 39 toolbox allows us to install the necessary packages without affecting the host system. +This guide focuses on Fedora hosts, but with small adjustments, it can work for other hosts. Using the Fedora Toolbox allows us to install the necessary packages without affecting the host system. **Note:** Toolbox is available for other systems, and even without Toolbox, it is possible to use Podman or Docker. -We do not recommend installing on the host system, as Fedora 39 is out-of-maintenance, and instead you should upgrade to a maintained version of Fedora for your host. - -1. **Create a Fedora 39 Toolbox:** +1. **Create a Fedora 41 Toolbox:** ```bash - toolbox create --image registry.fedoraproject.org/fedora-toolbox:39 --container fedora-toolbox-39-cuda + toolbox create --image registry.fedoraproject.org/fedora-toolbox:41 --container fedora-toolbox-41-cuda ``` 2. **Enter the Toolbox:** ```bash - toolbox enter --container fedora-toolbox-39-cuda + toolbox enter --container fedora-toolbox-41-cuda ``` Inside the toolbox, you have root privileges and can install packages without affecting the host system. @@ -85,7 +73,7 @@ We do not recommend installing on the host system, as Fedora 39 is out-of-mainte sudo dnf install vim-default-editor --allowerasing ``` - The `--allowerasing` flag resolves any package conflicts. + The `--allowerasing` flag will allow the removal of the conflicting `nano-default-editor` package. 3. **Install Development Tools and Libraries:** @@ -100,7 +88,7 @@ We do not recommend installing on the host system, as Fedora 39 is out-of-mainte Add the NVIDIA CUDA repository to your DNF configuration: ```bash -sudo dnf config-manager --add-repo https://developer.download.nvidia.com/compute/cuda/repos/fedora39/x86_64/cuda-fedora39.repo +sudo dnf config-manager addrepo --from-repofile=https://developer.download.nvidia.com/compute/cuda/repos/fedora41/x86_64/cuda-fedora41.repo ``` After adding the repository, synchronize the package manager again: @@ -109,106 +97,62 @@ After adding the repository, synchronize the package manager again: sudo dnf distro-sync ``` -## Installing `nvidia-driver-libs` +## Installing `nvidia-driver-libs` and `nvidia-driver-cuda-libs` -Attempt to install `nvidia-driver-libs`: +We need to detect if the host is supplying the [NVIDIA driver libraries into the toolbox](https://github.com/containers/toolbox/blob/main/src/pkg/nvidia/nvidia.go). ```bash -sudo dnf install nvidia-driver-libs +ls -la /usr/lib64/libcuda.so.1 ``` **Explanation:** -- `nvidia-driver-libs` contains necessary NVIDIA driver libraries required by CUDA. -- This step might fail due to conflicts with existing NVIDIA drivers on the host system. +- `nvidia-driver-libs` and `nvidia-driver-cuda-libs` contains necessary NVIDIA driver libraries required by CUDA, + on hosts with NVIDIA drivers installed the Fedora Container will supply the host libraries. -## Manually Resolving Package Conflicts +### Install Nvidia Driver Libraries on Guest (if `libcuda.so.1` was NOT found). + +```bash +sudo dnf install nvidia-driver-libs nvidia-driver-cuda-libs +``` + +### Manually Updating the RPM database for host-supplied NVIDIA drivers (if `libcuda.so.1` was found). If the installation fails due to conflicts, we'll manually download and install the required packages, excluding conflicting files. -### 1. Download the `nvidia-driver-libs` RPM +#### 1. Download `nvidia-driver-libs` and `nvidia-driver-cuda-libs` RPM's (with dependencies) ```bash -sudo dnf download --arch x86_64 nvidia-driver-libs +sudo dnf download --destdir=/tmp/nvidia-driver-libs --resolve --arch x86_64 nvidia-driver-libs nvidia-driver-cuda-libs ``` -You should see a file similar to: - -``` -nvidia-driver-libs-560.35.05-1.fc39.x86_64.rpm -``` - -### 2. Attempt to Install the RPM +#### 2. Update the RPM database to assume the installation of these packages. ```bash -sudo dnf install nvidia-driver-libs-560.35.05-1.fc39.x86_64.rpm -``` - -**Expected Error:** - -Installation may fail with errors pointing to conflicts with `egl-gbm` and `egl-wayland`. - -**Note: It is important to carefully read the error messages to identify the exact paths that need to be excluded.** - -### 3. Download Dependencies - -```bash -sudo dnf download --arch x86_64 egl-gbm egl-wayland -``` - -### 4. Install `egl-gbm` with Excluded Paths - -Exclude conflicting files during installation: - -```bash -sudo rpm --install --verbose --hash \ - --excludepath=/usr/lib64/libnvidia-egl-gbm.so.1.1.2 \ - --excludepath=/usr/share/egl/egl_external_platform.d/15_nvidia_gbm.json \ - egl-gbm-1.1.2^20240919gitb24587d-3.fc39.x86_64.rpm -``` - -**Explanation:** - -- The `--excludepath` option skips installing files that conflict with existing files. -- Adjust the paths based on the error messages you receive. - -### 5. Install `egl-wayland` with Excluded Paths - -```bash -sudo rpm --install --verbose --hash \ - --excludepath=/usr/share/egl/egl_external_platform.d/10_nvidia_wayland.json \ - egl-wayland-1.1.17^20241118giteeb29e1-5.fc39.x86_64.rpm -``` - -### 6. Install `nvidia-driver-libs` with Excluded Paths - -```bash -sudo rpm --install --verbose --hash \ - --excludepath=/usr/share/glvnd/egl_vendor.d/10_nvidia.json \ - --excludepath=/usr/share/nvidia/nvoptix.bin \ - nvidia-driver-libs-560.35.05-1.fc39.x86_64.rpm +sudo rpm --install --verbose --hash --justdb /tmp/nvidia-driver-libs/* ``` **Note:** -- Replace the paths with the ones causing conflicts in your installation if they differ. -- The `--verbose` and `--hash` options provide detailed output during installation. +- The `--justdb` option only updates the RPM database, without touching the filesystem. -## Finalizing the Installation of `nvidia-driver-libs` +#### Finalizing the Installation of `nvidia-driver-libs` and `nvidia-driver-cuda-libs` After manually installing the dependencies, run: ```bash -sudo dnf install nvidia-driver-libs +sudo dnf install nvidia-driver-libs nvidia-driver-cuda-libs ``` You should receive a message indicating the package is already installed: ``` -Package nvidia-driver-libs-3:560.35.05-1.fc39.x86_64 is already installed. -Dependencies resolved. +Updating and loading repositories: +Repositories loaded. +Package "nvidia-driver-libs-3:570.86.10-1.fc41.x86_64" is already installed. +Package "nvidia-driver-cuda-libs-3:570.86.10-1.fc41.x86_64" is already installed. + Nothing to do. -Complete! ``` ## Installing the CUDA Meta-Package @@ -233,7 +177,7 @@ To use CUDA, add its binary directory to your system's `PATH`. **Explanation:** - - We add to `/etc/profile.d/` as the `/etc/` folder is unique to this particular container, and is not shared with other containers or the host system. + - We add to `/etc/profile.d/` as the `/etc/` folder is unique to this particular container, and is not shared with other containers or the host system. - The backslash `\` before `$PATH` ensures the variable is correctly written into the script. 2. **Make the Script Executable:** @@ -262,26 +206,33 @@ You should see output similar to: ``` nvcc: NVIDIA (R) Cuda compiler driver -Copyright (c) 2005-2024 NVIDIA Corporation -Built on Tue_Oct_29_23:50:19_PDT_2024 -Cuda compilation tools, release 12.6, V12.6.85 -Build cuda_12.6.r12.6/compiler.35059454_0 +Copyright (c) 2005-2025 NVIDIA Corporation +Built on Wed_Jan_15_19:20:09_PST_2025 +Cuda compilation tools, release 12.8, V12.8.61 +Build cuda_12.8.r12.8/compiler.35404655_0 ``` This output confirms that the CUDA compiler is accessible and indicates the installed version. ## Conclusion -You have successfully set up CUDA on Fedora within a toolbox environment using the Fedora 39 CUDA repository. By manually resolving package conflicts and configuring the environment, you can develop CUDA applications without affecting your host system. +You have successfully set up CUDA on Fedora within a toolbox environment using the Fedora 41 CUDA repository. By manually updating the RPM db and configuring the environment, you can develop CUDA applications without affecting your host system. ## Troubleshooting - **Installation Failures:** - - If you encounter errors during installation, carefully read the error messages. They often indicate conflicting files or missing dependencies. - - Use the `--excludepath` option with `rpm` to exclude conflicting files during manual installations. -- **Driver Conflicts:** - - Since the host system may already have NVIDIA drivers installed, conflicts can arise. Using the toolbox environment helps isolate these issues. + - If you encounter errors during installation, carefully read the error messages. They often indicate conflicting files or missing dependencies. + - You may use the `--excludepath` option with `rpm` to exclude conflicting files during manual RPM installations. + +- **Rebooting the Container:** + + - Sometimes there may be a bug in the NVIDIA driver host passthrough (such as missing a shared library). Rebooting the container may solve this issue: + + ```bash + # on the host system + podman container restart --all + ``` - **Environment Variables Not Set:** - If `nvcc` is not found after installation, ensure that `/usr/local/cuda/bin` is in your `PATH`. @@ -291,10 +242,12 @@ You have successfully set up CUDA on Fedora within a toolbox environment using t ## Additional Notes - **Updating CUDA in the Future:** + - Keep an eye on the official NVIDIA repositories for updates to your Fedora version. - When an updated repository becomes available, adjust your `dnf` configuration accordingly. - **Building `llama.cpp`:** + - With CUDA installed, you can follow these [build instructions for `llama.cpp`](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) to compile it with CUDA support. - Ensure that any CUDA-specific build flags or paths are correctly set in your build configuration. From 2fb3c32a1634488a5265d1304ab37628eeb5480d Mon Sep 17 00:00:00 2001 From: Xuan-Son Nguyen Date: Thu, 6 Feb 2025 17:32:29 +0100 Subject: [PATCH 257/279] server : (webui) migrate project to ReactJS with typescript (#11688) * init version * fix auto scroll * bring back copy btn * bring back thought process * add lint and format check on CI * remove lang from html tag * allow multiple generations at the same time * lint and format combined * fix unused var * improve MarkdownDisplay * fix more latex * fix code block cannot be selected while generating --- .github/workflows/server.yml | 25 +- examples/server/public/index.html.gz | Bin 1207128 -> 1219985 bytes examples/server/webui/.gitignore | 24 + examples/server/webui/.prettierignore | 10 + examples/server/webui/eslint.config.js | 26 + examples/server/webui/index.html | 353 +- examples/server/webui/package-lock.json | 6655 ++++++++++++----- examples/server/webui/package.json | 48 +- .../webui/public/demo-conversation.json | 2 +- examples/server/webui/src/App.tsx | 36 + examples/server/webui/src/Config.ts | 89 + .../webui/src/components/ChatMessage.tsx | 229 + .../webui/src/components/ChatScreen.tsx | 123 + .../server/webui/src/components/Header.tsx | 182 + .../webui/src/components/MarkdownDisplay.tsx | 254 + .../webui/src/components/SettingDialog.tsx | 306 + .../server/webui/src/components/Sidebar.tsx | 95 + examples/server/webui/src/highlight-config.js | 60 - .../webui/src/{styles.scss => index.scss} | 25 +- examples/server/webui/src/katex-gpt.js | 66 - examples/server/webui/src/main.js | 704 -- examples/server/webui/src/main.tsx | 10 + .../server/webui/src/utils/app.context.tsx | 304 + examples/server/webui/src/utils/misc.ts | 87 + examples/server/webui/src/utils/storage.ts | 138 + examples/server/webui/src/utils/types.ts | 25 + examples/server/webui/src/vite-env.d.ts | 1 + examples/server/webui/tsconfig.app.json | 26 + examples/server/webui/tsconfig.json | 7 + examples/server/webui/tsconfig.node.json | 24 + .../webui/{vite.config.js => vite.config.ts} | 49 +- 31 files changed, 7100 insertions(+), 2883 deletions(-) create mode 100644 examples/server/webui/.gitignore create mode 100644 examples/server/webui/.prettierignore create mode 100644 examples/server/webui/eslint.config.js create mode 100644 examples/server/webui/src/App.tsx create mode 100644 examples/server/webui/src/Config.ts create mode 100644 examples/server/webui/src/components/ChatMessage.tsx create mode 100644 examples/server/webui/src/components/ChatScreen.tsx create mode 100644 examples/server/webui/src/components/Header.tsx create mode 100644 examples/server/webui/src/components/MarkdownDisplay.tsx create mode 100644 examples/server/webui/src/components/SettingDialog.tsx create mode 100644 examples/server/webui/src/components/Sidebar.tsx delete mode 100644 examples/server/webui/src/highlight-config.js rename examples/server/webui/src/{styles.scss => index.scss} (83%) delete mode 100644 examples/server/webui/src/katex-gpt.js delete mode 100644 examples/server/webui/src/main.js create mode 100644 examples/server/webui/src/main.tsx create mode 100644 examples/server/webui/src/utils/app.context.tsx create mode 100644 examples/server/webui/src/utils/misc.ts create mode 100644 examples/server/webui/src/utils/storage.ts create mode 100644 examples/server/webui/src/utils/types.ts create mode 100644 examples/server/webui/src/vite-env.d.ts create mode 100644 examples/server/webui/tsconfig.app.json create mode 100644 examples/server/webui/tsconfig.json create mode 100644 examples/server/webui/tsconfig.node.json rename examples/server/webui/{vite.config.js => vite.config.ts} (58%) diff --git a/.github/workflows/server.yml b/.github/workflows/server.yml index 0cbc3d640..3a29107d0 100644 --- a/.github/workflows/server.yml +++ b/.github/workflows/server.yml @@ -81,13 +81,36 @@ jobs: with: node-version: '22.11.0' + - name: WebUI - Install dependencies + id: webui_lint + run: | + cd examples/server/webui + npm ci + + - name: WebUI - Check code format + id: webui_format + run: | + git config --global --add safe.directory $(realpath .) + cd examples/server/webui + git status + + npm run format + git status + modified_files="$(git status -s)" + echo "Modified files: ${modified_files}" + if [ -n "${modified_files}" ]; then + echo "Files do not follow coding style. To fix: npm run format" + echo "${modified_files}" + exit 1 + fi + - name: Verify bundled index.html id: verify_server_index_html run: | git config --global --add safe.directory $(realpath .) cd examples/server/webui git status - npm ci + npm run build git status modified_files="$(git status -s)" diff --git a/examples/server/public/index.html.gz b/examples/server/public/index.html.gz index e876776e82eeba329b1e808cd46684e32aca94f5..646988ad8e494e1bcccfd2fba6f549796aebd983 100644 GIT binary patch delta 1219129 zcmV(&K;ggG)=81ON`D`V2mk;800062+`ZdY+sd{m`aZwHXs3CrCS}LQ5Q07b<2;?W{eAlrcI&;Wq>^ls%(>RXKI_F+-S55ny|&gG zjlb1uXOkeQ1xbz9k{}v~qUHyos133p9MxjKmQF;?mvNMeXn)lFTdlU2V&*K3r#uaY zJPaRdV-X3-Q!%Oy99v9#ZDq2cvYtM&d3QiUtN~Er^0N;9(;f@=&lgtxfs2V46)$gsQ}DOSdwN!&o+w z;Y3V@Q?wBV<4IZ@ad}(xFb&dBy#3$*-~al5YhlQzyni{I&1wx8P`dS8C8~_cPzJNK z2GhZuX*|k8s9MHJ63bv5L~rl7tbLs^!QMWm6B*yv&K_psy_B(Z1zB_<`F6-$K~owL z_BL%!c;X5&pMRaX&JK@YIi2;0V9sZ0k<+~$;II)k5ypJv5F$mI$*Akc(pBw*aJ2KLzGgx`oQhBk(^zhY zq5D6fwJRw?R;lXxe-aY7A?ZfqzIK?U9D08=NQAr-(hb?$$D)lG+6YPDyKxiuA!E*P z5`Tmv7=S~Hj0t3nVQs|dV>}WGNt!7ie}u!qm~*iA@pq?QPhjj2F88Qj58Q+-NHbQw zaLuUbW8Rpz)}l!^NTm?oT5CbNX5MR&8=9$)+2f+CCaM-TgD4ea8Ke*Ox*HcEW~C4s zHW5fkA&=uQ7^=MGC|1n_rSKdc;W>l>Wq*li45b)}2;fKJ^x~!!WKIwb!)zoRZ>c1Y z;^<)-XUk>cX^=WzB@#|fL!Ezz#*2IwU_~DT-wm5$X7&nvv$a;&Ob*Oc*B9w<;)czM z6h4i~VnJMMGEXyCkVid*qFxuxn;VfF;&RpNM}se7m^NoJPGi^(aKex7qhlG*gnvvQ z%%O5d!sl6-I;1C>`r8u=fHLog>`{EfE$%%o7F58J(bGySPhlaS|NZZOtNp$9A7L;Q zQ6g$5f)7)Ok?;Y%7-iV&<}`?!Uy{GoF#k?Gdyois-65&<`A`hv_?Fi8qG7YfqfsqL zlN$H^APjga;A+ESc!wr&CWoRnjDJT0u1MYTsFp<|xX31=cDQ$DqHBJfL3MbIK@|sk zJMTZ9zE>BczTIRTr}~nN<%7D7i-ur3LkFJ2-267P*5QSV%lVsG%o4!`Ia!g6x8*lzeYmP~vApMUbR&iD5IS1bzbu)-dN_nmRwPfwG%OL{^cVgJIC&JxzA z7|;vIE>%zpKVhfpJHu{Db~2yeEp{x(5hQq=cjDYlE1Y9jm-U ztY!TOYyYumh*^ymxqlA-^NSB#e3uq%w+AlV_kb$9K76S~_BlH*K@ z%6?gEAt?O9wx#3`%^<;l`i%YLlA_<)<1mv_L@6G=1^tsf-iil6Fr@I|@8%{Eq4`2U z4Je0&aR@UV(1T+^KMB~r2LiVn-B*f$#R~^}ay}pxcQxG3SDn)9AS%dN9_U=$Lb% z#-~o-1vEC;^Z{qt%Nbh@7Z-GUDL8U^0n5U{l1Q_eR z#A^GnYP#mWIuoY)ep7u|wSToP66-3+Ay|J+T`n*~rhjS1m?&x(3aA1Wp%}+&w7{5G(Jsb5RKi=OJX6b`T$WpJ8Dn|f|*vju=4?_jz+={A~AC2 za}_g)<9|@_$bpUCS4RuC_`YCkZP?Q0jwTS(Qkyxs4*=yo5ae;l6X7J-Bgex)lmYSS zcq;!u3Vz$g1U*hqmo8RC)lw5!zLu(z3)tTK!XRt=g7pC^d=d~z`|KFI@ARCevz|dD zZ4vKS&@b*ML3-{9a~!I#t9so%VZ{=1L7ivy=YI}acb)@+CfrW~=gx78Wu2z^QccqZ zA>JpESN0W}z>~vLEPS%=u6+`)Li72)sMq)Ln=z&$dyeD58mbU^q9f~l0uXDKB@?Kh zP@MjK!CLfK*u(XX#k%7;^o!dk#DJ4?!CKvOvDMe`sCzEf*U967VEwCeaoyD*=UBjG z9e)S_1>JqI&H@a<=z|n?&cZ;eAZ38687wPEZ8Qs;?Xw_N0JKjiw!a43tlx~px6~!1 zIf^6EB?p2Xh~^y+GeOHeSYIb}RUdMQskW%zHyY`Qd zBG;icShR)XU6OT&G;4c)?IF%;Q^BJYup23myy3NB9EO@X7yC5>ylE{<1mHIuZUAJ; z2tERwCg1JF(VpaYqdJXv7!3m5Rre*kx`xBiGUX8*91Gad_NG24DA7x`awqfv0Ds2% zI$02UY_+~EC5*8~+kne{^G1p!3)Ai;yL4gSrV|mlHdHK?Y|ZDDiU8i^ zF>RVx$WF+D{=4;Tl@W#zpji^@>j`4pQ><|^%~eVE51z&{gJ9B7|CrjkcV z0OQbc1Hm2@(xtNQ2FZ-4!-m$~%bOi6cK4d;6u7BX!+c$kVn<9!m&0AEtm6VE0eU{~V)wCJA8 zCZEm12Zbl+7z|lZSlA&@W1fv}tNoPk+@;#q*B69dE@&Dfx>PE=vP^NuUVkpge13Jk zpm7wI3kJTs2KYmnkWsT>2Hu2HHSujGhA?$_s)r)D6NOMfK^M&@P^a5E#C_zuGz$cK z2ZDXg8ekEGjy0XnLJVb66xk0U>J(W8hw(_T0K=LzXCnyR+?>AOIeC9}v-k1r{mIAe zgVUScqnnRMXE&ct-`^aa+<#mgeY!c{J2<#`_x`58ck+Ih4FnYA!<22q$1sc|!%t<^ zrL_Rgiixs|YTEB?B5G#IYtt-AYXgx(VLeZ@Hpo&0mouIKE67>zuRAbr8kXIsv8 zkLN-Y9YIRE26ZsRN02Jw<6NvHKJ0qW#p35k{U=2%92jEZI42hNg*qCFo4=#!-4!Rh z&x!^TdZX9xPrxO-et$W<0TJm0?lTVpaODiTFppCiYTEknOZGzUR6aGN`!<;etjgBBBdc6@P{}{f4wd{ zf2pFwr#6430?1b?cp^sGP{?1K9G$r`C##{sQc>U92Y*$b@e_p<*VQWa5DnjdONESh zc$&uY7uR&DSY-gKPWls76H&v(s!llq=l*izOv`H!7<8GMmrcFEiGr zr^BWerZEsB5i4TxW0_jxOY*HDvnUOw;=iAK?N4jwZnS*u&s@GH;_jb?d<|xHr68>k zv;9<(_J1=*bjZZNff6^D$SWHEs|@VK9nuk#k-!^=;l3On-fAu zRXh|EMhXobRgu{{hEWQ0o56aGh}OFp(HY>GaTU*mSa*6gyJn-S*);()aagYpms!TD zI#y1hBZ6hEK3H{Cuk2J^*+WGB?s4YsH0n1sn15ebu!-|sVv|H7ZhlF=u|$6)_=TtR z+}dj&UlgbG98T#uoYMEjDZRl{T0~$CzcDCB|4>`;SpsWxQ<0M&09HUhR$|YFKa`$> zk`jpX<$u2u{<}5nXV$F$4zvC{%z6%nLAH@|{@r~#&}`^!wr_Zn`-;0c02ZGIyRO^X z+JEk@w()njzuqQ|Hh&Pe`yh4!;`Ugqw}DLEzd8_V^lw98E^iZZ zAhs^+bqMbxd%x%6eC)$~9EfYSP5bOX5Kl#ag6Lx%eOzLvTGedpNv%{;nN~d+1B){l zR@OF&Wx5xQ#J9#k6ahoSz(lor+unXN7(s?$v?wIbGa>z0PC+4!Ez6#*)Mgalmw))r z*m}&YEL~9`>X3UGJ&2(emn@LQjTIzpm#2abS)!7DUWd-u7%mW`jDYbvfgd;uQw8wv zS%==SwKlzlpHukRF_M@>q-ViY#94`*-YcRNDf-dmABKX+N=_s^>2W{~Vfvn)Lp(db{!t%=?h87=*( zT{xrWbPWWH+zIIpgcg-`@%#=1-hxPvX?|4z+04d=jfr$(Fcn*D9}}s)TiHOsRS`&3 z-Ggj2bw_ky4v&&z>DDb^^F;#?Tx94eUAqO{zQ`RB-9vKA)>?E=xX)BwkAEx2QLmo~ z_Y1W<+Ss>SdZtgE+EewqYVxBS2|^*tk?u5a&0TY+ zl@Y%kfFkRDy}mXSE+IKsp5}AgwRAiu;;!w5x3P+2H0B+28`cte!=7zh+ZaK;<;8wHGf#dzT+S=VJ-Jz zX}QaMheMZxZnQaTri4fmJ{CI@9z`Mq_@eq8C@qz8GMxw(2vn)t=<1JlGmfSz7vqAK z+nhk2B-ORJ_5jKmcR3kn8s}v<>>NY5HN#~-);@XMFCXVcsaPDPL6nL2-^4Jh65_Y>`Q3c(79fw5%A(3t zU-GFqDKQoH$z$v7{(Rovc(JwL1MQ_a6iuOo4dp^cy{tBifq%Ha1EdzD4C%cPQLTl@ zbbI3ktwFIM8rOU%Sp!3iq&5l06U3Q$1et37ELoC~fttKvt*!muAx|ewKa6APwngXp zKJi|ZMl%+MXgsz--3qMXzFMvy$RssW58B-;3L)^a%S9!2;XZlPiWK)+f1<7QCpOey z$J212qjfNh+JEpsu=|C%ChpllAeb;1v&9R&XWNTX2WA|&eXG{Jlue4SnMl%O9@rwK zYNjkY57J4#HH&fK(bGfQe`09(x2gVKc;)f!vyL^qeS35R@uT4xGH%6@paVdd^@X?u zbaiJnZL>ZtMy_toM%z-Q zscm?dY~zKkT~mAEU9xuTRp-@a`}Ic0PT7PMv02#$m+Xb;m<#QaZLeQ~$T}E#v-Ndq zj=whm;f=Mqy?vTri{3UZ-{syV%`qyk(O%H)xBZ@LQ3!0CrtXC96AGJ~nG1Ix@9_z+ zDo%T@jepa4rvORtLf&rl2>}wvs_qi8w0%OC;SQnu<;9)ESva}?dlfbmcFMivvz1U# zpPzd}%2-?Z$>Z~c?7Ot*77O=mNC-U};y%5wNbDVp#J&I$`vT~Sj|F4n3>h0Af0_L_ zil_hmm=d3@8Qa!u$Im(2@gVAcUSd1K#M%ZfvVWi{ICCaxI!nCg&vDMaB+XckpO4~U z@*Jx8c}r_SX#xw>Y~x`?2TPbYi1 zeBUhutXa-Ya}I+j@B=Y2GIQua?SDAya^30H?t&x$12ewv!2PDo5VJ6>X+#OssX?;{ z;(wz!YD`UKBXL&~(On<`r;1W6j0Lg&1Xr7nM#wGaVQnJ9nV*HVdoH6O8YfLhNuCjP zI~6GnON#8VD=8s{YNZDw$b{hD%3%{6cqx~9E zc3K$dzwiH*by}@<{C&;0%LpZ_-RflDd-grm&yZ zTAn*oA9Ss4ZM-|!cr>Fvf{HAs^fF_DXSjZA?aF1&wT5d>{3spQ1mzTtr%Sbzf~(P` z^~{96 z7-$Y_oW$rOCLj4nmlGBJvKl>~x8L$bKAOU2%XE{D>44s7V~NTvGH7vQK7WV5j{4hF z))G8TWiZH6fku=5ipXTfhk}h&3g|0+N`yK3ETg$?286K^sp4?kaP^&VFme7aKAjw} zfic;b#&_cVbe29SWmR^AuxtnRxns0wY9lb}mT{JZ52qsCiy|REoE;w2Y}FJK*qMmo zEo$(YTtKl%kt93HH2VE0K!1GZT*2%-`NP9VVQ6_m`Vb1InasiY0R}Yo-IPJt{k-zJIZj}p*q~qmgLSgy>f;jPZF3qhpKOkg`2b= zO9!oJbWA#X|1}Hl9E#r!fr0DmmAzBo-V}Gm7scWq(Tk^U5|?9!Bt&kT}$l z@jd({!FRQ`B=)!?HqpmwVwQVFK% zCh!pIjGBN^fqOL_H*Hy6r zfe0}%=_DB5M(AAvI8cpP0!cdKQJjcIyEcn)EgRwvNUvJnCtTtvttAsav%8w4@l5v% zKg~jb&41Ia0JZ3BJSn4Ef9sg3*ZyPr(8@ z45mR|!0HhM(eX+RR0bP78cu*rgl`~X6(XR*#R!pRFHdF>Rg55u0w7@&bEvI&eRDVODwpVykL4)eLg1PNRuOFKYu~0}lz893Uq|hmKJY)iA2d#pTW5Wwhh!CY zQN3fa8IKRs4DK==l0zAXCS-jkllWeLqGKBK{qPGH*e`bw5h(BZ^ZWbz=6wgb8SOW3 z-hVvDwj3M4f18FLKqrUdXULG1o{6^nt;tYI{!q>%|Ji<{G=+&HH%+KZ#sZ4t1BytFuRT-}6M8s^T+bZy%>wb%_ zwaB6*63=NoSc);*sj7W((u4I-!hUybNPo~V5-y;T;ra9Vn!Ay+UdVjTuUgkYWX4Xt zZoZEkh;QR}gz@V8Bif7Jy3mU;^&yZcT9Nwm`B+n(y?d9dBpsHTGTJsYn9GSL!83ss zLmg*5Bg7k539_EN8keZCYnInMHk5n^Xl}>xGSB5~T&DJ@UyZ>HNPpZvcg$4nG=F1b zMPtI>X1xyO)>>fQ30+m{v|@-EAw;pazWiG%i}cwK6)C^}$Y05yKi0C0 z#7`ZTq~gaOOTPZtW64nb&}9i1KlNFXi61*H34WA*6QZXjkU2l;+GI~W?c3yRRT^C3 zNMbpJ?mzAPWFTzhw#uB5OWsc;+kdau<<xVRxD{DZmQk6o0yKCizfw zpNQAvL7@yVzzA}|EYVGQkW|l+GK(+ZXTcB#T0EX54t-2S>OEP}B3D>B5k63cwiHlD z6@EF$BlK@6+U4}>+RXfCHS<3EyjadC>D4vx)=`fI>f(wsY88fpQpH-sv5Kr(Zc`3M zl*fR0rC#@Q#k|jID==mZ|9=Z%%9!ayjm-SxZ~q{4!2IRN8#Xr9Yi)YO26>>{-eNZ( z4`oO7`uOSwE}X&D(RG&`H5#b;Y~|?QG#d1%(ST*P@?xKF));Hg=ST2IMlmR-=-jmR z_GmsY<%iC{W3X2tI!A4s8c%C*M>`Zh3+qnV90dt5E#Mxh*WWd@Lw~Zwb+&R3WN-1F ze8=7u721ikR0$#U-Qs={gu*QbNR-%ivB+hJaE9npyFb&14miubQtk7(Yy!^ZIS%Oo z0Pq~A;Iw{1z@!iXI%GBg!wQ_8opo))B+dFhU0@zM{3hLf^vb#HvptOCJg z)%GxRKcBBCYR-@;Xe1C)ufU(CYer4pQDgdU;*ZtMu3}S>N1F>-B}92bVN$G zmr4kh5^${+Y$_7)u<_igWUvHW7s&q%vH@=DESe zxp!c8BAL$}u=diu-xD5`H71qNi6+`l+L{JX*yTjS?Z;eG8}+sF9lS4WdJR2LtvKTw zMtI}w@ZrQPzJEEYF8}paYqyF*|N64EUz>jHiV2D>#Hh0A?E}xvL6_`_)8;s|(s}n+ z*Hbn3>WQBy>*V*pxJ2(>a4lsKU|%kqgOQ+XA&n^z38nIo9dj*f3Qd ziPiKmHHdt@JDuegU9LyIG zg|C99Pn^&SH08_Cl&?M)=k|4HK$#Zx=2F~}H=|`gM};q?d`p)79Obb;xMG5I>3|j& zcr%JeNT!l{y}}v*|1|-q(mVA!N_myAVi13GlnB(3d=$ivB?O;ZO<}XWT)^RJnHS9r z%`g_J#eYxO70PpREL~|km$R^BPN)yIYx0Vt@yeQ9=1JrTWTxy5ObA+IB|n3CN>WZS z3=b?q2MjY9;JzA_2UZC~=>84h#g4OnLkC4B zdV_fX584$G07q;!YZie8p^syseGe@6!^g%esegG778LlN(6 z^ncDWN%{(m4s}h&$@sm_)rP%j6M-uI)ixhpzq^66UG(2=|_s>HFKed znOjSN>l02N%R+zMFIsY0q2uoCfUeEUdOg`h#m1SCXqnyAe%N)53u{3yD_yc%(YT7^ zc;$HN7k$@m>hf@rRX5hn*tu}ITDd&8>VJ4fXr-s@vD{V9r#eE7JEK}w9b!|35A6c{ z<&g>o_9|WVN?oxQCr;H&@M3U)uPI$2V7e9(VO~}+iuD}o8uV5#Mon1 z6$-#T-ii*pj?nTaDV!% z{7!ul7%F$ct5Doz=nZhlnlVbe<&5@z0W#PwfHPxfAcW>~ZnU$i`zZInCo2hTZLONu z);f$7yId(mJvAQKHa{MLVaF8fxaNX+wwc>{9Ybnvp*piV#v-nW!PPfD#W`M^tB!_3|Q&RtwGH^pwskUt~;Szkf8tje*mpUQ}?b+{MiO7*H(Ll36o5$Y<8&YsFTc z&jIE((cpVmgI2JHlUf#_S?#5M|6~fha&~*mSF6c1W8If!Kem>Ffh_6B0iqSpASF?U zA9N9Oc2=u9-pY)A>KN2|ufQQ_b~U~RJwwcyu~s+RlD6{im@RLoXMi1L*MIp92H!@D zWR0;g49AXRV?xKscT@3tq^Bksq(aEdQ+;rp^>g87gmQK9tG-LS?>OA6@pTSjGV+Mw zq^Ogs_QosQ%fZ5$VO^`7T1{v^cR?#_d`3-PllzKP+dD1YI;IEaTl?CX-fyb7%tOdfxj-(4oJWL|VZ7M@&KLU|31)LnMfg5a0k*^VHI|^lDqL+XI-Dk zBi|*3|7Wu9Y(2+x%GsZy>7z{{z@e_R1H@o?7T?D}UMH#6%;N-A)5)*D_#E z3+Qd&N2y@|jY>!^n<3Y+pmYyxjWaE-Cyy$u(HC>UEJ}x&aoj_fDSPBmFhwnqZo5L^$0qMdCUQAi zk(}$m^x$$kbz7vY^SUmZNj6Z-N4HJrWeU1hm+6%RfAiLn+|Cj5f_Gdt|FAs1Yd)a^ z)O}XuSbwwdc&4>OzInn$tEhG=9qN`c7IAb zU#&dLNv*6Iz*ac&U8m7-fa%$yW40Z+8Rb{mHT3}*f(t2(IdVcX<_wdhk3nJ-a*B%S zn0SCnv$1m2@VjK&?&DNM8K{xt0zYznyg)4u|9_=E((niGs^KY_U~DiQ;~y|$;p0|3 zj76kR0iI@f&<_4*TyosXx$`iZ0+$STJ~m;m;~yo^8u;$-@=9JKDs2$WHNCJuL!2uJ zWV`cT4&D94PpjC^mK`8zhAZqdh+0E8yV~{ zZ+}?FR(!?hr8kI;^X}U>eX+xEgEex;-+A3ZQpgo5!dV9&;Zu6h-vHUc$dX#Pj?;SE z!=-Qov z9|HA%AuB!oEOVajyAasZ)+HA(c}zNxTcmuH?1%5HML}mD+X7L!h9VWDf!5>;l>27Z6f|#D6hS zj?Va2n8kjU+9uXA^{b%!u1&RkhGua#Z_<*|)plyJ!){VyBgLe9lN!G!a>Nkn;d8q~U4R>n=1*C3;jTdb1;JD$|~MMN2QMSEvghO59A*#;fvV%m-8#Z|K(#^5VAzK@n;R%Shn?^eXD%=&3o4X@1h z9?TXT&-K}wpL1LYDlq6h>a#T$Nw`6lrZL>`vXB$%fMUg%5a8Hw;W-2(DrmZ&`gU4J}KX1o&) z%Fk@s*Nu8JhEQ$w%}1m6VBrChYIum(b-+~Eq0a<;Ch}a`Q1j`GTAYJDwZzA1*)WwJ z(RXTdf7VP#>)ACRGqf6>G5Dt)(p5@(%3VZ8KN|(SL})A5I8u|C2%_NiH#~PITC8^$ zKxkhBj}IAp)CD{n+k`SLD1RdtmVxzMxT^c*`p;9{w+~s#kwyOcQnsp&z<2M$)jlxj zw-nRiPtDYc?3UV8{+lZJ%A3~!u!H&Bg%P)4#Gh@*aw#na5X~5ZRoUbr+F7Q8XbM*| z+X-j+HxlNO@$M+b!o2B+e4NxbTW?+?E@^VY*~m*N1wC0jl>v%0cz=^+URlXtH#WDF z&eHA_tSV464!(;KGN!tgskXj8^8rw4aZ;-VL2R;ChK#^>Gs-iol^+J7?ibD-GUh9j z{`$2o@jc`=k1g+*7Vj!E^GkJmF}4a##QMz9hyF^X-Yge6HNYfD$hzG^z^|tp^g2zg z%7G2KWdTKgyCVZ;F@I!1&0iGpSpu-eKrK&IKY;e1tiC$> zH-Hu3@blG!8K~xa(=xM^axh-${AfdE1vX)v3q$vN(&LsYXI#QkER;1Bx+eAf$-*tAIxkpDSF!%f$Fz$yv7RmaoT694n59oUrlM16Czl<$ z(Rbyth2JCEFMp-jZo24EY9~eUJ^d}SW1jGJM(;yA*OH4nD!84#$|)T^DP<{pyQ5=a zCt5!8=(|#i?RJmKRNY}oTA>@hrg}}2G)G^Ov#8M11_D^23I37 z!pw94rGsc}!Z$3dn?bzjYI}{AYUzmG)c-l|9-;GSpA7={hGduuDB{+t2(67u|WoV%s7+Gif+XNdLM(2 z4BnQRWhwd{LI3}?VLW@N4YL%UfJ{YAb!#|5XCm5c2&8f~RHm$H1xw36MzK84;#F6k z@}(6-xwK@MC2^KIo}PC5S!y-XG&k~qa40yiZ?~6os{{lXP0*`mL}+2dx*>XorIhhOs?Y zvd00?YhrX1d7O^oDJJ3*i5W!*aQcv>V%i6y$MdOGgEyc`fCNe$Jcxu$ysPo`g4(Kq zAiv?niuw(pBB7Fau73tGRs(b9BS5OwOUG-4(y$TejfsBqR|9%r*;rhy^NnIU^6kT` z+s9lwf}Mu$H$jUc)OdZg_K#6X#N68}b6fkzJf(xVe_xrqw1EucD_>o6ag9&pD$3Hur02e<(ix&CVY$~iiX1f8TM9#Wan z+6=u;JG%v~kBb$Qw>4tgeBK*7^T^1LU%J zBY!L2{`e5P-vr{5Li@AXE+uYb;`M}KLq>a*h$I89ct=*W%6aO^QM zjdz##2@Uvs?n0ki^at!g_i1+8?;FJC?0-b8y?ABB9p~4=P3VpRUg9fUw}DYde!L0Y z+W_}U@qn;$f}LkV>{=sVx4~6*-StaDLqo61(BPtqrE9Ikje@tO1-MaAwKVTH3U-#J zo0n!Auzx$$(2pX!hdzrUR3iF$=eqi>vb+N2?#!E?!Qm*P|%kxyTOW z>`U{Bhu`Vf7%dFU@?30G{XY3%1G&fT2X0FtmK&}4DQnZ;bD3Qs&G%C_rsYC5`u+Jo z*XHKG)23AfK;hTeG<{Jdl;lEw3*&wITTC-T5q~B03j?V04cN-Y-!Zr%iDw{G=3}mI zhdie)NjY$E8ttv~LIDkwOmG*B%Pjr@K&&zLXOQp7_aPA61G~d?kZ*xXcOhY{Wm{u5 zSQNO-imobXnb&|&A=3il-{K0p`=UINI-N?k!vO$Gyq60x2w5e`)2cUShCnC^cXC$T z$$t{=WNuZH%SLRmuD(n3HU6(%w@Z>%{*#N=Y8SqVLz80`*&(8l6;eJ|O*o#}#LNoP27^l1 z!+hU?hg}`glvxo%`m6|MGn=BZvT8ch5PvX0u;t%bJ_lf&U#4+NQ)dyG<(Rx;SZYN9 zng}$c%fG>)t+v7!0*%&HK+%ROV3ERhOfjtmS*#=OK(D#nZYiMDWr8>=#pZ-;zT9Op zcsaW0f)$y~!2Pj~9|$n37=)Q26}g`ZX@yk=dx5P)c+5fhekgSmcZdF-@MKa-oPWhL zJVvFVmI8V(C3z5T&C;GNlg{E>kSSer~PuV9Dk#jQ%3Pnd2Xa%s@KP%uQV6G#0CO?i4pCWl(Wwn z7@L`APJKFN+LAViYVU*I`#>9bzYlCf4m!xaesv2)#|(VNNqA;>d?l|@-Lb2f*I7eJ z*nQUS`sfpjUu6wnWgRs9>#h%(^X{>+en0`3bbhbh@QqEs#dMEtb(UN4uzv%3h}zT& zH1HvehCDqj*;i3Y(yC0L+8w-7+8qK$Qyi1pEfYsO8(ZLP0Ao8nY4fzt zX8B`-P|+#&NPl^8AfISKKG7jG?G&3YH-bb)e2a>Z&k`}hH~3m~mWXVWuLjf-b)c5C zL#VWBfp-rEF_)uVp7K?LEoCqRE|=1J)TKz6h5%cX9YVR)B1{8zXb)$JcL)tT?D2gv z^qlv}kk5oePr>STz@_KZ9C{%9)N{6_jPK#op`T_t_=jha-ha$>6blptT_uBci7`=&w0m(x7xe;8_zl81BbRZJZA@9?RIFp<2g#5pSE9m&M6#h zhqhm-_J3s@I<)=TbGAc>ed9UDNNCZGmIn{3C8{SIuZl_QY~XZtI*@k^76r7kslL@T zbY4J5Pa+Hf=E|zq_vZuGU23T;(c~#mf%+@pY ztbggyf)}^qp>%FhaBwRgOy|PR6^6lmH2fRcOL@~S`N}I`YtAMBR2UIws*zf~e?4W4 zju3xGBg()l^GbCf*^lb8MO-#>da>>wlmeI0rEY2f zMf70R4QY`3CoqPkX}~!EtU&krr{$g+dVfA{sO)|c3@86u7yji&7KDxvC=WY?`kmr# z%H@3?8eyjsuSM098E}_Co0&xH3Rnjb_|G`DB7+d|7x)KkCA|l%u0?gFVC9GS)Ugbv zTs|Q1-$mf>cL<$Wds%U65^M;rzq357m=>cVV#R8&+!ZSpd_m|8XJOJIbZT8q_J6wq zzi^|Dv65+r&lKy}q^`(gPpT-Lf=IZ6~+pKH$XC zYN_^oK3|Jk^}1ejxY2;a1$?j188h54oD-_8L8{i?n)z*;LRKaKy~6RP21j6?uYeVN zX=sJ6=5z*7PEi?hb9*l9^?&x3`Z7}2d|Dw=+3w>6n}k)Du*tGm>HtnavA^vT3wq!~ zZ`5rGIKpkLU!hRzvU(Mek=S96Xz~uu41l9;{(<0zZ^r~0Si7SG<7_{MH*Ml*NAcbXEmSm=7+Kuc zE;F~tL5&DPi}yf!sEy1O8^p_KsI65Kk1w>tyF&$J=}B1O# zxHePTMyKMr9YCR^P#iLKwDf;qa?}y(`QYdFyh?4iWd(v3bK*$Nr3LoUiZIY>|6hIf zL+?o4uYUO@oxDcsXnR3;{iVe;ApGh#bq3p_XCYfGXlcEZ7IgW8&m?ef*H&IX^ZA>t z)%Q{{KfeM0_7Ip8hao~^qv+zqkwGoYs($eaERtR?tCBB%j>MN9on?Q9w_dMb#O_H* z%(7iwcc1w#rzfC~h3*>&-A>)p4m=HFsd;pAZD!@ZO9>ErF7%onh74+889fZ)b)=tk zU3&@3FRXeUYO(abI>n<2Y)O8m?atK&TLkbXG@D6{(bDIs?md*6TDfhtp6yEJlJR?G z*H(%icB<~vlork!I>CQwKS#>vIlNXZl~5T-DYf&*(rh_xx{f|Wig$VPSnse?9L%FI z@9S8peV1NiUVarw4WsPQ$4=#}sw<|rzqVr{37-Az2(5XiK@u9ImaZMGapr?kbm0oq zH#$CJmR9Bf_4G;t_h4I~btdWJ@+M4@hBDi;NzXaFYB?iU?z(unb zl4*K?v(KP*!)pP^%cCJ~^CtNH+3QO_HBi5+o=MUY4C9VRzvO(bIFE)i(1hpr3O_yC z<}%anD>4G;WvQf6X;Er7*dLk0_QJNm7zl6u`$&Xw(Vu@i>@Ns)d)fHFH&q)YK2Sul7GZJi%FKDjeY?*!8fJ_S_{*HM`46rGz5tS%9?-= zE&P9!rtvI~IE!ZsrA?YkRdJTeuV3g$*XXTsTWwV`bbTkybB|V2;AazBtfrmCGf)+e zXNq1sQjCx0)LpMB@6pn!jmki;iFpFQG1^85-^pduRCk?*6?0^BOO zeie-QRT#E>?n&j4Qy+Ykngitd)@h>l3CdGckmg6o^NZs|ts`XORS0BgVZDA7x_cqT z`Pd8b*zd<~*aC7>^$E#MzaTfcMQ-XGW)d~I3CmU?H@QV_`XH4zDkI-{NuB(2Y~z1o zkBi(x$5rL^nrqM^ExmM`FBj(O9bpdx=jS7gX%<;U4@5{e)x4XE0YqI_vmtdM2McWF#t zk`_qA^wz-u5*NqZik8}kA4bwldEx(&N6~s6lP~>J#Ry6Zq5|st!)KSV>(~$_k znIuT63v)`)u*9GD+H@18bn5F{pwbUOK@q=>?sVE|VcwOu6QyYwzPs~n{-bDcdr9im9xIvA&b+% z{lkY~1#q7R?tVGm2G6H~D<-8>1Bg!px6rYt6_1Z9*>J)jwU4UOR(y1RF%7lk zPgF~F@)@zjm{_V}@}7_72~+n%vbWqpt8TSR>5}Qde^vbonv{PVQ1%A@W(#=Z?uO>Z z8uDnSZz%Xr^$lgNkKwOe9}4b|iYs7hUjb@5R)KCC<_>e1`$K43fLT2Ww&8~Xm-cw+ zK?m>YQ<1ukxs~zaHJ2OSVkH12Z%;oh>bW&uQYc4%kWV!>CfYppPxnSYe)NA!eWCva9YHULcY*d>DA|5c z@z`pyus^1yM~1HMTG!~-K4*C8NV6Y_alx|g+}tQtXs4S2&Z{WnaN=?*=>|T4odJKt z^=&e3)GBYn7P`Oo=WLVEaN=fEQXd3Jhz`fhQ8ot3KQ5e-b>u-w9*yFu{UAx_CDE7r zhPCAn!u)@|$g4l%Yj|+#j~JfLqQUY@2pM_A2gjHDR3`2MTWK>wG#* zR(gM3uOC_5K+9}bx3EbR=rJB$yEkQ9;dkuDbjW?Ddh`y9)$1;#J<%WfPIcy8r8DoU zoiT&IVK=%5h{&TGJ>3oLDd+AHp*NK&3w&315F31 z`4e1%pjSTwi&q=Gf=bt^V55%hVkftzJrP={?a))fL~AJ%YG*+%G`n2R^dke#rOev$ z7iEUZID;{^Q69jL?S)sG=~k^N%SOFV+g6q=m`?>e@~a+_p0rZ__w7Gd+c=*Jwr_vK z&0jR?wMK3m`JM_kv)8-!hp&t&_w~P+K%S_SC&jpNX`dKFQrMHF?rFQxQ^D>^1y-7X zYX!1X!Gg}e>*(oUx3T`$OlB>^>{G#ZeI+vx1tLkmq-^a71fM9dR1-tb*^%gl#xF-N z6zfj@i~*{Vg`#_e$KT+LAJ}Jl(fxl=0H+J~0Ri`eU>D^lA})*%=6e01aQ)$4um?fU zi|~VBJHq`y&k23V--tO9ikbaAbU)AsK`#g)gnmgi?Eb($7~s{~IuZq|9KaJITy7}n zi7?7oi2yKG;*W>IVvrw+g?4fcH`Xgc9)g08qusRcL>%*o1s2 z>UF@9vZ!8nPXzi(x9o=I^UOBDLU|i^PXw9IPsG(9^4h{#eb+q|^LdDlYeK;iQAlbM z(MyD@%UKQ1(P95%UqiIQk%1g&}j!D$JY@ zdMzC4Ru^K$`auVd;Lzx*%lw#gw#3up-avo;hfM56Au~8ax->5pBJz~ z#aQn|s56mvi|TcCiz9zw`4iEB?rE_<&N+wpI!f8DfrFUIa(5RZgKG13+_!t#IE{K>KM3}*gTr^r-6Hk+X{9?QJr}EW>wR`BRy}{v4yR^Uu$>N~fxAKl zcfpGPGa=;hnGf@}tM_59M)g6|>mMLfD1hv)swKJ~u&<>TU0~wHxfimq#kBl= z$swh>^Faz<#xj4G9MhDs!W^4J`91xN^K4CWceM``I8Ys85rri>fTvcXYDg;d2CyN*lEK8*{`!wHcm%=FN;jua3Jv1W#!J@me zNv$SpW(Jfxv{eX0tB_UYwB(|D2`2#5@w2izKE(3*{1Sh|D@yh*adR#8rCvV(4$oJC z1`ot@-v=xa;P~U0e0G3IM2ismL4bC_XL>0>so;Z!DqX6JW6yUl!Hwn9n9`+WzCEToSP=z4Z$B5EP4doG zMFu4oJ3oIIMPiTQ^McUC>fnMSl;^~LNBnzOewg#oa34WqNLS@?T4Fr_hRhP0?Rp4j{&Gfe}=Ef*>D z2IeWt#|~e9*YJZfWD6JXE&*S-M1;J{3%JfE-5Y-ZQw1f4MB9At4*C~dc3mY!1DxmhR>{Jq1T|0Vy5de6u7 z7`}hAR;$&T|CROo{oUu|f@zC>(e+++EvquEUE7nMV%<=s*7Uk0N~B zp^mL#DiZ6_LCz9%%7~4up^|-8dEjSrYCl<4op$-p1EVWa2Ow$~fi8ZK+H- zDR(I|fd}7vE9{=LNmGg>4(|l=#7Q%q2uN?LNAf;($if?2t}#EM26Hh1Zpn!cieP_i zaQx6JNn9w%Kt^%?^q8%)kJdQ$`|03F9>hU5FP5Np!?Cho#21O5KXY=vbPIgD+Et7R z{RnQajYnX=(NJgw%08Ys3Nks*dimFyGoSnG>)niv7YZt7q>FV}CR#yZMxnM1s;`zT z5~>0c^YC0-X^wXaem2K(spG2bM$vyKr1Q+Y8;yNZUp=Q>4o6=y6I{02P}m>p*Gh30 z#93lyyl`*a>dLORhbql>{ult-C2G%-*t0C}S!R0ns5{jvbjNr;-@3E)I$vL3tmw?r z!(4s+EYpvrUvyV8=SE#T_+HdAXT8tqahS(~C^1p7e-wFhKh7%Uu9c^t^hAFbc-G0I zjXXuCd?->r@MshY6Qr-N~OT%;A3x|CPt#EcBs#y>&YF?G`0=N3V{2Gasv^k*Eb z%?pWfP?UJoNTX-J+H{5woweRR1BcPMrT`nXuAcF0s{YhP`iwUkR$p$lEczK=UpM*k z@`cWUHhVh@KzG%T*vG&<3kW^LFJOR#oSg+lhd)$ak;0czHFXNd8lQiXTFW0;JPYzy zMoJi%5BR3u;Cm77y6{l@jr|l&d2%a82x7GxAFz}%bv{|}aH*I!)OcLb0iK9K4XsPY zR|C)(9poCLzJ9CIek}T5=^cMM|8l3$n8UK!i7GAthE`sjQmStO>nUnLqs~(IFe3CU zb^XXz3Yb_Jnje{kUSogkEU3P|_!3@UyoCyY6u5;7@qy@Emb;|oQd&i{{#fd*#tD27 z2-@Q=y)K+X1rRw_GP;FB@-Zx!Pml*cR%*f3F|=OOeehI+UT2geJD^^=xr@NZg?3t< z>+Y0}DJ!o$0ci2mS_=7pxO?~Rrjc!5^#AiINCwwO+9|T+I0=72rrHA&dIg$bNT5l` z$Fh1=BIp z*9&qzi{ayiB+%)&TkyqLQq0Z{q*}^+IICLxzdT6vS77apW+lev> zc|^|dpBT+qm?XS%?r`YZKEv^7vw$AcUjxHmbIvDa*PoR9TYT~4$v@*uF~XO6wwmsD z^~(64oC@!8nnHHB~bmhy7ar6DO`97|g$1;CG#F6}ajCr+h3P(uJNAwt;6yBcD za^Zi0->p>O7_!=%hp*ng`uOT_laa(G7*Z*Q4N_(!_RJ(j&8-HFhsZpAmy2k%Oyuu@ z5MdcKM^B8Kkqtcqtdw-7+6yVuV1PMHl(UFitZX|3=oQvjOeCs_PBH?`rTu#hJv*B6 zXzYK*B>3D%*QX7M(ya4v#wJM{GW8%*fMi6Q*LmnCaU$qv+MTeu#|QN)D?ZadZpT+) zI%Z&$%qFuR;bOcJc2y^VIKMU7!o!#?iZqpkKde}hE{Yq-vlg2Cd@F_vr+r|pob@Y2 zGA6ug?Bg96N3Q~+f&!e)tF+bjWkS!w&a!_?4#?9$a^}YiCFmSYg_NVjeLN`Tio>G# zDOUs41|}zlMPNtuOTzbf3SN`4mR>&cGSEe!D2|9m) zqwI+kMIwbay6|zM%9Z)-5raAA=)E{f<16LKn9QzjHe0O?Q0$39X6fQa^C3?M)NwSe zPh&Fjd}fa+65qJ%OCrc(vBBxwqSG$+MG12{$AzLa9QrU_gZMT1l*Qk~@h>~f!RN4^dxq#QuoTNk(ch)K6Kt%4FYm7e?^RS0shd&{cmUFTeW~ z_-@H-L1to5!s`=}=#+~zb31`I?J#q^wH*ny_X#}ZNe^#^0KuR%sF5;|ZU1}?-bO5}f@%gvsg)<-|aixGsxl#gyVw`|0m?LTJYskg}WjN*?GO)wCRr(=T(>lNG4JidY05%R{*kpuJpnH`BWDe5`rpAAFRJ=bj3XUb+B zU9!)V%{mtw;)pt~SGwkn!ZN%GcNwjT6D?ri8&^uTY@?1X6ZOH{pH+WYE2-uP#!HdO z&lqQjx7d*%s-Fn0xaMP@&6jfVCG~*+ITlU)EGpmu`>fs;WP`k$Kv8yX-j?s2v!dsn zQ!gNPuDPjmelFW~{`?4rS`O_FH2&NV?K||h%u>ycsB+%>Oi_HuCv{pVQlU>s}SNCz++Da~ciJK}%&&^u?JkyV1yOgT1k7^x5ncrXn1eWEZWAChiD=N8j_2#x){ zSEiyMf|QloNQSdklzDWPwb6o1o(R*f%|nL=UUv5y}U{N5Z-uQdZI_7q3VBa!Nrn+yRR|=dGL_Q z*@eqB&ixylpWAor!sF|?7(WaaJ3?JIzUPWUR5R7A&SsV|YC>##mpsEgyQDn_EY!CC=BncFlUp9ckT>a_rLU zP70romGpHEYe#=9J|;<6!`eCb*|}LcLLwKpWK+(CRQG47TYfY2);1ff6n2j1ZB}?k{&Jb5-jPqnC?;QGB1NV3xvE5x+H6gkq;=|`#;=DXA~--Z z)aP^(&+SP(uTP@h|3a`8RBv;03%}@8t7q`sox$_YbAb0b(I$beAV1 zk58SU|I9yQ7|yv$?mK{$0)sCq?>d$1;n%+Sdi-zFwG*Jea+AvO#lD}1; zYtPHWDmZ_uRUmiJQOa*pdkg77JOV#zGHr)?+HO)YZ{Uf%Zn(5_dU|9(sP5&M#IaG0LRw3@^#?o0aoT&c0Yq@c(CmI{XDmoI70n@st)6zahC zg!oglS13U&I4;?5DLcRTl3uE9mM?V5-*1FD`Idk3$yCaftD@EVmbO~?qJfQxu})y; z5-(q{BXk!$QqOybDbE+-k!}lzGH{RK7hu%Ui}BpSKCWc7$BS00gl|B#hiLTB&6L{N z+Q@uMITB0VPs_Uy8u~45YUsC=HWbRhJ#8a$nu4)*-%_55me!jwnex*VJ_T=(#!QF; zx@dp3CQ~!2`oXtUFWQIhT?`8@X55PACoEV2b2+$Y537^X+0MA{a7FV(8>2> ze^ncP!lgEx4@m9^@c@cPjCqc9yB7!<@SGo}LRf?2RL-nhw(T2h-x2tVuiY~&U+{BO zHF!SVKN~qT@SuNHd+Gi7!F%cb_@2G=e%v&*@5gj%p$yzJ zdr5ZkZ|tJ=SVf7tZbyH~u;~yKi^B z+Ou}otGzpS*B=YAyYAtITCH;x((pT6oBm+LoGZ5Z?C&yGQk|(qMdXT8e$+imEth|u zqf|3g_*=`BObC>!kkGh3^>0y2p@$U4KKG7NR&P0BUqACBt@P9xhmX>Kv-Bx~Meb{% zXqb{8UA~^SRMewLfk;>^Z^20FFRaXzDyhqrzKsxeE?2De$$z5&{FxKR1{52LPm>Gc zNG=8%+6j{(SHV$=u|{&bcU!HsvN3VbU53Hr28fqmjC2-CTkS zMo8v~2g?@Nw$tfwEF8+TNl|~;#g&VeK47;eqBRf#F+~UxyQrJ;peQU|IUQ#huy+9< z?fYn*xt0EcVZoFs4$Flx11>Kz0JK2!4)v!LCZHll&HY5C)7k z8=Na~^M8){XJIqs#`1y-w2IyI$+)O0?G#bPXsD{^e7+u6OGwKaZ5g5xhZVyGMVE){fdlnR3jjZ6LXY z<4`&PI2#9DB;~t27MrHhClXXlrkt5=sj7u-nF-wSb+yxNX}(|#Yj(g?Y%deDdEk-k zxmUGz+s}4)b_4X1@zp5CVz+`vp7vD2&Bm%F&zOA7EB19F8bPd9Bin78HllX8(UXa1 zR@n;U<zd!E3tVc7AFPX?N)ThW_Rq^+})vx+LD>OB{RJx znN8p-6;K4O49@=6{t~sp=5bi4b?d%Hd7V+|9#Z!H6cm40_<82F8I5@EHVHfmMA!5_ zg~)O}_3l%#Vg!)ZfSvI^wz@0+h*hP16B2$FM&iL4i+CSjD^qLDmP83yBFYI}vje!{ zwixmlV05SaOqte3^DG>w@75~deII%GME4Pzb(I4Cn_3y&h(oe7e#ZNvPXnH);Iv5! z$b_)e0i%D(>;mL1v(^NK)JY1FaBkPINn{V5%B>UNt{GUnW>DWXgQi{MDzxnvS`UCsRHlBLvBF%@A?)u&xJ$+OPUQ3R0Q6Zf2HQu-98gY3(`Oo zR+4`>gWoUj2-|p zc`PR>F>TVdijz0+yIH*C5tBSPUfEu{h_a`nzebkVTC5k0zLN4AjkW_P!`Ohn#uW55 z7ND%u#l$XKPv6B%XmwqWqhg7{S{9G6GW1UOu~ z9Rh$ZWG1z<7x}i?rI+TIR5cTdLd=?1kbv^*;IdK1Q#9A zukipsbX_{`yNBa)4j#N|!Tt=^yY{jx2%35;&yP3qZ=o!K7?k#H0{{PW0zifxPCx`k z1}#AAiFVp_xxWu|!p3^4MH^n*q+Y%lbe4;lI~ff{3<%(SqG9}d4Q@~@oOMKYYYC*31X(pGT2j6@XtaQytPJ8C^wAAr+;Fn#m{f%l1P7`sx@jK_uej? ziGg^U5dND=1tbIP2pyhUh_J{v#(zUJ@GY@;cCE05ddp=}TrLMz0cx*pQz+Y_(gBJc z0VM4WC`eVHOR+`^+lFa)gFSJ_@s0B(GNx}9M2s8zmNR(yj8>A(#FU5SJmk$FL4@K^-ZE=6WMsfKn&LzHIc(!f* zW+{DSeo!HnW!!nIM~jAqmofD;n`%HA%N z6243sh!17DI^$cS=E5$&_U-cfiwcre&e1pP}T@jk^CjrNuSxv zRlc#3(KS?vDu{m&Re%tcC>hH1EWwT?O;T0iD=nLAl>l){?9B_O>8#qml5E5!$=Lqi zwKvVzSZ;PDOPbhNvz4m4wkzU#0C1cNFJX~dkPN`y$>0FweC~I1DD?12O{{4&+GR4+ ziQhQ6iKLMi3CqZ+zJ#zZVkfl}$!~#B;8^E8hGd=`IHG^dnXOz{w$Y?%w>D?88$fOZ zfjwgip>jalu@^K-cM9yf5) zuWwAc1CdL5kl~Rj%3}?$o6#q1`m*i||&!;G?N|K8$uXuf2pyO-GyKKiP^J_zyU8l^a zk=7S9$qBxJZ-Nxq5O+?!zy@MabNH1mAI|5nKVR*ZOh2<{Dm zCiFeDA(0Yord_Y>M$wu3y+g+*TXeGptOl@1?fz2rs&JPTXipI%^k+b__I9mbkhmC* z)dGF;V$Y`-M-{WnW@J77@j$}(xnqC3g&dA$!k|h@V3`uYDo66#Xj5U!(YUbRkz@G` z+0rUs!{Jw)cw-Sq1VJ-jO+CN+vye&Ba-aPsxVVhEP5jo>V3z+&t%d%Q2IoW)O{*Rw6x=Z9e8^cr00`ON8jBJed#CTpBbI-9>+Y$Z{p z8KmL23z4Bs5(~UT)~_T*-zFT?k%4xwQ;M2uty_}KaKxCHUAZNZ^+pYX#tZ0g`~Tpg zD*0`Z##h<_vS3x2zEnoz(G-7w%7@~YWXQyi*Vl7Q()&FXNTllav`h=crKg48#y7KS zVch~mI(eyBR`n~BS7M)t>%%mua^ViDZe(duC>K$2&t&dZAE_`S!|ALlhN{r%&2;$c zr+~#f1-4SP{0c7sw>_Dp(YSn<{y0hD*c?yNBHU=N*m>v9F*7*Fgl~WNvqt#v>~IgN zOIEDth>NP_h`k>*jyV0^Lr?vi(u3vG2@cks{k8dO?3PszGDS_ZzxGwdZRwR@mlPX_ zN>vSKJG=8_UeT51j+8v}tokdH^RV(_Ka82HmLRH6fcE_N$!bM|s1~nS)-0-hZV=V7 zx~Qi15=GyxORL~^t?_?lYtpG0MbV(QBWE9e3+nr@0qVP91BhzLZgo-$7W}OU000o< z;L^19ciQy=m^9W@J-dk22-p;Wd}q~63z}ETTTsK}uu0%CsX3GeVh71e z+Kl~)5(|%~+vBTrx)^8Uw0K{n+4yIdfVEWsHHoN_6(B((f}{bJ^le1Qt%}8bGu+&Y zw>D2Uw*VFTpOdXE7CZ;Hg?Ni^9&K(_)+;`P+u^2y|Nf)1wPh+P+~QlC;ig_)u4ZO+ z_hE2$_hE1}T1$V_xLi?i{LH>25$>z5aC3BwwwdwyHnT<=PN_O>A;QcWX z#P=`kXQM*bR#V_qYlghSzju3!23!P}OjOt?o*^M`*u*bK&0F`yNUqI4Mt9HLm;dk0 z+~Wt%+~a@K%xNQXuSW92zdfsu|KFTdF zBEgw|l>ByF9?z;NAQjfnVnimifuT#k_XR?@sR<1}43Yfv$n^(v$a0i~^6 z3d0F6#^`_R(M^;`D!8Akyfc{~gYFO=Bz3SfgPm#vZIJnd)hW#T&-wR5viz4aAnz8wgOQs(~mA zo6U1Rq7F$cvLNUU4!$ySY?q^11z6(ObI8xOkiLJl)K_9n885@?0W4TS`GGs0!Sx%L z@2eseeqid=-o{{p1F&4e6}U`=)Sc?gK~LXkvz&h%qfM4%BXCSL2@Moi%8_2ei^5Al zLBUU^UhMZKaY*poGrP)?0kk#Gq#n122@!HXgscfMBIm^-^;3<_5Ti=K9o26sjK~|I zD#e`O*FS=KL=8>XhOzm) zVls(a?X(bpoZw82;f`IE@r*9zJJ)coimHFLoHVXwQw*1F;`?ix1fS2-#bH{Ezsq~S z7PD&{F-F=#O`q~eOn~aFsh7;-y4q=_I+i`BW?+%)uTFwU>844?^Rf{FHs8Lx;Bx83 zDgz(8;~jfEno4*+yT6TYHERocg6PYZtzEadnhh!3VE|Yq4vqWfiFD#{eAUoA6ZU^1 z^+Xg@Q3j+NWBn%NIK)_sYKwnLy-fIiSa||7g^XLt>lB4r!9&#dw-O zDi^@UKl;zDWcxZT%Q#QB{_}spFMJ?*HYr3GtQ7ob%*1OsyBgHlyxEoXm|bUQ^|K&&#+iXK*X=RB)cQknZf=V<$!#$S)) zxpe}|dm&)A>Nb4PM`Zi8FWjwF@`vO;T%(-ReiDxO^jZdPL#o<_cho$b_YHH-vjWp5`y(;m8i)zO^1< zb3b(C{*`U-W{41s9#YcR4?Tam0K++_f|q`SnKyt19>t|sfnhbrzJvV4vpuRI6jD_z zvkNlIe`zrX!AQEQT4?qnCPl*6+sB{4v6q6g2}Y6kLSeksdFTilhfH;;YzgJ6#- z1co-QP#3MNCmy$-0(iLCt8E-x6hw~4XjyAx0#jw*Zu?BawZC!U+6Rc84t8t;>hN_+ zUCKmW2_4t`71R>SC(jRmk1>=93pt6L3TqfeI#p1uYyPQ7Gc^If#kibU#z`m!%5@co zXaRzWvN4$U9JB@(-9dki=T5NKQS;N{|CO_`Kt9O9;0YjLRjc(A?4RnLC2up?do)za z-qY+edVW=*x_3W zZ9w*cp(RW%-f%Esl^3xLGkGCazWBDp`Mx!w(%?}}B*r|U=Q6fp!a&Z#l0FtcUb1P9V zQf5urL7X%JXL=^%b-H#P-P!P5ht~=0e2#7yOGp-3?$G;q;wDL3@LX_vvEc>8!F_bcLo}b zGe7jsn&y$6i3vQDyl|QFYQ5UxDm96>8}$a!L$nOttCYNOpLVHH-wWMcIGn!(*htJjtwS~V*MlqX&-->jR@wm?-pe80~64E^_NSNu5!72 zG46Sd<0Vu*ut*?esi9V%c?14pyj-4(RW)E4o20A{^VIY19l!O8n&^H-%^fA}K16q_ zXQGFyXQI2*Go-rv)F`p5M)5=M@4w8P?Z|79VZ84%wMW6TyOfw3dY|$$mhr*A)(X(f zOXq(Gn_fuZv^w2jXFOIAsWAn8?G3{3pf^D-b70zj2HVLpbOx8C}(zPV#bPdTlMUqpm*k|4ur8$3Rg65p0tPfVCw%_y0`v{L1{D%x;z2sWb zQ=icq?a`JpY%P008y`Z0>{{25qNeJ|Mka4PV5!nq+X3+0Yp6%l@ByBXBPz982%Y4} z6`+m(3l01?d&AV!q)X3uGT7->0rpQOo zI&~IUDQ%^+YqHu3#jj3m8xY@yFC^paUlP$tGOPPEAcqUh6o8b)8rCoypt5(cMd*TUMzpt6CgKk&Z64JrAsZj zZ{Mob+G^EjB=Xmj>8O!RCnA?Iu~f(b*CQy5ycjo0!3Ys-Q)0cy#Mt1-Lw;iM5=`B` zU{ka0tGv{T>5LCp!cE@uXTC3{5{zy#8imf|3Nc!}s72|6E_U|z#l>EGM?MO6WFZJS5m&pxGg<7Q z{Y;euAQW-g*?GDzOA3RwD1fFdS=Ihddsmj;+u7YYI8YzM9KGs$Xb+H|L?x7b<)CwL zupg**_Mbj|y3>{~bvk={ySp9v*8bCWaB#2>Bhj&jVy}O*fABPT*4}|J+CSLs?CtLD zI}o8c4^}zPfl!uJiMc3bI)upY1!0cIdeCaYA6&|Uc}2Hl{fp-UMLx%n)(riOA!J5T z_4&~A-EG}=wzWp0p@^A1&)UPa4bDfFPX+jf3760lXc~{zu7|^faNAN?t(4oQm(G`z^u*QkB8M0QCT6RPGLj+po~m-3U-wYsb%c)@mA!Gffhc^0aVWYo_=*>F%D?03G zq;Y?dUZ@fOwQ%uY$qiEh@ii)3>{q=|(HC4|zh*7ohIR;PFtd0%zlrY<9hRqqM10@O zW(6*;O&s(}v&ItMtTO3hIB_W=dmd4O33N6vk>JCjM39Q-ghR~r2Nao-#3wheu*zaQ zef9=u0)zqpu#8WL_hzE@hHAdR6m9%SH@AP+5zp`KkMUy4RWe8;TrqvcA+j+C5o{%z z8b~?CY3*W`r;AZqY=){w0z(4PT*TS$96U|aXxv*I#gR1m-demp75s@ym<&d!-Oy4M zaacFJ5h^0HKKAe#v}zpC#>e6 z!vVdT#ixWH0anf8;z9@p5NP177WxO!$GhPmFtH8w?0t4!L8wjubk~iPUNCnpdD0*E zE972dnVk~McWr_Z4nYhY-Kytshq=#3TPQa!f<|6(!XEK0799)Rfix0Be z1Ggq1D{+y=P}kX5efMl!`>r|#b;!zW`f@UcG%XE=+)O>Kg|W=0L%hEz{ndwsh`&Vh zRJW2D0cVAG1~J1qIxuG7m_GOcc=+?QSloEvp_4b>yg0j*0fXbrJL8zN^JbEN%HV|o zhXPoA1l14xfm=%i>bDB9#csh-|025No14{rf|GjJ zd&zR?!AzmDD8VePLcMn;$gppJ1~pTNQW*?}$T=hNZD4pp1&!D=z~H|z0Fdbox#tFQ zxZUxFY6XpvBfZfu)XQbR)#^9hZ6=n~v1E~3l&9Ku~Wi||Ab7Z@zW zFjYP}B?>x*U0FBNJ;2!R^|zyFw78zA@fz?!Z{XbPt7u}-q4yC%%$}QnzVCPYagBMpJJ{se^mJIvCX>xp>)pcJEX5DdXC%VT z-B*L$uqYv~S5+Gjxq9IEMbjitx{d?rnQyyItX3> z%56dBii1^C3o=*u%QQ*_NWIvIh9-$?t6iaOd>|Iw`Lq%3jc5||J<05AQG!)qv=UAT zv9G$OEoVePtW?6Na+womj%CYrb}R8Hv+o%J9wE}zsNj}|&;Lt*pC(^i_aPmDMBf8U zIjT(t&}S0Dw0D<6NbwI}rEjwlTeR+1{@n{@N;j3K3I9HoT3e-S4y|NPyNctSLyR*s zJZW1!pz=;pdOmYYYNpk8AAvMXknQMrihr`H&mcFBv0%PbUJ;N>OS<&94{N0*b8T7f z)sj;d`x2zGo*@f=;aW5Q_P9LMQctV3;SuYP@4Dx(8zPBc4ZWMfSNd2`8K*|6Y%N6U33p+(QP)3LvD-1EJsy`Uv?JD@`%JW0 zOjWDpMF4K2H*zo1mwY2Y-tSMaotkrN~z}8jC(*P4wD>z z0$*}GNfM!k0KJ4uFB6TiJ-A3N(UdF&(cT=O@A}}x_?PtoT3DVgunt*CUC<{x8MAYm zj*ED-%}u}68j4Cn@{&Gd@R^YQlLtxv$vx74a=*$c@q4<1$(`DOP?tMIgttC8W3E4U z3OskfJ#?eJ{@f|>+-Y8|nJ|cKX!)~$OcyT}i(;Hqi`3i1hj@5?PCZ%H_nX(ZA%bKz zqtRU;eI!zEb_~nJE(6UEGZpLqJ8e*_7bp4<o?oW=LcS^_#2$ zqyi!lImSt6NFKy*-ck7~X<)S;fpyL1wW_ePc`aY^2Ora)n8_KDsN`7CdC1mHzOYe# z1cRB+>ZZ#3y!2~90CiI)&L0GSi62dgl&$<5H>`LAKN)$#7HpR9vxB|;0HYJM5iH@j zG6xZ^wkyf z1HBd1+@bhjB2FIr<#^Dd_hBy%ZM(c~3AoUyEDJGut*ep5A@$aVc$nCK3_*#6!BI9I zTq23*D$x^BD{?QLN_5zC$~%4tet_sBO+=7J@q=i0d}Z-h;aV`(N(p4(7~W32_1*wA zWl=j&)x8Wn$1R|t^wS3`>4d3VZn377W_tsW*M)_>7LXHh;s?i+@4kX9=NLq^3RD2GC)0S-iuD%IfDp44$r65B^eeL+)UN6tZ6i_36Vqtz>WQWFu;9!Z^lDJEDm-F17fk+9If zaVyeGCx)Am&*B3Fu#D>B{ml=F3T|NqR zN#(~IX;#&mOUSe+Zdi`Nu^KmR32b%~9w5%B9pqC{|6S=>+N)q~g%vtn`Jq=kcIqxe zBjL;59D~B{J560%Dx9M}U?J-O#c$J=HBq8THt}rj*Xrry; zY4@Ulb$i0TyVg3k-hc&Xjj-Vrl1Wi*=0)oX+j6M((K_b%t|-%D4f`3_UVYna_ zCvU-z0C;UBQ&5t{|$9Q@*`=OIf0^2Ux|ag0hzEdFnQh0@;b>N1z`iiD zs#}9BgRn3Rtphy=7Rl5v@f6cE8VQ#r`ch`itBBLaqjV)e7Qh$iNt^ZIzcU`N5+XoA z(+!}1E?9nnI^}NHcwwG`A_cSo%cYE6x=QuOHaPEKWP7xg`wYt^%VoB%T%Si<8I+@< zc?Z<@TU_2mDPQWcede8}@PL>aG+R4?C(!s)8B!rQZ#?k}87r}X2|x^ss@2-s0_UI{ z9n+Ni}$*T0oiuKvHp-@;?^zo;X}STNPyifu>T5G?n`7 zkA>g;?B+*k#KJjRkxq2bQ~sQ<6{Wl9bSJcX=hU*wH#bA;`LkB*eEUk0CTZ4!ThuOB zE8u9~q}o!RZ%Q6_bh?hpu>*-i1lhiSNs(^HI$oOHq`q*mFg#>I0;JSBQpIsOi3k~3 zsydKKGCe83nKL{I*a^uS``g6zH6k*BkoA?=+{?Dzl;W9_`4wm>?CAc;(m#3SO<6 zl4kcHPj*OhUz4_cpEsv!jqiw3`VQeRzK-7)5`-(|7Ydd-e4!`^7=8 zBFx727!%Nmg9phAB!wjmo#@DaN)fzT)wH}nRlt=MGh_fv<|H(;TxiGrash_h+4c2!@#{EA zi%-*Q7uFNSzcuB&VgVstGEuiO?^?&f^>n%X0vWMlU)fB>ybPx-=Sk63UDkbM!zrk% z3!bx~v3q$*0sc!v85Bj*Tv=@o*`d9z`~5jyKMEUcUh{yK zzRQHH1l{a;r99}etu57z>LR<;5^ll^Kv81@9(5(tGcdxiBz^;n_zkCgKvtlj>eOxJ zQO7CMw3F>OcK^Z~s{tPTc7oFi~rj`V_@Lc#-$U?6n+h7R;88|$0! z667z%%0I(@m7g;JX6r^nhEDV za%W9K%AKU|MLk}vMj7w>#w zWf3&DDSPc5lPRIqLNcdfg&jG+p(A&`tpm?HJ53pX3^9KhOd^Dw-q`_EdF~><2j#GG z6^_}Q5`{%1fs6epOxQrC24np?RjSh>rj{p?anQ#}=Q6YlinDjKh{1yv?o8ur?Ep7h zV5U*_X6g-Cq+S0dv)dC-lX_Z6*(wzwRxGR06%&`9<#qOJC27xjw7Uo&S{sUM2F zYdto9rwgl`ampO32Zvq`sQXtbY$ru^Y;VQc6uZ&vr1+u!Q2E=*R06r}KD$l4XS;Vi z<22$zp`#}f1mZL`9*H@lB>~SHToMruoIb#4gu0tzLk&sIOEGLJq@BnwF2l`D_^7|i zMHtV6H|72s5`io6Ugy0X$Yy5ki+XY(ShTr+(S`qowsjiYM)!PmM<5yQmUrOy;CdpzRx?7c`JU%2 zdB6j9x7uF4Z?&Z}XVY|F=7JoQ6wV1K(0KOK)?Y)8bke|`;BL4VZm_0<=Dgt~kn z-qxReF(oYo|Nc9P=}KvM^dRm#g9xKX+fNAfS#NVQbUSnR_tfX)_cl1+*+Ijp{Lxu1 zuhK@SaCm?!e=KgJ*{B<8B?&vJx@#fYSmsh`NWDrMRk&BFtHY&tOdU8@S8#fl%1Kvz zUCJSi&s{CAE-v5IAUHOTQo~_?SEu7`$Gl~|M1)5Ct%YA9Jb<*)+7S{!8WLzCHIEK9 zQ8L%5xH4r2d6cdQ*iVDxrMS4vk{y*(fT5I`X3<+MPjTZc(`dh)jzaK3&3qTtjF5z( zYcUD(v=%M_*;+wzAZtN&D{{kkTM667O4xT7d*5FbrkSg_1rVJkb99S;KiLhQJ*D{v zHMjsCY^Tr?VoRG-b*LC@J1oFD=EO)I1G6Q85(Hka*x|6_GL_LRU&AfD!lkegX}BID zYWDfSxJ<5j|9aXT@K>c2D6Xea+pCgC3|I!pv$Jugj`5tfJ%VQUwG(SZ+8i~*Hh-LZ zIfgdL14Rqy5gaFdKg0rmVy7qiJI9dK^K|Yd;AX9n!n@EM_NwFqHl zPay*(1>cun&EczwABtDa6yopOnoEJ$eaDvn0Mu<%IYtWsd(tiTAU6Y0NuQS99&BA7!P3{r()~ggOq;(SM*$~GOrEc0&oeZ`$7Yzkg??f`H z%C}iDu_c{pGwMe@f#7$gk`b;!caScZe=k0*Z(`FxRzQaxua2Y8-!6+GCSER!A;({? zjA@BgbXa!68c1S)+{hn@4RhxD(7D`tY6se3Fd zw-R;!y55k=dG>y{6WFS1{!o849YStPam-9=&L|V9fr^DUgz|tC{w5N&M8kCw^uGo# zm8!u@#gMOe45@{yXjXcuzafcJ~mPXwtHOYN6`1^mdsZ?7kf93J&x8 zsb<~S(v&jL*L0G6w~?wkt$;istCh*}tzV|=LlsV(ChuvIcF&6ue#N;N6Q<^_X&%_q zth2V<4w|NU&@|0Z&1{b!$BWVSFda`=R65Dk569&kT9+0q9yTUkj|=B^AIiU+O|x;% zMhhOWSZe!!e_Kdda>@ht4gUKO^LJw}@L3AK&LwJS7W06;LzK@U57?Q!{aRKtgPU(+ z-u_3x;<32*5gnMzq4+hBcVFV&*Yej3sPW4L5Oid@;}RTIPf9S)XpKKWUn+0vMQgtY zI-Kyh+(2*3<1#!hQLSUP$l=)XZIP;)(V_*C7Djk~^{%F^Cs)((pB<8jMYA#|r3|_| zDfy=vWb`~K5xXK=pxHlIF1>;azu@ikyFl?MO#1Zpe)s}$#o=J_A@00Q?DB;{h8t(z zUhw>j(i&Prp{e4*9$0F|h)kaO%-iswC%y<$i>JN}9*n^ZDxbAl-n()MfNK~(xZ7&I zh4-(2roPV(CDuYE?9roz=&POd$tf370x!np< zOTPeHx9uy}pP=`x`(SA-) zV=g?jJ;WU1!y$s<`f3e}iK+PcL^L{o@Y!1lNP?Ra72T{-Y{|YapXyLoRLF}46UlZ1 z2ORaydtQ#iq=uQP1}3tBiEdz`8km>{KDdS2TAPih<8qWjvduX2R5KufTqg*2I=ef&zKjTn`D_SF;R!~-wS(t>5>sI+ zx`4AK>XTz=Cw*aF-pYJ|y9@^xIBJfkxmvch@-3Y_V0$sXPG{BPD4t${MK18JW6L8# zbVf_Ct2I5`b$SRvh&#Ih_KyHrgTf1sE(rI7WFAl2-QaoFRcet=`^oDCqG|E=^XCz` zL4d6@`X$FD8iDSO|5vgSNsl0ZYczyULJ~Evy%YSB_1Zf@xO)Isy9Yto4)7BGgzaad zz_Xno?4+GvvR)_cgu5NQ3xC4)v;E*dW_w=t54iH7-gfXDpsXJ{O4&W5wAZjenx&G? z&NEPRDSvgcbRCMydDU}BacKM84u|yAJewE{PPGy50)6x(aUzK{!JBY@P|8UD4Y508 z{(1_WSw-BLuZF@+n&i4f$M4rlv)g+;O2zJ(FJEQVQrSGp^d!o63+)pDYF)~rLUs? zxl&@G3ca;mQ=l8hfa)8635~_P($q6H3UHxViyJlQv8zCjss3CxDd@&V0*GC`xE2|x zJIqSGYUT&Gyu{LvTl~K!Lk54|mCU6%OXLO!S!(hBiqS6>k61)xL8{OVp2uA^rGg-! zi(UKq^Qv21RF}Xjz(+E|OuRt(6k_<+@ejZ!bj2c)R8?kNBgxi($TlO+W^%1}Rn{JH z00Aevqcre%G<9V>8Zd~g5=T?!`wGy1{plIkPtOWZ&!&?`?_TTRYC2Wjqbz2G`Y@-j zB#D(pFC<&j#-*;l4H9fNp2m~O&FyHxE2jAQW)_~z(~qV)v9S)5}BA}TwG$EbNcU$|-iwrH>tDcg_<0T-T( zBm^A%XECZ6iIN0&02OixjV#`TyzPVTbzp;kVm{i%_Z)8x5en@jEorzJ zgf9axb0VM}OSn?{!Awy@7_1Els`zce3(S|JCr}y%r~DfbT1)pZ8S-}}M-9a5DOak8 zDz$Dk&kr1AIkiiC$*_l;j`F5Q_~0`U_f(gvZA zEIlbWIHS{lv-ksSkhq9e`YoQVs@O{TLr&piDR@bzX;?mL6QN9TO=OCa+++ zoy$&}zq0Lbc>G$qVN|okh%2}4RSt?7G!Mf-7gPRb!QV>l(6Bi8KoiuFm_gZHT;;>7yNO4o|- zI)^|BffF*37k=p0iGy6LNyOz#ce};hi||A|HA?)`)XE1toqMM#iy-qnoN%Y9hMh{m z0r-r6VQzp^Fgu=gb!4x@!JXK4hJo>RI<>2XA^#rT!v>8kv{_Msu9FPNV1<;2zol# zEw&4|s3);PG*Y=LKl2YG4^S#&zZ=2TXWw=VZ-)ck$D&7EIJ&ZPp4e*n4KJ}#)#PqD z0!I2UEymyFvih}{UE_Gu9kg7YyIb$vQr5o&=GuXP9E>$t*cgn_i~rn*IFW~R`X=Rn zZ=kOO-RqmwnhIrTmI@e6l2@<66GQ504pRtLKTLu6AQk`-Kz>_rb0`s~EJ2^NTE~!S z&>-O|E4b0}DE2S1Xtc(0O3IKBdRW%Co&a-OF7a@7XZJuK zqfT)LjFKbU*JEl7Wd^+K)IMt|n{^|AvJ=kn*AMiR4veVh7S zH}=+!utlYokd&dK@HgO4ozu{K>@^Ll>FU$nvCI}kw$pw-PxlX`dA-;ZAx+eOS}3VH z6nhFwyXqR&ia{sQl4YK^B&O075+%YN;11Q>83 z5c_bH>KfX6&z=P`@rI+jfhWsl;@>8Ycv@c}UFi(B=)zLz3iGWbnY%TAJ{O^hDxbXq zdrRT7la4nJd<0&)F$CV zodGXO;6#Z&2PeMX*;cy;3$GgXzD%~ZLQh#s3qQn5P!l)`6sQS^soAjd82^xZFG`Fs zvzAG9zZPTvmjdj&1x$>rQ_v=fxGQjUUyp4gcN2~1w0+-LtYBJy#M(Pf&2Fx_cqHH* z2y&-(YCKN00#+v=v7;XcmZ&V8p@MGz`9Mu{Uujv-_~4>1k`f-L{H*r%62bVX}S{b7$+$OP3^7&DbigVn5XKtXo2Jd{#fvVZhhBW zN>7QD|Dj@^O-(CkQG%N5q)uR4E$x{XsSYL3pGZNS)dmw`oWVaZ&Es%I`(&koC(hbS#F*R%1H=z(+FxL_THVSy>0)iPAkHv{+O0bV0lN~Khz}#Yi^-Rd$Lc!f!k}sdIdqC?Yo&cm zJyayrpv+9JKy7^dAROg{$iy4A$wrmEJ;Xp{9Y?RZVO6AUz^R%%;UrGv!^G@H@3{vG z*BQvxg&4pB2JOuV8;*l^JXY6*7{J&Xz?gwz9tIG9A|zB#S8%QHEkF-9ZB@Py*@e8ktK?5PCmo-SJcq4MqZ8&f5La!Uwh3W{j?9X}kPd9C<*tG$?lT4cb$GyVDMy?#M6*Q4}nOg#QY*AUmPKlDAd3Urtr%xvS+$c%lW#wMED9KJM@bIbtSMcCqY@TyC4l;W z0r7_$0UKk|k`Pij)ZUi?z$C&~{gz*rH$)iV3BnQQ@^c0MQCc}Ya+Yt~@e*K${Uh+i zsG)D*IT^@}fF%+g3%eKKlqap$PyEi;sVbU_ccnXl&tmP+m)XG}B)8bQxrRhp+tGMB zUa*YkUbY?0;~Pk}g^1A*P?CWH1RIop?|~Ju;NJ@`!yH?hD~b7U){3ZVTH~0P^E*pG zk>c(myJ5`7qzJS_V%__sv=jZ-3HDhAL2MerIab(2BsnWqlh+5w4X(+0d5HbbefC=* zu!Y42u#vU@Zk^@4R%a(nL)6a!@}O4ty0YB?XkFX{xee(oa-OKqv(u4S6r&P<*QBto z$!zrQi^5$p((fjATt9O6xAI|_G5RpvXtN<4*mrZS6`>9AO1?y8)p}3aQ2TQ&&uv{8VJ{&b^9RU>a^`S zN3Ec)k&2@%(kZ(RaC`J3k&fGhli`EJN{imv@!3qUT}<{MBEzJ@pERC-yWL<5U0#;C zQZj=B***w7D~*$z!lt$zwXH@?+3zu($&d`V-Q4!SOjwFKVbLjaVpxH2XT6 zIJygCr?g<%=f&82(q>ibN#(OXKG>1L#EEO0H!*%^{A{}%#YKA6)!}~1e?>zY^!mb0 zr5}3osub|&O#bD4pXv8_f3?y!TJX}KDTFLsFjdB9F9JP|42AlCfqp0@%sv;NHZ~^R z`M{63hHaAZvx3mb@vXgaAX6~uZ$|49NPx~YGMyQ4k{^bdpl)wqcP0|Oh>V&`G~3d| zHYnkYmLcLvH*zvYVUSAXs3#L1nba(LUiGSsOcxXRQlxLL5+12@BDc5lngc_p!pIdR zbIGKs#z-7R;pOOmJOqD;WU-!N?lw?NB!>3Ke0f_I@!7aeDBJpwi|3MpVd)ydbaRjW zU1F`cbtUIW`K2K83!vud@i`kI1DE?syg(7b5iSbTKs^T|`()eEqvm`1kAmc zS`PX0`wBXP28fZqP zgQ#@Axr)QcAR;XIqJK?qB&3$lDr@Z&^cSnd3M&+H0?FJ>6P}Xk07Me=t3rfqXQ{cB zs+CZ=eiN0mE__{BD~d#(ymhXPR)PM78|@YSLas6HY5n`G5CZGEhTXWY;n?h|j2T5bHDbTI z6JA_OnjD*4hLxpFy|rZPuyBB+B!zvcD#Rv^ujgU}Fu*7*w(T1$Itw>@hA0pwMe+rI z*2fg*VtGmlmO>e)Bnu-wY$&^-T1d|MTBL_~nfgyzHAoRoHw=CduA$~*CGMNhOa24# z7Q|gU{t6yFX{%?`MLf+XDdjm|PD44y?V|Ncz5P`mm``gfQULVJp+r=@vbcGt}3t;aFXJnnVrRKaC z_$vCWAfEKG%PMV(%tnZR^bwoW%#87`^EX06H2d%rd~Ym^#nf4*sBm<(Cid>KI9Bd-nkC%&#ZO)mK=}@07x|; z5e%o@3V-=LZ#dd1sUlLdB$39zM8$NBc_?~a0fQcNgG-# zK(^&4gY8k&goS%@vJ%`&;SA6v*9ejf(YMdzUKPgVlZgylm7iw#VJ(FDt(JU1-q&fV#ByPQwP!_=!-fL3hbCiF2wAWPHBpF~q5R5Jz5dy3x7q0jsa_2zRAOZA$`Q{@$-Jamuc`~1kr;8_Q{2y&n$D|{Ob0R5KEG8VRFjT*k)dMXP zotU-R zhOuPQj`Bmq7YPnRjS9ZFdNr|#LW7T+aB;JeISL%t`*Bs&DVKl`0F`(At~}tpuM^Pg zFWb^fQp8h#XmkP#XhGtn*`f9Jj7R`1bX^s&S5B@{bSnIbUOlW0Bee1v9;t1AwPOEV z)F`B4EIW!+ipo2?JME`WcMklfoSy0<=o+d@BFyU0dUv&AHxKwCBJKeW+E@d0RMre!GH)oR6F#5JcvB|q4?Q^uJ$ z6Hb}{@q*c5lv0?ajfpo%5Jkm{E={c1^(!mUVpk>Y`&OmJjmQr^D_%?{p7`hme1vaq zy*VAc+ZNi&adzWvZf?c?*8jY`z>mvMU;a;M7vdXGcmsthaS>mtRaAhJWi~6WF(3>s z)S8-qf|D>t1LqDq@(e_*7X|o|K{hU2Z>x1#WC}Y^nMTEhVH4K*Sb1B<+|QJHKk zF{EEvt=QL;XG3l`GRG`2CG8qQ!ds>trux_>wwmaMH zO(uA`w0Iv6zs7m`F1}8~O>s_M&8|0B?CT!CB~^TwY@1)Yx(}0WdD&Ruhsk!sZCz9# za?(-~qZ5=R#rP^cn$5nR8V|?X?L+jZeIGAIYYLpCu+HiQh+~g+Yh8u-(5=dS&&T;<+C*ETGwIuv@<`u^MNr-tgQ!87y za6U}7*F0J-lf7Ca%3o*D>LM*ic0ZeM&SnceOtOSEN_na}F8U@I?{qku4#$(U_}yN` zPr!fTN!IRk4zh#bAb7G9?Cb`+JNuh|D{@Rn(aEb9FF!_yuYUhH7`#1={`q#$fAKat z8Vr7mqUW2HJi9-Y{O+PiK0cLjdX7tA9-s(99GNtp5Ic9O$#xpmWIIw#2HpILfN4(PdJvevb*Yz= zPMRyzD3o^WCdmqmT7}P^X*A7^Hp}?tCd>Kbxpvv!FiHs{FfEGlaB(aE=TN0t@r?kK z9j^YBX4(YT2lqL=Q`gPXw?IjMQ!jxCkI2`yZN9dGpTfNjxz_kEaIK)d{egJQFqE*( z4hP4mdUbh^b&=9E&8h~P(h{z#gOu{Q=&GC-rEO^NeD4`FUQi!<1s|ZD46Gy|z-|9A z8&ADWw&^p2b8r9wHHO0nqJ%z~YsnHTMDHh6IXbWjR(XORO4(V9h$5~>`gHmjr#G>2u53dW{o4$zaB(wYa|;(4Yxwqk3` zuXuJ*T(TVGtUO&vY7Pm1Z@p?eDsiA-Q{HD5QEJfp%3V|}zYOm!C*W$(Ihweryf4*e ztf{^J?lNnuWhV7l+Kc7IqG_jZplYPH+Wfm^=ALJZTdD=J89%K@A!*7>o*R~2QoA# za%ev2p?O?(Kg=b6#6-$r=(_p|Wg1fQgFP-SVw}=n?6LUlV2{Js*kZ4;7x@26{Qni+div^ra_jLw*kVmdRpvh)|AS5W zKmNCOv3=RQh@brVf2e!c^|p~DLG*t=1q7>AfSrU0-U$`NBa>2Bbz7<`%jKRPT4RAg zl7s~UGysaGNj$;%bA8SGuh;UDJzAO0+e3)Om{!sK1WLt-Tgk2849`u^~3_fiL&a)*kr{z>dB3 zXn52G)4APlc^1%Axv}e@EM@ht?KQK%`emxAwn}b)5S?7x5ZRK)uVocSa>X;KNIuvj ztT(u|M)=kF2Yq`3BXocTD(YU>O#OvfMABj{9^0?SA?+Prog(-v-zrB88k`FpbDSrNege-?*k}4nVp(ff1DM_iWpJ6*6vm5)v z3zKVqWp94G%}?8d6gUc&o;+aSMb;jyyE}K{VdT+^RC{C*^^R;CGOxkEeX$Xbyy+J& z9)El|)VL=aM_KdZ`NNrhxRYA#OWnLMv;DT`qHWQj_D}|f2ZM$j7}$^BU;munv&h;| zBc(~It;JOXa0hfVyEpCyKcoP&ulR}LhmTHw;Cq&Gyj^GadJcxSe36b)*5L4K4L&+S zrM*XJk3sDEg=(uWhS=bPSc4mc4rAebx|kJipX@%lSuG|M3}WFToY6S1_rx%wC66ct zbt>Z!pTcl%UfAlHa_xMoUCcm2m%qcIc+tY~LpFkt%{wprV!y1;Ucaok=y!zA*e!p5 zh21Wlbj~{UMWu(|G74RMRa1u_!GK zqP1%A&O{rpqO~eZR2S6qSBt%B*Q{8sIrnGty?Q@E;X$wbCt!qsxDO}%MRw1ZI&l{C z)=%l)a)=kD}tsGiP_wW7H?UxJnO*uv{&tw^Qlw^)6Q0ZE8Rfh zcdU#U=-$eR7qBt{Pe}n8v^5?b=`Q`Vs^%463sjrN}ud^3!eo7kVt zQ$=%iQ~VHKKohTi9$wzuUK$Henfd|cZppsfsC7N@WI=Y1r9| zL?;VRQ&l33A|Cl#k!vo}H1;Q}lxpfcrc+gCEqS)QC}7b)x4i=7pQ-wpYtI?+ z=vf{o)aqoOpBeXs`b>Qx&`2aJ1bp?mx8w}k_yIoD@Rn{^6xk=3bOf=6Tz6SojBA|9g)X@CWUle zGP#qCsxY5i?1Wyl(Da<*2vmf{l%vjjoc|;#siKZWKX|_}(Wgl@XK& z9}#X)?dMZ#h#;b$zo46UT?SCLQN*!Gkoupe8cKnJ_cOHAq;@r*h5)nKdjBq=Jt?yCnuh^ zb?{5eB?2Q_w>feo9t~2*y2+#fU_hV0gSFO%!E<~cbzM&kznSr7~lgD7NFkw9X{XA;_XlEj~d+6O(0PENjgB}{oG9`luWefjIF zi{a(BFUD7wq5>s+CCK-TwJwkKK^wBuc_ZI_QS8rrJo^9`>R|7cRlO*$g;ncN!e&Fb z^M<)J;o80yf1}qYzIB)iu36B0h;%vk-}C* z`Tg}P@IzOi1`x8>**(*`{r>uuc1>>d4CZ!gZ)Y@BoQf@un}KRt5#32KdbNNgdPV|2 zNofcx1hGB$t~~WZT)0XU1{=k30pt=T3MuyWNEOxse~s0A5dz2>fc77+_&bd*$U-Nc zJtQcM3%KFl;Y!Rll}J!AEO5IPkfAPK{{H%v^+1@s6#Ys9ww+e_l|eX(a?XBrD3!JVWkcpTgH(rrOhW ze4X51Op+*ArNH07-zbw&kGJKIGF1s_lIARwsO4uWFjzprA_HnD7op;a7ID;ayNgwl zD%iiW_R~4CRc^GH_#yRJ4G2mZ1d5*~3RAzc&lBTbC*>j8)oCFC@AG}-6vo_keQoSl ze?Gm_$r31_D0H1v^YpVwiq2B}=1rZ~*)m3qRyo2AxyPO0gJbi8MVLE}rD-RI@@qiB z%97y$oSgh#na0tgvh?#zh3Wp))QYi?lq|^{tvVU(TU8F3d*YHC( zSa~!-8cx>(Y@>)Pf=2;1md)DV9*;wy zzx_KqC4n6pH1s(94^*8*c350f?*?#BvF7~!=9FMCj)S>x!ABNaN+d~P{$f2!)h ze2(wLqPc9jk+xenOU<*-JzS?jWQ`-@-ANzn1Xn8mKqs#FhVCC)-i&&xTAs|%wPAUv zkwkH-0mG-;=c$3)43@0y*MR?9<8Q3DZ7ns5eaCKn``kB5I7KJnH&oJDJj|=Mefl5P z_}#jOTC?BVc9R^p?FRX&ZJXp{f7>?em!+ypC8Y&G%i zFt4db5r;JKp(_W4n`&;ngIV(P72sv%Qup9e3r zt2UELNBZFRwiU7U{i$tzZ0msJ^MSo>%N(Ub3n)o*D=nJK3TmbfZnE)4f75&t==M1T zzD>u$oj#yr@omk#tEJ<#c(SCFh=1qTqJ3mCzC=2per8h=fZ{Y_%pky?I8ZKJXFvMI zE#?Zbhd*}j$lQj@#?Lz?OFLpxh(T(ld;*ccmF$9D*h|iVT4i)uxuCrA1x0A+0Y|gc z$r^AnOaPT6i<6W2$qB@}fAYu0NbXn-hLBE9v_H}-m^&;1CoW3#0M~4C>OUY0kfu9} zL?=m(A>rAAlro%Jf@>nenydh>2>$b>lThogZLL}b%I|cJQ>}HTSKb}rdY_zVk^RJ{ zg1AT|>1@ORa9;=ke7nsSidNR<$Yje=&vcFv{X6?_ZH);n2(2z1e;`Sn1a@jHkn3Wk z)zLmx71_bKen%2JDzK<#=$BJ#7FatY_kFwtf&kZ2|R+GizT}>{tb^cSdu}UUz7`S9Jt#Z zQR$9)u7Vm=DmZg4`)i5{Sts;vDWP$np52Aeg-Nfdvo_^Gm{F~v&KJqM-&*ixn{ z6Y4rE2nsT%%DemL@9q^4{5UQM*pX~;g0-_rlxY+M(OPkoe|Pt%@9u@nn@`SA-b4!U zSF!Kqf^QJ_f#HF&WE74&cfuLvqVjzFY4nBB8TUdEG`=H&t^SAlLaurG{M~bYXBVaa z@$U5bDeg^CHh%Zcc>dx)jz z_<2HSD8ysxA3sr+aWruz!A9{`C}VJxiQ_3t8oR(#AS3OEGetYzrkM`ZwtRd)`nk+I zWw)lle~eTZD9B1fnSe{nuk<~X8rrFR+*N&0kOr3cNkB5n1+-xfQ~t?`&$gM#_yi#m zvJ1Qz|GzQxgIsw@j}!_~fS*g01JofMka?r**P?rJg%BvrGQ~Owt$%N8Az|Wf)&uPT zcZpr39W9Ur-Tz*q{!$rqUhvZ{(f)@ZQE)2(e{kBcLx!Oaa(>M`ISGwrwA6Gffb}v| zEP&&Oj=@F6L0CYjRa(NKTgWTe2=0eY2r$XH9mP?80C4?5!l!jIdE`TmoU~JU-{vKl18w6hI+0zp;ut>Fal=M($e96#-e~Btyt3+jI zK_!8%e`7<>ctg+14Z%5*n_&%OBkboBO^5*$4d|S(qnxELMu7!D>mlP=loON#BwX<3 z+98Vv4b3d1eL&3UgHpDnQZ|G_c5Nr}u}0Ig^|*e=kMEhjHj=qNO(Cgp%{I@Ze}S## zd&FLx+x0;@Ufkh{WlYb%wY7k_Ab1D0u(%78GNvU=lQ6dZGkCPR|)D7z{pD7d-VoGpcMQ{ z{#wLYux)h`MFDjnQG@C^siXQK&ngtp?6LamFSl=0qW=0j{(bRg_>)T1(dDblH<#+& z8PDdc#WUv35qIpd`j^W$sDO=UQiTqyFc0kCxXy(~UtxBxXY;`D{A3w8f14Zhas^!y z(%iAiqS>#W`0)MesSj_NVF!He&-T9F`pKI-(vly}fOXZ+{8_m*&^;E_cw?RYq0qH* z?JV^(zf=DHilQT8!oGAmPfWt8e0H&R{FHt3F@EBkk8$~%8|TrVImIwtxF)HJJ(JR92GNH&JD@z>`P0@GR#Gf81OG6r!{vBoRn2 zd|4@3PEF-D*p6eSCr#LfP{YSJ? zk~wk|V2q~lm8P1`$Tjrkg;p-7Q}71W3349mISql{iHrV#t-zR~3U`L~3>j#Ubg6C~ zVn*NH?eN7bXA`Ybf8LQyp4_Z7Mybm{PAiZwj0<9T@X*n8E9z)cBk)v$(wkzce+*I; zS{}(f&`ANksRX^@A?Ou{*x#R!WL*|yHoyvr>?yu9MRf77=i=t4cwHlqmX!5fi)&^so#PoBCFilo*Zi2$$cmBA46Sz&x68oOk%%e16p{K^=Q zSUBj=jFj?We|I>b3z#?nh&s_wZkC<0JYAzX*3ehr8ea+=Yce z2U~!+dkAmHPe_{{=)@;!vDaiwNdQH6(&W8PNx2XqPa+@SS;2F{c3q#zrx_1t^2cwT z;;;sUVJ9p4(y2`jte$HpO1y^2i!p$Ozso=_EI)x|PXjy-ZTzUvvC_Z!JPk;J# zPaw_FH3tCklC`+oY1*|t z%Ar6ndj?(0EDGp#?*9(AFc0woOA1pFH@t!oe_51w_k45n7w7Npl_)OlFIy{;SJB_y zlX7EXNK7U^p))@$OccN}7Z2zZABDUU>!))kRn|_zb`1oT8&vX_trrxKbMi~3_;!L( zvl9TjC5Y-*(mU9Pe4#)A;3Ue-f25%}?(go!;NRVYyk`kg%tw2~!c{%m>ocZDd92vc zf85#+uyadx2Z{Y#mKe`HA>1w6gg<<-5|>7fa?MP^v@;M&LA-;)Y6b6afM#wOtHd;cKuJt?=!A+&7t3^`DobZZjoeLc z*3pJ1CqtCijqLNXE_R!d5fSRay;mL?<(k@(Vs=Raqip*&s1|4vRw*u?(Ubbze^Kcw z;s|f~PxOX@#zQ?a9d8DdD_-dm-E=dkQckzg9_6&ntvU1Dtn>+umAD?|4-#73J30y2H^LufBdii@&En5 z$mKru4G=u86Xx=MM_D6v2wGd^_kV-iDdgJPoS}7=O9jne69uQ6zyw(XaJaEfv#EOcyLTw$ptyO{yBYP;sY6Un{Cszf#VZ3+Euq+CgMh%Z#o-n8S?Ui@rUNqN8o)QCO5ofUUG>I%M3v;_u0%WIZ_-ycNHAJ@gjhw4H>1 z3PoiJqoKuNxdG!n@@+}_n)8{W?$4er+VH)7)N)|yDPRgIe@dsYE9aMCp8Sg1#qXoA z8d@x5b4he+=j3ydbV#yvJvliF4e*R-5mLNE6!?B4a@ccbf9FoYu(r&+i7}wfun0sd z95EqU8d{KQbNhUruM*nEm0F3$#oIf*2&e!|q<3!D$^9@l4uM8d-=4a2JR?X!gPxjS zYf;F{ejX*@P$CCJi==>P;Aax#W^8qWu80di1h-mdj^Vw46bZ~E7?Km)Ox+pkvUh>^ zqo{x8LG)8*e}5i~J&b;;iHWhuR`iMsCwDvXVqSj10Y^vCy{D66VVHCa(gx5N#KqQ< zVfDmXWOhi_wy4UgoSYmjfnWG#9p>S5q{dE8G?1<=<4RJ1ZtO~lbz^&bxNXtl;zDaqO6XhY z^1NJ;cU#iqg!K)8WgECnzZLUw8$Te+BGTCS*`d#0xw*YN|kA&TVrHMY*L^ zf#4#Eb$W_X6Zji9TAb4G6e6>nvZCa4>AMepIIB54vVv^Hvw2G5FI}iFQ!OPp%#ypt z<5Qo$Uw!S<$rtntU-|UdD?Y_$NGJ@MX);bL_nD+Wc$sQpjy?!mY2ckuhF%ENg-8hn ze|tRbgyJZx(Eqriit2mneeNt__@3l6+bKx0V^7Ov_CI@aumj+(QM&IR(yU#1zBM z1qn!$>7WavsEa|fkp=j&7eUUr)K>a3i2T66371ROCFO#SjTwH$+f0AQ_ zT@?POQ4YgSBB=?YX`%h#IGmL91)wRxpeDg4cUD`+&XTd=d0%0!R7%bb8KB>Hv_com z!WI;40K3u)fb%Ru3-@?vie``CJ{sAfYSQ#yMt@!9`9=Pa<=xybks{3NchrD~< zx9FJlxhG=uvkaz+L>^g?HSzEA&TeAuQONK4E-7LOLHU$`PKvvf3I}nfe-IO@7jRC^ zKvBoFCWT>mE)B1^n2Pp^k}x~k9Bi1vEtiFF;HsT5MT)d0Rf6K9)Gt4EKZgC_C z``|H?(tX1(1m;{0a~e~?Pan=9|E?6k${ zZ}C>(@|=Rt-xM@Gj?gMi2DKo05xR#lE-^1-kAw%G5T)#q9A;heT8Ceg3U8HHuapy+ znlC0Q6_T>ZT~sNR0*X3Zt@;98>vy|@tsHTs;Gt1)^u=zK*Me&(Hm>@DFhzni4tONM zP8X~0eBNYdRXBrae_Z*4{lxhq+_xb?-v%UcRx4Yb(Nrbw%2u61@_qWLwqCDm;5buT z#Q{@ld6E^|o7ERhfKz9Nc9Z!@Eilm=nU29`K$gotC_BY~PV~;e%-0K4znpTv72@T zB1Y)?eL{mN#$Lltal3xv%BZ}pUpg#0@=r;gp)cNS!S44eiWk6CYI)qEiYfvZMkjTh z`~U)fhHp;xI`di<*rRD(d8T-!28U%-N8p+bHU$ z?cY7KF91rbP+YLqly_)x!5%U&Q;VsnhlVx=a3|&FLY?qvTkl1(2;gpVjn@}m8h&!S ze{>*?38@C`V*P3?1lO-|wD1$kn8-|a=KNf88sc2jk!ewkCB~nDmqzedvQ|sZK>%O3 zv3pCOa2~?<{%4vLuk#pL42-(@0%O;(r~A9pn0mI7^=9H%B!dF>c=BoWf{Q)>OLe8$hf)DU2Rf&^Wtdg2phbok~RevE2 zm5CI&6_h$)8a6vNbx{zSXOqHuLg;X0+n!{(sA)^Ne;te> zR2FV7%jUu0-pASlgR-&uT)pE>=FhpJ0UcwUB%x`d4$|C_m5njMe|I|<|J7OW7^Hpn zs^WAOXUs3a7qH@$VdZGK+cAaQE9A5|x;a9PVx=?wst&J2GgaOWj1@7wy?u-{jRe1d4!eSZkrKE)FO9At8GFB z-k{hJ!YQ}gDpQ3)O$vXEwd6yXhpW!SI%RmYihxOFlKiT}k9Eiov|k+Pu(C;hDaMo% z*khj~QuF=cNqt~e>|FjlI}U%6b0&gi1cmX5x1oB`{Ql#B)jf1&)p2axR+H` z9}A<(r&Mc5%%Er+lW<;Fa=ev@1lH7?O*$$*)q*vUzvK|VSxd-UlVP!i`xZtWXTz2#!z zYw(}bu6d@CIH4|Be|hVfo{=HqiJ~|Pf0?i|r`-|=@=cR@6nK`=&h#S5P_Duo8WF3m z<%K#6j5>tPccU;wpUVss24JIoXj;A)bwblO{(?p;{tjoD5wl(weX|5e0d- z3J7TAk=stNFCX$JQzr@hUDzHBFrp54u7{}#fdSWKn?YzmfA}GGG>kNtJnCX!!$|YU zgI?aUf7=1y-Wjzh1}o1>ir+A^0aFpbqAmK(6bkc%P!bwKjRNt64wV;59)HgU z<8K`z29UdhfAXPN6^CJQ9c&j)=uc^qS}C)(V7b50pARs?{t+DCmEHAZKw_8Jktr(V zEP#>DX{aUk`8L|^0wzVpFCq|`G0iav@8I4l;(Ha#DQ1JZ=E61+U8IT`1mzh-d{~JNFbwJHTI}-Ya{iN zYf?{6e^^b38GFlDjZwNrLQ@S$3z)x3a@JvJpQATVsYj$zhxWNtR+ua5WPDrfjp*eL zRj$C0c_bwiw9I5t+1i&KZB7E6jOQe{16#GfceJ^5WWH=?zr;~J+UI-b??BtB1PQ&z zLyRT=iGjTz`8k1TiQnEHBfSNGTWx90q66^{e~{9A0P~VBH*6TjL#E4w`9C@koV5^V zq<~~|UFtZmyO8ukn6iNyKR19$;SObiVPoD zmEi#~&s&j06I1D^c@mByR9MM+L$ci9JX_V=8YmF`%+IGNCS8<_+M39&5Eh9PfH%`Q ze=C*O-KUp?h%gWe$6OO$1gXz*D~#N%$g|J4zcKI8B7O>je4=@1{*zR$hEga7s2CVL zLcFV!aX{R=eG~5Y9rDCl<)e%W>%xjnL?mV;8Hr>}{o~CbrR2bZjF2K-+CLt#z@5^)Z}&<2xEk=#)pB|MSi zjW}poQ62(5;r1OaNu79x_M2&B&7n1*FK*p93WC=(p(%*IOF?U_1)QsO26-b(F-{&1 z_?X{{Lot_Sc;1Y~>YeM5Rzch;2P)p4oM=OPPTUzws){TmwFSdiWdbqt0QqQFf1;#g z{5t*#NR}W$rW*w>=6Wu`nCrx)U^Vlx2#WQfpMB(fC4?iR{P)R;%avV&x$V*yAH~)A z(O^;cfv8}Ikn6vSC~W9_elBbd6Y#O*dA-o?Wj*)ZIgiRFq8p->HS_Iq0Su_8)I>ba z6=wZq!4Hd8F!UPt~_&0wcZkV z^N)`9hzUEz*pZH^iL;|xN4F;@p~OfH)HGgEm0Oz^T+Zm^(H(8ADu6ql*9 z(ID)%%d#Nc*bi0D+-iZge~_GUi-5}nlFa50Bl{5uwE{zzpK_EjI9Mu$8$?Hi-A;pS zC}maF$Q}(wmE3{|5jXZtB}6D0F;q4J3^O9P5ZOW`G5uiQh*&WP{6~Aeh^v7KHH<5bQu+2j;ddd#FwHs-T>swf8F}Y1tv+cj)|ZA zN<;L&{YBvndnUwf-tDYeHwIUcAi2aC$q@bf@jvOajh z5KgJo5i7nEH1}+nfNP1p&YMgtahFwLM}SEoM|2>14Eby$GAfk-=v%VikA-f2R-j$Y9}BF^7-AwBjj^ z$+4|{D62s}e8Ffz-|dpK2kirdt>HlP2JaxUE$tCs_2cqpRrlt5*Ix3f`R$;^5UfA2 zRJ{A4*zwz9$Nhn4UE?P1Uz#^3C-e)KW~QJ0R*h8%y%uD^UpxPuPB8P?H#))KFv~eh zHV~zRlM`vae^nvhD*w>_1jYVE)vZeM_*IH)LtHrGu`6|6jr12~WtJxAhL#NEUw5=6 zNp*d@JPg?jJ1pErLP+oB!7-ahAQNy|S8Otie||DLNYFguk~yl2K{A2XR@@gO-Pm;! zZcq+;|Ni~)mZyDu@gm*m+57kR1%v>ysJN@lXXu*3f8bLs#iTnxR)jWq=p0@$=9l7Q zx($w>1S?t~!rVT^E$mY~W}h<8>=Dq*De5YN+#?0;Q`l}U!4Eb+>APLH(eyLn!GbTe zB?*b(e0RHuY!5BBv0u1994xtJ;|>fqf!!{_;!9y=WAORxb}pF)kdbc_x>jK1zcX%r zwC`v0f8R6691jZZ8;8q)2fdShf5O94szPXBlnXMwkoZal51XfA%oQ;_4?vv1@7o4O z@EFbw@ZSaZdY3U>uBCb{IT$j6WgOhue(vAKoI-0RL`(#V1hB(yry6QG7$`(K0sL_$ zdyz*X!DhmLh>U5B;k}jQmYS#wh=`fY1%albe=#|;-s7^`rG1WeZ(x4t8XgTOT!FkE z@dPclu+JAo0QZG`?h3;;UY9JmplE5kMIKnl(Z$ES9NZ7U^#e@lgnzM?M?1#A&Os%~ zO#ZF-F%UmwC!fJ7Q3vc32nRn9V=rQ;Muj#ws%nt)sH&xKjhHC703>IF{u%o*e=ZN^gSXFSXuZC!p7)CU<2#pG(P}syUwarJrkgC3_vSGy6 zce^jVPT1#Qbn?`<2j7XCVi`&1G9=N3e``RwJdb>f$=d0s^b+~f7-{uumpBV4R_vzm z6m=H6o#rfTlU%#@*7>+{tgY%ol%iVV&gp`xmg+h21IY6H1qK>=R_{}H+Jp%4tf+=A zgTRHT&+7Gdv)u-VF*o9YiJtX4o#~{h5=d$prk46_I;H)7UnL=3rLhxO>a*sgf9JR@ zm0*_bnpv+yTg_J6bGj<=!)aux&l>GclTK8UtX!8SiKRX>J5ICZsD!72vD9Z?zuT>M zRPHmaMh}5I*Sbxk-~O{wH_X1$=o-zYu4ZHsfGfyX=i2PTpYnV}Sw0_8P#S$5{}Y7` zR%o{1k0@-i!bS)Fh(c*Gy$<{-f2|Ymi7yHCFrCUvQm^;0*J4Sg)9DmS_>w$>CmdI; zh9Rk_SDQqiR7=V*CRLAF%4C}my_qI{BuwJB(=Yl-Uw(29xWsWYj? zj_0outeTO@YB34ugwp-Z2_Sldv^#8jdwrvA!uB@IW<%+BjE<=v1Ws1Hf7vto4X9pk zb@JNM_VPySMzhagrloZ2Mz^D5J_BBDztz6c>|v{&US3}S2yfCf8eMF%-BsEhqutP@ ztz~u0hS5b(t#{i>qh}iZzFvkaUawyNvr<>;0LkuR^YHbxde&-RTwY$5A^Eb`>h&%p zNWQwdYBU=%#F-BK`SKV9e>k=-E_%bd4351E#``K&wXHzo~fcAc&}aq6Yn^-QM$v*Shb>7(AXG3|b3=6h#0 zZ*lK<84VUSZtsv8=7Bf-sLAijM@4GxkDtVhxT~e>Jbc6AKkKyJf2P|jFEyR^rxRCN zYD%4H(~+0D(KnmTDvF;v)9$ogL6USz9onfNN!q0iZ+c+C^LDGtY8ricR6*w`ureP3~ z>nL>Ghz!wW-bu(Se}6(*xE2L!lVP-qeHtq_bWQT6_F%o+G8%}q8uh-?F`DgzIIC$i zn}}d~Eu~R!8QsPn66%^pufvdKx1%)8y3uOxqoGFAsJ9q4Yva*sB1+vqB&>SB#WAqi z?JH)zWi%THvDOpy{^&)tcZI+u{?)6UBaybz;ds8IG`el0e|vCgyDbB;d!wT?TMeV# z-e1^!Zk?`T_L}Tok-*<+Hk$5XOmoq>7`=R1IU1(dXf~WG{m?gQqe?%RZ74o)GZp0O{d+y+bN&ziPvvV`z7*i((AbL}yRBw~E@WxF^r`_a4m)U73jh1ON_vxc1g z>zI9&fm8=9;l6qTOgibcTTd}JjC7X4#2mfs^)4k?fP#ycrIGiWoyMqoyt`(@Y&?B! zWn|{W9u2>l8fx&3Xmoo@)95uj%}%}EZ8gn)!|ZntQ^X96<>~Y;TV_Y5nc8l53ZPxO z$S;wBe>g-l^=X$@IiYf)ygBvyv_4fcGIt>CBt10SgIK%UHCj2Sbm~T@@x=Vhrr9w& z2LQ5_16!-7H0y1n-rNJojzId2reZcbM!nl?b^Fa$v)yU6@j^U!+$$%(ab>=&x2w1j zHsAc?tZk=lblJJ;OS~;Y$hQJct8bXtM6>?Ie=D%lH@bMiH5>in68r*7)bH^R>Sq1v zYqMi?JA!68bZx$DwpvdB+o(GlUADdnBk=3Hhh3?Oe7m+XvQFjDZ0^ ze}{v<(MF2XtT&ZLy=?&Bn;!=bP<6v(^liJTn4Okkwsh%)a3Z*8G#QuOY%ATq(T6%^ zaZu0bFpvWl+h}$5@8Z_#Ut-tlU*Olwe=WlntUj&c%{L;yC zV)mZG_E%Sz-HT>b?$xjN>ys+o+BG|7f3GU{q7AQch^6yfr`>XM?AYnJowAgx*Y)bn zzKk7d!)wF6RS<}sX$P*BrIVyI_=eN#nXc(oBz@D4JMm0ueI_m1FH7XQb;otP-*Z^X zWFB7&&-B{f*PuestRJ_uEgSH=x~g>!?M5=h8;i%{m~ZVrRN0bv(FUc+sKS z5-M<93hb%Miw@NmXo9*uw+na)3l7xGAhre#Q$PF`(HQM_JYZZ)=cwN|r!tQ1bS5UK zLy9$armar*2^>4EdmY-_6PwZQv~^&OJI#jMocv2vCiN}@ita%m|2EvTfA(WK1;@&7 z5~dC3PVg{^oY;f-x4x?+kTGVFkRRYjEupbL{Xt&tNe}*1q!qlZK-s*YSZ+FjKi`A0 zDgIYQ+4$dg>0+FY(`om=MHgcNg4QG|E7T>^7KI;wZ%|_)a9QE3b zwtN+i+WnWE3wd38jm|}Te^>=o6aEw`f(8?H6|2W|BdPQO)VcAJe}*JZz{H2Oxf)i-H0wR= z?tnHRmk%@)v%v`ud0TrO!|XFnZC7bDIKNljR=JYd?i#)Je*vbjRH@!D%)Narch<&c zG7=t^r0iJ<^d{c4auM~+-tbDkh^|^)^JR$)HZL3fW>xNAzcRZ(8w=zzyl7wc50FW@ zFfzwFjnSy}@=C@!SFM*QZ#2TIEW%Ejwc-Gt9 z&Zso8Mtx{DfA+AZ*)gvU5G`)IKkXFgv^N@F3{81r{n5*A8J%8TjT-GMX=1~EyJsTB zfnM+5dN4Xd8`f?sjds^)cPkuntJA!=kmaF6v(>v8mP9HIvxTVqd&DK(tE;NGqo4P0TBJ>W%sIdgZ9kf)yOQ}pjV?V+_Jfnf4$v%*+WH;zxev5_pdH{y-|4` zuCBTxxZ8_$==K|zB^w^oZJ3S8VJh}zV>ldjssM4(tT%_1wP~~({bsW&DX2Hv#VvfG zT|w{m=%h@8bsBU!@k;E$)T_6Rb*luL^i*k3Xm*WG|4WMPZhgP*-YI8_fB6HI8giwlT58JpqW454X;1spN#B!@ zWuMl|9G>HOQ^zYSPu*VMlp%gHnRtgOm8bT!_x*dyh?Pty*De~{M;jX~54mgEa%SJI zv}mKFh2lCQpCYmf$l4(f5!rZWEG=EK$urZTg(lc$-=;Bj7NzWTp~z0RO+_t>hmZPe zf1p7a9@eUEK=x%;0I!*mw&j1J+WouuPxr(JACiYy)G2Vv^?Dk|!FmaSRL(Z5sY;o4b}Pvyr|e^7c{JEg~mqB6wd*m!pCD~bulAy8Oo=lwrKJO({E*p7|1#C?|8$R5xuWW)NEg*2shavOX6E0~k!{z^lN0zi%zY9uCK|lh z?Sh(lUbKla$LRMM5@U*|OPnLvRBK|_Q}pl=XB-D6Ep+ab zleZ8PvDlZfK9GL1Xgw)J3QoayR(d5|0aDRvSnYHscM85bBJk#y`U)|fC&Z}wJuaQN z%kuF&_RDX}m8U`JcrIpkc{f?SZ_)X2o{C}XMIi)16j>vfLmisMrx^Mpf8{Yp#izf= zx)Dzv=svWeXaTy^gc5e!Bk&Kk9$+?Ay&^5(SzR z`(QIGC-EHr#k(`uxyd*RfAHwk4{4saZ!;q*xErVfHe?%O_A?r;zzS><;MCnHocgm>{=AO;RQzUbZ)T`9TgBn` zfBQJg_Ke=&k*v};tomFu83<@zT3pKzw zZZCAT`kE%oC`_mVuOWyx%f2plC$-s!#?oKXH}g1J&E}eLMHx|aMPYQCzE>sLU>s5A z=BM#BgLtCTjh5;32p z&$E)2TjYdsesq2Nz~>!ZKdUZ_FS3pAcIxlpLkM}RDa(zP@lD&{PQcpc0VoVN@Rz9P z$b)11$t`tPv2exG57adP7%8kLY#;_;7i<4iG6rbMiK(ONe`l$I@M}|&8W z$yS|75(TT2s` z!NFuQgJZkicrsQIj6yLx(bwhQ4AF_%BU}ErSUDY))vHRe}UpT9M|+`NZ~q*fzdoN3H{^- zE06Mbzl5*pM?jQ`vseZ+PArvr51ZxUQ>v1bE|y^S4ynkD9`QDz14Zv+C!A5MUKG$z zJe`n9(rEeq!D`j<$I>$TZL;t~_*HL{PpSK!g(30-Kiyb$ERiCDnh@Q>cb(w<(lWty zmpbYDf51u7_o3BkiGn2!ogm#!#X zWg6W!J~3NH?>*y1tOgeH7Q_f@CRsS2-lw?Q@Z_$NG+Ka5DfzT1cJP+6yxbSR{;d3s zZWMpYoF`*&zJ#FswVJc^YY%jzL`!xfMKONCf5N01dm!)z-U04|uQ~Q&nHkuXoG9~X z4cP!_AQIP2C?uAGro_(v0Ii@uh2o?*0}M1Qdi{HYJVqRz%pco+b< zL{tGFmg2>m+Bt4{?L6Hq-!IwIvUZM}UOWGky0vo&j(#Wsl?s?Mjsli@Fm^&OT7Y5@ zf9??|nbDA9&|L+PqB7Yi4hCO?Re<*rEGeF!uoxP!p8Wf0m11fN+%QA=$)7=kp&mGQf8R2?+eHL<4ii|Fz153pY5m}AU|v(Gw}_Hd zL=U5E6o$$Vy81Utmpr8;1SDG)qhts`edRCwG~vZ;Nuq=UZyL=gre4GxXpZ8p7AxlD zTN+$61%L&TIaQV_$0wDB%Omzff3aGW8bM-()#iCG0f+mT%n}o3uzvZTRl3iYeHaRC%oFFii zIzTz>LZbZJn<4MNK%alCceXV0#6{{Cz^H!AvRS*URO}sxgB)S2w-CV|^C|^T?~UhA zr!)qntxRYN=Hp5b&1!PL&;o~he?zU@0_rOVDa`_Up%BgXbWV}=yEq$v_hYW3o% z;GMi;e#(@DT_QBW>@It#NZr4Ao`T2qoPl^cryIbq6w>&HGJ1+)NUme32d=KC8U<`0 zO;Z{^-7MgV^0lXqUjKuE2U{M9*PqC<_rog21rAAZ+&D@SC5XbA!t=}~e}L>tpafBZ zvG-CZMelq#ce;{`D(a8xu;Fr2i9zCSiF)$YkriFsB*Dj<%Wj%gOko$XN z$p%0mhP9FJ9paSOv+73SBaIU@{~pfanperzqp9Nu7VvqGKYJFmM_?Xtd2Z->hjn6nULN{b2G(%ktO`B%P16(XxoRl|DH}=?jKVA<+6@m>T)=THiK}R7`_}8N`3xJAc!jJgv|^2k8KTcq)35aT+BK$r=xGos$VbtHZQE2o zOHWQ7JwD%W=?qXTK}3h}Iz+jtgn_`Wd z)r~+vn$h$P*bfv!?tjBWNzupvM&hi}NX35Hhg9D($q|$fo;qTpfx|$;-x3O;=+?8n zd^FokCzl&r&cB_BTwWg-P#2ld(~Je<3+-j3si*GTNlz^XIRF8~hnSFi8eg6qs~Rkl zeFxFTqf!ubM4J%J-z~>Rhv3W}neP`a$)Z%q&Z~fl1Ic0-lz$xCpY8MQXUO$%auR8W zPr0ubp(BT>arjUj>*UGr2LlKR#6SQlwp*x1&#c1%9wShdfCw;)_J4C5^s zAKzJ@Gu?Wd&myE4{_Jf&N)~!d3F-LwjzE0nj6CM8&Hn|hJ=n*f1xTRemT9p>^BC)& z5(m?OK4`~!8GkvxwaZ%r(Qpqh+cA%fMfWuULl)>tk@14a>Ny4DkU}ZIf}vkbqB!Mj zQUDRDv}2M|5d#;l-2xZCFbN8AC1GtY$;I$iXKU@4eAM%-m@A5T;@=u2F_(1@=CFuCU~9jnZ?K+0I|{f_>Kctx+vHzzV-L z_R6XZ?tfdOTu^YF><#=p#Qd?h<-$Xp7^_z{f9j06=6c?#Y&iAXR_;<@SwA z=BUX8pNkmf(PxFnh4QSbwGSDE2?ITov~(sT0#!Io^sI*?op1 z!|(Nb^ybunv1t3+J56(`{ibr% z-|)1ymFv>YF@SzR*T=g2ncB0BHmCdQ%r_{C`-GKS^)+C&eQ~{l`0jd=^=lKZCgp zGe23eHHR9Eu314GSC2ymXPg5Ie)S7it5A(D6qDx3Y4sh?HCNI+d)<|%a{7( zWSANSpBh3oC6X_?SmV4p+lG%!hsl@F$`_N*mCd9yT{0{v0rHmT(EnbM_L z1h^~>?wFKsV-6Fm1VXusp?=QT?JELE;vu`$j>%h{EbMbkS@q**oxH8A{x?nV zZ6<7RU{)dW5+fY=5VYNbdOFt00%n&Rj~O45PuhBxr=t|6EL%VM3lpk=Iqn070QSR~ zOmA5M;TZ+41*MRX1P0C3c0-b+(SK5E=0-##<~1%tkH4kXHMe7d1OUz>W#hF*8If6x zj0{(3pjHY2yPoOf7NlenrJ)iPsRRD+QY9)<1n_TuNvU__J$zRtLO{$`B{`rOK|ZKN zt&j`6BqtEmJ8}cF!3e@g1vPb2|m4EBSt;pxLW&x** zh~QH#&pwYdmLHv42zZd)AYYBZnb4p-6R?G^aqS$;rEy^LdyB~1ktU6NlERUVqG1}S z2^C624-6B)Xo#7G>BWo-8>GF>hEhfR;Ncmmr_5^*;0|#cAOIR$_hMy~<89dI!Hyl_)1Sw0s6;>i>xGq2+PlMojzkhHZa<=&3?h5vqor(AV zi5q#xVCDb(?feyQE-(Pu+sg0T$(#Sg)%?F@F<(O9ck(8STS!xr zr4v@jfS}fr7k~2d(-I6bQfwCNM8I_oOq6(XBvc*Ga$;AQsi_q}`EOgj1CqakYj?;% z@{g_@w#b(*&DRc7FeZ$JKqDIp^4AFM2DR7JjTQ??jpQ*y0ZNy*ls?p6n>t(5|7-Bs zh4%=CO`gIPKmY6roXp7oA&0Oc!F%!uR_t>B7#Q7x)qfAS1X|)f4~SwNu5pKqw5T}3gop9BY2u%?@W#Z#mbRZ+zRE5sx?q{ggqOuwm$oLn&f^MD$RyA95m!34sqn8n?1JNEshM6NlM7mYeCoF`2B=G^#)*_!sA? zQj^kU3!H2ki911}TfW*(Dz$@5`PV;IAjl$jFn=!^a^*AQJklInW|A{N9BC3ML@T6F z%GW(C7ThO{7!I;rGyR1#hsZ_2zY9PB_kkLqCS?Xl3Q<-GQ_Y-Zt?}(g)Qh8~2O>=I zqc+FXdQb6+sWU+jS*y-PXkuAQ$YLLX2fH2p0s)%$&QZ3qpy4XNVa$46ksQ{Q&o#5I ztbc2rmI8B4=8gxJ>uCwqecFua-C&?>O%#$G1b z0!vnNp%X|?o9(OPK_0s}E4%n4rA(5TZ-0>+GNd~BYSmKs2BK*TdpR4YcNLw+VqA{% za0jw0|UDjq;%)ko;cSEUk5_!a9Mp1X`V^i{J`e z{nBVQ3uw)Yoi$>){kvc`MlI*Ru-uIxKA~r+*A1hsm<`8h8*K#}QH*xYXwS{MTz^(q%%7T4 zK@BS{>b2H?^_MDMu}@zVcsq@ZQ6z`Zk*|`i zpfQ(wi-i0@4#FdjkX89WU<*~{2~$7k#OwTP4dX?o7Ucbtr!9>l(3mOw-`XTf(`cdc zJj|)TR3+uiUawyz*WqpqG>D_Hr&0PZf#w-?;;(@3cprXyO3UcBTYpZ|XtX*C`1i!=j}t7%C^yN1T~}4Z`2I4uQzH9qkr8};4d>U%yu6tSDP~04F#JEYS0t(=;A2yktsMbrLN$x0#U2d zkRmr^cAzU4hoiv2jDFozU|f7S_;%M2$>%2)z=hE$(rEZkwoXv1c3_=YJ2h;m z(Qm*q;b40GCL2r*Rtd*aW2=OtsU27+m>yepTmak|7dI;(S$~bM6WczvoV5chRg;=B zJJ^{EL*hFK`-VHnhJ=5!eLk>KwS0!37qy1bX*P{Uujv9P){I^g)~VI#iJzTri?0?g zmWl0{d20a4oj#P9W*b(_M94C_Fszn|Ef_7>P)q<~n7uYzFkG+w)}WmZwuw;F!!?7U z!0?LEVa-mrReu2Bep@vh6B}Q^z{_-DOnh(%Wi=cU8y>FM|K{#Zx7$XNM#0~6pF+at zVFSgaOlq-g&@e}3*=vRkrM3(a;xK#GKgtq73RLf$uma{?0vj zen><{?vasOWGp>KT=mjaKcC4-_B`l_c6G2_{EH#2qkm|uxwviCL@`p&f(|qdnh0e; z6R~-_tdX&xv)(}y-d^nPZ9Lz4v7-@DfP>hPBirG)V=Yuf*-#$P<;L#T&i+15%?_WN z&@|RYXxcM2psbOxB)4W7;hnv;1cT-8?MHa&t?fpS<9*OHQ~^==-{6GEbKC!t!&>gg zv@GfNcYmMIx*J=22gdPzqdVC%5A<7m^S!O@{rwoM^X_+cw|YB0BgKAqf1?}kZ0+sr z!P>g>%sl*W?RTHQ?}1TfuqXVkx3|?$fq6aH+IzR-?`(+GYJ01<-Ro>U-+yr+f$#?| z40jWt%-+WS*3S0x&i)pxf6upH;Q#0EpKa`K^?#l}zdhcqA4R*hqhl8eKZX@+Z{FE` z_hM^%?|IzW-Rk!CHg<0jS@YiZ*6xeF+oZd}zRr6)w}0ofDGr)Dk2z@KRcO+A-hG6) z3p+nPd{Ddg?K^oTE)x8?h_m2IyCZ|Sf#~w4gGz-K2(m*CWCPEs<(5)|X5|=1zRc## z9)In=#?t{OgFQGW;=#GO$tJfBj8A43N!cO+3qVJp)R-46Dv&A(8{gGKV)@g^?=YT% zn{X~)A|H03!y0>i;k{Z^9&11#D*>tiXY+!gtPbFxZU_@9i=Ho=Yiyr;-s@PB4$EK>{s{4!5AkOAK6V1n3#J4% zCY>UfqGf`yE5C0$dtCq|{&ffV*MEUyWze7%0Fi7!g%|#`sUNT+#Qu%-xaadAi+{~$ z-6-u$I+wFJ-VjaR$ZtBk8~Jr-_wNceh{+{PI3yRH-6|Jf$_#E--y=1~kMY83ke+sD z`rnolh>z`^3xC=HIlKP^Pq2qf!Mz&qZQkkW3^~{g=e?ck9(>&EZalAV%&_0b+i(`x zfd9{o6#L!h@E3P>Iz|m$jEXgAkAEE40yq_pmRFJ?v3i)@EAS zs!C+!Yvk1if6WH_A80JXEi02-*7vrAF*4I^n)q?ORoPY)dY)Ie=o^Jjt-XIRHJ> zkZbjaL&9jvx7RxT*5n1s=?ttQ=&$ve)PMuk5@{4|MW6!bp0he|rubCH?|Z@j1f9zOZ{CBHj5{CWoDxf(%ucDLQ_zFx84!xf8vEzAv% zet7b9Mm{uV?mRO0(G7G}q9IEd8hGE2Zh+DTA8y>!8)vJe1=vI6jekD8@tzJhzLa0m zFPC4Yqo-q4%6r{rAZ+~Eu}OWm$mHd*1wSQ=A;`(UJ{b71`f2w3w zl%ES}T(=g_4EWhE%_;kR!{J%Xe!r#otH=sMVjQ=B z_@Ujty%7d3Lt*Ila!TnfwVMqA-+ldx4gLBR^}Pun1DBRW4Tt+%Ng7e%eoKjoVK<8j z8wxk4q&95s)qj|nICtNaE5d9gOGBHV{IRr4e4K@|7TCbNW@cDArlurCn4Rz;p?v5r^{pPSjs4}qMV(!TwNV3>{i zq96roBM|Dy>Qk!gka8G}yo{v7B(oz23^n6t zrm6?v%70w>fB-~OI4a+ay*#$>5awFfaRfq!T6fgm$0A*;T}qS2m9>&647qpL@1ykU zASh>`6o|8lL8ieea4Hq#9Y{^8U?moWMYZ53kj~Y^aEV6S44^$@_9cne_9Th5e(c=e zk0gHYr(<4c68{K#_#%J{fT&+vh{k$#%m@}80Dld;+6n8#OvN&pO%bE?soJx&u!tfw zm)-yP0I|Cu8_Jc!fO|5ob}TJqPtxL7z^BCJYgS_;{x*=S1`KCwP`Zp6pwPK{FFGOd z0p2MNX%Wyg^to{jf-xny-h>qvL$fk_%}fDMmFCA;aU)U)mWT*u@#}1smay9LFk^J8 z@PAqErz*&-C%SKVc6JF?RY4P{(FF~Eyb);*nRL3AMo2^OHXKf~oUx}w7F6;UdS5ah zQ$tYs-o_wjE%N+x>R()_$ajA6G0SJU%Ki@70V4h=o%?YVs_38T&DkWImS8b4F>W+j&%qz2kq|&uC4E?=66GH? zSw@-QV^$ocXH*X3N6`9E$N*dRAJX_ng=0E5$mor%FAeV}Sf-o0!b8{G58b2)pMO*^^=@Pg-z%&Ac%^ClwgWuJaqp}CN zLyNPMK@pp&G*kR65mazoStE1yUtEZB#zw*ciG981t%spOXHp=}i~)^!Ks{Rfd^?Nd zGh}MQ2@OFX{TtxKo-!pA3I1Hr>3=Z|BcDOUIEkqNTK*UV3yz;hCD>jSw68BhZ&;eQ zU|m^c_!#>)ctQleJqU@uDM{exXfy{)fPnyN<^i^1(UJs_Sk@HB0`kI&l!GKUTdY_L z%?yzUya^*Y*@OjLESGB@gZm&;77#AGK{9!cpl*EA*A5aLvYXyr~Y^>noH9_?%V{~Ge6Utcai8I8in~d zjj}0N^vPX&{LrO6e4Fi_ZneSx5!E zq+^AHNaQb${T!2*G&vHLlr(|ai4~VLIi;84^DM}6D$*mK-jw7jN_oy$?FzB%6MW6& zh6R$K{44VnRm53_?TFbrmiTFONkz)Uzm^~GnSmlmgR_vqwJ}NrIe#WmswL(N3)F!t z3w)%K)Y2>sEIpWAGu|Z+|41*hDLn>DA|}Yl z#Xz5se{aG*Gm;mucs3Q^+(nBD;nHCSxo;8)WzTpbq_>m3&4~arq47Q|$_nV1rn3s@ zlon+M+oJ^Msu?p{sef}OOFX=YjTRhE{jvW2CJoJCNv8(jIx6+>d+N`r4)`!PGa&Cy z0kT|v&!*a&oPdHWx=3~F4IZyV0KKg609(IhV457tMKDX>`NhO6_XuXC3TQ{IEd80@ zfQ+OT#M6ov{w!Plp3f<>^!FOIyhrwc-WmourllWNpc=LzXn)|!IBm+HfrI00R#3g^ z2r$MBA;usfBlU`H)I(WTEskj3g$440Xe0>R*8TT(lAyegi2vrKpqjY^elk4d^4nt54zWh z%A1c_90fP$QGXU|M`DPkS2WZ^TKn0lQ-;3gs~j_#4H8%lNg8*zxnk7zEP&;G|S4@ zahBq#$$*0N^w=+HAy1+zP8}bLi7%_<7nMStLW(%!m?}hTa~j8ZrG_(FF%5Y1G&s!E zw>tL}+J6xSpAfN-lI8+*BWOuV9OYDKv@*D2=iqH4mb6&P;~ab)&x(@m$h0J|1o^xU z#uxpq)H|7_uk5K?u&7+imu$vi1 zozf&LDeQ+p$GB!r>1A{+amB9k99zXKE`M?TD)?^2BETuOWa|iy#9M#)a`eDlS{?xyS0yvxjhY`LzaBZ`QNOCrbE=$)E!A=2T z3$EUx;=s>kt-3V?#smT1b{~BZiGi;U6IX#xCar=%pcRWvqa!#OAzLIZ{AQZE zJVKR(QL3mcE#LY{6yI=(t4j>92!G_iF)%D03V48isI;gK^GwB4lvCg^h-elPQ8%$H z622Mu9C&o8C~(d~Eb13dZ5XrQIoEt^%BQsAB%i{9FSyfN1WP>#=fAoZes=tD-nr&*Mn*D<5mF`gw|`3Pjm0<` z8jWIDr%@CFpNbvNV!=+q!WKafC#Y79)FFa`KoWm?MW=9}w%8}rXJp^Ukpd@?1he~) zh~UDrsXxankCpoK4wKB~AdpB=U@qpy<4)NC%g8Akb>BRSXoNh zr^Q$$2uVeViZG97K5Da6YI(AYhn1!1X;4CrQVCdz0!^rrALo9<0!=vR$Ne7r&D$I!)EEs@8K=eW5#fT_LSsGVH#7P=b{237ih7T;A zQ6nPwI4FjVjF@ZnFpMFb;S8bdHZLL;5VADpj+Lbu`?M5~2Y-O6fJsh|m8H2qrjsa? zaOC)KI4B}mlJkSWqU0%O1&7So3Lq97vDyee5C`Etqp^bkoaJD!h~*Y+A%OF5%VZB( zSqefricU|_3^2m+%aTkY1mWYGIAF`t+YHpBDo)><$J8c zm;u(%X23PvndvnI7UeX%qKAGlIham;tR4WJ&@U!FK=dRZE-!I{XlXJ8i9{e(@Y>I1 z3K*`y&wuskpRy=5;z8nABsh*rIz@MrGD!j;bmb`$X_&$kI{!~#Z0KJhPT83nF2@Axugk@-769U~^@oeBH2$owdz*?8*b6P6)TL5s|f zebA)y`7LPPNwly<<`hI8xgd_kLLG`hh@6%O;ruszf^4e6l>xR>!2J{pNry0uN+t7CW7BZ396~4pym^+hkPE>`jtv*9fch{- z@P?F_fCMR~nSUE4GsX+N@Y9fSbU7PVXMg!f&|6_*Ux7aA+`tQ9y|XVWg57A#)LMn$JTqQF%59r*5fu{P45`Xw7 zl}~u#Ov;taBWMStuX3$e0wG@lJWt-vke5QkF;(MAg{%wRhs%D$baQluuFY`&96zSx}=YjeVxVqGT@U z63hzKOALaqcx0ytnB=C(@R@wbtbcIN6pZuBG{8-)Ffb?Dg(GjfhZ6> zMsWvJM)ESySAvO#hqcl}k{<`<>P$E@{2PR&NRhTP0@&ogap9q;jQscaEp+kX&UJ=NBj%R)5*|eAabE3ooUEHNyI=YJxJj?0&i29Bm>1r)Pl#65!3`J z7~q{Yhzz2QFa!Le_Nv4$u7A{5okRd?1hrF4MQ%zLl?p-F*`N9WTt8A>G>^q`kw6|( zXi-8&6$47XuUYy^E|{8>73Z1i0c1?-fK!Dc!4?&P6Zv8$SfL_dW)eaoiu0+0ddgYy zAt?R1prGu6nLFkocD)dyU54bB;DxT01Zx!YlAGdxv;5ZoUq!IKTYnmhaS`>PCSUa5 zD4g{d#jx&`yZT=uW2Ld^{YE>r zsGPevR8KdN5-n03EPs+GQ1hP5Wx906oB|o&2PK}VMBq0)@H+;8X*-=$}13JAaRq98pUk6C)8iENTUq z@-vMPqbUMk%$S0Td063na=|qz%@!*Gy425$Nmi=U42;e!_NV$CQ^4M6*R~=DAwOL^ zpwN$$*pm<18KNk|LpqPN2?kEQChBu8K7l+xMM|_tvT0?|xT`fYWl?^vk^RbJR6~I^ zsedc}>$kMdZ-3Pi3I=NhgiKWprtwi(-5T3e0C~5^{syR$8Ae1%SpyzSxx+0qQMf{izUk~jlV=M6olDDdyi=EKga3=Eb*VbR!(|br zc4^j`wGiaSideRYE_5m3dF6VnaUJoKcSPs$>cXXp!^F$K=4ij>XupQ7Kcz7{iip9z zR!l}-?|%)?cf<4DH0HbIKUtO>q~S-d9R?D4pTXz}oK1(FFN-aA)PC|bB0=Ik?F{|S z?da*4JOT402c6#y!z2e4OB_jNm%=VhNn$UYf{lQbY0PtT8q!jR*x!aCYM72jn4TL3 zc(vPV3ClEHvWvD!@Ugtr`h3Hzr<4x5NCxY4{(tnKv{|B#Q7-rYF;Le;T} zEo`;y0)E>C*uCQ|DORi4`xfeY_pxo78+oVMf`RN+iz71OpErhG5?+0;(nYQz{?8ll zPJajsGYQ9|!)5_Mt~6%h@8R0#yTgZH?+zb&ynBED+ui#IpYD#em%HkFmtkOl87JT& z%QQ8>tZPOGSOk1pn(YAwJymJh(>g-W@O53YM8LrKZD|$>I2iG*Dc3XA{9n0d z{_)tm9^1pSF?lm4pT^|%nEZG%B22;8A%6$6TJ&3p25DQ()<9?&nKJB42}4jqYkyE~ z74TkT3o}L2FPEeC)3M_YM~dvenPi97oWMkru+Rh!TS~C=LyiJ+nW&bWgE`3P(!NZ@ z5^)stFB7lVhb+{4`sL}DABO+_Vf4e7AKLct-%o!SiIDTdm#0t1r1YMCNuQ2M>VG}` z_m|>_rzG;8{@ecY!(iC?uctqJ`SN8kYTI9i!*1t=-?{zT8MVKRoPmQ$WAEv|UHr-* zlRa?%VGrCdU;eS&a()>6V`*W?!jQH6Vh{iAjD8q=ahAh>ulggW?K~v~*p2FcDMmj$ zB>^^}z5G(NpFACt(EH&GI^%dA zj{2{?*I;b~F@56o$g$T`19OOu=pibi9ypIj=k%r1k7%o9pR~P5m|1eLfPV?50)Nv- z+)7yJ^pCy&IiZ7@H2pQ9?r~cNHS)1pRWs_{A)Da@UaMwF>K;oAoDwgQqO^!^iAQR* ztnvgJO;Tz*B%&)-KEj#tB|NQrGs;Xa(KM{r$fDSF#e2;^R!*tkmq}MdC&v?}=VnV%$4!a|P=|g)$!T-(~-|r@Jq)n^>p6|}> zRNH<^s#S-Eb7oNMaycCJBkKKWJAEgjn_$JQ)tXUeV=kg{xtVD#|9?{v#W$M~uRYoH z&eWc|T;^K1=Qz$CRLMKL-A}aGh=N_5+=MKb69drhIZoecwZ6eildik=EgtVf2gLOgDKl17%g7=T=62VY$OnQ zyrEzQnCnqZ=-|qp)T7;GCHs30b_9=f+VQm>3`c!NH+?mFKrPTqja+PvUI@#e(~Uem z*$#oyHG6zl_L*EV&IV`dNJtWBrrdaNo0@35(=U8?n#kR$jDPzRwL6t@t5wEZU%y64 zo=r=Bv0N_ITBVk%Rx8^2`jr+;=X?;kcfvkGv|9C>ZHtvLu~Z&QnDjzpzGb|{3-D5G zPU%q_wo`4N2cvc|!*j;W$iXa%mP=uxOWRX;J2Nb+@a!%rMFox>IVA9Srr7f=QG+E0 zpxC<1rf>XUVt>oNhlsjRS}D{wI==9Dp}Y@}+zXlY!hSG_?Fq?A=(su9vhp%Cij@r~ zBQJO0;*v?*lQE9j&o>?n6}JhOC*Il2{`tlszukCwB6_i0<_S3+dmqNO1)_-#IS(0h<`2M(i|F)0zh7MTP>M{uk1!5 z27A@zgTrhuQU+hsOcSK8G4^+rD&o`*JUsop8z1pCx# zrJ_BIiB~eg2d#)gvyQk)CArZ}WxWz?cuD~?xG64|cuL3bK>|k6M6~1bA>zqk@MJ0n zG(S=*;(r)2So9*qXiK#VA!1<1iA}+h8f15HXBH4vh;HpsH=YTFKq_pL(}wGq0MRtm za*Yj>n{uc%LKslD$E`))q|yQ(o*bHta5A%d(!d@N3!lBr%qvMfHk=N(ifLISgF|-# zmJ{KQ-D=qqU1_VOp(`C^-W@KBZo1MC4yic8D}PDEXdxK*Y9Jk4xWjZrcp^E1D>w7N zUn^KC=8Z2^;|H-zBb<)J<^D=KQzTd-U*3SnnlhEtB) zN)%g;fc%yybCGdXJ>l{_{ zg{j<jc7Fz^JwmstibG|H(pa!6@p!T1TlkH%h;6Yt_CE&c0GIi>XJ z#4pOutc>DLnRUJu*PZDMwIM9+ytdCctFXe9%VjCOM0lQ9*&DC^Z#b=O{2?;@0o>Ot zXU+t~!6@}gBsLH|74*+-oFY3l1b<6@Sq}Mi&O-_2s}J9tZ))ehhuXn%=&jgTOlZ3C zhgOHXQBG&{4`57q!M>dmQ4#06R$B0#)nncHGz(|kD7QpLrQG5>3oh;{*_+9Ju>XZv z#bfVIjE$?tm1z6s2<I%r92C*HnTnnOcQxc8Ae>|6&VIrO7RvoxpZuUs-qe(vKjrhd797deL zD+r5#Pt1U}6@0hkhOc40osX@-!OT4#v&TX)!?j*F!k0@>QMcFw-fJ4iXiwo&CR*GG z$HuKl<{gghm{1}PQ{rl*c7KIUZ6^Q+^@uo|lDNsQOD5*pN|dP1h=*fgu^4BdxGV9I zl((|!K2O#+k|p6am;ilqjS7oEzp!X%v0xlhixtI1Y@MkU_EW*i9~%RoWJ> zZQHlvO_9iSep97WSX|}Zv$-TRZ!gjmtQ5_ey~reQW^{|u5kCY+eSaA4F;Is2Y9iM_E=W*mbC%nV>JeC= z=44xvj1)w_??h@JDu1`&6$t!6!0#3X!w2lHBbEt;cq0vZ(bl+%5soL(GRiMSp>_o4 zCLVBB;|%etPQ;3T^j$5K4-@seQOL|sY$LbRUte?l7<$9g+kcF_WO?XCg~~O?<#d-K z(RG37E*V}(jo|*1d{0zp;dEJg)h-x;8jMDgwmVb24xIilYJzpZx4F$jX zZfZB*bkp-x;V|%GPXVtKTWJ=eM+#Zq&!I*-{LQ(mUb7{vHz?f_2pDb2Z;9m`EXu3G zW)c;G%++ZwvSUPDAPS#&_*%LZ75HlxvQTG`x}`v2FqqgZpNk&`=hsA5TH*CUeWJFY zQd_2KBO&m0B7gPDCt6DG*zMqiIUooJA-^tkdiq%a^()YT6V*~`7d*AosiyPwPZ`+7POJL=(o0+qx>j~~V(+)y%}YFLn9YymB0ap#hRxnjhY^0?d=vTPCtm77N%BSZB9(9$mCBSS{+1b?^ERT;COUG8%=)v_r z^PX;roRi7kGs}Y~4{mV0%W*jm!OeNU$XyK^=CJ*r=}NeNC<-_99$B zkr*?4LVsZ^kj9(EuyVcQs6-MwDs&%?0R!x5M5>Gb-VV+($uAMks&`}>XXz!SYJ_Z(aY84fsKs-W^vt$)xlc!IP;XSwY53ok_W?u5i!1whH9 z-E&rwFNcSBSaJKornsQvO91TOfzyE7Np0r@VJg1q^270_@Rug}=Ekc4TX46c;>16( zQ-_H{nJaw?r^(cdD!zLL#}MUyga6?AP5L3iwGGTxF+rx-)9Mxc`qa&#?OawQRZnjZ zEPrHp)=Bo^C6WwzH6$K!+zjd3ODo}L)U`u@j4KAHlYxC*9nVhDOid@@)a$W1oRo^%Z z$lA5dk!=^8WW)iCYWyh@I|DsDjDJUNyuvm8QCBei`p0fw`XgFqPT$G|WV*H+$A`ae zu+6mCSVZL{o0S`XRM=U6teutYxvG@2G6}CcthDc$n(5l9nbxKzYj#IfRCIW=JFL|b zOBT_#k7VEUQiT+rB5^ZuTStx$!A+nXj`!`<8F_S=DtE}5KcO*}2*s41@_!2WuNyMo zSIOnH-@$3mXMKr+tffP=;%=&UIaBOZrkn5IFVD)IBL?0b_vJCP(2(bdIlvfN=s4rg zSq2N56@n=#D+2#U)}Pjhxu+P4t!t!ve;7)i;oQygi0N1lEgxMJQ}_X+5Eijecy6mpW3LaN35>591MIaZ0ur6PC3!BrUYrvAJoRq66X%!sp05@xm;?+m7JhvjdfFMY>GO z@1eeQSS_nf+g9J|&wrDxC{5|~-KXRCUMU}n<{;dDU2<(a=?oL);9h4TJt(EXey0bk z!rqHk3mE8DL6gW@E>q9*%E4yO)t0ZHQ^m&ETU zEj{>--IAnU`l>rf-7blE)|WlH>lB>Z0ahYFT&dWarD)xkxqm67crFO2y|uMf@Y@kC zn<&Y_qHtKUmysN_LO%RwBx^%0MzP&$u_SAIJ-SOu{L%PjQ)5-6lIt|E5hVgZy1<>I zj#~;Bl*xTQ3x6Cp?FJeFrWyfW?*k8R%`R}wm+i`SQmSFHFA}w}44@o-eR7sd8-l>r zAemPVQ|(fhp?~AY0$%~0J5fYT3dj}1X>d4lvK1L98A_mSNaD%}x7`a1q z1*v5F^rovTDo#j=hEk%zh6Zl=VTe2)(B=mroXFukSHusFNtbkq&NX~nQF`ImFg#-) zrKQ1ZO<%rX6QI{yF4I@NomMM-xwDPm+s`1-d)9@}-hbZqi#-VM?eFaF0_555i+zMV z+v|3p1J|haY;SL852hG?)Ipf9*X!-}dcA@5|NcM!x8<^18qNintY})pkkkD-ZS)5G zbjU>fH$cLlpa8#&`uE0i1EktfM?l}K8&ykAQt#);#)o%2qB>;EAzwlTd$Z4A|JC`& z5A83V&VLJUaM1bv-<|(Cda5m5lbPPwO?Ej3o-E=Nwus*LzNq**#3H|hPaSf$ud00O zcP^pIuN=17dFpoluVLqpyGm`hw6Qk&va_x5cTuiuQkS)8XIsPe%t#H=N4(vgo=DAG ze>{WM!5R0Ij^AA8w)N#pe)n;;grBpOMXZ_SIDh2IXPqqchW{MhBef_fQ#Lhy1!4y~{p#S@=);P7zWFTwR z1m1QWC-%1GT`+su+kWmm9d7>DlUAqi4LZMn^*gu!Q(TLh`j$Dc02SVvI90Tmp&6T@ z>VM8yYid7xx%X^myW467ulAnp^j@@D0n0R4R}nnH#9q&F3lFC)yWXv9RlmUJ3|yzE z5E;j<4eYnj&!7OMj_gMTLd-lsN+i+W@YIpnq?t4hihgaa;gv zPaV}9tyb1P=6@% zO&X%yBGEGt-ZwJ^9~uOHME?x>`7wqjRBHexAnL^eYXP{E>Ud%YisvfR2a{QP^$E?9 z-|_x4H8p&W2O9_TCGaQn_sHkW_4eOk$Om$dq@I5HJ1(;3~ z8qV}$Go%wOQ6cH%wN57o`zx|YG=IRc0AoO$zfL#@2H~K?YK=qQWiN2@A+N6y^C5OU zdMS?0qLgnQGQ#DQ1YG;z+z)M`c;AM-5m5yC4fHG^NtDh|l%9lS;@F3BPEX)ly}5~M z+ig-Udh071LI=LNb9Yaq5c7;%tz#);|II76m<3z?{QkuIMsC?o@TAk}JIJYH$%cPV zM#Jt1dHG<~9G>e{L~kT7dpKlJt>xU^dX+>p4f_{U>R+u+fK>!lo_pU|h37A;EplY; zbCDaRkZ)Jyyxo=$)~ReGdD8B|8C;aZmc(z4Ln=jk=VYScJTP!R5jY1bmxB$gO{oUG zun&>dA8!TBeei3s*0fr67z|A?rKNwozFt9gV1br&I-TXEx4GF^)MmW(b$pq8f@jXd zo77LD;7H_o=Lc8XMo^);yA@11@akkQ5|Wd|F}j#T7lrr_k_CYQAqyi_qNOUusv7&Z zH$!-pU)T=57>~SYQz42Jo`_@$3gpTtb77R()M_zP773Y?E3@5mEjLUF{#1W6ENjTC z&6ZKMcUOFv59MMeG11i*ti$Sp6?^cqBk%#Xc5L2dT)4E=7pDQa`zn^AsVl8u!{nAxHJi&&PtT#9jeNXZRbwV%MfU81e7KAL#hAp;NGdV|F|kJ zgh#mtPoRRwhgpqtA6b8wgZcB?f@W*4WpGyj4eb;hZ94CRTKAn9he^gx+hcPIU7Fi4?wc>u(YHdcU{TaS$cp2mBMQk<5 z9JQ12Bi&W#-7)v&fY_9oF;~U9r~5=6@IL4VyweIliEybZ62N~+i{wrBT)@>Zm9q|= zZnRnm(x#hi$QiRhO|>+PVwn1Jz)osNnKqab-k*!Ja8_62H%VG;nf#B~HnUn-sdbEnnXROK1TqgwLz!>pfLXh!kRa|fGeMopYLzPz{H4?!jVMnk`_y zql{Vty#YobX7xXk@CSD8ZIcH4q;>qH3O{b{bqO-pqfU>cov6J-Ccr0%+7t1Xly|oI zaRW~p$kBf{7u&}ytG1DdnORZ-OKO|?P>~HG$O8$?a+Zbdh@1t|%R5MOedv092=D@mg z=kR~}vpaxDcVf>CE}O~>sdjgkOZ?sAzt5J-F)K9kdL*#tPPK@jQpDc#N(1167doze zZ~0EE6}8(hceX2;;Ti}9K%EA!%B=Jm<}(qgq3b^|6h?#vD=4Csoq6Z@F2jD|wQe#4 zlp(&&?t}*Kr_zVrO5bzGeY0O5(23)w#t47pT0v~E5~$fslvGFhS;H{poL26|CSNE; z#GAyAV(Y&N#S5bk5W?f6AR(P}p6!UW`O0JQgN2*)YYX7qAy-nU5sDX#T`*YSPl0I< zk}Psvc(1Pu@9$ON$n-ae%o*TyEjBstcILRTof#bXOk3*(N!`Lb&H##WQ%KHR`FVfr z-RUe{SMi=d?ym^o(EV|rN%Aqlr++cQCzK8c)E7t|x`?`YpOx(F zEm-m4yX-l zewjoiMJeNl>ut=@oQRRUeP1-Z)@gqm;m7Y9KBIR?o0-WSi2gonz%Cp&TZ>r5y7)ZC zcH4%+(vSFHQt!VMV0tnZ8+agnRTP1r>-7Crr|-9qoUa-$)ht(|If{@5ddt+1R?@`H zv;6Bm%x!yJrD#KVT@7o~Hbyg&(if0-brhb0op5%=lUV$$WASq{nK2$sckO>GR&oz| zRhW1J4mRzH8fTqY>K(tSkp-_dJ7q(PMU@%MJ2n)MM~ct#5EY#&Y!kL&V2DZJ~j z+G7a97L#xZ!B~*n!du^FUZ{V(LV2DyPFt=oGnx3qbprzL(fk1Mo8HcrRC6!L>TZ4XYMJ?xEBpCgX$*l*2M)H5;bKYv5>z8$H zXBwOjw@20Cgv9U|#mjQLB=BxEf>qdXnxJoMtB)C_UxkgHaqJVqAv2zf=HX zd|t!2<3jUZJZrVs_=;iF26$n-6tC}BA}F8{tKjeFyRmv@V)g1jhE?902{4g(~17Tr;5wKLZUX2j?J@e*6x4dWI8t5b9@i->hq&I zka4yUo7$l{D2I=pqgolqVhU;)&>!f(naHaQAzlykzCE#T$vH`d$_`abtUvpj;oON5 z1ift>kz3gwut9sWOW$*sVC9@D(Y9w=&}_3%O8Jph{{{7e9aLGV(Mq8+&n|Q z@^%M>jSR+#!6jC`nTUZ6AsvtvtdwHO|8t)dB!o`fvsMQ7#4#Ai_0k!^-m_LhlZ>@; z0=#1V(l=>bD>eL=&_```{VQbtARXRPla=(H^dCF!8ZxP|SQJO@K{5$itza#g1Px>o z)XAh&y#s%QxT!Yb&iC~R_sNt2{&`=>R=B@tf)&5%^XwNx)0DS=m!#efh*_>Rqp;EU>a1eij@*W zlUsjMb1!!Z2^m^kDt6jD&v}NjVu!+6FUf9806gW?XB=#ffY+HXQ zZe97ZnqwdV!*b-R?890~ow8lc-02}AHdx43vfmFjYsJ)b%^U?SU(cpRHU(K{Een5I zzgrbj4Om(E9}LziJBjncJw4tMMMjQWzG_a#TEG+5P!iQpLLP%RP}~I9chq*=T5BBC zDDX@BFV(LV?W~mQw5}zVg7u^d!oiK-Rym!e0r+oIwhgr==B z^^{?DD%CrsnSV>yQQ*CcOXo2a<8T@EYFCpe)8&l2ea#vr+B4w3ZeVZF%oTt1bzOEX z37@_xNzqJ!1IJCn1i_!{Yz3I|&BB8Un%Vgk&TC5EIak8ktSM+Bpzowi=^r(oaDTNRSuyCJo0N)9Hr==jT_YXaQU8h7aUz=?Kbn> zWJr)Z*n5p(+-ljTCB%~azHjFQthup4AQKR`km}0}eNrrgja%lW@|51sxSV;xM=oPj;AN^0RYiZgFtA=P7Yh!& zoX?UNq-6tX$Rlz{E^js@?SRsB0FzZI2!c6?b@QEwV!Mt*HZ`YqzgL7y=@vWD!M!_D zp>3fUQA*<#83-yDUaFBk+PXuw#4y!qVuWTzQLzckXYvUWLID#o&2y5F1x)q1my#>5 zOU^vuJmIxBJmYpU=3;+dvzE2nh&`}t*mwFFTWVs5%#B9bxiFE!6}9G&0+g_p%UoC0 zN?NT#SI3%*R?qL79B6ZnsW28-ut2PxLtvSO>FMPEp)&bB7KCNzWbUzE;jMj+T!Y2S zKrV>Ea8Xgy2!;!|yP8u1lc_nx({O-_R%&RQm`I9b1E|%q&%A#{yGLF#^q!eRB6O1G zN;OVEco*1;E=UCkWj?SURfOZd=4PT7-fPkK1*{;P#!tL}&xO~+#fZEfF5113Lyp0X z3*_sET;KAT9Jf!@3173Lb4<3$iGyek)y|3Ji#QJ@&L_jGcF6EP^7v?h#sW{w?H731ToZGbAzRd1Dk zH5QPvuJvCN*{o{mtMa%7f$u_sVU7SyL}3{u*1c8E{{nw6czu`u8tb>t_pP#gxjd$1 zm`kNllx^%ko+G^Zm*3l3t^XJ^nJNJVyQw{(z)Vt7R)hxsDNVks!>KaS8!ngk8X(El ztElG7F|kjSOg+kUD>hf8hBQ{6nLeZ^a!50f6mU=EAf0fQ75wh@P*D5?gpy7)UYJ%v zp+1A}6%&6rvrkOni%OR?4&nO7^RHNbMN&rfueEA!wrbwos`WAp0Z5gcI8~g2(ETmd zajA6&61>SRvznrvMSX{T#oV?v8XABLyy=q*iBjOs!u3LX4yE>{~yQa3iiK^YM>S80R&Kwtz zqd%@0lT=L4LdlT^pd=#q-hQK!#JU++S1&LpX5OqS5xjcw;sw*$P+A->di)pkOhf*? z+iGnFt(Nt55``g6-)30}KHAP^Ie2mfYmp6_U`+jkl_g0adt}qqRJ)~I1LXc)WWHt> zsmOn1K4OL@rlw6*Z=?xTIQQnxav8MSfYfyqpQHLvMI&apv_o%mRze(^GjdwENmcDb zYl5tJTQs1mG}LaRw_eolP~NR8G{`IuHVG~-l>bhcIJ?Uw{=Q(pyZ3Acq7e&yJPVI! z+wHla#_Oi`!g$Fs>nl|8>&t9jjx%^US(tw>CryQOQVik5;4akb(E{mh@d$cb+;?i3 zn5UNSCc)h7LtgCz8wYvpS*%@R5g9tcE#P{9B$l|jLFJ8Am@C@?rxUbBcj}TobbybQ zFch`pN8mPIgImy#fyaRvPyiEhi%2@qam^gup3N?qu2BqvFtL1>pU%a*Jl?LEKV5%y zZSm(T63v^hG_iiA@#pLH0i--{zfnU`_FZcMNOP=HO8hovW;Y^2NkCGf<$Ld6pe^SL zgbQTX;DRXDDlf*~*J)j7{GP)z8arTj*d762??mZ8)_QrrI$T4#u&TkQ>pWh-aydZS zn>cl~PJP$J^KovX?vNH`lrs0)Yv6y<`1`%l4an9Ko%)M)QH`UK8-yZcilRA5z^K$Z zZARQW*mPJ_MCIm<8icjoOzp9i!9cNTzOyriR4AESxM8s&((cpq`}B-gS zaRZGt6zUxcbOw(SW&$CY9bN9EDvzTL0x+!cNh0lRzl!R{V$7sz2a?t;A6&m<&DKpH zpxHSvESq+AdC8sab!9uO@Th;?gIt4cch?~)N-DBemzo)>y4f+t(UP39*!1UlV_tYsUGNGu4fp zuRB3y-7zjM6}p0hU2Y0PHMLd4NT|tHt;c}Aftm%f4zZ%|K`Cz?L#Nq-iCjo7=ibor z!w~$Kpx=iw3UD>|!DNeaDFHv1889qO4;I|#bl~TF{|-1^TcDng|Hsp8mcu>};$~3# zQ4CduksoJc{J>ozn!|s8+~c8^%ZSEdLD}b6m46wHXH%#dyjx>M=wE|a!UqNZvW$}+ z{M&|qJMeE8{_VlPXYh}?$%Y>+XM+F0WrOC?BBo=ShUim6IMyIm;z!UFaIC?9(7y(; zQXe{zW~>bsu@DDYUJ4v!q8ktZJF$?fp#C(N;77oti&>KRQ}lo4UG?s5xb~jt{=WG> zTuwDl8#8e1ah7;PD@Uv;pB&7A6RuU<+s9%|ZQQ zz9Pw*xVgSDjpub4TlTtFb~<2W3x!mxX39(fS=mshuY|goRjjhIkr-@YDTd4JE~(Uf zr5~2<-hJ^>_@aNaO}5ST`sSXyiC_;m@|jN6gtt$PL+NS5sY7<0)L>lRP|f~Rha~#Z zRh4%N3yQ(LI7Nmrz<2j6@C>ehXDsqrHB$_t(Yl78d*HYyseS5H5;mI*KXu4!*Z@wA zv$EOH-Mw0!+rSk)MV3r0ZjS&@@2a^KvA0`qu37#3q3VAh$bSX-84Y}-p7r|h;M2j- z>T4x_Gz_x`?RK@50ZYUM@XY#r>Rwf=Jky%|_i+CjN{TM+Q@Qsrl3>5UqrNEZQ%CC{ zizlC{SC}pr#1qd)+G=fPt=3th9&J?eD$1d!s5o`%IPT?KC9afmj_p|19j!A)%&M5NF}|m zb8ug&^zP4w&K>Z3V72mbbLD|qVo>eav+5%0Q?13py)6#VHi}WcPuEY0F8}SR5jYSR zL{8&y%Q~oC5D$RybbzK((S zS%(kR*Jk|acv(la_w;d6C*j9X{t>M&6J{;?3D7tvm|$VT^1!?W95kLJ&2sKNRUNE3 zRsE#j84h&*&H;a_y7+G}R&?OF7v>DOp!XfLcqa?UA~Y4G8%LnekG}wbDjWB# z1rXr5$rl6Y1v(eEK%>b3zlT?$8Wg&$xu0w*X|U^V_aoibKX_n{SF4J@WzEf4tHrgt zr0eC>cyTyseBme3!ykVMxc4RD3Ca_m4l=xv#`+rx^eS%XEBJc*8l~Xjeonb2TZ12& z=B&1s7xbT2QdN%m?x~k!$IZjgRKCNU$6ffb5B8)$ffCdPl?t9p>D)!is0lVAu8$xn zU3sZcUkJH$0Ilk6%2EdmZ>|+#BMtW=S=CJm15?vv2y+rL5psVBF%v9%L^*|)JvXw~ zJ)LCjsJ-3f?WEA(wpJ^7;_lucMK@TaH5DJDj>qPzl}_120BSz4TGnVM>WG1WfZb6xc+ z0(y9T#fca5iKl;L`Qkb$sfA{J8cFVzM3VNMyUa7=II-U^WtRiIiXE3gLfL|w$Ul0<{D!X6ZA-tB5u!mj2dC&eIl z69@XbV0Ua4|Axg+cziJcnOiq8D$oU+RnD_Hb3nORBR+pz8Zo-mZ>B~O-dR&q%J=oX ztegfjL9o@%rlx^e0lVMCa4nS~!j|YFgb5cR%p9=$lNlz5vWhvI-QEB!+_)$X3a{X@ zB2yT&0jfkn*GEdx!-gbG474Fa$bs){J4V}!=5B`A&3vt8rPPS2nf>177K2`}-`(y2 zMJp#DS}A`TCj={vaIJ5qP-U%6lji2<4N3F*DA|9PUaAq|0G;RreeihZYS1W*4F?-* zsw~H=Zw*}>@}T_b_bqkbBa*Glq&AAMGHF%CiTPgn(`l0-`{_N_#4~+_s>$N@Zb7&f z(ci3~7E|{Sa5^{c5ofTku-#F=PW&s>JTGeg5%qtY!H2&>WahraDz5HU2yN(kP}!W! zbXha;1(L=4z=jw+YC{w)F7BhcfUA|=qf!<%s+0xr%An+{j~d1bPbrTWszZIK4*#OY zvxc$v6}MOG&{#YQjl8t1`6;IOtN5#!hP{=hVRdsN%J+|sqWAosCl(GBliJavA9JGn z6sLd6in=#%WjGV9EGIPa%P2r$b=|3t-q725&AwryR!)P7TC;p+X0_Q4Z0t?Ykn4Io zuCX?yjB*W;VKCzwz4CU@iK1crReNWwHmk&JrjgpPsd~*iESOl-#eml}32ZN~kx8C7 zF0)-Ls3%?PjOp{7J7an`CxXBV{Tc4MtLhyJYg;av|1Pe*sa(Ph5J#WWY#P!j4K0SV z4?KickFJFECzWcjS*s<%6@?YMCepnIbt+_ktZQT=+`Ezs+*Qqul|vfZ=hPPl!+3uc z(;Ac5*poIl!Wl_5?Jns4rY4cb)4zDQ{924Kq6{c7T%k4UnjxlIt#-S3+3OCf{_NaJ zql%Z?ASbg1?SuPDvHaD_bcDkQmtvAx1P|X#Lw%@kQz+g zTzjVfI428I@p&`X4#AbuQH9Ho(e{7U`zWQ{{1;g~pbBTNA%!(=%Nj7zkbY4`_~O2% zgGo>?YuAqM%@tXWwz8mNopvJseDBF5deEP`J~W{B-F)h-m2)|pmJ>Ra$S=HylK}VV zficwfd4>V@8eow|6oSu+^>?O+o3qIj`|L;bjw9<~BIF6-Gz-?G|MMlQ`SO3`oRp*h z6s$Q$-#7Z8E3IatC>+tRYPe02wEpw&ImV^($^D0G=_T@U6wx=Yh{BY{BkaBxtya-w zMv>PDvGt9=FgO0D^&1+dyQ);KBj%bzKeTh%u%+tn)Zc}u+A8(;M{e)0V|NpMezscc zkX&nR-MQ!2M4a;r3Q9ts(~y5ofiiVPpnqylci;5G;6K;AL!p+b@d8zQ@KMh|O-3&7 z!1O)iV^`CFKGk^Y=M!dMKIKnK>xyc%r0PyedJX}7glbw%`4Jg_k22wg(s zktKfodyi7Q)h*o{WwS>d^wTD7K6XtX?V33{c!B9Qczbth?&rmO610EA5n8TZLAa4t z-R-;a{PFF3?Ji6hHRQFok%SRM&Iv+NcfQ_zz&&ZCUe}s5ijoqqA&E&y#*4{)`XPAA z)J~p;EqY@dVRu2x^%L$lWW9Tn^#vHpq7UAC4Ywrf(s^I0+Ql;@rX7!o-N)EG(MTX{7C?%)@=7*|sCL)swio|K5{_#9F$4 z?UUGXDrRM*tae)A(MFfHaHPzf#0$nK&#+q0s}Y7~8`-J#WaU;cn@(v8HbGhA#X%#j zaB}}fn;ve?11;eL{qtO!vIMr^&tuzJk>BRtFJoI6-*L#VER28p^x9_l%UD^ltQ3Ot z7t;@ZpS#@rr)DTqBPcNUTxC@>lfaOUNam%QFS&vQPNS2KT=PJgI|ERzcZ0erdvs~% zasX0IHM=qZskGg~L~K$+kGxsu94od{VLyar%$OFqnZK%IuD6OWdBWaq;>E+9v1`zz zDY!-z_NEsvld6A6&D=|d3zVnXRMuFROZ6?@ZXyYBWR+sM?dfL6f7Zr)qclbl4EVEG z_fAioF52z*r4*X!JDKuMlMdritCd!s)Dca%#mC@V12_qgWVzH6ZiV!&q}XldA1_o5Y>0gN|l0@Hdmy{LKs~cEwG#UjKhSH;t`Jo90SO8iff`&y$e4 z$05_54%pS`)jO|CL-7PKAq*qQks}y(V(%^h=geLnwk^}F>Q20Pm?05BMcF0n4RUUy z$>zeFh;B@DA?%{lbI?j)-vRS!MW@q80U;)81a(TOA=qy^+YY{y2ko{rybVHAot|@- zwcA3*C%S)x+nQ(_JHMHT3zy5_<@P||?=)jEpQe5k>!-Z0%w<+1(!T)>s|2h7D z0sr^9_`iq$G36en+}p+fn11gW{@;iH&$ivq;}!X7?%loUxl`a}UwC)J&i2TCACO_E zhrc`9?)zXx$_wvqXV0C@$?mor&Y=3?*Mx+#5qZ{iCv)>Z@yvabklr@@ ztVn+fS)aS_#^lAe`((T#Q>Y^F(yI_EJ>OrE=)${$h1MNjh2-b?XhnPp+1(~D_C_lbOUP(N zvI~!o*yWVcTPjTIvsdbm(=4ac^2WaV`Za%_W@YyEtBZnDuxF*pmRI5W7s6N#KM1)$ z%?ZDky72`GvtX9cv~>S9Cvfy}%L}3%n_PN9lyH_CT@W^--SmRMzUxjeR^;p^xya(J zGGmYJpZqbon*+bpwT`oJ7E^2GX#06_VZITW6RXn`6R68(OxkLtIANB{o|rQTW8Z%O z<#Pt1?0dQFuKGe6^9~IC4B2*umc#=HNf|%B&%5XL6Nezb&OHbJr+n{pj|ol;_mI7+ zd?WJU59!FA3IZb@h56uy&j!R~I=FW$qO8liXM|lyz^PK2E!-=@4kIqr3`IjJCSSx| z5N(-u=L9Xvy9qa=@0DBP1jP2;2?>9di45&Mw$;jn?-er9Ef5D(d+mgU8^q+vYa5&( zXxxWQE>k`nxpoRpPPz`62!rjXGdq(ZGEp%GA2*;N=PsY7FYGx~ZmOKl_4dpzSZj2J zAHK?)bQDlQM?PhQG*e416(lpED=*@>`HT5=%^+xKJPatiC3*qnL`wd&4IO_UM*0C| zyJ*Dsh?hn6VC7Q}*gh;;-&=jEZLUyAKs}L;M8=K;UTka=Zm_6{vXgQ_8+jci2#v;t6~T3rd+yscoo5nG=6|M*F(rN<_(o zg8C6ogHM$F1d0s@QcG_-^>4PK0)Op@I)mTgziWwQ4P`9o;CFi%(UIf6g;?!n+J3KU z{zbd+I!9wyOm0c`WkZ6gNapP+51#WEmLx_L7#%gv0mP%3}jQpma4;T zZEH1^{-irzigVtlo?4^G3_{hxCJ{hQF71SRjF@5K+miip2SM2XI9>sLeO>ViIQ9Y6 zmqo-vjlO0z+<1QxC4o1gxL4}>gU%h>C%}(a3_BHD`(JKr8-W)OS)!4)fk;@VXP8aC zFdZJ1#zl=p3ZO0gcsYja6i=0jpq7RmFj1`tc(G354dHIiM>iswGs4Z1InSWd8CvX+ z=oQ>vh1U%5QPt-=eV{h_0s5{IzH1`KEw;Gx$W`{L3nqUcxH3~L>gp=|7Tf1@hrkO2+#UGkE4F`|JM8`y+rQ48 zRfF+wU53}-S*j=O;sS^ge3xsp;}W}u!@r3xgZh7(k7VvYDe;~TzjVKpU(zqr(bKVJ z%I<<6ZK>BIC0IjYhQk|$IZ;1NU@;V=HzDW;jLiGY3tZW5ps+zEZ<{vQR$6SJVj_im z*VG2qBjdJhg$M$Jx+L+syh*_L(@(Tr;66Ru+uncCY9;M9NhPx`d6_Blk1p+#WGnWI zk{y2q6Eg8mk}Y~o19Z0HO<&(Ugu|> zm?_l@!pIJ`_sGBYsOWi@41#X=Q8tj{?GwF)zUpFYkI=yFS9WO#NE5jIRSYS_Rw{pW zJWNoVUm>3gB6>%0Hb~1kvSyd=_W8LzYHw{jTmLoMy13o?_n4F&`=9pEIrne-^xIYW zVRY_KwywsvefrIcl=$=so*wpBWUzg$?W_1w&ko+31z9xl0z12;*OwZFxRyc=Ady5$ zveDg)^gJ7Z~GuQ#;8-fJ1l?l)Be8LOV6$$ryi3x!ySpxblCsK>F-F&^@ndK zq%6g@{r%d#UG=7u{N*wc0wguBkCzqi1HS8uw%m0f{B{dkNUU01+6l3*E^Qxv^aTB~ z3D0o{ljT` zL?~|O$yMs))zagjnDA0z zCxV~ToHYQ%&PnYjGra)ZvY~%VpZ~=+qjZUui}*rXH#n*NTSDlD==e@f8*>;K<(e;& zlT)!uPHI0(2(835cTzhov{E5m7v($@6FlNH_3%zGc;S3*RM4l59=J|w=LsB_mqFy@ zR9kpq3ge{q@k~83;4GE_u-og z<}>)cZ@_*gfc_!)F!2K~j3<%vOR%=u$vh5bq1Rl6_3@%{i~VS9G2cF{?J(!GanCIp z_uNfo*St6Ppbr_Q`fZC>=_HNBiJ6^NXj?_+rrmOqRXV=Ku zKI7$LpZO?7GIrT}waI*tsF+*qJ>Oxz{U#7Q>@uLzQj7Pa5~tGg<2VQQ2a?odkrgwT*GkXw82t%lRa>Bn;2okUrd9 z2ftn01r_{%>`(IIsc{J)GE&9usxfa*z(r$=Kp39QFa)?a4I_URB%mVVZJY%z_+NWB z&S(*gneD?Vdmh78<0`y%3Vs@Tj8aS~yR_Ng)_<4?}A3d!Sc53Je`| z5RANHTs1Zn4ikT6+5BE}O;BwZ)_@652k-H8;+#qNqi_?R-hSW}Ze#}m{iC2>!E&&& zu~vtvzAZ+wukqk_?n?ScLCsN;PUjCwUc_??Zu^JUdP1V8h)Bd9v^~YuTCMIlzX949#H-CQ=qn$B>%OdDyX^tes$=R#ryUW=;!L9*v#{~&nnkh0j5Wj! zP7Vf89>-dni-K_7RM{#^4=fl!*`q#WDXZ=JhiZRa%Ci1edU4@hY&v6j@?+vA1!BV! zao->Om}vGa^8`@h#{|`G^JHzh`E{?jo!%|`TAPBBP(1-rL=cRgXa+pB77&Mb2-W&A zJVu`+?vIHykZCszRl?MKrEvA06}07EUzOuf9WI8wKVil_D@5S-lcX%O*`p^3K@{)3 z&9i@5F!6EP(;2F-Dg<~%pCm&CIYzwL(g%qTG*_6mK-)>7Sw~N2^ygx<3zE2`=yvH@ z(Cq@2Zp24{6h9_1)DEv;Q9fFy-RG7sIaDAW4=nAAr~ zGnx4Org!nqU%IVH?c%T$x`?XX&y9V%4X! zSvs?=!%32)MGHNgS{GKAch$8nTDH~AQxO6v2=s(m0rN8GTEuCE#sB!f{-3-Bt)y9y zSY&XfRseT6V=Z9!>OP7R+`svD6*_;0Cn855;A4BihYWJ-B-hWvSzf%*PdwN%n*9l^ zCkW{7^Qp9U)z_&3Q+LM$fk!S+0FLU@=Lt7DuR10Mg!Xqh;@&y_8d5H##xLLlV@$mb z`_{c)KF%rJTxEjswU=7gu0W9`ucO97N;vNT_7koB0s&#*v6h8$M8X zhR2uo1)MK_5Cuih3Xp#&Yfnh^*%ek5s*^~oiAU97&7f+u=*@paV@D4u-9T#UWb5p1 z_Ez!?@g<1n>U_f@Sj!?@e$JZEzrxY1TSqV|I19{($I=Q6 zRHTm8y|c%-kTQP?F7wheChJl!?aH21zX6uqqHq&u4nJ^%X1};2m8AzzomD>%6Oi*B zVj~S7{8sBvi*!NNwsS0tYj#=oK_DjV=X@5KO+KqNsjPIGC;dn53u(cF*A-`H&4+0n z!WfHBuTpOtGdnlHBCl2DvD<62rIV<@XFac0?Q51Ky8eH0^IWcGng~&oBwJJ{z=NPs zdBH`?%o~i!n}a||de?MzyrwfIwr*-A@NNR1JFYc0`CC|>zh->3X5DL_a2u^P^?$B` z7;k({Yx#4nX){`*Zr2J_NmfSvWMH`AN>?qes|>gqxjy;piq*61y$17#qRC%9`u zz+pW&;-U?x#>{q*yyS#%pNP3$@ub3_4k< zDHkZ)$0RL)3hpg3Dy$StL?lEa2z_CK1dD&xjw+k2U;0rrURB!GbPM-gR39>{tVJ${ zRiBGdZ|~bVoVZ`O?~^L_FoW%&muB!j?|D&rc^S-@3R`oYSX0BX=6NvN;_fM1fOp)* zqR_5paR2%x%jEqla^uJjEBV=J>IS_l9vB;080H4?@=631$*HGRIZWr^2zDJ5@ZNt@ z;ahA7Hd1X052X;&?N;BgQjKdo-k zwXTvas~geKckp2rT)}hK_8#|#)|S=v98ej-77^jWUk6tZOnEX3Cr3f<&BDvT|L&tR zvc&MVSni+kYV&U7OOg}Vl?qdQF`s|w`uXJZZ2cDVo9NoKihp|Wu{Za{{VC8UZmXDq3eTwNfr&hGFsFPSl$x%2m6s$F*Vf{#MN!e=TpFZ z2rw3tsF33qPjd7LNX}DhjRt z1E@M#ho~E-v2m@BE~{ZxC0UN#WcUL7>YSgi8JPwLV6e_tPS-_`%WS{|Gfksll1P<0(=pPr zRz?Grx(zfW$>xPQI4*x!1^8U1A1o=|k}wltuzZNyWZEav)VO37ll*22BxhS8h|b0< z^pDWpBOZlS2@rta8xQQLEWNht?v!OFO&G-Zv5ZZjjSE1e6aSjaBcbHH0a%(5k>14(N1=8|Lki8PEd7R|`ky=0yBX3bG?pw?i;#bRE;IV#Qe}c|W57yN zj%tCAHQ3pLq_ZsBDp_n`nxKSoSs|Q2w;MiEaU~1)=W9C|g=6VrtLaI8zSgBRqAPiE z9u8}^7?1m!X;77(t*D{}0}<~M2PXGEGck0n1dR+WpmA8#cHKUbH&%g{5{!~r!_(Xi zM-kGLyaBUv9kYL;5KGjGbvmYiQ-7y65HUYeFSL;8l^sSfOscj6c^I{POIEZ*e#_Ne zhfmStvh|^rMwVmEqxDoAMNHuu+0w=X>D7I2L)jM>1bo-rb;V_@B0u}=UY^}|f`xlM3Qs?AV>5RZK0%DnL`_VjxI)$HWm2;T{uS z%XU0B9}0h;Gp7bPB)Gl`j5F0ziu;c|Njzg$1HV6}OQ72eXnN$2c~^r)r(-W!@9u)p z@|(2)PIb^O&6`l%>DV{QFokV^SV>)DdIM>obPz)k2C0Mfy%Fo1ZCKygU5h(iv$$Dx zarH3RKirpi6#kbY9IUIu*gc?6VhAp*Ogjy|3cr6|1%xK9b`|8zK*Hj?AV0n%?>Pb+ z!U5rNz>Z^^2T!&m%JGxX=Rt0B=B$zYs`RSFei%uDPKILcBCK6>V+N zrrLsoiXMl5!$5|}iU??URbG+1Z}aajZFxb)lFvWc-rZY}`T1(0HE4Nu>E0@xm-FG0 zA~WOMTw*{0=jkOk^K(uwmB0VnOM0cm$vgzY$tC566waGV^x1a~FR3XWbe`}M=NRJo z3S$xH^(DILt9mb4f%E)QIqe^@-_EB?%`^YiB?bO}!ujq}IKw}?q)2L=U-*y1Ac~wH z_`~aT={(>ugTKG5c+J1Nq(vG>&i6U}IAMRa01Dsda@I$Bsh)oZT+ldZf4m0A-Ts0mdIj5VlVeG#L^B5J4NLb@FbD@n2DvQliZGoN|gtKnTTJG zMk&q5ZUGFq87i&ya*pct){obY%#xQ=_zp_F)RDpB5_>d;7w8AiE_E%K3E|N~L)%yB zDm)f}DVyux->!`BM?Rg7SqmfhcigD94W=ej{*0e1yRj@8OmQUhwEu zG`Y?lf91-oO6=3u!S&M*ne9;@K4;ArSNZ^<*6}1;H%g|`v3){epw*EkwWdykVhzA1 zzTXJ1U{A~jb0m2*4)PqMVbo_6kG!$1Pi0R*>bvk7yucKkELLKvD$H;~)l^mSj`baX zdP!L(@P7%DVxM2Z2zR8SIsXhu&si8Og^|WB5B}h-+}gK;>YC!0kbM~P)>h}4j@e99 z42u3|ud=y% z1t>0G{OGF}41wjV;7YwZo~ZIdD?bfXAIX)`2VX30Y-5!lMBSL#6v&X(aP(Jbr~iofPev3PLWwH@fkWmp^0*L1_K#cz6YqU^!MK5%B^35(|dQ+8*WGn>big zrwMS)sBdXPBYWgi2w=P%vjOls02uhVYG4{YTrzr?@o<6mQW3;h9UwUV%4$@|8FsBN z$yk!5@3^>{tq1dy_fE6t|9ji!xJbifr@t|{oQYi?3var}DLI!!~ zWPiT|P(jF^_Pg8$92_mjgbh+V({o}a#~O}c(6X8cr)wQ~fU zs8>+PqYlRn^BOJbf}M{zE1OMgNl00;p9|77zeRm=$$h#LIHQ4ZskdK2B8G&w;uxLQ zrjO^WPgMn+zPuo<=<^tVNYm}RJWZT}MrodZbP_mJ6k)<}w!(e<(r(+}NJwbAT~Kj4 zg#<2s;fls{$pT^|*ZC~)52G{*bn-al%wN&z*cS8*h*y-Qh27%=8NQ6|=gg{+IF+{S z0Ftg?6Iw0B#{N;ch%{>R|HuFR|5__Kui|kZgdMsd{xc0f*4vnW6kuaQ^mJlBU+Q`2 zt?wq@4NypF`OvbuHslHI+66a+DIDXT`lD&m_@ynDKqw+=iaP@g@{rXi%Reqrtlm`T z1v9$%`4X?r)f)V*CCqtp3vacJ)!s0;m!&n`wa`kbvDz!SAZ2+gh274`%4b_?5`9Ed zAQ6qr*R)Okqh`B*CoL6|svwoYm2XD=m}!TnDjtp444ncXv)KzwfZJh7-E;l0;v)WeG(_JeYzkb zruMO`$22`|5-S|92)(&uTdlJXE0jstvTHB+PFU1T!T{@k@EZ%w!VoDOKEzNPe&~ca ze5xY}G_)mc$1Yh@hV_`Kn>i-KV`p>>_PJa#=4PYJbelWubPgMoK!>J!QDE7zd#Hq; zhfR*g$DPh`#n$Io*!n;e@54$+m$j<}^f*U=#d`#^n(uOzK$G78)*ItCl?cnWcHiX5m=^BTHdxwy&8$7zebd z@q7+}%gsvs>PQ-sj09vD?8EU~?AN~(VeQ481*_D5V&puU zHOX5=0NQ9xc!eFyGm+DK)4xQ;Gz9+&_eqK#6_THNK1WHeiXYkQ^&%mtRqdpjp9;)y zl#ZEnk~zvnDktMW3sT8P4`xE58XdOHF8Y zB`@hLGp0D_3Vmzv4A9%KmE;$+#TU!Xv z_UV*nN+IUUetb=k>N~e9ZCAy7y;Ockkw{ck5J!$Y24$HeD<#z$cx~W%x8{s_OKL$S zOQxtxQa=EfBAXe#bnV5^dSrDMUF)$0M>hEASSxvxUkDai<-3l<&lsV~JM3uwp^^b-)mhfw#@j^|b!bUsu(kuN$O zXshkIvpviTb_&|klj_mzZ1qVur-yC^34OdtVV{t}>0=1Cc>1VTd!WvH^=@>E48x7< z*~c((ViS5Jr@?1&GdLCNc>`I-)kcDUN9VMgOPS(AcM6O24O6Gnj<$0|gQLkl4aN>X zD2ByLug9#&)Ov-fh*130!;IW1GyYsLU9kK)I-6tu{o4M*mN}2 zLL0V#CkZ#WS(`Ftm(#^nQI6N?_4 zoTdrf_4y^qNhq>g&U~K&+u&U7%X>erqhtwG0HQSq%30;crI~&p$PQ`da`C3iw>8(L%MK131gl+`P66Uk9t9uq|;eQ zqF{TF8wI%&PhKgrNuIx%&eHkyw9{!UR+8Z*3xR477D2jiCAW487;w09GkW62bNf)T z8EX5x9^Mc$LQljFj^|)i$R4!e&6ArXT_!)U8I`5KPxv${*{H>WF#DZ<#vvl~3}d~8 z#@#F)Evppk#`St2c3F-VW11o0ZV&>CPG>naks1W&hQPhkxWk|o26K!! zip5Vt=*BMvzf8o~PTd>bl4x++>73U3IaU3fO6_*Fn_;Y7ft*;{dPw3L>&dS;Wl%92 zKnb91gMI@Vs|Tps*L|LUF@FgJxkT?j-Dnv@QcU4=M)3)IJmm93Q3)$jLJy4{&@Q_K zl>{}YC6XTq^|0W%YaohYFa(as>vj3zy1PogS|ts4;I!(}oMX;6a@MVl!BkfZQGe$LRK$@5dtNC{1>9B=r#qX_ zEcs()fV@MQS+UIHQ*aR%_n%@eS;A}h!ImiJNpG$_l$a8Lgf z3jqVy0S2qZWY&7+(O0oLz%#hkg;(~gSe(SKyuqv3?GK=Tav(xeV;*h*MU^)z>NSKy zj3#d5Ero`n{5mMKwwUK=R-`i;DIv+UnzPARazlY(BEyt`=_8Z%0=XM;DFwbATcEeq zJ3(nNxFzyW+S(^vAzx{Kx}9}8?bsaI5;x=$gc8cOg@bST*qTEM3o-O10mmB!3ApYipxH{eOjIUe zw}$u{U(nkif#6MO69tkz%*8R6&uph;y(D1CW-yC4f4L&gbi|q&yQ&H>?DSIIRFdb!TtG`m6wS1d@+ydBSc0Rc8x|7*8M!$f(l=RT; z(-T!B%o~rv9Yb?lIMrO0Q22goGz9nL+Z8=@cL}vm*^yb6;@h?+E78i7{1130&;bq( zADs-k-9v!JRBQflOs8!oOvEI1Z2e=`vroZ*b?JHtAqu(($e4HB>A;^9S$w#)qgHch zFNXGi0;DlFZrtgB5$-T{Mq}dObIP8XiIdQI4lcrjy0kB;3PYuLvWlcGG`|KaHClz#guEcpg_MvjfnyY7oLgN``C~Fd ziv62HfnKkt$pwN@x0`ZLJs2S~49RU@Kz+rpIPs%MGC1~h6p!U%@CSbSLly-nh(G#h^0w=?Py8@qOLIkc&MAcl6N<4p7!0J(@m-w&eYWENb2PWUotO*BzB14^WE zH^@6ggz6ENKI*rP#>_~)UN2XPawV^p8;1Mf^N@=U2F2{-tuL(NnRFK|DNv=ONoVk0 z-RUHJqM0&C0C%~vv{43EOo5vhB2IjNG)`DngodDw-wKnwm?R$5MSu+>5b&g6VyLIE z$zdfta+{Q;jTuu5QAG6o4LP}u*uv#f9Q17F-d;}ffJr|0sUObp=jA@c`oW*eeF_%i z&WC-P2bp7STbih7u`fd?=zibz?2nD>|CI$?pG5vvJkJZZ^z5sz$l!GfzH8)CKNBr8`$okHrT(HEjfmTG&aa+m!>E+4~kJlakxSjk+SWC{K{`2<}`d z=A?y}5*@=wJ44HIR8D4Q?G6JIhK82K0++x9JhzaNyusfYP_71)_KM&oAI5zmB($LM-X#v-NjBpsbxfAkw5{lHAsR}~2&kqe;G^(eb7f|RiFkp72 zM6F}Uz_7KIG=L$|zz~8XW@4^NS=x~DRqg|=LU>kaS7(6EsDKFWN;Qy@xhbr()FMC^3HG1pF9t7pL_UftHs7F7^=)--(6b**iP zq5^o-O&sm|{?w3jVbCod6L8a0Uw|ur+- zUBA4t5pq6Au~g%?{-d3tE#L*|W!yyWJD)cDb2|QNu;_6?>%Hr#W$emgxmRO1Gk|{($$%nqix^(@7t1^@nM< z>%bOm5ZfWDSGezgP3NffYEn#Vr)RJAI`D#U5%?{u3l^I@4L6&(s$nO zYsdOqrHh91UIh*Fm2R!ww(n3!gGm#LXkc@}L#Jb9JUOf^7X|QpGRIBI`B1If{R%uZ ziEqG+mS4XB>LBx&;XswWL51yjGYwi+HYuhJya%u?AG3ge!w`%wCW&Ok;O#H3UuclM z_^Jz`8ZBYc!H!widiwhH+vg`|PhLDfdi&({_ix|){_3ekixr-3V$L7}K>?GsZm1Rv z^_SN#^x@^+G>C&1e7LdOUtYg>dwTf8(_`$IW7!)3r51%tKsM-2(;OK-7?h>C(VC!p z7DjA9P+g9H%8F8N51>+uzyMa;uvwBO#H= z%?H;Mt}bf@2{i7vwp%jPb}pAI+60JKPu~1s(MT$PVEh0o|ML3zVH#&?0x8l9v3s9+ z?#P;kd6CXOVlMLn(wGa59y6Ll>_Ce%^A@!@hi{DsXe`zW_ii!CC@yiJdFs?R5gmmZZe9-WOW@BEwNH| zRS1jo0l;epD$iy;Szo>v3jbBEH&>9`2cBJjXsAHwFVzCo$Fmvp3TA7qA=T{tF8FsBjB;R1h1N1Grmc(K1(QP;np0NKqJ6Td6_3XiW|2->@jNPj z!YqOi!$oe9)#jxZyC;ad49JAR6+X~_s_oH{{-DfK5P%LX5VpE24CQ z#^1@a;A&D#BDmW{t1!7r@AIj+)$wnCg<;{!tY#ctACt<*a()<-4Yd)nsB1Y^mle{e z2dsvA2N6H`kozW~mQuNBC7rek6MvXRg1YTlj=rS)tz1)8h%vM=ojjg~Gr&{MC-&F@ zX{*Y*Q1TZ~1^EOXQ|_r}vtAx#O`8gB@a2|5jv9}^mLK|f#_7;<=b7Sj$!`NCfR(SF^D%>JB90g-tH7M~T;xg8hN{x%M1mK#1+|&%D zU>A_C=<$Bts*UqEldBd+E%)<(){}jM)gia}JPT&F<)zt2vEPBm3Zp=#$;HA~ENZ%5 zG%1RSHw|WrIq{mlyqX2UrvPuQAOzrL>8Btus=+?P)X0(-%IaOpc>B;Rs4`YCaNE2( zOs<-^Wn;t**#?aFVER(@=6S$E_sd{qEPVTSp55M|VwiH^kPvU>i#Qd3RdTIu>8gqk z#1cdMd(TFn6iWvYsq`@_#%_XT#5vwq=GLkPg#)Mdk)RY#anFw2g7;z;uh?qZD(Nf# zTqAG;BjCP}k5y1%^MZ^L5Z;I5>atW@VWFy+*=)P7{IEn33T@%HNbD|81CbAcrDhQ7 znu;N773B~kl$DW=%IyY!Z0EUaU%fni{^t4F)3?u0o;^Q#{^obAxkB`|3!YI)z*YL; zpXDqxm$ntkQrn7xD*WKt7QVKC5QHL@OF}?$naxr!$aC9>3WdvJPJ#7Ju`TwAMb~vq z==)kVvGI9DK|u&jhLO!TaKe&6yqcwPh+rIO`kRz$N+~vV^omD+7UZL?z5mvGI+iZc7IEL+TJ)zeu;8#=+w?ow>;3z zE5QI2yBX$oMx1mADrzSYbBn=Pkb~s(W8%p>B=85}oxp}q?g?RO<)hUC&mg#Hp& z8c^$(5E|B20`d8OQ*5Uw#apca_?(}51}r0O!dcFy=^I9PrG&~V+}vW^c|sUng~L>U zG&IpA)z%}$@Ch<_!dVUuG*OQY5)8&GO9yBov7s!BK{oVY74u8L?YzQ&NbINyq9L0bjoeKOE(Qiz zA{(_}+1Xk}%v2C)7t*_A-Ki(~k<=cjE1VW{pf2WN!EHdaNVlaS{dyW+;XbJood4kuk5Pn zmpH330BSsccBcbosm>LX5c21*&SY9)r*mOFW?NY_zYde!Lc1F<_)r;Yu!yYptJAw` zUCxWvOo(tN2^Uyqvmg%&+FDLSZwiTZvRQgLxr{!xCapNki^)yk@JczbHX$6NA6s4; z2mI>J$9K`1Bz`MTN68GVYWh-8pj%Ifo74%iN}nmnUdS38*}Ywcx5kL7nGblg3M?N$e3xkoM~zxh!L}Z z3O8eaHya2u`f#)ykNV@mTi-q{- zM3FBQwou&zHG_ydOk#L~C`efaS35wRbY8&C4vs?ugzvv(`+~k2J$$eAP~Tf3m(?VS zWP4!K)MA9P#gEx^);muQXThX^n>2hcU$m}&!dYI>*5yZR1@5$A;k4$EvrAU8$ptE4 zf(b_2QZFug`dFU{bKEu>uSv7NfJdX=tU;3)3=Vy?b*(_8_wkDilnDJKjdyzl12_Za zQ`it-oZM^u!m^OzZJp0WZ}%+MeAZs>GeA{PA*P0eZmOIjt7EHWiE9R-fwlAaf zayy=c$+kCz{47y=-HUyTJ^;5L`a_JKW|iD67@YmnzPtUWjXe#^aOUO1l4a;DKg3x^ z9&Cs7)xJBrg~VizHOY%t{Fp)S_gN5sr;A|ga(;dNaSH+@ZiR^))E3*8){2U+YeV?U zt(;(H2=+1Nn%Kf(Kv4E8@B#k$)+F(_AR4_^W>LsLiX~6E3x?Tv;J;z@{&xF;&wI#K z7pnFb=tULQI|_1dk_CSi%v=`8o5g!8Yv!N49bSQZv7nP>?Q2*j?Nf%E;!Bu+CVsY6 zq?=oVz?p9*v+JNRW&*_pE0$F=9X)w}ybQWw`$tv+5V9hH*0Q15WO<5#qF$xBIq}(K z$wEkFsa`)wAQbw{>{^yW=bq3}@}9M4*$ivhXoD4D1H!?GeJ-9VAJ+!lFj{#Rdk(Qn zf0>2+i;3@V&1aE(&3c;8qBpXCq~6AhFuD1>CdP^p_|6P|(5s*`O+*ZoV{ zn@1VkgGa;_fW;U=^-Y2)(q_T*V;1m*FUtop^|4e#2P~b+(%1OKOXh$%1gGA{H^2$~ z5U-pcVhU~=5K{nlA&uRjH&4Lx_DGJ1E?rEFEP+#&&-UrUeZ6lJlqxNMFWFz8{h995 zDf{I?L~3>p4(i^1nk!|f2NOF6HsRGi;WA?;7PbR=3^%wK*gMPWvWKpH+$++dHD~8D zJm^QX*oLjJut@jCe)qT+`qY%Cow^Gys<~lSfrsv*C$7<_^oD58PxhgEf5$i#%q7x% zBYCemH5OC{%IJFNU)J*cNeMoPg%_i5-k@I~|{G5IRXjQxHwIhc9x3!_V00>f-9Pb?@cl^1@@?OY?!WC;H^-FGdf>^vpf-Mi0gV`%K!@pD9h#v2Z`d;0={; z-KCIPeq!mt9?)Zm3$ke{ZZT{ZP3?nJr?H$RHEBQSbdsh;=ydD{Zad*?N9cn)7AuC( zOk!D@7Y+!-v6ef3rg?Sgksi9?S%prDs0Nk;wwiju&|+z;WWhfzmNGQd39b2~n27X)*E&e`k%(7(!|0(W$=a3dc78$4r z?1`4AfSZZqFj;_g<2{YK!Xio;rEAxg{m--~)b8`5o^ri^keSa$<@x#T$|j@p@t+U6 z{~GqjkOS@f{6WVes@4`O{IF}>cqV&hzJ>^b#@GqCZ;>9ONstd1Tu>kEizGI=CL_on zrBX(K;WIc*@C;{}ii|-C7_zFMW8r3_cua5H<#1We8v%87I`t_UHrm7JoRR08?Y(Q#LOkjl6f`fZfc{qVR}5(2@U>aZG=5KPd~^1`_^hP>-HV{rtP}#fb6XE8s?MZ zoA!pjBP&8pN!KSbJeJW8J`v}WZ0{2=8t=MCT4se4@F}3}WsgO0=XTdMSPFgE$9pKGh-R2kcXU%65<@Yqv@C)c8_`68r6JIjQ|Fez8d)EE{C)*@lj~Ww{5GK zgY{5gEZ75c<9ITq~Q-COPy*&%gq6 zGA~lNsae$Ge=uJHJhHUBTCxFUT74_;54kgc`eT1ggQN&%&TWMLi_sI_sB^YYD@9L1 ze&PJ;Rm*=Uimajd*X7dr)mzcvLs-1m`!)P-=rda@zr z6VBS}2^4(AZOgT2bjIiVwa-6<#eKj2srhRR&Z2)|SV8pg)EKFsT8vMYk(grIO(qO< zwzn;2FoC4;RD@4AB$2;wo(1CXnkoE$rY*#WurQka1K%NQG8%1jiJ^18eZE~TmrHx- zp7#i`M_aw~<+w}i(VwmJ;(T_VjQ>TpuS5Ft{+Nz5>SNi%NA92H`FUQRpJ%tPR`}<1 z#hN%@cE?@vU-oDVFtck9o%8b^e;{3AQR{kc5&Gl)cua2@N92qwjmoI>%@CGMDk90fQV|K{fe=fH}#^j=fCis*kv;aSuRILJ`?cl&p2&wvc=WcOeC*6=m z00#H{Y7Nibgx+bl=%UmA#yFy^&PqiT0QdeOME9=Og;%Sw<7uIy_f^a`KDcO)<>o8P z?a|h`|91Rfo53I{RG3~XGp2m!}M{TR~ z6`?(6@aXaIe7qWWA8a#LiQS6Yg~+)f;>?T(fva7zc-kv&zg7T(gxV#4$0C+8w!r(B zW%K(|%AcVI%dExkbJ62;K!UF3Ru+Qur1CSv$7(M$?RE!ptcey0FxsnT5k$UN55dE) zpj*;ASd=ig%bSUvkg`06xTmo7AlxHdMfbiXt8}lI1;LGbb)W?xI8Y}kXAiEg;%D`Y zM&SW%e1UPHdES(XQRfhU0`ujyB3^fXAozvm{?dE&7uEao*Y)Qe=nTZ{U^4?TGLPSp z#*lHAi&`^{K5792ZyVAS|H1tSALR~Up_&_oMI=)RrV%_l7}7sV8flWU>A@EVfF6odhXC97|DoCIGj||Sg%+K!o|}1R2odTn zbW-N&W%u+p*nyK2FTy0qA&ejM+X-gRqI7A4&xITlq1gf9&{ts14ut$n6LRhh1II=$ zJFC@dwJg#Vk)Y!P^Ooudc@ZWEh!^ZKXBNJmEDZ`uE#!!QeujL5;^9e_Lh`$ZEac=_T8s_*kjdI99fC&mMdFh4Q`%O@=prg)0o<3&zrEyApyyKMkw|Pov zheYMF79SWV1W%rr8wbE*4SC}eSx?KWkl%#*BHZKD-JzK#Z_2c&6f$h;2ttkV4^Bko zV3Y~5-(&EGnz~vE#D+Y1q!nl(t)Cv?x1$r54TVpC;wXs17$toXZWA~JL@aJ%)M|#qfqRId#lEyb7kpl1O zfTR%B>H8S%>4+W(^%P6VpjP>G6|10EO90+N%Vmu9WE1cK*MC|TJ%KytDFm!M7}|#} zNK#IJ;9ugLqF?p?5DVE;f3MTA*=wGLr3OhR74AF7lVaM7Cm-xS^+)k|>rkBQCvv$2 zv=6)1mIZo&CwBt_agyFwcVxX36;zO2N@!oF6kWS~=#rOpn9JitAvIGMF(R2%c zn{Gx5UgwC=6R{ERs{5`!P`xL8L{Af6jHm6o5Dmqc@QP;-GQVJ(;6!F{R!X*3_rW%` zEaIq_QgnX9iZ1?fp&dL=4LZ#O--CO;P-JRGpm9^yEhE2{>GJIC=$hAWzGcz=4i1I~ z2N-(k+izG2P=Wr=K9=q8^0NNku;1T*bF>T?r;QvWKMqwIWQ!e>HO?~Sr`Tgyf|N63 zyE=QhnH5WQR_yJ8GUqT4#J3fO%zA8f1uuq-m@<(#Fk;!hLsfE5V^ly$)#F+)+0$JI(2v@}L+4b5S;fKbN`NnVtUJFetO(3*{Z9wj}- zqUo)RvQaedNxsc>-HU%*bUG&oz_`QRjcWD82)0M9^Fj!z!XD#c2!}Tx+{O9%KQ6Qz z<=#4_y~x(C6z<@t%7ZuU{3V@!KEITzb4?~SAP`q18g{$s;~ZmprNn_m(~%oW9ljxv z!feBWrPCs}7+BN>>7mRM2Q0gPpoaldGrQ7I9-CA0BffR`cs__wvzwp?XT z1roy@DYzZ=RXS^380}wB?wSpOc`jNP@DEh_x9%CE#`>gzupAz+xYvAtYG{0yyAW8* zy0C`U_p`|&c;b0rjN@2u!pIMBLbw_UQM!Fepge9KniPR=L5x_F-lX(ck(3zn;jLu4 z>H0mucO%Kr2u5hWhv6c-efp3PIyHNK24L|yTa~#UqlG?ls|+9NL%vWSzL+G}4{r0- z1sP;r@io6qv(M=*zddAs;`Y$8D2sjLjI0Z5jDaW>35Dx)Y?GDjA22iyR62mhaU2K5 zq&5ybQyYc_t<{Eg@sEr93`-cJ$yVXiq-m#PtD>|Eg?iUMp;MdCOiLM4ISs0}awXoo zYThq+q?P4Y)>mS`d}V#b_sduJ*)MoS->_k@17pLSTNjvY?Bc6`=I!#jVY@_EY`ee) z`O5lgXfX+aV`;(xs)=y4G${-x8^kxlwQ-GvXg}FROY2|i%=wo%bKc%(4~aH5Y$H*{*h&B58|iKHM*5g4 z-rMT@^8-W^l<`vf-&C>$b*+IWU%8%}c5|WYLArWyt&OyQ^bes5u@b4D2?l$&hPPpo zhiJM>(|N&vt{hq1SH=C60}b>)=bN3CgTd4EuD?{<{F}epYJXj;D~}5>pKAh4uR%KZ zk~0W$zX=Sm2ok#5neDl`KwzGWHy691Q8*{|0^b}SY>M326a$f`*Vh$~J0Nhlo=aX> ze<#ai>`gj<1-3HlF<^j)+XZA^4%6f*p53SO*({wWaKC<@T%|4t*0RS}QUEOz1uN=x{q92Z zd8^9JlV*ycdA%Lzv0Y`d$x1e{kmbgbDRsh5XZw7AHat(t^O?M)FmAal!%E^Pc^hMi z8ds2I+_u|6&kr&Xdh#ES9+FPyH#o~M>yj9E9)RruBq!H%y)+!wl4WE+!o>cD%0Bu8 z%&{A9(BYMRWb(I}LjP$nf#o>GL11bQ`AUl08QZ%~6rI*Z(Qy7qzHMdj#~JX*z=3sb zw#_YnkQ;6>VF)nBB9GUWVv(0-?AUWiA!3#xN7wDksHnMX{ZopmB16l>C;X)N01<7Q zW-CMvc^6nz&a|wVzc4TRXUoz!RTC-_0Q`E!@o9>Y`f=NudyjOdY7^6`J00Rkx>F~o ziJl8R51tespb<={+|3Dvz>FRqus)}UpEP=Z=#QI;Bd(_$oOP>le`AS3IwzZma z^!-jl-)RH-f=>k3l!7Qo=ME54HH|&j*R4TWZyn0|^3sbaD;R)AO!ZAnHEy{BUtU^& zr(J72VjvhOs*duu0sqSsZH*QN>au>{*`~sv1$^ay5@4t1KaH6oiS4 zhide37Yq=P)3fqvIxD<6Bnmc}3`f;Ra|K_EuxGAx##j28w$hn&QDNdiT_hBNIEbqB z*oM{TpXKP!i!rgfQP;BJXZe6wgfjtuWI&=We2pJgWpN?U^zA=KwZd&)^gx!C<@TQ! z<%KFB7*PneO~?)&$k|a?KJ5JkPSBKz#b%r1%av6{b!E zWJJS0n9Qx9s3@--g!)m0dBOTSosL<`V(RxFb~@9oEgC|AD!yh@f(Ml-;Ei^F`$Psq zj=Ett(mckrWxUYNp|~RIMXqTeWuWko6rVP*dWo69l$)9r1IyN)o;6&2d-+<5f3>Es}*YmU0V=$03M=ZtM6K#;y5 zys{ClCLdU%7}{wfElvo}Pn4^FGfXmd-8(KsoQ}linU+_Ccd`{Y)9|cO1SMYSOahKa z=iCGhW|*8eN|)|6M8zZ6O=DEeFebtOTqeQ4n@Pw`CLzB+ldyiUj7jh}GYR=$V-oTT zli(Xng1?qY$g503et#xm?Mx%=-)Ns$f+1Gkl+`OF-xQGe9V}BfkDtdj znD+gDpKCCghD|v9-`vAJxQ7R8j2+W8()`PL6a~c^QDj&A*t(BQGAx34lR%PnRTD>E zbh%7&t63)bgCR!6d7#I&+3ZUQR5oC-k2Dp(^)G8^ZCMVVULvNKi0LJNaeB#`>ESr; zm?I1X@?LF-33tJP)v`H%C<*#NUg(|Ry9D2{u*!aTg@{}zIBKa|-XCnj08C#W_+<%+ zummFX#QKvjxBk2%14KQh3?mC8rn2)cwb-At<9qJ?>E_o zfa@x^3Sm$=WVC}*)1a3!C=K6(OxH<)K^mw`UE(ch(ybnZ9AMdh!+toxkef(fF|#yl z{KX_Me#v?LYM}_gAY0Y)H>yAi1}CZcp4&OwWMzpzw!pk81Lu)UixQ(ffJYV#jM=~^ zRRefv17Wka&aSNU9Ae?Gt@rRAZSB?DLazRlS5^(>JqogJ2YsOCT9(5^$n{oi-DWvj)O{@lI~+8p$Y2Mjaqig|XN$ zKe4AV=bp3_>S0a~J;n+zg5+QPr_(20TZv@D#G7HDmlt7P98YF9Xbjcv4|gEbb~W-t zKu9+gtf2^o(d`dVE{@AkmiUvw9&`qcqtCNhF!4dlzDM#!awd&5+d-Po9CTC(U%NV?2y z!{qP?>Bf$|7^cqJVRvKA0pQn7XH7PlO|EB?Y-$P+3R&|%9Y9c& z5mypX>1m);j#T=@Xg@VSD;b8MB8H?^m_%U`9Ab!nLH=;el1DXq7*i|uKm3?~fD9ZW zpdglE!3PM6o1^O+SjVO4hQ}OY@y}5>3%nv+1lCxK&hq;~)kRlxs_8-WTwAKwa=DT} z9oRWpkzoTcoeH)P<^rD3z(`a2+ZwXQM*er~1y~Ckh!?AH4_kpteuex)yFy+zTp@p} zYNI)S`0vz3cmG=(eGhGP_dl+W{$ss9TEj=pyT7(+T2kb}!f5NL{^P-Vrv7!11qGU7 zmE}W3_(u;vkBy(n7S?n0suu(6AL3mkPG>>m=^QMpRxUR3Z2Dmdt)D`m_FVef2@?Jz zhAkM>AgX;58aI8Bb#S@o-YSc7=k$Og?RCz7Vv44NPY$TEU~^s{P(g_uZ;wtVGygkL zook7m7Y7u=kUIVzMH=i}@gK_1c)CXwHFmN+icwjv*dtrgy+!f0 zQ$SWgb!KRL3izD_xX~f_IXKCRRNeJdI>KIXoXRy8(Q%lb3)2}#hirdnCV1%JfDnBO-u};%Zlx- zyjX=-;Al*b+#6%&mh|g9d2}e{8OPvc8r$HEimi(&9od8)YkJhb**Q?T_5J&-|HY;e zEX4?zYw2>Yrx4UOzdYrnN2SfwmL7xe9%|&QRKY3b+~^JRnY`d`?tk{9re=6{N75i-`1=v^%g7azDA zD5=!Cxob?RxBQ&WfYq!DBDl;<%1*>MT^s+QZ{u8F5ehW`Y^b-Xm1$4R$K*L>aW(xmdn<&lyn zc^+OTHaPKDYCx>0Rr68rFPoI1+A`5p`k6MMLZ=gTI#C0>Y3^EoBOaRrKN?yexc{X! zZdtTZbd`2GTzVx;UVz95q~rsIlOSBHfK2&~QWBRx!GZ2y-K}wX?sw07#OB^~t=m3* zxFXvWL}I>?sY{+2SvS}A@s%PIBM2QyAs`fm$Ql&*b_&Ew3s5y(Dhth9%zFu9ncNvc zL`Dixl_&(wpvXvn(K&w1?>^bmy)Dw4APGMO&TBxlx2QphrM8d*<6PSBo4F#CHOVsh+1I!(fkS{I-)5?Y$myq<7nYH zhOqb31n)F4p&oH!a6#Xzl>3mrFgEx{)ewBK_sx(K5j$Sej7|xR9N&ChmhkuB>$1E~ zu*($8_N(n@M*CB(eWNDkefD*3PgP8j(48c<22hxP2-4Ga5L)vZ^4kLJ*t|8hZ}ZX% zYdbwTqLRTv*n|CVJ01Aj?;n(9`{Z7WpZ~SR|8=>HxjAya>6k90b3nt<5pv<)c! z6#zni++K)}-{mgvnEK*S#B2aGtnM0fci=V)uj~^jhNG9qsr;HqtIT`0j5dTw`jkdB zGUKWZcwRS*Lu!?!{GCfb8FQ}*)6hG1^tTlnkPfE~+qyofPIy}Rn5A@>&XLkx7Wn68 zf`5*okP)`^Xqh2S#-w?lufaOLYMVE-hLg*GrD?Ccvw0VzQNjMV%%-5_~_x# zhU5YYP>{8gI60EUP6yoj9_nz+2~7cb`I_C9bKtSfU)e+-%mLGNXU$ZJ6mT46IYk02 zN%Ye_>Qj%{Q{qhJYF&c;F)kDQ0SmCbuRERNxjiLAAF5XA_LR`Pnm|$O_u%UxM4DDpyO7&qWlOZew z7Cd~&R{7x`|GUrs9A>wg}cpM@g zhls}^;&F(091lUvx(u~4 zJ@yq2oVRn(Z)znpT1mB5PN0KfUhgWeboKQ?eZuTxvpDAsY4LqFu2I$D;Ck9- zS?4;jQ=rBvY_nAF-z{0f$w<(K}#^ionyavTw9z+b4?eSp%&IO9&G{Og7DRI6K98h#p#mh&rwKY&2N-!!gsR$U9 zDFFzY5`*)DrTe8~>2u~~*Xgu>!`|D=Ni=~_94tS!O2HcT-d)o4|jb z6e(8k3mHfyF5u&xPNxkrkzk~h!7#?nZK4fV!mq7w^=ssF;~J@DrP6L{e(KFJBi8Lf z-{BX)#z@B6Meuk@K|UNNb2L$QRFAKP?(5}pwPM<~=5`^}vDV^~<%%AEJk;vFN>Q^s ziOdfszVBEl{nI{vv!vj*5-ox^sdET&KMkEWai)Te6k;q^B^Al$XFYAed0E+>}ZMoG7?zPCWrXi+GAsFiLQw3O#^ZD(z3u_dT~i6vDROe%@X(@?8I zfP%y)D;llojgRT>KB1X)J?eE)AjyPJ;kQQSOJN&b! zy^G-8$O8OV5A0QcYCPq7MdKl5%?C(#70O9-*54`bi^k zhS+a^i2!4Kx^S*_oOcVnp-%Mys8Yad1u|PTdi_)oSkEjE=}8o z$F^FUwD&B_4%o*(@CeQv(|YmTRhQL7@k9u0^`6A zmO8>*4RGLt)La1pF0TEczHp)><34OeY}#HP3bAB>1eEpIfT#;?nGu{(LCqvCrI(I$ zFJ|RX(FK+cN)c>C?_E<}CD<5rdj6fV3vgsEX>l5kXDb#-Nc+ve4U=ox%pnr6f`%ec z4^7X72esa-Jo5NG=B^>qccRbYs8)1{@t)nbu}{RuuCBFiKNv!GRi9Dbf3KA*4Jn+l z)h#!GuFANxtc9TH&*fddkiPlP(d*3&;bT@$2M8}OQ>hIHr_WQn&)taYzm0pp*NSW3 zV*5SZ?Ox8;_1^NE9ijT0l1EW zi7*1s+&P{jH0Pi1VXaw#t73+|=B26iuk3)Dr`yOV_-8hjtfZzGts3(L8Z-|f%oc2h zK7}EY@d3YO5_K_MD^hSPYqkq_^=@J^({O5fHs<>+^3_G0{~{;1V{J>z)&A?%P6bu{ zb?3!$FKIp5vM`O%kZ6T;uc$jD7&#J~x#lX={l=fVgQ-(p%9mCjM#Vd{T&3ag4?Vi z;v9-}-Mu$Z#LKv&Xa(486l~ciYMGx;fwh;hUbAEt6?6BDX}lUw2TkB?W?IFw*|iti z8-qE|5nLrH|JIv9piM+E>?C&2nk_Fea@NorjMi~UAXj6kI>KGx@eKfb56%MEZmG#` zfe8>Xa%UAIdrC-JaCQpo&t8Ha$fRFdw#o(0;h}3+aU+?BQ-wdCg;%4DOib-q%a>{e zp4-ZH!V|0qJj?a5&+Ij&FoD~gbu)P{n_uJXi}0dovP7K98?r0iPI6hM*a|yJv3)I` zlEwrRt8z~Y?o+Z^m|e?FkHY{MrofV9)|hW>uN2xw@PX*=SKZ8la{qaR3@WShcYqNd zi6hGGBw!Gd6FXwskzjNXCE-aM_e z1NAKGhcnR9(6%85x;>yA!%WT@s#c7sj**O@$QOufcPNGw#X&^<=&*)5J1Vk=Lm&)28jx+mk+j)L4!14V=I4FA=Ts4exd4r$|v$!nS>ycWnX zdxkziU1n%obUvWc3-Ld3sg5luBp9Sx60eB~ENuMV9pA&{?609CZ(wvLSlmwF1PS1% zVc%^&AiM$~q$}8M@64UMfg*>v5s|lDd5kWJqqo0Rg;Y_p9 zKMMV33J*&_UgeXWZXpo0fIq%Ez}W+eVFS&FArq?qmnY4NP}mtF;}8M)4+16!_ha|q_a~1JGcfu5kEpnlBTO#X@W8#*^dcif zSNBS}5uP-5KHFyD8)4Z+N*_N_Z*P}qIa_g~+JC|gfta$yFtV3&FN5H!AU;P7{W(U) z4e>*`$?jrTK~|gd>e(ZHAN+rWTP{WWkWzzwx!hRL!`aOE6Kms^4jfSchxUETjo@ta z!BPUbd1i2aW?}{NoB%3w@8-Z;{7tBNEx0xabtNX&c9UOAPQJycS+DGO{J%hGb2||> z7tT2UwTUpqTmfgHgen`7th#)za<9L%P(I=F7pe&)kK+ax6@sU*2(QkVp%aatwsFyb z{KW_tp`Zt)fushGqz{w}UBaBx{*Sg>KLx>gWrS$(z4?%5&A*a`bo*ux=}IG>s5B59 zdYpA&VaLZdUk~;SyMsU9iuBkZSHuBX(3k5((=#0@YUM}>h-}6vJ;%z*%m%7Y!rj>9 z>L0&sX`3}!EqOX~W%6xJ*$v!F!(nbfvAEuNC>*HWGJ7qQtWe=dO9Rv6%uNFbX!Pv_ zX^YPTjpj3OJKqDlFl+|I*`yRMX~=7^D-0*4m#mG+YKpxgw%OEI!`uTJY;vI=zPA=- zC`1v_$8|h>XFpHbgcW9aQAp7#l#oEG`XoVIBwCj5-!L|P){>YSEf!vmDt%Xg8Z$2X zvB9=F0VRx~L>Cp<8Vo_4wwfSATs`B62+U==j6{r$F~#ASiYWgKiLvlfeBn^t^oZ|K zlcmq);1<2t<|X2~9#PH!6q%Zd4dR&8J$oe2v^<1_V|0zw>rjzYk&@aNVV2B42>P?H z+r~`@8M6>&|5$Kx&;lamN34E;X~@(*n=C!Z&D?7ZVwon&H!@k}=xRtJ_B%e9TY}<} z87|#0%T!~~a9hc#<1%3|PdufJL8nSY%AVLDxB)6f&sy>Mmy$aKIO{%SnBvdp=jZ18 zCb$+A+GY@>9c%VqPCu>xF9v&{iJ@x8z5^(&#PQS%`8#l^YI&1(o?Fbc#hC!(p zPBT!`LeQ_54KLCPcv6wweTHcQ54F6wzPw-Cy$H{FKslYS!CB>u*Vw&iVz_%c+st#E zeksdL0>VS00o;20ekA-(DUBbmQ|O2J*NLY?Zg;N9&Zrz7I8QxR(NwDorVk=$pKA0Pna7vbaweWeYaVn=VwTFMSheXQv?K|{&ur&-FzJb+$7OMD^t zI-k!hda-nQ)8Owd3|AN8$#qjjbPX6H@86V9l{{!aR{x9d@!WC1)m#+^+&-hhZP6mr z%S0_@_MEB}QT;h8*}2|o$?1)Z5lKuK=3s=8)^?G5UXOSG$p-+=DrYNbfWW9HbDy$o zbDLjZ#v{N3h=4%A3zRB8VD`X5Bj|P?40b?OsE-T=(*qPJG}xK$IHyxcSp9}S57HYb zZ*yq=j{3tHl|WJDoW(=cfP#XE9uUm`?Et+4*8!W4AB}F>vx-9&ZZl-ePm;Z1m9sO< zL5!=91MuMB=l1~k1$cWuo_l$apbx{WD_#fKXf$0_;O~9iL=EuFVD5r1{keVLpYk5= z;lIEF5|7NINX<0D_h6gc!3|J2LZ{8bOq6x_w6iVa7N6acMc5b~{xzxjDBOoK`%Pi9=${z+ne!B+Dgpb|?fi3kJPL#CH}>ml4V~TU+%Z zoxOH~dU3>D?Pdm7{d>^yUGbgud+X9{dHYv?%zmnv`9rf7CoD0_Nbb`O+i=?dy z>$IZ_AVKTqLEos&Yn>4ZhcoQ2~Pp6ostOn(h3)ILX5~WAFd2>7ExOqxXl zlWwhECoSxGvRp~8I;5R-G_E%l&#io)YgiN3be)-9=6ec&cw)Yi6AVFm(F}W^BQGlo zeIS5foer1~Id% zecA!ChGF0Ku}7PbY+%>X)|K0n`vv#Sz;V!c1eVd8tbp}Tg{^^@p!o@q33v4q{W$9SN)dq*?ps-q?V!A8%2+}pb5=5v04GuKp8udEn!RUV=CLms zjY(NECthND*FN7I3A_G-CBEcuxX(rIXQ6|8Ku%t&-V}JlqD=$At7%BDt5chA1J0qYjr`Y8b1zu#wW2oBuIVF}lXbPGE=t3I&y25Yo$=>JSjwKkBB8X~d) zBHH`Z(g3Pb=d3aEB3OeI#DZ8H>pflx>S$*m)k-T(@K0@m)Qeaw@89w9Kn$ic<8^rs zNy+^?>0KVCpnr~l{})xVjsnwfX1vxpzq1dpH%$$=wV*@Lc2@dQta=APL(bw*##q2` zvn2_HFCT*np{C9zO00rk@sL}Vg-|ZwW|eWVG{0k>Y!(IEvo4vX1K5*UCVAUhWvNMRwEtl zWsR`@((MI-D`)}gluban$P~+bGiAHUw&OQP6TNN6)-_3uQSm=O1e-_<#K+q7bJ-p7 z1z2pqRoWg6|B4}kV7H$+ry(#xA@IXL-enbj(hHKiV+wY~wi@OXQ|;aHU3~L2_SH2f z>B?iwQ3XzgLtbSKzpdoa!`tt#|NKp!o++v507Zh#2QTKAU`lRcWu42!HUBjL-j4Ro z0C(T#R!;2zXR~^Myg`yd%2P=pr~6b>&8W>=%X9qqs%|-Z<85B;V<-;5;pe|x#QZ-$ zL%p9Kxw@gCY8gMP0vxovU8~W8H4!TcQtEDOOSUUjy#< zO|n2(jQ*6(8aeLwcGG6|M8a#thL*{WEUxvI>zQJBUyYps_pSbW&RFb@M4v93vn@lh zA)A+P*};gP?o?q=+uP!<=X=}S!)@M~@7#~W)8pvCmLDJUOYc-TWneC@k1&r`ih1ns zLQ4k&>jVIS9n{d9iC@Zr_`7Q23n3Ej%?kV9Z;(X=g%59}r&zqhp*?}EZ$0VIVSj`P zq#%t=i$M?%(0zoRa;wV11ex)DmpIh}PL0lJf9>qm4ApIgv2KgDDX#m)8+=4}cRR42$FFEHDIsynjJkoZ(n}Y;HGs zr1k~0ep`*A2-;3hXNjBGFPBC+`fco9_g&V=E9D8`;pgNK{%{@Io}d3>N$8-a9#q6B zoqT$_zZzfealLLIDz=5%b*twfr(`;9wc&TVZ{Xd#quuYY5DUFPi22}o6FE(O1yRC5 zz#bjKf1bx8@PR1tLr4zcVti2>ljc;i0j>D6|NJGu6T(8{ZvQpYUe-@gAe%n_aZbyw-(3U7wPtVX%4SOq0PM56~(a3iqRl`!R`e9 zR-%LZ!`fh(moH<%^>+@W;k|~-z{ik+fCZ4w@LHfKAXtlP?;Ir{J)Yo{!NP7-!9fc7 z211HRMVD~v82l5(c~c}n=KdeyA0%Jcl)4buI{M$~o44>gNvJ>PTf{`!ASb~Vs zQ|x8Pt}ap0{(nkwZ(-!n&<>}O08te}AUAPs$`H(>eotYIVG7{ei4ghk6f-7hfT;e0 z_uw+I)OvYnenE?Lobe^VLdw`0LV;OAA*+du_HbmY0$t zi8?zFvsM|YYkiEWJ9KY2QU%gG1G)ddHL}>Z{ljAL6QD%&gpe@`zGGiLpdLh(ya-ue z6%@>!H|an9RgQ=K6MlG=Co%q-fb}EVOP4LYr!c5$I5RF+l%M9;9jc=N$NY)?J?7&P zSe#F3BwjJXJ5Hi<#dA4y+ffaM1AaeyM5gD_- zgrA542qkXltR7h9?gR;GBasn;@n?hVSB#xbGBRe+N6Sl@_8?wHSs!Yu(A>Q_q*!Y4 z#h^4_j*0N8sySB4fx&fCWI`BKn< zWYk_rM>N^9Auk${KIMA@T9eK>@^cTD|akKB77;e%86r~|kDu&}W z>{fmO8SHv=bg1&a`taE#YJ3?XJcaQXFqfzZOOZ8Xsy7=+OY?i&=lG?)%xv0KrhPB{>c?Q=qhPMFs2vIM6zn2pQpbE~Xx=(Nxk#E`V z3NCzAB(^nHIgOGtnXyJ*$$@?`dJ$70aI^*g>p~Rq=G5(Ar4)&GF0lpvWnX~Vt&v4o z4?&s6d&H9`{x!>cQ|1_$CmE`NHhmHWFR4S7mw6=bjI3u)-~qPRN6MPRgX>UE1H*dw zs#Ii+2=4ytj{~F!pbKj-ff)cs7B!K4SQ_!0eXJE@2L4ZGAIKMC4kSKHY6optRqs{w zEGzcycz`L{J0N73dtl=F=>TDa@sNahy=~kQtKxUW`k18-1ZDNj1T?;Oo6D0PK8N8R$NCt-Z z#5Hz=9B1w%yF?oe?rqjc6p$Sx;`oIIeNZNayknBx=gQs^X!P8*Mgh5djqu$3N=XTW z363W(R}*&(uy@+~Gjl(P*px`<9?oO==)N^9Zq4?1faTN9;VIMIn&22PFp={IEHqI= zfj~>>)sB9{iJ{}BU$}JXTo6UBL|7Rx2%5}wKzwNK1;!0pY)XUYw~BE1wi&opB~eaX zg4qH5r!yyx%DYG1-bqytB{M&tBCjXz6c2_(- zZELoKs^f0P6EtK7__S1k@K6^ff0tApI31S`|Jnci;C^5^Xp0VpPdap zvX4t!K7X>N0(4-l8ZO1D*_97YK&;XmBUzwutq0> zo>xdEUxouNRclF6RxKHn2SlCsYdQ=$2-7X&mciEF&r=~QST*E2X|Hv=qGldSbEWWT zUyhOs)(_q>USq8T3tBS^P1>EhVz6&>NltS_z|AActxZ~D9Ro&jlF|T1%TF>kv+LNG z9fBzJFz`D(%K&#nJ$F-tcc$yKvFmiA;#B0$gs5d+Io(tw^&EJ43akJPW226D9^gnvHyDFT>L$R0 zfc?YPR&U8CzN><6ScQF}Oxf&Mf6Ot8GZxe0{OWtWc)Nxl9c$DdhHU&UteKk~1I@FQ z^V2rHn?;I_VITb_op{+rr_JuLMb)^k|MV_5%tY{f3voub!onNB6*YL70%}+ zuf!&mNuoS6S;_+^{?1B5bJjF5I^2e10EBBq$WTA!Yd_^rdFU-ZMVob)qu%Hd*HP$Y z*^h(Mhw4=qUaxBJ&(93_@S-;o|y*%<^J2B#e!l)$&+P!UlS+W`*BB}csh#yuTuTN!KrH13CCBf2P$40uQnt=r`Q<6M4o3V>^lLVAHTArxcAp8~>5We}|EUmH{jP6X;iA`4K&q zG1}Cmrg2};F(AtJh~%rF_D?DZbX|;GNlK!|Ol8j@?T7F;jv2q#52HkLKzBgaQ>2LRv6Cg9(|PiKUBmC7y7PM^u9*p`Aa(lR<;vUtsK6(rF7?%Pf}BWXMql-^J} z`CRI)KIg9hSbduB+$2qBzzYXLke4@>Br1T0jA??N&QI{sR4r7_E4Ia}C!U4xh{End z^d9<#zl{&Ee6Kh1flo`{Gr?+BTiT`GS0a=Tu%^);9cQ&X9={SnFKR_~CVW7+p6a0j z3oT$aSeZ=tcgFKBXJ^31V^aGjb1hMmn)4RDdf}*w1HCAy0)H$%G6ayCiPYhaVkam2 zPc|*BKT-ssHgfPtoH#H74cjXbta8I zfq1#48(U-um22oME|nI{=uzr9$3x)I12@E;v~!9(Tv1q2_5pYb+Cn^8>!OYw0UOHF zrjt$4N6oKl_BxSz@5D!kFc{vS$eQQFBc%JZLXDgeKj`q}a{=LGI)%IVX6^R+&09eY z!uy2PkG1On0*s>w^YRvbAG_`h1E&kM^LB92K)0v3c!@(X13UyQ+Et|91q@k&e+1Zt z?@q!fLm?8zgualL8`gforLp+0K2bLpm3#i&XiYuTQ~s&QlrjsiN$0z=X^TBev4c5{ z|-vq;h1u3!r*uCGLSqI$%`4CNImSIv5iUAqzr@_P6r8|RoML=j zG<}3bFrHx4bs#E}B|iqTVfMw;?%m4E{u9Vwt%o1EXih7cMX$TUY)UvdM<=5%s;($+ z!ztkX>u8gP^i%yfGBG)9XfbQH2>is{C`bJTjwucQDA;SQ`L&q)XWuJJ(5ycH!>9?s zxHc0rR13~PPUmqtb%Sf}ePfAmZd1%DRF--v78Ho#L$L`ISmZro&&X#mxPc(ue~hHU z0*P-Ko_$l)KC_mtUhB9uWisUam*eNTf_Ly%Mrv6~$)>vqC zFzUs!Y;2T`8obw_QH;wfXzgF@fvW-3m+NKN6vD>)%7e$G3$>4#XE>f;ue8apO z!|Qkm&Ga$L+kHRJKLD2>g$ZYGH)nru_QQG%=oIAf{j0<8mGi^zi3wk#mw^SqZ~X=r zs{8hIdH!Jc?|4HI0D2TQP#;`fytHt#un(nCkzCAK9Q-;)smV1=a=WYOoAP{~zOZle z??J{JB7Div>~3aZ>-&}R?(6G&W#p4~0WSd(ukVwL35GWbp+OVOIXK4c>e`&oNWvcJ^ZDs} zSocl2CR;fh_MeX^K8hWyO?fo8tV&w0qE9F6gP`~t^G?y<2nbx2h7Qvm=!MhHX6mV= z#m%%2CXj=Z=%x;LHIzw=wKH56zmr8{ODlT%+w3)tpSY`?IOxeF(J6qAPMRey(rzRC zj#hSW55_2}uy(04b69Tk8-cpoCnZg^bNzfEjhaxb2;16Qf-d#=X0P?_jhh{+V||zB zT<7ACS?nlHQMN&^-E*y5+rq8(RM`SCUr>v0`JW?#YjGcF-MJxm8|@b#ePz(vbBl{^ zxbGhXCQ{ah0q>Y`vSq+jses^Zc%1n~u6Ou77lb`88nM z&^7zwi|@db>Ab^{#^bey1R5T^c6Z2YoEXp(-{RQ5iM-J5`6 zDiSzLTixg!3gBcyqq0Vsk2Ifb%1{Vq#p!!jamsT>+fI9G30NA+#pSkrdrI+AQk#Uj zsA!GYU)|=|OTZzfL5{N)9bL&>CcOvLk2oDKM10ol23puk0Qw|ke>Bl@>IaSe_$0n zaY0O=1jsp)0pb%yJ1Jh|o+xtPi@k7$O^+*1^9GX!AI|Uh0z&v$8>6Pd7!}mZrONdL z;m4SBQD4){+m;KXr81?WBhp(Uq7qX>x1FAm>y$floHq_#Mp5bt!fU=P&vlFH=8V*$ z)B$;B^F?zhc%uZA33FaP$ITaye8Rb-bB}NzfD2X@*bHDy6=%7BbaJ+WmN$BBGr7A; zo|X*IjTTEG710p$xrt@*yy+TE$%7)RirZWf&UY8xr4LsJ&%^n{SP=~|cQ|B}NbLta zw65VLONE1n<*b(!Nb<=LkS{W6bOCz-7JEvg_swXjVWSF@z5k7Z?(tLd8@&0-&lz_f z0dANPrM5fAEAv+uFx&8HyBYY*DFrgG3KaVcP4ez^Pd#Mm^5~z<%kE6@R*eE`uTHetG z>RAK~B}sIQzRYL1R8UdMc;WedIU@>Cw9E9e?p=PifPkgW_CM)4kuI!&-v^}x5J6wv zS`C^G<(Jn2_T!+dxJ3%Bh!E|=C)w?~b_7B}N3ulXGr9K~a~niIa>wWDmxF!mfh~bu)lVyj zdQ%^+;@D8Rui_QOr8D$t;|eFB#@W=$Tk*v4!Pg=ZX!>N<>+P^vlO5Yj3?Q3w7pK4U|#H9oEfArXC!iVd*SsYA0M z8#F5!6*6mKgm0vZ2#tSn#t)t6#b^u(vTU@Qvr=j-!PZZmF}l3!DmwhGB32lQC{rpr z!BIGzCz??%QI9mS7NG<5HAo*&(CB$)=~l!bjDMKB?OVFRvht*qkrkVcRG2=6b@-oy zjoU>*vE-0E#NVcj?>6_PKh#dnF-39a`0h;*p~ni-WUwen^Nq<&G^sq+aM0USaabO9 zwT~zoH>p4+DT5Royc?3bTTT=M9gP^5u{jJDboiG9G6zCXK;i&0aI7kL>JkszC16gn zY}yM`<&0aBfTJm{{MnWTmiKQ&McqZ=+2Pq2*_K5t>q_aCgdmkk-9_aq;B7>-m1wE4 zY{*YU?NxV_w$wP6Cu!od*kJaNbVDbi!XWUn)MVFkoT~z{!yzzmad2$*4P))t*&3GT z`*A>jtcXY!)@^>nBMU>rEHH4ew5{~)^+lUu;A3dm*ed783q(j=VPdzGthR$hj$Tl4 z+VbYxuR})ZDcKx23!ST>W4Gk2woOD(Ur=z?O4r&cdLUL&adhOYtf+wj>@4>4?btyQ z5C}M#oK|Mr1&P+d5Ml6}ddM@zkgl73Fb8Zg5;TL4|-Pd1&QoI z5`Um@v`MVC5rj!;!D6?FthPafk6s{g+5~>r50prQ#M!{NGO_{!uo?3d>_iwY5HMve zRQ)l?GSO>+*vb=JOA`FPT}=|Cd%16BTu!S0?tmLD^C~J}`AS*weeIfr&n+fWskdB@ zTOq`N6u83@+$_Jxzq898w3eQ)z*olyMn6CvfCnIt|L5rAUysyG3Y!|j8{<0g3zGD_ z(o17H;fX&a(!}HGfGT`3!&IiznP>DYFukO%m&y4eOYhoee89v=8Q>`1ZQvST%YQrp z==0~DT@(}xc#ay`|3wj%(`i&0E`&nHKo+#bq%=WMDVc_rDggR%&Jlw2j1Uz+F1ooJ zDI*{%u2Y*ymFW{p5xeT5xm0d7Za?vs_8=AGAgUTL?H@K|R$NInHoMSD+>Ag;01P4I zIC-?6%cZE^4b7LqNv6)xHANYP)i3|X?-eS`Vd@2KO0SoYg$$UM@ZeQxjrOF;)fR-| zJmXV4ACRQ!s()J&(-{8)|cjhDUf~tgg!@sEUy*o~OtJ-dhQFx`L2hgih z(jZNiB4dNZXUW2^gTdFo%R~;dW*oFMx*WA-O)*QhcQFk4x zRjbURq)^eBg`Zgr!QMYnEB&#PnMp~UI90@%K{oNdA6t=g&-j()mR(ZPBpR>=$ z>SC3wwrh#;ZsjOGG4{`TDivm#TGMpeA$!;08P;bcZq~k2Q9GtdRMtoHfe-sN{X0_l zmu_~K07oC1$2G6Y^tyTgPQO~#U3refRWHj2{1lE(n*Eb|0`(n6_iEctvU(i!UWx+r zxUgSw9_V6F|AAB4?j@}Zk_fEH-$2DuTSgDoa)sIem@3|X7ps?BLz1-`yEZ*48vW_H zy;m2lv7H!9`;=prk|+1O5)isKX3vgZjl3Uy=Oh?6as9-)$M;BpzD)5yz%j9_wvW?K z9LLBdn?-jTm)I)&^VMfBEw;PW?7>~-qnUn7?23I=Ygagx(gW-NX@APhxc+Nifn_*i zT={+`?2EM$n5TbiShx)Y?uCaGLTOGBW9l^D@DT@66;%GCQ(AxRGm(17779XljM(v2 z#aI3#mh=R1JEa^3Q0ozegw~&P=nZOE7{h($Ax5=g+orXYX;ZaSWj^xtbDlo*nEWWKoo5sf$|73~7zl)X(RCa+{rkQ-K4m?( z3Q>w{1aHy%AHc)Q-Or!H$Ca;ry^rUsxB8_UjVcl*Y_$m-LxR5=xVQ!2?(Xe(NAPl3 zoLoFq?Fzz{FjHJCp3{;;N2_BEKLSKk4NCVyQ|Wh52jIerL8A(#$0t3&aN_;L_3cOsG_3YqhWVnLhRjNhw*{OYbgS&HgK}pSB3Z+YxweevDOs zGeO%9<<#!`MLi@qi(m!n?)>oJjk|S^fZu5eC3h$Q+=3Nf|;qQ4XpA;K73wHAW!NM2GwhUdzO?UQ~QA*ecQf zdHHo}SurYw(`K%Saw-wivt;OUagGdDPh>9X%28T|BxB8Jvu-FZ5@4rjg$D*_J_JTM zVoD_xu9s9W6jR~-In^>EXJ>jEhXcX*;4NxpMZ9epP)<%-p$@1(EE3Eog;8s8F~ARi zjR!O20^$d+dEKI2M67HB2Cn3cb}Oe&s=SOn+L3Gh$R2iJkSc99GrJXqa%cs30Cw2X z_A(?|Zgl*DUhV^T&%Aarn@?S&`o2AY{b<~!HeoeEI;VZwR=Os{`p}n#Dch=saHoMx z)iul+E%*a{(GxDpmIyzF6r8q!{WJ+6pGl;?9)*}s31pG2GC&X138hWVJ&8g&lTs4m z4Qu6w6>V+k9k8hvV1?O1_M+yTrk{0kK9xH)c82FC^rU<)+0-XuxuUXUKGzRQ(6|NU zI}fxs$O)9b!ZfXmXU|DmB93L_a43j?7nGa~qMVKtLz@-mWblB-1=}Lg9|8ur$PNuD z5pG}t)6Oi(e6ufnwZV?2q%Y)ag$h#Zua&`1kiH_>XZ`uJZ<1G?W{fu+AwWzG(+>U< zJS3W2Qh?TwPL%@kRgvl>k0IE3Kov>$;?eLqkX&!aAQ80+0rpKpjIT$Dgg-%EyCK&b zZLEhztVZ)kib{~tCa8o20>l;Y&LWXw88y2G5{@XI*A;f01Sp;n542oP(D5=iYU&=7WFpn|9?oW(7fg_aizFgX!p zmk3{#O8#<;o;^xR07-Mk2rOdDE{_EDHHTGvy5xW;x^amguT57RBbo)gsW0Ua`|~iC zl&zR%sWL=2rD+i=i)U6Kl_QOO$Aqc`c8Fs}q6L0egKLU+pM;1Ui`~#E=;*~RMSdyK znX_!Wy#LXb$K;BWfdL_QJk}-44h8Wk){2T&LmPaQ!FO!hsJhE09i8t7s)WYs43&dd0hm;L4xCksro*DkYIajvF zf3lLLXMeBeKB60%>QwP7Mo)p+l7iI|+diJ0b6VMXY1l-Osbw394elq#XqHme1>S-F zSDmW#Pn1(FUy4S(sMc#hy+*bYJhrm(yx*Tx8P8p&GPORDPKVmyn=DVK=nsj#?~P47 zjiSw#aw!+PyDUARl}*#^6Lq5s7rBNE zq%$jI*eUeeiMtxrM|B(mt@^@3RfO6Wq;#t*Ji)Ren^%*z?6YfBMb2|IhQm4ERbWgu z2X!HE(8Qz$+;f2zjmK?kS<@o1lgMxV*$>fC?ON<|&_fQX?w=zE}Vv|p9zZZJAChfp2 zZQdK%bSfkp-dRmC!AM%BBx8}pzzom)t6cpFOoCG398x@{(2?-mC4sJlX0F+B>5V-A}N-CGv>l8&E6Svv@^mPWk$(CQH24G#z!G~IJZ|y z@xTR`vyOhYrxdshrBYIBskZ1@8a!(JcTroKVUb~waapmDhFTDX$Gt-> zvn-)9A|QHBB{L2|2Wmapb&EW)Y`-;{Ub%#-NnFtEP-_w%5!PFpTlj(DBz7d;s$&#j z$lQyDOesV@%Bv@j{GWn#QmSTgX&a5)pL`5D(1aSn4s|nH5^*U_v79N*D02m=>vL6D z4YvJ<|2hs$PJ#@FBJf1XR}b@Og+L7$Xv9m@$DhrUSWHvZdb`3DSkTkuR0Wgtt0lY{ zQay%a%0%8+6rhLKe4`sE`% zC|@ll3Br-kAL_}+9jfvvV+!obIXtq~hx3&b7>mzOI^*_KHF^rhL0Eb*6|u=GxG~hA zarZ8dZ%=~_WI=3kNn48~*gP9GCym^WIvmM^*ilZ4pc8<|RJA$~DwVvI71|KMLsuy; zc&J#^*hp7((-p0YgUz9O`pb*NWTi+)dU_-kyq;VN`34GKFXl*`0&iDIst$+5fGvhH0vN@4 z!8z)d#v4`LZ)A0fhR69|JoPMQic6N3j9Hb|KQR&K4o#T~G!1PKaTZD~WOOmw!xorX zeE&I?ldUkmSt{uVwc?VA;upLx-&U~J!ZY^!>J;$CBCNCmy{*2=;V5zd)GcR9ZqlJi zehJnd`TK|^EWP|<6C)e7MA|U5s?3=nTT<;qTE#_x#JJmu^k2!r_AxD^1WIr-+k5Yk zTGafM{x0O?bh^>DB-Me4#r0+PcHLPT_D3{sR~ct%C-%x5mCHORmr6cVGA)I)5Uq4Q zUFlG&DMW7TG*t1nRMkj;oVO11#w7F#Lq)bvYdp=&Rz^hXlMUyx!qkVQt_{trn0uW@ zlI(wp60>?qNm*$Q7Dv2niYg36G}QE#C*|xDdQ~D5rsYUunAA()mQ&GWjoKxwmb2cI z7(~CtL!ZzExUwml7O9RFRUHA+BKLjk+t#3{LRPz;XsH#dSQOwtrH_STA=qvx>2iAU zr0owc96wG^I2AG{Sn@oS!P=s`?}4SN=ShX}oW^}2rRz#`uC{GgU|n+;qG+jT29?y1 z0EQuI@fu4k*CQvmqm~TV$$?{$kgiOUuIj8=>f)>%15#P-^lcL7`BBnYUbUs>%v%G= zlIid{Ev|fLX*B?~fkTm;y2kWFuu5our+5?P+klMN6s58Z4Hr&KwOcr}3YC0gQUO%* zxH`qfyD5G8wW%V*NV_1LqEj zv<4`3uMUoV-sEmcNmR5idu4mErpg)gy0JxScedxjTLJ)<8zT`>C#$yc_FY7s6l`q1 zjf4UnX~LpwgaQ?r(mFoJJV2qCA-J?%3v4P!;mBy9Ru|H@1IeHes13= z+HX;46(E@f?lpyy3?*Tt#+rx#Jd|=YaYv!;p=*BDRTPN^8Mz)a{SSi3z+szKu!Y9q zDd12SYBfN=6meibRk`kbe<1R)8mwAaUF0$Mr7K=IyAKs($;7J*EYM|0mmiviaqVbQ zZ2dPK;?Z0|Ivy@PQxK4nsL~>WkI_ukNF>Hcri6@$bNyrIiln36i$gK^?1u+05MZ*F zL6-y3iQTN95W*uN}xjvPz#p?m?hK%mV_?9#;zNAeb zJz!)33Tw>Tw#*ADONkCgY?Iz{DZdx9iH`ojl2Aki6+$2~(OXiub7jYQo8VXt)NK^F zxSDMZZc@@Cp5EKr?>#x+yjg~<*;dHu_-hOm1S+DJ`_ma)x8Qg_lWl`=N4}}yzA{r) z*9j0bFeCrk4BN#^k3K-Kk|`kGnY|nNs55(3#Sf{VzrvZWptq;6kto}7z+@tf6-PPF zOXjW>Kg1<3PVnK&1(^mUf@OI%ixH>|c;FTsKn~Rfi%@IlDj7OpX`l(Ak}v13;RjzO zg?TbwH6g1Tf2?ho6p=NOR_tprqs#wm$pz?kRyv)6$pWFYw_I|N6T6jH52{ok45jpK zpTBmR3l@OW*IEm4j$$up*h~<>W6Gjottu5<0@@J>|4HWY% z9&2%GXTNX6kjKu3uhq9&Og&_kov^o-t)vXB)$>}M1-HWfYaz)*soBBz+O%m+kp$Fg zRJSfM(|GM}d)wx^>MTTF&s+cIWh#Nxn7gyfE{rK-TOAO~PvS|nUp75Y%{ zMRlTKdLnq9mlu~o0_GnNqbVlNv`j!ws4DU%N_a&d&fjkbd zF-guu%=9C=@g7Rwh@D!@kQrrxcMEw{F9+qP|6x9@%bfP3x_lboE%ljLMhlF7{X`+3Y@IF%m7t3loFhJAp{ z(v++})X;+#ZJ=R1Z<#?Hf4{|crI9M*zWuaFMneze80KBG$80&)FJpq5#hD+%j6ZY7 zm-{qq@Y#UytgJ1q-0tYB_=sJV1?Ea`f3~*PXk>|LCh!MxO88KkpXErhK)gKQ<#z1j zw)%FeIyBM3mnBTwa+Lkr&Ex}V_pSwqazCj#`wz9y{C{sIH!_Y=j73%!7r4(`b0;K6 zd|)S<(XT00I#-@o?x=$%W80UeUvM#;GHtRptTH4|t9H!UbKT4YFc*(xv!%QLpA?%(1#WKW_6l+6BAel-&4>oMNgq1EE+Tv~+>G9Plb@ z(r*S4&7eqx=~lRy5&=c#2N--(++{VU@abkKBHx12&gGhkFeE3!a$l3UAr5JI%T3QO zL2dJD*yiMKW~77gYm@F!cwCWIx$KwCX3D)pQZEF-R;^2I?#r^xblo)U%g7*j#41Q;9hbJqZ zMJuPu-9;l(oZsJN+c3Ez@_Evt-=xhP1@Ub`Kev6A8$=u!Fi3MMR#O=3 zw~XGUiKdPxu>2-DGU1?|D#^MMSy->d`{+qn%v&btaU;%yPs>cZoP{m_FusH^HWwj>1gAq~Q@L17^k&hHY~o zLY%qOs>o-)6u-vfSpW&-QAn?5OINWa%}W^JvU9kaXm0*{BJ$()ej1QDk1|e3o-Mtd zMOSqW;8=*7jG9;H4DYoYwf1jgrvbd4+tfw#kF ze7C7IR&91d8waK4??XTPSxDUSY_jqSqoDZfBHhT&n*)%iMVACvwzNGEUTrT&g}Vj8 zn6Q}}rk9#uS@rQjh9P|wXv~M?Gc52GHas1s#)l=Jmx;M$X=_A05W{p9mX7d8yzu|p zj3zKAXQA25e}!>=g@q>FI%d6`XxerdG@FTe-$MVHkeljJmJh5bu=(er_an8RHw^S$}p+~?CS@?9^v z$Uhpy?JSqt+d*Q$rQm?QuYi6jF6qN{z7S0Jq;=S z?u!pAJ@oq}3bdAjyq^G6yc zNhBCIoT%X_dwC~@YbAn`98TYjTzv1^B&gNb3y0vJ$#fa{IC(?DR^N0oIQl60&mg`N z7U*wOhx`4yKap{;y6z)C$hCYUqHu9@G(aQQZ0?BfBM@Co`C5HSzp~DpVs+J@=A
5rQEP@(>a!WG4^AIfpx?^=R2 zKy7iq?s+&A$vUPse#a&ndUqyq3{sG`cto^2w@ClKi2O$DK|~CbeM^}}l%1C315_mS z`2fi|n*!L=yELz@FrAO3vAtcehhwU2mp+B!VJe%iC%KOuFA*zRY93pPo3GYAAkg7P z;lAb*oc*NYRwe|g|WY%6ZnRnL(@s#S$KXUb;b~7Ouh0Co+cURuRi)? zB|K5`DE3x2OYLBO9)P5AZ78Z@0a4&M1CuNfl5ZVbM+30pPbs^9!k;Mh^oL;9;z6wA zq;S?G{??*Zm##K@-+bJp1T=Vny5=+V<_wc7kJQi$ndW^2FBFZ4MPNtsQfvKyXr2`i z@_~1ezVQ2_cBl|n)0pnsJH$HJ4Z8mKF?rBhD?ppHRL?YKTP0-9^?-tI4-i@9@4Kh| z%OqOiG0Hj$Ne>gxLpA3lM7B@M{0m#J;?EL$AcMl6Yz6Jq7bi6eDf5)xkkx~a63wSy zP44!Y{MXd)2 z421XHN}FsFuoASbGmX7w~t)A3bo5%b)P?{`x{RsvY&JL`X$kB24$gFS*DJs3MsvgHwt_R^!mI8 z3t6?D-+R8u3QV#ZoeZp})OWL+ffboul##v|N8%bBM-y}Bg=k>4g5|luE<$l){&cN>rUoo;4j_wQqaS@@*gS@}*yDHlG!wS=om;_KD2>*z{#**A~XIt`Fy z{rTSrAPwO!D?T7UFx;Zd{u1kbN6W~L04_IXBb)2SU04OTDS-KTO=A#(_j;AYw1Q7e z&SsY=TpCV!q4#8O^p~2~piNhI*NmS4jFnTTNVrYxAZ7Q`d-zM#feq7*X@#PAQ!lRfQBzFrUaeTY5X$NdW7Eccb*08rJew<`z>CPlGV`$;2!o{5rD z_(#6v{56hmp-6qmlT%>4-YNf$_2 zM-OCXIzZsYSOy0NNqH@o(7Z~hQ6i?OeRbC7iG_!VG54p!^!Wk&t+0#Yr3T2m>A1_a^4B14c}`r#isgLEi@g(s1X0x6ZnFM;8+ShlFu zNr=6p(`dl`1w?Rh1vTM~elEqm_7J?etZ$m*qXuOWuXK zS(+-CprB6fwal$?8?tttHnBv`0)?YwCp*N>1tIVb_dU|J0OhqOSX@pwp~X@9HKN9W zXj!0lBEf76rBYN1G51!TLd6Z%&*&-ZyxwEX)6rxN$JY|)!Crr;1_2Hy(FiFM!rC;v z2w+aTcrZ9VNW5X?dD*gIMwZDk&9B0_J1X=KpoVy7M($;0PtXM+&3ch-zgn(n{)zMN zhvM@NXcw7^5;6ko*+*sxH)_RjFywk``1|PlZ1>UC+nM#}lkL0$b(8GWv!yFk*Eb}n@Y|H&Et+p&nTHepbmCR`+e)u8F?c0m$OIbkjcEtj#J%)WzN4dOKBnV?E}>E9LK)-1ucc&sQr2>8py4zO9sY5xd`Y|vjQ>tujMZEUIZ%P7u37Sc+xr*w>1 znJ+e5kK>#$cs<_0ySoTN+FkdwT&8+~P@$?XY16vMVSL2cs#YxvH51^H2An#6l&dVO zDUDUwQ#<^4(e*vWW72 zHOopmqq0Mx56khrz)(gP9^54AYCrz1xwegzEG$W?sVm$INj>8S{v9R7Mm{@cI7SC({o-a;q&BNvg)rySRFuZ8rcMVx^g8Y)glj;mQIUJ-O6 zCr+W%CwloJWIt$OJMU6j_>vGwJjAtiim~~oO0KgXLSZ=kiro_515Ln~@bEmYg6*|H ze}*44?Kw15aq~Bgr*MI|lAeE=C|8^ktf-wh=AXV7ha{`5;Hy#0_=fo?dLeJRQcYp@ z!v&RQh1g_;&~E}QIsJO9Fc5YqZM75qcFx`F&$RA-ad8ujf>Sc7%X$5X_z+se4&x1G zS-yycTd&^17YThjX6Jcv9!B3?o9RIF-fJ5$S22>gHM(mSSi_)QSCt5X0du3pV>}0E zX`Ryb+TxD4{mQf@Ft2yA7`CUGEFyK<@9^r~?jn77|1@?NUX zO~S3$3aS6m#dlQBM;XPMe!yCJjd$qgV)Dvv`lT-u?!TQ`-eiJk$G&!BX_fYcs$hkR zVN0xT4gtIlPRm@lCO({XJPa#5HNPB{<3kYrj+fz#xlbJUTxZ@Ev#-YQ&-AL3+g~b* zdoBN+(ny&GDhJvwPTsU(B>*9F+KY*Bz#SrH=cyf&J|~0D;pS%j zgZsuAlD_3(x6?Hj|=pSMIDafti)cV)Qu8{5T za#LhD(r}4hP6Zz=ZB*%QWW6WhhXI$Y>+sWAsZvdQ$cH$$T;;Vm@FQipfL_60OZnj* z#xDL>c-p%(mZ;?}@iL|!YZorvntaGXS#}EYNu}ZAy}8ka0A~q+oWQzpZJfPS`8oZ` z^L918aSqHyzOI`?nQF}K+nsGVbVJB9t{H2T%lh$&^dtSWTOxsh?=qWg$NnZ4Wz)h% z(U%1`+cmUfUCKMPpdrn`vZ#de{$m-G(};MBcJAUWv|J^!c6A~(enRt^$*1+0H%_Np zUxN5p`l)Jfru)zdrzs#zzZw((!4>-tKfiR8Q-+$XP#QOZ~0K zYYoL;-{N0HT_5nIYs><#4Pu|SR#ji_U^Vu$oomuFjfDnanO@y=A=X{EN@P3`^!-;x ze{ko!D5Clj#+jw^mqP-t<4jlvA$P^8H(-i=x&!c>8HC0NQ=eribw9%e=ld8X5`~H33uU|R4I>2#)A}2BFBltXy zB)ejq{kGuOGu-jC}x!0&T6O}tPWCCuZ!5P^$$NpGl(WT2(T@QN7scum}2SD zpDo?QfMpd}s(Zz9k52@I zHHjB+zgv{ZWuV%S#E%5SvdW#Fo0lLTysew2(T^Z~1lzcN!>9YII=DuI>y_8RkHk_g zij+7*$A~rN#p8XM_N)Ga*15*9cDcA01?6R<@oa_4X}N%Re)oE7-lZ
Wlgfun0l8 zlf#bcg)hN%36Y!1Nn2D46P4);wO8O3c!Ol5CNttA_!q!xi{DC9@ z4uvjf@LfyHhX?T-gTp1UR6gK4A31-rd+azit}DnAyQS4ZHHAO1(9F~ta*v-M+aPw0 zV-z?fx0McxV*Ql1RTO^ViY=>|&Q|rjUfL^Oo~Z)Ite+o0S1=64MBQApxz2x_gg03U ztcqzAR|TVQ!r+jZ;<5ya&I83sfUgW2Nl55Ho^M|8mC!>S>>KRP@rLU7aQYGMR*CR3 z&g>5my=(U6L*+wT|S)Y1@%F}*NC$$X92cY9mpC8jK)vB8e{q(8;Fuwf2rlr zY`3DXla&)Fiw}AdahGfq--<)F6Ie;86)agsv7G&){9$=Ao?@YI4SPX)0DY^j4XX{Z zbQ@KftwCkPH|cIsG`_!F!2tp8p*S4rrbbPUR0^aBjbKsVZjHT7<#umzkXqhW)rTto zb=|+KAlKdfJAmM6BZiQ7K#<&18gD40eBy?1*WxU;xs!DQIbK3?BvH&+jUo|onuCdO zMSNLEepX!ZWZGO?meHNr1Qr^yZPT(EzzWs}naL5nc8P+%iG7Mv;0il=y(7fy%-os{%Ie9H@mH4^YAqEK)jymgP;1w4#mjw(5}R_gMJ z-7Dg|yj{W4S0;G+4Oe!PkV?iak0T)cC1&L9(0I6=$O^NB{1J(HAlFQ~s6#_aX)E?T zA$gGglCeGzU*P_Nz$f~Z=F_^#en)o^3l}`~Y*f$OAtdx#Prp96a4npXJ>QU2!Xj0; z2$IeTiTRQ$)anU z0}F_!3i%fag&zI(31E5cd#)W4`-Naet0N1cf*SMm0|KFxk&FnXiGaI^V zc5F#beXP%IwG9z|4yklDSqpM5?S;M_%DpzghX&SssisW73nn>~zu4kKH(*1FX6mstnl6n3pI(U6jeqnris)j{&ODF{1uAzaK zuc2LA{`V#O2cZ(z2QOm0mGxU!d+oo*b0E+^!vR!%DfcBEFdMM65%ATT=Mt{)ieB3j4O@2EK0n=y2g`6dOh~H!?htZ9IR4q zWmKdta_l{Ohwg&BO?cQ3%`KVEA1D-}2;3HUPTO0)(RXcx{Y&BIhLX8<5GTlp8YSdy zrcxI-lavrp%R~Tm#53r0gf%yEmVLe&U(b97^s6y3Dc2Gff2HJTaGcdo#rG+^gCR%D zw)R0wM=2LuLX3uw;qaFxAEkkh{8==drZZ?Ow*6ZWZDfi+)$Ls(WRGy160m`Fb+}Lw>Uz`OR+v@<0}?mR z*XKR1@sif(md&?u#u(DI;Sc}Lgi;*Q{~MHvVJ4Ala8Z%=GoQIUAVXW6o{6rzQ^sY2 zFh%A&Ves16LrOng*FZm@)2aLI+VS=OsTTkzZp(@6jh$%yuUho6XaKQLZ4k@@_IH5e zOoGiK>mYgzik|9~YyTiO+;l$kBux~7%PCAI2J!?p?(-eL;8OAw9N3WMaC_Lmt?qG0 zLN4p{MOhJnDnvlyX&^$YyAlTyw_TE7jx)mpGTsZB#cm$o_ zRy+AT+4;V}mV6sRydURBahdHxtDjb%PY@RdwA(jFS(nkw(NMYFukf+J@y*f6`~I?# zi}$ZzF1#%~LV~Sm(NU9INI^Y`aT^5sdPIbx^%z2YUVu#YE&Ndh=yxa~;?DLJ+rbZ` ztB*TNmtVfW9zOO=T^#Q&p2{AEr4NBv$-m7Zzr##w^<&t zn}Ix`J)4(V?YyXlZY~c%0mj#b?Y*PkLn-nI`S$KUHkAOa0Gr=RCDUdvvVqD`Cy_C9 zdT-Vjw8o72&IkdP-4#QU3Eyl@2}4zoaVaORPXKnbNmbrY#bp&D?g+_MXaqw{9>pGw z^+h$On~Wuw7JrIupLB$?%<~F~3)H3h$mhb|Lk@nZ`}rBRq7-;mO+oidLd2HIJn2wb z&Nq-%Q`E!Zq1J=iE#Mhig@P|yj+6GqY+~#TT|rFg^x|g1k&?Vz#}vA z3&NZq{)lfp%M#G*2n-;i^&}S|AnTk&x4`4Wd`2F2J<+Gp_RH1xFJOdNGNgd5fGMI! zn@c`l|0vyIm(6&ZN1BusCCJ(8v58(h5NMQl?r#}&iN_-<}DIGL0LduyXh)>ge~ifh5%fYg!%piuilf8tUlppS2Wk6 zfFiAHM;ygoFdPWE=Qvp;gzmbAVuD_d^f5EjM3F_IXnoD*Ml{Fk+SYO-^@nfp1ku;B zA*4I5Up&iQ)i^GL6BGrGp;;Jf$B;FSk~yTp0JB6zCqq(wVJsB}-)(6FX=Ux*A+%90 zpz$xh9<)KlVS$Gc>dH$?jmtRjD_A!MWU>{S0y6?!iJj$UwKP$FUKbukVmK+*q~<$v zLGVCTHXy`kF9;(j8h;b)pakzQohN#fEFh)s_jXNqqCv0ab<$Zr?dSnQ#B_iBGsoXs zrJX@o+OZ!D6F=%a$--Ngg19ifO8oAki`j!xhvSdKD)v6_xz&G4oaPLfA}@1ZFg(@0_bG!MHw zxC;BRZ>q#ohh0T-^ET@6n-zkNq>YdD1dx`3rPa=Cs><7W_S3&5{?O|=s)>=HFe8nW z8WCkX-wu)Jms6PM{Y$6Hm>5sbC|^;>;WYDOC+`G9N<9ZVsr6W{&W37*6W)WnuvRIv z*&;jwzC@)a6&6PUo+j3y7lbCkC}tV&?{74XSlxjDMN~P6M}<;F4Sh!` zhh(n_)AAEXM#TQQ^HkRX_z~p2L7m>Sdg5*Qc^{%IKXQ10OGi-KgjyqWjfns?c-S|4abjsVh-C7r7ZV{(;QzA z##FD#1;VCII(@K)dD$*0b(QGLYYV|O4?72EOQND=?sCl%A`ZQ{IbL;$-{6j&KN!=F zuwpI{B7C8dN#BCKm>#C>ObQPIOqy;lEA5nm*zp#iZLA`19sWhxFY0hb0_$rnwMp_e z8R2#MCfMn2fhnn*dpIb*?YEa)Pd14GXE|C;W%WsJ0^b^cdCtp51t7X`i$8JtcDoz$ zpX=IT_#-J7EQ#NFc`&kd9K!?JcixR6%aXrlkCKUS61j)H1m19$Tjwq4zmMoZ*sw+Y zR@C*1F^d)|dKSK=!r*u-If=VBB$5HZwnQ zyw&MZZmmSTYSP+QtCi!`3IfN!PT+5nlLDBBvPR#YvG;)D(bLzLHrXbhr#@$&l{ndJ z!u$D)oP<*sC)CgV64APmIm4@yazQ;hm@i@eX_dB9t%)|_=*gb0&&bXM{aV!aMVj*h z74$&ELN7xHIfez$tV5%jL~ib$X7EA9TY49IIPEdc*KT`cl}Y)`*yMk>F}HgD%W?zs zW|PcV>bzgHbdUO-BX84g^7qr?#>*ShXN7BSovDB>puPGnlI1f22dl!smE0rH2F(2A z^hfNe3j!xNfBWO=cvQXSBV5U1V5;VMEh&HPY1Vpot?MA5QL&%Wl*+o2=Bp2e;Xi-tUq-BTP5i)FrNUg4RP^f#wNNogiIgE8Ln;UPu0gk%^-%rmoRK zYm8N+&?KpYf24D1FkO<`NfMV@hF^l1Dbn#M4&CO>J6ATgVRzSxxb@eW5O4v{Yd4jX znk2={n=;f+J7H_xW_KR&J4~zQqWM-Y`Lu@t)qGgHotbech%Z>X_=HDLcnKUUM;my( zxeov<9~@!mA;*X&?GkoqDMx`{ot5biKc;d#e8Ue2q4VLS%kSS~qQ%aEx3qWEF=j|c z+cog9aa`-a|H^;#->YWi{M;^XSUz%-!~yxeg8yN=>uh@boeO6EK==IzGUMnjgW$f# z&b(Vp7GdBB(O!@t_?k~6tG+x(gs;6ebkaO9;GVmgXdlj+ZK#gNf$Y^=V`$}jZ=Pn+ zo-K$+n0bZ__2&67eocwoE+EK@)_sF{vHg79e?2_CfAsnuUpoD~+8CHi{{iCwLF4Y} za{vB}Pvj3j(B1!$?e6Z{49H!^X#H)jZ9+K-^FqeWp>Gl0$q!3rpW+#B9mnNj5JqR<12ri>vYU2A4fC=B{yd|xi3 zea7h~_dH|qo}*e5l_dREyHVoM6F${X{1!%;qZIoL{Pn_q;EH6ly{k0Zp)evJR;XFE zr_^%wwpt&EYCNx7##9se!GKDt1oDiQ`@T9^yO4?x!@K)&cJyy66grb@NZ0I%asD`NSD91hg}O{i^FYGI@R9>ErHSd8M$HpCuP z3aV3FXd{F8i2C2cuvH6u5?&C10C~oEZc1pO@#eG5H*t_Xj@fTsIG`btscr3_;bz4} zt|Y%4sjb{+t#> zlt!rImnMQIU6rwsCP&B{%iH8L9kTM#cLLays#qPlgeO0VdUg1vD^xM#v#16xwVEcQ zGeBT&WRM%N&P>5i*p_ulaae5(e;yILgBKF% zxzaRS++2l{ZFay(=;o8Be+Rv_)?OiRMQheliJ%QIh?7;+sGdhl^hE*qe%#gMe7;CK z9Mqi#gQ(I(fN|&xN_gVdhkJVVrkyvjHb&pHX2+!7j_)|RW|cY+?uZ348Z_9I%@4d} zF{BvyiE&-jAn)w4DKROi$i3csJI4715dLX^hJG`Z?6jj%O#5p51dbTXD?L^fI`@O` zQuW@4we<6X6M0ebn5@In87z&=mD#BAI+N59mX>n~g2oNbJuBZ|kmM+k!9Z@E^dE!n z)k1&8`hv*Hjno6Ji3H_DYcy20R=Us)Dye+>d}&N~bU)6uSsJOv*3-K5F=3-s%oMv(>S@@FMJIjLev8aQ)m( z9}Z*=WO$=HmO)iG3xrO^B3Hh55xK?ZTnU;_*?a~J({}J(Ti20NI`a%I7=MRt01TbM z7}^b@&Ob5DO&dM#8JD>Cm=^D?3#ei{#8gge;B zQBmC7L|@F4xL=JV-ViBK>QyjF8hi&Wa!rcgBYyWX| zz31o%JJ|bkEBp!6!hvbKN(V$gaH0>OlufKkU=h~91l256A(+=|>@2R5t(5M>;mzq= z+k2b-R-st##2Bipy{im&U^?P6NdrAuYm5VPK!%HyJ!8?_nybbtJ&D$u2#KmGy|*U1 z11Zy933D$EIq0A^eY^u*>(;7|1hsMgVDwKc&f1(l5Y!d(c|*@R-3yc|zD*gxKuy*$ zQSRj+a}m-R#*}kxB28c9S;)geVrRV9cPLibCg6iV> z2aSAF5j?9b20E^xeZ}0bz;9~6#ux5U;GaaC5PE)0srv|~tKM8^2Dzfv@q&zvh(BF= zwMW67maebpf$ZQG&#bMVYMm!?|v~(h1WMWYI@o|O>*(kQ$4i^ znqr>Sce51V<^^)y%YeNpsgnI|j<$B^w8G&$F1o`eenX@9Z6?(E4+XLYo|x>{<@4=f zTP2PsMg+~>;?QvCE^h%XKpHJ~3jh=M)eBem)rLL9ubp^7?2Gdv3k}bPuI~Br>CtCB zjb?*<(UXJ;^TOE+iT$(&fA#NU9Fo=8BKEwej$N^sH_a@~c z$iO)xde9YC)d3f=eR9J(xCked?B62~yO=2etYhdvERQtQx1<|@hOcf@c&qTXUfGqg zx!pFmT2;A_AAX^wym4>ee6M~4b5~}RxoiC=kZ3W&SdcfB+03>WZs@=Mu z$C~8qyHhqbVaYgKtI@gw)L7-&m;?`8N zi@7`ke&nMGjV*ekkTa8UC&u2B534l25&0iOmJa9|@>JD|OGjn|n&6UqDI(G6hSZOt!2V}`g|4>WuD=$vYq|M@Mb|axwxTFM5~z=#|TV1h!`Wlrk9u3KS;|bBhga9!p_i{nVOQor$x{50j47Yr+=d}Hjk@d_cKW%OG=Gr zXe*mnq7}D8%Oiv{kt(|DT)1jiOB#Uh#Qfi$bC@$sjgj8ldeL8%pG-H|6g0Bg6$y(E zJ&Z~T)|E$6#W(+ymXBNU9UfXbbUpJmlO{70*XuS=dQ~OGRV{B^-B@$+NGDEM0?b%6W_0S zuFoFZ9(c8f&$!s(>=f74&u+IRQy^FTB>s+o;^~CqIAiD zM5l{u~}p?GPiZH8eg4zoQNSrW>E+jd^VyAyg%fe2Pl=3aJ~CWi$d+ z#2D;?Ipqo+xFwMnBoAPTvP(Dsp=nD2onv_uhkYBXkKg zc4cwLA_1>$A*5~(K&w2qdv69+5s;+o_Ci(1Jkyaz=tGu4dbr+En2 znGza-TzioOiG|+5M$ZLF{_M#;2@+GgXBt_}a}FeJ*4oZ5a=8x3(aSW$fohwTYOkYl zm<%8bzuJ}-1}q|ctCigg?_@98Mpj+Yexu+>hf!@K_C_nA_`~=?PCLRdPW%b1e7u|? z;rI4JyckAgBjoy#YG5lPEI}UOoFE3E4_M*h-HpVN;2%odV2Z@drp$pwa9EM2-13jF zeus0m7^_X_WYB}MjIbe*?QSxHDNbh+m2Hd|M@F!tw>!cNRz&+v+0+||%!w_i zma;5nM$_IAn~<6vYof2H)kf4(OVt$s4XhoXXw28k$XeD1v0<(xa_2iPdC*~gejRoW zhz)XOQV2&16{nMdD-4Wz;2TjU@f9fDw5wy^`;9!@s%l z0TCD3;y0$v?adnzMMn;cC(;ji=+F}Xok2{{m1lnOdd7>kvh3BLt<%wMkO&oAf*Ef4FH_y)yrq^P|30%W1x?e3?T6{m0zd4AH_`B<4E2t&B>A zC#l1ntR$U>xb?hGt;?a+=Vjr?!mL8OM*pdgSbv>LcJcnlW}Kic^4ksI;2orKn7_cO zO~0Msdng}{{=Pz;CsAV_cB10T%l9r;7GbL{N%0=Sn*+9QwRn8AxiB?d0Afr)qU8Yc@}8h2CG67XZOT-(wFS zN4=TaakxapH^eo{rgI>=)2^3*i_$qaE6iVc&n0O27qp!}-ro7ppM^6Bj`Ppl+Q7%( zU1E4C9kU2iGzlj>!nrD!C`N7+=42+AgBUm#HW0FSKjc?nWWdvZLWr5bE-F*}MH?gl zMFoAioHG8Fmm-y;XIX`n3z>{#_J}!*Dgwo^g@r;Tf~ewWD)SF0nOQs|l1h)ff#y0S z&EDN817oJp%A32;yqfWQn9Bblyp9Am_vLaiCQv*48$miFLUrg$Vutm04BiaW*M_*) zZ-}kt7{P7ji$*%LT-1YCLfoHP0uO`WeuHk7fKUUv{y2fIRylnfj!GC&)3;fnB>}YZ zWF*#bD)J|Wt?>ZUE*gm@ww+L`x{1pCR#+-~pW$cSf4QZJQ=4Yw*A5l=&*PIG>OS`k z8-a~yfsD$9gmPmKZZpj(Dvg@WtF`ul7DDC5*@X9mLqWR{+v_n6liqyGrnzyDO;3T* z+qNs?MUUn>P3Mc3*Ye$euH)N|h8&67?#S)d)x=2~HH(1KP2Ii+ba@1h)i$$Q&i@00G zhtvGqf|W#f%u@XwR{yc3XTqmGDMNSJdLNv@wdL+FBadXpcDJ4E#0*VozDa)J>O0WED_o_ zPJaU=J}58*#LU*|7)}2^ z)aQXJ()xE`|hStmMykh1EqU*jS8tDP*RDhULxE zXRm_>ZPBG$`>dz0?gQ#tM5OuJQ(BkR* znvVYbW-+(%)uK}i+0=ENlihu3Ch77xobjD^Xi9Va5GSnyUBpaLxL2)rS}J|OuJC7W zFdJQNSi^#?=k%7W`0YNMxc{s~L}p|y8ncU;i+<01Z66~}gkyS>Oc3cBo=)pt7r2&q z$GvLvtJw3+_vhZypO5{Dlv^XK`}IIDEtk=11dNr2PTisWOOkB@*~mIe`>exosrd>TJ_lS!8d)yt|v=_0bq@NAv zqk4a3f=lhmfcjE8`-08~kd250M-p&^DFI9`b;U)z@4g3b=E9K|C^-sscEPSs7B9y` zl)pqv$37_SU>r#-2Rwq9xdP-436O-8r113lfy$t})8m#Rm)9Vj@#K4XdilvcFPRu2 z+@_XY9k%~wY@7(8cfG}m8bpm;1O~D=3WzP0f}xaQJU{S<@<7a_=Ie+Kk&T8CKmPr& zNVolW;eZy&#iMr>GH0%y4GQDN#eoC2%pMzq?f3_Dih;||x(HnOJ1!u%{2sCs>9<*r zIL8tEZr1fJ&?gr31Y@2Ngk=@ zyBzseyx4oi7&Zl{MXrN{P`8pB;Md6vI?VU7jakt46w2W;4LSfu2G3C6rSi4-rvh(q zf>&F?8h&TM%hA!*2MpSsw%2G(S{aW7-$LX6eyqt#EyB!_nXYK?{vj^SgE3EsM-^M(oR0J z4vVdm{mg73o^cgo(!C3brCmQJ_qZ+6xKi0f84poDpltj`8DLrDNaR2DI9^Y3+c@tH zH*3DK@19?WzI@m&q+H~4>HwW9UjOx51F*pU$31@D=83GpD7s=h)KZHvT2TBo@(FHB z1o1dGC4f09Ol7R!sq8&_sYkujg897SdZDXXasMKD5FcrPi2pN^xb$=QnRdMW$Y%_0 z+D9qxFC*L)F0k}EVC14|?{Tbi5+7&&Q$ze#dB&qm-qqvvcR@ixJuLARz&d}z#DrJn zsh@`#3)2Ny~ZM(}uWd)PMhOgiF&C&XF_q^9n ze6m*Rbmsb@mz-hj{IzNBQv4S;I#FUhgA*j#1vvzpTehJGcbyvT;fs;!ys@RSwq`~L zs{L^W8DJz$+fg&heBV|>LYkL({r?5r|W&T z)CjqKCR{@-kupr)3O?L{Ab?aqXkeo(PjZ;&yJX%P@s&M66T7%i*CxV2?8b z4doTJ<{1k8iHv0ovA2hkp_7+O>!xCXq$-XJ5Hp2=Mc*pNtU;JoP$)d1idZ>YXc^Pk zb&Fm-1`Y6&8t{m=5eB6MGZCBj%z`hk>Pec+pSW^h^|U9X(d8e-nbg4l4*-uqaKC?m z2ARBsKR|~}{sDh}KrU9^mV-=VGmb1L|YDiENZ-4#K`;N2J3A(=pm6viH~5d@ZueIU6nFAges_iY}4Y>}rt>7cn=Lf?Gq7 z{gQtLVMM?)5DH=~Auq`9OO%Z`ejJ#3(Nd&NaTg8f*BKZ#@%@-qPa5A#_YJr}wh5-4CIOMxKxF+DflmoUwDgh~s>eF;u9yUKC>IA1!k6 z+v57KG7hG^xGPWaJOXzh{0CnTeI$QsrGPTS%>M}=@o1FQluk)&yo6cbq&!?-CzsM4%TsOGCDL6V9A9-^#_5iL*IaHb1vLX2j5@xkh75KzT$B873%sXtDghQ!EPPJ#DEI%AfzoT%VjS*F z9%J($tIdc!dim!VrvI8nL8kiTyE%qj5s3Fezj_D%dQSG0qRg z2%KLnP*3MQ7slkHNt(5|Bc6XCiDqIeli}8gADi`bunfPirXX_KO1VY0+URu@$6_m1 zsmNL34_(l(WL~|GE!@q8s~VLo)?_WEMYKXa6$K-ebRb;B9O;Shh7|mZ8O-M_$hgUQ z?IaQlt0hg~Aq5akXoo>Jtvq!s?7>w8?j*rzLiTui`Va^nF>6{noW6gx6uwHdBtZmd zNof|5o|BfQz3PV4*4>))>Tp_QY*J0*ygji7Tfz?yo|Q5;&FEX9fILX`s2ZJ~Nbk3v zw6<(3FYaU3lpzkiN`_b`tBK7|psdDcP}Z8KG?cHGOp5~f@s;>WFDMm`krt4Mc*s@r zaMxXzS?>v@*19rE<2ZjH5>utv~lvryWfSHK86$v=&~^DfQu~sQl{hT6m!IaL7g}KQo0G7n8N47~EcrjDd36EZr zgGnA6BbR_jR3}BOrFB@QiG`r2iC53l_|2Qgs+j^TiC9QH1f$Misz~Am*@44+~;WCGabTqGx%iN-*KP;}$i_*i3&Eq%`3>Qp9;o((__PLRcgB zGtz1w!PkM8X8p_IJOk8!fO}n)gvOg_Ys6yYKhk})RiWvS@u)rxzD|(#O<|wHL1C1y z6Ad#7zgPTLhXDchU_|d8CIEXugvXf*=aY=?ix|7jh<7ERn1F*NS0cr<9`Pnq@@}3c zMN7>DMz4Q^Srn@n3*b5pQx2SkT%=DKdaf2xVb9Uh)HyH%E|ze?RTBlN^8oK$A}U0~ zBHRkX&x>4kFrTGqF{#G1#_}R##YpAz?~Nk&Niku3jw=Z|HC)TdXF(jdM!~$HR&(pE zTk)A;Iu}{iqOAZH0B{iUdXD}qk;dRI9{>k(C1-z@evayiaWuv^wbBkZkgX@zhmYyB zW7l4_Ha5jqQ?H2j#BqE-pR_cVS!K)Xy+!1Pm-FY~r^Wfs6kZf*OYMRx zjW1%A04CWYfhtsEoTk%L1Pz91d<+oGRMS~{t5<|*rIrFktj2K=`ATYSBr=_Mei9^O zxQKsNM1305@_s&H3AQ6_v@5AGcqhQTT&5#R+6dd>)OLlXDQInVB#IP`w;0WsxR!zu zcLdBT!7r&MNZO@(utXdU4u8e_rHHrzYB4_HdWx8HN|U6#9<9`>5!q3$NCsH4(e=H? zTM;ROyCUM8zoND|!L_5IDpEA&{C&CO;=_Lhy@nC_4z*Y-FVMR$n3l^;MMYYQuP>9d zFIG$Cr|T@p#zM$6juJVwdX&#nB}vSZNEEavkIUJv#H$U}=9{PT`QoumGMF)1aP2^f zWYMfDCW3%^21I6&7SWzG&6QjUep-^rpvA=c)sy7CQ%{uFPCZfFIxA`7(y1pZ?s|Vd z6?_{CYr-i^5HA-adgmn924mU_$9T1S{PV)FTm*TVWcQU#auyVmR>{&ph)HmRz$_36>-`H=K+_qjQO!4uQOVbywzxF@~S-#BA~SSYMTcn+Qnb3 zEr_XRScZIU66I+zwFRMkLpv!cF3w4c@l%ouK3nXDy9U!f56MQBi&Lgl?tiHITBj{h z(*bdLN#sfUN5=Ak|H$!SL=JyXNuBsd3A7s#d0>4YDHV)poduWoj3lYG)yz}mms5$$ z1=o`Bhav?S)H$J4cD65%#X;u@(v{h4SnesICu~if5Q_%G3HD=Yi zxAGy@((BXE_*pjRt$9*)ChFg6Ex9rmc@oU!Qqt4%)nfdF=1H0$s+ND!3hwZ-T2JO& z6-pikiXE>~#tTlCt0Zw)(`UN4M6J3kHExhF9 z*@$bXs6IhNor9o#G-Wc-3WIzi;H$MHdGzr#821RjSuFw3LsiNQq>6a+QEBz(%1J1K zp`bue?Wq+9_vu2Y#K?aasUj^Bq-qH<3BxHx>}MDorL!Ttr^Q^X986X$+|?90UG;Rn zGU~~Igt;cUMeLxJEKWPr(~C(onkJ;ohk0x^pKwl8C0^~KOx1SGYDVROK)NMzCiull zM5{MZN>UUJ#`SfF36d#Iu?nLIlTb&7>-k{pmMR4IdXkdSYpEKZxt6*r z3&%{(f=jGKQHl*WcZ%qe?{Yx6@1k^xH5IoD34m{36wu|h?F4~d@?MQ?!J!%fEu z*5Y18^q4MWy^TU*1Bu=07Ffrl34a{uic6*fMid7EqkP?h1`fPIzG`8W!AQQ!d(_u7 zoi*s8yq3n6i;A&j0Wl|W)2S1=@zjZ36T!(^{&YDBF`nv!>b8TI_-KlH0BU)!|`1$Jat9C#5QQ|jQ zf*Eaj+oH=x$+ZScxqrbqpkko$JkEmoBpQ_q@W}`ZlR~X5odpRG2l0Fol=9Rml`>~h z7U{eMc}9t+a)}S1(+n~oK(ugK3|m=T66r#E>Lh>4OI&XaOE0yK;T%a&+Vlw>zs z=3f>;#>za6qp(!cjxuKeYEZB;XREKYA(SQ3Y)1b$DM z$bNq$Dd&zs2=uecgx%)z0LGrk!6YI87Rco+3d5L{GZsdRSs52u8E36xSi*DYMPi7* zND9-7jMEfyfe8!u;Sz^{rxBoTEyfdyFqj+U53{UQVa6VX$izy*Hd6&q6Pyb zj!L00TymY_GGwC&76aTKSXhp-G|wkNl$GLOg@EW2DbJT6At^!Kl4a#6nBxoh%1VD? zso)#nOqe(>`G+@5i%B^|O;7wOY2twllJd@S$l~<26e8pjSg>Uf-v;-23B%yuQ7}Z& z3Zrk9f4LxLu5L=5Zbx{bTY?)p9e+UI9V^F|GjfWyFxF}~#+pn_8%&;xwzME~iuQ`B zma-Hyf3|=dRdT8Eh}n$@*0Dmc{uzJD8$Zhmnx&liGdFJRKfLzgaDp>mLZ>t|+u>x0 z^0AaUa{uoJV#ha3yTQk#?v+c!zn37M)U?%eyLJVSM;$P5czySTu74?bo;A#|0>2nd zS6qQzPFNfpmjt`4+dTu-mJ$OCisZb+cR|f75R``<=jF2B`Hy!C*08>1|J;A_xB7ce zH=p=@1K`ywsPuqK0^TRQd~4IL`FIGA;1~XiP<)N8Pw_4mwX5@a`R=L>>nVTFx zf6ncSWB1`Pi&#x+NPvW9UwFFr*-Tajs{> zz0;9THDn75rW5`Sg7H{}XTjZI6p(@(T^2dGinw8rv13AjE3+9ZXl8JF0WThZ08JgO zAqzY^rD{Byxe4G&{2d5WLW+S&fmF?A#VuK~e8=h-l48v8o_m1fKV*Yb8P6!lM?uIl z2r-rB6#(q{57&!RD{g;4IOz5DC%pn_*$lbAxVuExk-knxaZ1ligP7$bCf~XUaXT}i zw*mfYBCaPEMpD@?Nkq8>5OAVI(`w^NayR6Ha6}j@qcqF-56kD!7FYYD()mPal;Q79 zkPYT!WrtyITsg`Z?E4%|e!O$B|(7Hl%zq(u#6I2Lh^5t{Zq+ zQ9?o7zm?rtc%Od+^yvbI3ffl+6-0rEBrVg(F=8k`Ur685 zL|2W$jPaFpoiV2TYJxo-{zx|9^@5*sWVIz7Z;mt4a}$577k}wH!yqX(-B1b_I!sbD zs72Hs+MCE>XA(vu4jjZ4i!hY%BP$ig*4mZ~v25)Lm(z=vVXn}m4!?*%kdW8N1q+|^ zG#Q9Rehys%J|O!X)UfjM171CVSy-q`OYE3V%wnIEKbMhI2A9$I<6P| z4g~xGk7jI?juW9SQj8`n*Ijb*qTG~P;!2Z$&RLYu_aG3%LO4`%9h@6I%OH$cTtvhf z)`=HThHR#A&<$vk)|E!6qtXIX&N0As<1c*_@PmJ#XNo*ZM+Ga?S(uJ!iyX0BPC3Gk zDcue-!K71;lt>a`p{cte)pbY~Tq%SJ-3qCKfghRD#z0(3R!CDtDTtS5_k@%N+$t{F zr;ylKc{GX<+`Fizz?MoW5y60`SxwNd1?BMPe7yk0%tCyRI`ItP=Cu36_dui$V3o6w zEO~$D#kKBtmAVNQr@2O)3jZz|cHA|i*Pun{=+6dY3H_ZY9{~;bC~;97@Y7Pp<<5vl7ZAdnY}X}qGAxH9QiN2G6bZq#&zR#T zMl=2+k3Wf_xJWn!w~&;Kf=~oTt#(6(P(lhi5`zGl;9N#XP;mTSNINY4<{~h_y;70m z8ZoXel4~xSEQ96B#bUz00!tWemlA)=5kzc*;bJspbXQMEtjxW8OIvh)X$Kh^m@5$$ zUiI#&UnKeUkY5Jz&bRi8rC){kE#<$um1wt*Mxt0FUai4;>&u7ni=p?jPZh%uJtJih+ zdgYsP&+e6b{XG|0RnTbZqN&|(-`I{ogld0qn(|M5e zTi*TZz`@)7W9dFD1;0OG?jHh${Hy!5Ec?DgT`QQoSFkod#p-ya7jAEM?6Ul-UTDGW zUctHv!_cSAhk`B3olk$=g4xTebmU*}9#;@SudL-8@H*2ZSMr8^@78PUlBLhq)Oo=1 zY`S}!ZMx~bdAYPDU~EdTjUxr*I9oR0qS9U=CNsB}&Y^SWTsn~xqdH1eT-T_ck~jyt zJmteHB_1+Zk07oM=DI>=(qmoLCVLgaj+Is8RI7SZSf8rM}3PoMD#Ywb!5dfv^#bp-(4vu6c4xc~&tG;>{-0LXe4Yv*$? z!Ld|3&aUkgcK1P>he1;5Cb&-PRG05?6iLimKPSYdyojecRIE+0sNlxEG<$(VY-4h?Bf$g*S3@G0(^2p`l*Qe zE3@WMb3eHksOG-&?-Ai^rJ0gjt!A*V zbNq>2BYdU7SQo-tJ73HMajGEVfv28C9H)-+)hirY?i+uM4>63wOD34DBqPdfs6(oRv83mlMOwj?7as2;$S!?CVRXVsS+t>q4Rn$rpi z&vVicO$Yj7=y5Kug7)k8{|3zdAwkw@`27+LQnp5p?3_-K-Q(*41oV!M?9bf!^QYKBkD6;An|(*B>xc~-k24C1n=k; z9Ob{mi~WG?i1_|O@czQut&nqV4>+nFu*q(toSg=Dm1xA-lDY^|kE&kIc?W?`eJ6l6 z;qy3>SMZ#DS@10sc11F+xFySm+vvI&ERug=dIzo;E+P5{+=%{+ipdpsTLG624v))E z0b0**qk@lv;)m&N!Z{AWx*-1zEPNMTGk%|d*<@lXXW4j>4C%hzPH%f+23v9&yOnQpZI_G ztx7n+zeh6R?GFCkRS7@f-?#Yp82%pq2!D@7t1otTe`w*4N&&xl@S{>*Qp(FCneu8E zQ~vQ%ru^d{lp@RQ{u67xJU$jF|JcElAAkIZNcr)fuOQ{EDsx0-j(!v=FL!p}Sub~X zUaFLzFy+OLOnJHEcWmcZoYT!$H2Qy+*woI;A4F5ThX;_dd#E0_b4WvzDaX`-(4!i#`>J3ryyef)cffAI~xeSt6b?LYAEj}+Ng zuk3(Qj^Ho$cW37iU*q9V7&*qsF^`B|?(EWNUmU|jUjB#!{o%)B`1=z~;O>7@H?y)v0J8yAPpu&rt7yGmvj&KPaW8~EizUx=;_XnE6A5`@pDdpf8Rsp@w z-Jc-xipsu%KEC+zM_hS7{!^}@11fun*+)NNM0M-s&dw`Z2tVS>r&aOd7+*g{USi}G zR6P7qz0!ZuD?NO92!CHu1bTmWe0WT+b{8T?FDdfsm3;HBe#DgHBX4nhOsyOrAHp)Y zU%*@c^2+IWVW+;{Dod@h%2t-s+YFY|msigHf|k?2a!BZ)qq~kwy~nc?!4Pqe27O6T z6v0gu$`k1cdoAdO4-$*^N&zYeDL5_?at)X(3Vt|Q z?d~E`aKM^y09AP5tUD2woIoTD76~XzfRH!j9*Ke31B)bje+jeoUZ0U8?XoZnMpy!W z*Oxf}%HcjQsWf6f}b@dXwmwLQ8KLDj-`J`PTaTT8jjyP}V z-GUpEC-7rSc29pUo;sc?4o|eZ1o=o&W#&}t90+D?bNxv-bc z4Y*Og^}+KkUnQ-*+9J8Ew61Q6Yuw^eGapCNHRvbsi|cvqa`WNWS4(^Q6U@?q+n+j@ zzH3~N%*s`}x=afgePY+5w{BOMS%4dRC-ZSZy_iTT~D=(&SkG!hV&YiXSa4M^v zQnmT{Bh@<69}xBzosVc)=tM~7_)F)r3oFqyZE&Qr>d<>-EcpJsj*}Knr@!NQMUt7% zeRz2d1s2|e?h|F#`Dy7bJFqZqXY=s~H$2TN4DjjH?R%M*_$B;>Rbx9}A8!ghRCG-N zn)}CjR{je_^l*Ti>AGTytZx znSwRCPggJL56gD`0I%@$BYz=~0Qh#oC>MWK;E8GbOXrK*BIc}An3C1=E> zF1iQG^SO_HXFKS8yJIU)=&GeJODAA1Y?1ujNE3gT&zINf*iMd7tdkbDu$QEVqzN{d zCtAna?e-Vx1`M=q@tVqgWo!{gJYW-Y3u@;HlvxA!;Lb|Zy)MXvQykW&^q-v!oWd}FY>f9bUhfcwKoUcpv1_ibh`onyEEn=@q2zVG}c#u=xUa4lVv zCp8;_=wbF?#~d3k#EdwF{Gt~|Re zFAvVj%iqfnC*_~Nmmd#oqVc!E?q=OH{*~9PPlb@Jx8c|6Ah}();lJigT>bg>GxFt~ zTK~#x$FRS2F63wThQGu;?Hx|4uP_3lVGe(eovHJezkL0dbRFEDFY<|XVRHw<=lXxb zM*2J7>MCL)?;^QV?e+(4573uXqvTKT8h3+w&K&IIg2{f~ICAAd=!#M2|MHj4JND?% zIlIaru6%78p#E>N{vfg9)T)dV*R4$1=2sBP;)h7L^BqgD_H}f;;ebx-29MiRQR@r;AnYc`Sw~b zdyhV}Z%&BcEwEY=xb zXrf)87QwQ!bUtbw$EML&Gr1mwdDo(}UDkb}`CHzwVBFB_j%ab4K_;k@G;P2D^d+`czf zkOFVjws3XILUjx*v01;iSHFM0PBV*BI)wppV%8RvhOJ1Tv`~BUb(DnGFmd;WNgF9# z*L4+*YrjsDw&K@hAQ+_i7=)HV{(-RIPZx*h}*ZzOzz=x}fjBJ_C zFRxJ8B}AU6u!T}0lDP`G8A@W~dnsNcZF^0$gtA~MA-qWu*pth$l(?E?fKfsyjQB9F zoyo<4LZq#P(IgO*@|i)+$FxSRwT?~+X0 zN6XBdCMftL!ZrEZ<$IX$^DLc3_)dOdiIlR-ur}@cuokrV1-s@d^XFN5$HyFF*M|Ff z&St|HUgMwe8imO6=thYzPq-F=h+lwxgNz6PrbxKFPm179#6UDC!XGY9DCj%C!?SW$ zoG2TooE2BmjFEpHGx}Z7GBK{QIX74?XS74(rc(JatV>ib%xS64gDhvq+{g?gC*0#3 z2rEBaoakZcwh=WxQ~?>T6i`~`Jm@?B#{DN7FJ>HLvX~H19#G*GZ6ik0`)h0888Umrbz8ejCvtBV%6#2?v&Uf) z-gMCWuiJT}$Q93P54tUX8x#qF#LpGozgkE#8dkIRN~|8`SJ@)Ie?Mm+!@ml~T?=lS z`j~Jyq?LcQ{Tub)Id=JY`uqOt=-N7lqxg+kmKbnM`(eo1qz-zD`hH>_mRMlVeD=&r z)GY2V?Xs-buwTclF#h5P`Gt65TPiPG`!+oLFZV(y75?qzTW-f*|MckEs+Hg7dpX_4 zpFR7FM(Dck&iZDSOTfUAZ04JVWGnyjN-~o_zfymSf*)Twl9|z#27lduzgjw>Ya#>I z8ldBG-`+BviTjE8pCRyP=B~L@_IE2~${l2QZuNiqC!XSPde7YNa;^T!BIfLTXQltMy8tvu- zvtK-~-2<1ypLiv+g0^9P63{3|N!Z=keU#e>vV#g7$K5Q3OT|=s)F{vYZ#-#4UeGVK zCyl@SUl|hu7Jb2S9W=6oc?ZO^KbhZZf9ZdmRvMBFQuVbc$y~?lcw63<)!RVct&q^I z;dclSyq^0r-*x?M+x!mhNSKavP&ZZEY+|y>#aCH*><%@lagujLOd{;8MT4tD_(j{$ ze-D@oExX*b%^J_7+7Lf~kj3pKzU}3m+GP(o`D;|28wHyF(m8a$(^b23<4m1P=Rkik z5jih;=D)sGx4T1e%**@TgE$?Ro#roADD(1)hxStN13znI!>Oa50y3V@QU)p~#>=OnV&6Pu$0{3djG!X830lQa;eIfD~vX{Aw|aauNPiv5vy#~s@OUtRb{4>J;MMw0)M=Md;XuV zKokMuGA{gE z;Y5La+t*%KrQjA9fTOJJ`+28)WRTJh!30u$JYO}ACa^O{jjvh0RM2??QUqkePQyk> zW(oj%p_#t~%=&HB5XZE78>aJ{vM?aq3fd?Ztl2CmU>3Z(-(RV_4QDox?Z9vD-lxrn z-pw1gp1!!l8Pn94RCNhW&rE7e=oKIe|r+tArVz+e{K z{%B>NQGy2KT+$JxZ(I_IxYDNyQ~c90E6SV|wS{D@XKR0hy{`CpAmDbs`Y2rla9XSX zw`KcVO(D;-DPY%vrMb=8AI=VsE-nw=UmOii4&ki2I6|b+e9PT4K^}jpHYa%d1A{XQx)1037yGA|gM;^HSNkVtm)%Y0;NAY^<>2Dz_~_#3?BJ-Iu%-GKLDbye z9PGEg`V~GZYhV6)^mc#$>=#RHSowqv(Xl$2e|OCU)uGjb`3LF)IR7PCaJotuI0=)Y zaX|YMyy|FMe+y>Utm*k&F?nIIU_D&G6ZbE!jxH{7DU@*8K~Pbe@)G%&a3R7)VpimF zw2MFigV>zS6ym7^&-vx()eS0}8FsmLiqTAwPT^3CvDM zeRBC0kRyN-(cRE##Q#?)7KHl~O*-pj$t&7+4=Y2A+xUyUiQ6m*W%X1(Am?SwH0ct& zTQ_g7uuVt-1Dj~LkgaY@x%OnNCgy6DAf(}bSrHQ;H{OV@4Pu8I$WS+cQ(wJ(_cp{j zvDa>5|J$ZWr_+DK3Xf|31=|C|gd{0#YS6x6wwSjNqy!b8JM$ei6XD z2o|+MwF`hRNdO#RD8zxDxap&2$Cn31lq1rWU(r^run`3%7TRkVilRVluVpRH8>UAh z@oq;;XXbxea73OdcJHZsSrxdR05{Y+boUOW%=G}-uBR?AuWdTP5_W5#e8MtEL4vLV zpt1oj3XCE(xJ9~Yzt)=PCcmf7u5)HDUy~H9O}2KHlI1!R+fj?4Mt+c@&Rkr~Z{4IO zFTQm5F57t41LGM_d)v8js}FlvI=4=pP;Vf(d!T>uvb0}+qdV)@taIz!_%IyFm(cnv z_y%()b#g2>vl2(qMT%syc~<3)C+1+0WM<{xy!$)IDx2JNia`{*ClhyLSh4rO`Ttg-xBZ>n<}!YLqs1>x1KYmc?!v7@v-Rja zfBwAVfNsab_wa%{Z@f-VYV{#E^;T;QNpEbd2|8ONzEPjN{M)`bIz^(##D2%=7K7}u4VHukobLw9;K^u`ZJ>ih zgWW~&2adQ14@1zH0qWei2CrD50Xijy+#|=VFi0$IjME}DP;1Vh>B)h}S2Tw~oTA)_ z5`&Y{5EkSV-~k!9w-jk+09Oi7fDDkO#?jD-({ygkm@$j+bY8^Fn1P=PY6ySBf>beP z_u%TlnBCK4N&?bI0uDcc2YF!{N@HULhD>lPY>abO6j0|P-Y z5p5_=gD^KL)p?I{18=K{Gi4y|We@{Gd(N^OfNDo#%@E8cL(*&p5p+ftL^-4PBPcW@ zWAL^!2JS+{as-3888i6tNala9VHJM@H4MLZvsk_F zC^w=U)tZbb2jUKpKNidY#93HHxHQshS`DDimlA4-wkW6Rv`>n_cV5!`8v(eS=t z#t0y#j1fS38KZC*8>5hoj8T*n#wbq4#wZ%_nJA7^580^CB}w3oN(5f(y(w z38F__n+&Q5&%j4;SZaTRWBYR`VIxkHF^Bax=vnkAnE#mGVlp-sqWA*Kiw6qo9JRj; znxrHJbiuKy_+VzIQ+nP*>3Pc%AsotanlFYW-?7VEmW1Wsp0{b}x8=l%oY0v#b7uww z>KbC6+X+1(bxPqurG)t}X`?DxLP<$j3bbhgawwzgGGk+44j_LB4p%vB*sW;`<&D>N zK}v$nu+li}ThutN^~weM)~{TWkB~;>wBc(-D=HbR^}6G9UW4K5>!PNoPmB^l|LR7S zo!77NoJHG7xMFkI7-3#K#5;C2KqbdvCSeRUnEinUBNGNN>jF_6@E3K6n^0E~6!Pb0>TSHXOV75qE0Uu* zw7du!1TUj+63W6d2FPAi_V+@o*B$gwEZFOMZ7pv1z#dSj=u|Swox8pl3jG;KS5e;3 zn*2jwG%PGFD-GMA;WLbqYy3@u#OigsYad39;iWB96BoUFQAR(!Dz9I@D6fBbWrH?F zyXtrc7ejyjVzFv7V(}(Q-lUBqvwGcNn3q8qmf*VzU|7ph{I#S#k^7Ap^z()9bt0?R zefgsN;gwwi+!fq&%Te*Q4B0F#nAd^E!{`P^1r_XG$-aE;krsX^+JH^~uxdGGTETIl zMPfY%s2Z!+wR+v3%H1EzmoIFYqt}@QcjYWf%GrM+E)y1j%j+^><6fC&PL{6`wwBw24@Pale1g%wLhpH85!Ax z)L^>x=6kHqNYW?742>#GB$)knQ-b|e)rwlZeK$IyiZ$U?qh~9<032=RbN9{OCpd{n z7F7M-e!V(<=NKPP-{}{1&@Kemu>3belZSwut>1kH4+(d9h<_n_9&_}k9H@!>X1ssq zmd~#z7w}EKqh?PlVg*~qtNS)9bIeD|`_5bo5z3Chp=W;Ziq`Z51u+eID zoK6cs7asJkEp6GAHyd96;BfzHU+a_S*M_om;s*MhO853U4Qu0#*Snj=YbPYs7Qq`2 zKUsleODmzO6R0zb(5o~uc*;Gy!5Dv(FC20{t#7^)zv}kwjn~0KI992=xl@sE_E8Vi zK`|$2Ag%8e@{YfwTf}4jR==>+;~o1P+yoMPkgoQ_Ap5S)?g4b{UT19ybtb%7qd}86 znob#v6aRTRDA5Sug-&m8-Qp(K*7UHXvLo@Cm^`uswB7+;z?%Mb_E4eczkGkbT9s9t zlImW#tlsVhm7`U^Dc`f)NK%eYow#P-^KPHK?)17pKXbP?o%VmWo?CX`^L+4^T$KUP ze!lu)^}XAzpMCsmbS>L!ci>b8?Nq}n{BqOQSfXZQVRKFC=Z&SGS4)#8LjA?C*&JX( zTk49@z!`tg^ijQ~_Ry!gqAGuRE^IoA^jGnnZW9G40`N!L!W^e&QJ;VFVUL}98pr+; zHm%3ivSXC!D(WC_W@tAJN@PB-YaTm~bqWqw=V950)g{>KO?Ne=OaX4qVF!MAwX|9o zB8vu;0-4*U)xRnX&ntYD*R#5M!52%r2L-_$INEJcw2!syE$gqc-`am(TfK*+$#&t! zCDnR|=#VODv_}43R6pgZ+o%n#CiX_DRhphxzu79)f6wbnHS~&C_QxAIaQo3(wwC#K ztJMM(lTEXB)IMSY(D%G@&$rOtqz5-yyVo~Bg(b0!ZM*-s;e#*Jgi<{31;w^KFX`D^ zn&Qlxr~RjH%kK$?wNHOre(&j?Syk=H7T}ZdaSf)mI`0dU#(Rva#3G|c5yTex@nXk$>BD%SoLwuXwWKuz(a z&}aw2g)NrL8mF}S5o-YKr1vzeJr|S)Qzr*xEW~+{aOP+2c{+c$Z0C%4jw9Q-QBwRf zAtnF!{^=TIdPSVy1NEW4kpp3yeKvI;f-GD_N!m^C9B{1L&F`F1cX8*8=JW38&KZH= zVcrcPly`6MoG`k+?%v%wBre!g?&EQ`B; z+&S~fy!+>!W6me@mN@Oq?(cWbJO;lK?5^nk`_7r)7n3yUeuRP;1aI!K+5PLz$-4i# zbKq<0{szB!(PdL7haoPe4!Sw+j;2ojCGLh(2hsMr!zq8V`uUjdrq0bUVMTX3b#8-v z);*d!ceA*AId$%XS==2=m#=Sv%=l~M#JftP`)X?057D(1@7nw=|229Y@48wMW}Qo& z?{h;Ls~e}mym#J4<68H;O;+dbIWTcuQQ5K|R@7r=!5qXXAbx70hLFANR@%K}#j|JD zc{>P0ccp**$65yYRIO!OD02Yl}zD~2FU^KBZCN+Yl z%suaC%y&LAmrHS`?6vg~^=Y^+T3J$5vRCbA&o=mzqWl;kHmxbMpFLynv$UP_CCLI_G4abHttLOm{uC-?^CXukIa5hwFlrn#`wsf z`k;UC-H-UQ1Yje+C&ggWoX>+~&va7v+}+#QI45-~CwHxL=YinnyBXO*N7vSQn{FsT zk66;VW!JRrmD$yDl^e{KiRxwN$}5Q3Hf?P}&QWo!z+^tu{-@1{j4d}GrmUS$qU*x4 zm!DuJr{o91`pBF?VWUySM<{0w%r-$HS%ZIKd)HomvVF~&JhdtsJzsuWzUKM_wdLwe z8DCpN#367^lhz{^KgYRor%aDJ8EfAL@zjEH_A&^=D|T1l($^Z=b`wZif^UGcG-Q|` zU0WL|g!ojxmUEKBH$tyPypei0miB8*f|rROA-ZgqZDYI7o>`SX#P$+hH(AQ;MP+|4 zv89!Me|iJyq<`U>IECikCvnDwu5}LIvD{uVKzy4-ahS0LjAX^)t!klV%sB^TeDNM8 zmG{7Vkfmt><8eca?-O|ujHYhClIt%)*P?*4yfP>(+gCy5&;%<$^;!+lBD_O24@Ej} zivba-u<{<|YYQ!Ih5(-ry&h<5%JhH!QzO+khHGJ^uY2+s(`&;>sp@_lL+#w6J>WH8 zBC^rf_UmPlg8iEsDa6`DWJlNcR%PH0Y976^m$;rYR-GR04Ydcngj1<+FS+i7%${m( zh_gm>bCa>Q_+F;W?!t$~XPQn4Im48B_SR&z@!Ve#qydFKBDZB=D_19J{9#b^Cff;c&nna=_z_~g08GOaiTGN#4&tiD_)#52WjV(H9+`B)_J=~2@}b(x1m-WT(H9& z$nL2&4y|*ivNxYUdv;D0GNv@8*|TR;rXWI=!XCWi^+A08w+F0Bgu~eK{L-_$Uatg0 znq9%d*+R69j=ErM0u_G+0|SBy4}u5K)tfSR=XK>>!aO`o*|xj;T2OE9lp#pUe&ON_ zIS0%&O(YLr?-$}!*m*66HDK;%8qH!!v$0s3u2b95-R3QIk)e* zSz}r%erCkTj<~sn=jL;40e&BC;Yvd^9a=#dfUPJigN&6Y&u3+nC(nyAOzAJf*e%9} zi=zCz$cr+cq>DIzEJ5$1xG!^1556y10`AnxahfDSIZmTwT;3MfWuBJ#Vl*ki-&J{^ zF5q7V54kpKRws^T>eA};4X6x=8v}reZ~MJ|*Y8>Vwhz42z5%Az zrv318i9Dcxj|?%Yf3wU^$8^k1({}bTra7Fj0n!Vgyea=LxxlOJ(}GpMrHeJ?zEq2y zvo+;LY!x)Q20t!w)SDsH`{QV~xc9d@l~3%yfpCuy#v2iobwE?`gPB_b?bBhQeS#Hq~{tn(l;uId#!lH)77)le7Jcf9u{wyXW=P zizAqE&QtIH<}@MlboZSGM4s-DIThEX-3v$Kq<6nKM`wqF%l*@%!T#mo?a?nMXI-_G zNb0P5t3P)h^v#~`1hm-$XRkfL9NgpG45)-99uA;s`*`UMm&y~dRy`@~rL!Vg=pH*l zV$uD7!#Ul*I{0;P_WtTunDdLH?pNpZ=<3(^hl5`}>|Y#q|74Cn5q~*q8Vcr||9Y{1 zx&LnP=lhGpODb{2oE1GMFxJ=X`nw+>s>2C%1LkmhzwUxL%2c}hjyVmY7FdB=vR3{# zU@oW|N5U!HbB^G5PA6+?7s;(Q5%|?fla=>>Gg(=RST;0HE$0ooTDC*1YeP%Y8vH1F zlQE#TD*_&kyg<^)+^!W6j6lPzUJXFE)N9)aVFlQ9xotY*BUWMu|4TS{9S>9H6o!j`t99Djk&NrZxpm3hRaJCe-P+WAYggG^F? zl;-m=_7D2cl5o{77jungsHP7_Phh8!FXo^*&O)*!!NyGcwU~9@yqW=wYB#yGkjvql zld&rZ^E7d(7V(QW;Az)lu?fzm#z<}&iY#x8f&@k~WX2*FkGlpjuG+ULa~`UyG=O?g zSykeFR3Ewo7a@JZY!1*Cfk2zN(=nt|GC{j=_4_$K1eoun0Em1N(|3l6E7eBT&T*Yd zqHvgKL}8PJqRKze*(C$u-8tvlw`+H6pM)(6+1)iSNggfxh5b;Sz>myr0pE121Lhu(m|(!%y#Z?%={s=WdJyEy0u8By206+=vI;htJ78io zr3nOYe-kdRzT4j~oYTz0n6ru3Tz_e6-_L0#cRwz%>G4Z3@Jv2NFcmvpjZ1+^qI918 zKjgjJk{ideHg=DvsKH`Dz?!OmW>@tYqvixjks?J=6c0<%UZdG0*ytHxO;2Nl?m3V% zh}f6D+V+(r9N`E@*kNDz($C?i*iYj4vZ_xQU?^$r|Nrf9h#GWPRo5XaD>ExIE32`Q z{J7R@v)2ATkFQt!bq>qTwTYf<(J{p#HcYSQacyyG704FH!w)|{eDlM9n346vlvwdJ zZIu)1$2&Hz!%E|Noi`Kq zQ>qaUhmpo7m4@UOXh>?Y{l|lo^biV)>lL&h*E+yF%Dqoxx%WQ(8ahU;q1by5eZu4P z;C;FatJ(YV;5~g@CG6OL{Fih$emPvC3<8VpQT_4bl+%OtK0Td78B)ETo@eD87RAP( zSYoQ?N5ge`Z|H)tm7UDbh-9}mCwOl~R5A4LHEu^dnV)giF3VY~B++y_@>p0w5%eWItD zaNZl%rD_dWzu#FR$u=;8$!!MWlYH8M@JY=JZvpU0KCQs_B%f{sUZvCC47w_}3%Dxv zHrT41yMTsJKpJFKX$`P6pAxtze;cs=PQWrk*kDk1)WpANRJbN-M|ZIiwB8!&O`8&H zzE7_$8Ds|XaZRm%@ePO+w`?Qbny=sE{K&YSwrW+;fUZ+Pom_ujr6AA*^@7l(C8yG+ zqkdhc!^bIqpKj6;pAH|VBi=vl?+$;o)*W~D`}6*|-yb2b?ft&rKR4;`)BXN2J&%m} z3XuI=a4> zHkqBGySw~Wa|i9Z5951jr*RXqg-%Be)Du!Xc`MHemrLCAa!#7KQF(1VbF6IyH!QjoZ z-UyBORTtFkud{jw3CmgiX;aln4wtYK5KIM2)2 zz6G?_^jBa!PRo}X6a>u5xZJK5EV5%>Ue9bmi`8|1lJ0eE<>;;nivK=s1i9hC&n3Sq z4@bZTtHN?h8;$rqsK5;7@r{iFoA0G7I~V7R0*Ez8>{XWxknD z)8)e~>)>_;m$hZr8j4;ulzvDZ1rfsrT5B_m@sy@uceg>)F8Z~=zSj0=@7^6q46#O| zV;gOMO%>ZAF&OL&e=Y}upUc{U)vdU8v#Id6)j@l-cU5lf(yrt`m+@6ucc`+~puP3M z?Z!Ws4aJhT@X>UOfL8;uHBlq}bJ;#!P=bUvCN7}gFE#o-ZmCf#th=WL{c3jarIi$d zxgCQ*>L>y+rS%ih7mB`z15E%_optjBy+#t*KlsvuFAm9?yIGA_22 zeVnc-Pe?I&psz7nl!teG*K{!6u`9eKqT1adK5n+vZ8@ctY*i;nY0K7>zO}V)Wdc!u z;!Sl_qRpl}`snBK=H@*?>{TeP5l_( z|Mk($uOIKz_mpDzVx?8!JC9QcL4$#LXu1FEDj_e{wN!;sL7GqN%1DML;?>6A__WHq zx#{=$h6di>S`NeYN_Jz@i~atM7M0e2KN@wH25K-bx_?ckE?KQ&4X5VXuES1lD>6IW zw_A%JQb)aizbb3K@L~zPg~*9068n$SF_>?+&D#n9RPeAIUX@@BdX;N6&tO0c*1TaM z0Ki7jh~JmPdz7-Goy+YtcyqHcnTGje-5*Hsb>A9CNgW5{w!-Ymn>S-tm1iG+Q0xPI zg}XYy35BGAYn;f^i!{TgeZndi9E-8HY09emb|}(8$@0u7;@qBnY@#Y3jybrt5 z`q{GMWAo!0zTMoQK&1^F@3izGFFIe=7sw3;J6Mg$ES>&TCk@w=;^u=caDgig*mlXjRJ}FfT-45)m8gT>rY4HA6Kvwy)NUvlotN= zjT7W|dF|*$^SsG7Rr`A_D~CcHQi$ou@bjEJ`4_plVT!VGTsS^SD`(B~JkI&!Nj&Ed zC$YYUY}n5iMor8=&EtUo%Q9};d1|+T4N5`2*_#=T3qsQEzS{GOS^S4#96p+mP8AEO|Yx|K52(c7@LRZzCvQ?%6@uumQbOt!$FIZkF;!myDTf*D$4kDIb4>s zb*wa@<;~6T^?dlSY<$bFSADKajri;Nu>W+DWeDT+ae^RR3zjcLJr6ogDT1?M88C_t zWcB$mICxR+Q#FW3-h9!o0aT-7$*Ck1E65bIlkFeD$aS-Do^a(_vrQR$$pU^jvj!h!UL8 zf-}pqCd=bu#AcI%eN5AwO>14P)gSS86(4@gQpA8m?ez1&4{cTyhM}MjZumXx*L0A6zwk+CK3usTmG~QH&Km0c3-DfTfbWQR2j0Sp ziN=agB+m9LTdF|-+I2PR>({B(I?USgN+-NprU%^uhuhHWDvjSPhuy;b6ZSX!Np`D} z4QcUzRcgI27iSuFS84p+eE6y4-BL!?xZh13>jm^WZOo$&Y%|4ItwaLZEgy8WYd9jm zY7xqFs;3q^?H1fejQKCJw1Qb*bZKQVCG(=ySsZ;^iw0;JuNq)^;L>b4sUMs z1J84y@{9Cv?`gSzl@IfrKZU36r!vlS-ar8BZ+Ew)mH9ZoP0YG>i7KJ3?fviGx3TL&nmH(Oy@8j3$YK4L=Bcee7{=(qEw2Z$&BeaW0DpvHgq)ER@4=VSl!;0lR zElC|lXtvibFnpL|BKgyD?O5bsFr1e0jvTE_&4gUK$)ZvKTva07-smcgo40zZS1an! z+D@+4suQ5}H)eFrnE!E(&Q@6Sy52i~cQu6e{_st z()#G*KhClGD=584$#S`maXd`N!xw4C3_KVNb8N0Od!2{NsH1 zBIQ@a726-}+vA#Uv}jhg1yGOD4(F+k0RV0j@aL-<1$U_lIQ~S5Yu-56*HoNkQAi-nrA=*rrdk<6a6Vvj;dWcu4b)M@3wN)ahNUaTfn8pv< z6dOPydGm5k9vC%()x1D=477|y0t;3$FixAy&CqEsY!tYh-?FcNJZwxZAZUZZ z<-E3PR1)Z5@FLZs(6wm=7)?f~sz@!Z?#dD0d0)%whBjyg+6Nm^mn#oBMudnB?_D3$ z$&)>;Z<{vz2yhqdXQrj~aYE|;_;Oy+oNDJNS2VErAxX^L;Z+d$NfQjXKQu)uJHcSE zv)tU@ugw0CW$9IVnzH_XSGzCL-LLwz#ihM&|Et|$g~~TK{jbk+Xg{V8`lH>i`d_t% zgwipI;#xlIb_I}c_24S2x;rg*;`rfYR3q_f+Zd`DG9@udA9^3SL@o2l1DSe*!F=+d znx?-xx7C|xi!#kA7Wh0rq8rR9%sFS5lTQqX8%#WO>{eMho8)?b9@C;xU!vI;z`$w+ z=`p2CC>xFqjZ2q}Gsn8Bfpi6}8TgyNf~VF$jRErGR72PJC1&x%e3&lvMb}591$dpt zP-dJ|M&~CPoCUtsllva*$w;hZ^uy|=k4ebj%SmkW=gZjUb3B0~%Dr;=3CN!=&A0Cs zCuv@c*4~GYQvPj!Inw(c(1;e_?bzP-AYe{*clpVDpBfl%E6H0K%{KG-a<3*wtpaUb z&(2jkqQhu=HAm_&qUR*88(suc($&>`xc1?wwo86nTFq%tepJTa zmR5D1lwa4gcARo8n;o@7CoL!S@tz&YYqi?9CC5gFj+Q6D!Y1<=? zG|%kfdUY7caUiF#lD{_e_dusAG|H;BeR9bcszA!tYtM9Ic?eNL)J zjTVp6__}U?>v(%xRN>=RBb$~|9e3bSvni~%XsG;AODg|vuC+U(>qkJ%o12#Qvbecv z`>f*ZXszVb-Pc+!>u8>#?bGF`dVIRj=Nd2b_`#dy@G|GeDc_N!(Oyy3e0W)Fw05s0qesp>3BO$9riZGl9&rAMlc^Lmz@>i4idA-MYHL;2`S>)44X;H?n zmi*PEnW1OrEgfsEH-1E?cMl$6_qZ(gD%JJ(^MbbHLsF;rNBmYseGXmRyDrMrhxP&P ztgQ`yZ@CLCVMMN2$GsF2*K%&(q^^9TFZk&5>omhcsu$I=Tv4ZR%%w<6tNtRLjyg`e zeNzSQ#JHF*&Wr35YZbQGyG8TPCdU&vkRHvKWa~lvpbs0G_@Fi6mA+?wG(2v_B&g09 z4+eA^__l4N}J+8HrE`P%!R)N{c?%OKyqwngxwdG#81lTk85o;l8BTn*Ov1cRP87 zN}JU_7yt!ZtG`osliqHoRc)(4)52(zVt;)_8l%B)B_}3Yl&cmc`0bo1!TU6RJb}a5 zaeWm1?@MxtynZS1uAIUVi@~d+w4M870VZwR)5smTz!Kr-j}O> zm4A;2^uN0SP_&O3B*^Bt0`$wRD~n9Kq$o0O0T#lKeR^9?N)qc~jMYlzkJ-)5Nb8mPnVx6gC{!uKDAnZCeX&a z@FHu?&-aUpYh!3jd*sk{QxFQMwrDHIQB^jStQd_4#rgYCq zQKn0n&^GK0-j}1-IV~-HEe@^!qTE};KWVir$ufU)Gc->z*3L3LfO>pc@?~i)Czs#U zeZUg`2ty;;brBhLvTPfkkJET~jIC&Y)U^p2Fx=cM(}NRwY&Grs2RLsX+SMuXDJ|HW^aihanmUM>nU|DwCJ&Yf9fc13Il|9XWzOWV>2ZNnQ zsddY-pkYj-p-TyvHqK7+kLnys)h zMkHq(tdqfR_qNc;s(0J6o6c>n{de@Of#tWF)@qRQUsCOk+qKKw-G*Ca9kazJxk_`eEf95O_b^J;|ZTkr$1)r zIII8dIoE(U@0S@JzL};U&yQb16V?jA31=mVqnX9zDBYBazz5bvx`YFt4tW#5F6k)e zae4(e26!os-|{+tI{WyN>yYu|A9!`W$oPa$@=y9y;H#r|^V75#U#wbG`j{d>o|Yp+ z@=ihmTK2d?BHK+j@Bt8#ks#(0J2{7yt02 z-Hl=0y>u{G;0o;)9_MRlJYPeVtVWITqKhTtf;TLDoV4Vtd`MTYRsA`Q&u(rOl3!NQ zP){u#aj|vAwMrVlJRyY^#apd?)Gap&8BmGa{-!OW{4M2DK=i6 zbiBhZ>#!z&7ja8sAI|voEz!T(Y))6SW4*9iI-1-4Z9Mau{UmLQK0Isfs6O=hbuu|G z(s8G{RdZGGMWby&{5SqKKA}BEHOFsj89u&f#QC?ay&rw3<;?PHC_@9Tr1_En)-YeCM&2V?`frDC@06}N`tdW z-sk2P+c_>%ZhU4Ba$J&8O(@*P*Q>QGD{IA@t@WRJa`-$hTk(^-e($*9yCMn6ezDts zijtg+#(9Nl?Wn@L#cG7negPhf_)m>?zT%zOz}ebZ7q{c&F1lfIZw10_g#cZTAKLF% zj_=P;I_B$dx;Mk!+&n*_d*P}*HejlEdp9>bk|w`p4*21OFXGaoSol~#yuh1|1)apD zVM)Y|rdkxL{Gw;^o0DNi>^L6`W?gcBGt5W4sCso47xnI@j|ke+_$H*W`n#bB^#>|$?gm3jB{|E1O7ODn8@(9O-3 z96CL-Ijd<5$DOv6sf;hvQO)LGy2DF;eL0(#`i^AsyYH{GRPh6;m41ld?$26s*IWL$ zj?P`IM&ni(lCa*ng}e z<+Sww1%%AEyZsNG&CCDzxBuyXmZZtA!4z;)WHCNC;g`4;4$|B=ob+4c%lZ^2hjYX8 z>9~J7FK}(LsQT3BCN^$=na)0)rEM4b+NPkz%WqadgddF9q<>qcs4a7&Zvbwh$M3Fv zh#NaLSt~A^(92DMQ74?j_iHyoZfc+AJC6k!x^}u~+E{#0XkJN}`Pb!tx`dg<6=;D4PH2RAn+tw}P~@5sHK`CyRM3*8%$@^z9Q3#h&3ah2tbM!Z`zs*7syb&5xg zy?ky+VYVY~xB5J^bwJycv8UTb==x;Vg*@ zsr>*{Z7~eww>% zXbg$AP;3Ea)5wZ{&stI44@Z1ft)W(I@E?h^zz}T0#N6DN%|j=eOc#^uiywCEmLq>^ zcyPTVs)<5Pxp*G8)$nQi29Y_G{CPu&%7Zy=b+g(}J8P6l+PS>jtE|;46X7k+S`AR} zr^Tr0aIq6tVLD7)f zvq)fgo8oTuGM(-h@$_^!AB{6ur%xyCjI5pUYWa4Qgx3xwrpv_UL?(ETD=p!%%UZmrqXx4YrE>Zq)V z84;#^T|=`l#-@{93uOC@_|;jO;23yLhx_oi;B2vEdgPoPFG|RZBNTk(`7^&gjNqo?sWaYQnr?VNtJ(|aF(xcYf z^D;f9SdjD6Ntr%gWEB3vi|it8efG`#=;*vizc=O|lUC4%0!;YjkClw!W5jmHZPKzbH+v7d6&0libz_%Qg%*1MWKwtJYU&s{09|?1M?P z3ej-7rP@48L+2+vV;|T0@Wt&9>uUhM^zFZ_?-9w*g(RWBy?GABa2&_46RWs5-v8}n z{Op8hRTA)TKi$0R7}x21(JnuK7-Ss@em||`_c3m+OQB+aSeDEA$MZ7P*B_2}QjzFs zi$qUb(oAwt983=n*FGO2kz(cS)low-p78Hn+B&uP!FTRD5NIyx-XT7(I8oMWy|Z1e zr0du2Fh9r+fj^6uHECXJK>Mv`P@pnSz$}=8+8_ZTr$qo;z zeX#T)M&k-FYyjQjh>Zs!Ng^8j#+LC@-nC_DFOv1zBO`1)vx*4dhY--9^ z8-|`FocG7Zk`BLj6O)}kqFpv8$ zChyW;*ctGYk)?Rd`nx61pg4logJ24Quow)?41XQd?2Ztg2jahfo(Ub)elLqnDZbFf zIOsv87TBNf#(l&)LNIh}&+yHd^~Y6uH6GUA9cH_7vD;`7~qc(yy;JtH@Z)7Am7dA$rzP0Sx60VdYZsa4-G5ucJ;vs{3j^V4P` zGoJ03;IND8VOUwlW~5?hpE7bL}hd zKfp8Yb>;o9@rZk0`B(j~#?{5Hy#E!_^>wZ*zI+njKNvjty8l&2OFDnja8G77hG~OA z%O|{yALLIC%EJ-OXR6}XEyV31R6D)Ja(cX6oS$J=S|=tX4(^xM$Hnwh6QNb^r$e1~ zU>}ag<+%BO;%JeV{W^ODP)@qQLwE8xRl}AbR)2mvDQ5^2e{+*}47clPlX7v|U-7J4 zH91v`?%Iv!g1lVW$Mcf*p!NM>Wo!Gj;)_jPzW=WWzh2q4`1MNJhkqU7ezyC6*+2a2 z{%AP98T`fms~6r3{&Hpi)tTH30J(g0f3B6!t)cFJ=LhN#)~qqiUibgCx=zge+9KNh zRal3Ay=M$CIie03R&gNLW1RHgFVk#-5Gef>f0~jbn2s{_`h-u@qpTc%cfz&9|9Jj{ z&pw^a(tJ{;5bU1Sg>Dzo#!Q+eKHP2J4&y#FE?u#jBLYRhs1$x(C@x^%wLaxVmo#yAKe73Y9or1E#4z)Pps&Ot7pdkaNjCpxL%Wn)u;E5>ZOx-kri9%xTo4XC=Ktr}1NFG*tC zwq4C$!n>VeU(?Tn+XH)O2BB*-V==>jGt>Xo^7!LdP@8?VTpk~P{P-0c<_q?hkvYbs zT`1VJi)Ff2=<+qlmIT0b3OFHOl;%T?aX&5>Nm>+(7#JNbNLg}Tl#5fQiyBnNxY$>}{(7`nj7bEAJL^|$2!~^T3c+xe zrA%OjAJfjD(1*;c9Nn*xW!ls;aB$ci2JJ@T2ScN1L$t!til5GN9G!nRnI?v~)=cfqfroZH*)jKUT@i_v)B-f zic<^Ei+6NXEZeyLTkw53IY|rl>#rJBKc=(EMapn{Ap0Jo`&R{1qCx?G_Uo?@0H`Q$ zuk5PmB2U|eb)6KKAemt&QWMsXz+B1}^Fp9f{nlS`tihzB^#>KP$H_Y!ej@+YKJRM^ zg^R=LVf8C9p?}yN;yW;VxEoT#|0QqX~N!>vmnKj}7q>hZqCcy;@o1p`O(a(Q0Vj}5>#yg_tnSS_^oE-4gTP$9oqtYmFGAfzdEf3f|NMw#x6kQv!KuabB3+8Y zEYm3idKR3e*0F`v`RlL1-)qmd)6A- zGj%&)(FEUe`3VJokIr$dR%Zbh=6Q{rUj$cLfy;b;8(?YtfOY{@cl!JCzFmWHh}(sZ zv&BcKw-@JUw`dWX*^#T3lDT+NSR$jP^L!O5?h3bnFMH&l9%AI7l3)+*Nh zxmB-(_OteX+8X2UxOg(0kKnlg68{5D;yfuU1t*%Qy=9Tn!acZI)U07Gg5u~4aqqhFtvpRxXXfvlvbq+skA{)Q)^8< zq6=$(59?0sF$7Mk3&;6N(}v0iqoG!<$=z>~hvjJ8yzNzPO!JDqKb>#2e$2actym|2 z$#QafmUauxr?u$=#q(~-`KmZSYtH)ITOK4&4$OClhpXcJV<&Uk&cs&fhx5};fur>T z-=uk`z^q+>deO-~YiC2G@8q7gbLYi7@|^B}-r++Vn^>)AQl>|-%|D*Uk{4O5_;emS z{9+NiyjaE_pJuVo-{o<@?=9m4(8Q}&#UB4F@m+k!N9Pds3#e%DL zezC-RlRwS4)6RdF zKAms+a`q%Xn9h;8;mflp@xjM){_&jODssqA@#PURb>*$Uhx{LDeErSyw?BUW@L$G2 zjH5&T?#VCj@LXhv{O#j6&wr%1i$(R;q)oHK6@N3053Y4seP}%MKG%^=#s>$=^SKwv zLw*qWArFJ-@NmUV+2qr5o@Mi-Rb1MC%H>jZicOZMX+EE{Yn6@)xpbA&D)wtsIB7ShR2XupJl8qo2QrHMkh?Yp!w-Bo=88@?N>SJE{en_|5cxpv2)S zdvE@AZt1$KlJni&ibGXd#=|px5N8MW;b4${9cNHk*Ag%Kx@z5kMl0>;90uw))B5~7 zMEa_+Dy@tk9G%w(k>H2bjHpz%M|5=Fj3`zk-K(SXgHm@3PrG%`>QEp%l6thv;?U-w zo@_J^^EK$uw=mLMM%f;xCj&BSs@pmUN6t|J*SPqw5xQWCYYEX_9;?cGO?g#k7b9;&FT8aw?#eH2fKKFgfm-=h`!I#|Q zLM1MBEwlnoZ7447ULgaAJ3+|JZ(>s+hq@U^E^U_!C7r|%X)Lbd(xE()TLxCZr7ob9>}4Nz%lpc*18=v_1Q_gxUSgm@y%Eyo$|i6;bD=%8FFx!J zU3<42?S6O`x}K#1<@O4p6g_g+2Z)=OLMX@MZs_ysK~D$9Eh3KYM?B~y!p95&{E`ba z>=Z&qF2;dgn5RPc9(MzOu75uFUVhjehK`Hg^tG5UGrjCrz3k6~PWs$m@r!LpqkM<3 z#+C+a49sQ7oyZEfr?`|+FL7KR05pDRNk8Dy3kjgg^0;!s#BnW07x1JN;uAa~S1KD{ z0}E}8-9sb?4A{4+w4&#ro{`XP%?JT2z>EMml1oghG&9nk5NS()WjUCR0BGjXWK@rd zaM0V38V+491K_#ia}_#?zzFS-i!jn73keORlr%JjRirG3yFR{l94q93UK+rxKqkVi zW))q)#43je1l6FLxMrCpI8noY-& zPJ1%B@Cj)M-6aj0-CkThOr~SWLWGWmL1Fy9Ye^oub|NHAqZcSlaKOXBC%A-CjcdM+YQV9_D~M zQ6U_~orvonENe|&CDBxLmL`PC+xj!RmnhwU(ko5)4oyxZ3wpD4qFpFEF5<|n<9o` z>D3}#0*B&mq_H8;R6XR08I*fOL6H|;0kyYh8kEN zmo5oOq14ab?}Qbwg4Tm)kXQgQBb7j&!W2aY^}(8juiunSpWAk?^SKt0eD2tWk0>p} z;jgPS+2OuhP#H%Tb=Db3Pb0Rc>5}vujo6-lL2OTIaRE{sAfRk8Q;++$oA`jNYuj2* z;J&Ts$>lxY_CNnFqGG@fj5yik?o!@Ydo^az$Bc9(?3vBJ*UA%{WxmH;k9{v)4?LTlqYmLI5z!M9DsnBy zafEVh5vU*&N-8FN*I^>`m<;S*B3w`|#{ubb91&0z(qTe6nUD^CfmWAx)EzRx69 zkmE4vM9h^DKIthLG2fRgQ!>QEX9B{12LSik?FkE91&F@A4beQ7sq@!-PeZofugnXj zF)zR*{c?$A1aft#KcN9=4@bQiU4hme}QNTrkt1SiIO|lI6 zPjo)in)Pu<{(hZqtqt-&{{4ThEhR5iBpJxF4A@T^S)Zv$erDS;jA`XR{=5Hwt*HY@ zZL?=G4C|uO^**z08Fd~1`#({~?y&#T@dMpQyUug`;4|Bn(I1(X|NVbyT6R4u>xE8T z*7Z=ymW-Ro>H0T_=fxjY{Zpon8bc2xk zsAL20AjCv$&gie7P+Tuz$x2K`1@c&ZHFx zhq;y#4ht-=kj%3}M$f&3vYBsT03OrdFh67v+?Z6MrI;I7A;X@%rI=K%<@Eq0l(HOu7BcBk4fqo>>CF5@DuHo?!o?qHGg~Nu-UY%F(h~|WbbWA% zFz5*l*h~=6XHFCaPIw_)`s9gC>N;`9eRulqe0h5wCq3j-$jwh6@00$TAS11R6JZ6K+ zh(Jgwl*Hc<6vwfYMIhB;8ZAmJ5lev<^t9YYL<#T%9?(j`JTZwTnTuXHAX1SMen_;! ze(M8Cl?wHLB3EueiUst_XMvt62|*x4AAz%)7zCgxcm#Tby0`(MsmDU^t&Du;y6A|q z!ya=ZJHZq%q-(4h{Ss%Y-9;986{y*`HrZJ#l7!BfCAnAD0Ne z^Wmk>JU7gQFZ>JPc$qJxOgvvm+r^X;veU?&P$);46&ljq2R-3JFph*r4R}FtA(aj3 zAi;8SSRg{lq$i|wm_nsC)~JADNA)?59;pR+&GU%uU?!o2M}Z!#cc3U{FQjjXkot^JD^Sb>Xk5?MpivQEEfbC4ByZTVrqBs8qdQ5GbB73?BFRNU`v=E|Qcc+lC6W|@X|@0sg( zkA0VgHj{oxt0A&nrX*9o4Gm4iBoD1QEj{~3il96Q7W3J;dJ-NQu zZnBz!iahwLZkS{fYFT1d&~m7#VX>iMCWXVC3nxqhpGhyIhLwzT(@I9&#%Gh|*dO#PN1!QC$P|$v+wu}<1O#Yx zNUAUz0(62PAhg_j_|{<#(K1jYqHeyBq3{i5BZlBHg$-#yJS=3=PlT&j6cWuQybfGU zIcSSgAqh$@Wr6~MzCp;q>2>`%}<)X#TIQcQo?V-g87FEK^2uBjJ)3PP(7 z7*IOY97rbcJBrkhP*o_crSM2w(G${BLP=S3ipUN$oFa#*uvXMDBz{lhzYQ6KWROa{ z8am*qEy!4Hs4y*h_Y6d~!GqBRqb@+Ah6Bk`4G;_nNQAB-pvT`GJ;ts_9&8)-v*7b- z%lbR zJq~OuWFB@oV8IYWTx@t)f)Rt;cqC~kP%;v~1ubJ?lafMCzU3x zTlLm5y3%(Gcy$p0m2NPrv!BuI0B}H$zbok`K48TpAj=~Bkr2)OV5U?;J4~A?rzc2e z5|G`%CJ5vtEBgX-5t2GG9cP1f=gozBOHxSyGBS~QZ#^>6OP_|^?brIE%&8yxb8fGU+ z(E#r1n5_rx25ja*gp@sSe+?I$(Tj*2(pJGHiWmSTWg%Unr>>Ulm~T_30tNZ91;k1~ z)0x2BUZUl1lFR@|=DJKlOteY1b(pKDMW|6oT@642W(mlsONrsT+ z9(MCm$OyV^AS}o`s4&3DlvKi327n|BV~!oz;{m%KBQO#$89^61e@10X;j>T>IU+q0 zX>4eN4}uT)6?$h+I~5YF0nVQA1cVYHl>j&ez)4u*kO3N75L*D8g5W6tPoY#Kd|OZT z=MQ`_pXXXVKK}61BUv_(mYaAnyv_wj!aVXF>lLTC#e! zeanS#?Q3;QWS%G&n_q0P@h#8-oj1AkYzH(1U7u7Dt7>+wuniOi0l*_ooNZe&abS04 zHgi3RZaRVGn`h;K(?mq;&@pV!L+l+X6EDIEfg`qXJ>uB#e?KzMl+SDtDa!?3xdPRh zJO`4TXCq5Fc4Cu>TY>GnaAdHQpiKQ-2&jvt@6#_O^%78Wn}vAynNIf${gL#MqCfZb z$Xoc}hnOtF2iFcQsKs5_Q zxYDEcSl#mH-f0lnmY!h=oN#E{sQ@5H1(q)Yf+;<}e^qX1LEynC^|RyHmL$x8a7yUs z;A0lRwMah~x(XTTbX}cqfogR0t!*hXjIfiyN+aGMx-6i56x!$s67L)g5S#)86&nHA zim+|~Brp+dwX}Lk7)>kGN&vIb%>ppc#z{uX5C_Ux(tJb5W^}W(n^L4=L%Nh&tx9GB z%q(gVf3AdfEFkLHV@@PU?-4j;C-;f~J@jpesWK7)v|7GUEFxYT2`LNVhzML5gVqu! z2JQ8POOP{P6~Y6t51=ltDkm2cAzfnmiqy7JxLS}R#$u5&EW`+UwmH1tl1pvsjX z^#hooR*#Cn7cS8UB|IaqkXBA+K9jvf(y9)bay;gf65WwZ`jRvLp?^)EOgl}=vu@) z`J<`cJy~Fk96%~?+yJs05+flU%8nwT9O$MUvWobm?T2y1C(8>I{faDSe@0nj zYr;lqKM)9F?+}g|1P*PIa}}AtSV+onh{2?&3$!&OU|!f`eq<>YDHgfbUCZ*866|*k zy*={YKR@*L$b0|n(7Pk=Z5TS!o3+UOJ=%&kl#GrGrXM$8o|MTNA8FW#W+I@dg-gC9@eBGe{Gpt&8NHgN>5c(>7+}Ok|IwzeU8&zd2HWV$46hEF@DKmbf$PiXjs0wy9B&~|}9YuW{|fTfyAf3u$FHkb@T z8BhhbcPFTNYq0t!LDg%+>YoHvuM4X`2r8pUZGox>^?^pcmMV3lc2&bve{0IL7D+2t z1K({dpUkzXX|ger&LSk81_-+%u#yHtdeC#(@FK@z4k1(o0_rPxmr2@uJKCrVdJIY0 zBV`D21+XL^mKPt!CA+6(A7Y~*$oT7EG6M({suHiL!?$`bf` zvY$x#sI|ABdw-y;X{y$L8No)v(?0-kp=AMSB*hY5{9q=OlL)Q!g1whmJ@nPKwv7VA zNy7pKB)A#L1Y}9|BL;njEwjK_sEy()1K&@yiP2F#m>bA>LYLO0f0f)aP^qV#CZO|z zmY6o&OgT)>l#?h|kc&Y;n|=jE23I17Dx#Qfa9io?Y#;{Z;-Z8GtSw%k~nmSKqO0 z_yJ4bikyJqWQmQ9oWla&qD{XaTFMWs&_yXn(8!gLe~~ng71*F!aawZ-Wz(+LS74Il!AJfUdeiT#@1aYzH7q6B{~-)}%n=d96+4rm z<%;OBgq{#uZsiZ%(inmEKA!RXEZW5$Q+-wsFuk(q@|l88#}GBsDaE z>XHWxfA}br5uvsjy6_8(EMNJ8ynTESEv2;Ep0Cv7(1Yp~2j!eVvA_czfg%n*3)m9r zX8`sPXdfd!Ca zq+$W-L=+=E5bXdZi!xHp*nyWw7FxFN3(Jq7yYMY9@`R5!xJ$Yb+})*Rdy1T35>f+4 zu(M0o7rq_Q7?c8>MG%Co*B%oNWFX<%FsS&J7eGtnR?ig_-3p4KhGM9p7+5YjuvCvN ze<(VxS3%KsJPk(IxdlcAAO??}5aJay7EXYofT4cSKxD2R^%#*2*QMVU09Bv?sDe!Z z3?esynjo}2LA2fpMMQp#ZscWj7#B+=Z~-^p3Oz{f0W1WhRR@(1ln4U9^;W~I$LJ`T za(oRn{rvd@zep1-<^Z032|46~|IwKRe>n4z%Id8{&gxDC+T|Lff2fW8`mbVMPqR*C z+y}H50 zGdSX#wrNnG2a#q((BsV<52uW<<@d~C5#f6H>qVbG@XPr$olQ!z7paKZGcWVN5~P=C z4+c-qk&PW3Gy}}Rp%xrI6q0%1eh5DWhj|FQ<(%Sl!2f4dJ%YpCNhIg0t=7%*x+Fsgn6F{`8|%MQR5y{3Muva z)kZZt0|$0=^RE6*NZGXS+-hGrf3p4X&$RE}Za?~S?K|!tZa<9pHJxWd|Z|SIAk2*tO${^a{ z2&h9(bau_rz%s}0b(aWSe_U7ObC**R?}E#zI~6vaBAoeiozlykI;G^F?No(D97s@i zD*E%C3Yty@f3Q;@C(Fsl#YMWoH`~V&)jq$O!;FJAYk6ohxM;(jmywrn;xgl;&B#le z;gGh?$lX?;uw<~6km&~efV{OC{E}*iZ3=@yF59f~*=DuRc2YTQf3wEmSie+P_|m5s9ZpJJpm8q?l?l8ur%QSU#?MoA{h-v3)|lxrr+2z&8J z&z)O)s+oCrE8nV$d=#_Avg|ridv?IJA19}??&g#;PW>k~$Ejx=H;%weU`537c}n%F z5HsY_$vu}#&*##yxr}_SR7hvR>6A1og-0ikxD0xUq8dF8e@-Y9o7>#e|C4toXe&i< zP;_?OPm~Ul10F~xtU=Q0#0=L*?a9rPYd=r?P!I`SFAAG~nG;2hC%K*3T(~&Q>qMa! z=u%PSIzjvCGkfVp4*8&8xH#b#MV?)&RkbrAUMMHBL-I>x+cLTag=Qkm`s2{gW;S>2 z#EBxQz!bE}f2#Fh0=Kw20!O5yqR z5CeUWqv+HE&g&ztjyd)oXxW6~3kfqnLy-zQTC3 z3hPB!J^cRy<2w+<$QztT*2XFMu;M>_;ZS@ASB;a_sc@S#S`qsxblEfC_t{T2o!w?X zQSt>ve-L38^fJTQ>%eBT)UH!SOyMR;R8=8hW3fT?? ztpcx?C$?(2lSyC zP>-S)$PKywP5X^l3g|aFch^K@N7-Vr?1o10ZABPKVG!`rps@7V42Y_cSVo_FFSmSB z<1qE238Jw4V+}@n&4@qUUY4Zu?2i)))QP3n`x0@e3Rc7GHuw8TL(^Eh{0xJ>v$5=K ze}enIO=}-shQ7li6WB`8J{^&OH0P8-^;i4v(OX2)fTPi-mxxLZpk15}pyz?b3q{d? z5JCnV8{YNY4;2@#hroTlN_31M={t3RImZQ0a(wQ20i?eV2#*}PI$R(k8`+%LwQna1 zY9taaxTC`kx!mlw1eQn(2 zNfmJsC?~<87jS>5Qal|5DuQsP0;q7jC?Rpdz*h?tdX%3(7@S>0qrfL5@imetk0Nk- zgjtF!lKvnVCN$-+t0Fd(!znmp4=Cs*5yccEK}8123c2G@q%?(aZ64e|6Az=5k%z!> zA$MWj;W~ty?<2NFA_=1?P?rSme^FGZfDpsxh+&I{9SQ~NdKH32IzXJmO%yg00ulI# z_-WF|x`0e0O&B}wdQiiJgms7%hft)hA2k6TeIKWEOn3FvI*@-H9n-NMQ=$W$I$Ekj z?|cfgry(K~^;Ec!@_drLZHhBT^-7G)2)R<4KM5^Q6P=D0Bs6b2(;x!i88D=RY`%cm{JzKk}^K^3c zbfFIitliu2DO-z3UbS0xn>;w|G}mcqLnEE~uUD(tq;A)+Z^fJR7?+m!$JdS;A7FjrKTCOs-1m@xFb83zQv;=xI^@2;p5gnx9~^Q& z*!D%ZEhE}eRbb!Be~?njLmtRmRRuv19`ZnK%W&2+96TQKz#Si0676+zy+a;&+v?gj z^x)ReBiD6N)wZDr(Z<$IL*cgZhw{$P)Rxx5TR`wU&+`v?=x!NNxNZ7Fe_O^n4A9+e z!w;h^8IirEL(&emj6wSL8t%#pc>|AIw(_Wcr0Y5NJTKywe;+C3R+HuX{vlVcy`>2i z-2#W_+6pKk^*154VskP@J1+1}Pe!zs9BeCZP1V~wS2+7Lw=%+p7(zZ2-UB~u( zZrSKndm5Fid+xbW=o<(D6rt-oTZR<6;kK0=x>2|dlRR5(#U=YzTdrOop=WQ$KzZvc z-=U=&dXX9*e+c-XL?O3a-?de}dP6UAw`ck`eJLGBI^1%6KMZPQ_0i3(c|PVrI-bid zPbnvAWrbU_fE^pMENParFXfjCL~HX$smk7%D%<@Jf-p3-8Il+J>W8Ie^+Wm@tx@Id{=!T-}yp5k&Vy} zHA$(*<@#IFgDvTs#}VqK9{O&y`8&_P`#Vo=MLX2-F?K zC{Qije{aoQU9a-*Zr5?vmtT|R-^!|3u@h|1B5LJ$GH7E&D}OT}RFw}VX(Hy(tvkbT=%9cJF-J737Zo5lvA z|AqYE?)*w{+kmGLFhZ%w3xQ_%S<7!V9Z7$E=pnIQ<$0k4G0t0uZ3xNm&it^2abBSO z_5A4We5p3p?@=D+UD;mXb1R^qwQs#Ia zhIi+C;hhUI^rE}-effoaRqvtJGMsH0?zRkXOGdS8ZxMAP_lAM9je~{3EkQLj%q85g zL$50gwr6eKriMYZHA_?E?GvOa@*Vl^f87g&DE3|X;rbq+-Zg|#OJZz^ES0nq5=p)- zQPvQN)SdZ}EpP8Cb+@Abhq*V=lH<4zM1Q4`qiIyBvkMuyXEl-#2{c7Y+{`AarB!29 zS7mo4k(F7j%IXFP)Ert1Nf5FYOQIG_q9j|^E=!uRvdn4}Rc%27X;socw!*k$~>7Yp%d#3{>dLFIGfMkhk z5QUCUt||~RgSyv1g1mF!zFalqY7!KfK}I$yK|nPUWDf%TS86Wlg1U~bvxr?q#EFZ} z%Yf10#X*?hJ&8Z07QA!dVds)hf86LCc-%>|6LKbXz=@#hdaf(-rY$@;2OgdSkIsR| zoo7};c=HbUIcHjO&Y6}ZzINNU@SqEx_=KC$+^;!sx97*YD~806V{LK*rvt~ZLvpA~ z4r0DdBoes^di{Wd)`Uh_MA$fE!^nw!eqLJcnjSUCAqTg4)8h>B zIT^^(FbW7bLwH_>sEe7y8RGLYkgXe%V@(w@kON0{X9kQ(KgJQS1_}9)Gq~Lu$f@0h zsKjy4$pB2XNmRla{PQvdf8E2RG_)&2bY6z|oD6=_J>5y*oR@)&BKBjXBcwP(*p(rO zJko^mngwU*yhp(q+%855XYjh725^S3YqD^L=)4RV)jmoBghhA!A!q13af8SUo%1rF zbD9Xx8%22DD8l%>49PheBByKn;|!=H!^XC*j*I+cIbO{2$pgL0e|jE96rq~|MqKPi z+|w+E4jj+%kv%~8&BPvrK^P!0n-clJg||{i$dV);YO+Kq#&1vrCr*OnIJO;1ytbyqk0b7B7jigB7&KX8-w#~w zkr%Q=UhKky7C?Qae;pM2k!v?Esq}d_5NbgLwGI}UFvpLQfO`RE_@~H?lgQ%>Z^6(5Kk~u`ud(a+3A_p`sAot^LLz&Kc#R1!Noxrc5bqScIxWKJn}#d)ly$== zZ-WvxgLa&b_-aftxPedf?=ZSUpL;syXP))Y4PuXbMCJxTf6jatZUoIkOkyhiint7Y zujyojJP%%&<;5i;D~!nltBB7CmNKo$^&?O7z`U`oN0;O4^?KR0SDTxAP54}fD?090aptD$LDi&!w88`^jE1y1ZM}g;yfW!cxT)757 zNHD%&&_dl#sMiTqCX@k!&SIgdSkzfeL5Yh(Cmh-2tVM3<_&R7vABUYJ!EYRK7z*ksP7kOBgvbs2_F&;ke|RW}+v9{Q5xVRUVIvaiLnvvL zfw-Q<0Y080cq{Q7SA@Eb*Pay|)&t*Bj z_=Jr#h;WhH(ti%%hDiccpREW18sYkY;CoSwv}Ikj(%6YwIxhf1)Y5lhtfVRx34VYV zLmX9AzmX58)CDaq{8>mq3mq6I~zMP+&tu6(jgo978ya z1g@8a!n7cy$B2*bYcbf@%nl|fh~ByKWqcR<63X-Dc^+PI2&t`VQ^2hMRCA4|94r z2Jh}8RWa@4@$mH()ERILUXvO+tt;zQzOm=};)TF~?e+wO=Z<~Z=4n2?|+NOQ(78#LV3+>ZA=C>Wg zym!kXswE<+XV3>a^0ppGPPECCD^!w^KH1eLd-`NwpB(6uLw$0jPmUFA0a5}^=S_u% zaJxiSOb|B+r4vM1L5B!AUeex$Q?T1vSXuG`zF`{Kp>J4<&80=yG!#NFLj{0he^OPX z06ZrMxVwoK5TnHBZX_B&gdaP(dx#d`dLge$-T)HEMQ6I0){*f7nupBOMgs*o`qS@5@UlPK0u9OlSmyCMqWIr3&K{ zIgP2%cE4e*Vc+{%iF?3l`v>n@L=+QHddXe96GAV@GnoG7H6;+b)iTnY9#;uwlF z+$2}VjhvWrlTDcczV!Ft+#)x7l$+$~N;_Ai|H$!;jRuA@hcJ_xgRf~ce^L+3F!6bg zQmsezJw&yFJA5lN=+AOnasz#Sq6in*15chDjd_Q1 z1jS)Y#EdZ3?no3V-H$>Le=-{JovRBvO5Ys~Ob!C|oyb8x>l?#J603CFCv@Be4%bvC z;l~G#ABXt9*TYxT0@ziL-h?a~*Hb2!PP^-Oj=}{kGq@%V{N@n}*iI)fFdv?$4iSYE zr0h%DRsb5{D$4>yfLYSWa~IJ~8&WFTrUN#Dq&;~d-8a+|=+oWUe~@nCpd-&QtQltX zqtvC!32+vmz`zSMIzHTy59BRzl|zjpIB}JeI(7&XL=z;2c6B^5y7`@mPXti{P#x8PoP&7b(0(V|0G|V0++NvPeF-UH)>moDA*Pd>a9>lI2 zbi=8L6>yS|Pw*4_fAnOGTq;fZM6-ip!B+`k7)AkM%|d0hGtBy-GJbkqVMa~6MI+yF zBLWXiwFZuYAIfVnYb{0IXlE-(JUxaY&Y-Kae9&1wJh!}7#EgR^*44`wOu93Kj&oiH zbWR5Eyxxa+7j@_GopW=b^Kt~|_9KY2v%uCth=tj6YDl_ke+bVVM<{dltZZcDTjR*# zmop;lqkpKtm=ZpRiEipk0aMR2d-tp zs5O!by0pF*{uzzhn_AY}s$5WzXlAI8w3(C@sxiK8&q3tMcYpm*Vr$MC>u z=CUatk@sJhXsW~wj9D=a95{;tSDlo&yXM$A4-N+@C}kv#{#gqTy5UHB?bWV~7|d-u zGEj3o!`)XLyHPisn!1|d!8!1_dxX@~)d)8)uGl}Pe<-ywHN%my>cBOFx+zf{_~*cb zbKv1Q@TeOeh9q=ta>D?ss zbaRQ-e{0Wyhv&efZa9^Uq{o;f_VmQPg(qEb9BDVyzw=;FWCF#ipi1uTs6~c}Xy6Q; z1PyCOYOe+z;py%1dR*j_b06Svh#F8u^Wef;c!^F8npJnhjpI<;7&8w~3d|5P_X z9$Ygu(4-40Jlv^J)CuLS*bG^`&IM&c_`Kvl0PRD4myv?Jsj7M~UIc_);0mGjg^RC@LG zPm9PAYmn%TII#A8(Q`w*yu3K#$er}a2zf$=cA6y;$sIySXg}7D-c8`G$Vs$#r@Vmp z?%0Cq>POOrb|j%zeS_5GITwS%e_JFsKnD?XJ0za&)M{wp$Mgx2f0Z0KNeKLs2TFJP zRY#dA>2*>;t@t|GfTNHQJn?mMfLyS-8U!JIyhO%2*ii9NAonJ(1!C26||8WEu4qeyvRIW9_wj|{Ce?O2P1KB2} z=d0$y4^L$fb3+r~{o0wPn@D{8<3@peqG%|-07~s^9s#jJFf0bL&ct^>g*J-Mf!L-a zLIRk*3erUav(tioEyLb`)i**4aV63qvfbiip~C8GUJU7C5nl|IWf^&${F(TE*dD2r zjnF+J`t+&L(Q^qgeJ|9!eJv$g&e`HFM5MFcXHK8mh zi=7h%<&ZuE`wn@5kp$&X1oDD%Lao$LD=0ewxt9pQR=6omdnE|UPWv`Xdld-QwQkIa zBr#BH%$<~Ck!~=oQ<9(zg&Szn1(gM535q(QIzd^=;&oQwcS3{ns}DOXh&rL73)BFN zKPkQiIa!+{M2hpbe|CA(r67sPE{GqreOXGTs#C0}XtGOQ)5$H4XswGJH-KV}hnKEs zsEd4hP(>&Z^kAutrB6jEK}d#@E;IX%$fKHQmy0|P*r+f7$r6dCmvGI4?GuP-DOlnn z_Do90BO2j0cR(J{-^droLwgmtfoyo&jx~fMGL#V*Qi(p&e|9GZZe23Bl{=g05lEjc z9PQLl6pCW9z5>x^*^5IMkt>SHP(7luVGvR$yV@C|GcQ`#%EJ;3Yf%hu2n{sSLX$+) zu@&c!l@(VNwbW-&(TcX2!4LsdD<;G46U1cX(1Hvo9ZR9l@B3Cvc41$$`zZ#G1iBD~ zlv6}HF*yX6e~bfS*OiC_B@8?UQUqki2O?2GW^D$@Z;R^WV|oD40gEy+tpqDjx0Og& z+Tx%Z5jhC3bvbfSBGk@H$TW-Zhmnv+&>I)Y_Y!W zkduxjSQx_1f-7JSxsFfQDmf}d=~x~%kl#i=W>vVr|FWUA@-d2Ha=8-3{2A9FABRxD z@gts<(J9t?$W65JF~LC+YtJqG=B~=dApA*GS0@`Ihi*YS4X)>CcPJ&VqI^t!UnC-z z)(XI4e@~N`T#+{Nuf7FEz7>gDBi~}(BeDjQKmb$u7Fbr9{>btzC1wTA|5m(hpY;(HL!GYUy%ld;mLh@M zQCB=b12^F}Vk_QiPv2I)6&D7b`BuKA=YDese?|pZU-ZoaUcB1UVRz|B&rYI1)Ti~d zXJsqkity$|T@`ReAx#xYdw%Xony5b ze@`dVvJ#5+tkyhJYn-Vy&D0uZYRxjWMwwcZOszqt)*Mr7j46qk{P0x~lZS#SrrzhK zM)VZGZbwm1RpN%AI=*4KX#~+gl0vDH6LE|aMhZX*=hFg!4-o`UpsKMGgp$nZFmW@M z#7%0cNQ?y|SJ&4_A=^Z^ND?lYBL))We@7j&6v~GaDg(uXlS3VnOVVW9p~O&q>=;m? z%8uFqJ}m&OOk-u_asVHBmSXK}OOBTiJHGkAc$@Jxrx-E9!)pIJH=*3ST_F(1yM~{ORBA< z&eD?2nX2Tk3VKcYgIHNcVS@c-f1ofj=omsT1;MasAW}xfe88%bBG_z3T|5kmU2P|r z-&l>1NxPRQlq>;5E|*$e7+TawpaNK9ohgf(Jb?zwOml0hAlmSnPGI0fM9GwD#KK7^ zN~MB#O5sVC+de=pbat7tnA)Z?>6GuU&LUGUi|Va0W$}|Au}1O)PQlfze?grYrYvQ) zyns%;q(RwS$8hdzB}=mGM9ETczU*p`R#BJ~yghrKsd>*gmI;kzOpKl)Gjb=YA}>-H z(%W9L=l~kq%Le4?)|t-kGU%|mWO3z8HkS>Yt{ddCeL|cz>1;7+ygX)!iKrze^D%l( zEIri@vx%iA*%vSuohw+957ynWX|rj+Ep9@tPKg5N1IO^2|eJhMHO! zHPO_=P)9g6mvsg1n^|&4Hw<=~Y;`a@O?}y_W_H0fOq zR$!%Rh9k|57;3FEH2|>6G{mi;Hkl?USY#TZK_@is9tjv*b;S5GE;4G3X)YQ#=fIJ% zYSkLE1$9#&u*U3y2j{@UbKp@oT(!n*LQ<*Lm@UY3!>+T&e-wg`VQ;Y9g+0VjqGp;} z;>e6N6<{0x$~6<^glrwh39v?X(Ad$%i4P|Z7DXHH*buP?g0x}-9wc(Ai4R7WScj28 z+}PC`f-_i!zCk8daY$N5^dp07hGo~ z%7G#y$aVGeib0-!VlgODdFjxB8a+n?^T3Urhg4V`&~h#~Dj*7Ga-E(A#_IG{SR7DT zXzWXjw72bzeXKby5Veo(iX75qA6w7Pv5)m*t;ah*f6*ofjy@S_y#h6^QjIXL17>!b zYAj;*oN6laV6I2;rJhF(%0gc@QFOv2qN>cqN=7nKWR9s4FUAX6G*NT{KcG5L4tBXN znJ5NS^Dl%gq`D-<$DSu!C*n932`HSnuI!t%ePLiK7NXUiZsh-wV zDf6lte@Tk`E_1&{+^|Qn4m?P(qh`fIks&blMBPgfMUe}#p2>wNIIb7*{oNFB}7f0s+_EOo9%J>S*48g)p}8=b3B>f}6y~y!tK$%4Ea1^|pi3WkxXTBfnO#z({bea8#j8xKdbWjW$O(Aug zFKUN6F+AG&F$#3PN72CXTwm*ZG{Wb%-uEcHbsN2pI!@y1js7?cxyVz@{#s{wWVSVK ze@Fw^{W>?KC~#xF8`2m#uIh%g=?ZnIDY+qKcgC@Y$joTmkOm2k%b|5c+Ekv9>r~0g z5ASb}-uA%5l>CS#8%Q;~Iv}L{LA@+;L@ET;vj=pwIB+6rTNnETm-4rF->w2sFpb}l|jeC!ng^<7y4%N zg!l+d0>uZJj}>q+MDhDe=~5CXm;P*qf>7^Z(TNCM4n!NIFoCNY`S$cp1TtkF~B! z2MI_BPSSyOAaiGRij8NdUSrFNG`gn@)A`!b$ z;wteJ$!$qfi-%XAY_>)E$RgSKo)ap{bWM5i;x9!}jFH<0H;kz)AjSP7e+uaAQPN9- zShnJFk`TLXq)`+Dd{`+_6e&h%Lw-VHX-QEODNdNSprpu*uf&@0e^{YIvF#GF#P}M= z5K57u1fq+@oj4_ej80vzsU-o8J}Eq96mcscNbspH1ymHKH$o@#+okT}%Rs&U zUKB(8DusZDj^lc&{$7JVo!HXkp=;LhKrS(34l?49p-3^z5{aZAAQY+^e(iL|kw(pr zwLn#bWnDkk21d0;;ZK}iHeylh&kYmA5&Iy~sQ$IhpujWe|IxPSXe3F81#k9}m|wt7 z&@JxhNE%3XvRV^cf9zV_COzSlwVawbx;j-SYycIuy@)Wgs94fy&m+m4MmiRwBo0-Z z8am>DI#{Jfi$+*XKkeI$e$zwZxf3#ng&QSPQ=0GeDL^ zd5M{zHWu$uq?4DTxW%4NU3~a5?W8*{3_X+Ngu~6Y89fMQe{=qdC1ImwF-llCK8?o{ zc-k%O1zy5SRNMgp^iv~I;mP%7iAvgaA$Cy4iB*l+XxI`NKk<^-AY1vNgE$rl@R4*; z#YV{DF-67!f^LX9h$f1NFfNE%%!m*hgS8~hCE{S{8QMZ1(22&b$aC94dl89K83tym zM^)HF!At6Le~4p0%hH#Gna~Zw#3X`|2ANJ91OjQ~Flkt!OXAVWa#(}C1cB#NjWE{c zA>pSRkxbHSjNO==TB1;?s! z@jHn@7^+TP=*28C?JK}L9JK~LkQaMY8XPA zF4?#>Q3w$nJ*agp3#wmWks7;Nusj-40zyzoOI+Qn)pK}!Ll37~b{9di+=TL7>us+vgX6SI=YF=(Ac$k2G?8rq1|eF}Rd@(n17 z)RBXwovQR3h_W-csi~g4Y2;C20K|>~i9lds`ZdK|Q!&q^zhj#C*fjiDR0&1QUkp=J z)IsqJ4TS87P0b||{}OP(L|q4w>w3CKf^Zw7f5r@?>Dt{2#%sg_N(>%j=y4Em)@$un z3tYYZ8i+bfXm0cSOhv;n6lp|dt_|igx=ch{C=LuOPiG4ygs2OAmrtf7&ugur3?@#7cyrVOOJn0i z&l*q5C+O^)_}c;H*ho~19m)(Qs#Oj>f5;Qn-i?t&*ECa!YdVG+SziP3Og&H;+@?%u zEEAc|J6G85A_;EIokSqQ3N?46(Mlq$m|0H%WpxbcZ7j2E5R|M&Km~7W6a**)Wi=}cNv;uk!~2AGy&M-vC1S0 z6~d$n4Fnk>t`XuHA-)k37$Kn%5*Z<)c8MD}uHtIL4U8F>M-6ZB~`ToK0y^j7(nDkV>j`5f2ibfJORIXG=5wdYn#OV@g%9FElY#GMxs9*&tqN$ zL3Tj;cA;01$kfFSb$kl0KO#?CGc&pMwPh(11(}6wmr5I|;6mlBNLwkn*^lDR&L(;k zQpXvNesZWRfY@>}6?gT;{UG)wk%qWT3N)@cBrGyVAc1yJG{~FiYAHm4e_)`b14_?$ za-+1UxS%2zd@Qkxa7RCNs^boFE>$TPf*r&WsP8bK9EB$lTy~J~2owupa|}r_ z4zR;z;NXB^QBD!++ZP5RIUx6dqHDc)5I24vS6Gg55V8mcT7Mocik!gc&x3iYkG)Xq z&tu@AMp>6OPM1ZL5c-uwe~5rx7Ga$ij94B7zPsH}=fJTYJ(B@r?dY2pq$uLFe-ZPg z;b_6E`;r&!Fr@l#*YgwOZjd9oF;f7~!2FK*N)b+?#B zmr>}&LF21K!@x&av&igLLk=3ikVn^*-tRxlLk}g%Gh$096sP~b=SnUoLWjO}Mp3Bs zJHgTC2Xj8bQ3g&rqu7cf@OIhZh5|6wh-i^ACzA6+zJH>q#vSgk%r)|w{Cq2)XOe#k zvGBH%V?Q+GjUU=expGI?^7}iWduCIb;;K z{uHKh=r7A6vcE$WF8Mq3+u2o|9k$|^Al$tpmxemQ?my6_p+6SDcqmbcf1q??kTl^nzXUFXXqa49bA3=_IK#z9He=+i)f`ju1$PA8ATyTt%f@2gC z9HV^T7)1leC>1ze0)f`8oe2AnachT}|H(36)LooE<(eX_fuGPYsQA&MVVkgc>eGAg z#EF8IZ;FW9b#|_zmTw~0NM1yxD&KDzCtM_Mf-$17<(wi2^5xSYx>)y)vQ8Khc`h}l zO}NNGe-VkLN$sD)1hvNNxJ-g*03RnIw(`WUDKUrU!D!$FC}@2rw4V zr&W&OA+Zw>6O?BBZs<56&t4^M4?y`Nf>})(1h~HA!3&{Fyn4u#AFu5EHSED&6SySux)yL)hVcMfv-?vJnPR^6?x+S#4yS*fj=-sz|NaS$%f z_F&{dh8eVzB5~L{_xme~K|j-FAX^i!Z-u=YX`j(dFeQdJL>Yx9jxqHcD>Z#pzL%3i z@tXN~@C*IQ}jW>F+~nH@af#3b=CEe<6Jw$&)uFMAe*2qZNPh72bD zrp5YBMNCNe*igSpj_D9(6A*4TeNS`r_4THiRc(eLrzpWE2ARqCYXL6-*iroU!kCLu0V=L=4@5 z5VdGLz(QNf?&K0Rt$I2>FHbV?#gSt8s48YIyCy%+IbLTdwtfyOGR=nh_!HHx0lMyv zk{_;ZB_cv|DkisE0+AC|Xsb_1hlD+5Dq{2>b*LIXO8fuJx4&bIBqbq1);6I& z1^zcVk9jnGZ_j|W6E*ic90~sl71X`USuTmATh@w-qR^xK|0Dg`-l6}!VoE;KMBXY9 z97*E+uK@pBI*~-Eyssj`O!6+O{{Kmccaao)xAf}Qq(!J%)ReY$KB*}|K9L&lDc(kj zG_Qg*zUh4%&~^T4%8(!X8pke`n&vL0y3!CACL;2YzE(0~IC8*8=)L}K$``-CHn==) zm0S^MP5_E-1kuk~7*XD^jm^*1e~Iv}2Ll~@hje;}wq{nCzi{xA$Xuu&be-Y*{=kv% zJVb~p{e=Td|C4n}{JABw3KtQDZRfBM26T^NrqA^FW9d=8@aO|uPjX2(PS;^?g=^Mo zNwS!+^~gf_tH5E{C$DcRbDu(2)&*s5_73@|z#Tx|9Tg396n%y3XmV>-n?aEbw#2oC zJXU=2Tx4dyG=~M5YR6q7dDm78RY`-(L+vz_yqF%jYShB6$aBLyj4F?fRpm_1E|yuD zD!GKf{0VzhY}x@A-wu9Uni_T15y=Gw1@~Qb0U?Mo&bgpSRR8DEyR@I33jRzdRrG-n zd7$E8_lOmWnB`GUOz~A9*vZYV=moQWwJ~46)i`IGM>Absd zEkwPQ&6Zzt+@hVt%?=F_7fyyE4x5dLv0_8!=kd264Bitxl-PMQ|9wIFHXSbi_^(ED zQ`6A)-T#&OhZ|xSlfhRa#GY3Mdww*RO$tCguWtHNs}rO3o*ks@m(T2+#Sf z%*<|cXMPaQUF`{(hfRW9MdGrBxPiMte2D)G-m>J=@GKHPj+>jvV#wOdw{9^zWJrtw zAXWu$SEua5h>=Xs7d(opV;SDf%uE@-fQ^`rO>6evJY9ncQeOZgIiU1T zA$m&$e%bG$-?f=wqwk1#r10){$>C#BOEuY3wR@$&C5d@6BP+|nfS|-txko0ECaTtb z`y^_)WrEj*mS}0ssmI25O~cWShnsAU`csd0G4u-{W4G-+Ap0)Zc;lVz@o{^~eXiyL zK+w}A(u5>=BMA2y(u8iN+tK6Lp+Pf9*&&!^lcYA)XUr&`RHAKuaJy}@Q)OCKlZr`*>+v;ed5 zk1kOI7lQt0x3wcVHAyTwk=a$5DKhB|7Xk>>Wz?TSgsA=i9q(b;PDaZQ07B>6;FPB3 z{p&lz_}lo@$cJKO;KJ%hpZMOdXH&EckhhKQ!HwQQp{_wN$$?N0Q|J&t+3Ed0GNq}0 zfAMZ)CPNQ-$Rw%S0Akp0A#%~OFK@G=CxOQAZz|N6_?~TobrT;ATsC)5W+pc_C{SMC z*e*{1IjKqSca5Jf1Vp_i7p^+8$8EHYkx?GWIxEKSs@|)uRTHQu3Y{4YrM2a(d@KKS z8B2{P-Ro#i%H33%ZiX#N%Z|%;EOcZWBTM%d>{J$d87vFetKn3E3&*zAky$I|g?eLc(e{)i%l1SabAs8D--w1%^QkLd$*_Xmf5nt_mko;^RQG9V+SRLf3SlF_Gfd| zOb$)Ze0`lZ`2)$6nbjf)%viZFe$~R2-&r?B&F_xqbqZV-Eq2M5ovxDAfrxPSC;bO3 zkHo$F1k0$FzCN)nUk~41!jwV@Qhhy{roaT>N0u5#Wm(WMH40W819}`u5o&Uo@ssqW z;tgfsp3RY7&p2;Ar=%#`z3;fI*v09$x5wV@!E!{NPo9S}a#7yT4Zj3op7H(>J%w9s zI7{B^TLm?6)u3YV)$~S3gveM)J|Y1<%2ap}r+Ct+@VkLHC$RmGvgjBliTtH{J!ba80TrTS}vM_()k72PLARc zm3D2pV~gcdNaL|a2Y%Ucs(3<%>XYMWwbV!+ZAt!LKTCAH2ZH-a#FRRz)8&6!7RU(T_?X^~tvLNwya(h=0?q%dbJp8)E~7Zf^EIc7fAUTdP2d?Cdk*8-hSq zu<0WlJCnSenC03=RZ1n};EgzE09@a5KtU=TJcf48BDf`?v+N$ZvXH&Jv9Uj5c6qVk zb6V$O;V*wJ{D!8qAXgs4n4y;IA=h}0q>|Z`Guet60re=PIgAFEKdv2+{C{2*XrBcy z$>R*TZW3><^9E%{<~0D2oC$xzjrse6QQJtv3+dCBK&Rs~LWbI_JAoK95qI`7C9zo= zK&Wiiwm0ga8?F5aJaO&rjCH~0CZd7jC^SCA-krV7qH9!w+rT{jsO_BV zUAJ7PBfi85kFKYo__?aSl~%8>OCBowlg=JZ|7K|A&={yc$PsAR4vR5%B9%#{aNe8p z8%wZes8vzD*jBcLxBrLz+FuhD7?eA%Q0uIYbUe{YQl?}`hjNyWVrTqQi9jO}e~aer zY(?M>)77SPQF-H{bg*xqyi`9u1Z$SUp}!$;NK_m7Z z+Vag4v;Ep(%y-~1QotNoa^neK_5Hpq8K!^gnrJXsjT1|1e|F>;tj*NnUvD_- zcRsR52Jy|MhL!QImoVimlz_IAm)Fm?V7ztqoi-}26o9MS+k5Nly{*@kM$55bQ?9az z*WV0w;k#Dd;hb@SIj{U)?(Sb`a)0d!Ck;czCo;vOyHq7&m0Fb+BR2BrYQ6Hu)0yEp zDf-`LcX?6cooNb4-s`+eMZ} z+4FE{HKG>*3^R>Wjy8(no$k!M|4Y4pdHYJ#JKzme=urLHX)CHVuKZ0*3;Ban+IFg; zr3x4_>FDDv@kC4NXMWP?3fdqwz8r9BAB$RW0ve>j)+7jhZE>uFVsF2+GE(IQJvR}? z9K0d-pZ+GyO1(=sJ~EwZn?hm;kY2DEzDZf zWSANYB+=K}rJv#8z0H?GJSfI4-i+< z2g3e!+-g;{9D>1#e^gY3#%gX6-Q2$-RYcP|-It6q=V*X_Cu;Ce*IegioT>2fz1lN_ z=xglVv3lJ_8hS0*i%JVi%ok1?3HAoO?SFJ0FC^~&z9({HD( zm|L5ZjQ4PN$~z5FmZ2ml3vtiXJyZcE)AT=BbdHRL?dF%IsbURbV>jw|!ypfPK;EBWE{I7rOP|{{^f(X=I3zXw`QWAfAM29c zi9b08$&n`hy2cU~ja2r{D4<{KHqyp~4RV&@wGK;XWZ_x;vJ#b+$+~c&plM^WgSAz` zt@%P-plB>GF&bYrP)*%`k3X6U+_YD#hS`~K{kB3^Z>VNmfiuameXlj$PTy%)uFhx- z`s?Fv*)SH#5Z?M8j)y1ITcXm9?dI;g^K;P#US4?tSuwE8Epr#V%zO@3a8&~sRd>gG-j5RC5 z45CE$MZbbhBs8H4)5pm}o?P8x5+`HKd2=~L1y1jUcYm0lecaLkr(Oc;n=RCqFFq&2 zH!CgOBPn{%4MvD4%6y9tb9eJ^T2AI&C-?5^&lRh1yoLs2QYOA!i3f}uQVdB6YZT37 zx$KBFdd*m~MehJwWCx)RycQ~1tXR$PvDH_9I)Tl6JDqqGe&zvM*&|ds=k?B{(YQ<0 ztyXnVI|0Z*gcQtfsk#`pmH*4s)btM#zDnm9IIFbEken(ata8e>gQ3k%#PRO2zoreH98q+ zqgG4DZ7tfOf9uSF;YLN<9JzsQvKLD> z&)=4+>~VJ7CCsw=cuan^z4n>vCe$Z5#9wT?_m``0=l+_V!ZPpH7sqFj1d4`P^TDZB zpK?2NEPy4?;_|RWySMVkDw==kx>-^Myf~F_h#eFqtsIx#2*kY?B%?P z0sMhEhJn4k!DBbO@|2x@ zBG$&BX)kM`cBi;RQ6srfi+Ggi^hDzZ0^sEI&~G|F*$XFw)l&?n*h-&_B!50pV0jZ4zd!R)m9x+S}tEUQSU z`gW(2aUG$o7*i%NZ9t`PXVQD20&>)X0U-ku_DHKA6hWeL&Vw7tQ%B zApcRk1#}8j%QE=!k9w2UCury|Q@L3vXJPMxx=}$ZJ<4+>yL6H`11UTZVUx5Ew zA-ukXDyB{u!=Hs<2(`y=M10$?KKr!SX%d&$XcDe{2NsOf&Dd6-_=lOx*{dc!6T2c4 z=QWCv4;X#po1q^m)AKD9)TCXq24;_3Ih6F6Ws!L~>QvL?OR?=NSMa-#WlG&i^E$mA zW?@;skxZ^E0Hnk30pu#lDH65Tu|S?R>Gw>D+VHlKHk|jd z11vWmK2yAXm3r;>rxFlN*W#zMom>O@-i~`Evnt$0X$M_@-uXz+Gt;^r_jZC(`J`B> zYC+9%hx&h&=$FePH1JwZ9N(E#aLatn1vAw4l(o*deny(Ia$WHB$ue%S69=zYQXUe+qccy=x=)-svs68byi9}`n@5kkI^ghcE=88g)J{3HZ-TQLpO6vY8Z zs_NOonIo5ef$=g1;4Px*ipQ~oe^R&tLgIMJxNKUA7Y`N|ZatY=qU8B~|q3~h4h?Mp{96wZ6M z1{ANgL;eI#E;K+~7}Pgz36p-wYyrTYN@(YNndU;$(LPcK@RlequQzaH>kUm3;*{5w zskq63a>;laR|DENY%9I$L5qID;P?|7&$XTf4QEm}FZK4I*75Z}K0>Aye$UTB9dxQ$ z3zt~+aMhuFwutdmcZxJGJfTY%XEm=@Nbv~_!9-Lo#*kjPyn8J2^oR7r#Ipx;skLjj zKRw8!3FU-80HeHl@4^7{gfrCd2=XiE8;yFsH39V+vL)AIco&v0E%E-`Ew1mOHnebY zk`h!GpU=o%tDE2wl~p1}uasoIjGAtv#UO0;6& zfY}$HRq-th3Ti{3;-M3tFeQ46=b~P@u~*Fqj`#jHglv)#X64@1>kVNex~0d^3uji{ zI}t|N2bNvyK_*JLU$~TG4wT|1q4hO!DW{5_%Fts0sTkt%QKjY@IT&;Ich`!#8^kiA z7upW>eFwW!ut>aQzpK4Y*cBI$Ya;&_vfsG!yEJiOCtrQ=0a*2O!nmB5V}mkLiAp3R z>--h4qtP%%wIdQGh5M41!nBE~J7SBP*H@MS4EwS8u0oopb`yc_^G~u1NZ+nU;c{{| z`IJSJB;-h=l(`&{uJN@wzJz_X*|9O9Sk!^>pT9@>OG{O2W$z^j0ww+`!SfIJ9Cs4j zD-ZLm<^Em%(1&CrZP(%Ir`vji_k&DmY3!A!V1loZY3s|gYc(#H7b)vgBK2Z3BjtPy zuu9ly9P;y>V&rpWq6J*h)~`4)$AU0wj))Qrk2Q^DDe1M45jxDC(JtHO=q+Q4FbDt3 zii@`toqD-rT36xsBHwR5#xTNH;pC?MYh0;_1@Vs3K>Ru$-Qat=E=Pd!{n=ToE;wuX z7ye_^xxKAUAfK+;h25U)+7lU8n5vr_z*>GBAGiqPOIUi3d(-w7g%UlhcXyt7Enj~d zB{W{$nIhGRM0f7fg|&X<&?C?|1KD8z-D}iNZ)1_v3+`TkEs~{dxsj70Y~i>$fwS1u z0%(X!LeqXEqe_jT+57C~V;Qc0V%h&({%Nl5TX+~W5hCoNix#!^J`&73L3?Woz{Gw6 z%^fOvD?GA)(r_un^4A-wa~Q0@W)DZSmj^ng+);s3K7i&tR8xmD2D~E#*V3pN-csVH z72;ZfiI3;veT)*E?WxB*ulg4D5QO;{A2>d(_w>D|Mxl{{{tZ9%(ZC47SQo61sn(O> z8E?4n8#4o(0an~jvl*UY8?n5;7ZXBUPKOEI8=UgJ8OpdiDrGsI zo%AXQ8p2g`p>(%*@<8fUJ9a@l?!}9LHLPj!kPX^x7eZ^7bJL*8s15r(5E~KMQGeA{ z*_Klihxo%i9X;>kyZcy0E2x`_xwWbu36mx#0KZ9Fi;7>K#Boee?f`ev<~agPR~ge6 zbM7=D7PpRSV%$kK8a}Nz2ojcag(lCJ&%U&YW@Pj)mMY~uZ@Ta3d=~wK@NnL-x&0a_ zlQ?8o_%jEF&(pa(=cX$Jg~@O_ z8@r!>y9J3R2p!aJn*Nvn?RhCJ-ib1eTb_t9!yY*;9I5^`)9xmexXcmBPh=7OHUeC` ztc)gh4%4y1U+kid@7X$R)VAY#R&TP9!f#<&X^~+$r`>fVs1J0=`C@5is?=KXbC^PG zX7PO&ceJ;fbzfQ9{}y4Bqh|Zq1LvXbob7mR&vXqjvTTONm2U!#s$>eTlvLKK2 z4vPvL_tW<0Np_gC0%x(zykGlebNe~R*vuIg$(hQ7h-gXKo`QnIuG9i(Tbt>Ike%kA zeLsl-S_8+s8Gu)4nyHlUjPpX1H4{D43AyNrv(skm(>q*`UbqxvpcY%w9G^47TSb-+VMl~u3t&Z_q_UpI>niHkwoAc&YHyBb{DyG4 z(J{nGbx>n4)d|&U&p>DK8TNG1q4xLZZURA;3}P89T*tTG=CrRc-V#zl3@(2&i_&2z zpWH@tr2*ll|J+Rql)w00v(bJ2)b>1kb1G(Yq(9p5c82s3>)lNQ3>2R3#4+8_5cku7 zuW6#T;wmp`qITjcuV{$-Xmk&hGWuvXHkC4ZXf}40G6rbCS2UBGaTk{~lUs3}yGnxL zN*zo^)YI_er(z2ZIm+c_7Wz8vP!|_| z9Wq^9P0Yol)yanUkNG`pHht$Wr-{FD6@T2a3WHW);aJC%L94j;d}so!PS(*ewx2r6 zEH4*tS+xRQ7%-CAnO$)w=}^8rLN|rP>)Q%qj!691wK0{e3gzRfQ}QXRjLZJ><>c>7 ziyA<=`l?l}sIK@^k-$CCC+iyS5r> z7dwM0?e15?8ZneFyl;W!%DvBzYd9$@Fl$!t=i?GDsw=Ob{~S%-Bfx!_O!EaGyQ1>z ze;#|ggh=lgYzD;!YBZBa{UuI7y}q*1Qahl_RxKa+f*shBL?^x}`zz*`x{`1~{jr-+ z&hL0QF$zmcWxC6Y+4R2R0Vp5ZP34$tLI#qMNCVOz7J0CjC%4)A&ymj`SDt5rO8%8UU#eaDF{9cn94ngLA*@Idl{_P-L3WTIGE0DuxfArLu6&-MUE*;mS8{MDggi{y5Tys>pM5c#U4+j!Vv?unD?4N8@o#^F(a(=ja?7<=2Ciee z5cvS89g0mM<{64*oKKiGs&s33mvCvFAN z`KWvm-KV_kZ7QeZi@8PkEwd)^%|_Vf*|b38>D)b6vIhT<*T60=5j+P?rKn=j4Z#i)Vtprf=I*D{l@@dO&@*2e=gEKL z9)?UxxU{A_NBlr1m{gSc`{<{Q0W;N-)Q$l2{s6}flDTf12s5WN$^!oL+kPoh4}7Ow z@GysDc?XA|GZyl`2&bYEGmSG0Q*%_08?h$~nSh2-#x z`fDttP@vXcF(mq#b!q)T`Xt<#u$!oY{XTju|0TSL8LqN|erCyWX4S*GzYd=2^u~dP zDK?);)xuulpp}OXk^qiu|KP>qz5lo#n5RtWZpROwm5y{!#mG+?M#;mEt0(jQo=DvtZ6!iN?Ez!fu$#dZgc^&zVmEvNYC`g(7$V zB>Z|ncLz8pNHJd!FSxj8Et94AKF;TJOg7B6-^V|}U1u@lf)Su&O7`i?@vyxoEwne} zPr-K)UJ3z}eskKE-Xt0pJlup_W6JsWPF>6T7uZko1f?(`bTvsY zcP7sX*LTaE03!2Chv(OITJEV6lI+e4aj4ri>sKVk8>Y_9*f*?$*F{~V*h?mPC&o;V zcf`AnA~UBRzFNRte*Mp9_6eh|cjy@n8XphaLRFfhs$WFwY8KGT+vcBbQTI>HNz=8& zaTr*z5~c?8mCf!urHZoxZZ`6j)%l9!5ZBay6*F(au-r8=KZeBNsCFw#2gJM3&R@ce zc05)`=~I341i7ZW{T>@V#btTQ8soeR(rP|puyk4M>Bs@fb?*0eh=V4>aIOv{m>K$K zn_KDRpz@TURb=;OA!lkGMn7Jqo0;nO_EYRH$@1c!EtHP)om3jb!KpC0uQ{5RmImDk zv(~knH1ACX6lvA8udXb!aNdkReBdFY$Q zu4+Iv0TJkmGgL7hB?B|rq_o7>pu;xO;@DsocFLSE9b3g~J(|wvXPtu!hXlnp-FAK!L3$wT zD<>BPd<*NBBl*7L&1OC0akHb6GHk}PV|db0DS*icO;7}zQRcMs#6t1i<$r3GXF|_! zgGBFL@R_dH#f6zqxu&QY9&Z7tUXT9~6B5f5EME=N16S@WOr`z5MpfDc1zPQH_7?WW z&|UO-+C3XgbksR@h8B*NQ=Ig9_9FePtW8fk)*KPM!70m47W(+w#2>#`h`CMtpJwUl zY%%xC&MILcxNF42uJEbw#>c0GajSgRZU*5Ydy^8X4wc3;x^kCX_3uDf|+3gd7YER1ELqQ9Otb{K*WRa{9lKo zD|3rA$V)qT4MH^mMIvZ^eY7`N7_rg0pI5wiw4)?3W&ie3avslQzf5IUteEW;-3~iG zT&f8XrMcc4wga6DDq<}`-Tvtwwcr_C941F~UNzXl#c7SgD9pSdSyWK@-Mzg0{%a|K zSSA&)wdvuONvLysbY=!L_QTvCmQRf$>i^nuj#9pJ0sVIbOoNN&;F zNnVrAP9gwvuCcT(@dC~_)&KQ@Z1B#-y`OQb{+SPr);vB&FK@SIHa;I$ z*21H;C0u(j%FE0yCZ>sZ_qb!R)S1-qJ?1-WXL!)wvn^|nzSP#J1^d6ML*XH(oTdEv zf*~gZ)F16i%^)DA7C_*UY3G{Sikm~yRc~KHFpMvtIWo3|INu1IHWJ#a2N5XRqCRgA zZW(imtm2QOVsuaiesv=f5@sr)aNZ35#-A~_93vLiA8iK$(XjwU{|)`Y7s;>z*Meb= z@uIoq5->hoX|s0K0}7*NfB$V=v6S1_R-+~;@@@KmGXSF6+6wT_q+0N&wmqC$@c+^x zJP7uSW(tRB@MaU`KDfJKMBU$?8F+l};jA`}Uc`2uAwnnIHx&NN{)7mJEBow6;z;JElO>dRjZ& zVP6%e-}LQN#+$Xm1Ev1^9Yj>|YEaN*t;0*6H~}z1X>iC$#MZ^h6|I{T{d8rTyE^6C zp*dm4_Z<@Lb(WI9Q^A;5o;dDodl!B{|4521l)IF8AwM4Yqc8dG?+4Kjb)30v?_PoH zvel{NLwq`NMePX`$lO^RX6&E#OQe)0JLc{EnIF;_^7QhlId3zxZV(0Wm1BZtLVMRW zJ0)O1Mk6;HcUiZx(%zfgj+@VP@j%;p_;9ANW0vWbDxiK(>tTZ9Y+kuUK>utsE|>l8 z011DIQN?JdlO`s0+Sztq-$*Sigb8GU&6@aDG!cix% zMWW}+LJi9DzYnm-=C`A}(G!sIKJL#=D@-G@O+$99odxz!T%@I8^N_*ztXzCVRdatl0WuPHU;*@ zz$ry{efWl4ls%kB#;T-t<*N|>m5y6(h|7;J?VWZaYHd$sz3`HMD#9!yLZIW191xPu z+i2U#vs}sPxO0Doq?QdM7xQ%!ir*DA1JlD+b~G>9vMIR~RsEf#`pr_uaOI~RVlnZ7 zQ!lr`+Z4ju~j zkAKX8D?=$ucvBwOKXZ?AaR20#*K)#V4W&?HXs3Txqk7%8I(;}T)OeP!4YTwl^ybMe zS`zo*h+Q3J=PQ$Rjudj9GONT~`vV3a*!SE(+Gh$#;jhrwr5vi6B{Pypp1mpgs` z#1RgGK5WmDt(%Yd$QH&ymanz_bN>Wf>1R@vyRS708x<}t-}gyZmTC=2(exa6GI^ad zc%7qpIU}TtP^5mk^`)SSH4XM$@GHj*(Ucavvm;N6E?K)|RHzpQ4Dq01`73>!c}{H! zIW%0PPl(b9(_S3#E||0Z$bUrvdr@b3LK%OyzHC={yfQxkYNojnUY8@zY1Z(2O*(q? zE;!x7vgNKBEtn8V9(lR9=Eg%x?=Wzt=-b4G27d0lPdPJJ5S;_?9qDH{2kY$w8V-tU zxfV}X+-^n&E>@n*z}2Z4C>EMLN9XZuWFyFv{!p2AhHGOp{RJqqbS_uHWoo5Dzg+s) z>-GKRo+!`>cS}ye&w!gPQYbRx{T;VfH(5%4 zYvC%jWBM%WA#P)MZnLNvN^yq3>+%G&|8=}Ro^BDL2-^^yuhw=Ixk+lot6$s3A^sFY z)@{ERfoX{};rH#}{gi-A;)*ZZRO7cW+eZ{5{>XNf7Brg1NMw})#{Oot#mIveVGHG+Xwd#TqEjk#e9=xNuv_yN zGJ$gjpby?e$LiwV6;~C}u^mVKjIEy{Ki~04Q92P|kUOyfFD5&F6Af!p-KbcZ-n{5? zKAkw@U0EWw;IZQl`|06Or9^`KW3BP#HaTV2qG$8)&&J*^rcMRQ??B_T@`#qjki}>y;Wp1C|xx@yJdxthU$m<3D&3^rIJyC{)I? zl7!tHt*pUZ+r}U*wUW7QM#Vr+S-2fVbA*Ie>GFM|qNQr*@^lMBIqhIF2S@(CrQtzi zltxzDE3%CNtDnIsdugL%pqjkXGMX>4v5VRvX|3?v(o^0d7#B^S9Cy-0%C%7gR0l=) zfKt2We>%~rKz-fy7ibj%LOU#<*;pO);$vRjq)z`o_88f~NbEB4F(7K!|!jL
Q&Fj?z&dvIkh0z8X*QF@E z`>i!eq^N!HihmUfgO3*9-yQbJ38G>fWCqjGchqn5Phvm1OGS?0NP58 zG97dRp_G4@+uxJS82e@U2fU_cb1_vSTWieH5ps0wpIC>x7;WGzW=x;bS1SQJUYU(} z{Si!ix9iH1*2LKX`UpHr=>0=cmYsUD#RmDL9zMFoM&T(WKq{mrzaP>DdujOhZp)sv zG@&2hCX z@}O6dhL5wH3P!&pk7U)5~-4Gx5@A4DI1cPZ?CSm_7Flq+-}&)98?NVmvS#Ses>Y4LOlV&72I^(E2DJN0Vp3BhPnS?3bI(<60m}s26?L=&`lB7DgIp^DPYx z`jCY5vm>lO!IU?Yl%8RZWA+h!K2q6aIIsF;$g=jDaq$c2w^i{KG2=Qopx?3SR}qH` zLRgEMp=vfc7c<5cVpY3>o*LFif-umZZaryYZ?loSsEu|(?!S@zo>jU-%Sh_tmH5(4 z=h2yT*ycdQMI83{w#FWl;p-D!Z5_e?MaTN#<*oZ^lono;rQ;rD_U&BOA9U;!Gr-$uTUJ}_~L|5+my9#NdlM_@_w8CqfBi-ZIT5X7# zp)fWK>Mj`Xm5!X=@b)ck;A-2)gWTS8y&%PJ09FUNR|cM-G4<877lhV3qp2d{2z;t# z%rPE#6A^KNVI@OVDdPd}1)+IyYX=J@gVxr)k4Nr)U_^9GJ2YKyz4n&o_7+O?MpZG9 zXLx$Adc=$~oSKe(yUVCEmA;uD=!i_=Xp6tH`1HAe)d&mgtcRmgqeidrXzWa8>Be~h zNNrF9H-T!fO5TVhR6rGMP02D$zaOVrgg!snfd^FemV4E@n@+Mud{JXn8p?8D^5wJbkJ*0P4iC+yKvWE(Q6esWZh)hgNPPOC{pL>-qL zG4C(p7M0q>?5D3agphFdhNB17SClNEQuCJi%B6l+eRZ)BjD!$CN1?5?gTG>9NRW8!VN2tQ-QYF(EcMof4)%hCF6SnJvs%l0rl1_#zYRT#y4iA}ANn?B{BHF~^j z{DcA*$F4a&S_Xsbw7hJ6X|W*CG?K18rbuTVp1d7w3LnJL=qu3(-XXm|5{?3B%V*ZI z1G;C+y0pa`>LgBinu!P^)Rv*LwQu)h!V(2XJt#(#4abwj%UA!~8*E2%>SPcOjC4a2 zIhQ~#w?;g|`N0!%gM87?gEp)sF8xm%fp7$Jum7V+(90kuE{kzbv4_Z`%^j?{{CTya zovR$T1L-N9pmemov!rooxl;uo1ecGV7(1!X9N3qQi$BX^HgKzQQC+y6ehahu8ZK*T zWNIrZ8FNVC^k-f>nrd{vz0k{wLU(B5&+5`CG5n)z(~z9BJ^Tpk&g!@*^2)~Udgb2m zw+#gm6t;8WmGy~BLu|gxV-}v&OAK~d!ptQ+&wl!EzeO?@DBODeYxBVXjz_cYQ!;VR zw?-PfcrW|&gQ}-^ws$m33LcNxds*+A+CRkWM`z~V+}-#lK^PsH_9M68jf7Yhj9kha zkk-G!a&vbmyYX|f)u-XeDK81n#zrRUs=(jpcT{|Q&T1~-qgd5X82g=_XIC0M;;4of z+v9Z}Ah0;U<-}%z)Rbd%b6uC>LymE6YalNtw%*Gmjt;&f47Rn zldPE(f>=Yv8{Fl`YJNFG@O{~t@=>r8dMu&%w*d^S zutJG6bHIPmF;S7?ssUMw%kVusC?=F4m>9{_Mt)ghIXkY5)-J8R_hP?qN5|^Ut{kJ* z9=ub7lHCpu-mva|0h9}xkQAK1amP(-SXgBC8&2Z|*&#_i4zP4mawy zXw7k0tNWl5x^R8+xMOmSPH*q@XCJ-^>HlC4E0NP=jj$Kjb9K=B9mL*E`p>v8BBy$w z;l;DMT_@Pac^o!Wq90V9=bYge*W;XG^1HUU{|;Q7b*!SB$z8uBJk^)x9SK;W8DEwK$*8EC53^?zP*1vBtT{dHj+aQ`^il@Z9BII} z{#)XJI{N~bR4}zPS(CDCWOJ}St36RS`}S8N#2QLX8W4DX>dABRn+Q;AmMSD6QS19L zG%0iZsa_8{d2o91)1-3J*X5?;bzz#E`r)dYdh&FxBc{b=E)g1?iR(g8Yi;Mp)}Bs0 zH97c4MDS#A<3(vSAZ~3iidd=Vd}mOOcIFjBdhsJ=eO$%j$(c$XB-6EpN-pElQ7*$% zC0ogX89zpQk#HzSclDX*|7l}0Y#4aq**>jNHfG`FebI?zKh`P~$k zEq+@C?l_4v;)~O|wK*gj=P5n_q5nzgROf3??W)+ut6TM#4$2McyY)+Ru5eX-HL23f z)RbY%F(*8|#(mg=n%~IkHxsZ*!Qvp+Y?UJh7ObrlVxNvQQNnK9CPXgqc@c9Dyfc|u zV$LUVtiH@|OR^eCHSuouQWGcaw{zU!d`AvMH)oDZf)o90S;=5dR;7~tn!&fSQq)axDF5mW3O?HdB$A*UGEMb$I4{60hru*|lnWk_VQrwS(v769u zKmZ-Qp6`!uwY5E11Y%H{#QxGEjC;`k!fZP!?VKqCzNCya7OaubbNyt}RtucUy#A5; zqwSz|{xb?~#@pJznrh)QhE-Qwwe)5O%l_a)lyWACJVuID^@@IQRY;xaQmBnh-Zxma z?Z3S!iN>E1YUYeYL4|_{`lyEj(})HWj>>p>r;I5 z=@GR&tY=IU^S7Rvz)>|r3QQWrcr|5f%Hnkd^6w-o6hAA%VkCDrV2fBbwYHd=^zma= z^<8t2@suiYe+K>Be+8v>Zo>3ZV6yi`+d7*R(i>K@F~=aRedV8z^E5;6GDgQ%p9bJW z%Km5=`MwJKzAHr`H{;-|ZPI?DStk!hz2PslmP*$sPpPZG|H4_}ql)d zc@^8B0#BPl>zb-DYc4mYt*^g~h1E&F@o2M?gCT61rjdu`d39k`zf~?V7Z)&xraw!7 z``th@?Y(#z!K0=4b=DpdKdfaElU|l_I@VrR#4W^hKGnRM{l53!Fcq%8A~LWfwr^^{ zBhq2&?4?>}5U6sfW`lU;>dzkMm@*Yy@NLy;?n~LCwjiF*Z+0~MPRH?eaVM*@moaa~ zo-_`vJ>v#h0Z-+dKTX+8h+u#;+C{+?>6_zGp?d!{n;?dLvy4Iz#qfcH2Y z32B1gETp>L`4po;C)9^R-gd53+*U`z&oa>g3&2ASOoVyAK2-nz+?y=e{dPak9rPun1#s{s~31eBi4Ah+j ze2I*sy=2^D+)@|a)8Mv*i~s&Si(cME#Kh39vlGP#%Q43l!}430ND=QXNsUlVb+^d_ zg`7u0gbloZDmLXuGXnhHnwO4Fa6&4u-hP2?1rCOiddque7x}UJ5h4(SFLJsx*NJD2 z$&-obm<9#7SNb>({d#LfDN#c~0pX9Xq*mW_g&T0Ks%{asm3U0dz!VXpo|P2|jAohZ zstJ?O^E+l{g+7Jd+u88$HCv7`tv(t^o!`b5#C;SV#r|sJpaBkv$5bWFh>hrsZYjcp zru|)|2A_77Gsv{gD+nb`Trn4(#_k}9=?lP zdeQ8cu->K`2)%vLd?~;sJKkZuC%Jj=4cq;(#Qr7jzucL(X>cF)VF=4@J!b`Qd%)ji zg;wX4Qh+mn`2sMYvi~0dOhB{0JM^zDb~T&Kvpj{be^saz(8-~|KQvp;ga7gWE0Sw* z8KnPb`rzpie`2w#qEAnkynSAkd^Wn|&haWv!Bxm|61(7)<|&whkXHqNkVbI$ z!!nAa(?ztL>8&aR{|R@7q`5J1rRx%3^F8WH$0~jx+hjqb)+`;?e^{fMCq!CtOvRvx z!S9at?yugz-(?n99F2989P?(CFT&iPepC;g$(m(D@4B$ZMqE{pnm=akufF<bcrbN4Ndx@zRof<` z3fZXQnkZ!2D`ohme>5i~VW}9h#~rh2Z*I%hZSVIUm-D^WyF}T{j%m4MJBtL1cZ`45 z&XGyb?w<*1iJ1*uveaZP(MmDH)m5r7DG<}o$tIdw46Dto2p4ovQVgDaZ|_mX%2ITt zkY}%yp-f3fBdg>wxu&)Xa;tKe*Cx+iq8es)&EZrvNf>Jfe>YE0zG9d)ivsZzv(g8w z*2irB(N|-g2hg)(u$EtZt3oKGSkh;;{^}mNy6q?XJlE?|uHvp_)!Q3Nc-$FR9kb;< zL++Z4<%vPOpeNnO=@5SU*+KuON(#jnO?erW`%hKtE*dKp%)WQ;nUEJQJ1ce>*W{z@w+o2CnWtrnJ( zN~4vk7H6SQkx;f!6^c=%k_&~d*>ZS4TMaJOaJy^fe-|&RN@LR&dfAV{eHM6mJD!|| zu@o7V9~$^=%VL(iYw_RYjf7cr&9cTUkRrXD&AsK&o}zZG!)BmWjzYAj`wk+SiBfNF zU&VUKzpjJSk| zcJC2u@6rE{S^H5BqoCCnj!OkU?m*#td#ty&_cep&Yvn%fu-;dX*~_OF_Rc%v3z;P$%kj1Gof6Fc;2n?l#{DGpM%FY6!d50%N3 zc?0DkuV$0;FatZ)M!IM$=fNZo8$Q5jPQyjm$kPUgI{oh&761Li=DViVkg{$|De(Cr zJj#Rz@A`fy-eV@ASV~7DA7CPup$oeB0;wHBD;)Ql}X4c)u_y2DT?+*lqii1tsQ`y3WoI{vds4ce}Y$& z<99WqCR+RKo}H#wHwbHb11=hO+j*GfcH*GqGV`i?%@VYpN*#s<98iWtt4RqY71W(I z5Y+%*cyuf+jPU|T+xJs%H16;IYIJ%v_CeSlKakAT(wbf1w7j87i&v z^D}O9+sM6dmv)GIdFw9iAPo}!1OzpGc5`yo9BhK%s6ZP+X$njLQ|spD^^2l%7A0|< zK}cE-=<4UpM5=*}*$+V#K$p=L6htvz!eZ~$3wurz z=ZN6Qad`n1d4*NbJO?Fje{M~AxR}i5VL(QeXb2ItqYPo*KH>{klpP8sx##H>fX%@J zJ&*;l*I%MS1K@Fi9wDTdLLEU}9ev+B@7hxb5i!@aLn2}?3Aos|*vy*`QbDa!qE@a% zRuVBRFCh9|{hUMbt>}3o%7&a;IV*1-wfX9|Bd|JJ@%@G{5cai>e*@+gFvge!^%`*` zImG5-@UNS$SvhdUOqeToWM!ABXm0GHZwx44q73Xy@Aujg(!wU7l-yw#5KQ0}?aR;~bph%3 zl8?h!P47~gVy(`7R%-J%IEr-oKu~MkCL(lIIZ*J_vmUpq+ zbJ&VgQQ*;lh8}{MW=(ZinL-}3+C5Zo`RNythEdp>RW2=>fA2Lh$;K#>*}B;JV>Vii zHRP*!V8^;1fp@~nIGHD6Z|S&}B|Bt*3O`x1S+9V=6>3{jeK%Q~$RHoj3eX_4-g@)$ddR(lk~dKD{z=|G$-5_c^CaJ! z&e^j_a9~r8ZhJ+#f&tq#wuN0WQ{b5 zKcd}Lc$K5G3Lr1sL4d=mraIaO;e007K%;96ILhHpCWqmBGw6*CZ5@d63w{VLRJ(g!HQuQLC6CUKXBTr(v9~Twujb+NEPq za5o7I9D?ZpNJ%>bJ31g;zt`5$4Q(QIY4DNZH5Y-WL*x0T!+H)8x+`m7fpVoESpC?% zOQlV#MdI)IMEK9n`e7Rzjm`tg4LT(eLS=&>n`}|Gu`ZzwTcpwfAefss{>fF8f6=AC z+1}rgeygUNQns{|t#-HD?b{stl-8+c#_C*jVx%hSd^X)?8J-E#){;=VDaI&RmDbFqJ4V- zX-xqs6u^uo0R+&%PBid_Ht|88)oJ%YoW+dDg&A4sAVC_|mA9d=?vrw12%m_~kyIFA zDBizZ&Lj%QU_!D&aF<-l62|g3 z$G@Zx0c%NL94C^0QM)Vn6b%BkW3VXBeXmxc7y7M=!dz=+H6zY2>^sD@yvqT;*<6!u z`H>)Y3U{&dUg5&=vB&rLbDiZ6tB^`VU1?syz;p7xc4?)bU z0d!A_X zA(kPK-7L)2Wgb56)v=1T)g6NiMW9YuKiWd(Nme?@yyr_hcHGRPLzr`TytxRm3L_G8 zRvFfJMq;AXAi5)=(Ias z`txXhWr5_smd(>yfJ#OME3tGV#FEVdCP3+5E5nAkjmm~-0+0F7ySAQMZq*I;KnCl# zSWktjoA)yaTh4i!cP@(fSBc1TgL&SC3)t{&3zW1P6t`x-?s~T4Jr!$3Anx+@IHp?1?3)DL-p*7&kHaB|xnZ;&9@l2$f+VkU5=o?4^?r6DljqMw1m+(g9 z&4;6KES$1L+$l?G99=C~vucorCxC(VQmD*;^&C1d8v!)~<4VdbhE^1&2G`|Wd!Zn$ zLQ9qj0E2?qU`P35sxijmKmt0o1|JZmf}cm{7K-tpe?Y&cX3{L<{lJux&dHi`3vx&b*GawYu z6o0{`e^Bq=FT!{NgrG8izwcag;AH-I@!|?kH0aV+m&&jlEq9m%rMit0#pCQ5JT%yj zgbG2$t!?2nl*&b=-fTGwHH&^89VXzdHXr2*$XD4&cO+ghIV-9%(w15~P!*^s!l=++ zD4T`tB)rPsrWax2D5s^M2pW5%Xe`}e>fSFEf3kEAyTbQ=-L)O=0zjb9^#g(#+&byn z85l4-K5>;e${0fG6On*DXF=2Ja$NKCGfF~ULP?$L*(8`|0Ea?GZVbG`Dmx-9T6`5Z=PHV)b4*m2CMJdQHJj&4jER&$DKqr%5~o;Lh+ zHjl$RY)l%5M|9S-Er!MP=cC!!82rmIg3PaeiV6ZDb1=$!DA!$@pbi+0AdGHf0&v`o zpFO0!eNqWOAKFtkXQ9J}UiHCvvH4`|fAe)TbohE)r@zyKJu;9B%#Fy&B??AhfpdjZ zXT5P-t1zz7q81}SW(ySV_E{9i@=Xv$VGRGcmT!Tc?fGQxwtM^Va+UMnN^6^EgHkIN zXUs5$>wyC67zDpa@lkh?u6}ni&s_9bFlK%lXYL{V84^~G6dN8MS!@d4d$Q-@e_6T+ z-+&xk^@Mk$+2tJp;;r4i{Q!k*I#;WXTjK=W@EUpBoyHstS*6bAW)VvFIg*;gCH(m< z@_b!rE}SKES$QqR~|wutX%!P=iq;w-GFh|s*^@Dgv=T{KrVZeV=z2*$}=wLJ7*E46NU ziW>AqdM3aLH?Pa8g(tF_D=#UpJb;eoGIg}ou2@4G@n85N(&~c^uMeT*f6_F844ep` zag`Mwm$boTs5FbR3w12R(^^EA!ql@Y?rtTna$S+zF?g>~%8>oE>G=e_N=mX=*{dW6 z&nf6iILb~YCw5RgR(ZD?(@EAy(xjCvXW=698x!u0sbMt7SrsN3z< z6tjwQz)2$4w`d45Pa{!^e}E1>@E-5)@9ht!9%)0j;aT>O!%^CS6Wef0nsbXi6!?C|;74 zE~4`&fk*7kL_%NJn75S!Z*4i$PA;}9kP@mxpCk+dJsTxAGeuCBMG@4gDS|4EKuzzv zcj*15E;Hm>ze(^0fh7p(5ZEBFHOdIVj-q z>bMSGVImf$76z{lB)6zlwY?40n_2jL>$EQ?@iN?WO6sr4e}<=F^>WCozy~MusFZ#w zkiq#xNpyF_Z^@}OxGcs5@XYGe9jSF}Spl8dY*2xXijt6AZLo^Q6;l(#h``R}7IF&y zI`FPRS2<2k5j;lz1jZcXK3srFp%}o50hK9&@QjLsA58o(LP8O8B|@%BA#s?S<@09{ zTJFVI3bod3f7LVJski`T_gD779ld{ly?(I$4}SIQ@y&~GUOK{>EH57J@)|m6fZ_Ba zjT>3`!!k^OlMOWkBz8G8s^eR7p3af7X{h#ccY65nRh5>hM2*@+2+z%~=)gprFkXG^ z6~$TSVQv%ZsMC)<1SrRqm89X-e3F2_78ZNgNMqqDe>i4}L@g6a0~mSkmMl5SUdUik z_PSb-mIF+fqaUvk*wn&ds1eBGnky}_3i7#uSQ>;fSNB9@#B(7qLwNAamX3dfxD6J{ z>tGaNzsB4sRmjwMOEnRk)=KgyhO?&?z~D3^N-Ra44pZX*ju`Z1_lom>I3rx*fg{?O zFT%43f1XAP+;-KF8rx3pHo3{}Frans)|*Bb_1?-$8qTRZ?ZNn7doa?yEp-r#b~GA~ z`+4P|2+xYwR^LI0=Bidka^w92i{tLr9j@1a+Uk|!nGe9rba-4_j*`k%SGcwNt5LW0 z^;q)_+^i%G)w?kJwyJ9su{o>dg1Tyw6hqhvyu6lNk z_Uc7C7;458pF14taA)Pi{qKBS;675ZdDE)U!tkWU;o*JI)*S2w%A8h)#&;M^B!FoR za_BHel}PT$Li`p8e=POR&g}T+hVr2Oe|B27-~KM)NEBfpOt;a9yfI0F258SVfVo5; zL{?kzvgw*#o>lnFC5jz!*($1jkWwu;OYp-GeF{sZft~(EuV!$QQWpTF)w8>;k~7D+5a8@2wR*?>yl@jif5OAz zNYFk)TDBsZ1QSpZEiYvPEy7FJ?6eADf@MMy#)ldw)Xtf8Jc*LloA7)YgLz`gVtARm zJhOd-d(CvH`HP+%!1i}oux6`a#Uz_XF0u|7WLzhzB1pbwFM1o>xm8*VZroZdVDNrA1F886 zkb}IbH}`TDc+yGCWpVBIe{-%x$8~K6kj47(&;HzUdCYfa%y0g=j8XbH1E|W#bySSg z@-63DPTSY3XS^6TH!E;dYf5>))ARo3pYyy&T3#J7?P|z!2ONl6KmPGw1LB26ldXUjgjZ($_@{qGt@_jde*;_f_s2%x=e*qN@-SzixJ99Tf1=uVYlkb_w=moHPmE!l^Kz@pE4R{`U)D9WqR{v6uM4X(6vh5u z77O6--|%ApmRIwyD~gw;tqaRhmiBLWY5$g&_V4aeT1=09Vpjg*-)n8Vdz%)8QvQS1 zrhBwymM%d{c^b-+{`5Z<+q1bOTQsPmsQ-kQSfg1RJIA$Qf6~>Op zdOz%VB?i0Cf3fiLaJSd`flWft0|W&s>3M}(zl0>MVe1I6>NM6N4|lrZ9xx)_$qRd? zvoA}Ea@Z>gt2blQ&|z;w&t3uIAvlwL8C`{ez2`J7IRAg)*-=;>b*ixVUMVcTx1r#H zeow~k~t4RM#NN-f7wIrx$ zNjdkrCO2&vLg{(q#ieVVyd!)YZnCc=-JJr$fFCL|%SpIqZzNB-`4%%<#)t~^Z8&Cc z%uCFge`SI9RfjEsmHdkNK|PS5H+;dR@XQx$HD?)2lUQ^92KdLMz-B+{z<;p#HuKaH z0I~pfAXcdX^9j1iDO>@I0Jp-Up*9^jIw(^xO%HGs&7+!19CcAoaR# ze$Z_{LU(=bComIsx0yAUfwP;kDSp5&-}3|8g4*F;Zn1#P8F+k_R}Bd%X?5CP?E^Fd zn|bZ8_V#vD$5m~KCyVnC+XAc{6ihlvGV`VH7ua{K+FxN4vBC7}9WRHm$g)1r8&0 zPoGVKO8}E3gwGjUEMxxLzIFbanX?0T?qB!3k!926i*r%{{ifJ4W{oJ8bJW(DV#O!$ zT1>H74A~{7I3=d|Zl+jVhTJzp9+yc*f0{{_l_59HkY{CLhm6^fE{pQYR4_X+?^jH} zAQ{&pR=77XDOmaw|lJH{)+Y5kJ#Rr{kj^ie-$2z zV4EFiY;&%7%fGP(xu=@JS-z(#@wt}xZ!PlbtJ>-?{c8E^9?PAZ8$IX*f`kL87d>nl zKoeo(in!myp_X_EiO*7a@>LlMZ;^2vo=@V2eE1;uY~-1ZoeVtz8$k4qi)lWD?cw2@ zSKq#+ez*QzcmEQeIp^&ArjKjlH<{$iHOZC)BrupBTX<>e7krS1Oi99ah5JBIp35mLAzg zk+K$AvuAaJ!`~*wqkCJwMC1m(cQSNKf#h&xG_eAVx=~e;W3Tu~@Tbn_pS#udX}AJ1YykuRC2f_4ap*x#sz|;o`+i za81CyCM};uO&^d`W!QXozth}F_Yl%)kFbaYe zk*Qx@f*vL~jm~RR?1>_3Q#=x7Ec65%zrk!qZ3@+n+9awWwMk@4W_k*=Y&Y9c6R%oP zlR&khCV^~$F;;MGG8<4Gt?I9iSJhX?KU%NZ@$kO^PPu|*1Y%||jzF+XX7RrUk6XpS z7WnTQlMC0m{_kpUe*j0f+z0gUwl!AP-Y=KBj;n-%FL>QO9x|>iDE$kI<9blkev=$0 z9MY5v4rx+{!*g2!d_DwqOt-!San^^@tuPB6%|Oi%yC^jjBw>1qGB(~O(%C(mHz#`P*`6Qz=P6Vt?GXTf5wRSO7#t2)B#2`QRH73c>}ztt)xXA!7lhCYrvZ!n~!+f@$Qt0iUW;#k!UrD;6E z?Vll*(;5w9Hd>H&@zP>#?TG0}|? z6S$*Af7x*q1aUY>y{!&715vy^M00q|2&CO+3_YyHfaTn^)xD#Qsy2MR=V zeeV=5p`rt0$MwB4nlZ)K^DKC;Q508ZVTn}A^&OTQi`hyW5mC`vl3YAsW<_92ffbi*KzF7!R|Y&%b4bUSd?nf7r~zhA`f2D# z8&4%|IwOR7;GNZLi#OFqL!+vTH&>^R_x5(Y$y`&L7{-S-mSa6X11nM!R}B}Jf%-=w ze_u=y`*Z+uT5S@z(iuUOqIEAQ*v}WC9|mwZaQdvQRSWH|JrOo>Rn%5mB^rO_Bz!xF zZIF1?tW{Ur<72^LTvNRw4c~=szV0V-6MK|?8X}S3mY#q1kUf4 z88lY6-Z!9`7uH==k22obM*xCiibXkve^9?ZEx3%TM|mbh<*0!1+$A;h0v%7`;9$O2 zhLz*I7fJ?Etd%Zv%Sr2ezfn9uT8YQ7BIq#(TQc6cc4L-#0KZ)0xJd#IVH4(40EH56 zroPSXYqBtL+mG2YiE`JP=N4cEv#Algx6h-)D~g^=)D8W?w;y})?!4+$n|T7mf4`Y< z-RGb=?F&;)K(UCD^Vi&K&}_HY3~16;#RtgQ4rrBn7caoy=7oG87rLy}Lw4zGcrPdA zkD530)m`7rQ*YyQ`NbG&{M>-2X|(<@cCeWEy-;<5stgzyf_^3tO`XfstM z&%$|=mLDDx;M8!?aZkWo>d@%Orh2Q^eoY%5?VPYfU4Nk6+ixYpO_pm9J%7*!FBDKe zO97BtbMR+_Y%%sWqrhTjoOCOBf8yv}-rhdh z^1OEE(bu48VB~n4&b3U1;*ibJ%hcA?A-b;fFV0a7iXYjka5Kg$g)qaBWT{y)m-^z| z-7&}2Hj8tPe=@A|l*5^4m2NwDWXW~gk;nJe4Uz^mdxm=ucQo$*bS+hlVH>(p^jC7k zA>!$1EK-E+ZTncNw~Jc`f7kjGbMLTnt}TC1<->#%lXwDN$VbOOtlV-s5w&Kk;^En; zINrs1DR9)(h6g~kfkZ2Etr_2O($NVWyJJVdPYPUk0YXXbwcO%)pQJg0V1YC2c@Ry` z0jmM-^%tQq?i5Ifr*|j=q^Fxpp#x@98IU5C4`oJXJmNRyou-TEe-p@JYidWAu$v0N z!8Y?CrP-8c;^)iBB0@W7vo<+@0;f|2AckQLxi2vz;$bYRb1P#tdpk)JxQ{YRComE= zrxL7TUA)Au5(x~S6so7<^7L6JV80H-mdK^su$nQblQ!^>Xjo0*EVV#Sjaj(*jJ4mW zU&w0-Vo^FJs&IHzei+~I zS~-};8_blFdgXoJbk_ZgR#6dYWV%XA%ss~|tThKK7gxum!5 zb;F+|jnlBP%n%@#d|K*|TJW5vrs3+0Ki7qda}EOwe;ggPx{f@qW!w&rQWN$Ekxuic z*+n!b6xe4;@C=^g-AG`{#)Ki8nvk}UDp z+;Rr?u|Pe~mZ#Wpl9VkMda|)oi?GgG4L>&;j;^}8mP5$nN4CRG zC@&Higk^_ed*D@~vQGybAl>CS>is*zv$F>^kl>;A>iA&IlI?B#K?#srPf2RK2?wcu zFkUd4kfhQ>N?d=N%=|01->CKH_SaEODv@x3`9~=4PQ~`5mZe!cT ze{k=I8{F-ifH9(XZokd}Q-gT#8;~E$CnkSIJML?J|3M(C# zebV9TKiDt`vQORZ?FX9PFAXAualb;kw;@8cw}-;KuX}T|HFvhRhlF#_PI$z@Py$wO zZ=XnbBOM1{wF=h8wGOgRhKzi#g|$die~8`oPAonHg3)1zRO!3HJ3=r-P)3J+C@x|~ zo1TI$9ClDj>%B5|)I&K=E-&55;jHfW{A5_3=@}7C)&x zG4jcI<{ryKKvApNThc{wYA6?bx~z&2KupHuCFS%ue1vde`LPb z;r4R`fetXiFjcy8Zc*zO621{uMiq8v4%$g2HPVfhx4$zZHTcrlfFe^Vu&5_~Db47l zMI#v#IK?-WmDlb)dGghceYsmiu^~?H!mFw8@W>iyPdj`1!%pJ3-QLcO4ZXdc89Vgy zw!UO`F4&3JezJ4Oj=lEY&LKPUf3%bAgPkQx1^+bhI{nk+Ak+BN)5JMVp2GG&YI^P7 zzQd;6l69VD?h%W!qe(Um12;=Xr^$Ftr^W|6rGG z4*U?S@t8@CTa%DI^C&ljzZAs$ndf<1oxw}<5jFe)VZ8}AHgQ}` z)h(nFk;TwFv7doGR>y%cf5`{!7hHYkr8K-2GWWN&_SY}DWdAhee`^Z1{x>&;AeWP^Z$qz?c);JmTr0p$ zZL#kP?FFTR$e+Y5f7LlYMg8x>(zEH2!(?dH8`!)$p5ElX4OJwe0(%y|^@et9zT4w4 zyP_pR;Cu z5t<8wM(g`fcx)=|pN{Zky#24nh24-`DQ9 zS+i$Qd%*iXgp-ebUlMJ_g{&b+&$9*304x+{vPh7^5;k|(?IjS zEmRk!D$^)nX)<=!?9I@-_Fao$2^$uRNfEbUv4v}q-nX@8-xvApf}^mq8dn~WpE+xm zFIwmE64hzbjR|LQOP~456KGnI{dnju_0tR1byfeufvW#anJ8xf51(?%#Unu+pDH$<>|a(u@kTBu#3@! z=FF}(2f?s3EOy-Tx=L^R$UEsD9US+MnoZ|obfgL(MU8=d+4SBFM;L`cV|EAvg{EgE z;EK!wwIcALU}V;7=gBd?nrUfBuBAa^Q66v-n{YQ^m+gEJ%>agWca#NVu6!|cwzsRZ ze*xXhEH)I`*40-=)oZmYKIp9X1DLB%Ubp}0;DeZ^PtB(Dpy_@3o!$T(cSBdeQL0tb z1G#D*$i@&mv!OW?G!3p!a6ZYW;Mo0^+PF{gjaTB^%fKt}rh?$EdwVIUChFdn4jqd8m7e9PrieQp~6`9>^u^idD^z5`B@rt5~Fqbj4TbJGrn|ATNWhTSeBCF3)`I zuz9VP!b%i}H&>0s0a4Pw54trq3D;s(^^|;uIc&aW-}DMcTQ1&dD&AVQx4&Bge}J}j z^i?FE;~x%FjdKM9na$^2QOED5DD)B3wDk2wn%gFCtg|70de{66_5j7j#GSY z+3HjX-eUa?CLQ$nI|G-)<;C|bI^Whz3f&Z1QnlDT*Tm+d@fuC9MxF8YcA}pO%Fmai zS991auOT`%zb9RjSo^tOV2Mg^ax zix(4rYRefvQ7t+4y8Yt=Az$tvH@%+26$JC#J^?*9DZF;r2M9b85^UXtS^opoC0K(g zlJ83=kYyvR{fU$*NlnBDGH|yCBj_JIsCxu}H)N31PZ|qwdTljrD-;4`f2-*?E!H?) z=8f|d{Ej_v*a>+7SI6ju*Xdsz1pQ|7f_gdhtOXoqo&E*-&Fl__o)uf}1&Ej~x3}#> zIJ=|Kg>U+tzhu9nw*a++XhlOnls)c?4|Fi=7aTg5- zvSQA;bTAo^S7^7w?Hs8We}ehjY%*7?L!v)`J6Zzu5HC1Z>@So**Z|x#$fww<@dw9)raH*GMM3T1`^DO+pX}KrUx1Soe`_v(oyuR&<*!Tm z>z({XKXz2!9hG@U<=#=*cU1nJ3eN;Z-vf=@yQb9ShKpC@5;lTkjoGOT86gP5m_3&v z0s~=fjoGCPy+PamF?*+_E8st`M{;ll?ms;AGYiBGi3dJrBd_vhf2&JRIHRg{nXHLg ztHR-J@ma0~2<8G`e`0o|{-ZP|jm`+;-_WLrjWSXU@L!*?f9?UOu%1p^vGgI{ksOu+gu6X0+PxI_l}V{K+v6L=B;mv)EZ=?>{PNRA zwVZ2<)0zz_;WTuuwpBrjzG#~GpwQnP@dKWk_|0p>EVafrEgo&557 zh5Lf|rOoYRqg(GUoYnA$rLdYL4FHVuC)9wf z8uYS@e@73X1;8yG2C@+*m(e1{GuU#ZVtLm)?iqUVDv=Ij1-gsm0x}71QIfAP_PC3} zKQaRX=Tb2))$i6?GaJxK^wq53H8t1mraDgMq)qhQE}w3cBu8>S5ozsFp9!YuxZ9pi zvTs(&w~KThE^^S;Dm3jRZDdP-sy7W-om6;{fA+oOF8U{$z=#C^e}H2*T?_Zp!mDCt zi`uWPX4XqmnydAtxdY+2MkU`>cTyILwe=oWeOvURFJQ~onyaJ{;`lWwSG_`--lb+!@Vl+r2 z+GmT2f3lpRpAGstBCn_YRHCy8hnIcwGlC>JS&br#%# zw^twA7v4KP@#DZB_^$X64m~~o_aGPqF8{qQwwWL6Q)+V%Cg}{@3|{cqf2tn>!|;5v zfEU;S!cHtefq!7nKz&wz6IM-8dkMy>MSNqf zJHho(r!LtA8}{p#6BtMF7`0&S%ToEta@J+Dr=7utx3{x$FT5u^s|pAf9J;RK+K{xm zm>#?ER*qX8xyP`4r>+NqOu0hYunOE`&LfYiA~PP@y5vY7 z@@eqI(>ktoq<xx-fK%s@~XLIPALhEv4S1xlX_ zA`MzWbAVA1c4qQ85jc*8Bc^{#aVt22Ni8t0IL+y94nbR$vYg#$}1m@Un*e_Y1xK}uN!t5E|GNTI-j z3S1$PT|JJze~+)DFund}6hRJu=40#mMgW6SlRTZvL7pdtwZMST`nM8*g8-Oa#m#z4=}x(cyfGnNmOD8>BZ z1BTh)f@7mMRL3e-paxGS#g~zQe5+O^zc7=JF!5L<<(#8ig6;k;S%iLio&c$C@I}$^ z)7fkShPmro226Bzs`yH?ZMiw5Dmh+&4EUhp!qhKqfAUn~SQZG9fz;SdnIgh&#^knk zeWo@O;thNX-6`WF7Bw*ncfc1#1d^eNKvJ<~{F(**`D?d-ufgbS8Vr1M5u<_oKpPPJ z#xapIR~Q?k?@@9F$}+0w=_QxB`_mLPH7(i}L?NVi4)WwtAyU8sO(D4IO1qP3k}0D8j?i9~95mQ!6AsV?+XciV$F zFcmw|SQmUcbWKWrTzntpQ#Y0)CIqeDbiI+9X#V6O#0tz5BaGRruHoW~GfLnVUO=2^ zPwfEO#u6{#*y51PO+f6hdFzOv(%N`jc^c^Ne-$VeV&V^ZED&AN_9{$TIU%WhDTvT|uGXY%+`DkEoRj-16X6*G>szhBlK=`MYDTm0{-hwfx*o#H+6D z>jg{+BC9v%eL}$m4Le|_KaVHL#bAz*@iT8q7y@kW&D!*)^|T{RH1M#4CxX%5nBD*? ze-vsl+)$g4h8sDLL6sDxIJ@$`>5jO@>;<)aye92_e2%!(5;HyVB~N?@;zsl=;`A&w zDZfVC=8{^?G%A->(EQas$$W{_O-|Fx@RlbuoUm~fRon2E#&>C4f}GN~cT(QoM5&-v zG=A7ksX|0+hc6FN!}_+9OfRqsJFc|*fB3d5Y_xlwckv5bg2GXk9qCdJ_kr)Fa&g~z z?TE85t6e+3Ts!IAPDtEOcUne>-%^qoE01&iP+1Ty=h^bq8rXmeo-OAC6ia5ydGTrt zVPG+lJ1$RhT9M?GlECQFRWb`X_~&yxyd1}+C^MAb_vZM*3VHG>FWCGFh`NdMf2f^t zfa(}3^y#u0W#3$ei-GNL5+I#Li!6tdNe;RZn^%gBF2aTI@7C^C3X@2N#Qe4>5EQ>E ze|N_P0BeQV3qa2Pz1@7L93#2iZAzi^rfy|-?^b&EZgnWsVlD{71coo3Jv4zOrY;ZMbPyKjcZNjF`DXNGPMd{+q|)PMQ5 zT!3UMT}0tvMf5J&QtB& zKUw`m!+>vQ*(k+lO>TKsK^%PRl;X;Q8I9J;7h>&XVc}9mDhA`h)a&$9>1nv13Ujy2 zi$>|Vk51_R5j+EDa0LaPdy@H4l*7!Roq`T&AXs%;PW=7^|GL?lB+CM1lz+#SwGyZf z&?Mm68HL?my;>u=T79R$m?B)OzFlue-zDX>vNm3&XP-O3y;2iO8UycW&K;Bf=z9Gd zw~oY7NQxVVil`y)d_U-ppbE~8pfSdX<+J#ZG*0-}t3du7a_n&Wf^+f~TDX|3zJwN5 z-*t4TXsEzxlh-@}q^<2Ef`1`IJ&9U~Lg>nit2|6HQndZ~+vC@bPy{rh3~*8bCIM6| z80Ov)oHXEY|`jS*QP{>kG~tVqo6W3OimP3=A+4S zxx4Dh+QL?HGoMKm6tWM#16cpLf@Qls9PJezNU{v~ZK~84gbbmA-iD_ez12ndv4xjU^t-OL!|4YjE12Yymeri0l`qY&AKrBeaD0g@zS^~Y^=pL3Q)-iQNDgS9XKB&+&p z@d1`TlJ9oAU5@3$$A9N+_kGsfTv(cHo`>Ro_IUY22?Ds*_e&2o{?0-0e8~EFF9&ip?`d$_w zjm?~+G@j0_H3#3`wXq4e%963 zRt8;$n~7qxw0}icjH=*9is0(q@)26eY!(zvSyEV|Tv{f^rXzqgXVx0bu!SqNul4Y( zI=Rmwdq7m6x$5L7359cb>IGkXOn9C^Ls;qbT^zF-{pSbnB+f0@_g4BLM$TN^*b4%C zWq6X|9$ehC9F?c}Bo^_R5nnzq^fh!5e#9jNZQHPb)_-e`5>bD-^F5Aj;(M3O_k8)5 zzx%89&glL7-SIBG=wnp2Jf&f`JIrs#lPrHF%-!0S<8Ze#o30h5#S2%AOLb{1#q0hw zCNZeZl3%-6c(hBIHi)3Bwea1`wHwvmRZ81>6In!67cMHx35GRnYZXe7@AkIM6=Sqm zmOHIP9Di+)6Wgxmi75|;1H&u~PL;d9q!~HuevGM81$`I8X!F1_N@L^*omW^mZ(e{T zcHzUB7w0UMbVVwxCQ>;yV%g?fa+&Qvk3Y_*7P{grUV~wyP_2S^76ufC1vO-9#x$-R z)2MXczT_5x(D~S@x{z}>N}!C?j1ltPoD3oWsed^ir4DhIO{)-0bk;R)%d>?Kz0MYX z>FQ5{Iz_SR8!R8>FElcyF#L6=#~l3zc^~gC<>wCbX$I+V@s1;gDjIdi=t~|9qGEc* z!6OX=msUR=*co_otiKS!N_&S+48BXE94>;dCvU^wf0#9_X4JGAeDh{xaW-4x;h8bS zgnvm8|3$C2X;Hj|3jk;au!#8Vv-w_jTQgSa;v$p2ED)cDZe@;}eObvt8lt;--ci}b z;ww5W51rQwq8VKqT|NX=*L{PH``vg6QdpU>ED5l2QIBd(D^g=$FTLU{O!Y$7Y z&wi``U@81-gRyz;IBs3ESqdkjM$$7xq5yzYx!k;iSJjp^+Eifb{@fiS&=T+LY%(~Q5-1m^;z86al7k}~a zkpl(f0#~O{buG*_F5`jK>RPVV>a_REN~OXPU)1DhMeQjey#JxHZSkCt-Z<1Y0tWZN z=NR`^J%DHmOC|$PtQL6kL+1CGMh_^c;7z!{A=?6M1IKc$2SEO#Djz1P=INSSil)dU zs+Donm)fuK}u1DG#gU#^x~>v)U?*`oJgprX=J{^X`!Xj zessIL`g-w(UY&X5;UFmq2R;D^`39rz(05OM>!EwMNI{Oe_zmwS;0k$@D1Ring#_`U zMDBNyjpr?2EZ>se1p{}>F?dmjjA8Kz3CF^^JK5eYn{Or#m|-KjEzzqv=tkr$GEK>Z zPDbSPr`|O;)B%3NmC#L5!M@`(I{#)qB=pZN+Z4OknsZW**xQM?ZqIXYC0%OBDQ`%? z8#22`LwXw<@~AZvmT7e9U4OyAOt-fK;ay}??)_%ah6ahlj(>4K7N+xD&7cQGzCLD!9||bX|urHBAvHlIDS9|dWawj&wC!6 zSqP&RCGZUUF*em%fE0s8LRzKxmXP8bBL%oqdKVO`Yv~+btPyxS<$7xn#6>VdrOpTY zi>$_23AA>}XnuQpOMgd6dfsCxB3d{oqub(|vjKvP@hmi?Q_mk*peopFy$R2k@dQ+C z;JYio*__K(Z7P#kxio?Sxw$EXL7;B`#bZ3c9c)z$bVWnpwmA8w59e5C^svLF39@XZSwCvy}ekpBp6`U?Q zcI@_t2Ye-P@qhXdVCw0m@AvTaC;K?#(>xlESxQ_Rh&3x~x`3L9PILfppYuv~&@j4C zT9_La>sAW86z?FY6bfUD@V<5+P8Y6i=j`@c^>*6qM6ND}#e;8qc7VKE&N6nw4jtD% zL=Or?ro>Y6EO6TiJg>z>T%OK>bR9Yl3!F83m6}9{E`OkW8*92nU~=b!{u1sPvNr`W zLV+}df;5!wp|FW?bCLC7cAg#iB6?Lgqc2ek)@@_G;)|TR;BspCNsjFH(>P{P@Rzj-P%i z1O8_W$bXh#=lA2E{DvTJqVrW4oQD|nha%`Qq=5fk1o$YE`teWx2N|%$fWMKYq(Pcv zh%6+X$IA?ZWHD(T%|Z(MA4S+wCi}-CDEk45*RNRV+;kkz`=mA2=!#LW)&Z^Q@YEr! zoMdSgw8;cj*{PHfZzx8#>(i{Qp~i)br1Or=9%p{qONR6`%ise~Rz_ zD1W{opne-n7JkN|;U){;Pc!i|pUBYsT*k|v=iJ3LWOT^Hfv=)iboZ?8}j9t-RJ~ZDhIK7(xw!7Z>myoFf z9)IkC0R$x_nl(sgsH>3WU<1)^y3^D)fM`G2JHxIP^q>&F9L^e6&Y_hoYc38&&L!#Hpc0>$r;%QuG#Rz~H7VN^&gQ)bR8;dbW zy@b10+_F=BAoRCM$ARfB#ENGjhs){7fR^j_%pwi}FiXUbO#VP21^+<3~Ue{rSL>9Q~D}sK&TwYEV(F7?PM~oEMX4OC=6?X^uFj2fh zRI8wm0yZ5x?in}~Umc}B+GXqB0e`2ag^@uPbQ>cr{AWa)wGddaQpR*YSjIcNR-0|A zQt^)+Hi=i0kJ-<$CQgMpylhQFH&O2bv3^;!R(d@YIL2n%toQGK7M*4JWKmabaU`Ao z+lgHQKCIJS2-{gWi?V1okHbOX5vHJ;?6_6VD2+rNr%vlZJ=Ex_&BsPC^nc=hKMhN? zYTV`S9c=nb^Db*B&3ROB&!K4v@F$nU#bh=Q1A2KuQd-3L6E!KEzxYldeo}?tOKmHY zDnaw!PF!tVS&=p-*;>EesL|3U2jG!6D!kkc_sIJ3H~$|JrIOYcxjOhLA&xY`NTJwE z&FGP2E@1Si7_x#Ao$n(~v422Lip(`RpH32dC{RmM1bwkuP2DP$byg9k=nk6Z+M)j~ zm1R4hqj!E?VkBzKPpz36siI+0#TZV1>w`G0AH`_n2-YhanFGr8TN0to(_snM@`PVv zYdXr^Ex58yip)wsYBn7eI*P`6d3rD}TTZ}FMkh#wUg>B!xm_9Pd4FQR6GP;A^I zQR(tm>JLSlsYs2EDOc$!M&t0PSaS(pRxH8G-}(}~{3(`TrLtOSC9*oTwN2!ELCS$b zd+qPZJe!(Zg^65DqAVT95?#1&9os*Q$r%|~JG8sCZ2 zfB_~Y(T}B5xN^^D^nbEuJZQI~yX^gYo!T|3ekd;pK5~WcCjJIh+kTCH-er1rmVwYJ zDrx6M;7=TkO!YYq^mDX70Kma)N&;&;U|tFZgI%_m63($xQ$ zh*d0cD$Icr4i;ywOfa4mSUOzI++3d6CqoSoUsU7mZSby9SbyMT50dgPVFyyL5?w*S z1ht{DL!pLP+rkon#YNMF6vR)^t^r2iPr(1^XbTEo08i6J5H3)O;d^1e5ut^G7X3Dz zf3Iviv`A0_;kpkAH~#?h2Yv*{%~T%hE%-T`T!9n?9>BeLr`zuK_IFAM#K~=H-?G;g z1{*cKM)$uyxPP|-f5zt_xw^8Qn;Yvi2CO-jRiU}g3E3yBkQE4VwY1I=@O2&g~T05Ugf6N8 zv19~3wd1eX>esV3zx#q7hL$|& z=kR09K7S2Mh+AAqGWry9Y-G6t795$1$FxT9yu#S8zgC z2vKHi9;d|QWwQyq*r{We0`6vSyK!FX3~S*r1b_a?JaRoQtzk9a+<@=wsHY1HV0AnF z;2A zJT@QolYg9a3;6C6=1_9xOna!pgswLmADX^0IGCFH;w2Zq zwa~kmk_xyf1Ea(Bh+wUn(u=h8nmKKY;Pb0zDvfvw|~AJlHDX4 z6N(}z2#ElIt41sX*Q_jCo`Pq_kfCqC2my$}B(lV~%)JhGQx{bkQby#2NSas}8%tX8 zq7`M|NBn8Dl3=Tw8+$Ij{k1L&d``BUA1%`P&gE@>zR)#KT7B@U>VLs@vL8V8DIK^_ z{YibrP!DiatwzV(QCZs9?4MX~NRpHe|j8YTnRPJ46Yrc^e_KTfDW*%ErLYgGP+n6o0yfK|zhK(@Fr{MyiXBsK|Ic;;L zl^?vLz|NS39tJEB;UhC##a90?W}~d|G|wsgn2kP+om-S{wz}NaK;`Dwjw7_{*xDl1 z-`Mjwz2HX?x_@@r>fjrn7K}JgRI**BE0SPTAKEI9sVgUKFjLO}E(avc3=Z^^OyXA3 z0RlE7Th5D7a|!vC(Rr77Q3)MgGu+CSrw;!y7cxAmzCYdGo|-gsD!H60!hUm8$f5Nd zA}Se`nt}v-GRQpMXzBrc0%ROmI!R}rfoO5Cy&ae>4u2G1R$Wt2#f{zE6iPCB5>)p@ zRCRMxRg=~e*$b@;zUYDvzH*E%__uYzukJ#jB%=#{br<|QbU~koxwWp*jLKMw7fZ#X zp(CJ;Xi{YBNl+oe9VQy|A~p)T4;tTJ*$yopwIU@#k8((K8YL2)XLiPUMT|(}phX9{ zRh9x8ihsz$SY0o74wngQP}1rP=M$U^sek7h%|ZW4KuC%%lPYX7H4Q;wlqoG7`K{(b z&%2&QuEqPoW{E4brWtCS%TJx9Mk_OrR&2)q`eqTJv{I(O-vmV!6C^Y%_)Of~h?YzF z$Cl?k=#3Iw+~Zs4h2*oUXVQY=jy;n;oi_BWqJIxY55#!KN*WExq$~76)e95B5P>rQ zcX!ljeLd!Awp5c-nJ3{HC=GF}AOi`jt4MkBxMpq=mk zl*NK&M!(RY4kR@qLi<>=DP(I{?)4hd@gM)la*a|Y87vvI zQ7r{^q)Za}bXMD^O%i_(gWEpr`7b$(& z<`WPl-kr|o-~d34mmULZ@Y zC8C*R6J(K9g%Z2eo0gHfrBO+;#w5EN1d>IlK!5_kTExN}bxXEZ@B1pRl4W~r$$xg+ zlDy~~TRxsMhJH&{R!_yWCBPmtZrE|Z)VPw7FHlLGBPtVa*vD{NOy0G?9^!u ztXjgbGa8N<{2A1)XIn}mJ{a~Vgb&^RBt0x1iLhXlgoU993&SK#jJ`?wh~AlIG>nN1 zQz8>fgiS0G(KA~KvK>>mQ|hy+2!Dg+5Pew7>0&TEL*Z+25XmzdjH-zvNjSU(@r482 zJ8`}j7?9%5#ke2C-Jiff>U^+glj)eGNg(g7u`x{sB0D6TSxOjPiEE(h3uEn&WbZc8 z=ZYms&TRE^UYGJ_^@PjHq=)Yg-XWl=6?sVdt11r zEjp5sUS6S3nIj(LJS-cMOH_DZ<9NhXlc<|b-J_N-4;Ku}CAu%!yKUUR=Dqei~J(k3)M2hX%R91>Emtnhn0+pY}V~o5>nk<%AUGTFg?02LQkd z4mtxK;4=fi(GV**@Ua*kf-pa4#-CR=eQCa=RZvr~ho0_AFIYnU0DoF&%DN?*vhJ-o zF0)g6Sc&DcfD&lUJs(P%8exqRr@Betvp>w!4-Pv7WZ5ZZeC8;K7PlbO6bFJ z*ropL+*+ei>(9>JdFP!2LYKC>q(Z89ZdHj&s&_Z5!pxNBAJ*5)P} zs-Kym=T}5;w0}Vfd^!ueVo8r7v3>hYAbLrAkZrU5Ot5K$rf0D%E+-l0sJR6~5|{7V zqGTH^_hpFA$w!H7Vp){3Hlg=cx3RDl?~7Y@2^27KXarT;{1mlX!_9 zyN;JPpfkTh&fkA0!F|;uZUu-?m%3`AG?$|)c^`svN`JWbge!ei6*|`K<{c6}>~c%l z%u>wwj+#3r>bc2b=tahA3QBILj#q z^zyZpSW_WPbhWR1-3R%oA_A1L z5h$*wgn!JR*scMofoCu?Y$)hcLD}&lV}&KwpO->Yi8D^q?uz!%MH;9I8+jj2)CG(2tU}?N?(;2A| zbFCOBmM#e`F?`Y}$otT6;9lNRa5aHvH5TKX0q+)y5r%9s4VBPSrruQ9cD$kCq{th} z)L{m0bkTzbl!@p!o=lh2nBPOLLcSVV}nS$NCT zHZyezMcrcJy>I{{ZUqgOV>{cjfP#e?bm5)VUN5gVs<6upO)+}Tpcf7n@(bD|m!KTA zrVVfkP}93RuY1c$H$1c_T3`|AGy~Zjr+?JQJwi&toRr|qgt{4zY;_-Oa>FXF$M&b- z`jIB8Z7iO-z{=lNp6GtpWR;uea0jYFd!umAE6)K2$JvBIvPv`6&wu1J z@iA!$(SVtrYyIt1wNk3~$YuK>OvX1S+5$tTA^?q01r>E8Ia{2CGoPhAgKA_fLO}pJ zo8#9(E&`hi%BQE{%?0I#g>-D7C$58XVxD3D{J0iHM5KY`|NZ_2G|zm?{43m#Eodfv zF{4rGMC;BQdV^}ZMw@z*w)7TV(SKKHTW`~j-l41dDqYjp=(@g64P9%{)9^FYHI1Hu zA5+(w^ep^Xy4IrS;D^Cd@jU#DbgfPA)U|fRYr42OQ`v?=PT0#1lPkL0i0XFNXs#E; zIvNQc+%dRat#eaEa)p3SM=oa`4W^p$pqQv|$bja+px|dci=7TDT6%Yql7EA>shZzZ zL!v9n#17Ewts%}#VI5dPwjmCV-LBS1kHYQl<64g+gYt2_jy=^~O_GAe(j#PG3ESJ7+uaS-KGAilUF|wlW|3HefpA0F=jcu=B0`uh zj}reH@#(&kkzdFPr4cnhQ-6QKT7h>i+Y=59ZwnOcHwgo|F4d4JhB7fs-%}*Rf?n5& zyD|;BFjAjmBim(G#p5samY0u(M`Si9Rj*2-c=dzJS5NVog;c-+npY(o3M_KL_&}PF zsQl3zWZ)JukTK&d_QuSOy$&lFhHl9eaIkj3LWS+yK^SKxXC$6gqkohvU@y&D=5?gu zjr2&mQn28;B4{)5Y2+f?592PwpCy88rU9xZ#a|%l8>&@}-z}D>23A;gJTR)9ywnd) z;O%_1ADmb^xyVB+HxU8bgt(;xT_IHqwSVC(@g6sYo}NZL`<-zzPD>ztP~Iai}5Bb z2n=|m%tS}Y5A>Rv-i;^_;W@1Cc-s}QO}NYa0QxOh5_G$#_>`4e5yYCss|w%$+wx+l zzxY0L3&vmyP|iml*u}6{S}U5g7%ZG!IDUey_Ew0O_5#G=8Gke`(j14vkR6%F(e;EcQ^A#fwIel7$oQ^}FIHor}@WsQ(j05Vqh_`*y zVbzVL7lkgWZ;ddwDpUs5dlZ1ah6MkP3{>x1Gg(UnNneYdb z@PA6eN4~K$Kbu6nn2ktF^rLLV#}n3`84kHd&Y9tGz9WMF_#1Aa+Va9_B2Ti5MfjM< zH@BJfo}YKiSdM1q9X6JuVb#&0AeF3F%_|84UIq=tntwF8<%$JLCgs8lx!dxBgQa8% z?F1OoKgMB77+g*9*ItG7Un}yYbw2q=N7VZ9t1R&U4?%1|S__ zv${N|Iaq*>q^3A2T)S$MSyWnpXP=7;aO2tM#Fznjh!TADh|Id&r|i6@u}PF#o|-r6 zD{KV7JT@fJKddA6*kf^X^Tlef?Ib?XtzR zzMj(h#?iFo%EE4FA<1u~B)@el#p6bb=N2193u%5UrTOjmL36U0;%gNyW@l|1)Kc1m@=+9DplytVisDFRsGYjLS;uQy&JL0&QYu+>- zS(+`)jFPb>sZ1FS{|FjZO2av#9L`bYU=ocZ%HbSUj!{Zu*YeOY@x0&PHGKTycEsk5 zX@3~awe~cb+;6-U*!rD5rMFD|}VkBUjDIhpai zP$7OE$!P)faCkD|xG2skbd?2(7a~lIX%KoPWJ@E) zp0~4*l;wr&@dEj93~zXfp+0kRp+0kl=*3VDe6TEVe_xp+t|qC7&~U=11OBJq_lG{rSK=$m1heGJ05MIzOlCXD$H?(KKl$QkEb-UGo56HAPLtw_ zQG5*TZC`@Zk27`#79UF(vVXxN>iiFdKT`WFxM>=dwL}OEzBmF!42LhX_-KwF$vKi3 z+utwMJD>#kn9DkQSb*@qrE~&sQe11YOUPTtSk}X0s`goG>e;sK=oj*_3Uba&-^iwl2q{m`srpI;ExObmU?r_(_>1bSC8pjZ$y3i6TYr*uv!+KoFU0qsM24 zl_F@x_$)%RfPfEnWPi6Np|O331G<}r!$0srxP&eOD9PZ?E3kS~BN`T z2I(K&0omtoCWJi=xPZavjpX)g+hU_QS>_}yIhLLX*N@qCe0%^^2Dfu?GHb)4bpmQr z(FY+H^J6LsmcA$~+iYlS(q`i*xN=-m6Vh>|r=B&>lp3%*lYiN9E$v+IzZhEcW*zY&X$C9`x2$20d+1tFN9YiR`o?QWOuB>=qXgluhr8i_q$1ib$%D zH>)I?XDo#&r+*_*YfVaxJsHY8$DGpL)4ey(_}+xd>IcdA-fYI>bRbH`f;)I%3dP8G zNZ2^AD6sd=G;o}j&q~P&3;Zlh_18_Y10V^HYlcQ_gGIKS)S*|y)Zh4-Wx~93_c$QxdC!@XyeUrbp zeOwNF?u+6ePYj|W^h*LYgZt{QF!XE&7v|m2_V^7!Iy=csPv@&_iIQw*h7*(&LryLs z<@4uJX(riHXW~<&^Eo8^nAq+tQ;=dgQk1h<6n~763y~q8#>z!lz^b@| z(yRIx)8(RLqnRb?UO~3xbYkuRr@(ZjoFevveMAb|9i^8e?qjjX?9vpr`&hpZZa&jp zmVfUZodghaX?6|&W|c`DUqUIPT+$R2oN4^P^POwin=I1mM1&!3`Y44Hw~KWG;=fh`uA}zm~yW zOy7~{OKF0!=Z6#9DoqndM)Mob{vekI41a1l4s#YKj4>ef%TnsuFks=LY9+DJ{KoUo z%BiMDrkbXyo|EN>(<@gZx3yH5zXI8h3c_IsE={ffBsF{CmcNHXW;^6B`};e?-XokQni<#lB#wT z(Xx8=`_Y1$z5H}ivzMRZH4`?SdDzpuT2h!;mU!|NUGA7=p3xG_9FU>LS);D4v>9KU zE(~aBxsb+c6GQra&adX?o#B(i9H2#os zgclSCv5BGZAV`aUKBvdyPZCYye*b)y7k8&u6!0Q~(*r2EyBf!hoV#IYj8W2on7C=yf{HVBeb# z@N!+V9Z9ZcCgwh4QHFrjOs>U+ba`Ned1*{L;l)g%H2nUFZTeZE zxj$WRJF{TI zOZ;{=31lV@+)6^Gh9$JGI4DUa+W8kcYD^AgQgFI&4rbFhVw#C~$jCIrL(=Ug3ypY) z+ckXrjwAP&4O*=bwU;Qye>^w7Nn@zLJrVL>x@|E0?S7w5Y&Vm_*uQBCV_%Yu?8YQH z!;urtr??^EyTf#B8Grhc9w2yJp2ss4-1i+OdBlsO6z>oa;`u9*=oL|Al7$p!LktbU>zpW()5X{Y)vr#5=xH!%HEFI3&NcR;ogGqpc`hcSr3Uy&0JW& z4>d@_WSa*Y{DIn}dup$HY7Vx<_vANS zX{erLk}my&0{yut4{o7FaZ`LKoy)aJ0aVfSd#B&8iO*2fh zSjO`O#D=*#T3^o~hRkq^m04dIjYjPPVzDyK4zsKxVzDv}&C*P*fOxJ1(O6$EQY==c z*;vz7OMe8SSes#IG#Zv*mb8sAYOZNT%wm03SIkja?j@}=GKTGyB4$bRSnJJJRx-9d z$2wioWvqs=UO+rkf;b$mY6ZlzC5Y>0t4Og}8`fC0v_=83SQ%rrZ8X~XZe3Cr(^${d z#b>2eSzB3a=J85uvff#1TY0>aima_ztwO%VdVdVfW=G5BJ28CEbIY1#Xt72KYFTa8 zn`Wm3wOAo$wMRx7YOzkH(Q2)lC8)(}t+PgJg_Vqe#cElWwVE5SIBKz4W@orQGD}d4 z)iR8=a;n8@t**6KjA03Cv0Ckw)^K$#%K_UBOWLYET4zPXGbQXFHLXzrv80_EtZkZw zd4H!Q>&|F(v|2zc5t7D88#N1vC0Vyvn{^6^MNV2BYt1qWh$T{Q44GNX`p)8rxwdAl zw+e`N6=$<%r`=gEAl_X-YzSqDJvnDSu*efB(M-`2Gbz2q!aTa%6J{@^9c5~fgt->jLzjue z?ypA%VE-440##CEP=^E`tP2vQ)_;w2jgT>-Y1bXkjN3utjiXYxj(e1fO$kw>TwS?A zy^V}n&t&_Yv4^A(HQz|w2$S$yFBLg4hTB8;s<1~+O!V2p%wCyoFDIKn^=Qy-P_T<* zdC*v1f#oi5ZV!B(#s!@dwOS1l15s&2M6k(gH8{usGn+Zx@bmkjZzvG~FMm$O)Ft+y zBom{;8YWsqg?ApYA*0yt4OJ4<+eoz%E-%}gVb2`s1XXBngG`a%%gg9*wE`t<93BR- zK{In8>IS~gM42EIt5mI;)RgV4&fR)2#kWZ{lbY&=Xd|qZ*=wjoh8%6+EI*W*f+ZDpidxR$oNx)5YCT6kR)O1u>&9}@$ zW2W$EU~*!sIzaPE?rkBrV~u7>GbV=ucxVw@MzXmhd2UM4mW?OM1`Q<>V_>YS=M0W@ zyPV_s*5_tXAyeMg*}~XiprDN@v^S1I6~#pfpl5(wo41D>X0@sc$A35ZzVAXKm{n~M zpBp7D6+|Y?T<7C?Odig%lUp^mMVbvhl{FtbG4{Cu#60IcPnj6*fs&*SbOkR&?2svr z=b_6(moW>hYT2&oqY2T3oFZ}nCmN|;!(@QS_h(z!>Z4BAO-wIt!g$NAK3?8tQk{jn z+1Q3!2xoyamnDWAIe$x*mj%Z1vYi@&1FKzM;|6PtA8<9s+YOKi#Bx&RJgTM*3f?%O zpclPRF%)iWEHNg>hX5#86h)5C$5w+m)n z<(XEVOOxYX*?-0;njl4frqAza80B1&ieg-VM#&qf6>C0j2zOH|Q+)0mTftTbs_;G( zWe{?cd46$+(wFBhy2{ESp0Z21a;zcq#VPW|$uq=}Y_e+9$=PJpl0|`|W*R@&7vuQ` z95G4kI~~AV#@qmvWpDBv7@Y8d0X4gO%FY>3i?Rb%mVcadTN1`zj9`39mY00tm74oY ze6pv@Zpkt`&Qr0WNAO#xwFIR|u=HRG&YInBm&A`%$tGXKt2)n&V>sO|_%XrZTFi;u zBw=F29QyPzTsoywnUlI5IDY)(Hmou?0yza(SkzRWZbPkBJ!!-eyCWyJJE9hd+qi4; z{l!MXSbrgJ;;|~>Sm^=;huLX2UWQo7<%k!>tv8K@!->)<;HbWHczCy6W;9wn;$9Gi zHbG0G9&47^m(Cg?K0;<ac@jcZTcxc2moYoEAr z?UOgIJ#*vQvp23icjMY;Z(RG_jcZ?e@A~&{y#Biz*S>t?+E;E|`|6ErU%PSb^Ea-2 z{k`kozj5swH?Dp2z3V@C@A?mKT>HX%*I$0``YZ2U|IvHbUw!ZTj|V5IH|yR%e2kf4 zeShRL_5`cU$&G8z6P;8EB{#0UKy*?==tZKFE<&FoI;kM^X?W*_(33bP#%m=wub4XNgYM5PFX2WF4W;f?G#~K1X!I z5c(3)38?zLcl~=rC!phV|@x=L?`e50>M`R{3U`v zB0BlEs|dad;Fl2mF@Vnifva!DY`^;3nC(}87qk89?_;)K{X@+5tFH;R|M5-1^?yJ9 zLNNW0zl?c)^;a>^uU?ONe)ZQe&#(R_=K0k(VxC`pQ}Fz~U;UKm;bg;CE6v`)({}@BJDW`R*GeQt1>NqyF_O(dXQ2Z@+&VcLE8ZVa`GZ z{!st=q=axKMcDR@U5@Z+V@I^xZ^gRHsL4WKI5#p&{;V`E9 z35TB|^q(>K8AAVpWqbw;g2|pI^e?|f0Mozmlum|#_|7vrF?rB)0KEe6JD&pZMMB@f zuxo_A^8x_R5c;QA5R1@vKBE&0aNc|41%RFb=yL$QLg=4f)d@ou{RjYH(N6$)7K;0O zfPgVy0N`mz_Z5VAF@L_q;U@`w2SH%i8{Y!x6@cIQ27thr9|M4lc^e^2{@be@2FCpM z6C8%TFo$Od{bwxMD?sxlos5uI*LAYZ^ZqVC&{Qu0@JT}d@(KXJiEjY_oIo7l-1h-M zO0Vc-40ZeGmjD3l9|8bnd*k~6A#+{@2$=IvI{gdmpbvV zp1%M9lKmP0V1Lw45J2&~4&YNj;Zq3lyg$QX!2Kr?~*A%FfM0GRYA00Gwb0RW!ArIQ)*{7nFW=f490#-^_#fbIBA4n9ffU%t%2rwRSb z*E#qip?`iAKx`DGgN^d%p8$y5|1CmTn12EgIQ~Tdfa4f>jnF^;9DwHu{qySpAPax5 zlU=0y)_+qv+2cdmRe*3H`vgEh^g008wVwe9y3bo*1?YKzKBbd=K9sxw5R6K{*U15I z*-z@)fX{Dz4j`z~?*M=d zuj=G9^7%;sAit*p06u>k0hH~h0E8}#SkQ&90e^sVz|RokEO?E>$b?^U80LUa0SGL` zR9N00AjDI>%wbIRV-7J42fT#s0054}-vbDl^CN(OIiJ$WJ;1T_M*u*nehmP$-J3eO5A*mv01)+C0I)KD(8&cp zVm%2Ej4)5>LbmLKyoU05QSK2w~ti0mL}~qao4FGVwM+lk{QpXCrDy#0Cc0r|fDIsAZp-{wf4B=p~2)XBSV zb82rO2-H5MlYjdJ4}1$j2z&t{$l^5)@hrY4J}`^#;s<8&9RwkZ|F1*++jE@S?+}Dq zd|D^({*Y7nF@g~I6NI1<-hLH9r1T>Mfk~gy$-A%eL~kMZB#E_RmSzjQgcg64Rkrb6 z*vhcLmH0)63kfpG9zB zCRA&{Z%m?05f-lA)64O&N^Yadwin_@J~7^v%c1IqEA&&uEm|c}a)|X_Qf|O;X(SS6 zj(SrRagbig1xx%YI>%9STwi~z+#~b7p23RHAxi$uhO>+Cf6*~CU%i*C57v!-|JiITz z+wSp@fehE-f`4B{rI8I;Hv%eiI`k$L7c=&-%X@XWYk;EL;Ncvc$*XW-Zo4xUr5ZvT zcC4$Dxt7a#yc5+PdH#RoK6~4MOIGx_jiKD%`8`lHaG(tlQ6Vhjth}ayvKM!qDeHT{ zjaSbZNaY^c`}{WCJyxO^66KQ8q2h?HhGNf$ zDt=#k%vm&jA%}kybodFgm(Bf5r}|Z(0W>kfhrwJ1IN4>^*@=}ETD)d&Y}l#D+SRHm z?zBZ)KrPUPJvIgJI~31x!=62e;?hgl<*nvMG?5xH5#-P?RL_p+=tc}D7EYz-43cYA zFG>fAoR&|@MPh{BgwQZ`pPd;6(tGe^prlSRPL?&4R~Ua2Q@y=JWBKq<4ono6dAEnV z{G<;|#r1d4Eg`ev);ZJ-s9QIjJ>wv_TN1(Uu|}djm?N7$O(kksQdF!H#7lB6)quGV z;$j7*fICtgd0>GRbr3;gPUMR!hPj1OQO)rZXNaAX@N?d5=r9Fd7W=%DagD{UG&E=|uL?*Yrui9{xgwj5i_K5hp1a8OSX2umElkj$< zuyo}GN|YDO?8vhy#U--5Y)R^YH{~@9bhkKWryQI0JmE5Q&Y!J5rg7CQq54RtuvURu zPI+0xN=iytsE*Joo^%WK!a>xV^(KSE!#JY}tWtl*V$m_J&s?rqn&vYpW`TMMdqQU@ zULa1@jq#?lfCXargZ1Bwbo-Xjq*YA3cCil+olTJzUfII~{XCD2fPn_=Y^vKdm<@F& z(p^40CNmo@2c^|!JXJYeu2Dq2E-cFioM9RL^PKWJDB~_K!vcekuIO?bEip6eo9 zV2H?Ql*wxp2i9`brlA^T!twPBg|uQynL~e>E*cZ2hZJ+*(<{ZrqwY`?v-@4#LI4MK zFQR4_HFAI@=JK*B#sEK-ha{j9;9HxGypm+f2Z5-JJRkfBi(O}0Nl75xSlhIU(EU>& zU^o!6Asp5>LZCx#`B2R}i#u@T&EZv?T>qS z@E-P^EE@Jz9YGuzib-OT6X{@)jOgQ`w@>tWe)*k87c0eNVJ$S_;t@GKa6svS56O-+ z=k=TcMcd_KSsdwjB!pS}#-7Mo-tTUqMx9u=0t!tvzrAd>7%MNyxi+58XGniJt5gj2 zvh~SnB|%8lnoYw6N76&4c%nxL84ea~QvxCRNAFc&R1b3}YUv{bD6Hq(+i*O?8x&_B z4)KG&o3^dGVX=`hb3#3@U%%sed#)%5u&}f=QTdyt(#4WOHq_h_q0or=IX6CxlOK!m z^9?(vmkZNwI;s&uy^TtC>!E+<^i-XySjgwsMp@0|cn0T_J{%wLVn4{X*}hcj^x!DJ zTA&`3C)lV2N0bL1V(~>?H4q-;Ri_(JuiAA=ONno;x?MsR`1eMy^g8Zz)5F=cd)VUT z8O(lBe&{LL9cH?k*pHTLNJC3-Mx4AmBBXCAOZ$*d4He5NO z&i0vE5st2LIb|=X6yNefrQEbqZoH?qQ@`3LFM$hmqc(!9APeLz&NL3ZeyBR|5-QzX z*Kmqou*G)7u$~UN>4U}YF)djQ32DtgSv(eI4|xl(7w2SCX0;?)ColDC)xoB6qOz$k z9rmp%{NJzltrLg%t15pdkd5;OZAQw8rIVCT7plIn?J%B(Dm~_AnhZ+GnVNZBDw?HB z8n6+w;nqNK`(y>nrYa{YeXCm8)Om?Ev1C;U=0$5p>d9gYI}2JEinpMFz3!YpAJMB7 zr>707POmvo>CCl5-RZRk5fXNr6>pKxdqf4|ULC;f2~a#gA%A~zaG%pU>-EIiBK0#wLH3DsKJbL}G1KN6}nd1V*2AI$Rd@ywAquG~tF zE$|h;s1FN~NS+b&`<34DrH?NE* zT8%(H$&HhAf#ZK>xlk~BEGAG$#@)}W` zXuBKLs+~Oa>e;X@m)oP$>?$)|ZRhMZt-!Dx~qA5s%wK%?tx($w?0k{IMxU&`Y@TK2>K*nCx zi}Q_F6hgW(Py9uPhDM4HDtE!;&G;{miA^Cw)ss}n-2_NYWQ?m z!S{UR1i=vk8n0^$Jm5t9?)vuT;h~h|SU16{X}N!y^$IVRVgv#6@epHV*bcLlTJ0lB z$v@dm_1@SB#j||Vm<8}o8MfKO+4hxmothSr*0f~N2DxS#k7Iz1kJ@3u_K246m+-*zEa77mpL_4utbPV`-3!+Mjv^E%P-^D(p<&?W!;8zSi8AySS{gS_^`Q9QcG zO0KzG^;BY|PG~E?ueBO?qmm(lJ!}-1)rNm@4m`)2_KO zj^`$RNtTr>SD+Ue< z%54g%?s4s@*n}#klBD+V5JS={n%(@l6eop*lv%n)fL0N@T0p2u#QA!tZX{gd#W;GQ z6{^%RhRo4-sU2*EKFm@F`Y{c@;5L8g0IW{oy-JY0><^--r~uRLk%*fWkC&L@Befk( zC|M>0=e`5y&)Ra@=VQKnmz4eK)1E)=HrAKy*knPt{3xo_P^89}xzv8L`$&OIK z!qZ{s+rwGNDkQW+FfSX{Lyvcw3Yt*PBH^(-q5P~&ha<&Q(L|+AL_c0$e{A#fuquhQ z7M&%qWt;jR>k8n5&f9fo%;-28a*V6ssF=Pw1HZ?%HMBw z=Mnw8ExZPgZ`0Ui@cMtv5yCDzJpB8uILlGG*|XKTKdQKtFo#WW3>>AmhF+XL=$`g{ zPRHw#os~E*y(;ut7jXyvPi54|%}X)Uhw zd?y)&6ZsWQrITQ4xP+zxsn1i1%nuae+AZ$hl~C&6hlhXUxM?;TjRq{PV6@Oh+0UvG zHOIDN`OMXO8I3l6V7eO(qCpgi@p&hmt(TW8JKaZjDm!XDCZGy&+3HS`uw)almkbwD zq}Y*fQ4Q=7?WojWiNKIoJOYH56GielY`uJa4vl=L; z?NCbKWD|erb_oGD33~*qZ>9u;1UzSihHgfYG*VTpSay>QMzJ}umo9A1?1)0MW5_^m zra>eQ;(#Nu(jrea!Ai^=M>HQH)PmB^$Giy8Yw7rGLTWsSBCzC?ay(X%AI?T2=2xUB zu)oCXC{6`^CYIFW?oA9-f`yrbDWl^$6tHWhMD2e#KPYBBX8t{HU|a0)u#zbXU4)3f z1`?8v+!&dHrf1-nDt&*m?;iGj^)LmOxP2XPA8?cUOnH68d8i#$ER{~{$n)-4bc0EE z$_tzD8!or_=uDFVnM&OuuY?+R!f|v&-Nl(`Qhg7qUz86%;aor|DVGf{p5q7Jk9>-0 zY!H9RLd9i@E)b(&Ve@=YzSBIvk78(AATwc@>A53&oAZs60%aoE5DFN@NJJ&vD<8RXtEhiuiBQz8*27JqeL(vCrCe6iv2O(2=tVB= zk=YpG8PL6gJ!V;sm{ByKLWL^e1GnzdRB++)X+s*0P{E%{(&TYTI6NeTPP=9u%q52G zA@qvpgQ!fpWQo8iz>Kg0B*8LcWwufoQ?f+Vsz#ftO82^^=sQUz#6oSiJFQQ=T~>dY z2sLOJ&f@%cipef;KR>(GIGtICwiF5JoL3!MXvjvM&-7i&T4=zb8~;JR!IdS4wqD-3d4iyh6>3!f2K0?$Lhw(-X*2) z_ucp;+=8q9lb3phz>^{~weeF)n}99T~gT z-QkO%4XX}K`+x^l$2G{iVWomk0Ry1VLFjVYfZehV7q^y_t=X8X&d8p@gh!3AICs9f52z(SZEXU0(dpIm~R`7 z{RBgW0eY;V3L<&~`3_o-v7+XNG^SrjHleK8nQ0*mmw*c+w#0QFZ1gG@q`AYA^10CM zc3Yjz3QG0XPdl?#8Wl;=2?C)d$@ z0XLL36r}kL3K>G~5h<9IHbwolL{Zy&B!4XVpyW?PW;Dn5n>D zl;xeCrMubWyJ_-@w~2oZw?kzyreK*--^r%hDPiH}Stu-LV_A_)niTgZM5SBV2HLun z2Flae!Dh&^eoG>H_6XL)jxtlfiDl9r@K z={TLV&xSN_1WCR@ETW+CA;}>$<9XML`*_TXAnouO3{RDJC@J|w0vzLsgPvV?y}gQB zhopl|7jlGbr8<8&$+FtLrDb*c;+#L1m!>}Iv%qt9neJVtQ=jeH-Yn3A%hDwH5b7{X z6CIo=(tzi0b{o{9Nk-ERX^`AX>gUMfhVQ=YgH8+oHseBalA)YxuOX+5dCMqlS6i?cK#!KGX3U#}! z*hfc&_1u3!eg!5tdw7_S6m7P=Tw%TNsAOSTGOoE~cTq{{_7II#v!0zn&GLGlp_D{E zU|5(F&6KYg?N+-@8rHha*gJ#BzoyZ!TE$XyLr~C;Eoy9nap!P(SuWMFZ0%6>*j9CX zofAS`?L~~>d~!-k$eRl?bE-`Vi@8>p_3WA!e^GxHpN~4#Cber$t*Jtd5TZ6D2h|{6 z`s8$#Nhd8R0de>-F5mpf=r|?K21^58!@KNN1XEs-alxSczv(4C}sQK z*JlB{z$RX$qIORmKq}f}=|$sR^~Zw!%9(#PI`9jO4or_!7scm%Ej@ z3uhuMk1zc4dNdH*bGKWmcwHtYGOy5hP9?L@oP=CrnkM88hYi+)%5k1;g+|C^`!AxZ zTy`?G4XNjzb!>A-2OiH44nS^g@O2PT&%NNy0(KuWb{R(7Kr^k0yGhP^Q=3`Gh)({` zOJ8_FIqOXiNTf<*)AoXHb-}B!vTJ7H9Hw~1dMuQ0lXq>k^ zgD7)fcaRy?dxH(uWvN_OK<4{Fe`|1J^U9SghgW)+u0RjC0&fs1Hg(kQsY)2Tm!MZ} zzy0j}=gwa~bKm`E@4dV1%VgJI>MzCl0esl+_j^e|zdwM0 zN*!Sp%3ya-Rm7!%jrreq`l0iWUS68JxvXg>qy7>ed{))|Qd*Idm-@kp^i@q&-RuV^ z;^Rt3-L&<;F{{mP-@->5aQd2yj>*7t9`)3zv@~uHw^{eNkoTaFO5%TWA!%4hx{DJ9 z>kj!}X@#9>e$n1r{UC1d`iadeDz-u;4)0y+_Xld+7SI?fw8qKpq)C%)n-dBE2i1K1be6;e zDA9dXtIQ|Cw$7++=^=j&Oy6^yhrBah7%LeL~Qp|KWhmn|Efxh_f{M`wRY#G#xDf3Zt@3C2K8 zjv-M?#G7r8Lw!Q+;GwbY?g0PxqFnW0?ZNJ3J)vf_7F;OiqIx^3bj-1UW6Oni#-jznDI5ihI+9J0ZF2h zX)3zKQ$P+4nn{1Qyj&64W)e$jMQ+J6t+*OH38S`TMxBi!zLeuH^<$4qOv? z=z$L!*l*Yt6&4L4+(j4mQD#C8ii-~-R6tSo(+T8=rHVN=LR^OcP{TzS(bG1Y1>j$b zM}ktRi0Z~sg_TLLU`2!!hO;4P`%T2^ounD_m>Ouj)>?m!Mxh9DK0WhTe3B%xysLhv zZ)~Hbfq9|Uuog3XW)M2JY+onFEcA%921*c=7^n?B-(o(ql;JinlGbQoEdLVit?bo$ zrs0^C^~UaAZH-OTf#PiId$sjWe7SAH@@Nbizxq!1lvUZOhlan+LVmpm>$-Spa?$<%L1=F|lCHsOmQ(oQS3@21)rQN%@Jz zLc?}~Y!OU2)0~2J93d0UMnj1MYlaU>q?2K7g;0NQ%3OVKc{!*DV{h*vJ|x@&Zy>fo z4Z7GbgV|(a_y^_r)r3lElFD9Q-rJ0e8ndcEl82MsnmqzvDS?>0@I2-S#s*W{b(t^m zRtOo_X`AoD`qVU>(~iCE>SSVDmcvLSNtaw25|4pgRP+Tn$ROnb-!oYdU|Z4!BUZ{4 z(FcD?eq;0xS~_t}fYczNCPz&%BEqYB(N1U+^8w~>;S?%KL%s}_?@eL&QevPs_I8<{ zeumyu^rl?9B-BD<2p1^(dV|6+mu>;%s?Pkt^J`NN@6nWz=Y}{3PD9R&Y)u)nbcN7yy=;9@54}6>eP(}EG?fx1JQ7gMU{=bUYADc4Yo6$B`Drhyf=b1$rKRT=qtXm^A`B_N1)=xA*AVlZq-C6*)X~D`PePNMHe*&W7-Y=_1>Mro6|hsC1|6 zZ3M`3c6ex2uAGGB_{pXDZgd51K;gj7*fZ=<2`jtQ!@bA^_acXfL1m|~BROIvo{T28 zr4#XTAgDQZ5N4*mk;C>C_^E%Ho&y$>_7o6ZxYwv1)cC5+XCvLb`TmAXLQH_lW+%2uh)%Y^;jf{Hf`KtKz8QkiArTnMENzi) zZC|uk8_gVtP|0VqP98$LRtX0y_>i%B9&-;p#jt?CpuPxqb1F&9z$9Cw-R`NNvYXu^ zQO~`X9h~#_T!^0U)_2$eI;qD-TwdPge%bG32Zx7+$YW_NXk@}%2{&x#xRI_3YGJAo zGIc?aSc)@6J(m|#3Fx8Tn34M6GrXl8FlnQ z!|8)`p;*>8`2maRGv*5RCF^M%<+20cI7Z!zfyq7}%%=NgYwNR8?v<@hy2y*}Zwuw? z2R$#WBrib0Z8PV^nS*<*3h_M;OzMgkRTWO?@9SImb_yO+6`FGIGH0H;ar;SblJ^vv zskj~Yii*#iE`NXO7>pSUi2_e(18!pP)CsC{HEXJ);`O{%3-`qMQUg5&81VLW1w6YK zBMy>?&^O$`F+wIGr(r3c@4?z4`3!d0o|FPVQ{=!j93-g8-v`O})Nmm-{WwTIY}Xvu zp76X$9Kwh9THqSf8h?l8vLMivsqgKB;ll%tw(T1htTlgXa3@|%u6vYR?5=Grjtsp* zoab9$&oC*BV+3Pn)t+F!7l*mC3G;2UHnO*8KFfy+ju((Y6+y}eZdGfx8?Z1F!mnd% zd$tvhb)_-gFUAQQbjL@dkrXLH{q13;y{>4jhSFT=C?}PATP>2{drnci<5~$c)pftgj`)H zqG7u=Zon=Z<`@UwEX;>a4cjdQ@>44%()Bo-4+npm4+okL2bvEDn*XGMrm(IVdiy{d zL|cq4LvMdWfLvZ1@h2w|c#dtwL0mqMvo>30!>DY{6d&d&ghbOn%T9$ zaD#tZzkRKCATb5@(>!GtFQz z)8og-`V1D`kf&mKVaTkFn;}GFjgFgi+@j+ZI&RZ(XU=a3P@i5D3q$s@l`!VnqUSS; z@rGbF!E?9gGLa!}^&@xyLU)E14UEYYG!W)FxtMsahuX$;>&^?F>(w4)+cU@T=>>o0 zIv%~?xrXV{vuIVu20$6`7%)MC% zOwwUPFCu3)OmJXWna>$~2FBD=adSLhByGALmlkht@fNox_>#tXvoISMk7ig3QGI6k z=6Fz6d*J=ZW{$NXN-vg0daXI#mx{niNJB~)G0v4F&B%6GZ8n8l=eTT(`41wO3{&Fn zd@j^zbvRG1!NF%VbUbtC@tJ=YGHN;0vS?w%q+_4*)hOkoSS)`L)-zz?yhI`$xP!6g zX@KX@nL{Vcoy{?KK8-^S%LLl84aeJ-k|%xDXV|UcMvk~}a_Nu`T9eLQjGD)5Ax(6w zXNP2F#DJT;l*qBDwj**pKI27>_a{J+>{2xdYL` zb=M0kBCI=eL))p1prZ~{HOZ69C7gxP9z1-gg=02hgL$FP?;3ukRvQ_P15vf1M!g-! z99MYwP*YE8s1{p>Yi56OO~j4v5Jm!r5iSN1s1;@*2x67%7cG_wl-?6w=$KnT`b?Q zHELvodAY^5E3NyC*V0&TW`oPo_Y5q#*=$Fcu&SA(f*fe;ji#oR$ieEcT_Oh!)-Vfl zV6>VovmggX$FP5f!-5=WRQ|Y5#tv$&uX*Y=zLVE zYwMcQY-x4q-p~%6mHHY4w-v1e`~qgLt<+aF#9dd`TJ?W55NF`(s#aeG8qKDrG@Es8 zZKH-lzqVdqYp#4$sW&>>dQ)kxw(F}PztBKhyWVOyA+FU~Q?#{Ky`y1~OkLq)FBU}p z!iG)0u-%d`oG6$DTYX*8d^VB8LRVMn?OY%zZRmDK8?DBACuLNiE1h-D z^Hy_RX{|KtZ45VcWqplDG_*!1jWKj(wZmIOTU$x_>gr0XS=UzA+K{BVvf5Fa?RI^& zp>?o>yS%wp*4MGUG_AeXR+{Z*y|LcWRzWCWoPdA%UN9WShxJZf)7n}R)Y6spW_?BL ztOJdP)>u=t_0_u8T5Y%20m0Oj=4ySdvATw(SZ_mR+nxH_%IbQ%`2k1a|7>a2T6L|3 z1#ij13~iyU)Ylc?V7%5|Lm>pY zU2S*PglIQg^=7BRFWgL#0YH?_L9vL<>);xx(OJ_prQNEptTfts{}-Q&M_mHuHr`LLh-*Yp#4t{BcHLx&C7qS=DiMBZww zb>gr8%pQXEPecx-y;Eo&rNL*Q1CPD1*;{}0)*7F}R~j8{P0>2ddUK_rHP^)voK|l&nz)H;G~u+e z+0p9F^_6C;6)%v|b1?(izq;CNy4HdmnvF)Q(-IvuL52-qyWZBg*tMFCRZVGj zn)UUywf0)Pcv@epuXK=HqoJ)fI!be8wZ7I`TW@M5$8zS)NalK9qpW}HHOe}kcB?~Y zEWH0s2hI%&PffevEJ73 z4860q)@aMOE9)&>3pAUn>+6cvfi?Wv+6UPI{bE_y|UWw z4zTV+SXj*H1eI#eo*b-uo7HQGvZb*YPjVwO zC6L7Ia#eNJcJJ8TCY5upmBSi-izo^sF%=Ss$z~GUS!0OZ1w?-~F+4<@S$_2)#Ls@> zx8H(Sbo~$Agg*)Vlfd`x&dy@!_Ga2t->e8^vTQ)lFebZ@B?Ms%DfCz&5>P#QhKe9d z62E!_k=?!nIYwNOA&Lm{SyBOJf_eZ{tl*UZOd=v?)P^M#Lr4~|f<&++M_G^nCuNAJ z#6?EGz)ML+0)l^lT>*$npct7LCfq_Y6d6S;5WvX9Bx8yq0Fhleq~)v(nT~=G_Ed!c z6D-G&A_)Y@swDfQE(1fiWo157>I~Hm=p!5a5-X#1j2a|V^u*hD5_Kp zCbJs|8xViQ0)~>}2@w@BMic=<1&e-fspOw4i?FItYaaG|{9gRPpTdeHaS{~)RaYTF zl0=E!vgodXDD^Pn20%h0jtK-IMM6tyo-A?&J3)jRE)Z1!swe@)+f#Owrj37ZJSVW4q5J7^dg+iFjk}(MhGGMlSP*PR6MrO&FBngC>7eK04LSZ6fH)+nfs=B1G z@NIup+_Yzlo3E`yaox5)LX$J2;}On)NQQ3g=Zy29ki6Ms-x4EBV&X z2#EX!=9$%DPmYVJ$sGiOh`&Op@qKoN?*V^>!FL}~V626Jx}4A?T_?s+)aH3Ln{3S( zNgyaotEuE>6`aASb(^FL82a4r8|KMsfWF|C4jkA|O=?XYj0|WyF8Q z&=|;sP;mp0B*NZ*o^3Q%Mc?p^(5D+SjN)%yk4O>0+}2i9CCD5i6r?EZ{r4Sjjr7jo zF_^gdOo#srkp~s9QxiD;{wG+L#qhi{VpV0&WDcp3NoSvt#2sB^Q2}Cj&b^@S8S;}MKN<3U3>iHAdYWlkGhH9< z(2LPy8%jaNE}{k?^un~#4=N&^!)21god6+MF^Z}b!+;1(>gri>E1nrJP%(c(%;iO) z)cp?-fc*3#eOVB>agIn8%+m5^tO}Bfxv*T6WV&BTf-I{d^yDcb5mZ)CR94HSEpO}y z;TXgc)u)THy1{e@(Q7p1W{(n3LW|1e`Jy1pL=74R`~U&Op$F26H4acwee%dBkNiqJ z5Nf1SL<^q3!0EARk3RPKzzLZi{1r;mY+5{?6==--66?hybak~|S zpei#VEuj7;zK|C}YU|FgaX_Ic)`VXD0954vW*<}}h>YfBC58nBOIY$*WMvpc`9Tpx zmD3&sszM_3o1}*8Nah?xM5M~$>E#>&2~qhTNHy<@O4aye>GGvgsXTw=KmZ6zI80t} zUj{Dvq1XeQ{v=7H888Bpa_Cv$vA`iOlTTjwH0;af|G6)eb-=f(&nDj*It?iq!ugfl6Ufh@TS8$rGPE@w4+p@C?w#6K(qmAQFE8i;`y-C{e3S2?|hw zlI&{-MJTA4tB8OqDL9gY-$!KcutJ0YC7i?I4_N3K_#r5h7zU~!D@bJn)VL;4Qn;%s z0-{Km&WI3ksa%58z>!0TAuIwELn37bAgv%~zL%6%=HljG=M}9GLE?7_D2ghD=u!kp zB)(UM2n2{kl{3?M3kOqhfPBAH+{Ok&?nSgSHFK2ie37;yi&^T~;yocPI!@8iS}cB|Df zb{R!OBBJ3l?qiAab=J|AaQ>aKP0o)|B4WaAoKemX2yi_ z;c7J27ilSQUVn=sD2T-KktGrnGay+}iM-K67g0k@sDvOOKu~PC`aAsTU?@k#Ig*MM z5POr#D)yhGAmk>f3clAsV2;_=05Mk^1}B*5f|(K)ViK5M=C~kn)1>9(egi)>`29RF z8GRmNJp6x2sGo%TO$ar(TaGlx)y*FO$QMSd8F`j{kel`aNtM+YhLWJjcu7JsJy*C) zR#mVXqc;@=Gn_+@@D-6?ejoyb(jdEsAjyB(Qd19!n|ec0rdJoF2vET)_|;yvBPJ>U z3gQ>$P&M#sZ@#_oK1FlsvNkfk1no^#4i!-+=x@(tmB}YR=*xh-9M30)NI} zs*6EH0RH<_1%k|RW-$_DNTh^^3ZVAEK6!I-9}ofe8$^aL#^P#oYeQGZ0fKvbh*|s76w>wUi;V90t@bcl(=D zr1}>%wDVBWt0)JzACU-%Et!BJM%1V~ygbPmp=Q!t5++LuRp*75hbfN~$PfpBs;YmI z8eJVOO;g#^8xfgFg#(Kdd4X=)0np~>C)$MObHojM#TxrI@lPK4Al%Z7pTEmhypwt_qlN-M@$sfH6oF2oiny97N~`zRfTeBw1AhBo|51 z)>0^ih3E^?07+rFUqr0fI_AW@TbcPu)t^-T525OiUT`v>*o*BO@#+rZc<(XD#X}G&h->44q(aem2T-Wy zQS=}wG4&(Bil88($^iwA0=X#}5;4Vp#5DF9RyZiv#Jj}!{)-Bs{?Rlj8xeo`(dFxN z5aOy8BvZ&n2#}yq^Phkw91|{!!UW0eok}D{h0-tZ1dOPeWPy0mkR*}DxeG5#GKM9p z{oJdmtKVw(wHN$D5``ZLt>Gj#8!-5azrXN^gK(j zBE^tG1cidJr~#Zi$afMTQk;J~3={!*(byS3tAe6>(KWCF6_x>0n-AZNL#Ey&GDoz* zk|Z&N9ToUKLriG}Bo-bJA}GsBl(N``4SEimDpKf;HB4CC%n!-jZcQRcjL;RAe0X4I zvV@7eBCCT{ot)+N9~Ymz_sM&oymuJyg<@jbT5DqNFz=*y2jH0l144hQrP)F|G{NrdXqWg-wH0(ptwO!Z&HdxR?zjaLehpokK$gtN3Ph>*L+ zh}4vpS?L;UqGU|sjY6wwqu;{-- z8NNY4O)q`?qJ!ua0kNsX7?G8gwW4xv76GAlxI`5&R*`~NX4Z;GxOIX|L6IRK0tS-G zg0gzR0hkauBn^L|1kYU)0ab}1dOuY}2t=Q+6dHm`RlGF?B!;PB9*~f%1Z3Grl(C1Q zEvXbPlUj1ape#fL{H7*CNv4?3f(&IM{t}P@6nc|eIAZ{AK#{+SsT^56rW_V!W`=_R zl8F*oJ0{phNKv_3l3sJeOUG;>WfmTDqimcqg+vVmxr3-Git5^bN+x5XB&h6x10_T+ zh>>;V5IUH<3lLQiaby{pU#9Mf5~0+ynyfnPyR9eJ398 z4hJRfwn#lcBm{AQWq>Eip(KN1%#1?B3~q(7j0%>hX9{AD^S^@NM5NP=xrGXZQg{KG zmyN`2522(&8igmge$2l`M0|Z!k*b?R%f~!yu*8v0v5aMZF|>LNK7dNU!irW&6%b;^ zG(eT9b!lWhne&jUa-vZHq^J?$^<;`xpm6sjNmPLvSx#n`!|}{S$k*~40XrZAp}H%Bq|}6K|oMNF@kEQ$SZ>mk7KY1Jc$Dh1C9t3L>a3X z2Ci$6OA7#h5fv)^X7?x&F3|>PR7wtihA|N-n2I2)kg!XGXZR@k!O9VZ6ebLW4T&qB zK>|TsE{qgrwJri$o20JXNktkmPV_hvQotKYAgTmc(gY%is^|BLp$I9oD8-7DY5rH=5jFv3QjAC@W-DhCg(iU;3@xXUw< zff!j7rl6L*utuUpMeTtl`2gpt!$W_2EOvk#?~~U)dF_+eHt<^L?bI@MhBBvJ5kokf z%sgLzQ6qOe8LSLIefa{L1DQ&{F`%yMM8+%Wx91rviVAgPM+7LcvJxy1E)6Br-v=Q< zm5@r{N~(Y)u1{vtdFn0{;2IBHQzT*x(FY|7NV>gmgkI)|B9PTZPN7d_Zk9xKfIuJt zQG&{slPHWmu&6=~K=vI2`#^zSM5+EUJell&GR3@L5fZ~Q$-Hlw={%ySWT9!~aH>3f znzEWhcDGWz2sZ(u2OQ24AvrjwY#ZYrsGsp_LK)pKL})}Y1(vhP{XLj;SjC~K3}1*h z)AbgIwy*R*WDIbKXzh~|J~`p%_uZm? zV?Yt4fW8xPW&^4iL&yUQ@QFU~4H8*khLKC_qQEymjYO7IAFh)K%CDf)B#P&^MdNe( z3&QRuGRJd~0?Wb3o5$5=CcFMxFu*i_E~=MTE8(#Y2W1Y>2-88*rDKaK2bU-OA+R=) z537ZVR7F?iU|Tq!1rFQwKY8Jk7k)lo2=4hqO3*u?XKCap50PF5)Zm`NmlIKueS;T@ zXCQf|l_C<=NDh7zfrLNRXhdQX&f%NH$iRccl$p|s#-@Z~054PGRulk3NrEwdBuj#d zMHK|1iixrpbDAJgj9~((Tvc9%BBA!G3K!V|nT>=#&VZ_jK~q!Jiy$cxYDTX><{c$3 z0|!&X8<7J_BbkC?DHH-2tYo@Ocj}2JML=OBF-3}Fs5ZZVtqUd`Zii}N)$lSgwI6^U z9FKyTAxdzSn5Q?E*g8WRTwIBNEE0QktD@&6A}KTob#Rf`vl}d-RVgH$7$zoP`AEE= zGzy@u$5<8+fh?3-)J2<_fr{L@0wY2}JHu^hKNM@9SVvJIS}(NU{Ir~61UI93W_XbGKNJ# z^rYx2vwsaPWHKfqfhrMs3^gu>4z45H#R%k}(G*4#hnJDD8P%9_TPj4O97V{n#d+C_ zB%vZ%6{+_FL@M?INic5+(Tm|KVTJl$hbe*CsnSJd{X`;#r3w>&0@utFzF~+!fl)eO zcVY|`K2r)pD=IkHl8y7_0qUDi2K{8vPX_%s1`VU-3|-ycA_JtHqe4G`GeuM*vao~? zRhUrJGkHNMh7@Y7qly@>Lf)u6P$*`X5{!ueWhmkR�be6C$erqV-Dza%X|1--74u z2W3cAV30;GCP-R;T^=O?c-Qu@Gzk&bEtt3 zK-vqRVh%^+VXU`D_V~$zpFH@Whg++`Gs2#8XY;P`CHURVnRl{I?fIV5H|4Ml{o6d^Zm2R zAT|t8y{^iNC{qmx^>M~Q%cUg*4P=J00u~{E#|in4wo616M9eoNS*HHe;W=ea3=+Q= zQs`kCu_rX8+;&tFJrFjg{RvK~2Q|iL91|aD3}UzhH&z7|aWMvzsAmPc|?VEC>piwaZShX}(nvgY=2ii)rpp$i_QiCcaFZm7GWim;un zcmx!s_?2+2j4uy(9)2?DCxd=6=tnYW@N{%0Lq~UaJRqse9(Y0V=%okK07Nnf*!2=1 zdsvtR!x$1|1hBZenj*rFXpAwY*0d^rbu}YQ0N~$EA@qWX$bj1L0*SIZ4B)2GEC~4` z8Va~6GQaI$2r3A%=fkKjZOiELoQ5Y^>&!dy=ancifY>%S2#0=@rsLjAqJd$v07uK{S<#`q5f{D&p`7 zkyNIGT1#zlA-uxFl8czxN{bW_NFsQ752Ut!oJAE5Ru^7eFAM=!Nhq|6@X~tUx$0h_ zdiTi6df0t@tCAHOT{y6=Q6u9`+@l=MJ;&JFdyI9!1Mri>J~`}@!vY*8Xr^wvuWiE_ znc5=RwT<@q%xJs4B$f4^)8BD_ZQ@$-j%m%3wlzD~OrtwwJg5)dWXII!amUqc*ZW|& z`oKxHbgGd!f1Wt5(OHBOyV@wpL=vt(cjK&-|H-2|uZvU7{v$N%GbYM7@w0jTq<@W0k-IypIiIUi~5wlVA` zeSe5qoOM3X*5*c9;YM14MuHX4CgO3n6!B&^8Qb4i7%CAT81wZ~FkA}qQuJGwO=@Uj z{Hjjg{`Wv{8(Qp-fid)^I3|f;G(UOuh$TW4Ez6t?hMZJfeK0aLm+sLt(Mi&5v4Jrh zX!Ael-)FF6pG4?_lTUzu8d$>Ho1jfKSF?qYskQW;Wwv$uueLE=5-X*>T~z1OC{KP9 z7&9zO8$;6=>ha?$Id^Ss=ooAp9og1Mx7|gOP2*W~UeAKmcg9B7)?1d%_h6pdF--H! zac!%wpLwKk*0knvr>C{8*%@oBW7z|m(bes<_)Ks14L45PYTMR-D9+P=aIgN+y*Xsg zTw`DiyYbFs*rLCatx3~p#hdzzq1%55;u$y-&>1{AxCq!(qCMfmO44(;r_)z=()Yz; zu{|8)MNbNf%u*O_Msjgxj1R1rc*k&k%JhN9?*hxf`l~eDm(LYtvgb&|Rziqj&M;=Gn8Rx}9YIK6w>Pt*M(ajsJfAHZ2PS zeK?7k#$W$R4lVbOiFw9S;;!ME`dM3dHN$kygr++@Nop7XpF-TSg>Ju+J?KP?Trp^TXOD6ED_tvyx~0vu5UnFZh(7sLVdulkHTu#obfLb0O!&3gO=no zx@{XienSs`ZGzrZET2ke^kH=J?(=Wg>GsVoo$kzQD;Wc{`!O_{$3V>PHva(B)FYOl za>-Cm-d(*NF0pJQn2Ao9KbYh)hfQbN>U134^#{tD`d#Tw<`nxc5-e_Tt+5` z&d0`k@Nx7W*qSz-Q=895w%NqnhNCr2z0G#kfBV0G{$@Xu{?Au$H;L<0eds!jj^C2A zo^I=Zu#AQ$6#2Ki80mj9(MQvo|9x_Dwmlc!8LYogPF^|v(hK==x6(!#(ZX52B27mi)$u9%V>GP9{e$Me>SpOZjMw;7;(bwvqUsxcuFR}KYUj{J{!?9uvd;bQpObo|h46?uRmjo|M ztN9)4znz@_RGl@b+&%7Hn)#B&sm_moYrYKoPun&Bj{QvQ1eX(#i6J&@NN*nh%yl}n zvmU4D!**zY(1+X3OW3hvbca|K+_X2WlDF;*aIJ=I`vO|axph~7!Cis=e{D~QR`N+{ z->b75b_7)w)Zf>(gP&oEe#uRnvIxJSzlNn4@p!A(-=uxm{C;BDJKaOo4og{oX5iN4 z{n#QtdC!C7SP{rdn7LDGx#LB>O==l=`U!M@vrJPPIXV-zx$P7FUz`88WtsHmIXZcr z8jjI4Oj?ZnY3i$@JDwEm`9tN4Oexbd+HF0ml(DUulcWy^?>ZX;ouC{%xt^OTi39q; z!`>OI{(7Rs8xI7QW?QqsvGd**U%!g}`~?TgxBAYGtRCyz zJ9P5u^A25sz+$|_jbU4#2S>Y{qyP~GNn!W2q2L1%jOIbwSXCGcOU{zsE|QslWZ}a&U#1Ol`%aex*BhFSNFM1%xu}PU}_S7;Sx% zSnzz=;f^ua+nerSjG3)}Q4$=o4Z!oA<{;aoYcMKZ+d#|PvIcb=0h5wVb^`tp7oYn( z+cISlOmSM&Ml9acd)m~n?4?X*wV6(PfgkN@FEr7A`9Ek~rnXp7h2t8n{=%D1{+@Dl zvy*gOTX$Q%=yhgIrW91Td^1(~4^?l6C-Pqhp4g!>d`}23!O81?rs-O#NPgUqF})<2 zX1vwYhFv|{CHfanqm1xJoU+nM@IiFw$Ric;di<=e)$Iml&U`Nj9Olan<@|$gIN_F8 z+w+d?itUaYcIyBObGgI*s*jl|u$m@)*#0YA`~U3UqTwOM*?2c?@8MlcAGQyz?Ra8X zC#nx@A|58V^_o9_Xq~K4+nNn`9K+tx#*`?l0$xL&LBa{Y+Pr$I!6a2uI+kg)W5NB` zR~&L>B+ay{FSh(^Nf~Qeg;j2<;bQH3h7l(x8_KWW%9q@<+A89fZFG%c(zQmO)|DFO z#^=6i?lNV;Q#MWHN>}11#peON=_++`auTyGH&llIJ~@ehIemS$N~XW!b1%6ONvHL& zUOfgr@P7MW8qqIQb^2*f_i0b}F&7Sx)tFpyjqyTPVtv^5@6}#5`VQSt%2OG#Lu;)D zb|L1Mz%|xouh>BS?DsJZc*o9;CTN!fE%2K5G(oH4b#B;e0+qSeXnRe|PVp+EHSt{G z1=%|hXIrv=?_L+M?8d6ORc8A1-PMwRDtX_zsUANxb{6B#5;Mc?8N=OM{PJ?+?eQ>T z9T@Y?1J^qrFdsa&4Wa1{;zMov*S`ecmV{Syw)(O%I`K>rH@zx1$}z#m+5!a441|xe zX9^n_P{}K`Knd7h9%-|R!|me_w^`lQKsRJv>P$v|Bg=N3*dM&6825A<81p}jVa)2A zTJwyl2Kc{EPF@3JU+c$)!tXSKI*Y-Rlb8yA_u36l=vTKBn&oI)Z-HqUTJCaccoQ73 zXwO7!=|{k9`Q&61jqEPLZA*o&sX5M){MfG*bs+Nlxu3X3mgIMCFxKr^j_db*i|ytH z+So9E0pY6l(d9bh3Cm%{y{j|(dt@8T2>4k0=25tX(N4?i_tE~2ejdUP(GG|E%T!y( zko|Ghy@l-sSKIh7x`Bsn2Y2mt-*N*--)aMY@4mOiGI1$x4^G}D3&lvrpMqkJy-b9k zm@x9dgg^E3a;b##ycYPKFW}>_yds^fd?1#8eh;gBCtOFnbb??`6r6UBXd&t{5y`r} zk=KYw`DmHO-m~EsnWy)hXpo+0VL6U3!io7kNcxXn?@N53*?rw!njUjoX!Z>9oYTWj zicF=^Hf-H{+%oy#8+Qu|=lojVkprAXoDo%7G%)j?v2Vr9cEXT-2>d#!anN~x z<{<6w$`J%cy+BaJJC5viWINdKBgjYJ()-MtVSd&OCxa%1Z(UNEF&xrm9T~%TbK<(z z@Qi1V588*8cpYeV*I-ZiWZW|tX9?chIg}QVYOwS-;n^~Vdp%`+h1Y>o&ufkO8v*^n zDmwA+wA{cqXwbLK&~Lk0@Oh}N@Hel21FM$VK9cMhw&TWIJ;U@&a+0RDOgQ6Z{&hgs zK*c|9M1dY|AIq*xUo{H9KPwUXhxTkkwOjk>Tkdb}srPjoN$%{2-&?QL!PB{<^R+dO z^mW5Zhx;4o=&Gr83SG|Xu1p)|@?A8zK8L$nU7N!LyLj@I>m8O9@PpH`|6pc+IsXdW zgZRWPTZ_~ZsAgXkEIH8styQuM&H=ZEsCVdLuH&Io-g6c9I%*oRiFvX?+cxVNt{!K% zh2+p;jfKw(Pe>g+z(Zd)Hnw(~+j*hnx4VR4-9Grj%QcglmW(s=2HzFeul_#%Xb?xCc( z)3auQ^$9sTdc!-}p$uNJ%F>egAja8nlUZvv=3$2TS7>JE!xZ=ftF4)b7-Mnu4Zqle zl@l7f`vQ-lk|1nx6}0!H)Z+bS)d#GxRxW_-%SNyrpYcIJ)4a<%%C{+hRaJGjNzb(yd{lVFyMSprD$ zGvm|j;nVEl17L&#;0g?X8UX;2!XzLvn#Twj&0}$hM)Rn3-L3<`-x|49J^MT9YIaw5 zy*`ZEs(d9i9fN;mn$QyIS{E*+)`K@}RVC{o3gg#7I8$~|a_|n_-2BgwN zBpd;678ddTT=LvtP23NZj)v|MbO8F${X-;ey?YkuPmF-y&sr0k{+(eiQUv^dauzKI zF)J9&7_BMPgB3u3gmTf zdlb#WC;5L5CZ=nQO#N?vFW_cd^sLrwU3=XHumv~apZxkXwaJoeqYc-uZ@uLWIp%51 zcIbxq5wkS38`*kCx1G4HwU1pvoa*m^~Ix-_6!3 zXkYUh$h~$FkG#A?ed5}hxijxDWZZR}-H!)I`1M9__&XWB;iHV+w%IlvJN6%IuCg`? zM{ilf_ScVp!wEI)eC;^)RJT43twlvmUpWXZ)T;KigV) z2@h{sw_A~`+rE~MK;r&&{AdnxiaEd?<~@gh!}g8uLym?W)xYy#ICjU}?`;%%Hyq-1 zwE3ZN#AUv(FFLk1(4E*y>i7zNe?3^(sGOX&h^R{C*a!Bqk9-RX6Cv){Dx~ zqVkraUbCURVnmfDf?dsqieeCGx5E->*>KvS^+)?g%MuV77~zEjBtVVzT-_cRL(P3x z@SVd+@LLnoW>Qf>bh(x2tSFn*rt$ZG76j`gf}?91O(1e~q2%8~o9}W~=-#_QQ5#<* z$!PvYwtkIMd68>v`3m0NJp1PrP;3B#F#64RYug*2;%Gm^Z7m(>Ko>@({87h(-w9^9 z9|p1wcPt3@xg8nw;3D4PAI|@S1|QdK+nW6q9X^lOvL0I_Og4UijbMC@Rb-KWfZ=K~ z#vsMc11769n0Z_8{(7KRnrZ$(TR}*i?KaxXw2sDX*Z^|GBgY29pR57?(D1%8`8_C@ zLp&(d`KAy?E0P+S6X&BT?4LLDm{0J5^LzT+?n#S4diQXs>`suF}+sSA9DY}(<4k|)_Gkp#1u<{>I zItNvI5ale--2d2XWku7>7gohEnO>4h4Zuyc^Y+vxjLoZU8ofj+5&3;v=Kg*KO+iX|y!gvR@-n zcfInSsD33XJx-?`7{OV8@P|iSQ91GXI~qCvZtvybkOEKnr4s4H2W?p?nRw>N?|>cm zCg1IUVvO}&4TQ-0;JFm{%w>`X@nF#wdX~vz(1mArYmVgJ904%BwJfhkOHBiX*XDV{ zH?Ei4IrN>Q-Z|jiWBo@B^RNBP;Fn(s6X_A54cqjAuE(dEIniH#10lsjpecVXp+?rh zNP=KVAOe&GHiBNnA>C8=)yv!vDS*Z6raG9LO*Dk~N$g*-IP|cRJS-Zv;~S1;a*+c6 zOMeqgOQWxljqV?tpz_Z(XQa2NBeZK+Nf=ASVAun3-3BvcueBEV)zd0vRhwb?0Wo#-=Mz4I<_^SLxm_tJK|b@e}2Kz@@IQu|M>+uUt)Zf zGS+11{`rM!oI1wPaCI74FwlIoRj0egS8YfO8QA{_`c)*B3Qo zz&@V?L6pfk^tu2A@)4cjM}43d=W!^V$BFvYZ72A`dOy0}e_6c6?4iLSKWX%nMt>ZQ zF3%DMAvmOea+S{w_LF!&H`q@a{iM;4-C*06>wMhq`Zc5)phPi5i1^FS>1+j75u$VH z9HMhtGCYAuk0JsYfl&WJ(#H7J)^i(;S;M(*O|_4`;H zLm#9v0KVcY2B;8h9ZNRZ)H{}~Z?NL=WNMxv!;Uz`UXGR5;rANr%F^i{16+G3YRv|a zoPaQt1KED!4u0YeegN(u8!Cg+ZE7aP4g26gD;_hPg<^aNTygjvT1tL&ONO%wOS>Ms zBz(qOs&RXzYk7eccz4{jex8$M_T<;i-4LD&L#G710GJaY=w|9{fo; zyoeAj5?zXrd-zcY@=*?vAB_gF2236q*U5oC3j1pD43>7HpnIl4UgkA)@rbc=Fp>+` z)9p5YX0|q-(F`-D-v_F_YGAtw^YN$6N7{^kZ=Q6|to1!pzX^igTaC3j zv9{HpjN(n(nmIJCTKIR|wJeGf8}lM5t$W&dwJ{uc29WC|*h_T8%{K2abTvlId_0O` z%1Pf()J@YEIfk<-pu;z{{s_f+Vo2yKOn%d~kDNpQRS}@8^#=xP%HsuVl%%Gh8s7e%35Lc0Fti%2?ll_JKKTNlYk==7HC})7hBAK^WFXLlm7|-^ek{$RKI!L)+xBV zP5~-HCknGR;-Z0ntw1ItrA|&l=TfK)M+J_>cNC6v+dG5*-8Sq$ZP)z!Z24F0UnlPx z6AU`lE{T&m)h?r(yalt2Y^!VQjuZDyrM7w4&XaLSfv-Fq_ieZZfsa4PJC*l;VCnhK zer|I6|6Z=(Tx!oz#FyI!>(voX;Hzl{43>)mOYcW%qR@Oygn0I@QvO`-ikuXHT?kOs za(K*}JKA{~ap4`kKeiZm7LPWL-_`b!rI;i7k6E)2xW3PVtCyQ&8Z72ZZ!&1U#(gZ4 z0K^=KNk{h6D@L!cUb$72fdD0cZR!*?CFYrbP=})PVo!!cO6&BTYxQ3@JR_M2=X--7 z92jhO*}1^{{3H6aiHO!8ZaX4)9NJMogdsgJ0$}amC^W%W1=aAc=fOS%=xx&_tMNyi zg$;;PQMJjOr(WpU4FJE|d#T<@7!xEubz66`HS+dhTLa9*xWbEO;C)?xwasm+mp7MC z`zI%?we>O^$@EttiG8bj=+cpY=F)}EYL|*2)?lxK)G9zkqrF9Nt~TWe;avqX01A5L zgT(q6wc4OTNQWWX3i*7)9oAdzOEI6Vlpnc8_GwAbwc?{ZKP! zYq4jRDRi6oTUf?Ra{pj|DS!S3fBXC7b@S_HJw*N6>N)ENcG?<)2mBs8Wmo(y5qS7_ zX~Xy6X6E#DQ>Pxnn_sa>NWkw~P@20}BwaX9(2 zbyRsyrs3y9#7ZUEG$*z-V&Hd{ zcCvoKrl-U17S@7)2eB4{k{2p&@?wU9HrE&~+wkl6dlIZUkaJZ%`JIBzdNH?H1gvC? z{paNMlWsogW;fk%AaG~k%YRS=_X-8^vABYlD{jMAGB0s0E+DU->Cl6qn7q;PM6-3@ zE{S*JB-9V@<`HO)D*stT6K2DEVA+kDzCeXOvJ6Gv3ggFrM9fdNXAEK092$amc|CMo zqt#zTOb++}wuTJmAsi3vk!N}o!O%I`^q!{A>A>Ck+cdDF+KuQx2hQl`dRYaLD9mp? zcVKdm(T%O2yskyI-yVIKq6KisfT?b~MoTk!DB_jD1&?CCDQ7H*HL!v&;CKe6wuCb{ z+f?>nJBtK=s`lR*I*lSqZIOLu0Ox@(^#9Rxo;y^=Lv{$~2$T&WKWrcEs;BGV4O>RL ztH_4YZs>6)mrZ$YWXW7OJjp)5b2eviaobf$m5GegFLfF3C@0phEgtC9c zhmt~Q#77dtcGBNPCYu*~Kj~Au9M>4_0C^5!gT&*1UV)`_cNH4=G{)r--t)xEM{V%V zV(gB!Y;C*i$0m@H7YaC~&yy>+i{%5zM`An={z@|Eo zxq^abV%s>oR8V<%!1&H{RS6H-6Fe{5^+93|YHDLuyV^ePe2t>hd2))j-q9wey9Z%x zeXl!z1TTm6Jz{49LiValn4H5aUjxmd7465$iL&Rn!uvUUniX+!TD zN1X&NwdVtE?#aqlI;5~=BMKPU{i3#0{?^DBl#}J53OKI}CG10sxYj6?9|S*y8ecBt zz-YHkefwIfP>|?9a&Sd4+H4uQZVD%;k@XaR)L&vjqceTT%DiXwzZJ#2EmxWr`C3-~ zFXH4MvhZNX3WKAsR{>86?LYJuA))mbMZm`(WIn@x zRfi!n`i&2l6HmSceES^`7JschYF1k^V)ig$e;RwBnGrdk=i;z*%;qd4Y?f1=hah*o zqFc0lp$#Ie!CT)3{Bbhf8Xbc-ab9vDWjW&I`-Z+c{NXQkLhDN}lYz6PaCigQS2oF& z2S>K(l{&U5&G{n|ZO3;Z+m+?mzA^KE0{_YS_ep4kXM1w-5e1}-o*Zzcz6*0n_yHXC zXNx{cH|VFsgcbBe-S+o8UeSS5X8kFBRHwJ~Z{J`xyvKu`+W#o`jsG_ZfR`b-0w4Xf zH^JY&);F{_!M{RmibKK$^sf6{#n3yS0ZN3P`q0lv=j3(2L*tt;Sgb1byBv9cdzCih=r#O7 zApRHebxRQ``oS+_u4-F(2(t9`F}?`{dzIZ@nIyjn19$a-p_D9>*G9!srBLcBw-QW8 zerj+QeWrR7xZ)~uF!8J9Yxt&r`6f_N?=q48coX=B5m6%b;=or_0V>z%#gXVINUS@( zxU$T`4m+CkpMTRU;eY;6!L%2LwBP?Z`On{eHg;X4pJMSC`*=djI4xV(hkhOiz_j-! zFy&snGcnEAfS!-5$Hj5P0If9%g#|CN#V8 zCUr%E0K9%PtLw8EsLx-q{$Xh!6zPn=qnhNJw)J*Y);% zw;U}3S(it{hA>RrSj(s9wP&^WnN=P9$`ZVfXl}l)uHT-ae(?5xjaKcL-B4DDHeA&` zGXrknFnB%aX76R6JHhN5axoSD;O8S@**#Yt7IvVEWHW* zsqFcrE}gfiPK>&LI)ISPivUl~ypPmWCLXnk5um5j@@!$)!1Z>x!j&j2kZHZ%&Dnx> zKP#ynWg~vQ4fcfD`=P_*QjS;8 z!fdtgnG^eD)w3qsSBARd1QIDX*_z`9lDf9GU}DWcT5qC%`@+;!qA{d8vOt2Pxm1u6 z%;V@ILks+7QkiAPsbeYyHHr1$S>Oi>{=mT>V(ZFuPKUt;6WktDN97i<-i4>{oPV?9j!SY_O>w8y(+B#WCfz9h zrc4oQS#4eDna|E^B;2-tBcAgI^fEvw^EKI4jvKCKZ<&@g7|^SRS4tw1F7GeZ*B3L(ZlCS#5%a^b?6|9S_hugG zu6AVoZL4)i%?^Li?afEcS^Sv*kVqS@ZY`v?`NBiex zF=$%m-fU9}>Ot`!51y5SOPI#6fB5nRX_Sbv3fganFxKG>j2wN^wvJqr=EN}F|6}ZD zF%EIKe@a~4*32WRsyQ)D-96jC<+YdGN-)uXB=B~`*2Jmr$Q5W)M?u3H`o&rRwUNGm zb7{AYU@_E@I-=%3Z6{@@o~;Mv#uYI3PUqHRGSssTr(_2x zZE-KjY(69po9gXnWH8tEBQjLi))O+AYu9nNaXt<$u4#vX>Y5Tc<7~a!X$6~A1dOLjg2_K1vO?&i2Z0jdvf3lGgNl#;Tg=e{qPLcwe|1}=Gt`@Zk&%p z3v4?)H%`@IPs=D59bEmP56fWTU596AVzwQg;mJ7gRT=KVw!<^LtsU~F+_C{3`lJl) zMhCwrLz~FK56WPvgQhdu}^Dsg>{@C2|<~-=kJZ#zze{=3S zGq=AvzwMdHkDS0c6EL9ue~iJ%vR!TH1~m8jV3qtP^fi|mH?q^xw=M>MfgBsu^bjDC z>FLexf074dqU_H6qi|7<=+*nNQNCRxd#e6AjFfG4@4-ol6pQ{oi56YQOF4dNzRInh zHnha?{cj?D}#LVh+&^a~6ki@e#Q1hj3DxISLKiu50cPnAMq6n~CKM`DQpQ_azZOJN>7Np7iL- z_l**^=7X1JZlKjLe3Yk8@V3)ytG#}qbDA#qrIee`C$!Vbz|E?2>#^Cc<|=CLcAic= zJPwNe3k6?{vtllPJFnl2+p?Ibl%CRqymnnGx1LP5kdR1!GImA79K)7XyQ(i9CjDFE zv6C9>*F(uXmCASSLrtxXz@E#k`nH&l={lHGFg>L%y9@V`yuzt(nt*W_>WZOf*%PaP?Mw{G1q# zwbm(YWUS6jpvS~-+F1k%(|t_eZ6j3?NUEqe!?AB8ZNHKvx0oB=O#K# zR4?yc?RGYv$+d2dxWo2U#%g4`sl=&-#wFZsScPumRvV~DA*TA+nfEUf#sn*ucC(VZ zzN%WKtLOB8)pMqst)8AHZfln}(nUHw(I2vfMh)fbi%L6n=`Jqadq>j?m1(<*CNk+X zjh;2QPvx8BbWty8b?p{kwViu8UsyE3T@#P2sy(_k8tIpgoVr%4=EM9Vp`=dRn%T|_ z?(Ejpi>AyKJ5e0vyT*J_yRviLN0Lt{y;Qp>XQnxS<(8Z#jQX^2k-8n0s?0kuC)XuySR1(A0k|o5C0%kmiLu?A%=^Xq!%ZQV zF-o;-Q@QUx*E%t*X|4xiwtC#RJjdQA5)nw7d*XYzulVt-gIPad;|*vp#}UCdtR zklQRLda_n}Xk9%&O;Hm(_p{yH)CO+0-IH=^p^X#6P7NkxrJK7-R4z%vG*h=Vvv)J> zykv)R&b+!p%9T}}HWvdWNAf+r{M@)U%AM|i^W-VhPjxQZeRwq)<3hCwYOprT7B#2Y zl<#2OZFeu419%H_FPdrDPGf@PS$dI4oCT+M|`} zj+@T-3SVbx#et2>-QG>MUTTjMqSRD#?fP8Gx=$}RSHt!Tx@z5y6N+qA$C70@W!D^k zx5_;u+imE2>7g}Lyml-XV!j@#*=!^J-KwO8JVET1={c1^2O1 zDqhXfSB*yh;dXrYFp@^@)9n*@nij9Fdbfs{aa+0Co!nG!PwR?;io^Ec6xd1!j!zR! z`T6El?MUFM)*9MMs+NDcx7B*0VMxkIcCRwS+LI#bx8%OmkzOXZS~@!#J?0kOg_^64 zFYjI5Y$Dk*a8)h@L0MRt5A$+HZ)soX#2r)ZAE%Kb1k$fPf1c~(L%Pfq(b@uCij_c!UvBvpDW z-|3I3;_Yc=S}od2P8)Sb?YVlnD2Z}mBuz@Ce7|AzE?NOH>Q&YxBlSh?!)Z9qh0HB zG^m2>yn9)x!ZS5kPpFE~}&FnNg z=uT&|W?R3qDv3(9BvmhNuiIMpvW#Y9dp=5CXQajAs+w!*X_tkKsK`EJR*Ga6TqPZ%W1tvN{GtA=%d-6$D_G;oJ#+Ui-TrOg8-xSB)YX8zm-QCnDg2josC2YZ^`c;y;wX(qDQD5D zH)~R^jGw1}s5_Z!b1RVsV(V^3CU@XP9;z>OC7ZaZq^|VNMO}I-TrcjQa?ck9rwS*f zS>LkAtk%D{FT#-qs&%uFxH4|Baiv{Oo?gtZQvuh7`_V;iaFfp^%IKmruq#hG1f*c) zx)+mC85o&K?$o-Sp5A62Gj6%qNTeUHYgXUAy|)X0wZ_9`Hlyk}2^6m{A94@*%0nq%^5& zl|ny%eQYn{HRDj%4;)jY{d>9#1ok zR%PPex|fv{>OPZ2zV9|o#T8GVYu45MRaeTCC&Q*Zb#(KxT{Nq0x$#7*pG>YL?Qk1=#ludUZF;R0`Q9x~&W@ z(JWCLJx+#+ZdN>f7&3-+7ArD_}410Rn#iZKpT^3EdiE;H|+>&(MTGT8t z-|eKE>RqLhNKZ!7$4bw5%9Uz8JnW|`&XbCVm#rFTyl7UHjCG}w>q<{`^Kq!?HKT`r z5S%@cwp$#zoo2?mE8GpGQ}w!7T9DiNd@#}VfhuLoqvwKDUfh)16SZ2uyvfPa`q0W( zo9U{l)gPe?Ci&Z!wwuTl=S90=q_Pi#i|c`N?_|bf-FSL{>FTpsNB0Q8YiU@kWN&o& zdfpv%8$|0qO$Uok`t@K5I7m=4zqf))Pxt}z1r{;JzGZ3B^pKr}ZzFm{1 zkCn%(>5J4g^K#1?RiRwW-vjk-K1p0X56ijd@+q+^j()8@Ki1&*)^Nxot?Sc*YEP;Y8|2ch%+swn zzpt8&nPfdyjebTfcCuad@+x0{a!DmKwV-PlH`VG$yGhN9gWjSq&u%Lh={__n1-m{= zk5h{jah(37=BB!RqcW{MWfDbDzJ}veuU>AD!ZYKQ#tFRT&PM)w;^BWLA`~PUK-)&OTF6F5?Q_FrZAjkuO|Z;Saq0xZ>F4QrIZ)t zvXcY%vQ$`HT;CLmHwCAsJom<0rTSdF&z<(_#r)*KzP>8lI0dYeD<`GSGMA}R>i|7K z!oS+;l38zIwA1Y}xoY+=C*=xB57Uoku79a#@qKHQxVY{#ky@xDarD@EOcZViDG^{i z^zPc@l8)uH@-nw_MWu9lUov}xe?_rY1btU(77M4K)K}5VG}|8MyXDrTWY|~vLOBiR z7gBFBy{Q<0g+1XP& zs}%Kd{c>ob#jt=D&Qp5&Fv)|Lg{EtZi-~zte0Xa0-TOtea`EueN@v=cf046LuP#Q< zO<5^Dblg%oec8HsejGFgcct5VHK&e-h3Xv{Xo=K~R%~=$jD$HLgbdVM_wgp9*ZXs# zy^L?KYEBI-WaC-M5LGVSl+2uYef!)uE;~j8Jm-oC+;!?|?WrtV?YW&F%GUU9l&FE` za9Ep8%JSsm65YUF6OxzOe-j>q$Ij);LUL4iKWY!E2)Cy%iAHB~`SR3mJQT#i3^Y?} z6W=|yT8S#DS#oXK=peOf-`+pJs4qh?TUTrCdn^C)GHtv(^U!oU z56wHhOvc08L8|?5y|5pgr$)Non=a;#R;Xkei*~l#o4BByx;?eqe~XF*2xzq~)UMl} zfQQF6EW5+S<)ilUpx#_JfYUAC$q&7BxiM2~vZ@r?_rv_+#<)mVQYpu1+!(_b>p5TO zbq$bBXKyB_wR{F-%Egj7Xjh+Zjh;Dd5*>-pg{fuTq?Jse&{eEbIn~Yg?&S+<(kb+B z4f8f<=yqSM=hFSue`D#Abjw##UM|)yZ|j{>>F%kVamurP`q6mNNBPWEI$O(SD&*zm z&VtwXVh<*WG#F=d&sc8s2J%Cyq+#4q@32NrpI%1N*c}hGV*5F(KAPw%@8*<#UcSB| z)v0uaq1wfA=T51Z(~LfOdbzIGhadw+m09k-Y~&YBH#Mk9fA!Y0gy<26gga)i2Z1ZTjl@ zsbEN{j&(C4e-2RGO64*40MaREkg3mogC=32B<&~x=wYw^87B- znKUL3axY;nB=xCynlQ_EATxY=e&`!&Rj!R-_a<9_6;SCl;r!Ur6cs97oj9atjMJ+Wc7Du?B zZJMbJu3Cx9i*lvt=yI*-)cXT9qrME?ejda0L%x737uVX{9Nv|##A(5?drBMLOrDhN zb*qqe+>7-1?n;(h#cV2tEpXGk$Rt2|Ar-1me_aLK-^ zT+U2S(Y;1&Yf&xNPwyt(+I6O;+!R~$(Lf&6`mi>rb_ad2lS$lMDeZasF@r7&&YgZb ze{Sb)9Dt`U?xmGAdKm=*)p@yl9P~5tOTM+3rN*dXV)qfEN%=y$YcCf4&caIP9?&FL z>NZlX+_ZGtlwY#&)E0+Es+%j8i|y3)^JvsdG=^GDzeXTyOfQv(3u4W?l@#oD_2Srk z?wjRqL+dVT&*STh+v!N@J+QMzwb zbWk zK2LHZ@zx;dNgC+7*tngMdFQ&Jl8gC+1oB4asV7y_N~1sM8J6@|QL_Ev|4+eL6R8Kc1o#5pKRQVy%D(8=az>O2f8q5)R%X zgxII)o6P}tRUDrUE5~D>JvHaqf2vdGatHNmJ0a0J@`?A6gG#$d)#)hnWkMGJnS$wS z*%lkdCX%$QQ7Yi<_@hFL4e8rqX}fT|p!Kb&kT*M>KQ&uKsC9X@bi_JTDeteb<*=a* zWR=_>X*Xx#p1G7g?Dp7q+J0Nb_MKr3hK?xSU2*lWWhbdIXk$GU+%0!?f4uR9@TrdR zlCo-sru3MvcnpJd^(Q)qwv6LYS7ChjNL^#UNL;G+k$oDuBr0=agYPtI z#ZX7tXZqFy8ytd*yd+C!zXXXlqdI@0wVzdaKJh3&4IeRh?(jK}DZ90w?eVD0bECIp zxl0t9`r3U^1gDU4zNe9{e;2S#=M$GTsraFI&`!O8`Gv0gwxVR6bTaz{`A6V@_r|f! zbNnizwhsl}Ixb5{{fQ>s3w1ks!l+3=im16VEm$y914&4Vr>|g`l0{A^=+S`VA-ir{ z5r(Tzo$DbfkJ|~bOh-JR?Re1QDZ=D}T>S3=;}OI50ZP9?z4BdcfAD#>SDLUPUnZl` z7d4F1?0^FRU@{zCxDr@W;}uvXIO~qJ(i`b~Xmg;lj{%LjPcn)0SQSncjRj2h=u-w$ zIQnxI%kO8<=VnLZ*K`xnYU9o3B({EK*Q4y+Zch(~4leH)fF`@CoDzYbHBjGxuc7}s zf;3hMM}&OQ;t9tbe+V>6EISe^raao6vHnN*;O~d>Dh3ok$`%4y9i_R$6EF}lf7CfP zOq<(uvrRKSBYGhls~p0LYjyaCMJeSZVWi#s)?Hg(wVw!)0g8hy%PV(cr0 ze?g8X${8b%C%^G5R67NWRIhk$fKy(a)x@(DSZ3)}Tma93WihpY*E-d3ROZC2#-7OB=b;TAbkVwl@M>WTZl`NUCG{B# zKs{dAL~kK_zL<{jf7+zj;+8eT05NZEOLM`c0VR4=Fu5|@NZv*_W}KT5)|CL`ymXFW zGB3oqe=kGHUeEq5{lFjI#_0fgRHy3j<^K{N5?bk}yf8_Yd1rMN)K-CVxU-kv>-=l~ zHAeIzCWz{nQM@Z!`J6VMY6%V0^Y&C``rVBJYlGY~476&jTcWH$EFi*lQ_AgauL*&8 zDE1SIZNCQdZ5;s$((y&H&<1&+c5ytMJJI1@f4T1+AvrN%#Xhfv-ltAB^E}V%XM7oG zWKiHw4b|FV#WT-n4Crv9vCgGe2rsML@22_-;`VB-eF&*Ym#lMS{YKS!k$ zv39PgR%TG`|YQ-bGu9S%2paxh7PAG&+K@FV{X(rC$Fse3JT}) ze_lvE=q}+s-M{~sw$K|$V{_(Y@H9NGNlSS*aq_?6+4SY zd*lbgrD&h&lj3MpLjw2^Ak(@u`<)70l}?kCuy`ZrX-O)?mNuX`#y#(RsM5QSrF+^7 z@H;Ph{Sp1G-Yh{uQwM?p?mNUH)DLbsf3Q-M%0_cV;yR4Ns4WqPJ*$t_Rru!v^BG-0 zZh}E<-I}I1OfC+T&ixy*nh^Z)jO|^Yhn%x!a|6a2E{|Qb;q0M!Ap)p^iRFXM@(ya1 zwSd)(^9X7X9UG@^yBlJsPiRf8#>qMb%b)ENHqiZMXRT`Rl~>PIwRxeYgt!H3f8m%q z-_|GCZDo7!mgQ(x?3dZxM2=#JM=-AL+a8|1{Q@^;2k4b?ZRU){Ps;EnrcPeMC1n=B z!Igby?K+-GLTRji*ib%J=;B#?>W(@Qq_e6!*`K(^ zJ~Svip%aR_+oA9`7OcA5haV~M`&+rva-~d!x#lqe~yzMVYr>9 zDnJpa?w6OiDi)uhu1(b;xhbfW(;`X6ff9P*R=t()Y8Kb|$7bq5r&(@_Sm%eg?F(m1 z)J<6;8XS^7dR8TnB|0*#L4fVR@)?mSN2E2+lIQ&h);z!~&$}JFY4dvKMyfOw;L(ro z(RU*P+#-{@6IQXoGJ=HTfA474WmAxJ``w?^Y~L<2HJXNRuf51r+!8Vt?s;|Q2 z0g*=wtR6pPp);}a&b%OzmUct8%=o2O{2p#z5n@RRRlZqR-B>!ouRo!?iwU5LZS!Om zE8MR%3OnR|>NhQsABx`6U}YQb4L3p?-NwD9+^4Qm?wsZ~=UB)oVtiv5UR332d=y#E z#HKb7KHo_!7iXKMe~4yXc3F2i+z*b&?9eerSkp4>!DFUG2SlTf(BEM8Ucn{dJbTjJ zS4%z#@6Hti#+)tGW3>2o%9u|mnY}{qaRZ;{<3+5<*7Lf{kh)Y}w-R4F9MfuP4>|^# zsACjO6+MyH%g{LvK6TMi@$7CgLKgRDWlo%@t*y@E30Yqne-7#hHKFO>;BqbU2xAfZ z7y`13W4%QKAlOk|KH&2=rw>xy}-B#- z!wxvBe?J3TRR>EFcivgn$}h=Y26$fEGZ4nCdoA|54daC=M+6CI#e-GVD)zdmGq_d5Cc+;`vH2VNray?@3 ziiqR zBzVb>E(QuX7aXuJ?I#JotZGyJx^?%HMVe#%!0`xwEzSYSxU0N*5bmzdvVi4 zyn#bX@;i>eQ;3SsZp_!CHSVe`%=5dtk+Zy^M?5`T4FR)zMmr;GL29@s_2S z-;7o+@daA2=4eZICJ4Q_(5GTXi!NpQ{)^6md~|v#iDfpj27@Ij>VNAHVd*4{s!%nn zmMFl=3=kM-FkTxb8o95O9WCJ5xP#DqMOyyp+Ua~^!We?9)tK1EOr1&{F7P7=?p?jK6BDQl z9cC(Kovtufz;!#cD9D`jzVxQqf0wBmfUVNF)4;){ZUiwj0bmUlcLFM9NTT4F@w1L1 zwe6tnu>JWklR9j_2hsOV);0i3Mp{`|GemXICpJ?-9PPPj>-yOevir2=LE)`4EPF0m zS6u4=>_ryu>{aVjFfoq0^@vYm>fS9{>7u*{t3cFCMLZW!5ihHhOuj?Wf1Qv>Pf;SQ zh0>!5l(|oQ4Z(r?kbK|!qm(Ii4jcnQZubgeP$Jhi`|URN?i7RwV9BA4m}#-9;|}K* z%zj>t587`U^c?Yb354~ogEEfec)$*pP+w$q|FYJUwa)>@Qu!ibyV1=|aK{&-kcX_> z9WUJ2G7|;~NkMf9Bmp@bsuiWdgdIBiYr_l!8AFnHaH~+Vi{KFHV}h_`Kzq zHwpDLR(&b+Go2)BI_=`>LXexP=Ho4)-wimt3@5gf>i(T(d&O^@iPz;B+Tq9RE(h>0 zva!|sS-r2GdO215I_JM<&5TrxndcF;HN6aCp&=~jCPH0Lw47-If05tds+s&f3~cFm2A9joIo3{#~XFMrE~NS4^I z!#gm8@-tK)&smfgf7TwL73A$xKN4l-ht4C*?eVYmg$DbQ_O+>sf{)yRw(AG*bi}-9 z>R`;-5$`xa%vyXO2}w8$?}&5)6tcbTdzcJffZzvh@^xM3h_qT)S@RGo_j7EF*#l9) zgJB!4kJQBR1wG;?34<~117@W-5MSYu?ZwHxjUCfsdvFtzf6raaue(4G2)BZZS^~a0 zIk)SCm~21CqM$kz%n6F@T#WeAG82g^Y)wU|oj7lqs-o*5A}2nV|JC)F`Td;vsVSLD z<-~6nK5Yim1}2}O$Y2hZQn>;NC!(*hK}1=IVv2oXfM54&a={s*vET11`?O+&+7)It zJ>L8f!pp~of99#Ejn75cxW-YsSz`$wIJi2C)iJ*VvTlPec$}>%pq=}x_=-!>R|QHm zzl7OHG>;-R3!RU|i|0k}u~u!mT-WQ{;WzrUo(-Xgf1CnO*jQO)%B85R)*V{Rzl33e z1*aw893WLAa9E$}LTI$1t!P&!&n!Fgo>?TH!ab2-f7%I76P|V`8zOq>B_lOBsw`xU zZ}>T(e^LZ^$nQhG9D)0eX&ftX zczS?k%Jd083eXhAWJY^Mh2++58B#Lb4Oi^I6$44P&tkYFeIDK9+_9jhVk-e$vn=(M z-F632f4x(wxhc8;?t#USejPM;uq3Rtzay|!IMfr-(pq(A*cu2 zW7Hb$RhqY^`2l})#bGOlmKdBiROIpNIQ$6d@{b~i%MD=##|0oEwR(nIL4O|=$E(d> zkO+;G88qRj(#RWou@&x5K`OQ$ZTY0pJ8}Z{e-kmO>f-`(EhXP2CqetVq{RLi!cMpK z;6}k;Jtv0aFh%3Hu!vIw(|Q=Xb~*h7qP1Aewtdjp5y#hV@2o~(Tsf3*$L0D_QePv6 zV;jabiRX)9Sf4{CX2#_-q72yVJEZ|lV>dno@}Tps{ek$&`z(}T`@-(}tp1x5F3GP= zf0r$j;jLf?1$vtJo@TZyKu+(wX?{K&&v=4+pdnocO}C%NRlYU|B*P(w&ohgdn7xXi zvYd8732%V)-b1bf_Px?^ekrvB0DYpMS*8#crhL6EWp3gfR0&9_6DBzivL|~_`a1^Z z)dO$G=RQE$=C(Gzh28Q(xUQx0wS;J1e_!F?ecpZCrw=*8E%Gsr78{k;0%|B5_}uk- zOxfuIb$O*IeLkSROdzc=En8@D9uu#~HV@V?c%=cp)(v~{aqCSuaxe_pnr zWAIJu0tl(I2fOYC3-ZZMgxx)HU#<2eyn zKt!LJC&Gib&Kd3_$>u%p8YOUoe=^dpI?_HEzd5B`3d2?!18f~2M$1Md(c`wulds!z z1Ob}rao7#{5suRq*%mn{#8o zMC;u1yGv6Vc}%p0RpW*2U*LvEZJJDf8#i&7OlD01teMDy(Mb+#)XmKB{&Omz1DbhAd(&ob9sZ3 zeprg%Um&iPq=%ldH2!;BMqZa=l}ez3#lRA{nwBDNK|(+8$e)4JxB<3?FhnQ9&#A#H z=$Y_4Lw>U1g!fLdMY4SDg2mcgyFzZR#p_|G`&}M&X>2Tu?f)-08g)L;`eu$eR+g2svm?7PnR(3n{()+^N=HnF?(wdztOi|DKccZbq zOCs=1l`YZA=6JexP3Wd{nnz3pw{oXPiHs+NNh6 zk$Ul3sK4S#4qhJ}f41lWxC;nVbBH4-^-13So$gFOWfWuY)~MTk3fyAyj9U|Tj+)2o zD5v!ik$b3gjp!8MOo>#SAEkFM- zjE9SC&(d;p>-!;Zse}X`lozaIX5eoEST_ZW;5A4%ow4LRqi`887K$A)63*yY@HhN2 z6jIIFnV6VP<`_?kz_kW_;{XrInMT%uc{aKSl}vta_x!n()ag0RwS;p&#HNN09Bzwi?deMJQ8wevyFI6<9*Zj? zT`oi+h7oLl)<$Uqx@41&q+BKLE0o5%qf6-r(a5i)+3Vx=*Q@qGGx*?8ZxwA zcBP~^44S)(eGQNsB`mtbXGhsimwCP`=EZw!ZUMn*?^7frU^%|Y4SgqJm=vCbQJ61~6pi6wB8lBx!Lb2SJy zMTY5>`>133>N*G&wtC5kt%_t*o67w#J52uqo)oSeCs(JYt#KxS6S2jxJioNT0?3>| z{byCy_S=LN?qX5qxBEx#-oPdV5IBkrHu5>Sf0`+MOk`NOm#EYNl#;SHk0K1Kg`|$K zef5T;=D_lwqNJl^)o{=?5K|fY6pAnw?!LuWI}~L3>y&m7X-U+jUjL~kQlfs>V^Js! zb6Tbp8L^(jq1A&_#ltiWq_7Q65OSUyB9!-#4S%cCc2{tI`FZ4%5v;}b9?kmu~S z0EiW%!Ucm=d>p?pu2&&t+;&tQg~|hVf4gKua(+S6tVIJS;!kIHJ(5idf&3C`QrwVO zm_s8~GI~?=zi#`gaYuo5Rr?T=+0MuY%pSQVNh94Giwb%2YJa)N*ostH4-aPWcOy|> zo+^&5&9hm5Zq3b5PUr_RXE4A(ON0S6v=pFV^Qv|H_Jr{W?qZyP-fAT~3 z`ylCV4Jz0U#wdjp&R3}3$6mfN2}-IrCy`!RqQD~q3`3D1)=vubRoU=>aqLAf*GaRd zpW-IMDTtZ0G(N3N_M#`ezO|5}#cscoSrd#lyv%E+P}n@$KZ0@VLE!-qUu)8h5wGRa z@2tq*T!p4c2Unn}L3Bs~Zp!!Be|YhfIV{)+_tg9jE&LI31Yi;2iT(?;Nll29MW!C3 zw?;ov1t69XGvUL-?PI_0@dd(521uwe=8q1k$9|~c{pp*TkoVh9 z!E$XXt5rKwr)cq3G2lMsVe9+XBb^!rj(OD71^O!D<8xc^-=6 zUyBJsI+~sd{(c6}=kj}AE@yyw=0R8LR@nh;WNf(in``J=Aw_E*a^yo>T_5!!1t_?< z{dfK5?g7v?|5m_*&%2@ANGCAoH932}siRFBl!)n% zKSaH)wTckR1iYXUt2CwOe~jpT8#Suec?*h0uk4#+@#(IvXo{aAzP0(RV=ygOn^p zUep$jmVypX?j5)uKyts!H?B@Itj7GYD&H&`S4r7wfO~DFL^=c^e@CLyRQse3`(`1< z6qEMhluUys*tS*%S!`cD=UtIg1UU!@Xwo5fxc@qO)?EQh zyb0iMl_IRjJL#LMus34z_-#zJ0=CIu;I-9Y)_W3ZEDCJ|9NX4nc+^j}A|P5-f>tB= zilOby5VM^Cjr|Rie}j=>-*zgY=cvk9PfYAVkDannw+*$E>T$c)VEBAHaRQ0nq6?DD zaVQ`JuGRti)EY}k|Ivuq`0-q3vVbE$VBO4@2lXGx(JlmT@(lF9DHL2DK*lDp&_xAl zgF!cvrW)2#0Kfh*T=!F2C6?U9=G;do1=x%=yjxMM?Mb!vf6w;OW?OR5Ad@wOdr3KU z77x79INnd<=%Z8UX*S-eNV#{Uk5}5lI~{bOhHQAKpA;j<4cE#Ojn2hryk8w%ep8q1 zT?ZZWyP35hgi+>&XkOV8(;qAn(Q1kQf@bY zpoWi@2#f@le}^itSB^ZWFI=XDT1n-;_-Y_GJ?KQy_wEzL)KO}T;PQR?)wGp35c>p| z!6W#6^pfZmO+@&c4x&37NT#_Rh$;NlwaB`+8uf9r?wiBPmdg)#);lTk5t!Xs_1(8)+h;lqa<7 zpA#&be%$#h=lVIPec-4H*N5D!bX8#K4GdEsW}5u8Nb&So&*kD5XZR0qUk?z=iONMa zm!5PBe+&`viTRjkK@lNqt6k&inpQHDNQb7wZddNa7910!Jx=#dfWIk-&rahaJf&a2 zj>PK@mF`h$S%Je~a#JP96D0qWZ-*zXCIpsqD66%oOVYeA=QX%YC-t8x>94C?%jik8 zuopQ(Zv;Ihk-J9tT!Y0pcGiAvct04m=KOw6e{VU1k2foc1ZLZkbSNXLLPPx&8+TO~ zYhx`RF&Wu$T5ifAanskI^;-;YCygj;=hKDOSwM+5okVNdQ#OzBddWP}WN6iQ14Xd` z&?9s-!+L;c=u~P#klT>oy384&BLU;tv*-UHsR(SMLuOS0qJuE{}5o3xC*u9oX@RX!nFp18-LU>N! zOu=86j56HSNw+rq5tnG`>y~_8-Rmv{f1Fg#uJMqo`3M%>RV@#J8?}MN?&6v#R1k=p z4KwG3yx*)?3lkS*%&tI&f2JM`BD?jqtxOfJJS25DKaw+YzVXp-ytrU>tA|51du`U+Jz)ax&e?W~8x7e#7 z{+uRA!ezjJRUE6f=2gf7;dj@;bwZwvvXeTv)*c$}lHUUyv@BdOg&X6hqV@|~4pCSk zRr#mxmZD>Qk=*gvzu{Q|G8|n0dbS# zvgmEfYRHVIPU?WA;+Y=`f45UldzIg=xP^0M*rZX|Yeit?u*~5TcDV6dj*WRh-}=en z)1Phi1PLU@PZiE3opCAf4`wHk3=Y-+{DjR8V1UjoMyr*TPx}5xcS&3AKE1_*NuAyi zd0}9^!SM4AxdUjhoT&eNouGk@XcOovJM>0%Om|gw{~dZI_HGife_|g&VY(Ge|0?{( z$kVqKujL{>TJ=HZKysnoGLak39n$%!Cv|@*;I}$2<6wFw{nBL2H$VLja}2Y~*$_Q zm_D_U@-FpbkI_J(W;%a7Cf-`LKNVhDQc_~f==J7T2^Fj&9qE|A?+L0hG_zz0Q=FJ2 z0&W)Ffd~9alXl*2p|d8541&Z-Gj87fyeh@BOEzNHjq2>(oBD3TV%j!pJP(N-=cSV= zJ&6(j3_6x$f8e*}DqV%Yd(iCL`{P&nb+x!HlpkXt^P{_I|NhFu)mT45eiv=>YciukG zLTgNc*(^{Gzky7v?rPsD?$a$Bx$!x8`%t|yDiI?9syUX{?F@-Ru34B)_KKWQ-EM_@ z>0;Qf*`gl==&-Wr5CH{wef3m)iq=@5!Vf2#f1d@^<-XX`XM=82!%b;ak&DW%7aeL0 zb#NH9GU8HVQTAc?M;Q!yiKP_ z)fKOmv86YOzXc3dirb?|a3jC}+fa7Jo@`zIg@za>4g=5eFw%JF9A06bP z>XSweg(+&PpN*Uj;X2l~mfeo-89hF4OaC7!~7pwdv_A?ce_`H4SP}C!b{l>D~6(@6J=o72~Y905_k&Ci?GPgi>!*c4+qJwpQvlH4``ZO>v41i=I_V!W_u=;u!D zP9?c}?+G^$sPUl0V0I9HV}p~sf5D392m_$UN9kw3=Sf>+a_vBAX>t?%L@E&2{NVv0 zpDY$!^}+5i@dT58JpOuLmd1^uY>b@^*J`QrmSBhR3*}Ob7Az`twO$H46pILTCe7xqt_Kv6v_|i`w+aFlme@7Q*hWS}{ zwBT_4{R-wK!@8kEEmDeXe3s(?P=snJ1${yC9i>8OZ&Tg6{nO2Z_7N_->;u4-!5Z z3SyFa1Y&~tI=ENhP1vSje|nw)Z_c>IgA_s31U^Za^+;hpmQKYcHS0ndZM%eTffMq& z0h-x5)}-2YMs)OWvC5}GpPmnI2>c-%%I!)$f5k~TI7PCTAvUO1@6f44c^vTFWIHFy z44xgwH`w;xA{aKv9zWK;O%z;@T;Fdw8xy^PjKU!#61^=b&VQCye_qHKoI@wf!UIP7 z^mAPKDMCHWWRicys9L4}gO`H$)L4tXh*v(To#Qs2rT`!8`j%#nb25P<|AFP}G9R%IxZ%HmEpJl~%B?G{P={rZ*hdX7*O5yUc1f z>t%^H#yyZ%6mkf}N})Yv_45hD58qtWezwc_EWY(Rc~Kh{e@@d$M`#*Xq_QqSUqch? z!&g#|HwCg9PzeZ}-b~jFTc)X!{JSG-0~2zoX@k>vVa74V=P3yji$Nwh;Up5fu;#FP z)y@}w7mC#H`=hk)(ykFhbJL7nrvG`Z{QJRX5pfiX-d*%J|Kx4Bl`Nt;b2M?BT*&72z-h6bbE z1@4_oif@TgGDNpf7HKUu@%lM6WBPFatodh+`IumUWflEAs8yJS%sKH{L+`hBW0!z^ z{pNSB%p?9=S{UuZXe3-;dn2}>Xk&)wQy||c_;gume=&E2XyOV#6BZI$k%NWm{C;|( zL%^)GRDqGk1Y=CzYDg_tl!ssmhNq=J4{6`Wq3wi;e zuzcc~e7de>{DgR|EsWj?%$?SqDW_~oyi#bKJ{9!t>rA83m&*B8w3Jrs^yUL%f6Sn` zs*-3YfB#mQ?a5ELX3~rl2x@SNS}}y}o`#|}U{vz;EIMz9tPyxou-<>S zmsn*1zj7YN^WD*bui{GKKCnN$uDQJ}bJm)Ot*Mj6U#C;i{9HdRDy2R^m|29D*Mb{Nno*NO1MJ$zIZ;t6r(@X4N@9`x&N@CCJ5brg6~e-@Ef zF7GwP>bfLLw&?Jze}xXV=g{NT>L0}7a~ZmF5q4Of55Vkg{r~OZUS0Py(Yd$u(~=k! zx8er*y};~ZQ+RsQ0doeMHY_QyTVCSSBn|ih_2!oS#9IRRqge2Pi%plIWQLfNbl^NZ z*cg3RulAM&Bc`Yqj|9Ws2K=b1e+Cx^OAnqjN{)_Uet4bdTuxb*iL{(`nUv|9hXH)- zTS>@`qH-xHz?Yb$DITWqKCTVE_&hA=ka^=WKl`UT!dy}?37tA8&tUypcRN(W+7PnE z{9xG{K^^I@WbBmih3P2QQj!1*kMpFt_IGT;Om9S*KC}BeOT~GsXicgcTIXvqXJB8K zD0|NY#7WrkD)JyLuh7qL)%^oVZo&kv4&KS1z$OE(Qpk|3+?f59_0YC0;Z2o736)Nc9SoM0tzR*5y(sR~BUZB3|UI@ZWk zPN1ZBcXLMgF_*?VpU5%b7<{Z3+9UoTj{McZu3)ae+56y zf2{NLlo^i-fXU|;d(bqsgDTpNmSlxY zfx3Xo-PUZC=*!=%pk0)_c0(8^kd?T6C!imti*mR{*fKXdrPv2-1BWSoBhqD+j>h0A zaqSywf9AK+*VfR{F#ANIiw6sTCx5mU8A^|FmXFBF5!7fMH8bCpNZ%#csdQLA?NI1I z!`E#*MzWevvbRh)k=yV*HjbRswXJ8o&#GkQUW-egYm9#0d~D2+j1auJH)hKL#nrFg z&WXej-!cw=d|kKB#yoGml^|1oIDmUgkiot8f9}n__s8|>gZIlG84v`TI;J!T5=|bN z#)h+V)DlRNiffuP$;d*bDJ%7lkl1()gVcMc{$gbyI^$dE5M7kcakQb3*7OF%7>?n) zx=`{(+?llGfVk=wywDutuGS7RbmpVV8_-;1%ZRFSygj1_Q+*wlYZ1!}(=VP#2xRRCiBW!u^ zemqlSDn&dpW#u83LnNsZr!K$;ZdLETcK)W=twLed4?A^Y1H^cLZCpXK*@423EdQ`7 zZH+H-Pj57z^f5mf2~v$}t)EgJ9!xE~e}gqi29HQG&OSaVqbdM$fZ?DP8=i65d;sb* zZ|gB*Nt^qLP50>sUr5E6+4)N$JW$V+8SIda#2rd(7Qh>TG!{?yy{zp|CsEgVpR#n0 z$h0VrDF3l*W-5pqd|HCb7ESVp6&9H8$(8;QZYx8Q->pe{V%)=^KTpBz&$vT~c%&`p&t)!18>x@P~daM3@-U z1YJlDWTVeCmEMRp&>M{w^|##HU81tC%$#MOi1N9tBO-~w5~v+vv=Jwjn&j|ZnmTq^ z#*;6gwu;K>nkbROpkoWpQ%9Y=Hiap@Z~S{VOSlV--x5IWEvp62&=w}2e`=Lg4hml+ zR_LLPH_*GmRiNOj=z0|!%pUkX*BwM_Y7m+1migH=A-rL?%k5Lp=30KB=hv zlqp|n&d%+^FaUVrdC)1PDm*k%D&XYSYedCAfYJ?A`F2F*r$PC>`i0JCBh#}WiJ&I6 z5JjmN=Vq$bWsfyn*QMj-&p-letKA6WXzm>s&11 z#NrOJApTOF-8WxL`d4?Tl3;2Z@tnF`92<`@ss!QvDrB?*lW)3;&mBXXw?d==CpDWL zhUTk#vra0%=omJmg~pK8Y>F`O=8L~yt2urk;2j>vZ`CDv%ZZ;Tf45*Ij#CTF`hdox z0O2N+Mu4#xkv93`xX)29w69!^;gJ`Jqo2X}HE;f$&4x;b`gtw}MugBk7{^LA(Dvu| z8*CuKE5IA{B)2854T2M+sUk2|OKdJF#r^%;&Pcs+;QA(Y*fab^=q8U4tC$ zmS?>}jou@Nov4yuf7Z-B^cuC&1I|SNN|oa+A28wHb@ZB@tP*I5Q*AO_ao*D!ti)gM z#Q7Cp#q#=>5w;edpCWg#jvX_(?XaMkpjF=%iZlLpu5^YTOhyG@*UtC57va@k>ybs+ ztwX?lXSG2reI=@%+VMiqiSDn0qIfpA9vC6~fd^iJ0rp!&9_NX%$IISk% zSzXAu)t`EgjYth$a6e*Fp_hJcWpnowe1h=d!;^Q$Pp0(13zrh2Ms z_#@By>!wNNYJfO55bgb%v2E3vPM{_remLZITUe?MmN5_n^FEV)9vDuAHQ4Vj9px2< zeWcQOe}FWmJfmBmeE2j`fzjP?5itsf>nCLN$VVRGEht3WZ7lapkLFia;U%i>6(=-S zUlip+Ab&*Qkd{+eW~ZVW=z=M6%TfgSjF(AnTs%QA5^3AOZPT9Z2d6946^Pv zEXlBH3CezVXTZ9>V24BqI#azUkk@p5rR3Y16c(qv%|&Okd7N@Z&5 z14|=U$^yug!IN!>_y{XYAc@vSw8|3R1V>vt@uDwJqvqjJuN_)pEKsn9ETDaVQe}Pf z+JkAQF=w$$te^?+=OALZlg`8^Ig7Kue?wf7 zJMfxN58SqHD4F(>NW$de&0_86%2j2+ap9sM;=*pn9&n3di9zUko4xZ-1*O#pVz zf9#fI>Vm}IX_3=YXSUJig&;8rE7Lp|iqNEyJ zAcY@l+V7Z5j6HAQZO7ljf?%*#WAzCk6!OXB_i+rGML%%M#)vzu;RJK><6tqpXhy>Qbid2UKe^1eWmqQ9_ zuPw|wC=lY!qzZEyRpdbcJ3z$0gc#D7Pm!%m=6%L~6~alVJy?GO-YDt-ts!?Y$O8*# z>)uwUPOgj$6`p_-pzyAA3c_esPO*VR#fzKI>ti>KI=HvAMdxihbL@0PNv$6jj-H(bvlvGcjW-!?Vk02RHm|pcz7&1r_B1b?-TX8J&&n8}hI}%E>-9#% zhs@dlrToc<#w(sNpb`@93@rs6T@0d9?P;6r-SAlS4MaK4!vPQ6LS*%3=IU>&s4Pi;Jyk8RDy@DqvJ+fKn{e#P96&3Ao}3?B$EZ z>|qqo7!lqW2L@G&G=1oO=khQt<1y(bNEi5>TgHGt0MnJJG7mV%UPzq|pd!Y#U~T&` zSvIb2|43Xxv7{~`4Jrwof_QzSF5bFeeJ}~sEJt1JIBf(^`l5#E%zrWCw;_sJNWxT| z*WZ1BUycG%USTf8CkFN-5yT*o?>D2T{QS;uN_gs82&|6t%lTCfA^8=Vc=Y zT%5`%U^7W1I%$FO`hU>**#K2$fRoBoH7tlZv#NoeLrA)az($M65{4YEa(Ak=RI7=n ziDfVI1Ka&e3$4b^N)3QdEhBld?wqu823$R4OhBdmUeS3oYFGoi;*en&iQQ>`F`1G1 zav|%zXg@<>j|IP>2mCNF6!-F)3wyFWa6;`%ilmvd*+z{97Jptu!OCf(P5YD=c@>FVCXy^FUL}EP*Yhx=p_dxD1%F$_-FZCQ%Q7;#TFB9SkT7&U z7`#H_c>L@}?Au|CL`U>97$wFdP8n^G3!9hv#FE z7-B}(7LMd}u%7%`j`{P^2Bmf{>ITXQQ{H1D`BXplgD7Us?G?DD2Udo+Vgz8jtgCShlmYboE z`DYSXO)g%!K!6PubW3Pzt^Lwl(hY#9&$L;Q-asqPJEGdnb=v?6DNnxW3vY*;ld9kN zE62W5w1}g!MK`x;R;p#-PNvX~saRq}oS_5ECmDih%T&!NuhL^3AtvUf2bZEu`fIBv zPk$(+U5e=y--;e$#LYC5;IL^VeEcV$aD!~dFR(hW9SRj+aDA0HjMAHI*X)%3tn|D! zjHvP~c;W~=PhF&{5vq0Q2?AgHxwS|}v1#4@7Ok{jpn03WQ-xN&>Ix7sB~jAlg@^Fu z6UCo=HwEf4$zKN2$GnL=qKI>&BVn|*cz?ppu@QWS^}dVQ%I)N*VM^QuO|j3fVW@;u zeHcp39u~oWBt9QjA@@*ic*2yy*OYqGHk)s_Z1rx>GWnkRD!JGm2r^GC_Xe9OY({iO zbs7aJwHfYhbcNTDJIsVS)%}&QHNmYBgpg_0E1OaqcLzwcYRIHf60&6Kn{WJYC4a>4 z;u8z^0CIJ4B8psD+%EG>YgUl>%&hWK@}4${m9*?+Aoa=?rK8o=yqhyV{p~#g%}b{O ziF+k~dLV-9=o+uY;=7m)f_&Ps*<1v>9hcJkj>m)TBsnwb5{2FmY(hx`3VEtda?FA> z#V};?RH?NBQzbAE%vv6^lR|~Y`G4G2KcC2VO$*mrv|7LlN~w#QWv>yD+z6YOxDpYj zeo~%LRAgALl=s0kvj!GRPp6ML7t%J>_V;ZP;FmtJ<3dW#ooluO+=!ZTMZQYc%e#i( za)a;!I^n7+W+4Z1Lw6A-_QK;F5JQD=ft>buXgh#^MdXdDYgkN17Jbj31%Jcx-J-ve zLx9_Fc|Qua*#abq58uG$(o9Hyx+*2Dx{o0FtCL~)N~3cC901kNoaEbJg$QAt#I!?X z_=pk>QQIem2ZlfU#kYIL67*^`ld<^-xhn8lX_n%Iam*f7uZtr%gN{?6dX91dez;04 zY!|kaiG>FC>c}FDcSK4>6@R@&2oDuxdPK8~x3LlzCJ;Y*Hr{gIAQ|_+c>67o6S*d| z%YJ{h2777h#qE^Paknh3acWH>J7=RcsD_InsPRzxH@ioG45R(qO+ENJaw@-v${YKX`wfJwejC@Ib9_yB|$FD+?lBs&q^lPxOHWMxjooS;<*mh-z@#C{! z0KrL(pfov3pPh)}dwkkq+@g*hG?|IaI$Q+4Fe*&N_(q@F zZ7wP_fi{lJkBbL=6A^I>OW2mvkMi?pP4qIWzip@U3{^iJhHL92q&;LY3KVPmpQEnNQ?0?cA+DRq>9>#_~WxO%%LVW^b z1mf%>#<^tteKx54*I>ED;a?|j#cDU0FiSRAmQf)`Xu`w^o%qr;ovBUUZZ0f9Ekdx( zkB!#O{NbU%cu-F|H}f%&4XSg$+@|1Wi&nTmBsOcg=fE#BvX>gL!G-A7jp?SK)ND2g zGlmSHEPvkD?e&?YD#M}vs5yJS*;Hg}-bu1mvEbVW{8HGbq33$+|(mwa8>1LmS^AQ#f%V$k6Jl}4*)TFYoacFRW_y~&FfKddMqy) z^Gm`FbS1ywQbrM)MACBK?!gA_IB@KZEc z@AlcU3fM!?6$)X!u`0?`(Rq!O>B-&HA@jyfX+zs5R`G#|S8&vuv+1H3GqS_Nq9KvA zaEeU;dA}6v^B~o{?NvxtHXqZ|-2LTH4u5`d4%_E`xv3G;Z_lQ7+u`)oW-3^0DQxRe zcmOQkIWW;pyVc|))*oW7)PXcPFG+5XMzDoCwyjgZ3VYjO;t&@v!v+E?eh$s?W#3nN zpCLEFK5|n`nF>L;qtC0ss$1L4DG^2*Eh0GWNy6B=os9KKTdx$ z$@zu6UsRX+Mgzhx_5&Kkr~o|c+<(mUuNxa4$W%hxeDYf36`B;xF8X|m za48?TZD7>d-L)esZZoT@%;Oa{E&ai)=_^F+08_wcftUaxCIbS0fcYTgMdItieI^^E z^!X2z1-A)c#b06^hC*5t|L}7$4TjxfDf}=jlT7&o2;NUkb&&WpeS^A8o`1l}u)U)~ zFNt|#48Qd1BG@+w?HCAi7Y4cNffh`X435}PfD1okY|kJLNU1PNdER7qa}kYIzzV=C zW;Mk}%#Tug@t2czgrpGhFxf3-ExY}p;{$g)bg{pwUuXgs+!a_nmy1t*N?|b8GK(Cj zk5Z29_m<&NR2KV0-~O8^K7Vv3LN+Cq!O-qmhVe6ft3B`lUlnz*@IhY+{cP6Is|Nn4 zTtzw|WMb}h27i5FRA%nJkkx`vCN5XrueT$*d)M6%5p5z%$-pEdkcpnU!y(xKa07yk zzb=%_Qg>6H7T!Ql+>}DVR$!T>>f8+tSdp$TuW!*+l#5F4M2}u#gMY|Ut$1onq=6(O zR_<)@(1Ij?eF|>DzpOa$ZisaO&JwMijVKp}mL!UHpE_jOfsI}Xbhd*RDW{%h_x!w< zg5j;8UFofiwAKoq6E}CQKjUk23Ilbjeh-nHcbjFW|| zj0$&pznHQqO)_RAqJOBtf+kX-rL-ZJ?U^MLZSM*Ku_DI!*!OXoMDp=wcmiDw2=L1B2i~C9i+_6p0phlt;x-UgKV~NpkEsVf93^uuaumEj-r6n} zY$d~&Y&%1+)BM_naXr2?#X}A8Q?hY{PAX8hn;-%DVHh8s;=)-?6pRV`2;_ggjrDao z8)Qy0MAY*@y?+cN8SiZ-a?phW!EQqs^HAV@S&Bb7^w=-hDaTl(+(x1$BV5*Cldtwj zXLZRO1xJNmjF_EpK4<=-!)^zWXyNjjtbozsQ|vic9-*DLvLrj!GaJaU5wJz~l{m-B z`UARck6$I|atZRnwLLS__fe?=lV9!3pTAu&?ptk+jeloRIRlY!JEuSz=rBv$G5!#1 zGQ>d=TRQoOXErePdu=4eoP4V8L*s8sssTO~@LECjw(}k}xFaUR&_X}(f%w6~^Lv4N zOhm|~A0kM(%~e*o5}JM$GjwBC910Ghpmx!kF*zPR3+h?Xj|5!VaDnj#@~>i_y}U?_ zFpeZ(bAJ{7iK=!{q_Wz5zlEb|I#WjdEZN3bYZeVKh20YHJrVd=?N)F z)8XNs0D|1I2f8^AEIq~M1pSt2)KRg!q!Z*bEi;4VfW&mD;O!037!gB@kPLZcOO}R& zsZyW;KqHjCuw%r<(_iN@zb#mxgRIQib)8&mXn&Tg)JCL`*KW9QpeAr|=9>k;RjrBL zXQIcp)0j>c7|0?f$;JkD(W2LitX?j*9M67S0B3_J{GeIAuf$#Ih(7=^(lGdkrBd$l zDmo0&=r9`O#&mM0I0$N^kl+PbbC3^`OYse=R)4U?MonU;Gh|&h>h3fH`@JF!6QT&kHkR ziU(~f-IjRh5~)YngzT`76EyoISH8$&c{G~tpm?b8TTf4nk`;u5d!v70 z`=#mwpR-byuLjxeMxO7HIw6;iqdX1rYk%$mVZRJgRA(*z>?9E2XhmX;h1H$uSa}*M zB9!vPTAKutopd(NEN(YYp1W8xC8Xl>zq6j@gS4yvO8L%X)iHMq#r&1D>7g(10oiVO zB7{^4wmd?6WFzQ4&!-Zy#ixSb@G+hZdaI*Z1soi`D$yUdzSLvaSZ%%_jfg8*9)C22 zoS^5HRE7$puQO`Ev+|c%rNch9q^v)!3;Km^@=UUOKa;I3oE$)r_=7D|$a2 z)@{Nw0&ceIg0$Rwj(y+3`O@4np?^kTzpkb#v&+|s@O=R#3-QF{vv6qI*1b7Y1OlYT zKQ>T`$rr@z9RPm$cT_`zWj#c&GVK4VlK3HHdDXrt{EScW^dhyOb~5&=Ky5EBR)4oy zdx4ftgVJjBS_WQsx)m<}h3GC3;HUVdCCgZ;bjJ7v2uKd;nTZuI^0{M`^M4TO5TUea zWs~x3{Ftp{Glqwo%Bvt0kLErzH|w1U{uEw|`{mK+})ciVEzaNT2;at>1WG*$9A$U#ctBH?JiFJfL%%45YS)bw4b*h<8G@8z+vygaA2%`=-BwZa%FCU+FLo zC|!klRxf|bXcxG+N+{6=UV^BXc(1aj-+@{dI8tA1X`U;5MO4|Gw|^#M>3K*(boN?z z3kyJhu72IsnA++st+E^blwTF=zh%`}MKVWimUf0*wZ+v-aoSA)fca$cp()USmrkqr za4{|LH7X?W!*z34$D>|OMlXEpG;G}ljl6_1`IL-+;OZBPsOO%e*{DRph&)^z``!Fc z7zm8MY*tEhiL;Hg&3_%O=a=F0D5OKGs^_zPi)mV*)p*Lu%Y3Q^7jH#=wEfQuFs?u8 zo1c?`Mp%2c1fFA)fhvwK7@+n*JPJuZbsC<|6YMbo9@cHh&TSw+92J`RjCt{RgB# zs#9qNKC1YLLJLXfW?L z>9)2!*u-PgEUbV&zt#JJ-*xkIgAm35>^RN4=QSJweGko<-G!rM>?Zd=$vche-PW6K zR}z+?kerkC?nqBx8PZ`yQtL%~Nuc1hw%lA9Y|P(qk$)8bOUpDYoxCum zA&NJ2D*?R?$h59Xbjy-NAFKJUUEQe&vb=c7Fk+FI7%fhb{Y3q4C<*QQzTch9U=Y7* zBJDb@0r!5Fb+^XDhL$QYlQ`C*;>m`aDWiOb|A>u=5tNRpkqE*SZtBUHg_b7l*0(FkKr-@-*Zx1Pgd1~YCCFLLpBm+Z zfR)-f{65yRYP!b>y<{zd&+JYw)`V2pCf}5A1Al>0_Op{D^baftXO+1sNC^FpfCnWx zW&RcFra)gCCM&gP0Dwj@b+U7J@7axQEFfG)Sd@q7F-U05@rd z^nXE|rBcPknj0VD`}FK?_j6E@o(Rp#u+J{cBKJk7nAxmv1 z3{}7F5+O1md>*eUUfOYrn|pkMP3|q?26071b0D&zO(4QQ%m?tZkl>%6XHT;it>)9Q zRgi`NdyKdHan~e*tc|AqyW|_)*ypTUzki*$+&s6f{u(IJvlOL zzXVM3aPy3+AACpgtwL+;$qmruS5s>(hq3X|)865jB46yBSR+fU0m*|)I!oVuUVlAJ z5EE!%bXr{9X1t!s=Ll@Zl$CfT(Za_rtRIy!YGqa9h^H8>u7(D{VSSFoV{psWLY z-z=SpP_ilANZyFG3BD`G&Q5SIyoZ^Cx_sEUN*>{-$c7g$)H=dGG>N~eAw>Q_yO z<8?&uP?Y4`_iByRI-aH{bpjC3<=27`9^JFb$Y$V+6p#B1;-p5p)R&^pY=2)@NO(dE z@re@7iR3^_msSiE=sFmC{UkENJ?L~@NWH_eeVzkriU{3=-008O^hrqh(BB%3vnjLb~ z#HfW8+C!5yyI$2`a)W0@Nq>ZO8eeF
=65KLdzYst^Zz>pDv6oyuxz>vMI%)t6N zni-Lk_03@w^<&0q+KlOwHbID!A}z_Mqreq5!yy%@ zZs@HRNozlUx6D{6$?31KdP+jo#@YG`)SHQ7m868-G^L_s*QzN7~MA zq~J9lhb}$(=xMA9{=#$%7yRN$ZdGAO2WU2w5GB zB_ODscr>00EFbp~i}d%TDrNju_G;4Kw|F4ePew`a&K?qf9N+`>@N*1lqOB~r4A|Hu zw1eZzGy`cM4{${gS${g*di@6*hZkgqP8{~0SdXGz+ZgJfLY%GZl%YtI=JF^^;7dv* zbJJiAVl*VYUI`N*T4+WUGVIo1v^Wsj4@<+GKhgW5MX|zsC)$|IIk3?!tUGXvxw0Is zL&)Us_iKd_0IEQTpUVEldcQvdN8_(l8!|yune#EFy&1~99)GS6qXQlS`)qju!kVWH zLEZ8t3*^XJCsi+j(HIA;&paP{7F;>)4g(!B6AwK4y;8 z9U*_}z>(4e`MCFxOiAmUJ(Zv$BFB!(bliZ<1-7S_Z7KpM%fPKIwbv(;PNUHZa!sn@ zq2J}P_627TB7ZAHYSIau_@yw&z1SwQKdfD2nykgs8lJKYiA?&@c*n+ib=GDUsthWN zZ(d~s%gqNw+f~3eH9fgixh`unCxS3tF@G;75WM%Ty_DG%>uR$P-+hEF z^1HU~3?b~oe`Dv;tGIH}k5BFCCuK>e6lFbfl*yv_DIug93XOW!*1=h&-ERuQq^?m& zS)fFFV{nWhB%BA51o}RY79%`$i=Z=lJW+N76!V2v2OzK(F$k?l;;=2lfCi(wU9UVJ zME4~QvVZ2;hqrv)Jk{_D+)9^YGx5N1lEdHJk*`T?JKm;!UBhe2Fvtu`0c`FtbQNNX zP%)dAEB9SQ8RK-ulM9SP#Yrz52PHEab1^~Q7IV)Cj>@@gOtU`kE)dtb4b0dIqtWOG zR-|Av438Dm4o0}cE^ETUDkDdnW8;jVL~k@wPQB#3b}&8}xGg`f909?Fgo?bni5pc8Y&?7EH6}5&V2(FUVql2b;Sm`Fyt7xN`d;0dj!tzF zp8a7_FJ;1*l83%~B}p-stIB=GY@gN=y??i`xF;@(FgLAb11otNwM@u!T>7#MI^jIekf z1+}A3$KkPU`2NJOkjW(3#ts;BgMY6flKTsF)A_;td}}NOQu6#7_);>U@`k0)G#rI1 z3N^D$+JL5qHs)aINtlL27w$VbAxPKQ*4gnyJ8Z!8T? zfVI|*O=V3lnk3Cz-H+84pd%m@i43ESAPuDX6|(wJApcU%p@_$K?XBn7>a#xk7Qb>eB!oH+e>+) z&Fnx(c74gDJh8;X;d-~6kbkxqO?4*gw)nf7(f>6t|1m`CQG0$`KraViHyi~u$a_|UJF7(rNKDUn5tS;hYs zAD(=VxEyJr#^WgWgYQZ<=zYG}@5bA;F5B1k_|8J#YdNFz7f9vyyMG;mWd=}Wl)Wa# zX%T?lavs@riN~U##COO<2$Txx#KIue%kOgS?kT0-xvsOaI7cQkB0v@;h)>qWAm@^1 z*FE)fRAmYsL|>14Np5}1y{GV}HJP>6w+-RsS}=#Qd$05i1x$9CKM%aAyOOn>dp#K@ zTHk#|;}T;UU9INw+J76T4a@rF){+*Ynd#nhhJ4FSx`A ztBf|;z0ruTl1I;6hp*uC&nY@_9Re!Sl=4rP3$~L&vrkU;<++wu&Q({R(_vN&Qnon7 z)cLKkEB8U)J=)gvskjofG4n?wp2QICdlEBFvE`rAD9YxNuYVLSuOZ){sKb!wTn%4o zQW_;^(Ig@zJDx2Vpv1fCZTHtmt?9;B&Q;kemt`I;3|7yZS52U@^Jk*U0`5x4@kJ(? zeQ*dKJH*ruX4%Bxte5}Jq-f(mGB%x{0Jb*UTP`ccWdxoBtz+w)BfZb^q7=N{~UO;m6x^n8SkE5+QfCwq>gS(@OYCaITNwBx1?3Nv6C09?~OVTFD zu9LZbU{`=Ao4^Xvq;hdUbh9onC*l5|fBjp%>{IspkAJ_Hx<>x}U;laT3YX>j&G7Jm-qb4eUvc_Fl zO^?i95;F`VdjZ2R9N#bu1N-Eozh8o;(4S^q{{vF}RR&XaEa^^#RQ?!-Mbu79A6lOhkE{xt)KVI)#x7~Ub)aX|9ll1AddFzjppY4XWm zjl{3YGk;OLq>;cdvhxp!eE)$uf_pc1Cc)63QN{c>D(TM@9{BBp@~KCTm@UJwxAD*C zmp_BFq8?~;g4;^|1Ny%iQ~u9L_H4*FpTuA4FihS438g=1p%4+WHqtvsO^9Jw@NcBd z_t$l3pOL9nnSIlL<`4eIzW(o(R*tSl6qUIZk$)352%Qn|Mb*OZd{x0JEF~BGpE701 zGp1WM{^We@_Y!R%`1&E{bT+R&)x&Fif6u$n6yXkioq<1^Z;0d=_&vLz}oz#Mg(LL@U(}CdRA;Z7^LFSq?+eL9cQCu;ik2guC(C9$!4lMK2)-N zLH7|?D2=&B?1mXc5UjnJ3otp_dBt-(4*&LijZhTG!LFUMsJ3bRHQLlte9HQOgI7ez zAM4dexD%4Zq!Hz2#&JL`-N-3HPZcu5Y=1ufdYL58e=DO~;{827Q!@rrbT5j$hQnV` z%rze7v8ph%4CnExdl#rsW;_X3IN6|h9_c2<(%j9qxnZM}0bPg7&NKmTi_C-=`c~r4 z4vbd6lXRcP!g$?D1k5@Ujxp&fh#*7qXB2Qw%p|14C_3k%8`L&JVkb}Dz>82Qw0}+n zu*ZDEoURv?Xfs?!%XmMy`T_z#qtB#C%G(>3gUR37{}HCtz=JUG2Bz^zkgaUVlzlbMXqN`I#1$ zOwDUP@DZgTu8Qbe9dPtpyACn}h;awaUPur)U`rB^5lwY!uGA9%@ne!m@&ooOpoa*H zKEY?R&K(=}#2B^%BVI-78o=C4U7_IjCriO0QIPnj>g5afJVSYD@8jj7wpx%#^i#6<}LG_>Od~ z+(2meF+9I!2i~6>;nTiAhJc|ySTGs}PLP5BOHAAu#neVfX`Au5h?1>iy(m{^W}s7l zofW&$1711I8%u`+rK&5JjxnkSVI{Bc!`+&qXpy0|BJEJ;=YOZSVO?V72^d-WzSEEw znsrE{=Wm1<(6wt+bwUkkPlr=(Z5SdgMFJ}?k>o@!OR48+(rR$&9D}&bZ|7S3&OpW)}Z_mIRvM7u>eOWC79Y*i6ea;FKL_7rCksHOUb|VAqkF3oWWoY%!)04AT*JyYZwz*K;FyQ&stk-4QpUk0h75BTL zR$cY2ULU$lXMJxsG0p(x#1@lnOvtSVs6?F(@ke&tJs}!?#k`4KWsh!vrR1#A(c9bRHw4-2CymDyWl?DZRaOss^m<>p1s zciSXw<~9q=HK_zPnEX6jIfEgXQVY}vA$tMiU8tckbV}d1a#&G@mlALMO7tXAxM300 z!T@h_v~&c_02jy7MQ@NB%z1@A#sVO7!GF}3H-?gBYHbf`_}}LmwdB!Rv3SS;s+YAA z%R^y=E!XN4@ISuePg+JZz8*a6{2|GRui={OJW&-{gxK>5g)h3yUdMpzs#Tq@Jzl1M zk|XZa;`Zs6NY@LmB-$rluKRj1gkdO3u+`$YpJ|E?n21qmVVMvV%ZLP4O`=#zw}0uG znDD~+Ck699lSo{2TJu>J(ZNM;2j)<=^iZUc{GVH_n%;2`FlrJ6* zGH#-c^+_~q|4|WP*g0)^3A7|O=|3RkR8pqeP#GB++(kaUL!6JR(NxR<(h=74RuZk| zZ#X6c42`E!>4H>KXw?M4>Y&7YB!3G44Jl&edZ!ufx->c@+Tr8P%n& z;T;DEH@Yz!2Mr_BwAHt(-n)um(-*jnY3<%mj+J)zd@744)(y6L&C~u1db+-oiu`!! z1)CIgy^SOAwKCOS0^G9QD&$e&zK+oqiO3% z5dWAgJGDnb;NcTSK)kfN6@Sl&J3OZyj|8I7;~>%l4-m~XK6|BAq@Gd97y?O&kd@Jk z$pAUl_f_0TBL>?K6=`9aM>P8R6GEpiIC)YJPe!jnBX^=DDkJRKb6X3pYINBHJA5M)Oo$F4B*b?b7e-GFp-BD62cNPi^MT%<^rax zW08!zDHY$#{W*dyx_YpY+~PHZI|kV- z9=Gs|%Qc;!J*cIZtU`js`$F(%|XkWdR655J16k=Sg|?%+AO5 zTP4*)ww2wnfPlSnOv{LWJIx$$Mzw5vU6_K8Qz8zezVnq+nCa7fudt7ri6@m^eQ4@p z_6JX0-X7K<8-Hyg&oBSZZmQKi9(SrU10ub@PzG~yw@dQDp*I*u{GzAZR-P7l_p|I) z#?X2rqGzox&QW~K&bdQ-)+WK2l`8kwHK?yml}^IH7s|4cKxnaP-%R!yn~|r2Sm@jL z0W)89-arKnhw&uzS6Wex+loSqW5~(sbj453l+}5j(|;GQv1J3Xitnf1dt~G$N}<&+ zc`(n2(0XlXnt!`+bDz$!UJ@dKm55<50gIbLJWQXhYao{b*JpOuFq*?X2LzUIyjZiI zlQiV95Owb?N~vRLju%)H(HAqhLx@*q@97MKU?akG3uc%FKm+y`?UCvBk^9l}IL~Z0 ziV4gQtAAQDL!pDFtYXl#2McbB3uU`5_jy^SqX)rNhbI3OVVNE@=BBpc-fF62=Kn#t|!3tiZ7L9}v&%71?437tk8GFw5Sn-JGMN{;JaJbODV ztdn?bT+&Z6+!<@*n;LRHY$0`5fVn`zm6aSc1d+usf&p<75QWB&^Ur#Raz4)yxA9YS zQIi95k+~Fp{+GZtN3;L23Gfcso9u(=&m$xp*}J2!Q9uZYpOBPRSCJoX6T%3B7Ym}K z34iXiD;d+*BQB~{Ij(JZ7oQWD%oJo@(l43|qCCjwfLaThXo||U9|~0Qq^x@*qh3U* zPd^~eZ_bkI+)NBx7{#T# zz5MWArG9M1#vTVFJrj>#(>U-wIPs2UN0 z&0HZNEJnB4CSoUEFB9wV!<<9O0D7!J6<2~7uEw9H;)ihFK=Jp+o*{w9c|y`?{Y!Hq z?@p#(G7y}mlQw}pGSIlgl!tPFm{t*VtTr85IB{8h#98_jQi>%cQwAB5RhhWvC4Xna zP^t3+kQ?U_!5Q9I8cRi|ztV)ukwNWGVJi*fmtF6YNN!Vf5!n%B(HF>JADM8?uQ%Zf zAh1C!Ro|jLni5z24|F!^6<%!<6c-g4nK-Op2Jr<}QvE01m~utCog`2%Zo0~8)$prp z5CGh9fpOwv8(hQIqnxD0x}K9Mf`7Q1ho5C*$1Kk01P+=3Al^l79HO4cB@_xsE%PL8 z(?kvXN!B@HWJnlb@7zA33wbpp#r<#C0+!$-B>As#+_Hl0Ba-fCwSjAFfWG(8iCqWw z8{YjI3Et1+O=n;$5M<+u3Sr4h+9Lc?2vlX^KPGhBnC& zh?en2X+**3s^b0fKSoU>HvO|)4~DTztGtXLvnpoAc(3GYf|rB+v1K1=&DWieuDBKI zbv~j|(_nD@eP)1oX(s{B&VO9($IfS=1`E#$0QiI`s@K~{N}f=UD~FiBS$=@o&CCXn-PyzB3+Y|VgUBZ!ur?fu&o8^g1OE09#YMUx zaK|M3jMn{vXryy=?xEh@m2LexxiMhR?&+bahRQVDcLn8004&wnNev)pq~cc5q~$-l z@`Y`~yI=Y_ZT!nHjelw9h$A+14!ex+vjvFW)0OgB@95j-+8FD?_r!rH1S>>)@I@&! zCD5CQa?BGZh|C+1X2U5_tnLYna4msXxb(5@anzpRObjL;{{oWUQixs-IEoJvD%2B= zW~1HzSBdCm_p%O3k^K=>i2vcVFMD-uWcs$nG<{3Z!OK4_!G9RB{c49}LWw<}hvk!( zeXR|wBD(pgTNmalQt372w%S45OZnp>msFMOTFTXWrbYg zuQYhY^~GWWzEaVg7W!sP{c)9{WY>~1OW?+VKvY3bNFU(!kn)YuEquNvg7IW1$8BI5 z6#L&#n18!v-I7rOWVwgby^%N3@O>M~5StUjce48^ZOde`eNe;CvGSDkHRj17t>=KV zT`aVA8-cEWZ-&80vHRKZDA5gqM|G|5cuJZ|iTQvBATaScTI`;#H8Imq(Rwj-Gv~VY z*i+)BW+LCzF95vWQbr~B>3?X?Hv6xxnpLL(4}ZcCy5^X)Bp0Vf6lhi$WmcZC1m~JG z>-59Q*%?|Zya(4RR1!d?QXbDaA5iJzODMsMdy+P^1Ny_n=^J(tQU%n89{Yr8`iS?9 z(H+!s<6--ooJK3%R)qRsC^^t`-M*iRo#KJ{=GQt+BhP;XnH?3iB`Nzpy3;%LML3*W zgnzCrAY%0Y-eyo%$Up`2XMBW#$JLW|VKfrpbG)KV=O$d)ZCJ9a{WBHrO}i_?ng}x4 zSjgsevaB!`;tckSJAEyH7rWe;n0sIuMkMi6oc9fzm^Km+5hpKgg%-=X7aE-_PU%iy z2Y&BBstopksY=(sn5SLdRYWzS)y$yU;+h3Ewj3$Hl<%THD*}BFRZvS5e&)Ir z?_JO{e-^=2M4%12LA`!e72GMX0rViM1w|ublX0z!;O>YeXRF{pG`*?Sdz({2r+*n8 z&g)9QU?yAR=?e$hFMcP2<;e^Gc?BgA6vs?G5kji5oMB?1kF5_&wiGTt#Fg&xsUvL6 zrve|^5ar+yyf|3OOPQGmt`|99%i>W3RboDW^J9mU-NgI>?NVl3>zdd=Oo zgh1HL@;C=^tfD}ByfC+xw}q=m7cCF{o%*xRDMIN~(%dA*Fk`cSX;Ra1q6LV%_=2b) zshNfKn#M%>v(z6&>x)bpeQP&FjmbflPoMzUSLp3@afo5__N*Bp>Tl%8hJV6<>j%1| z9f2iGbxex!wCc=@HgNn-E&~z}quwRznBhJiZPL*f5leOOK5SZ7-?CiyTksa64Yl^Y z4a))A9q*ed6xrZ!_T4pRs&DgyWJ*`CTYA12c2UOMc61Yx^4`MBev(!6!%$;ynlS#? z2!SOTHN*7#g(pN6o;yJ$^naXW+7$}@(5~9#XJv)Dn5iUu$FBg^A6#yfS1EDBNAZCd z_vrA=IDfP))uM4;3Z(;yqt9sT*L>n(UZ!fVL91^fttDIuQ&EOYUTj8@3dF=EIm^=9i5|Yd>GgwpBPLhVp#nOn9vPF`i)Lz zAa$&pO2VgH?=9|re=;;X_T#0M@!3VbC+i2O>w^C|qw+xTK(%{`zd8kK1bBKzHa)zr2svL*3}wqiIU+OwuXtCe`j( zbNjWn9GLhqoOYb?!_Rc!U*jz;+l*C=IaXW|3a<@I%4W>c&_CNvx+pr1;=?= zuBauPDK~Rs%6xms-?>#U5#J0z?8%OqeVQ&=kL4GZkV;w!oQXCBwiA(p1cy?@ZxFNw z5O)pxjP>TOyAf1)@E(Q|Y={>?Vd-&suVS$6dX+3_bblq{GKc?5q)4hU@ABj?*^eaK zkbP7`jhI)PR;9Atb4QtY%%S}DZ0q45_^a+lk+|sub>YJETY?>&{c`%$9a@e{szF)uPZX0V`K>a{~pIcyS^BB#k zkW|t-XMfziR33i*(LSWm3SF#8u=oVSdzms=_LLZUE4OzQ4@0HR++0twYSgp{zDr~0 zB~Sm=qe0bm<;;%c7&Ud^K$+__ZF6BpaBB|Id~E^~D23~U4yTCrK1#$vVnkSNB`F@O zk%eXkt~@P2cw;BnIfY#wF+^`i=MWh`yj5dCK7SRXl~QwyAM(n_KX{1hpB~@%x&UyE zwMU=*-YB7fK>!@{@WIzpT4j1hSb#LR;6cVFs+GAb1n=*yBcwO!dDEsW6s0DL`js*9 z#Ue^wQ=ZNgX(riud7TEK6LCc@nFPRh5inBOW_*|wNoBweY6S|PVevX6k^um&NlIY7q06J8eMFG8iOjQ1%G#!4#@ z@?KF4DBmZ`%Hmp5Teg|=o(K*dRl`-4;Xr4+Cf@?Rrd-sE&f(bni_<*S`f z@1S z-?&B35cMbk*RQ@Zm8<&NX?jz>V~F8hhIlyhHNWY2->QTMlW$^;@ntx0|UtxBQ>(D{vCH7X}AGom^l@*i@%~DNq0gq(DgYDu7>E zS>qQ{26WZ#jMggk*t~y-@km3AnW=~&J6oyE0~3mUgfrP{oR&|;0Q3wQN-|M$ zx69B6R36-u)Cqst7X8=@sF^|>Ewz9PzV-b0PzsE-i8(}nMB*XNl-e-thf|F$?XsmG zq7CC3biw)Ay54VVHf}01myeOD-s;;G;U7-2PCMX%djYX zs4*j|b{D(@hqckgGy&A{r5;(b1%=1-@*7}??h!)|*zTKi)k+N+)rTm#cW#dnPHHEF zd&DooHUw-V9XjEfD%XMntC9)?#X|viD)Y*lFM?}-TtZsJX1;eU7AM6Ok>%~kpB&}d zr9a`Wtp1H@%nz+KxIl{#kl}O`VSkBF;A`g(4+C_AYelg&ca(!&l|3jm6ddF_4F_|e7unJAby>~4F8X1O? zAscRga!KM*knq0i|vH{~<=1{--FQn*neXxIjh6!~F=0nE>lgcaNFD;F0IfJjTdBIx! zbW?NX~a9PsaoJqX63=v^m7x-zLTJvE#n8E>_wI7s6OWb9`$15?F-Y!bVRI(k1Hl&a9l ztB9}N^yz*bifVeY+M=4^FWUz7n1qIZd43hF(6|IfV%Y0+>q2qeXYTM- zS#}Kopfs+Mca0-AU+P|$HKlCQfGv;NT$z2yOU41#T0;pd&{5$2u7F zE9+|+n_cxygo)l0ox)kU(5&7l8V7F)^;E3A01B!5PM^?kY+Sx3+-6u%aXavv&pb+h9ODGCM<6eyK^Us1SO^vqDRL03hx_PgQ+YI5w*j zodNMa6$uWf6A0pN4Y?eOZ+*H2pmX8sBb2_(TQPsVvarP+uhC#1SHq_3o3##*Dyg-_ zJbzcA=kaZpPN7opP0T}D2SBu$aCVpQrHQV>IfD?&5(!$Trz&;x)=Ia%uf#Yw1QyuF zrm4d<=IKa$EmIe3sm_h5mKkfzw@7-zK<+==qv)^aAKx05J(xoQyCIYnhlPHJ1N-uK zKNCSRxaVvMvd5mT>$o@RhjR*RHrDenDSxof#YVV`b1xJwL>7$xJC!#I6&9l}+zf*a zu#@lCF?$Uc1fR?iX(}H**t@Y11`D9S#PgF)iaH2mLTr3WMoKq5`G9wce@ZR9drWD- zjR%*C)q|(TsO!tGFbA8R0m3uzxo- z5D)ESlgguUcD!3#Kx{P7m#a1BbMBI?hP{cvDXKTPEkg!SW{d)Eu zVqBs>N*8IYBV7iibscYXYG8ZAZGS4~ljJB9Tl$+L0LQj^+9?%?&+L}&;Or^0T8HkW z+i2D@_fq1>(e9Y*HqLM@xKT2R&9gl!dzb%2M&%vT|Qy(|tzGTwx7kn@uKyTRwwieW*)(!B?g+>X(i-XiRl^hm|T+ zAFEG+R_{5Ve7qvnO%@*z6zWsGu~KN(KD;^jZdlU6b@c_}1^;z=(I(G&0$Yw^ugJ~= zdc(vgnxxvPV&hox#SfDo|J%2&iUc780nfM4jRbiEfBU28ERrmSff#zAEW)%5GyHI} z3o~<0KQ(u7*=@D-w7QEu>^NpAB~60cEpOT}ui9!n-11#0*Kq#F3kB0>U}2hh^mx_N z9-8*fnbN}e?(^R_v$9qV7^qYQ4lC)|BW^O?3m`3IsI)Ca6}&5Ua~TcmR3-Ibv3krD zQNX>qe-km5z@mQYB9NcJFC>)8{S(t*#;cmE#A6hJia-M9I#lK-lLqV=`ld09F$pTS z?zo)`(D^OyL@A;Bwdmj`d#{Y)RIGOzI?#oXXI&!aP!I{_64es4-$C*?o%+Ff=PO-I zO=1yrd~7*sAS(I6f4_%=;9HqH%tZAyyd4gB27B6m!;?N- zF@l`k)L#M%Dr`vXR9AlL*5p|?*9@NR6<^2GRJZLty~ZaqtDY@u2Ha;H^Mm8uwS2}S zD(EcqjXZo)$2;8xSlK>VmX-ZBk317uQ0gWNA*5|al(OQrvN!N+`rTYt<@ZzhacUvT ze?#--{ZRQ9@1#CK1-Ou^Kc)43m+S2Mbg>Cf2Bqqg)ySAw%Pu{w9dPtn^b|ohmexE> zX-KIvv-?q;xMImE~Ybf9MC?4U25TPlFDfTYOG!U2cg9ChN5KHwJdZ zVKJK|z%(KDCWYmSI0^-T9hh`5?X9z zS*A>uL{A9gKQKp6qu^^4k7=*E_0~61cL15M4EV0u;>PW8XHu_gj#Dy|RXKaxukPp@ z@fd!bVi(P^0|3Lz$JX!Ld>R_ZY!VRgNK691)OcRhL|-~hykZ{+Jgil_uz*_HFC}Gu zjL`pygOl4ocNkckq|mY^FBC8Re?(d{B~Xs?tiPBRPGLrHnZ;uk>^E(&50X478o+Td z>Zh!<;jF>ppU5pORq45hQoyqQBo27Xs-o~PRDMvAh?T@FfsIohXQsbPxi(C^+>8n# zVhs}5b{!<>f2u=W?Mi(YPGPwt~G7w1D~Z&TnW7FEqnnhCFc zSFKpy@*9m3lDGNWq$?cC>1jOnn<*YeYqNe(G*Y!I`|Fs{8#tH z>F+!-1pBNKsgsG1>%%^6Et&OiVhPU_d)Y=VQ8Drn6UUL}kai5O5M;qPi98|!60Eio z?pYRy(heHCeCS1TDcj5;rJ0bz(bEV!wfFN}-kkJ|$0|L?+SKtX4Vr~~2`8u|a?f6Vmm9k!sw)A_ z%0aHOr@tvvYe(wuxCAyH3fKu8-C_8qqI1DoE*8=6mrE1#9sSkE$uifX!o7T*t=7r( zEw<}2&4Z5XPz{sz*=D@Dh9hZ4J^%EzNAxqi1YSv-u@|6Xf4?vD5|4CwIIiNLE5EUO z2%sxcb2aH=m2p}_?n}*4Mr8>@3cg^~YJczWpy)}{$5E>8pz+=)j<|8MJ6bB%nN1)! zD>@P5*0sL);fuoAsfVl)3twmCKn`I_xhgra+YAdHYvx zXkmgLpTrFle_+WQ)M0_TNXg^(Lo#*gCob*9nOU>P+Hb2bZVo{Q@I-vu1oc2C&OfCR zmaQL3T+*gl^i@OcmW5wa?3ZCs!Kc8L{AQ`@x}Izf?twXnl_l*OPdp%wiLu6{#gaJn zI+oV~G((pYE}9VJzh|*N-&LVv;%ANEETVS{5MO2ge*;G`6Y}#Mr?_{o;v?NYN5CgE zU6UkQusI@*q0w+k!;D0FEZg4?Od1;<%lU&{pi=DY8NnNwKcWjo%iwH4vI?~f;KB)X ze+9V<^HX8r6A%6=r{)AcL z$|YDnIm$qE+P2H~zzs_xgTGkW0J;hgp=Z?Wf00Jd-mehOhi1G4cJyv&RUamgr+25$ zjqozM#vp`_3bRT$5*)*>TUI!?-Fp+yw(HY;C;34C@6t6+!ol7A3Q5&zw=1;caNeeo z%xfRoR*8>8uW326Ab`p7(r$9aJHQ8u{rmWedT}9JADInE$6qVGfHXF%2@o4Jzp4qw zf5wD`XgL?%l}AS bccad#H8bsmtov{UBzvNgs(T|5%ku2Yo03HHi^4XS(UOiL5B zOP6wl)G>IvheQ%vc+x6=sL~onPgEV|1l&N7DQOPWG$hrNNc~g|j(l=Ffi2rRq9yET8Nws+)PH7ENL;{Y<7!fB zeWN~v8|4A72q58FVB-o8yhKNqp8lG(V>~!}rgN0__)w`)3Yx*I;|B|zD3N?8ly;`N zU)+Z~uQky0u89TsKj`eHj%1Hju+!bCCG=sYk1@K?cVEWoydR~?L4&R`B1F14e=WC5 zhI`U}*Ji+UY3eBoFp}KZblMBwC|ImEcc&kqcl^Hz=|okaZb(FO6mGQx1H0hf;hXQZ zhGQn}{Lu}YQS2M4PvX=Ee4~ z?Kgd>LccnpFj@%ekC1Z~PsO9$g+%}<>kMKJ>T)u#pj-Y~S0X;*9%mV(jv@YXBir7* zD)`uDmC_d$@zuNe_WcG0?6oS8Hsu{a59WY#TNQ%xvjl1it>xW@;qn(8f2-Z&8$pLb z=%d`7*&2Cutgk9q?$anO&J6bx&r5D+L~Y5)c@{0aUJyb$x|9N_(8&<-gC@JwDx{re zV?f)ftcEt=xSa*+Y{taEl|ZioGI0q% z2|zUtAi^mpvf7FT07us~e-p`A&RmNrBN!St){e)EkD3SXBtg?$a%F`*q}@80isp!x zsM3z$e%Nfu9Iojj-*P2Lu9Xk!d}wq#crDmshjkC>F>;BHMzRHi-+X!8og2iPM4a7k znd|tJr5FG)%04*+kR+~=i-^ndMxc6n{okQLb{G1_cp{|dXAAakG`bl*dY_%aCN7Qm-)31CKb8DS z)03HnM-?gyOC5m(uOlh7aYqA{Ei7#!cmDs>dlCX_oA?|%_-H~zuj{G;6j!mm*z8jz z#fTCZ-H4d!yN2%sWlJmTlf_`UZDVcjGnjk~L@D=!6UvAoe_v(*V!~s8h19q}yY?Tv zXS~~6@lratsuV&HY^!Sy4o*wWCBx*d;Tj1CM*r9m-7LMT57Jmwk~U1Eir~2ORfkzP zN9Si<1R2mx7K>k#HBBj*2yjFR!0)5=ua=%+K*cX%ljwph89tKtXgm073h&GVXDq$V z!CA~Vc=ffEf6Gvo#8xV9uc|bjj{wsml{AF!K%~2iN{p)< z&BP}6xAOPr)wfbL1=6NgZ%uhPyl66o~3~7-Hk1#nAqxb+yVSmCNqd0K=ED3<#O6|>7gctNDmeRYO8 zzH{6#zKHaDlyWI;w{8D@q|3`S*qEi-e9qV6^7|0Nm0*{mycHvXI(oi%3WhxqzD@E- zYPIr%F^!BDb4#Gn2UDL{6 z;{c4?x75lh1#hMz2lgX1dh_7Yo)D1<(<-zEw2DM&OdvKcE-G(TBC4ruUlTB0t0eI7BC zz8fQK>_sf0Z^3@Q>b9p)e7=dEQM$oi@;a;{;UfI!Hs2T2IKjQw>mMRz7Y?_9)oLcG z;!~i#NVD#akYhN8+_KycK;^T1pB~QDe@ndx<0f#G`{%BZ8xpR(z)iL~RQ1CtM=iqG z)iE8e-~w|aVe~7Sfv$J(HBm)oi5@p10NgB5^b`D7)=2%)7$%#4C?JgJC)VPf1Jz? z=@AKaGAj>r3pop*L)R0KZ0A880|h#PW5Y&~n{lX3kJl4KAq}G>QwG&g@j5pti{g$?X4`jrxf`uJ`9kc_1HfTc1Z zk1$}*ZQnJT0r|<&s^@}~Lq=r-f6<(Kekx+HcSzn5trd5wXSwb=SSGAb<#{wQsFSK)bCA*NW@bsyylozmT4e>0Uq?m%=qQc@qQ$e=0M&IW30f zia@d z>1l(6@vv|56RZ4#pQyxfd^zYz|L*-2x24HJf6b>oor`xxS($nh_!i4v z8h;4W!c18%l1d?sHM*MTzB(vq_z=AX2;EaSa%Sfau6z;2;PR$oW8;+ZlK)_u~Vmq6{S8~bTJ0+k2FtrXIWsHDbR*^7o6%B zwQQU7-*nT8e;dkn2tw3xaC~)5x*y_|9N-6Lnr3ZD(`-3b;R%-Dnz@~@#B7ebvI zh%WpsJO)x`tkirm80{F+7{|=SiY_8O8GS5GMpm zg`N^71WLh;yr8LKgqGs!M~JJTf2D=RRj+OrTwt|XRY$z&IssQ7rf|7Xap>gUcT)Up zt1zpLA8j$c&J-Y*Dih-5KY8z~(|Pvx%E zg1RqWfnErGUTp*;*iT8Y{X#-scY>!r5s)^`K=g(4)&P`~B`V`P zf31_i7VAwPc=A_cCo(G~cv}@^q04h9yGNtCOF>DivzU8wc%Sotp0;k)Ea|Vt_aEw| z%KKdm>ZNum4{G)JL3(Ch;1qc2b;e`ddk-jWRlG!N_5slmQ}bq;I!#O97?AVZGx zMJyd#ycDLuCuH@`QMgm7MKqEFL1;c_J~%OzI3}qH4p4;wrlF*s;8OLQF_We_s3DE9 zKNB~q;e~c`0C!EoE2!=sq@h87+C2L+>EUOSN|{lytEV%%8^2LvI@`YBLMp<&e@tQT zVcIm?yX}^{fb7LYigN5y=bzgtrbRL}gT?$H#+Jaj#TZW}sjIlv5h}slRPzI(VQnAb zy$tbtM4yLTSGZpiWTsEPMbfel{0WcTRc5T!n&;*}Cv2@J za8&K-pqGF;i5ZD!{4Fs&)18V_e|F_q!Yb2yj_$wkp7QDVo^H@1RJjhH;_ClM$ZqA# zphxi1$ve8UC}H4X-|A@^yu5b6Fj8=Rfm0#!>jTTC_39}z(fBbd{aef}cM$W{M-wM! z$^=__y+1Wh20lp`>Sh3j8LR?WE^?aYP(uYUs353=a11A-zMgLBDjCxEf2NxmAexGe zcmm`qYB%@66UYnuWt*l1^%skYTG^)mTYbhoUXr7StCjIS5#} z%QW8@Ip(pobHJ>aIXMWrk?3i4+@F2i=4>-H_o|9;ZNL9Jf_38{?tp|iY6PMQ^cQ=jv0em^aG@7{$olYk~3o38rWwm*q`MkLk-e%vF?!6 z4#e#Ve5VG;E_(ug9E*~GW2#^r*ryLDxXwM8ZO|o_Lxt{<7rc7kfAqFBa0efGf~zje zCCEw*NoOq4@hy^72#JggFII?-dWvA+kc!~CG^@}+FRp~et_&rMuM{+X+fliYHHTB{ z%Ckr$1JwC4KyYKyJ-~AFZMo=^jM)88TQ2$H+(Gmun|)7BJo zpOnNYoFo@ZAOyYk zExDMYa|o9p2>sV>FQXWDOsfVri^4{{R8x}^O7d;d+EcCLI*9}Qx8Br|lKH#9&<*>i zuWZoAdBi|qf0C{PtQN%FiELVNqB(v#hqA5;v4raoM}a0*Q}_u>4JY1g=YMa5Bp&1v=YDnr92`pA<)f6Ra(6o{Ppqky}SM2FGzAM=_@_01DZgT0j;CF5W?Z^t8s~h_TS8*+EM!RjHwE>WBT}la~=0g)S zR_zE9hN-uDM_ftugtMCyI>MT1U_$$NQsu(Bm;z2egz56nL(&7Rli^n}%Mb_!t zWhv$18U5+b4a8ek3_PGPt|r9P)1D#?B!QH*l-416qkUmr^Je0I74(3h%75+;$6DeA zeO;66; z_um4yT%HPcHrl0S9J_XM)kcnl6X~AMdq42g=9IrB@Ilch8EOiwb^=*Cse@Vc@?XOpVqE4s9#kxMh5bjkSy<8)DtVYEzO}Za6 z)Iu%y5gWD9G*pwopOx9>X#vc2f070ohL<-(&D9ewG>*w(s}M(-%;(V2k0^WI$1vf~ zi3sL-ZHNVE8W9LeVJpj|X%hN>O$z~4ZTukRI%ZD;`{50b5}LeyOQ}C53F5g+YjlEB zjs_*Q+jkb0LRbcwYDl+8K>jEe35}}vbH%gX0!Bx&iZ-q3&WT}_-_8R9f4)(jz_p?H zRJP+_l$;*UP*-itKchtB??GQv`yF@Dz$~zS`Dab9H{u(+8Xg&>3E6 zvXAn%Zxb0dkW z(9#}_&+GPkfBEj603wAo2D=W5m%L~!No6q?8et`M92jdwbJzz}<(5%{c({j-7${ug zc5BM_#eAFX+*yWBbG|v%Zx6v)?5$aLTVb^+ruy?>=Eb|)XBlY-ROfu9L*dDR!rlK$ zS-PH=4^>R75Yxbte{7Cz=veZDtm6XpqZY?V{6Ucs@84_XbsK07$|ZQPcMrk>U@J67 zCQ4SS;N~{fn`hLzpUwu4?IGVw3m!fA1J86Uk6bKLrr~gI+FB(t?*yt6M1j1z8+8OL zCVj=)6-%2zL2|U^#v0J|Tb{HrUp`EO`r4K@>PN5_X+f?|e{!eVZ6pztZ;pUY@-vK% z!*eEht!;B%H=2R--J2xLh4Bdo-AX=|gIbcO;%}76BuIC&3;JvuOOOsK^q|(GYMOEpaI$SL4QGBnr!pd+bSEVIkkY00i zLD!|V?QShce?A4ad7PQnzTmXYC7o4p9~93J2H1N1ig5E@U~t_!qk8^;GIMIz)-YUO z7jFd8Sa4t5=&5o^ZTw{$zEe8^FZ!AtBR|{)5{fEl@29Z~PP!6!O{TI=6RL1dyfW|! zE|hD)5KjC~JHeYH9a28PfzZ*3a@E%NYmAbQ67VhTf5X}{z=^>57x=2Zp* z4276=o82ZgTnQD(;3a6H_@OPoGRewL#p{NKOVTGePrtK|cb=Lq=6j zA83eeuCa?~QUHv3Y;`|;3=Ke14&WEe{YKvmRY$*PKZ3&zuV%-)OD%c5+;FR`T z5P*!ze;Om1jmIgoZlji(*ReN6GJ#mHsgpg@C3k#WCdc$m&c8_mN$W8hNHu>LEOy8j z@X$xH2EI&UM06HB`VLS;tWBaT&k-#MW=Yq&(}XS^DMSdiw}TfHQ(RjNL2R=4u(r5O z;~A$8YX^~-QlQ6`Z=_)BPR+7UK64;aMg~{je}Gbk6`opkVorqc^VgZrm4AKTNX#)~ zE;b2w?)?{j48RJWd|KGDk=;$LWg1NGq`d@L`f;g<3nf`%7X ze+{5ek)IXjCoV7$q+k152FaH8YI}s)5l=Q&$u9 z-b2M@4}@k-$rfIzUq+#mcy~`bM0)vXM8?Xr9Q!$$g*XKe|}(fY(gaPq@u@0AZw2~X#-25Q_|BQOBn#t zmnRW$h-D#f)v8SP^v{wWyK;nh=9<%N9ZK9qSkeO;xeH-QmU?-OpDqa!-|-)sw;O81 z6j#Hqa*X&3THVI>Z(mmo+A}e(miH$R0LpWKWrcc7F?uuXlVVcJS>wvEe8aRcf9dG# z51u*d`|#VoBT@U%Tn8Gf>PZ`?UZLp|!6g*sS}gEkNN@YwTlOTxjK98Oco@wc8QUMg zD1{iZd-`}8lq~)!n7~` z>16s;X)CDtE#lhg%2J|;QB)wXLGdVt+C8q7Z8HJvS;oV{W_>_~{|Mm*&3=P~4~qLb zTziz{_2Eji?h=#zRZcGw5@=H7O}F1jN$%(o`P6asgbaK}_Koy=nE5bqf6ZNH;kq%E zDa`Lwdq;lhlIZqHR%!ZyfVd1*7;?f565u_fs6pwOqnLm}>HAD^l0Z8+iSBN`=P;A#1kK(VtF0G0934-WxKt0Okw}`KCxub3lzV5V}nr3Giyoo@44i*I(@g z$K6J|wOBc;zOJP7R)uv_TIj*H7hE6~%EK)JnQ9_&vGYynz2~32kKob^EzPFGaoTM- zdsFxJ^F9vaeF;_ltmznFOb=CqLV(!$OE$zq)e7I-eCSn$Oux%Ze@?=O@#sn`Hn~_L z>E`hbP=gDKFkyP(b{OCSN+Txa#~4f81S0$(kE)5NObgXJscw2@u1rcRs+AE9Xd;w8 zQOjF>7Zh1Ln(iPP3GxBz6^bsI*^pyvBp$rfcO@YNKE>yJAG9dN6<(6(Amj`z{QxL0 zx)ijP?A`1EXl|v!f8=s8$%v9Mz)NS31DSR=@3}z?Micd`hdU%H@&r-6ZHBNBO%6dt zifut^f>@}M;%mD0>-LNfjfxh^D8^%jmoH{qrKG^K-|EK)IzL8rgo#J6d0GdORcyBX z6cuh}T;TY{c=#kEFaT^GWx^+Z)pavHlw^|>-zGA%sYTgTf8rtDTda%f23l4|YhWs$ zCfDO4zc@jn-Y9d9YdhF(^Z7%vlkc}{+``5Z;GaAR93xc<@B`~#bwcKyi^_T~r_Q#u zxf06H9Spe&mgT=(G}6u8l}wm?ePzk4iJqTG-@ip;el#WpFrcH%0wnk?5r&7Z1!USN zOQPvzBz#?Rf9fp7gI^qyit$ruy0G(S7Js^Wnsw!P>00YeX^tFJnOM6 zpSk^;;cqdx=B-FUjj{r8N`iLvm}7O(*n2g-#mahRe@GQ3D2VtRBZmf{lfVcK=>w)s z&VFmtJle7^e24oP#M8!Ba+rB&Wb>AcbZ-$%@beA^|l1v8*U(c}VALp;?VwZf{&kYQ0Qa{}9Z`FVz%gbU*5f;L7; zoSL}-DyFo}D(Ci)p!GgUyo|w+Ceo#KgZ*E(fA$jHrCje-%k-c*5tSL@X71!P#(C?n zZEk$XdIs%SFpAsUo)!)IkXg$^MfsU=FnzzjWxBAU#nw&C#cogkv8z*Is=#NrZ3zL_ z$~Lu$JmkXW$@Z66lzn(NPYR5>p`6lp z3(&hs3&-X2@t9$R-y)cS3<_68>$VIw2SRuis>nCa2Ld%elE`8nd>yA*J;QNzrTS8% zI$Y|(9C+-i$Y9*0$dPJ!fwi^3N#pQbu4+(Pj0PXGx>`8AI3qo3-oY7? za?|*9N3N+Ua;S$6u*-P>;t9}1e+Q;2sq#XjLVgH?A!JY4ljUM2{I^Cl9ilMGMaoGK zr22WpHK~qVOHU>UUR^~+=Z(?}H?DDQgAy_Wdh0Cdxb;d#QE#TmY%eJ%#Egie=(!|? zgxa;}TOK9dObf^)YK}BbWPW1qUBd43gp=Uw73gE3ojjtj)JXehpI6Vvf2dE&MPQyx zcdIn6WpaPD>%Gv}K0dW^Z*3Y;s3}w2o8vUC20f_s6cv4}c_1<)R;6~hTZUdU{hNg= z0d(ffex&2w$HNeML+N$;e>kXPyMA$CNaxw@D@UUPCtF$yWW#Q0c5kayL2{>KN6^6BNLHm?tzw;6p7+I72J6XVYLx!L;i_UT^Af1?`x*+>}9q3j&E z@l_^e(}mYUl6?ls|2e)+tLnOm{N7BLW`j{24p?#%t#3?R3 z=TzF=6|wUbwLVPN%iMSlF|7H9D@)?iD6toi zi{kiQfqZS{5GeIK>9!&k@{z|L((>T=aSGiSVTYb+kU!NwNyeSp>n;lB{6r^|D~39z zk`AiJe+m zuVJ0O#urWfka~12Y{$4l=fb#XRUE^85*v^Ef8&n{l<>M81ERz;v?pmdg{)!;KV)ms z!C|qZ4n9dvy2~C-?eG#BtF| z@n9##cO}G9FjSy+BXFmO*<*8lwBJ%1e^5c`>;%hj@Z&Tx;4<`UQ9TlunN9*Eh;_&i zF|YMux*OG^=y;3}LOs6KUfGJSG!ey1nozU>4;z%}y`<)oZCwYE%sJ`@6GY;u9^aDn zrQz-B^)PWE(}~%LZm%
)dnqUXbSvNKZYWS9CHFk5%rfUOBxHH#$;mi(cfFe<@RT zQP%~Tsrf}?V+(5BtOVu!n0HXd= zs7LfCy5YgJlt-vIOsj_g&B}Be@uj6ya{nfePU~@=_Bz@sjrAA^p-P)?-bbk00#{3 zCe9Qv1u5kT|5$soW@WjhOZ2Z))Kyhj5wxP7b2TSS`fkz+e|Lh?iXb4} zuRnonJ@0<^{<^3-Wh|En6GSp|c_*AMZ1&>efcfHOU1%cr;o*GEA!@8K=e-F8Aa z;NZ&eXIUprUwm>$Uw*dyoUJW>F%45HS;0_vEi&I~M=oy_-|)_dCrBif_((kNb_ey-J?glsA3Mk1%~c1XC^Sm+dz9 z*CFz1SMlyB&{04L@hLH!?s#|r;=`F)eth4HS&Oevp6G~Mnv4v&Uh9xJq~KE$btrf@ zv-3^`c448J7_`*!e=W_<*{c)Xv5xiGm$JO)#}?Z-s8dbmx_;)}L0+-?W)E5jecAq$ z6nUp^+WkE)*)FM;;)DE*Z-IYGANppnRHiB?xxU*&zf6gocotI+WxG%@K0FhY+J-hB zw|n+V4qZTvw^N#(uGHovoz2!}?H|gU&N650wo#(fN9bUIe|e0zbJ3}j`f%xfE*2aD z_Bk6?AAVv{9a(m~2CLs)j@$uH)2ns*TM>r6Gt!?yKhM?SIkp!rv`kYyDJorUZ^z~m zj&ri)JyhlWs9JHboF%gNx7)2av2-wSpYY`{HM32n*yjgbnYffAo?0vLN#9PkQFQzq zi;QveuRJ^Jf0w4NK8Mqx%flUQ zBD;9mshh4-pKP&t^3o4+ykGpd&icyXPI5Ae$8j;weLTZce0n0)%g0G2#=LVn>`~{( zHA$Fte}40y_!87XWtWM58;sY7P-+`7G<{>&Jbdx&d}%%>Shw#^$*RlkQ%H8L3Md;1 zRjDrx^TutKY-ZVOo^p=|ZqE6X7ZP`SIeIEcU4QLg2LI|`Ov$?s{5g%60xp}?vq!xo zfK1=7f@tyHr-X^6;dntrWs2j;5}PQM zH2N~>>xpDtEeoaYa%nU6_8OCSm*IDF?~!M?mus5N$xDwpjqv*0_9{LFHEePpJh|tK zX+%#fSk-C`p}`AW4?=MqxhojB_^|YAu~HYJunT*7AsE|JU-zWfQ@&Q*lB<=!y63TY ze?G1rQx?I44_i@sMG?Dz&GRX_rqOclj!)IPHPe=K())8sFWyU^L2^hNhM}apu-zzx zM}5?vUIoh(YN{>g?R+Z4(<$$*;uKHf^U<%%S6D3QlD!2n zfqnx}4Mh#vrEpo5oP1nm=VB$&!290ed_Gj_LEcaM$wq%hucz_)ydvAYTTxYR{MHcQj(p^FfT+Th-&WBWPII8`*rh=0WS#d?W2Wa*0{mAo8@>E88F>~8KouRX1O z)*o$_MvgK?TURx5WU6RmyYg~^kJM1km&YZrYGZvo16_X14Af0;xtMix2z%y+ALNSyD(BthL`u9j5isdrgGk^K@fzi46 z)N;|nHd}6(GbRQ@#CUPb+vu1l7!q?e4 zsg zDC-w9dP!L#KBjt$mwAeo?tgyrsaNdCoq2t5k0HkIrv@JrQ!17H5C`x0-WHLOZ|;F% zNDJlqJup3O;!{qXZvAyA=Y#n^OsA77I7oRBeZ+g0JgknaMkIA658XXgXF5N@e7LTf zUa@kx9+xSMl~--I06iE>jIaBJK|(&e<--!s&|D{QQ!`Ril+Md6tA8BZS0}YqThmD> z6LJ9MH+&bD^SLNEo{6og7KQc=%BRFLM8+dpw4d7&$fXKjSLKo%Kj)ZBjEy`R+`?R$ zo^kK4QY&$7>!7Wy@>$%$>y-iCY7HlK*Qif)q!i+t>n$tGLVDIMB1^~2j^l-RA(hg) z7jZr#qkQJnRS_(y6@P`udZa@kHNEQ|GkDqFC-%v}T7UG3U6xd~4CeX_i>@=!?Dtk^ z!gopChnkU|wnUa}0k6+ilMOMkY?si9#`#2Tc61MF=a-t*G;~*Tw@s*CoJOmmz*z@i zE6H{{JyhaiUyj^+z8vlQbO@G1rOC5?M%`Sl_Q>WJk31#ap?}uSJw62^L2A|owcTSg ze9G+BR~`+oD@B2KI%FTiShg2oo6K~0nbl-H?%dt21hK%xP2m`pGKD*P0J8kJkI82n z=mb4KQz;{_62hZE#VRbC4?K$Y=ss{c_x)CLOXj=dMZI(x$6Y>tpJV^S9ECaTgDIo$ z?_y14O?e{f=zm@u$MW#;=9!*+Br_t8kdoWB=0Sv?=#Z)w^Mey!UZAh_|_WPb3XL~9|XvZa|(E{AO|7I!VWq_>BZKcr+V%rn24n`B7bwAX9S!hooZrL8HekWqNrv}FVw@4B#TRt3M+-7J!d$JrEi-BsSSoE zEd<;4Cx1(jlEiAd3FPOw9HRNOZI83)4t{S@asQ50={R85s{5in+!liMm~P9)=WEEj zHM%7>xZ(I_ivpc^I&~XJ%D8VaIrKh;c@ZS$VRCGCGpne%90v0l56MIFi_5i((@#3v zle~0#H#9U;n6*0Ac2nB{ebGWcu9SycDBI+F_J8FGYUSZYs&Tk6&-(U}7jOPp5#LJBMd%YeV%^YuybjyliQd>`a zDq2C^koR^hA% zEq{j}UwO~sEqr~hUT=xSoXv{`NU%Tf{={IwKmtDxe*Y%{-rQf6oHf9o2UGycfxEx< zKtiAZ{FYl2lxxudQ3`u3+jG9~#;o?Z{qXNd55a5-STX1P=n4z4Ukh)duytiedm(!8 zZVP^kP91!T=pr^C?486B7xsE}GFK$(sDE_oRPxSoRlIVkRBrd>O-OT|#LJ}0a88qA z-DEquyAEB+W&P-jvm}##)7TB}TR6|7dL9L}}PdqBPB z&x;$|Y7bugdLPs?*~)IIX%6Zl{<&(mv|HD9(4O`C#rKmo8$EVMTD!0VDi@?H0zx-tG%3wt8f@FG(n1l z8an}WYDvQBofm6`z^@}eAw9xWCx0}gojZ_1SKpl+s}prHES=&mWqwjP+bG}t$LtER z@N?@uaqkENUwe^|wE(=Bh6nu+v#3wrWq39G(>FuXShocnR zFOTk3R8M{vyA!1s{wjK@XpsSb^sj+C1y~Fm>S_m1Gn~b=4h~|}JZnC@27mDqw0`)$ zCk8bgu7-N=L=IfXV`e`IH!j(p7l`*ow#%wlDkU1pDgk=1z=8a|P`Z0d*}%V}HcJiNQZTodR>=8Y>Z z1#d;1(J^P2r7i98IM25J(k2*0$L5}IcRoHm9)&T>IlVAzW0ULgeSe|YP2|)2ZHlDR zTxF|wJKQn}O&87G8m|b~=4!gAUC3ENXc(vAfX#ljoL5o`n(U8Z>a+M%uD4jKyXSo@ zc_NX?yr^G8eXkomSD&5&QfAS(jd|RiR=z;<2(UWw={S32dOzB$$#EYdQ_I!^ZdN8dG=Jf?9Jq`l*iBE z86TXh67Y7~naMq7SF^K)qkPGy^~sQOQI5@g|2Vv$F;vFUQ)DNxM-*g}WJ@PpG!FZy ze*hXCKa&J}ksd^1Oy}Z#Fy>5=#)i}HZ}IUwoU-geR@hu*JAWgIk29^Gi)2sZsLpqz zBW61)!r?Xp0OaZx(Xfsrcp%aPR-(?kj30_Pr$;kF<=X-;py?c>NboSbDw1?=^GcY< zAm-cO`R&Im8rdOIgG z4@i@c1ygx8^?z84FpmHRHt#$74EE2fH_A$12w`Z$c3q>|elV&HO*$-xQGeE|B3XD*^dNaVVv$m+=&p4r zYM=&y`v!ydR@V1h3As0NnzwPc-imx^K%_R{EjA#g8biM~Xc9h(zkNVs!c*DDhm4?_ z*ynG776vyuAV2js)yeCr$opOBsqG<(N1GZS@6LG->bs9`{h8}s_E6esPDh6EECW<) zv7syErGI(Hc_YP#Qi_hrjUPvKqDdNY-aG5E){`XvTH!0}JI8wVe8$JAk$aCJ_ri=^ zYTj5J&sbJ%Sp0YpF8Ux`q;#gq`8Cx=`MxDr{>?zTww4hX{@eC-J5Ninf4+s1m&Ilvwz1>apoKkdPXP)ahBD`GMPtxAPU-B zMU^mFdH7Th*R@&sls=hp)9DiulDr8H{`;!F^t5?d%l3?hI2A%ci!3m{Sj$bHsyQp| zNfwHv`4G>>?keW^rjAL##m?bYJVJP0y5bZ+!`yR-5E9N~@;?lzHPiag*J6~PinyFQxTuH%)MQzfGV!3mMqQ#nD*TO3Sh}vOTq5H?;#lP98J!a ze0v{*qwjYuAFtLx6>RS@c#=mUxnHtL>2LUXWKN7W5Fm4&n7d}D8~EV*H60tupYu%z zRw`ipMqo)!6U1=J+R8b3_lTb8zB*rw7k@bM)gnxXU8vZIlapN9@!`T-P`99PZpq6< z%GGPFCT|Oyukx*a>S29s#0(SNIt5DD54X7xOtKCx;>RB@{bn^6j}?Tvh`3v0793@~ zp6AIi!u-zkVni>^D1F#_s|BP*hm|MX)Myw8L1?LbBkww288lS|_9qjXF}}mazJKL5 zk<5ecU$@Cy+KLVfF0@U8&jcnq#G}16$pGl%Sch+Ok|ir(ObF1c@tnY{x%UYim*-(! zj^w*OAUG~iDA{B5s6i5$=T9s46o=v>N4ftT*aUvsd$sth|9y-Q%{hykNu=1&Hdy?-3I_dJja z`z*s^bRTsCAa?8x@B4%AKTG#i}Wn3{->&oa(`{F^WjV-3= zUuP>byt(jxyso~WOXTxGES2WEjp}*$0J2QXvw#onakZ;yv&z&!>|M$Zp5!A{){SZL zn|x|wEIsaO29pACeSZ%gVx{3aTiFMy=g4kJ)E=9GHaBRK@~I}aU~FY~jxD45v9>s0lw0xzC z48aXgZ|5LZpD4|=`eq)Z^rcUhh?%=G-H*3UYN;E7p5wa7`O{m{N$c7T*ef`YD*VJ} zc77K@`9Pu*UQ2-i9rbwMKWhT#)UT3)5d~0g;@wKhn|E(x>t47?dr8hbeyz`*CQdI< zc#sg+NE=z}Du1TD(7ct{X$zAy1}?F$CU#4e_)LKbi;z|whbC8%acZ*aYxTm!4a)h4 zPpIDGy>wtr3e`kS8KUy2DqFdqd!(bNqRWk{UsfmSZ`p7jS^u%?#!hG7(x;FGrV@6i zE?xPsyd}?RmrfcvR(V3Xq!g*S>=UtF9s=O-s}1ID!##juLu2ulHGbJX=*Zq~e;)S>RCxwQ)x; zm^^`g=jE6h`=zRqG}r#A8#{K%E5Ey)d=ey<-+FRw+Q*mijw%L5pW;c%PoTON9;X*( zGbw)!wSV#?+G^tT{m~)GC1P8JiM86fgz6)a_@txEXOxSdbEaMClDUbWb!9)PMKVz1 zw7hvTr|JWeYjeu^Q~&H&!kIX^s!$tWFFMz!>JfbokrNSZF;J0H(KJ&IkDI*^8xuNW zHTX~zSgJ5d-d|%B+`#8F&`C?5%nx^H^vLwz>3=)6lPu%24>-SzH-vmgtC13dTB~!= z&Mk~K=U%?h%|J*{n->T~Y$%D_bp(n*^`=#_N#@P1-E5OQ)***hz1K})xM4gP zGdD*aKl7bA(wYh1PB#m!NlWn31~m0{bWi*YTXLJwh>X`w&8!jKU-UcO^SflmV(X|{r@#oLySPl8Z$6yFgs?1VdPZr1M($sR{Y=H4&T7UBc z3dZ4hN(sW*x-6hDliyh;xE!BgN&*91H+USb+8F)R(~S*a=K^1S!X~UoFxy^O#tyPy zgdAZJ2uuk><)?mXV9rXojT+cQl2<@oxdKl@Tbk$4av+UnBLhpKNOR7m&deKK69ur0 z6=@BfCY8)EPlTvW1Flhx$k5b}J%2~uQPf~C)TL$OxE#!65>4)62*zd{F7A9@&Ie|F z8j?lfhuuA=weN#42*gOHqQ!OJpHIu5M9Lr!Z7G)FL`m~`%}qD(Q<`(G+(XM9JC~#( zQ!kSQh@R0hYhDU2>b5FT-FrzjR}sr7;xFEm4+IEKee+^S(mT3fxJhTFhksKmnm%*= z1ZUPeLS{>(N75fA_A02C*Ojl}`no3?BUIoK4R6HOYuL=sBW_xzVmhCCd7da!8D+kR z&*9WN$K&X%f(0W-zOut%8MATl^jmd0ZT*TX^olnrJ620@s;yHOZIk#|hV9DI;lt2` zg*2=ukGR22t`E~hVMc%K+ke0nt`GHVs&V~fO!{D4Khmr7Z>(NYIeqX_JESjTZU@)o z;~#DpbzW#9rmu!Aka2k-^HqHtm3eR}G2FTnixRmUTF&H-Z&+Sf9A_Z+1KY6OPcIb5 ze4eiPSp(MlPMqA7G~a?W_CNLPF#7=nGXU6>iSZzRPA`*@52r`z41e&Az_U~UgN?)4 zVF7X7QvVR81qKS@9iwpDndM`xc2%QrS@sO0x37s-;kG0v#BcIu+69?IhOyP52I)h% zKcC~q&~)=4z|3+-9aL4-Hy>zP)+Z#5Gs_)Mv#$4jJ(Qo%=X2vE_GU^#&@hUl`dnf* zLhxc*RdkQHG1P;G!GBR^HcUKztR|$n)4qk%Ysy3xe4P$L zd$hKa`y?Os@p_&fc7I&YxEx&f$wSsq=C5Ezy_bwW-Nwh|Lx0D_3$^7{58lgdpie?q zGRgh3-G&1JTGy7nZsyX%iSOL$nv5%#9tha3nSJdS?tC~29d{6CJ3a6NgXQ z;wrpKK6QDSI)7XHkLI16c*(LUpe^P3I#}EqlEkt^#pO|PnR$Pamwxlyrhj9b>K2w; zog#b-T)A)ptb*B_hwkv=(tH287Q~BUyi}%Gs_~TYB(>pam2+f-($%GTSdr2umNQFg zqgap@Cy4o?;Fzc4fmYE)4{g}JVrbEm$jUrn-!G4e;(zFVc5k;lA-7Z3J^In=&siT| zUtGq#;@9bR8}ewhie+|eVt$N;MPDeY9WC>|67rdl$d{cncMrB?XiHT-Ql?ZnZTbK~ z=05-#8=;P>2dm{c8Y~;%23iNznbYz)%2i{z)%yHwbhDflB8W*zKn1LO^Rp>0Nq5)? zRL-zOgMWD(`#14AK1N7vK|Z&$sBOvc5&5J=k}l*KDG9LL=UXT3#V`F|@3m7L$1Baz zH(5F?XWej>GFy(;L#zrTTf$^11)P0m>v**2GxI)7ukmy8UB+bkySRNA`grr7W1O zej9K9Pu9LhVjQ#oH@OWDj{RT1O0)G9jAxj8lrb6g4Y(x~x}7tSH4$RhxN8@{sj9KYZz36;-*82~WneWoQy_abHf^lk*U{7&v?ia(ns z^y1I1Hd-?PrtrQ=K@p4R63++#6MuKkY>GAx5pw3--ap^^_I#W+xjHQU!IhB%KneRs zY%Y;5Rdkh)_MHX5l-YeG-%!Ov2Jn-gu73as4*gvpexi_iUX$^>an&jznX zt^~wmSV4kz^XwnOPqQuqVE{gcHy~jk_&~cpN6)X@g8>v|l*3a5X{D5DV{XxARDTg| zY!PF6_@%B!0P|E59UO{FbQSYRhNbdS%ZM?nls31BK~+kNOXQ*=-N$T35xqo^Zbs#k z>dGQ@Hum+eVv&aGtW?U+_BOail*tRz*pEYq1mO6syrKt~kpveYfLsxTIh6r6u&^4umm8LV<-^fF z86rFza2|TJZ!@oTRT#J%&qDD@^1!{4Hx$|cOLdS(aB)?81gW&! z39iaZt$fnll+~<^uYGKw&^AySQ^1s3MlYo_N~MgdeH>c+6y(vqtxYY{20382Yb6$u zFBaGru}|*vl~s}9;_tq}n1B5D{m(wksU%kEkG~Yg@7VNb*_P5~?yp}zDve5X(oh5z zIW3XEH&KbIq2p(gbmo=^c>IJ8w+%os1%PCJ_FEp*Jitg0omK*~ufrJ``D}O4b8UZnh58xSj^#v2gKNCmklst~+9fz%oF9%}LIs(8tm|M)}$OaD35NHK{|2a#i4fX1-^11TJaKeo@p*{K!#3JQMCRRJ5a-ZY9=i+}DdKT84SszUBV1fu<_ zwx{4Ix{zZLU?f7osvyT^DBxCc@=&jl$qR}IaGJe_NPxM*LlETZ;=>%UR{@`88HfQf z)Q}h2j$e?9`1@5_c`+=Fv1!y)$Q39#J{CZ(LEy*!(}*F)D$IvIbW(?R)ZXWr4mu(x zAB6_+?h8&pM}Ki_%&ZI?AS)RBg{MQVjX;_~p7S@LMPEE^yH8Ls=Xe=gw4Yq@@5SfWKH?TJ%BlqW`p5zM`o6Iljk-n-0_Zo^zP6Bs zT=B)78dg6xhue;=yWM_bQJSrO$2pKA_OFp;;vXG8XC-w}em*}(x=wd#saawGyr)q^8{k7|5{`EDPq?C}Be)(0;7M=WCG^SSbkMKdSO9hU|B z^dUZHgMXiEo-cp~@EIce#6IG)0}5mRPNMAjR85>Fg-`3}Fq#bQN(xY9{`p?X8HfPv zsa+*@yiNhH0EGq5nZE;Y0fM^5wQ|tP>v7fdNc_pC(Kl}@E_f*wqW%e!tCTc_=nVOY zG~K9CSEW=-<2Q!D_19m1;_sV7_c75oZ^AhZzJFL-`J}P^!QiM;D*Q8M@vqVW zG5}6B=mPi0&Pon(D+4_1U{+d?r^0V6>mV;Y+nGrXV*}2}yuq`9@&WCBz%NRO_*mt4 zOn=2Q(mkANRFukj%_HnTiFAOIKvm?!hC_?Q6-E+j(72w~w z0CNl>K!^vd0I;H?8RaIadY)(sW{Du02OeO<mBBL*Mc^ZTu0_5)3OvReA&=dp$+4;n$Wj!G=L=s3IhJ)e7k?^L6p$J`m{|eYr-hgR-lstxg*-+kz_Tir z`3&d5BgVTgKxBhlQPFhbWIi$>-pTOSujJjYq8VO*f`FaKV!3{b12|?9K)8?n7UmCz$w~Z@&8pZmsM2Kmk~h zc+ZbPPU_$tLUrEp0?B;t=P3VTfU5vs+w(j>8@!w=GMEvB7+d@t8J>qz!>y43Qpl4k zg8TmCRs^{suAWbQb3;>0`#J#oUw^E!sz^kONWeVu#N3yVBa5Oiobw2a2uC`=2!C+| zb2E=1o0J&j-ML3+Z(~Q8*bz7g$k#+QTUod#!4imtN ziKbKFV_z0=Er2<9_Ee)z#0Pw4eg@D3k^ycxyZ}G}{#A^(#>oRxK~PY7;D0Iqnzz4P z{vVhT1qi00u_-%l?DKbf@4m3-d=A2BGj@OAjS?KiGq+?|OAz^D~Pe-tFm20``M&BU!0%d<9BZ9*&ui zV+aax67XR0d7Y^qc4UE~;i^id|x3UVL9J74_US4E zbcf_{fZNVXQtih_3=35kh;v2dxBR9k!A)7|Tht((cNClG2`$FHHKlPZu{fXP(*d~8@-c{W5&d$}AnTG`qerm5TUW7Y`QVU00*};UH$U$FA?g z!uP*^v?*8PPj2~(FMs}(zs{hF&Y+A)Q}7$LO#iAU#D9}xaAUfWgM^ecOaEW-gH$X3 zC;tdh3XaQJ1=tjFKT@E=gG@nSF_NF$GH6g8|9Lsa0$PR$V%CJbbAGHog82;L+={>J zLx>GeS}-r*fq%)v6N3LJcWrs{%{B1|a|B^71^@j1hldEeP=AmQ9+VgMp(r|^r#mJ@ zJD-7=G$8omM`6~cr@lp!pW8iO7{V8iFyb-!i!m<9uH@Cu>!*35EQnJHIp8Yi z(YkXBaZ|oE=YPNYl2Jvgo%@VgAgr#ewEM+hxd*1Sxf-FkU|%d|wEO07-MRbH?o*@* z&q5o4Cj_JgomvoPUxOeOATWNo=Zpn7V-4c7AY#SjXRXNA=-2Lp2|Z2fz$u z;uOIn&_*C&;9-~o@MeN1KVM}FaIiA^@9t!CM5jjU!?J^snQ+q zb`1Vg|Ne)u@WY`nZ3Eo=D^8mK){cR=r3bL{s2$h;KBn;fum186z2{pa{9^e_&5i#I zSAU=C+VX$uNVxdl{EXHJDtYwH3u561-~nQU_o~ZBX!gIjD}rx)rT+$3G2&4P#9coV zz#zQjx&DdQJVCR8Ksdyf!$Rcd9zUyxXkig_jN0Oh)+X;tg9l5<2PEVqlA8vBd}{w0 zYsDGlJKq{Qxa$JBqKix5irMK6ycU9C5P#63IAD&DFlPhAz5}J__Jg(myL|4)-{9OD zwc@-aA-w|!-*n*f2#`N@3{D-bI6iGL2*;eXK#l|fkh{-Hp#86~cE^EP6LHP+3$}=x z5r6=&0P7TVYWEAnGuS0&KAW`>pkc2A-h15N!Z`Lkr0#LAdOIFt2`)R|N4dHxi+}0< zAN!hvmLIM5kA`dhm-QiHc1|yD3ZLYW2Amo4xqe`YQ?_fR|1poZt`FBh0%k?=k1mk= z1{4$;*5iJgdy!~c^0&^T&HP*E@gSIWqpVxa0t&=74}NrE2tmH%?~V^p5$?QCE*{>Z z0yu{LwF@4^f8^;lfYWIurbrX5C#8UfjdATBqc?}UpE}W-J=Nr%{l&|FT zA~^`N^&M zKe&~h;~yIq$N{tR!@p;v5#jInf3EWmQ26pN8aZIsc*-yC0QAf4)0gsN-+vGlDQ(VQ z+d5$X`~`2uKBhqKo)>_dHs5-GIrHpSgOjCbLkk2kd39z4@t~j;%R6sJf?@q|Et7k= zJ0>UsoDB^WQ4WE6$#Nv}c#sa@JN703we(>9FXI$m;ByDg5ghTr!)Gf1T8-eGOR$0` zD<2GSJ(K31o57jDj)K1V*?-Y50m%2-o`>Ze>jbyC&AUl=BN0AW3P=baG@PGb!5t0Zx<8vf^ zbzYzVaUw=AM7v*UKvV#)2d?KC7*tAcZum!QFOc)K=|9CW2u;D?x&J?UHvb&o!j}_# zbA!SwYvg~m_no)>&5u;9eEkG1Ml8W|3T7nB3wh;dbqJQdu0|J-BUvgH7vkIj6s3vt zH~(V^`nz5D{g}i2dViKQORa-OH6)BBPLtFoh*=z58y;mc01{mMoYZ1j#%!x_OGvtTBjV!UsThFZv z3t~G$}3o1o8>?=d(7arm3=V2%-Wj6J9Q=i~QNC+-@+f5wTn|FCvkznJFf`OaJLR|hwL z`%p^Z@AmoOxI1?o6;X@lAx!I#G@z&S(An#`Z*TYSn$Sxv&-|}`^xIqd zrx=pK`hO>OfE)efv0ck{zj>`u*R&Hvuosvx36$=r~ak zUE7(bJ8O?O#al!du%X2lrsg766k7jU463lohmOp7Fe61ey1u+~LNbRvB1QRVhHA+g zd#UHzr?W6C0w_hwT}S~|QHO9V!oq)`x$9x>OV9x#z_`9qPn>3#b?g#t4u3(;4Xi2xP zuNH&wF^~uH!ihl(b@+rDQhNg^0>od=io`m7s|DEmSyklXzhlHb7$8z9ku2uEBmLD#o_p+|^l; zh&Rr5&4c*VkY78^p1=LlJ3(L`<$pQ=hy}q6k3dnr{nG5UyhAat^%?*4ONCuCR=!yB zqf>u-(!1~d#{F-9;*UQ0TLb^}ReyMp^4s4wz1?rW<9_;JKe^y%J2;ZRn6mjK`0<_Q zFKqdvpUmm6#`=we2UxJEmg%mg(8gxmli;lIAZpb$zsUvCzq%7fl7LA834e3116DOE zQWmIKwBa!lL8ME{GuQdNG9gxZP%AvH9bIq`EbhELE4T^qXke+3akMv9|3B8=uCG<) z*%tmlxd>5IysUl!8fcQ{B?-ApP#c1Ziqj_f^v|z=sB`VL|Igm%oQt`ZnnZ!_uC5wm z)aX*w`W;li@s5?7Ci=(>=6|?`M#iZhYU@^!`RvP>0+KQz+P62G#sy_9xoRWmfHEbR zoAWPWS{k41oTa??swQ7P%fd_flj3H@5*e^RJoYkmqqIZhv0GAC&cc(`FLXng%C%qJ z-GB7$eWY^*&*FwxVIMU5nQ2q-qVC2g`*? z2HdwtjfrsmL_X1@A%9EP8tRJ!Vj&ORJ*`Xz?}mVu0W7Kw0nLaiR_fF|5v&zH9naP6 zZ)_1Y69iPdA(~f^*zWRVFI~c2+6%sM8%`E#kNNSH<}dTlwo49H0H*XA6XY(-eO_tA z71VrY(H`+cw}A&kUBnf%|J-ev^KW?Ks8Q%gzF^{zF%O$KXn*mCjCIHbA09cQajpE+ zvvu)$iTfkJ{SQsUxOQ_lQR{IH7hCsKyv9u7`m^GrvVyeiFV`_)6bHC@yAxymu?viL zC!X(*R7DZcwdO;AA+Sz#8wW(FnQ(B zWYhfdb2x`p`!MW3T5SmkMva5|G6ulrwJ#b5t zwoD3YTK;^kj{|O~tUXtTZn#6e#z~2?GYY~bCSSqJx&iIr*MtIZxbbK6;n{PS>l@Eg zGoSQ-T7SkLd(uVzAEQp|`dew9pWeKQh#QLdlOlRKt7$EA?R=N1!6sgCCfJ%(tJ{ zMD_1mEr?6aiRFV)= zu3*A7FgJ5`dv)a-4vF2#B$+F{JAi6;^UtT5bB3>#cHJC`fEvw*_@uihI=}mxe`x~8 zy!3jj2DLl*6<-lma6eURzZL--_zVZv%KVbi#U~Ie%BXSOtCf?a)6E7=hrJd2cw>9&2@|Reu(3*{fztM z)IEYWPsB{|;|nM=IqJ=#BrhXX>whmpR7iChg34WZd*2agcdLahiFU4IQzUN11FCP{ z_F9G1l!sOF_O$BjUTBOlsC2g2X^cyie;0AHZJfn*9-1OU+A7(ZfANgghQ-83|AgW7v zvR`uDY*%N;#~bu@H@&Cx>9IQ;a@~M3nhl>-6x8az0_C}Gl=R2;ff_BB^OKa8q{+a? z2@bm15N$DLuJ2z-xhJaJu|ZuA*{C0%@&`rkFn@U5KD=#fq}jCW{{Hvw!j%(q~_H`U-T!lJc1-{^je|g|AxyS8+o@ zNQxRG!^SXb+#S^F32{Cbv-4MjINulc2PB511J3vCMo9zLGKBzfzQ4IA7-9pk za@hYbF<{CQ2W=TK1%D7zj@%NyHyd!gob;$qE3f!jr2JSO5vPW}JOx>C6{jK}e&1Qr z1LPNe>HZ2-LP3&8C2x|dCmSdkGsdI1FXQK*1lBra-YN#`-G=3FpVVw;O1B3ea@3q9 zq47>%@b==W&GkEzXC&At@QRr3kbFr&)KfC@OTy=%=nipe-G2u%QLB6*Il*VBiWufs z>5$Xu&nBrfX;eYnL}3l2=6j0>%@^z>0^}9$kT=L|3N`m=1wXiI)gs>i2);zUzZiKW z*LmC~Uj?tQUOkC%>pBeOEu` zXaB6Bq309kihmjn9KVvJS<{|}d|am2k+yayW~!id7v?&@iC z<79?!Jj@r8lI)D|UDXBimV1vcIe|o`Pl?XC(pVKo7*}o76p0*;2E=D^>&FVHth;K! zIZkph>3@6|;?%N8t3@ziU-CTGR8t%sbGyY9_fQ|YkT_S);#&6kutr@{K0@VCiTxNu zj4Y3FCO5y|Ln<}qlDhD*4s3vE z!ZE*Q1AdOY+x5*o%Zj+7LV}i@s5nlI`w2!by?>L;X?n9=HY%W%c%>k7g1WtvoGWU7 z5n?i`bXqJ5NzDDqGx5UZU*a~W!!O3<5 zm4EIKe`N~u!hY`?{5qd_12;^K5@6&X*cCYb8L#4C;HOsi3EW89D|iIX;i&0xhy!m| z`NN{X2t!6)=Y?OH*7UD+I^;;%RHIeHxO?QHBT4>C{XWKm0VgeAAT6f(g?n#t%~N~K z$IkiB-y8py@BaM#feVO_;+Mws2R9pPe1FuQkAL^dH(`S_3ck(zd7D z{X!;oNK~=E4>#S@HSCDa^OS}11kER3CuHLniYSrvZ6`&|Wx22(eBQ1==ZV@JW;6X~ zMvY!_gZ*5EJ0$&;Tx#3iM7f=FBgE>|xTJRq26n`V(wI+qpm#1f2SOy<_F~Y9M1Kl( z=A6@>d)(X-aWY&QgWNR62wM@g?v6|b$3=sfV!E+B{#bAKtex_d{R$!5`p7~?{4uVq>Wqh7A1Tel@d6$v*$ zfqu+yah`u_*eL8EMBQK4P&4TMtPyt|v@SD=2+fy-PR_n2~f znQOxQ$`LN}IZ>!7dmS|@67^^=lfLQFXNd9Zn={HyE_YXu1#ZhgY0hw*m46+(x*%cN z5>U@HS0#C-jfoq5TtP>pb3S)_yP&!LAc;vt-+2*TJP@s7u3O0y<$>}N+PFCZ)Lb97 ztV)DXjSCe&_-8G|xm>y0Jiv!ulG+Jf>R{Y(cSZ*yoV*S8tX31i?g4+VZ?TdQCBeJ>*E?H zKX}5xt&C&g&}rOoY}5EWXa-+{=T_xPr#{J1$KX=%R-kZFm=%|txPL_6XY7H3Y5s7{ z1?^4GwDeel$t%~qZ(gNtznGFe*An$P(UcT7NN+OW((3V*OaQzeM3J0@#&mRenOca6|$z-TB4el&`bKvJS?KD%y)e@q8o zcI*%^W%G6CG{lTfnvH5ouoXlaysj?^WWQqi$W7?r^8MH^!N>l*i*{dKIM>Ax!%vDu z73giF=Y`t`tA!v``%Y+t09`OQp~3t(@6vOx4(mm*T7+u;9Dn(d;^XV;SwPt^ZFt6+ zI&n2CnJNMSZAjBcUi*nnFl*^Qyt9<@ejx>fKF09%|pyc+7< zTR0Xcd4lged|0Dw;9#fk5y#8D{W-{T&6$fhO{y23;(rMxO3}lWgPN`VOB*ob1c#0F zN|PdlXCa7kSCBWTc}I;MYNjcueff;>9&>UdNK4~w2t35=luvU9J{9RSNziIWPW%cn z<@HL-OFX~)%$s^XFZsk-e(OHOqy;F)cw)XW&Fdd6h8S}O{H*4CI2DwomPeItgS^;& z8hhu~Dt`nk28nvnN}YVvY4(s4LJsk%NL+&0xMy#lt(|)+0@Aq5@AF5Jx&Z~5x*D5# zz0Xx5!;E9EZ+>u3_Ucg;k*7@nC-bhZm87b0QDZ?=>0>WWa4a2><+fD02V3o|l(na( z@xt2lvS~{OJZ45G;kg-{av8uItQ3_1OF*>0fcVJ%jCgf+jfi#Nz@K8?Cr_6ibJ*Gc zZ457s3p9TbsYi;IsxBOQ{rbQ5iU-(yzJVzvHSty114j)DIe+@GB^XWZ6qJs9#+P7< z7zawGkkQ(C=A^$#z}YU1Smyf5*V-I7m?e}Yag|6B*eBX*B4d2e)4rh|&QJ@DjaWp9 zXJLtzFggH@_oTM$9M5D@Ea}lYQ=^tN{q(sS$2osm&EMfYr|*We0DyeS>%dcwJn+Uf zWq#osC%p6eH`n})i}l04yZ)8CUt-pGFW-gt{MEa48SBfQ{GB5avjYY>6NueCKku+N zf|~X*RGRFc%QmU+uX!ThV9j}Q+B5#@LyMRt z=2tuPb^t9O{a&%5Z?`Jm^9}0j;K8%*U#SU}(h>(hh3EL?Aqgwwz&LsMZ^JXkzAY~_=68v7j`?MZH~Gq(An&!M{GKGDRs`EqRo|3OQwIg z=bv~QRj?sO$Fx!*>N5SB2Sbm9k%(Ji?LvpV=4-x!T}x*M&;^CKFzNE99`xX3V?V%@ zmk;sc`+j-)y{B5gv|U3FJM7vUmncrLXI@Fv1XDj1g7~{DPN0oJczf@y+!^PABIT-z z7}un$cilnj1+?v%LwofWx{oRmf2e5_t!^5YtkMGEM%P|%Fnd%vrIg@UGitnc>rXO?b47~JEJC4_7?(-12Ct@DJ{P*|c^-Tf? zg;-g{7$90F}!l-U3XV$7O6{laVq^_F`Jw9s3cq&t)WqjnM9rLvu z``xXA93Cn zXPZKPncpOXTZgm?7Q=p4jdA5y-EkbgKTl(r!fP<}Ll1fDW$bYe+fHkB5i`j|8h?Ne z-UTWD>pec*?dPBQbdPalEB^8?_ek2!1AumEp*a=j+)ya*a#bE<#OHu}*lU6wMBzC< zk_EpbMw@>)#kveGx+Yu4{^{ z@Vb5NRntDaamU=}r|vuP;rnTP7Qe?pq(5WO(0_l7!QA>ob90C)c?e7s1bvr9)u*H? zO8+O%JVeL3RQh?=`INyzAexRR_ORo8VOAWD4uonj-I)5brdsF3f3#2|zU_;iBiELQ z>DjwfVjlQBaB6zE-}Qu3+WQmYmT(A7Nfv$gTrB#nvh*>_r0?mcS*XenuZOLx>-)k@wzuj^UB=;fJg%dB{T$_Be0aK>V;&3YXB-w*F<%~?dUfO*pLED4 z9q0EY`!He-)Iu>oKvHa{z3cZnsrZQHF2u=Zai%Vu{rcCKJPSQ}JB(b@$QhjYac>0m z)FSed%UIcO#8@km#gI3;YTZOpyni#^>->N43Cp@YMQ-y9%1Kv7lXkO1enioRgqN5w z|8kam0qp;P%p2JL+79*!C*N9K<6hImV0;cYB>`Fw7n6Nk{svX%VMEld=`3@-;XC)~G)|6z}Rvc9B+xB|Pv%DOc3; zIp7wjy5Y|%+<<99!mTDqWldD#CgW$fuF1RE@#c*-az?3s1=NCPi;N!2+Yr5RZo0jf z75$)VsmqqK8%B?1xH^^2-n_Au))aqgc?{C^68qa>_p$@TLM;0^NaYc<5UWOpp!%o3 z^9r)ZxI6j?mFP9)|C%ooKV-AZx*0gczvIA^(;}`Uu79nk3wJbO&&Bnx^@!io?UXx2 z*eGWNkrghAvOpbtxB-uVB1FACg*taxrF}nj)ciwU#RhpsEg?0L2OXVvwu*mx%lE+Y z34MEF=3a+N`7F`C&p<(L;r>oO;s0VkZoWTv&y7N^INC8Z%F?F-aS+wG{Ae5Qx@agh z#(x0v1yMT8c{THCK~Un)N>!Xy0UBQB^z&69$!~;FFU*a~Sl6B~C>QX>os!;5?hJ?wsj5 z6(qR7%=B@{Ckd)FmNt|Ytk9Whz_rVlzdX2f>3{objeRQKkGEd=GH@xwnwYTC z?>=DESugD|@8tZ=R}TR#9`|o{3>81Gv~j7*Wt7_+3fd#5`y*3%$eXgKhDVBd**cLo zY5KN(RF3Oc#wFGJ(&~Sazu!NXx>K@mJl7P6eP+qYQf%#f5M9PxY2QER5xsibs)Z1o zaYCP6)BcC_;I~LzL^S6&+JzyeJ%~by4E+#)p9NR{& zL7$`N1ZwxS+8-)O=-KtTZaLdXLrftk*)1bA-v8=|TxoZ;xygTXHy5{=4_h{9oJ;Df zHh+CAB~-Lw|!Z*hEVPGWzF&p(M&y|=+gI#6<59uOm>7paB^RP0X>OCd_a0vM zRp*1r57Zzw*SW5iSqy3kmD3UsoS({wrOt+edTiFtGJ+c`KNUp)&cpLY`%C>$R43(w z&armkw0GX~&F*TwetCO5S*MaAw%i0!?Ofu4PZ#;;RPcY>^IMBrDh$U!mS1{oUVil| zPdxE2&BCSTL=7?O{eR{ba*@aJIZs&hl*bXf!q-6mosJLnVgaS^F65O_Mpo5 z_ifh%?CpQ(nGS{jOQM7?1c_ZdO)hXaf4jHHx%Ibi;*o$ZB4xRx)+s*o5O~gMca1sF zsC8<;BHaX_ovV^s=kiv{G4H3&{A`m4DDghyII>& zC0tMEL$BE<^iiOV=k{gj)=e;PqnXCOcl<*N-sXR28*bqH?_A5CtHDDUb0-lW`Na54U*{{$ z8u))G(DL*fufctB+7B@&8RJ&Z)t5&6G^Z|A%#V z^P9W*YsVNE21QOPb;+fz&q6DAOPj6R!!Cc+EvdkBLE9l&;b+<)Osu7A!o#lN6!M_H zwIMvqv{cdCKz{s1O5yzO?5nvl16qMHXjlld`54bVwAUxn%r)qf4f7MEQii_1h zYNJ=2M}&=g`&6f6j$OxW@}eA{*XTo-^4Fk#`oS0s9$A@lG+mg#e$AfXr)Jt2L3e*T zNZh`xS1Eeys?RjxTh|lCRP^N^lCJ$|yOYl=q!-h%2bBu0cX=NxfflI)07EqdI<#BeQDYr#3lJzaQ9FXw#G zT0|H7H4`j`wikH6?rOf$(I>967`<;5_5qa11y_u-{A!BdQ-AlM&Cf4&6jy(!(=OQ* zV_q-5B%th&>Mo#lb0>7$OLXb3jwt`D)|Sh((`LJdz|0gG1GD=3R3CDAa{n>=`#rCuOG8VtwPv?q$%|DsF-|Kd!mcm z!l-O>#J+;JTxx02BiEqH7<&Qe(IqbWA*zO}jEA7h0Y}T{&J6xo$fJMZoSu5W`32Ly znqnsY{Oa=a0gqCJKaSt%LG@RRofQ$)I-*B4LytI;I1D zn8IC%zT3(o?CE?(Grk9n^;Nr=$vvV{REEx31~d!(L$A`LOS;Zsfe_ zy~ommTgu=K_7sQ*}MOhP^-o zF7-5b^wOYI*#r0*>TD*@-#yNKKF+CM^9{1R8a!O#7lwPm0_%U0I(@kve9uqoC0pG@ z^X9Uj0C_Uao2$O=6Yv&rP-Cne_uJOh-PScSNoYp_Lj{(SFzc7u`vL{r~50 zk7Lv&pXDQjuLv5N0O+Bm?S)&+CNkm9(~zAL^}6q zuUv7KA!O=Ffbdu%Z&3=xOzYJ}w-0XrbbvZ$GoQVrydsb*#x3m1czrYNpTsCHKC7^@ z%NT6K=yB(wkQB^InpC+2^e$9#uNNjD&~V1{E82Xi;41FRp#>I3p;QmHvQ)4CJt}Fd@ec9^sFa-X~B^vQA2;&@a*GL{`&1#H~uZm z_sKCm-NJu>fem{lCp>jUqC#4!sy^F;pRcR)GA=^q8Y?DX38r}WM@t@6#>L)9B!H@v z=hSq$D7qN|>`N(JpKYk_^_GB)Df9_fuQNN9_f)qFp`WiGjGNN!!!@Wb_5!-1saW7^ zw6%wRnK3S0QssYwWVCr5_a#Y=gHM#Ql`+U$QKDZq zNhQ6(Rti32HQA|F>Q$8=?#C8|)*|RGgk8Q2I$g;fV>azCvEB z%F$MGJ;ZA^@^e%F9s2xTbS4wNP+sa)?;pCXi4Pq5MuzblYqVi(FmH45{kYcf^^4Dn zK;D0AQ&s|6#fbG}srD>Po>Nn}=^M0B&y(Go^#}XnUC58pe9X?o(V+E;D{%)!NSVG3 z>pt(^Z@VQF&SmZN34E^|vN4vQSyo7mW{IWAw2lG-<2piL6|YSW6!~80-B`axnG!fY zY8V4!MO5+gPIH%B)=h8!tQ$q-{D!j*f+K(Oy+Cu13>?bz%%Qk&4xwA1AO?huwAvcP;5^M0O7F%AaTIgik!`EtUR4po%x`vN_ptZGxgx&uEeTTi` zrE+@$T|`#5t9k1L*|`zWXcgWgBSW4P}@rBL;1lozZ{u zu)sJ%$$5eL*s(sp;q1lW{W^$`qdsQzg`4)c?|Nm56Y@M_+oPUF4C{QQ<>DXt`m|5x zFH11=2A)?-b(~$V?#c;M+dzW4kDR)_$VDyL+ELMYQ8IB7Ij^uTMql2$_xYc8+>=s8 zg5i0*&W>NVOSWpUH}gJ24Ux7YGWdTkX&%)24EpQwQ9U@e?x;NJ7u2+leIG?kO+jhn z+!Dk63oo3TFG=e-Qn>Fs2U{N?ZXR4YvdUhgtd{Cik1k`qC>9Mwfz5A-r;_yXW_#c0ozM)Pl zh%@DepbgelFD$Y+AiYOk*J&?5k2vx=wUh>$V8SBbpT(8x32yl=rIUW*CHu! z4Kx^k5YG{%<O2b?h}--wZPYdHnZ9a2@Rtr3iznk`~1KCu{u{9_IFat&_AG&ju5 zXBK$Bzpj`ru+KT5ndvVY2d;mc!P}i?+@Y#Lzc@-@(L_cUl-?^(m%;<>PyfgBq;lZE zZR0HfN-Wvu0C4qv-KoNLfcISm+5er+V+}Tu;H>7KKpOM?HKpeNO#gA0v_5K5NI(An ztkFLg@3q;`^uA#VdwGk9s>YFOdM=5$=t7=iDgq88u6b)b2P_+6)I)#t#fV9s%b#BB zvA3uGIqzrGetq{2s>^d2{MlXxt(e^il{POc-7+xk!A&HgS9Q6^d{AQ!7=YE{SJk}+NHD8cZcSm(j+_mE;xqS4Do+~9n zsQK0#^B#M?=*T_92ONLY1o`Jl2PlvEzvPRPF0jg{S{3zem>*+4Bxf+_Ddo%HNjLHf z_5`^aM;H;bRv0IqCS78@A>WxE3XRp^ITr9a|$x{pzR3GQ)p?y zKB?GrHzaBH-gmrt`J_BYHm1V9}89oEnn2!n14 zhCDSJV+~Koc*|8$2R*M~EgwSSJn}w&oc}47khX;C263r(+zS@E+>`m;20APE6T9`R z>seg2{>zPt+59fTYnXTFAI%3X5V+_0tq#9n?*M-v_B%ECsKB=uL2xP{IPFS`-cF{Z z5pd45W&~FE7V1rRoj{s){v!C`A&Z)kJ(k9phvP$uQh3l1q5UlC>*(a4dMdqd5AMHx zk+$ynQrG_Ky+Tbz{TEkq9JLn18lqOE`%Pu`4CI#gpIUNfmXqSJv($Oh+tbhcy;SYe z&zpZfE0>*i);z*hPOH-dL3Ame%OKgZ{tDRj~!Lx{R6j|NgaS%QnwFf#E zc;?vr!?>W{mR$KeoPRT-@BQocu`XI9hkb^<xe?(lBZiBipPa-Epaa9S8IO} z2&VznjyRNK(HAXVf$AwfT(AwBl1VQK46(JS(THm3DbT^uH+f3o zfyJm9SCWX6966>!2E(~lPuCe3XTS~_xZ6YTc<^f1ubbVVub-XgMDsWJV?&5w#h7CuE{yZ#_g-^-i5YHE{@sUiRq0c3di zj)Pvx0{!jKCYY)gYd>qL%YYieRI~bE#v(rOLu9%g!?r^}9dmxaiaf*A)#HC_?O4;# zoYjb1UC(DN$&~kn{J+q@bihA3@vs-lalZd`&Q6@EdxCyIc>WOj$zq&+bUZwda`5^X zUnB3Th)A4e1Uipo-s{_uhxF*Zyq)nq9bd!<3HtN9mE+;sxzmH0&!w*@;n{E|=fxXw z<0koU9A~Ie`LFBZ37%p3OBa8C%oo3Ko57=;XX>if=s8$Wd8NOvf5lLLoDXp^&4>Qs ze|r!1bfA&uZK^rFg(4!WyT$w~xAxK1{cxY@i}!Xm4rneZ2#;6~(frTqSwZd%8XCi8 z?A5a`r6Qv9x9Yugme@xR+INp{0VH?Nc!(>Ir}>8t89bCg6<`7}?K^)sPZZ*SZ0wKv z_9uG&^>rPf19jgP;v`Sv&K$%^?K~`d+TO^q`LcHLgo@+3~HSw!W*OL^rbZL_Sp zqo~@Q7*1c6;Lc}%pp|4{aQ~Cw|Li-P;cwOXf4Z*i_jMTG`uA(PT!-9wwK&RJ9_O_= zdVN!gq}c~B>S-#}FKmAtz|3UeAkKgJ1&nz{rha@s{?ctsJ=Z^bSy=jI#{ z^A+apo2#&9kzv|^aFas#=hljn%WE26i{7o9PYRro6SSh@`&HsR(!OQ1dJFepqp!aX zsZ6}1_2ldJ0K*~K*w;f{_!Q(|;A7}NL_B8TyvM%ppMIzB`8$7|c{lKCQyztW0^%im zZqcVDq)GdhQvM{^J&04;h|1}f%0_<&4|MO#x_$E*p9fTu!*!5)c&?2ypJC>#*jeB3 zL@4)4%>_s=Hu;}b-=yKWN%*MOy~YlDOZz$cwFB_)wTJj98FAD9@Z1M|;iV=Sv~clh z@?IEw$S%3W)n9+T^yh*KQ!ol0gG9dYTr*B2i%!pBnB)O5lF{Z3D9JaJJ;W3}E=y{v| zfgXwfcyH(#83^m)`19V8p8=}yxhbs6u`cDuZ(M}45?VM)S8dyQ=pHn8;exEg_JwX%4&tSmeCQ<>+`-w z4BleZdG*#EbeSQ|N zkuHcnp_^3|GTnW0W93_IH!i`xHW5)JDP8o~AL1^PeD5bWuvUf=70XQH*}x{;9WiHY zBr)z1wNBI&F@?3SaI94mLGt$OY1DW;tk;V3uyXbk_|DIYaXzvV=VOEV^}%Z)`}UUo z^~`^YU)ojmTMsv3DmI<`&;Iqbzs~3U?O{B|G-?tz4QeN0=VY@za-XGrF!+BN%cuE0 zW0fu9_3lxsGLCq&1fBcyqWf=OBT6#55HC^BS90<2AocVb;?Iw&Eohm8Y5Ev)5+s(# zQcKM-QA=g;!rex8HT<@A3$Osu|t2jVmR{NAQlXg1C`Qtnec|10+W z-2bl_IAKqhGl(zGZ}6Or@vO1GJ#T)k{ql$3_~~u_JAN4c5}b!PgI}3 zYp$rWqo~S5Pc_6#9LrB^>DE0krHKb_JpSCeqSfjh)x1mIF6qf$)2ay|X`W(YJu*^s z3asI*p)6?yB+Wzob&I}c$OB*>EDZHJ{^wJmdEXQI4FQ?_Lq?CiAAa>8&zGF!i;HpP z?{T@zbzCpUU*mhkKB*7$uv%pgEAoHttf=1A1=e`SGWCw-1IOB=REkR#gD$Rl)h+xA zIse76&_&2@ohYfSDod?|?42d7yO^9SH_fmPGl+?#ctA)zrd1RV2$)x3~PP|b_ryzx&!=12RPoUNz8 zcVc47m>Ny~d94dJy$6#yn9pBKbPLZ75cE?a&ZxsUs;l7)fm&=wTnXn|Uj6ggl1%;T zb;W$$8S-AKi*bMdx})~X zq}K(RgMvN(%ud#4W`;RFt!>vtb=SP3u@)*0ET^qi`AOTZ}94^Aw`l zAFRcd@k7P4`5%9qewYD&izx6Rj(+P;aL+I#n=d&1)KQO%IZ%@>X{>2+Kj!HBIQjng z;*>WRkD{R_zi@0-`Mv+TP|_DwFseEq*3srT%O#3z%y3_tp)0-Of-L-{XMsCscDcC^iLBGzk=c1e^z7 zcaQTw;BhTEq|BZ_Y9Ev{T=)Edcs1q#0psA%YhA>3(O^fj3+;=5N+8}Q9=;rZzQ+6) z{VAS;X&-+NnHu-;e17*aU4IE~{!@Prc%ni)kze-ZAXTpu-+&y&^>8LQ_Vpg8{^&s; zhwpXhkETkZUS}Lr^h20p+R6yzD*or=cW~5K@?3f0%Nt98Jk<6V`mrD%E>3~Wrak9t zw`|{)y_&b}AN0zgVDR-g1B>1n5pyU=U_US8+!KF96)xAIsFiRT&Sl{K+}9CJo?ku9 z0~gv}Ua-Eptl@i&KJGvb&#;$T4j5E$l7wNb(IlJy>85#J4t*g1zw_b$$+O&LRG|-C zPJGRMVxrmg*PSMnz%x?yY<1m|p5A*KUcB#{n1g#2)z}w-pr<&BrkCR$IJ<9d>o=z7 zv$}t_Ph`aFj-pverlLopuYy`(w^>dHDhbahDyplwd2$-)0&i39K{Yu&@*Zt=#%Cjb zv}iXCp}LL9nV-N6I)qnYcs%G9`Ny1SZ@_ieK%U3_X3F8u7Y^)~=AfF+d_ZC6;C1$` zS6Yv}QTeOSQvBlJ$DWemte|l|UtQ_|{&RmGtN(Le%R|eJ_9c@sMID;T@u6xD{n(9& z9u(@mZWeUVD8Nn(CRyc^NL$J1<^8;I>A$`|4~UnPSJI`L`y1oXWu+eBv&{%Ge;9PN zSD~qN7rKc$P^B0v{#+AclxI@AHWr?VdvHmPS|{m};`#SyyaXA%!TV|@6oyZ-in z`aK@19`)&Xh7$HuB7dPa&CT5!bm@a}PoRQ1pr7Ik`p1H=dVdVo_{p~|_9JGIdSK_S3KCW}o$ZyQ~oB#8_{jQ+&n8EX82Yt<$)1Ak& zm4xvqId=;vPcMFgkwkmE{sEwm)vuuyK)dmjvGP9K)`eJiD+scQ=EkS>mOIqML(1oN> zaCoa=qxjtldtsC}M{{nfTS1hF!*%iCD{)PfBZ!t?evsyn2OM;v7>{q@bur6$H|{5& zm^aBI`sHs2UO@1F#KR?=<>)_B{pG#(!vfJ48v@|IPOPv!q)9P+Dcod-j28+r$= zUuPetSyCg`4d$5-Eq&TK;(6~d)Nz3kYh~X&QPu9f9^G4nN4Rr;Oi9QqJV)n0j794K zyfl(SL7yPnz|QuFwtV(<_mI3BveTnJ)-zZ?81R8a|BL(-dslzH@n-Z1_`x>5W1{z8 zFuO5k{4a7Kv0O7Nq`j2hvC#gpD1uX4@U*cJ<75l&kV>4w6P)5ozlN1g!VEpLqdjO# z5zSxg++L^xG5I`&whr*zWIA|{KRK%JM0RC`prz+m?IALDLG(gKC#kcIBO&RsD0P+| z6mfc%)qn%7oCAO23fBNz;Khgo9pwoRVjrJd?uyH~E?M~;X!(Efto{c%jbDo2V9^t(Kf9%VDp7z0uwxjvQvtq2-;64#cQCW zN7Nfh;FdhR*8aEtG$##Vd{!>gVW0(3p$j8Z*7RgVo;?^w@4( zJK3u6DCg~1?1;bWd4z;du`7`yFNnl@iTh8Rg~zmRc}RzbT54P%4hQ=?-cRmPJFf;0 zd)3D`v2p8d(6|Q?I`!s>g#`L zQV390z?J$+9!E=37P}V;Zg=r7?~Qku(-KDGyCO$FDssqOXR90y1K%edAslcsNWi5>Y_ z`vtktci%jceE;3IzwWbRsQ$ZqxJG|1a4q-U&&6X7LrS5^C>@QzdY0p?y@8O($ z{^=dWVHypOZQg=DJTN!0UyIwl6UTgG{E=FW*va6fZm>UyT|VO>Zeq=L&oX}<%)YhV z1m@Bg&(ju<@K$(CibWXbZxe)s^vVQk(f>VH04lnulO@)V8=pnqP2uM zCnWh;2r9=^ArBxc>$K(+D(+2#r*th)myuJ#hhr;S6E!3y`Wu_E9d}+3kFOPhw(gak z^OOpBW%jYXxzx4t3l0(2Q5PdSu@+9>q;~xhOnazLmW$tXZnR7%xmcnCNJcMZ;vHqc6=*e3RR#f{!Fcp-piY6yL z4DUkq>}lZ=ttPOk-^bv1YdL*ZByfrD$qA_`6mQf~MN!0h`oi@*F3W7Cu;y*U9M3~o zhhT8Wn*nNSphqAvydry&0@g4rpGup|LAXrqLQvxukmWutU%7v?Sw7jQQ~r{D=YJCO zKM4!j;#|&W-`x+aUYY%>`;6oImt*K{&KQrD%g|!SjkeiEb2``%Y3ekm^PSdyeJdxs zr(h9~0ZEK^7y_kon{3q!)mdEH`64_vN(8CRbboRxf98Np-H+VEkj%G(h|S7UNJ{LKn6LTliDRvP2im~inYU?x4>S+WG2_+bugd0Iwr~v zQqK?8a~;>KM%hm<>F@P?(W>DWEzuvf%;JCYbL9^#WvgAa_qIt{Xr8*q z;^=ry#1@^JOYUZ~K?sujDfW0`S*>4(A-mr#$=z(hb(S*C-QXlS;wiD#DAwWN1+1w1 znVkYH5t|dhfv4i`{*y`jX?c(OK*;y63?3Nb_#*@L>?qaEV;va`s;saFM`%r!cK1GI zviKTn#3+9#2XwY_HTSSzK0YEwPYdG_lK`QzttZ;j@4;TfLeuclH@}{ZS3TmkZZl;JBz(sZg+8DlX&v9KtX*u4EOra zG|t3-(l`6f*X(y&qT3(z6XfTs*Kv3Z3Y#YWUR!?=#PtSiBY+-2QY_uQePpuR=^Tp< z!@5c$ikH3)VTT&H++X#qJT8|@rPPI&YEu{#`DP$_8OUhI3#N19Jxo_t!(_1!mBHR2 zp4$;-{F{&*o0_S{BkMxJS&u&JcIUXh9c@nW6tQ0BJdXL9-@jpH)QgHhyOkA?naY14L0Z6{!flX zdc{6}^PJb-_nkElY2JFoV~no&(>N_^>MVaI(=y|P*iPy+Gml!;yjC#*8ONKKhnJ@V z;p7l6_HT`hp-7X#i&F*@KW)%L&WmW{cq)I7prW82UnX1e5Ygg&0;0zyk7V`HLw%^+ z+D2SU&%XCm`+4=yeg14-W{bp*m*m~u?y@qQONo3(eBC;5l*ZS^y*l)@eqwyF@=t&2 z(WlKILfrGO<0$e=-|v?1{TqEUS6azn#s#PJq)M)L$~{Q9OhaN#ZJ z;^O&q!3yK=o%zCsM~M0Y{o9>Jm#%H^Uf#@nglQ=!L@n+BJqLfZn1NJ`5#&Axs3%;wX~T#tLR;3k*7?qh(I=G0RM#tMI0D%_$Y92lHm5^v5U{@ znG4+3`{Cd2n;|ov-ZFPc5)QUX2LF0o(}IGOsGrP@X1me|%|Tr%#MvGIEa1okAf{di z!7<N5O?*sF@Ebt-*TM5I|n9FvLORt z!Ok;!I@j~m2J<}lTsF#%qBk^M>6(H1>S)QU?sGmY43x`ionbw zZuD4n-9F3f9qv^9p zF9nejzF0QM+0#dt=Xgg!<};f%D+k9hm+^C%2PMJ~2@CpB6Qk$+AIH`Quk0?mJ#lCjBH;^1pYi?xt^XS8+A+jL ziv8JpWNP%ipeSwRILh1=)1kp8Q>Dj5J0qsJ>{gO|5yOXKOdEe3=nQe7J&o9ux2N`J zv7do&(SJg{E!y(z@H|D%B%aN4j`_irZ*{)!Z~xv<-283Za4ENHXs|O|!^i5?_&o56 znC!>S(Zm3kbsLiN-%SBsjefq*^dCLPWB&V(wY|q@+Q~nCrqL68k59KTynbBU-6O6E zQmcppW7=39?|HxE=ah^43Dje&;{LP$@Lsc1*m{4rpFJCl=FW_yawG728ofpe z(tQ=RXVzf7b5-RMRiU53T__PZyxPsn?Bq!9QK=@N!`XCeCv5e2 zm(Tb}?(zjlK8I&3r1p}6`JLix?CJ@`B=nEcBN91c0g+5ohU@=h**L6Of~+E{mg`(| zPk{53y4Zhj<#Xh{nR$M#ah|*1ytix~Ue;b39D)7hyM1G9au?5p)~7Oe?e=5z$#Tj1 zhGU5SIjSPN_L(^rj)>vlTVt}ACEG<7vhKqt2jx;daAB5PEe?y8io$ewa zix}7Oqks&3wi}N^?QYOt8e{kaxZRK81mtM6nu&kI0@n-~uDoM`eNG0O>~vk<=6w^d z)ltJg3;IZhT=Dhj%lv(1N19DjpKgP>Wf9O29XOF@2`WR4CmwBz_Bi&{& ztIWBuRmGU8f-E-D>dZ|pD5ruO|AexjALs*p?bTW~tj#lo zYQQ zuHB9YcHd~)AWpMy`aVPCtvU+uP}%;Caj=cQ)3Lp`;2qED=guH{e8HLvk)px%$z*>~ z!`EQEa!-%&)t{NOx(X!7dzUH4VDTvGxN7ba5K(tEB>jm+CZ~@+tZvVKU^rh(tk3Q> zN_afsWdrPQjMoni$8t+G@_Z!?p_*KJ%Ai)Ahxhyyy_h@wQ|B2jSW7;cAm?J8$?jKf z6J_~C`raD&y4=bXR)Y%W#bb9{+&+J+>2rXsxvQNVXzL3G>BT?&){Cj_6NbqTjU>bw ztS@@S-#)gc54@$*;K7!IexlSwqPAYzGk#Is_o&mTAYc%XRM{acl$MF!@G^Uw=*9_? z%7M5!B}xR@3H=Y72MV?b(R*46hoQHT5@c=#Cx6q7$=dwxXN2`3!x!Cm$KZcjsFUCr zJV?PIV$u!XRtxNxd@NC;A2r@o#+J?i zF(E|mhTCPOGmxv!1S7^}7BPQ44yb&rhpLho*V7f-6Rj`~^B=NPuckF(gDol>kD`9i znfHBtPN)#)443%wj993bARx-k+Govt z3Yrb1u;CqhNi8<{RR=D7%`H-jF-Ql4pf^+3gPQbI?qBs6!U23qBUOKTGWwgXRvF&^ z4$*DQj(FF#$t6B)0tcC4Z$v3~Z|Xm19i7pDT1w0ThA6$YX!RzI*YoU_t7declCzfm z<_6b!8&jQe-q$+z+UR2Jf905q7`^1qHHL3KJ&~*+RLjCEW#X2@#Eu>ZO!07DS<{xE zAHF@!%224xLmv8kirRm#!>IcezRV5psIRUIxAnF;LJ&{?qBrD)B<;I zJWOmN6LTMV0Z{4P!HWeR!2TOQa@N&p$2AVGJ|s6|dDfcu(qexP`#hjBTbsYBkds-B ziOhYXZ^t68pPaHL(Y^id>*eh;a&Rft!Y1I==I8MOg|dE{ZEH&J8bc&fGzF;WQn~Ly zbMUbMKS030KQ4cL)@xjG(LB55Bb$%Gdr#M4f;CGAG^pBTUtP+{-Z^>!F{XrTm_`KS;1U zuT263vBkG~FMq%iqXy*wv}G_gkk%xWf-qGGBHd133TNw~N2Nln9Npfp^R4!mR1JJs zJv7~StT_dQVxX`1J?NRL!T8NFP%rCOKUrUNg&!2=5pJ zW18j!z<)yy!}oIt!ek+cnq*selOoewB9+UqSDR*JqWpKNO|0bBZ%I{SZxv$<<*OQY zg=Y)Ilm^Jr2bq&pB6+~@0xRSNS^)1RuL7QZ>jPYQa5sFMYwh)$dw;IS!Z(+t^wcMQw!xg}WFS!kM-KYcnophb(3eJk7X!UM1!KMZVS3PXTn5CQ zSZ`bjFK8^yNRYt6A?l^tg06i+0?S*4AF;OBxsLx1Z<0y{AMN=ojX%+uoAsktf8KkW zOONoIH*4Ml!Tm!dYC>uUlpIBURXAl*g5mnmRP*DX1&?U-Z|A!De38 z)_G1YKcj9mlkm0wU0;pns@q8pK^>-5;i?z4*TbmxxjP|1`~=+k38K#a<%Pky4~cXu z$+PDv2#b~@z_47S|0c$JQiv4HHg?7Ey+nqSl5a?IErZ@OmA^NS~@(R&vPM6 z+@dRgqf7LsJ+B2|PplDSuo_!=NR;VCp+;NirRY;%T=aNouDu!RKE>$EyB7N}mA!Uc zuG(7X^R&hJh0yXHu4h-7P0zn~ccaaP`i3TdHhmFXXJs~>k=jKsAQ}&MFIkzCls}Wp zno#x1y{jKqR)=JU9JY#hjlEgvKI4j;&4$7jvi`Js`s%Ue=^FA}!t6nI*2UQuRUBBm+^pY9-R)uL#a?Jp{V>59|i0D zlMGKDll|zVURj#e+p(IDk1&khpDf|0>fVb2ei@5a-%@!S_ymX)`ZeSHUOeeV z%Tu_#|1>z+8*#KZ7mu=^bB~<$vo76#rscW}sVx@)K0he^9`6RIU5e}3rxN2BqxWpX zruXK?^_a9i(l*-@#Opi}-8?Kx)T9uE#(DJdsQmC4JW${ypNWtS4F};}#LeCwACF8u z95yGIjMIVLEyaW>_mj}3H{nKcFUDVPcE?a|LSMFF6!w}x$iu5A3Mg|K#Ph<$pC$1 zv7Url9~s7FnMYTT@7=EfH6C+oX%h$-Q#tFdP}7Z*80)FB^17d2ovt!}UUbxhjDrp) zL`9>n77P+4sh0M3fLiJUP9%RfYgP^fQBmi<`?y6N$p6jHw0)ZHqs6;Y43H-rDiWV@ zi&l7){js{#pl&$^k_*lIfs*{n$GTwVvDQOdyr`Gv^P8W6i`IviWY?cb5c{oc_tbaC zPd!n9%qNH)bQE8K&TJBYVQsDEkOJyNyO0DE20HD2x}7e@`H)llb>De(az>Qx5NQ$A z(r}dEs112g3jJvbRos7z9$zsekh+oNZS)a2df*>`5Tzz$Fn>5dF~E2?0JR?tU`WTvXD+a&Z3zNmPY}1>?I%zMl**v<73p>NYlB)6YzZQu zNaktm$BC*bfnm~rlY(>P_~^Ol=I9ZE9tg_)P!(!WA$Q0pGMaKVupur*E_>SyxZU!% z?>q1&*is>Xn;V}~{{8;UTDs(WUedlde!9&|vfJ@%`5SHSRbA?L9*tA1E5N$u=1f}V z9)%`^Rd#t_glX}GIm$^7Xb1vp1Ri`1Jf%4l*efmt?=De)GNvXVEn>@XjeNBp1ctBD zlUh*yOxSEHK|YUa#QB)q5N3bF#Rr+qyHx1n)WRE69LZ z-+C@*%~8`cxag89{?9oOz2F<44?3lkTle<@y%lJ#|2h8wp0!8y>swe=5rZI$PSeR; zHgJ8?m|SCf^SNzrt~|#p|KVqy;|8y;{5y9u za(cGJVp)i!}5WbtSxT`a^D`}cJn z!gG@%S0maekC6{AM852-5wvW&fs2JZ#KvX;N({Mw=}W@BR;xZJ0pW?xh| zTn}u2kiuc>avz`u2Q6bL9wVmv`LFy$>{t`UE6BwYK|RnaAU0xl4gVo{CV`7JsDQ%k zc(Wa{(x~hC2Mn;gL{Ca; z{1C6a&owxxuVi$_4F>9uSo0rTn;&b|_$`-z;)1u%0I|}WAN<_!-1;Cdxw88Qu4qqA zR@pEelZ>%vY29GaL(g{ZiF!Ju2}BeckE_68r`a|@9ruTf63`w~{x)LU5PSWg8}3>0 zID~>u*Y(?}8Afcm$qhzZ7lJ9oXg^U$AyczH5*@rft)Zg2#@d)I(yGZ5K_T&0-X`CF zP^ChbW@@p<=CBiBkBzFRvqUbpGW!5|7KG@GLh-R%Nbv`J@Y&D1Q-AY6H-DS|Nq*&( z6c@kZt1o5ib9MDOvwmtZV&CG1U(s^rFMOcC%?};xQ5->@IH2w%4DZHr;MtN7kSIAm z%4tCOYlu3%`d;4rT#pgN_Ev&1Rk*T$`SM7n&-@j7r~Za!MlxvmCsK?j(A#7O%vq^Gtc~gQ2(6NricdcyH8DRQzcZ9BEPnMCV~jhntpH`mnk3O6|el;Fa?K z!#Vgnyi-q~xjs2%3t*@GgQ;o?DU$iGW7@|)65GPf&t<4T>`99DLJ3bCIm z`X4x{qj=qNsz5aZHVac*@+p~B$3RtkhFtlqIf=Lt>CC#!&DuVRCy>aAj^BdQb$*aT z1iy@X4bBkxjyK#z99MD1g5K7Zg4r1d@O|I^)SkTf0Ib9G+k07jdmQ)9^^}X}FP?L~ zUwRprT0%cm{+*4a9K*vRp*)^{?*%IihjzB7AUsBd7+as+xy>*js2O8N2y8sj<%FQP2aISl4|t0YMv5_?%t%S!e!{Eb_&1IAU5e zI~xw>w8%jz897$2lmFBCdIJ{&dVcApZ3qlexeSJI(M2(LFk zB$o8;b?`r*Gbn~*YP^sZm}=;iAj4ED^$qZ|2C*h{t#riPAV?sOYLL!QTYzEdk(i{0S> z$U%_y7I7z-movwyI27oQQ@V4xoG8?esa)(;LS{!(>ygc-uLUT7QE&U}r9iG2JS|h+ z(BPCYrY-gc4lS{FbzqBB%7{dLG-f)%(wclzMnc8y((&c%7WFy3AV?bibuRf`Q%%unN9-bB< z_SpHtnv1fDnLO9C$3wo-&mEyT_mXwP?TnE|6J>mB$oLw67jg@AS1?Z5inZKa&dGq> zYOi2?kbjSniLZZnOZ3ihhKbQpvpd`j>9>CHFKbO-eFpZS;XM|HXKU8lXGI)hS%(Js zh`C3auZe?LBj7gCm0+Z@nF`QU6m{9NNtLVor&qMD8yRkRF<%{wU|)QlNg3OiWujM28iM* zw}zXo;$(6faHcIf>%E@VAZ!lrkyN?7S+}2#7^>tDBrYZ4NZKMnJbgLv+q%JH4DSOb zG+Bbv!19|PN-w^NM5Et%emM)Z{jCeyYJ01z-b(9#7xb$h3-TY@`!fzx%_4;!YS`-^ zFsw(Tb`=<{?;?(rz}#$Z^o7m6ob8}095$C9;VvV%M|ZE_kb^Z2gJ-O>I8;85Pi5tx zJ}h!0Dd+`ac!&Uwetb52wF$}J8T2c|le@0}dKSMfDmS(sNWLugC#>mYtI6T;=^a%C z2}JCF#79r}iV0o0boEG~%ohQOkm=Bi2~X1U;UCOf^SE4R)f3|EV`pg}#WR4a{eaN!Lo&|l1@cpc4kj*G*7MAYXH+-#!8$?K>coedJYb*tMFDDc^lx+a zZ{1+5cm0urpUNuaN%b(@DUWaRQ^f}^SBM3FQ_7!??mg<+%Ovw(um*Fu7vrk%Am)dy z9@re~tR;W&r$dj=Jddl^S8juO$GmQJ|IfM^H@W~A51np!ooDU(hS{?5oTIlcXVWw~ z7B-sUaW!}^E9*rZI|Hn{if)gy6<*7K zq=B(U5pp2keZ^n?_Kl|*`W2b}80M0#X4(a#hV>Go`4F@ikMr1Ot~rmAAH0z>4$i#( z{t|~=4=1NWjG09bu`H{**iNR4Xqv;?EeC65PowF_gY6$w)~%NMbEgO$e#S0z z-X5OAZs5a1hMJE$n=<5~ziDjpJL7eJav+;fEhpJ&O7bLk91;++opM_VLR8*=jZ^vR ztgQe!fduZOc3tx$JR4kle&h3T)@%LY*C283Vwjv7CC5}J`&Y#|QOaw1o*9F8?B}D; zyT31K!3lZ>D*oiFjGp}vNuZn|;!io!Irx4|yvo}=;Z|F{gu6&0elyv|6IUe*ZzlUAN9MPDBe?KVZg@?9%ES3nJ`-nP`;gThLGI|xvZ>@X z?A*~F`?+@7eFSd=o1tlcaoD*2i0EJPOI~7Jv)r(OO(f(Y_DWP8L^dEUjxW}AgZe+6 zht=bT$i_S4R2tY@G(tY>8J8WVGQs<}JtIY8ba*ehF4od^L8LMph_XC_sf`|1u*V2$ zNCz(>qZ0d}JAbXU4Xel3?kCp1t5@ibPs1**QE2OcBgB}CXT(>3{vHoL{Vj&sm%qa) zpT|dfBSdyHdG*bh+KBrxYjK$|tWylI^_6){=2XPFhtJ#nj5-gJ+pMh{-@eZ5o9CNl zfbk8+E{DJpM=|AP(GRyW0@XxnAc*nr`bx{e>#M-=_~Yw~B`CMkyuP1?zm5aJSw#** zur~)?n8H5C;_b>a` zd(>B&@3$lDA%hwwEA5Oya@0+L~Q=Uz?iy0ncIv+zk zATzt+CbyJ-rp}!qe)ti_6!m}3{m6^P>vvz}Qb)SzBAjd3=Qug>(wv?^368$WfhRfbd3 zyZXXS9vt~`x>xU11)L~74)W>REm8Bd+EyTt<4mM>A*el+GJFs zi9lt4gZxNxZE>FFo6QzArU*766Ap9Btjk(9`sQygPCu9(a{qHX$ooZh7b@4Dcb>N9ubJ-=f5yHnv|@Apol z{uJ)P+Uc${-{5ZY_t!Lax|?{^}9$@BT1nHdd4B`^7Ab-mNpG6}x%=h(kE<8^;v z=P$Wd-)7=Q_d36L0_}hBEsQS?`q=o-94f(NG#X-U6S6Uv&*-~eqSh^FaTJHcDqibT zl!@V!jpsGi{k(O&u1~PH9z*&nI8?;7N6wf9Be^-$Q;gSCgW5cfzX|9nZ7jckQj$te zI}$lH73(Ya8DnFvj2@#yv9|E?INFkXcOEM>Udv$+2QDoG!Ln(A-ZdVrN=~BCt(HrN z!l%ddf$PHOT7~gc=a5;IpK!?y|9I8uVLts;V?5tS-CTp~Pvxl{=QC#LvQ82S|7YXe zt6kKRnBcvj-Ogb3_)KCx_P~9A>#jJRk^k)&_w;Y{E@h5-m!prv|73hGIzH#~g*gAv z$KenDlE2qJDmrj79wPtD;~}m;$E~&z#wVqSKqAL_e6$>sS7AdOnk+V{m^~oo7??W{OpOO`41nppLhqOWA+#O_lI5@ zVw~F^uesCZ@yemW9t!#1@9^3nu_A#i!BoAcmc;p;0>2M!#6v;FyuxSjV0)Gbl+ zKj{I(@h6ArmhYF{UG#>2Gp}>>b^D8R_^0+znK^A=()6ya?{nYe?EmE4UdOSNft^32 z{47z8%d&R9{7w8luRM=9t#*tRG|I@$*&XuBdf_GCfKonD2M=W6kEikF%y+|$YS#*r zZwUbp~e<>iwTn7}pXdL-qkbOZOTqGpil!#sD+J4@=s^w~?5Jb>Gdd08?AJh% zANvDe+UB``c>Z1X0sVn5{f!S>_A6fZe14w&I@hwwwDyP;UC@P3zWNRU93O#x-uIvy z(vHm|-W4zC^AunF+Mb&UREiR*##D|+BQ0D|O+uqS@_^BII)Qk=dZ=5sL|!9$uty0X z_7w4@1yHiOtNzqu>_K3j;1i@!gRmJ9v?tN`MB9sh-5H1ab&c;69Is#S$#`eBwF?FF zc8~w3n8RNX6$l2`7++A$iSeCAj@tqFf6QyZdjKq2>D*{0p!libV>G;t_5UiL(bs&e zkMR-*IJEVG{B*{me|syH*(Lv)U9{0j5}dWqs=k-}yzg(GlX9J7+b$)tW&{HJJ7+-; zVb#=<@j-9RyR_XPOD{?70KkuVTAYU^;t!>mqzvM3x*LN}I2elig!);e*zxqaf zfRq0+Pe1goZ)4@HxBmA3z#56y_T0$XKe6?lWkq@BqMI|DX)zBD17_y-hgRk_F6hEK zz;j_iklQb<)uJMrXtnnq25(GSg3G}3opI-XexpBaKpo;N8>gB$J5l3WKal_08|m}6 zy5e#61CvWzP9?rtmv;|Cs;kA~P!k$UKS)vP>g-<*3@@Ed+7#!lN?{p{-b|M8QUPSQK&JguGnP;MS8)1 zJCCD&hmWmA9i1cl6tvHoX9wq@uStcwF^5iljaNz!Coih$7O2z#sXWscwfB;~*L zzs>1L2V-*J^=gei?2=$S6YNyYrShFhH0c3(gt*@pAZ^eQR#~ao&IU>s>Io$N|1$PeLYc z^@;Xp?YsZk*)ijgJI69{l^79~Nk(}|*TeO7y;RMRmicgkO(T^RDGPpotO;fs_02vz z3?mjx0}myHQ+uubaUzf2ap1Th{Wz7roi-077Sbh$AcXUpQ{z6~-8?BfV}&{=9CP90 z6IC&V`bn-25yx6K>X;$dQszl|JW$>}ew^2Ypr>rQg50L6vSr0>erVT*v@^V?2JTV% ze1rPP*EKUplp2R|wsh8idBH#suJiX9WGQ-dy9usMn^7$`bS+mCB92s_z2-qc*nP0D zAKy{XfM=m@!c{XIu|mN{ZhpfDL0ooQf4cHM3I{Ipd%dHARs*@MHT~+Zepw^^Z(RK! z{MWBI?u+h#Iag3a@2c~0J(t=TLVs(Sw7s)sO~DHMH|_8P_KUKAV>s6v2HNF182hL9 z>NVvKRSgffOWK>_dXh9U=Z9Gjbyf~&g!MeorI7WP*j(1k{WyYp7BLKvgza*DB8TAU zoe+!HQ`sLu@Vj@Ije3bZu3abBb1(Ma%4QqOt^ueJPXw`Yh_c4G>*- zUK7(HL4KZ`QYmqNJ)oESJxryh;-pp{)QE2S_ITfvK82=d$JUcqYUH=2Wc`toIKcI? z#5EdZ!5G$o?~&o)ybjOz70J5Cs`V?S0|*s#&iP4yqQu?5(zyR!PCxxuS{vWFyBg3K zb()?Va_aw6ZH-kYE`R3kEgv@O`k0@8aTrVJ-S8fN)dT;3_C~mB)EJDVb?)o7ni|JE z`L1E)SI^=<)trA&F~~v*wj3MnnX2iFgU&qhwql6g_~U!0eTnbswL6bX$L`%xYQ_Qe z6HvoiOTkW2$*yB> z)$H{x58-9h-z&^HkekPi?uhO5BqhxgWuxnup8Y(0Wn1%Z8HD6C=%U{b)}IIi-TEY8 z4q^Nf?qV${)e6rM9T?3E%RPJLp7Xc|f%P+P^6BD#zg>s%HHZGKUhR(@?pJNDjd%SU z`Coe`{N?%`lIIk?9&<*aA+WCAvTYEu;SXJn%u~6@?YE~LljG69(t=r? z(Ep$l-Htc3XTk1#CWIAo9U6i8V!&`N>yRAUd=O)_Zs*i}Q0#!ZhTav|SWJIWIy@ zwNI~KYfxLeeJ&@*=ZfL_Tr)d1ckHF6g8An3hZfZ>?sJp%4EOe1GjA8B*{udtAKybH zTU<6I4ikvS1cjP51u3NiM_#sGo9LtK@ay+~K}=o9Gopf0NKW2RV{R92ZEj`$x<*X7 zD-Js3B=Cp|Qht9JdFTA@7xW>kV2A6J<%RD+dli`n;`|7rn`M%)7Ui@maSq zh9$;F_i|>bYfbeSOyCk*x1Sq7E3#{jSx*o+6vU>y4&#b&sWJ|`(HHQ&w|jk~av0Hn zS?|Ns)r=}HLA{n#+n&M1wpbUWEjoKIrwGqPe?@qL&Ge|S*H3`C-7btXcot`Wii;<5 z;fALHyY9>L*DgNOH$3WVJa(CrcK_eR?Wc<_Q?WkJEuIC$QNa$B^?`gJpH3b7v6wh< zUQrb?$$}RGc@~tzF{nV=Am!{-?077H3s5!rwC|TrTl1kY{unWZ<(IskLg9H;by(}!sM*~nr5YcsTr!k zc+h5FkE>Ug3P^y%;Rd&YIz8*tNW%~{bOY2-0nI}W-Of;5{vSBlFNUwjgVD5=kDZ|$ z{5{Y46nm=r0}jFmojLwmyN-H!GCt`?{Z63q;b=8RGx`4OD zq0g5m`HS&6xYm{f4hb7{Uy%QQ_|M0tul}OXJQe?NOxpD`2fqXN3Z5k~c&GvII za#(}wG~Ap&`Xm3&yV|+dnQ?#KaHq^3JpRGYy2q4gc|FJME?i%=F8h!aj-4*vK-yLT z#h9X|a2bp^^PXJABPQeSOEUO;&GWs{=X^VUIpw=k(}7J??VOgdr?Jt0tqoDDAW`;^ zK!Y{nVVuA;w}*ze<7~aHpVVL5X}}F`BCTlz&f_QQ!)Ot|oSb*~a=sudBIApBD*DX^ z7QPx#CWky?JEt`DXY7>}H09Q}&-GqBPf9MC8hCvWq}58a;|Ml7C1XsjPS%`+R{B(9l< zC*~cWdb3``&7C(o-i?`}q=e8vmVI)@#$`0$|1bQk=TnPJljfa&N#zpd)zjUlW4Htm z?sDMwIHqvScJpb?pQM%qwYooYvRApoWD#~hdd@r8i1ml0`Z|(>$rMG9K`uMGMbp zbxTXIuaSau)9Ls7onnMqs7AvV??M98x_jpRwb%w_^H}3^DEFxBrxzHsXO^A}L2-?m zQ5dHR(owa46b};}I2;ZZydZw`;s<}goDunXW1_vIbE;zwg9&^Ag;-LEk7A-*9h^^` z$CHL9O-5H8oc65WiA#QUJ^!yf&AW?k3ToN}*Lv=G{D11I%x^ya_P_i!54-Yojn=}+ z@I#LQIrHo`x4xIbX_>LNd`%7k>Z&mpM$RIZ<2F}+D-X}Ik`yvZ4#AinvVb9fO9o>? zhO6g3oow_l+$~D`^RWq8k#eyfZ|||*unwfFg^hYzTxaNQG$c^_gch|(LN8J7bC8Nz zP(nN#KuUCxET`Z+tHBeo2C=VdA5>vG&XkMwVB^?FtXIs0MsGkNNm(sGVho+zc+YfJ z^*QfATe%;e_<;Pow`+>Ar}q=cgHetc8{~DFq3WY)7Dp7PD}>kXDFFjrD^J z*|Vd?TG&KpE(6~CP=kpT+k|^>&rIIcuctsyr}fiJ6fu!*^q)N*b1OuyMk#FAqIY%^ zlKTql9Y5wJOf!nQ*95!_=f{~T{2uz#ki;4gB1fHRT(o+%?+W?qLvSis{m`o|<`;c` zi1S!y-=1S;ZoX?qMk5N(wqW)fpUiLmw8?>k{@KseXeRyQZF=>uGy1nG!(MH6?Xg_E z?erowb)_D3s1kEW77nN)ov5PARc7ve+-Cqs(d0~RB; zC7=2*P&C?3_bN%7#eEqUtemU!ZemUpR zcj&tY6>031p7+x)^(>nOm!4Itv3=#){93c}z1g9z95P(#fJdx&_oL4K4LvvAgG_NF zP|yiaRjVVDAYQYQr2MdBX`xh*a_>GGrXFj4$ZVsN9&-t0)(ua;t9^}^x3#Nj--<|v zcTjQm32N(5K?*i%z>=hSyXeP%P#sJ?eNQiY$>1LjwL3m#4v~2NRd1j;bK=7~Dk0$q z=zM##w)HFIT$d_6IJyxVi> zMz{N?4tKJiHp?(+PQmDZ@dZ86{6wN>BIjjO*d#OSTC9UOCeAxkkB(4zNHp>y5$D;^ zP;$e6Pw%R?0R1a|>UYy8xa~VEM@&!g><|v9UMK@-fHBPuR8}dxKCItYsN%z zO0kxwYzdM#<8coxxJ-=(xr-OX6 z<7)a@<|BItv-kKpwhx2(JYFSsx60RXRXFKzu_^b;S&OIT;jQav^}INIc}OsOE{`WS z%9lxew{h3i%X>M0bX+T2)ucNVZlE5HJP~PC=85Bpmtvi+{aSqK&Ga~V6`f%f(Ge;j!tj)796+g~9N&eVZ(shdFzHpWLa(?n$eN!|?Oe>qaloaW?mE^jdU|o5y2x z7BaVz zKV~yKF1OBGR@}!AJrQN=`($o=kJ;-Y`Ra|!>}aQjJ+Y?rKATUIuc@@Bac>??x8Y3N zMK$f%`EFa$*?hVin19@VucB=?$(^OD9S(UqDnA@i<@I4R zzMGsDY+Kzsvv{$Nch=qVYtgl9qZ88#?;0+AxXg;V9u7wmBIqdi-M`Vtb0?71E8>Vv z(FFfH|LZ8jTSS(7c>WxD`oO_|Mn4uy!S4I>0nIf`K-b`@zz4!fDIvq4jA;B^(H(yO z5rE5oaRLPS0pMT)T@mnzd z1ZY^JLE-QFN)*7rb64FHjji+h%mJe34L;ulU!QTpH3X=^_&r1sU;Zs{h_X6=$CS(c?B-#wta z+x?&SdFGuH93WTen@AL{|EdnPx+~zUq14kflHzt<3vSl9oT}2@_QuD6}1QVwl{_B zZJ_gRTmq;Z#=wd(28ie$S9n5k3+`zDIckptm8ZV+LFIq*#`4L(e)0UHKZkJer#*gf z=%Boda6$jd-%McVf8>`Q*dP3l^6c_|H|77p-}-*x9{BKc|Ditc*dF)uT0fs5yf$mGxZ0uP7tv*mlzT2Zl{f9#R!B}|zoBvzh8qq6n`HA29 zb9M)y1D^Rezi)l+6F}!Lkv#44RsOp^!7u?09rvreApe~_zxw~tK7X`__QCmo=~Mn& z9S#Njx!$wd=2c=l}S3sR6oM5#6i{5#IOg^F2Z@Y?-U*@B#{=^a7^* z4+YVZhI{gWo0vcNx?(7VXj)*oTwL@V+j8LNmk}f=0tS1z=HE-70sp#b?rR=-uaCY? zd7J3}`{0d$z&9DEqv-J zlrkY#cxjf3DA##um&!$M^wL|EL~iRcW|d9uk}|hdO}>M&UDZM!`f^-VLLT$-$!dZ; zB_+XXu{`r7W9yZo=MBeyx~5{8jmUbd67kKn`lJ%$&B9()C3ic1O>#AxhHH7>sr@oE z<#14^VA#$;p`nzE7>0dKrVyg|Z9QNcpwi$7_zp|~OQCPNC!9O7fRN2&p{)EWJ3(RE zqA7=R(UxK+N1*6Sag<|FJWBB($8@ojk|?KrL80A@nOp)}=H(oJFVU|Sxd=+hmUFo> z%L}VOxeiL3RaCjAOV?I>xmn7vR3^El%d}LEa-Wygsz&l1F9%kguan7BAk6{>pMq?OWYafSzy5F+qakx-Dla-HyO!aYA7zd8(x-2vHB-LFr@3i-*n#t_PUZLtLGaq|E zRhLXl>47S5new0;s?;($rEgU6%=iawQaP4!DXmcXB?AV3MNzjnJsQ-L%Bb{kq6P|; zPN|$qv6M=wkczGeNar2NJmcAz36h{<-I#Ln#l&JdUCD)s2&0q8_~{?h5F=X2_fMfi zR99a;1)Y4RbB5*m@Ba1w|NV|z3t*Rg0?_k$FO*q`7IkOWofp^`k*@(NvJ+6C}>Woa^cPs;_o{FnegP3XqHl3YN!RI;rXS`0Xi zh4qAD$(xWh8Q_>J+6kb(Kw)v_$Tt8#FQ7Kuj75uycbW@)dw~L^ClphsHga z1QgC>%0iA5y&fF$uaOv zT>sFDE39RW1Fi;mr5RQg@P>(dl@$Zg^73K=dgFUa?6Z6&TeIr3X!isR_o7`R2iexk$p$_v&~8ax;M!7cc<0O}PMmewUSFpqsCUHo^V7yi`Am83GV6 zKsU)V4NX}3UQ-8Ny8!zU`u2GN0t`a031!h<1Z2ymeoVlfWZumuU^ano<`YOcS11>_ z8teq}$}FZrMm&7*m_RUn+musRSWjkJnkBn`uq>ht1s~2YU=p0=pWpu{Amo;RUZCBh zn@weNWmQ=o7GTdW9k|T|%+~da+=Y+AZJIzP>cY3ttusb20e%!$aWRGZ({@@6BuxW~ zxy~zcQImwM76TbQlW`gCk_9rZAipWY5x{@}FwlUcmgK_POa7(jo(q_q2cn$6`Dze< z1VYnJK&S%V{?@afro27_-Vn^PFDA4{i2w30c3uCbFD#SHLP6F5RM+zY`11l;FPC{= zl}SDizi#71Z3aXJ-AaQloSxC`I-!i(RVW%i2ApCWh4qmE|K~h4P$NFz&F9xf$bV&0 z;4KaPnj;skMExIB=BOJS0G#E)K(?lTO7`kOw#kKC*B8hYVG^yvV#$R^{l`Z;Bf-VQ zhquq5w2Uj^1R-aS$Pe>;g5b-4AU~m7YlAjgfUoB>FhM}~RS*NqBtc%b=maZYnWg@i zpgh@x6-=g8TS8q=fM{eYe@sxHY(fZZ!uY{r*{kab8sE4%O(w8S!V4x#woTN3uCBW9 z_9L7C2Mt#EPfAbdd zU&sa8H;>Z$dhWFghZ_>o-y+!sw14#-Sp(;HzXPyDeu&z}6k+)GLy~=;A(e4^0q%JL z+yR|xg5Un@X#nAh;0fizmj)|;^Zvq>s88V)>O*D;^T#U>0E)c_bzs&C`(; zgEubO>kGc#c&2H*{DmK<$*kYFx-Qjk`=1xsLd_HMN@foW@bFl+1=MeUCyhlr;Nefd z^aK9k`mCO7BY=s8#+dpsvH0{pH6bc*n@(()BY%-sFi${O#g#97n?x7qfC(JO$pozB zS-}L+3JnSqppiWc8Tki~qFy80xPb_Gz@z!7zvq)>$|}QZ>T4?ByzKP_Sb+2^HI0Wd zJ3S^=pC(hzk$wuhzQFo_#v4uJ`7a#V*ZsoR^##i}-fJ4VPZ8nBzWEzh%k{hm`||CWnTYkW9B{gF2H9Ot?bcUx@a1BstkCd@k@V?UrGMY{R~urlUMJ# zoP`qFQOngv_3o1;`|isV4|Cr~ZDr&#$dYJ+7f>+h2MUsGc8s%l}XGU;tE^f1>9X4utXh#~5$!p#~YjU9oLKPE1jv?4ibF`!W!Av^!mc>=v8BgF-{Ms;S%<&1nN2fkv1 zS?03^yyru?v4cT>wN^ty2rSn{?RqZP1;A(#-t(=A=EUZ!jQHXJ zWGC?8RMJRp0fke=Q@kdr8KV3au?58avDdm?O~4bwbhdOtg+M1tFLgGlw>a zr^Rfcbq~$@J#VV-xfxF?Yc&cP5Az9x9GXY}#>a9rH~*}EV<2zjA$wk+YZM`}FR-}4 zno&M~&uKAW|Hk%(ydpkkE8}5thEp|xJ7xzOcMb91e3}v-}9lM zlwkgD&+5W|A-}=@pkWsljomEBt$fDhO?cdsP{q;eUT`HN~i^$Kd5nN4wqA*2534InEef#jz0q>&m&$RzS zXNv^hFQ(pFWv<~M@64o*U1d|lrYe>i&h4{|H_xkbg`@Q(K z>3e?Rmg)?rLS~^T;~CT4(c~01+MxS`5{H{{q)g-M})PiAzvk*EFz4W36MOaWfRq zdP3$Sw2r2K<)fzw^p!b61Noz~t}6ggL}`~rY2hnvuPeG<@|`YG`uq78y@b-<7yVcI zi~cM9MgNsv*X{lKa-ANNkWUkO56J6C|4`F(2ms^|onNjSio5znHz7Iz+&5jKX*vLZ zrtqe_+K3LoFFJafP|VaXx|@c6tMF(I2lIE@eWwdeli~OC@}2&AUVYK$C|!TizthbZ z{X5-$(ZAE(TMhtuBuB&|y?9_Dn~F7(1I^EfZg}fue$##F55MS^Fa6y&-Q!Dt;Y}BR z>HkfK)}7#}uD|0(g4So;GEV&4cV4J}?Jw-Sm}s3U$zOdGm0=+Jrbp{x=R=R9HQz7& zcb_)-4}Dr9{`mOE*g?JPu;;}@GD&a0KB)|Zh(5Ew=qFxAw1+qU5A8HfCjX(mA=#Y| z?di?;&psp3i1$uqAb8Uw**SWgcz)Dk7JSjqi&;*ON#+sn4{eU*c0RP+TaFKZ?fdr> z_NN_-x7^2XyRhP$_xf!YIr*ahwn_Px`$Ma|d4FhoB*!o9x~}Z<^}BNdK|G;5=W!3c zn?Z9{XqNNBiv9vt%gnUjwWDUDcU7G1HsuSL;@OV@{F(C-jRz1{SLA2v_)c#ea3Na` zF91?@dg&Z6`(ZMHY>qt&nGFGdm{48-PC&80F~=VKw3zVYaew72j5}FXjPoCcQ7t+HFB&>)5 zU&TBNczc7sDFWX5=PxQ*8OS-P0IRd_CGLQ;?>P4Zg)zDM0EPQaSc4=-a`F88f3A7; z{oUdx579o0{ugJ_r_0AL&H-Kq%P-CYR)+8w7XU9qce^oD!g5v*CFZ##^HAEkna7J{diR$6e{hAFB zagdn^=h1hk1_(L0E+~99j=o3PNS+W~?J6#){EEU*#0#8(WTFrgPb5#cz~;OD`Abi5 zKzVzC1^TWPnlJ#u5!GuTeKqs%_)h4*i=O*vKNHUb2YtV0ap2}i&IF_;f1Hqj<9|y@3<`75nzx_;f(epcy(5Fb`Pc`a) z4bmrh|9_VwQ2wo#B%}Ph9YXV~zuM;4_e*Di%U8esOTGGE__7OnfBsMX_N|X#e);Kt zCLars9wf3)^xYqV9Q}XmMIrhW+2c+3C6{SJ>$@BdxE}dU|J^?pl-CyUMD+y=vQLft z2}gPDM^5lZyRt{5w}^t2M{hb4?Yljo=h}kkbwqbZ!M;-fH`Kqr#=GTD|9t}fh{g~7 zcmEKF<;&0hp}+lue-M1{S3=`9izYnK0ycSnLPqiWRrKnN#gQGKs6Xl^SW&+=0E(v& zpnF4Tz}4WK0|sylxR~&azK1tCtAcY6!GeftH24Vs#i?I-8Qq7?=DY?>bUu_nqqBuP z;4NUG7%999EEftne|_JpZt@YZCMW?uMX|i41?>prp`7y-e{vedk;MvZI_i{z946HVUddxa@x`qRS-;Q5HUbN!)g&t5c06jMF(~E#uR-J zCTI{3LD<7~5fc#Nu*=08a=zXaOAzVExf&1!@V zN@tK9k@H(1l_9=JE=XNS6UB(b~kP>lQuR5m_U%33z`QQt$_|;gxaYtW2`1ZlKKXCd7 zH#lAiuF4uy=@j}Q&bGpuv?nTQn=X>fpR#+?{3+Y8KV{FZ`;80mBb&>A>NXwygPZve zE^r^*H9xrZ!{3#c4_@&Vq`etpCzj*S&&%gLbK8ydz$N$Rr+dr&N`zpe3`?#O>v7BH0SAUoO#p@6L@t@lJ z_>cA5e!{2yHu*=tO|O1%i#vnDu_}PxOf33Wdix{2{*lJN(qH@xC;5?=Fg95U+h&SI zuMXN14eK0@dz8MUc+xgSNO?bfe~&Ofb$e)z_`*qR{^0db+j-olt_u9jpOpR?JCgMW zKf6D=E716v<7%Gz(cN@X;Rio+ADsRfTTCaXe)0T+|MU&hDc}eHnFDH`{+UBerzSr5 zPutbP$M;QFCVz0VN*@G@BLa|bB$?wN~kWvf)=~=ZKbI-E)Ks-xqP|1cO_M)y) zw>5t3L8?cjjBEFM|J1uawC44ub~`0fa2&%=#ZFF}LhSWxe>leT)3m_E1(ibHI;`70 z9qKvTy=8)i*sOj7-aJLeIc#)^bXteguyirB)l!<{^R%-iKJ;|4qO|j1Xoh@f(mC4} z<(lDRs1Uaxd9dWv?~0-Ca=q<};r2LZ+Wm3JnK?haC?dF=OpYrtChwmkQ2XNz&!?^z zAeW7_lwlX8mRf-+ePxJ35?%<}H0z?e*D>$o0LYPge-vt9mnsc_drSn~um{Zr`N?kA z%zv_}B{)H6hSa|*z0)%J+cbIC{y= z6V2L?Y?{EnYipDxcl%*m_qdxArH;&kU~6E30?5?LuR}fwqG~>SWk2MJ`{0gKQgBTb zo9qCcYQD)rP=Tmn%L=TEe^tSFYfRn62 zGtaf)s{O^D%zKdQ#F-`8U7nQd7S*5{l7_zU!GW8B;q{eKI%D3ml}gxtIV5hpkS&$w zOc$(NGvZSo@0r-1L+(?>9lUX{RL(^2tR)URtz!DD*w;)RcBn24wFs9z?<|X{okntC9+z$uM>*VwdA|0{ zc*y7KWhQi4o+u44qqc}itC+?#$Qsiqf3I!p&xc5}m2FIKpO?@uWnLHYnue64%X_eh&^5s7K~pzZTLg?!0ry_X(0$$aU7qv{f3K}G z)3qKy7gZ!xG11=LI&3kGu{`b^-vVD>7H7=0Tp9NGt|t1k61A3@)Az_T?4mDdkBf@l z;$26*-!0uFh~6S>&py=qRa?~u;ik$h7Su8DnOkM@6~CK&oxLxn7!3Uo1^e#C#_>?= zYzBvm8iu-nM+}+M)~nHcio%Q@e-b*yMX50hZ3r*m9JYb0J?^@F&RMmqvR!9NfoSyC z%-B;}>(|<`m6W0}*8*Lo`ZP6lDqt2R!>Zve$do=-gXXTuP)C-Nrs$G~oqY@!qRhjbj6vF!@sdP|8Sx-Fg8{G#7B6lvn-}5igeF~@i zurJ(u9W|0~?)#0UGd3QMkK!R4;_Ugu9fWRpzB;;l%Bk{VcHp&@SLbYY#KnQ)oaTJv zPOm~!A7I#;%RL6#3#WBGe|I}QeQ5D{Eydp2=(Y2ncS{|w-N|;Fot8T0LQqNktSLvn z7?l&9;jD_+1kawKR)+Rf2GU5ZLTif>#LT%>a&>*W(~#qiT;tt(pDK!fybgQCBlyA; z2N+4BtM!jd8=2s7e`aw}!^t73W0#YFvBdJl)Eg1I1VH2pbMbZ0OvL4+9YAQaM^*2y zN{p#X4kK3xlB=0s_d$Q;BQ1^lNe@7@tglyIjFl#g%D(n%Y8XyFXLPEV@8>F3cD5$G zT5@7H=qoNboNAovgoGLT94m9Oi=hV=!=~(2 zhPyrAtTt>ZMLXI}MaPHp{z-w&qa=@CdQD&$b7-zZkw@6x9<5Wr_1h3( z4{!?raE6FBNpBbjeoAw2mNQ!00#D#CU2ZPWnV4|(dWC`Q?dEduoa-akQ`|5drjDll zVI!rv|%M zg8cH@lGUK?-D3C$=33u|ov)6ff3BpLY?oP;GdO#;^D_qLb~@p1BJ=SEy1Sq^1HOgQ zx>01g7C!?NS`YmCmdxo@|Y$Y#Befkj)i_x=FA# zCC~8o&QaU3M4*?E1H|GYHGEMvDsA1S7dj|rQ9`?i9zkg4gGttUX{O$;+%Xs1G#@)d zW~Y5re_YmWHxf5V^UBSfo2PiNbs>Mv{e>oX?KHYfI9Y{NTy~y{CrdyaZ98!vllX9kSVp(roG=K_Q2_r0HnRz&I4%YRN z=80i6cOy#<`~tjjNzG~+>LlhmfA&3j=e7>q+bh1E_nv=N(?#6#VLCI+kVH;si_hv9 zbpD;X+k}DVw~I&C^Kg#zMu=-BfCOvwf9Ys$FZ0Qkwly)T_Za138L!l0QRHwxm{{TN zZ>1kJQ#M~)V>#;P=v)&2WU6O!W|J3)31A%+nb2pyUiLf7!8F}`z;3@NwPqZ&oj`Zu z#bq8{)=?oD#5lf1_OK{A6mge3-HmpNix@7{Iv+#qPABIhGv7~KpDv{UJdA)Ce_q?W z(8y5~GN;4J;r^VUEAooale=2m?7WuCRu(Ke*2@OG(u$@)y4j+0VP6mDp= zxRAn6^OIG{yBA%#llPcgXxAg-5MC+d`BfElImf*Fgv-ofXgKZw&QWrX_+WaFtypfE zszzZLo7qG%8SvFCv&rUhwfp3z;w$cgXEInrA?9{3$kW*I#iRG@m*HKEe`aK9I=GuS z+0)Q2lVdQ7Coh+YdZRJ2E?$qF9%v#t6luhZ(RsZ-v?m30nF$WYwbKkv9*$RO+||H1 z#gT4Z1TmWg;gFKJ6_ab*9))dC7wGL-fmiyWc5i0ot+aDo1bP^DIKw;%U(n+zoVm$6 zH)7UbUzU?Sy6sMN+hNWgf0)$x57W7{kNkY9drFaCa2TgMbp}K2-RZfpT|bbnJO6Sn zF^KWH~%I)?cCuaAdHT(XFfPhEJWyK_jEvmYGr1bAi#qOsEwa}ij^j&#rm92PhDp3k4Zx3 z)WNEKS)Px^F(I=Me;3B?=x-81_mf^^S)rUppMIL%=`~cRo__R`G+it@pQ}!IwTHcT zxEvPmF*dWarZ<_qamVA+T$*FW@0yqA@73o{zftZJQ#)pDgL=yD)!{m~{HE`mMC<*l z$TACN6HZ)nH>I|UX5o^4IZ|7@?GB9Mt(tP-nfX~Ix12tfe^03%A~!CDz)_}Z63MI_ z&SXU280c*qoj80gGWV3i`*xt>2Vfd)BLsqY$b{cGR@sSR4QMf6hU1XFv^{r%u3Dyg z*je{o@wl=SZCA`53rNvFk5x-t?zDW4&dT_BG6yoEX-qBe&Sh;OFKXL$!|*z#0-S`& zpkUD!D&~@2f3UIBy!&xT-9wG3P8*Thp%6WAp3l#xoU>?&aUfond3tr{^@vAx6rAn* zUE9s1zSG$!=iIXXC2ZWOBOcHq`18r7CZAp`dk!iod}cckmsf*Y=kz>p)l9s?w0_pX z-Cf!3w7Kf*I#hDUCaUGnygW%yuD#9t!(xRa8oA;}e@-2Pbi3V1=_w_2%%2m!alKHY zX5--4f~wKI)|Zdd;ZWzUkPET}N2r~IX#cQPCbX?XJQoHt(BknJj3vmqBU@_NsGV}m zUDD~qCqbiKyX4l#jilJ>ZA6hBk0+c zqpZ#je|DGKQwmw{s!d?-_u7pss#H8gy6p;&!G0Om%&sV8+x6m}@`IhwI05$ZsX*&+Y3jFY ztJ3UY$*AbMOxq1(Sc&XPI@6VIw|Almrq#oHe-6C)@b2^O`kbORebSyW%j|szx+q9+ zuGH@9AcRU2rK%xM&*47IR!Nw4Z{6oHZmYX_>CzJpua#7J;tg&X{v;+jazkH_wN^TX zSChm~(Y1p`!>lJr4|xALDEST#rLc)7o!-%9bU!8m?o_*5CTY|CZr9+EP9nWC`S5k6 ze;?EC#S#mv-E~s7p(M(b;{eXYI~X3BjIH?4P@RFl;@wbQmrjK@9<5f(2oan?MV*S^11!(}~2_pGBkd=;iz_H$dLcKA&0 z#44KyL$!q~IPK&{1sUmU=XiV4sgoGnf9)P=t~QQ5d93Ha3ZZM*#pXT_X+*I`JXmXg z<7nPIVS|}Wa3#EKH(L*UJM9|RlV!{+;)H*CcyNOqX86*h*yST)Kzpkwc4S(OOmoZb`#5yT^Blci!MV=vjjPZj z)*G@LVu^P=bK{;2T|wAgr)+$+P5<(V*ZpuRy(fM@2(^f1)yd5+(KV%l-Pyg+^wYuy zPhxt}XC8RK2uxq3nsJLQf9qBSR7+oihdf@EtcabO%Cv*X_>X#bxihLyxVic|@3qQ3 zMiIHAkFLAOY(*^?V2ZoH7#^EV54|SHnUJq-BH^}j+$#)Wp0oXC+@3H2kQYz=)qD^y z4?8E1r`8--|3;qAdwjpI$lGH(PM4A~d-+%oM5HnEwKPtr>l%t$f1;l^gCeNQuybt{ z9#bt=ZU^%ac{?i8=Z1|I^3up{mi5n@^q4k=xI~s!at5=jBBOT5Ot+4Wb1Q4#8_45I ztc`j);Nv4$YiH-l!&o`X&LM0ie*p+kX^&Al5Lsbf@pK_F{}@VgIB3Uv6jmo{S5x|9pgM)#7qTkN z+A1{{I}}o8?>OMLUc+;9k-hyX!#iJi+*BvNI`^BAh!?Gt4r*K4_t)t(kaj~_kC@&Q zMUi%qsMy+IWBMQ$4Q-`SvA z?eW*8XlvwPq!y{&kS#`QyW5G$4r(9Flqx6&&DD2Yy&e={LoLIwurJ4>f@&ukz ztLjRl`73ceVOFSi>)_#gtc#t1!Zm7nHDd6@u5}m`#BN*5h9|rDu%Ge0eXzGFya>^_ zyLXps+`JxGf8F1M-P0!=!ak2vfH^0z*_}dTZD62D^>Vt~ZFzfyiwv;Qi&}PIu-!T0 zb9-l=YNs8ZvfpI8w2tfmo0)6j+pqj_##@$}vhiGpf}cK|t7EOp&2+l;etU36Stte9 zzjlwtRHofb8)1sCIkv;9wkdY)CQi5MW#Uz6#`9)$f46kC!axt#E+ugL-f#r7NG!!V znGz6tt-3w;(#eJvw+l0DSZ(=`xf7mono&ZNwa>bF7h4G&8EJ^_(?s+){G^Y&bKO7h z;??6NSni~tKkZKKkW4Fn-(?TYc=f3uC2-WnMCx?vSMQPdN93#BV3tik6dMO>+K=Z9l4FWdl=b! zb*za@i3iDMa&B+B_rr_4VAv^2aq{jYZ8<&le~5lIrPrjrpk&nAUP5KQ`Aw~@l)W-~ z)8>b`#nM)ZNmAH=%pYW}@A0G0Ik_HC^J%`_FZ+$9PLZfo-ZNo3H#n;lPK};1_CwS> z+NPcLwmT5Y>yoIu^Euod*vsYG6P$Zhj^oICiu&3~cTzszZi;n}RQ_&`wWPjI zd|>eVgDB3MGZljQ5b(KeMeXdyi~Fw2^Vy2L+gYv(tINBbe)&89 ziXZ&;NvfG}Om8C{nw0zyhEwM3_t$B*fBR!wM%`+Rc3+44aMY*hxJ8aZ@Gs8gp`2yR zi(D)3)GGL?KZ^-JjF~F(jCk9g_2Vp@4tv2cXmTP?TSgc2!3uLvjaPR`W!*NN0;i|T zU^eZI6!IX>BYv5ch+NZwIY-$6Kxw@vOPDXzqYPUdee(;%VDc0j@(tQ=m?oNtR z&g>2Og+c8moDR2FgmuALfAHaUS?+;u_d*zt_~CLr%b}8w_2p_UQ#zhcww)Ovun6&b zwideZo+5j7B*%{Cvc&Ohqj8<+kVop5jR8>_+GDu&or7@-kC?%K_gNUx_ZRVtlQR8F$# z=42hBl)u%an%XaNPKs3zm92?FXV6}&T{r4Q9vEvoKI$5qii@|>)BZ6CvBzxwv}?NC zc*C32u#Om5b#tl$->W?0z*5vrE9x~&T3_C$an~&Zo8Rltsp$;yTz5s zPCT$rGVKWZ35@VIjR%}P%vokv{2b44n6LaPf4+95d4?{JQ|2YgtFUp~4PCao z=DT!*PHF{!(d*Y`$gh>T3$h>&aGE^ULr(O`;T}UfukFhW#tA=dXY!I}{%uO=hCS2Z zcl*(MThPD!f9rq$>t>-p_X=5`y+U;T*()S~zYpj4yG!Weu19n%;(dD;c%PMnxz!7+ zN*a#<#nguO`^=p26)pml3qJ)^h@*oQ(mNI)I(dVRCk;1X7;Pe^XmgQ^n8NYBZ%GeI zhRMjZNT|1b$!{#bcNkbRBwzb5|CRzuC>(Ow3`}*ne-M4>3n*T)6TeS!petbk843bd z`b8i5JPHN)W+I{Ey{swY%lR3J1fC&r4{v=?ctUU!st}O-DJBhcJ+Y(x5uMf-!wheU zB14xeL0$jl2f%6`{J0e!p$UM*y#q8?sASi=Y0eT$Oc^NnU`o9CAyHs|PKND5!8Lr_ zZ+_QOf4z?xrnNbcNoL%e40uNpFR7=WifzGfeiR0JA683sX{5EqNZ}CcZVIdl4tAL8Ry$ANp{B`dS}C9N1{K$b8lJ7-Ih=IFk4a;XSeQL zfsDd-{Uq+7mbxbj6K__06i@bWXivf=BdFL*f7mJ%DV4LMF<=;%;nHyJ@u0)Vpoup3 z5No4b&Fwud=9ymOvyAnpT&zXH9a_o-Ylh~a!F9h+ z$jjt3qOZRo@7BkM;H7f4RaF!UvHhY{wmPfYDp8o(UdIK$E@DUt6J2{lSP63+GCpdR~f#@bPa1B_SbM@-Abh?_fkoh=^myp z#fu4QpR+?c?x-Wc^2_m=db)ZeJsHZlg{xe5rOnqm6)%3{`1CgP317TCyDqL#Vf80} zDu9qWuq;h0Ve?8*@`3Jd;QDHpri&f!e_7r^aZvZ2p`HDXrc`{@ZrAn{kj^99sxGdq zg+H~0c2(}XV@OG}D3y(7sK>yE7fp;KkI};N;*4Rm;bU=2Uc!19X-17n)!1>EZVqv- zOMRXXux(Dgk@Ui*$;f!HcchPHd>e{ucOZC`Z9@y$0S?u(T?&BI@qT5kQweS`T7LrnFd1heIR&>6U z+AZ`3S@-o_a7x>epqtn0V93;!Nw1r-yOu=pD9vbKXf9JQaHvVmf8CSTE3#G@4^`P7 z=$$J_<3=;%Mvx+fz39jk&*wI>fA&75D#A&waj%}^aKFPh?a@XhKE(HOb;}NCFRx=% zs)YG)8DeF`Lyr&FG`a@TTFZQ=n|5ylXR;C+D2D!kO0-4Cgm=bt*T}7KD9_SmJ-`*8 zfZiWy@2ZwbeBWd2urUW0m-|sVV=MV-Yj4=Nf5X4j{eoYH z6ZNw8I0I`&2n>Z^GKW6B04%X|-spWsFp3GYBf4VoqI6I4`Ty|sX4}g0Oxx%IxdAG6 zn@I=|`q0Ckf`%RymFDQzz6;Xe}&+E z;!ikXEfUH@+z>`_Wbb7$2@22JYUJ}O$<3>{j3JHNv=iD9i4SH z&B4~eK-+Dj*R!?>)dBxH3_|Y{An4(KCXolxy=NEP;kB@KkEb#=e*|)M;;Br#co0tJ z>Q$*lT`tilK29D7JAMcxb=I=vi7TBdYIa>W^C+gbxYrCSBKIO~UK#U@fvC2(j(eir z)Em52Z{l|0p?S=WrmX=mrm-@70}U<7(QK^!RoF!7!`}q+-8$AG zT4&14q$=pp%g57&f0}x_+Q+R~A`sUt`*<-1hR@|qB8a=0EPK1|vQ6mxe=4%b4%AM~vvO9Oy6S~S zQ_$)SHQ$JljQ^wsU?UG!A+c4XOH>cjT@*!J3!H!tMdtMUF4$c5Q50{qQRViU>rSy**R#|yQH{w z=esEG4(8T-`a4i7&$f+PkJN{BYRh7Jp6_d#hW+OqAI{3Y~?dQ<0 zR+*v+&^2d(9%hoaw$jD!d#fh04#avozQnE&0y7x6TRZ6BE^O~SB@DSAMGml~YOjPp zcjwQAE8sJ0*LG+Y^|Rs#JfGH`X+BIR_tuXNVdS*9^xNJzmE}|FM?X+y_wqiU3g}Gh zm(mW&f94#~&1wv#>h#`~zO|3#Ob)pAa(M>|896Q3T+_=?(*uyt&XXW-2=Sa z515=+`;*e{weD3kj}^U3>kF2h3>;K9-d@|(e}x;Mq`sABTEut9X$Sq&M+DWnS4O9> zlD5eChZ1dr?9}$VcXd$9&!D=e(9#+ws9Yu2qem|bz1_B(jpB^?c$Upv-su~V-Ah?n zJJGYf4!6j$NK>~i^lmf^&Cm5=F+UxZx~Do1=PAL1-#*ORUk66(sF_sRP@hw|}YQo(pN zr}qAlbFfYvO&m5+;Yqu-WUdX42Bb`ce@44`AlQkvOay`JUQcd*ptR4}_-Jp|Wqz&K zPGVoVXcK$4kAM>?JbZ4&2vcFx2VA<@^km7C+w(bv{!sT2hXKO3&vo}G^X~m(M7;Z= zfw|#gHLu=>eyp84WKK#+^oQr%ug}~4`pijP@&$B6Q^?*pKoKs7syE2h5BUg(f0=FG zkHVCcB7e%c)=Q|<+$L2mo-gMpt*&i5@51_~>wN=(n{YnG$f9ySelMNTPKHA4nc{!80f1LCrh}DN-O^2!u)6&pS zy&(WWLjAG7bSd2i$?ilsoAzE38ocXt>dH>|@p-Mrah7&i$Z%?)+eIjKx863E6JILc z@VaSuU&5*P{nICn$H4N16gJ}X;BdmjSP8;m-n>TcWHb?>E|HxWayS)eLA_VY$aRi(=?_*#X_H@L-e{n=l*j$9ThMfstg@AFOkc*v285# zuC-^}EACd(jfld*tE+2Y@nN%B-NTz{#PoD9ZH(s7lm7CQ{SLxUkaS#k=ZJO9LB zC&GSreb~(k^rDeGec@GKe?M2(7boG`9sMUvs~!OrnZE1}e7?I{{e}tsGrd0;9+R^C?)}KrsvfY{m zrGN}kpsR**5b8=!+L}K%R~wF5?S;41X1$x!SLm4D>5WWyu0gPhul?H+uHl;Ouky4% z9#^Zpyk6}T&(F|!ua4bNaBv$KX%`*0*1;?d?Rb?p_~jiqq7q<5f{ zC!rvGa@gPBZTTF9e+&wb({5{thv*W_hhS}4yPJJFoYm`owQ(NC?x4%N%0HOlCca!m zZr*s=_{_R@9;Hb^lnkC<`|W%86gEQ9Zu$LTlY3YGB zOlft&`aGi}TWau-Yu#l;-%Nf&<;aY+4}f7*zOUBKuAS`Yf01arLeKq`CDXYDS0RoE z-1cnbWcc=ElyCaS#*LHrVdSEZ^VpQl^Q9Q`kiqf#dCv5b^MPPXYyNY8(XBNpjm&P=nw(er zdw=L%VCH)viOT%QeJf3bVSb?YL|+9pE%4|td*J8Z z=Bh(@lg4WB&)4I#YWKbsSo_M{#-Dr&O~&cNNemMggwcbp$E}>NZ>B3+qJ3CZ;YHuv z`cB(DwFj3JswRKJRCAh7Yrze@N&$+miEijf~Cx&>n&JfKor+_Kv4vK8BsubKIwt4yILKwCMi4fp%|e@2X5T z!n-9;J6D+(2ltf+6~ld|%JC+FRjqEX17(f7NQz zFD?!APWWWjerL(}Qs2eL`BOcSWT#$yKgjlVS|8pUehUxzK6^b5>D3H(!x)_VI4EAG zsm~xxZ{?iy?lT%+cXJKTQX?4AsZn3aZhzZcPW`$t@UVXH*R3eOT)Lm!&26<^X?5P} zTpQq|;wLh^rX#r?%pKoz$Ntrhe|@x>?8@r|@xrUdeCyhsRc^~9ddj9#+%I;lfzB;#uR5)AfKKo9#D)md2?BnNp zu;h9bKd$m^^e>T_4r)w?=(T`GIES`lKrmZBX2j{+^2j;+5^g`!~8zXw)Jwa$GHOTtY^Zc^8#kOhOZo>KL$o} zz1~(&eVbOVi(eacf1t-{kUo2se~xt%rO%h!I_HN09J)2^esvJ*c{dnu@Yp{$wq)n& z7#LGOsBvqF+0;7+`y&?TeXoW2v~_hzrqdBv#xd6V_gSur(+Zx|l`e&2ZQL{UUVP$L zVP6#`d^C7_5$2=z7P98h?|SEDYUH}L_ojYpi$ir5FY{4Fe+Sn%+fHVWG^dB74v3oTua6Td%2swFdBr>C@w(#XLBeCFh4C3|?ag*29;$J3 z@c~fEbG(67EF+psIg4ciKndmZ}~lc=q+wmkQ1>Ho=-M+{|N8C zt$-6CU;}kO!L9>W^Ipa>36s-P9E>LzOe)=B6<-U$3xgq3K5k$kvX~$M*;zgf#JHS< ziDC&fO|P-R5DI-jm``O!yRcX+t0GWfAZv6@8wvnNCxpXf>ZqSeUO1hZ3v8E z)s3v_A_#Ck10)fz!+FstkmF9QE-Z4oaU*xMxxD}%9R*;|8G0X;=cjauuCJFN=9|Ym zxCWAT22EN%&G@SECte>9bhEuhhe_L1>NQ)Prl&n9eBxX6cC}%8rK*Qj@(K)MOoP}0 ze;#W&Fv8Vg@WExfC;XM)hUWA15IEaY2W#k#^uWF22cL2)KIG1NRI#v5G(h~Y+3`7& z_`P`VG~x}NVC+zl zchh-K>Xd9hhGw5Uo#$IQqdF5`b;l2zed+Y@8H#=w|a3uf0P%?4DsnQ*YCW z+~%mGwRAl2$1^$A-L)>~+t3cDjonl6tY^;qp-8V;~>n@T#BQ@#SM&f1lUQ zs!&!|F**WQya_|*-C-oopu{hA1J!a%MJV*zgQ&=MRpS;t^>IGj$|nFduJkK6_WI z34{jS1}42Sp1#jpgS(vlCx`={e;kMGhTYPk&2TwhAfQ2da7(ClOcbCw=yZXq3oXZaioeEkEu0jMp(Sd(w(IVMfpla z1;jxniSEO%(9IDNY5Oqx`_o0T^DK-cs?vPF=>}dI$8ZPUD8BW)!V|L3e=X~{*(=%A ze0-1}K5IysSU-H(&3L3>VquxLyyCun&pY=lpRhOM%C~n@Do@v^{ra3*v(^%6)oPFD zKF6MQKiRt`y~9M-t3wQYTU%%As?<4oPv2(49Mr%4)`MK0U5(hXwnB;g`%_<9J-;O$ z-2zD5%DwqpH+t{GvkIe&jExAVh%B#A_hEt$-I}-N~>1cn< zMmWb7}$w~~`n-QLjHBK4@-C%lq!3(x!THYG=0 zPKk1S%j^;GF+ASgwx6NF*JoL{yWu>{hVZz_UP%rQyBq_6r^Ef;&5bH@@sp#2eA{?Y z_xAWiSFmuklbu%hf4AtkaiO&G+;Yc1ZprYt-giD1?DeOx+uB8(n6F^!rGx)rpW`NY zJL$2XI^z{bBS?9DU)}Qf&?w4um&1~D5JR6_K z>Rpn-9Yy|!R*Ib+=PWSlqI-|t6!Pc&%Fj$Av@~RH+&jJnMbwO}*=Jr75_tJ2Q+?`Q zoBY!EZ@m>?=HR}IPQ7O<;lSke?k#}#es?~8_{%C6gi(_N&RspW=<4X728Z^ZT+9&1 zr`Jn(e=1FRyd6*L%83sWaOld6y$sYw<47UX-v_hVoA3GZ{@ja=7-+55c$@8ZXb;TR zx-+Bnkrg45n2PQMXodztgOB3-D3=g|DY*}l!8 zK2j#P&wmq~Ppf=49{0&O9w}R`I&i!BXT|+A1|UG z+8BL6D9!87?KB{z$Vv!KR&_1&D&cTCpVc8>vw!+{4;%MY>`uIV?B(2wqT?JFSDvc9 zJgZ8#em-%1h|Q)|JZAz*dfA+P|Kx;~{CeLw;lzJx)nCWT)!g6SekYz1wLmLsCB^h~ z7o&Nx{{|2*<)0sPdV1W4TY__mECU`$n?2m7KlW<_nIO|HZeSD3UJi8<0Z+zrS}vv;qD`sg?`h~Ae->Yyste(k zvq4=J%C&Z|>fYS6%+^!i-d@gmi4qj5u(L{0eMcVUg8X?uM;qxny+5pe!P}N^WHF2! z(Juvm&cdGV(49Y-swjQLR{Bh*YkXsRwnj>lbn)k-;m5hT^_Mt4T*rGIt+=bUYJbaD zWoh$Fu2*vOaNaccr@?Ta1Q!#rLwW3!Tfih6d40-dY3&5QIo%}b)U~8`yW-pz?{3}5 z9n0QcaOH*d(H~Pk>eoV(W=~sLmsYkDcaGie3jqdKF1*Ix;m|+A6KKdedF97Oc&twB zw!wTRo6&?euYD1|W>phPxqRHa?SDz)y>fSdoV<87-rIRRt&%IHH=yUj!Y>yCOCWZ> z?I%B%OvnbqdTlAr2eq$zARsGFdmZs<8_Xw8u!H zf6fNu4F&KZ${0C*OZO`rHvs#;xrP6eTKIbX$t?VpB4>T;C=ZZ%YvW^7XMdO${gT=C z&8&;%pG?6eGY|lfDqr{4DYKR`bUy-q06-hRnSy=N!uAr9_iDR5KQ2C~CCa}t1AlTA zhp&i%yuB!O^L*PKw_LTBs%K4jmV-Qz9fvMSj__H1GZUL{Cg3;o4*+m!zV5EmT_Q&a z%}Ts50Oax8HTZauPh}kBO@FjBa)*!a^Hl!H{97{pmTboS`#gNd;kr(o4bUk7+PA^5 zdrjrjoQbX0&qn|d_w}LA%97~^znOhYtATH(qLwbnjo*UP6ClCTHX0597$csso6guZ zrn;d^TcCiu_7k{#3s1M)@hEPo{#hCf_yCZXzY`S-kWuszDdM#$#(!J0OeblKLt`e( z`;9rLv+2>TS;kwVjAtWHL&>48G{%us8cVUztue-d1X39eu|kTpQH!)xhgg9um4O!@ zKXI!1iv zxFolzpvvIvRJ76~Z+~cj!(5qvozXxcpeNVx`j=cD9TDIVj{`)4yjufiAXWt$jUl2& z%IrPo@W)7`Lj)*O<&pQ4L4ah0l?t#Sw(Kqg{6-+ia>^=5kLQIW04lk|pqHS>OM!a1 zpMVZL@&q1P=Dz}8dd0K!L!)K>%0kOu?-%-%U;0c7t(VUl3xA#0CW=eK;*yj;|3mX- z9eiFbhyq#s`&s3C|9dap5|+wjB0uAl=>~X1ga(4)U_NgS@H8mwjma0#P;DHM_=?=f z2qvnL^9VS8Z_=K#i2Cpb_JCttD?rE@BBss@7gNTmA{w4FrL_Wvw_LCY3`VFqU1Wes zMGirqk=&dCV}HoZi#QM{j>5_GNUaPp#yr1sMd2*f)B;X#!od*5(eJ_J3=WQE%#Mx7 zQe7J>O`dQix#5(`W2}rI1vw!SUU!d2AB)g;6(}eUFuu970D9a3sL@iI-vU7HN2)IL zSAXdLr4cNQLj6mpjWm-;%6%UW@pAu%W&wu2;FOBhREOacY3!bWdEE5Gc z^#%+lwY&~k&IMZqv800DrFj!-b4ukvDGAK&-%XlItL0IjEufnG?0ywru{|*MCQp995 zGJhSzuC<&hmK@X4@)#ozpT%)!>If4M#5>LNulk{PFywiU_7IMz1Uv8<0K)|XsVp9> zbRH{!VilvxM92`}KrHAS7gerE4r>*-{C*-MNJ>O~1Vz&5l&ZQ1&I&p%T90xoB6duB zFWv0n)0hy$lQ!T&os1uB`JqdzE_D3TjelGrOhf6f4k*9;Rs6=SMYfFaFSadm_g#Z0 zVCgbuF6HTWeZakP;ib}yiQdjLl`xDg(SLH zL9pOHPuUOd(>Eyo!H{42@T)5q{$zjn>A$%DqhllEhqm%RnDryO^B+C?vp$7U{N``K zj+EqS;Fl7ShdOXwlG*v7yDq*cpnqb%c$Yldw+>GNICL(a(^!ObXBSu|E|s(m|BgaI z6t{Jev_wZN4;6aDl`sK0d7au8bq_Z)9d2jQttV+&uJ^pk3ORt5Ozq-_4=v#bo;y17V))DhB zyzppAroXn2>3v_9XL&M_ug=i&*fP&!5AyVGP$I#RDVI={B4Fb3#dDXhokOKM%HR#_lqv_eYQ-A6@K#3wv#3!?{hk!*oBL-NXTA3OW zptz1Iz|e>d;s99-B*z5=B3gL{?Gd2Z5Pivjphwy`+~sL8vlVW)*nh?E4m}xE7Th%e zzu>O-j}5Zm#AGhT_h;iT4ot>x{8Xgb_^~H{-(x@a;cs3xX5$A-CZqU;<$?@|B2EGE zlmSXE?X7&8MaE1sU(1ezHOX0e_||R0s@@noB!*xo6(3O;s3DDgg_pwBMc?SOdWI5J z^;~el&2S)Kp(Nrfp??NMj47f+3pgxxPSW=Q&;YIOU?d_!iS%Pj5-dUx$V0Dq&lR=l zph+%M;j&XEMqJ6PJ9r4dr9kfIp;_?29O-YJ@k5_q*h&7{aDQ#KUo`v=?f%g{fB5vb zKKp)FCs1N18RCh6t^+bNny203D>Ezd^>>$@q4TJA8IXu(awU;#j}wx=pWfc&qV|JP1l zWcV+g=;BBGl7IQ%H2Lu}{_6etAO8fz%2%d~xc$XfisaDfYnT7B^M33e5*eV8kz)ji z`3lJ6l#YUq_?XE~#Q|aF*LWNRh4n6A zM^8k~tQEe3OK8a8=-|mXFZQm%Dx~BjRahc|p%NirZhzc_0OduVF$3`H_x(qn<3D^W&Ez*<{_;x}f5a%sUmjJA#jeWJ$o$g% zzxLSIwtpy(Aq3r3WW6s9f^Fb|uW*Vbw;Lg=1J#K|dF&LC4i@HdC+DfGl=;z{QWMry zBQZh?K2^zF`&V?NqB}U4rV7quT|&;qEM+g8PZTnO3{)keQ*AIXJP11`0{#-8lS-!p zl)wOi)9o=Zh!qS5pu7hdyL|1-4`)NaTI|bWp?^!{;Ds(*Q~bqCoE_kGZ?!+M;Di0Qhg7O&dt5~N%xQnzRkjBdhA07Cz zihmQCj)td*K7o)nK21Xiuq>(P+Z}dTngkWwsB!AJxE-+oC_yU39<>jdTgDHC4k2)e zDLFKihFAeiC=Bm)cuQeMdIUwGxQcIa3(`yAfYcrn0Ni5i$%vT0gEM(y6-7SUwV$=V!I02cz-Z> zdc(*s9af3Zt#s&&{d~ebSm;SfVgN(Lyh*dupkYmW);Sd=hi3B3yW&sOi{n;DF=ZOJ zG-iZP;{fQ_{{L%7{m{w(*AF&J|5se%AD#3UlfSh3%18d&{{PbUUmJG=&Ok;v6GgA% zw7FM7Fzf?3oD<>Yc{w#P#EC>ZLx1;-1kmbcB&LaTcE$MRYeh(cZL(EzHNHAn#_%1V zU5q}IIFO~8G71N;p;%-tqEjzP+HMVlWI(Kol!Es})QNcTMWq;xgz_SU@bH!#ofX3o zu-zwc1ZI}W?cq^pY2+r^s}tY_^jsPa;uIj=KN$Ja%9uQ*pLx4@{>EwkPk(>goXIb2 z_y-4vU-m!44VjEU0zc3G!_Qwo^pU6Z>ZAy`iHli`AvlT3m?E+E`7r7`4eqv?=dTca z${D7-2$*9G%yEFcqY5?`UJ>2SAjkz~K}|r3MMom31CD^}yLcqNNaILd?3)rgvda_T zV5agome-Rlvtwdxc|$5zm$IMD1V$A>&0Io!~bDx{^+BPv7GS!rN92#TIp}Q==Xa3*5yAu{#QSJ zU;o4NKWou0c7a6ye(!r6^eZ$Z-~cIrhWfv;B%?ORTJ~z8R3RXM@efPJCLD&FG|-L! zMs$?Eb;uCR`+Zpo0doC|~U%z1)zh8XI&X!p!0+!ZRVC?n9 zju;Jg=HprgJ%Yo|u%!HIVj{o+E2xTd1yB<@bky@|ggLOqu?zJr>y%)miJ(H_rJXtm zbpTUHa$t$jpb(^{$l_Q=v4ch-mY|SmlOHJ#-y&$J;(r+5Jvh9OYcNQ=7?1HA(js}0hZp`&{NU?* zZHSou^0WTL24qqg%2!qj@`KU8Wri)X^yO=Jc_n|Z%eIq>k{loCa7sXECdd(PkaaOA zs)Iub27d!akolhM0z01Z402W?mb!EzF<@08$wx`N`UFxeqB6ZoK(R25RnI$?A|nfC zc#siSo{Vvcje^wHkp$(X%CAZd4?$^h-#38rrU=2}vvi2XOGBXzw_+j~miQiB=&*ys zLI+a(^|Sxc^Z(KD-@olY_WLhvrqS1*`g`s7Sbx9pnY91L*uOCpmUc#2n;s*s?eHoQ zQwurW65wmR=dg#c($R1t@|fmJEL^3>C63a`PVY(L8WEm|(7Ai?ZtldHM)pWOeX49Y z0$T#4!}(Awa%)bRBIA5zy5wf&oh!{=oq}p5kA0+$IC8;p%@TUDN1%()hnVq!1ce-U z%zu~4pzwzfNz^cu_3*a6>=mJ7fOD$->@T!jKN#}j*PF}!!qhE(=M(?P-S3#^kI(aa z?r!``M}6-v`1&6`W_n+FMd}n!vNse629CkU!8iVW4?K+uBA96*L*|O0CzelD<92Gj zoG6%4EXi>UD&CXC1t)zj=soa>^u=|H~moPYeB<#z`*BY$-4k4^j&gOh&ijrq_1)Z%{^Kc7iw z>EC&?RQ`=;i=GC*?d%_(S>vDBdn5aNXSnqhQ;NWl8n8{6#xhL>OKcGD!1``e)7R$bc3|npHe-KM031BC2`U-&oMI+lP;vq8& z;jA-3L_ko=T3Z2EEeDR=J9KH!(Vat-@b-X8ozOS(Z`#A z=>FT+{7ci(52f4>|IyL^kFE6U?`s!*f?E~zpxPb!1EKUil6jI&ZP;U12LDo=iZCus6ikO5sVwQ|wesmP6y0dZ_G!331La>%FAG6 zBXPQ9nV2P|4y4)>xZNGX*pnPKq5&fj#lF7Ao(4MI!1M|S>VISVBhAr3N*ShC*|~#^ zrV?NdbRECKw3nstzOKhbz<t}6# z`If)O_!S#m_KJVwLJNmPm0@CqIrAege&L~lpzzJJE9e#?8XAQF4BbpYg~Vb*QgXkV z!(f{}(;-TIk@>pf@Hqm;*#!RLgG1!wIiOtwir&RA)1Z^_QF8JKV}Fgf*{zkK%G8`$ z^4w{_6F$gE8-4e_u|`57Ggf*|6;cWdrn-Q}R|ts>sqbNc0c(5%j@=hm^o7^g{(>vd z1y}sMTW|&b#EO2$InsZ0vN8RSojInH@q?}ZiShmD=EXjZBjZo}z@|?i4{)9 zfA;@=_6$;V>7XW!m45?4w zAR&idG}bG?0WuQP$)mjl0dYilx<+HX*Z_(M2$7JnH1ft?1b-qxu?PwyQ^b@)`m%2H zh#;j1)1Hd?eYlKEvu|ETIk-STVq0Y1pKKR+TpFDJKAG`-l>zz|+me}J{4LZ){+uW& z27Cb#hHIwUpl6~B>=I%De`w&qODB8uV#AqNo`CIS0G<7s(lTGZ3*@i&*}^b5ec4n*w$0=i}; zagDr(iMZ^h{Cu{$5Sg;xfV2m+37@0`bS*leDfJ+c!sR=sAo1&0ygVxl_n7FA`Vz}~ z41W`OLW3p2m|TDm1{LcmHYp#JPAFA9%4vw?@%MO3=G6B&@#nc=d2XR|ch89qG+)Zu za-IB)r^T>my_hUx3}yo^u~a9a^BzDE4k9+o9zZwL0dNn?vn=nyD9ZeMQ0njVS9c=| z7{GG59v_$lWHkvxCl4wW!rw7sv>9MA{LmB_Pa?Y`+^C4?fGnSdRS?Su3`O6R;_s_9fDlUY0Z3Yb zSr2lNkd75k0JQ@ipc@VlB~BuO6_z2UST6gDnw4+Q`FlTB`?A*#n97Absc;py~*G=P6cZ`34lk1^SNL zW5~krJb=(-j{Y-GR7?}10-$_=v_hU1nfk?Fx~~7PEQn}e1pBN9__@%T9spKDz?`@% zV;5FL2kd9fz@-=ALSH5yAP9qMS$|g=%0bctqb3^(|Naf4`~N>?kDO&3@Ezw5jhX>W zei{1?UgoY}fc2GsWTJlH4FF7O`5p3PR((w?3Z#*ZJYdx2#U#a#-VOJ(cZeXDFB}Hy~iN_+Ar%b;4be1Kh%M%z)Iz0z`Dm2@BWM-PJfX&Okyh*umTW{ z`?!GiBUka(D-i3&u@#NtbnRT4TPI?3%gdkj3`Dvvu%f%H3tKIVtpwVI9+G>V;QT31 zbzKmC%a>)b{hxA~yH2+LDKB(gkbleZ`(^)c`E{L~^HV1HmO|1iIod%3lPtrImSf~-91`G2X;K%GPMqMVD&((T7@GI z5Gk4Biiw_qN(bD*U)u?QDh6V6|BB-jAw2lzmIwSZ?(K%HZrQ9(6$2YMKv216;7$=# z^P()Xbg6e+wXO?5;g)sblx4An>gG?mt`jui=C9Lrf&VS{mw&~!f6CS7I$8Ip{H^PP z^jm(*H~%Tu-0S4HpYlo91@*TaUljg2KnT`_qZdWu4(WbVeEsN{|6O|l8|z@x8ev^{ zuq+%sFC2Z(U+2&Kb%Fod&N#ESIOAt6SA@J$bYR)2b&ttL(eJU`9Ja(JKe|Fe8+%pu zqaR-Rn+*yU+kXYz0)%kkg=w*UR`i3|5W7t2Z>+HZ9XvJa|1I8L6h8droBy%(KX_66 z&-l3Ky#I>N-*OPHf7~30$6HGmVBq+Z;-ZJy9MzL!<^P(0?f}9qFZ3|4V-vr}y71Ma z@JHob|Mfbt0G7XXWupeJXMp&bL)o}jIJ|Nw0Pe91ntwoT#*eJP`8{99w=Z?CqYA8D z77$p`v2VSS_j)4EAo^KDxqD3ve(Pj{0R-*fU}H|=aZ@f(J(cn9kNW32ex-ML>s;pT zTSyA5ojhQ<1+2VwaYuan827pwG+&kjqQY)pmWxHH@44S~^K-AwcYRqwh1MCkgYB#X z4FAf;xPNKRv|Ds|z_HZC)1MQx>6^@k@xcs3l z_NsN^@#VJFh0R-g2E3X9PW69vwE)%+IlOvu#w|1M?`4V(L^ZqDtbfPG{Q~^n&3@J; zg1dhcNcV}aKeiSA=i^*&j7s4z&EkxodG>kzN4o6`Y=3lL#X6{BDgBo+{D%#);GXyO zE`Po4Tw2(I4@HpqeIG0Qndq}{0LPy7jAj2Br)0kPPdM(sYVTeroO?a?(?f*K*soVR z465oWcH80sR=np|!B`f4oc{^SuI@i$?PcMYxoJT4{2ed1Z2pJ*+_t9A;JD{_=a24x z$6x$M??t_UqqUnqc9H?zzLxf$Q}o8b|9{bcye#&3`y+pA$x@qc~c zA`i0}*F_lr2j#aP16Yx@4)eP6$FlIe(wXY{M~oR zUEiPI7}v5cVz(@Z0DxfJxK!5-_J8(Ye{k201MDyESeARA!2iJ&KR6)%;G*XG0$ccl zqnD)uz`1=m3EXM+H!UnnCH#4=-045hjUT*w>q4@B@V{yQ2mhOnfAGKQ{HEi+G5)4w z{Nc-Hg`1Alz3BkJ`N54hU+f>;{L8=igUf#TXMb_Yqce~Kv$IR>Y<9U2u zgxi;7A3tq3+hN~&Qot{sVt=_CTirgr_wQJa`)T_xtZ~!v7qyKQN zdwv3sf0s*r{o;RR!YMy(|Ap=Dw*SI5H_hH(SY6le73u!n*&J-WT`W*|C#YKG{*4#( zoJIj9$pEj4EdDp&aRv8Yl~KE)F0ddbk_G&=<{gU-WP4Wb{k-WnZGRXnxKCUOfQ7ZM zP$?|eOeX*}w^ioP-Y0k6`JUS<_TTfw{jDAl+*1XT;s?$BQaj#kzyrw<7Pn#>QH(cG zfktfpV4Xywgg1z5C)7u!0)S-&PKgocw;s5!X6=(O7`}XEvwSn*;01{baLvV5fTJ5? z0%G6@|7EBK&;iZ{l79~pTf5JTaX-Nt=Es9N?(YkK{;5D#e)b7jaDUp!HGlRGfAcnI zfcCfY3Mjy^|B*jf0~p6&c@}^LIMqM$Hp~4)X!!Sh^&iW5TfIOW|07=k7(6OqAM6ff z07{#^fYgEHU`HU^_vYmYBm-zRsP6q^WC|vH?==TJ?{r7(HyI=RlO9IHM-~H71K2IgtAhFpM z$lYCD-|zDin~i};-~H7(w`Z=}+DO`mG;{zRNoxEO-C@ z-EIij7Z8j)zx`l>Qm7-*zJ_ zAUObZ%TotHyX&Pk3*Yq=z`B3r^P|7w)}wUO`)g-e#lkmT-mNzY2FXqT|D2utj4Q6X z@jv?h{(p?mzwG%}AKAeUx15Tb{+r&rAE^WRasNGj$HT9_zCZKXT~GeWcR%#s@{;Y_ zuKT-v|6o-h{OJFA>m#}4^Z)Eu_~&+i>_Cif{Pk!4*nj5_aQ=)l`*t-vsxc1PG#kL*#&;b)y5E57Y|00axAjKLPIE_5tfnuYTO)R=C^y-s8cv z0e=ek{QeG@xaqYHKm#6TK~*5x&>eQ`qaFaz|NS4g60Gx9uGgphU;fws>wmv* zB>3N6XPp??peWc(FGX{P@}5G8hc|9^*9X#Yx5!@X2h3zr;*U{~65G-Ff=C}{3i zpZBJeU9oX@uxMpeZk{nsFV-1*0zs}KIpiJ}Ex(P*wmK_eE~V**mTg2yXUVWCQWCj( zS!iZFJkOq5OasEQ!7!0mG+v*1LYO&73kp`n{jn20d$^Ed8*=jyjip@ZwK|R(>VJ_> zeZPmJ?C*>55G*LR`Gj3`Q+&*t*-2na!!AEM%z*mA&ANsR5$01hmrWDO^eEhk111o9 zaJihOHC*cpI)e@QXpWjaCyA{tE3>)pXJSe)fsm@&N8cGR70?dEW|?v?!hC6u*CAUU zBg#uE$kLp}Ji83paAq~5mKG|(Fn>vy*w#oQ)gKrF(%Te5V9W}J5LlX_vPh3 zN_8;us$oE`Q(ce)+b~}Y*(aRiQ7X;n2$staN}D=rdwYl8u_W=ps5kfuaxwL#MgR_d z5YMt@+ey0oS~utqk_sWuC@GKAR@=sZ1`Kn4~N)>hgu-`dB2X z`VxIS`1Fz0wYC?;mpP0Bh~*|unrM*hJG15SbT)aEg_zne5U=<;vb<%4y|;SQo(=}U zfKUY|S~V*%03c0IqXLzUW<1Z0&)Ajqrq8PGabZm|PP(J^mr-RK?tgIrQj)M?-xh&& z@)H(U(v*(azHqYOB`jA2wnHBxvOL`oBp3NCdx2~YeyOWmN$qwyrUqWz3@L2)JJH-# z5HPzT1C~N;)^@yQv}E`#1DLu`eGF=qTEZgi0}4SxH#M?2CQmhev%TOi(i=d|VQDtu zm$-G9V_*wCS=pR2Fn^bEeD2?(6fp4O09G-4;btVXE%h!%JMQs$eOmGj$Gt!*2*z#nXYCRgTA$@(#r+I-{ENrR8cQ?oCFd7EigHAT`Btz z{$xYUup0dq165qVuRfHENLZYzu{gMFaQ96xl97;rzXP*c#Au_qy!v`5XD_${xbK4NK_@ke3MNTEe9)>Q zpnA?u<#814cw(J)ZlSo|Xff=mxDXEF&&4wU#m@8)uFsUOl2)H z%P5%|4}axk)lvl`saA{j38&;L+w1|GsnzK9n;2hiVjsbA=#Wrr!SmA)UhKD_HboAl z$GpqQ&rfMo+PG4o8zRSG+0qW7+Smyo#N?N2UE!STloHf>lO`%v68iEcs`s1}t4k{k z=dxBV+Hu2Vb5EBsxYV;=0aU&6xfG!zF+Q$p`F~HswtYcjOi2N{mOxG_EAwe5l=|>N zraTIKJXE1|ZT^jbXDu4vBdW2Oz7CCu3WU*VJ^M~%8nNG}0B}GiCC+v=E>7voVxw$*T9oZFKF$bD5RSj-9#Fvbe z3AO4EOUlRg;Z)$vK2|1B%?-^~gpE!J!+(3eAD(0Fq194$ZMw^r~Y6aSuOn&e=T1%%nd%c8Q=qn;vDJfqd9p zixinD5SwWbo5>NIsS%qg5u0fdo5>QZ5jCU{HG~m0l<{hC(0ZE*ME4ta`{R5 z#UR@GL@?*nwvJ6aQaOAN{90_R4q=fsDUbkoH3Jf2;<~b6We1xh_?&d(5?%f zoXqm^5>8P98$=s1r%|0{7Jq$n33i&Nj@ z#3;;6`&olRa`9-9m1q7z*Ve&Amqtry+Zxp2-6!D<)uVp1SNtKnt4?ztF?wG(MvEDY zjKX2dUTVU*C#B-W&kBN(=2%mDTyj2XZ8}=?+BQBozAKJn&`iftzDq zS!Ic-fUvg~9n)qWwTt3aGmgvIGG82$jM!Q!)7unL3>sbkT_XF0J82#=f%GY+PKaq9&r?{I?$n z!-tY-pZBnr<$lQ&CYn-VW`wW;q{+F#Kp+AxBk~o0K%_0*YkN=h(4l8q!t?WUe6thp z?y&E!-cv2o?_@)@TqFuJ-?!Se=e*WBM|+J0N0C*QtbauQDO2ARK6%x*QkoJkMIqny zXHh=jy|#LmSXi}BS%qTN7snwR$a&Yl3i)91_08Kem@?YmxGh$9`CPtjC5qMttq|61 z%=DQe!hO9Aqa)tZ<-go!C|*)EDHOh=St(AYrL8ALFTTI{27;~RD=~Bu-WyCkFRe#g zPi4;-hJRacRUgl3gs!S%l07>OACHFAM|>*jpM?iaY1=fm-~w*_u1&sG6!U#>CM(Z= zG5ED3y%nNOiUKAhTK7D{dUjv8=Y#!tu03I7+3Wd22vqhfAF1#Y9xC}0$A*Q*uk7)P z&JW+rr2x6KhCOY&hpyNsHy4hwOcw|n zVb|(?$7xT`g|vcYq$lf8#jw@*AlS|@b=Y>6eimY1`y?qOs_@EY^YRenB;VkxUnaE$ z!+*yNgI5_d+oefUD<=V&Z`p*3QAqbEIbN;T^o+#_R|rYM8y*VTV^R{t(P7Rq+Vh-b znP*k0l`l5TL^g!QUx7=Uu->##>7x$rk^~7cVlQxEl{h1&VpWp`<|08 zZcrs#!(KIyX`&%URW4l*ihs}d@$OxGwY{jD=$aWU73FAnD*6L;VM0VPz8XOAX%=3d@KU`@+G^k*PgfsVj;$`ZP`4zzL0Cm>*)ZOgv}{ z@nLRpR1X5@1bL*=7l|+T*0mk{#(!RNn1*r&Z8!;n&&l?T{y2(Womf16e3;K}tgM4+9pEV1>z_{V{q^B|29i3c0)5Nc@vM{ht`!iwS*i%=!SZyml2NPu=n(XI zdWsO)!NDa(spG}FFG>@a3uW??ap#G`O0mtwqes6iWgw?nstHsK_q{q<;b-TI=0fcA zk;9WieNIEks?TojZ<)8$AN~zY!R%ldQbrU_csMuj@z?LCDc zE{Jjw>(i@jrb&Yq&;eT2Cp=#q^NmBLN zwjPo42Nxwz_fn3Fh%oq~7=e#?B#iqhQv1tStbmG9I)P>CwZ0#1lE&TWnS#c1$P(sI z`NaBMosko)TkMnP6(wd?R1DO!JYJx*m3%16m*z``YhwyUC(OlJ zO~xe*mCK}RkJWGVJHjt6*>pEwq2R0?EWrzU^XaR2es)%We1G3ImTgVW3b;`R3T_nH z=z`f$j0@oTn*R0qh^*Wq}NS;P;JDmsvWTqF;Ix)%6 za@Ov(YdwPVG!i3qr+E;i>rVlixZuowC5(!bNF~&ZEpwa znd(X)mOizM(NgKPLO;>B^m@$b`2@p9chs2^)$#jCdag5t1V6XOYG>m2BDqaE1CTwf zi6Qn18-FIH!WNf(+QBgcGmAg3X@T*hdv;FWU=OnuU-*k;#**kJ<9JLjTN}LLT%{<< z)!D7fAV5zmif2442)oSom#Xn-1P>o6y(YHyu8x-&-;Md9hp>m*gtOZ0(8z@lg|Pcl zeEguIQ~5#=IzcDzq$_f35V;KW>R81(S9S{QD}R=)SI=HcT%%##V%zYHhY(wP)_?6X zbM?aK3Z-K|9zIbmR~zjNceOILU$j+y(-!uU>)iGUc7BGiTYj=7Ek$^KxOS|2haFzj!3vQt_4cwr_ZND`+%2eniILvE6ghNKVlm zX39P9PmEHl_!Fq|#F8J^rhAhqIPdw6%7;2)S0dwE=`5Z#ldmr+%~%pM)beEAL(zrD z`*U^jYFb7JGah;HhXK{%z-K!1rH^JI5r2wOy5@~{HIM820^jedrqqer9fX#<6xtgt znq7hp_KKV-VQQ4b)jc6~f;cvv9ai@g-lhIptm3TmZf*{Zc}T4b4Th95nrGHW&s)x% zi-aeBw(k92=xarZ7Lj-#`(Bc=p09PT@wZ!RI3|Rup1!#yXO&>p{VrtgqbsQPF@Frn zjm)RCy&%d2*NBm=KaDL$eqrGg#RguTRQ+k0MEv+x6Z51%K~vrf&pA&pzwDOi8?N=2k~~xwl~FUe7QF zv)p$@l39T?iGz)ST{4WITtkzl?tdG`M5KqM7(2YA5W66Amytgj$`PZG+Z6R`Y7|Lr zgOGG=wj@%$)%2y31ytjfrte(rypy-G* z^Ow`y>r6BpL&CIe>LvNOMg)+NPN4@ z=zT4K)4X+?BTTD#cEcE1ge2Odm@(l)m0r7MLu^4MGa*if|LPu$PSO2o_%RoVWu9*d z!V4b&ca2Ff*JK53p$j#vl7$`6vSy_+@vQ1n|COB7~%C-i!a z*LgZ(4v6J6`!;*ez8!;t(n(Gu(g?WoX|NQe$bA932_CGLMD(FFe}6ykV?*JCu8Z2- zzC$hB`zwFe%WCRHE_JCEkB4U)>8?;>8xmMVeqX5mwU%e`4f2T%=L3sm{V}isVZ9@@AiICn9#( zOTLpRt|*Iwygm^PJ%1h&^H$Dd)9S9W?CBX1IXse^WS&_kR@AD8XA$m+j(7MTQ^dD$znNe>x_}*W zrxlb_DNrI%P@*E2?Je+JJg!&04DE)O4_zA4 zT%kzUek*4fkq>W9FC4-a=M8@Z;u_y)3rkw|_F8@U)|Py|yeoeWSHz2ETTWygSK<)I zD+3D0*%NIO5)PkyHu$u}kY^*lK3v-uyC_33E6Z(Okm5D;Ad)?wo^NCm2STfSQ65>M z;M1J5V6v;nNRN}{G3HI6@uQ&Wi4ebv=4?Gb{@UMKAw8{WH*>5xF08(D(>Q!O#*|I+ zj006t46N}8tTlh&2BISo`1Q3NM1U#M7RatOOy(g2@tbOt%3wo0kBSF5yHsVg01SX* z+5DZ!&3rAV^tuu%moHu5=Qh`H zw%K9E7b7HkcWB{HmY%3@fs*7|y)k|GI z1ft;hmyQbgY|>K-$cqlMw5ktdqrA}|S8a?&qZAc$+FR{hGB&+K+s=;ttEc|#}%6=`fg&~2?TQ_*vv2Sv&rr~(=JGPzx9k)TH9<=!oxFYK5#CZ1fj0| z#FvMR5?a)*;qKA&r@+>fT+_IVh7P~Bqipr1nDaL=Z76r4{d2zW{5rkO$|{Q}hzi9K zc(uh2BJ9We9S>FX{a68Dt*;t2jQ21k-MwndWjlY=Wgm7^JRv*P2_akhGoYSV_Mujy z5)WH@BkA#d`W2sLeM5NkHQSEBsBTy|(2jQS)`$7Cci*qwTw;P-lk1N9ZwL zvz>qWcXK15aNNTb#e`1jL`^6bC&J%M{WLq&m+C2c4nSbnO zjI$Ck$C?zWm=##@#{znE4ov9_Te~}o7jw&wl%;Hw!?$FKL$iW z|0yEM{q*nWgq*+531Q=3=Y;s`UtcBgj}t;q|Dx}ZApY-n{P%exwsJh=(}lkwQ~-bL z#6KZTer{&4e?qFnSOVXd3`opxw2|L0_&*Vlks+6g`(TmZ{v27luZ6~qxS^;L{S#7! z#j1at_xefN33?LAoxG3oveHd)lC2{t)aRIl;(cz|`{|$mAq9=Ggiq0*5GyweKOs}( z`xF^x%cpHLsf57gO)mO#PXT(DzXX3Y0$Z_UsCh?IahX3iP24BRLVtaA!*9qH!0P@# z+WQH?l4pasCN==lPuv&jW=&Y6@LdD-$T0K1U!oZPL|6rY9j$!&KCwFl%bi_+?qg{0 zs#@_AlBG01MF@geqWAQ>wAMB(G@8}C$n|#EH2$_;Bt}r4{CPnq2~peM{=0vV{w7(Pw~#J6y)KKHRV5mGX@cOGU zVfh?0yrxK+5<@3PH3~sjCV=Ji6;N{YA@wlDe3C*)KMITWl9RlAMxREY;)n6ZYwUyy zPZE#!@^FO5=fxJ|3cbfhvloA*AC8j1VhDoMBEIAR zhs^8`|Dk*GvH*;%)M)OXwH%N~wZkhL+A%AKGmc=kvM7mlLz0<;^bi0!B`oZHg&G|4q06FMWcQ_CMbF~ zd2{hQvcI%^U|2>ZE@XfFt{FGl9yKlgO41t=bglS749MbbQr&2B)Nn#nh9LBX&hn#x z8VJ#(a<~YPFi1%DpdPe{E$%G28c$1g{N-!OzXadnoZMK%-DX`WqQqE@&nxz{G|Gs; z@gW(WMQ!V$yz@dtR>tbmCJd6z$%p;7HVN=JQvFcrymc2HVoS;NU-!?5RHrXOcntbiXrolpy;SNXhDqW4vOM6j6o`K~3B#NkH+(m)#DI|Vz~*GHc){6*q6 zP!^$x$}F8lWlh=|_+cm$4Ptr9n0OG5NS%y4U-Ra5oONoAnK;L2@fbroSIbP6XQjs# z&CQ^ZC#Hz4mi6xY`Scy$f1$)06brVH$H#OTGyO6fQX1rjRnw-sI6P5Up>wV3w(h$Y zg3wo^w5osetbt2gGn+G)a=8;!bL`^mvNCuoY_((&BxJZPKp{Z*gmZzp$yG+{^uP& z<~d^fl^a$7b5IG0E9or2Fb@E@Qp2@U!bUB;HUfXt%m??yk4bc)U__9Au5Uw@dQhPd zAM9h(C#=8&y;eS&t93w#KXUr|0#h-x$LuE(9lL}R)8f!>k!Tuf6AYa4dT<4G>bmb& zPJrYu&VKMbZd-otdd0-$X7XKqg{TrmtN0fOYii3rY@#*~wrEHVs_l+SBy==U>9y#W z&yarwt@Dcau9XjR7JK73Xl81oDE8J{q?(9FGqbLTGGl68^xwp*nyINV`=W2&mB3g; z5G1Z|9Z6T<3{ptOciZ#Xq#*xWcDV1g%$FdMQ(OX3t+~*38nHq`E*CcmDM(~3vUP?T zr2+^wC`-AX7B{hTt2B!Fd_0Zgbg6wG#a@3Cr|78j=?Es69=1N9ADWU4RJa-*Znmz{ zaap)3h&*qFJ~H8_O#C>(%3;X~V5QrRa_kzA1^wx_z?H;HFdH&2WTc3r5i4@Hm@_C6 zrr~`FM96U*ujEeGIByFjP4bFWsp*6#>3QV^r>98hQymr^9nud?GUCO6XqSy3jaG^3py8$B&%&E0e9&x@dBhOAK65Fas3m<|fToH=de&lyreo=NZWzG!~ zJRQ;Ksilna4iBzfNG{fciQ<94O0Iup8>&UjVH`#;yE*gL>8Tbl@WUuHLX-z6 zfTdLE^ip?w_dX~)dl2=p7qcRu#7A&UMtnC358DS)#-&;f7%w zNK!WT6S+r@amVDN8LXuzE=Z@Gj7w)S}WIJKA(~MnE6+fx{q^GspseO2_->D zxX<-Xb77M&dF?T#ZL62?bVq*&`WVQQ3gEzQQQ@&rrY?TR!i1mRM9zR!mKdHM%NTxi zu705WNaTVmE&P^X-2-1^FD7AZ#JlsV;&;bx3Rqo5)KEjU`Dh6$mYWbQ_@t*;_!4C& z_}o;$bF~rLlrzK?{&W>qr;MR$jLL0OZ)AbWe++ybfHua#v6W! zH7d@BLTwDkQGNRKdSaLSfk1qk>X+x4OO}^jQ@4JP63t#pjUx)~oDbeCIF$Wd z+zy$gCk?*x7K;L@T+)AmA8^2S?YIv|HT(05c3ai$Ipt*j5w2<4s+mD<0b>QnSyr`_ zFJ#X#V>;MLsORR~(=el~9280ycs?siZ>0|oKAFfHi)Kuh4$@OB-7tPq*v@pXcd9rf zEQ+XBe)P>(t`o^0bqH~?%@khy%u`4o_wl>=aDCP>DXy$j$i#nT9dbVErdR0ZntzHR%PK_0jrJ&j}Fr3_jpKIRKz#~z9#nU`@I{nCaUmkWrvp!r*rT&cOLiqk7N z!FRA@1Pq;n&gJ3p89za9d3&OjncFBk%n9eA8&=H5Z(TP?xe0l|pq6l2E`@#F^39z6 z^K6CX;HY|@h+=;qUOWO{iFT?>BWY?SO_6X4sM~(u&(~E8uE3v|A}^`ZI|bTiXk|Vm z@^flpZb}`(}c{Z|-dnR%&pr1?Jb68@4xt`zL=fPDAr)&od;mCDDj-F;^K^ z(r)hJ40FGJe#DMHUO>87bqf6Xe9fheUu4=33iE}ZsWI>=7zvDm9*+Aix$BA^ukdqz zdxrTku$t4Sw0~_8Oq_!!-15NWo$GaayuS{uq~wBiCZ^kw)F@5sKSSF4PqwAxZ z&sB^=4HSR$0mLy|FM$W@X_GQc9bd+@zT%7=uWjd#F0{%Ao@8Pjzf=tFC$e4zX=nAw#0R5EE~uw0<(+?F@8^GIL@N z_(@iWE%^#&H4t@ad1(-kep(Q<%It;+G3x2)B5P9tld`(y+X+^xeQoc}_C}hhWE0{Vk zv7UYPH8Pxu|3OV=o(JTGG{-Z-z7VAJMRO9-ML&YG|B9g37rR6za{Ja;wWDgHW)@n4 zmkzb`v0PzGe$OmO>fO+}RomW-p1-up=YB(!_DzR;VLqm)_%!Njk=t$LNu zp@_5#m3`MRl!afSX9TM1GE?GJ*W=5sjxAm!TBtOsDMO%9KFu#fp0QfNC7(zjQ|S za;{eV{0@xQ7Gt6DjO(wV04HNIX^oUcZL2RGH`|n$Cv)foN~(l$S(>)y$f$bV*#c6-jB_Q)`_KM4>XEfNd8*ezx}Llq6QzG_(`m8uW@{tA zlq)SCCAYr1UuX4gt6v8jvo|6}};8|gSI*khYc+W6or>qEn$=rpO$&+A!DO7RE}%5mV$9?rJa28UgCTc^9m z=ok0AU%b+iS8E)*n|^=)ARO1p>O81Vr)nNQp2#j|%F!{ZYUB+zi-Q~Y)XT*bzF`je zUMy}Sb+tGjtJ6e3hc5)9!F191D-oRMt2OJd2Vjvpe(CGsu57lYGG1|jtXd6TBP4EV ztgg!JdfK*{7H-ChHbJCf$z&Va$tY2c;cEG6#kSClMm9-vzJC zoeXrGHG$(N_s#sl)JwInCPT6(V7o0{@A+ikA-=k-h-q#D6z+pZ8yVfItgmWtj?0I+ z7~%b;b>Km2o$i0S#(F(8wlDKT>qkMb+~zCxn65{IzNPT9=eE)4HH-pLS*u~4 zXTcg&1Qqu!;Qt- zfZH-VmvD{w@PTiWpoopFKFMYfk4D9qBY!z{Q^)L2Mrwa$UB}tWt4@9$$z7H=q^RD) z-#(946gK8s=Z2#2Q+{`jbP5^=99xeNd5h~t7lprGDq6MBPo7Rumb-Z!&#U?ASY4H) z8@SB@jOnv&#wPKKtchE{6}MnbU;D|RJt3^qbFeffn8d5&Wl}7&J`2~izqcJbk5b=u zCg~m*LpOgygV?m(zI&YGu%0KcP;r+!D_(9+Gcu$xoPA#HPv5) zMn>@3ks=D5vaI`saL|9pd}0-+SIRhTvQ|y@FfBT?D9y2czV5?A`mP>$+vo zOAWs}2dG%?d~mk>oC;5$z1*3{n5EbwJDoMyM9C%i0*lf|jeb{<^jcQ>47n?u}A zw~G^1E;>IPHk{b`tT*mE<3+xi8@y9g_ku`SbH^X}Z7a63&2DW5m0#hpJ{_I|m7knv zHM0_1XH|8a2EM6Yn&^0LyjZVcjS*Q-OU8d&C_A`gj&u{E;-Sq-YT4HwNc>|7Q@4)q5Q*<)(T_|>T!Du z_d-wD;)=|IC?YA*#xpl;JwgM`8nAMW7nxn~6UBAmoS@m_Y3xzxM!H-4KGND9F4TXq z|DPUdtv+WhvWWgZO6BK~*4BQdXXg`~2{OD*e{}boZZioNIeiV?+Y#8PCe`>vQZ8d_RCeNeMO>$iWy%{(!ZdK^D8!!1Q&O!>Qg*$~y4328s+Q{tta zw(IS8VwT}EsjdXJ;N%z~_62 z%j0$_%20i7`SH9&K*|?jgGOgkR!0l<`!jy1=5)5}w|#BVpQz}1W&o4E=Qhjz>SnB$ z+xy+q%i;BrY!ky-!Liui{A}weGsSJEY&hC2r=2sb?d_&F>uw8SDS13UjCw=v*0w$E z<@dy+UMKVuD8wnjOM6oh!+L)-p~_YnrT%SpRr`y`7ISgd*tGx1u1eAGDQSP@E^D41 zatCOf9>+?F4P~NV&JK;lS@p(tIJnI7a;YpP#cdE8*Qwc1_S)FSE?6Jj#Y%f!o^cPv zn5OgNWuI;Dkv-nIN@Sd^&7@8JCcg!@hqc0ojeb9z6)~(**IfuP@iTvKW`*MX^6%Lo znx4e?mM&Ql4bOUisJVDxXj4U7(b?g=J?vjQaT2rP;<%2q@o@_NW1Sn<{km=Uhs}#I zWRGhliSqgNA?8KCYntuGxhzUp7mTTod1_t29O^swidM62X=WLshP|GfVtutTEitt$ z-MX_!wtjls+jiKitlodTY6G=jJt)WiCEzsMAVVRrX7+x>bfh-UmD+d_+TLz?bKBSS zlHF#?m3cAQU$XE~AMGpN%+OF3WM!^~>x1^%jklrFybfhOKuC|=V>nEmjp^TpqElW& ze$dQ>Bh|k?qSq?g&X0+D#5=)_`$l1p)yV6wSMd$9M4_74>_~sO57<$xwKW2k6w`2z zoHN%|K@0Jn=2tS9)^N<38G;g?&49nd`!}Ot9XCNtXx_@_o;Kp+R_|ehgWhw z(e1M7MfG{PKwaXV$24oMhPHV6M|G7Eb6Gwr^%Y}tO~N$UmCyca+$<_bU1h7+O23a; zRal+z?mus$BP)mE5{F*2k(k|htc_B|ZtW5P1rZ4gG)a%ZS z_H=q(?AruLH@#fWucNK^vs<4Db2p5Z=SQUEu0?;;?IIsLRlHkj!w?z0DBO>g>n<19 z?PVPIlZks%$D>7+<^DbC8eBmO^=qk@lgswaaSu?2QGD#jY)m__oy#?GpI-TAA^R zMjPFoA76u4OwQGAdK*M6OYT)1Z|+{XQF5P7#(1|G<|T#GVbR5-YqMhBQ4OQOJ|9Z@ z2;~pryTA!slg`e~EVp0Rpqi8~vOG}t$A9;peO$QIqL*u%gEIocWACKd_bG_?w z5c&K1vKg6&TzR6Lbo=4LTQ`{9>ld5Z!C*3gb2CU!qsb$99wwb_D1+&J@Cbv^bP(Wl zGMG%*u7`_q)84fF8Y(8Fe%~}_x1Dz?uytAC;Dye~P_qW{EZY>lM?6?7Vs<^#+fsi= z&+1l|jcv>)>($oF=)g%I10T%S!6EHfk9zsbIntR(-E+P{V}|1=tF6UGdu33g*kLqk+Ha&I9 z&B6Gg!ecUCX-{R{D^<01%%mUot|NcE4czCEq>Bk%`zgt0_q%$tj(csV%?5RlO$`WG z&(ER_9K2&-q}+W>{o}-cyw;EBr5fsqb8?Q+a5NI9DPK6}6=&?asZwlk8(%0mo&%|; zEVD~%?h%46zX&6Ub!;lvRi0$^#RzuB#p}-PYeVhh)qA}*^W1opUNz}nY-)ej#MQld zV>+yXYqZ`}d$y`q1x)l!$#*Mbb6?e=(ynjbaCmFoK(~k0@^uTWe0Du~?da^S#*65I z%l*>zyUIUReJA6?$v!EEo4`8S$HC(K%Ig8y?@sfRuQzw*2K%vvp6B8)(_hYDFt?0W z*M}31rn{A2t=-3PQTL*e(g%NI(!Z9kwjt4M4*B+pt93H(p2o&J-Dc=D>y_hHo8)b) zG2ZK)VYiD{R`@)&FFQ$%-KgK&JjzA=Qq6dx3?6G+nI8LtV!j^b$!H2n7|A_3TmL-h z*2EWkVCB{rRgCBk#CJEK=U9n?O~N?7ZszC+#H`rJqs_us)l-I_kQ#rl!E-kg*s3;r z1haX)-IsOVC&g=Ji*mpZ+9f@P_i8*-Osz4?`EsefvTCa^yD?w3bspSqTcn8{3jp}xsMZ(FE8q1 zwu~I=={l^yFf??pKklcUv`5$VCTwsB;xHX{mrD|d``DNq@_4c7_GXhGeG`8a!6(Y?R$a$suAU0R zQ@ev`XXzr>Hfn5W*Yj)FyXu|47PI-#Io}%3an3nE?f6X7Z_`xIFXedEE+J28=y|gw zXcwn&R`%^@?XLF6hf4d?Q>WifleE8HZUuQ7OC>0@>s5_r0e+RlZyp=)LxC>Zc(bvh z^11i?X0pz+-fe%ZCa=lvh4Xzg(T=wq&C52`cZ#=BAEWIgH?-$#hm6_r$ zF>(ZZP1k3pd>LOP{=+!|U?^W9Slyir(WqT$%osZ*TL< zY`j0K$O5@hOgz^c zbtAm_eZ)v9?B>vbisxC;xL7Gp$!pfv745lhXFG3Mg(*&OzI2S_x=&W8W!H`r<@WNp z;#r`;a>IYA_CDgw>h}dK{@e@rx2=Hp`>&mVUt8Md@@zB^h(`nO>DBRUmCK!SkN3{D z%>3-^lRNnUVEx$%7wk7D#_^$m9RqM4?}<~BpU`QY(noL903ac^70z|@YbW1lZYw@8 zLFH*}9gf5OIkes_5CG`#SeZ$goBogu<@?w7$!&lB@x62Jy;Dzqzj$v449@FiphkIt z#{$57evqiv=4_uO2X{H90O<05pP8SXdU9u;+kdcO zd7OVH{cY31nn?O>UsH8YxS2dmF$SRbxHT4Kf&T!5{|!bydIP|J1NHGDF^~j^`v4^V z0vr7+nDHy#+u|E^JUiG`F_1Jmr-}C}MqaD#bavLEsuRrEA+qle!ng&TQm-vS5Q?EQ z)gZj7^j02(DlTRlhX$r|1oa$oin!|eRLp-0zw?GroeAQ1oUkI*8S!vKZk8@V^8_a5 zbESq9Bf+SI65p?!O|(~3EDDo=N-%<@&NI`5teRI^1l&X9#ll^*9EL?UGqM%M9gTr8 z@&V86_b1%Rpdqkf5=5`rNf12S0S-T4w|@YO{{kD%Mn6Cb2mXuw0FQfv?0tWM znP!&pq5H_?4>02|5IJ5*0OXVX2049$Pk%s9qi?WXuR@%(dZ5i+%xM%fThzvy(XyA< zlEA~++~~5o9;yor9N|jMJk&^F_RthsXo|&D0BBIkdCG()L^y&i9-aDg!-d%qJ2IWSITasX=Eud6ZW_7l7u3iw!=25`+%RC+} z2K7R33kwOFrpE|9c(Fjhbj%f`BO0TKU=Y+pM@~UpPJ|nCToMOY@y@S=i2^&C2U;0g z)Q1+^}Tu@VM`WotEK)9UE1%wma{ z9LkximmWnZ62LqJWkG0n#!`39&{)QTE|%JpQ0_gWOO_c-TSn=ijjKag=jxj|66B&Yb!{^> z$D(j_q22?;3lAwP5W>+*RR^Z*l8$EX{xT;>bur3GLJHLm+dBAo$FSc6eIbEI4Nro( zsycgvx&8!MpTIurybnNd{006i?fxEp1Ao75OSUxlH^`;;3t)fx=h*rNY5xKX9r`a7 z``ANz$T@+S#e11IX$C=MEipdp3sf^;=U`=0gps$^!JcjW?^W21^gmYk?)o}^riZD8_9HvP)F{y(*Iw(hA)I?AR zBxqc8gT7TM7Q}x*k*HnDJc2vsAeL2$dz|3zyaxG;2K?$*%wB>-QsB}*)X5uUwPip0 znf(b?U3qVi+K*50Bw+f-hQG1_`UVyLH5R`>SS6V54C<_ zw$Pcn0dDZ{MTQY$XmLOXqsti%y})LTvm771qEsws6&JDd+~S^LmGqQgVS-?a&S>Kg z&v$RMdRkl{)YcgSzE0bCi+Qskd*Xt~p0t|hi3DLlR$l>c57$N>+H5v2g}wdld+dGr z)weDC22OwdWy50c<$dww50LLUlyUq+7C*sQ2NDn+@xuBInEnQI{tWrR3p1_QjJRdQ9>txAh^SSkD_m)|ZN~SAH)}hHU1Ma( zu>6dxSSRW1f!d}vAoJalG6SeO0i3|ojrR>^;f4K}7-anpId1yNfh(E4#m=XzkE zB4NlR*O?C53Rt6?_I$H9nQbtk+Ym>%Z4BV5ZQ!#4CEV}{haea-jXU>6!AvGb%agVQ zPLKp(#bx7QMi8NmJY-nLznJrH`{08i!RWjAt*c);JNf}$D`oS0yzjHu7ti0Zr39x( zmG^%;KKcQ0>+Pk@^`tC6zUXHRB@AaJ7y?EOT&f4cJvWZL*q*EC?D)%u9^bmE|JGk4_|v}luVVl6J=%Sh zaoaM?@c=VQ{UD3k5a8SyD;<3ZGV3lph311OFnNGZ)hjm6Bj4~}i@im1^ zxy*PUGh!Np+C!!ME>x6k4l;gu`{lckaoRzG17J7FLr{^7Um*Gcto-Tcn=k(B8-Q5? zPG=VV4U%lUT^pC+Cg9ljpC^EAwb@|sK+3iI%lLqPe~<0IfLDJ3u!|2*fBUKLasF+y;~0I#3$QDQ--@~0)hjC? zfs7-}{bs$y+FWOxnq8dhoU(t_UM!4O&FG*Wx=eG3PD8!bYCDJ@E+)5RSYRF4O!`PY zAWs4sMj|dS$$aNHvx=BqUD4Pefeg8l8ZFH*r=DkIJ_u$QLeqiO%;fdl>Vjpwnq@2; zq?nPHUJ}o2xku!W4H*=DO(g|$b$O^gy7>uw;-=x3pI-jZvFNQwzx;ncee2+7?0P?A z$Zy_B;PrvDmtQ>n=Y@a}&jGpVamp095^`>n$t zJN$>AD&yFebNezjf641Dv*LH&WUuf!q8^5VpfWz{yhvz5a_QV0m3g}tN78tSYHghnDhgG`Tq{s%J6YPQ z+cHMb_O6#P+)bGu(Qo|;_iULT2>{GS)bNSd=vQ3!D_8Ut&wj@PKXm_lPW3f^lCg!4 zv)}RFZ@d0}9}b&L?(StBRQ`kiqyC9dtTMb`Cy^Eb3psAn5X=n zTmA5Dyr_S^?7;9hKZ+y$o>zVB!keF@^m?$5kH6wCk(oBD9bg0-H*|wiz1dZ>g;O!J zIko+O=ZnjDVQ_}cj^~LL7o=meDp0U_;1?{;cY_tKo@x~^rUUnX%+swkh72LdaFB8f z7@_m7Zjl)4EKm|(S9Qc?3@JijIhl8H>0zSc-gJLdw7^UmWAeWa;bdrD65oaFeYK&!WgbCL`hs-~)uX+Dj z514b6AS>wRJ+b(^e#*G?GiU#yn_s@|lXrj2e#ZZQ=7eP~bRYL%Uv%#=|Cvv{^OFYq zdkjZPZ}3k#OqPexJJ@kDW|eeDKyWvp+aOWa7Hf!IY5@(j zoCJ(P*&GeQB2bK62U_`gwggVM8On@bG2Do+P>gJy5dy5a!=|B#6M{G$rLFXvbuoWu z$|BU&%4EtCL7X#_EUD0#-f@D3Qiefc0;^wEg63vRbe%Ujk@+xII&;n=x6Jmy)WvMa zN4R~*bGpe%4Pi`yvAx5oj4yLJParZ5K2t0tXb`Mk2`=9Agm0ho!ydlv!dE`>D_8%g zy#CC;yq_;X#=*bxFyDFO;^!RW9Xo$aqS*?u*tNne1kUDqomFXB8+Oo)!q6Lo5S`gx zZhV>bjZoaYHZ%10Y=FoRc_jxP7Cw~rHAF>=ImI{-hpkc&XyEQKB_*QSAP^XHMa}~} zqSS#Np7&;OuB=M!>r9>cs1Cw}u5Ddu9O1c`#VHn^OHh%U#173s3y}f}E$M&3Wm!Zx zKhtiE5PyT(RsE^Z8FPIJs)KG`^c|=DipM_l&;)+->x%~}e)FMw^Va-nFMi}@@OTd^ z5&)aGVwn%^)~w>lXN|85UK>Csb2+DG_I}Njg#n)B2F%Bxt*sFq;a-EU9kun6MU-a- zFt^8&foqqPU=h#bn$yXHQs93IMi8Hgh%G(POcX$ES1m?DE*Dw-X}@2?jmUXrD)q&O z_?^%E(vd&rO~2=j*^jv+{p9XGG_g?@9~;TO#`L=$X8xg5RrX{3<5x~RC$TqK=j!+EQ>U@QX`8fEt}%UC_mC)tErz8=t&r-q#vj%e;sFwvQL!Z~Lfx<>EfN zv-f`Z=eNC6e#{xIpKE_*Upn&{ubs@H*TQ@?x0IbRFtR4Yhn!^JDWa(^nCI>@Vq6R_ zH*8j+@w<+B>gNp^;^aqnP*Ghc{YLHdCM33o%?fN2O?byTzs zvD(Hnm}a69JRfK-Of}O*hCuK!;hv7{+uq=E&5-+qDcgC)7jUXWYWb`<1{!c8b;+*t z`xAp_N_0kxWmbRKvpT|A6|L-@9>B(q=p8d^ng$Dr6ZQHRZ-KdLg!A<>oFQvw#m4Im~ujl?`&u&u%`6tbE3Cej6Qioght~B23mmNIueaA+PZLp zbG^uvI|lApKwaSW(U$9Z;9a~D0tdA{EaSJb-I#*;6l9XUF3ncWj8aS!&};6UJzE3Tm@ z#%4D?>oK?8e8^}CEn!)1gqaB?+Xtn}*YkT59c4U{c`W2hr_G6!6OO5ia9*?Sbqh*q zZt8cuVau%yPwkh!It-+~zJ86>&ijfXir3fu$AXE?Bv0HMuV38m*R)HpYKg6bBv8EX0T_bLR#lp_vX&77%RkOeb=E zoy8_%9x_IkN&@U+@ERxZN+hTaT&NC_dlAkq=Hd%HP@eE4PllpjKd*TI&w##v2(r>Y zbntbkQ~3F12zDW2s1nu$4k{i9KY`wv*Or_yX^c8mWU$u zxd-sJfK*&F*a6o-2Tun?F#*^@qXFS|1X$=1g8(Y4uX5)ISi?OITewl65VnAX?l1ZW zXa&rc(!? zf^jCzu3z;ZKnnK7hrj$;5@Poz5o5u4BL;Klz3*oajsrI-7fXV_BKRvvq!i&?V=#Z+ zf1Gf}@;a&IwkRqQB0If52eg7VhP~YFLuXs~h+3}ee>ls2>BHOO81Mh@eN}(4EcCuk zF8h`PmGwEg+^O|`!WicLe!s8w`+9$*{&50Z1JFpA=_DzQGz1<_Ip_z&Z6J@*LnVCl z5RP%+d4-)MsRUF>`C#%}XrFa{Wp<=&0ObuJoho7X24f#0?5+X1F7V9E?hVj}c4SY( zp@2^C-1T$=#lY&hAd1H`6XAb`(U>VtYcpSOz}6Fs^9ICTUbCT5HxRI`OEGX;&xj*v z8W(g8m?`s4FJr_?!6hwKoQ?<*$u`g_Kx~wUbWKxW@;!QQfHe(xw+1D8ioKF{rZ8$> z|3w=#JjYlF0}F)nRa0@K$G(v%sX%p-Q3<%69Kv+KOWKijb>MOZnx%i(6wIjC)CDy* zc-iP>o`$#uHqe9AH?lMoj)5||CSUc%ywN}JdGGghhz~P3))5sf;2T9iZ1`vNs%_L7lR5vKssbc5u8VnaBgStTM)TpTo78{_ffnv{YK&e94 zG?aB1++Bm|e(@B#hUYG^vYPS+xTH0Mgs&S$e5NG(_x~xNj3oO8;wufYz{KNKg&oYzWGK$u1lU)OCLX$KI`xDoT9Ls-eKR z6FBrS?XUps<7^E$JFWzza{(z9s5{>qs2lp}=bR1aDgso=aUsFinThiJ1}GaM^3_kr zb*Zg$qz5s*(zT2^(3gHS%|DC<&&9xl5DN$!$!EO*cE~U?s>&~^$#{c5x&Y`QJOgD7 zhPCWl8cMmMEKh%q0=jsg#ld$Q{C{6(C~R2|eCqi<7C^T{zYItyAccDa>U;cC#d+yW z04NDdFvMoyF|lAZXst@0jKn_r z6Sx=)V%o?%g$nF1dflsY^&sg4}k&&qH5z*AThn z9;DiWFKpEpddQ20`|dIQjFWG*TG=&un9Xc*ui39<&!J*$w>fGWq7 zPv2vR^2{wVQ|6KwL_loJ&TqcPRB;5PW@`YyHMF^z-CHK9>UejJt-ri#^^TRK0mRg<~f5 z1$*4R0V>L-q)B~mFi6DZg)=1pbrPp8gjAtCSi@ZMLziPJMVOjc@@8NnjwEh6s~snE zNsNE-LsVU}rUzLUX&MGx_K#LwE7xt^HSArNV2Wk`Sko}?U2cb6i}1Vr=(@!CluQ0* z?{WusE$ZIo7uO~9Q!e>izsuc9@P!K~!$aYAf@dyd8Akd?#`CxDm1D2L>Q(wu4|l@t z9>SIOODEEPS?_WAZ}f}~iAp=^%Q+7^;0=F_^=vA~q_oqBr7nWZIJ{+*VZWTC<}Dwu z`k{kNHG|j@5Uhk3UR%US5+q!tK~ueHI{^)7xBtKNJ{H_xfBNmeb^VJzPX0wd<^saU zzv<_v9MDSnin++5^3*0`$OzA+yN02QL8GMj_x*PP*pNv3GIFuHT;QRYW5E}a&;Nhc zPhbJWm$K5c7r!@pEo~?mRm|NAS&ck=^(Jl7(W&ZUA9Q|6jk1z9tpT!UH>v_7iGihD z&rJi6Ht6kd$=fH!pLZH2fZf1l-ThEi>YifIs=fSP^Y?c`cm$nq(V>hjvOT*I4~QeM z;@zl=WQ-yGYLL1fp%8O(>JgjAt2BSM-+mL<(l`4SRf>HU`ex>WDedY%>X`J+-FF-l z=$Xm(4J3M&1}t@fupwE$?+xkGQUS+*;C<-vcg#`wW)|@^or1%Zm4GT0xc+KKD)l$l z|IywKVgG5zT%cOoc5-VVmiAi475JZY2I0K?#5t&@Q_$sq89SMsSEhfor<8xTNON5P z;|5DdgrJBk)K{oXE-@5g$$GP{Q9qb|8=pLpC=L@5LG!Y;}+CM`6 zPFLR9-|6VTt8Y*7*nIQm21_<5Lvw)g6oZzAlhy+gOZ&>YPRy(3ue8h@*ngu9a3|*7 z!;$@YkC(IL&41RPhd{OWXmn}gk@3aD&-nY606;*$zx&V>I`;9mo-}aRozCNb3k(nX zs(yc8`clpg?DnIM|JE3mcIRJ?mG}6d^y*|G7Kr#f$8&*Vzs%n;WT0okL*Dau{P>UK zNA`stf}`{=pwSm!(-3%2`PRE`>35Rj@BVdN3;Oxy-iUyJNq$iolgzuOeCwhMri=;a z?x$R~-MxX3?}niqFEJ0M@3>We#@WaSSmfSd=u|@yR{u%4{5Fu7OkpJNBPr|fd~b;I zwwpodNjq)IIQskh?i$Tq7aSQsL^7@+TGMcVpla@o0rDDsu1V8y^7Wj`>#yhDhrW~7 z>_h*)jz9G8>+D1SzAlCd7XdgKi41n z&-I7?bKS{y00eRzn_Tt_0eSQE@@@wpBI#C@>=*f>Yd-yt-*nBV|Hhkc{pmmY(5avP zf9Rf4hZeZ`Z8t2C@tfv2npH!>!Ue$fxU z=+STbOx8bhy5F{A^VV(U){wUyP~U9|AA(7}vFZ=~6k$obl`&=Xi+1X|1pTHJvMs-8 zcki~VKgUe(U3c{m?A=#6SA(82MM%2h`q$635Cquy+Ay%# z%?p7O$GH)}#F>D9^3Jij@B7%9yi;rM`x;~}E^tW=Ss6&5SsB$>{uVDMVe5cNkv+KFPE81-1NbviAr2p^y;rku|rf~jUjwM1v z`@38RgM#wAoPbV|{kz-%LQ(R&-26+q4=0L_zwhUuGa^q(0#kzm&=@QVD~3*TKg#Bv zyyTDxxX6}x9E5yx&fpF@m0WA(J(ZI*GloL0TbKo`IdHj8J$v8Bu;yql|GTn2Qh)BT z3jek5zSr}Aeteh9@4AvcW9ae8r^@dT$N?R|RA9?wIe|pp8yuEGeviN!KJ6?xD#4Pl z6!PAKn9KJH*f7*cx`HE9wr|am0wjaAK%Fel8F1MSSGEfS%w>N`Bij+nex`tSKsis9|}~0!Q}IQOzyj8^4=65zF1V&CuDzDvY%M_ zr}n0AIdJrn-&u)-efqEd;`@d}E9El>`JM+3`|s(; zNAB-_y}tPIZdWX%ylVOUA^D#(Xyo(o>)z#``WJvkU=^?t@Q`xN-tFeFddrW4%h6M| zD;Lmz16=WL!_f(J6&%@5IKxJc4~t1y6#k_TuP^xx<^TN6 zx9lTFi^+D)-}=kY;mxQ2lD^3C29BhE%iHo~eZTelBabJTnbeQbmp;9}vy$z?@HK9{ zkG!n+|5`rZyyf+7NB6#e>v=5ekK}ld-u-xg%i|#V^Vjc2f2;SS2Z{W?MZfHd_OUBE zk$Ta7*^l_EfB&L=z~6c<^+b+`w|uDl|L)fx{5Q#`UGn$Y-|qoTq?}fO6iHXed#r&g zq?|m-pNq5~8<5{uzuGB&>f=-PJK)Is=%1|TZ7-g(ol-spQYWMpHaritD+lrU>|ClPu|7dQ- zfBYTKX!sM*$omE~!fpHwXcW!m{d4xU9QUuebOhjx(FGg}9TAEj8&F0K@;R+RwKc5D zl|{x;R`!rexlYN+93D^*`qu)fG-B4~p#<^zlrQ=>?GU45w|OE(_v1Cxm19?b{2ZO; z-sN#43WV2gJW|ot+?7E!=UNiI#?$Biwg{6{+nK}NxNTbUq&1S{uzPwe7 z{FdjL7I&Fm5mF7aVGmxLT{LZ}kqjP({cs-cuF1~x&W(SzNO=zK$MyB3_uFj}E1S#R zVcNxVb6x2!>sn0X(hk?f)Gbr7g5j$yPwN@6MtxM!*euugSqJW1eV&hh>v69aGezsj z$lf&Z)TG_IS*%Bwb9lQr=VBPA{bE@j?Q4)-@YB^=>m0CQR@J?0f16~Ne5|xNyvx3?HOqsX{jod=ka3B?(KM7Ojk;@nRe^Nadr1q zZJ2EC(^-n+Q)vvQXdPaEwPrdfRX76R7>#MjpuC#|@eSMHE*Tv?HJTi6Lg(FmvP^4V z-5h5;<}9L%{CRq{j%jXm4?NrN$$v%#4{njoD+Pw71!kucP zlbCFq#V{$_@X>$G?`}>uU7VIKK}$_zC#!dOJVi3JdiiPF-&@uRS({Vz2*`diTy@i_ zI2}rTe?fQfs_`{{U~aTJOahvWwSZYlIdqRLPwvNXQw==&y5-q?6b7%YFebrqwca+@ zS6iUXGJ84xGoGDVui*Eid+cu+QbKEIUx#y1xAXRu)ed{yLeS}N+g%y&L0Q z&ps?pOGQ8W#a^uAp<+cnbJ&aS8g{Q*s-LDeZ)~|^M?bZH+5EXLVs({nPvJ2*FAsD* zGuGx|yuznNeeGha)V9ZPF*h`YnW90BKfm|Qn;WTWmzipL347!9rCNl#UM!SBU3-(` zF(0oQfBT5)n4X@1rb;Z%t1`Z4&^pm;czy)aVR-OXac?)ftHUiebA{D)Nl^%^{Qj&DMGAYzuXNiS>dg$f;pdyqC8nj`J7miNbGN+c{%2s^pD02fC95{3Keg-`1z3v^^hdM_3Lg+H^Qt8`pc z961U4tFtsl&DyW~1_ZwB>3qL=3i(eB*BiFil$I0yW?Tn1ni6h?>){eVl|mZ=jb~~@ zz6W%Fw$S@EHrYGZNE{G_+wMiWtmE~kWRC$GE}CWB2l^m-DM<6(EMCty>s!Z7I}{Wg z1*9JwIt)&>TNb>|1CN}GG1V;4z@S9!?D{BDPp#%Vh70leyOzwO5oN`EgU=95z3Tzd zw7sDFzG6gEFgbb&gAG0XU3LB%&OwF?mvig{<^iOu4`C)_>41;tD?`OMsMpMI6le1W zBku8V>^HVxyl1_yjX%V=mT#pm5s=bRS@Fy-^<-2LPP;9Lu2oDI9p{yA(Y^mRTxbaH zRuzn}1eat78=ExnL{5KdWHh4u#B3;kVfg~Qnoss87z3L6w0;4mIFS!#Eb-~wCBEFE zDPJ!+ghB#y>$LihZdv@AwLSrKckXV;>Rl5~Eb5ZL$J?6k3A3c8irVwdTZ}n9??1MR zF#EA|kVUT7y@ZTmvPk|^@O5%XuiQ*=VmL;7s&&h@&QwO*Do($Dd4D6} z3^?_hesE5kVeZw1Y^1-z55n<`z{@pvtJ6nTZANh)*}fW{=6>I!OksS^(=KXI7@nE1 zpF_7juFU3ffbx|@uqE|XM%{vBr$IZx?{>kf!_&`S1bHahf zm*jLYJgLcS+h8xI4jU-k4xMvR6OE2l>SU0Ae%3B7tkX#coNJbaFk4sam%vo<^9N6=j)&~rDk#O&6zKr- zel7O=cvP}$zMM_ltQxmQC>RiW1u$afdg>8uJ?b0))P;qEC$+7#u^OHFPcCF%WUsA# z2-5R>0Z%n&&R=yqbd=fFoF!C!wp+6YCea@7sk_^Pgpry87pkLw?myJbKEqk~YLjVd z84SBcFCfe6K9}blTE^&nbqjs^fxc{Ec&76yU3Xr}ANRvz+N8e?I)AZE$GLaLMUO#67t2s^aU_m5MU>*=k>=5&TRtp@+Oe>H`s zoDhoVMh(?Wm!c70A@f{#=z#ehrRry! zW7YV&ba6p{H|5<5arYt=Dy!+{Z9ij(TD!y7h0|OeLis!GBZ(wPKlRx_3}<$M$GI9m zS4!0JX^2j~8Sse}5_zfw--RavV6bM=ky&v)(IhBDE2hF~B(0WKcsHFUQ)c{eE|6Obb_>F)9r<7o$@l2>rS z6^1yD8Y1vaN!1P`c?v!L8W`-}+kHufqu9$a3M0&0;XZX4lTPC~Z%`tZNv)MXFLmb~o)CPUNDv zHR2j|)heWjOF6U6`6vmYJ=jmci+06a%siog(+hWAVJg_vK!>#V`PHB`-=86{f={F# z>h3BJZ`aFpm?bx>)MT-{V_cQUtg5v|huF4lPte|mfdHI44})s|GtdVYH{G|AQ zmXliB0^HYSY(xnEV&&`wY9pIGsSup+m>i)8qG4+=sSIg6iXAA*e%{_gg~tK5iy0#3 ztTYW9r)L|xk2PnD&SNq>XcneZpo6{$@`9zF@$1_^Oup%N-jM)w8uvP2228|z#!>Kc zpd_my^p5g2&73=P0R_dt3ZsLV&v8P3#JtH?h3Mj4VbZFsV3cV&3}WTOeE*eBZoEd~ zJ{^K&!>tTq98D}HsdFXWU^m-413XbB=Z4TZ(f2@?>>;Du8?xaeH7RsSskP=22cWB% zQCxr!sUE~QCB&?*zi>=0@zn2Vd{2`h`b~DO`Z8U(@qWHfMeq9A ze+BVLN3=<12mOeJ`ZJ&Iff!C1<)3--0z7`F5jTY$vfz>sW{q0r2w?Ae9loS)fsd5f zt1SupIgP*OO6R_J;DL?;I;++aemzf(a&MTCuZ)5TJ>-lFGg06;oE)83-HeV&dMJ-8 zVhgy8vrvv`;poDD=;^v@w1#qj06N6{!;G~7`(lh(P`nIM&x`GOY6>13?|Dn1waskL z4&XxBGW{r&hXVd5sFeBjM|dpL)tJ3DiD+6OX|Y2MAM5v1 z&P^k%>=Ku{BV09(<5}FH9Ei*2(fSk$8!FvQ)a3dOXYm(n4*(ZLG!@8{?q%z-nqrDb6`#raEqc8pM*## z)~zScb4wOUP|VMN5N3z-sx`pl^QD)@D6n^e+b;WfQD9tXhCxje7f>@`!c934!0aHB zYy1TdT#1|}zv%lmo5GLT(N)aK?cH&tFO?@b8vuXURHbn1)N7Vb1OHq2j2WD=ueS9| zZEry=EXqUftJ(Lps*^cGCQ*H`z-lk)U%#!WjNyRu1#VY{`7cB@;Q6W0tw+fP8x)!gYB3j5B-%MKKdK8r7Cy zY+Uvu@Wvd#qm>0JtQyb4A`wGqK?D^Qj^+^e~9t~fA@f3`kyu6R1ejid&sA7Yu#4^pI zvs|A5-6Itov=82Ux+Ao)*%3csLs*HvDWmEZ+8IC7?hJ=R!0Qk&9HbrniM(tL+^jgy z%55d()ua$u>rQ*XZ@EF|P3zN!0+Imb^eWnx_5dqX=x8d)Syp@~4|4Cr>WFhrC($MM zS$=|l?!oPiNJAv#f*f@6Gt4D8cbnLcePsA61~-DFxPc!F>0-48l0%U*rsAFYtt~>f zPuRxhOf8T4SZbbn>sUd};p_V{+@fVNzIpIR_(mxlzmM=ubA|=Wk%Hn(nE{LJOBa*bzJ=H3Fj?Av^w!_kA*~D0O&|$rcp+g?q`sW~ZZ$<) z!G4xfhdkY#*%Y>!_AFI{*xvv8nvpNMSd9TdLTA` zZC;?1Uq*k>(W@-qAIOoqTkqTlM7_k|1hm1?T6#XOytx^9kO^s^9)6E5kLbh}plhJN zI8Pr+g#cGjpE$Eh6bY6-8z+8}Tb!nYOnK=w++n1#sGv07%ZZ8%>A!@03zAY+$dqf|? z-f!^(Dd>)^X!fl3^d>x;Xc6|b9S2bkBEXg_&XwVwYD_y}cz@>WjRsT%3;`BQAhg*I zv>TATMySre{@&JGseT7Ex5Lzi8D$vEheL(es#5<^trss%CXaZsY&R@t%E|&i6Uic4 z@U2>L2~zQCUkZXE-wyw=yIkjgP|)`3kf=Jk?JMZOXE}b65<%gXh)nLI+7Fl~=21OpBf=5=69efB_RF5%Mq3QK`k@UmCHHjxunJ)HU z7CH27A0NS&xbvhP0?SQr6ojY89eja_6qSpHGKoOe9y`mq9gc+%okLQ8Ny84D?((*k z)WrG(b2I*6pRW^#Rq7FOABM%6=}y~4s0q7QqFtX$&Pk;GtLH^{+{`@D{7llP!WJLm z==C^z49x$otChhvzc0T-AA7QGjE8?l zq-pA>Hs09OeKx^HG{cL3gwIBr!PfjUM;D8X*iR*B=1rN>A-@h$<$fZN!Q`@FF5CRs z*oKaJxx}WUHmGIaaR;`L1uZSn+(F)(-Z;*PsZx{@Xe8tFrc3Eo;Kd1`V1P7B^ zzEFYb78LPe(TKjM`J-9D0bB^f+()0yo=hDxs<+dL$L+Vs9k@;FY|}cvcu#2rz?JF97-!s(VVyA9vMaNjw(F^?pY}E)%V~@~lx2}& zuCqe|nW3L&3SUq{%aY^6bjURy7)Ms{>5MF?TYo5am()0>A z^RtnnZ3&1`uG_gJat*8ddP<7&w@8dpNEb*^d|B@>(FEKJh4{8`gH^a?fTm9|U6JRq z85^KnHhd4DhvV;3!HCu99bw&)i$)4eUP1s zJ&)@%;+_DgVWusQtQFp{{BMB6@)sK!NOf}Br zNvtklk;iq!7b8)+)F%D*Y+f@v%FwBpWUE=Y_PtW+#hZn?;PK-O%rg{@Co~nlF&?#r z*f9eYN8s=#R~Ux$Yt9w2@C%HWZ}qTyGCaPeZ+ zr8ZuFm17JUgtoM@H;`5f+hUSN-Tb8>D09744vBq|2k8%X`Th>tGE(J}A5T0>#N>iR zMqV<4@?%5fv<}kg+BtTHBYiD52b*i}6La+m;43ZEcFr;O>4Q_6^9>)dJS=b*N2F7a z;C5Q<83pRx%k&itS9U#+0}d|jtzcIYFOE%r9*S^8O7X%d9;dx2ow~BPE;WQ@AM&DH z2gzxdJ=7vdi2MaTA_=~&1ww57l01m3o|QR+$eZOmE*nfu9mKx$B4s2EQb`<%#+Ecl zuaV@imAPeYo&B(uv}G1x# za9_IUJa3ub1S-~9io3Ct;SU~0W)bD3=Tfr=pyxaU(7(Hg9h+=iXR9TjkO*$SYbYN` zIis~jSVEH+A1|L~!>y**j8ytB{ks z_x9(ftjgJ~06Sga5@#GTsok9dT#ZawaBe?1^EeZ;ueE#Np~#zX-FD>1=2h>>j0NiLS(h_VjpA^>gt>a6i**%sEPtq1u@P7-^+EWyQFD}=EZ zo$sdT?X>8CcWne<<#Rc16po-=7%^`5X?0eMwF8xsmh*E&>kF( zxh0c??DmqdId>eQUo&c)KMR<)2Zw_PTqXqE3Av{+?mcL3hthO4QI$D=QDtz|l!OtF zx%e975w-l!E9@#a-aM+S)=wQ!+|#Lm*U8;7!0M3V<=>e`1#o)7F^f~YdLzEIbWj7o zqEArUe29Shfz*b!IGAqZ1CpCz2x4^)ph2h9`^VEOF?C0bd`P)QK28lMDB^tZQL(Kj zs^ET*=#N6MEp-LT0ScG`_180n2~`EbfpZpze@=J@nFjT z{GJ}9?6JchpReC#km^iO zko|2_<}}Px-%z7}V*U2})Q+fvWUA6=9Z4^)Amso+a-vJoP^B|VE(6EhD2jSUAQMe* z4L_52YSK*dRWLpT7K;RH9{a0mwfbS|`KYQCJiPOxbQwhZHW`va?&%-Mz=Swga^g(ZiU`eea>idWhdHAH-ESq(U|Ara&1u9Dyi0_&?6jO$1=MH$%b=chskene2M5zL^nK?*pY(=5?JkwUBvB6s*a4S_&HAG&9MU_%%PS?9R?hDlFvkM+u{$uqCa z608oUKdUOIm;VaJeWije$u=T#l~O35N0%diQfL7t4MAho!0eqc2_wr*&=xuH_I@`Y z4OMwiWh1t8dT9<*32Fgda|2T6C^@$9h@iN}tDt>y9s(q21sYGhz)-&FgtGWcgy~x@ z&5~+=<8f`F{MtZqMTS;O6D-(h82YY|`2O`!#k{kv8U0lByCIo3jWjHw-}#8VA?ac2 z^*=O<&hwUTU&*`@U)3am>xV#G6zGCUQ=W>?*@RGL+MuA2(OSK%K%3$nA%MQ&?xZh{ z3dw>|18egxd9J*&zH%6S`NhO#)@l7T1t?N~i*1cXCp+wX__@{|?I=9hb)S(Z8)vb~ zhi_3_cY?*9;*qCL@nHUJ&|Ejg4j83>^-FaDn+zyoch8H}89FE8;7D1UVKuY#$S6Ux zYsO#Ncr|Y~EUgb;B!~skxR(WVRS*#=m_1x8>wl5!jkkOC~>NXx|vj2r{ zR)QyQTk8TqIZ(o?U`-!*yZEhO9vbw0nK6%*ocLV$VIqa9%8(w$3C}#Qq>NAkvW*vr zdR@gS)(XnWtPV**j;c9el8L{O^nOer>T$+K9~5(;f7aU~%^GxNv4qGtZ2oG0)GqW< zQx|5#Vg|L#rNc>_j3N?EP38jVS0=D=`c6S+_M3enup~qYYdgczrjDS52E(_K#FI7$ z0We(}9?TqS1{90EJnPZOB zc8#oSd~cm>6x`%Uu@-gZL&0HjWdK=fYz3Vt>3-XLR*;Mwx(0l~+RtEb&mLliaCv+i zKAfUAADPs`sx0)xjA0G9MT>IERJZ8E`qC82zj*mA4w#p&L^Kk_Q<*w{AHW3GArxFs zasfIw5?!9p?i({e4z54%*Ljao^_`6F&wlLvrID-@ieMf8~)PO|`K^ za>c{Qsb5Q-9<=U+94nRyAtxDN;erpb#0ZKP*t;~;J&oW7#>hp=qQg@-v~qgA;%mu;P`*KbyR+tBlAX^ZrU1x=^K5Vp>fI%~aLBTex+< zre>yl_!BBiVt2BNF~$;`C@2}teDsZuuaJMj-p*@^C-px5O_<*5%Wi;J!~zUv8B}o%PKsr=Q|ym|gCu zJby*GG!uzZW1J^{&JXHf;z2qggs$T?vr5ljxv&rp=?<5T3FlU}VTMh3O(t3bsN?CK zZBf}8&^&7+sCCaVI}okGtmcyky7^~?y!3YDjiHeSbyn7%MJPCJ+Q)t^REn;JKyy(? zb)Br>;YvAsr$-VP3lfgJ5Ycxk@^UWUx)s=vQhxX{WWzLnU!Z3eH3iYw?X9l+X@A6$cCu+IoZ|(QI}D`(9yn)z*9>wc3gqs zrw1zCc*{E0>I)~LzgStNa$=nK>(0b zf`Z1S|A_p{F}4c7Jhaql$BHL;%Bh^BQ2b_qn+?!^kiZy?UW=kkGTBzWs$+@AfZ)2T zwQ5x6;8eyK3+xFF`&{lIr~GuhkN$At->S?XIvgR4G#;MC%W1e3WAJjth~x(rmbf!x zNbj{qkZCvO@WTc=CXd5WiHqF7r%IU)Ey{BC65)KAEUhyHeQZmPSrP{}J2vuYqm;$j z=yiR6yLseF3r_m64BARsioyZ6>Q?j zSBKEiHbX5YU5bpj2-tP6{Pbd$I15QiS-(}i2S|~*(FA(z#E2Qelwsk{iQof2099b0 z<%+h`Ecsm^d8w}dEMbJ|A3m&6Sve^kVSl)P%jK2#hsUtvsx@L|FuVhx%CJ!mYekQ3?!0w=p|t&K1zQ*f2@l=KwF_KGWvJ)j!U!jXS+=C157P)L9LwD^Nk~` z9K!tGqG8lMyRz4EX~B5e&QCkL<;f>9>HUbJ7PK9rBlCL+4ufDe<%@w&BL6(IK*FMC z*<9P=Y4XU>@}6rK;`9Py7{gBCEMU@qiZF@uB3dY>YrI0*)e}H5wSeDzZP<7sf5XDj z_5nj_QE_acc}bbQzh(DvCTIh0i)t%+w4@yUFA1vl7nI6 z=<+4_6Y;dSu+8zG<3quct;ug3&Ci~KHlct~I{gqVV8}(c4dWu1k-q`NPrHhLQr=z% zEC%Lnr}vKjc)*qA-^w@ zZA1zAy>P#UgB7lb;u{%DX3NoPPI7W0bwD$*UA}pRyHoMuf%lv3yx)0n!|&#APtav8 z>QoSI#&siS^jDWgs)041kdOy|stS?j^XYw)3|7lTo6_ZH`xZEO8Sam`A!xiFnm98V zEU90CQ98>#G#v1Wp`tH+1ojv_9E#zA$lNe8lA-p}@jB$2=*OA5O8#Y|V~$6~-COcQ zF_`0Ewwy=#f$zY#go@1%PbR>6Lud-R8tx%#vM89mZpVNgCsf+qbDWaFgK0+xEptUej>MpK zx7OTlF1)^;_H*cpLn-KjUc{w1z@KQq0z9rZ7d=PsUG@lsk-**f(w+ph2&gw;>}7Do z)dk#WmdH^d_F7VZPHLU@S8{i|myX>LHW3L{ej^#T`L!Ih|N2B9=d@Aij&|xIBf_@g zaGog2CVK;gBhaybx;mI49@Qau^1Dnun%+{7w`>N$=J)z4#3$7p&v|4NkB6Q(k5VdT zJs~sA%1uk@4b}@-Isk;HHl#*cQL;7ARAKNlfao?nfNrF&6w*DxP>^VNa4OR78MZ*R zO}kI34f`6aPcU-4cAKz9%su=h(j5>qoJ^Mpen>VNa#A3FWr9y%t0SyIh42lee(|gG z-o?H_Dy9TNhIvW;3Mm-UDKIY53B?>%E>=i_GQOvx z$nvHI4_AeM9_}!q)%ajr7bhY1*DWnaQL8>a>9_a1fElw51yUPsCq4AZ6Lv=8d2&2c zD%t4#oz;>#n?A{_JFDztm8~?Yr8fD6yMz##*jk}v>olAyi{vl|48N8`38^aZ%S(-l z4C4W1Nx^}UY%7spF_bHe6kA~HOu(8R##+D;JNmMJZ-M0P(GxAbVV0*4+RxC-<_5X8 zj30zEm_^?ly|_iv>2XAZI!um?+|`L1kk)Amr8=$!OFBl-$HPP?g=hSch4iTH}7Z($1sa3X7UTXUA2y%8YKJ3au9> zdsTAO4_!{r0?D0twH$@pDzxeNf;$G$_z8g&l%+A%X)J+;s7mR8IYrGUoaQghwYfYH zEHkH9?W;a*v>ogRXTJt#V}n*J$U4e8nZ7(cw#5ZXs%W`I0-7ew-zDndCHc`Jt3{%J zb5yOF8c3Sl@Qj|8H^-!A)=m!=5zG~F43|+#x8SPDy5ko9*Z-7(Bma*L{O|Pt-lKK? z&mJvr-~YWwt1$lqOZ)%Xptb#%`UB)H|Mc2o{C8IW&45$mfArxDH~z!DC4X@gZ}@(3 z9HR;J7upE_?J&3h2*nHnF&#YSQt^j>eSg(~C0<*Hiz66J{9h zr!e*JNF^95|L7d-U(&+#mr7*`4?p62hM3J?ynp&~<3IEcvu}7e{Eu#&;WFxfzrny9 z!}1JqXQO!heZLX^y+rE%f>xt+R8DYbsQLGQzJHex+-5CZ!osxbTjwdD$RUvtZXCT)T5C2R~>p{vgx5yv08|01h z#-CCKw9lAIleVHZ#2@9VS?0`tKM9&*(W^Ei$#7mtNoXlk;%*V*VXJyQ)JtK1>7qlT z8hE}6-+2;}(Mcj{0ZdRzM`ym-yvpEu<>igXKSh7WyO!b1o@{=)>b-<{Ej}msTKx&B z3TgxpW6p(z^%fohq~0~fDBfB!!C|olVuE!8as^vo8<9kDSi@oOY%>>s#KZnwmEK`dn=v z&44pE)!9I;kxW@Jw->s9k_&>oWJW~db7AWjiZRCb5;%v!&54+oksxtYHWnQ@vi`Yc zOOAWkkJncBhJPEv?k!26pQpIGwO16Y`aRi~00~J00g+Rux{dd%NI?N=Vdqab6&5Ig zWP!btkpQsKeI9SBz=mS-f)R^$4HjJ{YP&%K$Wj3x7)m$xz0`7s$;C20_TH5YF*vWsL! zpJ=Z9H5Tn}gX{A`$xn&KLm?p6U>a~q-+`(0)5%G$XB<{M#B~X^N71sn55RcTN7ys` z9KRxjyY5s$u)~gjnjT!cBvU4tzdoe^O17{8895I*S*N1x6b7=@m%e%!kW0ref$!^0^%cAMPYaj=GH8i-8 z3Mpy`DF|<@)m$Myg_4X*Frl6{SSIR6@^<$4)R1HT%+_moK zlocXZfAUxJD!$dJl?yrLO^_mCJ3aXeZFl5V)#><(XiemC?;ccng-jUbPya=``9xO{^%NvzoNnQ z8EP=&yL_yuWxpeWt6X7>sm@{2`Wo`)BW3+HFus&NSb7N% zG7T|*X&9;nIf<`$hlGv0!rtpNJMA+xz#E}+9T->iTV>?;+~3K*o?^_45pocO=P1Ie zb)(+Va9U9S1#~wl#61XP95k}1)ror)fESWgSmi;0jeZ#1;$&%ozm=y(Ll5^}p<@v} zwI!N*9jsP1LEL*{1QXaw=+>iQ$$zsnXRnWYP%gl1;(+$Ruij%d>$b10eY?Gq9|y`~HuI={LdypkV9?+BT* z&n{Q0&)ydc`6|6(50e;hfr)vn`X#b|@qQ&Ta;iPeuf?^hiGeau%q$mSnjlY~1wYhA zU#B4+Z7HI~1lwbxc3hzcf}OS+{+I)61qYAc--F<8>@{58?Xz14?Agp)z9zqGQ8ZO> zppP`SoQCMGLfD}gr?17U@&XDQCP+|~YD1J#Mzoz82>32Fzvo^*I16w zLKrwRF^um?B3EWNheV*!_0D)sF>ajauHE_L4C|v> zz&%uZ0Q{Wfntl3FmQ2kMjeVsY#iom!Dq}nIYN%C4%oeJ0$npqW9sO^z-eYN2U{}z6 zAO?6h#Cz|V;XM@IJpGM-?S0Pej;={n1VN;vQVLR;`h;(5eAQT--$-5@f}NbWL#a0; z(X*kj6!4ssL}K~+nozrc@P>{!+Pll>r9+gUOsyp*Q8HAgECQa+sDd8^GazRsI&)as zkI!h?G)t(h*JdMxC+?6ML(Yopws%y_*We|G3j&nTm9Z*DB@{h>9W~NKJSh}b5cjY7 zD?K~6Z?Agd>GmQha`&9wN{*3=aFCG%KlIG=?w@#s{+em}?-*g1lB>+=mTrhrEmT}) zvRr~aAed`8cdn>lAXpWEACoI-u%`F-l1{36o5sk}ZUc0yt*pNEmEaYbm9HQo4uMxi zH^iS+89ql8*bwS}%**&h$#%PZ_xao64O8>7*fwG?+csfrh9T;?zd3K&{`tOIa_AUd zjUwQqC_y&m@zuk`C+$*UF5lLWPL6J!fh$J*JZP_(CAZO>7;>L`pef;qW6MnP;lN_s zA2Ppj$H-yU$f$=Q*kjT0jGKPtm-U_+Nhj~|RA2dMU};^^qty<5_iUGk9T zEedNhb&$kQy_0ONiqsrv{gE!f?ySOG*~Lugd890}yIYP%q-WTDqqlC$fpSmps8r0tN% z$CFpdyWb}zwTa(4p5$l)Hrs5&VMoP-NUHsRcdfr`6$lG}yV`$h3Y97Z{@UPh(8Pi) zHgtf6uDfJz%j6KqYB_Mqf9CY3z!WmXIRA~jrN>a2)RZh^b@y_;X98sLBmA5xd1x8m z;Pe!+zGXLj^K!dF#`QWktHp;b;P@xx8?FRQkNC0;w0ilNWAM#K5wK+#cq7LwzsQGg zOp`Q$&CNS#>VX2Vgq{-_%D>$aSAW*-uL|#iqBCOk=3y!7U5z_5O3g`<=tB@pvJZsr zm;=79VprPl*}An#eSLc~{aoJSfAPyP%`@v2C&$5Fjk6iKH>StDhK$S-JD ze+9Dx=S%`OA7p}>eMYGN-tbL3sM`Q#Xhvk8n#id0HeS@l;8BcO$OE!AKRRFM;%@rz zssV$Wy&7h}PVewFygILV|2S*li_BhFTXw#Kgk-d#XU56)-d4K{ zhK0FxFhy5o{lCt8K(BP~Zxr}W!irim1Qs_cQY)TnGxRB=HjEOF5hFP%(WmT3U_X{N zuPRbryV_OKv3!LkV+osIkofHEMu2I67F5GBBt84zQi(x7*G~i$_9>T~#h(E%e}oAA zRq{_vRHQ?L1o`(KEZX;8-D;nSDvQ;f)dkwz%q1H>qWGYl7;D9MAbC+?!y#DnIHXTy6>Ob>*X7y@71l@h^CY z0@9Q%6+@;#g_pVPR-L+wg(9U{@JpgHE-pDM4qQj+dn>TnyCspjCA)(vK}9n?Y;T|{ zKAaE9?0?wT9>`>=8TiKZkz=BV%xHVVAdxqu&rVfq$8vwTJ{6}eIuXvOe?d5CTD5A+ z>7Y=8vw)ayE-q+!1za++GecyS)8^DJG4s5~!9RP|s0Co@NlFdbkectF8uKD&6ii6c zs`!$#5>=ZINP>?oa>T?0@F~cVgYV^Jx*58BH`~gGijpV6tG61?TlpRKadLKXWNzv{ zxF2y68o#LLMcE3d*`p8Se|SN%+=$+@zA3~6&^q>0MhVH9)_zSg|2-q7TlEJ?iH?=r z6D^mO^X?C2r`aRI(LnL9v9je!sd1ns%&USv_E)lt3Uha!Lp~gQp8G`o_0?FefX>b;T%H-Gf9AKZu|N<~OM41pcIuX>pb zz&yc1W>k4m^Kg37U8w9}BjNdNs0C*j|Fw7IN52WqC6Ic%(-Bh(2E0Aj`Zi9E1L8l@ zBJZDzPu^GuLxeP?e{(9ecr2hIT-(~qr20E258fpLtx(j718eX`EK50?)}CS0)#`&j~%JZ&mCLPpLT@%4J1}AvF)#eAT(Ns~SbMeIo(6 zuh9@FYcGJPXmGyiG$&I^Bkdukr<%_h=K3MuGJH6=ZAV2zirWAWB);TzRcyT;)^cNbjK)48b4h9&mnVVu)DL<>)>L$B&z!^d2ji(mm50^$*Mk{yZkoc`-*bv3pLlQecO9DSd1E*| zGqa-wK6`O}Yu)dGgN4HG5fQ!#8{u+Wmn+e5y!jDwe}AQi#S)mL%d4iLaXxDfIP&5V zY_dNm=!nzO0^8&n^iE1A_`xr&U>!K_+%WBlXBXnInDll>oBo;hIdY&p;woypX5z}t zgMVpobr&0qFVLC04Af;CiWF81oTe2WkU~@5T9L@qhYI6mf3&hzf+C)93u@P*m3;2|B;Mi$PaW*Z z`GKElMV`@8x&$^kzsNsp$X?=M0@L4w9cE>oE}FgS=3m9!eD3%H9ptm5c%{^uYpVZb z1{d3N00isrTqZqb&o8eeItXp8=HV7Tx)w@55*<0*vJq>|M$`M&s3VTSHQpw^hEwD& zf0|>(QcDLhm7*YbqY%17j%Xxg4gaS$h@WkHs-a_Sy)Qzvk z{a}BJKmy) zR+OHNONfKD8(SSz*kjkZhqpb;7ztl{ei&1_sv7AVK`iE%B}xDQUH!BoGa1J}D=|w@ zM%3WQ^!YyC^g?-blD;&2F7I6Jf4-eGbo56UPuj;as$!SWq@nZJGzjqv5+oDQkU#Nv z!Mk_CdqO#XSW8O0rsa`3ep;xVKFKf7@^m!{$R>ZH5mi$qQ6{Zl{MK?!PuGj{a#6IO zQCJiDs^D+)iOt%h!9*tZ=SNg`qN$ZqClpgXLiXyUC%bf-P9Qyv9@9w1|NMv)T(hM#I1!7B^M~nZEbuh}}6HmkKaFtqvy#luczSRmk!rk+P1c zM5`uIFes{SMKD4pd|=O8O?0;`!HNS*g2YI|`P-WaeI9XrbPyw5OIF*LW9Z^y1 zA{6>jx2#9@w3_tH}2!ZichQ6423{Kf4eEJxMgw|oq4Oz zCFif*<&(M*dDbXjrJuN$GjMEue2=ixz^iH+R@uGtKEi4V79~4lupQ3Aq+DCZKrsTm zGCEF37rhvH!Nq`&S>g)fbv}^^^sB|F-w`8W0H~GZGuuPNhTjD_Z?p)~kNjYi z0OP8()^*~J=m0-oe=V5Tkb0gf`)M>429r_lp)BkNpd&82`g$ojKd(Nc-*$YwX&P4+ zJK_hw&Nl>ypV_{O#-t8bnL>0kgXLHJT-7v|VIlWBY^FVhrYmZqVxh>!m3O5m#HT!j zB&OFiR;9N)o4yI+tGSm7FN*!s0^B7t`Rd9LGgfj3YmJ9Le<5UQ!#Sp$*N6UkNqIq6 z+b_WpXkJxno*6)X1hSs;~HhuX4D44as)*-A|+l9mBpmWnId-q+tUKU$M3Yp_3 z6|w9evG^swh3V!3fHtABQ_RClo4k^hF+eA_?anZst*2h|`2G`4mh*Xn;*a3}z9UL1C#f5%X1wMf6G3tX*byHIdvz zX^=yZhaJ&2fYF|0M7P7sWta9Ad@K;+4dTP1*im5te<48|2jc;;Y3>Om&P-<98I6ub zBDbo1=DcW0aIx;O`3n*wW1eY6m9zX%Bmx`RS58QA##DNAxN2nYK0~mgD0%@(pQeOM z!aL_6?flvQjOjD+aanW;3WS56O?Qd1S}K);1u>c)`JD` zcj0z*e@dLl7cPtzk%6M@Tns9}0irw?g9u7t6=-l=H4o~Ce)64A?=%~j5Q_^lMx&F0 zxk9~19l?~Cbo9BQjOFh9-VpCl!Ef6GF z0pC<5-5+}lXmJa2&Y)+26JOd~p-NNc$&Y3%TkpmwV;#28 ze}e@x-k@iZt9IY8Gn8*!CyY177GuQUr|i=}+fp; z(Id;I0HkYpNS=_@mL0sq%c>o(~ONgWHHDDWJ^)j#tNXYgW6#Ue;L;@(0*4B(*UbdoNYAq)iL+97uU^_~_f^Lo^>v zLjx$&Tx2v-3!1iqcNxJ)_uutfH9`{_lL5pWv*BKr2dSg_H z(~moemkHp$6r*?^HwDM`C{>U#Yz3-$*GAGcX%)-v(mg)cW&V^DS$shb*y%RO1EjCb zUPM0qFhl3X^B_C2I?Itzma6LnvZ35QJHPbZnLCEFMm`gM!$aN*|6ubk)gQ$w*fgYFCUd*RPI9Mebj@MT zCC#k9>VszLO9>63HJ0q4KE*L+A#Dqp2!wruG&eM|_NV0g6jC<~bc8&4_^R_exwcxc zCV;Vs0mdhQov=CG_HQ$Iy{pIx=NjJg22j<3k*8=TW(oEyZks$f6VxRuQiqu z3d%=)&ReEjV3eLy8OujL6?i(itz;zwD_=PXsTX@4BBgroHqkLoJl?1}Z(BR}`g9@a z%|YLfZ^j#0tmD1KEJtu?r9BDN@3#`;IG1_#lgvJh>)ar9gK{lwf11$z5D|)_*tOQ( z%0zaYcCQXK3Mlb}+$4YFI}-KpKVa1v@EqcL?&|XBMgW9}i zO5U8AETU~5vz7ibusO+?t`^wjjrW(;dzQ8E#3;H-)eoK51uffQA&>!aem@o;|JnoS z`FeRGcuUt4Q?hl4e+0Ct-)}Pdzaxn0&8;jVXgRFo$VDP#9*K2C zGnL(-0~g%W{_~tzAITpK@kbUw$Go7Ksr{T6Z3%6Sa*}@2yO5@9#^(a{gC+ax6n*H1 zYgu!<{;WFV9RcYqU3gh&NY5r$1fWSL7b!RL{BcCW`DRPrf7C7N>+ilhT@wr5rBSB@ zx*3EVR&Gk<*kf}3 z+ix=ajz@!{fBOh;7S3gQXXw=$JUGfJT4VZA8dXYqi`gdlWRqcZ$cW*zDA^0}U(1jlA*>zk zEnOBzf1}j>IsFbAEf#v)5!>FQz06&~0b%k#~^RaUxZsa8DC-4zv`}I?_#W-j-}S-Iy}QC z$mo=KQQ7OMtDrP!s?XZF&(T~TNe+|s2mOYaLv`Lm0ab8tREE8bsX(oO; zK|F|sa}kW3c2GpvnfESfAK48gW=fed>>hqNMtqm93E7@mh{e(cc-(^zgaow&92Arv z8V^KcDFalo;^{TOh?pE>0e;Y+xjbW)1%AeO>I@-T?!a6H#sxfYWvZ ze>UT>(AQvS-18bIZEG69V@vZzhLw@5X_Z>F)Ni>Wcnd)$tUP((zuYXQOr!ZtwkG|t zjS}(`@pAf2Ds_jY zQjgS=rDzlV!^D_qlcQSWKQS|B+{!n_{+Z zjU3V~tn-82GP+CHuhGR2cY}OkCj7K_QncY6n<5qK;T)nE{=kF}z!~?LH znP1z@W^MCMZx53DQP%O%wcEpfxk*&)aeqZS7$_=4j>pnVW9yl2CTxmzwF1&73BW^i zy?G(2V0fPXF!+@GC<|MYV#g53e=0XT$&{7{cgHc7!#_9*A|ap0*t<-{(Evfl{STt- zBZ_qaz4d^6a77q>J^hS=Ux~8cLwFxC*~KtM1B{0^h*ApWafY)jW_q*|eF-qpACrDY zuI@+(32U4O_Za*f2e)%F)H^ptVP&0`jgh6~nN?-|wbt`n4HcY>u4;@reHlaeL>8mWgu2Mw|)CU{|ba<0%hx0 zf}WI#+UUga_)K~K4iwxLe}?h|MfL}j$4<04#e5LHV&_tpT?ejx@lz^C39F>}SYwJ~ z(10%0`4te>ChG=A`NFXSfWl<~?&nDan$FU?{XD6@U!IC7VEO<|Z1mPUJe8M_h!2x_ znR+*nzEjGIA4k5uz}AbD(e#yUrraa$)d6_?tKGS1V<@W|zAq1qe-zJCV2*?PD@ZAS zQ!{0bqUQu1uzlfHB4P)io6ag}g$F2VXw;F)$BqC?DhfL{9TEe5N5K3O?!}}dc+ZJO za2~-u-QQzt1W_E%V2~qJL0fEN#0-)OguE^!T%Bc}9i}W3vMIZj^Uiix5?^mo<)4>n zQgN4)*Su?20ar~ee?!k%KGEfVNEdUjGiMoFqZ z{A_DSag0Q_;>~$|aZ6Nu3oqB!)_D45j>rjh8Xpm`T0wKfIhoz75WI%?;go8}UOvLw z@~x2u1m$U`Fpm@rG=O~o2f$0vI%PnNNjZ&2oY>rBZ5u>-f07+@cLxN${u9pr?J6N@ zL!W%^s2uUS8U|Y*%=1Lzw$gd{9P$MhUH$=x%VLZQKAnlRt5YhFW2l!tj%w~_O2 z@AL6R?wiDaj4B0u4G(eDK-t#t+a|#+Y4LV+Gh&F84)B-kk%*dUM7`r>fGA7&p8&Ue zgp?H$-!mG$fAQx0u7Mj8hm&vUy`}_hUMR6@J&d;BDEbGL4^!G88~i|q9C^mJ;KQ?N z<<;KC#P%Bxb6VPiJciz1qMkMjdOr@cSVE&A;rEQ{8E+AOv9;K*N?EuiX%+sCpFH{- zRlb`O!FmUxq-h~Ak+*id->}byK*#zQs!U?-jeT_yf6?MI{!T?a%sJJu@_A0#ftj)U zB|j%X!!2m6>7+tqblXIMeU^ShK5~HE-m-b%JUrpi_a5aRH$Tum5?K#;fOYwF^`1QL=>d-7MLt%+ge|pmXmAHr$)BS53%lvO?jl^)~zq%L( zGyeZ_AB7Pb#{2uW{@O~K4nsPE&-e>lFp|mS0(&=d#8en3GpfSOH+M98$2dy2xjac ze>guR7U<9|4+xIOvW*}&PVe%D;M}hHh`5j&F3Bo{un6AS>aXxYe&*Vi$SQ&RhafkK zkZ;3WO!n|~6J4B>=ynT7go8MAOGLz>7;T$O682-}a4_fBEYGBW(Wqo%cGdG#tbGm8mFvXNfRJm z^Tv|ni-o)mD9?r{6M{`IIVU%x5O6n8A{=J_GsIG-EQUka(GY?`el4~~Yy(V9e*x&_ z#zn%LgLtRN1ijwf(LmfDX6Z z`LS-%=0Ko-@D_008(T=%TpARv<%`~Y;YsU{!Go10+?P&JVGbQPnnxBC%_bWZ8?KgE z8zK?e-3wS_8}=GbV<>QCGuKvje+4K-bdT(C+(@3mn7Cy|cT`NlbP^Qy=&$=RT^Xko z%k2b|02er^mea5pGMP;(u}yWYb^l5lUT>fX()DiMOt*4e#qg};G%zFgS%R=#ZFo?N zNnw*nKMra>!*-d>NMv5g44-+J+YlJWfehZFM9CzmG~u|0_hXSsWOj+=f7!S-@P(4) z*Hy;+c!|U>Jz_}2MM@=uEUqH4g2P&St0D};Vo3X{t~+oJ%Zc^#Sf_qUw>Gg5#(bIA zvXXdDuVjI8JZP*UID>2?6#Gq_>MEz_TJ0nF>G~zbsTbOK2|}wo$;sucyIWf~rnjYe zJ`2!vsroz(LqPCtAQtOwf1{gEBQPcHROesWn%rlbn8(#|7<`MYXIO=zmN8S&Z)Q?y z5Dn=M=^$twldpyhYE=UCyV;v$#C6PbIux!Wmc z+*tC*xIj13MY?X^H(@3QEqnL!nMVTq6g!2FEIf!%ItPms2~n9sk4rW7UwzLv zjo4G}Q}{$O=9#DBf7<8I(~srnIIM~fQ0S-Ddfrx%*85(S+Rtv8yl@=sv?BS(NC;o+ zE1S_(D&};`<)f?*9L$O6Evc_N%Sxuo798B!FYwJZuDqfMKZ~bT+f1ipUn@ajXCQM*!a0w5+4oZY` znp5L)`x(qW3$|rtB$kC@Js12oy+Dlb6d|q-S;ZT?IzYYnB%-u}3|8uNWw`oC8u5XlI+X) z19!SYe}2_VHmP5#QQdQ`gwH^us}5CAj;WiVW2nm1L2BnXS0Obr?{lc}MJ4Yr{JH>w z>_$ePM24NQCyO2EP2NG3sZ(wh zdYo(SnK_lxZCU`t))GDG(D+Fg>GNBHg{8C=apA>rv3 z)c!8Uk;D&EKsqthZ9-16{QjiAie&XAzcw&YPe-e0byqWLnE(og3DnoGJ>NAmZk}rF zgLCq!whV0vleuMsaKsw=*2;LD^L@GtSyT|Le4Ja^pV*(UX+Sf{pOauB>; z6(N{B;L~$a5_Y+WfC<8>0j_AvXQR^9gCr(0K}n*w+L#MY{$-r|u}g?hG91uJSpa7M z=g>{Bi?YQ$dv#me?y6`Jc~%*T0&CF;n~e00n(*Mf52T+qDfAubOIotH!v&XrGst#H zePw8j-Euzu8$lTX1f^+2md2Tg_Y>-{i>IfVh|*6Zh0QPd9qHvj+I-(nedZ7I_Relm ze_VYP0{e+CZx!? z4qzsJ$&AM2iczDVq&X4h`Q_(-_3IEm)Mjn+5fr^=vaz)#YaC}SyNDATmHR1-W@t=g zKymVWq@HD&H~rnB8}OV2tyvl-!%uW;@n^it^OcG^t;YA2Rw|yY(GlB^Hj!S?c{11$9QY|SRWACB(!xXfNlG1B~}!LTKZ56>e+!;*Do zcDa5c_5k=O1TrE4uI=~if=^paT3uY1ht_+Er8I9e`_Vv)KNOKZ59KrfurPHUWU_Zn z&!>iwSfe0No~=E6m=q9y5Paq;L{1=FSUH-8DEU`G|Hl-l%msdZ!D?W(Dql>y7?q60LC=W#V)2qE2YKkYg4mtxt=0qX=3@LP@ zMuclrqa1YIs?@9gci(I&c^l6>Vr^R_*4j^67JER7+ImEPaqjQ!@1@GvX6Iu(gB8n( zzub=mQ=7@7t1XmYKRy{vRWvb>u1-A)@xz71A7S*3hEc+5nv(Wk1G#5UEagetAe+lk z`olI^6Nqf8_q^CQ@=H_VtjRYXpYx-M?=C1p|8n%qxAjwdyOB&#U#-@OW%90+odqzj z>}Q%NP`qt_<#&Q%xAs*)5mseN;%P;B^G!#@4|BARw6f{Bz&d@S28E@s@db)nJY>k#mzZ)Wm3tl~DWZUA~IZ952SCmn%fj+|MJ6>Z&(L`q<^w znZFa^#`LkjcXQGm$c>0(2PFN$(w81cp7t<~)9j6vR-AXRI)37dmuH;!@DC*;^OZli z>Bi-M9g}lR)L&-0>5Ts2f|n|4QM!~{Eaw$Kb}6yNZ2k`C3zPQf9>9P302u^t#{R!q zEB_B_l2ZQVDp;fX_p_aId7K1m$IgEe z+zi8;RVIaPD%rhrGz`NBf4CLqujnC-gnuNIKU&H^77XVudF*1CFIaiQFlO|hv{5e6 zed4Ez0}TwryZ`RLrGF%pf8-N}VIcqC{p^`U@R(#UE&j>>Hy+~xg+za)_=&=~GmLP5 z{KLm6@*nwx{YN@s7*?eJ?$?Sx*vlVjhItt!{6nv>H^Uf@@Cjb=37+welk*yl5`xwK zGP%Go4SRzi!;p;jGEBh;H=`U5rLm2|8QqSUeDaE|BPp%Hv$rvszJnN_5{dF(62%!z zAy|c%5#$Go#MXgS&Vf`$p}_^>n3$S>L@vptH~EuGV{(SUZw+Obg%STVj?Oa7-#EVd zCjWNf#Wo1p&zRLdfc}l+1_65)i+C45z8%uCT_nA^BgReGU2RtRO*F3ez}s6`iCI*M zp_Kd2Y&WxlAj5l%hBJJ@I1Z~b%!;~$0$N=VGM}c$MG{?y z-EIN5;%;RiH)6+wGuWh;8cdV?NDH9!jS~&dC^b1AWI5c>=va6!yH1HtTjpn`ZZAY+)dj3apanjA&b zMauPWjxqwHGBV4!3wJetO!+qsfB(lG%9o&QQ_O!pp?*aqe$H&%iEQ>o%r-?N-o>Oo z8D%g7YqMBldCSji?@qsmn`<0MVsTM^3s7MLlxbwv>Xl9LnnZS?OmV5!;hNRzf=qRx zP;Q$+?2^9(C}Y{c!9W2QHjMsH?>qRDhgF=S6`aCV{Gt{7f<+jA@epjHH!i{x9I-SS zp)oGf6K;_;8e!r82v!mgsy9zF3iUVNrTsBf##c7+Oa5#&|HtQzSA3F4bRCJkOT=X! ziQ_0&yeU`h8JwZQTz4BgxEEi1FkcL*UJXs2`HT2`!r-^bG4Aj;52gK9{xDga>Itag z7%%8JK6A~p@y~C6*cJo@hH^OeFaEIGUusZzweP=ir%RyjyH~Bh*T!2Yty`!+duTO9 zGM&T04kP{Z*GBng5Ahae^%hX3g=Da2W|omnzR_(Sd`Qk!`1&7?N@27bgqb#A=|6k2 z1HQw#t~M7F-&e3 zb)65yKYQta|L|B2OWaKT&!3xr{@f~Ff&Nb%W8VT`{u|@iJCWGCkVN+{CKfT%&Vblh zY}-MT7B-&lhq3ZKYVxRp_#_;2;}6YMa`rmrCXhm8Mu{WYRZiJhuo$u zJNhj-BatI{66WY8(CX;BxM~_QiQbw;X2y=Scj5YJGn*K4Vvg>TERUxMLfr`GjiN_5 z(g;SvE&$CGJ(fNHOJdWICMNIKyL#=(KhkD@pa%!R_M@8!@Zo6fqIw>hC$dXw&NWkP zNa6%fwuQa9BNu9#PogE3K98b!o0Yc)ui>Mzva7 z9%sE=jh;1*r2VCC=0i8%>*?6$2S*Y$p92-~Ip|0N*W&{*%K%xwjqY?|3;X@E$yQLr z1VQEENxmB0Wq);@_ci*2LdTs12apDTjs*AKvAxA+1i)8v`=c8s0nVm>cv#_?!R`(3 z)`ll#b2av!edmfMMbcr`l9xcuBB-QSE zWX}1v66W&oUM~u6IpLh((xD5*zi2vIGJ1r)08_w0?H}cmVYPo<{G-pN!68?Fx?Ro5 zqR)5|JhQ3O>;AuSjAbRqqni`DIiBw*JC{?jKR*B74x|e--PE&5@vG2d^GKb?;2(6|H|%RKp9+senX-uyJsu< zBf&60FF!@l5nI`5C|C0tYp@}1um9xc->Od8C1*6MVUy#B;x{anUD^?U<)R$uEl_%I z;4LjEe0#9dKeCwHsEA{;jfeN2`QJEnb>;faZS|eoH9=Hzx{d<8sygiO)(W~p=aGJK z3>NQyX=?2}P97eKOZMEc#f0rCmn$63G9Op_&+?mz7EIYkTs}d}dP;MytL9rr81Bx;W;+Q_o%899e4mE9<(C;fP=}z8I0;jvnuSBuL}L6gx(wul89V zy%9r7_paR5NT6K#8A&yFd01oRXE?0)Cr-a~cxD3+mf3;!@}^^jubpQ${hJ=WoqOuo zFY>*z%Rw94=73F(sLfWr9T;e8qeDIky6o%;dY}r2a%PlQd0J@pZ8=VIBfHx^&SE6n zcu|tp!A;d5_F)KrsRd-c&yC~O3R_rddTQ|a%jU9pT17TGO!!*AR6#Y?(r(rwwqIvm zYyy*l68VNE{M#g=vxs(fv6-++R!fUrY%#ky2E@yx3LNe;qlGa;dbv!<-0R@%aOx$* zcP%S74KuM8V_ri!E7N0A;#PjpCrc~{*y7|LSfMGLXpi2108f~E1fx>=U4rcGlykrxIs&!e5bRlMGR@SkHXy}%r zFyp-OT6S8N>W84KaGh?5+XfnPvv22CeC6FN65wzv$wy3_zndVC_Lt4Pn z5cM0y3v5Y$og!oIy6=}X5!J+++|!yy{6AHDg80QU|?j}K2a8`jy&_aN)( zu!bK#@=GK$+j+ix)R%5YYHg}bWIb>7zHP*m4M1m}y#>*xSTxOphV_S;I@R;Ti8+hc z8_TYBd%cZLAD%baJQ!~#HqW^?k(KwX_Zvo!_~~bVrZL8rrhnon5BO2sPi7GF=jDv_ z_bi?yK`|}9SFu(uih66I0Hu=ob}LrfVs5G!29Wbem);JpGgvA=q0>v{Cjie@Cd*Gw zmk+%CyN824{k2htJb7WFAG`L;3+nyefA8|%KAX%r*-vNfGQYx*yGFQ>W|_~7qOz96 zgJP0@>`zar*C(@cPp2EN)HV7`{Z@`>2C_fHgUmwBjHa z7KhyF*v(5OEUXIGD4Mm2CD{_Oad>repR#pIwzGx5!$s{AA?!?ZMKGT2ts-eNH0N%F zZWcQFhV55=RTL23r=M-i8qL9a+tOV!u)4`N~ zP|&^zm$519CTZucXwhniGG=0JPHf1b$o2~rEEC8by~RcZt_D~@1VHbRXl*&ey&w3@ zrZ2y36+H8kqi}QaOM~vm<|`iY1nu=L=>{96c*#)q31#VDSk@k5KRF7pKZ9RB9LsEBtzIowT^RGtO%&lbcm5tZc`be*q@GZi$ozZ21 z%beK2&A|X&x67iY$Ba5@0Nm%dyL&t3-$WPEH8)p2YtGb#O}7m3m? z?hBsCb4KULZ=HaOT&E#|4auK>HX^%Pzd!S?uz<6Igz<4%HkC1_kyB}{FMGWBHG0eO zR=PKZ+kAx=Bd85o4g90@X-}KXrphmV{`leGX;m zvKiLJGF!#t`hEJ{cZK)WC0)#V#UFm` z1qrREx$@Cl=9b>uvC2??{A6+oNwDU}$iIv)A)g(w%v|dLB~B&Ics`~G)(+RUmejx5 z;c^-s=^F}t|4`9yFQ1L=BKhL%=<=sK_mj@Dig$2Ie@(cf(j{OnM=>I1W?T{)G)*c4FAm9v>;YQaaE4`GccgG0Go(*<?nP>`Sn-w zUY-X&@Avn`2MS{;d8w41?Bjrgjn1rp=+otISmiNZSl$!<5C06YRPpk8UYK*h_`M(6 z)7Dn7(N?=< z$Wjv0vz5+vCYsIpb2g2%_0@oC=79*^R?Cwj+dFY~_qlL>_8DdZUl~f(J~OHJabi@& z%I_k7gWr&!dtUD7-~N;R6L0g|9tZUJ^M|MZ*?;Zz#%ihT!mq!UTQHVKV9X<2k7;{- z#*lpn1?48!c6PjuE&L2jk-xE{h_FFLhGy#xsf5?5-xX&mi%8d;kh4YEm#cBdIlu}P zO+gBoq0mQD^q!GIKk`y*EvUk6I=Tmh9gQ}BAg*oF)tT=PvVLC``R%ged3c&_ZN$DE zgQm-bGN+5fAd*{dA47&_C%+I8ifW13j4Arz(dYnEg2w{+ihh z@CToxr`Zd8K6RsyTbH@k=SO;7@REZc9n<4h-Jk=WI6)|Uvuh%YHKxJzHR*hR>mX`> zjZJ@>6vpDtu1iBCDX$8rWuHqhcMGQLc43+K8odJ$06Ut|ZFdno4H|;SUyn=}(2Res!bL9XdwcU)Zb1 z_CNZ-?{S!+=!;%{o$_m1#tF}V@#zqQ%^%2k>0M#Z%A(l@B{D-X8`%bXH(SQ3#_oRE z=nEug6jQe3`oX@guPn_s)j6w)s27;e)<#>~G-MyPDI@o!e{%Em?Q^-ZR(?%?J7Bn| z9b9Imdw2_C@y_n;IwND5tK$vM*kpcL@5rGUnT2RPq_{Y{7#-VrJf9!5?Bfe&)Gucz z)cQ+O5v%7k`OlIc@=x9`n5#chjFXYRN;+@4P#$vWJ!{*zSy`SujK6K;Ay!7Qi zVI=!J!cQzf;hewYE^qMcW%aCoc=v_ZRxdxGd-+Gh9w@v1C-*xEo_&mWk9fX^wvWT( zCy5`9;pBY6O=f5xV_eS`C|O$}Ih#f^gG75b4q{K@1bHO2Mg5BvWXUpA51($Ye(KrAPq!mNycSiTwT7&V`! zeDUp@Z)nVI!OUR|J>PBvF8X}@V6*yI(<_G3+q-dK==hJ#dg)J@*=CWexXKG_sodyK zd`szs{QOt$W)E#20~_?Ih|9k*S#bCYyaW6rf0h?>8(rv*Z;UmLuLcOqE}E4g*6(T5 z9FP>93$q+Hbg;BMCa$u&tAN+`fiXhF?Kv{;18k5$ao1AY!kTI5Z!%1eqB%8pg zx!48$T(()}VHuOwKe=pH*`cP&#eA~eoGx!cW?t-b@#SbxSbXn?yuuLs`*1?#<@lbF z(jU2zHgdXI<7;w%VF+lt+QD3PUCp7G>1I9{XHt7)o~+X zuI$T_=YxG7Z0`>Z`j}U*>js~+{D-*K;I}F}GsHdm&*b)R9$VQN>F@Vp-T7m&F{>op z+3c+JiVb(VjnbzZ!qno8pP91{%Eu5v{fuYXCes9tpEt69?N-gQ@;~7cObj+#IjF8# ztnHj*^)NXNE#lZA^0~>K)Dt4J+Z{w9I`T4}`M7d9)}Be-sGK(fWQ-;(IKz#=lBs?7 zzxw<*Q*zYD^@ccb?^pi%Uq9Gzg}Nbb@sg?OD^GeF^jaTFUJaQ0zNN)};$ij%A`jxU z=2kdEh!N(0@|t@xqI^0_X%rb==TFJ$ni;bA$`7-ZSu|VvJiM?-Je?TpJm! zXpbPj@#*3lM*K+%{=J_7KS030R6%Iac*~2k75d>ZcAgH0zzqG+JB$xet0{f6R{EwD z?zcJ~mnnZc83*=|pFw|Nr}X7}y!QUbR~!7dL6>LKrSKJQf55MNLi$seGvu*)d3l4v zw+EX2)-{Y@IPW)}1L53Ti<$68TQxjK#p$jF&w=oC1%R5WdRv`4X zbtJa{jlZ0Bf9!fy(<~418z~Ep!)mk)>r<3K=OY;|p0m`#vN4e#Ic2qUkajoQyUD`o zTQU}C@oX})sP}@$7sHSXo0#^xmCJ2kXr|&EluW?AD*t#bF;@ABFRLw+NY7M$g8bJs z{PBnTxT;ya>H~Utdd1R*e9cd6U;%&lauU4iB|JeRe>CW#N$~8)eSnhfw)cw^7JT9B z-lv=ny#2!3s!z~J-}pybTeuh{adERO6k25?+^4ys;dhbqyZtY0MHkPan0>Hrw(t!&I^8=qP$ei4mGz#VBR>8#cvWDQpG~d#G!H2D z#)x@c;_&kOZ5=nJ%k?wCPPRPcS3k}*(z1ed53zcRO<2M4URJet(2yw_k3_k?Jn9M$ ziP2m$m~^i%e|}~P^2*!yvZVIxL|gWgOL}2-e;qqA8}!L@h?S^VI~9GMf!4?0)n2Mv zpg!)^>mY;a0eF$#R0jm^SjL{|>r`tVNlCt30(TqcmlSIeH4DCY(%2;IagI20)8l9n zvJ0vBCAJyOR(e2irSK+Z6|5MZXKP>|(A}fhAbShl0T-Gc;H30XHIAd$rL~j8Q{XWs ze@Bz7Lu@)|hpU|((*6hW02?D$1m8-(rb6Wv{H@Zj|AxQ9 z3Zs_hKAIaIWS?P`1w|JhBDRj=@O1Nxe{n!2Y(`q^CGLuU=!9; z*S#d)*GsfLBn6zmHs1)h_s@7>*(t!C~Fi%b+X2d#Wg=#8lpR_6yy7#R&(T^34CKX6Lt#>N(W*41Qwvs?X?kf4qvV z4gE%w)+gi?*vLl<43$WZX3+-XMX9}!Z_$_26%spB(fe+Hd9cUrR&tZ`r2^(`G z5p%bN2`g)aK{BH3IF3eN!YJVpe*;!_T;^8KXENu08<(ghssHBs2XfK83*Zi>GoAY$ zO!6WB1)rlYFc>`i*XLt0@M%8w4>@;Ae;mAju<}qi`!(Qis5123tuczd;q!o6b%Ojp zQ=wj~k*u$?0~XtkjL#4XMDcVo`p&j+AH7@i?aokjKBh6!!SP@xRPo$Ueei^Zg6H)O@-&i%r(9>*r5(FryJO&IiX zCX>D5Or8JANB6W$`nh(4oil}1N1~oA5wVu|coif8CRiY~8r@Qme@1&ex(#8jaq3G?I=ExUZWOFMYonVuwHLWF7NLb(j?JZk()8H4VaTqf%mYKk zHW1iE#&-GCYx+dRU@ul9Ls$k}E1-QnY~I(ylBb`b&hKgUf70(`68hud=_`KgZHC(4 z9}GImGPdVFG0r4*A$Br0cMxY$Qi*F&Rm{cN+8*oZ$nx{2HA}4kNEXs}4rYRJ#xjlX z{3Cs{+Pqw;pG-x@$OpQEU8RZ>3&0xSTYh(1je`KAk5~Vx12EMO)Bls)7C$k$+=eLD zRE@}w?855Pe=5s7B(3q5UynOfthG21bt1IN6@l0k%-d2pL86&qQgL*MkFlP32#J+z z70*>H&)L{9owBkV$>}+Ar)F9(sAd_k0*6QGdSjIOPk&69@^=rTfZ3la-|zNzIrm=r zQsIj~eECxcqvrY}I|G08{2a7HUJBhM4oo?ZnZF9Be{q&_KQs)j4Mc33A(0%(nm`>{ zRwK!Q9pUSt4kv%v)!J;W%PL{|oD=6BUesg}3JiIrnB@zsT_*3lkF7ZRj8{ur{rIf_ zJbKFzm+nK=3)_8tnZ7jy>-o2z&k(~qJMtxKKRLnSTtJ`KcLYg2h^HH^a#5I_$#HhE z4#I?1e-Ch;Y@P7Pyki!T(q!>5*e$dsAxw@aXVof}QN;bgl{5-N@fCJ`zVAMI68ID< zkNIa_nDz0;=wH(H@BGZO*XdKUaIs?$(@KLeF=oP(6q#s-oczKUsNhJLZ)@Ist>Uiu zvP@3OTzKy?O?m9Ovg-W8{tQF82_ft2tLfM1e+*l|R|yqhUDdIv`fqQjW6KZ0)7Sd? z`RJvu)&Ielcq*Ce?akopDxd$yhqF+T+)+Xn9Gb*>wj1Lam(jC%w~vk>o6Jtdj#%t$ z2}ba~gv>9ii)Am7^UXa>8svo)vlIHhK<|Wdu}|>&zSLN5@Zgz2zWdZ*?UdSP+5fCF ze}VOKR72F@t56G5b@xAcqHH|)=zYz}kIwD=bn{845It558cJI3#Lz6PaE39jm-M)P zL=aTx%+3)Jog*vP3g04=$gnW89eKqEbhaR{ldAI1YM%MMx9ialPvx)J`3ryk#M0zT z?@Ofze&6f+qb?hA6h*sQ5 zj(~zw-X%ikv@*?DyF^Q_&62~qv|2yNO0uE_c`QQdJ42k;P?2W4P_^p?JfBxGw z^=l3e|3sg7d>BigYc6WVjns5%Nz;oAr@)eYlakQjbB zR$=B743`COs?7H9=98JEg(Bv&5i|2^f*`ivY0v+g(P$6CN4kI|phoe3O4KiLALz~O zvh#1Fb>9(Ohn61q3_jCy)f89*Dx0YnR5qXY`acs+-OAn+o@vi$as*zXJnZH?w zyyTZ0$qB~3w&~<(q4d@0Q`rw2N*ySJo5D{{Nm-Bc+I*-*JmiD#(vp+&TgQ+mLHifB zYFUKZIDMTDInB<510x1R`PeYy(iL?TA{H(5HyfjOI}YW{KR9a+x*J}}e>W=gvnc8d ziFFTHc5)4FqN3tXDX*DPUBlJ7abj9vQlUq=ER9f%Ly|1|Uk4XxHR>b=X&+VT%Ant5 zVu-);E9`Y<<@saxNDM!AuIT4I4Zwcn++TcK)Uaj_UTB$bpYGwZkjU-G*W#nO;M3%` zweo_VHh5Fit}EB)(anrue@!eJEhsK4Z^3JyZuQL&CV)bXSVV5mDf#P^)<)~br%CuuEoMYvqgHZv&J`$&6_=Lh$Zz%e;wDeu};*9%#Y=0 z2YZTifVQs(Lga;4|J3^Yjjw-dSAX;Wz3-+QNW~qac>2npAibVFGN|&!1WFsKXdmD6 zWtmAl)i+i-))9;MmaJ2oXE%ex{-M!~hJFj%OU246Utk%#EyRoNLV~b9Mrg(|x!&2K zR(Y*%8mw6%p9Kz{=z(Dm4ndSZgXpENBR@f- z*o3E_k*FrkfxM$5U9ykNr$Sq7T0raf#5HkdLZQXmCpm==LY)gg8bz}-ZocjGCtS~? zDRXj#(;TI*+j{xZ*Q-DEdA%*O{)rb}G&YNT8i3jDna=Hke^O*n>#QLjAJ#>H{Mao0 zMIm-J!u}zye00Uq$+yzwjK;>QxX`${IONj!CQDJ_FIFPgHb&5RGCw#$fBsTe`1?)L z5>T$rG0=Mm86NVK$G_&Ke3KO_*b1rm$54N0P|@3w_!Xb-^#JTvbHBa!HCL49uN7Z;v_FdOPcf8zf?I<@17f3&I9`j*2QbsZlwG`8JVCXYdA zgk>1cx`mjqDL=Zfaw@z1NW-yz_f<@yyfPBQ!YJ9+E$^_3=8PA9x9Q&Zj+67-H!6NShX`P#Kle?D+ud<`(v25XqSw<{bu57rv)htih4N5*PqAj>tzO9-PaPlf8?h9hyCq+o~59F*{%!&_&#a%Cl^JSp( z%7SXTB4WGP>u$jGT_St~0kte1A3c2BIpS4+*HviG>`a~Ka#cM*m=o_ptcs(& ze`N&Bs#y^yfY3-zY zxNf$EX5osg&7wKEFq<<@bSmV2(Ne**E9u%L4rG0b(X*9 z+gks;6hC1aPK+``qFRmHy9JlhsnZ>UX;A?^wvIPW~5TIF1yFWe~rz)=U{Htb6-CnRR8g;Rr6V*kD z&^wWAk#klfb}WoBwG5N2t*%*_e<8t#3AOVW_p&RhF!76*nbQZ8bK5++XDP$phQ zT?1tFJHO0V=Xt&WAo^=g=RfmNvX@O?`8u1Hd1`(?D6y*;t+Zre(Pi8v29Maz;<1+; z{1IqzAODzH+HNWrbkF+O$0A1s3&NNgX2zYB^0$$pa37Hh_cKtnt!Ow$e{&9=e)_7# zReSnh#4CFln*QQxbvE(qW5r7ThzWJglzdnn-CH?%qmnKHnmyB_zo4MoC39G;YPULZ z@z*}%sekiiwzs;SvL` zQpG#qC!VD2%@8m63zPhbe_amxS@w0d3n*5beT#0;Y0XjwU2u>yL#vkyDVBCM=ES^D ztb)BEsRuUCmWa`wz7MRG7GmMD%O>=>oM60N1aR^Fj#2NtrnEc)6m($rQ(AwpT26W zl`eY8cz=Gj&!d>p-{*b%+Bnr`sPRC5pZj^?@{#n3+2ZK13;eDQd8~f~Pcvlc*ei^8 zw6ZZ}Rg>VN+M6Y{Hj%E`My2^0T^T|T97{H?Y|sX1w0u^Nf1s;M^EsQ~mcFaomodw< zVEpDr$Jw13vpYdM5@>AB$Ziwh+0_N@jM?jg*Z=>f-2cyS%7tYO{ap$GC}P7ifVopA zP)F+Cl}Qe+Fh0Np)JbeX-oS>L6fiTs80ZkI;8rjPBSRse0*(Y+=5W9jj@5kyM|FRI zdS>c5P|rj-e;f?>)2{j>4Mq~Cu$)0491Bo}ZUAS{W?(ZGgCVdckqz?!F3=NhNAuu(_7#0*A z4zTy>Xfzt>qmdZRN2BTYXg8WaN0aZjv2N(Qq#Y-(`TUzdbpTt622hfcWKA*h9sr9t zen@l-HmL`i^$B&Fq7$W;B=rD0sr9Z7e1jDn=!9-*Xh|?n5{Q4`9#uHi$OyeRK$-?4 zf3Yqn5=wcXp_6utmSRQctDA*b z;v>p3)QAS7ems1FE%jj1k72BZH9_k%skRN!V*{;`eJVX$N+6H^vvdnSYwuozf8P7g z+ts~zAb{)>d^TC=h@mn~j#ZYNB!u^M8ZIQ5C|f7THMK%%Dq0?^djoMpNh}&*$1G){ zft?YXr9@qT8xRLpGzC>-q;PK_YhE-p+zwR@+m)=u5@XeOZKmka0vj7VUG)DOFa=K+ z5`}d^J)o?^E~Qrq;iL5-J~qHke{oWX7~PBbc+BXyD$)>3W(HXUX zOD9CEF_v`T1+YFrD*96NkWBHA+E=qUrdu$aFev!%9JGHwrU#W82l{dT&`9wwotT7s zfHFLhMycc<4(#R!hPu5iK^f)EfhN_Jh7)OYT?@i78;chlPb5+DlY*cgESYxgc}Bga znzL>YGQykULm?Uj1OFh>f6zWEzLAjns;~tzy*^}H;A{e-Jb`EsIk3{dH;6Xjm}d>+ z6*sG@f$@<}#l1m(6OPR+90xybTbfi=gMwE(YZ?Y28z))AX~@*R#;zpcecb>ul#H+& zSwflMh68qDR|3xzKNwDevW5|06RhZm%eoe2859kID^)U)l0VN}fAKFeGVe7;nGEkI zCuwMbt=7*cSi^6ulC+ zn@CgbFOP5D7BbHxf7lG0dxNT`VSe9km0gQa-@f%cVh-)HX;{{`+hNzDwr}5i9-%|~ ztZ7))x4WG@{MrA7L?`4WrZ`aiAtBMWG&GgmHXimxgV2v$rWv`%DfWeT1C(>6W2$?D zwT5Jy?*8&E$zS8puqPV#?*l!v}g<7A3yB4~>{pfjw z4DE`(@zCCNEh_u=+VhA#v@80GzTGXm7KeTN!^1FB`{`}r+rvmSs1TI+!ybS3wpT)m zrB~LRIAd#kOuL40Q zM7O$rf4aYTur%sJ>4Rur#{I|JDAQhhh9$_gH<;Pk)~N z7wh;0KCvu^9NI>d@@StQbp|^+h7-mU1|ZzP-Eb$V>y^HsM(6h5*ZuQnD1Nu8mLC+hKuUKo z3G4_8+6B%%h#SY3&W)^2DCR^H>>wlP! zS=}2$HVr`9p!~qf-X5Ax36Ix)9+rmU8||+?SYoBSF$i7;c0acX(Y-Cad7k1`SW(TD z(51?*412W&NKEmdhE!NnLitylqHUNeoAuMza<*VP@u{~<4<^)wO^2-WU%YkvujRbc ze-9MHtUQQAuV;x;tSgiTOIKfz=Zv&WE#1clW< zqaHv0Yq__2n)dd^kcS0o3W||MwY?f!*_|TY{;tpVJOYcdc{C9qv!|C(nB>z{fBr*n z2Q!5U^`ZSCQRP1xL)l5>^zsrSd%&#!JVfdgwpA?z(3mKI@u$e`AWKQE>#~tM#uv&sP~NL-k9`?y2HEL%&A#1Bm;6 zx#y|4x7qhwg{mK*p`YF~lAu?=FHiX@L}552`qy#)y6`-TUhkLv>*4*w&|j$6)zH6x zeH{AtuTMk&{`I+^2Oy&6vB7Fx1h}H9RjGL>`>gt1`*pEbzhV7o=qHEuf5&~l`>=kq z@3$S+|LUi3C(PXLkKV`?J~QO4AwBQ3YVVi$A(_IdHXUSCedZJ4zQ4l5(QkiEVZJ~8 zUoy?eZ~tAX#@B9t>5ggt9pOVVH7C30>$&@hkm|n=hyD$rs_#cX{%_x=X&CwK`=#a{ z{q~*rc{S0bfNjs|3#CspZ9NHryu*bZ>{F>r*GGFSPp(?1KGAw_X%o= zip?mSwPxi*Vk?!qykIl{hFCa|F<+XNTQt_<4RX72`*w2^lJtUL}~Op1SiFRoo*rWCc$> zL4g+8Pv4Y3T1Q$mPRb9g!ircQLzPb*b)pRe4>B>`wLz1wnH)UjfB_#gRR0s;K}y(-?0Jk>vA=%M;CmEW-ltN|U> zRpWbgAG<{$QO~A;l|+{Mo{XUz*bH_Ib?O}3R-y(h23(+Tuyoh~vif-mHJ@0;e>*i_ zU(L&?U*G%Re;F5R!20o|n$Q1e|Foy4Ku@qLHNSp+K7)s%!$N}SKL_Zf#@#Wbsrha| z(Odte-%dcQ=Dh&*IUij1|7@R!{bR6Z!@Sn7dBsxEy;AQ#!1C4o6pea6d_8+&7=H<9 z3U;najvs1&vVPt|9cd}R7T~G%+Z5Ik-c)_X|NH#$f3N1%C^{C3j@du=fBf==p{u%^ zM*YsfPVu?TV58<`(ieUD{hO%&>z;1eu#YxV^IE8Jw+tP7I!RQ3+0!*s`4cWsr`k6X zX<(~y;Gw<;3l#VCQsZOzi(dzM+e&om`9h7S=&06bRgf!wv84Lk;U$mH-+9^nb2ZF& z;HwP`fAfkZa{3=@0JKzjll6;^|M0!!pU3U_#LQIx4s2hw_x&jh{Mhp`)Jh)ee>Xp> z_QB57K3r6L#!%VIhgz4NtNuOz^!*DKf=Esj9oQK(TDCNHt8Y zmp`iNMvs6qRV7d>`Emu^RQkzN`+;XKxuNQwIt`2%0s&JcM+*iYU^K7fZbs{dFqg2Bt+E{UOp z6ER`|%Mk|RplR~hL?BrN_AY(D@W_`w+l{xUqv=1P>)Ye_=^OrUdjP-f<3H` z)i1@|Z;$=GyX^m`|M~y;U)8U~{4Zade|-PHzh0fnWA`8KCfcw5H}kfs?vLAl>sy+8 zcijKSSq*U<-jVxK6!-toHSNDOxBNf;@rP&BuhC3?etKr7&8_~5h9Uc>sjY;*{z(Gh z@BM!-Wr$9yUP{dHTB^!C_q{d3ogfn^nIR;2BBp>am?Z~Wl(jFyd^;j2;nQWdfA9^z zIA?DAHTyE}Wk@&ndGnqV8F#2A`lgaec-x#yJ=2asXnvs2eT+8ri)#e3Szk-Vcd}&@6 zC(WFE+&`v#_jTgUYWjX3@zgU$e`EXnwRv+$Q0WE!V4EF{&NrG)P8;KMfBcwCi?@^p zU#ro#bD-S3BYG?khIgP*HW!!L*(8X&*ZcQu{&8|XcTH`6yzg0Q-!j<8!Q0#WW`2J*aBF5A`6S1^_MDRS@=HW=9)!Cb zJ>R|_ZzHRnuGc`DnKm2WO9RXI=BWe{U&|?w?Vfyid-__LEOpm#obD7UC6rPrE9ae7`$df-XUd zHArF>gaCfsP8dD`)J(HZk96$8qihSgD!W7d9?Zw5%|TlngJcm_MOypsauEO zJu_Z6{^!Jg*s=BYf0jPF2c6ElukJ9KiNon*JK67GHeMcg8+g7!)ih?fIQkg8&3inf z=)PtP`Xt+;R^R-x{BE+?_!ylMCoy?N%shlziQLgV4e)y+@b^&DL;-<*A&$aipT zmvNXbBW_aNJ($w|{!H~tHt|~ezBGZ8@2f+v2mP~i{l-19fA3W{?&#{OwO71j^Xnwf zk8kz#|77btTDAqcHTz$B*4SW9Lvy^%d1lTz`|F$MrMtfar)Pq)tx_zc*lS%b*smag zV--Et!`s}5`hWoE^(*YAiVZsY5sDm~Dl>-2;J^b1oZ5?kPnj)@BU)FaTNdw+1#b2p zQ;L;2@=JXJe@eBJ8!vQp(QRDRDOJojDq6%{YC#wH*0U5udwYk`G)_>w&*YwY6_Yw8 zq%?$UVOHR>U+0LUjV^|ora83MQg$E-x5xGSET)P;FTn|MDIq`^Rkk+moDXgd9s6pi zJ$8M-5sYF9I0l(>4PvM-b1%pLydMk9j*nMaiX!GhuAg zAIK3cW_$7TqQ>&Y>J^^L_5@2<*tI-uQCQ0EWN-D%&LULuoI#&wDr8MTsS;`XW~oh4 z1ZzIoe>>zG&4KKIqb19$WM23=Y%n9Y zpPKDdp{W3riDs8z6rA$b)G5lOG;d7Y#j?j$N_y3lkezkP=os|bg^#B|bH53^ z>?~COAiNUiD2!m|C-GrAOF>Lrs4x(}zTltsdtvfpASk_lRay9S zb_YE`4hW~MqLYBJi_GiNYDuPue~)&SE)EtIvaag~hP@o*Q%SN4-bE*+naW6Ajx>@X z3jm6~x}C`V;wt2FdpA6breHHpnuoy-ufDV1r|eV#@1!j1bPSKs0b?}AJ?-gO_d}?X znmuPr#(`~d9LA>Bkgg^7$s!rIN?00IzM1`VC)|nEKC@|L1Pd`JlYwMye_(wgky(q1 zfoz)C-%8*DLom_t^4qLbRNI&W*3-PxUq*7^0*#ICy^>z5Cw%!vB%^6gj6F&nJ6 zldUo9r!@ybta!|PlEhh8bGs{`U2x0`$HO^PvbbTw;(F7D6EDM2F%a3j?3 z7@KptG!l{C9Bh2(t!Q$n#0`cyv-lGT{*`}PaC4rCHO-XY^>;${Oe;N0vzD!P&Qr!V zdHs~a8h<}DT*Znt>&gz}0|eO%G%%oAyR8Yk=Lo~&P@1Hs35JBdf0YDY<|j1_>b!8? z>`+#ItxDFB{r7Efbi)k0%8k*}bVJ&IAyY)st_;v{uQ0zvY9Bc`BO-VZLU$05X9=6ruH4<*C7!BgZJV;MEWn)xt_^kh0#j-tiJsMuv-U?!aqpn>CILhW4+ znJVysTT73`OHC*ve{DaUjuHysooDfhMeR@not$CQAruYe9s}Scr7`Ck!0V{YbJZ_0 z?%tG?DV(<6hrC32x#oyARVqMe>hkyJG4fO2!1oKdBCO$%v%Z1haK3amm;UK8J8`p{ zxXoXL1Rz)Lx3zSJEX!1}KcS>Pvee>ANDi1DSbg*aC4h@3e;fOuBFc1vjlzqMp6@xf zF%@q@@F9i*uIV-#eRuZF>FlFYOf-7w_c558$XI)LphxNt+=1`lQcsUJJ$_=mE;&+e z{8@EFWCna0ugo&tPM^6B1g?;ixyr6EPgI|pTQ0NfG6Nq5Y41GH3thq@sWI*_?su>Y zAp@!CywES;e+xok?*+W422>*#;x7_{?p(N0udZb7*9IbjDRZTa_03|_xT+Z45aN!{ z3Clk($P`VoW9RMOX3nvx=xY3!fsumE#n5)%7KP_vcdF1P)pmpfasV^O77e&3jXs^iz)^&H27En{BO4lv1N%n6fS2?mm#9l$(we=ClX3+v%E!{Rpf)X##yaP?83 zTq}*uir`(gqfXpZgPg!UBYJn)K6D)U0T+fxeJ7lQ7B+4Q&Q)DPB%>)tmc@P(UE)~- zo(fbaSaf0K?!|*)Me*1X6@D5ZeGLsYuic5H&1k9wux9{(qTxnab5bKk=b^)c6e*@vNb>=aQW(0H*&$I433HMv(V>~PrqLGjI5i=!1HR=IUkE_^$QlBo=ADe1_xw{DIhc;3QeCQ zPHE~eDV2bfU|Jt`s!ihd2Qw3!|45HmOU@^Be>aO1w2;@?YXhUKUfpXv$0}(XU)-nc zT55HF`*z#X8luM+C<1;rx>D?GlKG#MLdT=3sQElc1X3Y{9zJ3U8kG%dfVyWMtz#@( zq{{fFT=GA0)j;K$XeY^kxMh~*mC;D&-B zfA;`@!pME?)~v)=LgMPKve_r#)OFLX!Gv*_pYh{YoP!ROlhtd9oq}844 zLq1YZU7QShDycrQbzX55mN+}gq~mE_?yK?&zwN$x!EKnK`7!rjDCKqg{+<_5l}ziN zYOUqN9bGko$Ov4%`_d=&c7%BM&j3_#D&xy!eVXPn2$d_NlIk8lLFbA5mwUfqCgVqPIq`)+>Y(T9lV7K(wk=2*PG~dh=QQN;8k+>PH{4xf34{7YZ2$$!L{J#7fRWQ&tku+4P|f&D4q}}wrZun9Ocaz z5=-w`TxLWqwPzbsmxr>IfD1BGO-hSCrLPnPe{_5#hSTs9RY_NdRjf7?2)Qt#3*57b zsg%>wns-Yl$sdtZe~~AZPHR&95p%j>*zasn5(~DO6_PKNsd{cEscE7W5sxs1-)gcQ z(>w=o@RbDf8`|V|IiFw-h9Vy2c36{N^blInq*!i z;tk!|ooUG;>|DZ6Z?UT=%sRj?MsZ*iu)Iq$@@~|Mj%iQ5e*sNIHoKH5KsPcL^ebLI z21RVdF{WB+2dIu5>5bdswojcAEnxsezIS6&{nz7x1J?7)(uL?uY-*_0djo!7V1fXE z57$qEc)GF?Mx_J%G|c!J7@fiNN`eQGM(XwZ9RhN$-u0VPn1FEZl)IN$nVJ1OGLfJr zKU$f?@xZche|(K>xX5KY5=2?P!^}`M6it&?2yIK3q1X$?#Yb0h0#Lly^KI{Q|!Z6!33 zKX`=5j!hA@YCTj7Ff>teCX0^`%X?wIb6#{_&3nk7f8EgXhdNsaRW!AWv5sJ{MI^tl z+^^@NDNsWYwwg?=)>GDC;kbJ81*d;izH_=WC}o>YU=r7X0B9^&oyzrl!EI3zyIXKL zgj9k}x6)~Ax&K@HsNAyix1WD|UM9K~I}u!Ll!`*U!_9oj4%-QrlCAS5F&4~TYa$@F zSsZ~}e_9l2CkbJhmdh<7oM!ZIe4-!Y$Zk2$Y8NlSlk7&yWA>f$Oa3A8#WpP}iw?kl z!NJPdI$QlUk*yC@g$yu~$%9o#6IrA0ofowSaI3y;0#mbE5a{-A*Mxq;*-4Pim9vXV z2^qh9X~LW|1%Lo>k;Znx%3i$u@H}FXP?oN9f6?DKuaDP(1i3^G@cT7w^xNy34y{B? zd|{o!mfT!^`?1`lJFcuUygT5SbZY8;Xs+NAQLwLp$Qo1p%bK8SF)MGdxzdU|-WAw= z#HhrBhp*0Mf^CL&%hCA=vsE%TE#3TOQSFiL*&^F(B>+OT1@^pPNpS?***t4vn`MOj ze-gpOA;t01Hv$X0TO}J~8U!4FGY8zXk_P8iEe<6VL|ZvpAxqD=M>8u7&@w`i*!|lU zzp9h)jTN@gmK;XMrqk%JioEN7=|?fX6vti|J)>{78s*%I|6Fa;Q0rR5M5^p+G_d}v6UgZwd z14$hVpD#F52E+^2#kh#N*yB#5AqDE<{&@>=>rVB6D0_$}>WXF0^Pt@(qL55Q?D3SfV<*Fu)tL5NqsNY_@ zP)K&aL+)(pD6mgwzQE@aGtsay@fzeO&-$%QiGU*VM?ICT;0zZnQm1)LAu^Ki^)B!{ zx_vKEZo28|u(?NTrAiR@JNj+Jf1VvLZ>Rkotb1x&p3ji2aJV1&Q-vj7VuNvh{nltu zvkBOs;g=KAS+v2iW3jVTSid2q6h?3rljp>-YtoUJD#E#t-^82BNQ7q32fF(C=C{9{ z(?nA(8m_U6BLRfMKyaax3Lh^?YO3*BIR?kXeDyG0V@%H~J*{YxLUUzYf5zQxV__ti z`T&1{y6%8p6wPdv?tXgsJT!)>$5ke+^Hf#fS@LWIc8tSk(a0?K&+cr|U57*P>e)4Lj~a1`bE zi)>f|{H6%?S0+z^;JSbk?8zEl7z{z%qffHi{NBqG+o_WRk#Ubsm=Hyoh=!gB@DWn} zxS7HG--f*@;qL8^e?)48uEzbPKAYmLu{dN|JAb9PA3~aH>yCPB23cFA^-mBOa9SR4 zE8#P38*ZiWKx$+yv2@Eg$i&ajNL;;ltO5pzM_>=8}6 zaOhij|6IxaA;;Q8Vsq(7tC}8il_m}5r~%Pmur0ZV040?ByawrW@Ody$M##4)ugIYU zez(yvxy_+j*>X1{She4D*=yn;W>wBQHyEB?+Deujc1E7afxtXywc*MT#E-LdFi4lN z8pZ*ipC{wOWa-fpJfGbT_bfrE%g_ugTwP@_iQ9!R6#9^hzAaXK9)$pu z5DDM>L%S}g{Hdak?1!)tFlv~sau;siSW-%SMZ;U>9&5MKt5v2_s^Mf<=D(XnPf;z% zB`_A#EtQ`K6@PydU@s+9(>^yjEzCz%A|a5XegM@4aml}zfua>_o_1cUVGIf? zqXdqfQhi;tP0WKf4a49_(?P+F&cuA{_dnBsc&+FJ+ z8OQHAyQTLL!vcbQB}-i9pX@_GZmD5E>pNLfq*ZHr$1)K%HXnf940NeS0>$|>7RU5c z97?2T{};8v#zqv-)m0P2X1;0}H+_LDNrA4BuVofAku&3qB$l2<7>=+7x`vKxfg2s- z=79Q~W`9gMs`M3Q0Xq=>8mT$#aBQru+vMSNPCuObio{74-?`~sEsze(aX-*7*|gjT zYckQi8*6v8h42N1!eWac`DUwEvLYsl4Z(ZtBuB)t3RLAZU$PLDMZTM887wLID5N^* zY1zC-OBB?$KpT9)kh>D;x#9Gjg2|d+xxG;ML4Pw%!%vI&Bpye!I!aeN!(TpM8QMMjEg(;oD$>y(xJ2u}NS zoDY^kSQOE8%lwwtCMz9*ed0mH-hdrrb$>RAMSNV5H?Yjg!@Z*DXsNJL*3CdjVYN0o zOR`*U&1&stO1fSwAxkB}Vcel)ur%NOrvOwNo1Gb(!Mo?{$N1?zE)GfzzoiF*TG#2k ztFpeV5vqKo5f{2C^^JkR>BIfa!HmOERf)2^;iXBI&S(IO&debEp^dAKjzqkmbq zN+px=X^WzJUV`xRjFYrpGGOOJ?X461$0omdsn-(Iu}Lhn3_}aBT^R_p&a^k!Jx@zk z%w{|Kwpj(J)ThpUqS}RZ6=<)brE3C!+stqiCNf4E>P+~d(cC9LdHRR{C`w<7_P`uJ z+@?!}f~619GTaHB6s7GR{IVanmwza;4H78v?YjD+nhy$dFyjaU=+ffC4xmUXW95ohT~=Q6_bv^9Eml_nO)}OSY&bZkiczo-T|qx( zzamEvIm!b;o)C8Q92Eqs2^<30>yx8@TNbv_szgXR)2PP4JNdq zAATWE`P&S#LeUVW%6|KcOWw8C_)b5fC$wRuRu<0OgtN=+6C+byeYrtnkgJJnsKfLY zgl{R%_7U@BtTHvZ87ff?rQfB!hI8Z# zM(D!fx0~*X4f#VkmY|{x9e>ngM(A0GDN~ltpSESag&L^SS&yXZystqiPPXgKmJ5$O=csi0bw>?!hW=Z z_T$t6%>D5S#LBsQ#^=DiibLs^8FwxKSlLxAt~B9`d&%PjrB?B^gjro-S5ALzpi^$j z8uf=}r3!@e4n{7q^L8;3{bvUixDxCEpxV|_cykPMWVB6gTh0=g>TLodJ8({TKnBq* z3FS#g7+dBY28ByxK#J)2xm}%4q>~trmYsliQ0VS(?^rBaC0vsn+9meX?~-~R&rU6H zL`_ftZ)VM0VGu3K3^MFvC(nO&R;{Xn9j(inbn-QV-mn3pTpRT{HsuOowBn3{?1_T_ zY;_=;9s8A8J;#Uh7vxw8en!YAkNGk@4P=o<;=5mYlB>f-DLzwUg_e9% z1ktMpC&Fs-me>|Etyjv~{O@hB0z*J&52oM!25RKUGDy2S%6-u@wMaXXG>7Xswj5B1D(gdvS2#e+>}@7+ zjC+om@iV-l6g@{`lAMBmMja^$Mw3!3>*@|BuV1FGdkDlK*K2<`u8#8qw|oLkTG=A; z6Nb0ohS`D+TFD;#XLy+eu;SH+*380o)8!*+8&k>01{t0;_1IY=l|R@)#$4=TDmfbZ zajLm6TBX`iedtOCeAUpoMyL;}0M|w?r30L~UvVHMVR3LHVl@Smhat{P>V)Yz`;r*b zo-&ntbDMu-C24=@v#8)vJ+T_g=~f^3HLWa!H@}Pr$r2T zSL8Mp#gCzuibkgfCrQO3PlV0`eMQ2kvg*<;7R!IhelR2zaJ7K0`z5k~{Llfu()v+1WDN$6F&9bLQ#RBR8t^qT_Cb@ zs5^W!dQ}OWnf+r-WsTirtTlI=z|BIvepAroTSjW00l(9am3@vT>5L@l#dO2gfWH8g zum69taTn{nZ2Nt(j9GJI0m-l@y*|$!S?eKxDar2ulOjw%JtXxLc`?paa6MC;TWB<| zXyz!=Tps-h0i+T;z0QHx>q0O^H{cTwgA9KXc;&8N&3fW_@zFdjkVKOJi!90ac3Rlt zNN+fh+tdM6dK}~|nLar}f0X@4h72n6OtX0cYk6dGjreXF-C4zJsm%@4^ahIZo006b zvkY8ddG;Xg`^7oFZbX5*T+0PZCgZl2wr<`Y7f0wHZ2OWR^fT#+o+&&`s z`V~cAuKNZArirH^Xd6Uqrh(OWu64b)TRu>r5aL8V!$Bo3@6bkMOErV%plLl>w;8^c zHFP6Qszb>ZIPMG1+o#tnXGFfKSghD8T9*^z7|A@XK7T!n0anLXMZ~SwyHS<@?m9r_ zX*J<)iHHqO)*%L;L%@H@lDyqSKGx(TZwq(pi=BTke5@C|3wU;ZUfA5vX>%dx$;(xc7+XLj{hR&ojcI_wcDOD7Lk@J*73R{-X7 z;iJiI^X;L*!iz@S#v)zbA=RyYi85v=ET;_!2ZE`AicHT;SB~UW5TWH>>OibV)o+KjvxPYkalmI<)~K5-6I? z-Dh%BL9>`%utyn`%iAUhXpRBWa2yw|| z)C|hZ^tVcI_b`8aE?LV_L?bg(o`vLP?@-0NDZ`3OKPk{OZp@G_p?Y90>|5{_8@7^h znP5tuK)i{5mR5OYLc*G~a90CS=c6((qCA7vL+$J_l7Sk{ybx;K7O&zUwCFAgHttfM zrq^?{76_@iQoUYx1E-es1e%1_ff=7IMi%WzI<2Vqp5A{i#Y}_)6gb6p=$F^yQMAcp zV@$P@ae(K~Q37-mL8={2-DFdn?jEsdHKo!OK;eUMC_N!Jg}R?)nV%#zB+s05V@~W1 zySppc$=@+mL2O;Mbc$vJ!0xhhugNfNJ4WZX46y~lUNsxo*)_|m;Q2)Ki$GSyv2w0S zbPGEW*vNlS_0yV)-He~c^$f%fd-e66Ded?m?fL=NbBn7={dGpsrMfzcvDz`p18Jey zb>MX_&N715aviPcAZQszue(Mcx=5QE}oreP0&XL_NHxSW4rF;BiR{i@ovIeEXg(TN}R zP-slt?L8D4jugPw7)dcleu#-1ai{|d!3l6yxz7T7H1(=wQC4t1;2A|zwi=MFm<$A5 zKjx)*D5puAV5qM;D*Ef;Obe>N`;75RL$IPuheOJyRhzcrC<6H?AhGus>mma7J8x(f zDjtv|f-`@$qF5&eH5|7AR6+(?_ceXA z3CSG7kQr78yM`{{R%hV#b9LzohAMn__am=Lz(?94*MwZelggTmPNETA&r=GG*3awP zbAV}f=AP$Rmlj;SbdQM~$%$X!vj*R>_t7A0#qfUy2wlj59;?i`6xH8-R~(-<5#Hi} zXKf2`zi-gVYBJb^Uon-EoZn=p05Ct8u@zgniYW(!oxkut7$xsDQ6{O)_jW467!3sJHc`OFuTj>cIpP$Qn*UUapG6PFW zt*C#Z#@02|vx!ji%R@>|;U;#z9ebT(t2N>&_v=SpRn3LB{ zOs~`HHPFx5G{|Lgceb%(Fv?^!dk<L6CT z^0zJ%hpFz257@Dj<8C6?BInMEsVH9i+8LV*Z{UWt?byDf)HTzkvhCpW(yD~DV4Z++ zNoqz+t`)e>sz3Bh-_>yjn3>m3RLojZmAWpQsFj#8915?=>QlN?w-mj`@#sxu7$tvY z6)%Wx!tb(iq4|!`JH?J2m+2O+u_?PQe$QXz($o+UM;V@VgKCmd5H{ZCA0GB(gn=Ra zcaRwRnqXFI3{ywSnrolvpM32;?@OlSOvWEFpPPG#Nz zjb!|p@s;qvM*a!|P-Jqq8Es~6{x9?d3u91|LpLLoWbkU~Vmu|HQ1Vkqbn(QNK9l2? zy>t@Ynn5dCWbIBww4nYNJ&fUA3V&G7r}cTDh`~CYW15VOiLrh_+F-LkAm>i&f!E<) zN4IE4)+Q(yj?w0ubToLak&!O>r7-G0>*lN)a~f zI11bnMyW`2NN<}dL$pi5vo{}S3mVxVu!Wo~zb=<{gfkXoIU)uKTU}xMCSyZ9cb8VJ zv_hvSD|bT$^ZfMii%q@3Si$ZW$?HWm@$<8=p+kDe8R!#9oqB%_=H^x$A7PEOKZyz8C+Jcino{YQ^b#$eO^1%6BI; zTIzgahpimvzMYt|{nw*orex{nt2oXFY`WSb!T=4X#i(97Ey0C zq%KOv)-u6ce_(%x$-KmpgkkkjtwlXek-nWxRFBEW=;Y-<8y%RIEDAlZ*_NQgA1WHs z?z4+Z+LciG4Pe%tvrmFOt|g3gXa7NM|6#EIbZHIrrb@WU}V$1J*i2xR(3G^r6_}TyP z?0?#8i$O*vW5--7j=y^UEW+E)FbwvKTaql1;x8ECZ+!NDnC^e2?F4!7V2kCAEF(XI~E5Wf8CI?Wyd!9RDz+BVHlPAD=KvVCZ&Ti@4|bA7=O`3<3C5y?{5}7 z7nrzWk8o4|X^y#c|3CWTzd^FRP0ZOS9*NB`zkd#I>93y9OA1<>%blUdUv9*U{9Ui- zf2Y>HMixS5-{kN4@$5e)z55%AR?P0c=B2S3WgRlu(1k^Se{(|1*i0wpLd#(`#;tX` zXj*x^SqYrQDaEPZ=h(G8L?ECuQ>{ju+Ey4D!;0xe>wRvkX~6Tq+q=6DA}v-5kE2C>bev=(;ZX^;54%T@p}8YEl$Q&Bl~FW zl#WjPs5ZF})7V$$=FivNoV>oOzRWa?dJVa6G}(R9(t5YLU3yWMyXOq*IqlM@x}tAC z#KowCBZAnPwkFdeB(SS6&hJC1XD0#Ft!@j`3c>84e{ZBF6j@PPeuERsY}%FQYOE1? zLTcT3g1e2qBZX?92iu#r`{l3!rxN^y`~rYms6OVvx}O*xAjBo_A^CVEzS`BF)Q z(TsG1WJ$6@`6{u7KEh}G&BGBS21O4a%S%46sse@25GCcDAa<@Z>8hGOOhHtt=HS^k z8#{yKe*%RyPa7=LCdnm&`Hf#HE21j z6(dKgzm1B1@ypQHUSp@#2EfNlK1kF(*=%&xfAYoi(cQ^8$4W2chERy1pF6E2Z1WN4 z-Gcc8Ai7cGNY8~DvJN~e|7Pw4f9jK>pRawT!hTlpq|jZjEAKnsg{}w zf5g}gITmsT!fc!#^oa6(!uKmDLH3#(-0aTi0wBsVETH^69GsacdM5{9FX%Pbv2#xG z)*Ys`(ae^^v@=<`)t+ zpWoYjoqnuOxBPS!FK&x4!hD2#2nnWYe+U~)_}h5CFc-A9T3}v4rlV5E#BE+^5_DhH zx`^mkBHEafmGZ|ov`T*|DpMDD6b^^_gOdC?z{im0v^I<2V$+C&Wm8%Tpc^v^IopeRJCqq1b^C)Ovj-w#eX;_*iO#_5llGYf6LJqpA|x&g|mMN9-m}TY~rC2#du(TdSDD+jk-y-)^2@2 zB6qhXM~wuEp>epG@2A*sLI)N!AS^t0jEt1@=u1%H{#lG=b<%=Ko7GLb2Y~fD!SF!l z5P)9Sf8=gc6I>I>@O-)Ug9c!cqej^-y2>kKptH5NKf}txgMUmHN z71zZuf3{qTD0TY@f9>eTaB*ly47FaUl0R&7bu|j-ZP{3O%Jm)>HW>t-_fX<%bhD zEM6x_sWnQDa&Ll*sZd)yg7^ctJSJVvNY7aILtglxm8N z`BO50*(igoe=4a2EJDD{mbS@ro6g0OP>YUm3E9Np4RW8K(zw95obFc;Z1XTp6sOB$ z(TBxNv555w0{L`iirEfh67xn%0v-kzk?^K916DNlae09-9}si5ubYt6VJs~ak zx)7pv#o?*s5mSxGd3ZAZj4hLZJQ|Qlu;Y_}d{Qwoe?`{>>W#?9v+{X++(?k%ZljMG)suZyI z-4Qhdf8UGEbW8O4~qJnHsO|R?*$mF4)ZdS zxo*@X@r6)-Xb~0K0|WTr8PT5cW(RJ|8cMY}ckj%vqCg!-&-INdN)svJ=8rhHK4*O=b5m)U^AuWs8gA+~SEFj1>jSu_ z@S2Ti42UMo*_aL8_!=7~^l8KzC5&Lj8?Tw%`_ua{i{r3SSiZC=RX?PIV2C4QLieT_ z>AEB4=~5hdM|+y}E?i0P8|O};A{$q#f4X=VBc-A3O!)qTjjY1CmH19qr{2snmEpa; z+jmMV=$Mb-PLmyaNuGF>e53PgQh7v8^IPd;7m+=Ik+QaR=H)dTakLz;>h+c9fI{S$ zh_|6}{?Gs#bax8|tf>nL%a%o^WQKXEAJN$vGJE%z9GEz^elYM?9ikeB$;jt#e~3fF zoJG*G{pVRX7z$_j3g8Ef(B=A;XQk~v;?#~ zQ|BlQY=&P7pxX(z_R@fzGxZez{CycjxNcYbmrymWw0Rl4G9%|c<*Gv9`Pc_CYLpa} zv8I4NAT<&A+M|JOqLXdC?a7LTf5oHp9v$D#pKj$fff-!;4yjkgzn_*1xa25JHns}0+ZK|L9#lCLSDAVVs&bVXR;x*N`0I02~P|Vh%%zIG8a)f8D%RUkl>;RG^J97i-$(Ve1SFQ(>*u*_E;sIr04Pr7);R zi?t154E=jw#=MC3x6lXWS^yY_sMY=v8x|Lr?ub$FJNzZIrQ$rX%M3NImUJ-&HSNBV z(tT%aHvu<`c>oI9fi{2^a%p`5+v@hD9C~)hrXsua8= zOUToj1pixsV*KWd|-!h3jpDj?Td z`+&>y@!G?VM+$AYd%hVF^-e+E$$vRFt|2RpO;8kX0c=z+@FVG;hUd zZ7oQWtd8}&hvpN48}v%1M=f~xBH)g2}$4{&qO0wz2u+6*X%lLIe zqazh>GJ%D|F|R0@OyMcd7gzJjwKZvAuK+61Hbq|AGjKO1*qiss9n}WKmBp@YCR8P5 zW}m|Wf3G+|b_tpK){70&N9z-yL-(G&GEM`? zteM&{2am&d+r=FcZh=(DB>7^7mCfkH<7rM!vAVdZH#)V~gHn)w))k#Lv0-~Rlz zf7;;j;ifoJ>=N-!V!+hFcv{gnTxC|YPYxPp(C^7Yl+)}b50kyBS-J)x9qV2)AtGZoR)9Hxo$_cZ6!02%l$6FD(ybA&wL*fg9Rk9e<0hS$G}p6)?FTMEBO6@wJ(gxePKTYxx;oTxk#IL zFV-5o_waT(`@1b(=!tfTVY&3p;+IE~iq{mwOS9D7f8OBC*{3oVbC)}pl#A9P&MGMc zAdxEal#dh)VDg^3yv~;u8lf3LgVhQPTVomp)lbO5EW%+32yV?I@S|9JfrPZrx&PTYXy8IUh=%~13ym9 z(~9@f=ekhwI(2tXaPkVvf51v=)m8Hs@%(H?^Zqv~(&yGp^;aQX*8z*lX$+D5#;=~G zgl7i;J9B8{FKOj3lS~_>P-wkGmk=3kE0a)~dt&`qGz$VS3fZ59vTMiI#DuCWX{Tt8g@t&_*@( zw^n{B>ercqmREo+hsn)13P^-TAsSfehc*l~M+g_xq9R72Y&RBa$yn5s!{`26Xt`I1 z1LY9~u&Saw*n)?#e`CzQM}g|hA(b{n_8~!W*Td@oHL~dD?L_iLWAWWYr)px=zK<9R zS!IwUzrV$RFBij2+e%Y1wI(XrT@Ze&H;l?6Vu>&)j0~$zDecG_W-DD4<*NX{EYqc7 zjh2X@5^(XrrFso?pG}dI^tJct@;gb(o`J7BR2!6N^UHCAe-&2ub68R31 z{9#*YAZPuXRcdA^&7bY^^?w%!(p`Q|#OoC{DsILxZQhBy$>GERgsfY|*lO8>`%<(4 zRg=ho6i8~QCKV-H6VPr;>Iq$e)d5y?M8$hD2gsx}LreO~?DH>ANZ>kD)!lm%LCP>^ zVq4hy0LmP(e@$#|3=C1^0$GVa{`}YuL;C^qx^r#Rf23I;7Iig#<-9u40phdkhCf&- z-z!Rlm_#dq>L~kNq))caQ1Z9RK&VUl@}Z^SwlDV69yAOTl0rBXnhB~C^->x7GLvSF zgPZ7SX-kfm%%$dZAoQD9hS_w;K&tE{#5c${GliATh4z+7{^Uq5v-QNaFBr@Phn(%lC~0S*P@`$I6B>LF=pnMI!LNS zlD)+I=z2l8c;vQ@Gdhzo-X#_cm7B|%NmV&&Eu(<;5c_r=qZEg1!Tr*{uA@+hTO6K* z`=L{~-c&+inPJ*RJV3ix8Zmdw(D5GF$p+~we~T-t9T4yuYBb|~z6Ju5XE>!%hIvTm zmNQc;w0%#yL(-)H%oN2RUJU`y>H`_J>;S`M3I-k0#QD#C6f$}U8f%V*B2Qg~c?0*3EZWWIWgo0dUPfNBJp#umWd`%r+1 zf3N2=0m`VR8o$v{qzQ$pH04fyvPU9s`TGU@%C#6Eif@-fT{z4KdN=&}kc1qT0RP!4 za4oT1@^<=~(-wY28EPDK56C;=V6*kBUUOdEGwONz3G?y}pgDhhG1#Nf)CTNpdIkG1 zfK_x|{m89iPnsdEdU=7L0^skJNnW*Oe{;g`iNrN%Y`Q&w>p#^^lllS9FPbYMIFb`` zwN2c5&)*^1{DF73n(zocqofM@7ikq@jLR8sW37Z7*9enx$U6&A%0mUnPrBTXGXUs zgbdA5BybF7yAxiK!f20J{3x=>UNh;#2(Jj9R5Bmh?~W(Hh-YCd~adHi_>RqxFN>~zn_NYVh1Ck0=h1W z-+to=3N_xPhI+c~mZ$)58zhL)+$x)U*sMrKx>(nyA^Cn6kxc8nbfAr#c3NCyp zQa}4CJD7@ZZd6ZcxMBNNZ;Z1Fjorh3uZ=l~G$`0;sz@#E8O#$QxJ$ByP^633qb&;W z&e)m@PN<>&VVqvQM|#*Y{Sy4F^%O^kF)9v~a|wYDJnQ-<2HV1(%{s^*s!Y@mEG$&x z$MxsvYY(j>9q8gq{eGnpe?uW4gdjLqC^v`U&N3mz)tC+Ni_Q}_qLdq~XsiFqAh6UH zh`P3mf;;*RH1KA|Lc;cFQgYq=zQeA}M_Rb!qR~ee<86*`R;Mg@bVC*SHkScNOErxj zZHq%<9zO=*Cg&hrwo2jA$es@Ov+Ji_K;xYG4}Akyx=Vz52P`D9e_X%l@4=Dt=XzL& zVAQb9G{szwN0I~Q7M3dyO!f-{)g{g=kZMGL;-KuR91g5SQmYqC8xNe=($!zn3FXF<9UAN3N%M{5^}9uNeGePJB#KVeJ_Wb>`k*2U zYg(9Xk#AK_{aS#*e*i}An8r13{Z?C7`DzDNbnj&L0t~BO)?Luzf387Jg6)$k?@ia` z7t^!mBuvaApEK0Np8l3KBRE45OOqi|w+F@c$ZFG7xNZ35`V@=C2(b5tH4MvDI0^+W zyDU82GfTm@sSyM?Wg@`RZR@_6JmZ!`=(giMJ`I)3ol{a>e=v26ywYCVpoXb3fpvjS z3(dK&VL})fO|va8Zqh^2{$WggcN?jcX$6&}7%dE&afP59j;Om0t&egJXcjZiCv8Iwf6rSeLd5}BWm+$gK2q(JHm{({ z9#r;J5rJGcsEWRiy(*U+3;`HChNpfqS?s%^j}nxb9`Mz?2il`&Ro4=ce6Lls{VT#! zaHfh3ZtrrM$2HF&U=-MBhq%=ApF+1|r6FzR)rw8jU%&7bKV7Vn-Q2EgLzv7~Vbbd& zjMDB6e}p&J=Qf8HY2Zu0Uwu`8z-+))KHHZ)D6vcQ-gf9I2!D_-E`SB@`b7OzASpqN zi%iCL=0sK)o6?(nn}>3hEe?LepgzH15d}T88Uny|AqvfL{0WilYfCQ{A9Fs$aR#dJ zwzmw!bN@@)o)+&easxWo1}F~6^rFfiaG8J+e{6!zPm7+yAQAe_kC?0eYhn1B3!T|w zSS;NWLW`fj8%&gGL%FcPUP<(4gfdm6ACs{+FVuCg3csa~iLzr>x zfAXW~P54<2)R-I(hWT$kY+=$y7HOcV=4@$|gyJH{I_5{_`l`Sra!Q7NR;pU({^cOv zNHzJzxJ@L+XeoR`Xt<>bnRxEMgKU1+vwD2`(d2VIiSk@w?h`VwQEJIr+c+jQbX~=} zkj?^%>d12cLW0$w!Ha>NmzVYu)!2qt5c9`;sr((UCV85U-n{CKuW zSKNFXtdE88eV%D9uBc{&aW;lKMWIJ7wW#?FF!zsLN&&#Af9~n- zdbU^hjka1qJ$!U)u=rQttXZ4$jFDzr9P!w_6#|jmwWP>teLtX8e=RSZd4p0DiwW`3 z3X#ZFpTIrgs-i!!KpwWXrN=qUh2e58HB>jD*w^*ruFJ8OdB$Ay-{AJfye9;N4Ik2- zweu5kNve=^D7Ye`meXG;t+G~ff76EdS9keCtg>4W?_D{6-*~_PX~>KMPf>e!EI6_S}UA z1zNREQtz@71W63s4YE5yr#hY(pwNe>EAt%qcOYLXtPlE4=%s;DI+R;EC#uY|R>nOQ zGcS<8WV#O;MqSW^p4s66yRXAqL!YPKZ6@joU>9zuX2@45gvEEOxRtt_jF-+B?&E0pS&ESrYXz#>9eEBS@`{kk zGgBp}Ref@$h**B$-1*r8+8X#bWhf}wAt6>BgrX@Vd|E0pMZO#klNHur0yq9@Le&vMIj*o|Mj=e&fBq4!sFno6{qS#MmDvzp zWP((*7L1xE)*PIIx4`Y99}SQ??LgNj{i;y$XnHf2p6nb;ax-1y>MdKwpX-;H57raL zDqG#GUeXh1;9cyEW!P*$13E!*XJ9V`_+aH!ywNS%+>Y>1s7$ z%H~?YmQpZLe}4uI0+;oue(}lLZB*P6K0p2kb~+|uZ_^7OXN6t#v%z}R2PmiYh-4e6 z#WwGzb$Y4k#SG1|2r}(KqvAmJt6?w-Vuyiu+o8bNQjUg{8hQ)Ao6MP=AMvrBl2yX> zm@y*nx&`^KmYgkjPO{p>wtS=HxU((#5EF+*O?S$4f2)hYV5dEd#oW&x!-tRXiEZ=< z7x4EEe+Iuv0$g~^n{i&JyP$z@&yB6Q_1aB!v75(=k!^vOmYu*;!SuYANf6h#ZmR?R zvH1smwFQj+T~#Mm>pa~zis5%B)qdHdV%FH?8BoX@u5j_gC{u$y*+RgNEIt`e{phFh z8&|3ofBSQD^XApUSBfnPJEOb<>hnt9i=3GF880WvUN*pn+qP$Gup1#nDzo>ZFdZCW z#tpgoLx1+oh@)JYHov>yG^^_Xqa9{j=Lhf0I3*conztX(H~Fp z?S4${J2110Wo_thU&B(jLL4D}d1#l0ZE+?oh6Q*kutP4@&k(*>s9pUvkxn(knXoQk ze|tol?$6}Ms1fFRk9E$}+r=e*?$>U(g?D?uv>1Z?kh%U=!l` z+%UR5bXVw=_3(8u#BLmC<9|q$?{AiAHP{X+V}}zjA9h54TGhC3-fr<;BE2Q^=DN)f zHHwvL0?&4P#b*%nnU)>5w|1IG;Q3jXJ@Dww4q-e6>l74Xa^pla4JbjJ(RpbP^F^G1oDx z+kPyto+Y6=%|pdev#Q_*3Or~He}Fe>Pk#z7w{K)kn&SPDE08^il#cITWFi> zGSd7;v}Ba901|a|f7c6(hcfPH zJHTWwx1w7zxqO$VTdEB#o_AJ(5#m|3Nywl&Vel9flqhea2@wpA{YyBay=3M3BgSY< zzX&qjs%C|!!($oTj^P&;;kjjC@psH@Msin@2v+C z6+^;XW4PWrvc|H$CQ)$u>5fdBAKR7r1?(?1n`ef<@N48JBM}4^>C<*-d3IO7Vc*}! zGjcGFYAt(0zbC<442v-3|C3iE{7X4ulz(Ling2<3VH>9ZOCe!ze~bS=NjM(EDt`sO zyuWgRDZ|H%EwG{^P)w|_{ViR81qoqB-kH|nJVwQtE#Pp+h#9!x%iHD|5EGQ!4Os~j z4knn{43jt~9oW5)?WS5eW>*rXLpWbi2j|Cg`=UF{~f2Hau|DDf-Asu2Hh}IM?E2Gql9ut}$8=C9ce+jRBgF7NOcd_NooH$AJ zHQ6N4RFd)DUcglM`R=YsEol~_J1eep+hv8P@nmM&J}Fq$EO+2~^f6n^tUo^z{P^1K z;Z&Fwp;rj{`NMDxTTFbTf4~>-8lowZ4lY*?IVMtlNewd9wqOuNZOV(tf8uUsSYYMI!RvF<-|fsPR!yBsZYKKr8B(ot$J6r|!|j1U-IIHAA}F1l zU(49L1@elUj;jNm;DDJGrG_3Y0w1$wXdA^J7i-?sPJ`jT&q=k$l}StJ2#?3#!3-sB zZQNuIlR32SEp0PxYflsRQ5QD*mgR$YT9*LsCaya5e?D1D%Yh_an9S2A-0>R>WQD2B zU!O>BlgJxEel#yCR{*{>7?ZNXK%wI}3UmG)IvN&3CbLO?pJ*u(J0=plCdZGz4c@r{ zA)P-aF?!`u7(r^A1a0>Fk_R2TtKrO&zfH8*QaXoMd)+QC(yHeMiNr2GU`bg|TBT_& ztRk{Qf5HeXF$PQ*D5BSB&nGfk^9UIn?TK6$L{@B-?`+N4QR%zaAUqJe;ido=7GWh)g3=Bp6jbvKZ>QLkpGq7 zv2Zx&Rp69|KF1q7gWo30EeOD+9BKEAznxa zW6^34`&`PPjBJtS7dvE{nTENH-TP67)knPTLc3;sd1M9v zPFWc|Lb5=w3`k$xeYmwn+frwznfyyAlu{|X?C}W#t@OP4d_vr8L8v0Z;{p;GuS!iZ zK>T&v`B{odU7o$w#~cY21WWJ7mT=gke?ecOiLQ8YCyDV`FZ$YDB3i^2-AfYZ4$LQs z{NxoquB8S0cHEL>B#29Bh`B4BTRYC_q0X0K;-w0Jh%~U&23}+p zA`-Tr+GWjLJLPGxQac?JUK=P74~K5xNoqQd1HJfgjNR<^8NC~aK`&~}jtNuof7v!r z$bk00u~M6vQrJ8$1${a`8$m|xmc){?|MEwK1q#$%0;Ss##MpwC+glX|VElrE41|jd zPut5-Mc5z{=#okZ>36qfygKb>`n-60^UBGX5xmWu#knND&chlpzjwX3{KcmIDVh!lEvmFidxlckFAbo6GDNt2~H&RYEbGS3!` z`)s>MamAfLVeo8Zhq+|hhvQrJ;0J6i$sGu$_HqU4QAHjoW_F*~kIcdJOTEdhfA6{i ziz|F+Wv+1J>-8B}+y~OtipxmRDf4!*L8)RED zGwgW6Pncm78p(5<;)7@0rF&H!gS%g3B<`^uYpbcRJd*Z+YvOkoqeobtq-$->lV5sD z6U&ZRzMKZkw9_MuV~ph>a^^hNozf)jNJCbEGP6{dKVPjy-lgn0aSpes23{(UavBAZ ziHDvG>eajbQgp0U$Jlc(eV5kyl!CaCI?hn+ zFDQB$2c#pD;?PKmr@*i0&SH`A<6Ka0G0#p@^xo6EtGN$!?KbgS0~lv&s<~(8SWs^l zZ7e0G)`A~t-2UH1>(qjAr;N|i=4ZfOi|8>bbXemgvdq9c2ShXwe=^va9$-QrXvupX zOYV`%TSgfM&7pDY0a}1kn~_4B*DAE(sv`ia{IE&#^RU?13f3_lG{=01ZQWRTJrXHL zHaXxYW0CcTs9fCz9ox4!DJt{glIPBdgH zR+|-V@FKhwD_h!8>IBqzzu!(5FsmsuK+pjsiFq=La_%c}DfymtIx|{M$=|umr$&?5 zq=8pYVSv0$O+0-VdP~^{Z`j?=&U8ES3SvmkrFcXfI5EIWf9Q{Ou#?Uk25PhU96v?P z)ZZBu-KULo@Flnwm(MRM^*eHnh3-FzWClGoAlnPd>TopO41YAR4LM%zom-j}S?0q~ z0(9Vv=TV)53^ub87a1sJ!@)LUUq9`Yp`VH}EfFC@>Q`raxkRL{clb^Eg|ICvm&NsIVeC=m zC!?Qdbm_DNE|q0??N~$CdffBik$5ZiKqq}+E7^Hoe`ycCfHG*8sSsI@BLsFI38fnY8N1k7L_W6#G8<89h?ET)~Kp!-v0vP|U(jo}joQzGIaL(#6T+cOPZ;FUkxXbN0 z#*`Io5HfdrXwwu%s^$jqlljH~RzRu0Y80r)rE70cs6V;+;w$IAL)Pj3J{P7!kv^GT z^5}7UplSPNtBY6)DrW`IZ=`s;mkIXw?TOtV0XhX(FMm~0TT@Zgf?{7QBcrg?LL0sp zZy2^Q4T5vN__zVEqji}1^p+NW(J?>KNLT{5DGawLFmj)n#qjWp$_Pr_nwWyEs{k2! zGD!o@zOoxh&ADI>Q{6=-xI4(52J)&P8xfP=hbBm}P-WBP7%fxrK}OpgVl8g+@8Fb} z-O?rss(;I1HJl3;?FQwsVgt|i7~YlZXs)`RMCN3!HvW)&k(pNX)SU=uO6|>G5NvT& zpK6_hfCBh2K@GPvkdQQX%VP zeHt%zJG%w`#{PMaHAAdm`$b`xijDig8bMXq+DT`WK#9}|4=oYhrJ2a9kPSSIP^Uz2 zK!1-9$Th}D{g$r=R{k+_hyMF&AWoz(baYt-eDt>JHZ$opaN$(eZm$eFgqIG7y0!q9 z!ya1M>i7uOw2SN83zG4Opf)&AwQZ}xX`7v=`y*HkPUEkp<{$qf}UJ`iUG5jDLK( zEJ4dZ#f6{xqReQW?J|4P8ewBb5 z)(gTqG%+S}ZnB1&7)`MfYGJm8s~FAkVoH9x!kKBc9$8dZy+P8)F0anG3zZo!^d_bh zXT3Oncn2rP-_w1RAJdob(PdwaC4cwrO%~N=oX#0<-y zzy5FT$^U~rIgbCKp8S_;VEw!N-)!T<#$^m~<*Nh3@cBPrNFx1FOt^p45`TtaQ1>4y zCDb~q?`b{GYntP{{th z`iGd}4J?zwHkFLY+5h?v{z($%^p8^Vk4o}KF8L=0aqg1GE_P9csXGi~{u41ICYR_w z3H{R(6o%p5f0MiaLnZk~A%9^Q22%gM&z?yHk4axKM8h!d_D@7|3W@$oT`||HVHn}~ zCx>YBABBYdM;&1pR{Tdj(TYDc>W})uyo?eujKQ!s!x)e7310CDp7D;8^BRp3g4O;q zxxg?Dn}J}!e2n%o%)p2ZqZ}@yvA4n*-Hw=i@)>Osu$|GFw=tQ10Dqai@rte^Gw{+$ zY|$!ik#OwJOYShTo)hbx(&?SfGck2DcQMKwHXAl!{;4^fBi)SQaL3JP44W{V9@yON zFifv(!dpzH@BH-_icayVZvvaV@rrE_uy@gkOwjRc5T1dh}9mNT^|xX2*1q_7bEwVd0qkPkLwFVNq?)IAL}rz(e3_!Z1Lv{ zf3_$Sp;1Eq!G8p)k)YdcOunI7zFmobYM(5Ft|J`G`A_|n_IqI&U)fkUnAGmyx}s)Z zglsDWa$~T1Q2GL^1X>Iz%){hnW+d$5#wm7ksewh(s{^@D7xidz|JoOG(7AVSS(Y(X8&ia08(F!3pY&Qo@H^m!;(71=YLuQV)h_cK|D4^^$G>*V%sM0 zr`%SSu`_&H{nrnBvp2(E%;qrizkVp@Uq7#bn0F$pUxKP#fr@Pbsh=USUxJGFKR=Px zu83x@Q5X-aMK5{O7t7TaQ#7U!)D}y1?nV`N$u&k{Z=L~F&gq3$DJ1qD#E)u~@2(V< z3gx$PWPcZ{?VDj4js%ZJ7nxyUxchHByZARA#uH@46=cB^P{kEcCb?P5iGh#nfww7G z{cTv0>jB6qShH(bgGm_1VR`Vy7`WlIKYf8oAB$URS)X$SUyi9s{YRJ*7`@?lW=~l0Z-4(W*~c{h{AX1BqGfiT&YOZ8s(bmj zz5)tT4(~H^@fWA0{Z6>Q`Ixo9-u zj(-*ZVh@w_O83v-YK7Bw z<^0E9;4K?W8T?VK{_OS7e;Bur{?=4FErK(OD;vS?^xi+h7Yu4mt5PIDg)-3&yzEyEqsp5k4d-MTZg@cmHP>>(76x zfJGXn__GV&DF5st-bJf^#ec`EKcD_j{ACJ>ZT^k(GaEVQ+8HPubr^f0lSf5Mp?$S; zNaPqp5Nudw2gQjO*j0St7RG_r!kA&3LSX+lZ|~OJs;>PDewTjhN_81KByrAp>3<<1 zfwTfa~8!222Znl&xDPn2z+3o*g&m&X!eppXyv-hy<9!> zd7Gs37DN9uV}))9i{%D(j}P0g?P}aXjKkMV6X?b*2avAlvl``55{rc_p?^XCBa6xw z+mrqIGjAP1=b(C+1;k!!_G(9qWf*zShQY7VDKXYn-4kIsG4v7zPw!DZ|8%-~OU=Yv zANeDebhOXz`)q(ij=unMz@vSxtfJw{J_8?l)kI3i{qTvJHytZo;j7;SQT>|xc$_~p zKy6vUYZN6KM)RZK$1q4M+kbbugz%5;BbmJf(SxJZQMko7ZJ%0&kopovf_$pdFSTR^U1DPX0>?*Ta1Ui)BOOn(8q-swE^mrl4L zBTs4fTW9FlpKRshhzssmz*hX@%bTZZk8iMCI|W&WCzjdzQyrz*&adEjplA2G74J-o z+Yo4G(pFoYoQQ^E%cOSl>ul=`*X`kW_U%aTS&;af*yfg)zh*<5gt#7f0oydL6+iEl zyv@N%70zN3*fk_M;eUf<&%_rK*ZMIIfd#~Rv zg5qZ9i*R1l_0plzV;lh(sfY&%>}qoxQf%o0sfNTqN&)^?Ff! z=6%gHa5(BD*nm96XMq7ZR(v+-vcvKXO*V&pVJ@eiQ%v$8WMcx;7ns16p`0swn5%J`NI5puXty63&kMgNcv#Or zk#rLDHT@59-G97KoLHQ$=j%deVpH59G`0Qt&>dNmh$4k!a$oPXENuAGlLb(cjb<= z;Kf}Z`RBSr67@}J2l1YAamniXGYIs`s$)qAxHw#HNXJ8So@KBt?Q@e2Z@`GV5a!9tpJ#P(I`d;7c zNuO;@ct&{jP%Q$gW?>E{K#lt z;WC-8TCMVp?i%8@M#a_jP-4gN+zl5`nv2_Yj*iY6THg(F*C+ho4t~n}?vwIs#D&MG z@PAeqmTWyQEH^4PYe}CF$}J-G%n)*QUeRcz6?>#SQ+&9BV&qN5b0$){V&uC~SA1VnIBsk|+UC0+0p5xrYjOnK!$H0e zL%H_@o*}NqeqDbVxGo6N4KF5HOn0tZoPSf#gxWm^yQsfz;8=4n3Ifb?3kP|H3Y4G{ z5*#d#9BF;fgGHbzh!IC`NUjY@NfO~Crn&ESyjhE48(q+QQhUl??q?4%eilv-hE(?Q z%qDxeL8G1+F`u$!{aSdBegTu8JNh~NE?ar}Z6{cne`B&ouNviW(k+NU2E`^tn{t>J{;|2(Pf48M^FAz*Sr*d7a+#T=ZB1bhsLxU} znA5<}Zx*jlj)>P=lyg)pBfmrsRzcPf2Z?7VJQ-x{8VyV6N^Xt^$`EuuC4W8^B4sjL zPt`YHvn{u;iVlgQgXZ}o9ilv?avJ+zF^rl|9kYJaGe7olq}ew!ZbF zV@6pRb-IeBjJyR@zLmA_bahTEx}m#6wHj&*3CWOTJH`|4b@k3Dw89$ZjCIWcI?f2M zJtUIt+B&1O1dh`nbIe1*P=E7`VrZyys`wb5J>yzW@z)xlnp@l#A@+u6rM;vTaH~DE~_Nbia-X^=4dqilJbxwVD2}ee;qNnj_SoWrJ9{7=po@>}(I1aTUv9)A5&vUbL}vYk zUxw`QP6s7t56H+jqnv&FTLb&Src6-jA=c2I1(-WdcqBP4jvFHqg%!r0N!Y=~S^|p= zT@!9%A=bES93x%6d#9}P$+6wn`QCY4o}9%M?9ls)qWFXQNq-SU@yQ77Gah(jlKs1Z+<{7QBB%?$s*_LDMws? zbcFYA6k22TdVhvHhC{+V&X+IS4Sf2-c_$>kpdxb(z63>aXQx9)abgcA3T{sAdlS2&sN%0PJdLiD9L)eD}-<4k5CmM!CvV! zMHm#z9TJ?;!mHsOoM55N{ScD1(9GDlJXkQd+`i8fLp;qZ&n5K}#-;b-hO^3~smoCF zVux{l-xGZH#GM{KYgM|LsCkUz`eyKP9xWQNjU~KEzm2v)C*QKF;=lgj{p(y#x^s$O z|KQiV?|+=$WuA^U=NATpkI%*mh)bkam|-Jeo_A}m8yNJ21=+=w3D1n|xZ=&hL0N8a z{({Q4dW&9>Ul6_{g_RY-LtJE~A!KCp)|q|Iu^-hJS`f?b0*mt&ZHPn!t|lL{j3ftX zh4}}Sdg}J**^+$DD?MaAN&P%7i0I09p*I%R7Jn!XtVkoT>n1)}CG;rE-f)qnMkF#T z)%FTsvGwc-jlx%mD15~@zsH-@JhP9x4tT&uKK#;!i+gTij5|@Dmy_@E2m8}5n|!mo zzJ!mMlZfbEYChRWRvwqgfY`^N`y^nF)h*>F@II?~utdaH;MZDX{W0bZ2^L<^&{u8; zkAD=;yEk(VJb|TQi8eQa+%rUuaRoc;lYg0g;KUnm#by3Z3jIKM0H)y$yb@bWJf}9= z9u2~emWunKuh|plTNO7J%HDRGHQC#dA2t(~`hzjQ$2Fbj9~0in$5{C|?$I{i{h<+~ zD7~p-F0tn0I}c7g_A5>{;#B1~{qhBMJb%(|l-DsfZcP3noBRY}9<8~!1%-avRph{U zo12JlIbI4Z6qaL6ZPbWhkVZg!qlmMx@>bkxZ?xkZ0(0ByvL_QY1c6x5kR&S{JE7AI ztc(R7mJ1fc*6iw#BuDBFJZD{JtYEpbg{ETIYp)}1+~+dxVgstW@#fw&DC^$g^?#a$ z%UN-11;trH7W0RTk+@#iAgjz8E3uh-8;Geyld(q4B?@@6@x*?T>?(6F6Z(5~Dda|mxfO`JvU;03Jo*PfiRnb!NPfcE^=v{^;F#VB+^_?(crx-{x$5 zmoQIO7pFAWq)^_R`#l@&IhH35W#=TMM)=-3Jcyfo*wW%TU|8vbSC<$49u zwo!Ai`=&BcHe+d!^Fgf5GV@VS0%P#gEJ_k6;>yPS#`?UbqFq^G_TD0G8GnRrN>3+x zUMRnWZ;U`12zeH-&@8R`AVC5jN|y0Jjr%i59j~zZqh&4R!5Nl0mim>7CctHr*@gGz z&9|ANGoHy)}%iLPB>4P_ImW%r zSsxoVI2xcDb}GLS#W@xEh*wGEy&>a{;qP&iiPxB-$;iW&(g%OH(H|7$6CZ|3pXip- z-!Ld$H0kI6f>(DC`i|9miGkTyR>GTE#Re2tlb5`5*a6$>TUtTmn-JgP`l^S}#s|^b zNf+-|NZ2GGK7nS9I)8$30Xrx1-bpu4Lv#D~cb@e#cz=9&=j{%Q#&ANfs6v)_s7(!` zpY5`}qNvNgK@J!}7IgA(C=Wp;v(If3TWfj6LqH77^R>c$RCo!)O>gq*Ic*AyrQ-eH z@b7lw--j2FOU1u?#lLAyM*H_a!K0Q}oh^d^kBp{h+c>5==q zC8*CZ#6pW}{eO&S&1`K3z82#~PmBCb@vAq_4xVVA37k<-cusI413k7SDfrguSm{{F z374^e0xUGdd|gVys#j_t`VXmncTp$vm^-z4KA*3cW&nEJ+_b`0%XwZdc|NY(YzIaua zA2@GTgrON|Yrl~1j#e+j$AZ~$JCyM$+S8^&MStT`-uOr)E?$r?cytb%q<`N=-d8*n zD~6k+qmgmkrW*Gr$6s<9w>!lRkAKE9#gP8+PNvTP#Av4&{o>9ule|vxH1k(%d&0NE ze#i4C&t}bst4YU?V~1jrTIGC>itRBvvSnJvIX67X>ZJ8r(ysbfYL-`XRmhtiwnwKI zMSm8G=ORECn1PqF()>-*e*}vJTPyjCQ!VdMmuNdZB%V21{w$j`HRQM_kP>h?|<0+ zhpS>H_dFDA&IfZ~i%rCG2XPNX>xRYaC*xXyMe}KbC~_nI^hHDlkXsPJB0o=ly0OzV{U#x3aAXQ$2 zJhI<82!+p-&i;UL9=0Br#W2{JaWc3x{5A7=J;!?q%i1 zb<10HZ1l7!uD37*66t?Uqk}*Y`<`o}yQp zXzVCbo0(etX(e7&V%wN~#b<{E<|?PF3e|(f$tNnhNJ#Q(L`P)P_UPg#j8R;NgMO_0 zxV6gH`MP9p5a!2Khmdcecz+WevLcS(Rztq~H~1zT^`OG%ZLIYF=-=D28SBKRTyn1L zq|)`q-OegsR_Qkt+x{4RqUm=vA17PMj{H^sp=v8c{^$+-9uqmNjzqRrk)u`S5z4i# zAzQI6+9fu!d5h!QhU+4z$Ys4)Rz^&@(Kj!iu`Ilvy_gE$YsGxkr++Tu`O+a2Ks>xd zFW(ZC>D%NBjF%ZLAX*9eNXyWUaAQJwO*c_Qj<^L+6GN0Tcq8@4V*yDF*SbggC!QLYdph}arAHE&Me z*q~xDP=`hUE7S+p8jD*DZIuk1m)!Kz?8vM|^Y(9eWxqPLpN?Vk%R})Rf7BJz@{`|5 z!4JEzpyZi=7rID9=8o1Ih&2xZHbuV0fyc1qXI3?9JC-<@$C= z`@7TWjvf5y?i&`raY%m8gZ_!D{;jTP#4Tej%lP{>Y+tipKbVh<1#!$VTIMF4SETY@ zFAxj*$Zmw9TQTs~2t>eSw_2lRV1=X$dl981?;JtqYZ;u^Mf;A+vrzb$0HY2r<~z>J zAj+QKKMs4`XMblFG;5Bu!49+`o{L*TWHQxLRx_dM+io%7u?{ge?|Jlqc5#V1l6sPC z4v&Y6j}JPu!k?{~@}rDQs8qZ)o6Q~!uyy>z!Jg_m`Bc|A#J%5<6C2{k!^c6lhv0IM z&WjNUIUPZPxgmbAtHv<&+wxPJQue zj~SBGCd#b0UPFFTM?`vtFSIy)TwYg5e?1(ppB8URbV^QXuU}O>a9HzN z=AaW!4i0J%n%4u5xIvj;B&b=Tjs%{z%uP(zNN_I<0Vng^H%t2z2yHOHb{UQ{&p=O` zId8-6YB#S750S@*hXW1lNY|}eoC(s2nwM}rtA8NPk*4e_>;?>XKePYNLybAU5u4uQ zjA|Tc`z-r4#NIhvzWPw_c-JPV^O3FD`CE83!{NGJnK^OD(xBb4%A@+zs#?z;kd5`Xw$k%|@}u|oSDyaLZQXOqqrUpCn?m*eDjeaNNw|#M!t;?_zO4h# zMt>FaDl<-?*M09;952#y_Sq})O0Dr9?ZA?}cYE@}<~*XM=0rNh{Olo8K<@D8wO=3A zCsQ$y{Y>Yyg@dQ~@J;cdh4NtJ!_HIsS7EmeEwYWL>PWh2eii-bRO73$-j18{iB++n zP{#)dJR=mzHl|Rb_$l-&?F)(q?N9Nur+?>92G?9)Y8b+t2N89gvRx)evRw75K69*J zdz*C(1>k%DW(r3~h^zWr&r-Hx!qAGJJY@$*Iq{0zQuXE|R#Ca68wg7I zt8Ffl#O{|4!}>rYMv=ulNM@^DLzXEVcB?}eMc89`=kuU1a){$E(iiU-v7^FiWPiQ< zf;>laRVRw$d;ME`Vo1v{SM_fySMgKBo&MxFr#^k#am(hJ^^+?ea?Ss)kMA<>&3%U8 z)!6h`D}-zVJU(nlo*DG|!6bRfOrHmsHPr^}L)C;R2o0tc+W`B=tk-obfCiP%jM6wI z-S#2ZVwRwF$L9(6!+5`%J0wG}cYl*3A8UqzK(t;y!`{O!hmR4jbh(m~jen1$-|eV1 z>Ylq?`;|Awz|dc8^n1;bd`G{nz3RHEks(;%H^eq7o zPx^Z7%TzB_ytboS|8bU=zq0?5oBL75!(Us7fbW8H2xd&tmEIyb<%;#YPJwT8%%UXx$0e28$AqYuB+ zF^$F?>r}5la~ma1Lw}6CMm%|5qeHrp z&+jZE%tg<&qlqu4JKl`$X}{^T!!=yLgp#{}QGq2^+k%N{mGWf$Iu--{crj{RE*Yg|CEldlkE^?%uvnS*)8K&o$`G%KdQ zSKMo_Rn(km7k_rV-qY~aj14i{QNA;wJ9uKs=a8@28AOd15-v(FSc3fcF{p?)mf%xu z*Uz}J4l8oVmK8-BeSheffU0+iQ(~dmo%>$WWz_YS5stR5-RZM!Rg|>jURuCm!dLF>3Csh$ z5imIHl{`|RtI~BPkEeSc@dvZmV~kD7!nZEo9Rsusqm%6iGEUzFF$t1to1;g38dhn<-y#k9CK- zsqDtRmOlQAULE~HFY$iXSmowtqG~X+9Uk-*5<0`26G*%jDRu14+iS?CkX`y>k?xN-e@sblg96d_q zFzg1K*Q#HSziP|B>wX?o{z>7NX`R4`2PagP6@C8pwwyYQhSy zV1EHSvN=J}H5w`Ci3XW@qQoj|IjlpWr12Pr#@E*_uOM|qnuXFJ9+yg&8I1Ak;gKKO5Z zWy%~IrVLU+9!QaYfqpC?X?VyTWE72Mkbmt1d*F`@ZL4^R3id43`h-c z7v+zLb!{6`a`r22kL8HB+SvogSAP{(Fk%Q5eu&w^k=m43y7$o#laBheHc9ymqnzJ= z|EYof@nxe?H*A&G`zzr>(6HdP8FB*QtIJU9fFw-} zI(Luk>}$uhS~F=BF$Uq`xYQmD15j?}wN)NU+SfaWw>>iXC!8#9{ulmSa8ib=Jq|%9#gvsm8TYUmkUa(TDwKK9$)oj63ls6M2lj*IAVL zBIOK0Sa73&TU9q_-08}PIju`5n224sWGR;nxp6B@`85kg?mLB7{(tbS*|f&|DX&kp zcnjOW>Xzc|c=m8?A1$5@v=R>`Y~oc))n1t@P{G1@7$e=x0hwKEiqg@=a_(7*QHnF5$9JsOtWT7CKiuf-a}?#ZElcy@C}j}^L$5#!#ZS5Zb$9H4D?|ilh-l6 zg*9{(_ygJ)$Qa+!#&xZv8~ZiJi0?c!`kIFxxD7J(ahzL1@1WqFDQe$J*^z45(H2iJ zD<;3s1=9t`Q|p<>Iuhd^x4P5hU)Q;=pl&z_?+ik#jFNlQ@PA?&3RKG0K(VZhPCQZB zxYmgkv?9V+dXIce@sLjm=7=K!{nBgSzVftAWzr3AAo7nNJpU&?fEKlNbiQ?F|Xr|QSio`+YTvX>+@!_67~e|)S@DQ=tn>O!C&7z@|T#`H|BY++6aSfIhtqe8=hNI zJO%{kK6oF&rm%g%JZH|49P(>A6hI8IjdFH#KgcKUyqw6V%?h_|bCP;wL(4Rfjolwh+YDpV~>!VjWmx-vUzkgZDju8-H4{M@W3j3d!CkXl|r=n02h% z!0Q^5_G*PW!QkOjfFjl{cYaz|vk$Gj$+xr=XtD4h4Yy2kOi>KO%-#As6U+*FL+Sc_4mgk)xE+6Pi5`iGW*3dd$XUr@% z!Cn;msLM^)NL^YV-XVfVHzLPFMuma(Be4Q62!F}D(F@8C+NmhKAlABCKz;Cz%hz?K z)}6yv`L`?Se5tq>ATPP||J_&k_S5e%#336dJL8o!@~lroS_o_tvU85AW#G!3(?y;V zTGK6$cSg_8G_ob(^!+&^lywJ@e3VD_QXEnI)8neQ@DN752J6*zt>8P{?4Q??ifSa)qdPtV5&Lk?(slr9+6yr8lfr)69o4~H3?(vp5TwWZkfq$!s zE-+S^&*h>v-t*NPEKi2sa#El#!US&+*2tO!tAHNKE)O0%wjn1Gej>LdT@&rlYE5P& zUZLQVx^=wl;+AgOg=s6=jK{&z2caeTlu}q z?YNuFI<%7Ge7Rn)V=&~p`Nyqp*MGBm_O&FIbdTfKFXO-OYmM@w$7e=-ItL()sz;cs z^|Chx9ygS+L zaN+XVOkhXFk;`iKBAi@V`jY^GPy?p)0gz^_RQo{!65b0yYpenGFlev>=Kx6H;%jz- zVF?!JDWDh+684}GFonGXXw~?12=;291;v!;RqgX42qeO!Km=M!@L@|J5Yz;uqX#uZ zqJ{{_20%yV`|MHC&jgZzNq_K6f(2p;L|A7a481Dmf2(07O$x9?x7n(*FEN(J4r|Vq>Ap++gda4z$5foFFCr)m&E_T%#xHZtofa{XA^a6ERHo z69A3H@FV|Ip?>jHZ%efs_L{>CXbT&Gp1}cOzIoPcww#`Xv+24@&2vHl6Xh|(3z+vlZVL|)P4!_Vfud}ioO*@()qUZWO)QZ;F zs%J?51pAC0w`p*bP^Qu#e$@gBQx_n7QTK|Xh0ZXwo3xJO{cqvI)Cr(U{hqxD3Z}hH z)1XNAfSmbp%U%FtS{zlsw=SfLmT9<G6xVPob3Q*BY5WabM&M(opU{C_1OMKpT>>XbyZP#BCgqPzfdUWi0t zDiPiWsfoC`pkbPbDh=tDxd`HUDIsk4Y>yxff9CGmLf*D;G@KX3qR7YyyQ+es7V3(Z zo@fCoet$A0oa|Ezv}%Eh7V=Mb54MY89H&wH)eSggB_5yK5+u@eCn5Dk;7;SjW#412 z1J@GllxDAV(4D_r*ZsRJ7+4BMV$lM*C_3{OAa)9B9dwn2sAevRY0hBZ8c5Msm@f#@ zq*n9FQ#0AN;2qh5lJ7nk;eUQ+iv#r>7@zY)qkofxKCZGjAZqOH9#*6LV;{Ft)hGB= z^~3DFA`P3u-NV{-35GhATayxZ58NH!52A%vBfKb{5u$}<#g8XRi0_Pih_NJi;vW)9 z(IQlCFMzTh>j+R)mVVJ9YGfEYnQ9yQnDq-u%Faz_*&#C^X&6c)jjK1jkfiV2I4RtC zet*9fw1v7(7SnZ#`woz8BuUwk!Zuo<1;0Sx%uf=?Nq*HS0h5VwPF(~UJ&t^sxhmP@Rcdbe3xB}+!Vqve@h^&7P}l1uPF)C@#ze4(m69nV z9AyYI6F-_rAV36&M3~pkNLQ-n6==~0oH;Mpb-K1xb3#i+ulVBbVdJ_4(XhR?NVF|y ziPJBn8TK7?lE$gu)=3Oq)R)q5)V?Eqjpxvlr!i980Suq!Qc2YE5N&AG{ zgo=vcX9*j&XCd9Xdk`*QY4o=az@`Z;B`JN(NGgp|(L)>Mo|i}{+FS#X0fzvHjms16 zD2PkT+KbuS>RE;B5@5K!wwSdoCx1eXkM(^AC8+V$u1k#Rdfc|0kJs(8?@%~i|8QMm zPS+KE^YOY9_8rFK^-tF&_H*Ss|8e9B!Bkf{21&P z50VwC6WjtxTZ4SIJVrE5GLP1Kc2sS>7F<|6FaM+MdCQD+8n-d>`n^4+|85*g=l!Q~ zPS+v5-eFE)=+(+48U*nt3m4KarC4KR{payx2}IFrA^l$7=pe7HAzEfg5%mArPPBjw zHCKG$>Fyq$MjeW>Bg47~UVi|T>_VB?WplQemMeXoDJ-SzDASyN1q9K6t9gA^_Jm$w zGulwBY+^FqvnL)9M`F#pY_sOZdJt>YsQ116{hnC3bzTL%uds#5G^aBF;TEoj(_@(_ zjG^pmQVs|MW#~2_oKf?5#FO1TE8DE@x7dPI!3j%LuOyuQiN~o1C!(AEO5)l#l?MT7clHSU^|}Xss$9MSQl&T zDVpKm*O&{`-Yc8+-8MV65Ff++XqTSGLO#>4SbyiO^xw<5TY7>Sy|cpJsAt#xU;E8_ zxZUSoWG;xz-2EdB|L`CJrT5)>r}*4#(ZNV5?BnAxV@9`iFMmWrz%T9|p})6d`_#7LL3qk+RuCIbBzM@1LbN-dihqNf>#X zaI3=EK^&9*S^8yjZAtTATvs>-5JfBL_v&1>txX?tm@pv4#t}1mIkHhhJ`aPQ3=ju3~+K?RKu~A%$PZcD-r`Xlf_hR@J4(W4puH4(Ql!Q}zRJ za&Nb6TYt5$vchmCsADu8leQJ){yS60_usv-eW>QbGSutdX~0 zofib^p0|;&wM$EgYX3B~ze!%IwvBrJpSG(1(NEju*q5KSntJZTZ<%>lINs1NEm2q! z&BkY!H_6pJF__xFAgtO7h3TX3wiu7k{%Px}e*Cm8#y&59(Gk<}+5Md^Oz9_>ebc4r zr+@YnO)wju{nNHq&;Ds!sy;_QZC%$@7D@S?r9{Q8N5G6AQMvcFNRuOWUVv@)VAvGt zzp&#KRIJJ?!qg=QvZrkeKQ>RcYHQ*P+X9Er>Dcoy5XZR?K$@cuws2>q5b2wabDA%lSM{}{H50o@xov)QwT z8)>Dp{&D82dJO>lHG8d#{V1Ed_ z{B!O>C=BP%IR{6O_|JI^T#$!9=h?qp*YHl5zuu>V59?30Khj=pDQ(q|3v>oEhaDhI zMgAQ?72q-7sdg?_cRr~)AWwhaMXfH)O8cgCF}*#RQtU|y}G{TAi!X%-zw7yD>1$Qzk0sQQK+tO zC7P=we5U6A|Lc7;Q}pb?@?gi&*EDYE0PBiAFREV$ML#oB?dNK9@V!(?jemL?&qUXS zqMs{3AMcZJ{$HH`ab5#J$9um%?(-3;`PgwqUtMkeeN>x+GeGuq4!7y~uvGhOA0&EI z{n@Lyy2aJ|Kh=D^sl1W7RQ>t@LkEZd|G+o@(68M{N22Ih{Jp*Y!zXJ2XEjbN(4(UJ zLFE~gJcx-t)BKI|SdVlQ(||^;){%o-Sj7QO>?R?lbfi+?9M{8IdI0M(z7zmMwoosw6ql3I#>-Xy0}|HghDMmgnEeW}SIv{fU@PHEjXO}=)th3HH*3@fU+Ouz(t8$D{g>nO6`xOX(f;NWSE99&`{-AG zb*6jY`PF~^GvS`k9e=;_ssNZ;cjqV(Gyr_1&kYHDj6n)D1zfR4s_F+!fbBSiQcao4 z^Qn9mQ;Y77Dz5-DK+L~oOK6nbII0c+OI5FRQ{(ZJeleKR6Pl_+^aVVs_4O+b&ttv} z;8n>bAHQS4A>fMMxk|d*Dm5HRRVP^y%!WoqXUh-=B`u8$hDRbRy3c>EFaBK5@AS21GYRWIUxv9}Zg1EA{Pw^7@Bh#LslE#Hf4uMVxBvTobSjVi ze{*)x+vflBcIC%^AAf&tul@1uzkR6tkJB&mtrW%ezrASMe{V1OfBWlKX4I!=7T@2Q zIcTq@cdv5*({0tfj^9q%a{$xf|D13Am3ch@6qRJDIOPT+3^%U%#;oUvlLeIAD5-P{ zgMsoP*VVS0Ib(xmwn}mIl?C%98fk@)?8dYon@ST*&OO{Cpf&}0ce2Fu={d(o$bJd-0vowrf&GO{0DLI%MvcDa-5qmuyo$GdC6ra}m zGs_<1{AHcC-qZS~vud*lSA(2gUtea6*>gFYFT-%ymDj?0O+)_FeTw{*1PweVq*cr5 ziQY!H2hV$W(Rlr`z8UW4+SL~Bi}s1GkIS|AK)hyxJJ^4mpFUnsNwV5_`Iq0w=V9~q zp!Ts29v;^Nde#qplFId0NER~}q;5pFP+u|ojW+TAMGVKQ-3PT?+mZOh^6YUp{E|6P z)Xr}E+q-UV@l7kkq|zHaT!jy-S655kpAHXt@bNTToQ~J%;4Y4@jsf+0o#}&@#v%GV ze(dM-)!To=+i^jc-7GtplDQ`C!c)aO-);@$zka!!hw#MX@aZdjZTqD7Tyk7nF+F@< zX6O0)Q!?K~)^Yjn>cST54SN54xEUx*YOvo=N%%O(7cSy;Rb;`d`O3&S`Ff{cak||; zKR>r$R?lZ|#_Lt&n;Dh^{rae!l7zeBb=#ifp+0{vNm#Gu@#@)rzhW6I^!0XO?yvsI zpNZFZx}Lf1wJ>cqB*-h@9)~9j^lcGRBh}#PO@_(tnlL9vui=e)msh;V`r$mQ-?q7~ zC&i=o`0zSk_U_ktU)@O1RN>4MHnFdItLh+b|1U>(jchp#1WowJA+;0Nb2T$Om=y>_A&K^Lf1K7L`_PooF{`L~yR+WU%;!GS zTK>YJ)hBZ(n~vOm&ya+%6p5Q}gwN;^;0kQ1e-GJc&3Ckydx(g!?4soPrG8a=c=fk# zKmrXT@IhZB0k0_3fq9pH+N|cz-04VxH(-Bn=8fj4#AiU@7f?-IF79#cUJ(rXCzzAt zL0-6(`y#n%8lIN?HQsUR8)A~^ zttp`!W3QCXE*to0oaoZtT@2u+@RvS_^Gf;KPP||k0YMZ3>GG3$G|qp;^1z+qc*TDq z>x8-0$SfZxNqTiCqaqpaQWCKW4?U6;u(AMtDyuLy*-DI=kIn}`EMxj!ED|=z_pn@o zuS&mZ8>x_BBR#IdnwtAV8n6;V1&qesC<8H4h5Jt93(^A*W#MSR8Kbmd6>bwn2?o%7 zzaJBCdmwmK--jY9?MnHPk8A#mWT1brwZD{&ABM&k#)UV&o{}pWuT?(xeIqrF**aR~ zSX<+4kCXTeG;Yg3z1C>zobd?MKI#Amla3fBWSvlTMA;G7n{_?b`L&_e28*)|ZAi4i z*al@2PR=$j!|HBG8|h+TXkoINU599Qvhv+t1@IezT8k~2vya*0Z)X|i#9n`eHL}oK z6SF<0gZ%xVI3Em}S^unjW_;Db69dyOvg66_0$ZG7b4<2>oB(3R~~~ zch3m;Tp5tjU^(RksZRb*5>S6uqFUbHl{%xt1JL*h6aa#pxG(N$H?Mgt(B&D1GkIGu zJhmsIF_X+Td{s%Pp4PtK&uhyW)`Z3iQ{Sh7On%^A=SeS{oo^83wZ1X{SIJ=RGfK{j zU9POl;%xd4Fno1B5Xhv3|`HfP;D~GesTnl&kg=4-lX%SPF@`s58G97i|6Ix4OC>d3xR&x z$Y?QU6yu<4S>s14T{HKu=td0++mnu?-q>R9@+J_9tg9K80;n(C=1Q;cqwgE7Oz&j9 z(cTrM=+_fs*q#5_13G^pN>yfvT7iO(52Ry!r0v-_Se`^+GoSE7>%3YPrJ+i{sRC%G z_ZT~8f>6L%(i5ES#@NRTo>Ah6OZ${O3%frH10?1^gM+wE=|D&IYh9(MrzDxF@`KbP zvV_g;Rb{8eb95l>Jy~3H3qgErmaV6=0dNQ$Fv$hx1S%rvW|M!hs^9}$ht?7f+Hfl7 z7m9nPXuiN7n@jU^h%%7sNWejo#fVjHna83q3vNZN(ZPl<0+BJ-bx})M>D@s78R%TQzr} zT;gC<2?YA0-+baSlj!wjHosPk*LEL#g|0aoRV^Nnx<=XT$lfk*{$d~k%ha^jY99TK zeP)H~9#Z17A3yG!uL(>dW?tP`y2BO+FSJ~O14*!xH|a3zvq8WYI+LVw2<{u5vUKtg z>nkSgBIJLDw^sw;;=urGj373_7Lk&wvnb1?FVxBkRp4w@5(X6B2UNGM_!oOW>lnWF z7=G#LtBFr6rLO@5%?7Lmprg86sO%J@X4U0OR=Psd_lSF_qzxeV`e9?2jSz_TB=Ow% z)0kzMMh;q}mj5G1IViBZ9)qk$%Tg)sL8Dt1HmrXwju33A=s^&ex% z#-%A84?5|P-Z&2*tH9YGbw?1#OK~b5mI5xyo^KijiE%{)K`Abzi;7RAmZoQ_U3+Pz zc~gHhNS;TcCPiIhV3zUyq3YvXKr~D$DqmyO+kqr=Z6p8EWPl+D)HC=-F+X~^u{;aa zFbhR)kb8o*fq}l9my8Tx&X@ZGvVFaypDgI36ZV>UbK$+U{Y@0Fyvop%{ODkc7sTcZ z{FR&Ag!$6PbS(;}DJx-knt39#0ymnwQgDBb;1FU#V%nOH9JxGsp>pnD-d%a=(ZuM& zR{>58+n!upAk8X{_WXU2C%F{2vur2vxMw_t5ljUK=gN@yM@Q5LYg;RS%W6#kWU&2w!l3T2Sz4+uW_c${(4sQGeeh(v& zW#^n;rk$ixm3JKcZQkCU*(Db~&1ioJQypc8rIWy`-mUI~SMFyJds1$%&JUkjI0uTT9+S%f#&eHVvJxsYS%Gp$siTO%9mAj0!vXC{YN6!WX*rN5M&vZ z^NE(wB_hr(oW+Zb>cbt|&0|pHdeL+i!7|n6nau3)Yn2j#-o0g%Z!gVmT-8`h3GoDz z+O7(fwJgIQ%~~ALM2k)FFNpU~_8~i&S%`V@5)~T`N&eD)YK$#<(e0_XidY23>5q3x z8aNQi_G0+g#p-?@IGQh*^>Kd~K_^+B|6nm~&~|5{!sAZONVmd=Y^DK1l}~ox-AKvc z^h%V7xY-R1=!B$oEYa#!hSl%zryZC0WC*&5KFCmzsd>^M3XBn%EHnj2quDGh_!-=V z#h$%Mn`%DmEh0%Rd|KIU#?u>ibN3kCHd=i_!{&d+P=*2Nx5e#8 z4qrY$6cd$e@*ro|z;v7RzX9jg&!moI(98<1!YSmV;yVHo+v7aF_oT?%?s{rTLStTQ zs?NcjIHcn2grucv`3hOf>L)aEZ}I&Nn{M<0=i?r{mgKx$X&6h9~wRfUT{My z>4cgiqQSD~Gmj!OygdlS_KV3|#SGNw+YnJaXvWywxBwRwJR1;7LgsL*f`S1y%nO%r z8Y&7V{Ipj6uD*YPtMn@;3czoj%)$4uu@tZK6L-Cmz7`!XvX5K*w7%9!HLGUz5V zaQV}nmGL+5r>KiMOQ_`f?)FBYB{09-xb2238g1yy66b$mgl^9+`8{FPvaQL2nEV!k zh!^g%NRGx?aVbeTN0VnfkpzN_sYayt-P?v$oa$y{+Wu{fDU?aLsi*qNL>~|)+aTt{ z5_mzX2#!oKnyIx14GAwXu9qIFWsB?J{=Ban)X|T*3zn4?qPWw+w}VCxwa&Lf`abC= ziiT=7S<`>KfB>0lG!x<$2*HYEU5pJ$$qhM1rp)3QP;<4u`Qny$owU5A5%4)Z>KN~4 z?7#@r?61Qlu@TGZER!gf?E)HCWf`hxn&?b0$PPZ>g%UJMg>#Wv_9pUQ%oJGv~* zr7M5Z%8EC<0q5T7%orZ>$L@qtsp}za7;bsX5qN7@NZ}G5d)^NaXP#8@t&Slj3;mgV zC24#R?n6a6sRrTLW=DDl0ZJuqEMW0Rw5I~dVbY@LB0ZZYM~=GcTh(*a8DSh(&11*czleYE7p>aEQ_z8r4J-)wA z9f*LwvHRM6e`)|vtkjBCoM@5%nT6kYbi?H7_ABG>3Q;@Mc<{ZKky*jo5dz>0o|ht$5-s5PyFMn$-6& z(UZkK_GYB{G+i(*F{02F5;pT`IzUH4Y>w~d@X9{Ij!Bq_!$>AQQN;9|_Wm(`uHnH1 zrpF?v#-KWNCkQ>3%w&T!vpXEpwlAAXoAM5UWKWd(lF4f!t*9!Ik(rp79YL@P^26Pu z8wqXl^W*!#WSW#1_m}IK99w_&V=@CnzEI1X`}6+NfJ9h!01J+_oac-=F(weH`h`Ls zO-27;Ib|WUMhd3PmBT9|Yw5W`zI#TmKjhZFQ2{tDx+5DnH}ZQ(;vkMkyC&TPeu+d3 zpw^9}mkBUc(=N(sB))Z2J(z%wUu#4#_qk<(NNO%z8)4m;)T%chhs%H008x~A?)H$O zP+W1$0T_)aqrD`4V`LANszJ476>puZK>(}_{i%K+Qpz(+ghXL6NTpsR)?xUhs$mo3 z=k_r?dys{>e%aPvUs-n4F_Wd?Ml|Y2>ipNLv=(m_RoXT23i`46LGAj+3V{X7rbR+? zzx(XLE}`_a#1(@L@pf5$~T zIWaCf+z24uU~>`NJqnQWtsb zh2BQ3)%hv>Lb}GaK|dgEszE>WlIaIjps(fxSN}Saa*aB2ee?HFzCe_9_U{^f&Z^Pr z;-$kOVQ(@sPD2p}7(F5^Xjc~eP7w=|AR(;cYJZw$`ed=S*&BZxl`_t}+5!=@WJsR8 z9})zs1K)q+=~gBxP@x5}G_kM{qgPjYQ0d-aT^Y`xiGAPWEmj8m=`hCSHv1D22fnCN z6~$)vOvq~u2}v|R)nS_pdO5V^@CWyC(FMgZQi)8=fzn-s0E(ax=p*yCq1je|2g%+m zrx)Q}m9d$ei}RA`$svpR+G{AAMLFNb5%pTj-*$fwZiM%(yLOl#4@bXWrzi?ciaHzl zeD_J+MZig()rlRzimG5P|ErvD$Gop|HDmi)>899)EI-Bp}>ci_6lQCq0XJ*r=huSrlQ< zhV6e$NynDHhw1BO2!2vt8*QTwwg&X2%Wia6qEpOF#b8}<)Z2oLJlCtj$id=j8HH|W zkzQJjdkiu8^e2e6o8DvyOVM1&LA~EbL2oe4w9${zkkLSglHk-k-7B;30>OJ&vdWGA z;c@P8uCZ}F-oAa$W|OF>F8~_J@u3q+W*dKi2Yko*NiOseR2&X7i-9H%)rWE==)Iad0z{!K6!~tC3xmM4G$QZ@`Ay-KjF8EnR|gWe`Er zP!xM8Rn$MqF*9JDD&uAo8}=4crpDzW(vepOE|!~%F&B)Abhm7mg|g3##A&BZPS9l# z!Kpdek^7}#fBJ%eLTQW(F_h_`$FP5&r;v9CHq+`pyn=cvBE1^94tzH33WpmT2kb

Q>T*K>gEx|AVWWU%=W%nV|T=B^766n9v``r zUR-mCJ%#J|t@`2@iy}(85{iF^-C?=Ha8lis`+1VtX8kR;g)?dEki219iH7ag?I*TQ zLbj~wXpbnQgm^hky(x;#%B?lOR!>%>!~nX*Ez%5zc)|Uei(i@5((SbP?M9jX91Z08ctkCc=41pILFl# z2V%&dT4O3tINwv~BivZcy-M_4TG2qX;s9n-)bA?wC=ORx`A^wN;iR{N-Sae5(LK?d0H{O{{fR9xl?z!q$-du`)Ei>kZBFA{`0EoooU12mY^?1YJt{}eR?qK4;Wx#$nEsqy84+%Mf%2MC$bQ_uc z<4lyl0tva&7VT3)#|SL-5y&XM3!oBZ7jRWc=7lf!x5`f4Bo( z@I~82EAH+Rm#k2VF}x~~XIi(QE{t*5B(b>)rhsX$q?GJ!fkIVsWZ!`t;QCCE9Wq5N zJD~~&%1RID!H<6@qPn)zLVp=eIq2A;l_xO-w!ow)0@vhYh#A2E9*U(mEy9h_^*ht% zwM?egaZomCYDOOksL6i{<#I3dJ_3ZmNR8$p!={mp_#rty{RV$to#$5vJU1t+rA@_UH{+X1 zF7kR5j;Ip4>7k=QQ&?gT5=yazN-&h%=L2jF(xWf&hnl`>_v?4Jkwk6dHjk(^_2bmU zc7&lV*0tTrTOR`Ug37uzFDcw5{MYo8<^a)n28t%UJ{EZ@S5~P1sMEwrKzEU7GL1lA z5d<=K=p27|P;^7n<*oT$(xD4s$a7cc#$WP2uluwOLE;N|6nd9;(077G!n)|azA(a% zdTMNWfVCeO`*%l?kY;hx@Tpu19K{zg&B1SJR!|Lrqqof*%vTI3jH$#qHwOq$WAiB{ z(pk#6S5D}vLcDd2FH(Zj@sY~-34G`d7i5YIutk3X6Iq>PI@`!Kecd7rX^~J2y_e#} zpg$$g4z~KJC9fiW{Yzt^<@=k{*)*yJ*=pgNdvFnC4^Arp#2;lUeh>N+lMJ5W%SksG z7l;x{SAARk=5CZ-U>$BrLuhvLJ!71A&jdv|gLw;r&r*rd&0TTe%tIcL(O{|2;3Her z#J+!##)IV*z**2B%^C78t0j09f>jHa3NSGs)~Loy<-}H9S_LPV5JC@C`6}fTTRjCL z`+?gz29>K(+5^WdGk@A(#os%4)E{+iEghYDwSR(#uS4F^t7OvbZ;xLFGz4XV5<%kK%SB<-b64P@iJX4Jd{A;ezGC}7{kHQ=fDAA5fQ zho^)Q;>vjKNZ5!Ew&%q)7ow(P3q53wXA?U$WuTsls1ut^!I?gSN9o4unR@!Prgh7{ z_w&G0nEczhcZG3!KgXsmV^pWNtc&ViBj#ewL60TklR@0GZpW!6gQiDH=h=_m<%puXvS;pz|4Acv=n>a3TAP}`mFse+ z9V@p2C9sf_QqB~l;;?x@j@4{9P<9S!t23aW6QHZOeILi^5vReKM`p5EJ6nH};Dj6L z*Wp&qVzaB{%UCo1Ps@LznrU#UVj#%?$8{Sswwgy5znhd%Vy9bWz?&m05M$7qG0>8( zV0zm1_Tp3$UN_(*=9N@|`u;*Ri4q`wP59hkL03auAK>ms3s_;YwD6<-epTxx$B~z= zyDfL`YfZR&q6mkeRmrc?r1yVkZ9Jcy1S+R{7J3G!qT~~rq;*?45@Wf5U2I}F_|f}l zfaZ%2VPH;YIPtDrK!ucGG^}Mo4u>^<-O{`u5y7+9hQ?jue8NgPXE_OH)P4)c-61r> z@LM%TeDQ6u^+`swM3&%pnzBR!(AxknB^uXMl6P4)6jc=C?;kZ760m{Ioy|l^uTXyBGk^&q_=9p>)wun;nL} z2)O9vL>2U4))!>Ini49FUW-}`qf?v^)L-wAkP7u8y7wd+X5n~CQ0=n=x~gM07JGnIt3 z*CaBTq4;QffO-tyouVTTdJegvOW}G-GuGqpj9TM)(vIHl>z02Cy_uDoUZXml^kwgu zRQJU%dWqu}5LPdJ`1cDhGjPF2Y5Qe!iV|Ix= zYl9gXm3!xAjXQ-5B-hCL%z&`j)Qh&XSBX<@BY0 zKI)B|Shk$boT-2Ki2}z$%S($M}@_iNxgV{l(lEa`BS^(YsivtAVq?b5}KR?K&yI=-kw58 zQ|$%tbDiGnu=)LyF?mqCsFtewCMb+BZJPj+0>5BXEm>1=cqQsqlwmtRo`G+O7hZ+5 z3kz}`ABKMjF7lwI(`|v`!_Q}&NY*0v5|wz8hh5nWE?`F|0(`bhF6dK&9oc;AaLM+g*$ZkwHTSf#(V&t= z3PXH`6+A5cE?TEmwthI(093xUepq1C;$I7UvVLugJb^ZOz zrmej+b8?KpV0~Dol2f*RyQ%z>OoHe^CMCriXt* z`LxkEe{cp>cS_bTR(1-xEaLcUX@&nx#OW*LSZGLCK<)RQW2SzBoJ^m7E9lC0f(*$S zs50QJ2xCH!D1;R@p2hyFDl~!fEnhgwzGnjo2a31T+}pui^i9609B^VpkcA$<SStEPIro`e>sgTkL<^1Qz2y zupmtkVL{0#Uk+$pa`2(yQ zmtn-9FdN#lBfk6M8#QW!gvYm7J8{l!1zC<;Bai-3NviFD}dkQuQ*+(pb|7rk@6V_zV&>eXETvZmYY(5?={dh6EB2@_HnLMhu z4i)KJecc+y5H8HGr({!0KJB^ebMtEU&fY*O&xNki^&n?cFn-;<*ZlEy?p-A0-AHlF zq4S)yM=@t>%ofh8(f)ri{4o7jsMZ|XPg9?@Q*}1L)8-yV6fu=jj_GM)nPvbem`BI< z7v0nNwhHJz-PzbME@Ab>E)b-`H&}kem~A9lV+!?aYVq@BsJLHU22>@86V%3a?Pe2y zwCo0>cAAu%#8k^4_U;(Oo#EXv`&@@4Vc0{WFW2bx$?r5^zzBbv!b1zfEnFbvWy^s_ zZo9vNBv!D`VXXGwJ1>Z_{G9ugRs$qq{fiKef;Dgy-~7FM$riODY?v8DE3<#RpQgIo zhfCF%9l%QZvI?xRF79tNnYzYIX-};C3#H5`Q*f#nQqMhly4|a%F;wU&O-&uK!;~)T zx*AjqDQJ<2O|*YHH5%rrCAYP)zp;(JHOR$)j*g=E? z1S@k!PzxpmUq192FJJ4u2WJj!q>^SrzPql!cDzm=W;&yzs?kG>;n++nck|)1_mbk0 zqmo{`Ga|?AJQ-~Et>>PAA9n2W>8|YnRTQ|jVKIL{Pb&KBHg!TN>cc^#Byo7lk$_@L z;={2l3=kA4m_;aL{q#93rfGyJZ(!+z%=kCS zeRAp(fFB`U{gqAm8d0GhCZSrI0hTz)wXsAw*9ch`PcL{{7xnu?kaRDz-bl|GE$ucS z09k*n1$_Iemtp(GaJ?qG-mQVvQMWfRq@VIsuRJk`SDCVK?Ba2beP!@ETR_ZKP8K`G z(LPc%E~+fkF%j@`W8G_@(_qD_tM5O}nM$N8!Haj{K7;EbI>{(IhJW!I0c;Z0i+_-p z_U@B?4;5 zMS{OTGmIfiVF47Eb8_w!UVC#%izN^Wa°(PuRsZ?kJ1S8+F6X&{}L!en(aU!*4N zbZcR`Ws1od7zCH(N_TU!Ef2z`e93=3Vfh(!Dm&u@RA z^swr3E}tXwH)E_2?AQ339jO`GCj!_cDB5<52Vkc%-$W=ipOZNxHR$PLE z0aXnA$919{{j@-eKWajVQ)K6ax{rTxgV(&K^P<{qzn*0w|2hy247LB+_is{$k&(Qu z5oVzY&et2UGYE$Q@d00`R=WPpKk7@Z-=g?C@J{wflBBWJYTWWfih$H!%;e-T+uNqo z$G>hev}ZSs{O+eqA0&<9T|X85X@l<2=>!HYD88Ph3eZg9%7Gu>Ww(}8kkfxpprQn+ zz`t{eib)E*p-}~bq8w)p+pofjPDqq=kY5s*)8zN#o(<`CV3BJK4>E+uZV0KWmUa#2 zMD7n-Ydi4?#W$>ZiQkH$J#RyhnUlyzO<>*Gq;V-w0OUX%me$kzkwOM=eGST(PYPES z8=#vd^z6R?i_lXJZ$Oj|EC7Gz7NumI#5YFMBvp*C&|}hdXILjV3MF&{pW)5&OY$wx z3~5!g*&0LG==DmEIf~VZHR#10Vpdxo1Vw&L-1V*-t*Pi66Nd4aM^p!XY7xGY{MJ=8 ztbD(rv!U?0}3ANM0c5s)~>B%%IW+;=1YL4&3++*=9@eB+l@uh!hRyN<6_zR`G zWlbXxsyWQZ(RY|hMZqS-wSsBmqcK9M#RA`;KTD|u|6m9+gd?c5 zLC`{eqIzt_m{6qbD)E0C{(Q1TO)}pz(f~%=C>LV2A!5Qy6MFhzP@YdZB6&Pgu?VvG zdlj1>7c*7+v?7>J327A2ZmEFx{d;n&P7ODGuqu-f$zjqw15tbO=+=*oK8{IjpHPik z4~?D$fD-JgPYYV3m0o!3x%`B-XSucRW^MA8WuHEEvEen0uYZ4gttM0P1w+fzl=-yr z%4zD@BIa0Mv$P~hGy9M^c!PHLk{CuZS|aO3-&Yw@-?`8dq|b-MKJKlqrm1KX3?wa9;?q?$J)R~Smi}5udOGw>PrC-BdmIz%54W@h|8g8k zDl|&?8y2Wx{Z)UM`H_Hu)R|}~QC}iz;36Fn5s;|*v0IQc>p7zJ zQA?SN@`grD3(mIA{LBictAV$1*Tk{T#ibh)F0G(Rp#6*E)*RN`;u#S$Y< zP_cqItb2@f`Yp?dw5ehBoai8p;zvIr>!a$CUg2>DCqIA3^T?r+9N>HWTJ?|s@NcwG zW|8kmEyFMT*%qYqA%i`!qx4luO)8QNkb;fhvGXk}k?qpyhS6_`E+7!k=F`U)0UjSe z=RHx64M=d0suWEHc!t4_OGsbI6+IBkP{uT?2Xe`bevz-|}RwOwAwAYL!6qp12 z$4xO`b!s5OR&?P*p>~ye0V6j>ClM4H7Upyq6?A`7j6JO(R_CsrAtd2=Awz!L^^EgN zI)e0-5O2@dpj_xsd)V*{Y9Y z0Q+{-m|&@e+yUj2V6Zaj2|j2EB#jEAixTw-DC0#c6g0k!sfPqeeI08!PU*e}d%V-) z+uVPu;oW+En#<@=$TQWbfo+qyJl+*SdZ}Sxw)#aEK^<7OuJ|W13{|&Lt6rlS{s@eS&(?0>V9l^w3YTHCsI6=~zivVZ+EmXo z5XsG;?Db;vkZH{PJ~D?P9T#cWEgn_TM^VaJj?diy1q!c%0O8D|C-*!6s8lG*weWvd zO#eWRaOY_xb&BFyoYvJTudbN`vcX+Od*qUHCq(mp@T7Jm<3XA)wu(J>@$-5+mYaEf z*K+&ek=|_NAf99&|clD{c-+I{_M(u zfM4S4!B7Sf&-y+q;$JJ9ZgP*32_iY&c8gutLqm66f0R~YOk&-5Fc>~`RM>EV(ko_eDi zCYT1bfePYe2VCpvUSh1ul)HF=qdC7hQp*dFh&YI5g}7Fm5c-kee{t)#M75^eIUq#w zx$h?RBci|3ygob>r1Hs7 zt5BaWdw{6MT7oo7-(ZRI4->A19Lj(k1Fh1J1>4At)8 z+`^K*{;B^%IV2BMnY z?wFP-nLa$UBfgy@iIrGJElXw&lso-sEvhQ`$Jcf1Y!KtdS23>gVGV3fHm=y5bKY)q&iQ+KM23HkvE8@d3C@`rkVIGpWM&ryUg}dAuar8pxCQpY>FwU2Ej|Qk zcQ0bfNsJ)HAk)L@eGR#axu}tc3Hu;1k0YOz-};sgmojThVdEv^zXZ4td?XV}=~L<`tOBT)`k&!Z}Dc0}zhJBebO*X?lNpaaACbG)%UPG-)0* zu$|eq#mzU$xrg?b%sN_h(TEY~U}Vsn^h&TEX1D^p0g~Gu5ujAeKLShxOo;byb<}S6 zR}4)b?bpFW1MzJfI4fLuRSzL=M@EGBGng}#h~Jf$ZlLcr?1Z$WpOX zh17iJ{HmomHGY4@@`@gb+I+dshZ|z}ui|YET-sG0@Er!-#6k8ZMKy77MGj^w2`lXa z3w6%|Raa3gYQ|Z*)2R6q1efSwc7f`{4~~Mnttf$!I^&qj1`}ccd^yr{vpsBq2(ax zpe4I?MZeR$S3mK4P?m_iSyO8-bL%qfQq+;Ac~SwOjJ18|FYzcs!LMH+k8X^S^n_~c z{c>5qL@8${V+*Z`%Op9YtxYzj&2hm!5ylJElDi6n^LEWE?K4L{Q7ig}u=t<;QZl>1 z01*(Wzutd#l_igTq6`{hOLtZ~|I^;}0<@910#aDCI! zgRB70c>Aru3MkxeW<>~G>EvA!(BE*S=|j{*YrhqEF`1GBIi2VNtTIKq4tLK}mr z+HfyzK;=SoLw6m+kAy|6F^~MfG-RW6)_T1=NQunY#Ya@^>FM^9qqu(m5~VUQg~|n5 z52=6E$9g*UdU7$`K5-zbcdpR5eRXRqUcJB}A#^qyYI`j0pGS0-#=;mxE`vajN0s$$ z@3eveYGX%uGUZ3;A*TzI2}a@}y{AxtfGw(EsW<0sOI-SSqI^3rr@p@ktC6D(hm4pd z0C!qxBz0ZiF!kjPc(j7f*biP@gd?+9(}4DGG{O^(C!@PEh7w;(`g>FHzUgjQ7TAk^DEs<#QKQADAEzRdSWSy>#r+3p&bnk}id`I;!{w2^3p{635M2EQ%$oatl zQ6Z&M!sDUJdJx?ic>3X4L+d_gE1n_lZ@-$b!tBa(FWdC zz@!=x9uSOp;WQj2p9+!!(q$U1Hh(ck*nR1CqXlN2W*@gpDvWQ}U~G_mzds9%%(6zU z%rs)xb7R}@J%^R!D15n zhFiWXr1GmsAB`O;Hix_Ml8KkAUm&fx86;aqBe;W3u^4*z@LmYEEZa^vWAj$`_L7Kd zCn4s{Q@U6iYc4)F4Bl)_*q&F7t}*2$2W0k|ELvwvjE$5x<5pHD5G@J0N=%3YynH6%(G;#b z1&Ri!I^^XgjxlSu^*4XXh^jiezEWwGE7IWV_hsNV-@}7&IqIV4e#`E(XDtC-51Zl3 z_aR%TR%*`1sMJIhggZMFjIOq%*=P_Pl0&hq z5Lo9?+f2}q;B0myX;nxW(Ud641UmgVp5wbvZvU=0L0Y-cmRFBe>Bt^%F$H%CAAdns z!w)G(-Ti*Nv=Z)@bzk-qA{}ob+d~nS@kuQ?z5H*mQ2}=-9sYZIqr#u^)rI?C5|lM_DlRU3DcY=^jc~meP!zu9LZ9p z%-0>jtQpYcBWt-|DQ*%rw3l83&cd%-<}2^LfxQjU8vK9#jDYh5i6mEm>oy0o{5Yfn z;-%pFIrhFPFvcCb+8yiVX5#cI-Jj45+#jDu~zBGB!ci z@RsOycqt6PgH#>kSTY?CQi=5dOx8=f9ZEg!Fh4_bOxRe~LrK(BTbIErYOeq^N%U-3 zjAHOj!J2=CYB0O;#6!a`0fn?;gBJR4=~-{j1NZsfPH-AqO02uVG?9s$tDmunWt`4? z6_PXi29G%F@=cx0gkJs;TA8aDLbJvWgxynByE((h9gb?|;#~nIbMwRAuT3)^nY+`I zH+|Wg@Kp65U34Q&WDQbOtt;8b0G1<)Cu`^|ezSi=2<#T3ag?r>`Y7?%C@$_DH-G#< zs3x#VsZaaNqoT*gVS7*+li+pD)XdjNqRWzbz1Kb`w76K3H4H-Q=QTD=brxSa%qXF1mlI2RJ=7g9-sRFHp4 z;yq6ns*-#@=N0%3t|Qfo<-)c2CnYkdyhu@+Yp!WoVdt5IGNXJNT~3b6Q@~58PPD5z z_9t0M0i#L#SsjlV$1db8M=j)*z%^C5O(2yyF$?gEZ{YgMeLR>>iw`?;$ik@TB|!PG zK0BTq*4(JYD&(?WtHkC-F9LIoAb6#X+AMl8*VQHFo_W|w`9io~4Jz*~>TV}adsN(( z`yPG8@nz>kF5qfCsNzfIW#|AaK-9l?n^p&-e2L;^Xrk@@_qNl+D|xjK>bXyb)CYKf zsxa?G*Q#Qds4(MHyKU&@)IY|wl8@c{+>dxZ4`|je9XA<}9M98^TRzrZy=%|zltGRF zT1k0Zlf49y1^ZobPwBK1U-1MvA0N6(P`#Vk{Wk=>a5x(x3$%(} z=mawlK4M@lKR-i`fVJ}EO6cd`#)hC&`pKqNYc9m9=ZtDSfafjl}Rn zn}Wigr8SoO$}PxOXrzSpDh!r&Eosfy02vN8(#-cwg6X$``uvw8?kfI{MI5VtG4P&* zEPj;ALk#L{C~5cAwfFm0cBtji9DM3OSRF!@IJYMgM5-#bZ+y#4ATQESCX@O;2$o(+ zfV|(<=2PcBq+i6z0lL{~zLICJuoR(vv*!49+ruuNcabxL`s7O+gH+-msqAu;?t=B` z>FK?tr8@azm4mQ-<9z^T3~lg#OeOKijhATlylVt}D4G_v6akt68I9dDG{j<$nXC$L z%Lp#Kn`q3SCv>8jjCRFaAUtT=awvn6|LkQjim=uv>!)o}8ULbe#lzs$ZCq;{NTtVydX-V%3UDOCjClU-8DUoS^Aly<|F`W^B z)n9~rAcTj^G=cxgAvOJhXaLf?p!t~-WOx+-AZRy<@h|1NMz`ohQaw`2_?K@!eADY}Is@)|4av#cfHB^exc7_){`zQ< zAsy*HX+Sz%tot94^K`odKmv`6D_@h)sE*#o?m6!g+p%-&<1~bS2nwF86V4E%Dzlol zgbK$QH-bm{q;DDm7nh`c`iV*AogI-t$ZhX(?JU0JxXO19F!4nD&aSD31Eujqwi)ji zHEg>PzyjX%tOb`oyii}O)0MW3yqk(--U$)MGfo#VEzv!#orX_I7A-Y=TZMD&K*yNM zSP`Lux1m3vad_%~-te@TbU(A;u1LjIyC35lo*jrzFMOlliiqQWrXB*%A z9dvplDyb~;k2n@VotyEgFm@V~CcU0J4Z^2fxd{M!sa{~9pCW4F zNb8c-)9pg)X=ok-@%zSKaWRuk4!R(mHh#%9|5+=4Pd;!xdixCe^5iDdW{E<+TIv#J zu9YjL>c1!mn*!oEfB?0mqcW`n=;!F-4 zWE8)ANM)E#m-A=im}onF2I3rob1nsbTiS0bC4luMn*c1w85{Y{byj<2+tAX=C$lC- z?m%UKbDzlMbg3wehB#7M32ebDL{SOeNFEt!<1}C>iFVCDAHcIMxv}L0tW8#MdIUI# z1vy8Vt|A5@TO^Z?6G@Bs@LFk(K(s4{wyEy0rG9jCYEW?(NJJ1eK=^2(g;5{Vfpa2w zEis2@v&G?V8`9~>wmq@5_f=uYSKjAnM|zTN+XHDu`x1@DAM1 zU9fTMk3ccVnm7;F;Z!FP$}G_ZnYWgKyb5aTVEhT`6D3_2%cx&~g1oN=g{wS{!%%ef zlHwN?Y2?jK*`>B_*_L-P>a$uDF*>_{I>V>E2YOTPB{2oJv{*L`k37lIiFMxqFrR2i ze%Kau$ojlE)=aAROkZtyK=;tK66eI{8^NR}K^H$#srRZa=1fQ^dZ3 z?uRKo{vgWd;2g}m+@h3a^3YLo`qA46SPkGoSE1dkJ??ygwA?viwt7}4y$oiW0_D{j z)wFu`^x`8CU7t64zGntrV-{&30#7$g=ZAojS~>kYoR>o0GrbV+B3-ku4oxR#OzLZf z@J_*Z1VZjZSyY!f?FW1o2uRm|?)s>t1CU~@B3K`2MnTKvXrK14?mhatgfWcqy>p8W zD?gPLrc-DNOUKc9A5^lf*v+`BjYfROTzhtG_13c5Z|5dILlwaSI;&;T@ZUJ{s$Pnk zXl6tkSlt-TY6&UBwZ|#$yPkb)5}mg>=s-eBhjKS4GB}O(ch{`lTRxqCjw>~7N%)!b z3%tx$|4bp7cxKJvl}mdQuk$r%?ThWV{jnu;76@(kFIK;@?(WWgg{{y!zC@o(+ljd8 z`%OPbWy=SMHVOz6iYWEd3ln9gv#(%|J%-lk15|g{@jFE*U1^l;7kslO7rQAU$(_$! z(;|mD>qPXs#YhwF6A8qB6cE}FYI6M}vl;Q0ASP^D^3eQZB}s@FyWW#{G4O}Eii^Q# z=PGX-OPQu!#;^Yw>`VQYc==6|G!r-605_rsyvVSVyIY~wMyq(7`d^(B2-K1F&ci3g zSn=rHW2zDpocP41FLd85d6j!3i+;jRF@hK`anx0I{LA?`W02l|ImN()PEitISQ=OO zLmVQ+(6_-)`_j*h%U?0`35(sPFmnte0!lSRXWiac^}qmk6J&Dd zHDCSNK#sEo9l(NreY+N(r>*ZtDd1ZEy*F1ga-JAO_^r8aZj&Ub446GU+fSddI7+#@ z2%6#(7LlGt@4M}ULz*P&uD6c|YqK5(gaYb`_?SbUA4h+dtpH>8B## zvBVLxdK96Bt_JW;M(nK`1o{P(-Fl1--0B4h@roL#>9T2pwX_wU@B;Q;NDa@OqFu;D z960SWb)XoW@wZ>e@ERg;UoCUwuGIo{Q&9vdv2yDK8SarJi@EJ>MyGpk0dRsj}Mvps*}d`wJN_ zm#o~lTs~$I*R?eI1Lw=~8s>6z{-gl7`Uzf_^*w)#zeIFb0?}OS{2>nVfq6{gg4WL* z#sd2WVGnoA?UDnh=J6WIi%!um*n{RI$C@g@^>2WGG(?P>+jZcw2J7dYCkK=}wb^!M z9564T(6lJZOTr7Jxh40snRN;6=VOOWAWiBesYBIzlqryiO^gVBt>41sRC(2n1-rA5 zST~F=DrN-NvhIJJ{c#Z3$yQ{5i0LrNjHB{Tvjv%W&E~@2Gwt3D})xZeZ{t4JNPWs9cNmG>_=qHP1X(L7OvD z#$HJnt$vnuloEiUW}-}m?8JdA@HoHm1H=l&Z}*E7RFkiF83SQg5rFZ-1|x>n)Di{if|wH>kXDKS_)ams8o<*UMaoK zajLY{&7;Lx56JWhj(wG)CdtZ7ZMCO5c&*5s9YX@6Mt7eN;_#rZ&uN7(N4a*4y*5*S zJ+J1Cih|odvVwAIURnRU6tSA^2-s~iQp_={tO+1{)V0eE6khN=J#c!1yL^Yw+3?*x z$}2u3@cwawMW%4U9qNn=xE-Lga4UR8?LD`|>V&6G+H2O(y+QipaXS>qST(8~BaArF zIQO?P=LvyX{3wc@Kp5Jzc0hY_3y&^;O&o11T=79e8t!sC6+*0utqTpfgJO{#+@H~# zE0UEwEA@r*ptI4_QDcyJvkTHZz3R1uiy&tG+~#Xc8bpk1ZVs%2 zMow2KlF4%m#gk^4?2~X!muMEA@D~ZNrGj;7U}Gb0uBo@+vd_a=@s(J+D*5Su4;c*5 zM&L=hV?c_9Qfb3zVyH z5kS&%82U!v41ColJ&xAa=zfVjEe?qml7FwE3Y>FwFeN8_jrmq;TBBcVJoNAr3j0rG z0R%G7RxF!MK>Odf*qL!Sm2{tf-^Q*X*pu&DHi5}qJ(?#Y_e4me;Ue-v_nHYunO^Yt z=paCkezOB?Jel$y!#e(=XdiIpGLmF^Qm+4_7XTMeT#)^QFY(L&ZhcPCIh|9ba#S$g z3i2e~WiU1HU?tW*Rlmibf(IM`0h(um>}Oextr|rPvOT7B`RUc=7vz$<@CUP|#0? zcQ%c`_Nm_$iow6{Zcs6<@sPzk>~fx+G+q>wVfp^Hb-P|a9%1MHMIMO3VGZW(yZ5L- zhZCxP^yLABgvGUwn~tb|nuwVgsv$U_{=%XVUg*BjVP^x_t4MW%hLni+GJI4 zst*8{j9J-CY{Vmh%YVq-7tQpvk|dN-&BcswmX4rp-O< z_TvlK+8+a)lABK<{w=-Lfb9+z7RHO$^O3CmZ9ka}S+R_NsDRqx`}NcJh=>9F?yd9^ z7PCdXN$4jNu~N8ygN?;x2vXTKIffD;S`_?djG>Jas(3qr#GZPbvi5E1@e9``UxQ@T zmo{)FG=Mwn%(kK`h{OhHsh=%4;ECx1wh~u^e0sb#KTQp~dgP`l{}nBGBuGWk66_Q> zc=<3RKZvWa`6Fc_3+3^CsaY7L=lYz_P^Ku3UUqCedS(fKV!jUzl<7|SKGbkdq`|^7tjZFsS7@R4@dbD{B9SmlN;qDBj^UCjL7F~7%NjoGwy9o zRS}4Ww|DP|BDp2inVj5AZq`s^ddsA?3>ICZ8w z_^P*8EB{0)5juDaj4tD|05bZg_D9Pd!WnqrXIJqcFM<$k{7%K9g!r)NeGty>3iUj6 zs=MX;(`EVUHmT4blFMuasj$m$2<7v7mnBDD2Q0wlx2f=!4c8&jTXSO^J*kIhnwgeV zTxtVeu*h7q^=+( zUN6Di3#$Vanp&HE&!mECEim3e8{qIi$4+z~1fz##)@y?aO~H2!&R^8`PSb1^9O3Bx z7YT=OL}T<`Ev&goX+M?hF9!80-_)EYfx9bz*1SEJ1ec?GQ7Z>zbX`PML0f1=}Jv2nQvub)wd~D&tI8aIF52EKh|R=xq`aZtb)^j zgMVviw^#a!&-*7t&9C|iby65t5IX1~4+cBXcykE~lX_cMtP*2(%4JKnQL@Iy@%o-i z#G+FCqJnbp;0>H}J4K=C=bWSC787_3emr5Vkjhey6Ws-OWl{=+L@?TXX$#P;F=LjL zKSflv@otYSgR(U0!I?Zv&`sg8v2(6}Pv3c8b3PN^d5Zj&By}=G0B<+j{s0ulnvl^$ z`=N_74J5$WdnzaITY>DOX1J2wp6*wy-3DNhhMkJPOV@CvwpTMoVV2SY015qBf*lt7 z9n@0&zP$XTuG!(@Ofv~wX#_}eD9 zLQLvmU~R+9%y1FVUr$g;Pw=fc@O}X0<_CZ?2FRX~!`Au6Qu-l7dWIt|Z$jH5)vw>u zw$=|55sJHL!t#+a6oGi&GKoL8-r5IT1AiY@sRh@`X0VMPq(`zQO&Tu2K%(ucr;`xc z{_5l({OL88-OYTGWuR0;04A585U`NV2)`a7F0U}@G@SR6!E&Be2(NM8X3G2c;)e0y z@S8gFF(O2dnKY!`h z?AO8gbmQVf^7=z~+P!bahi$Ws8Wjt=16JO=1xQ)9X^T&JTz<|7?>n}}lDSSP{j#8< zx+P$bU}vK)6(~HH_#Nj#8dq`=3_`=JE0^PN&I_(5Hd@n-6M7t*?YGNV3qA(EXmD8& zt|nd=La*fIf{=gp=6x>Od62n5df0JTvBifsBk>drL%Oun1fZ_=b;pG=Uta3 zR*yL01u((D$(qU&f-Y&o)matg%c9u5YRFI-s^+==@##}bgycKS4COZJ_sWBGKY9Jv zI=<*~ILr(FJu}$?Y=6;e86gfXrQCU!2SF@dKp~HB1Rz{%R!HreRp7fzW;VoWEkv8G z35EO-)(_jK3NzxJpk;5K?#dr17nS3^SI$D#l)4`)7>@Wp-PbEXrhqVKGMFLta~LP6 zMqQ2f7IAZdHsw2!z^MlvC7J|R{BB0Q19!-`W{%vU*63Q1;(w30#A908xXWOtWQpdS z3Pqt2wB8OQbq)8fwU`*I(c)|4lZ@LdEh;^P?)=p@AXd~%uu3%1u9(-PP%q8aCzwGM z`Wkrt+E4oR+aKYmO)=8|7UJ~zbklJ0q(bu~lEuh=;nR`DHsO0Sn3is@;SFtI<7+fe zL=i@5s%X}F5`VFYt6p`StH=)xO@(KwtPzf;l|IK-!)6taM$33qspVP24SI0@l&L1| zA3xjhVVJ=d2^aG}>6Xn3Xo6tJ-p5FzTvXFcB^~}&uCihldOMmKYG|QYqq+MlaYO}P&m^+ zxQ+(er8qrO*_@Jnq1dHht=$XFor^&2BE<&9m`E`&i=>@r_x{T}UIAr%e@Hvc=+#sW zyC}8K=m#FJ>*5#oqKb~~@r?H5UNnq9An=UzS(9PXns!gTlHvs9!pX*R?#j26#3c>k z$baO1pA#H!$ewVcG_+u?zFEOp4a8UifqAT-6l}4J0LzKF3Z87DO(R02kKDG-jHtd^ z6M5G@%=`1xx}P~tS8$!_xCI6rTW~Wtb6`Caat02F>}$9*?zt((9pr|-I@x%(J6SA8 zAv9a@*UJl39vTRTW(#y`x>VT5Iz^vgNq+~-2(0_mao+JNm|FmmzK-UqglD*TBrOb- zpmS8uiByH_7rU+T@E!3E+6BZk-y~E`8b%UyH*X9(eq#I+ZpIc)I8`Xr6L!U6BRLpx zcjoqd-CC<=NllJ|q$WjfO=P04vHk}v%V%5xna*(87lYA1um39kDA**mA7k^=@m%pJH6iD)9Djqf!tu&I+rn@^d7Pz2nu+#o6C#%rQ3c-C=ugL% zJxK!$EObLfw5Fwbr928*-yS8ljrIsLsg^s{`+=^`a@NX^1|(1CO_8yi9Dbzi2U^d{|A+Ns?&#`bU6gqYpV2v#rF^Z(uvJXIV00j zRyzV7$89NJ((7?J0CUHzAAbNto&nC?_(l#K$C%PY%(thzGBRE=AOQ!$wg_0QmUUeH z1p7vXi8rr{LAay1X=5C&Rx8`SUT|-D+maV0@Cs?uclhQw+g?SA%-G}7VVWt7FMv=c zk1HnMxP4J1lye*AtIr`fZM;$8j{~83!vsgorbhxbaY!M#)UGQ+Uw^1^3vPjhv2Cfi zj;pF)cA1{oe5YWR3FqtVB)rF#*N~VX6H!K~?9y$YRo>C@VABSVpTvW=sP(QOoVMd) zJ5ArZoRa8SXXj=q`%FG;4_SXHf@j@D?&3%KeS{qT&hn+sZ9}S|f-N8<(9HDT636gN zV?PkWMbUvu<~4NbDSsHnZCE3G*wq&CnZ;{GbW}A}(pVFNEsiodO6iuur{jq>3cU(_ zFXRohcY<4XP21LuXtyjn7vh?8gk4hU?XoGol4ume>5-6 zEuGt;|2Jre_Bqb`SZC`CygAOM&==|!lwA}VS535bkd_^%Z+;tHZF?j`f8DNtq$KRW&Eq{sgN@PSxPs-C(R>%gHLPb9j zetj{9WJtT{&uvg+YZyljq!_SoBML)%)p~G)6ll6Xt_!P4S2&RImeswHbGabb#0Hp2 zcg?O}pzC!eb9PJ81XO13(S=fo7A_S`6&cwHxaM^(UPH$*!~D#Y zwMm5aVt;fmemqt_BeHxG-tbU!LOa9VfIXK`Zo(C2N=7-rZC#;nDksYxaQTIVG@K*Q zHO*gIv%_3oppv5z;Yw^& z1dJ(ri%j`h$Jr2+8Jy)@CQP`p$w+IAMca)HU<~uqim6XOal~z~1nMo#5RyVHhH$wA z$A2r(zCh9dSGK|T?JcV?zGoz*?2bQJ2lwVE)Fx{`JrQsF!`p z&j0+!v(z>6kAMExwU7T5{FH`a z9OnNStQ~yR$1v=;|82$O&(^rt>g|zvCVw%*FjDdt6371e`Mvl7d3l1Skisxbj{MsR z`8WM9QRl65tqw%d2cjUSlALpQNDZiZnnBk4%Gmddt3|JC=eBJSU#&EMdp$b+YUmx04DB498K*ZB;V4(>6hk$?DO z7`B)HBKg1i7$kK~{wK9_8VL*|MaJJv_k-w05Ruzr3w+P>0cd9$Zrf~Huir&^j*vf(s7bCXJ+TT0^Iw;fShZ;n$-^T6i8uQHH@do(e&Y$0py_F@vE&^_GyyGAYW*jTT$Ao?kiBm7|cCRRB*2@_>6pCO?50vC17&3~6hPziVvb(o*y zEZFS0d=A zr$>uE+7ryfM3~9KHCq@gQ5O=^ew@bq?O8`NGwAN7^s4S$isoub-Ab+fu#InZ3D4n#Th1V?9 zmL&pwTYU-Xqk;z6=AEAij*Zp)SG=C-V%`(la^#s|5j;;(@6$I3-Ji+%#D)ymS(Mdo z3eRBN*Jkvp!s&N4>1GYTL$PxQ7?N85+-S@pj;AR4!tAJrdO+*;;MRGA2A+`83)yCP zTiqlZG0Zi%B!8Rl&e5G+BG>DLLt_(vNarXiEmhAk?pjjHCM$1N@mRPhc?m~sn(^90 zMM9<+@L@3QM{E`Q>I@rRm`Yi|c5y9ZRxMFdw+{=i<^Y_npEA|ILM4``RO>RqcVK#O ztJPFT3SVO3uMMo&iF!829stjwxW2RrHXf0?x>%<48Gi%|%Df>ri8}7Id+bh`i%ND@ znI9mB>mm~VVer*ESWu$GY!>+!?aja&7;rC@&-dP+3l!dU?PS;!2{)iV$XXGL&gMqj zASjgA@_!oXVMBsd^;h(_1a2)f4yq2(8#Znm3MBm$(Er=k12yP~%unqM!yHzzoB zLb+A>d4K#XB$*a{J4^|Qx?nl9e^3~!OQ<)9n1v{A?|y>}%si}`kNv1P_>tOop}?ge zH?iA~$Ov&Xf_km4Tt0}bqmuT+<}}!Kn@Trr;D}fSTVyI9^&)Wrqv;$DsokaTx!xwf zAhZ3!dB_GZOtJoMH(B3fUs-jG;-Sd~epsPvTYrzthr6q^DKdy`fRV)CM;@00*nL8+ z9!7KBm2k3d7~I~nF(V{cn1Br!Jj{-3XTbAc6=f&($=2|`St-Uu^+VD3U|@_%!NSZ^ z&0Z*MS0}V^pP?(5Vrt_i{b^4S>!8A9uY9`WABzXw!@gnh!RUYSQ`GQEfhbGznt}$+ zaerm>rOZ5dSr^<-NpTsV->FqnhA&Z*m${K|d}uv*A~^*Fo|K7}d$f}K>s%prQC8Qa zC)AdsqH!g)k>N+#xNJlFt+c~lpIbgDM-JmP<;46$;I`(?oF>&w4#R8uf%pj1f(uSQ zl~jzw)0;1b$rHS2GbhbZE0t;fF3#nj*?)kRgwVQ|zHVB=EAu?D7Iq!&S+m<5HR-RX z+lcH$3x-@d(_ifN{c$zpS+rcBl13Ooo8zVAuV@KrPFzI%oOasuzJQ4miKNjmchh~b zPH2$wf+7<-vU1!g!6&$R^uvPJtgjlj((`Mv8&gu)C+4g>qM5xg)(%l7GlN z7w)QlS)P|~o_L<6KN=*OW1xN2^ZXI+pe$Y7lpif-(TVVLk1MM2n~58Zx zOiSgYhQz|`4Iy;lpv5dC@8_BLoPSf}U|VqQ56(=w;%3tm7iLpl8L5Wi*Xobwg3xc+ zJ=YZoKIiusQ`x`>3f%f^b zc#yTv`ZS}Anf49Au1usTpq8?8C7(>YLaA7v9tsJ|890arJNkE$GIf}-NPk^zn5h~j z17JzxRAn;4KBMcuCo>XB_AR*}Y<0Q}E^vTM+)YU? z-h0kZ1DTDS)t_=_MzcM$NimbDX0nfz*RP+V6mS?C9>yf<14qAe0e|4HqI+&H0v)lt zAIaeH+k(5fq;fG-bS8=J$MF)CV%bY4{t%8LaEVpUCn<-sj!JTaI8Yy;ut|p{nNkd5eNoq zMV%9jwg;~T2hqFng?~r>nk{ILzq8zsKxV&PmJ=)-TyaYEZtO4)n>nFjtxfX=3P{E5 zMRO!T7X9ZxU-`XWqH7WonyS5eusCOmK51L9F4i1L_t^=7pwvHb11+w>bYe@W1nDAE zMnUYx+bK#Qdw(9uLh~K4NM3uf&LasDT+oYQQbD@FlLVSHQh&jOt;kHcpO;|7Q7q(U zZV3YYUIZt!rSBRQ2bG}Zl1hW!ltLM-|Hhx59nn--X#r_iHCgL|c{nIE_}0zJPwTU_ z4(R4|KZ|kpyO7?mx*~RJ*=YLvdhC^9l!`AM8?AX|4T4w__3Hw&-`cNtm-i7wPvKhm z{W^?(#DpNGe}AULGj(4J`_bqt;BS)h8jPfAG1(6{5jzjjIg2@*P!O>r$LBJ880|UL zYRErrxS}Th9@O#+bhHm`yXg*c%KUs%@QQ0s`X$}k|0G0tC4;UEb+i^z$<>w0WSP88 z=J=l0>{8NdSnWA2pwT=|(q^*_oJO#=qr`v3s!XjpDu2rYfxDIwsybQ|Sr55GgRT;r zidTn2a3SD|4hHZ9wWPsOHNqR*xwPlA8qqP+s7Nonq@_WB)A_uIr)ah*MJtryp{#_s zr?9XsLdq)RjF+V>5D7<{)Rn@-G{63Ydr;+8_htThIY_^L&Fry1hF$!tA7~Pvb-`x} z(3ZGc5`RZY72r;AI@zehC$C4C@{*uVCdNO(;+=vwjRF@sZBff9Tnd#cPoBXF(Caft zF*mZy%`xnK@LVGrPba78Yc|xjVqgvFAyM>)mGCNX7W9kBazgNE)cN1$#y?!+?EEp^ zpQ`}Mi+=EsqPJ@4$K7rL0#o9dGDi!BtNNBgIe$bvmWH`HuC322?^sm=@rRQnQGSIG zg^=?Avn}(7B7b&1`+i-Ld;e-zgK*lH{Z`VY7FQ7Ey4X0b-__+4yt~%iogBD5Iacmv zvceD9E~XDk{kj?(*~*yx)mu>ZFSb6m4BbQk)B%9VBfUIBkIKm4BIntVY?fy(oWI_3 zW`E;n@f;KfXsv4nlMjgM+(QBWDOYS#U|gX%o7(EMy|0UveuEnWD`R}y(HREc!Zi#N zWR6%cH~{at`~7=6qIZv-&5gmuw%{(`)}gy{2_vqpl49zD!6NEE;bNP*TO#9!e?(1k zhg|v$x`~k#gqOG&&5SnU)BP=m49-nJ)PIA~ML_SSbR$fd`kuwi6sEu1T}z!ZrN6&J z{}C=OQq9N+G1GllC>u*N88k%#kX$rj@)fxnGjo56xfU8YoaS`&BBfTud-<6APM9a@ zn7RC{f5&cBO(I75ZrNJ(O)|+Z15${Id;!bCD@1K28N$>~9-5H!APN!vx`q=V#(&hB zMNGgOuWtNbIyjBPw|nwr24Q*es7Xgb7)f~a`@|-GZ%9{`I=+fgPanO4sMCK_LTjWs zLR3J@X0B&~s4r9kDN>KcIe}pi;>R<#77_QR0o$gC$bNWZsGSt@?Nu!jF6xbeleCca z9Q|1FuB}=YQKTHwz;vN3MvTEv<9|B=fJXZ!lacSbPUpo)t$<$lw#kT}2`{-2JD{KH zYM9N{Tu>@!T&19P94vW_kD=2qPWLB0^)8)SS9CbW@E#|%2SH{fJ8_^drL_ndm@f6V z7}yNH;6Z#b2>(t^`bWb{tu3-ailTBm;b4drWp@Q9{@8xB1hio;|8gbux_>Hs%|H*S z+=c3_Lp_bTDy7c~Ftmu`Lbguq8T=md06kj}-F--TB%8eH#um*g68$NOup`r>OCT(Q zfn~>bilH_=^)wC?UzewDP(3~geewy&KW3)Pm-l}mNqr_Du}2Sj7uhgzL*`@$q<`+0 zU>6NnhP2>El-_x*78%b^27jiq;2E?$ZQErRwIvf@NkJ~_Dmu3&XI&pn%sKU04T=Ol z7TVF%5x$+nHZPU6Xr}B5(nmPcR6%#0;wUTi4oN*YUoii*yy&(ykI6D*n`0^_nu+ZU zmnTZ|q>bMw=^z}Eo8|G3x_*Y^awo8;Fc7`T@qPTy z-bmaGD>_5wj0UMY9+Z9+3KwFn9e+7t+Q+0|gWQOfOKzhXD!}_aF90(PwO56Q2e}XB zOP()%Wm0-f2ENHjR{Na~G2=;zy zyJoKJ`!;{sBzwBe@nn3s?FPg0$jgceQi6=l2APP!? zdsC2oee=xbtt$BDWYd!^hOINDO_G1^E| z^W}6CnjW~j3IdBW*Jx&NW;P%=NV_MoX-Oug@b}a%RU9L0SEHaGRV(5MPSovFGP*c3 zxH1h*fsjg&|2Q0q_#UC;Q=UiuIRXBx-b~uHnez3P&`)iZN#a3bE}mTUf;}Ax4A)4>MJ}G|S|JF?lX&?152*%pGW< z6Wr1O?hNEF$tAu#8 zE$m)CM}J1PUs?c5u8SyJh$2?;1&cGtNXbC`Ao!I%nNnewk;a9hqywR)wFQMWuAccD1TRPdYP%nNs`_@mOa^pA}wUB+WURz zcpRz}#fZ2UtwDUp2LJ|}ps5!nM9Ps!8p+H125J*I+g;&Jg*xG(Hn^=2ke5oNdGe8M z6ae6d(ztn*r+Z?ZAc4JWykw~I?y9FFchFN#!0f6+t{>N)@ga5R?N1>~ek)HP1XgQ< zcYmfb^aX=}S32I+f&IQGm5QPWEr9Sq;NI;`!JvD`B+1>+24YcNF)RdS9#(ihqVMAAB@Xyq?bV6&sPSTOV6CwQlnQ&wZWToblFG{hSP<RT`=%oj+TVe!%Da|0xrfaCkZia-sD=qL0{lVbQKUKL| z4g5?fY>@Wx+DblV6JlYJC9%_V+szNa7!eJT=@Gb&)LxmCx3b}}N*66J&h~Wo#OWOM z6EPZWWvpLC4xVHr)mxHP0A9D=>7Nb6SPEX$X=R6(rbzr%JUaF2DEa`iS zPkg_TwO7AIf3_u@hKc|hq>hixJ6`YY%`oBZuaK4pjqNl_PfO#VqwrawpXVnk=Y)KF z1C_}Jxj`{gSg)oPVboC?S}j0wdnPMGKb+4)!1{oE{lzrKOSL*A)qnTyiq|?a2GOWg zV63^)ghe|MMiA2J0C`0nQHIJ~X-=oCyg_JOvr2ioaD>QWLHewdb+L=mx%z!pCUtl2 zsl8X$rYWk%g%SG(2yEt%dl*RK#m}ejiN@X6zYuK@I?Uf#A%RBjdDTZw?ibH^KJNT? zsYxx4ye8A$MC_^4sDH;mj4he535I?t888%V<`D5K#{7-&GH2M5CX#T%k35SDb0n?z zE3=*B^4VF;ZKD^jwa+mkfpOH?0}+KhrZAw1YAc{_-h(0+`KZCwG{ww%?vrFaWg>@1 zu)RMEAC*ijTTc$u(bxk9|GnLS_U9JL*`&#lH{_73*BC=$tAEpzVtJU9`&MGEVL8iY zFNtGCBWIyiwdZhP(*DCfn`CD!{|TUpISzkPSmXbM&J5!XiI~h{44?}MleSZ8)uWZc ze^ojsfOk7DJJ?0;yr`QJ2OPzEZp}Pc(YnhLSZuYoc@gauby+dMl}%lhje|{9T%J}y z{EkyCs7wP>T7Mo9Tp0V4Vr|JFz_Ysc%qhQZU!>I4s-W0HstV%uJF4&dj-hK?{UlJK z4%Xp+Vs-()Cc*fYXy*^@bIRj!j0l(60WWu3+bEr4+nr+;j)rrYgil**rm%YF{oU@=j(-md#C{`Ddb%QfBYhJCS(bQ~cKI>g~!uhr0GOjnUzkh5Naq{DZpbFSrh^j;EB|Xuc%5%!h z=pwks+Yx|BqMBmZnE#%&V(Wn>w6Guh<00dWv13_>B)x?M&vo*bb?7f%<`ZEjjNU{_ z&zP;n*FkI1LPD<@y7$(+&VfJw6pldo;?w^$?2 z#DC8U;%S=4a#vLsdBx_*BqevG_U%bNI7I0Sho(AcZ`xa#HJegFoGxBx{XN9tBn-w@ zp_{kU{%sq3xAO{=8@9GZBTo~PGyeqYS4ur~+2R-e#9PmgIj@|Iuedb9sP1ci%w+$( z#?mndF=Qw3%o-{>?57YNl`w$K@t4xf<$p*dcqaLenF(jrl{0#-)D7G6H@y+~y^lrJ zlhn(YOiHYkc5opTIC9^zdYxpid~6^b8oSh$4={5&co@1tdJ3H4vp!EJsvN#DC~zIY zLgKk>P`vE`)84x`M1i&A(xs)NvolaaHK4Mqx2&t=G%f;@d0y_&Le_gV=Tx;Nk$=>ka~Yi}(lA@U9)H!9JQV^#NiVHy!k;v~$>@X>daf zF3dioS58A#uv8$|eB#YNs7eM@@WTe6d`aFvk|X%Uto6YS@JA2xH99Ocj56qqAR)Kg zmmg}O_@I@V!k-9M9FSwx;@}4p-+vDWNpuXL0`}YL-77bfioa1ienS7?zecpsq?-UJ zk+OyiLV6^h0QgzA(-OEw3&mkJ&D08B@o60YNel^0tq%dKQGG*WvWKi993qQAK++Os zz2Z8nhP&~UONyNvSS~_s>Z>h^ z%>9FRiP0F?dQ6(|{-i}0EWdXSUFX@lI8gt551rg7q#%~bNJPxU%@Hmys=0ivfonzo zQE(3fU?n}OQ~Iiy#OA`~3xAZ{`%E{BTBQoxq=Z8^uEsUwPoNz`x0{fvCsU7*>2m3oDvVT&e_2DAKm=z=J z`*9pw5zGda`DpvJ|BNfn4lne@CS2_BJPZ+$OzPB=?b1>&KxQbc=AUO}>ANokC$=@`rq%@%rPc1!_UWwxMMM-c;Li0J^1XjauL87 z;_7l6M#_w%EQ_P>Tz?il@-sYi&67O$s~QC#Uf+zBnp+6BAZcp89LgDn08~Ur%<`Wt zbw!_!<>&NW&&6+Jc`Tft{lL{kJjhnMNc1vE2W&IoP+>0Py~5b)uT7Q>DKr(o@geS^5_nmX$ zZMj$lmY3_`JAq_i4p`BE+_)`wXf2m*pQM^&(dXpA0Pi&&xrC|lyuO4lKi=*+!2N4v z=g3$Su_^JIOrdQf_)$_U@IA?6qAFTuI3=o?wGsM;0%Ox0`I*cRQmM9~k?29j{9+h| z<9Fr4*Yi%AFf~PhCNeJO1wi62TTz?qc(Y_FXJEqYhZ`=l)dkp{gRg zhk43dBL$O~)3~XP^6EqEk6@tFdVms%BE78P?#cFFQMi9VGycq33~tu-2J!^hxvk#5 zP?Pk=w%>R-1zGGhdk$Rw7$DaXp9shHVBy~{e&V+;<xv zQci}WjY{nS=lZmjox=sU)w+K20b1ANH0VMVK?Hx#hR~LWJ@q)O>3s)PKi2JVds$NC zM?+(EzGDB``vDj9wqAaK3_x8vt^U2-PgdTsyC{?9s#vb)#ya^yl!3bq3cnyX-fX4M zuMh(psQANRClsa^jI_(3*o(8sOXk3JTz=q1bp`h_nMU#ja3Fn8O6}XUOFAEn!7d@`%s$_p%Q*7?C)OHxKi-VC({=-Rdt9Uj;2u)87 zHorDMi#Q>5f2gemn5=`dtD+sh2Tn>Ss5`=GIZLv1WV&&YPjkVs!o?v6`#A1ZrHI6? z;_%jOoyF_9+s`!&k)0Rh24vgZIQEPsw)6Q5ElZw^NDFMN%Nf-lI10#=$!R{d6C;1t z{`Eu}b5{hWc~@w|CqQr?|30u{S~D z;8JYf&;FtJn^jVc)D58m7y>SlwS-j!8n~*s(TBXB(2Ac4A8I4|4Q2q@M785h>inLu zxUfPNB)5ha;COe2i$i_7Je_V%HgA6j&e%eL_5zD(8Oao`M+V`~3Df-YJr(9A%e|TL zeL9AspC=6LL2&-if_RwcElb0~YJMutc9iVX;P+i#5h$Qns)(`1-2eT?;sl)NOnQ5?o=`k2`Wl0;j59WWB7N_UZ7T5Yk}!1}E;|MGu=mi5=t1wD)b{5=Wp8IV(Xsscl%tgxQ^(}xmfMTOy+Hi*yx4Oy(gnP??{-4?wuL1w~NyWvQB5epKL&| zC{!YU6V{{@`)MzvH7gVFQ%XP=MYwTeF()mAo=>5tU{L>M@pk7QD0bflWhoVYug@!u z0b!2@xD~Sn{u$7KDK>qep5w_jWY^1~Sc>L-7D=-N`HpKcu~Gt!4z z7J}ceb%5q%+kp)PP_Pt7GtET|SaTr_N|Es=bPK_pT;I-$juH6vH|rLKE^QZI>#S02 zGFuX3zRs`5Ddq!QxDlcGt@-a$i;K0`gmF`tjn`oWQT0{Zo2p(#OG&~0^~^c9HQYxj z=JZ|-9xXK(Ir4uHmE^7JtqnqVjE`+J)G8*2;x6LYwF3YTytU?t7a%fT%M0Uk zpGkRXzH$+oMrE#Ema7lW44-B!2lRGnsm9bwd{)UqbXdpO#hE`EeKVx8V1rbp??`mE zC$&+#ua9jwfui`85e+ruwC}5v4qE_4E~lK zPn=un7wTLLxZCpso+cO(dS}OQC4bgyWx=X!JvGel1OV$baxVf66U;GN0y{CZ4@!RF zIEy@d$A_F0#~Fp%aP%{4RX_pLQ}rGv1Dco3FqAJIV)_H8J{I4edi|vxyF!j%RQv*-MXD$Q{};k z)IruO+Uzyn8MkJU<$uo8Y&lr4NfQ;$_vyB$oECph!ISjRapAyCHD6hTncEddo z&f9-M>zmQQ!Ej#7$m@H*Nu10cjUNCCDAiwkYo#J=mqZwE>B3n z`Krz!lC0+y=-N_T%_(m9Pzcjij<#D-cFj8YU4Z;{0uYStWnAwyqo|)(InTb}_*wn% ztKuIsO9?wC{x5E;;xrc>5LB9l-oJI@=77VSBPh2Mf@$<4P}zn>v5an-w5c zti83#5~{Y|!4ZZ_Gqmv6{=GC~SPup#^VLZ&FWLpHL3+JD#8(el!w}j?9lbqVW8&tx z1|NQnCHDCJl=#pwu>-0i=jEuE4u$laiy9@{!u=Yrts>gMWWT}F@iE!HCk!h%dI54j4<*+q+-!VR(2(9ZU>#h!FvrKDEW|_RGo$q0 zvye^>k%{NKvZD7kxr!qy6uDoqQ;SRS^^)t=)TiZ7e5Jp3F|>B!FhhTmwB5j^s7f&S zn1ax3E~aTcM=vNaf`OC2alX*YA1xIxzt^e`hr``8sL&jC+Dk~!+k=xQ`&$34|P^0 z0?4K0+uY_1l56CkSWka{gqlc;_!60WeLwPo<+RR@90;Q=gj{U&ORVq6;?k93OQE_G zf$&-5a3T|C$Fs?~96-3=mb&#PqD2QoPxN;WKcu8Rd4^hv2OCco!~iRxY5Z;B`MFkz z=}h3Q#IbLbeg-irhRR@<;3T0+Yt{z>bN!V~(MD+%a7l;aCA+v#l&-D{9xHiK&)*=Dj&Iceu%l8-{~$s!ZrLI| ziKm$q(#)6War}Q+G-KT8Xr>yQ<_8SwvU)ts1`w$sKE+~J`CIT&+x21pQer=OCukib}2@vy&^S(WDSQ(!&+5>Qi*Xd#J8gLIPd(3}EZiR(C!0&y#g7Y~3f$TN@ z_J^myw|PzTm z0p3ue{7`@Pn@~K`y;HXt#qVbG!;bz*M21sThQ+^f&R9xj4;7{;lCX1jGkMMd0DKzb z<1y(`K@Cbb1a?g%4uz@aIYXCpIb+4m*8>#+sUjPJ;Vw7gDXZfeY@Xr}iduGe03pp6 zPMM3)56m4DQeFd<<3sv0pMi#xsc+6+WF`4B`Obf)HSma^wmv?EJh?QMx64Ar-C}TZ zpF~w$h=2wWaRlirDruXitQh@39294XmL#4K(Q^Z$HlRNg`bXS+hjqH4jl44D~p&&ymwqXaP$bOP*i#iM?iW%5Z6 z1xCRxA5S;=Qes;_pu`MauKF+$;v0VxtIp^X*-$v7>t^hlm zb84LhsVJ_&Uqbjbjt8w&Ds{iiTOd(%#g5#3*yQ;bVm|{;)!t+a_3v7+A1F)=$eA7e z07`jJqh3&5CBXxoT<&_rB08(c$wMw#J-5g%Q$M0*4>K{@6F|Skm_cl=GhisCU+JOX zxwrYKJ32GjBUTaMFfZn0_3HBl>}k0{|C{;%)aY8AGHb(Eh_ zXj{6fi=<4_&i6K4oz~}Lhr<$IGcup84dTnlfs1J%D4iT_C(MvJ?kDTJ2h#Brbkm#T z`5((mht^(D8eaG{MhX(Bw+#a()8=EIGwoslw{0(>uF@5ERY0`3+l_zlSn=U(cL-i0 z%_)CG>fdx|o3y&S0p$4s>TOFX{ESDbJIBrM-_%**$)&DB%b2TJ|M7GdxfbJ45M59P zVH$>+IkPbH3$DKPyQQ{dTk;C5s=9Rs7nD=)0R+A@Y{t{SxRG@wNFUbRi7!QB(tqHg zVBf-~${$VGG~kn_xp#j#`Ekd=YPG#Le*BB;r)}-RX+xx7;$KI25qn+u?AY)_n$A!8 z?C?inf^26p<&z75#@?3Kf@TM9j!O{oc;bGHfobUB~Fgapz2e zpdWHz8yiLRn?MJCvdB!s7;4KU|NA zdGUGwZF!|R{p1@nm$MsRcWY7w zoki^Fuqx{ljF*3nR(AHSO97#Eo6e{<%qu&4UD+^n#V^6U8L(fNnc%E}ikn_R&eDEr zBB(&&@HuaodeQ>-H=)AoN2hHTx3@8-LK@d1?`M3@(^ffSD=AO3*)NSkcW|8gd{^R> zchw>XhXD6ft5e|3=xX)A`4|F`4|I}B^J2g}_r5D=KPZ3I6Fu3Yj5)BwhfEA5XoX!w z9cVNLKrBqHOjXP`eASkgr3Fyx9Owsziu>x*wZ5Pnz5Pe#Isl4or|KXGIfK#6LD2nm%6SN`Pl%GkGOx|E5fz(`{g_+fVte(%JA#ipf*}2&Xm}e# zURuVP(`E5dOm*(n5XI)rFt~uRHgK~dD_@DBFy((V!ZejE{^m4f4#)}UjTgo%0Tidq@R{Zww-$AE9rp&WW_|$TtkOb`}GohLkMK*+It16MTm# zKDPVeDszo(MylQzOk|_Gnm>H+$U_$*1P;&*9~v9!0NR^)(P*hTTQo%6b)L*2{2)Fw zA5nqEnzgp;oV>Y^FY&MIt5@b%i3xwTUXEfhtRVCBHY<*bI_^Mfi;tQSRpmM)<##1D zMC;rV2Bf-~64}r$FL_F2bJHM7eE<1(JJlEu^h4%T?t}jv|D1IT#+QJWd%=WLg|9-x z6#3N*OEVOwp$1mI|NAqw!;btmEV0Mfu`o*_Ml-<~TQzw{D?#wsY0+%W>qvjsrU^>5 z9AD&dNSG!i*%75)Yw*IN;U$ec1g61);wC^Mss`ky?fzYu(udD>bQ`C2dNhw@YmhvE zinRw_{Pvv;yRr@^@qXUh2d)%s9`PLKeQUV+W<@1Hjz#PB94@R{F*zJR?Xl~Iz5ntH z%iVtRjw3~cI$}af(N7N2hPHpzs(FlnGZLBUpU(A>QU)T|h7}{dfI(~q*Zyo&qY29u zpwQaMmOSwq`}!13&V;=OtDF6}D&vC--GlIl6aLxem$n!DX~+T?zt~f-53$cUqQGHF zfl=drIV?aHgcWDGML1nu0X!|La(?lcJ|a+y_n6E~D7lKQWyittJQg1b z`@xgrn_+jD7|YdFfR!tQfVai5`7L%}vah`uH`^)xlHgDJslp05jaDs$^~(JTgSP;Izy%mzHynhRpn zw$#m%o8eON)-7zVxGo*LeJy{y3U$T#`RP;x>5C%c#1}SWvpb}aJe6*{uUBX zX>x29Al7}a!XJNN%JkH?Y)f|6LH1a;+UFjka8Wr?6I~U~Mf8(0SozcS4{%>7M_DGW zE4}JiDk8X907{u#yT(fJ%ss?p-gTOI6J(toi{Z8zS;@59|5^JvEuXRarGV+ zEmk%IzGB_}3mxaYND&Bsz98JFi}O%PPTTnUOVtdYyoh+j=2OM)>Y!NE`LHKB-5>fI z=-4uN+$ew1!9-M_7-Q`6D#?YQ`!y~5{U~d%ehT)(eoH+HQ8GS?o&{k8pL2kXBK=rT zJ?Ck*=WUGzsOkxdXuF#Q>>LG(0ItpXUa z+}wsEd`JZYGa+K*pZcpr5r&~HWaT9J)^P@`ls$j*Z`O60gwk9|$%0zkg0HM_M|WX2 z!6IE=en0pi$54uh9*r*hv%*@kkZg#RB4fmszyCITU*8m3MFsICI_L7f%u;u{+POvqfAr! z<<)3Ogm_e4ei1;{L{v1Uz@ME3k?$AlaDK*Y%HuXoB0%{nRLZ4r^&;&sB{s9|WnL1AM8<(&N z`;r<#>pH#ia13!1o?-dUP%0#5^MN&fV!*qGAomQoB5ku~r4mCy z->EA`2a*12)s6dZgT}DCm|swaZBzB#wh&S$k|7MZ&8U4P&D(?=-7mpqd6s`Iwtg}| zx0-bDDIS?|jFXYq2ObM>IcA8JkHe-3LGLhooIfgOSxV=0o=_MBZDS0YG=ZV%JGJ*b z9Lo}4Azb|h&L=^=#_g`VTHBdek%p8UM9<1gB{?oz03v@ZrmbDV>akUFSUQwD5i&5B zoXu|t3Csctz=n0pR|DZj>qUQ(48uAm0SHAMxt3sLOBk!EG>)_b>Jh&ZU2*V`Bo5A* zbuR|LqM^Le@Fqr_2d_Qw`tANL7H69(iE$#b!2rU<8*h^P9=;I?R!C;}x}g?S8VU$9 z+ukuH^cWS8eun%Lc|hH74tOWtJRxX^6Pm8%Azl2gU7aVKh0(48v*Ld%U$!yc<)X6J z98_j|193z$h0kt#AqlHfIgqV@&zJEoIlU?u!l>>~`O}m!m94KhJJq-jS=g;Nk98Pr z3sDtTDhtwPdRTiws9Ui->4~B?*9Nvz{m{PWMSJP*dwE2!3etDRf~&DfHgNk&n2_bL zhU2Z5D~>9^mvc@|<2Zlj*}TB~9@?og0oU^eUP+HZfSHf-C;_J^GH0waQ<{s}8)L1x zcT0z$Dk53P2xHxC8wjiFnlE3~`DB&u?V|Kn9tNH&lD$tl9AFIMIJV6xy5F~|Yw+-Gr?$c7`({+vT-n~pB4Gr}uAA8(ydMer z=ezV1_YJB+>c0HStQh#!sD;l0~ADdB9v__ zFODAI6C-0-%<+d{Kn*P_a--wZv|lL>AzLcp9>;U$)Q%5^4j(zQ8)z$K1xZQ~MG$>k z3&E&amq-63^N39Vx+v}nZ}y$(wkyxx5&FTQrRJ-HP{Dr{*lA%ZPc@W}uB`6Eoo94_{C!0IzTfbN%u5LYne$?n*)9dpOY~l13J+ay~zAO=y=~ZuJ0MR-P@F zE>km~jCltcl;pI7={O&NV%DW+rRLCXNVnHzuSa+UA5a5LckStyPK#<3Iy4#|_0 zL8WVnrm`@k3M>r3w%W=8EeWM^l)gnD2l?ZsK&6Z_Sx0iJHH_H@4GF24C5{>VtSSym zTbSraAh&Puhiy$tIY0N;uYr&H+EJ*MDq$2PUm+Y)igNgOMbtKL^`xt(qnNs7`Y{|D~h*s4Y$*UBpMu-dR?P(3cx5%JFa=Dk`-t2-uXUCeM+X- z;#ACrV&|assm_)vkSN7@ic2&fzt#rP6klof%T);~!y{fM_-oAS9vaeCS9F!X9+cLQ zl%dTE(8dC#-Im@e%owZz9Rk2#JEY@}eO7-VB^Fj@bfrAQ$*oDYOU-aT&?a`Z+L?Kb zFCHO?AR{KrFN@Q>zVxP^8nK{uY)>s*HFy>v+J`VO9#AJ^mk}!1lr1YeK9xgf9ZAsA zq&>CqYjIhnxrunva3I0qRLBCi1nTttagf49iHliOe5D%AApCG-34%w?K{Dfoy>oxp znZn{i z#rZo1Ssw5t#lV~H&Gd{2O->lw@d6O%^DXa@uZ)mEGTvyKe&}V7(WZ!U{&040CCTc# zz>K}77PxP@_+gqP$wtw?6G4e@T?Bs>9G-UjrI|5v35G-!$iyjfbJ}JIB7`ep3L;1gV?7~qe@Gz_IDXG7*lS_o#0n_GdUS z9!nDt2Kw~EGt{)`Lgh2JoZWvU*#COBxz(Q6jml&2mO6Iu3HoI|3b#zfNKcAo$1phN z*w}#fr~eQtbwP`J_6e~=Eyv-|{@&m3S5<}(i4|pzYkp!;QFZrR1V`iS$YpS^Z7E@f z(gH*C9a2k;bKog`gmlx%M@7mZaak||lgW*MtFN1ft{KY_67LUEG;4n`6V))~@s$mv zU*=_0opZCF7%i1?6&Yo#b6v>w8Mj9q&us$ZJPJQ!d%Nf;&KNxZdG9*yW7L@=xD;#& zO9!KUAQqklX8}l>OMaE5S-VcmL1VUZH0Gb=ARH#sE_1h&7BhXc) zZ&kSOu;)kp`w=w<$@4 zdbI)T7dT4s+kz9}Dh?zE_o4F3Mu@=S#w~hVpp+ttIwGGT(tR=c6S1)VrVnrOGwfuu z)}qv{jkq}1%#fPs+^Ju)V2m@_{~4tgqGDeSwi8Ws<&ULd|BD zonCAT9ju+>ACRfRP|K*f|CXqh)PjXlQtwl<}u_l&eRej5yYMxnyx zIA52sFExMFJjKCW&D`6#fW`AzSM*+sd=sgOpBNYU{^Z|G-rh|gqSk`w7h{Z_;E_>S zr%jBJE@ruo`JZO;BKCX3@|sA9{5qsWNqY|VRH!=UnTR7>(>M|C}>uEjUu5v z*}I;tN0DBEotL@b)Yuw836(RPGaJTmi7elgu2_F!JkOqmEaZKxlZ0&A_JQM!mid*f zEE4PRcsgNe9gwXIOMdeBhCu@=FD;DWpNoK~K0+^?)Xt9mP6UYb9+xuP9@0Qn;vi_j zCcptIC?2h}t*H@{3*CSCZB2T99S20J4yW?USS|{6r~ymj2-0ZbL_)lBVGgDk@6HnS zOSFIQrR)nk9?I8XTZov6HLrz#v&{0@8G)b~c4rT1j0u8O1YM4Ko>t!q9&~2ZoviZ4 z?&w0&$5hKw$Yow~v04E|qv@lzgGNy-EPOVQ=MRv9_YLD0Xs$%{OCg%6vSAS#@Q@y+2BQuu-Q!yv@sVuU0J`}{42RYZ+XcMd!7MR#t?r~ zj*&*19*QR5{72%jcH6%bCx7z>5HN9F&MZ;ooWNiYVE5@yr*^5QvNB8*XHZQ~Q;YLF zD9$y%6@V{9Ws2|#RI!r8ai8QICP?~zrHx5HFtbjTy2*-q8dDKUQ18^ zli}^BJ2uDw*$-LCB zxfv7$!Uya)=kV+pkgtl8k|k%M(JYF}nA;&txT3HzUmqp;AH#%sIeIn0L2Ad9U2w5j z0KL(Atb~=hdhV{O|IV=+1U!Eh3C@JrgyD_4S1OW@<4FcKyGkYXCIKF#0|1w}a18IY z-g~JJh-s4u84R+|wTZN~671BR7P@408g8+QOzS-l+3JoJ`-=+`mF>=LR4A%7tgEP% z#?6LN6z^>k7k`E((KzV3W}f_Jf?bYnP@5%jA5`jki{uuiQG)v5;U0egj!WPN))|cE z*eG=X1NpQkrkvcklV~wYEeke>l7d=cg}>fvP%g&T;ltIXLl*xQWlK#42iiDekj|9y zaicLW>P!>PVsW2&@gThxsIaU9Rit=f$w(ny8==v{#M(Ab&`IO_C&p|aD`lJM-;7d) z&bi<3d_O(}g723Q4%UAd8N1uoD%+jYI{yCgfdIP>mOU^*C4wPm*E)gJQL4cU8+HvR z3?B4p@^z>Xg3|0LkFt#M86CTRP~3w^7hpbWvn){y`L69cs03DJ-LA& zq(0{zKNwcwQ3|%_?&B$`0)JiPCp`7sF^Ft{JKWxXk{UdihU;qvoX1hjc)Y77Pn;a&cj^gSL5AgKG~eg)>JxDnDw3 zjxGMc6B1a}v7vv-Cf<_vY*l)S_Z3YY?0pcH=LHb4td*p_VY9p}s`P3G-J-IJ{B&@O zTH~J8%qa>YAWf{b5P-O|EwwUhN*yW8%HQsy${)ebKXx+lL-+UM0}g79OePBmLa)5I zM1L=KPQzhg!R5Idj>pvPR4sj^#i;H$WLrf-XeSUH1&V)dLlItM?$h3Fdkz=SmMheR z3Z}}odByDRThQAa7NvTjshP~jyQ^ta8hL_`U`p#pqY0xGQpsWOtROp0O`1GcfRNcp zOk#^>Zpf3K8-6*JYkm;JYj&~{^eM$ppWNU#0u;H^zgevt?9jXqSCCF&^a8on5b3+R z?nkU$KX%)R;Pu{gZu%E3UG0|KWA1DbQl3+P^9wv)j2i2FQSMH zkX?IHY!K9KXTPal*Jy>}Hh#TCQNcr5RFLoyEBKa_$O9KE(Yt0Cq)v}7VpWLKvVe{!sFlIHAoY$9uNkH#*4RC+CD8?*!-+d+4yS5VvRLm^_RMi@j zz_~)FjH8-$3c2I~$nh$EP{+!zM2z_Wa%bh3l?!M04c*?CqpsMNB^i}Q9Z&mMhU8q@ zT%p>$b&%xomf zXIOvkjdM3bA4M=n*P=qlrGOo8v!{@v1dMR|d%<$bUY}<}64thM> z7%6f-;|3vcN?V|kr}P1bDpwV_6>S6lEmePYH7tdh7nPp0NAE}p1~-ZYUQr0-+`j8udRYLFr>yQ!KCK*q$hK$$EpCq z4%-Y#XHCyD%YALtCj7I3n{PRH)_}p~+Fkb3WXzopL0d@ze=F@i9qht{jadxb<=lTo zk;MIMn#kqsT%%M%?a)R)0ZN1LCGp@xG%uti)5pC`s}f*^%NElp5AI6hdOsoX`GoXQ z-Sl~eb8zh2UOHdJ@l?=f=oAho7eYzM!{p{s^Q`f^8H}ALZ@JASdR=ssYlf)UeTcjjdWa2RHBoxL-!8d!wJ#z?yJoNU4AKUb9y> z8)AIWcg^PND4<@9u=xt8RxcPctu9($Ng6)E_P*lbe!7Q zk9=V&fUBYNpqYw}^lUc_r7{~66xZlF=*rM*3aj(k3Yf&+)MCx)0~N2^SGGM^-V6dD zsB7@okd`gO)01;3t?$4nAy39-gyabJ9Gy>(WJ;_A!;#w31@y$eOYie#>(F_{nbR}F z=ihInqF)&d@g*k+4UTHXZHGDBJx4Ki6S7jVWPEfy0mD`KNZYC<0z#~w+>3UM;Y}XD zAEkMJhH!{i^qFOt26PoYRe@y|qYnPfx3oJ5u>yZ3iurBsihRV`8ai^abUs{g7+`v1 zQHPmD%jkyVo>4`Kk2NDNo*U5L81NKi=7Hi|9&5=EtI|Sx?qJ9Ia9Lq@df}mlxb5*J zx>|;JftQg9vOa-rkZ-Uq_A?;ItD)q;uBM9=WhEP^l7VbF=^@Oe3j^gRd1%xe-5K_g zTFQU1iEa(?K&ujRHW|{0ZI+WoRYw|foLX0p>t}`NH~o@$ZVe5=-Ypru0}^meO&`E5GW(R9K0)jk&2gBwQp_# zvgCcp9}Fi3FDCbgcxa8hUcxya2)A~3p}|Qfja=?843?fI5uUuUj5GLpXwN@Y0Q=40 zQ0Vj3gan>@BQU(6Bz8{EypG9ik|0As2Fp&Fey~sCQ0&6YMOam06h}d{?>&J-^(=q% zaJtObi*x#2F&KMZ6zdJKm316DMgb&!r%EPK+qP|AVLy<9G^5V>r=GX{+Ubh;?ld74 zakj5d0b`I*3cg=6NhJamjsJ}2r-kLHT4JcCZ}1wrzb<$?vxjNh)M|kzy_7 z3YQy%c6QE&K+`xIz8I`P_gY=WmN+2`NUYVS(aA)WJ~PhF$PiUqiAn>Nr$&EUqLRg* zbe(`)|Lj5%SCR}{$#JBqcu~LbebJObeVZZ9R-}pRw<74D?O6q=Rk2ZwJVd9MMmEG_ zd>2z7)*WU~&Wfo3upM-ewQ>W}&t)7}A73KXx|qh)$648CTU#ivFGGYX?Dzp<)on`$ zrt(mMNgjt5h7vX;1KT0z_#b~uHjMs^og^e!I(hBjWU1oI(4yzdpE!REN$X>JMp_D! z9U}z_NG#j-??+3Up9IT(;ahoGWXe~%rk$$Bw#X10q(P| z`LUVxQG!wsB)8KkSfPNa^X(G%x&?^N1^UG&5xMjW#afn?Ze@A!Z!&`8^J^(zYGG!W z+G!i?JOHF8Wn3RoM3X_?TlF{ zu9nG`@X8uPsOD#|0F*@{zvY)%`_5k;1SlO&$3Z$}gfcB~!qYfx@MocJn0h`;kK%)* z;maE@GI3IPd#;ayxK9W2+ixEYX=2Nnt(z37VWa>{A0Cmx8@qq?O_!`SB-L)yy=^TG zAh6y7Nr7?!Nv>aNMnH)^#a8-d(= z;5O!3LA*CJaXzjh-^YL)5DZcabg;A-^BFQg{vyFdPI;fI?Q_x;ay`JlCSQm0ZrOHP z%|Bx(D(dEHP$+*T&g}HY;nQdwIt9$S3EQxys3hXwJ|Q_A8*bAh+5XEV6trv%GwQ3G zDjv0(3?)G#2G#HVv>4pn>7ub)qonaF3k6LI9t>tz%wHB$F9Kk36u?6#EV$Sxt(%1_ zcqwvn7yUOLLbV&wO$ac_7C=rgZb0Mh{Z1Yk90iPsObCAlXuHu!S0sHpu4wNDwpnWh zn~=vG8}3eOu!aZJYw)ldRZf!ao;6DlVm&c*TK zSD}dva&!ZE-e=8LCP0yquO{Y{vfNnk397WNAA)~#9#x6P6^)bn-N?u|xTab6}4i;TZp{K6an)jn<8#)3uK2Fb3r4W z{2nXP!stXgL89%D*UeARuge}+JIanXvz>?_;_lZ(yq@jT7YNc*K8ik!x&<2=$C>m* zRb6url^M>O3+rx!qa$9L9>0;^<=eixaf*MZ4AP?DR%P16I~Z4YXlw{4zubAp01U<^ zgI?o9t_JGgul!wO!ts~^KCQ>w#g~Ow<^(W`L5dLBBO;iSh|c&BKcr8_(q6@S&k^YO zOJg}e#lusMNyR;T6ZPgTaGhRnTu%1KUm}&oqnI4BH60+T)8-w7MVAL! zyxd2qbRu}yfhK6pUq7dv!6v>?uKBPRJlcbsa1o?dC}?O4VvjS>)H8l5aT!N)yrdx4 zc7yrmN5PKdz>TF3ornn^K+o!z<7EG%#Si2t$#y~~k$B87_*Mb>YY>0wNg@FmX`9YQ z<^s_hmLs$Uv+Bo4#iG&bR&x`WSilU*L|J~IY}tQb(I+fSh+v{%?+D9lVDRHMme8Kp z@eyVzLg2znWHTP+nngnIPPN>O78Hr1NZ#mc%nAKfF!KA%8JeT1I}ptP*~*R$)Z|uj zs6a5Yf1DDE8bD;Mn@E4nBN;=d>@m{s4J{`q--O0c``y(vCF0E}G{T7>znbPeeSCUt zkH-(ib~0;YZjdTNK5tnX2kjZ_Myd+Kfwgl=q-j@2glQ9JnKz5F%0RA>{vRH`7&_0e zlIL_-)IRrkAe<|YrCRAgrc8Wo*78`!f6@kJFxKyQlF;3Caom5BE*`p<=nh*(qqu-P za>LWITPTI5NrWH(*33&rySn~4kPEB+WP9V$B8u3&&7O#Wgoo{U98TfK${NXyV;S8c zGBie~c|j^aK>0@iTEZcX3X&6Ea3BbwoWVQlb>`!A@wLl;@Eg|vR%+-nj+GYm~{T$9j==Y2afrMKZB9nQS8i+j}iQLcYsJ}mY;ws^AYt{DEEu=3Cg z22w&2>MH#vprOCYS9U&iM!Y2yX>b_Jb}Z07{gM|aXnzBsaw?K9tlO**X`OAhQsisO z6FxGIFu?>tc!REW#qiML+1f-f&xT(LiJhlztqIZ*2gfYWAG6xLKLRKBHo{Bt<)ZMS%{IZPd6Q!+E%x-5g)F&(0MmK zg;U%UxEVcW0RD!cBdKoEz^oxQLIk1s5Ly)U;W>XV!a7hGW>|9_QS%_{g+L+0<7+!m z^1LVzDZyBLhfGdOoYIWF4;ROi?E$>Ww468xYuS9C_eGKty{p<~HXkpI07gK$zilld zB8ncfx}Y|VRWk$jm&KCy)B-0cTmveyoH%sOBGP-!k#++r0KK_@+%gRLyyh)yqVn$H z9&}U=g7=|jYVBK29MFu(0*NBv>*84-bOHQfu9ZRJ7OPD%N%(;R?27zpm62({U1|&=wG0z5|1IV3NEikByDPrA|9!2@wYVJ@cJSWbFnMnc(B$8akZ;F~+R8&-u zU*7>%t-bd1zP)4ZsM4a!%$&C|#+>MxixrX&Fq3rA981Mzjn*0G+F3XHQa{yx)QmU! zHMIg|ziDt&#!YGEnU)7@(~*ma3)>G@=Kf9B8@gA7`Z>9Obb)V?e+?{Eslr{=EXrM6 z9Cq>Zwi#vfqc|;2$1 z?0(XB=e_Y%DQC*;-MXUM;jLs%QWG^0xH zPMhli6|3z=O2@IQZbLSbB_)wgg|&6@ggvFnrI|RL>#}Gh$+&&m+v9Wpde-_%ns$rD z%vl~zhb{CU0B>A__IezpblzOAR(tX$X*roJHyd?-LuY9>p89I@3MQ_rGNrYZ-Cn7e zpUvB8Puy7*1PmBu|Z*{NtmG4b6Mm0v}vi_a_9 z^&CaTHMWan_AJzq8y?l$(Ot?@@O>-a=q8fRWq2!@Ijzs;CzzS`==`)Um;3H=dri;o z@fz!YGh2V01mMA zZ0bxD*L6cs8@Gc0?mu^ykKdfCVm<30;^80s#>=Ga;!q0-_oCJ8wCWDYVRksh+vc!3 zO^OZBG?Zp-A&p;Zy$v3G`*@MdVtt7A&3L_kd|a>RvD!4`1Pb zEYvN3w@R8xqN$}0TR)M?)9J98zR%~yO)T<#9`I>WWJkpbC-&lWGiu@G6BS=wb>W{r zQ_r8+#_ed2SogFqdykggezhuGa+xsWXdJW6%4DY`Va_{rUmon5=eoVxtL2fFm&bNJ ze%>|hX)L$$ditK|wSV>X#YGC_{5_w4Lb6k${f|Y{6FR&1JgYuVrwyynQ?$ z*)x1U`E@<@-b;H%m&MCeq}XfP+3p&?YfYJK9toNo2iZ+)+TuArctL_6+Ul}YAb((F z<^`k6&7--l)53Tci(tLdZci_=588VqHo7D&f>h3>?qD7CqL}bmNUHrlP{_4^DdwH1 z5A&wpPA=hSlb*GOf$xv7JKAZx{minr`oh8fwf1*haptZQS*Ha_tMqk>_H2BOq{seH zhstWd3h`p@F2;&|Kc4Dx9~k&Oy0^;`llL9H;=^(=GDlhCI=bRpvz=`CX&PVa(2??Y znCjz;uI!spRNQx(ZocOCtj)83Q8wp(F>U;}#AapnxEJU3tsH5S%CV<9yleMJ*;pOR z#>q0e9RL``E}5b-(bePlrP#dnjoE5tjHicA0Zj`57dqh2RixU{xfO|SD$U+E#5P7^ zSK7YHniWA+D^iwKEmMgK4F{z1hAo>gnLXOGpcXhz8X}SE8BeVa9%I{o%kzt!c1kW9 z<^~4i&cv9SJ2dh0h5-fjWe}eZ>)SI$eVbvcf!fTDlk;>IP3?#CoQ@;)pISCO+K1O` zx!lF8c>f-wb1~Dl*>2kHUytR4`lc8)>so8xcK*P-2ORh9arg1!W*aNgbb5;Hz}A;b zTY9{Ab>1nJtK3}$Ka^L0Tb<41eott;GheZVVrVn!+awb{M>vovIO*BZN)?^$_I5P4 zaWaqn1@}kZ^cB^|a&+{n6dE1LMdG(CC#5IR$uXdpIYUa^GBZ)hvsmvw&T3C?hdLgu z5i(@mPYe0o09xYhxWGnweXj1y$^N~4twzg<`*x$%ej~YKG+wEH^-Z~NlUquXMWe-| z`JtEuC!>u=>%(g6H9@sjn5&S7a%tbo?1GgTl9Up7=X`2Gh(5(bU$-;7oyBQ-Ta470 zr1B!U&@^yuRX&Z~kdO8WI@O|C*GU(w>tw!~yp3>O+k3oJ4$n>Jj8CW4eksqR>wI$2 z-j`edwl25(b9-}tw)@F$w~lvAj}!d@@}-cji*~C++r8QcCT!+=8!4Xq-So{C`-v;q+SiKP zJ((Jh@N?XJR;1mb*|Cm_y}pxn%WNuc8@}A`9OhJi=ls(tY+b~v>q)i6x_#_~qq(x) zP0o@*R(BY$-ne;sr}fI!b9cKlZizBeOvODE^NTcIZ(ohNzf8SdHC57NzDXql{&DMQ z;WW5EYW1pbmbP*A?}5zq86j)CIA;rvo?`Y{i1@v!yo(M#^5ZV6^+h&yHSeu!*L=Lc z=*!N3xX+&NDjOG79Ufizky9vkVS;sodj|x%gp-v7&C$m99P>iT17ouIDCuYsTjPUO zsv4q;m^^M~z7y9IKfD;yv&-``Ke5@owBA`7ZgaWWC6%~EowH4Eb&*lC2Jm6S=l=_0alC`_D{fewV`*ec>{?P9`S47&5FYs zw(E5_FMR%ZJkl8pa-)AgG(_XGKMrSq{`InqO@G2NoY`{uD&lpSWH)VkU)%`2+Q(}( z3M3J_-XgZ?+|;{j(~OkqKD!CNJ?aa)a5&x8p1F-i`grxYG$Z=RZrQ`B*0VDFjeZA0JyEJ#I|(BXaTP-ZVOE#W4!Y(`Q!S7x7`f`Gl&iOXuUbEiB)^m_fjQM{qi6 z;uPW7eSX~H-L3PK5&58>Mu8p@)3DjQ4KLz-*>e4OFi&Wo!Q%0NqgGhF|E#GaDW>s3 zvGtT*n|dq-a!lmFy=}+lwW+s_IDOy&is#k(n!sk>0+ zQ6!M_viv=}-Gi}?$5QQ-E3#UD*x_9F+qr9~i=&_1^vTJ&iZ(b)^5aO=N&Z8>T9ZjR}eEOCj;&$mM9?PoC$$D`OuIgReQJF{D_DGyGe}{{I?Q~(LE^dA8 zX()+<_Yg|+i+x*7MY)|C=J7gxgQlMDyLx|mo$v&RGIO%~RV-6YGS`;K&X)_B!Kb~k z)p62ddVPJyVW}JYW9nXxhXvuUs-A0q^Yvvp`tVg(#%k>Z1(ezTy1sqr_DFB9_?QRyx+-VexfP`C zHa^-s4p=hDyXhFphsUJ1A0F13XDv~9TTLJ3a&@Tl?a^7!4*Rk^jt*gPK4!<4WRJv# z#p!*l?W7wPlWW?d_iomiGss4&JM;#9vDNxV4L%PMYvBPpsf{+>p>oN0Ku zV~35-q@A#@@i{BD%{iw^cI(#jNuZx-pi1>rC^E8QYU$VY>-b(={3f&yx2<|&Pup>) zcT0~PvM`g<-QCt5t)s*GS>H=uPt{30F0G|^c=?-XSMf#scRVkitucYMmxO$sZB#5s5rzcoT1a)vy@gd52q<@q zlDcuTbA$5zxBE)XGnS4O3o@f0T8^w8l zW@&mrugT&i%1`-Duf%+J_mB5dubWFxk0ZHJit;0^k8)b)|ORT>7Goi=#w(c1BM6Rf(A-nL#<$IM zyR9F0|DlY=>lvSob}Lm+cK4Zi-bLMM@iN#_c`sJgDUllF&5B*}R*%gFaA+~fPzBGP zWH!m}n>M~|9b~Lf%H^*0xq7jRbD=Ga1Y#S!pxxrpWd+kP?R$+Ae75i{q@xU%cl#4M-(5`?m&TYWWd- zIWES3YRJ14tTUW2Q|j(ws_u1{H*7qbf%1HhPt(n=yB5-TF&=9&t;pHx+}vp?pPsOzcjXcQsi&a z+zM6WPBv2Ui6L&i%L+RW0-Ma1mKQ6yE5><$_1FjRw9u#HakRbfkBMXzYi;>t<9g{; zY@*#W&peMra4bh_b(MSVBpi)&97-@_;ljM`O&i(4Lx`*6E-oqlq}za2ifl^RY3(+7 zo!{2&qT}=a9uoa&z41#ijhVq-SEOChc|K91L~<78ayfmRskN%Q<;_;6%crKiCNLg< zA0>pYkNbl))U0mzadx`jX;$jf^DBIu+^23nCcJaCYM@yU1; z9VbQg5S#JZRrTc(uTs9-Ue3pP)H>s`B^`aZ+M#IQ2hYvUk1nIH4W52@xxY2t^r_{$ zVme!Ij?Q8F3N@|r={~nCqGmFeNh!)a_Iw}5&Ox=QNuPCEi*qq+EX(p~QdZS1 zX+PFIFZ0Xoez)TFF}qbadcMS${9J63#lrIK{q<4ZlGO-hS()pauIn*N{av!9x7+rX zor`N3$2z}#CPn{FhoAfz*NH!Wd&Er9o$LY5d)u9TDU}0ej8c*vuDW-gbh*k`^DRd2 zQM}_#vTsDPQsiuMQq>Jh@tQg8RwZ&X>8>IO)WF*+RWs+vOSzMJ9W3~v)0XsvS19d0%I%fF2-@>n73D~?1IOCguQ0Y+&=7b=h>XX z*-G(DE1zz|`uX0mMN+xCl$_~$)xVFeZdsk3AJ1+pOOO3leIHi^U#P8cE_>s>TopcM zVJmjElwX+5F4?hpqGWy0_4c`Ar&f=qi|I3OpVH*I-pdE)aK4mdbGzG5-8h!l*QXH{ zdRy`1%W>l!Tk7S1{^`5MA(!2ZE-92p|p*RlD}3F zRT|{;@)47Nu^w?V*tIi^S<(pq$`V!Ij`rb1EYDrNp~}1+=ay4#00_WoJO)ty|Nr&> zoT|rV_xzv#qp41(|M9>6_v>-~pNHLZ_y3-%>vQ~kp8mi6u0Ag0|2_N`zo)u_(Vb_qLGxPj_W<~Q>!0k};&A1E0-nh>Z2;a@|0LRNujsmu>1(`c z0IZql|F)_hvUK*_H&CeI$#!~_ZS^qVa{#bn)?evdvT7R> z3&Bb1Uw`PoVXxY6JlXbP)W6CtT0HypyZs`(ORBH>k6V*BP6)uXve#_&4fS#{ummv(j<82Jrc=MnfKIV@X1J+rl|MlflmIV%?ZdH4#3$&)$e!#e*y9L6Idp5etBhbK;io_u7zD5#J(7@C3L}JQoXZul@(Q6WWaMm$g!8*4hz{|#|322l@Ep>AygeqO zv4C?7yb1}I90+d)2#Rn^x!TleKCM+1ku@3j_e5-E(Qt<}QZNFlqLv4^Ar6F39RyV| z;(q@)rvec~83v+*n9>{w2hm`aYqlq7|JCxEA^*Tw5oj#z7Z4d@KsmsDwu#y}koXBY z`XC+c&s0YrYS1i+wKmyk1^YOEL3lOuw6|q%n~rs1rv>c$^C527)y#XpOlD4(T+B~$ zrEM5%*^iAx8qqE+%$!{YyJt+-JigJgbmIivcW`pm0`}?co@L6_K&b(OXqbl)Z6ykx zmNMU@Yo903$(rSvl?MDm5#1PG6=P09*=gV1tF|csS%@=)c2+W-2s7WFX zr7`RJ_zEQm`fGpeG5q}@k6h@78Yl}xtxY;sq*Ev0SQPXVmW>UZ&^W74o(fZ^K$_(r zqQv8yV_CWcBoBnWdN(0ek&N>;xoa6;s~OoEt=K~lHeMOtOs(2~8xn7vmlRuB5~@P> zbWch2#>~_lH0Wa`r`OW29j0!}q)Qq@m0UO{0pSusVzbhCXc$6ha7$X<%g)%48cSiH@NP`V`ebGaiSk*VhFiLENBQ@Auoud>g8bIr#l= zPA$^UPz&1R*Kdt~;jD+8lon3FhVx`?SfED6s7Zg$t?Av4vlauS5M`G?IAC~u5zjtg^d{79BQ2I^f(9XHG{2h@Tr2up;xHSLQ$dN3OV z`ETmu;IqGf^>O9|UqAMLYM&;RE)t=oiW|}dQU7awYnCdMYgLGM1j!ol@LR(;x{jL^ ziN<~bs{7Xupy^+q_5Lu#%<%dm)in<`n&|DyF5i6U)ns!!sx7E0Bv{2t8r)M|1;*9M zd_j1KqQu>}4b^eOnWL`x^u46FrqQ8TCgPJytN{dn>T--4Z38bECaS+t~xtI_TwCeK}A^o`s0P?XKl7DM?_=BOo{>?t$ z`oKDW+UNDjaLup%+Hm@JuA<-C{f(16{pL79GR-H^zTp(3`7dHq)4dSwDh^e8_ zfW^A(d4&H~M!+fD8xR1yP6disUeCVx@K2rno@vq7iQYeCJmmL2cFO8)=GB&>WWVNr zjfOgrDoPNlvMo1X+>Az!dfBIL9A`9WVK>ujE1;T2Tq_tC4<|?%}3Zn9&3MM{mXBr|2n&&9;RRH z(fdUab{Zefu8*M~elXyYU#t>q?S5l@Sfe$<)h-@*MWPs=dag_T`)eMc5R!=nS6x9< zIL!m#W}ary5OFhI#B`%!8U?%H2%hF zeQhy+&uhRWN#_*FPS>4)PfB;KT}*^ZI#u3)Ta+P9Y~}$bDWtC6Fg$LZnW$`gOXgG4{sO_8?6ZmhI+1b zrR=nX_V36`S~~C-l~fZOr-`P9rRCQHFC27)OvWX^2`~q2hZ%)0FQopkj~-7=Xb{ev zLcy%BbN%`BXBDuBwqd9FsfR?mfsA?CHuTkvI+`!0PiPW?K+<)8&NXPvZ5&;9XL5c) z!$zB6xt;R9{!+CEGBOaUC`<8g=)w>%(yy?0cr#~WSxi#2|*wTzvB z_q^u%8_z#JJaLM2=43zi`j6a8voPv?{GWO^)X1+bE0$RWQ<0y}9V9c{Py|9SE!Vgu zQd7;+9uE@`Vc<=FJU7TdN6pzewCtd_Df(h%WwcFjluFKpi1IP_tIWv=LP$BPes#8R zXq&CfG&MC5)aw2G451+^%IXveu4*2zO-5Ac$~pAs1F`t-K{#Z!NH%*4zSRJL+# zNuj5TM1_WhdRrDJwGsvVP)8tO=t*$3~bA1F5ZrOa;`A&MCTbL(DXjTOvF%^88B`9u0qfa+H52QT#30xY-u>a0S^CA?Jx^2V$?Il+r-i5wD_NBai$%M3tbrmd_o?>!%}#W#?SdJu)?UXz1r=O4_p(1=38% zOz4Gwaz#9yMtIU$UB)yZJK`mKB9iO)N^3h0cMvtPtu_l+40XQehgcQX8gK)=uk%6E zQk3*{e)i*^y^SaIFWvB8T>KA5A2g6(oE!9e*SbITYse+vzp?x1A5zndS$fYyyl|)c z2V_mRxg$vrOqHuDuON^PshPvk!o<}(@KI5Ja%d96Y^AG~fsVV{)EzEL20h69H2h_Fu?>R$82+*%ob zADM#x`zy)Zi-|TFE;vYZCdCC6bM~H_p0>04%I}k3>@#8&u31(u> zk;k<@?llz=6S>RX_*whlbEH9NuA%WR^G3zIaWMDO3uh5-8WYhE!_`YgyzI{vK$ldP z>*fI7p+8sZAU@#Q;3ubL)cdJx{x{~SPc~Y{ji^mS#I-BLDIwW@#ZVeU^ZPh|K>@FV zf%7zJNSe36UfAPV730M2{VQ=t4`?OqH<0$Yop9)J`{tKNI$d%$^R)chF6lG@zwxD^ zjpC4uW2+8PIJ<=7hoYjEt}(X^1f>b%#bW$uBf=%F0u45Iy71;z51<{wjnPMNQ{{~b z^Ur>$bA$kJ;|PxiSOep}&bexTlAvbL=ktH#)gK+=n*;vj0kin6fjw^fgB7A5tfvlp z_=5v>rHfOn#y2xB=!qBNq&L0ZLNMYy(17>#Un7N_fRG&ZLvqch2r?!6_8*k9nJY;7 zq=N843lVk*74_516zpO^t@hfmipfhmYee_uL_=+mWe9r@}qw zO>M+ma@+E?o|QabPz(4-WQ{0eBekKoOG9)OMdyf%H0*93WQbx|gJ{?heBQ3}iJBB- z!@hj7zl%q8!CC;8!5QideXGq7ciFJ_fs@gRgT6K6dp0!cdl#C2&xSht(i zkHgP`bY^~%0%akxVA8*Frs=4Gj5yPgmmV-|WJl#Tsiu1(q^AuP)80y~2~yjUcX3pT zx19BS3Bjcyh*OMtusQ?5gXSm5&M3spX$W>-5<1P6E8w64?&T7@>EB-3;BW(#0e+-! z+YfV{6Lsjj1wT1|`B%UC;){Q{U%%&;XnhaB%vqrYtdFOSgVQT*N!Oo)IWjIdiX>G~ zq$LSb3#D;I{Wv$lcoa9N!exmj^E5m&GeOVIEO5S9sQi0)!>UDH-~imhIF z4c!EAL=C$`b0Bf`x=nh{nXH5A!zqDq-19QXA<(d>Bk1F?R!6IM1$`& z)EjzLJ$C=<@I6OqLlmk~4TU^{EKG@?3UBJ1Xr_3$kdf8cD{*OSxJcLl(ra^;8}c*? z6C{9M)|oq+B6hPxsljQfkWW$uL5iWVM++c*tktgIs?tz`fUZN0m}uSWWpM3lQ`J}% z;Hu|;8`i)#KIBjQ|9j1UIO;ccinNcX!twiku&CE^?w;RXQONdsgEyQUAdo38*qiDa z7(so9P9DLhS{g~JLMLNel?jX)Mo`x4PO-6@VzU$B_$k6QPVuUfe|S-%2Hcv1M*DR- z!25NEzq;-ZMjrB5{1Y!5r%OIs&nwz+&^?BKzW29Ae0hXO!f#yBZ@l<}zx6$qK?mu3 zFTb^=-#1i3KFwFxG)u9g&Sk%pR8@Gr`f4K55F$82oan&$3qhjdK&j2o6ni;67`~}z zz8E8+bG7Z8uOzj+P{k9#h%*gK)U}Ju3U5`YO|x)|(>?f!i#h!-@dYiPZ%`We*ty<+ z^tAAiyoWCQxV^T@$!Ht#1R9Es9)ke^v#-6A20uhUE70qg0}lG94)iwstKX#gWYXLI z%SU>Bg#4=}5dW*&mB`;+5$kQu$o#X@psm4oYW1(@HS?a5rme@%N!@EEP|#8Tb-8Ya zuj5o6z5*{sBUK*OtM@f*TlMP=hxPJ*-}S;DpZwY>?bkJ|rSRNQgSeAksiRr=q`~Bq z4)uh7hPv{et=&OSE_%Je?emQ_;AF)abmEp9e{yFL{raiyzpYL;KZU}mUf<^T6M+^Hneu4)bq1V2+&VTK+ zhbFk1RjBF>xml)$TT912__~lWn%<;cQz7Kl9i#kZH!hdb5h1*8;dD7o+9?=xTK zP>q~FeyKGU##@a)DEg`O7TNQr##}O?;p%Y^M?QNzdpy~kUqDN{Jt(R`tw$Y{9^t&@9%ke zv>W;`vDOZlq1Pp{AKxqf_$6n*z40sLEGHi6G)@-kOJ3u+t$|k>M$c`3s3MZQq4=~h}Zb9v((o0ax|X#PFGYKbjwv&$bJ4qT0`6(m#IQl)rlX-!cE`^B+y%h6n<_a<+@=isj>( z0NvZ%xT(4shEvLr;NIaw_Ij?(2D-I9(?etu zGNo2?#iF`CaQ{hwzqrQs2D}P=lQW-CPqCoaHUBe?zqc5OH`+hAPW_|t^}j1vfSVMXK~;JF&}-{+ zsY`SD@@rGmoP4x`ux!CS@Z@IC>sS-Y+2`+yk-D`y*ipZK>`+4t-0|Eks101U)(vNA zBMAeS42i_Hu;i;5=_0??^fDzX=W36YiTP1AboThT7 zopn!=c;yr)pJ_-)Tu1U)6~w`19y9*~-S%u8MN|23;y;E2fKVZ1MBl=?_&?h!mp17-=1Nft+$t)68#X@-K3FjckbMWMuJh;MuS0O5hdP0WTzH>_Bw33YEX=EuJWO(E8VwspFeA`4W0u5C?sbPPgEN-*@S#t6}Y>mtq~4Y&dPR)z&t%vRB^ z;6NmQYOH>qW9*xq*QD4HJueF}2uev764ZDy>k`S&Doh*V@1!y_)u)a)c_UA3zqs)J z(R*!B@U=flT02voxS=_C=7|eAwD79dIC-LgU=VB@$CBFX6Ck)3kRE44KN5^r!YTlV z-}#hbK8DR4pY*d6zr5v_=lqQ+Jo>9ywP9g@c_FUaaD=lkWEqidv*bF%V;$kGtqPB2 zJnFg4{lsDgOo$R+J_-q12wO$I@+ygPshBXfTREPL4EX#=*kXNH~g7!T&XxON8fNl+c z=gvCG=#+Y>Y15Tt@!;a(gX{g^nC*C6s(z1SzURn(=2`w=7Z`NCwD`_qv47+5!9SCk z^I;P>L$seVhV`|!M>0hL(=nRYSnRcW-D2>kfsE_)PGtsh_E?-Dt~ zc`gsrtWFQAu^;dOD#j!3v%WrsF!Ygs2YdkDzxfX|GS+zVM?Vhpz)NODN4~sy5V!XMFD>MZ*xrT2oRG$ul+JiA2AT%IxO> zFb_{IdUhMAeQ{%3)t8>vP9AAulH%6FWm`J7KRv;Aeun!mzFu-~K6gP>8VK%xK&#Ec zcSo~L=|LmbUJ^*$H;d%wfJW==mJ{Wp*7>r}uZI7`9B zECW?_(HA#r0Fl;Jn05bXG)}K;dK~w2AoQnC0xFZ} zpIN+!Bhk;&^!F}QXk38`LSd?hcz|g_aC$OYg2u1qXeqc2LR%r?to3W-0M}o;TY{07 za^S|Q6$Ifnb#?Vw0vC9H{3e*_I-qFys*o<0e67KNkmS?{8@Ew>I^D z_)jFPsM@)ex=?3ISxEn$SK>eOO3n8i%rCBrNk%u?H&04H0QGa<7}@mB2_ixY@o7GW zT3!>6yvA8K+VavNEl03Ozx@29=wQqJJe1M@{k~uPqrVP3f_RsI^DP#*4rYq3O6tDP zH-?OHfB7Q;b>Jf*`s$nC8vOSh>*-GL^+OSK&DiZ46?}@<*9IL0ZHIi>kzpO+j#Vg@ z z5GRula#2VTCu*EkO#mJFkndt7-Kk1HI*ckb1H?)z|lc>Hs}O3$rY@|_?0 zN8=dk6Y*fciVpwgIMjy!;LCuEcv2%&Za3RaDcjx6izFUtnQ{UVwkp?8nj|k77rGUa z+U2PZcbOn}Sy-D%#V<6aU7hMcklo`FkBr9ma%MHB4t3$nLQfig?>4%Q#}%3hL;KMw zkJ?uk$Oo=}H)wc6UH`j(_V*lE$A0d)>0_II5@bsBZ5A{zpj;X}q#6oU)bJn)vz;5m zh|hK(GlYxELk`J2h)5f#%>k)>P&OsT1z%c0JKlCrZHq~1`miN}4*q7o#|yX+{1os2 zRqFk1ZhN5i{szI&)AGpBcVdx-*}B&XoFM#rR(|?_+mrcXSXMaRFIVb&RxVN1PTRy1K;Gb)}%a<8dINy%JB0*X}wQ<)EbEpO!p{rbX%nOAvo!IdDn!V z(JDPAk2R5=4;>k4xuZv-MrO1+4fc6JYakDET(``t3X zBThemI{uGcer9q%EYi?lJf)ATe>nXYyFvJ8_D2hxnLDZOSaY^m_}hy**N~{s>!#6g zMo!>?wmVlI64VUJS_eN7xCwdBS&WZ)i(3c|p*hk$ zzH{K4KZjf3QjXsPZ{KyK`ET{oiPQaSj=%4JyMA-X=eLI${?R8Lm@M~PHoeg4=f(KG zl(7!ULPtrp&czx&rG&Wl*=xyAgSjd%*G;u>n*_lJwlq^Nrl;&lK(8n$r|x^%&pi@A z%dykY7o;ip;(MP*D*Be+?Vdzu@Bh&DZr!OWUAOQL@U%%S`HD<2$y!+X2`@W0SR-(=bMIFZIqxU}M=_lc!kaN~!VkVV}F-@(z!yoouyB zY4IN2wO$D3D{8~3N6dn^4JTb)Ukayxs}Cv{OiH0@?81^?q|N7Kw=SuO3$LCJtXl0s z6}2#q4`#ZgP2)i$h?8obRr~1J=r8%iWhaczgCuWH3!M)rw6D*~@?yoA6W1Z^w6!65Y0J>VX~gT zrCvK(`!O)85BHeckhs(4Ro)zEa8$dH4k(@d@f6sps1H_9+HYPtnY-YH{BX*sQsSIP zu5nBge%)IXi`t}Wioi|TVI z;Ucg5dHav~LeBH$6W4+0;=lLrN zIbeNo`}nZhrNP&-%GbeA(9pEl+cgRoZ>6tZ2!tI^4_q6;lt-uo^p- zd~@S`MvYypOEaaWA=sV1OqyM`m}E4`|7=F}FjfbWB{JiG^G{!$>{wPZp4Aadv2H-d zL&Q08{5_PUV;q&_*?GU}*1ectbj%k$$bXz~qUVJah&A-*G40nn=#phle5ZJHh*|CZ zV@@q6&(g|2DdcaPN@eiea6)V0xqp1cp(5{jH`b4vRPgz|?;H>AL={#{Tb3I<2cGdh zO1uwO!)K>|M~^29j`wj)ou61YA7AVB+v4;WruZ6ei_u@WEZHCHNwD`oxn9ZIiHeUC zPn6pJnMc_qopZrX^?tT(K7FO$owyiG?{@pOYA9pelgak+tJ{wrj_H#f$P=mNJXe=n{qCMtCub3C{9Ivxwrq>tr>+P)9 zyXHW}_*##FYfWI(?OT1#>{^Gy+zIPYJ=T4rp=$HR{a$^s`HH!`2qDg!ms5zkB9H2@ z!yEyBb!t$sh>mul$#!IsKEX#BdhuaFb<<(aM5$CL#fi2j!Pee$$UO$ucBa$7ujGI$ z!~jFJC=qLsoRyO*iklQ-NgdJn!czq^7KPR`T!#!l8zep7L2QP9U}@7AgLU1flcTTr zMAqZP`SK+PkW#2 zV=~y@ryeNv9GSG1RPM+z9Z?fy{B7!mZLwT*`|ENz(Ea{8>v%-XIqt()==uy!Jtw8p zDIu@;vYzpbKXVql+_56Jxnnh`mF3BqG-Q9A(&c89|` z-@`|!6}jY3?26N0a6r@t_5YnYn_d0zH|GTJ5$9cpA(C2+;@D^>E+wC~>cou}fmR+r zh3V9&B-T32#}Lsuj6xH=LacLmHW&wgG*qUa#WC&Xrnd|~OQoCc?08+B;3cIUw%g%a z!wDQy@(m=x>lXUvx_{y;yzVJUq$4ZY>1_fnUzzT5^Xhyrf8)P*9TxuA>k{im+pyPr zE6=bjbsS}9)Nr9lxph>hwV(u$S6gramIEcXTvD7I%qbEbQ2boVUFMJjQ?&_ysEPSe zQ3i5I%r4JMDV?|`OU)O~3GTZw>Eg3L<5{Qw2|Jl9HS(bA^nPYkkItyLiEeXMF4+pVX2$qT#wO zQsAjW4|Cq>%Vp_TZ}Fm!7eD;Uymj+pJCZOGdpPGex#0_!A!h~ecQc%V$MJq6)2-hS z?^m?QSI+tAFcW5>KT(nsnh0T4|K}sI}7La%9%to*2|}VSg#`nF3W^V zVdXqIhIq_W9yd><3fE4Ove?N0Q9!Q0Ge?IOqt;H(&fKc6)S8V|_w)UiVXC!arR>bD zLhUGx=LiPj-D;u0yB4bcI z=2f#L-xDCQy#vi%UWObho%`zDW$on2iRF}lB9zK^&|FHb2~C~;>3++_WBja zP5oO__{vf4Gpr9>#GSu^gY}951YbzzB_H8O z$J4yxf5p;;a3M(A*J6L_?>&wYtCW<63RT?b%P*LGes`T;xQ3FV=QEV>Q{%5~=Hz_n ztESUAFrI#Kl{-AS7pP(HUZd$y#})@uBzf6A8^@!vV!#Qb9?}``f7y(XXBF zX(rk!>x5~=LFvjz9~~-Dm->$5NkC0`SwFdE;L@tjeaJa0@%`M;qxY&I-}+_$yyVt$ zK!M{XDFvkyZaUqch7j>>u>#uF730ge{qUHO_nj}r&z^z%qub9#9G~YW2KHRidMT?g zg!op8MQp!#KCpgwGfyP=)u_mAxyMrOoRxvFPZ?0*E z+~g}tRKS!zlB#IIgu!0h$g6PSm{X#a*s0R(Je`heFmIcw(-uzwV!Lt9!>w5r8J@$O zQkB*OvM5#hf1T*b%xez;fz)9vjwtYef)$V|1JE_jBMJ^sVNh38prmlo;cdqD&c0t) zEiA_GqAYsOYNrXy_rv2v%gUZmu=R8DNP}upyGM&wsrm|%@Iip2^GbdNm8=7hpS@oN zz)blAScbU7kym)Z2TF-?bqPObTEchu-5H=(gun8Rf16S)gevIp>wECf7Nk%zB4#-l zc3u>jDS-Q;izy^DAahvR^jmj#iglE=(9Lf?;P2dh+12fO9Hx(xx5eFCyIZc}gBu>x zz-4vig^+uYD=77Xsn3KOie1^6MS zADRTn)2Bfe&9SEPTZcc6N;&TMv@zy7De4D$y=u=Xx@g?ab(SuV-HIQsZ>P5qDyO|~ zLpVYYwoaQABL}JbnfPjCuZ)oq3t#ui3gFdTv=hNJZAenfIw<*)DQhs>@+3&q<09cB zf9J>(&~gBYA;aQ6erI3;dNi1O#iqkD*RWi#XIjyT2|GPSW*HB@|JW&Hm&} zq!H2XCskJE-PI}bVBz~u11cQ+S+4G2Zj&d)U2TV5XEN|NLmPqxlqq_mI)`Ji?BdF+ z|4#9vbLjH$p2K#-tGw|DDf)d8&(KM_e+C^N*KRyQ43X#wU%RojR$<6uN#QlN)~=#Z z%7{TUTD$R)D#={r8mZN>7{_BkPdi;OqCBe@M_5xpe2-DcEd0AfX(m8BM2k@|j$5Lz zJ|E(G0bDPCG#s!FnxGEcT9GNW55k}hyyL}GHyFjGPI&5KY1DtV#XmTB9-bGSe>XrE zXF6By^D*D^liGU*ZaglYK0?i@WQUDp@b&(>*0DiPLLRZU`3g;I3myBMlGoRrC%KZ$ zDGwx}pOm}G2xTQM_{FQ1YxG7K9&+LwKly$xr<8g#h>KR$tHo0mtE@Ey;&ADgHlIVO z_HC_oW@nBySrR4ReeGLZwV9W-e@V87-Qz|PKKoNH7R`28HWZAtcu6AJ=T+n3z(@-| z4#)#_4o8@FI)z`n;eYiwaeE%Vxcnw*4MMSKnjO$zy?gp)tE=@O5Hx5xM$&UIfg90%Je~&|xb$Lh% zgITTPkZHn4md7h=E89X4_2z6x?M@PdMJnyYMM{`jM)m#DjnjaY9hc30N+zGN-VE&7 zpBi&mHZvV-S9!u3Nh|(f6_d?f7UyD{?NY5FKPzQHK%0zxX$;rPtbTG{hTJT#=3Qe+UscP zH3_KGva(;Qqhz7={wWj}A~kys>C8*G_Owi&%y#Ou+ku}Pw&?vtz@~JS=b%NEpExDS z{pYH~|3|ZVnubK>jK;8;vz+_pz?a5M!-}6sB^$OqFwy24@3@|Af7F3btGXP1aqY$V zQbH}gvPO)+pPBr;jt3e}hGq%^AR2LL|MSS7xxzYHh&+s+Ifi4S)3er`U%actk5@r= zK9dDA9|5a}-qAb^zb8upDOnk@$Fpy^mG?nXKTeN_*O_s@{jKRfH~VV*?kv>(xi>oM z!8Ea&*|zDfQ_cnnfBL}q=vzf?m}>N$(8fQ83j*DB9>u8`_s_cc?ImCe_k4C=_E_t; zM!2|xJ`Tla{2D>-LL%GX7;gOvO1g-Pd7_SCl7{jaen=`lBffD$bH8-7?Gqj4S)Lc( z=@FPwCr`P8oemEUa2lNoN4byLbDN^>{?D}41h&OkjYq55f5@Xm*N<{1W|AzD?f1hY ztI5}*op_L?k5|4zk`tpb{(CwRedfKL{(+}oYBbv@x&QZ?Dq?^CQbz?b8$PPoz=wty zNEc_$tpY9fa9n$hwzaoD zTeel91e9Y=7n~L;4g2IK^ro%5yz@*Oh)qeMP?ev{+LDbK=zohvy zRohSG3WQtmho9OHnRGiHkHT`vXd0Q6@Do&`qJI8Nf9BIF{C595JofW-_y8jdd1-Ix zXQjINYCVL$Q1OV~KUizBWWWcQF&& zhmU8gkd()7|HJYX%i8145a6de0!n=A)*z2$Q{I|oz z$<+(xm*>gpy5}aSLkUOo4;!_Yg0prs-Kkf%wca>{K0Vbl(1j4_rTg5-;b8}?REy-K zzTB$byKPuNZIV-zdIZuQ*Zlv1uc(b=@O+?qfAs0@eD(Bso#q?peV&#bhp55*N)Uxs zOL0o}#(##LITFoL9FdcGLt(dGLhrjqUG!bsO@8^~ag47sAg@{$$_l&h{{!wA3PTMp zVdRBEk8l5YKBL!8aant$Y|Wda-@G{qQ&8zKDDx@R0S(G#&2_Q!3F;2g!8FcA&sQZ! zf7lcx)d2;V-1uvTXYORN2+_--^B`Rv{qm6UQ*DFdDYsgWmJNL=&4Ke+KwRfOO|IMQWKybp5`YgH4QO;CSCtw`59XasyV&-30}OdE;%&m&!O%t>en;Qc`_~k zzsPCPHwLH=J$`WZ1HI()XU_qNaX4ai%OA~MS5xkBH;f-Yn~;^E_T!?G#>@#ce+;&^ zD=T>Ns=x54)}o)NgCR1P#>8N126gVbZSm`Or}Imq0yY!!M+H7W=ldW(<8(&!**~WG z%NNKL%YVN1@*pU$q2w4d-Sc^(e*dn>&@>BJTEYjp^vfm=tAj6)!;M`4kIfUG%O4$e z6Vi9+eKG%4N8RaU`m?UnMf6&kf8ML$FMa$>$pSqGK=DA9laZ6kQQ75~w|afXrp+0d*+JZ1hQbeTDSe?Y?+gBDN^H8t$p%mW*VZ znc%5xL;VGBZS5x|=+*t7!Ej`0*xKWo)M`1Bp1Yhp$)kMQw3at~wFm9o9<)lv^QvO@nTG5T@FrPzUMh7PLj4v7 zm4$UF2D^#Fbc!)bNN0CjIO{h~;ej3}4c4m|GUEY)DZGcsud0AGf9RWn+{+~~vd@oB zN_RE!PNUNE1|$@HU8n5!SHxMe4zp3=-aGX*=nl`VV0_h4%tMEqat|7C0Xg6*4aLm9 z6GU#@yJi5X9L&BS25$=M=$ug#7F0V&+8*#{GQ--$+KoXuuRgx7>U%J$AiQB#Fe&Bp zq?DTpSmwZ}t{-UUe{avxJK}+=hFNF)>*9Ct4#?y9&+GKR@HJr3qW$qRnEhFwHQ9&R z_Rr&}uN%_fvMyEERGSXZ`c3i?<~RCv?Hy$sB^U2{dCScxuO1smIr{Oa2)nt1S}PGN z)Gqee*h!s26AawpgFXBRQIA>|D@oGSnd8|8MtG2F;)29le;Q#KNxfzERNUDvft1v; z5;a5{?`Wd_ISmJo3Tqq6Bb8U{JqbUQWIV?DNFVQl%H3=ix1wawFwha>v_9Yb92J~4lS8RSf11;y9iUOzG*trfVk*8LUW9pYL1lcZ(lmv`7uL6D*8MRz1$D{p(baGB zX$g{;dI@TEn(Jz{>z?OBx>x12FgX#XcDpv`=H|K58X_$D!25W{u=}P0)I(jFnlnx_4>OOc-#oKp=gC?L)?*W}=jq40f0ePOn3k3qMd{JSpN0SMi5>^6 zS%=X)3rq9h4!;V@h(5@Qzq`oyIOUI+$GcTiBL4EEc`!>x$6#&+uxJMV_S)T0OSY9( zc#>1PG=m5}Y^v_r!!RB;*-84ImQY3$3mQ80}OKvGckCmA=K zY8_Br#Go?Uuk<@nj(Z-9wROs|&Pu7a6V&T6o;#kS4cA4`PR8Q?6D~L*{3h~)<_eH__mTh(b8da{_3cVF&@A9w56F?jiQkifC_`C z(g!CgT83CV-rwA&Qj5bg-j7OmE);V3CM9Xjjwl|$U3&oIP67E?F`TGLS;s*-hTc-$ zfA8jFA{>3loOUucBG5U*7ZAA1|?+ zCE8ReJ0;Ma!vM1y^ZY~Cx96gdJEiDZL}%aiDKv2T9N(ya{T$zHuAh5=w1!9Y86sAH zuJ`=`(sMli!w>z5TO6T^w0lZ0{7oO?+_mP%6{tJxA3grOaAK5(?4%^$=6bg^nR1)9z1K0nOso) z*EMY0OP`%$YE0+3{`>nSJmdFo74GBDzzVFpy}8<=oqp-+rGK5H=l!hXQ!IN=fAsX< z#_PX2GcPeA1o-xAL0d&~olc@4 zz+9f~mXyEZ_YbJ#PwyUP^XSxj&}w2=4v{Wvn|D{Igc$BYsY*AoW>6|he^G0r(igux ztYszF&aqh&jUWKl!zVsb^^BQXP(AUWZa zmnQ-!G0at52^!wXRw>wiQqn^8>;_aQEk)rSpTQ(d*^-%JL23{jkGH$FArkCy9thA8X8GgDMOQ?O^0gu;QG^Z5DaIi@|S5mIdiY9tg9z=eI(}M+(%-I zO_S(H(fVYvK<}4xe;mSc`T-9_YV#7{BOOS1T_fI+C&R z*b$cQDBT2FldDf~8Rq^!`&YdEtKTu+7v&@|@`U`&tKU_7-NWb^Z|&wM@8XGPE z6Gf@kd};P6;8FfxQ^?n-?i0Pdr$eq?8RZiz(&8x zDRL&b?sc63me?DK{o|0|k9_cPr#G!6;%k?lV;8PF)=SZ|;Gzlo<;&aWWBu71o__H6 zziNl0n?6k4e}COQl5m_NDIK_Es=g~v+C`13|eUE6b-&T}J!06Fl=S_op# zh$<98IbSR1agv5KNnav0nFvzf`NuHDe3T}|LGt1RWy=_PxvOwWV)S|m8G(tT>Zt@3 zO5dn#5lc0F%|EzKU26E;ttN_S$tmHLcWNBalc%r@f9uzI@#J*%-Q}2+`IFPC5aMxo z_U*FKx9eLI2A)Jzx%16C>qVEM#GVVc@qN#~ls{fAuQ$HGJn76u%qjZxBQ&Ujp zIU!QW5|Xg8XZ}1~ht;~u+O%Ds(zV-KGxVnds|^^jPN*pmRTx0b>p-iBgLRma@CiOF z0!n{5Cs+}s2r+4ZJU`GXbyQjq;P0FmL&@3;f3=x0W$t352x9;@XIE_2g22ICT3%V*E9E${z6IH4CKe^l=h znko+Wa=o|q9{JpGQ%q;H4tN~OnKs1LdA{O#2rV!IspqkrQbE46YzGVE3YJy$e?%t_ z?|9BxI97!uZc^^1Z@jy zEC=+H6Ph$+PCfQd>9>=>))Aj~QFYIT2Y&Cu!A(_2=v9{PAASFFtnTOKjZvI&^!E{ENGTew?c3 zMR#7$=Xl}R=TiOtb@90>E*iR5a(mv))d-TJp9{VB?e(0BlKhWrH1^{{UJetAJ3@!^ zu;nhrnvJe!Zd`9@8LnB$Mb=t1=$!l5>rIE8qTRDH>!3b7WLuqk*4`ztf9(jZM6}Z` zc@2Gu!X}k#r#(zPuwTR2-q5+ARP*QwxH~KLx5t||VHBq*2tYsPtu1(YpSdPYVIdlZ zdCM^`Hjry#WD0T4m@0&4G|nO0t?HGN#r8YNU;WaWvNqf}?WElutqs%YnT9~1q)Z<& zznv8F6n;i^%N>}YCW^w{e{_RukFbg0AU+G7?j$L4p&G%+p#uHMLpAq^QSB7U!K^JZ zkEG9_>M1ekdyjtElI&l>Wl0mvxcLBu+{jP?)($(r;tAboe++Jo=i%LTZM8WMPwMJd z{o{9d^_#oQqstmZE??I7`099n{o1}g*XZA8->pAp(enqceL=4If9v=>r(V5NY9%^d zVq6C*tO@WhP=}Nxg-=zUQI$GGhg_PJIkb?NzmYQch$%Sg6iKNmO(#<@wGg@y$UlLj za)zHZ0gc?%8U?)+^t~YI$OlspV!~OZ8tRlduhKA74elpw4ipkq947^jEti_me~}8~(21gze0O7Bur|eONkpzZawT~`=4Kblo@kHFmlO||& z$V;(1$HHW#-_A;vRtdI_znmb0Uu!z4!;-`nU9)>kEdNa<5IZ;XWAT`@@3hX2G>`NF0wBe_&4+F7~>}8EPyRrpAN1NbwX8QCJm0&$aK* zh;1<-thhUS7+tl`=Xvr9E$_W+Ti(wfb&$yAOI<6&`3^=gqHBl5yN5L9=+P0dYF*8A zLl}_@3fxx?e}9GCgse?F9^jf!D20$?uPG!Cr>>kMpIxX)$dbrovxjQk2v0VA02-sNpEW=$6xmWwz$UFxKH-6KwA4QHEr`XJ@e;yj2M~=QrKj%tWEYF%6T+=Z>6~Fpc-e?@o`j%qPbT{6m zf`(F(&xr{M-QHK8u>%Sz=b3nLRH&#fxLA%9*GkMU;|w^y#^FXPGJ7b8@3I?f29u2F zl=HEte}rzr`MC3)bk2PDZ^A4}u#^Nqx@$e-^5V$H=a~BJfqB;RwAgnyz2Jf9o8Hg8 zBggj~^5Yl#qZiYB0&Uojd!^L766t#xma`ZfTILCXT<~$0@m?d<`K7}y8*!7mBJS6V z)4o0~7XN8oNt}P(bMXvngRDBqV9xL?Xuag6e@q<;(jll%z+R1U_K+gyRN3>?%GV-$ zYdO}qbeLqWUJ|e)Le_RhdR`Cv{$s(GGeG_+vTuH{(Hji^pX-gZ{3h}|sTf9I&cW$w(SD?smORsssd2h9DIJS2eZW}yVo(uQIx-7q(=2^tnvGS`Ld#O7_ z&#PgsDJZpNkb6X{1#*eyJoLHwbk$unf8`Klj?o^iQ?_hZ*86ruw~5+PFjKBVVvN+d z_wi%4NMh_k91;PjrLu=}hj?5gZ!<5as;@s|pL$$3cL4X@x?x2H>c&&%l#`B+@e|ke zHQ{SKfA+)b`97C8y<0ah$sO2O!@a@&AC{s}kYHp;L`Hv{GzOO$4rT5z^*CKwe;Fk@ zPv|x*iIG%jE73e&gR-AE_&yvx7 zI5W0S0J&fxhj!;(_Gt;~vGXy)^X6xNgYZ~bQF2=+IL9XaePL{S$=h#g8DXcV?)JRn zif}M4F|!(8i~sx#7qMbcNM`Kmf6~qK=kncY({WUbrMTKRqe=m<1u|m{y7&eI6Ckd_K(K?qYQi3@n8Eks2n=2@~v0-k6zEY z_EW}KY`f5iN<%sHOR zjE1q;0$sVb3#AP4xemYE+3>qqJt_XosFLV|Mvd+L56@Npeg00d8*`6|W&f))SQQoW zRl9MpxF=$m!m=Q*WmraS&Z&_#nEz^zALQQ;bL@vf6vk?ryqEc+Ry^{!hyOMp>c8C6 z6(=?FK`-ZAC@ImM0UWtte@~aZ_Ykg+2pmV@1m-%U{musvvXe6f8al#LBG$oquzPd zfkJCuexBo;;B&;Vm1BN+4|J5^XJyL7Y%8#iI3&(`iH)mc>WcUoEgZf$k ze(pEdd)Uo%da0S;_F#93Qm-Oj4#dWugSJSQ_nDk+t)`2Mg~HsXr~)?e;pspeWfIZV zvemr9b`XOzp1&hpe}K*Xj5GtKgDE+Mo9q8gy9)NK_A+AGS7A`1#m$2fAqDHIq(?m+G%;uT!a_EIn?cIeCO4C0Z$EiH_b4hcs+)3f8AY=tPLI|iZB%n#LUjb z_aR}xWyI^Tqp^=+p47EP2}p)99`=KP&X0h=a4_LB#XCJT=Fc)Y_vS0r6wY|M?9b2h zv%^?FA4LLt8yv_3`tV|osHSri^f8@gL=}u2<%fcgLAJSH2*a1?EyGRf5I~;|FRDxF z5Ur)NUCT&gf1lTM(f99T{eF0`K>~*MFpn61=fz{E=<5L8TC$lSXZ$)z_vF8NwgU5% zcX&GZv!khq2}5Y5U|P$>3t5l7Saev<2O`edefj?|4~p==822jeb4@OzT8|yACB_ar zj=D6QFR4qqdEC{HhQmk6XG(5S4-5dm>m%mcvtCHme`^z6-RC^y+}pALf4A1QKN$XH zu64O8`2rmaopM@E-YC}SyWaNCgWcv~!K!+M-jLX5X_}ZBEE@8JedWI&?uH+Q^pD&p zDqj#~Ntzym>HP2lCS=fNAD=9xu0g|m?(li>O|fTjn%M$XFR6lO8)Sg zE)#xoCncJq(neYok;Cmb)S|9z9e3%dGNjQ>CHedK`sf3-E%_b{GFWrw#U=ICn^b(m*6n5SjxgTk{o z_epmqb6;fs-0f;NdBkXQ7b;FEDI$t{f^J@=3Yl9eAKtM4hwRU^IF!S$`2gc-*&dLm zGKESpq8{CCl|O^A9AZL7Oddgd*l+7%MpRgXwlOzZ`#8s9BmhX%+a%?;y<*fne-KD5 zUm3*Xzj1$=YusNfSeUL(bm;@?%G*5`&!oS51a`5@Wj>)F4PRd8TqVyqoh_od>hLhk z$GZ8Y&Y1a;-$h0o;3%=*i;Cbt#^$i#aViN|T zfmAxf7JA2)GXjN{1d?UFlie^DH7ctAgW z_Ac>d-wz-@&+!{>N;m)a-Z|dlcs`yzl(-T(fso^UKsE#dScAv|v5-%!<8;{djD*;q z)gdsaxhfKJ=h9!F)AozU%$%CegM%9}ak3<>2&h7uL*UW_#<)2(!6WjVsdn1fNJdB= zqyKQ&hF&%MV=AaKULuZ}e5lO*IrjkIltn9F40$S4F=}yAJ!IQh#|uM zU#v>MAk%LiGyyke4k{RoRVtC&M7(JN#UQRJe@%Ez_h7|f)GBkwLJ|Qo_>p?e)M9sUXX&~dG3y)`I#S0U~40faN9pen@2}Jl0Tl?sLP3o zF3KCv5aa^I-JXE)#_Ba2q5l14`-&JH-`L-xl>(kVqLWeye`E)2%8mF885(}pL)U7! zGzrnYKM1;|^t^TW8vZFLhhTuZk3@C6mmj?!a?r`y=jvQ9!W<9d6Qnd~Z-F_gT&3d-yIqkG2p(|qS$EIeb!{2(HA{)MI{{`y9EgtoE z-#q8K)=n3We7~>Uyng)Y3)cChU{7R=EZRFUe->G$ZLCvf%RKa+@0umo*>SCkobk_Z z0bA>l6baePElRvX5j%61@lhBL^W0e~GK&+nV6R!a(AaAo_f=evoI@KmacLpf(8IkD zIdbm&zI5QgaL&?FWoJDmj8(WsTmYq8(fO>DUA~m2WL$J!39%6_j{8_OxaR4t|BsqY ze+txTI12k@!TiRD!Ikap&p)0Q(T#V}#m6Y?JsDnq`4PP@cA)E&n}%Gyi=Gy>FmieE zWw`MpBF%#mg1ppk>T=)cV}#dy&Ld4uh9>xeE@kL@kdSUaIQxX1a)2WAutyCj=j}+O z$2I;eiH6`wAQ-f{-wjLi#Y;J5YP#!ne;30c(e4nwtS&|6$BWO0Oc2k*y)MEpjj?~A zPL6&}AeNDU*TKdFQ&#dg__mATKJ{3P*y!}N$4P2%!5zUGLs+~A?Ei36!|C?Ji)Rhy zHC_E4>-o-^7gAkoJl%N9-W&8%=jmf5o9PM+JZ8bzqb~g)XR?lhs}vLHn}rs4bge<-ihD6WX_%%(()?l2$CN`HB8*a zcloGLVj&-u8}9E%PkAO5Ijn06f3I)#@)c&VJ^XoE@jE{J^RySM#a`q+o<|HBYWErX z8r;8spXNTa>w3M9mYihOEF~vTyDQM%!%Ey6X5^pg-9C=WK7XBmKZfvVSfpTob55q% z(-rYw^!@+MQ@!Bd1$29jo<6dYli^FjDB&?t;{8eSou^Y5IRg6_e4_X~e|8j!nmR+B zbQFgXQ6U*}!zlJ)FFrgcZWhGUoZRK8bFcNYMcrXq6Vv*?FE{{FH9{$kSpEvGcyHKi4wRTdwrd(Yu#8=6AEY zjs(G;*Ug-n$I*gJ=4AVpe*{|+KDW{#tyssP+~r9C-Fj?#lAb6Fe_5w-R33=2*IP$7 z&Qyhz3wue_N@U7ra4cRy$DteBa$Z4_*Y%SsM|7bMODD8MqvExid}X-Trsm^YJ&|>dZeaZuv6jxzp+Je#WxB>fv!re|Eq4s+XRE-QRj% zd&l?JCjxGEXKmfOge2w!^Z*ujt0brzMMmpS+1~rJx{e{S-(gIqQ3<|Ztz)GM5~-0J z>xl|2OVN$>LgUa*O+)8Z_{HP6$@iKYYivbz=%!sLj# zsXkYF8PKZnKxHQ*R~`o;sh?5Qq&xez3W-PvU8&(+gJ9@6{8_ z7bNsSaVbg}yy03!Y!Wc0k(8Da{ZN83Pm3tl%BN+4*!gn!jkuAKRLQRA3@f=e+}!UWfA(F{1y61i}rp38buZ|FE}Y7OYEeeCw4FX(H=T?|XcvgxqU_-!2XBH09xgHnW6+Cy*G z`CSVywYE{y$>Ueg-BQN1B+H{!WS!^cP3J0cU-)obaebnpOL&eE-9p!bdXfhee?c~v| zeedPUXb3AWs03vbFHJuS9IyXE99e=*3*H=K}F;T=t_PHi5TG>noW7G0ju={6qW zWRXV=0}%XYJbO<(^y=yAeUx4L{hfIZ@omp!+wCxbR!s?0)}f4T&9 z^2Nqa?D3Ry4U1wSzAZB-4~awW$pW|&pci19I$Y-w^7p65rh!uDOf|#2Ez(Hg``YCO z-&YU54wtOu-Xm=!#4zRB^!?8VDVr&8fb%R;sQ16{x_;Mbx_IDbT(14l&%AJ=i+_0G z{dhhg_P<_GHLBtgyx^2pvy=4_e;h|NQ#z#T^DWzk$9|65Uuu`S6g@GOc!foQwXnyK z6bmqy-*d7ehxt;b9+a248G84``eEJbPp=sjVeHG+rf7 zslwczyG$w07N0Y>Y)&~FF=>h@$RMU*QAS6z735)*93n>TijV^aw}D$f)9ucdibr38y`l>&P~aYng(ldR}FxsWrd!EjPSB zoBw!#cU;Npre=7G%HpPGcpcx17KFLlj;T5{#cTc(3(CIB}g0}`1d&At@dbp|Jrdbb-m0=JD{A&hJalaVi zb-VZId*%Xs|GsXUfB*Bk@k~%wbP6Bfls~{WEPMPwECq7X7X?`v?jtsed)&`!SmU-< z0`s(X@)m3hTO~kNZT^4Qd$(p)nPqM8uXKD@-xqcVc-FmdI;$wK5YPe<@uovji-Tv> z5S_z1G^%cReCzOreUR4p|2>pL{aXWt+_Ilj<))66oeNy z7yXeiM`jwge+@ETAHlT^f?O4>1mVu9QZ{7lHF+t;pg|50bM2AXroedK%w?(T{NAnt zxSg?|r%9B7SrwVe`hziqE=> zz8+uSr|=Uu?12;uLO%OX760Yf5vfx`6539gN5yJ!nD#v)3mduW(AH-7+&8BEO1qzk zh5l*6JTjr%`F6;$Wg-x~Axz@2OJ)jWQeWBg2je%4?ep^u)(zz;aDjvg&2uB;j zJOiY8=vJ$}jBTb=Kw#Rq$)X--y6d$-xxOJ3e<#a(ayZ8p^<9@qVQAObrUGT;*tj8Y z3hMEvjWaxT8pwM6`@zqMSdu^C$Pac9|Mcr=zFNNLib znQM2}TKR}!DY)5rjLWRt5*5+_aii(;R&byY!6PpB+sJd~{sspDnvcYP&+^yUew zfA_ff=;v|16(@}U3+$V-;0xdVIRpQS|L}#|`kV(gk_mlHM6w-=g14@N=PT)n>^q}D5q zk+2=0I-YlN{pvv;SAS5BFXt>gD|P)u-P)PrvzpC$CTY(ytg{tkzZj*e~n!jePKsn1NeP z$o-D}U0ww@ecM_+V~?a`I}!)Ie?;5GqkovE!yu(xdOl*q;+a*6lWVZ9rC0}_h3LQ^ z7^~>ZE+rTv#Lh$(-$u{pY@^WE`fZ;gshJ1iqG5q7$+vy$N4o*rt&r@swpW=yG2tIu z_-CDNyFb3qf7qwq{0qNqK=rd5h+d@}UXN+%fapeH+l}vIv{gmVzmUPFfBAgdj3&T3 zrXSLqLJUfa$3bx)$z^jMc6{Wa`PgTWPw*|1Y3EBOYx|VOi;ulo;kN^}v(_Iyq@5ry zT7YxR;0x0i^BO<7XQyWEBuemj20DT!(>xc7X_-#)b;8P|ALP$Xwm4WF!goBOY(q(8;d^OITe;L((A#35-|UBW@4W4r*l-gdjev2Cx~Ml1Am_a=y#6eSJ;}S9Wqbjxi)7@LIjxz*2Vd1_^4+Z4O^lkOqnmnQW7JDsMZ5Y zLcixJDQo`38vpAv#Nz$ojQYVZq(8oEah!C%fTPw>(>|DKY!N@+*j1@}xxR%tN1rlZ zn->vc>j{_<%;U(+f0Mufegsj^ADc~E7<-%dKt`sJgh3}T-s?6Vv+2FoQ--*1fBY2R z+>`aZK;OkIF<5}1a`+6nP>Ik@K)xEEGlRxs*mW=#-U9rXm{Lp$aGOork>%IAIQG^{ zqJ3%HfxSYM+6Q;WEK;$9t=x}(Iq|u$`>10k{%ClA0^%;Ye@VhI#vK#SVySK7fXG;U z-Zi&&?H;N*@%wKF?f{MwOa)uiFE|RC-}d@<{EKgXi(h({c+cLk2S*UZN3`SC`=^#=~vpWnYh{AGI?aRIW9pXVo$a9YxwxlXYv z>Udh$B^RS;IlqvC!c})K{C=J1!Ukb$S{@T~d~#sde;olg zO8~+@)SIK8x(_V;Iybz2^YM{CxJD%V;$N@vStLllFfuK#3J*2@_n9odeX^x{Ba6 z#{0Z1f5EY{(8+tcfNVe0)0l7rS;M)S9P(fv6$?vWuD1KVmAmB#-?Orcr)V~};<;|z ztfNl|JSHl6fA_YPF$KE=B*C~4uYW?0$xquEp8re*;jb89Zy)u4IL`Tey0TV*9eA|2 zm2mh$80n0?}%>2!D^;fr>yGsa|Mt5(2B^Dtk@CCIUMJx}JBUTe0$ zJR0}hhv|Vgyz2P(ZAmNtdTGszaRa}N2kwgvMbJ4cdU{2o{F$$sgDD)x>U21)vmrwje;EhE7iU%IR*hJM9wi&_6S z`+uf}+?MtG@5NhZRpU1ut&va{`Rs|na|!Ni@A>&TcYpj)YrK?Med6xD;x_owGSs3A zN-)sycWE|+J0i;Ik&#_EvrV2d&q#rfL50RpNtdd;MIg4|QCNmhWM6&qqGzW~RVhk%z#>Ab0&^dwe)MDU<(0exXoRhl=9*DFsBk)^p1%$Qf@1@(zkScaMF7b2NW5Ocqh)_+2V znC~6GW~VJ-kkApp2xtPX0I+8wCxG#g_~Nt96!tq-b-h=z*%v=eyznydmL(M;p0Bcv z8pD9$>KjMFNUZpf`k_t4tZ^yyd(&LbLBcp^;8@4BSXgN$nVo&Z z@Bdj>%Vz)DeT=v8N8RmT@ag@TH}94Fi8l`t_s{p%#h>qe_yNyyX2KjWF31MI@BUJL zv+$q0_eVKbqmxVdcf0IrKzasbzuJ&I8Wq#O>y`7I&%`(9pPRXVm?ti};D7DO_+%`9 zj05|darir3`rqA`!C&am*MHIP%F_N~d<^y{&ceK*{ZseQPtc&6N-#de9Z3AfanyTU z@&pCmV#86}}Ap5B&_kP3J{;qR;pMT9`#HAn~V|zMY zFQx{)$+UB{L`ay>r$&2L*t%`lZA*D-STY#6jLFLm8^|}$Guh+vNJV^YmoHP(QeH^Bzw)0WCC=HsWFs?=;)2 zP3e*<88AwC?K^dpjG7O2Ts-3lIeO36@e87$LQF74e$$ywd=+~X7Q+n)i=aMm?hf~v zn}dj5?Qw1NfYniA>U|;Y;Jy$5QEz?tll_HmJDzdGb!VcErGGVj#@rytvnt3_AVVYB^-a^tK2j( zJnAy?6wJj~-Jd;Cpr4vvv>B9Q1GT;||L z3`L&`R`tGse#>96#Q*qx`^H@&&hYU4J%o9`?B3pX41ep3J7IeL;+b)nw1%zmDD@{y z3dV{~2rqVSk0-}=P)25U-JE)}?f4nf@HlTeT~lDt&Tt8j%_3Y_Ez-d2-2v9OVSv(`VD1M9JA>$&mVt z1xijuJ_yN^bC8@fnS@iFPkG8xP;9cPI(H^Ej-SuLeh%7x3Z7Niv177sV0z(B6y%L> z7-?PQ4iGOjmSpk^CyH?@e)9GHh(qu_e(|q32Y)plvgWK@jAR|JWAWV&Z!QSLfqZ=8 z;-h`kn?5PS*lbr70hl}I8Hrk(ArJqB3lI*3saFqiA%p9{~;2Ls6 z#cAj1o#Q$v5A4~|A}CA`Lfe129M0hrQo?B9Ms@{#8L)8}s+iL~;WJs9et4Zw8^#&L zGk=qfd8DeSh^M4F`?FbiZ~`?kGQFHuo7*zq`-a+3*Vi}WSI34Aeqkxy0a{eYqV|I) zzxgev;+J3VFRaL&7@`4hj;ogW#>DGiJPU8!R`SOF)%p`lN8OnbKx@V01^TI*r{&4Z z0PVXjH(#Wz!*|lIZ?uf^CL20UlD=l1>~3*4|6z~ByJ6eViWh=91szW zaNcm7H%**3q{gQH#87=?2=xK={BZN%IsE^NIW{L>+;Tr~ZV=i5$eRl1wxY`PEr0a= zeHKS}x~6Zr$8q5<__5%BG1p8nD|0jNM!_o)rXeq|2zxOMTt6Z^Uxra^oXA*l!vaay za~r%_;uGOJ6oz#A6eA{)$4>_2DKsGmd>jZ$d~))O`O%Q#_`#JuU{Rl&mS`8ib?`@D zAjd1ph4>MBb%u5-UUB3a|6nGUjejXsgPBltk~eJXvo%33R&^2EMwM?yB;7PD#katOYcH~y9BH~tOSEc zr?F4twYywJ?u^y@&r}=iV9}vXNzO0|>vMu55rg=Vrw_m1w|u?h@qEto;oDd0^_Da5 z^|f|~zjHf$ojbm-f5cGD!++*m*Oz?B_s1?BTI_}WmWhbHtUPAI8KeP4qe13u#kPm?A{G9~*IY>YIQ(A}5AisDGlsmxictjEoW5A<4adVTvDI?1U4b8`t(bWArDc@La|gSsG9T2AM*m z*=R+qT!GRB^R=KyjI*8&0w#64{j!R<*;+UImw1$r zk3q67gtg9W^G|aqiEmqOjce$u^$mG(T=|(7YLGiY zhO^B)4C=Vn$g`kAI$wk{g1!~1-fe(_x)){)4XxMTk>aa{cy z{9lCb8u6o>t)B5WpQAb2#D?s~o2S4yv9nJb3kKE}Ugf{X4R+H>bYx$=j^UOh%#b=W z+iA$^SaCH^)PE7V5&w<8`{KPB8GyVLr@!XC`GHg9B23?F*KH zQD63LJN^rfm-HJq`Qe}XaO3>OqxmB?*iRlPoMWixH6Ti;F3hWu>0Mm6LGq;+6cg>d zZtQ>IDQOA+E|<*a_Q6aOnz?I?R|SGitQ7m~gRK_j!+Y`(#wjEz*Fkm~ zoX#UvqR%e!iFp)?=FKYSZ@#6TyPENW3h5C)wum^TW0f~<8usBoBadtXw~(w<=Qa_@ z^}(q%3(v-*7u93=SF)cD5M)e9%Gyqi;rSQFL4Op_DPhQaEizVY3_K$!X%mv!i(N1I z3$C9(&x>!~z4yNH#y!3H6WwQ0xo_G2H@J*hhMa_NJj@0$SfY;qDOdvj;J>q{3?;^0 z6QRSNF^APDGS9fy{`Ytlzv6ISB{9EvP2T#AZ`^8*IEFU|#&>M*Kc;`|t)Fvd&Dp2Z zH-DB0b+Pri^GN;hoIJZ1i?9)u;^8QEcB%o|Dt*&hPL>$MNS&Bd4IYj{oRd2u)2^-^ zU4yF9c;9y&I1T1ha%%YTj7VqrlDq;b=I{7Ldc;&rwWLD{oTdy4R`bWC8c<|v%`NnHfhd<`39Un#@cuZ*+~pmMG-qm+0r4B94_A=lOJw-+yGk zw=I9lfX_}h!Sppqwq>9b(9auZc1lf1f^Aq1Mk=H%)$e|k51#O6-h@*fXG2H*9ol~v zUwZK0G1M?3SKVbuABPyDCwG}}N1AjEy5Td#0fX}JOzaz?FpXVRf~tMA@m~yM26cF` z8h3>&g@nAe40I+2eE$W2$Q%g)`hRyHC2K&^p^3kAC^9Wf zt|A~?}i~g4SC+sD{~Ba^JrIh+VGqicIe*W)iUNT zLd&N{$|noeI~Xr_dQKr{W=9-TW6i>We@RCdmKVXyx1a1324^yUv8R|V$bV$Rv?BEV zz%qk!z9Ug(?uuxZE}Q0EEL?x1oBcsd($uuZdfX$$(2DtTn4SePANKuQX0^-pfT-(; zmdP`HsgW-?v$W-n)~BuO_h9r$cFR<|a(a?!MQ=P%X+cLLzKt`I@rdVTwxDCqFG<$U z+$GmLxy&X*oA4scf}8D?`+rFmx%Remh_m(HzK4fy@caKmni%#gJT8N+HYVbf(LEb4 zPiIvfTjOJL^{ac|VQ?E|ZO@VFKCgP2bC-{Kr7@T2YLgpNH%Okl*C=pvQLSF%)V;+o zW9C)vQ0kLBK>TU*^osA^LA6?W;(OhqUFC8!8Hr9l%e~2D&}GKHJ%3s8uF6kjawvN} zqs%9-qQA=To5`RY(AU$R+Jiysr69UDPxOllo*QQu-iI?dyCr+Y7_vJkTFaRnPEyvdX5FCx zlWaqW_^@XOfKPl*oF7&gRK{j!yk1R!fzgA%`ZI=|M_;GVLA&yM63Oqh9vJ-nGt}SL z0AwM_68(&AIcWd`Iz>ODeg}l`8ejmKDl&%k0G7(q!02GQ4S($p2w)np6WJ+1Ulj-> z*T5Lise&8i0|)`H3%pi%&Hr#M>T7sk*U#7G89PbjyL_(!7SNF*4v5a0+m0k)cOq?=FidM&^bb^suaRKO9gnC>&6$veG;-RcH-9V5?4vjeuWHGH0< zA9FhdTX}mR1%EpWh@lk%7kKT`2lmfw0J5(CfJ_xEu{}F%KLB;mK6V_yNQ^YB-%h~+ zB(~q&0hQR^EvDOoX+R}eXpJuHF@Jq+JS6Pj>0AQ}`1}UbZ8=&2(aBT70ms7>wgxn$ z3$&-a2H4&mkotN7yH9_!m`oKVSU(@@X8|-&9$FzR-+$Ls!}jC@IAMP?Aetf??$cu& z|8auj0k98YBwVd#a+2@-9P0YALXa7b54KP1&FmZg160ei`eeqkKS|M@O z`~6cz&G-0B8eqil@~are$g^jB#`8ViSU;nd&srW_4GfnIQJx#97c%RkMBTnIUejYO z>NpQ_jDHp7*D;5u3VLsOv)5qZz5xg9F99PQf78JF%_W990&UB&klXwHqm`z0Xcsr1 z*8s+Htkq`lPk$KF$GBZMz7sO)yyY7@b^80dPXBd{+`i+nezkhxGsn2(N;k$m;&-5SHkHOb^ zfY%hSeTnan|H1okeAs)wey20zCEx9#s$OrfKVlRbF6!%g{7F!P#QE!Tq9HD1Hvolv zNq>a>7@uhaScuQ3pmo5<@uu(epYQvs4+jhThddN?asEzmJsk}Q){iJ11SDs$fX`4q zbOlqjnV|V(CP5=r0YXgb^{eg=3*qfGE0l55`Z|~=K6|t?ty^+KzS*Mw@JCFv<8LK> zAkG?qW^#;^0&*CS;fj-FB*DOMp*lbRet!~wO$GucuiKB}#Il$dwIB2ZBvghr0kVRA zLJYKe@!_^c_a~fQWo=RX>|=GY^-<2O|E~d!s5~-i4g)vF*IRfLT;Mt~1~@6C4pvYK z+QIY|7(1B6&;a&?xl29ejG!!OOFA3q3gZBiHFP`12_`x8V&f8k#o;7P3QJCO$$tYl zaD&Tc#ioeiXBCUNuTn3tmV6bbm&_XSgrzS!nsW&mRNUlJX0+lno}RK%#wADFUBItA zSDB!;QJsQW`t1{+JE4^iCYX#Kyw&NxT5jgk?@qp0=xlqrO`&rW+-_Z z%9vKdiQ%S7Z&5Q*XgOmkq#(kNNC~NX1 zr02{!^R-G99l3l18Bk8S3^P)3ho?Nl__I8Nj8}Zmvm~2R8+hSneqeK6=zm#A?UWxx zw#w`tKk+Qh#uI*K*(n>Jcqy`qI(PnHIi+r9uigDUh=z9J?)%kJ+6VpEqr0Z9smrvw zJUZ0SG)>F3hNG!nL(}>X?LKWZwVtM(9*scjytMQ&Y-z8)HhOgW+NwRgPFqr2YJ=Y6 zb<`${K}UOK+Qb;Nwb!=8*MBmdkU?j3l$!==c19z)ylJ%EdF--V!)|vLJ9!&k+D~Ig zya8=LM%(_dzi;22?cM9O+t>B>w&}gvm-hDR^)BtpaeE&24(-cgyWjMJ_Sx9x!=BeZ z9k=PQ+io8h+ho(t+xzx58g}RH{d5})yLLP4Z=Fqtx8-g#*tFLz{eQLghd0)GX=~HF z6s_}P$%jSU@^?{lvpn!=@|j2f{~tOWIFu?d=dnL8Bxm;yK-UlYV1>fZG zpzw@RcIFWBggZb*<uKWa|Y50+6masGx%zqssLQ|$CBTK8IYGRU@ zub~so3t0;034NaXky(^96%nM$TV-jDmz1j}AV zk)K9uAmiqK$$$F*p_%Tn-b)7}?8_)+_h%B|o@S~lelaLh}Jd2IC=F+$Ib>;EST4#bT6V2(2;OGpjQPcpj#8TXkY zL8OqhD6)vKT4oTC&9c=;U!#2Ky2Eg6wfv{Ddx(s$nNq*+x_ z(ZP`3bevf=R{SGhj^q4!E*Mlv&O%uNyJA0!J}^51>Bpx$4oJZrknb^rb!ianUupFT zMDqg2Rewa*d|86K=Pi?c)xikZN_PP~ z6YF7BZW;xr)LFySaTtXnQcKHZ&KiVMzaL}?LnFAN%t%?nAQ$_CV$2)Ya!9FS^esOK zs45A`mRBHgMLPsYo@L0)XJVw*ag2EbkMgReSbz3(=4SRJmd~OfP(~EyTiQ~rdOG1e zb;9N+%`a0-XRUX-NvkIi+nCQY^jb4YWT(1Y7$iB4t>^-3G|u+clcQoq25DJM`lvi8OY?9+*| z55$?9GUv3XV8PcsFqh66qytnL)%^)zn{zW$Eb}O5kuoyu4?RasjmXd|!c1>|9e=pU z2WHdkK%XYaYst&2RoPfp>NpHIEpH{|Wht1%_VKEELI&8r!f}}KP7ljch&tWQswWBR z^rPc2>z$7Mo7Cy!pn6hTr(Yb0+3)mD$&E@u1hA6!d1!1e1{uU{4XV>zQ+v!84;f?=Qp{-yEGi9lDUl~EoX4(Ntoa`Ex=6S;9ryp){G#MW^lrCN%laq3Y5rY1 zaLxNi?R=+0^!2gj8=ZXOFdh8x!h@FbG9{;!jQ(-^sRD-m)ly!i?&eF~N`FI^+)&c5 z{_#Fh0x@u`M9b|sYv|T%D2ex+cBJ12z_|Orv{Qbh5(JpYCWyn(`CAum9&~Lg^Gq?pGe3EiA5&p9TMm5fo`o4?L6 zOteVB`(TQeSz!MX=A~ABP3k?Z1p6=gp5+|R8Z^%R1M67TMA5>ta1LNy-t{n1el0vN z{?!=ZdICY2m$1aOos9cn;(CqeiodEip7-MSIY%z(1kZVLiT&c2Ie(uQztsoVqA9Hu zkd-nkkMepy3=f=kKjY~RP;REdk30^HaqZ@mSX|%Mb&F^6RvMiWWwj(pF0NqMYoK=c z9;*^a7}1WLQWyJ-{!HTznD!d?tgpQBzJzcetm}QLOHol^c{#5>vK9UPJa;lza-$kg zs9UeI)9Uwniwf?av45Ab12VQ&f7yq>WRL)^`%bY!Ik!r(F(MPz;eH=;7bfbIZ~=UH_Dq+=4&n z6=i9Z*DM1$ea{!{oBy&sC~M9Y6t409XYAkeFaC`CCM`kbB7ghH$r>C{l{u~{PUd!g zy+1BXal-S+k23yybJ*|ygSmq9qoU*Nfah@Eh%J$qFgN0;K0`9mV(a?a#Pj1{&!4!K z&1Jp6OsU(kDkeFLbH9&e<2q_Cf3+_uO98!c)qA?YLhP>q&u~V0UFaS z^Lm{=o$`2=eb;Z6Rf^*If$1oZQ^(~o*v@6Ua=&L7}dSfbAV{vFo& z-@oHJ|ND1>^#EY?JU-R!5` z{!czUmwy@R`eWTtcz&DGyAt^9h~d}|#(hFCf0EU6w0@>!9P_{MJp3!az%$=>{!Q@7 zKl{$#s>=_`Uu(yxmhIuVPXM3U)^e?1OU5vN_nrT)M22}!SpKiPSpV5~-a%cL@4Pye z+xR(Wc6g3AI@glnS(0dV+1XtQ948L%{Cz?(uYbfd{p>3*rFGfgd0ni>ciyzF^WaB6 zQnN0*`t%FO^$E1Eeo3o${&$~HUYGryx4^Q$^JZA**>_&Yad1b%@6Jq-^j@JahCi1% z$8Ws(o{sy#muoO=lXQNZ9dCfTDmM)xhY^Uo%M!l#JXN9#`qe$*<@m~g=;NlPB=WJ0 zzkjZ65zj%suW2~$6ICnHDL{EC1vt2x*WvDg1FbGy^m7FkJZ-ReIM0;eX?z!8faS9? zkR&tB_zn^>GyCNAmsTHR)u(>soVWxagi#(h{r7V~STx@+qxqpZ64EEYC*!LmS|?J3 zx1Q(|SE_kIBw_wPsm`+iVyI0UV+4=VntvnfnrlOh-GFuqMW^T#>cRMq={IR1=>*1) zpV!x#)`V+9o;$6lz2y7(_n!?T zU<*_A>l*!h{L`;%`Fpw{>;mRLt}%}805ELR1IaMAf>dA?uvFxPzjOkjEeYqZ;016U)OI*Vbg z$Q|G7ORNvadhD@$SHd0N#{qhu_kaCOd2j>9v78d$|Ig-|OYAoei6ZB^KG+U})g~3> zQ{+fGKN7H%Bt<`w4Uh*|68rW3-A|(!`{Ry-=^)OF7XQuhtjCFg6@KcKyz6DU*uQ7I zzm_D&{~eT2R=lri=Uu*`V53MIMhQ5Q+;Dt0=;UdgAQq&9`PK^SmB3UYPk;OW_j2$T z^}1p|#@LV6pYuE4asUnNPX}O(&#eT}@i`78`1DUbej6M|3+qd)_c#(A>ovo1vvgQu z{aK0c3-P%K$PVKr#sd4ZgX4zQMVQz9R6v5bjURIPo=*dm10CxT*ZmaY{WH!xj$~j~ z@t$W^=UuONeVTZ^gm*n_d4ICxyFR9iamL)bo{FgVxc=$A?w`ti%STkVUy@fX*O;He zM-JJ$JthX%`Umx+fbD2u|E~n5Gl(VrU$?7``D?kU^S$dO1o1!5=@=)C%D0?s(AnBJ zAwSmx-xsU59MwS^MlO!$yPg1d?8iisBk}~z6tU2WeD#@UNj=X8;D0F0?_$0)%H;;g z3+47bAJk9zcmPBDf9;>~3*R(t7~lA&zx+dReQk%vUc+_VthXHOS}1C&v3hC>q$C{R zhU(WiLj@u7s-Z7=y+74mTz;z9K^`87yj>jQ+ED>E#>b%v3fCDvrfc-?aU?uG^QQvC zMciu>j2JwOM`jclw|}dT^JOZa0S4k7kai7e*NKT$_tAfn6%a_`V5MdPDGL#2MSL(h zgbFJe;`+A0%0_~$z$~%S8!$U)Pe(g40ZC2Z9ju%IwhL`Z5j+BgK*v`cESiFsV5OfR zT(q~RC3%nBAU~IrEpsg74?-7ji3Zf(iV64{;G0?BVXMcoAHUo_FAy@={LM)&a zI4;~IZR28c6O@9>;iA|Q{1dSh;p)(&;Y_1 zV}@}E3ld1vL4VND5%EM}hs=~IV4RRpm;uaP*aMxzT#&gyp1y=cU@l>PB9Sr^nCoOA z%^fZ_u`n~_i(7)ZhbYEaVJC(rm**lC3a% zuqeq6%poi~WT)_i#E6uZfs-*6mIZM_GO%ojPb5>83xBara%oLr38PKaJ-Y@WE^M0X zL!sQUK0U*p zoXOlfFI&x&H0LU5?aOtmV}w?Eh9-HfzRJ{D_gxlOuEF2$%t5)TPOn@8^k3=Wce*FP z(|-g-+$fnjpXwTQ@%{?<Nt2`f*tF-09^Utp!~VD$2|Hfc)w;bY*6q#x z>zVj4)K{?oer9QPzUtKdZpWezvy!$EMZ!Sw^;Q z`=_t+eIvB-{hPzQN?K+~Tl3Cp;pxk$tFI!nq}KeFjQnoWn(NfsRB_~=F^#n5!*_-YG(e#=D8nxA)7?ksj=EPG*}lh*APFTU$hx!?WBpA+#^$ziD0 zZChVO!+RVlGmg5)we=-ay!ur>Z_>l_v25-0IC`e*gRs}u@VJV+bZreQ!`k0B7Pf7k zZ>;@eE$7Yk0sr5;R#Ef3w#@3Q4xRyD<$vq0+UM(5_m~{#*v9Ys{?{1gp`xw%a}(`b zarE4;58nQ}oPvGjZ;3CN$v5OXUFU8xvL6Z7t3A)KZ;d0V)@`y+j$Si)*H<@2_I`PI z?zbba86Um<2D;TUT9(U2`r0h&y!*{}eNywQ{K@S5{ayF_{l)87x){f;eHFK!`+vlG zHk0qZELz6FJ0}MZ$F{j1`RBxXc7HwhJx0G|yC#AAYA>?pHlt(D|glT;prbx$1k)y$JnI&%f3# z_gjvyD?#?O*BgJN;`cJbg*N(~Hko6zt`Ho)@8Mz0+-s zY*?rJGpO7T2oU2lYYL|S>wnvwu7}m_fBui#^}fAT=k5RaU;oelxjP+i|9{7Bm2Cg- z&#yzaul{4anr#>Vm%GZ2|5@KIZ`FSLAK7iS&yx2YwasLD`;We^|7Uqk{^Ne#|If|p zw)ziD)YNIWYyIbXRc?Eo|9ztmE%yuW$Sn86_XH9(0*t*r_`<*aBL@JPD;~*1Vi3mZ zY2@s2Uy7Ii1$wd_7ModIgF0CrRj9IZ? z+1jHK3%0OYX=}CfpL5&q_%y3{`;f0M<~?7Lb3eAzMjvFbdlj?uuG1Nvv{6*GFAwRg zgZ|^PpSPPy$4j(zyLa@|Ym*aes?W8w85A>ixlns4dm{68OWqgWbbr|nqV8gKRzWi;OmWO?kUL~YLx!|*!Wb3-$)z2&1@ zax&UJcUw^XSGqr>Aua4tbKEn7u4Ntqj}Ej&-fwO1T6qX#qcQHb>~tMZBp23(cZs#( z^Tr|6rn;elmK*D1SAW6yrFP!gbuJfX?((tGzUwf%#syP$<5>mrW_Mru;&!_A?%Uhd z(59&uH1>;oc5mp*Tj$8D$0i@YZi{lFa)-~ar+YUhNqQIU#yT9HHYcwIjhtw}LCHScBYMA@Qrca8p|i4qxg@BR3G z+*>@iLgALjMz5PYp|e;g$MvN2Z#gt#UZCpTi61bTYk$+Y9Olp!v@u+djGjkRHS%w+cR=~Hm)waN3tX>)x~Hf+U0^YC9SA?Ja(r1v|7*jEcTAG zUE`P*$u;O&hv?eeKkv<^Zkor(wpz7b{g>5e_PiJm!-E!Tjoaa+t?#?5-1YkHp+C_c zqEnW$jephKX#-PQtH>@UH$PSGV^S@8{irw3H@(Jddhc~6jxla!x82~eeRN;1X0zQM ztsQ^R>370ozwfNAv&o#vmQIK3Y`8qMwSBtXy1_-apWXIjarA_Dp3maro?U}i>S>2t zv#>31A=@UIzJ}UmWA_FBd7P|* z_M<^YkH)%+XU1!~@ir^E>KdmLDvvi^!fE$K^vNwcZNZz~vO!V>{q7_^PlV%W%i(1m z&kp8bArGZ$^!H?u>Am&Q3a0MjP+Zz&e_HLko4HdQSEv4K9IV3TEFrU2oF?{7WtC8( zOMh~5;0~YMJ;HhW>2|DZcR5Ui^E^({dp?Z4;+7A+9n8+&wihOgS^K$MMX`5V9gjUX zoF@a%i1c~x#4U%f$Y5lV*LXg>X2*DH&RLuKrRd&S6&Pk~R6^g$61j9?bKfQHw3XjB zUA^Bb8g%2j$>hw=i${Zmtx0Gbdq`$GLVreczq6$VEY5eIxidMow&Cmbyj^_%I65Vy zgu?gp&LxT(_xq}Un;wDo?sM=V(#@*P^R`{m_Tu8$a@_XKuJL-361fD7NdlYOgI>3TAiRW~ zEM*!!`$Mhq$dgey>)6dp=52aA@7& z>~wVVmfj67<6v`M+bh=?j?d@EbbqkQ9%}oX+&X#CxfI4>$c9}lTCUv9^60YG(k~`m zEg9NR+7!b}oDPFqI;8VBUY}?*_h}=0jvJn54;#JLtyxZ<&Dm(ZVLCEZ#NE>7%CJ&z zTpFi*J~m)*Q?%F!2%M2P+;*+9>yT)m<&p5Rp|P8K9g-%G-a1qF(<{_3#ecs4(v4!; zYMthnDr_~k@!eZ(m^f9_FxhT6r|H>9tCbA9Q*jQCr)ECWSFqMs*;-$1mMd#;)%=m) z)n3tQ?iu}O#LJG>Y9`H5tJ^L{-NSX+)`#nyE+SQGv(RWJ#r^s+I%OQIVqW#qSR~

V8i*V?b_$v(_K8S!7lOG+VaEw-I{bq&am|ya5Y)QPve-<$1EcDV`&ff6$|Hl zxp;Qu{C;G+VXc-a4jv&}aO1n~V;{_!S=| zU7LCmE>7d+WVv_m{jt8-wyse$kFx7V8Cy+8cKh<^#LlxHECyjGTeQO6*jcySU35Q~ z<$dp$XE>}^-GBU6Cgb+HTt?;W(tOYZ1828sj}DK?{v}_RV^5pRI{xOkeDIQ1=WaH> zXb-t8rhR)zJu}!8P4imz!ihU3&cGUuC+eyDq>N^wDSI((nRkrH=)Hp2I)ZjyP2{v> z6?CC)wum;yExi`Py)8V? zx}DbNNeNxnFHYe#J%D#>%29dfszDg1R=Dh0EV}Z=>dq&V^q%%}^%!M_dS%AzC5#5^ z%(4qjm-+d+*VEbhG%x!ZDda0LUXx&}mavGfE{)8MyphPY4#rg3+ob4UUaaWOJLPk{ zJ8z9CxPO!Xm!h-C)ewMz@PSz1wnTp19d@|8bNZKeg{GOb%}l>4A-boY!0D+{$w6pn zZ={-XfNLU?$N)$AJT# z!hdO~@4GraHk%664;r^}$7!TD&*E~nG{7&pH@;gdSjiU>Z~x<@ObjuR*#MU7bo zC%c^mec9d%Sk@mAFTV(}Bk2Kt#jgH@{(nY|4xk1c z)3En7I^Wz7&F4bB0tz|r<~8N&)>D0K|3@Z8dnfteP%SU|I1Bb~HXsBF0y+wHAb{)| zG`81zWf1ArEf0ByBHHtmY{Iw^txHM~Kz>S(<=YPSi z4;J?^k4mnDIz9m{rgg<^4_*eOY2!ulHl)82T55YY%l0Sp>F{$u?8f*vnq?V`66j>a zL2kKdlMG*Y95)<*+NVwErtz!IE1vGzlqr$`hXFZff3HLuAo12^nH=gQI((@7s{Zn2 z&FOSl*1z_AEs9I6v&lExfp+7$(tmzP5_xr>`x6eHnJ7ZE-O}lkX;4iYZia+;ungU9 zTFLCn{|s&NXsZhjDaVsqTvZ!Kg=krTDcl`-r_uvV;#L^d1!D8FXT#Hu6t(GElVEdX z{Zzt8G>-v)eyjIO?sT!k$uVPz#b&aKebg`+m+=Vrx|iyj=%$rC+2-63Z-1cLK6z@X zlgZW8VLzY-S9)PZZ~|opmj#B}0q1I_0~wxaH69!fK0fyOB3bZ&JThoZozj|phdJtk z{DMSKKPsVXrWq698JQ-G7n)_?T7%Bz>puMRp!t`xAcMI=L|gO;5_e+7`svL{Wps77 z=Qe~~g6VATd3i^igq%0@djHvB>q z%)`yNqVI>!V=f=*t+!oOE>jHmXrOh-4mIxSTBgBudeeDDjPNI^1qsSU2Auh0%{rxX zK6XB#;zFWCu>YKIQtj5Tbo%LI{bbf;yiXWN7i19Dig7eE^>&psHZ2 zMiA>`R%gAf_(NSdn)DEN=V46WtFWm{a>U+h zR!R)8nB6;mu&W$ztR2JhOUg(JGAK*OVbeev80$Fb>^6)2Q=cCAD1eSqLIX#5aPd~@ z7uNnx7VY$2)#u|+>VJYtV;GvGfQpXKgJc4O6ODYhC+VKp!2>T1on!F2uC9OJRvyjQ zeg3j)-*Av`4VPz%aslGYdk)cFl=i3~Uw<$>NBV=xH4w$WZdp=dr<)^uZ}rPT+l4Pc z|1?pWm`9-M6#*%Lp7_@N+~G}Xn{>9u=%H)mn5b|XxrY!JBU{(lplU22(&Q~6i9;3@gSPce|5 zKcKZ=8wp;t;x2Ug#-!w;Rk9ns_o&vYF*2yP3FKlXmwxm+4SsGu7f)40SlN4cao-Nt zy0~+64#az{Tr(hE|4O(gxdTb)nxw24EHMWao{(urrLBMLyp*!8;fiEk1u|>b$s_U^ zLyC=Ka(_6inix)G?$7M-3y*UOPyeRg1P%=_ZD-P50ItSRE-Oq6NT(fRzZH7+a$0%< ztDIUEtDe?iL$=Ra*_D04*h-k@+2wty)~)1vi~6V*ZkV6(sc+)`(2sKFN+YdY=9VC= zY<0Ika;$U9d2ZCD!<3)F0_v`R>7lazD){(^0;|Ixxn%C<*Yz@R-iv6`u90 zX9J6gwsbFxR;W1xwBq`xdB-;z&FF@<%R}3l(?Cj0evg;HM}!HjOvlGvCOObv})cH<`?ArGD>nRmXzNE6Q_) z;PL7y#+2UI$+^aaI)dNpEgwJfSt|VMiVtU>g3EnSSC-<^r_a`LO#2>5aN0k9M1SMs zmb4NAtZ&)+jh?UT_??-tW5Ftm|rP78ud?*hPv_=rjHsQ{roOsydIW zFncHSs!b5F?|ZV62`A8mb!5x)vLfA#y6YxX(z@ zN~0^g0zsH3JWbo~cQQI(e=i(rv7ddSo9Fx;ETS&7$UdJ}=l8|qi=J-KzqFO@Vf8QR zS=oXLb&Rl)Ga*tnFI$vv5EbsGBxHr~^VBDX?L7KmQD2zA8fek8t$!|-i?P-RWSm7t z86nawyqLOy{1~j(GO1Kh(Pgadki^3a-3~h96(f@F^BmTo1TVPFoy5W@kZ52;kfe_w zvgk5Hz|Lmb$cBi^27or1lux-#XQ;J3;j0S}$2tfsM>+4!2*)Grqma>do__hWG&jEn z7D<2*88X1upxLw*3V)|3cCQHRZxfX*iC`Dfn)f>BW^;{tIIO5+w_@m&Gb(!F1!=x| zH)?{kVw-kvZj!9^blQ!)Pv)WL<4xR%5p2*r@Erw%L*h=EWH&8@THCLC*>+^S;i<1I zRw-03s8D&^rHyt7cB%$t9Z1;GPZjlO^&XU|rhMolHTz;Po_|rPdD8W%1#Z?KyIzpP zr$zaJbD6Sf&3adH9UYqc-6W}wljZJ%%ah?1rVfb8i(~xMJkJ7U!k#`+zO#m`i?H@q zsbDfMA_V6!#C#TA4eK#{{>#W7brbFLCF9>DcIg2R2pauXJ^H=*4C~14kB&6kp(CD& zBaBd?ls9}f(0{tnhV9y|svp(}<8qGBAmpv10c0$TS2~+b8-58(d#(g-+X{xhX>uaK zy?!XNiU4cR8nRu=u@gKyCJ8&3je<=cL1E<-*DX>_rA#SnrgaaoFexucK^tYIB#q#tv6v>7!X%x1t%b1}ew|}o6Tk*F9*_MT^afbI(Kdm0L z@m+etY@3d~EC+2{i%=MQ4T&D<`fYq%wQi{!1qZjI>*eY_Ow>&ShD{M)$ z!>jbfePMWw$ohX(VgFliZ+*#)m_tA$g`z6T;0E0$|C~bT0k)ovC{ceyI7VJN;ZyQK zAs~J0ZkapBQ2`MUd_-WsGY3AfpadyJQZrt-Nb0=i*;(T%kiF=$cQDN=exg=xh2k{x zBc8_}I?7W3g>W31XPCt1n(CXEBMF9CsK{vmzd6zcTnm3}>{%vDRj5>jni#0I_NB9o z^(-PDQz~`W9JbLfq{%{hTJ@qgNNOUc@(uVtt>R^7h=N}G zc*y`5IfH-3TmCs<5~ZO!L%U1!x-(G9_l%}-{g#OiT&5VFX=1z`fkT`c?)ZI>?{lIi z##w#%s*5?nc6_j!%}{&Kf!m9S(g2GFJ10|oLsC8WY-MI-lb|c{Qaqhl#I6Ax2w=nZ zrMBYPGDd}p6|Um54CW!R!-0-?xJOcJk>|iY5%YiM;ayP|8xWLW4^mn;a2a(b!_1@S z&q@bOIHT}{q^S-zdFB1`v&&L}oXw?zWX%6PtU0C_wHAGT!X%ol{bqmEeSCn~3U=7^ z!c5u)0E&7v?y-~LRC7^20ap_HTauKGDt`-OI!VTLNjr%gNvQcU?H#!QK}HwTtGSKgl0RJ+EF~Yo`%53euU>1lXG0w?X$&@zqx~V(x3U?aBX;h5@gSsvitp#j zNZc8f#k4u0fx5>hXr{0swSwXcKC*>lnsI*~oAq(6A-Ntl7ulR0ae9gyqOEMN@{b>< zIfa^Z61O7UCu6zye%~ZdM0rQ4Cug#?8wu$l-eDss`xLx(j&Y%^^biEcZNFmZRD4i9 z8o;k{drEaZ!Z>sg1qR3M0P2+};E+lEX%}qb!4&*y*2D!>S4Z@8-xT6f%`zeL>0p22 z-)ZPRO5M?Koj?ewTyFT*8#^Rb8%8Z>`@91Q;OM z2_GLYO9yz7Qka@hFjzlM-~rQh{gOSNZ0oh+_Xxw&0koN4aQy~_G7w;^n!tM^{GcK* zPtKtHI2~R0c*($1o=-|_rpTF!8|=&AJTvlc7xB3nVfd>Jfw|o`}?E!y{ZfNs> zo)R9&XytYB_IVk(Qemf%>ggbG)n4_zg6*C!08H&=e3#fg#;>^b=f&Eh&lwSDn(p9Q zork8ysd}e(@BP)ve7O@<#Am}uS)eALk=wWn;rCPz99*(f4Yrnt;MC`)=T#o*qDJEr zk9HwDO>J*rOJQ%K)WnB6!(akw=Nfy)bC%kh8Y-=FP8B%p%1`CBBznHZXx<5*S)drWD~>oCn`AG!Ft_Z201 zxKSvWn-bWE)PcNK+ZL-;_}yK_8bfnWci_u^`w;D7^!%v#=F-m{Y)NWlM%A;?c!T{U zT;ccCIhe6$C=B}*Pq^iszQP2^O(8*mHMV>&Qo#-Wh94vS+arJXfD$5LU%lZ?6aY^C zmm+m+b543Et?P@!98NN-y5*J!_|;e9dk$i$Hr?(kJP<@wbNhAkRY>q5$tEtF6^Fn`uAJ|Zv^i!0*pt>V6m&=)PK!td7cskZZ*|VN*8su-w z(&yg+cOiES$~u1`Azah*fazWzV7RiRMH0-jPo~UpF?~O9AO=>L*B+vgi^&*A?WYLI z?816(+>PP=5)^OwhDfwqCuSMe`<3mr2_cGQzg`uqE3MYa;hB>hCZaM4y*^#+< z>&q>r^)7#!${fV|hQIZ_4I*0DuAJXCzIBSUd&}W&M2#x~$pd=}b037G`?9Ovz`jUc z$NV!WfHLAU0q^*^gR}b~-cAelD50Z9>zE}+IEcY5*~|NK%fMi)KD2L=lca1Uh;G%y z+fAU--R!M@2gGrVr)l&dDA_j(TA3=egy5R&gypvu?A{v){3?I1UjzvLB{YVjcqe3! z2`ssiW#B=wZ0`eKorFHdjZQ81x=zbarvie=Wcn%Y`=ba!RMse4v7bfBkk3paDo0P4 zFHBX1uVh$}Yi9-s!1S9OT^47-*(J5=BqX_ zKPwZ_x5(mRx2>qnB@Rt3VF%QZn4r4&xV%JiXGQ$6o?I*?Jm%A^dgPPow>4lW&#tVSc zuc<+Qyy7jvRmnwEa&SX5-e4#YrSt6bl%EN?@KZvp10^17r*sfYs$ZQBOR}LlMisiP zU0aS+#klGN-mn5l&?=Di6&CbE(C(xP$T3LK0nHvV>+MIaG>w|LIvy2z``$G*sW+Nlo}sP-Cn(>{^FCbWHbf2rpv)5Y zE~dg*=(FZHCCz{z%egS(zis0T`6rcSPKs$JtWYf*Z39tWNj2a6cpfRT*;NooTWeW= zk)|xshjv!xy5Kqd!{ru7|6v}@IC^YCRB+*$-(<-T&&v`m6W&M0*ubRBmnm+uU&9#~ zF`63bLS{4HN4S*q!;Xy?>Q0(2CWVII5RIERAz^}DO1z#lw=8BDs4vGR#=QP4*Q>#5 zf;4Qq7i#-uYr|BUjb%%uKB!Y_H5lf9I)n2!y_g(kXpwGQVZatl`f}6?HvNY)BK~Mf zpHK3W5%64zPH>ED?)JpbrG+*-o4|iN0azre#N^TbXKwar%*Q%rP0ekFygGe4`P%>T z$`kR*Xp28GqXH+QGEoTOGZo@#W7~jQhvDhsiDuz8C7&z??=tzImHcoeWg%jJQpCM! zQhYt4YUk7Jq`~`undYaiYb?7< zZw)Tpkf06?h7Gl7lAo8)w>&L>5lEW3ZY`K8)sXw%?|XMk%Q=HfF)PNwuHKNSF_n@a2@e~1p5HM4fJDNt zP+7r6K}bbvXK@0jwSHZHwsc^eBEA^P_M?4o5AJ=N089)}C8K6TO+{X1`^+GSn%$!> z4fEPLegcCJaLV`6b`3eKGB@@2?2F(;g2j9_131lSImgwnLgp~CTm0u4fIDz+igREj z32!b5y_fJ{jU0dGoCyr_{xXHgkmyT(y&ln<)>KsvE$HxsfgzTE!<*&z(IhiBs#)Ki zG5AAZE$@X*Ku6fc?TS{9-~j=W*Kmc3a^kR&{``^UN3-O>qe11R#=>q3nW6-k-N)Xk z>DnjEZ_!DW_mvxm7xRJDYX#eMooVpSpia7U#tOs+xOL+f+rrT??L6R;V1Y%4mKUmW z)z(*;Mtt{9huHsrVvuz~)(BQ}Wg?8VMc9ND=)>?LA#%HDuL$7vqF(AE_8Tq(?7of= zHJ2*NKhd59)w{t+@sPmWeZdn*&YHuiF>Pm=!}JL!sg;L%X}lDE!7@xlMX_XhuUnyHCAn@D%j^=mEM&5Gag7`4@LPmQX6Ar)xmOp%TCXC}ECdnm;kj}00MTdkSCnnyCKpu^+eu8?Bu%|JIT zWFfXdnO5$9;oT>gt5cS=oA%8nA@(;!@&uXUsgp`3YN1Nn&FI23H*%83kjhf$TzAvL z{M0t~dko)p*9;44xO!pq?&XhHzkVZZ8!Z>L% zE{Lzz?-jHeC1QD7ActCfDNl(YT2|qMM#Do%q^=jQpcZ~La>L!@5Vy8Fr1YDzm3{ot z!vo;I8v-`esyX7fQcUD0*7n?;{_dws&|*uQbhn%W-ipi~aO=?G;Qg@zc(HlI>YRK4 z%wYTh=eNNm33~;9|Kl*E;DU>U{nJ_q4<1d>_P@z_L{e9ii3sCU2}8hYTOo<7n)8a8 zuq-=^P(RCg{|V+q93Fk{L9}XDDH~)~N(Att?j)TwEZHh;=`0PwjrZg+v?BLOn4;;L zm#j8-$N=xOgOCf54s&?uY+RGm2_R11JvI>0d0tMig?^)doafZFkCqIboiiN}h<}R_ zI(@WYfGLxfz^XA&^G z6-G;iLNRNTevv2=JyV87Ep1IV;&D7+0F@Y|k6@$OZD7EokEws*nYBGV*MwsYl;z8; zsA$IspP-(9>2>1X?q$J5Ub~h+kI(F85>h12{hOj2_t1`Au2r9eRVdE~eG|s~tV`+v zJWaC`m*o}$gY$QQw^=eNd4qCuR@SK|S^e?=tZzPXmTKCy2&Jmvv6J6(BgMJ%=E)cD z{1wJxRdFZ1JsrZzj5xQ+F4Bdf)!Cun`^o2VEy-qoJ-b%fzVMVog0$feCU$VHKpjoU zr>gP-QOh+!nYV8ZAbmHUzfMoQ1a^>mK_0vQwvoEmCRR{!jA1^6mqB=Cl1|BC?>* zAvYI)a+K3gW>xa7V0HXXeTd@`ogmbA3~y_f^)^U^34Xk3t1Fy0dSbcX#b` zkO44wvA#!RcJ$qTYkjFAbh%FkTL6n$9@>3~Fy?FJgjAUqlF|A~6 z(w@ft$ZZd;R8kbXJV6;qGaX!`HvjZ^pkA|19FY1rl&A88A90mrYbCxpUz zRfDRxNIim)zJ~{`|3b$PS;X%&**M=LhT6j4ef@x=Oni21%0pfp{LJY)_dI}hR2X@KUN_dGWtO(%=+^ENM^;H5hGjf$ zK$i@~nUHRTZ35xu8kxl<)lFc1r#tl~u1243?i*fx{m^82kIf;tLc?r)P;@$J8fjOA0Yh>fwaDP$DYyX>6!i6N+^Ib^vdN&Q&P|5 zXJLYOsn)x)u%E^X;Nk)namZ1BHvJC=EPaAVL0~2Y1U+IB29gygJvQfzI zE+)^Yj#FUCIVG}4cFI;N83Dy^??h?Y2C?-{ci=NF3(=BPOGNCIf4pmdWO8{1@B}ag zQgzKIbW8Ujt+jPh0QezkZ|%JKe!l1x4RnNcoxBhXqd0n2P(s<+w14@!Jhvo-E~KMz zCxfP+BKZ&PN^wlOn4e>Igr554l=RDWsT}Y<1i366e3PzGA7h{;`yGhv zx|-kO=CFRCi&WjwJB+qw?FQWT8juoo;efV}h)y=dk^rR*?P3Og`BJ#i1c$1~ zU$SZ3+5iB^Y$W;I$K?>Bk?beKqikSQ>}urT^T;EIgtKu+38;#Hi0m*ZNC@=Z@1vbe zRf*b3=yya^e%aR)j)O`h-AHv4cx^f0DFmn*+}N%^+vSaPg<+ z#EMOp?=SP`A35fv64xyWT*|5D=}w{TBg)~$X2Y4`<!Z4Wx7wsd%`0AA`R(1Bf{3 zGNkiJ1o72>1I_B2X;Q9^?|s=C_ATn7_V5!%wNC~i;&5C?)W2T~SPXilJOzXp8)z<% zL_HvZVB9*!_)<9px>hln=9!;Rou77wOl>rG6ca7XwW>*kVQ}CsE|uX-DM}mQd)ukG zd6Ym{zLN$R{LnlSt7)_Q<_GEnv&g8(r#8LMe!81~uw$OBo{-D1DJWB(KwP9E*mqm= z;--mq!LaZ0T>|%~lk)M(N^Ge7nnQCI!#1Z6@CY5gk$~lc8IQ2mUcO83-JcANFK(@~ z&PYzIhcBVi-g$bWlui#K-vs5(lA^r7mEsv>8Znxkyo_hqdlm{-wBdc%U@A~zGW3~m zq+2q7SRG+zA!&#PB8k;#1)O|M*tWSs!}NH|M_?ygX~wPZc^%a03w@6u?VsX7d6JQE zA22V+bRZNB&wa!C;Z{)6PaMTmNQwi6s3D15=nKF-NJaBC$KAP#Npwhzpco5y;yM|1 zdff^^c5ure8H?m|olSxtGnI1S4kEseEO{V*pCN7@niJqM<KrJmiSe_DNmTh2CKuhjV1Yk2>ISYw^ik@q~j+d+6Ybq`C%>dQtJhn(nLI$ zeKQFif0tS@%yR6Sbfn^tz+KrCmbD>If8577_eztZ?-*|iE2p?+9uJIwc>>Qy>$+# zUVJnZ_;|Xo^q?5?i|oV0bPlLZ`=vD5p4Iw99X=6mgm$1aR!_7Vgi$V0T(E|Ej6&eR zyGT3zQXdH^W%yC7jJ}W>;}}hfP-dmg?S2>&lQ&P|)V@r?J{)q)<6SCI9dD+Q*I%a zST`d~m3jemcYIS$v!OGyUA?D@XX!4jk!|UTQ$r9hY?XH8P0F{_I#4gdP_m!Kj%hQc@2JuXa_&-(5Ru`y&;zes%~__!CB|$wzF^8&>?$JdQ+XCDes@j`RAvaxc>%Y z2Z4j1q&zZzgR9tm7%zTWsCXH5c#^NK=9ed1X}t<_v#SX%<4c3Rul@>ii@W*h;mkZ% zAW(=T`;p^i2VS`_jNi;PlR>C61S_K9I}BDmePrww#c0%5vIrU5@Nv>)s|vbzFbeUuph zV!$er42~8px?@j~6J{Pv;9Pf>KmAIKjye#E>k^bZTDdHohRCPGPJ&8nf6dhal3esL zbjfNcf)trUjVIrzi&>VR#$PpSTZ{AH&G1)K1yXlF@!Gw}dEpl~8@c)^9XBw5){O%Q z19mih_Ecjm45}*BR8`=|M-48zdfzneLMI7+MYDH?8W6*<>c5lzoJ5Rd{6&;RnSGOg z*RQ7jipGEbbY-~ze^{Bu7F>3iM@AVa@Hb(nxt2qx!^SN}Mc8Ls(==Vm`kKXgl{z+IYHNMEa{pn)X_?lVteg}Izo;m;?JZB6Q-{xKaY$YSGbogID@yF0n{9Z{FdS-e+a@wDBNrcBH2iYHep-Yj)7Pv78wm08VD0gOaOEzY{`H7yx ze|=hCMAm{~RmZ~Xa$>LH+KJ+~Ht2fhHlk`Jsx1e6C=0Vr@FY*h9G(y&X!Pqs>zb~-^>_Cp}dl5aW8RK^C z%_N>ET9UB2>(!&mb@3hbfeE?y+!fC^Z7rLZTo5S&f561@d=vtZ4Tq5*G#}*fygcDa zdISOuR^&W@56j*elSAcYb?{(uO`;zmW#Wa3kfTB-hu(D}x2YUj@H@1Z!zF`;kr6O+ z^Q}w^wJ^$TW5CiKtHf1G#ip0~d?hBp>k>A6%Xli6cr3~JH$GCBvgc%P^b!;u+u}`y zn&T0lfBlDy23pc>R9JJcBf$;R9pRd=BLP_p;{oeX&Flx{U3bTjmv1zFQ{JQ){O)Dj(x<+fq?+o+PMEngkk5thD3Q2SV(?LQh%+Pn6sbASt=b zB4z53-MA%_hIVzDo6>J>pNGSdE{!oZl-R*ef7212`dT#)Z(F-HjBE6mFp2GLtO6uZ}0C9E}f31V{?4vyFqp>wwe^ ze@U&Z;)2#Rl1ADKb1M7my+XJKxTZY#+LnPw9lt_Qy#RsJ=17>BwHVmHx4J9Rsa3S~ z)uQCcjQC5U1V z$w%?cmHzw+pi_%XfM8NmL5T9w5w6#a$6a+F`+>85)$m5&msagX`#g?_Vj6sj z{W-eJ+ir!pqvYo0{`9K-+}_^P`_Ya{K$z@kS0qcmaPf!TVZP$1&bD3=p|S#5nPLeC zjhz5k(C8PM$sCM$?$GngpDu!ef5C-hW2pdSl1)HQ{aAb5QU_K>*HGz$0y1iHBqeKI zz_1Jx#_2AXdQ%fS;?pa}Q=cz1S2jDg?E_ad&#V1d-iUVbW|ci`pOy$xZ?&u;faKlc zCe)U%V?wZoW{F+jaLM7C(MvS%k1^`Q{Wby#%0?q}Je)fe1@059fLQ+Ye}?1Vy#h(? zssNqIyru&L6idRhF|c|>TesTZUp<9tNObgtD3-QRm6X|^&f>j_<(G71UrRCCj*B`m zS@W3JW@guxjR8R3ez~Oe%47Y((nn`5H&@mL4kB_JOe1~UINV(~RB@3?vC1W5M`DJV zHw%{EJlrq*exn5O%jEuMf0_&yuX%BsfaTaZ291zavZ za*}xsoiMfw8yda7XAa4Po;ScZYyEYa&>d-%{&!LurPeJ~KBa>)e~+P(?VP|23Bp%L zQE)M6-K~7mng+)T#h<$p@9Bx)C+mpiVeR=z%jH^P=}$QYdp$}q!OrPV?o$R({WQ!h zv__Us27DS#L;z0;HCF}bPL6C089A%D%C;AQkxaRJ{%O6o)Ly6M$mSeYZ zyE~uG!hmf*e4ttsMGs&-H7$V;xnEOWSFE111gOtX|E-~;RkI_wgqWo^sAmO=Y8U=v zLOgM=G!EkG^D1 z!H9tw}P`8O3ihhmIT6;pQB+PP?=4vf=U7Y7L`f^Cygd^vqNdHMY7Q={v_la#JC zl*EgyNgre}fA<$JyU)CwmvjQlDwH7ZrvGSe%<^p+-2+5#d<~HZ=RK83a&aF437-I% zQ7ONxTB!i2ok!t2DGWR0Q7b>7!xS)yN{Dc7c+hm4vg%|URQ}p6z_yMe5t~DF~{H;TD(%e@P_PI|};n}gvH=vtX5iwrAe@$8>Z<}vh)l3*3HwnQDZxi7s z7}{rjR27MWq9fwjXg?3NiRqcQuO!pn@LOOD@(u4!@)DKMj-4%UrIAr92|A$nrwumi z_Z4*awY$2GNh}^5G*GhlI9c9|wGibrxCI%#JojK*f_t-8xi4Y&Pn77uQY+!{%^@zDXV zm>oto595o;f>IKKU1p4wo}O)O^p)}5D93Q7>Raid@vd;g%4!=1s%HiP92^uV-~rZ; zVvIlixSrE5ySAoEA#8tEC5^PwY{c10uYG#(e*pH=#D`PlMhGI26x)qQb#c7&Nj`oK znU~eqQ5JAvX%b^Oviusbd*ETmRZP}+92`U;FCa5}3?ROci@GKd6K$T-5EpcZ3Q)62W|q$%sfmIs9RDydDoO zf6@lsGz&GZ(G-8$=^(EMRu=@Fu+=w!P-20(u4lHU)&we2nZ9dm+Sy5%G1DWH zq)OvmI3!ix`_H^{RDl&4g?6vi=e!SWU}>1LE#DCCysto?dgI34V;DY&a(;|~f1Mrp z@H(2NVhtvdD6!KxAxY3%fl5%0%GndszE46eKON;*4===}7B8^S84}V(gJ2OI`8rF0 zuq;gIzwxMB&xyYzs2+XAZ4d8bZ`K$TLacXg+{ScqXY#FGNJo=bf>CUXUO%Z`cnu2| zAo;@)m_B{(Jxf}=51&Td^Q*>Te=^^Jhhc}#WMVl*rF!sR4juL~f8GFOFA%vs%QoMg zj@wJ2Agxb7w)P9ts!;)>Ipau1_m2_8Eh9i1zA+eahkflM^gCv+$oR?adlFCUkn5ZW zBty5@W5|H-Jf2Xj0HUWq+j*96KV!@p@)MxzpZGLH+2t*+B<^$E?%GbARs4N)41~_Q z2^<`P`&T`+(3+NN&!z+XQutlC9P+?vKehq+QPMN(bs?!nN{A%E)a$~z_M~vz%%AdB zF61TqLOgrSOPh#)G6uDJJU4{^uv@3zEF72aLkSjtu*=bts~>@v;*+{8y1bv}L>sEU z($OvX>|fsc)s$?ZeoShw8hPjg3IN3Mb1e$x$A!@3r@SlrAclbCI9*C485 z6UPaEX8e@uK!#YWVK5p1=aEOpc6dXDCk49|UX{0RFws|NNCH9tVY)83da&yAtnN^8 zt2YWw-SJGO2L8GC$y4coGUb5?VU|z;uHU}ZRc)v^?$9ySuC;(lVxZO)hJ~Ly6lJv7 z%$@3_gB;(7b+5dQ`keMpnzlX|n}o9b2QKn|?{MUZwSy_0cPpFQ>nBE9XI7o0Zwat# zX#`_c)`xb>F=&%hsX&41xD}LmRW$5ve0oLjDQb6in!NQ55lSf7OT|5&X~HMQNRrP~ zlszZEf!W9d@!EW;)2voK-1@wvBiJt=2*R&ju#Hg*>vIU;F) zHm91MJrFY|1NvGbgext#mlJHJ~)Vp~gEjgNN=iKETMPDKNM?AVSijBFdUW_p@5{oKvOA)OD~nq`&8?qrGO z6v=_2q6hS{nF%Hn_fLS5ptaVw61hCw4(%&_9Oiq5E`d&@1?#bKneQ)C=4HswQXFEY znbRmhVXS%jstdt(-f}wdV=CeHIS^oOgLQMRrbOjnCr|49%0{^zqbrq9kZeq9pHZ-0F)ba$&~Oiv*c z8R!m;J02B_^pLu6OX_FIEiEGwjg{aKh0eV0Hq$!p6k+KKffDhglu zms|ew_V65aLR``}#n%Gr`JD85e-2Qn(j~b`8z6TXii;f$*PpWiSH{E?)|y*RIGZW1 zPDJ?dq*JU4Y*5(py$qs%rj6yzzc=zc8DxPp5e^Us(;&BaQx+^kWfPsy&yuJbWX`;oPQO0MfI zWKY@MlAKt)*z+BK+hIVueur|dT$H2x-6q(ldcw@1r`fciB6TL{4vyXMJb%%g{Jvek z@ifi=miooG)oYkv0b;bsX9Z0^kdl=bJ18)(De0*-2Xv=u&ldUt8wYscFE7f<5Pr3S zQ2u0jJ40K4_u;29jM$scpXYn4NECtZ8XtkDt!mKO_uFBNe(o=NS1HuXmMo`ZFA zJ%pjuSMncL0R%^_cH8ti%eZs;C6X zzZOpHfT&7;^!euIpCu&H^>Ry4_ zf}OTuPY2CC0MgU%S^lYqeXEDv1SZZdAJW@^D&+I;ExkZ_9u752>ib;|rSctX!Qj0P zcQpOZR-&F51Q}u=;-!BFzI7!};e(|^;%{Rrd=kqtFK?5;;w7M7>QqI{a!b9}66W8h zgD77d5IIjy`F`^9_|_?yip=M@)ba9^l*J?&KZ%#BN(mk~^W?;IKKw$XYxXLo^MFnP zf*N3T!{!BNiI6sX18**CZb3Sos0})fYhF>2XrqLP%*dx!3Z#mDmP-jPe{VQl5`A*Y z<(WeiC~yb^hWVRGQ;D2btjeL;zyqUZ;VWCC-uu$z)Q})1y5YFO&3Q9|`B=VnYpoKu zb4b!%#<<_TmZTv!$+2IYgu+jbNiQ7Il-0WT&jBwUq8QgAmF3n4b(jGAC;>6B5VP(# zBjb_E^lW$Bko*}V^j^jre=-I4-M$Tlkvh`2n?8|G03f@vQ*#550S+fC3ph9_D`p6{ zjvnbDy4cAS*eSe~7hnMv(xpc!WPr#PSE}dzFI9(WQ)EYFki(IA5FkNj;b>yvK|CKW8khsaIXhnn!t8O~xA<;=&b69*^T==x!f8)P_+yeO7=_!~+?o*%$X}%b7B?obH73zJxrcX&XnZSkv9Busb~3pwTZKRaE{ut$ zs_N0I;PM6eS&RF=59fN?=m!T-XTYB+3?qKH}bUv!uLK!h`-8_TT+8D6|zZ>;F?Qd;2wp&D#zTCaLe%(AU~ zw%>|l7ImCOH^!0pv0M)N5B5VF{sQ|o-GzLvEN}Du zpiwJcC1ZxC1S^$`$*R(OkwO4RieRsk3W38snabOHLG;8@z8w*JOekybt@$n)@jgl% zzA&+=t6qBLe`hHNOI6@xoqOP&^t^$y5cUKBxONGZLirRJ5$R3RjD}`A^qYA~uRk;v z?QI<+M20Jp7+m*^wys$4`dbIGBWOs2`8_)sL2D4dXd97%(N9A8&|l!b@guJ3N)sht ztWgMMntB#ji;=fn!v$7&#q$SJGq76>An-9w>x7v^e{O0SlUy7cmOGytoc8M!B?&QA zqp7h`s+%B!61XfT%=_`&&u7>@3)ysqYKB!AakIcO>ruIuL`m|UQwU*%beq15t%E_z z`snJ_JiFp67=w>;aOGxXfg==^Y|Ow)=~{|f75*nM=g>U3NDf*6q3LXNh-bHLa;=$fMhRvpR5~p8Xn%_w)`b^!;Prrxb5qK z7jf;+Gq>%Yh5oTi2=K@-jg30yd;}KP#pp0jQTRrHk=(k=QfMq`21CCU@p&g^EDJQu z1C&1$aSavBsW~l63zgRvp?`#UPUAKD4+D35f0F1Eei+JPJ>p9p#Jnv5Vuxq%5`4Gh zf0FeL4HDj1#z5$-gRSO~=Xt7Sw4HjD*RQI`W)7<6{(?>;@nzVXFzL$m$;nc$4&pDr z^na=9Oy67t(GuE(pwu7VE_I{fc^S|$pg;YV;>{4(lTV=mL}xd6)6xMYnap6A3(;ex z{qT8Lwc@%PTiUf^q)zl1yr&!726r2JMENPkiVYwi_ z3r#$ft-apa()X$(gk9@OlMdM54-SUtK;MYEq3>($#UzB5p0$jgTAVi%x}h zw;leJm`Pg%=xJby zkW$@=E_`iX)gNR54NuIGz7U#LTSDvUHbDkOw(zaqK3`l2UKRj{gD~*+baegp@f+HA zyI(%{3_ISkzS%)(x9LOD_G?e+CT}u~vAK1VgGR@yy;1A>{es02MI3fO-^$<2Tn4qvZ6frm2^4?ghl0GQ?9@n3Hf=Y3A|?LdI^>25y)En| zPySF1KhXhN`g+F0?Y2kK+S|~_9D6Od6HPHPZXHed%a8dQlTACB#P-ouSC7wSSiSlv=n_B&gRYC7JE(sJ zB18VR0cTe?sEXB%e4IRr2PR~Cixde`d7bOWh%^>BDH+3xxSm%6k4LnC1_O(`$avjM zwbwY;HEQJ{T<5wB_}&sKekc2;<$DaFl--N$Z*LYvh)vG1DeIYA(~i@8_3{yT8O0G5s_z z>!+aBd%MQBqbYqx&)-s0&gIH&$V@h$1?raslQa+H&cuwD5nk=r!td_#c&VyuGBP-z zIfNweHZp^}j22pt24F?+9LV=Tkg%CBV28gB_1x_ZTYolLBt*1rL->nuIFaTtFV5v2dwE2Djiu}kKqkGM; z5FPu)!>Nf%&Y4!F?Jyy1-Yc{%* z5P86f(&ok7JgjRG(Rw$GbFpX}IFM|>h&ubTtFGZ9-5b#GqRmYfmY#p0%Q80x+)9HT z3JshH`{U=-eQUP-e%88l?mX%s0`d_h85Y~{bA(0~s+ZusB&;klT@_s|QPO2)ZtAbEqReDd6p@KFfMSVd zL!cB6(s5n1+UZ&TwO~uG;M^Ggg+7zKlH@@d1B&wQBo2)cb@9hpCNoYJnId$6b%9z9 zY1SiS3;RO3E1jicJr@*^4qF}EFU2&$_V#gG&zD|qm(X+GvHxr?mBBLK0n0D32S(!h z$Recz>7n<0y={MXewt`0-P+8P5jFOJ$d@)kWW+0r;g=ctSqrcOc964`8^6NQuY@?~ zE<*GxkjvwFsxq(&R(Koo>+T(s`&^=htM(gA~g zQ&vPlIxn2W%D@`Iqp4&VrRDJ=OK6gwcRk2Qsc{*yjG}+Ggf&c4Xpq!3T#?`LlYJFs-BdvS z<;R?@nfrfct)9zv7k7SApH!?qt$WjF2FihVVCsKPG!!na7WD75@c#DE5LqVS^mlzl$S(Wsn?nOc zVQLGNM>En=(MaOZ(&BI*es;4NFCRBu8JKX-H|dc4(zuhaw&RBFXi5Yk0~2RE(2`&m z#=f%ie_)8oIuen`Da;$RepHl`)=iyaX;wb%8ILYDkn{bpUGf*uQam#=cr}0P-a}my zUNdL>Qw>^$pL`6V-QPR6q*WILB5T%4B+H{odf(^{f1?pW)3nwk$vugs2j0&@WuZe3 z1Q6q-as~6WnbxRd{Kaco|puH8W6#!vpir6_M za}GC*-1*K4HY-c!w3Q@Ung9>XPcG%KfhqJ_!)7U74}l)U0EnCk;3{OXS`sz9w22Q= zMOgch^rld>4mXUqGw}Bp3gqW{S)G?dj---cPa;e0mz&9-0($cBm&}iw1s8p1vedP0pfcD1B4_h;1u5HM>63A@ zCmf7t8Ln=k^L^&1AR4K3rFFqk?A@Wgw1Hh2gH4BpAM1P!)%05OL8Op@AR;D=^!A1& zF+wBYfK&nIu2xo?jb}KjXRtNw96be|^V(cR#LAbDd4Msw-@3Yge;eNhvt$yM14~%i zEP@WHRYeUb0V}ysEc25W>fJ>nC&YsH;w!nOso>@7SM|p=L9Y;~?zaA|&UNxo?F=Xn zfi5=0dlwf(8(J>8&N>)F43)&-4?sA?e5ou+odeO_Sg`60cmEpos!GkK{Lm}>s&exo zHgno}!_SLe;5egyiMoP$knomOX(og<^DG~&v0lS0>gC_GQ^wW22$Z=#1JXjLdV z`+4F%B1(JMt1b1(&LxU*y<&)THe|d-dH11Gv?l_}n@W?+%A$mVf|Gn>Z5RK%*6l`9*LKV4HgEjY=)j{xa6z^wUeq2e zh{@Ne@xK^*k8M|xCS34=Gynt=FwDezZ)Z5+jX>C^?pY7{=Yq$?b4(4vbZy>FOM>uwf??nwB^$sIV7DT_fx1< z^zB!02Rdfh+Iy$um5II1f8)KIypq##5+>iQ^XBLzfhX^T4=lqODbOKGkx;UcX1aKP zaV&I!P!ruCUxh(lFA=8?CSOfAB~pQGb0{@`O5_{uM~>2MhKTQ99|QCY_UQLG^3o|M zuSt3$>7A!~dtN40Yu%3a!gvaSr@mu6x$BNmP2Q(bijT`jaqaYpH1IO!%<7q)S17k{ z-(Me?R+V@~^E0t6oT(etL?8se?yyRNo4&DRvT67PRY7T9nw_FTq1?#*gCYV&8r0l> zaT&_0+v=xul4%^KB^XhA*Mp1Yw$_>+DyR=!X`6_n~YvnM5z&p!9D>7+ks@x;nI2~f5}S$!@3Gb1 z_^3}7C2h=&N3Z%g8&-S1#M2-KVTLXqC0&N|xbY;_m#^fn zZSuQ4f0QL@lcp=7!I>eSwh~uY@cKb2C zf|6-O{L@$huLx?BUytIAh6UAs*4;-;!|qK_`u&V!MTvbk9R%djgFI;hj+gF<)1~jk zW5yaet(_VSDR#BvuD^ZJqpNq073}4v5rvu;^VFYS=L`7z%EFIKc)LLmDKi_`{ZX_Y z`JRcZB;!esgBSKuODE=`c}t#8vWt|~PVaJi_E^j23=SEnn9Z~xfX5|xPA8G2HmIcQ}5vfe&kIgj> zljrpZllcgG$a(Zxvv*{7Rx%tBiGMU3X8wcBK0Q87nz{t)a!Ws-({-nzrE^|g5> zCy@`nZSiPVk)M{M^RMuKUFR6B~?xuJP{7 zof`1_auVBkdS2RoT7`3+R=A3oc)mVS#cHZKN6d}0lt#nePur6Vy?swc^C;S-cV=li zl-Pjlpwic!PA|bFs&{`cc!}EFrWL?=T3a`%NUf`X7rPefvU6=**RaiZW z4mW+`Z(hBjjj3ggIrHu*lXay*4)0zebxI^-v3FC`UXUgPmJGv{?(v>x&wPp26Dx0? zj&+DD0?_M*RyA|EKW{>kPPI*|BVxLb`rSUexH08U8J4*0J7<`CWa>N>dDc4XO%feD zt>F~p>DNo33auD_`0=5&1}_@dcwa8X?ioIfoxT^i8WVi0^W;5xoxKn^jMQT6^e*H%W-e5sRM z$fFkIozyJ-d(4^lXPoWSKQ)QvZP3whurqS2okrDrQn`gox{ap&KHYgCeeV|Mz4Gjo z&m(z{y4l?W<%!no;LIi*_L^C4N^zrk(=HUdnybM`?#zp#^x%k@NX+`gX{GBE;mx94 zbb|43zX7*@H+fgb4=psX_XX*lBo5K5C_4AUxS|Dkq{=gd-RG@x%n=XT<3y2d@I7NbuQCjTdM$vZ(brGW^P=^R!tT{H;D|Ptp1#kluhC|Xq zbGFHa(IyXj-063MCrwg0{o5@Y@)>2fZW;HuD$h25aiPEN-cH+cY&}t?^>MeeSH&p| z=Wa)TU1;m*X)BXoQO1Pp)?i*(uO?2AcZaN2;n-unBussEXHzr&7-TeOdcvQ&zQIQcU)TBi})_eIECC_~aBj->UG++d_~nJe{xTcDtpi zzZxXRIs7pMjHi~8V14)iRa5h2AaOFJ16~Ea{Wb|C4L&lK-Ti$!^(mJoUGRsD`lQG}km=`YCjRu~OPzU}?;(RyeFb0ZT#xCZ_n(j1GpX9?M@Y`m zHZ-3!a1M$SBP3)>il(pYqKlT+jhs1Nbe`D={~>$tsp90MT6L6D>8UM=3%lfQ+$Zsv{VvR3-BYxfV$h?`>B&k6 z<*isaYrObp_9T7H@!f2_eU#53p^>dsWMQQDshxAD3agC3lFVtU@54HWA#H-=dO04r zBJ0ajG9>R!whe6FZp=rp+t|wIbBF6L5rJ9{cVX!0_1*))Z2I+@C|et%hs_IrSVYBV zi{-BeF=1omJjza|=GwX9FnY-Tpl!YOF@o0b4`S>yF<$~C@3GhS24B9C-k3%PSo`vb zoQu`0HuZWrHOger>w}YB7mi4|Rmhf|G8%K{aD`253F5-l&8!K@r0r$?!jR#eAn|#Y z5h(imN)5`@(?%YIrS0TH%HLyuT!}9AHDtJhm9eNUJ1=W>_VQEbw$f4+H%@5j;z1Q5 zw~Y2MVf0ITR0-Rih)5%TIko8{dqx3`m7X>(40wYL!K7TFR+ zzag!5{U~yHS$8-@xF9h18)|&A?g&QtW4f+}?tz8!r0`ZAX2WdFs3JqO?Abf`?8Yf5 zte3;Ig7x;oi=ORlY-JL zL7jk{=hyX~UVKxWHs)iFTdF9&nYHX<5;4Yjq9c9M>xZg6viVk!-W}vfBT8OTCyb-plV5!({Czic3?;>9_`aE= zp}%60z_g_riG@5bUOxg(zk|8T(d$K4rO_V^_NeXpC=AIiecpZn%2(d=FjS8lFw7Z@8LLcg{JDE~OUl z)-x5iU=xn8Zv?3O@(Btots$n8ZmFuAMITyL3LAK=I5$n%nKfQgC&R`L>%)qkQA9J{20yEV9d7^4X=9<`@^$Br`dads!w)|{m8@K`uOgXxeT8U zW4sowumZGvJbLUwtSu?r`u7L4O;U)SxU9G5W69Z1HC|7QyF$DEppQ}=@;zc)BM%yz zR%VfX*dL>dl@!Nk^|Vfmy>I0y`clPNPqfPmjvp@}MNH3;GvRJg1;?Pn_)h3WU@!C0 zyhy%!Mq?m+#pvd*;bH6S#IBr?zj4NAFj&oy9olxF& ztpYr#52qL$yj@o#^1Aj;ddk!z#8d={2qo;jP1vE)!>E_!rp+!Q#^~NkeuxSKdbCwx zYMk*rh7UHpJyLvWOTygwIiAQf#li55mCBkf79<3J?(;3(xHZE{w_1b(zR*p*MUZft zdu+KcV7pc9u&?(AvFi{O)zR8Opur4zZ}(|`n*w0t&=!#rZo8}q&U6G0R#(7|kB}I+ z?qpu5JD`9TNVpY`ETod;8LGKJbkyeNNA0+xCy#l4FbGy+?=6bqZGign-1i+hh+W#* zhT@QaQ-1<>KTKSsvq|EFjI*<+RaDhBt@qK_7Iv>dqm@RCW`(R6=rBD^8P?mcu{dsSE`2HYcF zYJgAgts%$9kOc(ZtnHpXJT$gGfiLg4MzQM}~RZnWbWOyT3cL~3V= z6fK_&-oIPaSLV)f(8Ojq$@V#jAoR^w6VXEmdtrPFEwP6%978{B08x>g(`c%^MSDpZ zV16>8Jp{Dy9@YE6-pf~1FPur?EX6%OHz!byw%CWx8B3U&INktrzP+@E8tC$5aaH(# zmd+Q^WJV=!&9|Sis1I-dW+FAddFA;|k-aNm(7Zk>zC2cZp!5(M@<1v@He5U`FIbJ@ zk{#=%b~q@vNM2e>o|j0s^~q`24oYW58Zuh}uW<}>_GNji@kf&IX7RYY%FYslg>Utm zq~<0_)yoJ9KQrN-MlA5K5>wJ1)pgMtv$7C_QH# z-b!}cp7yxX9rf{E>wZh}>pJTLw*DI_dCE0XJ||XJzsY*FOuDyo)xmA{nr>^ohqpk$ z?wuW8VQLn_x?}eJG~>d>nbfd1TqfO0#}ac8$EqV^5xI>lP2#-wY)C6?@NU6>v;Z3^ z4SJi_Rm;bW7uSjL2R$V2RQ#Z8;lIi{tNGFI0)Ma^}GFQG-EMmn;?z8V?6|lGE z!OHr`Eiv6(Cn~4UkSZ-{wVGRh_8>i0X4wmPpQgBbD&{r_%ZjDEkYeh6R&C-Gsa@(+ z8+q~w_A{~k$GcOF(^fi51#OUIvm8g=2h6|pX*XP!aNP2<64=S2lVEexdfXt170k)+dT z79oPD+h>6{j&aE7*;^T}uX|opYGa8~THFQtDKUwD_bLRs5*cfe%Q3R&s#9t)iYjZ^ zcM{+5ap~8sQ%c`v{G9E72nA%HE>(Pa@*NkesKd zg6n}=fATx?Og~ROB>2!cO1YrCt)Av2pQ%)SLG$xG{h|=k74M}c6ODO9M{2ykQu3ll zq%ooxRX;_dvIm}T_EwR|<@W=#%&~T+nz}b>p0?lYhs>jO&OOVKxJzz5rf|{A7D>4Fc7&fpHGv? zD#FeMTu$**b;F)u^ecoE60b7!hgO}>(-Vj{vR2BzB4XSm^GD!iS;;;qt+##D_Fg!4 z)vES*asTwaxA-boURuw;3$eYyWEr~Br=P{3)=r*jhkB=fK71jD*e#l$B?k&WiF<_y zk7}!Ac^>-yt`CK3v<9eyagXrfmNv@mgRpl2M?F`d9Da41a*ZBRwz+Mz+B{+L>&umH z?skt6G#@2C4K1qKNi3O3i8h1g$%;8-w-;}0F@_p3m5IUjC|>0gA%!O$DxU%b^v33= zS+uecA}~gOlQp=J)~@IMgHrR8BDZYi?T02aHqUjB2pdk8F>W5|N{*Kn#>|sdPqS_B z7D2I2_DZwN`z1s(WmOxYnE~Z&N>En=Bpor-rR5C^Tt}7bp~R=^lVL z>L(SlY^QeX*;vmtg7YYpIg=5Hl{}i4S)FB)gj{D&4>e-qrJLl%YS!np2&9gT*3Lyt zv<_i^Nqm@|z&y1lmx&wW1mpHH?b;nqdgp!OS3YExL}(~Zk^_0>+h$X&b+D#=<62%y z_RkDwDiho4XJCmTy~?v(GgVO4@wi3?Mv!`+6?I_SgXaFr`z6D`K-@ z9J8?@P+S`NSZIG*Ej&mF4fcNNs4#&>KH71AuPb@M8pk{9;aaGSLT+0WJp&il6SLb# zhpP)$!ps7&`A3*UDqq6HEclim$)b1_^y7JwBqkp+2buR$(pK$A{q}L<+686!ZKF~Up0uH)omq3$d%YJl-ST$@VIxj_%j<;`<8Tsg$aWZMGt=;wJP5zQ)ct`i`C zvQi{0@|Y6eaJd&P+fG>(o>IQP5`Q~<#8DElKHs0&e(<|H(Zm)@S%U|@1P9Fp>wmkdR z&HAB&e@z{KT?!*0{aPRvY`Op2*^w)O1E2)B5{5t- zp;L-GSQdufwFO`(i&>TnS(FP)n}UkQJ%NYA+!Nw5^&vWn8z?JA_9UciQ6?OJ<;fa* zX3kUvtOE-(?FCSmO@N`4q|ctH+1L`m5- zp`gUvFq@&W+>$yMsRr6AoX>mV-4)cWXLf2`;J%7eX~8>)so zMbQ>8!L9uN?6Qg6LIjrX6Kw5Yp-KPr-z4t!-2%i}ShmfR)IoUxI=_2=JpJ@{k`2Z* zEAG2KYHxS`^s5;O$peWI*~m=o(pzfw!fU@AN(qewz49r)t!(y$HIhX>dI=AIBsPz` zf7-{bMOJ+t_{S&&(SEa@dr1;bxiDtO5-yWf-nt9KgEXD9KN_(%jQ#px4>xpgdhVEe zet)BCZb!oNbC9D`tdo|1BsU2~1=7+C<)EicfZc=SrgUtBRk=QkFZ9`#Ji2+J^L03s zQLy#QXYIQvx$%ZgEdKQS2oLxnFcMV!&}w!^SvTPk4e#T@yX6&o@TTLUN#?khn15tQ zx=Jd>=9AhQ@xdmMngTO_#Fc|zDBn+xH))ar z{@%B^xJYc;rrYlDw9G#?_alTx!qG1xjCLBh#+#dS`}TS9*!cYR={I%C%nLq1Clv|r z<3hxlwne;NJxJ7`Vp9iGvYm`Aqy|P%Ng}|5DG@O65V0_(oVD@1iy@gi9yRtGi{J)k;qDk=7K{SB>@sC3iii}YvKd+ z{i2k+!Iq=6ER>8eL^MtlrEP~53Fh4NlJI0N;{%w@eB8?3-L&-VX^Sxz&Gf`up=q-mheSB~y8OOCR!% z*JLI#8AyRw3CbYbi4?e0#>YIi)sB2lxl0k?_TE2_JHC(^dHP#H$y2x)7VnaSVnmOt z)b2~<`f8AWS<-!e@_mE#AFSK3rBw^(Z9Wo?YQ^n*#V;fj-`34xKbAFH9cDow?0VF- zKmN>o8Wx|A##ZK8t*B%sflS&s?@rBU@Dtvmh-$y2$va*^NYb2fJojXk5oZ}(Dd zWtqIiA77KSb7u#Da!LiX;70BqNb#qH7APL*GyNui5Yv@+V;?DtMAqnyX2=Z^O?<suKakZsLPLH{yYMGiD!FW#f0f{_S22%ey;;nHTV6gM;a|U`*`V}2y@9{ zf6PIDXl3qwWRW|iG8j*hD3wTbHx^xUz4l?J?`EI6)T1YXzNRt%Zqnmuy8N;%C_h@% zcVpwd?nV~BmZiv*$*e${Sr)Mxfj9x-&c4()Oz5H#q6kX7oEpNOzQu_01u6;(m0qKC z(~7yc-)iw@)Q3$V_4<8EcCt$;${~+(AhqOw2Dey9EXrGw8Pk!qS2(N3MNZL7qi;*| zxMqr+we96Mlhg1{>|7!w_q3y~S8u&<{9*TM1E;!Xx=`!Zg+tBsHCLV8tWe#x8Sqz1 zXI;={zzK+R#t^7XrqpH!K2AIV7VAUNF$vp38Sl8 zD9Z->Y!!~T`909zm1{j7eAT|Pu|E4xBqc(@?cExmA{jL79`gKL0%Gcr3#3lTt{g$) z@u)w@dxEOAQhR(YX+fMtvgk<}XbCHSwI0U1$;nqIkLhliSz%kRLD^^hvE1(TU3>ad zn?AE=y}Y?tf8r?B%a@n5(cg_57U7bEl$yY(zI=A zR4 zVzCqBzzg(;@7Rc1obwdp-AlH#C3Sof5wuBGqE6RY0@73fX-|%BxO6iR?J4iOTcI)X z&|=GKv*kp1Ct+@J2jU0XQT)n(vF~+(M4(NKji?pjnBVqbOT5=F=0a?)!|{B!YtNpCBg+a0Uz zjB2lunxgOkFH}9-S7kl>!Q7`{&H_>1$z}?5(NS@unCwIjsy#wIufC}XQm;&(YNd*l zW(6$rog0}k`eRc;&+iBeI~CfUhIH(9O68>heH|q#FVJ=SXdUo`v4<$@p09)go_?F& z$LIE-R1dgUb{F0S``JT(t0mry5G0YCv8vEs7#VU>U63m6SW*6|Jum-cU53iM{CqI4 zT*w=#99q-y@OM@?TE9DPB~M)iR;QL8^=YdPR8%@DZ}9^kZRS~DZP-53jg(p3up?u~ z42^O4HG2+H)e&|7h@Y~iyzXv3w3B}ET}*sntQ}3DnkmINAMSF0XxT=eJU1`AGvLYU zZrJ^lI|3zhY;|n%f!T)t%jD1#xdXt=R9Hu&01w4Zq6v!`2V*;2F?o@&wgoJB0$ct9XD zIgcxPSGcm&6nuC?HPYVN0@Mso{Uf1IEPK89k8rn)&Z`!G1nCW&dwJl&S2lR^nWg|o zw=GMGe5;JcTjw;o0T_GS9n0v)UXn%+#b#S}mHMr?kB?mkec*mTHUxcf%{GX!sU7UQ zB1Zv;^GNn9i}CyZ$p;H6aVU%R1AyzIlF6iZaeWVDdJ4{<7u^Z2xYL{N?3xokSrUD` zLWyDTcQ3(zqQdZ3?>G)rAj9{QaTSL?Gpk38Sr!N*FJj@LbYa7uhO1f+vgU>knX*+a zg<;ykbtkYiI)a?YB?{TbA~R7_7YA1}hBpu|nqm3-J#E*3);5)5!}-QGID)c6LqPpW z0gt{(gwnm@u0PjF*)EbIbN2O^Q7x|CbJqJ)s3UKGBD=N|neP?dxPTW6eX1#)gc*>n z@v<@lG6L$p0%3abN!+5`3*`^CF?ksn(c1{La8d2*_UeZ_JB|c9SQA(Cj0i{PO4L$t}9RW!_n;VReMv`e(s{K*9M11KV+Jihqb1gv9~t8-h@U&FUWbKEJ*X?z&M(uSSQs4G2L}c_0$02T4ZX%>%slnumYza@50;5Uerk zVIK)@1yv~ouE1ScS%TGucW&3B@=B%eO_rH|zr$tfqa4<}WSoKBX9B#JDI2w!|Ll1! z>J@o4_sD$+mQnhtY&A|TMQa12l&(?rO|Ik;s>QO-2J+IM?s|4*lui0$4Q{$tdCzRQ zJ92B}7MmZ%;27F_KX!t(Yd$Uq$HFpn;jXb%Wg=|u+Xq|j8s!+a`mjYod;~+DW`p~G zao!5$@LX+8@8MzI_>1VP#i&%Xp1C(^Y4R;kR3<3oM9?(d}dc=<72YzN4~>^{qYw{gR}!JrU?Ag_ zd_sl8knar$uDY#5?Kz)9Q)DW};&;%IedSeBh9--ff2;{3Jb~m7;u2R`qF*b2@=ja6 zR0Wh9nk3Z(cJ!ogodDd(_;bG@>1S3RPKgx^?hHaw`Otn(BiCR28&5vQ=1u-;h6Da& z?GlakTJi!@aN`n<^S2g+X0Q71d-MsC++OZ!-rpm-C|*`(h*p~!xcI)*R_&gTY{kX- z8gZ}!>;B9(94D#Urfo8Rv)4);t&-Drs-w;Flg{M5UYQCA05B51a%=w@;J4q* zuIKIC;`gD#jg+Y$+4mNBE3;kfaj%16xraHh+XuAP&}sv1a3}PU-$bViw&G=$E@gOl z$`?s@?@B@_!8G2r%MsA!47_}F9rt~$b=p7bfGL*5lM}QOlD4gTl&;f%PFAE&=j2WY za{t-mX0oiHf>{t7uj+AGT-0YMJml74$I}C@k1>BrFdP6c1Ob5hzvig?zs9J1KL1Wp z`CHX7e#My`mM*4kTASK+Q34A>O+f(wzFqK}iqhnRRf1WIgA#zE{mni} z;sFk6AgxHa10by5RSoukpD8H65*`Eo)a$At6}ui#ulV_+;CsA(CW2qZg2hK+yVBK0K|HsN=FstNo}F#!NRep6Xg z={v~fN6HB#9{>%&9RTg%F3!m=&e6`Uw@$5;e56=Tka7T&4EtSg*;Nt11t=DfJ`R^L zC1A9Afm#*$Jjeu4W&*6WT0SF8hZ3$g5w0}^qg@gE=a{IG>IlWU3haa0L2~?zUu0???BK7B;&0zx2cXOJCOi=wq|HzDL*p zk7J;45Ut6Kjm&}G02B|)T>#}H6YaDYYl~89t+kklV0TY{U_r#N-JbKMhB2(6wpYsZ2#yLr9CsiS0;^z>Q{8v9Zh6Q>5m+~^f%sr*VAa1er2e1O2gmdFYWps z^XE^yq=2`7t?&6he`)w{z3(w!-dLFd%1q=h?RN}3H4z@xC0|w}?U~54+NJM$!`lHY zx&6jlguvQ>1w_!hEa9_dq;o)HD~~o{B{{I8OQrs7sTDa~8Q)4~)QvLASx^cP><-vt z&a^PzL%(Zf60~-k0g@+K6z%cA=L5d_o*!bmtJEKV*E`=6YjXhu1S@wmfh$o7?j{9d zh)H|ouvRGoWQSnSJl@J!&*%Pq?H=92vXKLJ$zsb^U?q3}OM(q5NwYIfu6mmYVdQ<5`FnJeSWoN*PQ!>Ku?b#MKN~e&wM-1#qnb zmfm3r1=yZzGh?7sqX15gWJ<4Qz!I?*Vd z93giDfTaXg$;1HESUa{oS4yX{bLYSOEHjTkBL9z!{>Mi&8UEw*?{$RhRd(`K_D7cg z*kzO9I+OL%DSg)w^+x+=-9NIezw1XLti10Y|0APp_pC05=mjniXg zv80}}$4ESOAMU6rFjmY}t*lj`R)8cM_5KofjRBSvg3?NqI|A)m(|{xK(6KnVT=;K$ zWbu+}xoKrkax9Csj^Jt$ED0G{8Yf)rEiWY&RAmG!4l>q~u)D{zfP-L;GX9i*E&;Zj z3OLn7QB86__kt@=*Un1sLS<$j6Bc@HVRZ*m+5M3WLh>M>*qQUH>REjjr0c{;28 zV-w%KO;Bt7_u7B<(s-zRMvyyNg8|cITyfs)gu#^r2g*{CBb4%;7UyWOb}pU26H4_f2-^4}#I% z=#L%!Ym?bux_H{Nr=~^VNGM&<@9or2v54J_VpjV)cz3+FLFq z7M2n(*`Ces+y(dY#kva*t)+rC0ZYKeMpd#5BwViCS*4B`utyAkbTpTA zsWT`t5)ExBabCQ}p2`c!qnm8r(gX<-CqPc15ghps-WGA0C~Zt;|Yn|)Ed-Gb?x z=X|fj0^fD7|FGLvMAnLHL$oAg8myW8apg zesO7PA{!VjrUSqRObZy*tNE`2HPQ?n=|9ZzFQ)lnmTxS&|B=TJxAgKEHPWBhDgej8 zfrV}^IoeT2r3%;CM@=-cr`u*-F=fCp#T~8?zyjO`Jhg}RWfCYY2D;Hb1=L%E5)Vs@V=QV` zTjI`t0wv%;dc$923oMC&VDz}sV}ZpnTHVyyyJLn=e zib;Ut0b&2z>xcQr-~Ej*KfiYTo7?=s8Ao>hi!*+HF8b;5e|D37Wiv?s`nc%({$IcN z*LVMeA^(jh+Mj#@)0uyK!9Vg(H_6$bI=T;kVx38yEm;y9D5}d9%u)KXpas&=x}v4A z43HvW_c&<7+SomvEr~&EmWs17oduRG*wHuML#8Qvt+g^9MGL0VWzLF9%eFcONEXjz zI%`o0v^0Y|q`)ruL^-*qgGwn2xVT}nUa~3Fv9nsVMh1dRV;o>HB_~3QxW!+*Shl5q zY;h9nEII@n11(EZe6&+f&tfm{#{0W(@0_T9v*t3=c_7t-jjHfy1;&is z`GcG5fAGN<%WMBfUJOg=k8FyvDWU0APoYmOayBbc$*M^q-Xr*%IISp7L@j~`OdW#tCEA3#(^?oSc`)kD-|q2 zSq6BLOi3wkbL3d8@7Bgp$Es(73|#`tP+A! zlfmRsY7r6wL6Yp6s=ZF6IpI>hJ)5-tp3x9K4Y3D=>B^b3H6Up&n22g?IWEE%aHeY3{a9EqTmojHd z9OHWov|8OnC&`7%jR~3$l;GztTfB zJkZSkkynsKl^M}jr zZ|&&cysMYb>=(nq3zUkL`CXtZ;l4AVJT%KHdsyj&e=RQy?9hZ!C%Oy6?s~=V77wBJ z)^_WASH$gq@JGV4MhJoDYi`MKjCFtzc#g9;Vpko;*Ux|h`V1$_&7Sb>)UmL=LF;4d zw&lOwRsojGbHI~WP+DSbwJfvUYD^+k%ThS2HMr9nSo`y*ZX=Qi1>^-x7SJrned{)+ zYyCgWeOpuG%F^Wz;=uw$w|UIENhP^tmoB#DH;{3ENfsbLK*|2~6Dj#d?>_r{F*7kS zJL1G?V;k(cWM!^gYt_52Kd{~HKXRjE&Cs98kZRm-7rD)VcSoO) zOImwil*b9L;xf;DYuc#Rud1?{-euji%KVMtlYl;j^Rn4Yv#jHWyMA0eDcwe1X?7lg zW>TtuuROvi$Q~TbI|Ks!CROVL}H+5DU9coUA zX#)o150$t+RBFKAHTi_woKzY@AQ{#|b2gsh$mEh3sU60{Qwvo+^`8vw(w1w>bR;sbACf&GlRPzoCQBVXL&Uj8$$ z@6Y79=(2u)&y3y;aq@&Y9dpgYnbGh6!6o;4wyziFNAf4q49bgTcyc%{1(P? zsg?%ic@lJ%^|2Kcp}L-UpncNe4bazrk!Qv!xDF8=_^eZJQOf5WigMC5n_%nI-md=i z!5jOen0Z-HQ_VM$xgP=J&k4yG(eA9TiAbXf6bvkIDUOibRNTg{FldkNbd62%lL6Pljf(q9_ME`t6qa@sGCHrkN7N``q5SwRzO5ChDy7B9Th_#b+ICeYWI{X8Th z&dylOxqeTBp`O}D)-OHXgduGRSSfD0@rI=N3iD~%ttLqfOQH)#K!V% zb_0fw!=&ca7z)A^ZAdmj1xWfdh8u2%U>jpb*cB!v5o}g1iNC3V<_0r1N^kY zi|0lVt2i+QzlrKeZL~`Edc(A4#1xZxrv!~ZLsi!^_lr*m;5_Jm4mQS{&(+#@o{_WeAg4Pp4 zU;$QA1%dm7bz&Jk%Fg^Az2#TTPDP31s?~I5fIygHpC^KkF{Fsk@9lGs&kyJPF@7HQ zV{t`Nd807L{$oskgP-~NP#bo|5igz#kVEb|#EaX=C_YQS()nI*#Pj`fp4A<KU+yb`zE0xF5HGeZgk*Whj*Mt6*-zz&O}fis?=+HLjbUhN5bC z@0%K4dn2c~Zlb+TqP;%XDEs_&$OZa!a{l=l=fw4O$h~{KXCVd*{e;O5ua%YDDI7z@ z(+>5GE7a+{@6V&M$9V>PZ~rB0GNBHmwvSIVFzy3?*kVSzoKu~z{P=D&1AW6;^T})$ z3S(x}L=>!)qO3h)GT)~dtK^#Q+KG0ITqKSOb)Ske3XK#-2M=0Ny15Oh)qNbmTokk% zuuI(v#P6_{BA<@HnvvS(7fp&cs+-vjNFw)bOiUJ%+3y<|en;jLyA~h52RM>e$Ie{#u~EA==%Xrn;U1q|B;hIFZ$j)emoDh!x;DT@kNrB z_O;?(9kI;gKBtG*4)5<{LxKBw20zDtIKwMdUDOi8JTMT%)Pfk9v$A0XsnkXX#F&W} z>vuo4US#>kFN+u6xe3wD5}(&vdbEQr*kT=jkY+>dm;f_C%)b-hzotfq6eObzp4Rm3 zm3JAQQ6B7hrwymUfK4JE;^TN7$6jokK*Y$qFbuS2Pre~k-wAGVihFeQ-3l6mM@u{R zpe0Z7C*d$=V6q8`9oA#dT51Zlrdsyn-i$^TTuZ+#*$g;l-`{&aa1CD1nDa0m{h02L zxe#LPe|;~l?->P`Jpgmjlf<*VU-QL}y>-AkZx40dR(ASWuhEh}+qxdlX=Rx3YUky; z@o5Qf#2LSZ)NruvT;mv)X379#f}|=1Pjt21)cH=5RXIo%y7O_i1hCY=4(YGJPyqi8&8pf2n-(t)xt+S0k$g80iskwaCaEpXbl! z%3K%wv>sQUHORlZJpT#ke8PzlFt(H=vhP>);>N3%ei-r#$2+lybzaIOq6e&Zhyi_# zs}6Z)KQ2d34)T2tv3h1C)>!+Y*1CMHqJsT>eV=2zy5$4chyB;r{w_#d&+{Rl%6&rA zf9|ont|rD%%zISI*M3fkA739{qZ!rh{DBrPd8d{X5X;LtmCgMLb&h%0yZ6$(tcZ$< z9&!t%ahq|X`}3pe=PWGW-h&sNa&wGPe>)T8xxex{zk%ia2DD_lAnrf< z+Ft#F%lO6#nDOEFV1GvP(KmA3e(amse<3&N=P|;0L~Xo*ejY>P{+!2)9@DV?aeR+; z+u_{o@g05ScKxF_)dJ>d?r;X^jPrf$_zSb-I_t+FVzG#KV>_I){dg}Psd-(M8poRu zt(}?4i3dD^^V+01R_qXak7Ln~Be)VV!C@Tnbv@30hd5}9c`N6L>;00JpYfO>e>d>e z0h2lOi;^BYzS!%B^^9DaWBmRAbHLP3yrzf3ukwOh+4svRDdu?g`CA^rYcrl{H935* z`P5%8{kZMNhS>TkV0I=6UzOtSQ4#yw?&R8IFhR~2XsoxgT?hra!<_OouM%^N(_=Ey zzc#fZF}LqN&Ax^{!;rV{P51@Ae>?OU4C0!wyTHGPc!yj%A{S!+XV0eFJK9NpvP?$vYpK z*X3=MLRA~EO$fTFgm^2wO3%P%A+OXl+Gd1wnGT?#6T@4?>u?kBc>bDte>y#`L6ve6 z*BL(VgYEZls$RzHymBQNE(nvIOeB4Ox56VZBlx{_9&sDCV2sZ@Es2#y^6Zk)JK);% zC@JuBV2VJ9X?@VNftv8wAwMC!l+df1faWJRdXUR9@tA(_6?^VgcECd|vZ)5x4naEsti-+@I{- zmuxoE{4aN$o*F>6@I{1tE= z_dJkg-v`>Cc{I_sNcqrPy!_q+CPH#t*Ef7yT)`zr;;{DcZ(iaee^@Ia*7{=|ck&v{ zp`O?c`9%csH3G%N_Rl?_D)Ou`@Cw?E9bV4`o*Tz{b+g2{sEkB=(g(E#lS_Dg{td=C zt-ClY`gMzE59{JONa82nanYY&jCW=GBz@BoV(Ymu+K1XDI)iZ95sIo*J(lhZl%1A|#*eS)Lgjp`zCe~BZkug7Z=Ib><2RIyQj z7`(|DB_j2^yNH#U1XF~fmf~et|0`doSHG8UoFDUIs^(dIJ?+N{3Q9E3AjZ1oV(shn zN?S>uv}RzGAe~;~UWZcRncmOU=s_{)$5noe%0~Thp7(WegS?j;PQMLgKpm`$pL^VP z;Juvs3V+tae|_F$e&DqD{5cjFuX)PHi%@-Kd}qVet&Y6cE?=~v=fO3b?D@ts#%IPI z2W9=3r)K5Fb!_~6oVdGR40yD;X`N_s^Eyc!N>)m6R@|-T_ByEaS3u~b1$9;T$?@;@DWdEaA{2F{5 zbHs-C^~`uFX}v~%24(KLWY6==v-ymGZWUx6Mg9fmb~o7DRGdK0l=dHp6G7P7+nCgy z7psI>H@=8}oEkOdP~+JxW_#m_xCydb9)S_xUfNEXk%B-SbCVCe8!*A}dTBv>O`)~) z%V#+)fAb|PaZ@R1X5-Oe9XKFz$gy8{eVxbK;)xoy3wPNiZ}`j+)F&MII6lLEIgM*E z7bHKe4cMQtsl&L>1u2$?v$~&;Q@v$+-tPfpC~n~!B(*wZyUwSv>_U)gLW6u0w@XoN zC(lH+^kwa^_)F$c<1pvq85AjF5fc+-Y4gBKf4(I~Zi!KW-kBTEx>2}O$;4#Tf#Or_ z>K92{#!c-(Crkg?Ca`UOg(R0?*WPTaMl=4UzKkVodH)fyx${xg7!Ej;MG}}Xxpm4Z ziQ^=woR4xh&IGs5#G0*Qa!$2U0nh9Ay~;)};P`Q|V0b!NYRIn#9(IqDV%+;5uJ;4a>Vl`gzrefZMo_FzAZOp> zW)7ybQoEbUwVlyzk%rI+4_r=+UU)`(UO8@Q`SqK1P%#eIkh({lfm7N%tg3k>$O#u9 zj+v7g1)popRqd|ju2xjnj1hfB49_9Pe_U3FVBMj%*NQ+KGoOt26V5lpNj!NundiT!L=tJa%J%tl_`>%_ByEMjUyZ3T_{n?fDhoScg17 zxCPPkE9ztZcEGOt7=dwvV{EoA|EU@2<8&#>kG!pa?ugr9K6}8$4w@l-s7oIM5Am;h z?SrlZ*%1*iz7%Af2~#WNXLxbWY1Uv4gPM|tK!X^f1nfOyEV5=dqbtF7e_B%QdCaK3 zXG3-ISTfmdVxIOq$wbwaBMCQLlG4m4ohiA=y&ea^RyN7zb2%?4um=AqHl$yz8W5*98&MCXfSiE-A4l6MY?gz(=eU z@O}3|ZpawN?YbW~tdEe7@v!Im%&$G*+&<<((8t$ezzmII@`A@Qf8Za3*6C0e%mtBi zbugY+RC6*l+yz|(oJfh|#bcFFKjE4*MZjYnfAnifv-#+1%&*L`j*4E`pwD1YqJHLB-NjlM){x)FGCY@dxYn}m zzv6h9iw&<&g9}{x$PpD?ewqW)&t>kDL+u9J%z@)$Bg$x}Y66G6tb?zdlux28IMF!| zlO~{Gf^Z){p-xlo^iXGk@@X7A%3ZMGUC(LGfB5?{8N@3U8)pu-s)rVp*xu%_x)Xc7KX1^z_&w)6lIwLQ(7Nch#46o~P9W01hgY~c+ z<2;f+o*w(wf5Z8NSnEq*UD{(SUA|OX^(VIDo#!0$VR_V#yO3 zt0z_|FB{BNkTdt&$1~Pn;rYPAon_S49x0(+m2WSif77hjv*;Ev8ag%nS#n88)h>rU zqzTc+v7tt7!CC%zQKGbL0^I}jBsw{^(8F}5Y z^o~7t0u94jk=TrxCJZXuQlgs_dmUh8Ol&(RIG3xMKvHj$p0j6sZjW)l$BwjzxQkI_ zlEIZIe>YdHJ*++39Pmwsl@q(s;TL%Qx%zZm2Lq?$LT9^04*hA5Uy<|@6DqvW9%`xs zCtFrHH^;MA?GD`914h!%Y1Q}ddknDWKOX9fAQhf@U3I3yF^y6l2Q2a?v_bwM8FIHq zPHu{aRas|YZzw$tf$T5#e-K>eJS3>J>4wBN%<2`}1RHhdvC z-a7MrGrQkwO+n|=T$nkYlhmeH_Xgu+um6cWt8kAH@*GD*I}Fbts%K@dH=2!ckD`v} zsn_v9e~vCOR`Y|F?^HAH6zcMS;4UlUe~_ownh^Z}%qYQV^AW669aK4|1~HPyUqm+o zo(yp<7hv-VvCW)lFBv^PBt7;)97<581pa7*pC_;X{5%BLAnInY-+!*PFxUCnyMtvL z!Sd}6bmkdw8PWDV+$E?RcmdlY{^G%xE2vpsc{6tnIvet`661!KK>hObGwHszf2VU3 zx~TgnA8HDA;13hi$HhZ#mJdCIA!cG6f_Xm8Rtkz+_SMgj`++@#@EtV zjHL&yOW*Im&>Gl0J?obaI5XCd7kuH!aFtveMp$kMaLII|C=9U@%yhv@$^3Hs9oBa% zvJ-dFvvcbw+Vc-RGGPq%fAp6=QxT7KM#U=RrOs~@kTdR!Jw!8u*Y1~?uO@zXwVZY> zS#H}EAuXq-Y4|+<@M5Exc3-{ST&EV--*R*XatVb3g}jum8F(qZ&h7$>Hy4=G4}9{V zCGYp`1wQ!&?c^mO3^I=q?5lMC3%_od&U|l08dm2hHvjgwv!1-vRksNs-AD{J>t4}fI@?` zXlYYjhr4{i23~x2p~X3_jn|8Q<0Wq5lWpM~xT?O|%PaT_;@l||(YX?u6ULw7G!3R= zH=VB7+myt?WOiepzfYFkGg=BPvd$67!0 zO?tfbhwkoJx1ZKazrIFZJYEs2xu+Z7H$32wKR{|+qU!+jtLeVw!i?Lc!;Pri%8SPv z0(nYo9{1d|e_FudwL;fAmm0xtdrM38J=?jZPmJ+&K_Joji^=W0`xT${pc-{0^P;<7 zNs_PSRuvvqYba&9GVYdY(_?u8^-p?AKGUw4Fgt_FzRLa3w}>|58`v7s0d&B79{VUF z1KuC#e`t<|F+0V|{HW>rq3Jx-DEOY&!B^y{2|e+fe^EpA99H)>=y<@oarKZolY-U5hxIXXSY3?TX)% zt-KF&>!23t{`yH zuOY+rf3?fhAAHcEu0CR7X`c3Rw33+XA9I@{&l@%I*-x!tafvM-dCKRW<*?^S+rdI_ z`3hZjv()Ht5@(W^5`8m#0Y#2D5f_;| zarkTo!gz6UFSX$#UB{wpL#p>LFwg%HYpBFlf3l>(*8HNKai5r(u~U7GPs`N@6jFP4*fI`p-ut$q0AP2AMEINan|+a2fvd6lkIzlHlLF+-=ELsOYfzA z&)r1(LH8A<`JiLE)W#X!c*J=|{iGLGPd3`%Ses^-lXx-s3)eu$adbe|?`? z3>uUhK4A70V#^misgpOyg$8oKUw0?Zo$GTPD$ZJsKF7=dzAtUQJ$v>H`k3I58$QL^ z`Y-#E!d*V)Mp>n0ENJHVyX-Zg^&50%6IMXTjyHU3x6Eqw*eOai_bJISrU^>4*XIp8 zHDaI?*~gN^Zo!S3b5l=*F%JU%f84V}zDNq6vKU@er*pWL-AwDkqx_ASO#0y5No$#4 zW68C3iERT?K^peF1T7gU3t<(Fc4JRMQL&Pf%rPs990By#n2lVR_xro&t3{;OtKVYp zYv68YJN3_fds?smK4;3?83g0GhQ9Zt88t<}9$_bZ)Ok;z^e@@825C%PW zkUbkWfjdcJQU}}tPJ0f88ur|>PN8#_f|&T>dBp}k*B|4Cc@@@g9P^~v3iG5VTz8I6 zaozF#K`(EPoZ%lm#3#7)hAE4m`sF_`|LCWF;H(Y0&>Z~8HPU0u>z|*2ajDSxg&wPL zqSozmPD*%)x&hwt4{5R_f9YI~9wV@G$Be8*VBDBhx|`O)ltA-|JSSS#3vOEEKlu1w z=)`D7Pag=Ey)aj9%qdpZYhmQL_Ky%>H1tZeh}Alzsg@g3zwA9O7whAGn*vE zH`SaeT_EN`L$K7>&4!o7ddixK$4XZj8-li_M~ehWn-p{~6~#WVf8(ZM#Y*g*lETDc z;7}Pi88Kl{89w8rUM%eM*w17QWxn=G?ZMmoBbYJbL>|@;t@SyG9tZf%ZSQ|^@}`ix zf8gv(-^g=K5ufUV`Eh-pYlky#I^;)LpAR!D&s@Z9hQIqZVvkc6nzW>^+M2VgWMMgS<*jhYK&9{c}n3t>B6!IsmE66TeiP(J! z3@1;Jr-yn#vYqofu{D7`M@-Fw$|!i%3zV`zKQUpY@5xog zN!55Z_?>3$n;$n`Wc8B9<6BOg2V(fAzE*_ajFnf(&EhtnLZg*e!tbZd+)sSF@tB!G zqE{2&n*MVWe>5{ef%`c&B*)L4Ip&u2K5khRp#I{Lx7^o{==X5UeOuc1h12}+YiHR1 zf1bs!NB_R_e|JXBa6#f-`v!@A^kLvB`E|3>*3F4V45^JhW&>`=6Jq%v`#Cp$5|99` z{%hVv;u99;F5z4C;+Jy8e;Rhc3(WJbA}S~6y5T}MyqC+1 z{P!<>@?orx{6XaY@zzNDaC(~z=UvTeRr;KU)QD2T^3DJ}IoHxl#JRFHXt{Q)SQDba06^!DD-fAG6c zx;^cCe;y|fTlwl$PftOtd>Pl=v%Q|;6n`e#=Lej6|6k)zUt1k@V;7#O?Bwq-J^8Gj zW6O-iO@0w8^t4hT-PYKc_@2>g&W!mAh}kHjE05^8u&NAn=7%K9E@Jkk*MHzynSSEB z+)~J&gLs^XBy)UXI*;fnOB65f)J%}(K|oGlf8J2@-}k}hqmdzZ2ET95Z$@1^;tGek z)4k_h|G%TZ=mD2L#?V7f*3Y-N@GLm^AfEHtkK!qu+ZAe*JQJ)4l^_0=e5O^FgcU5j z*jf6l%_ua!pz2nn?nDJ+BX{m@Qt6d%^c_oWAWRlh%Tq9|6IAVYyfdkvKAQEX`3|-9g@5ah zJoWa{Tl4~dxIX!VKIKMs;M4Lge+;|&>8;aSZVT-5&MxB1Jlf^$q=u=Cv)>$?U!m1Z zcFhio%n2!8Qgs)uN;N6z+*>jk)9w;4c$Q)5*f7qj1#hZY=a27%D?c`|Ph4lfUoihx zb0{)cBL0G&BLn_&>2r0gb6@`l-~B(VtL(z_pdXvzO!}cU>wD^m3)9V(e?Q4xW(6%A zj|vu+I+*M$E2zvUl@HVwf^^F7>$VbE9Of6bavGSH>!f(ZnkK?NDCW?ya|cGQ{Elmi zI>55879`DV{ z*Y)HEiU=yc!teSEibzxle+1f@*j=wY(;DBpr**Z!ed_gom{|aq@%oUfRTthuKlba; zE4aXNj%U>|j(f*A_Sbs)fG=O*I)~r$^dP`_;I@78`hI>r@ya}#62&QzviFf7fti}S z)dY&yh&rQ5lI8x+4TtxciKzI(OS6VqbMYRsjI7PG#QMQrea&`mf9;X+*R+F@``wEG zlW(^8hrgq*R#e~>%%+c_S(6}7FMP${J~8MVc;7n=eWIrFdYguaml**VzfSyIB| z86D4A-^W)^lnWZY!9Uk(raegXhJrhn&$gMA&HV)4XEJk}zIN-3KYFpjoE3Fs-9+yB zTI^s+VeU)nu)g=G`#OXG>-$?9%;y(9KHHL^K0kRKE!F-4e^-+)0usU0sg=fD2|%pBi4HxwM84oav~D_A|RLC)X0fJe<(rA_WEI)Jw%-cl#ais zmYy;VrDRn_9THl2!Au=dTdX}aIVv6GKCP$8Gl}Bn8nykT=2fIK&Ikx<4WV78;URwl znVv{B4``B%dzXc~f!{N;CPG03l-t5`Sc!&igKs`6L4;)NPnntN3pQ0q=eGJd31K8mV9 zzfr=BH1L|!PYV+ueI1f9%>TDP&x=0C-2Z$G+Pw`({W_)jgB-hVK$<$L@`nI|LheoJ z?ewkenrcebgWQ@e{=FQef!d{=L4Q+&e{bvAmJEL{|H0Wm?`71L%cE!15EC)R8u2S| zR7oi}e_*cz<_p+<`{P>*7K2iobzYSdGhC#w^8r!DNp_JVgHDlXys2WUIVgz z3$>dOL3a6YxVI1C~M-rsEBHiHitM&znJIud}t;p=7wkSf(LT~GY@c!P2 zf3Ej}WyC1r1Akk6(=A&uN5)bm5E6ZBR< zO}YS{B6o`ua$cqK~RTA9IJ zo6KdVn&+;X6VA0W=UQ??c!c{8eZ;^bED$izofoa{O%Pwplyi^T@eF>c7k&LqG}VUd zJg9&tfqi~iy{uk?+UD|A-g(44LKDlU{XP8ryWSX<9B}Se-?_Usn2=JWAh{k5p4W=b-%kyL)Jdh-Mv($)9cK#?~w}* z$G*J_JcrDpFXWxn+a>}~XJ6KnPfUJT_vd~q){TfaoV|@sHT1xT89a?VIQzFA>v7Tl z|8`6dKPOa>7s7>nj+-0oW63ysVs%6&brun0S5G(j4s^~Z)#q$he?rJ(K8bTeBCC@i zh8+ENKG=91c1E%CO9bWQ-rcuN6r0&ZtM?Z&mXtN4IGYENY0&dJ7M-(}<#@)+=0WXp zN_TukllFUN+?d-;(pp!fvbBecTG?f&;LOw)xLE6KA{regP|g;+x2~ zuPthWB9=HUtt?@ypyfoq1=f;<)tr`^%ry@}6d$L9bDw)hj`JS(S;{;e&C|z z7k#kP7-F1udd|o;y9p}w^IYFQ%)+YvoFVUj=jI+1s0->ms{_0jgmn5nhBj5{81dRK zH9T+q-Xpxne=st5kpFl3_=aAij^oXQSKzf!0?5zh=T}5c-^UM7_jL9NI_^jG5c{cp zzw{XW;qlvJp+jENWX4*pO*4`dU(jlK! zkc7PPFm@X-eSb<)V-^ISpV8;qnZTFIvi0Uo3!XZ^e_PMi{yzT>e=P~o1@(b5T59x0 zrUb@wm1GUsH;W*#ohbE&*=#a_OJAeM&4i9U4na0^OvO{jL@dANX8b*Yj6*!mnf8cZ ztC;sZYG)TbJDghO4Y_0^@6i`(@X85{-z1{z-k0L}t-rTGW9el0t;M?{3cP=2?bAG? zI(XVzfBdX!%yse8`)CJbmSii?*M8Ql#q3sodP(*n3(tC0fA3s1x^kG-{tOsic0khJ zhp#TKRL4E^;Uh_pZ}{Z$4lX=Pd+(#4>mIqYEpg84mnOx_T-3Ci9E}l8po7C$nU*}5Re~xQaxr$`h@hJ{(+)!1hk82ZAD#DVw z@jH`+S}r7M=KL8pgcMOi&10a=J>u@u>$eH-;`%wpJJ{4z?Vkzj5c7;6hTecTHcA}@m{^`|w%0Kpl&aSut-FS>Z6m||Fc%KNEkj99G^O0PcvixfB)dE#}4^zg94fY{GM;4VZdEa{tid)0@O+j zJ-dTH49){&4SxV^H)SEwG9<>GhSa_yCx*QC91Y_4`uxz3_Jt2S%?7=7wiJ;01fmRm zV+id%g9ywrDoyYkUKAe5UTX)R3P=vz-u;id40R+EVisLHcy*uwR7Gq(k*MiGe~kd; z2i*cEYvb?Xg`*fY``djew6i>cgOqnqO(dZ8nGSVsfO`K&wG*5Mb_)5lf z&WMSA6N5M_Z_zCHmksX`pXl$+8T2a7P*=Ki@ctQkdxP(GP9}^$3K+|v9HXZ^;ebAI zJRoMiCt&HC)Rx&dGDZJGt7c$Lf6sM+XTyj4Ke&O7r5laB_}*^`^@#X)v$HqM{>LlH&SQbA@OWGn-2 zYe|iYdpv1YQ(?R(_$bit*=ub9m-k>@@&bwB_x>*X3P1&t7Oig#6%~I9e^QS6peMP- zyShy|T*H^Jsj0#1e2B!) zDL-R3`!Sy9HS>D8|NpsnkT)+%QOSiYm0VfGq-%-iwEmqfWSLjomahZ~-_y8$1%vnA z|1jsaEbwe+8ntTCE$PNjqBAKK7ByoLHCX_+#nF$5UP0IZSACzIe+_+Sykp{gpXK&% zo{t0m9vttqvAt(ac9EBz{nY-mrzW}j;i-vSYcmT8+(*U>V5tW_LMQWe(!Q^-zk2Xu zQh4KV)%(Pq{Qih;yjz_Gy$-I^Z(ag3ACOz9$!KjaUR%(!*Icicz&*xtzJ@S zz>FI1X+6d7V!?;`e=8;J=MJ#m5AbdTTP&3cK8Vhyq(hB-PT!%Sqb9v7wF-z;G9M35kJkBf81wq)TW4^{PBO>iFg~C0W(y{h1+H~f0rDC^PCsAIM3vGcg9(Z zwukkc&&AuwFWW5qda57tW=T!9c++02Yw#Fz$LVeyOP!M$HM_)7D=%v{Qhe`4$Pikb&i%FS}(KGRa6-wh#FD&%Mr-LS@Hc!yg|%wK-q_fv6!Ogi+} zj(fkStdBb~Ir5y&9F$&@mk#T^_s;H62Qm25F{6I^3l4bD4ywZG^UxD#E+>(&p95v zfkEf<>oNLnW&t(5cQw{p0kQrIts2p5>a9#jf0ac&MhvZZNB_Fs<2+s@G5R7r7GPa` zf5g!<931wBKAyTke__IUZbT+Ym@#5`&CT=|dIa||^0)Nydy_J&I5fR?HZ8Ro#sNRi z$?xnPeVP}TckGIP^J!k5xj2XO;KGmY!q?}*SMhk(sNSRFzx$<5-AbbuV(&xAC`>-i zf4<3_49^`s%$kRXYE@tQ%|TxG-ll(l9mb&Hbw>~23%|0HS26+r{`Ti{m)@lO;N8q& zb6bG^Padam(qliO{{ip&OwNIy@^Af^LySibbFn|;z{Lee;Kx3|`;KpioT7gnTN;O) zaRx=zwkaQ<=V4+bdtKG6Sk^^SweP+ue*`Hr#k8Swi#?Iw52MeTQ!7Kr6k9y=31B}| z=kx>XmT1f<-mB2@Kx#c!Hs@7SqZd2dytW7R*$nCSK6sC9^5vF|}X zhblYhk=Yi<=iP)O5E^+F)F0})lYWj=>Iawna875U%Wi+_hVt#i%1chhaPKzze{X(? z>IT-PnK-#Z?+&l5eHO146Qey^I$mzajHdibbLGUk6|^i?cy9+W?)PjDt!MCV&U(8( z@Kt$Px|8bqdgv$PS{q-b;1Flu{tBSwy>Ftm#U$g1y(FdOl_-OaN&|>09+93apeM}a z)7k#*g`nAnbpGybn8H2cvmdyHfA=-|#}s&<;Rf;9+m+3qucyB5ebsKFBp$@$} z+0=h4*XYs5SaWywM9ywE73sX#9PWs8Q3K+m-wE04?Xu^HzljNtx5V1%e{o*$4xaUG z{un}ghynM{5W7#H$Br@%>QbNYFt!0Fz8O!&mVYr_c#PdOsE44bZMk+I{CyWu&Ao`a zfAavs*x)eI`otoq*xkfXG-UVY2`VQM>+cu%!@v6I9~wSAEql+0NbR-ph%@8nwd?z4 z%lg+;bHP$|>ysj5WZ+lcf1nol&c%7Z?)4^v-%@#*k4L?(zBqg5_4gQ_cwabor+JCe z{D0!diu1NVbHj?uT=JL)px?&)AA8T_gvRH60osY!%ecY2hFFZgHRG?TRWG0$6@el7 zt3G*=igbM?eZKNw@Nk;#$DQcMRhZ4FtIEf1Pd=d%dVJp384` zit!4)#FM1bpOWTlL9kwNQ##yp;CfYd>u1fXuT}HyY7@ zp-mQj@&Z!b4fkLkx#qo32R5qM0^^?7p9>v>Bc9cBApWTj+7>@}!GjlOu{-t*&-V!b z;6xqo7rp)A)i=z^f93r=7kcr2jnH@=w4qf~0)<#QHYYj@LDyVhZiD)xJ%3@4=RP#S z{B?R)KmD^Nc=F3 z(H{W&#I5%R8Tw><`0v2xNw23fMa}ocRY>9a;d%9uWsVm0e_J$p<_@oF^wAR(?3X~@ zh<4bLI-jJ!%R~!eWfIFSQcgBz-5cJ|2)SW*@S2kepw&3yTxz(Vm=%XRDT=`lAs8$o)W@{}C!8SK}hTKwP^96yK6$+UFq8EvY7wav88tF1Tu zU75+!`Iv(I4xG>Vw5kIR4X3bX3p?DO^4p3OeWE|{f65Tn^OaiU+z$MqfBD`pc=|*Y zKl!Eq)U*%1_u-DeoGePWo{+JWV0N13E`vf4<5Ko)*TV3rU+LDj#!wbOur+w+wa1K{ zE{U^%jjsGkIyg_fjwg#Wf2Af*DeA;(@<0>$B)7efSrYTlRa6xrDHXTldZ-Z|T z2{eG;fAaxYuPc9(;EGEf@Zayf@4t_ycK`h~5Vz>p{w{Yheuo))50JxmxEtnZ8*q4; z!Z1f*?g~H7si1Nl?hRFRJz1pL8;v0(JxAaT7N)NJq;+8nugP_N9qL;5I>OU;>+P=* za-tA4eV3yD4|8E+V2f*uxKjqF{}RN}INrN0e+qavB7_alot@mm&a$PR%?VL_rsF5# zTi)2TA!#A5_3DLuW`1Ix#*}jTthZM*)xnc_d8Sn!e%iL7zX~O6H)gSl8gSp)XEvMY zs|Ziv7a|BRAfa7|l|cF(#~>!fd_oqf*t$!IpQN}daz8FJPiA>j)>Chpt-ibF%4_OJ ze-eLwdRMB*x;^;wV*L@{E&b1Y<8@aL5(<+i^j;>qY%2d=Hfl?&X%G;o(;KsbVmAMLM?c3AtbrqWCUDLNJY?}Aa-N#pG-H){TQsLNbRnmPR2?Zus1<=rRqx5|unf2(bH*N?6~%*X6^IMSaRw%=a^jM*>v+kXuE zOCFgu^hzHl&77Y51xybA%$(sd&ZwabUL5#_sRiF*EzlNjbJ%K8L)4NRO)FT};6pG2 zHD+r=S9m$!9C2>ZkQIKMS;x8bPQS!p=zYi6GbltJwzc5Bq39 zgYJOC@ceZF4X6}2XLCypT(_Wg7=oyo5A+*uIoz12f+*(|usPhIPc%LT+F|{-(69he zk4E@?c!Hll$ItIcDuBE}sbQs0e~({a#YVra%z$M$9zMQ5{$I4c+0OGivnF;cK_7L) z2{c)<{BJZ6YMy1ymQDfZIb>rLsLwuEBLwj{b7Q{zJ8-i$AM%;SVo<+fE;5k6 zU-B^$1m)&%j-AAOq}5O`=GB{)*xz>|4|{73)i1fH9L z15@YB?Z>`wOTPgL+5=NEf65ge2VcYfGJ>z#EedWMOaEd2Ylkp76En&UA>a2a^XPxI zi~sPG;eCfv1;!10&z9=@?_sb&hG*NV^w)F2b3iQlKVndGY0`YA>jc}W?~=D{Ch?i2j^XW8XNzc7~Ew8zD#UCE>wVtf6mv5iV((FKm@D~ zwRQ&r)DJ_!doy~#_CNz*0h|cnq`=0t;E^-BCAgqU*?rSAF!wBBnI0f>uHDQRpa9oz zW(%k~7g!L<3;}N|`f{RJ;0`z%JT0d9Pv|>e`njfnIcEuAy8nTG1E@Ub4-m@Ci6974 zAr}p~zL6ZcBM7k~e+hCMkqNm>2=zqT$3;S}67oXs19EBzzg|})mhSxn_mJp86YH5E zMn_}F86bvBqsZx8#V(H)_+rF^|37~d0F$f%;QNITI3Ra!Ip7*Wn?U3SO}QD&vZ+Fo zM+~Z44h%Le*`d^20z9M=LyJ@eDIa* z;2Su$JkoSdU_4;e=EkEs{9ucnEZMVAbyqBdn}EXkQl3v>t6AB zy2*i;$_ZdC)I7MtW2|D!Ay!7!gWq@;@*v*;@PwiX5;*`C%qPziu%-#<N6_`*?_+RwA9PVO)`Sfo-4tPTT)pObMIlRy~ z5CNo@f9ZiwE*|V=G>`_sLuSqczlp>fV3h>u77l%me-ciB(Y|?EapQPBacvIcl15NcTi zl7%th1k%bHfh?nPB0G?VkZTDLwO={~$CO7JFNL+7Paua_c;0z3S-fGPe)avo0XF3Q zY69LMe>?2P@_JsDtP1&=Tp%b3;FU%QLjfeAuS5ZBM6zG!6VJGT1{igpiiu~UKkClG zH|BtcGV0^>y=&CStrFi@_SjfB6L7E_U=21*02{XP1_E;f0lP6rKTn{ZU6!3^2>43rsk%Ac3+44y;!ujQn;3hgXH3{hJ5lU(<9p1=0aO zbpC7$fKwc!uz6<#0hlLn{kDG@RZq^^06={!VEIHogh(y?q_Su$%UOdWdGJTYK` zf91<8yK$(Yer*PZ4}%R7U|Hz!wL=-+qV!`ww#Kp>FDmT>gi6+O9*CwYr zT-JasqjIGsz|r{G3pAdOcso)wU__2_)e_)p{A>sO?EX#@4*{mTztTNT;{jhAexUIV zU&DG8*oooCAOQf{nc}HCx5@{nqKkGkFj|J2dji zf-~s`tXpn^x4pshQN17g6`T!B7GQ1P4kt#gtdbEXaD{EkkiTt6&OgW_tgX6ME2N0;f-1XB2DADpY(p3 zEbAxzICjSVrl*?5i$Ce;G+F64T`G^AWxnZeP2-hsIy+5J&%f(`0#WW@b|@cUnShYN z{sXpKjom6}Gr!xV&@!4YIM^;oPqmEtXfq=jrUk~*~()| zrwOi>9rz}f#||6-<*PJ}XJr=3XHAo(f72D`*ctYl-f9{z{G>zue}><5l|6Rm z_@;j}jhBDYp?>Rcx>j!aLjz36;+uACd8mOfWEt2uF(Ge$`(D`gCiXhOzEp>AmdCPb z(0_3}^k34qUH%I@lX3nkAFwaabNc}K1V}Et!8Qr~G-CxC145zs)>X(Fg~?3c`q9h3 z*r0V4ZBN~R9dEib?2N_xf3}}*fjyO9?6H9f(BuCs-e9-fNPgrtzPA1g@A>~k4s!#} z@-O84Ne4`f{)}=M{!!}J$+Z~4=?g6ZxW)on=F@*IzXsrKsnEX=8hd>{SbQ_tEq_2g z|I2=Y-2iXEy5hn!pC_({J`@P0mun>>O#r~Q%PL;0mUUX$hf&Bve^El8HUYk}M)?XX z-WOO{ul+Ou5KPRszvaJuqW6<$vNm9iK(mgyDa&gmvw$uuSl;yac^t6Vhat&099x8y zHS#*()s02y_c;~b`zZ-9gUaO@-= zdJ+6PehTP|XiWn^7{L?z7TE3>C(O0q^?7d4Ztj1@OrWo8`!V>)Rnja!49<&&~U5?pZ}Q#b?Y^-&-#sJ%3xz0UjREm#J~CJmuh%H z4hV6*`fXePyq)Wde1QBR_gC2OYY9O9J>MTrX31Z5&%m)t9*zG8PT z@g|@~=rOYs_<8;i>VIW&0V6>F$Mb)WD`xBOaq_>SSII0Xeswc~r-c=ch%r&X0y=Q0 zT?f1e{i}5xn^{i(5o?K<@^@I9JvQ@LR4C84z4TBw|Cv0?#?<;UanQ#z!Y>{Ew7=i= zj*A<|8tiX7nZV4chn_!Ju*hfmXZf%%&8@~7|3c&cQyYeU=YOBJm2dly4s0)8Zr{G+RyfXP1lw}85#VZx zZ%+B2q{Fh|{eKZRK)|>Q>)Pq(iATSF({^;|r$so9{$0M7Fs7yf6^;{evL=OqmPdj5l#VL1B1|Ac?=Kj9zzPxyd*0ARy*Eap%y2Jl~2N&!2R?&iV)qX`_6yJai4P(DyL$ZTJF>5BqvOPX{lN9hle26V zv>U1Z!QU7bVqb7f>HdMeX&TS`iQOPCe_-EV-miacGnX&za@nk}tngh8T+|!GKzz>r z;3r-lwS;B9w0~n|8Pfg}3wd_`!17?$j4lYk@V|5-=aBXv z*b?&X{()7$eEfl(rYWC(-a9oQiZ{4sY6x&{2KHHjCyx^-nhEH&CzXGW9UT7?cTOg-M8Mnsvy*@!T$d8z z|92i%_TkLuiXbZ}zQ-NkfHc9yVH6BE}gE zSA#T%Uq8V+kRbFsZ~QyXz)9Tx&vX{<`!E$&I0j2>W3SJgW=YbF7LJsG{4H%Cho|i$gF%u9iNJjyZ1?fyctRdebZ}SNuh%Dv+k`Ci5#LG}0>=o!1xIw-$z@TvdNN6z_lq=bo9?JEVhXp_d3=Z|F z<>30w2ldSB5Vrvd+UHL_#^4Wp1@DQF0XSfN>j04OzSiIMqQmKL_WM0w^M8x4?uReN z1>!f}z<9VZ4Pdt)eShO%`JoSJAf+JN7y|N>eAg8$SpER(>jjqaV?84Ec}viobf~p&pCB$6x=jlSdBic7yg!K)q-1 z<1`ikFaOccPyK!CvHt2U|9`2+uif)kNOy*Gf9(68*zdRf02S_!jNy8rq(l6lb_>hf zAl(p_Pr!UBET081{|WySm{0ujv;5-g%TEU5qV>b)k3IFj_;H~^yfPWvkGD!DpH$S*9 zUpx=I-}wMMf4Tp$6UhQ8jI%V*Hs*xoeCU+qKNm}x91FA=|fqlD{)3IGi}??&0+QeF+z z9E{L@H4C;+`j@|zp`SqEz7)8`2mP;U!BxBoJOEq|yuzU*yxId6EvVi>PoWhyE(`$qIoo=DBVB^A^1pF_YP8z`XV$nxKE7HRN zDd3v40jkyy+m;e==tr>}!%6RxH+>CmEk#G*8Q|!LRjc20>`RZU6nh|F{3` z-H&ZN|JVOAkIQR5KCl1s-~R9a^WAsz|M8B~*Z=qxYRl&Mn;N^X=l`Y0W%qC2$J2Z? zufLUZ+?46hlkHXJ#r(G`ivHU)rhi+;=D%j~9RCd_B7a-|e&hd^#M6tq{_h!j7xnzt zRvlhR;gK2N1!=gL&CASxww}tg0OYkL2YH)v3@@|2WJR)=XMo2+X>kSEv^>uN!vdj{ zWGUZMJ$L0~ZySNZwa^4ye#Hq#<@+GUJx_@oG(aTe7P6df*=KJiH-@+RyBDDS#aIXG zY28`sZhzAgg==&UFFP48`t{j(S%%)GZO`yYV};_%N?3qp=WP)ahJ1Y{aSAZ|es=ee zMK5I~D!Pd(Rd=z>lR^xHxMeFs-Wz+Do*1*c_P(dm+p0fQ9u~HkzhsNY@asO5jUK;U zCY#l@BxjZ@JJ)y7!qQUiHD|rQHIM?j=S)xKg*Uh27qW*CZtHEdE0) zHeyM!EE$?3zjEp0uWw=Xg2;=q`^&AmJMwM zW@O0c^B58Oodn^NCYgX=)S^?K0KpNLSbyU?YGtG~_r}Y9f@*&fa$Gq|gEXkBLV^lYyiD{Io*-ekO`=%fQigfsGu=qpsF$NK|a zGlS&2&KsnDf0}`szJ(zp7g}RXjM6rTq3_oR!!*(Jy~(!G@sqH)5fXl%kPMyj?Do;qbrggKQ@UD5 zAJka4Wx>f5r6(NcAQ#3*@TZGR#0P!dMqUdsE1sY9jo?U(aD(^7$fdJdXAr*Ulr)N% zJ{xiE5$cBAyZep25VtNN=tCHtDA=&n!Oro5TOWo6nRmGz9Z*@*HQ!x(F@H?P<{Hig zmrF)SG(krE_F*|x#byzy-YZe*YMn8xdLgguK)$S-ur==$adl#O+~;(&`T^l(@@iXU zksXD!Hy>6DT^eoCVq@;9V}Tf_@|x3x(m526Ue$>>nzYz9JD=9=HNBgUD!%o{TkLg9 znRY$tt9K<7S6yUwV?tlJntxpU#5=FjAX*AVHVvqInf9%nxsS)OrW&f9HET;J-RIZ( zF{e2ir{*B~n17kO*q^HR#K*|1YHclPHibgI0@OrR7N;w_E9)qgWJKdzVO2kZss z&^zS|b6+Sm+Xu{D-*T4_Vm>!0nHhFqJyW`HuJT1P17rToq;ka=U!($;*;U>UpHID# zHIKi#NykQb)Xe5X5q0E9TfJIE$8)tgzbDE<^}@|lU(5Bb(}o%I9M{+muu2N$m3^{J zOC_vY{@9qXeRO;CE`Mx3J|Y3gmltlO&p~s*erVkg&Q-?Fei^YUwz+Q_F>qvOuCqdy zb-UFi+fbz!g9NLoxD{G3%QYE)4D)95x0J8LN?0m%E?4W5`X5R6?Y+e9`l(J-9baXy zSzqGFQwbgbiK$=yBNouZS583$>g%nhcL%AesfknF=oriJtbato)lF@v3RUW<+e6Na zd~x8?dWhq5xrDp9oF9eqPVq&$`{kD0spa}fmU9Yki6Pep8l88Q+$r`}mrXSt%!WTn zG157f3k921cZv51^vQfA4et(DABjNFP8nFE7;UHjR80pc@qp>a@KH!PkMfpOnk&mY z&62;WPBR%RdVfmGYvxu|l?-XHjfGnTc5zY0OM`iyn~-0 z=I*faesl>Xwdk*2;Jo&T9gb6%vnycFx@o#gm3KKWG>(HS5^j^@eiJ$HmOnN=YsxZ4 zZt8VG8lhMG^`^uk?Oq<4GkvrfEAm1VE9L{QZ;X6(`gG6*I(}LA&Owg;m5%-i5>iQf zJ=xtjo12ps*dAm3u055T7C+Ct*&jtkpRsvovX;8% zS0toK`^EA+B1rYSVg}R=98D-%d6CpiXD)-sp_d|Nw*fP7Wg+x^izz>fxeohW@@zKx zba4a=IE&_lz$I*(ef(Ods1|0 zMy@U0l3S|tafcfLr`dw|nMl@Am~dC`YoG{&DCfRF&kSW2YjJCaqh&-DugYnVT~FEv zT;up2)X$ss=v%LgJA`#`d%tf6v&Vrm`X6S+eH>FCfm$uzGtBAbosk)R23v>9ezM$;j@tCc zYH%SVwBa_YiFC4TICIMrqJgCA)bq$$#Wx?0QtAdGr+TGqUoOvAxDFD9RlVVi+ll zY90=bZEu&aCXUz5ODD>Mfq0T$UhApPrK_4AQCt#r7NK}SxBWWjxvJVIm)SEo@6BB{ zy%(qY1=w@)hDaqF%K3T%r59_d<)64Do_X;yo7_*u7mDeqvg~}u14qnUi~Z=_5r4EG z;mm8YRiQ+_6v+bmQ?3o$J^)*1mQ2ud8tCP0Tr*>NwoYwK^P(g1DHB<8W3KGM((`Qn^)iVYeZQ6O4S$L2tPa?7w}+9c*J^vE3d!)%N;iu)sv=OFNPIa~ zGQ&I0fyq5}CZG0)rLcDpTq@~1E=kMW*HecOwUr<|(synhFghnf;v zw78|j@`n#UigGx~s|-G;q|*X=CObL0v*yr=dRgbI8+CDPGemrjL)#nNmB55xz+Z>2 zH+0gxqA;X`LG7EuR@po}n$vd5vR{0x_BbQ(Gs*Rhx#PWha8_Y$!$%>2a8w%u9+%%A zML9a^QNFOc@5_*=R`z73TYqh+K@*i8Cq3n+wvJ32Rx7T2a-EP>k;bg5U2Le| z!!z+HkDPTsguB+{-05&yp&8_Rk18&4fHky%W2x{mGm*&(8}Q{GPO8bd5gn(fY^q%Ee;R z)%gBk&sswB(Aol8mKi)M9v7i%D05i*IIb}N(Rzp*`Ra82EX=t`m+Q^t9BB`4?^|Dk zPZ{%Qm}rN>yN{%c%zs(|L%%JrTVgnA5wOyUW!iy;Ll~7#*H=|bI0buNy5y}iT-<_t zG5b4Jy3^H(EZ=3AplXFTS#i3;=cSTnj7A8C3Le&29?hg(XrEs7r?3o^#cKA;lLO3(yOBa`Z=}vB9aOL%r{Ae_0&NS=K^OL^@&P z;qr81LE?`m--Tn?S>?Gs5)|tdE<`_5RVe)gqPw#pR1&JriipLc6 zSdImgxpk5FE9`xcQk-HCJdkcqJ!M!`W97Amrc4+TT&~M3D zJO`|cWPcEeulCJ|c36dBv=A?)m$}+t9q&rY)l8D4WFD^Wih)Q@$Mz<%W!v$PZ?91p zmF;w|PW%HUjj_nBHte{n%)%A&p#~g$I$b%J9)# zo$-d!(EI9b2w z{pYb+wjcMR!B3a#y$q|9wUCXn=fjv#mW zwLvh1illJeJXPZ!d+*W_Anq?^Q?YJ(S`72dIsEySy@V?(?`CU-A@0Lzfb}YbD>(&Ana*k5Wv~lYeAL zWfaa=&d1I%cFv3iIlAk!{dkx2mX&Dt>14mk_bBR&Lgnpq5j9y@7mjg8Sok5eHG`7vyvzv!&-WavkPzptbw(<$1cV z1ua{$;1?i1xx*81cDh$ClD_1ZRe!5Qc?LU96R9o?6{80qF~lfMKLHW%&D^E_=CNe2 zR_~V>YOl0*Ltn3AYSHQsDQIeTm+o|q?|$dGpEV&fbhDG)TXsDq5U(pGLZH(|5BX){ z@%lA-`pc*v`CKaDRbYcfEDb##L~Z{tiST}B=B#jvym%S0prJInu6*UW^nX@R*>z}I zojv{7>w8?H9ABbP^_*5Je#tL#Z1lk?$Km5XBNG$i-eE3ML1H#er57eO&Pft2KSE^r zIAqa}5j`dQT~AZx9?o5#X58AQH*Wa6yJrwnHui{b4ky=F7i95JnC0&Io;^l8+GJR^ z+02t1Nl;1ECyKmx?^uL7OMiEOGI#0~4$-dOiHry-DHp3KCQ#xGAE(+F%vm4;g~}xr z2ju5ztaLl@P8e0C@G3~90v8|;&lBk7?Bb2zKkIs32Qj0~KpS%~uND0!k;nd^$HTo? zox3{3?g<<1&KG%6Ds;0@UB85xOYA5mR93<%b1q!EZf(GY`S4gds(*Kld%kevF;s1j z(5`FguS?5b&%7ZK^9m$as~Yk12~T5QkWjdi<>x5u8ubh*ou3 zU!QGcS0w0$ceD1CIVg+iH&?=Hf;NoueAG^7bt?U&EJ+ne;f%^tt}8O8o~&G!>`)6I z(Igj5W<4H%$c46iE`JBEvm=Rlw_VWHDX=B1Xn9^Q@B+QWvWzCN%gr^iq%BcqNfM)! zV@KwiG+ErC)`dzsZ`rAF5ROEC(ZPZ{F`4Yb%Fs{rHRa?}nD?{D_P9n{p`KoutFf}N zX=fO?oacL^C$x$d+9YB3R^K33P7q?I9nz2bBt#`jgUWU(OkS}N718r^39r3I zUh^=j9v@M*WFw`~5K%V!Zsrk-;QMImCWl+;Vu>erZwS27#T4bt*VtW;huM#4EDvZx zjx;l7PW!60L4V-1P@_lR_pImssM%zEV!nYjFV`ab=+-6}DST|f9G?Yl4BUhJm6 zDr|hsZ;U=F7qrRifqG+fG6hlW0fJpBO}M<=+euz!y{6pG>CkoG@60Do1%wxYfW(yg zxiXyWqR6peDV|L?)GwkNdX2-?iVy^TKitzH!4b#=G^S{ z&Z+U4aq050pYr-)vU#o+T{0dYgox(X%ec2{d}Ok1pSdG~PuuIFl%O7IbK4Zp-WIG= z-Hoy=c7I)V&5u|tAICcibVav>}@AbtqRRTN18NKLmbC@A%W~6A=>qlXw`5 zN!s;BNp4=;6t;d|?~|qq7Xel51tAb6okh7fM1Q5&bH;h4mnCNxG!x@J3@>`C_3B_KJQcd?2HR3`hb6y?VIa@99*nHc?uY@>+5s@^UUE<6?uyYHcm28u5xygwOEj<8m=cHL(0A1Osg zl-KLvV}3y3(fA^4q0*qG<(%^_yx-iy&a<>1qF%<mh4ttqa zy1JaQkFptaf4+7};9j-!Qbd$($Re!TlYftA*~-|beR~NuyDgmD)DMo)?p;tg#;a$d z%D|1 z_Xjm!pZMc#_>R18JCAGD96oo|bMMBXgn28p)5$$Q$&`~5q25aK5bWgjC^BQ87k^~Q z4yJhYxjEU=n^2Oq9luhOco*XtSx>7A=G8N|a=m|F_i-u-oQ ztMR7L!(Lue#~0G=Q;7XTQ_}-!&3~uHbpE)yRERMd-icEII`N(B4h5R22R>P`8UPjANnoj zq2d>}H`f}T8BkSxt|Ue_j7VAiRk`rpOukxmBF1_#(wz+{aDD`tGRs0J)_?UG(u)m7 z?u^RohE1NuJV;I$aOXm}o6=09wr`np*JE3JM#8iAx3Uzh6<(%{ufZkgNW554Vkz?) zGqkC@Jq7RaXrE#iQm9y6W!qv^PbEh1^!rm2hj9>**XzSvlE-ijuCMiKmLj-RFA}H4 zILMLS;JblkFJo7}q7k!0D}TFKUGebc{n8i19N6sp{u!N=))T(mZzJ|p$W+S|F%X$@ zZHc7M+M9Rm{j515|0#SsT2~@QDZPyL)=_WvYPD2CsjF_UQTHLIqn+oP@Pa|~94(RG z-8_{Dcml^plnXVk!keg1T{bj>DQ>Kkl(F@xeMEIBF4;FBdc6pXK!1`ES5(2!*W;dg z_*4)UyDuE6Rcw{v&8K!5vSSJ{s;?HH}V1v=7F5tVVp6FRj+AHEasW-tVzTp-SUs z5k}|wg(6bo;uF;1N-XlcWnmYN>VTxLx;0JZA!6)W%=Tf)yx-xt(eXg(%Q69{y_$2% zQ=)AmM}^P^!3X?MKJYNEgePk=meHX@=0sziul~TfO?xn@rGEg)|6H*7W%pDl@5L>p zGA;8q&!Y+4J$sEa;%%0)hQ_A&(kg=TTAmnvu4f%BL8@*$TMhlklBv7!I){%Er1|~1 zJWBd#uwZ5Vx_-O6`T#hs5|7(`HG&Z-+6)CY1l{@<7lLy{s%JVu~=|H{NQKCGmC0it* z-l4LzHh<9@K&|*>=dJT*arxtR*E;MHg%a7&GO?W9pcuX-Q=>Q3QyW+O2d6+kaYF5E zo#c52Q&2}wm=-PuxVO>5Jr0~X*SUT)8Y@8Tp*HBs zNH3fqJn(nMUT|JzYxaG;D~_D8>w8F#M;X5T_J4Y6xYnKO`w2v6dpWPk&d94aH#s=iV=SC`Ji%s>4tmeV_4-c?T7|%>Hb8v z{9L$BabDTM-;YZwT7CUiPjFQqnoS)E&pKCh6fuO*NuQsz3-Z}7qnX+#{ZSXTT21}- z;D716TxX1=INY0|)bQ-8yDpExo9ANEsU8>0$cc7O!WMtIx8~jF=s+z}vj{t1ojlM# z)2RFWGGRb@nS2C9?n2A`q{$Q+`ibhGr!`U|ksxkx!!G7J9m|{2MsDcv4vE}JpAu?g zyIVhLS%n!nY2664cLwZg#C2Qr^{}WsB7dhl+V#k3oWaP5mlI@RJ~g_-Q+5HSU5jo*~=jG$;MJT%vPe81m%&OSK>*0R3cD)qQdNRTpldn z`^8z^?Nl$@w`VmsW9}+-J}%Tf6n{w|O=PXU=2x*3txlz{*B&t!_N@H!a1r;pp`Gsp z{AS_;WwSjQVgo&x@#}a{*IFtaOFNRNJ{ikp=!jJSYmjf=V~1pD>BWkNcX+=eP30$u zD^W>>*!s%i-6G=lscQcTx9T!)G=DM~bBY%Nyo{#(J zc5E;WvqTNpmGJooi(PHGXGg+!ux8b&-PHM0=zKZ7YWG?;?*#TnpHIbz+7mPf(W(1W zc`v799qTaTbbmTaOz2WF6iMTDmvAI551J$*Y%r{02n?2f(|z6+`W>mJ2@MC1 zQr{6f+c?9hAL2$3^d@@6tVA|-#SQfg=omi{-f<6-H@*1NZ+;RB@qd#xhr4EH*r_=Q zW`Dv|G~C-n-dNAY0ya>O)X>xwQROSM=>XyJCXe~iC;Rthku>ezhWzVtjPvy3J6yWD z#Qjbh<1?ry%zV<(!%u8VzgWD02Aqs8;+AJ)f&tg*^f?;H2cA`oyd((n!C!>;Jk-=z zy7YBh&Ab(z&x6>9L4QoTD&s4pDFr}YUh?hoV8AWMpDB%Y_ovQDs3zn*cADX|j;M^i z)_03d8}FUrOb)faVyPI5=E3?>#M@}n$F`u5_Q~vds0q^b1F(i{lPAtuD(36^a}KVi zc=wIwR&bYg^96CI>`i?l_(d{$`ofPdhMS+eWQh$y$zsbo(x zGo+k^zU!aqL1Beie>jX%OZVDTcsJu8+hg#DL)wz)VVBs*XY6`rw8BWZ8kOly7}u0F z60g&_^*UY0Oh!J7pIr%OzFy^t);;GwJ-X;as4-={d<5B$6RENbPF`@;jG`0it6O~+ z-u8jn1*Mu7`hQh9q#pZC@)DlOC2ffSw?gY~te4>uRu?bninR#e{_?yd7!t%`h8`j! zNi(&FbIxPiqBDti6V!#_qnGp2S*C<^7HOc5)cf~S)B&omD8)_Pg>tzYbOS#kB5tL21|1gc)Fcf?6(r}^mZPWn~* z09xxdwSSi(R!yGam0s~k_7AJ+R_-mT#Or=j^9yo+`W%iVnCm%D&3HX8J0}{SD14j! zyE)j)9pA_sLhu+zRoO!I1p%C9kr1iUGCpLU(#StCF~M4iVsXhnS8hA8zW-HRmNn!9l+MG z(9&3BrYmE2THlv}_8ide8bwl@#FW4`xnZBr#teD#nKu_Fx~rgGz@@o0ug5}VmZ)>D zz<>A4P}rzW&4zkiF>fQCS$~a8a%}m?F&&|OuD&|tR;hU}CK#~xs2eGZo^$4VBs?@3}5uIeW#krc(H)Sf_VmY%r2f z1d&I)w;tBVYGdfwx9SzhM>Q~nbi3L3*neH+!PleOeqr=1(R@b}^Q>*{lbkNe^-dn* z(uDXW4CvE%1o!JxZX0BNYxhU|MCsv)ZnvxdL>gy2kV}LjtwA0>F|vw=H^lv36WA1j zz3q9AQnaV|xSJ)FdUGE=!q_f7XZ0^mN_nrw^Fv&#TD$?JbK?oWeD1=4$8F4=IDf?` zvAR8Olhb)bc+9{u|7A~M zmRVLz8qt+O;hHGtWcqHTBmw;3P7UVk`gGqeYt@wB+|KHD5ltHD$a_>t@4a@kqj}rs zgpGdrkdf2ar`?XlWT99++wv8O!+$Og=9?=_Ya8G1gnF%x`ms1@AQ!7dX_TY66LDu# z1)&tPLrm4`YL#43?Rpa)5naDjj6J6pvR>Ef;PSFvD`e!K=Qv1LcPMV_Lb(Ewrj8?H z#x|fqhOJbx7q&Dy3NeHFB0p=}cfCPG8W>`61cGTkQ<3K@T@f(=bmeaM>f zpg)=Go7AP%$;#*JffmQ$-p^Vjkxpf`C^LlJ6m{+e2H6`8vGo1zZqs(!tpZ^~!y9?w z`rerm&jH$-XXRZuYr#VtiDG336#z01o`<3t9Niau#OfnJqVwF#s$;-p`edto$X5(xrbbI1(f9=JE1sNZ|tTBwWG}9~z}`p6Oyb4`-xp7@AIn zFFz@_ZV9~w9{BN8lI)>5%&}UQywoANZwlcHXtrHhvfG-OsJegc4N!8ea*@0TYS+#D zo+GJAbcYE4o&6AbkUzOAP3dPFp_Sb0$!(O`O4_5K-qVmMxV!y8*b9HmD?jG8JPTB}mVYpIm>&qDMT9l~Gp{-q{W5LC6Za^1K1|dKjqQ=ga1m27~&3%U!FP zsr6$8XV-4s=*CKP{p`t=by$amD7BdqeUkT85_sR(_i0PT6vUw=dd_i?N;AT#-;oSP z=`CwJg-OWE+t+wEvoOE}&mryDRzsU>Pkj@x3o%XWy4`|WNGyNB76en22_?2rd>%yc z$PyaQXPu00e>Hs~w;NQ~!PbGanhS+O0Je2t(wxMXDyqLZPAq=YiSMT&Uh^w{uMUs5 zQ87Qm=~E+w-i)gntQwHE3lwzP5~W|4-=m*Ni~sBc4)XEy0`A>F*V8Y1Kr2rHra9K> zkMQo(ZwVBU1w?;NYqGnS;~1ML+Y~}0PR4FUHC1K{``qz|Rj!8?>+fNVNyN4NdukSF zMA9F&YRjj?31|q4!l%;;x+gsAK6WDo9?2@(;_A`K#xr_K(KZ8Ek@tVw&*#*LhXV@> ztyYFN@pb8%<;SOhU?5v>?qwJ4id1C(Gc;=EXQj|CM6!S3O6WMLB_>RXm=sa+1yuhO z-hfq$qTYGdo|d<8-E0_vW_c~8x1Jq@44}&TtJStEKT3DuLTo`)GT78wJU|(Y+2Uso zD|2PHX`hHw7w=Ybn=w#K*B%CvA%z^O2aWjBEY?u9qG(Tfburj2_x&lpwWouOK4q(R zOEE14c%^^x;u9u3UDywqa0g^e2@R!YAe~&4$`F|wnW{*bW~RR}S(EW<#y`DuBd6ap zcx~moyqRashkSCa#QdY!lL_}CDNC7>?`BY9NsBH52ypm|I$=p=^V~hBiseu!EIw}i zFlLdWR-;u?x!M~kh29y4I2SRnZ<|*jj2}GOJpO;ZT0awQZ4i?djF?~x)PU-jG!ZM2 zQ;Y#$1S{F`E31%LDnC};T{guDm*6netyYwV%URnI^~7jcLP>*I>QjdSGP^8U&O?}7 zsS6Gw2r#Dj-0)->*|0-ze!2IkU=t~o$)|#>#W@ zM~o=?Dr_ynK9Obh0uM8FlF-jmpSYqx7zsvsSM?vo2%+c9W5bO?<}Ayf^0VW>3SoG!R)1p z=k={f0vh>2TIJ6c2(t1Lq&qPLB$i$d<3BR4Q2Rt4HpGKEBIG$4zjrSkSV4x|qX zLYMApo0S;koPVBRubC9|FIlTF?L)aZZE z@G`x+qPuT1xgyQW67W2_9|<_Tpi$FLgiwo=0#t_Es&^LWx)AI6bK&RekV&T{DCkfyyi85Khi-`);H6QDA-A#nAs zTxU#gu~3oA9>=|8arFCBC^mxI>Pa~gJ9R{N#|xQB82kn7+oDhjMy5%S(p9AmtnAkB zWzr?*FO7ux@9_S1#@8?^MjL+}(-WgPR?xJu!*y*#0S|g>iBApqJ-unwF+wjvCAtmm zIr|soGAcksjbu|=-d=~$e{Z%{WMKQJp0Pc;gw)-^u-j9f>1FTa>rbZgLK+uRY`zd+ zxzbIcd{RaoB=1LFXHD;pZwDd5a4*Uzs$z6&Yu;0_Q+sN&RIvd{XqA6U2ZCN??p9Uk zl1pxx=w+$|vvxk*i=PG!yJTpskAZBS-G`^}h%sd@!S zpLp3?k$~$64%tuqvk8^p71(u3%u$d#+fWU!}1`>^} zx(B_QxsmfUS_Nei0Zx9T4{>_+sC*thEJ?*kE$G$_^sggGQ;14Mse8l>{bk<5OfcvSd` z{U@(nz&~@ONF*FOGC{=-Bh``Pey#lB^NS4W^|i6z7c#7l+SC;gMFH6=ZF5a@f@aJY zY&a7{&N91S+)sv_E9#-+wh|a&!J2eolB<)rN770PBiKajinaGsOlLaLfwL$0L=LL> zY1=in>;vUZ_aJ|BcX((Nsv9PL+JN29eWchJuUvQ6j%(?LK0kUm>+q6N&&I(C4HxK&1HHxe{KLET`caq( zL+5uz84zVXW{M}`2Vpv^t7}f5Yh0IIy#O!Vt&8fHr*@&A)RBz_8y3Xb%RZ3_L{0HU zz2oZ<`)GfURi%chz@Rdqn~%MGCYPq}g0CanA%1#ZDcY%9b%?HE)=ncX zCVf_Hn1=ocy^8{u&o7&G4?;{T4dzyPOzCv);X*QN2aSvEH3897HWRaRw#oP2+)Cn?qPt>PY3j|M$*E6a%JV)oSgtJX@-#tW zttzBybhP=8Q$YD)8r-|{MVNp)oalK34@X#5UtHRqAbK$E+5Cq&zE3ZE5VU*Pld)z3 z??Hc~kurci)FrU^+uu8@6$rB%*95z*v2yl zOn>cMe&&~kCge<6@}|~N>Lq9BK@`2vpFVuQGzIMTI_YFrVs72wC{9rDLnXxsXO=gd zxT@7ZuLM51`OW3g%_YxvfH5-!TlT%Po~S1HJ~8JMaD?Io@3K53)w{S09Va%*UFm;{ zH&vyRPcXNuq5BiRQR+%30QreIKubXdLQeo>I?btG@<1##BaFdne$r|&vB-yuit^G& zU>4QjDi~#lrAX=9lCR5T-vb;ooXomJcH-=byuMl}d+efDpFc)|Z@*9ri zu-Pn}7F|!tQ3kS$lCLn~tj$ipc%k;nD#Jq09<#8+!jrhkb@V-MV%ZY^%l3b7#Q-J# zrj)&IPM}g8`j#E(7fNks<+uhPNpx+wd|Nl=@rz$G6&k^j@K0$Ex^Cgms@<0nxvf%J zKheVP9aSG*gjqzY^;n+xT->k~tjbEdvC~r=;RItD9_(k-n}6aubg}C`mk>zjy!V_! z*MUb-9wtVl88zC!?u{r8|IvRK?FZXEr(V(NAh6^)l+WO7G}rWQld;}v;$JC&&Va-f z_r(pvxpteZy9DFKh_ps&j!-fjQ4ejCT9;+ELN4$#zjpsQktol(9kiO;j|9(p93>>< z4!gk>q$~cq=LMmrx2$q5QVeuM)reD~b<(F*%-Y3F$ybQlCF8U5s*W{U?ESt?0ZBw0}(IeP-t%@9mWDtbd7 z@Nh}x&$ioP1+{>PXo`PG%NFG(p^sDbO}Pf~C>-&jc5-imXP@eYq*}E+>3f1W7x6tC zOeRZg0HdgcG!uWWRvEJEZ$(#_(gzhQiuOOGR3aFqh*M>S(sUAPtX%Dvf+7)a9i_R} z05rd$!plCCkIg+wt{ln)SzqcTbhj-;z6hw`u$kLA9mpJl;@f|YU<z@I)ErRxzk+hV*xR-#_#~)AObD z2WCm6_2U9?VxLZ+5e=_6JTOJ-h-HP_n57?A^;fnsKpzCOs7!=whK>bf|o; z>uW`fW&PNY7vz6musjHXRU+Y&pkJ5-7-oLmS)R!1X52M;#dYpor8of$45~z#7iCVN z*JE~tp6|54Z!n0@(;72rdqkf{8x8FhOA09n4UyD`Db^eIZbo)3SuMQMs#rzY$nhFf zi{*sM-)SkJln}kYYwH9GlX?{O3_Bkv^kEjr+2-#Ov2}kFx}WU|hZEcZKTnfS!}uP< zzL156sM?V6*Xj*Fx%g)(r=j~)DW%IMn7_p|NJ1}}%NEnsG%{kx4Z(PU78g9kieHyW zQ)bP(!91zFG^`z*A@4T6y1i0t9%GHzB8KR+j7WlfI7b9T+svJucFn(x%65`1N`TG8 z%@^~Zv+RFR4;tWIWOG0h+*-Guw@UrKx?UxAz!*%>{@C2hIv3n`k9hf~o4Grq;o!|I)JDf?<^eTo6n&aIo_yuyhk zS0Li!(ZU>?H?~NPs#shWDICsLfffcb&3KGe4$2&}K8DN6NHz`vo?oY>cJZO1v+QKq52I_hCp5 zm+pTwbrt=!EtggcccHZCq z(9iKA`}4zxS9%O{^m2g*QNJss(sK%$MnQk8a*WsKiY{EE`j+kmXYL_qIMAHg_!jx$ znP1HRN+>KRnSZqKQpqq~JVn83_h1s)OMZXz{+yAHUZ1a{81cI^V_kiLTWHADa}rbB zkngw1^{b9^&Ik<`?Z5=c#8IL4vI*z?tKs9Z3-na>v`I74&(Z7P4ssXBSU7-RFI{Q; zXNH$kr;Ylzyh~CCVB)&4T$_pS!%5TRPthY{GvN{q>P+%SF3{QJe3OeABBlbKPMd!T zzS_EP{SIfYPn)A}UUp~g-kfps+*wJWle<@nl@tytw=R=K{%o&dj{&kzK?G3BFc`&l zbOD)mJpZi=pdxFrO!CJcKmKsH3#gXD2Q1H}M6_s$dXjScSGvG+ziAQ$Jx!AiJ6eR>Z#+_PC7Xx8kb9Ww&BG*l>+5AV2Gsj8eU0(k(4 z>-lqi+B+-x0zT$?_q`L`kze0L7wF9lYhJmcP27R0ZpSqw>MVqZ~v{7Gw zmYj&=ccuLT`l?ybD^BX>ux?T0ILnuwys~UOvywyu9It6($vy|N)p4^Bpc5NB4G^jJ zaEAUVHR1)tZf7fT%15^e@}3lP{*22X$SDqi8J!JZB#k89AVbbN5kpMz82$n;0&(0? zv^pX&Otce|+a-pDGP^5qNJf*1BqZQ}XvG&-g!LzB7H^1MffX3Ezj^8AABT7u1(_ny zAbOf2v)O4K@cEB~j04JdmJJo24~M0kvxzdzxNfvnpDm4J<+_UCQ)Cwh7aRv?pSO+SYS-W*ce)l$*!!rOa6J5>z*qXo1>+>g+> zPtD{;?>IJGX6iO78}XZTh;jR?Xpds93gowkClbZJ(6zs&=bAH?n70XNN+8D*awMaK z6wKJ;vk+C6kBkqd2-o~iADpOvk%s-0PQAkJrU&0=k|fie+N&buZS9wshMuhPnb)yb zbO`!McOInC!BwbgQLN+Qc()x|0$)p4+L_-fH2JGAxFFSjQWS3R8i!Kk;Z!abn}On< z-o_3}`h!_a)EQBYc5(rK?M4mX)jVnM%;?w_@-zDE#dLp0q<7*g8Rb9=O^RD7|FOz2xkM|gV>Ry%(#-f8cgp| z4pC{id{iG;(y5(c?B`#C$B6qUV)w6Hv*-|o2(usq`cR7|!Mf=1r8Y=^7eE&=sY|XHf;2f5E?PyY*_zMQtOxww*epJEx%AD8G-3KA=RcOI zOkL1#Ozsw~o`%=&uk_A`Obin+P&D!fv(ifQ)Coc229%71UkXpZR+z0@o;&Z4v+Bwf zi3GG+bCaW9MQP18`{(SIf(H43W{7L8&+0VkK}cKU7L!(g2v=;?NxdNH11~&Pi6p@g zW+Ld)ALie1>xi%zAYc{1?jGI?J2oG(tOcCqu@>lML&&TJHJ;AS-eBv^+9TGptUkQX zZUlu7XnB}|UFmcfs?&doaL@#NS~gwxJ;0|54zi_}ln28z?5n#LZnEtZXL&q%{Gsc9 z9BoKg?Gyfg(NS_&-QYk=gnY@>uQ#@=xZfKrexE+pyc2MDpuE-=g4$&-ViVvv4E;bM z;I|oyIoFY$KH0=0f!m7pnUQ(j(Bd;P zlo{lG!ngu>p-{iRt~^vL9@@(!KmFyRC-&=)5g}>&)twryW3DEN4gvm8qt!b#xhSDa zNUS7({u|QpRLzA`gs1j38TD}9&G@D}0@qo%s0G+MN`pdXzg_d|L?90Dr`VQx_!9T| z_dYMV?2ospSi+erV77i_sM4I*M7S_J4leJO5I3!tcGmM^ie)=W1BCmWncMMbisD=e80h@x5?wcJEL*`O6VM?hn<~AeX`B zp*$de`8iy}OX>JZV#4J8!0I`FYy|>WDjqFjt~)^V-Fry=ay?eDA5*2H;NJ&#E59QQ zZTK3ytGE{(#5&yQqDeom%Im$D49~lN5X%JuI2|3?uC6W3+pBN&zLP*$8G8$lj8V0J=9(tm-S&y=xc`0r^tL=Y(z)A=gc6->S7J>56G!8!bFFcl zAJPdb)Kac%QOjX?2;8Ltps*wITWYzeHT9C`pl&OWfPeVPW^wdBUreEYSsNTx%7c%w z^fY!+roJ+6&Nqn1kDUs@~f~RO8 z-&!>aSKhlcrnlx3pnbnZ-C{p?*~#y*-cQ zS!iWQh&I@Xrj~?SMbAD^hTEE|l8-eCuT^bNQ!{g9e|u8mcp%AthgnC~X^nbIC)@Bk z;c(}5AfCx|^3CA=Tf^b^#?a4Q{REzeAxhgpv(|O{U0>QjL_%a0C82>P&7>q=P@8;l zpf^Yc_DuxnG<<%u(+HlRYEF*}wJ*)Kh`If|rp=OC(<~)J5^~|=hH0b>VOp^Ln>bNc zrHCGqaeh{Mufg1Z?COHMc<-CntLEE z({nY1(3E_jX%gP1fFYafIOg|&7$j6B1@*!o=~nr3#xSC*Fy{MX6wNwA$iY##jPx7xx_B`d0@#?o0 zd>P+X(F7O ztaJx-O8J|0ex{Uqnqpzc^t&sc)@iUBQRQ326LAKbhW%lB$n7Bv-j@XeF;A%Sg<7#z zaQBvv)APQ6{9spXP&Y<}k{fK$+GZ0>tuS`x#wN^5ZIQokx z&3{A4Us=A>Oo-3MXafC_xZUDkt^a)5VXdZ;myEf8R2&S$a?M|)?N7Rb8F5R}l~go2 z`ma9!KQiBc^M48Q;K{%Ef7S-*Vi&<}aS~Z&E%0QN=uWhMEw=u=?>&O7o{emLj@k91msR z(6azlw4Dgvf#%gd2{>WqdqSmQ=>?)NSn4 zXMA=`;~qRFXOdMGePT`r99769PLI@0{oTjxgq75U3cq2F-bUhTH4|X&1#{~Dg%|uS z)QWk`G1rO0>UtdYa%UpCa5Nd|h$ascaHJM^+p8{>dzn?`!)Vi_gm| zuE^=fy0%0J&JveZ9NS?K5gy>tUo=90tqnmUA*A7f{+%?sHRbMa8~bbR`Z~iVYBl~i z3FSPw@?vc@`pyH6J;9lq2RJ#iA+O*(4|g-4vxDM-K;0^xkQpL3LY9~W2Q9;r0c(wS z7CE1k0Y(`gUS+p*9;75;lO}(Ezh-J8%eGU6cvJTjm|K1f12pCP0S>oOZ7$K+Ldw7s6LI26!F7+rPWz|SNp)#US>S$^bXsN`gdE|ld-OA{7L(-4)z z7kfz`)mk#&#;$2gL`u?TcXFWG#Gdn*sHzgap%-gM9xr`#CG0*wH+D`|07x-f&{`#h z>Sk3Fmtr2JFY`KlPL=F!DtT{zvmF&Hdbi}!@IYt2;Y-EAX18!X44C0NwOA-??$7S) zk(y$(*gtGvkRDZh_2i}GOMzOsH@4B0ngGo&OQRk!vtl0mFFZO$A*ge0vnT#GYf09$ zA4Igo>X7|g_^bgjs4VGuS@OnM?wn&>j6m!a(sOZJaOfFdq=2e%r*Qs%vX5RcCQ!X! zj@LYL%i}(8-y-t^`UqKA$#`9qg``e>BORePA}iq z+u4>@m6OGR9#W_76EVt_s652`72+_7n`x(=6CPkq=xr1Z7F%36>S{)F6E)C@!q(+u z+pPTDMrh<*$yqqRfKI%BnUk{lt_M)0^Oftx1Y6FGT!RJEj?2n+WeobI%?M&i=M3B9 z^H70ZCx`=z>r{H6I;5ui9}>;Dw6amawR5$ImjmK0O3E;Gh85v!2TPy=Zcf3u52DGU zry#f~M5aXHHjEv*oaAlP6Y>R?&l!#*fPA1~VW=TG&T3aE3zxuui3?I`V4y7$R05MN zodU}mZJVTK`YW*12qnVFdPejh(&wjH1W3gasMCVF#C;X5OrC)Grx)fIru#!^Stas>qRWK{pcrv1h2p)VleO}S<4dxtHy}So@=o9d&()6+A&2y~1L=h`~{vfQcW7=1LIrb3xu7bcE?YT?XM zF4YDTby~@9rvz#SiHdrmsYrDifFMzjDHWh_Y~-5X0af4j9iSzBAQ(QzXEIXcSAf;o zAheFf)v@AeYL-x~(ws-{H);8dL#B`)Q|r7%YA1uF2kDl7qYH7NB#J>|K@>*58mYM! z0E%I!4P42(y38-N{!Go;;dX)s$_s<4RzXcwYAg{5ga;;YBi{AD(c}zYWM$Hle0Mzr>hB#x z2P+vMj>SxgoJ~4FH-yA^a5=Ga3fO46;{-dn7VT`U4 z=<&P-R63DZig0W$8%LeJT~h!Y(Vx0#MOWRf=LO+^F2GMY3qNTSMxfgo;z)s32um|| zJ4?&Lup>;TsN|(V2BsN+2c!4uPCR%?n}!3xWC%Ze?s*X4T+YG7Yl#?2rRC-uM?>lYji93o$WI+Gl*Qe~Q-Sg`$jVu@KV@=#gX8?Yzfq?j0v&6Pcgj|ZxvwkN zVJ^3O^2$~Jbq@>2?A9@{%iFi7o8KSshSS{)aBDI}!p4o4m9l^jljT8_#ThqcP*8dxCbn`IKuW7NI^1(T58fHk~9@ zWn~ntIWHX*Qi3+`OaFCowyVvFwaRwj4O9k8Yrq3FG5CDdw6q@GM1~j>8U>HVr7nU7{#4|m$qh4s7^#>g*64terH` ze!cAIjaDS{g%D}FGXESkK^E@~vDBuI_gZx;yNE6XNCsC7TPFcY_}0S2p^*|OvBUnW z+JN#tPB(6^HyXk<(q}w#r@IpdLub%`=|W4DWX&xsOGhxbMlKKyk@?J{6$qr9&Xa8_ z5nI~a$$^7X56K08#>N>7ql4=ouODkR)_wvRyczg?w}>B{Vg*>00%>_{Lsd|WEHjB6 zw1lawVBVvI*ZBxZ-I2WRz|ecp!q8G738?Cqi*8|hrO?jZKWPZ~JXOkYK~Ot?(K@af zKc71h`K)YjB=?D%CwABbFoJ;bcdm&|uIdT}!s_yFxzGy!Yl~%QgGHaG#F@7k5i<>j za3pH>V5BMOmm7Y_6VW7@aqaZ1kb8kbgi?vW`rUx)x4&7%iY4QRpW8lDKD^-GGcj)Y zVQdYSM}a}k#!!KvnN5;{{}nTTlBNhqtnYnx2NY_^m)BYBNhYwIT8fdMCTC8qy*=Z^ ze0{_#>QVHrM?b9_r}aG6JKYec-x2i7LCYIGYUWoQ1*-0ODJsCCQap)rXbdM@fOuu` z1qb4tRT=CPpR9gqUA7ZVmQym$$%XC~+UOlIqFSMlUO+^Jd0to{j+-KX8=Iq3T4jgy zBc0)4*z@qpe)X}KQ-yASoc>&}Cr`eC{q;h*+_rKUGRa$EvShHIZwzp@+A$zK|eUYJmdKZCS31sDf|Os_eU zw>-q*64q&d@+Y;QNOPP-xIM-xe!H$HwR-KYb55hphl2GMyH%`9R~4mXGvU$BJaLL` zFLbn$zzjZ%Mt$smFpJE91J~Vqp6A!>(!2tXKrw-Y9FW>h-l0WunWaA z=!;xybWBT4zeXtIAq7eFL6>dmq|PESRZ$(K&;4lJCvkCI>lyeqhh%(3k(DG|S2?)W zl36kE3Bhibozt9+W|(93E2@v2Sg7`gJWXyQ!GS0mP=xIzjW>rq87ojyCEoze^57*E8B|`c zl?&K^x|k(&C=uE<|IQUc^6Y zP%)2M2oW~ya=&RCHa7@ZVe<#5MCHt&4UZ{S5}LnS>pU~`2vNMyN)moU+0=w!E^AlN z7o*F#?tbs4K9lzur&b6FAyq^Al*|fuTaz90(G*}O)C$4awR(xCo%@|D` zfvM4%rP|y4?ZZB)!ju#VaKJzAjsOw*I|1<`-1zA%O1j?Z&46F1qS)5vSl$9g1lL*c zGOS?-El?r01IPvb8t})Y7EL9DVkq~94<%O}6RfaDRT!U_&UOY$Q#ir2y4?MQ&%K*} zcW6K?IJA2L?-oa@r4D*NK0}Z0{0)TB<@g+Dm9c(n1``o6FhZ0ce|GB_Sdq{XeCD9` zyvLyByOC%h5@s#tzb`0!ZH_{Er@ergXzv0O#;}90P1f z=gF^&B3_GE!r5SNyjE3r*N!5VHRR$EdO1@8V`9__AQ3#mxk_ST>$Sh^$lLRO1ea(x z5!X)L5%60Iq7Bw7O_$Hj`#a5%$yf+7uhtWu)8;oLk=o#h0hj^m;o?#Pk{VM2w|ggA zRqyY2YzIbp$!J2RKL9>xzcw1zSnu_R?>(4$@9hG&+J}8eAZWCu%#Qh65~MyxM8+$o z@CGApGPliwtnV)~=lA;&R%_6IxgjChk|VCkY}hX)*%s+$+0&57GML8^l2ukAhz~>B4tmENPiV3UuM~BbNI*KHml0rv(s0_ z3b+7`p7M5r!Y$3;zA2iOSljEW;+BXS^Q{DvWYZP=DEZ-H!C;5eN+JD!;Vy@9^enk} z(V@?qxW9UU(}-xDcz8?eRY6N#Kg0*?&-qo*3B6T7oS->!(74kV0jStRKocQb> z&Z)2jI;%^w!B)gjGAcdwuc;S3CUdF|l$DI?v~pLGB*XVFpTW|LG4anbG;#n3Ao+gG zR4SU>z5Q3B9%W7t$c{6A41tGcb{~LmSg;uU(oL+c0T8LRb=E^ z+x>-Wnzk@2z}F}rXkg)In|BDp10#R1h6&t${AdC&bk1)npqRnlov0&stk8#{7M2DX zFVq@om2s672s4F3v{{v@;ImPPj!%6Fv|gWLt1uuy8c_4X*Lb3T$bgvwO<^G+LGXat zv6FDYs_UQwBMR|o>(DkzOvE!eievSx&(-1ouyxj28>&kZz90hJLfqXx$Z&V(>d*T3 znLU%tA}8tOt*)-848*-RDnDiJyed$;;svZ$5}q4l#J%Uk`k-V%)N&BR)Is}IF_c_x zmCtaerpI_4Y;7ki?Y+`0i@PX`!W5)Svfg^GJu4%+Y5<)O?pZeO| z!c;+H1Z>e^0@vJ<Ko-yKGr*8su!BJTpY&Jxcl7$N+SXT| zv=+btY_!klxxeg0?)9N3Z{1*QKV^4+iUWI_-`S-fG8tzfH$5qD8V^8`xONruiPFliAg~AXpB_xgILvx7M&#v~07% z$-(Bg>ypXeZxJ|?-n&8P)YrQ((^qD1uX+J9L9rF1R!;ErN2Tdrobs;i=sjK`L3Jvn zAUF3>txcO3si|+KAP~^@5dhibngwUcI;)V&bzv`m-0@TPv=kic)$g$mtcK~Z?ZXkK zXQGDedUf4_QE>xpGxcKOea4Y+Wl>u>H{Eh%u1}2DWQ!uWH=tUm1Sw8UBu$ ziZVHWtg2Kow_O#|5seL_K$r-nS@zR6AMQNL zAq62w&Ys+!b=fOx9nSQA{w%PvQU|gEGMUqK1?^A@E>+*XY^xbk27#? zxBx#lB*ATTUv>EcA+iYeAdUg16lGuqc{CYaW;%?!NEZ6pazd5AjUBG>cI4V~EkBVo z(@MHxs$hB8T^eVwO$jQ5l0mjmZHO4W1+73_1YZoSs}6|lqC`oivYqy-+@>oC zRW@kd0LBr8`FhQ6!ktY3vlX(PFO3wxG@_qDFW7aUrkUgnpWR@8a-XTYX740j=GxVj zNRw+lZftQzzEuu5cm;NW2rZuuFP9&GZfzS?E?ZZg8-E+^+OXImV?m|YK^I^?U}eaB z8(>ALA>$0gsII_Z4g(S2^?a^IUMWVu;oUE5^zvCk^R8f?vk(+&UuVzTk7A~DuiH9c zox*ZBmXY`j@I!JCOmR3($p;Gg&1)Oe*SNPC`*qqQcpVDW4;>e`i!cImn{5w&2s%~} zXBO#AX-3&)#Cy;4lq!*C&Xoelws;BKv{I*`tGBrzA}SC+VEm;d_j$fZNUnBf1~*1= z(l=j$C+|%jwW~##(ay@*f<`gG^Xte4k;5zHir|YMlVp?y$%yE0m!Q^@C}6+CeY+)y zdK^1U6{el>dS|b1$Iw1GPSBKpb&{r9QD>FYstiGqltAMGE+*w}UC>IpFMwl}_w<`D zcru)Ad#i!8iB?o6?ppRPaWLpTfDGkuArzM{B**Z6gn?q^1x4v7hdNSR6s+XCKJMD% zmgg^=cG;YocwX)Mj5Uzlx7f&Cu>lyO^nf2A4BF40vLX=kA)Gn(Cr@#Ii0fego|IW& z8N140B|V3MqK&k`Rew30z*${c1EfSx!)NqP0R za+ov-vdv8Cwkvw&7Ip*9hCi8WAL9};dXquL_;E8Zk>WF)tc6TFmL-C($^9aZ(D|ry%Olv@GwUG%fLDT*182HEunS}}oA+96TqMiU2rA8pU&&JpDRyvYr2 z+9+!&zZO?e8|j`*)#*Gy>tkkg0%(9^y>*$Djj|~tQ59U17)sPOXCQ{k9MZ<0gWfA9 z5=sdH_TgoKIpN;L@g*q4PNbRe0C~kpeul>^;~|MC1u=vKzOy2`%nw(LsLP*%MI77b z>Oq3OgQ!*#B)=Aa&h(YMszN?ol7KU;I&}IeMlyWX+$etZNbFL%ZwwC@%X*z7c!ARn zhMWAOKXOAtIwh-5?Y+x5H=lejo5E&Uw^k0`tx6SdY6B~b1K(7GH}fr=5@nr{vLa1h zF@7Vx;K$=tITv!Caiw@7Be!XORCQWlgGK`WoCA;2Ym?%CYx0ONTcQU`4=N({wch`D zH{G8uN zFGPO2DMqP(2~3h5?x3h;^M?<&e3DR4w60obPMVxDxvn5^I!RN+BFmK2eTwQ6-#wa~ zNqb`SyGG{cZ%;Ke74j)YoHgCh&k-n)2HLL^AV@k=u3iZpDr?&kr7B-CspubzKy0#A zs672?{fDHnvDR@}H+-lm3}vm9O9oi1{)kxj<$ z*!9CpOj_TR6VQjJoO*Gzp@o3LD%NUP4UsN9E!`thufdqWw^f`12Idv>>nGSC1WSH6 zcEETWcQ9R+Vln|UH?3$m%xX5m=R#}_GqrTZ-%E3UHzmj&9^*j~xe^&F93=~H8J`+! z`HnO(Ru%Z?OJhQhoyBZzQPa<=Nim=-OLYzc!^A^KmWUo@l+}|2iiVEv~0?6+h1UTXW{iF$c$BL$=c{Wa<_C8*84`r$vgEt|FnJ z+8^$J&aK*fvb{|&clYp-|H&%;kRqlPAH7FNXpK{^M{;q}^y0}aiSB^w0c4E~9G|F_ zGiqQ$sel0jV<(9fFGZlM?hn1$zf}xP)XLX_?sw(eL@kgS0Tl=fp%MZlhtja7Igqghcz1lTRPL6!~_+>$)jK4M7?(i>K3ecc)ssR!i0#sZ^l>G*uFiO&WS21JcnFHxhPI2!LD#v%yaw5I zw>)+A1+RW{_IB#NsD_f?`|xeXlL5$yp%w>is$4SGkcO4UT+LWGV2{QE?R2^ zm~Q$Y=-P~xSLH4ru}?Rdq=dB7`P_zo5?+q6;EYMZLuZv0r<|5J6|=62(7n%=`Ai#q zd65AW5s}&!xN)Pe<8nK2fJ-O*enBHds)k)XSx27M8p75oxiZ+S5g6Hs-@IGem4o(m zd7@`;oP?Z3JO9g1I>aaa8#D+5zf`t1zrtvXhg4&!gE1?irFN8F8On?^X&7jK5-Ckr z-ojg-(S&gFMg9Pn$6-lyJ7=u%$$YW+%$5^CA$+qAUp3|^`v9EWc zDS^%5sJKW=Akd0%X@_q{l`BtbN(94X+l-djv$^(eRl~Mx93p?9y#fUq2b& zOD-SA_>tl2RMSlRf#@zdALsB3@Ju#QOB$8|%2{c(eDl9fRA*Q$k9J+|uJ3Dr!5m(o zzK5C4AZZJ9Ri`uII@@xD=`IDVFam87hys9rd8w!7qgp953YHL@g^z$F3EX=I=@p=G(OZ0|YVN2F0>Uq@ zDy{o4C-2xg&(M^NZR0K`E0YFKr|MEZl&DSJZZow|Ot@w;2gPnPGv+X^#c}LK57Sf0 zq4{gaNLBsVP3jmfywO;6UP(9mmW%r+NzPEPk%CLoJpn~)=44=ge<=8MUPQM=&R+=a zzK3I^gddzieIZe-b(R$mX=?DE!$E6W*6tMBNG-NX0KVtT{wz{6I9s@MecSa8JRd zZI3}GW?XKbskU;O27&{V_wR_ZJ1;c*k|?h zZu*6$V)zNdw#e!6eFivo5B+xx$&1uKS^LJI7tCqdN0bMie?r7n5;_|q#}a+~skHC% zVC5r~l`#-$MWE0=eL$GVJjT9{sAQ4O?gvnnrxyuq)FG*;@plPG`vXgK7Hvw}u94*T zNDps6HXt#n>4)sPid9V6s1C?Z?2`K##yJu>zAa1uV(7N>V8`&-mxUi_Ldfr>#G3@$ zTkmds4*NGce@*p4O5kp#!1gAPUgCFK%Qo}pi$1Zx=m8vevLio9IA8JuydT#EOf(M$B-m_o;6y60()13jm5-=Vj}t= zxQO{C-X7wy6p!k%6t}YRe*!wE>BM8i;(Q8kYVM{8%wt@vz9~!)QSt^GWmr5 zAl-%EJlco$Ni*K@oqCm~0na!;`*iQ6(krUUf50eYGiLBOpw>fdXIzE4ZF_yi==gek z9}Z(3HBZ$oEy};1v{?#5M;B_MBEJyoUr9QSk4`|^Wgh&6>I=ey?S{3pn(FN$een2m zTfTsybwBa#w~&Dcen7&bsbDWVvD&`pOsj@a5L-XRFuu3UNQ(`~?#K>yJ)>I#3Yn9B zf15A*&aL?qQ)!wP9Bz&V?!V%6)RKZO=d-Y-+%juBp~lR2M6YPbuU*UIA)<$3z@g6w z?B3C5SI5wKUVJy6&VyOwwankGE~Fm@b|o*2Te3?v(u{u+?ln-m)^u|vpxHfIBl9j& zB1Bd9JXnQx-$IYzuZEqQSc^YXXi@RAe-+s`V{#a>%{&8m~n`*E25Jh0rT7ZM2S?KYmEJv(!B?eCzn zB#z&->uot0@w!l_gv21Z9bx-hQ`X&jWF%9bbGTMe2lIoyd*JlGdm{%Am~~ke@MBcY zgUuUhUfNhsTkDiO*p|;gsRnHeCus|hbETQstgu^H45+EZmT7z@o67~}e{r`=ObUl} z;a9p^x{trOX*j&pLuHhxNW9J=LG5)JmVc^7oLPs*OVqJfsL;1Ue9hDS{O|GP2u`(0 zN_EjiFDG!IB=?I*;!EpJPJ;mVy!ysqRMQjpHX+*OqBg#l!ly+D(Bt>@9H9=YR{P1K zc5TW!zkqC~?7nw&f#5!1iM$Y~-%DXR z8JMv-(PfFkZX@%uK#5@jAC&umZKgD>LIEbYQ88T0o(O2`lrf0Ee}^m=z_lstWQl@# zyIxAKG+($%MffHi1WiEalO98UPY;(EFn?8{o%jeL5C$!Y5<>4PAjga1*T&U=boE%0 z$tWCyV|L96G|Agm&{v+vAlVW^yId-BCFbox83znI)-wqKma+_HE{zfqN>cbBgT*2s z&yE+Q0QacK>H?@ne_fI@8Gtmyd&VNz7Td@{cO|)5%Q4vs$oJDoXmQ6`C#rzY^B*jT zI+^&O+?9~&Dtrx!y;iJAG&P03y(8X$>-#Gb&+BmfCwzZa3U?yEvzW?Z?s1DW2O(2hiqwBR6OH_(SyRCG)Lp4e>s~sF!US|;wF&q;l)4{ zAWyqoXTK`z%uS!ch2}Ht3X;K=Rc$7%CWLjST>qY6p_*bN5Ii9#;5B)8P+$@FSXN7$ zf)#a;Yc?Wf^wisn;)kcu`xY0%y6kc`?A>8>DFfMCDB>is`5n5b?*W5Zc0>^f=Ge7F zVekc47TT}pe=ddiZSfbIjSbPF=r&$e>|JWlH3>|l`)ct`Fo|5TH&~&GODv4txhEJ= zzj}w7XeIkO1Jc>=IPjMbI^<^B4jsd@glQLKEzZ^FTe&6o;gf4g6#xP&z--S^NW;%~ z00qjD{h{6`zRN;X?7J#8sbWzd2ml~nhMk+EpOEy-z3S*8fZsHwABmiP9FV0!%t}p5c79SQ z0`m`))}z3c9{)?ig4@w)am+P*bOZ>Ablk}CH!i&$wU(=BzCf4*tgH1XA?c+G^Utac z+_dqLf8C%+((K0jegWjEd9W71mR)hUDey3>*1NJX`?`lIK?J_MovME`Vtulgj`8NS zh=O9%j}v>qI%>B0xov%DIxvo%<8r{br7u8t=ez3qU4`E*<$vt}ehaa^MPHY-{nSuw zrg>a~^jL=(*&7k|YKQ|Twhd>|Y9Om&^tDT*e^?E6={FV|bgJboaW+ZFXUc~cgHd50 zZ!OSDp|aT?kx3BhqJa$Jj3eMC^K^SeHHv$Bv`ML3O-v3v`5<`^TI)ErtgZA1?eeUF zPojY;beT_zXLNCJpU6)$ffTTY3y?rcq0HNnF1qQBus_u&BdzGkDlP#IYrA$$&YV7f ze-;m~XN>fI&lJI!MPYaP1W{$X*ZHWa%sl@qzG*CevZ`BDubP5$^({<$oma?8Bh3iJ zy7)>doaGMMIM=r#jFn?!_K&TWNK(A?Nf6(C;UUBqOQ7c;q$*aXQPvX4*ldUF{7Hl+ z7B|_7Rjh}LcRB;|TLjb-mknxyW~J<>f0Up(q-hibk`-0aDlnHf)l*nE1vquzB5nkKmR1|vd|sGfAaUw zaE}7dh`W(xYv$KM(p*)yz|Tj)y}Gc`64x_a9zv|UfC&`hgU#A1`0t9LJ^!8WXSAV& zF!0x@bc!x+N&@NF;^Ci5FH17jYlh9ELNudqt&qXdh9lVw@)&*QAL(r8m+~DtRa^2P zdV-H%`gYM^ML@>_K?TNrF++6bf3GaI8Ez9TD)vMBsiPr`qhLwg9;JXnn0>*}Z=@YF zo{HKxS*0FZiqc6E9@E*9U zMjmd87kH=dNVJD z46*~;sa>-@8L5iGAig6^`)vegi8=UVWs=%RqYl-)uDqeW7cC&1^@)Eml{uk%uKl?M zD}pampkZs=8Q+SkxbG&We{Vj~S9N)e5;6i67b5kzmaL*G#ag7ZJ7O#OFmgW$i*Id6 zS`8uN3FeuChl(B>9jV_<-1<24Z`az->CWNTWrIJj+S2`N9nKkx4_&GkrdYmW>Vf|j zy&x>0D+GNT)YWbhuaO8Q;haEf)E+Rz`{VsJ+Hu8O*OQ3^qQ~H@e_0+RSQ9KYI<)g1 zBkrBCUXeoVo6tg6bDxn;l4_K^__sxN3u!7faO$+wUNj_(uE{(Pa3dby9Pz}(@fxvX zypFJbmV(8W*_xKQO3xx=9I33|PAW9h5gR>$C()OmEiu=6Z_e{@kN1zdb!cdoJUzuj8KNS6I&joun?Z zCF8boP}OlFf2O3j$VibdgZ8td$_OnDF9`1_DMmAhiU8#3&`6Br3M%vthiZdrTGx?- zHIqzP(vP*U6ZT$`?E||xz}M30CtOnMbbgZ+ zme)wH-vs-!i$N`NGgWF4qXdwZ6)Jlw@jJMmW6--`J}%mdV^3+D;}b9uv%2t4zOMFD zYDsr+e@K5zPgy9olgRHl)DtX|`RpN54j{ps@w<UjJFYNiNsmd?NpYpIq(=Xa=fm z)%AcF)$t|B-}fbYEAmW0QiS0AMDi|y<5x=AJYzW3Tpv_=7HiPgPXd7Z68UWb#w(rd z{4=1R7$6j_!{0Sh5`Z{d2`gWPLYvb_OOGGdf1z-BNv`~iXcmh42K$(Ji7oFtN$?8V zTJns(^c74HfS$0iZmy2I5NrYqeY{v`KSwK8qFM-F&bQvxO^5ccFT@>R6qyYk0T%Vq zokREuHfJ$`jbm$}JWEDGUIFY4lC{Aq!egV)N+2fi-Bp%-y#c!?+l?zyJ%}>qk~0hc6w;P`0J&%Rwmqsla`gK6M?pQ$r6px`a$}>p~sre_oQ89$fhHN8s$WU!jh?6j5$`;*X7GazzeZ zHNdCe#^V$Gw0M}7HjN=)fw7uxz6h~WYU!rvLKuinq;aLJ8e!|avAPZ2zNi-78TbD!$0^db)$q&)gU zD4PaJ6|tSgwm^{{cM0XfTshFjB{Te&0Ubsav)OL=8^1x_=GT0I_zd%uK>9LXn!?;Y z_*t}yR#Bp2;KD7dAg-E?O0#ox5qCCcCZ zYH$yvvh6%^#(hAt2rta#nt_#)OzBAj{}tReTwh4D!K!{cCD}~;5O}_pjF#DJZ3jX0 zvJW~N@tg@9G1Zyhqd&^ie-FDQ86yR!%;)*&R1k{BClX&tYd(_+0+2Mfn)tUYr#+o3 zgW8YjD6Zah4?_U}EkM%0_9a*` zNx8#fiwG>mh2AY7oiUM)7JrHLK>G>q7JJdY%&SCL@y#f&irEoTCUSS z(P^_p&^eQ1C!zXK%s4b0&3}b=XJU9PY&)2S-dj9qg+o@=ts`+lKvDn(B90|2pA_Cf7IqJ| zPufL2tu&{d!|RT6pzb7U3bBY#hDNZr?Y?u?pL%q`(R!UBmKQ(L``W{DS}M|YhIB2a zB&*(8o;`{*4#yHzPus*9LNM#a{%(l^Tj(O1^PXTZEl?KEmdAO<&`P=HH3xgvHREq7VAI1Nm<=(ye z%tp`CGOLnI&AjnSPy21N7PA zG0tzr5Lsg>_%Nf4C!+05Mb7myTUCP#34i^!FjxB`nxT&Q&vC$`C`yX2 zJ;h8`ZF=E|iF`1=u4xa@{CAXitjl6e#Se=;nc%(DFsO&xqkPL@wqRO*xDgnKf|~*E z`(*)?N>C{>Mbq>6W_ew5vuaH(iTB9i>$YNdE414AcM_+QhEIu!O@kNhv3}aM#hq{f zfS_EyN`K8OjQFfJa!#JkGDwBhQ^%4yZUkPTkwRJ1h_cj+amM|+G`N{$e;)0pa>l0Y zbj_mffy7c#hX%9wTME0XtABFn&C47s6rzy`>9Zq)I2y-7F>`Ji?o!p@Ys|a0jmy>! z$8fWG6%kYq6c$_OJ>1sqX}F_!#k4sf^)W+DlYb{v3AOVK=h%1D#b?YAdx^qPmx`|8 zZ`*Y3L;P68Ar62$t>ZY2JN8YSq@~iFR-R2)8hXOi_%%MSp^1PMn}F*iZ8z%}ClB<{D6gsXj`<5PS@p z8c=q}uw4f$tgyC$o(HPqa2?j|Z{^E6 z29Hc`(yhgSVP43y8{23E|A<2T+Ui~_bn7IMiu#ec$ri3J@Gtm10N(YL2QPXQmT{w; z2`z&VGXbf6JP>`)j^aG?2wrqI%YQ%$d7Ub^Oul4FYbrzrVDc1!6lZttTv4{htGv8{ zdG)&jPoc#dV$ri}F?LNB%gst?5^UOEQ@htbaCG;@8Y0 zQ{x_&MbyzW96*^jD|(BETDhrQ9%uBf#D*7dBp)+gl&8CtQziP|KAQ(3xrn(_0-b{> zim9Cz#|FYXzsCb&?G!ZS=6m1vfe+G~&9=lM92~`cd?Q8T!Ts~NW52j*s9svSP((p* zTbJ))*;jPJG>QX5R7Zgu0^Kb zF8g6x;LdF@L^T<;-ALWlNnxi5exn3pk5LuW+w;;>jvMHiGUNxp#Wbo#T+@ zoqOPJ5R~{4T>@dPjB%v*3UgR5dmRphib*r@tGHC9No6#aMw?F zJ_XZXwxMl~^FJfZx|*o2$s|}}g$yMHHetJyeSW_(f&^CL>q zRK1Dqy5ILTgg&O!9X*xBT1aOhWKNO9ct4^6yxxZBaswR8&Z=EToX_31ZkNbi%CRRd zW^hzpQ6-b?rXO3K>{F?P)3O(*Y~QvtBYL;Y`UA<7++od0oUupZm3QQT99={L5Dc%x zOc#HlDC`AtG=H*Ka0&_vV2eOigQIL^vqz;nx6*@{mG!ZauJFM5kAJR)A)poQ)BKIQ z#*)^oHetRUUE!(g=bjCz~WM;uAbDCZ~9#Svc#nx60T<|w~-)Gpr zSLvr;M|X`D!+CK;CQ*^BzGroa8P zdQ=dT(0#-HAOT=D*_V=s;Zfay>gjDO=ZogtJ%LWLC35JVeQyg_0>&GyskVRRGLp?#_ICcRlc zJ_-nD41c9dcg$grz>WrL5L*6lmH!YaZ?615Xy!i@$e2<7K|+>%eg7{xQ>Yn_VHHNG zS)I{x*1KhZ*#(YfjFEwN9cGOJdxFUfp$WcEp91hGM)W;d97=kQ*H%x`iYAm zb*krt+a2AhFPGnf=*_8#e)pU4@(@&vhj;z`m?(%|+m%quf#@^w749Gqv<`mSP2s z=Si>9#%;M53;q}F(>vvLsS0cn_@_dI1KTWj?0N3bw^Ht)b9GF z-MD4PjLgT3)JGF_*}#!qG=m6O&}TLJ6@NPxXe?;3_3Fo^s{ zRhg=awNuoPPx62-OFkj?LUhasY!gNpmtokG(V8o5IyW{H=BMsTP<%?m zGo9wtkQU6f!H)*Rg%ii*^ncel&gDXFtA*p}cV`==h11^V$rtvQ#V%9qCO^1!0XPc5=C>gC1bvqMPKNI$|60?gEb&(RWOcPdH z6Q+HW!pVP_3UwFEZ?Lq%$wUlwu}%oQpjmKCTd_E0!qZB*nD@nAYk%m~45Q9RFWwAO zn(H1O1%W{Sq3p}LlvS2(e^5P$U6npYc93YINu%PMVwp55Dwc`-`a2S>X05%?J@>9L zYOJqHMaYb1#+(tLXU^s_TnwY=T;la(R&~?CHSEi~b0gc`cz@^DjXVtdAN@(yfxUl_ z@2maC2EJr*x(|=tNPnwl3bTYrd7)5mKki7P`@YQmk8l_F?!9Y18jBNr;JhaHw_$em zo6d*79hbMy0BsgU38}x?o&w5OVL3ahGM(dGHN|1NEQUw6m!0$DzTnGiwb3c@tYR@{ zUr{x_HnZb69GFxUzi*CS`rZ|%!|3jdrP}qcEbi6EoBcJ|m4B0kHR~<&kZjzzsyc*} zUDa5-Db7W&n=j^Hdvd(3cc`9k)V#CR)ihc)_pmPW-ZFh(&bM>3VSQ4b!ht{eJt{zVrRyeaf8tZ8FG!_YO9ssYO>(q8l9NI}lE$ zFQqE@m>2c0Uy$k13$sq>)~)8Y}9VC?xAQS-gZ?yu-8_$b1q@}fkJY) z$kRJ!gDQTgez4dcJuMNMuX2^;-RfL&tE!Wt7|hMR9u{0m+W5jzG5w%m!h8-yK!|bYEZPjb2>2jDYE|rHsVe{4Rs{kS0>uDD~ zc44v}ypQX*Vv@V1Ew}T+wXEHsA1XV{tGC!HCV%hmeXpT|bWDpb1neB$yPkC(es#0x z7IIkzZ|=10=4YF4{N3g2;$!#GKflk5F=xd3JYkNby(>x^z6 zlYg(n>2onUz@xsotNSQ^6va7awt6G&%>TszJDWzI+@H?$kk|`MQo=+kea7)Z6?JL%7X~V>8(lom$<;Q?j1>^Uth1 z^NZ>;to7TeI8OZAs#y<2G~CU^DZ7nN*@!uG^Y?2N>Y;JX&oXFSgkXSo@PloRYRvtDE%6&hW4sO@C$$wz7 zt{!lIgExxE*V!8C_jH-fDD#)Al&tu*J3H>avK7%)ly3cwP2^ECZ??|f_q)=ycj;qw z9$z(*{rBV5?th&0O`^Hi#GCVA)<-)WW+h** +HwF&2|TOu>}qq=upl=gbA@AtZo zMI(#obS>PmweE=wf6B2NexB)?cC6nyE$WWT!%XPwoSA6I#J>o13{b3ZNUa8<6ZH{2;%!%mZXy}V6i>3^;JLT-Dv z(PO9i{!qHMINqXn@BQoaG5tESuhVt*&Z@tA0pSQKHJG~xJ?rH9a;WlNXzE@AbU zvQ>E9PR?}dsqx}I#9!8GTYnt7pF^)cNIl(VV?O{ixr4va9NRda3q{&rwA3PXFsG`v~JQz@V?6pC!GiNBPijYj(F4yY{Dx zdOTWM+cmr|0=wzF#UF{8q$`$;l5(MSiFlVGNYe#IS^mD52x|bYoqr=2-ZxpSvNQ~r zH+m44NqL?hySBSouV)r|>^AF<*^Q#p@)+!#xICmWI&{@x$-rH1w&h~eNEOQYa+84l zuJ5-?(OVp+>-f|=Y~4+~n-%uQZW51YH}Q7*JV4%O<3#cKcI{sm>0r1VhK}RN?d|hg zy5c~k>arLgm)psE zap}zO&CT*U$GfbL_T~0iFAquo?V2{T&h+iRaeAJBXgBB`r+>O%F%1Wtdq@u`-}>G- z-W*1Y^D&fuN3Q5hjG_XDSvidBPkpnU;oD{(&$3+2cDM+5#1lK6T)U*cGMk+4gS}PmrjcpR}dRdI{w16_<*TTs+@d!K@WBD$Dz)+MF zbw(Y1reYwW3@&7Hn_oKPxA>MjUvKx$?Ou1|eR`8l6?Wckqqv3yKCVT0GG(2UZ?3M( zuIbIEbTwah-t$JxlB1Z5hGf%T_2_+-{q0+P=a5fl<$q~X+zBS{6&s%A^kbpKwHCS- zA0xb_jtym9v0E%?#CiTZ^ZJf}`Le@thUONZeX z9UgXXi4#Sa`Vw~8p~yOqQ;{w<%eu(tMLM|mtp&G=F}hCH;c8oVF-%r=9J06e5)_mUSKYE=pfA{RinZ9*)O4y-aJseszi%cxu zlPtdVh<}}}xSxGIqBzYC6P_G4sXhHX+qqlnO;QiM>GV1o^`{;|+5-~$)%Hle>wlUS z<%c{Lr^S6w`(90j=)_hjyDpw}UDxs4hW01H(w_vAa1!9q3svK6l)6n|ARb94H;xbrXFce_l=>UxW^_-gjcudXh_VbjQ7ppUC$r+7Q*cEqC zEX%A^0CwFz+9htEI)mD$%mAS33xC?D&g8q3RB$NQo&wlU4GV`h>k~R{m;9@<(g1ef zJbGnDzMnGFQg}uHGykQht#deT9DNy^NHa6<-zOV;k*;m;C~pOTeQK#a@7O+7rhSTx z`F^i`3XETFSN_0plVL{zY%+5YF1GeKkK_LGVjhrV=VnuzK!4?<{XF`L zm)*l^xEm1w*!$-5{v)x+&2Anm8jS$B-9CEf&^|Q=wNHrwK%@34Ia6*Pl+zyS0ek@< z0BHbG(}o5~Lj&7*ht4Y z&t>2>aeuCO<0;Yea`9kk6n~*iWUgWp=66M)1ZHUAf-bk9l?^Ii!$a*@ZGAa@lt86p zrtaB537abW4b{aAq-jX(o}8fAvb6lN%?c~5VWqIGCOtrw*$RFOMmd#HdtN+?yB>*~ z_WXS5Al07Z$;~6noTHaV=J)3R@qGxRWIIKpl_U8=`bCUY<&Y@a5r2?`Orw?k(0f)} zm_>ALLpepd%N(gGXpQH7v2-$>+p1XN?wLD2b%?AGlI0+%EgK}aZ9o2;VzI$~JY5!Q zM`hw+#Tm}isUu?qDsBnMq%(ShM&p}8H*g}r)5T+3m>Dd7^%~h1P15rMk+~0ZT}e+g zqVF~B^*IcZ8K}e&O@Gq=*_&EAs7@M|N{8#jW}*3evSg6hC=)r-1BP^B^9MhAq3nAk zd4oLZbDhkPOq_Oa+qIH??;%cZ7Uf9C3v(Vxjqh%@KCG1x1iU(-Lm#Pv6JVHPz5;nX zh6ie6b6(MH5*C9Qos5p|apBuCvXP3C<#Ig7KA#g^anStrUVkomnjpMc{Fm3vajZG) zdIvY#n4dLPXDysj$Is&``aG_B9mDKlp7dBFb8dX#=?4pjlI8RLY{Tjzwh{DJsd8eAdkA8Tw1wdQ zUU#k$Ml$9EI03&xOqI1D;)6WY-IvLnLYz&2I^kfj!++Y^*+@oI4O}A2IfetKiX)Ph z7a4NJ!!p64Jc({{K}tlfvII$CNe~=etndH^a~8#j6vZ4QpJFL6x(`R4QedNNgNhkM z36hFZ>Q|f=pg4bB|*O{k50ZzVdeO zn8C6LCS|54JKH|!uKtKKnTdKHA@E- zuS=Jy+uVPs5Gl(A9ZhDm+F(gBGH^g*+0ijZa{Bc;57UA3nAFMfH>V{gA1wdmy9Gn> z%GeF>&@WN*M_L>x0p&ow?)ug!fK)4l!uRRObf75FBZ-+(kP{B~Lg= zhNNT!4;q?JY@eRq|9n5TF*D!KkrJpq$HmDyccZ#h|AasiTyElTq`NZ&BM#ZlGW1q@9p7`h6H=p_P>GOHgceCW^ zKDKwe284jJNmkiNmkep;fmE0&z9yhQoqR-)iYFAs84qYC8bOhyTcVU5f$*@39D*pu zdZ6KC*{CitsAxHh;@Al#Mba%vT_FgH3lEAhQwc{vF@uHY;wWOcKv2V!seejR1anA7 zp9mt4)CmoX@`I^F5Lu!F_yN>wh;75r&_kW0DRk^5~5Kys)b&Rw4X@~xP^ae8ppAtTe9b#zhurJbA z&Qx*c=D+sHf6uq)HN1Bt5;HQIyQ-YynwzI~QBYz|P_lJVf$-@-sH#8xnifjydkeVBuL}u5a2NSGLFfC$5D}!hBshX|$XA)yfIVElaIy>!G9QD6%#Vyl@ZHe{$A$oXAA>idPO&_U8fVvrl~2Mn{gM z-cE2i@JMGhNk=mpRC_0sXD3yRAq=d zZ1NaY)_YVrkJ5oat|rOw&GQogiA)KJx-i)Qp(d^-#KiYnWPg9+nTZu9rbLjocxxqE zzj)dde{h?>aMtVELJt!A4M`5ZNbM-z(_?1OUDNpZQ6K9ubp?rPjD`c{n*uG zG;QNyYxh4n;(r@+IKOM2eEatkpSC}uVd)cNnrn0)uYEG_Pa~T@#PNxH;&1$MY;lHa zUyoQWUi+K0@B97E*y*7{;kp`fe{^)M#R{p)n)dXS5jbFoQ?E$&R7c5FRTRl;W!Azb zq7r^`^;4I9|Lq53I_MRvpZTlBA&p2A6NgqT{i!|LJ#mfAJ~(2K zAcCwDVpa@Av300ONh$(cDO1ZbEJs~lmkLx(WE6LOJ>Xak(L@hA`9e7)UlXk9nCob7 z17*`J?oKm^NKn{Z#sR1Sk{}Y05`(0x0@NaqRevmzwj9JgBC9D5;u+7A!b!oFVWb5v zW0ofA#4494I|R%A$b~Suqtsuv=b~jk$x3ru{f&?Vb6msbe07=u>YMd7&yl`*Hhbcu zp4Z54+da9@_-WQ5{8`g9dip{0^nrh2bF&vw>(}WMOPTkYn)>M*(=k_3JbS1i^MwO7 z1b?<(1xqz3NXikaZ_7)X%qGnsfI9NADKfPcvNT56x6C*($0?LNrIM(^A*tM**A)X* ztVj(6(wJZbKH3&&i>r`G${<*a@uZAxnXu53Yi#aT2kCYxDaKL|tTaP9nnNV}woccvIynpzFXYBs2A2ab?gP7M=ue_Fh+u?8hA{Kva zthM9Un%jINorR>7l7LFk6jVHM7FNYTZ*84W&|-r8xIfEx7^{i5QeuI^gDq>Z5GlJWcg6Fn+;P3`L3Lx{Y)u@aa2r@2v{XPx#G6pRM{pi)C3htNJj51XDTr_@$9qp zH(}!02UBf4v)fwtH>c-3kFqE3YVqNc$-f*(^3999A=1_g?OMTK?2w8ce1DESj^a5@ z#c+cxlQVs)qJe`dOntD47IYFy8@IJ-GzDFLNyVw*kB~|vQkUaWE03n?1S)FIy21pa zIYYO2*5W$KkrNGOlG^Zyupv^x*M$oSUMb;#;s_~0$vu24G52UDC$uUNstpbl5O+FP z$O?Q^a0F69s9++4eOq_n!89z4jmm5fqPrMZOy)?JAU&Ad~}#;Pn@gmIf8c4H+>KdJCkZFYL!Q zlEM}b8cpUOdoL^Ams%qXap2}a?V{XZIoU{(e)>pI2&g-uVuLJpV1KH5OzJ3hQFk|k zWI74Wly|MLqURtOc~n^^Y`WkirA{s{PLNa`6ar(C@?8@I z#s?Dn-H(!~Z!Mf)cwuREb+UoYh~`{L7n^w<0$Zvn2Aw$naejtlCwl2HpW{qmBu$Mg z=7OjrF%_}-F@JXDNJypNup9v~UE7d#nHo(%nfMM7dQkOUtMPzRk0dEaPtKeNlwjz* z_4gm22($oAa|q_+Pd!3h(;PuHMe~nc=VqaG8oQt(k3ftEe*5o@DFsa3q)lCyCAg8u z2F4A~KMw*mHFXpHuBV@TMUVX(r>9Z$ZSQRy{pA+BFMs~N_sa!-$6tf}3G zgYXapZhyfAs5r5+`CE#tvt7rVXEIGc5JW&Gc3OdYqJ z=`RnTJTZ+*{?<9qTD|$ zikifs-MUXetfpQMM=MxhC8w@7Pq--<Y_NXAyf;H$#_c7ldw)4isW#$(2U}w-A5y}!nME+peeA}>|+$x zNy{VffIqR)GlukkY9xTC52VSG)_f(1ShoJ~wBx84!GD4pE{<>&W10#IQhiYwV+7JB{y1QC5K!O- zKM({7lOR|(gACzP7~TZ*_6kyQkev2w?VomUWud=ZI+l7;K)Nlc8! zntDr&WI_p>Q7{iZS6${uqg4$y7`!LNoJxT}#x4S08E(~RO{nKjp7}%3vE&#e_rhGb z8YqJDU4WE^5A@gh=sYw0i@WW-Yf(WZOrySCHSzArqspI};ahuqVYg@fpnv|>TYoib z6NlRPV`7M@KNk%6Gv=E(l}A0O=*PUb+lhc)u%b6noaMeQ2vVa3DC*b)H?#x3MaA&U zpultqxnu7diaGZml0cyuvADTjqvc@)&MoNP*1|gHYps^coIcWp3CA(B*^Dt6BiwVn z362*wKU))jy(B4PFQbHwwJP3s21= zS^i+-rw?at16}tSqaVWfeV+8FS(m`roH)9vh(seE-aqQiBr}sqDC6c%={DR-iY{*r z>UyVnUT^0#c5iIFdD$05@;~gWoM+G#fqM{WG(|Le13%t7XtV%XQGX~0_y|VV&)9n9 zeQiil5}*hvN=V(}LmNwpr(o=Y+eq%m7Zq_DPVAUk1&I~`hNnGslBDI2jou4i3BCZj zn7W)j`Qzqg15dwe;^4*EoSthEn>y*G1&SADXs`d*&YoD|x6Z2L)i>A2zmF5jqw(dI z)BMes{okBt>dwUGp?~EdEk9k9F#hmt1jUg9$Dkt$_f7}Z7J@XIV-Sot5X4Q)h!S}2 zkC&bg1hfdK@s)tOSPdBXAVJ3rgQaGVnuV;~M>zg;bx9BmJOpcnq$Hht=^z9%WY#sl zxZo=RQkXi-Xh#_7JT^K6f@sb$*Atl-F35q7(W<+cKunS5D1XJg@>GOC%ZMZH6P>?} zm1Q7`5D#D3vbBE-ZzDtuPi6K+0)kuhvoHLqFC2KQvA$}EHmCE4c-9_0nj74enAm96 zGG(cJ=|?ZlDSTp)_81vALwN0eh?hn+SH_+V?;5@I6|=X``i{5x>xm^^zJeC1x8(uy zi5_53;;EylB!6c*%DEL&93+XcP0$G%>0}3zFNjVM$mr@R{?1^OGDSVmLU97`6eSyM zFt%)aF6O8iVY9DDS1U@C<`_I4@DS^Eoy|V{#7(2`*rtv)dCdobbi#WkR!O7xmTZDl z*VX2|)In|fj2$9WRV|Qg`G%#@ncaFfrzYs(W-_GB~oe~71~>a4rgi2Kz9 zExK|VjeMjkK~P+^xKmb(7%c)O3zLT^Qq~paF##>-RCTB9?Dv?TO?J3gE|f+jS9WOL zC-b*Am4C37CsBH7Tes~p?CwHZb;u_r*Y(V$>DLUJ&{JTR{(6D%#}=vTbdMYKo{J6k5S)pGugIrPXwiW>|l ziGPe8sE4Utz?|r5N&nk>l)?Rqb#{`0ZaH?_aL`7-1NHkIpr{MVc2cP@GI&+SgrXMH zwDC?DAKSOMaoqC5s(k8&Sp3GiTs-Tlr{>VE`OA;+cb&uPU(Lw)+4tUi&C8AyttM%* zdaWHMkHa=Vqph)H(?c>*+D2+J#o9VEC4bR#&7x@KpZ5+_?OuSAJ2x#%-o}a4=6Zc- z<5qjk*p}QjxERa%xC_z&B^(4+^nAVb0b=6RGglkk?4$Fp-8)<=m>b;o5R(^fdvSBm zoZHrM<{DqP*2I+-XZ+%~@BW|fJ_Gx&rbd-LifZY$_yZzLgYMp@(G2Uz;D3yLUVkFx zy+iG#bDc!9A~|wjZXggbl0vd9o+Dpd#JuMuCZ;ZV;tq~5R2ZoQAuSMr?VGKOUIZnY zh5R^8$p9q{gOTYwiBLZ1Ath1FMuazmEeWdd zsn^HCiBu3G@%&zhrqiP!vu1#%G7{L2m;=vPLmtLAL>2wor(Ko@c$Y2dGE<}v>eDwL*|QCuY763;#Z5bdGm`O|Fs`8-h=fDMLtp#yG=F4 zAW=>cWtLAlNQcXHZ1izw`+o-u{yb$^(A zH!}5zowPcvZS?-;scHRO`+u|bR?A63hIs#Zo&U&v&owZ7;eT=wr+?Kl;j8EK1Q{n! z{QdixOYI?|EeBE$s0`@e92VOKPt&G6xIV}deZBrp)57I4CYhG*1b=A;QV^sE`4X)? zs`LvVM)Zjfm($K!A(ZmaXo51L9~2Be77p=87-SKf8jkA(Q-hoTuCtzwds-km_?mz; zxu^X*0$sK_sKuKyAyjGy6tzK*Ol(F-6?k&*DUgW`hiwPuAAM;1kAzPk!EOHnF&pG1 zCJx8C?Lz_+hrf9s-hb-g1`8xT`iM_|_Qh~|0juR3B5lmc`3n=a@hNYAdVe^gZ=1zT zw)=aUn!bq#$A9@D1_R7hjA)HGqk4g5!4M>Ki6~9AxO4gR?Il%9P?Lx08AMw9ge(v> zyuPZ=o+=9rWn!r8SIm(vk`YqgG%)c-CKi@@fsZBHhh=Eu2!A~NoFHg(!VF1~W8j|# zmm_3+oHDo%eACNJ(o)OZ32;4{AgMjmCuiad@>^dKI0d_NJH7Qab^m2zCDs57Lz3fZbhJUg`g+>@{@ZWx)7#W)RX=*~H&<=6J@L1$ zR6HTbnl0^O!+(dvZ2B1Q=2pg5MdutuLb5zP@Ej!MrG-+yG|evlL+vq8tbu#^GUdVX z^UM)+0;eALL(8(FagOA0?;mHt((W{d3V|97{QhDWD)r4j;GTTOxuuzNh-$=it5-Dt zi7$Wnt+t=?rBD8HV^1D2auk2CoHs99dG%bF>!;7&ihplTOefJl)&M8R`7chynf`-0 zuXc^QaYlW)jvH&e>c*dr@mpQGME)nTxbEH^*rxgbMFKl?KTKxpW#53a&Amf7*B{v&!duq5wjTfC}K$vM$BLJaB z8-PoH(SLP4`6#E*@GFu(y7+%+;;oKnZ0ff+{@JtgSO3kD7ssBg{`MQWkDq?$#g7(< zTio!>McRp`LA`(4*m%*}-cljF9v`u$l%a^=tjd@&V7b6J8?F$Xr=TNp46VxaK7cx} z1kw{kVSrQ}h6{ZpcrZs`@9cE6b_1Szkj&>+I)B>ltg#0QleoSzx$6vaU??k3NYaee zdWvQ8StyMbwe`hFS^BuTU~Q=WvG1ntG}Z+Qd${M3bvmsn=MR3iGipp0yiVr=H-RtO z=pB{e$(wavt&CrapWe$KjP~!kYsG#!HXD6&zkfB)od4B0emNx*XLYM%d_vBb;=f>g zcz^E2iK{qkISoA^%>S;d&ClR}$p4%D1x6T$;|WEJWDe4vfi+`(Jfl#U96uAj#RwS- zQo{o^qc@J}ziKMU5X9l4PPew=+X+_o4D^;N;}b?B7Bf3F20!}b%HpqD*yJM$nWo0Z zfB1I8WvJm?WK>P8CYsu)(L?y!V1`Od4q^{e=2587ViT>kbgHv{Z7 zFO2mN*>C@K+duZwxn8mBZ@kv+tdNOI*-In&)ez zM=)97VrEMVv%DZs12=UaNdGot<%Y#cpXkV@3-!(e|1i}B#!^$N4KC(lrcAG1Owd}4 z=88|EP)u24Euf}Lf?O10tFu%Ip3BjWC~gzjA6otgq=Woor;dDVKP+eBTW{2-GoJ3q z##i24ml&%7Wo?X_mq^6~A*1aAf`5t83dIpFk?F^*56+-4@!P(CYKmNgK{GJ59>u_z}z9WYq-8g4GuE)rKsJ5R)0exAf?Ml zIkG|7?F_@cA0XhSfAqIL&~p3i$BPB{38Fe7km=1TE$Oqbe%g6gPaL*rahPav7z$o- z!Q@F2o&J$0bMbGk%*2w`*8a|^(5HRAG}b4d@TYcke(LG(f3ME|T1&M4zi~4u?-7W- ziMF;xQRd~$p6BHjmgpOYy?=`?pWtUL#wa#~W6V~XFAQq)LK(ATSK3HAu$KRf2?qQ6 z;31U|F)1F~P!sM;Yu3nS0TO+x!6#Oc_-Fm5Cls!4sosH?2{BRHd9G{UtZQM`)qnB~ ze#A>`z}OGW}Jio=|OX-m5+y5Ru1^*ZcqMfqxAUT-;0wGJ%S; ziCpQOPee$~b!kd)1bOFdIn0+7@)$v;W~}A*TOJr2JDWECoqn&issWA(>7)Y+pZjYY zW3?fJMQdB_XEa}fiXWYgIS+|H6`dK*9&}s762twsbtq71=A8?&+q~9mc_-BNB_gh# zeG79XezcuuA8gXRa(~t{X1#nAUKj)N$Z74a?NR2YmkPJJ>3`v{m@fm<&*wFX(cetH z`>*d4Pn@TO?di(Jl0N(*Za~ttE%j?{E#)^dFmoNC%Xddg+PMyF=2P3;>f2`3UOu?P zkm)Te5W>mk#T#G}xYKz$*t~eK)l^l8o}#^%8vo`Q(PoAuGJm#wn%M*2)FPlIfs^IG zzf}kp7p%Gi6F=qz{_e+ImtzziQ&-Pg9=YY`6VPp4ZDPS|Ex$SXq^-C9 zXeB?~nu(>N{(tTh5;imQ0(tx|8;GsBYWd=S@=4$QMbEsq`o-qSKkM-PrAHgwqJR4| zzI&ATW3zIz(m`6-NJ^Vnx`E>%McH^^zNbwnJ zT1~nNBg|(lr$k!ahcXyYkfMRp%_8v06$sbxAXHkmsl7oK0Wq;p6%Jg#_GZ=tYqR&l z16`QelPB)iNn7{Zsu~{jGApH=m|C9rEw5~JBSOu5rQsW8jE0gn;#d%*lNeOV$dg+_ zAWZu8E`Psue9`8Qf8td6oS}MRo9})7@wJz4zpy?3=^yy+>j0zk{KkPFKPH;bUNaia zKe)sPcSf?UyADi8oTzy1kqBcv_yle8uAEKtw5HPRdlcHLoGjGTML?(A@WqOnc^yxp z_--SZn)-=``b3@X&{m0ubRj1GZ+@CLCO`euUw>2Zb)WSMM zmJ9^>qwyNd_tJPxe+_*190Wkj40!z8bN0iDC6;L72CfxDoqQmdkjjjIIyzxk#}d@q zM}IJX3k_!hP5}}Sj#3Xvng3GLx9y*LQl z^uvGt=__8EdJt0BA}J{ZaXyy=g{88Q;&XFfhNIz&q`Nen^TcQ!K=m{jO>HyN3KB7+ zvFHha=}kJBL<0!esiPIYblws(f?-!?hCJj~*3L9@gR_@EJ>h08juQ7z>syOc73Z`% z?F_eT8-7btLN%vGFDAO}J3Vwd5$4i<2NHinX!=|uIhfYP_x&m&`02O%>0|gOKl6yY z{7;|$%jUm1hb3#`@4hAb&uslSFY)Xp1ji$2;ea-?D>|e)Hkuxzk~4&qxmSd?<-l+# zralX3Ry=UZLN8N^>noRnSkG!WIrV7i_%vj?@@Xx1wzeFW7U-P9vh$V9F^YGfq{n|~ z)#_FVxl5-`QYs6CByW%!%9%g7%HC*0Ywj>v%6XtuX1O#%6`$7JC5RR@_10^My=Px_ z(-})Aglh~gz`N;x6co}+AVomp}1hDANieFn$U#>9>9_%9Y zC82Bl)2*u(Uz(irdk=*_J(_vTB|CrmvHr~J|MbOt_uQgWs@BX$gr{l_p>Tg_CimW> zm|!F^=s|CZioFfRK+0$yTQalZyrqh#c1l-Vu@2S3W(V=UR4NR7iit04laP@db*PRo z?yj+2r_)crn3-cZK^)Q`lF$8Ib;gqS&3;NiZ9j8Z@C$v;s~En_ocG^7Gq&6psU)wt zg719L{8L9yj$R<_|F#C~fAW8iSAK>C-Ep5|Vf5ASeU=LOMIsJB|37@j*HWi8#mU~O z;~n)|ZNekI>|!Mzuq`4cR~}LZ;84Shhe*b)U940DO3dcV*tNsKNhmd27N8tO1!!^d zbV1C}{NXX$+ddCuJrMSy*-Pxsso{1DZW?PCv649i$z$<|l(geDksP^bWY%);=T$uQ z)h9}KGh3IaMezI1xzm40q}3Fe{&aNpTwA~96o2^Ew!h=ah3F(lzjz`4Xa9fl;ul-4 z%G^K5`lBO0yzpf=&A;Xw72CV1Ff}42X=MrBYFy0Aqpb^rn4{4N`H)XDNT?qlsETGp zCljJX1X3h+pexkQ{v@}lqNd+r#cHdMRgw!#x73euj0P)IC$4{ttEYygB+5R@L7dY& z0e;@Y|*w zbS$Nfs0~UToRojAr%)lHjh_iT2_e9bkpVTRN**86DPn zBydn1Oh3AiiJWLf9bYTEcISqCr7|(t$epGZorSUSImD$9k%bod1LmM9b#!5P&>6^4 z^f3cM>ZB9}U-OacfwsCtQBs3&Stt-jqtnKUUUUAPE@Xe)+!v{xgLky`-ItR6%#)~} znb+{yC;7Co)-Rtk+yBgG&zS#{*I#kn`6u_C^dPoM3=1M;HL>ezf?*I6U!X$AQ^$`? z4ahJs$5*w32?LKz$YLBifTk4!U(a44ZL#G=(5jax6KFi-We;l zgtI9G5HtB;R*!f)-%<DQAY(c zIWpba1PVk`J)$5GlF@7tL^F5{j#NivEQM+I@EvMqo;al|9z!ivyxe|XK8?)+OfUO-I?J7Lq-T9Vfi)%M5>M zYD-fO(oLvGbEi(YgXoE+<)X#vAbb(g0Ojh6gMv@f2qR$m0#y!4sRxJX7r&bOudsb* zEa|}DxM^F*mE?IJn^*k$d%vz<9Qu!VHCMdMu*1WpJL zzKkG%69NPX;Dpiq_faME={c>nv}R9Bw{C)+;MirmTy@n|4}R8F9BsCa{$s6b-L+0S{DJ#o;!1&&`9u;rjVIi^b|@*= z6xb5+&ZbmTJy;M%jnh`il_h^983{R}H5(aI595(cB8uZoYJ^N8N7rQyK|auxosm@C z##2yebAZ6VJ@<;xPQ{I@!Xs(!$_a8uq5tvT`4ILLRs~E^?&`$*!aY9qjCKt8(I?1z z^vzAt_URW^`YXoZ8594TgX&91`KvK$)(WDilS|vDF&RrS z(ka`T`mpI!Q03LDl7%l~gMfyV_=&r_25!ncLb&+7$Ebr5o?d*JH|K-}%D+VU2LTl0 z5u-Ijod@GOzjzgX?VEq|AF==4uUM+jKELkyBnl>S-xo%{4oDEc*`e5254pkx&XS%3 znfJmfiEeW;ns%MJQ$)n59J5MGRTmE<3>NbVznMf3@+iyEzTl0X0ISXz}iL`cr@BI&P8IRxN`NOW%#C3h013vLDpL#_5USS)*;AMZ`dIsOMXU0F-4J?$m zs2$^&r^72z^3~s6O>Z)djUW#x>CYgIrd%e>6b)X@q$%Mn7n7%N&on4E;F#!!C|wpr zxOVvj?18RFiJvmhAe_=VvGqDX5}9WZ#9!kM8Cy_2Scz^B&O)$27E5CI2!JCEZUAv= zt&b1wQZ4~&LF9kX;i5l{6sKglo_ckXOjv$2WKxm<-f8GKjeHbb%pX(p2|r zz?}3K<5%1-`u6cm;ME53-+larasP#jZDRC@iYGttSJNLhjn;NGndGA=X8KeP9HL%1 zibp)#Iw`Y8$0Tl=g!<+8G;=<5e%H|>ugsJ3Oulgp{MMRR^XkdVX zxSdW!-T=>0jXnEF6no0&7mv~WFLjhh-2Jvi{nPs82SjoYIo4|PGEKN?MnlBki zBvHdH6+(YGLS|fw=b9uq36jOirEpoxEbU> z*BqwznsprC)5G^H@Vv+bzUR>RJa2#V*0lJ87AN(+j?}+&(e|CbFn6CkKVR{LpKH*L zOT*<#6V`;!spxZ_MGF@4gQ+k0Jm_<)QmAL5zlwhlk;h)fVxt<4^t#y7iMMs9{5nqi zb4wT|C)}V?yD-FYfV3zSf@j(xB++FI{kc^qQx=M54W~ir={hV7IUJ+^($GM;kN(vO z$`%jP=s`vojvRt!KT^={M-86-$;0!7!LW2Grr&m~=l=3}KKXr?`(by8TaLP2!v<2ITaCIC}GlmfSAu_4_D>Ls`IUt$%h?!#0|AJ9zk&5u7?u_ zIoG9KGoko|p^YBb3T5R%tZdW?L_Fbu7}4b2Q6)nT4l~FrGhmtny~(cOdK6>CFu;^h zQz3WH=RVDcu*zE;ixw|_;W6;Neyy=z@7jNi`5*S-u8;WT9deZN7N_~mA^Q4re{T5= zalSBrv>W=2M0350CT4(KW-LU^Fd^Q_s=c!-S%3j1UZ|xkDs+9PXsDRZkWX1@0ZAF2 zgDrCc!pM@jE<8f((i!h$?oOijai}Jiu8O7taIv$+z`OKlwbW!0}U@OB~+K{2y#z&2Fo z0}ATw2eAc#@*(Dt9hPf*Pr3o(Q*wVc;L$W1*Tv6%QKsN`JBY7Qm>(>rsBV#|Me>8C zv~NB9BFRmWr13`qZ}o6J{$>Ao?&AmkQxE$)8TzsZ{Ir`SpV+6T?kB!&!e6%Zrhn+9 z>7OVuGq20iSnNKqE30d#BR&)1KB117;ek?~zH=8Sh5s-}U1n9EQx`ls;mI zfj1|CbtGxICV7u~qCeA#w&(QvVOP&S7VwUKC_q1yF+}^Q!CF4p@4cnbi$8G&xS#*k z9+mt)|NOMGe9ps94$0P@eEfgSH_`Bxe&Ylsn>bmLnNp*Kuz*4|p6u|f;hPpn$EGs( zGPYtQ$%&K>6QyNVT=%7?yHIei>iZ;R=xYjTFuYjR@Wqni=+o%I0~Ht7V z+v5g;dsryFfH zgC6P;8tM^7jQypc<*|I)u^Rp1*+20B()c^xzU7Gd$w~0rX4BYezU|7*dAvQhwed0X zYfWj^kvc866y(fIb3p|rDu~fO>@tWgruvuFuFNTvp`D2tpX7fIsCZ0eVz5AawF1qf zq8yu*2y`(-f;UDhFiCx;YdTDgRPITGPaDQi0XbD3C4rVSJnJO!^vo--fi@0 z2<5v3`IH%AqSoV76QG2T&TTyd4@7M0(W#Tj+^3Z%&<;W&vVGil#|db_2M}?v8@miN z`iPDDuKwicX=8tl>*hT7xko&4c|SNa36)|@(x|pnf5ZYAGm<>0DqRR-X#`Dhx~W$z zqC(haU5+#uI;8R{3Z71goV!BVRL0E6z-NM6Xd{M9wtih9s3Um^Ck?1XC~?-s`Tfhj zS+yMFM$L~;E|n*b7GqATok^Il4f4nw8A%fgW{~m<`+R@l)N1Xd#nKImoJhVfg5d#x zlM%+FNKPLpc|_rPH)a@bv3u%kKhKNpe4Rb_msStiC8VU{MuW0LQW!GDw~mpVU8P1& zRW7J-Jv)R6+leuReM_&St-X(wQ3+|vQndQwqcajllNZGPjWs*r@THNQCFYfW>M{45o8tfE(Vk{2dwgrFhxyStQ7k%O$0%11m<<}b8?D-vN zxF3@roH>0Rba8`Lrl~RUUVY@vnGEM+p#>Ow+ThJpG#Y0t+}Uz%fkYP(`H&v7khfnJ z7D4Pt0HIr}oTL#$TPn}^ejJF2Au)aj^vO{0%JmPo|q6Kr)*9U`+A&cL*8OmjRw;wN=B%*`4G%S zOcIlxPULC{O}s#?8&su^>$NF=b4)yAw?|uRe_G0jBj-1PD5T(7AnboD#+xGcxO3|hh!c#nVVxn<9pPSq4naut z_n%)w&Fgq!5@4K-{~3$*>AQH^VH#U;8fCw@Vw-h6X>mzUKMvx6xcrLgex40lUOkXc zF8SS0UM-ZxZ7_}(n~-VT)cQRt7!weiv;reU&f-Tz=sCTOA~G7uIJaekz0w0=c`kn| zm=1Oj?D{zeMfCh4L|5JiJc=nr#Cy_p1E7fgg2}>O&0p8!Z0JFV_{(5Sq{pe;CmC<4*>3X-(c5pA$kQy?MP2GQ49#v%2 zDg~VPV08xfp$0ieez801(TlXfjm&>rpLXjTX|e1-{n_zPztOj^v5M!Ge|(Qkt7rD9uh^#^ zkA8jaX^$gHs+CcxfaGyaR6>m=Zlg#=R1RJ67|pnkXCN|R*O+2j1nzJ)n1hTwQ)nTm zqBo{mhiArW>Z+|pAPht9>0$mp4n5Y9>P44D?MNZ5U>d9gZoWR$W)y!Q@4*M7DPkzR z$b7^PBKl`tDV?FuC)`^sjmLDtu8F}2Nu|(%dn6M#C)6^l@^l%nsIpMW-y<7BKd#8LEhRe9#1a5>gO0Iy**81Go4drS#pVWZJ}By!W2W@dtR$#Qlu;XR3HOrn42oDh+_X0{#)UE^PN z9`xU@au_xCui@mG&*H#;Vr)J+HQIb1Q}WF<`0rpE;;w(yELwTt@AHh*F#f@&B?VzD z0B!KqU*i`*84DsY2>rj}ljl&}I~PJqsw63a3$%w1R*&-9=#N3%+NQEEo~kC+rODCp z3&YIkKlr=?_nUkrlS*0Q_f=#Fm4GJPO zv)o{^)b-_y0_AN$h>)p9X9$cP-`Sw84PYJtwq-EvjRk)zEUD?FQ)G6!s^_INPnm6wI@uG*jXR_7l^go&n@J5ltLdUHikwQ${iAzPJDbU zh0Ys(LMj=1#s?TZ$qgdCiJO*Vqw0$%pP9aSXhVz{u^%uZy?$niC$$?(h>nYn4sz86{llj6N;@JxI&nE`u2j_jV`{r=^ zvUh&kFMn_~HP@3r;O)NF3i}Va3)=XERX)mNhwbCO83NU{Vhh4Et*JUXdSZhXY3mxO zPUBpc74wjnHf{V(CVMPgO5!Qm4Rh4#j9q%SNO^y>gW<}U@X*lXwJ8*ZKuQY=!`}5L zdfN47vZRLbrdxHFuZi;UoEu@xXsR6>%w7~rQ+cjid)JZj{6G{lkV4mq*TdW$dXzAd z#-S^W#IA?R>0ReGQyw=K<|Kua87M*?(U~{=mi=?XtF`S&#QO#6%g-{Sz<&4v+nA|8 z<6wV&+fR(gk2&$p{r+5k{D7px*sCLhh4Su+5_4<>lO5Qs?$22$<}64OB8KSa6VMYn zh}<_*ZDWWh{-yEL^))*zkUvK`scSlip$0p{po-5~AeX%IgjGev2=Y4>Ch}4QEM+U} z5q;ub2m$Ux3iJk;s%`l&q2aqBEv6UG>`8ws2W_r^;}`C=^}SF=N&{`WcIJKP8Q2tJ z6XcM9a#kP`1NJfI=Q!iJ1}1QIRk{}6b6c!%FK$z|WJIT8^a*z3g(3i*HfOY}z?jfd#@)XcK3=DtX zfJ@j3IuXyjFtW)DGiiC}zT)oNbKo85aUYB@HboCih>I{N^mRCKT^G)q0e!R!z3Umq zR42ABb9w-dd9TnLU&AV*H-Vv)zwCe9-+Yqt8TU}fga^ZriP8{ha~@?2%x71Wp+%UT z1I8L4--ZK3#EqOVjHTUa^!kkJAhI2o&vog^fAfPs?u+Bvgji@}S?ILlx=SN95kp2~ zF@cnvLHd?oV{A`h7&1AyxhScM(2hD>G887I`0d;n+oWj}-eAM9t4HL^4DghVdA*r?I2 zSeS&wQV$p<72?Htp$nWR3HcB<@(_aoh!V`YRvOs={40K{>8tPjp}m$f#QXa)X_{OJ zHqr}1bgOdps>lnM(mZBO3F*kIXk=Fo^^d?Hbk4^0XSbA_0IzMy}WPdz|MyY*4d zCm8=l-Dqi`-M5kRb}e9-|4zjzZ*vg=#;!E6KoTvxyh{Q$CBm^lxi97Ty=u7}tPGTG zdpnPGPKwmhwL!(KGi5G$#&W{NV`SVLcREATLCy#8e)H$hyyaik*n?Nnd**@o$#?r} zFU2|VOGhE`x2`D@t<8Ti>9w)l&G`0>vErZpf6U?Zc;a{QdPKw!`K=F-D$f#c@iHiO z#{<8+U9A)H>1Og$@xb_4OhWK{olT6X?aiq~j5}S<@+OwpXyT4Ny^RU>Fc$%Z!~WF7 zpH|5jW7b;E7_RH8Z$k-GkGn<4PRDZy;)@r>PlILelpXcF3c7!qAbFLPk9q>KaB(^i znd?HT8Ri^n?fb~R^0#HUs=cA@I~A@6Mw7Q|_Q_w?a7BH`dpEJ8Eag9JGkp5=ceHzK zDn^j;?_6GAal&5R=HrNdo;hb+mTBAu&X3L`QaM- zb;B91*pD;Ya}UR7SK?u`xg3Aw7JA|cfA~j-Dk13+2m}ganJr2{CsJ61=*~?hoIzwF zIR?o11anS^U`I&32;&YR4h`f|gVYO9r(qSwJA0BeeK|8yhz!!{k4QB{h?I^%vj!W3 zT!F~ts62n?=3yEc8T(8XLwtic-JOxWW$~1)qlk! zG!|dHSWo`qSbp+^!Xzi7-{m?HnBTeo7u>4FH{VdcHX%mpkN!W@|K`d1$z$B`ilWZ? zgM+$rLvDw^=UF9!B@jr`6F*uxR! zc_i{#_Z|}MZqa0mzF7v`WGO!p%-7x-3_ADHA;3zQFfx@4`Brf~GRd{TT+uqKcwn&oo`~}53B`2T#N!;S2`|R=$<}{^ z_+~b|4-ID&=KlKR{>8X01HvE>8MEvxrfh?u5oWRqQ2} ziB14*IA!YSFNe-m=}!)5ACzpl9>9Ymp4qMvc~?%DZ(gF^{Iz;p&#naHE75QKgL9?# z#iRI{gZgvrZ*eR0A2yj*W}jt$>d${a;)H+tBQsC?Lk@)ASn(cti0o%RW4rN_aQbNr zgtfPH!ldQ61TLIjyqpJ7fa~{@Qx(@9LH`jpW$$^6xeL>wPSiTFEA{Yzx!w-QL@?9X$EgLjqD_CC2lCZ7 z93aoUoeXtnHUU8>Iq7EApBbUt1w`2({V7O^dkfPsSCMM?H-E~6?xcJHLi(dR4vmzV z;VWXH)Mbn7OW=DWJue-33Ei-8I%S`bbZR#FDH_ML*T;2d+u#V*xWO|_MA&k?{)s2& zPdu;i_``bh;IpssZe(Ie|I>ft9=2G+ADnZqUwI)=rrPHb*=x86%~AbNV;!6u-q9l~ z%`ozeP;9Bf9QJa-Dj2ca=MwtJFFkY9LT@?WT7BL)$n~=NG0&yu;CX{8uCri$rYc`B zh;))AiU!ivb8ac%NzG4T<^Yo zjEDvL8JGMsX5)vx)#fDlW1QeGJ72^1`NMbf<%=GLqzTveK6n*xiMW#u#2i$$-UN8= z+uMu|UrZuHo|T*ik%NC3f8xeGFnF-XP+-Rx0(l1dvDRcC8^FC$Zp*=K}MGw>%euJYX!DuGks?@r9oA`TQmqS@6`ELU!;Uix-fi zRv(dqL8t%G*?m15ZJjz5h(k`s6ywaih-{Q`flC1<@{g&OUzdIHv7zn!>E~$H*603t z&$Te~wp8RCgzSIt3b{drk2YIZ8iQau-8gA%>7wA|VK^@8oK-%x!amxwrZAUeBb?qd zZ+$>oyyQr005V<02ji;5C;B@=V> z<pQ&MV|Sc-q~*Z1&&t{YTETE?#@}f2I}SVA#Sb@zMRro;@SLsjDc&A2T?g7_iL1<{P#8u^t-n$N6E z?M#`^X!pdefT4_GK9PP{;U3J*mN!~QeEbFDCzk@?7cTx8%cd*-jrAtHi)Y!Vj?sZ# zjB8>W&z61K3>w>)X-s(UN9;JplQjGXzjEc`dYOOzv;#jeHqzYK5AL{1)4hOm@$eAO zVCccnGhQ)8V;-_p&K$^HnsUrp_{Ep8Q_Qo3aajt_(xKGXM^E56A7nBJ9%~9JzcI-; ztyHB;&(s6KboA(};%*0!Y$7o>9wpWH+)w-5Gb`|PX;~0Nl%=>K#lB4&9)pHEgR0z0 z6wiN}WbY7BJM^hudz=bxA>W#zhL7!z&2(9Oc&cQ&%r8O3+Bia-UW#s@Ay|h-4gvEk z^2X_~(WQdVKPDjr!hvBtzmt$qSube6{6L=0ZmD;rSP-VzN`wY+a`qvngskxZyK>1R zx6V?IDObEB@*|yT_E8oKT4IRbJ<#>sJAr@pp|8bgDu#3YH6C{f4cTZIWC35YLmmzS z|6x!sI#p3(EVLOROJ%%@7)ZKCrS?|sDl-s^U2rVSUAa`GT$yiP!bnZ5QL&DK*MpS} zH(K>Ew%$ORH68I$l!l>MW9})Lc^2>YbD%-TcKi7~o1E;R+)uMuvoG3lPPesV=7oO> z58$NHd6k`Wr|h4Hv|Kn3?Xs+IhBHVmADJ4uwkox(yL8v=wyTHI?H7tobl;0vO& z6>h_c2 zKd%8M9xUm-hbVd9rFC8Cr%B-@OcqEA#Z>9eF%Hu zEZ`WDJxPqVe;o~U^y?9) z9Us>NrqK?{Q>oyHvm0{u4?53S~@d z;l^-Pdagz>yp{S%Ft;3@)9r^HO%+ z=bgj&#p?8}&ZhHm?mXNMyZ7&%rS(yadr@aG9=_fmtB!vhb~}$*$Da(f&LixbN4dC> z{PorQ-h%#D{kZ@xr3#E`987b`87`i0K{x<2>Yn0K7psz{i`a~1wx7l&@O`|>Id*H@ z^LBAsrhEl3TLFKF+Gk~B5A>zTdJ4iCcjJS*0_svP! z7e)={(=v>7F#syML7%(ear~_L>B0pTSy_!|ThSx5q{iN&rh?qpOWB`7#1rlU6}3;x z)FVv8HjES)BwkDpwINw^pSuF)x}ZA)n2N64@z6_4ve17;{aH6(aZ||BBccRxm|b`Q zqN@ip-zZ^W%j4aHWs*yorzJmU2n#)o!Uue@^Ff;O! zrea3T>hpOVnfdWPy}eK2eb~>%DdjFWk&)tljLb~}q0&w5rmTH)|0e92eFkKQ-^T-KfN3vgvml+VfH>X#f)4;9 zBi&=W7cR`OFK1y-kx9jKx}a1M%@;ElEUr0G*x=77L#n&Y^NnK@mQ z;O%+OV4rQ!!+(BeNdtTiG|&0!ql1Dv+Qm`8@ZH@NjGFmReOfBb1Mm%Ohgo~+EG$xY z1!LF6bP_YQFb(FeK)B8CgDl}g4=*S)QkH)(+==}`G3Gms93sk{w(`S5B}+oG#RW)Q z(>?*R%Q8PLiJ`}oIx*G_eah>eVp-)*jjSSOz-D2n^jWmq(4Jy-M?Xyn1$M@EK`{Ca=v z0<$B>$ujEjo~aluQa%6@hu_yIzc{94`wVK&+V5v87z=HK?{CE}&_Bx#{?;4LI|e3~ z&6v8Fg?8V_Ax4t{@pd@2VgN9{pby$;<(EVyV0k``W-f$W<1$#m2r{VbhGSx6*k94bUBTRS88O20S`t~70y*S+$_(oY zs-}GDx{S5uWm$@-DYs}{k+dmayDqcZ@_tzg*_PLJMT4gN;!^K>{0Csh>>ACeC*2E> zc~GVt=Pgwwuc>0@b6%2#47)+hYq*tQRLvR__LLi{9mgz)2kg_4vJb?a8ghTf>AQkC zU-8gbxGRt@P^H(m3cxm|My6QiQ_f~e&#*tVT?w{CD?IkiHIPU+8340scA%93@>25h zV%9c3D|KDW?pt0<%F9wPiSNhjx`GVw{c_i3dRrcqr4UWIoz)cyoARUUGP5nm{!N;4 zC#)+#*b|RVGGp;odYOdb{@miCwrk8M~G`YC^+HNr$+8 zRn7WU!#~Rz_0tjcn!TK}1TDKG(Hr5M%(#nlIa9(Ui)O5Pu!|>h)r^nMf5;#kk$g%^ zV6#$Z=Tbz(h^Db;U(jk=g0*ep`Irf*e^l3)ATWX*3Wn}13DG`WHT%*V#e)-7bHQb(K zFJl+2hReU~!*3ZRfa|`SFHz3TlC1Ta0jp@gkGWng+zUfvz{On=^5=Sd6&Zi3NB`-0 zxK9MQA4f}NXnq`}S23GPIX3cJIRRqe`pUer)QiiX>XLtJ_~*K!EcNn|Wgy4xe8Il? zci#tP&AouaH9mNbeLMf+=eV!a64Xv)PdQnEE9!EGYl@qB{qNt8%Tg4mkNkMXe{Bv2 zgMXMSI6rFYWCzs413fmyu7s%`&l(+)i5^=u*9Pj3e?5QVMm87C{xYUs->jMBEZzkJ zd^WD5#^Qhb{gSd2@b1*_l?N=s{t8it)5{AF8(CBY_v}a3mSfxNcjYzr<~U!boNZdY z74_^Oidg-w{H(Z^toV1zQI7#)kdg9hyjEqY=Vy0C{AS(Eqg}I37sW1~WMA!@WVNEG zKQImDaqPMrfW`P(G@qm9bKtt1JiiZ{&+Ye!P)U z==j?+T0fdIMqOS@BQHyZaC;ByEnT;vUu^5uSPw{BPn9LwiK3?7x~T_7Q*WNtnCs

@?dQ0u-+oT89RRGE$3^pA zf-rw<$1=or00d3F0^f@~>s{LSH=25@_Wem)Pix;lYUpxH*{;n@j=liO^4xjZWU-f&<^TYCc?XViz{%{5a z@SaU0*V?6I4C}wO^>0dKShv9Ef7iwKPriTZ4x6@o)z$F1FF)tZ7WH_&e<>O2lBCl- zJG&`?nR z_6g<9v%l)*`0TH`8Mb-yRo8W0+>!9ynJJRqYV`94Q@P`K#+&Y`GXTC=fo>b5|3iOw zyaL*)yfmD-n0ohZQNq`rhb)VhxOfF#Iu`~+bGoLInA^wsytYL=1=(KH$QcmT7%C}1 zc`gOGxSET`%*uh*=QqR|0ShY|Odd@$C3qU&1jc^_tPCZ|3?sgQgv`jEb%TXDU`L44 zVCEKF0uaI|kGny89}qU1?iaJ^p=*B$84%zj=ORg`KU0MFDv*SgYMK*CSnwY-=b7lI zA`(n!vRTrG!@IOP?~)^TFibI+g+^ct=hErK^eWBwPN(5dY`BH5_s$tEF}dA4UF54V zzU_}TSR6nB;&U6*a`WxywSWkmALSA}aO3Z5MJRaRV3iAN}_}xDKZiw|w1}g~?7{lL5 z5%01Pm+9KjNnz^Ks_@p)??cQ9r9yw85KV(gzA4z7PI;eqi6|F!XL z#*N|ow(UyVb{QVN-wD58NwUNLU6fdsx7W1aK3`X`R^%0|1YAk3_`YidTbKp*Qy=Sn z_V-v~oX!7wx%i8AU9cZx>__v@^<9bW;jmEfg8k_NIC$T(89#~r6F&Q=8NW4-qlxYP zAy*X8rIv!JwQ}`OawLDzyHcRs6!`v$!upL|KF1m7*AV4E!*;|?KSlWc3Fje)FZpZc zU(>F(JzczBK-)-#;#?OZ=!MNv<#;+ISg^hAR8yBXAQ+kYrOdwvu!%9<3 z@nR_4uW}s96~2GEC18U$V^k+lfpK;R7AgoY<^EYNib*xwX?JP_DV2{yt8R^WU?|18S)7#kG{5~4g!flQHiY^t!4xb=_~ zHg=Bdu;8$hR0qEg2#mn|fCbPLQ@xi!D-a&~EmQna#Mys?U&1TsYw&ZxyyZxCgPc1c zN}>bvLG+2PuyG7ZNgvR-9`Pk3!sNQUI^@b@Op({=Z8?G3dnDF+_IC1gTOWyMdG3?X za-ne}=o#15j6LglZ$3<;r8(7NvGC$qH(QvD&di)fW^P2i{SoU<=j5@RRri(j#nR06 z$nsCmXFh+#ByXR!V=8FlE(rQpb@f00=l}TMw#DVJy#Dw9cD>v-*ZQ>iZ~x=}`9HVC z@%n#km&xY;ettS+`}$wba}N&B1aU_!*mo=hgeT)44k$i{Dp^iSPMFKtyM?59#tM{O~f8r;VLO_TF-x zAl-j?$3Zs8i{Y*q%IaYRL#;arm6mCf<@I&2%3kiyc{w7Fm$#egY?eoBQtn^o!zfPq z)F#oCIku`Ny!R;iJx|}O?e1e}@@zHPm3ePi^t4X#Fc&Z9kBSe@Z(e6$Y&&ne^peE| zkB#N-QCtrFdlZ+W;p(1^k6>Pl*I^h9HvNCue$@Hsu9M5L9|npu;SwQ5cw&0Sfam#0 zH!gfJJsbQSIj5-d*VFECHtnnzo%LeY@s{1&vR+>oFWr|@e=cu}kJtOF+r67<#y^HE z?L~E3#?JBHId-n)%iEy;8h8E9%l%=1)HGdBFDH>74!#lF>q+N+^!`@B!tLD&ECrr{Am1&>PmeJI_1+V1? zdwqGozr5aWWz{i^`}S`2iluvE_C4s0K8D5RlAkSxaki_SL~Xu~pmRSTrk74R?<6;0 zNiU1%hwIE+h?|5D?Xfz4TqkNjKQ4cQ-MXAlEIvHga8Pf`r70vH9fq&VZt_^E`FlK^ zeY|w`m!7UOySMN?O?9*3ecsjVK-*2Fug!;*-?5#SB0F81*d@vAiq?8q9xdevH!eV{?4pSK1?=pOX7(4E(m3#X7i@q(;AT--CUU zdqcLr6#7TSZQ(t_(?ZIOo#mU8?DnC%x0o|Me4O`(58E)Ofew8U$Eep}rZ#nh{!_!qOB(E0b+kE`-zH|4q>=?U`LA`J` z%(g4FEBBpVTpcGbhw0$uV|!a0i=YR2UW|v!cO&}Ph*^Kd@=90w$lZMq@}iG*X{yX0 zzF*CRKfH1J(x=U#Bp+{*VzsGOb9S#^{m#i43_A`Dm&z*KRQV;Uti~wnKfE}_>0?My|G5;m(_oZ45hiBmGwT{pUaoX zj^kZ98he&VZb`YbCRNwpnQWO$)SdqMxKaF+ zXsb>2NVvJc=4WEOV@7$NzMcc8Z(WShKIN;&{hXxc)KR|X+Jn$OWEV}%z1NfeR>slH zz0~vVu>Zm*qwV&6)6v7rVq^EUC4aBquCr8sknBAnvv6dbhc{5#N$%W%6_MNi9L~z~ z8Df1O^YU^?t%Dbro26GkZ$GztX*Rt?)5U{N%tO_GWiPyMkE}EtT{npyboo0uIWy2r z84eDkp6SkxygbbgrOR~5PMdUco6*YD_q(Ozy}X>gjdfU@LWtkqD#w82wTwo+`Fpu| zAF}u1$E4aXX;zdao6qk(q5JXJ)2AdZRKL=e4IQ?-8Z<*nWKpQknL%RrOBuTe(xcX|20fez|X;?7wa-dcE88J(KfQ zSHJG5AIAM+7`Wy&XV+1&x-Pe2d|4HL1)Rg<{Xk)EpBy8!b5_j)XFDuDR-3D|gMg z=Vl0YFRQc3bySz(Bz>zsxV>LgX=N+B=-R0ksuS2cdSr{!#a{6cbT<&LW@<`S6Mwbw}&)AaRFEyS!ldsmy&-0Lhh zmist$Pam87((6@xGgm9?bz}97%iG8k?7o7GQ~3EPAA zahqu8vtilZ{XzU72))N^zR$<+bFJQubapb6^`?7q%dPwHHs#3vh$6Lr-n-{rST7d? zExJw2y}pufaoTx}sxBXnPUqWXUg~CW*;J*WyfQxxtYv>4?)2AH2g-M|@OW<2)9!+4 zCpy{{UOad`kEJl#)KC3B(FnCB#H`Ddn>RI*{N=4X3_Y3OrW3wQtKX%@{ z@o87lt(l(J^U02h?xvCkOO)QD9QOAymq+*irRXe@EChlmcpw(|2bQ?Iv%}q;(@)G5 zdhsIqRaNFNj3;V8XNX2(h9Z1KTRTmK!e%x^L6)wWpe2dZUqowvwXm9?O-V0?Q#us^ z1pYNWKs(&l-NNs^ArK2JJmC(Ho%CWGY_y)b>5FXq7+g1mk7`n3jf;1NH(T&u+q-p1 z1F)lW4eL~m9!t80B z1koM_tZ8>PuPKFpVhUvj_pwZXtu2Ly#R%1z`1lZ}!OOcEx6Vr5*&^@i&*!UN_?V-O zQ<#%7)8Z?Xc3sF=5p;8!LqxzHI|@=|y&@sby@3`pGAfk*c{{M|k0}6J#}D|1VCI@u zdG3yr>%$IctH9y1IPvtdow0DW6+tv@OJ-BRVeUFTw<(2TmC&t@Vhng)M7zCj z@{5d`;^XUp$JNNaC1Uy%mm|n#8CP<_;Hu$f@+k*7vpJNWU6M|0Q&{I(Oo7a~o73&< zYanBE1p&A${p>q=*kV-U?R2mNP|x-vboG_L%`vkGJXcgj>m!Nli8Kh$D z5eV3S?6&eb@yE!AVu^y%Mr`ccxOs%X)1sh{#zg`O`S`R&J6Y&BT}_In4jXW*f%BQt>@OG)o2PMWejyZw7`}<-^TCj$EioAR6 z2KvGQJ`mG~;{n6z&C|MPq@a`!vrlKR1u>z2GSK4Srt$fe_t($Xa#3sK9IHU2g57T< z!yO;lKT81@KK_?>t*T37ustFWtZlk!-Zu}6(3&%oE+@u+j46Y%z0hARkvam&$%S!r za+zFYmHBU5yMr`0183zTmct*{&kM}jw)D9kHAAs?^uVqGIR_6wsF>FREeH=Px+&>@ z#bLQ>JJ+^k%l4CQI*ULrIeuf1;=2%k0xK0bd*92`Ge}Y?n6Goid!X-N%VQ?61w1;! zG^R58lgHf!h21(g!E2XEhXxaWS#$6=HcS1`o@fxwA?G>O<5hUmxn|v|u=u`6ufN(o z>;fs#3Rnm%6srut_Ml)3W;gWfCxVTCQih2nRP&LxG5u?x?^D?kZ}>4^-3|L(IQJ7_ z=R9o_lvbJe>phqg5mu);SbzOYaIkWLBuqMO)c_&QdQnuEKbg8~RbSNIhxaDX(Ui3O zUx-ik`&}^!!#u;p8`Db~a>-)83(&Uy&U&O@gCN+!!i&fevYd|Ne$-KZq2q#oI2qL8 z-*t_kdKB6EOtZ*WKXnx8N1tC4h9YY_odQu~&J;XhadbHIAEj?zk+2m7=jzFuphlqg zFJq^uHAu!>J)Msy>+P*;<%-`7%Mjv}$^m-2?RWyoe}*LgxHwkgb({UiUYgx#Qt>l> zHDgEJqh4K+GU}0uVZx-_*Mu;FNY3owVZG~nYS4q?m}m%5t@o51P(MV>Bt5f%KMjpyL4x_NxS%rO{?pN2$oJi)1!_3!?~yxHixDo#z@Lg0yJHlPkec{>MX z-2|&_C)lX$mbq|$q}2ye3Uda}rn-jTp0u*F##dtFIbNEzT7cH34*8A{_CA#wN-^MW zZPc>jsX%qXu^EizGBNc%!g`MXwc3E-s7o(Ctxef7_@NPnPJYo9vkn~Ryf-f%B#~xI zYyL2)ihQWVZL~;Y5D`UYt3XSA&#+4X#d-+T;j`U15x}E=dE^m53`FhRd-`IxdE9ui z77%1ng5qBBy`^ZA-=p2*C97$ZR7=w==KFw`Ceiq`luz_k1`8ohL&Nc1vpz$ zG?j%b693qL+8P7F1*uuT#@=Ed=%D)^n3uOFRlT9K2hbJFEQWgtH`?a^w8FZM;JcHC z*wXgEGwb?^P#zYIa3KCK?WT4D6`czLOYV1N(HOOG)lo|Ez^8hy%20gVd?h8WEie6? ztyo36cDzBF5y_gj=mZ@QJxZeSeES*q8EYXjjiro#Kt*_~pW4os$3}BJS^S3kD>`U- zz20Iy4337MOg>1`fwfXO`QwV)h+8XP+VyLHAHMvB;;P*&)I+3SOBm9p87qhgtg4Uo zCQXp_edeWrkvdNh!!9ROQ^6`Ssj?h)pH`D1tg;&XuDkp==K04#9i39%b+5JA`57z> z<_yq(r%}eH6mnaddF0{VlfgG`ynjDiSi-=t6yESLm0{gmuSK_>h4^+R>tUABK7KwY zBYSgp)B%8g`$biJq-D(d2;6rLdy*S7Q3ywdN z^R3*5SyX?0Spthi_I_Jt-;?X2gs^bw2AR8mAF%Z=ozSf#1a!V#&ay}0&Gv8|fi#a= z55d~`it-1R#w$F2fi5v~N+$g8M>3Z3)~O>=&FQni`OpApK~~{a&C5JT#a2cr)=U36 zXjgQlw5r6NWATT#yk>muTOFs?6N`QnU#3^P;>7%gZn=K-ai#MykHRh-Fb^aXii{V3 zD)-?qhanJR`%n$@@6cP#tVSBm7+?j{W3%Xb_F(fpl`*c-R+jHGTNmh&@k=s4g5TlnRxG9m`Q)>MF zIL)r4y1$T`Wmt5-jn=JEoiPpme4Bka8`vT+wHe5DTdq*wNboC=wXSl?~A-OW52F~NH9wfb5-W@h?-{IlcfO+muJKb z@$^>KOIr5J=(FXAqO)`M$#>L$qAlHi{g|n}bvZ7$dz#zjxG!(GVK*5%Ss$TWW8v*u z!jI`ZC}KvBb&~&1^X-Zm;3-+qm^LOZqVCDc7%0Pa^d9l<*22#CY1^Ln9oPea<&ojv zw*t4??dxwO5w*ky9m$gEV9*>2s$;Jy5DX%vON^~e70mlkPls;-8#mH_=Testo6!ay zE2)Mz{@^Z63(J6!;X7D{TBU3oao7LkBe?`#MO35y4Lch-9ZcS6z(JF!dhlZIWmF^M zmc!GHI@)XE_%t{>)|wWL`wDv#tzUtp)>Dt(5h=wV)k!-^+HTCuguj7|s<*{k6SG&j z@Uy)dF#}9im>)P2E!G;wuU_as738DUW!>O^^uKpXbu8=KGjlQwYc*&K7)-39y7 z$&~oC1`+DCz!AkBWP{zHLDytcd(DhSq>h%Jf{$DJ4WYXy2?JE$?m;EI@jE=?K^Do1 z(tVg7nN~brcQ}Q)7dNs|O^Egvi{Hf?hFLxaX;s)gcKo2ALf!*^B#5R=&KFAb5ncBX zjLH)C>}0}%;Ab^^y%4C6()nS5PR>Qy|Jb-zA;^6^k4Rj5>`H5WYl@0h z+7lE*WZ}H4T_@y!l3*;^-%IC?UK;`*RB#lmlU5Ra5vt9lI$JX|b!5q``2l`2lL(sx zYsgAa;or2(b%;|HAN%R*8~>03SNy;?1J%(+-s&k589sy;TzU+C&-|c|NI0H#ob_7i zW>@1i?wZtqec9v5^^r`2yg<*wb11DTBAWUm?CV!^dCG==Wsk0+J7mc+pPjAQ z_~$a@mkxEfogcpk07&q21KZoAd(FreSh)v59rh7#*D==c_?prelW&%{=yq?Fr+}Iz zu9Agj!`1JMp|JaLLgK%9kIiyGMX}x%( zj`F35S0O;%*4;S;?nePt0{|p?J2OB|9y%hoA_edQFOgXmp$6gq^()S;&|LH=Gexay zE6No_Un>@+LeAyZDmS%IgU_9IjP8F6K23etl1beoSgn*}zx}L%P`Pn-lDwQZ3o~Xk z>{wZU;bQsek=BX!g(S-`Dv{r5<<@zIREN5sG>ItO`$r$}GjnPzN0%v&IqCKgqv>Pz z6KOx0cj7<9PM1J@RZ}5Zw?#W?bD%-nVp(G|8V9jpIh*ZMQXNd z*V;}YrHg)qA1`7}D_vO!rBP$Lxjr2IQt%qk1{*&(T~G|M+fF(wDr&uIuwht7d-_X% zmBMGYlrWFKbrjE5qNB+_#ga)s_1RUK&IOEYNq>fB18-h&meHhM7hK<1!<=BJ9^Cv> zNwd6S7`xO~K*|`zUPh>=rPfi^XuwzYQc_5W+G$g{M}aQGabYzn{!B2+l~8pLp^1mf zK8cRo=eKrG*=%_A$b)cx7-zPo1%uLmI|HzJ4`Cx(C&N-07V_<=MPa(dieuOMO$2*} zql~fk0dfL6@wiZ#uu_&v58(i&GwbVyH%8GN=9$Me@|Ycxw7I;A<+kiADupw&1p#@Y z%5ABQ6NHl}x{t;+>OwYeKdMSyNpFFj1Cy#g)kiC({`mj=?LC=`OAQ&B*8)g?ABC~V z>}E!H0WG2Y4u+A99ho^f^3N} zK{l}L^un`O(OqOvf!|E#RGL*3pCmf2rLqwV)!_ULWGs1XA`p7&;oi`%8i>C~oO%3( z8kXPuJiZTwTizglDXilI{D0U1#81Xq!J30>2IGJy7O|l~AGz$RQrbfo zi6{)%Mf|)~Bd{1rW`MPya__taD&&zi%C}fp{i|r=9+W3{*ebBk#OiEy%rmGJ+z;Hx z@y9r1cr2XvG55N*fi)i&-45T=RB4qKs}O2kr|Rc=6sVT&PU7hF1>1RlQ?$*F=7E>@ z+SDciOS?S;&55CHHhq5|*g|ye_e13@G=Uf_zHMz9LmbW_)&|e=Pvi%peiD-#_DmCX zocq7*z_1Teit79Q++#Zz6qzCH#23c^l-)S(NKQrd zJEN>-v;}X93eP2+`1pbwTF_=Jyl6(kKmq1|$`r4qgv##MC_~m}#)%wI6@- zJj4PmO2o|RxtjTMGfY{5dTOMaYq!k+iyB2sNl12f(@3+0G38f(eB_~_#<8*>(i(M{ zI0K1O!c;-0)j??(Go;ADR5Y792R4nRqUhFv7tKDgW-!1Jl?y|!!j_k-p-Ur|aDN+}4UZffSD!f9$yTr< zwe{FN&|0Y=RZw&?v!zbDV3)O?bfQRH#y@PH=P)0Rb~0S%8d;R&nCAoD>z6NDR!!-0 z#fjo3n)zyD$L|tFaI#__MO@;7p*Mz*?Q?=OSQ+kIY81i!pM6 zcaPt(g#-dY@-Piy?(gFgpcC*dHhs^yaQ=eui+EgC5*eEb2ykh40r36jj|hIV8& zM>Q%PP-|;{-wgQl-Z^wjnfEU^7{gr02v7M7S1M2Q0eaUBzUD&#BQTI|-s$et-x~vS zwvQ(~(Bt#4d6hFLci(UpGhopB`LlUlXL=x48a@+f&lx{c=_j9LN;~}y$*^eRuZTHC zGVz4{6+BdUOsLO2B!LK4)(h$BD{Us60i{1!CIe`HLk%xqyqfx*ufx|$?mH!zpe9X? zab7?DQc{NNS)&>+qd0!Np-Ox($eOUL$A0YlNKG$#&B;^<@&{$=>!cggrwj{_090?! zr5F7fdwF+>*|xX$?JF92p`RYol6W#JU4G0<5L4djPh%_8AijhNU)0h?G6040huNG| zD6uhrk!1`aWT;mBdc^|nG}9!HG&?K=e5LvbDy(g|KUXtT5u2vl5lT6a+^)y*0Bm8W zZEyTo9#SYmT6J4NU;4U`Y$?k-qg@xr!pcw$`pU_R(wIO4y(7(i<8X1ZxS?gL`@R)e z-b&3{e*Wo!}piY!g+c&q=MClSS$5hwfgrMql2=Rd8B?HS)b*proRCpA_XZx;x$QF+j6_ zN1x;;2R8uw8+}rBXOik`@k?hIA?D+LbN?;E%v_}+&XXg&*Y(#Lg)g-jl}a&7YYF0O z=I(<=0%S`vkPM11XOceM6CbBU9!RAwZf0E^i1FRm*B)SsWY+BwHSbN zy$2M1cDW)Wl3!W_ms7S(3!wqAC@RQ*Y;oX9vA;p13IQ}C$nCz^K*qqo!(5taq#o}p zI5msmA(l=!5(CYUOa4Ih`I7v1*>Prqki%KwT%zhoGru1m5j-OH$Mo3fn~v^*Cfo#K zjhE>^7l}G@fu3`N&ovI2X2H=+qU4|VQqpPt1IdY2J{yBP>kgbM>K8sn&o#_{j}5Xr z-dXoK_dym5nSUW>vvVVj`r(}I913K`Q_oebIJM1zQAfLGOKl9v%%|(-RDx7M8K@oz z9+M#B#L3C;c;DnJYsMHCx=9jW<=_P9)h%juG7G!@+9YklO3Ail%At>>264D}f7lXL zK}%D|Nv{_T9$kK$Fq$0i`kJnPL?oLA4#DoR%(lR{KlU;sx(3f%e7W8Qxh4uNzCYlJ zB`yUOolImrW?^3VB-ycFzF!~_rM;>|BZA_+uz)dzqqE=Eew#6=LH%~5J;|zSRW+M! zmyqFMm#a`Z-{qij?3fQl{M#95HMGfz=8X*R3Qh@)&U`m^D<1jLVhA69s?PRb=jT)o z8G&kXLy&acQc;|tr`cs-O8o)Eww3kpoCMW2?(P?}(jG2cV4E=AEZU{aew030d*DND zUb7M^lpfD35N0S5nvKN%skbcX`;?zm?Ww%H*>fX68JyReZoCR`)jAA;9Jq>yXx0DV zQ`f*)X1XaX_7o_4Gi&dEfes~5+7~c4c1?Tb>a@s_kDF+!o*0n#6VC#Rq65*hLYCXx zpsAo?#d4hGJcC*%>qi;hVt((r5VS>s`6Qo8nO?;jn$byZRJUriMnGnf-x)_XuA!KJmlK*@;kwSk@mZpu zVSjJq1bgpHW-UKpG{F0%G+(X12d77AzGc-DsYuiH^;@ zFnXU8z0rkcA=|yL@}c?rjTA@cpNmOVKAHt&3Ik`R9V+77+;CR35<5?#$~F zvYtaXVoy;{+RQ6|=!4@)pD04&ek9e1I(O4)@i0iz%~gS4j3JO)td`3GuWQ3ueEhIK zBp4=Mm+0h0p7U}MIW&*U4)!J_wXyJi zx-3!wpQDp&BRaxEh~94~zJz(i5t zE)>7B$JJ9g2iToe=<1vUNe&N`Jpk45dX95-6KHbx+e%rUI$y17UJ@?D;*y`Ufh-|> z>&eh@1^gU;k@t$z8+z?Q*8&Ct@zL8%&%A=Z)XX#$GrGwU$UIFq3EYSlG@m|DD zlnM5}ELkFJslG}Nd9(h=?lBKrx|Mqpq1 z$XX(@+!RGcU|$;@kOx-3tFH-C5`u#nr8tm$%R^mSbm;ux>@cOy=LKRz-U0US?tMd( zk=|H;v$@`9Y-e3?DzKQlrE8oN0EHdMuUNl(nnm64_OpjO*NxOG0idvegyn1OnE3#C zRQCY3hQ;s3pE(qDzVR4P*FSuP!oq1xX&K1M_ZZQ}y3lV|ix|m3_G0j>uB_AxYQl#4 zw9D;>6l|U6r57s^>^Wo|k7amW2rB2qg_MhbRYKZz5w{3+y0T|Pu|mABC&`NvbA88&CJZN;ZdHS1 zp@WNFJ1Z{5{u=3QE??2lv+)ONHx@TONCe5qX9f=AiX++LtsaHNtBnE(#Z0tKq;JN5 zl|x&6OH3kp*)YO0MVZyDiNE0y1^OS;QM=%>Hv^&{2MDm$O%R9)Y4-CFz?ui%53^V= z@BBWCdKmyq!TaGrZ^7sNDXOG-4Vb|x9KAJN5{k`=nB~l&VbersyJ1i;paiTlEU?!+ z^U)}C^H(-rqWM80?k*-78^7Bd&^}myaW`xi54v>93?Iyy=ujuZ7Qh+*w9Z%p^@!M> zM}uU@buX}mf}JV?rW;3&Tyj(FT|A=H1Y7YKvT_~cb{$`R5cMHebd80ze<>RGU;M+L zK#Ydo`q@0zww3Rl?D~}0vM;Uw;PAqam28U~7>&twfahRogS6+|GJ9y8w#zhs5d8sV z5?fS)4@@fz5lC*YmyZrVFG-@s1`vZiWO=^yOtXxRR!Uknl()i+mV{IgzJ*nhl z7}iL5DgkkWMP(a+#((ftQq1%Yv481tx!Pk?=U66K>ql$Y7h%j-@Fs!+bVfB6o7wY- z`HN-dzW|mSP%VCGRdn~7zcbl?-0;T6EfSFr`4bVFm<8G^X!Gi?&A;$nTdF)Wx6_4! zhJKmpCekJAm{3IOAw0-Ii|ziq3Rj(bVR#3)E1UqreF;T>A;fpAu3A&8 z+{3wSlw%AQV$wuZBZjmKD)k3IJgeFlUTUIpFas2RSl6)2wHv4rTZ!gT62{MX(AD|x z>uo%HBcv=xA-Yqv7P7_cMuwWzYJ1Yl1Is$wZ?6QC6fZ|VjWzU9*W6y?-OcOV=hZmr zzR^~ai_C;6n$;%KcuQk{dxFhZ&xXrmS#5I0c=>eK(Q!j23n(srNVHqOq>9Ell|pyL zWC9ej!#zD{8hYAWVM?tzqKFRp6{0b`*`MzG%*NBkV(=s17~JZ%J-BevD@N54Y`yl> zvsUmkMsI#w_oLeg1UB7EuX7uU3Q{GGEU(HiTEzdUQrx(BewA&1w6;>F>A?~>dmb!g zFlGbsgu~ANCiAK4hyw;C?8F!MuqEgqTYaVbK?co2Di2FV;mv|YYF-4$&iEA+vw?nN z+kn0Axr9))#ISwJE4I|J@q-!xvU^Z@?CDT6g$N_QASSlM@? zrri5}UOHz|P+?bkg`Q#i`T@%Y!f9+01k8%dnziIXKw^%?E&he)!R_FA*DL|>6jM1U zYWD#Y!{L*9tbk&mQ1uQ#C~A zSQVJcYTwcm-7IzCm_)O78F7nn)mjsffP^7%^hKWgq z5bQ;AFgyl-n_5>y08M-$IyhcJ?){37kdkd}&2x z;ANH-c(x=f=l)hebfY=deh{h zIT0rVr8rL{xgh5SZpoR!01{-+jc~TMloiRt+!GSgkB@5mf)pU^U=fZFoP@rs6wHsQ zQJeuDCSdLy>G>fjyOmmYgRvuoS1=sZy4=Wmgx^uTOBt&&&D$u4NH6T0p!gE zGcLI%4uBB3=rXUD6-6Ix_i-in>e>Xs28;V~^b-Bj;W&oZEnqje8KIhmWbiC}0DmY) z$OhC&W6|y6=A;S}{!o^cdmUX-Cs>B|%pX;T7z7VMR>lFACZoi1AJ_Qi`V;iy8yrqR^mV z_S{P`k7ATQX{HytSn*{@`IHQmru1LCR>Kej&ufPy_}`KHA2{t$b!>e&jB1*HVXbw9 zc>aX3m@ak7DeSd5@ri3fCpVF2WvlhGjNdl_Q8Zsagf(;;3~d}-5ZLk2fU@6*<7PC# zKqs4=xbMsWDaB90eOhk_%l6t_#QHhZ^l|Bao24Te0duMiqNc31HmKkgjyJXzy+MV?`SBFLZuQ4y3G=*B}z_M!Du` zLf3}ovL~8AI1NuejqInl%A|T!chmP=5IUQo>X$@!gzI;Pdj<9`ZPK6cgRmGQYM9H8PbS=53XD9 zK)z{zoNCc*yj=pHzm8d1Ei&nPu7s{|aH!#D)g?l|sKVu$ZGR@Lmh_Gc?LnrH_kxk0 zP|zn4EHp`l!hT3=pw;p#yFvc)}3fjy=-WPh=yPzv?z>!N*4fke<@rsfTyfs zg)F(C*Qu#r#mA>40@`jSVda24JvssW$F@G0E--`;qvGu$Z7Au_Sd)syQ!CWPJQ8A| zafGLta()(4XB4c@WuMyD}v*>?Kb@nt6tUj@ts2TgvH90=yOz*z(QBn8X&650<4 z7g&@3j^uY*2jdWosY`iOJnt|QT_+%9VdF2zuEg@&)i>ruo5scR#yvUKhdcRAhKr%< z@!&_hkN|1l$oily9>#uGTn3RQOX_iqG{Pt|b{f>|1c0c2&6>fZ3U;HTF8-_pGgD|j zPf>9lO#m&sbKNc8lb``yd*^c>p6t8YZSWAk4}s{@MC4YFALfE}z31dmd6=3O_nYn1 zN_Sk0gqO$J*3wr+irXhYU4UfwHzm!C3i+ScX#xqmdnf`02S~+@yc_!=-v@G|Oh|3B zaEIqgys=GxaLI_!KwS_%Z-!2EbBvaiOkMUO6GhjBqxDSBk4*Fi`lSf+U=h{8S!F?X z@>`&kT-O~rxN&6+@c-saym=1k{p73m07%M%*Nf9C`}S!qJ4YR?gEmp){HT?l&E9vT zaki;HdV{$HeaEVx%JP$uXLz5WHJwLuOPIEc_+V^*&K+~|BbHp1zVX^j$jYoGyv`KN zCRO(9J;lO1J5?#Q2W_(Mb62;YivdnaxS@Ua+vg|!enQI4Au9VtGIGHEuq%d+PE2AQ z1JGB#&q(zyg>pCzsTva9!QZ+d$Bo)=U>PTPx^A^s)ZvxDx(=a7K|f1~0v-GI5E|Y> z6w@t#E-KGXS~h;?dL*oMMZ!vm%_u!Z#+HAo9rshhq$!C@bA}o}?d@t`wO9Zb1Otb~ z<4V&4-QlkUt1XgA0)YkVDaLOQ`bweHdk9WRrzyBL?X!&qe91I~{@t@kLMoFrsWHEV z#THr-02T(;lxdv{yqe@!vQUf|0#(nuz;u{@9B+ljW50uSL7g;c2Br3uL2KkZs}_`l zffETFQK`}4zuZo(^Y<+OoxAT~gJXh52M11Cq=X3_}3Ozxp^Mr70&y-|-`OG9f5tm4czwLwz?k8k1XN&3ky( z%jKcE5PAI$B)yqSQ_1pCXF)fjpa>Lyl$BHS3*w7;QEY41_c1x-VOH@_>iIdb!o&gp zeu*)^me`L3ETqIaa9J9W;4HdA%73Op;JsA{;FoHMJ_(}s8FnzLO-tzz zX0hT%!kbZi|@eb4&S=yrR==hx|wYZXYZ%_ST%00sAyBaxMVL^|@( zFYNS)dluj3F`OPODAlkeo;D~5@#aBKIi;VQ8Qg_ge|*?J1$^B}qADu=%QJ1dSa$Ak zq6pIwL4s)@^7gn#kh|t1zQJu$R!1<;-Q#;%Cu^rgP_DOb4;6BXD z!}#Vq>(yyE>X;hmhz(VT1Oy6yMwMmgv*F(w^(NK}oUdZh8LjhJ9C>E1A|Uk0JVZ!0 zhd?>`0JRse4{@VaKRe-Dk~;~=w_X5^l~u_=Zx>4KOi@Oe1s1QxhIUM?wHkZPBe3*O z6v5i)QxJCoy`l`yNq88ZR=J-h+D^Ri!??riK|J|&oUGaOiW$RDTvq^pLa}R_&YMz7 zdtkr@T+ch;6*Xbc+JKLW@s-l6@Ef^#sOvs<&OK++x9 zVJ@KDuO4MNtPS+Zuw>B$LXV|b73uolF1)Pg?SlQn>5QFer{Jww`iw}kNw5t`wo}CP zglby|6U`Si(wg1sCz2sZxtli5|+oHv*4Re4qyw@J1@DSUK!l}UqC z1mq>iuM&ON4;TX)Twkt+4$cLY5w8kP;jFfT?jnaz>Lodwj8D7%F+Udwe{dJC`0?`; zg*S@?Z};)XSB}<_wUkIM$3o;nPfzfLDjv?Qak&O=>a)o$T-d$6wE16R;uSYE-&B(keeo2aQ;<+6>aaCHooN`-m;c?dM^T+PXXJQp5R6*`KzkAx@mLlQn&} zi9|{;+O?lq_~d76fUN_;EJ;hn-~}C_@)H#FeXuOVlDZqIWl2#rzbIC#^ym6~ZoMD_qKC9Q z05r_W${GTj#4ag)qf5k*;lD3<0J4VjYpnfBnAMzrTYj4$up?=_CN-OF2cRVn8r`Hl zBe4$(6hL@}O>u;aqwBF%HHC5bd+Y*j-3gm-DjtRsn#yj451o$*ZS`UHm@-%4${`Tg z638KTo96+6sOpYo+xw+x%B%#9qZvaYat!D{LRSM8ugwQm?v&y51bVQouls-(WG|`i z!M_`S8i56-gNTmlOU}QOI9b|Q(`T7ED*-?&fFDS!yRVc#5}lJN3b44iL69o42RCT7 zCB3|}NB3^D=%caMk{7B6Q)b<2UExR7vEZz8uSY8JyUKSAiwV&4YesccV~s;GeyGTV z%u8#OGhxQnJ+P(fhGN1We;u5$n(;F50xA`Mg<9fc=FCc89R{v-qPIb+fm2R?OX(k9 z*Riu9&s=XM$|}EzL~n_*p!eR{(R+F5aP`sqZL*6OmKmAxV33i<$LCmnqISL80gOKr zu+_&UWp2m#N8B|dM3UX*oKjfM)3oIrlF@4-8_r=6lW|1t7L=xRA^mzK<-3gN#MYmG zI4N5O{C@J0vL`z7pzo?>2FrUkl^NYlH|XHR-6(&M9XlUh-TwO{0T4A}yc=^Eq1i=7 zZ6plsFBD((q;RWQMR{ekp1S)gm`gr*YM`{xOuEmA?(V^^fbk6suP$Guu@HRBvxP!#Hp$ejB^E`=|b`-w(ql|Dp z45*6fJ#ifnAEZKmj9a@Q>OiisdOSzhv?>78XhaQWVyd-Vj==YvFT>|NYsg7|FTD!{ zhgmAwiNNfL!3s~J2%=b&h-IEX|9II1wq!L#r(i*eW5XmskhT(FQq=wYluEz%c2sdrTMldR@{F(LgC!tFpAS}-XGQ&^lh{=i8+i6J3%aTib5?=f{D zoP~Ll=xM@z{!Yb;xLoediFqx$nF^ya*{Tlch-|;`T2!{0haSi(Se=7cG2DM_sehTP zu1`Np*2S?shh^6-4FoM|wY=9FdjFsSQOTgvIHoiny>;*oXb92#X|@f2f6GC0!IU|s z%3wO!?ninKx^9ukNFQP~J7gkgw^~~a3uyBbfXp`vy6C`wTP-XqvHXahTfWJVCdYH* z(FiP*9|ZAhTh{9t%>XvC!UpBrWF#^(tW5C6pOC#?SKH{4B~#s#ry$z!C!C|*6Ht<- z;B`Z$ZXd=q{0yWy)DWLp1pBNDFAP=Dl6UTKUY@FY=C^!qPpVI{i$7C6w&rsVthVk~ zVmlHXUh2@+e6A~CrIOLO7UHo($4tI;5)K&?hi&gIdca0#SO#+j-j6R?gmj@GsK1s3mboF%OEq_&lcf(U40j4)83 z$hKReXq{xyJD063wM9+b=AoCsBB=SJw0__AvjcxUZ+L)s?ZbCuRO~0&;LyJK~;a)Aw20&#{m5x?5O?It>)zzF;c;TAC_^GtA`?dYr?4hy;$trqjFY4{p z!gUQ4tIbA@pFQLwh16T;3;Avt*8Q?AoY&sr6<^LNGSbyrwl`BFtfI++F?n>{uS0(o zA?h~#5$(&*MwyqIc52nTqOx$vCHCwcKV!^j#B^XrG{QPY$NzTUbp_K(V^@Ip1D%>2`weQIkN53Z-VZ#V{0Hsf^FQtbdwYBFfO0{F^N+6)6-dVlzR$_ z0OOiR@RmfYBeVI$-GONrsHHnnsX8XDqzb|QG14pqXo}D}%|^P8PO^7#*F=A&G`P4x zQb$(FTox|q>etL3h)f{Hho77+@FXW0m?h@L;xim;$!vu2rgxEQH9JoHn>`w!F*b<=nyqGfo>kQVkML`#2w^k!}nuWgM?M6tum5V*H}8)o~oPLw3Ub|=09Y&oCo zgBN*S!nfT5rxsbC0IezBj|o_6!p>5XZr2$51eXUKUe)ONkhH#Y0w%@hx?X5KY0$9u zU7!m~w!NidWcA+AWkGP)jU!dG%`SEC_+h=u;Y2B~6+pua4!^I|PNRQ-QP8injK>D) z_+lYO1id|y15enNXTgr*NnE~w7F~Pl^FCuYVkK~c(TMPA6Ca{7d{f-rjF%j;_)S@! z4=^Wa_a*_Bc2Di)dvkjQNf4uPaNoHNO&5Sq>K-MDZX{*7{;{h$WyEh9M|yeTPcC5s zFCX0N!hyh?^1I|G%<_NYSm`a;U!ooYcT`jZv4~jw^~E@H{ULw-W0Sn*I1owdp=@GP zmbT5$eczo^?54gY9W4VBEvR$SntF4^#*+# zoQISNrW<)woxB+;_Tz>syy&(MPT4dRQ`bZuv-1z3IP*Bp z-tyT{xuc0X!>E5ym`dxDfxR9c0(_d5vGWOTSNG+fYZhAr#J_m~O&-(p^C6ix1b?Kw z&Gl+)BRj7eZnkh%-UH#}loMRnZE- z`eux>wM`u~yTB zVX96Cx9GSlLPQ9yfC1n~wS&D?2^OEO_6tezFL-JT;XzpT$T8rSZa}c}^urYF_{h+z zBMnB`I|%Vaa##Ze5MCMNOx>{aSs;v0*VKNG7=BgED&>TqwL0`?Z*tUn8}6`^(@5q@ z?=fpkmFKO04NJ%5YFur@^SGqCx&3sXTPRDvm)v#AmZ~wS-mWsuR;7azW-2K_qmQ@? zcA@BAKUvsfnM2mgM=b+}9YYjuKws;w;G zy52ZXLKV7JS9N#7{b!_RwSD-#pH$tyGGAK?==1!Sb7EG9R2^pla3+QFIp7Hp zSOI{!7pHfS0It`Curok<^1=8Qz+Es*s_%7CPtU*OMrF!Rf0Q#r;2@cOHJJ4u3WB36M0Wc*m7sKiY zX5SPj&0$@YJ*v$Z3Y9BGE!^iAs|Hiq6Mo%``EGRsvF{RU;mJn3w>ga~sA>t@duUyM z--pAI*}vfkqy1cOXzG7MOcF40gD(h`5ID|gnU1(QJds9-Jc{X)lpSg=Niqe{ubA0r z)Cu~<7T=d-y#kh1ishyKZ^1v2!Y!Lcg@MIj=hY z=*b-ze#3Ug5ym2?Ts@FzKd{O%Z{(H~N0n*jr$KU;^J@ zZp_#}ZgG=zl*QgQlhNhw9U-65iCyK`w~O^ml8#?B@HW4Pb1&&9#Sdum;uN1)^_5iy zqwltYLxe7z3ziVYb8&>p@+7M!Q9Y8_wc8!zBb6B96h&O+Z;Q1cz~Z{Oi37IN-9sFD z6H@t`w{NukBgBCslr`5B%7kQpe?U~_bW%|OmxKBBE|BYLB>ar*S1Vhs#-#S>a$J+Shqm z$ERRB<;l4cr*UhM4|?vKD{{|%Y>m0W4O%29FWBBcB`%z`Q4RZ|k z`IKfO41Rc|Q#d?#D_s~LH#H#v_B|*8BhjP$d?9NZlp(NpBky3@!O`X! zIfx&w$m)-i1l>wtZ%BP5s~)IDq5R3>B1w?aWw4^%oy3BLR=P*gmzE>AATw2uL{t!K z&>3^vO&V65Rpxo$<`J(-h6CEsfa&>2H9!@$DTGb`WXN> zHT){N*gi3T*pl$%amKgW84buuK;j7}4#!utfkeYleS+N`ASEGAd=;R{$ZeNlH^OHi zQ*xBMY_i@-w04O>FwPq#JN%~Yw8+3PG23vV!V!3F#q6~Ey2g?yXgTgmtoA)hPB3hp zYkpeyK+eG1V_s$6=6pwrSC+@K5L{L1+m%+@7Yg}*&3~v27#AUe%KC%~mc)MGh}nAr9x$ziXKgACkbg zI0Sf=H$pJv#{NS#rd>rZuCakv9uIqP!l^ynS}M)VCP4;rSOYVwSou~GIIS9k&vo0j z$$Kt;Y)z#vXOIp}i_cR2YS^T`pQQl9ck}pG8G&Fy_%>TIzJ1gY!Sxn6_~Xwhfg};x z5Ul&ry@xie45i(hPiewcn*QQR;2`&52-aTX(8ey#XYmJOEE6y99I7?~VDPfLRgfv> zMV8HabzCiPGuBD^?HpvIZ`o}#Cv-O9H*^$#8c7%OXh zLoH8vnkgJaGruwUZ=ZKhgFDVtJ#~5T()nUX za*3LxZ(tRlc*O>H+5~)oUdkHbH;*dQDU7bR(;i*eEGd8$wBfB%v|VgW)62&jz`LYPOgw=MiHA!*?{328*r+%U9`ts0XmY9i9^D0SFm6PQdCk_@838!vNqwnF2qEfWUYHGwlP0Tp12|2P>Rlj5!M$J#g zB739s--Eg5rTeq15BAUt?V?ME5%InYmj=0K&d)UFA2^t1?Ho4ZLnDX;@-c~jv!SNB zSW-SpAT|MSb=_VX<_ZH}PIJ**MK<@S+w|O$$iy#9Z_=ZG`aWcI1Y7>D7VJ>3I-rL< z84es{O*FOe^+eNMcl>e)p;h5=wP{1F_D#(N&vd@dUA^i1Zh6b6TIi*e!6yOZXPz#Z zIKBcv_bb3XZ;%paZi6y`it7e{`kk0Pm0@0JOJ6;Kp=cK?1mMrtjwY7E)A0C$SDzUG zPy^rk*0yd<>&~tTMF}Tdd}BJlbahY}0X1j^cy?Mga67u6NiG#nKOgxjR7Ry_p|A7fMXrS&x-}ZFBNXrCLaj z=)en+bkA++2__9XItbO==>aZ2Sh33jsuNhkMkLh(5t;lq?mq%)Y_VC>a{s+iv4$V^ z@wU2u#(m)Keem@_$(6|xaEz4iTvisLp(Af#jc2}Xm&+&S0q%2B)33~pT+(Cn4{uFs z=YdhO27+KeSF9m834x%0u3&DOxf2jj63Kya%Tjk(UigS_%Le!JF>%vcIyb8&>K_&zN8-2ru5besl3`Z{^Sk_kc(Y^RAiiZycS+|4Q>fI z^6H>*FW)G%dUOdfTA6nV9JCgOEoT$chJa=0d<(FauE~Joxr9f!WZy9gU`u%!$300l zKKtlez^Zo_SyzF`yzj_#1JqUrOzrTw=L){Ver;LlL%|B@j$-D3;}vl6*wQm?leW>| z61nE*72oiKZ#Qekv(YbTOz6Wx0ceMordGu){oP{{Iwoe!>&sT%iv{Lwg6Cf#XH1;dgf}B;~-~evM z#K>808WDi+_iMJ%2xaaTc!|$iqlzUfDAvs`DBcqdegwr3lP4!OeeD)#?II4ZWtSdy z$IJIP77j|v0RhK`?_l?pf7DDW)MvN1SPW>YMMjDx8!Vp}L436@f zGX-Ox6bI3NfYjTrCg=y25k!q-tgxYUehF0_P&9w2K669v=i@E1o{R{~5K;jdpZ7KO zo*Lnqf!<{(>UP#402htYS(Eo=Uh^h1s3jAsj~oiKx3_&WdjzLAuvf`lWBbQjl%KO^ z*cPaXg;UNU>h5kSiRQ0Qrl1*jAQ*v=7Ia{eClJAZcYuj|haODf(^L;U=J3vo&pDvRhITafg|Rb*h7!|qs6=R2@|c)XZYjUPJ6 zniAg3#RTSAb0Rdl>nFYi@C(4((NNhV-$tCaf5^>aA1HcfJV+$={d>5r14a5| zblV;!FT>*uvwY@L_=BoNusU~{;!KY!QM}@Rl~@G@UJ_JapkVBbDhTCl?Y+{)iKgdD zG$4Ai+2B!G4IfZcv2S;5>@7(GsM~U{eMR7FH_yQV^{o&eFL;{YfeSUO@8 z_iXfB6?lB16xwiqj|<6-Y;HDKhFtf~O!+K+$v*Dv_^PrlWjr~M zf{Qjw4dPRO$)KI1d+7;?2hq>jg}GKmwA0JsjO8|)!;?-N4nN| zJSBy=bhrql-`wpH%?K7mSIfnq8$J>FZKX03cb)g1JmZTeI6=F300aG2v-~E1$UHPJ zCFW0kpJJAn3=TCY$T-|HK^ zCT;G*5}aaoTSkV?h>k}{I(!Nf1p8ns3A{cPey5M3e0mYM9kN0dkBO#^6NfyYg8qCD z69~NnN^Xc*f)~;U)M_s!#!Fa#t&UT~qZ1ip0-qQU18?>0#=12mXxTcVi~LV!3l#h* zB*sDmp6fjh-Ha89tit^bxK1H~{duNT-5*)@MEcz~xaDOXE z`2(U;R4|OE#pV2rP9M}+0S0G)OfbuNoA9$h4V}sf>zK()9h6wKA-7Ny^~Z03-c|%o zPztq){NiNp^K!I*I5rYFo%M#%i5{3X)QVXGRZ&%9Zbh?L8e?FFRk+8(4K(y|yw=7q zyrHV$hc5?7q28)BEuB1LCt-wd$$2EjA1W%pPQ|-LaE0D}eyYNTAsQmDp8BKnulMw! zGRv%9kL2|LZtNvGVdta-`^)`9aXlnP1RJ14v*Ci6)sf()(sWir-KI&e#e*3xq9m#CO2I2m6eq-Ej;JhpC zf`$B@DS>)1Cn8hx5w^MCDBX9}3Xo9a62cVi-Y7PIY9XJ68QGL35-*uyML2*2?J!8x zob$@P1p??#=x|J#oqO&5x5r#G2PkCg=NFw<+pvQSE$FFK|k3DPGifl0K9G(5EZDr zBO6bDe4D^yKK2Vw^D9m1PrCG?Tncx3CT<{ zs&2YHCiW85I!Y9?EO{1mkM%BY!03L{ZEMGWyjmGf2D=kU3G#=O|ABJqZTcor%d*L6 znr5l3kyQ~|sqAxxc=mEE4@2&yC@^Vd>!N+wTau8+4es|6G3d26mLZ;0-zV=g4{sQ$ z$$~vy6^>$S08#v0mP-hWgacoiulE~PNJor|e`F&lO_o7aZ36!7=)5g>{PCdFhiFiL zAuEWbSF}YG09$Zkz6b0b_hrAmx%Zl}?fyW$b!N}vkT~Q5Ospr}4aXYRrAFJ$`<}X* zR3x%5oPe_JY{DQw@0Gm1MH6_|*NpFYuC7v#3)8)ykp=~iMl~N4zgS3oFAaHzvFN2@9^C5r zCPYmYg20zqWKMBtx9gHT3w&nKXC^z)%nSt_vwa{;0C421Z;sDdVvKL*-D|+SH}$fb z?WRu=Qm%-X`k6se=L9(Hz3i<-L0x60y-ht3$6^nYjFktrs|2S^ljEt5K8H4cM-XyF zd8romp9@ZUcNPXi4SSE0pgmn$ui(FcV;s-twDqO|R64``?s=m3Oq(ckn_Rr3jJ&g4 zLMB{^xd-lAl^EUkjDqvt1-5PHu)#X{RxSDb*iSSPTet=_ypuzd*Fif>od={GaS@## z*w{pI^q4n%N3cqGiyYwh#yLoTowCq1c@Hqm@#7oyseY!H#!D?2UC`Va8{rl zfnH$^CD`sABzJ;WET1$tKeOuGRGOIF7GZ+g`g$Lr)>%{5tfd{LE0`r+zs!mDHLcMS ztrMX|o6$s#ds`g?Te$KBI`VS?PSCWbd2A{it7*ZVERK>}EPA7j=&Wr)I+$-xX-c*iZr107&2aBH2ClsKbhY+&m@VY!c zq-k+-=uVYRmMwVoXfc6yTT25m7!A}*XuysR)B4;S#&v1Gq0qj|6XQsXeatDe z+xotaUacx0KPlYpy(iKjf|}|0V>u0l1(Hajyp{1&YcyLP`QJ}}4w*5O4G(o;2NYbz@zh*+#b}y`QvMwfy4S z>8GR2S3yT3DubS2112gITE3r zA9Ad`HMx!e7-xnMDYGG7BCYWQ3A8|!YnZXP1HzbAaS-JuzerSih~x1JR8xAjfS;CC zQZ=PBa9Xo}eu2Lnysj^7CX_LFrcrz&jYZ=rQ&*2?QYIFh(i!$VS-aQnpz6~CoJ|dX z{9%mNJy>iR>X{uoe`b{}^x<&uIAmjZmi>z{Snr4#DMh_(7WBnExou01gCu5$C8&JC|B8@0vG;nd)1H2 zMH5zZK2#ke@6$Oqg@^NvZe_)TF$YHs`y!C7RM&on|46B#O1S7~nu*L*3>+4tZn@^e z370FHr%j*4EFaE1JUn@;lZO>BOqTRAq1_?6r%VOG=$S^#&N!Oo9&YC%YS*p=x4II~ z<)n*$(LS@MWo!=10g$(m^3|w6>9bPV0}QC6pCnh z%gQ4v*^;fr*1kVO^roAFMq2s)9cXc%DTsSkQUA2d~04n|w}?!4>7OswI6Q zJ#V1%6eP?9Fvj&BTZA&T;_~8@S$}=89kM8YKL~XADc|V8J>pfcBWI4n*qvlO+?4#C zK51IK3z)>zgWNHj!_js23ga$IfL40T|nHk=V9(T&mWO33xNUvo)u z`u1Aw_}IqKKjxRM7(*t^dl!h&ov(55b-G09NQk`M^__if+TIGP>W4&{JZ@BT2D5&D zsV~Vbk%3sFpgIQ06$Rng@Sfh6sJRP|R-ct8S-uFZh4=7&qPV~ufN>|H0MSaVo*?s$VwXX;vd=dooI(Wb*z%ZS=6oe zo3cN}(iD;$o(cNd|GKqo4nG>^QX2h#ky|yiV_FDzs4VvxmmbCLwV8;>-eMIki@oJB z+$H~<1X2J!>O?IUb)_npW1t{*-4C)lj|Dv-OygIC6yH|SmEcVLQ+2PbT8m&MDzk$H z4p7t7oF5_@?epVczn^*M`4b&EtmgWLS0Rldj2+dutOYRlbd1@q5VRj{aS*6~f{_!G z%E~J(XVoNS}2qOlbnHw3`uGOxgv=l z__yv65(^gg8>SzpDhT1lzdvq&W?demOZOhX1_KAvPJbFeu|;saymBaYOD2R7gQINe zY7ntYD!ai>nHPd4U_8M!oMD}OzE$>C3`W)qlCu*$L<)a5M!CuW4O5oe6xj!_xBA@Y zslgN{scTHJnoePl1Ad3ELO;SqHKVu4U4tlcLnMk?7pmhcdTS_UaMp)^E8L{xe03o* zO_u05bb?Qnr00Cdk~uyamZciUY^&*%8(Lf&A#I;k%k$>PLHm|HM{Yfu7c@(tjoRL7 zmQ|IMN`BLc-hqXS>Dad8*~&^EW^J&R%VGvezR+ocVXrO!)DCMkFF2HUpY@kpR;_ZG zF!W;>ltL|J214E}^ItZ~b% zZAIuNliWR-)+wLFHI3jtL+T5&`e8HErHp?(6nRJ_!AMKnBR#kDLav99tijB*E?3@^ zCSDuSzF<>*dikqmBO4?6L0@Sa&CkP3nx3zGw(DKb@{yFRByB2xC_D*fVJx=6(n5=5 zSWdzA`t9$AJ_M@wRR-pEvhlMAmo#h@*F?7fk82ceS=wcSq)#ymAVqgwIu#X*KR)ad z+Q}9_%p{LcA|P$gfoGo3b_9sD3f0%vw_*8W3CL?q&-8s?AD%bh=k|hUuL@LI>*Z$t zrKf-FJ40_nu+lGoToRL@r@*t;o)dN8*lch-tKMrHXhHW1It%1rYjtWaG{8gBA?3wcPy!{L;!I&X zk*Nr0XX(+_orcFGIfM22Rfr_HkXF!0PUH`vUS8g5G2Z5X*#c%X(*iN`iH4|B{wJ*B z#Bf$}Pku!7yR&8S7Ebx3x|y2BSD9y<1Yy3(X-Pkj0|QuO>c<|d;InIK(yT(79L3$Q z&?n3S5+qbN=$A5*IbpHe87P;ZmE^5H$)ySgksD=_zv8e+3s^E1k&~QHmw6SW%bK%I zG(-_K<7j+;*MUh+vdvGC=Xf?$T0Z)WAQHZ_kR}3jOnrl09#jBEtn_!m8A#RN`(YAf zGD`R6`FgnZgk@(WUMSwO{lv`Lcri4r4h;Yqq(zY?Q$0LVyQ@f%VF))EreJZESu$RA z%A+A7*;z8VLK*hylOX9-YGZmMDd3f40LB!tAh4Z(7AG$m0~wCpj1JtHsy{9kK_4OR zvr74@j?re}{-6K(*T2=vHf6_u{QJGsHS+KO`pJn zfAc3R|Ga_euhP}=ZZxSffBSid4Mt~?O+4R*y5cx}YP0rfzoNpM_Pk{b!irlhKUnF1 zl+MUlo@3F{^ozwbeac<}JUu=IKbDk`s#0 zB%f`V!I7=ePx__rWX|EcDYhMEY)tUzW^;^fh%_5ZALS%iVMdOiEl_LiX10Xy=Q#eS zuTG1Jdc1XVS#EtA8b9ue`JnfO*t1)ITyfQ~4+18qFLkt{_#vfc=h_7*s|FgCaRLzm z4Kh`04I*yUt;y=dQH?Mlf=}L4{7s>2%ywGZ)2i1gG@PU3EW54_j|WLp%mO?&gA%Uu zxv4%J(^G(uCOYJ}2S}uXyQGRPDc;lb>&!Bf{R73dT5=8b6Sy1p6Q25!Px+I7I4BO& zKJORa>YO@U2A0#ClFJ}_N(NeyFYle@>QqwYuV^&TwDI* zCxbQx-@X4h8OmqP$~BNf^Ci_|OL+^w4;+4qMLj|S&Yc73C~=jnE$!i2)7x0{J)y)& z1LkeZp}nb3mT;3|kCsTku!#GICcyCJru(y^8U+h=UlWK7P5c~xH$A$3-M*DtYQY}x-nTI z$tY-#tElwllUCeKUO#KsyDsHa8EAsN6F6W@E7NbkI3SM`-9Yn2eb$uSR9>BR-lbzs zcn*v*sGyL9q`2{oUmDn&Sv84M#sSZo(J}?Md|1TB=JVmj0G^V6G0nE$!8@f#(b3t8 zqGi;O_+;`6$VJ>kD9050Sonx2AS*{~UyKZ60Jd@(v0=h^qwTMc)Q2;Yq9W@z2-y39 zZ<3j|6?$oj2fNffP>R%D(<#syWGY8k+DWke;XzHuSP&U}Tg$xG(z=!t^g-QbWGOk< z3rFr;F1^<%)_UfnpY(YV*OX@zC4djXlbfy7+_SMoEX#4O$1)A*q>oUjz zMc=P8Bdz|vlO}k;|H@>Jir>gTBY~E!_iJ)L^EG6Aw61}^Q!4&u=mg?=Jkco4NFZ%v zL9#%z>oL?{_znk!g(#en)nD||>0R?J*!qE(oBWrIcxBbx(Hz>1;ZKDcxF`o90%`Gm z4d3XWY7K6GYbr;IQkzGD7_DnIUI=kNq>yc`s`qq@x_rW^?;(rUOKQvvJCJ})s( zFv_#uG#(7gII9VB0;+rTfEDgbdTiK8$D6EP82P3TUuBOgBBMf2$}jYb~zELyCKNs5DEh zuR&>l-%s#pI|SrgF_Gwr1iY(5W=V9;n2nAa34GdjUrX7a(W{(b2qEn9OjPW7Acf3< z)y3q@Q-nE{eHpr&1-5JW4U`^Zx{`Ow4Vr9Nsp}MwARj`sFu%qx-3bQ)^q@wO25AE1 zK+&|+%Fzi{OHtSeRDLM)Y1IKI`D$Pv^hv9K@F#o^cL`p>ponprO3n|yv9!(geNCBV zrPgB9aK?OUFf(Qs`Tm`mB>OrCeB)A+RZp*2#lWXrg07OqBGN4NF|@6&21)8skvxw^ zWsjSfe7~`V$uQuzx2AqN{*!pSL&7&2N+%wAD8u4+vtrTm?Ta8Hv&CM^UWGZ#P}fy| zYSYTGt**iN%6qr19$Kv4mF<9{L|V1gnv_zXCLtCu(VsCz04w0)D`)}#9Hbz~tFL1mYJ)_;ke zX$>Zde(uLxXWA`cC@q~JQz_J)=3K?7U}-QZNqYNJ34spBp-J8^TWRs3@3iD#%e8+c zx=w!G`L5O}t!sooVy&4x-`nvGwzDoeeQ~PnlaAeD|Ah)A*}-*3ut+f%L5iiTi~rh3 zI%HC!J9`w*ta*L}iR(&KxTICGzC zQD!1inL`=nr!X7i_jJWD2Fi60&>sIB*P!wEOSaZc-cZ-WNSF?Q@=5bT&QW5)7ROD0g(6yqmV=s5 zsD)lp4S4VqeS;lQtY!+p3Idc1s{kI`BF9HJ@_bF)vL*$V049!h?sGR3+p4@CrIBUA zjPBSJg^Xl$@S1IcAz9hh0u^xSlI^|ph*Wr$WNOeTLq(v;UwYLkuBR;_pG-vPNA~BS zjCvLgb+Pl`JbOTZ!2VT#a&yps-WlAtNkz$(_r@}xgg=hn0}>B3+^xt=Q$Q|Bbi_6@ ztG3nkpYaa&y%O2)6_%WHX0HTjGHeFP*an+~^r4&DPnd`P5Ep z(r3139=^yzXaJ1QN*RB@rbBN(H83}8b%7SqgKz8Maj(F(Y-z9>9(rp`&G8LT3!^Jj z?8>DAt(3%BWM+eZ_d28xuR8(iaI{azP8{S~?sp1S*v;V!f?plYrucomwVJaWtG5dr zmwsd!Rs012MPPAK(Kutoy%4ho5nvy@J++6s`qj3N6*68yaRCJRp*#dGlI!m7T;f?` z$CviEf26qo2{ZwdsKNmHPxOohr;k+p)>y2?HFI8F zDh3$Vg%fUnQeRwA;!Yr!*%Wl=A`!Q&M)ANyx~Xe%`7Z1q6=~h9ait%2nRwuRI{$GW zye@*O$7DX(pd}*sL2=5rf7c&Jx~MB5q>USSo?BR*DN1tmX?UHkq=J}xZ7s}_=cT9z z?d!irf$ZA6klcoLqGzV_eZc@4n~7R>c&$DN`WIM#LnQKVllZa|f6R}o`gN-gJPs4G zSYzZ+xQnvurYb>zeJ~1g=IZKh6~yiqKL0SL60qn#_fnvbKM#}%%7n-exO;c@)=Vvd z>-;i5z94uYhZ(H*Y*Y^vfR;4vf>)&60PVWBnBVIbCcRc(nglmZH1))8E`I5X30Zd4 z+l;k;##xa?xtw(xwx1Co23&d@-LrjGOwn`hb;mOo{}B7$r$O6-evUJc3@Kq@YbW=4 zTA#8da(P=~*>qU50#IpL`mCjbH#iO+A}DxsM62X^An-g+E?28f-88Xj-Y*a=kTb12kk;G3>+1@&}+^!k^NC@M|w0enDqfg z%TK^~a>g;9w|cpV*~-o}^@QH-tkeEJ(9r=>TYzfm8!| zKkbbG8Z<~mB4s(yLkBf^yQ=bba;^L-s!##IK{9q!jCpkKK?md&HgSNx>78E!PnwK> z2VxN#c+Ad)ce=lI(7~(HE~m;u_TURVcjP&y&8HweGNj7IF~3kv5Jsb$CvumB-~tX& z5mUur>w$h#dR_GmO2nV;RT6Wn7fdiz7U6*TId!fJHwl2 z`oiLPy1B2)&+h#S6|<~~%VrM39ipR?qQ9_6?s|ZTs?7Igx5xC3L3;vXT%?qLf%4IY zp;)PB+m)JY%ggWFg|R@|dz#Rs_n-r)%3SdRLRq8jJ2x;p%N?nY{_K9EKenK388AZt z6l8(IuIzAcWYK_m^XIxIzO1~UCO`&fY&Zsk12;$3=g4h3FNR)#VFa;a#3mxe~?M~-5zTI-LN^PhDSjqrTkMYrniZ!=+;8O6o0X36!Qt%3q!Z<&FVh`VE;yH-_ z46wT^4MDi!&^yFirH2QdH_o;EfJ@m>(zGVTx{C_;Wyh@;v-KjHU@0SFDR5iUSNQkU z>F+g!L&=b-ZHXY{YW`QlDwk@(W&id~zHQV=MH*^leqaNO-a+|hn2*L^?qwO*K+9@G zZ=RjblH!~1=M8V=v;^4+22VPL;3*G-?FgQNrY zPTJ1+wM+NZnG6LHlJMr#ya}p-)@90rR0qVPGen+RS#1-I!_DuN8>2e8JOt&31t@!{ z5|-vJ-O>YcoK16o;U@3cVZE{q55|l<9Ly`7TqZim)ltyl2Z_`e$-zk@jx-~4+T}G0 zx~n0%h%73zXRRH3Zl}{{zD4)Utvb1t)}|M(>-&>Y!Qhteo7Z{X^Pp(A4M~!X1&go# zaa;jo49aqQ=t9w#s7@^M6S_v&H!(CGeDW*)^pFG_=%SZ@7#|g47=76N@MVWo)bKYn z5}u8lf@biR?Cz1g=SJ9DW@5b`vqXJ3!;l|i-8KjN9X6QsCa=-Eu)SQXXWm{H#~*hz z7)a~5O2PLwFXc%i6!a}_ilB`E5`B?p#-@ZIMF08*VPAeg))URl0K*I!s+9xc^;Mr@xcDXhE>*C;L8VR@pg#ucUGEUDiIx zf&UKj*}Wo%+WRV^VqXS9%iu57RdYYQV3ibIYDEZt$0-!<3El@AE6z_s7{;;@5s-Q? z#PeaWGMNlK0kSYvgBS%jK?B#C7t6RfT-pz-PsN%&KAALIyO0Wva_V&+H>QIuGOEQp zkUPwjb1YJW=4E8}2=F(pYhO2pCeD9gPJ!u0234Lvh4!8;=!RjubSktjNU$CGL$?ab z^1(`fJEFhQKH&`lT#2rC+&+~qbRyroBp+kkCQ?oanh?rAUj7NRVXmLFgv6G(=Bls) z<8wqM6Te(ECB~SlS9i(2y=yq8Ln01^+LUCENNhl~`8J<3p(0BBUh*XF1FpMB zj8z~f24HM=033sa?y{ttmSHO$?*`!>QwPX@+4=>y(AI`5n_C`2io%1soNgp`-Fi_bw`cdsYz;>*q_*^st=}#UVw1 z?%8dyHm2jGwk2j%G~>fcG3+#LHMgRf2 z8i}H^%5o*+;tvg@cI}BR-j5&qE@bhP@UMU>lGMyo-zV`ktIJ=5m^F|Y#SQt-2ua8; ztN$<#fZduY@N|K9B9X4`c8NrJ03|{j=@PTNapkHa#oF}Jfm{97hj3`KEN4uAN36Za zAdo2sR|9Lw5Vo<3Eu^EFO*0lHbE*sn+PTx@&EYG10ouLB?O_qERBs9)d{4H(iQ6<~ zM6}?k`YyxEi}(T)fU=Orr^K@ee@E2hFo)r(9!{XyKzacbD*{BPWg=a0^h=isW5%OY z3Eaqao}3buMfgK>F5~zX`O3|IH{$@ZBGQhf>O;~-@>|aUOOGcKITs=y``a+cPdL@{ zvGoe%X;uVK?Y!|izQ9(x(rCT2N%VCi5;yIPf}QvB7ST_2*0oLXsMxRJ3 UVODg z5k_5h!f%{XXsfDM*+uH+0_W>Jumda57Vp~395tP|y;3qOO}*aw8#X3#o!dL4VC?;U zf!)i^6&4R}wMlg$hzl`)6`ESb;j+5IEeRW%5_&eW!(rvmu|T=_gy@H||^QH%@jwmjY3ryL^FUX1gDp#Kf{oxt!a7O%unt$X?v|v}-Om zXB?mpr!=+omNgcz0(j+#)iM8*D9j z6(DG&-f6OyZ40h{9E_V9x(U4hel#@3gougdh1L}pekbTu?Ac$aH#PI&?2BBxJ$!t4j;7Z%BOWSRl4O|LQo^CZ1u&epAi6Am-gH zg((7&5cqWCx=tigH>@JkFQduu5_9$2-8oqciOYE{x6Mv}tRld7@vTCHK@THF93$E( zf%B81<7pv?8qJ+x+XY4$z&Q>9SKkjb9DCAfC?@1cAz~#W_Q|sEK!s|4^UA zy$uW~vd$`hjrH0 z-FmBjhSqEER#B1+?kZngQt{%Fti`;EgZf-GZSq+`R~n%J;!#d2C8JRlMi#a+ManNe z`-nPb1EMT@BYuUC)xy+N>}Z_aC~C->fp946Q1*&{!5bJFH)LwrV$-@5MYa!Ph}a;T z6TYn=^F&kQFCuw>(-_+pI=<$J(DW&c3oYL`nXEEc=$nt5*kI=O6HUw7mH?D0KN>>w z^F5>W8m?R@2M+Assh*Zm;UIHi{&8}R9w5|yT%~xl^S6T)JxBv1Xt&k%dG0J`pINo$ zzpc=J@G&50qu#7hV`wD-!Y?%AG6x-9Pp1yINNg2xBJ?O@bWyQm$Ack!U30|DDP<9I zfGql>>Y=Eyh5{c;VHg3m+?@-5?yQX(cY#GPUGp}!LH^KE%vvK;8|U_<%S8ZJIX`=j zoEj1x+r2wZwA;?H{tWT}i#%&mc%h7SJ_2}A$q1tpWR9w2#cf+a0!mE_m9Qx0rBPh8qXu)Js4!tuO{m9! zj;OeE`uisV6P)58fz*N6V}_{36@|03&OUtiWiA43yx| zDaWDnchamR=Bk!lDXpYm4+~PD9XR3HtCRp%d*@u$2wCD}@9ac9r^;jGXtBk8aUBJ` zW0Olsma@#Pr+eZ_=S^4wnidp)i8QT_ntk6qpyQEt9=JGx_XKbqt^g=Ts>ukN zPA5cVtjV%zAu5>$p&kN{jxlHloH)}o0Uhrf5r7-Chjy{e%2PNp$COJEeTE#}?l_slC(Z#INI6X<;Lm489F25;bPb>q#gwfK5nQ`btvx}lmE z;GD&jd2Dlt|IN?|cFFI5lv*&Jf!@)76Gi_%YP2j&m`8O-C$`r~ZP^iPcwOIaZDt}6 zmE$t)q6M^(0sa7LVTq0aS3s!0PWCu*c+%dA+!J6uawU%pQqtZMcbXsZMc%|Cl!@cl z+%sla!NO~l5?qm}UQdgxqy#vUK&nEm%Bngmf{h1ePrf&2YeOqC+QiTMe=j+Z8*DJv zpa+0JI%*wbopVv{F0rDxV$uhcNK=y935x5J5XVcjGZFd_2``y^WtHF9?B2&KO|u43 zEE(Wcohc9YB!z-FU&|&aLB)I>{6LMvtY?p*R~W(k;I%#yz+*hlfDhZrneblT8_%S> zDvc;ptoPY9tdRxJn3W@~fBi;unl?(rC#yp$en-Se2zYX!=eA3~W*j4I&9X7p*AX&f z!^-^3M%{zDpVWA~Yk(qQ-S6$fiUZ6>&m!eM(c4t5W0Q0+wYg)1MJb;Q%gNq@;OU2A zQ)7c76Bc6gb(2Bfgf_!ZFPOvcq(sP#{-E3#Ze)I%$-NYae;f=3^+eFC3A(00 zXq7J`%^X}PDF~!`gWnU$hmpPH1phhRX4f0Kmz4R@FxVM*;Dznq9M(a7cmQQ7oentj1r1&H;e$I6?Srv(^0 zFIAySe#ckl)2br&>zl8sPZ#HHt_}!~0(>Rn=Pm^nU{{$}g5f0!Anz9_)tgQ-3OoBd zfa7+7ICU!Ij&~C5?T#u%q?p1*3&lqhr)C8Gi70#}%hUN7e+^@Zk?FIH&14wMb5AU^ zadi0*ucwU7n&un-ia;*B-9>*9_25k*&jWVSe4;hJ4OEiZ4e|U-HqQxo&#pSE_<`oA z8M82r!P1?kPYdfLLqwO<0DNG0tvSV#;=&qI=Dfe}{@7uH4%ko zd3O-81Zn8?ThymeV{`P=$a;LFEM;?*<@gJMjqi*1B+Ar{c)oMDq2$$L)v1>|tc~1r zFQMp*c;k}&FHPF41@*u`Cjhyup|8Cf25+vgAWL?VYbFz|%$wa|Kq%T$dmO*s!V08-mwlWd2Y@j5VFk4qVFv(Q7u zdvAp^nj4wg&NY#Wwk~Z3VZp?S+rCo?taPB1<5=%HmkBLNHgM2e>8NesIzbxK$X!# zc58vrfe2R3K}qo7$(IJ1eu+=ND(;~hs4CU^f4lwYF)H(cinDE3E}g90GSy{coWZYz z&;Bp(;(OXr5c;u9KyuP~0SrVFu}iA`?l&(|*vYDL z78kB~my|7<-I!nUj1J}MtUb#pV}hR(RSO1kj8;(vr4fO@5c( z(s@MPx-|L2wBc8^lX9L)&<4|VN<#O+AtPI~dp?3lB5YF@(RMA)PCKr>55dHkimRU* z*Vx%{FqP>l@|y@pu$|K18Q#nz1P=DPJ!B&>d2{ik{R+7j+2 z5X6CL(nLV&^(cR_kTB^*szbOVxTNn3#Jpp^VtLqv0m<`3Bln7?&kE50e+HjJ?o0LP z{JS=xK=^R&4mRRQOjSE>m6sL(NShn%sB<1>N9kMNeh|0qh|VbkyO-hVxrNR_(}67lOne8&Wk0|5b_JLPp+l14Qv?eSA?> z6a}N-8x3FfR$N`>n4dY`&-tnR$yE?ya{TuK;yUhn95MQ{9nwI1SXC8JALEUMkmO+~ zu>Wag0mZH(ih!YSr_j0AE+hivqTVF3rX+jx5G17L$js%s@5d$?f24c~z?9dly{On; z#?~Y#BRLz)#w}&B-NwUfvK4HcI-tV7=9*7N15(szhWk1UR4Tt9Zm#;TN;0cWQm~n@ z*;2Ps{z4qyZg1XViw(K*Pd+XacP<8n%ijZv1!wmV_7Q}l*>e&p zS&&xk>J|%t=P+t!6}O$QcDJSIVKq z%!ra$vOlJbuBTN6Q9AxrhTfS<7|&k{LTpEw+D#>k7lEhdf8MSQ`#gG7*sMzp*&U#N z`Xb79SmgbF8alIIOphHDzp&s&ht8u;?;9GPbh906`Z05PzXNx?v`b!%mz@PoEOZu> zcEHJU8sY|DbXdQ+zZxk$kH6oAzLnS}ziiJ^^IXg5PMK` z?6ytt28yR)e-vlecajqVetf7*#VtaZD{6{YZ?9U6#Y*u$!a+SUA&6`F?&4%~i~~O? z#qyB~Tv3Ul%PwH~J=ydxE4=c7OdRz4=;4V{bCFM2)~BYk$901mLcD}$(liBO9_qH~uPh?zr^}_uTj?}oZXyDEer3#8K9am0MN6--Ff^&w`Z9iOg(Tt&sc>jY7z})U#Iq z?JTV>e+&wP?C!~lR*H50^KOQPp*#G|w*v|Nau{}Syb$&QJP){MU=-DYKfm9ej!&+3 z$>P1!?3TP?Y+q?Q|Po^iyQ#?MC$S z-D4YaBxlQfDKpY{IEP2d5}&t-$QNQ2Hor?$IBq!m7XgZ{2buX*R#iKoUK^HYIsOR> z*p(NARw$kg8-5zRJ*w4y2Mblcebi4$s)sHN%mt2MNSqe_h{rFPj16h(QiGvN&H+r? zfATnNq7q+r))-ZhT3jS>W{FcM*4qoL_jyc_sy!gDAJb#4a{8<&SaG0FI+687XM_G| zjMy_Lkv8qRlG!hpf%QE~tnT>>p~~~PS@lVy@K!gIU2-vNElWDc}2`9YC!RMB*od;)!15)fc%Hy{TlDwB(`^GD<=qHwVES9vwJ$Bw2F z^H3*pI82NxlgTXqV*e?9X#tU_zMf8kR*z&7;fL&D`?E~PM~-tO0gOYPpV z^a~$V(a^fooUWg;4%MOF+XT>MDHTH1cX`BCQ3RuV^vn;npsv6e@O7dgOQgW2tg$qqfVZkF4sn&V_QO6+9_&1XA z(Oo~68_Gps)y3Zh(X|}<)P~$>q{vFsJ}EM#bM{b?n#WoX0><$r z<<}u;c^hnd;Skh%-H=rcf9J6)=@Ah{SZo=-K5RH2A~7@z@iX3a7DQBHkI4P=e#IgfN6$!>@~@=U8m?;T_Yje>FV{?t^xIW%LVq zzU-LI`p%X@TWoVwlu*g-4ow!`4gi;4Eunh;MHkj^-{?7ysI34m!BRA8xQ3#0R^xdl z9|8T7K%LtA4*)AQy5;EzUs$`U2x5!}0>&au8l0aJIi$X4UiVj616ku0?G4b%J^4K* zvWG;BKmk%2Nngede>>BS`7fC;I8-H@U36Gph=>HLujd+xJ)xHLG|!tWq-^Tzo@tTr zEYo=Lm$wT|ryTfLS|Vb;5_Y&0RyYW~iU(-)!vZ)uvJ#izHE&L$y~x&Qn=#-myw)YX zv}N9Ngr9z_OA@{bmmKf+BJZRXn={;+LkS<3*kjcgf2&Z5e@G-22CN_M2x4ObjY41Q zfoj+B#?EP5KMuf39^Q_>=yqhYrqumo)>11Tifd5@vbYuT?{6=9k1N>dFakd6?5`CH zeAd7{TX^S&i&Z(Wp9PnHTl2EMsCA-6sUhxM3$sh#GUzGyh2YLohAR4%c?%%-8^?uW z`)Wve-Lah)e~hzYSROr|>*Yl`eAN6PqW^H9iIJd3K_shQ;o^y`Sz78CrV|4Gl7?GN4<_At7+J~V?5n4|`KJjh?i+%bz|XEx+`z#V@@ zdrD5^3acovjgK$1n3;D3sUf^8l|zQ1^kwSGVuL=L3Wb(XI;=#mjb^P__(xX9DG=ec z33MOshXhcLog zFD0fwAi3ra$Ot`Xa~@o|&7#RL%_F7`kaya-LkRIRw6sH#ZW9bYgKr8?4FzbhS3V|a z6A%#-d2N*)%cbLp^Yo$_?IAu8#R>s^1gRJ>e=CM{(#1bwE~ZqTPD0%^ac}cb*sqYR z1Rq!2{or$94b2y$0avc&%QsEBHVbN12_#u>a_t?QAo#X)+L*M}(iNkWm$Zp*IXuCM z#-d0Kywo$`P%~hwb{3K|>wDwx$wW!Dd;4iVrQ1KELOx3}<~Jo+TG1Aj)^SEd$eO6q zf6>85US=85AgV}^7s0Dblg_Z#9%hY1^%+ElJqrL?tB(%AF$XMylThR-I*3UZSI3?l znT~{RlSIP2GqU`nnvD(Wy+mc6hgFsL^*G->v|y=JpP`zU{6bqZkV_f&)(6l=Ku!6_W8JOWV4w(Sbe?&rT8kZwT&f|PkXvN^}Ti>uq{)R6Bka+UK z5u80hxxlD6T7PfGzX9fFDIj@Y#EGW2QQ_1m1Ang;VkZtgugjB#y*eDalRLjqO?2fC zsVp7n>eg{m-ve%r%g)G~Por!QO8uk*V@P$nxOQN`44Jb-6-rz0@l;JYA9K&gyPOmexMMftg&1 zbtye}zx#c^T5#lXR}&Hjq_RQqtzt!1FFA<=Bf3h}5z|WvAjb z#qXIA2zKR)ST2q_&I@&UaW!=bf1;Ikicqf5Te54>7Na zCN)A?gX!vN^!xcgJ!4DKenDknDW3DcvO_z)rM$hfysa#}BCQ1i0Xe1BTmDA*F0N2= z#aTU(|FCxS!#?QR3^sZyX92a!QN6196?dDUS^^MNb2pnOzW7c&V!mbAbbOZ;egtk4 zdJ;|qZcDfpcu~k*fNMzff3rHee|v;GLY$Eeq&po~`XzBOJyY>xG`I`#&*f%R^#L}~5CD*z-^!|7i7Fc88y3?&lxmjnf8H0m=G=cn`gzwd zKxw@sR^c9)-#T}(%@q)5dg_vDP*ckP7JHJuhxbQJSWhobnPwlAV4KxQ!1J+P(=y@o z?u|0M8G^WGz#8xL#NmYoUlHyH*KxoSj{-;)=HOJsoEm_o6i@wJjeOEIh8D)c+1V?Y zjq7?lL^$V?9*^_(e>;EL57UDbZn(?TRVv0->m;Md5;CGFFn$ci16_h6QUs$>UzLOl zM)6!onGIQ5fW<&WteWL@yh^N@?#I|oS(YNKwN$1il9^LMrdn6HRyKjF9{hLeNG`e9 z#z#(?ix_|=fXVoztxyQS>d7ZY(1|KmD4939kC_AY)F^Ake>hf8T=|+@ER^}i)C5nx zo?BY*3jD6dHA)k7drMmPX?5O!89wt!jpf4A?FNWnB{$vRWDsgduU;nV~ z83vBALewvfMat~c9t%7#w#$Y{95a?H06C~DoTil~>dxw8d_j%vDFuMp01!OzY2l)% zZlMH1QsG&kn8brEUT!P;WDvC2u-C>)`tkkp11Swpe?b?&wT#xVaWQ*-#qi%2>jh}^ zbHI>2dn_LCku*Spq)7rFYOo$SD+pg5qOii7wk_{se`dCIUB9E+F$JYbWPg42MBB1| zvQ-y=p|XaSr2nn(D0F%Ja>++bc-UC93H|yuEd$o`v1Xsq2s@UCP0G^vK~Q<5eRKpv zZNXJ=e`J8P4dZ)avenQF4-uEN(4cW{b ze>El7`sSfb7|OCASx0$>ipNfGol~Vu_G~LDf5J4OS1dNM#+a1i5XQTc+i{MM8miws z&LXDg7?qMRANZFqV^l(g;S)xDwVAMx*f8w3*ZQn=?&F_y=qI$Cq7-Jn_V%5<(a;E04@`i>#X{n+B#hOsrg{$spG}fsi2J|6xTgfB!uM^MlN{> z7ylK6_v{HO(NtS2u5gojvB3DvO*Y5o&2*B;a1tvzd2KmO4DBvN=|Z2ia64jKCD;#? z>n_4;IY|+sUjlq&{kR=*99)yR2|a7|ew?5c>NfS)K@wlaqe}~Knj5G7+Qqvx`nFZgk&&TVds|m0IB7p`9J#cFT z_eUI%Vo=mHu)*Tt#s|1&^fALK9b=pVT?Tw*4;h?qK84vhtRSwZvPWekQx77` z$N3{=N*Z=*WaE=s{kv+1>%fpqVm!^8Jg?5Qn}={2D>Svz0dQaRpi_%IfA|=JesL)5 zn`{6dN`m7U8#v#GO4`(-uXXf%@Vpo8|Du4{#uehgddV* zbd^Gvh93K6TDqx+6!GKRFObzSS|Y@E=W-W9g@u#sHi?6y*F`p5#sEw%#+6}9r>6Gh zu7hPZ1_ERDScCz5=~v>ye??DYk-w7W8aVhyI&UCo_Wjn-l#{e;TN(neL1ua23OLNR zHXm!p-lbzuY%Y?PewL=IWp4EqHj>3FuV+ZAN2F3$BTx`2M+jiZ%o;OwWjPj} zZD@HwirtKZ!jQsE1lE#8Yt67Z$7DwK(6n3t3U^chbt7~Zw7*}3e?$Jx;2A#T0@;h= zfrx=e;G~}{dpgSY~Eq0?Rpe!4h63l7{R0$MiYGkf(a_F+BHS+VxM{@ z@$SvsH@>e2UMx-)+pd*nJCU9}ia$D=z7|2#SSOjI)jasO8V3=y#7Wp*6qQ{g`AM6t zgvlN70)$5*(WInvuUx%CQf$w+g`e!r&m=n2+XcAk9bN_Af7=y*aCJrx(7(thK&Tia zz3fK*RJ2cI97cM9LST~h&az>Ly-&1$u~jEslq}#h z+1l{+G#c8*&baVy84 z(Q@jNmx{Uae`>C0ueU)T6|{<`2ld1~Jg8&{vmRqyON?_CAaO`Zs6`IfCyBvYmlGc1 zeu&UWjo{5RVn{Dsa{l^xR8M?5f8{n&qsAa5w=Ru@%gs_OMLc9hK|FesfBOtWdHca{ zeoWW7C$SHeVx?ih5E(og?RLfibjhLQ_*e`S zTX2q|vYQE6SBaAgwl2yo{}AaswcKb1jk{K69wX`Og;R3zS#C05gN*O^K(Bz$E%jkHuZWDjr@#OialOw&YQ97 zg`sjz@j<4NV_MIMd{j(lm+t*FOGn1-gs| zfBD#n-t9zsj`(r$gHC0LWgryD|sVs%I0GT#bt2&ExEBWj)VUxOC(YYAANf{%~F;Y%a1s6O%VOt~f;+l#8 zXHh?Ek2clfBXP1!2SlL+&s10W674dkf4s*}xf`Ylfpeksx4Bm*)f;8dx22WJUF|ao z7+<(*AP}@qjQ@Zp7^cYVOV1k3?Ux6o1&FQEbgwfUl0YRHe1-dSEBT~-BMy8_)ap6n zh4c>+>+zh!9bMI4U3RI#x!c+0y|loDvTe0oe~uK{nidX{fKC*MI{xpekQA#re|T!N zsFa4TmEyqHcb1O$ljxmi;7D7iaIWWlFL53A3OjA8EC`|Ku;Enf6ZotmdLf$O>HY zy4mPgX(}4_qv%pWpBIx+f4}KIe>abe{0=rc`|;*;>eTtBSv_$SygJ)*4;DE;!LtsQ z%mH&dI8+0|C4X^C)m$Iu`L(~`EptaGtfB;6`^anlK@(Jgr0-kc^+i5_n|bXxD9cdc z)SzV9bMgjP%dWM8ce++CH7L}HNTAh5TaJFIi)P)G_;(cDO6ySRJiRz-e`^E=+y(;qpZmx8qPIx||Ub;GAai?0@sno_BDkOh`@o(11pqC0sM>4`SRu*mRpe8%-u&97C<6HM8%uWHOCz3nB^a~k`7wpQT zPR-Ox4Uc7ah-UY6j`=rCX*Qwm08N2F8CiM&WN(VmSR7kYuXvrj&KU>x9e4|zT38ITLEtbe@J-)9oz&zHX{m) zNMg=q?h49>cl32)RRtzl!MewCf~%B`Ol^Fwx}^(ozkA@~9Wt7@0m9T8QhH@sFMd__ zF6=q8nAN!e^xNLdjT`OdLr(>d0SfwBYrkVcz>Q`q{%z1CWi z{>uadJ9Uob)^|1O-_?FT0%5eIv6xY~X=L`!L@rJ<=I#)?d)2*JA@T|-&%+C$gY{!@ zspPV3Xkc|Le@=d`L6|`2)h4=+qXRjya%h(|VXVT;nwqm)e*-uvltJoWdx|2QeP^`-wdG&4Vg4gZyg-tQU^>7}Bm+ z3jN=*;@vu8e<}OIy*|u-^GO0KfJM*@AJvvnh_S@pJP_)i`Y&gkl}r@@hgs!Jet7Xr zr%2_NK407*75;1srGjRp{^M`CJFQ8QIHnb{s$FDmF@ox~tT#Ne)%B0&n%+I|5_7}c zL*lr>2D4!Zd?65Fl~V?@z>ma7?pHCN_t$$b0d-C!fA{d)9?x&Lo*;+pedqwdb{qN+ zUivip=B=~X`0(GzeBFrR5Iva(!Eb((K5yj3{3Ig8s=?nb`zwIiCB3011pp2}37Bs15Ov ze^0dX=-L)Y@goqJ0HRiMd~sEqR~efUib-!y3&vIMr$=J-Dar zllB=D1S^6&@(I)8crQjLUr8l~&}!%f_(06DYcvAZqPajtJ*62&VLDzo4>F5Qtw^@p z9*B8{rlRiU2(}LjOIE9HRitVF7x8BUf9j=>hPisPUgulj!pStpxZ38#9V5lS@Dcv{ zay-hT4su)LQlw_jEb{V#y`kNPw=adAw%TQ=loQharlU(pL5mX48+&X{Ey8Z_8UIYQ z8Dzy2bB+4tZM?TkK_Bs^=u~J)a;&BNJ*)}lnJFrTk5nj8cp_HEhCW!Pn+Jdee@|&3 z!w38@5lZjP$TkgPYTO1~>5ZQKo(YMG!P7g|TD4(LRQJsmHnDAz2dhEWuDog~hh=aI zJG2K7hes2AZZ1!ldIKYTv!AZ}Cqsmr@(ZuX8Gn4L{H(8od^kg8hKzUrX4Sq*ZhIo?=s zF4{k%aAM?T{y2re%a%(t)OI^6$kue3Lq13}o>XA4siomu$=|WR>t$qNV2<~y zb|p_Z4z!+iI_Y6TyMBvGpbYQ#3yOJa`#X^{uP<|V%Y1O5_#-O- zFe4Q1Y=|)Mx1o8K=;Dx8e+S@1o+XkFoOm1*nAOe-xdNTY5jXJZWBb7O+qoH|f%Xi@ zmRL>FCEh&+rbsmIlVErHCnayem1SF+B$9uE9D$)+ghO6_skAy9;E4R__9u`M61>`T zHQu$&LBi^0{Cs(h?Pwt7SOE z=M0G(G{8p*a@}a3i5Hom9=-R84;xtF5 z<4o^DShyN1jQx-Of7@kprZa{06q@PU)B}wbJKapficr!#ENMve154&XzJNau9xZ4S ztxcdS=Bj?jT?rspt4fp=U8aBxt(KaL>GiS0`kqPCx(F32tpv0m@h-y> zg!hqa+n8hQ2X7@cC!Frwp{Gt-(^JR8M@@H)NcEr!+#o6ye`_Z6+y_9tr$K}|m|q9g zvsaWzv^JgTj+)gO4~%M?Hzs0;dRMLd?GMqj-+j-a)?U{fCil#WzmXZfeKaNWvw91? zx_Wc>7bsQGdI{g-8{WJzmqFYp2|WoP#rG%XEs)NXUx^uFp^gu)Q-vcx09Im0Qp22B zZWX9{tw7uae=|d+21^lSL$B&@pv4JAGU?-pu1;TsCYB&G#LI>L@#MxL-umid-V$A! z!@8rvY4W?by^6nQ6|Dpzgk&!(utAy{?&;SV2Nj^^XN1UTd(dXkq#j)mP%bQue73NX zTNjm?$vU{QSl_4LM|elwlW#dQ$q7_!5n16~$KCU^e->Wq3%3}1B{e>d%3ynrP2leu z`D(TDH))k4p5hT~!%DBwYSuT-2*MQI1sO>c#e$aVJ3n;>dQfU@65#7<)Fqi3&87Eu z(lfxmsh}QezbusmRt{RqxC`V#`*L(fDd$}z?o22d&jSc#|NC^GFq3ZYm(n|lbx-gt z2z+!Qe^Sfp@~zg`$`I~)1St_wY~dJy1eZDqis~_*BEupHBQz4eY2ZG~jbFcc5G~KN z?8r%57a$j-v#FPr%S0d-(}#Q{g(nRH5`5W0C9)}ytVcnZAFaVROm&AiKGlLw(16ts+w^*_SXYQ0P*@OdO1&L2pw?tf4ZGpMudO#J5VhkzSQ!11{q&!%>8_C zMMso1sQ~#)19eK!jpWnv0@(Hh-)BP-mC`aCd4AI-GR|oK%mbWcYFCF7WPkUCiXpoj zTYAEIS}Dcwma=i8WTriE3IDqGh2t`MLvD_u2Wu!7J&=6aH?`W{5FxDWKINX4`0v;u zf0)_DIhce&)~2AFh=wC*x^+pLzEGME{I|`>ID+=X|Fkn)hAK5%=hBL?3`P1Ruc>Z% z-3*{U*jl_~PNK&wr>_f>MFFMGJrv4)-{FwyhP=k~s3+BC3xPLuFHvK}LhzxZQx7XS zlqB>!Pl@1IQ|@60Fj6D(WGudfbPF2oe>;OpNhM>TusqqlsK{$2nZt9THe(T5Y$#>c zq<%XTOjH?6SSNGgTeDplPwE#RhaM6sDddg0nf-3UAuNsAvnHk)L~4Q-T6)EnO+bZo z9!v>kez~J4-YhFX4g%!1tYDg;@E8yMazWSlNAwFgnt1R7p@BfM8!yil-z|$^e= zUg`qfxCwEr56=NTH5WJ-j9r)s#B zWa9>voSvBTi@8JE-sB2c1%ZLEAk)qd17R$qET+OL@YV{?ZaZ^J16N?dj8_?y;iDwz zE-~wVo6hYA&r+bca@nbZJo9}4e<;oZF{JjlWuN91TVV$VkLqpnqtVXGkxN*(cjZ|- zszk+PN-@{`P=$N?z;yb4%EVh*{=CEsRWS)l2VTiu2||j&-}o>~e_4$^M$=*WTmfbe zm_1p8dEP2T`+W+CL}eNsrF*~6+ovBvR=*3+LkFr>?y1lYqF@cBP_ig2GMEW6+m9piOv~>CgS38 zE?O0Y^S^8A^$sC2f7Y+DY9lfCsD8=qXF8V3F{J-M-kj~Wi@$(bQDM5+99ZOw&HAnq ztm&TVWrg#T-+c!Fo-y-sfUvK8d*@;n%y*u!{xcFtXm9+oNaY*MgbrTlc@JB02&mBU zeyBnCAnm>%%QnT!0ALIrC!5l5TSGhH_;j6?Tqq|+3bRaTe~bM^07~N|zsKvBt#Ij7 z_DOyjY-PqzM2=n4bdF)r2~TB+53ewa(Z&TrICV1)VC{DC8kZ^Lt2 z-xWrrH6hUHj>E>8epoZr)g#{#q84pp$KVszMsH>CqT}Dk^mF^Wybq6fB8oJ>am%kw zfvy#Z)#vmie<1sl^oP!yN$+Ec6piQHx`&Ymq*7c&{gbK1iEJHi+T%w@--|$HxMe`= z8&igcdQnfIJV10oHiKAXn|HU&O4SJ!O4ob=KXiV5|ah1vAf07#jmN4)u*_6u9J7_X0@NYx? zFxBZZuH8@WON4s@Ib0%fo`-k6_j-xB*D8*Qg}FmDPyNan&Iv!-5UVPHBnH z?%DTDic=p+9LleZ2%s`8&r0aZIqMdJ;1ozBCiW5ww`$wOS(H*N^|+_x8)@`~8I6oC zJEs9He-Y`^R&Cx|s)>vl6s=BmvY^NI$6Q%3ZBM^11zU-MO$BaH-q+HBIFO|8Nr#li3j2}62}ESjf{d-2oS6WTwRjwU66JISMxzPs ziJWuL!$lBztiyYzm`U6F71DjV+3%}?5L;9v#ZQT5IgY( zfA$B4MKbku1Fe;@Q3JQZnVch1D2%ypmSVhOePp*MgrNXzoD>ve>2fu+&_b|*CuBwe zYRXiQ{ji75yrH&1w|8C@unz_1)U#@KbdoIe5|k!bSpcyeiLkx?dzNv$oID$KP8Z{+e=M|UuoodKAe=Xs((jByIn!VN_VepURK!SU zx(YvGLlwdfC5jXA@^Hj>h}auGQ(zd4eiLhler{VN29r++0ZpJQO>=ZHN8$SCV2Ws! zY~8F?2Kvb*`4#!;c|C+nQ=voV<(LAAA&5;WY6B2;^>ZMH>*ZMmKfP#|(gT~Fe~Oj9 zapi(!v^c_vFz5lB^GwfqTr49q?#TsC%W3J`1NO@{#1RYjlUNO5g=s(~MX;+0&`pre;` zEFJ{tRVl{N1Bqp|%diZxVuUC2-q^qct*)CMKA6@KKAw`K)=NWs2JJ97bP#t~y(tkf zptiw7qN<6u{3n+YSL)NqbTR`(6|e!o>6eA_pHR^Ymi?Di57H;qF~ zsVbP>Hs8`|g<(}_^s1QotJQPd0vWO>Mmz9=zztuiTO zfit5J!`VWabNEcWwR%z?f1@C4p>_NyiRBbys;!)tA}L3TJ{Lr|3>L{+UCWA(*@p4) z`mACObv_-M?#(#4`#iGzc81p%FJlW8xFP&aST*+3eRO+UxBe{D8AFs*@8m2fdtTQU z46PNNek_oY%Dg5>k7ArbtVtN;a{T)-s!omR52&UlO5oslRRC6%e=5>LEd@7XfoaHK z{k7dJsd%>1e&MDC=})%Fh7+w5Ysj*e;EY`L?Qnm6`DE8+7l^YzHt-`|z=Lc}bjL$7 zJ>Rd^-3mBT9gFqat)876mT!`?={8ijNn`G*o5pc6)}?Y)D?E~Dd)3t5f)t=p4!_z0 zsd7%xhn;W?H+V6@e~WElcp5Iu-YeS+tbIE~#;zNLSZuq)B^-ikhB!+caS&v!$P{=d zcx7;!Q(2O2O+)?)fHS}RfbAShO^(>s_{)gNtiTU+Q=k2oHW0`uJ=G2zK5RwDTNHf# zFfS1Mp|GmyR420BUdh`DfUPrsEVWd6ZBf=-MOEoyQt@GAfAP#%r?~({5G43jWm|^W z*IX66Oc-R8X9yJ?dmgEeDH^Ell0Vyt76U+Xpae**-8)&|@Z-T^9THk@Yt}zhbaA_% z(tqQ;r^2&afg;l1nWvl9i3xm&(g$29$q*W9&AV#lo z)m4j>K$odHi~VvKx^4%3hEhM=nSP9~`RhpNnyOSYd_&vXQS5uy@@%SbQIol)3w8}- zgR^<<2)7&Eb$l<^ohmEI%p4i=UD`ew<=6{*HYxs9_%LaTYzf9dMm* zz^ww$31Oy(V)tf9?<&8!*v5=3hUCY1A7Wf`Y0dSwaU=*EG_OBuWL#koN&&=X;fb8v zwEX<|enT#jPhc*tc@Y_|SbT{kXun%-1aJH_ttS)%4 zN5(=JinSshL&2D_mIUE}h<;B+^UV9G0MT^nIjuy`743O%vo#E{7S{925{ivvRvE<> z7YGb&{T9fI7x2;FgYqWNIN<{!CWF@9Hw!Ztf7Mb0>9XQz5qovPc8i5Wu-#mtBrI&h z-prHIW6}J_cgRm6eoPhdXFY_HTxvI3{FPq#C91sM*B7K=w@%3*e5e5lYK~App@;dX zR7(3&O_|W9(44aB`~i`BXtR=5G|Z0x0<1JApGNSATfMJ;7k*}`IIFXOXi80}@La1| ze=cFv%+12#Je1v{vb|#tGCO5a$NbdWO-(A};QF*YJoPq;88HO-2F*|239I}%*FHAy z0&x+p1BrG#W4FCxh|lD9#^U?omqc*OIdS#$N5-6@ROk3PE>#o7eeBYSNr>iutKXgZNAn@>l;|~F3T}ZwHxxbFv%UqM6 zk;*0|z0KQWh0|bZo6`^YSHf6ic-ltsf;2z6gV~jf6IAeH!>^4z2f@BPfpZK@f48qw zbpWLZ?lPUzJ^GfRi<5FYFy{{*zUq-?-TUtX^I zTyN0A!+t!#N%!Evkpb~;1W4hYy-(%m;>2A_CyJ^0X4w8IiXvHVCM;P3$lH^cz_HVkpohhXvYUMmjg6R zdILgw&cl^7#c^{9Blck<>~n_OrzGl&%h*PM!SoCM1Uh=KhhR#;#h4V6f7m~55`u$1 zFgJG4mm4^ZMP-T$idH0aEh-N$k)ma_5F7X9AljdMeB~G9rxNXKGI*vrw9`TfLE01g z`iQz)Q7Bi);6|KfRu_7tyr%`e=z2V9K_w60Ulq93XrL;ae9Tq)%d36fT~Wml==*dRZSEh z@%Wy2&{%l@&6$l&O*~w;^i?_6e`JHa2vW`x&xOOX{q9&!cQL2cF+&?bt7iuKy~fv_ zx^x3;>9&**|CZD=Sl$aX0Too>*M|_jQBt(Op<3u%9N0`cuDuTYf2uFWK__w48r*FlE{fkm=pe!+YrX?HMj$Vk7{P~Yjutcj0I z&1MY0hdEdc5GUm1uil4(#awt#i?s&b8q4q}9g%s{g^B_vwg~E2+8xgr++vOx&T+CY zJKv-8Nv~|(UJi}Xe-C+iN)TR?nfgeZAeg;r{!`cbp+;HQgQ2{(CUtahWgU5Cn67;@ zq%grZ$C(N^^)#Fg9lzT=S=80PBe5+aIuSb#XzSZ?z-RDTa>b&1i+8R+#8B2+bqp6cBl+{%jyB|;~ATe-DW@AOa z1{LjqzLHQ@4OY51MAW_fx~f@})lq%WQYPf3 z5xD;*k?iU6}4+&0s%(@XOyH{MbwjB zRREt0r&)ta^-D;6(QWu_S0+ZI)WG{&msPtaGkD_j{VCa3J8W8{5O78r@zh(&GrW$L z3>_}0QBT=pps4%y5AQ93K7pQS4i)W<*-!?se@tpzlIRO=Gj3B(1LcMRb2o|qtE0rm z!Jj8RVKrWBp)fgf*SR4a)Yo&dJ;{pd8gg!lT*5p#S`Zbf_mjL>UAq)E{N{X#1tPM@ zGcRrBSb|Wd;q$=uBD0de=F3-4>euyU9U%0ZFdt>z{~SRRZ|~{6xE;TqJIMerzoR3eYrz0XeNaEs2aOWmy@V(H zP(|k_tCgzrJdwy)0sY*WZ0yuf7|mn-W;YAi^)tN8WvJIzOU^z-jj&v8O&S^(3%3q(lEIe+T#| z?Z(A}s%uFAO+d205HUAnTFv~=HFx%|MFi+#_vWn1%WxD-jekS$f*)ErMmnwlBim=J zJg1LeasXBFD~c?ccs3CQ-B~J5;t8pp?q4&yGQz!qx@ZtWQ_%saGZp}QT=j;MI>SLx zPSTU58ZO*W;d$S2>pg6D2@JR7&G*<@FPB z$m&|{=LQV*3DHGm*FSPN?4YHqpoY>(UmhF3eAQim1k~kCP9J{15_mMr)#wpRBMo=g zc#$a%WmUKmPK)y9;R~eENf5Pab0&fS_ftAQwi=g*{lYkn}yTV772_ z_A}GhE|SromuhVKro8f&JqD2`^*EZSbtjiE?^wwVlY5^G7(YB%lFV#-RO_J$ATOfb zC<16rtQ81e;vAkTZS~ zP8t$srxB4n9)sZ21>|(aTn6;JN-S$fB#;OURw8r2zy4(44TGf`p?s*v4zbm$GTyVI z06&wqDBL%Z7tkpPohl5Nq$C9?lUP7J2p!jAjsh2K;j%YSJdOc)w9oUe8h=e37s>)S z3C0i{dJ1;e(v1>SlgCU|WEU9IIiA?CX(x6ym)`lg><>#8&qe0Ugc*QP4PO@*6#Lrvnj>MwO1%8#nRZs+kfz37DH%6xmU(B zGXJ<{-Z9`g;(U?V zlwyW3Xe7MBzN(#rgMV33>8!)(URf;q39~x!n(sO^E@BW*xEw6a2iYA+7Qo7 zWvJ}ur1OJm^X(7;aDxVr$xlqraqg*U1jeU9P~K4TH$SPI!GF`OrGM(>U!|U?rYIxm zPZ*?>G_dwqKG<=#f?)eeXDp`jM9+#eva7wdy< zKaMbAln^csivSFQAWXk5_hW8zd zv@7B?YeFen6Ew=*smGvV4^tzC&}Mbth0;fW)Np(?Sbv4P7yW9<@F2oJV)|lLo|jv{ zcs>J;c~r^?tP8Eb<$4>1Kw#lm2eS1lxyD_GeI@n{yLg^WMI$?OoM{-Tq1@KqO3Nj2 zDo((#DoYwlaF%1_ZU#TSMLikhG+$i&?NIWq4WKq7wZ1!=WESRWlkaV&(9yeD3)RbT zg>d6L=zj|PL>n=|OQ8l2u7Eov>OKM17WJLjB11mT=>$j|5y`A=<+<`G< zwH&Rt#HrjxaKmqKlz?&u87->%wdJvnV*#oan15e1E8TV@sTE3H3CZqg(9EWW-F;GO z*RAWpucF@--;;QkXN{uvl8KNV&%=xi!F#hF*h3Xez!kYvn>U)DuZCsK{(cscdG2Kj zC{O{k&O{pnCvilVR*}S@D22M$R&<dA~I(QKnBd z4U9!HC9=ECyNKnMzML$YPl5M0W%A+BIAna z*XYX&vn0-CS-(=_h4K_@ANJGjSg`}h!5$VBT+a7GXBHhJ;39}4A=PqFPmJ^+ycy&< zNCD2PMN)v}zqa^Z;XNPn9y zb?CVU`in4x(A5)3jkz-6gKi2-Bese*LVh!$`#ADVmuVpQNA^qAcx6K4w1sKa@cm`U zm|S%3FmIxuw&^B11s-z_M?A#u5{WkqevUbW>JXK~5xQRz90Q2hR>FRZE#o@c*FZqx zCKb57uRV@E*y0eL{)UG^U>*Td_I62hczBG@;p<<7=P2Rs19K31#v3Om&IMi)q^vIy$#Wyjk6=)f?K@fT3akQ1)(3s_V@A z*jGvAD*qx?7%*Ux%0)|-ZTX0Nz?eHQARhn&27LJRDVdqI_I}@AF0yJ*?P10+R;&Bz zex7bY&ZAcpzFVujo3C==g@2DZy_MxE9BXVe3GxZ$6SrMCswAC*qPJ*)o3LAkUhj<4 ze)8yF2P>?bQ@@;f^O@Tzj;rM31?Sd=)oa}5`SPCgk@b+PcknKP>&jSv_=ig0SG`nMQet(jiRg=tYO6AfBHAhZzBa#8-L_fJfy>a!#^ZX zFVhebKZ^W7AJ5L`iB0Wy|2gc8!+E0aEO$Sx+4<-@$NG`6`-g1ID5AqG;BSPjY)v#<%r&q%7vEc{i_C<3}(tyKQd9(#z83)@iZWnVqXW z!}J{Mnfd58N*hp*l}kk^9IrVL{%fJvu5T-+8W8`)5|-rtI1aJ zXY1^Fd}fd1^M0}WZTOhqH%(r*W9?G~5Zu9lMCNG_edv+CMB z>SBJJwYL}Do)FQuAM;jLlg-g3pT)J_erjF5*(ZDSQC3-0cz@nCnjJo$mt=evqdC%+ z#Y%I-ZM}XfYmVjB7j@k#07ToT+jxdpTU*GG%2L@lhd0e{(F+IVs=lFiI+ z={cq2tAE3#^jaNaBRyU7?DYgS8oQH?^EpV9<>smKEZ8p|hI$-5MvHB#jbXQKjHL#a zoxPXqQ#zfU-{b!1*13H=6~=OPUQUXKU7Bfhf8L*`SbuIGkNLLc>%3GB^JVFhTj?^|Gyo^}UL?BD6MZ>e$t25Pv-xdw z7@tpl%6|}v8q8HNr4cGjd3tqua_Lo%>gbZ_=#jkcYU8M^8Ij$shr}spOM&PaPiDMs zK1ONF&8d9vlg3k_T5E`JKG~=7Rc+?TRpqE4p|PR5!787YlZciWItNsxVkeH|>9t=+ zGCDdw=(2k$_p8^-d7tvvb$YOun?JejH9d9yb$@!A_H2U2ILg>H`@?;;+^$Yr zBb-l3WyGy+J zq|;TqoWVJ5SM7S8O3Llf`LoCQq)y~pvecbx_3Tq@_X?Sn@#J-5en^nT7CGk>B6 z`*PGcZ)NsYrq}U)b7&@sW$M`S13VA#n17u6I4I&^J?j)#Pgj@f>T1N9Kgbor@a!Jt z@rp?8@{nlqIS(vv$IG`ne-{2))%Odu+h1qn&|U8&h3w?pEIXf9n|k!H{U*FqW2GA_ zW3lo_<0u|S@n_SnC+&K&cP9sJb=wbS2(G^K!HCK~k?@SWhsWD|@b>X)_jk5`z8=pEKOV~Cv|X7!wvuovWy^hdcvWaR&5J{D zn!Lz9E~n2u+PU{dDP_UI_t0HE@A>w)cj5`<`$(NW*d(sVHNG8C5GF7FIUGNukE0&o za9SN^pY-^YUupc&p67BliIvA}9)B#Y?dP?6Mrn2!!%;ms0J!a%_A%s_M;lLO&qE0E z@$tNDFMe?4=8Z1;*vMb&BUVn5`=bqGS=gl_L=6BIe*pealo;r z-QP#bvZKcrdz=%+@>DN~UiNbcfI0xMXauUskWT{5WCT6u6X;& zMKZ|h0pryNH!uI}t=4+J-hb_ld!oz~Q&}I2`BfTwyLY1=u2X+sO_em6Z&QgtaN4X6AnvnTUu4s@=6`hUmX8lt zec2n2+3Qnf}yIWvNr@Lp=n&ZWHe6Hv=ZwpI3nwxhrLqN#tsH791RG#_M3=nqQO!=Xd4I6i6{60JGBV~}ZS|+a z`t>;$-etejM>nr}TA3c6UaPg4rxX3I+hlft(l94(dn=wZW5FKImG4kJVzn`Q9v-Q#Q?x?|rT>ZH}-CVTdw28`dH>)3u_&DEok!tJG*CXUdFl}PsGmon9ud>9olhHG`RBT0`srw%(SIR>#q$X#t*}^m?&K3Grtwj+ z^_1S4dMt%%Q|}sn?&LkbZ+&ib7wgnJ#E!4e_dY7?l=R->)0D|{y-?-R zh9mcF1qbK;2*)}eOSN0B$ZBCnb3JJ1YeQX}g5<7G&hCx3;YE_WlQo;3tZjbUeCBDs zU#yS4e1Fr6!zO)dr-?1o@#~e8+qLN|)>f=3%vfRNZpe{$zxRr@dCat|OFen9>3u0* z^|wb9=bI|kT}`8(eEN}}{C@2z9?PoCh_|?fH}#?IsmwOo?nK-)|3r)3bYZ4z+y>gq zP!bm(Ad==+`@Wj;ayK>1({1_zO+7#M_2K+J<9`VdW#(p&8(*fHWO^3QF4rrWLD$}$ z)oIdVdV6={sML)^a?y;#*4(<-4^D4(oB6k-qvl+dF7D!Hl!aN7QV#aJgxO`4dr4ge z5)blmOxM@baY5L-s^{9=yDmo^TlHnE)^1opnH_H4y`#Gmy}#pA9^%`oobBdTn6|t4 zWPh_bbdpKlPsdO`eMkBP(b3C%Yl))!YWggft7Dz-POdjQ9?J4GI!58;l%3v^J>pv@ zP9I}!FWoVp+|m|(_OsrcK{kqcnRNR}Sj`@bbNyZ<@#0i3tGL}rn<`_U>4udv4KMf3 zajQGhp4+$hk`=q=l2awS_uhOG>Sr3NQhz<=ij1t7TKdgX%%$UQ5?UkEjt}kDD`$ma8+YPPd);|I+SO*+GH*RTEf)66%`-|>{F*O#Fn?98 z!UjBZr;{}Pq_3?pYY+abRPP&lSdZt$t2HLz`ANjQY^!2UiYOvj*CMK0>w8r7A%}AB zkW#%r%p6;q>+Yo>>Z(Uxjt)_BxnIg9Gt9~TRxc!`ANM;`I_7z-aeW)(O%lOncYkM_ zr;CvbtKGeU!xN9Dk8l?!uSM-?ZtTM98Xm7SN&FE4-WmQ zIII#}N3CV=mCfrtC%bcRg^O;b&HTF}+qY0kTv}-Ru%T~;vnGEt!Iv74N@}7zVvbsN zoEhVOj7!#=pl5pJ?4{|0Pt}9IVvRE%%|Lm5MxVy+)Nwx^y_BSd6o0afyneBPf8~j; z@*U3dMBnk9M@XHe$Vo!C!+__+NJ3~8)vd+*CElAGknXz6BHbvtZSM44lN|S`c5}bo zK9Z;{n(8W#QMN7vSfEW^=5L+0S?wM^ntR<`ch}wf@bQxA!C9W)ZdWy7?REYL+ zANzq8)>8GX4b2Az}AIzMGyZRALYkBhEw{>i=`FwUc zMXOAx#)u6FV^_2%eSTo~_a?JkwIHFDqQ#cIDRXMcTR-EP@*IXkabpM}yr zmvPNz3-eK#N^z`RpBWwLWDOb1qV5h75cAHEO&(#5!=6Y5n^vzio z*qc?K&K^)~-<#?CGvZ5FY~T=&?D6fT^CUah-g8#ZpFFctV_lAeS@_wT?J`qe&9?j0 zA8oC!_-vIa&VSb2*3G3@JhgJXJ!_upE_Y=SMXOXwE-hQSRqapT_?FX$Ebrsa{ct$W zCF3cbNM@|O@%a8uWAf0OJsbEik`{ysxl7u<&j)cdXt#JRHN86=U(;DL^}R@QiLETN zMjed~v4%cN)gXW|4roFsHIufX6)*E^mghM3PROJty?>NZ$uGs0bsY}ugJDxMSt&Tr z%Um}t%Zi;ee^hsRf9xOmrMOl5y%m_d%e}fMld1R4%X~UtEUb8a=1H=nS>|Wir7+0# zst49S-Y2_cKG!Z;X{E2E=l8g-o|~smUW-YwMn$r3)cv%ckDdpQP;Y#5*mmCJ+9aLh zczbeO)qlutG+xTPoekO9gW@(T3{7%eXVp+DOWicfiXvQ}5OQ7TRYs9CE1YO0_xycQ zs^0y0mbZxa$$od(&F|Us_9^37&;53{OZNBMWpl~ym*X~$_4jcV9+JJec3-dTaUD(5 zm*aZxQ8&#-9ZB+1?wZ-nSf5Zb?|Nr@MvktQ z%o%j~Zmy1B@$B#_UJaRzM~DSlMmD5P*~PTtauY`nZ&maaUCBHQh>Vl$hql_>$%7D!jEqz^xSEIDs` zw>J4C{mfQ!b-upqwA!e(UEWw!9ooa@bbl`0zRhEI-fp)0XTCk^P8>}`W3^|&ed)IS zb}ug{6Ul()I<<}0>{Qhmzc_U3UY(dV=_t``bE=_c)J*N?u^5|+_h%C(_4yd}`BB;4 zqSdZgcjve~+@&tQOclC_=Q9_p65TE6GE1Z4wD+aS^|Y}RZFkOj?oYJ&dGB72{eS$r z+c&C09BQ|h>q`yZ#i6*@eV$*+>4doQ0z0}XBy=-NqY4<7H+Lw97PU*643%5(Y5%TKMpQ?~1Oa~qx2 zsrjq5z1hs7Y{5V=Pk-0>C@yY0I9Cn#^aa^Je3v|)QSzKUU;b{NvdWlf zp0L-NxBQJ>W^oK=)l(0w32Pq>-!5bPwi|P@@3AI>f3jE5o6Q8Dd@4gMa2)FVJ-N zLdVD1RF>_yo4j|2L4s$1#$yH4=KuMB{@4F@uAbNZ>;L%Qo9cZ2-~ZSD`F>vhkK_Ke z|G&=F?KOVA&j0seU!NB8|M}Aa@CG5jK|Gu|AE%7RK~VzOmLz_e=$_BdESVe4E`4#^ri% zSNcgj)oTE(N4(cJ*-rPjY$6`dN^zxo&8l5YOdV1W0C&R=v0ojwzAWKH9x;Gt)`)hW zbkJ8tlA^;Ut^usKxD>Cilk8*w>YF74z|U-WK6)qdBbM@`bxJdxZ!nYq}}sklC5 zQ>Uj}EsG-5BnB!=LYfvKAU88>bOuk@5HOVDk;?onKoQs7a$gYv~*V@$`3%fg7g9u)ZI=kRqDV zVQ0vRZW1hGX=$+bB`u3tk+Scz7 zuh!pd6ASSP`>$&=tY~P{rx~E>WPaAr=Co}YiW|we?P?wSR3E=m0{7_w@zO-iXEbFF zMvdek<~kuGD&n<^=ELtT)JVu=6c5j3m7y|n0Zt8-DSui>B!pOk#Q#I#G7Zee3P&HX z-#{9O8*~A9%#zJy9~R0Udir!*rqj;ZXj!t=T5_X#ay7f;_VpiOZ)f7P(N@dSSZkSq zcv8?@*e>baKDwG+y3yKXs||ZH*tpOFXF|nmNW#85*J)L3Lt`64E(ur>j)NDfXIDg`inE%Q~ju+#Jpj_z?n5k*2YS)z4F{ zsj1+OYFIx#pJ=2_&RUzCftdt1#sZo;Fm#ys(|>A%bLCBn>dg?#Tng{O(0MC4Dlb!{b!E-NQ}TcCa@R&vSZ5>}ANpf4ts+3$&v*Gjp!A zqkm#=HyRTAob*mF{#GPGsJLbqO^wcFL6JHrNYDNx(LH`VdW>qo9ThRhRKZw{87HXW zHgF@|-w1O1iORU6L)k={p9Z99Tn*6(At$s~$a0%Tce?cU`}!ZBZM2p~m`AqB5_`C= zv5=F$5-;SjJ8Rb|l-TGM7Z_OwoKg_`$bV#Oysk9_4Mp5hA<9n=RGxr2$n{yIm1xQ` z7Lab0B_G)dr-}(hy4+04xrdS&Fe%bdI%mX5+!iqDR{4-#8hDIH6`lzEClzb~)gpfO zn^hR9jV9uUd~xIlM}M(a*f14)HFHCDrJabOMC?JY1&)py84q|QVk%2xr>9f5kAHfN zCHfFI%^&O%uMrM=nMKjhYf55Y%q~$ilzjI!NUnAx_V1vYJ-fS1F=6oC-xNh$P9u(kcn{YL6!R8Je^(vy@=X zv4AGBqZUs!in^K1cC%lYd4%yF+uaomMW~vUHOK7%sG_n0F(ctU@PiCJ^c;&PG^Wxi*imF|9vF zMtviUKzxclY-^Z+H3d`xdOB+W(pjy*`@x11ZT{kxBkV3>jHmjM|1+<#Tz}*>wXOf> zyq3wIbfQFMqTyyTDo?0c0HqbEk zihP-Nj>v~?;{W7(9d$##6Ms3A2~2m@h9DL>sEs8fnBP%LRTUZo*yC_*Dgq;4OERD-HG6+&RiGNfkN5JHG-O4I% zeG;W5P#GN^U_5Pd8o~JG~wwUjhCCHUm2Hs;;Q3ij1i~fSK_IhJcPZ& zk1_(6!im97*c&%g*n9f7{v7sO`1j2ma(0vtk4rhz^P~VSFICP|0uI8xtOJW&HLD6I z_rdWU7DwO_V4A9~4k|eMcfzQKx*;1_Hv$Lsx*;&Eua7c~)qe;-T{B>SfXD!#8er6* z3Oh9EaGmp0U;g;OMuWYr=gvU`$nux~~B7%VB8mbDhPH(N~a2VogJ*<-r@r2yM zi8Y=^0Do{1V3%qlxxjgGK`gN_G-wNUi8*mlvsHXnS-9aAEG1F*-^QZ~^pj3 zAFxY>Dx`Y_B92~EK1P61F3(BpMjoOrWXu4jbkwf|cB{jb7jps#WR*6R3(a9P zIWHr%lZX3p1U#ff+|NwhkHP{oElcc()I^FQR ztDI7O;(u&z#D&~RH-QhH)EzNrI)bV))(aQ779RYS>WA{e1WKTZc1B0gGhq8bKOTdY zuD!x}1irHJfN0<|+hdOUYMf952XD9KEtXmwhfL5LY$Cp+0(bFqyx@ij_Yxv5;=LZ? zo&53hfB2BN=YQA4xyT29<25l*`;)uFA(wyP*MCMsz8HR14Ex4^a8kVPO*t5oYN_^0 z!!n`3YK^R+wq*nglqH_6BBqji*z;NK+EQ+RiH#c!CWi*LLn~^nzXyR4OzM z(NLX1s^ofqaZ#IOYgYN*M2dhD1etd9dOy~&Bin5x(~cdqj%tug6B@B-+kp;@;BWql z^nXo1K^m72z)OOFh`J)yvEb1l#t+e?u`YleFdGqnQKrq&K6SHOK04}zVgJJ6F4tdmwAaHa{_c}Q(zHwd~r5c626NNPznX>?Dx3> zM)h&Q-cDC~h^5GWIVk)rdZEaGYKwhFaDT7B7kJh;`4{)7$W?+XiGBU8r~lO1LmmCa z1Ig)=Ui;Z!r>7f@4R!fV3x8@4v6o+r`R2)MozQ@8P~)uE`Q0#D=b-01&jM!PmSEsS z2TlVP`37TL!qPfJ=w4@O;ph?K1oqq?o z=NM&GiV$@Zmf$CrHL~5UW0@5yiau+<;ewZ2pYa1yF5>tFgbJNOwmWAhcsT^gP57ne zr@uTR9`gB5J%xTb$Pa&Xe(fgw8>Qcv9%|X-7e`l6uq$k%*doctZV0uavF_=4zI|@rmq6*twc&{QQ&_wS6 zOEv;0cqN;<%BAbNNOnfn?CcXSwO)J00X~&7m|^{GhpbO zFVdN-I{o02g(I+Qs85>X{NVt9UhidbqAB6K52E4-i97(wMBd^~EQlfp!&R+M@MD^23_%wWnRlR1w= zMTd+s?=cA5bS|zA19Zkmz7wZ z5M6h;Y(ctr1GY0ET6&x$s5p#9S~9mH$0vb9gGSCUq7INx)+)UloOV{9MuV-soV)wlD4P6dOo*Cjz=2oRRStXZA16Cq(6JOEbrCFu9@NVnr%H>b`Py2`40rXcWlLHFgWPSh=pB9(Sw? z%&gw1EYh4HIa)2ZF@Hq21Vt${1Yt%j;7#Yz%D}|eOu{6utgbGkSYXa(bFvGa$m{;q z7+_2xtK)TaCl*dVR83d3lyf7H2YoUW`QJ%V&A=5@8QalD<>_C2@?Z5}qW;tcGxrx0 zt9nn{dZ_R9jwS!B7a>ea9VHwwMi8cvp07t;&JryL4FMxGV}E*%U{Cq9cT z+(d|QMaPgJQ@(iPCK35q1{z_BYaf6LG?9<}$i?uhYp2=w??suJMUvZ*6SP8CC07q* zd}fL_uI*8i0fSqEP~)iTc&e&gJE$x*R8~`9aj!NJLVwIL5J-m5+W)=Yfv3B}dRsbZ z)d;0q!Iuz^v>$zTYK8(k*lh!)jv8_;sbZkU9?d>WjxwquAbezCI(~z|NKZ|Mbw`L< zW)3t!VoQuPf%3r+@1iL^`#a9%eajzVbI zeVzTYM?kURA1Q4s4I<@e6v~55n#9Z+2RP9nZxn{h2Sv`=1nFg>CIm)o%Ri4|M$dZ3 z);Qq0ntk)Tw;oU6mEeYB{!^#?$z_90{!!~{?LTXw z^xrz-A8jk`wSU$IKX&=i_J`}t;HTdG=WF)qMD$c7Ud9e~HC@!CjHGT?W=8n3%wWDn znVYzcljw{RU_`@+u5q3ej#eq_uu;N|;%c)=tYNY$RbqHer6cJ+KbI2{>WKpfbBvMZ z_+-|l+n9nDIHFl?czH0;+k&G$jVs;sP22LgI6uMk&)n@{g*)Kdu> zS;lo^iRRh&44myk-GI}8GWcsD`f4KIe1COY+DEM=fKWGu>RFao*6SDufYo8bX03Xq@g0#F-_yBXn{AWoCGNts}-<638f?9Dl)^ z@3-*PT?Jpjd1E~Ri{nkA5BHb_gh*B^|LU$+^Q)`=^d|oBs=s;lFYleY4Ux<|&1s(G zC?Bw0*L*QzF!UB(jzzjM>(bJ7bGp!)Hemd&@0f&uPja8l;z>^K zq^ZzZjUM2aHxs9O;F1axH^STz`hQlZ7tLoEGtqC*w*`5Syzb#Di<<8ZjGCILVcVwP z_@H7UD=4E|tiB@5DY7{sm>a~F7zIWUYW&q%@GeHNT5d;+{=#shn|U+vU0p3J`06R+ zC)_LCFc^#aRfA;EQ?xeH|6p1EN2~kot$kzit2K8&I+pS6=ln;lCu*E;?0?VT&U6&F z8zW#B{f2jh6GV1*p;TcNoCKHURQ2Tp0-{4BaE0o?E}n;+^PcS1X13~m=~Al@2~Mf2 zlZPcRLwjTj4rqpNkGm$~j7t+0rx)=$<2Tn4e+4Jx7=AgrouXvv>lZ`!MgI>(o)evR z+=BH4REcH_eFgP^y+8HtO@GAB(EA#`hW?hQQ42S81jZwp(<|jkPKO*o*pF}guKQ|s zdyF&ZPSfktLc2U;G6ufz4lpyj;OiHd(Jl4mOyksr#zDX+) zsQ)=sp%oCk2HCSjEl=D47k8oQs}Syx+BOm7yn>9SszjzB`kA@h$6Q`8pLm!tWC6Ls zXF8)n8DefeF=xo;zJGKCMxZrMJ`;U9V!`b`7rfpIs%ZS?k(HMeF(x73OZxw_PyU=VqTA(O5eMTS==^{cu7%P0| z7Y82jQbfKV^kXY)zBn-WvG^MDK=H%PH-EH=zgpwVI z)8*1hZx})XS9y{nPRy|2p)t+$PpFSW@eStgQ z9Q~WelNlM}b?A``|H}X5Fd2qigj-`oSe;pZWC)G<>vOd~&WP}xjre<3Kx}fBecTBQ zdXD6V7+Ubp=Q`G({PPnhr`uEd`kbQrCeVdXDffO<>T*!H)*+-#qiH zL&LD9Z+|V9_~fet6a(hwFdKaCi$gy7?K`&-3XnP)A+WVPJ*W_2;E|(uX^dMCvGQL( zAMT}bC^TdXexhO!_xt_AupTWw%R4k&#KaH}`g#0-Bf*absp5V;Jm@;JKiX{eliLQ| zYp`3e$r{9b4V$>ZkORN_*wd*~h~Adiry(!0AAc?IO8>;nZl`zbs4)lQK1ozw9?d2~ zz><&@QmUdo!#R(sPqA2=#W(_XZ~P|NLBFFypQr=pq+`)|qn-wi>E$Idda8t&?%Y7G zX;x9x{A%&LaslYNn z*ayTbkL1+`16>;}EAh74;D*1Ud3l>A@^mh}q2bGa#D~Dv_aDE` z2)>zbuV|QxQDO|(IoObuXy4%RGc{GwM}HdfGg0lV(ou$F-Y`#R&1*Z632y!!54wnl zG7<6c8Ss{U`yhYnjls8s&wP7J(8Bg#eI+%S4*DWXFlvPs{x2H^++=}@>I-fghK0_~ z9NjO|z1MRa8Yj|#Ch(bk{%ftAAjFE%V!f=OVJc#YAC{%2`wSzt9*rAE`^LGzH-9lo z=*|Sb1w(&R{x`k}{BP;zr@#G;0fBWbiT`_lQ-2L~Vu~4soZg^~bPcf$`7eHw%v_lb z7`MNc1Rb?FY-JBI|Gv-3U1RAFhN&`v9}GK{1YsZxPLN7UAG67I4-zF2TSN0_?b`TQ zA^GJp%A@};Y&(U~os)LGx(u49d4DIFx%bOrq~wfzuVbg7uKbCkiHIXL5^=%$9_M?HDoDcvq>6`#y*sd^OS)MTN&}v+-S( zFdih+#Kpd5sz5~ET#H^@3ejK#;fn~#I^;}XAR5gkE7-FVzB3qvj+e>!#eaqVAZByu z_|uQZfmox^M~d-6^tJ23j~l@^&8+r8=$j3R{;MA7|1qDGr7`^NTm8-d{ZFq{aQQm0 z^z8V!`i>T5N*qkVB=KagWpk`H_Sg!N$_zIe=9`-nDlsaV#CWuUO}w1|^Q$e{S=I4j zA{b$&Q4+Yp5mBUPG|;av+ka4RVU(9hEm3ms%k;|r@_Ok(m!QqE2(?w~qeii;!J87d5fIY~bh~8wmq%`MTX*w%nm!_s_gsp%_)O zHL4ulr%2_F>PwQ&`qcf)4R?Fi@K|Gf9yO8%+iZrQ{)gYvv&QRzAAb~ygivXkb}uII z9nnu8!;1_4HvugXPHVj;?KXFha>#aafaU;e|nnV{*~aj z(jOoAyVt*5@wea8n?0MDMf#m(`ufF?8z2d+h7*sq|LG5c-zE=_OD^)ImswC1SgE0g zOspmeEx$}vu2@z@Cx50-wx(a}D#~;>-0e{_L2gC@cC8ZxMBVt&LC>O0Z);mahv?D~ zGu1?+NH!tjKAo5nDIhprSW8)gbj~}oqb~AIBViTR{c5=e1*=+had;VetfBfP&Wnk^ zQ5kuu*)F-IeoVN7fgizWos9vN2~8D>WAT7Q|HV!Po<>9Qn14^Un$w{{cl?`wwg1#J z_@HTgfA(iy>?fxF*-y^@)ZSti?3bf_{kHpCTSAar2fzL9%jRcar*0U0dF+0)AdO?$ zfqPcDw(+=_@uMmx+y|c_)J8r6NmE%JL6f>F@Iv=tfALccEHN#=vRFyQbs|@>^G#vD zzP^Y1HfG8dn17@~f42iv`}hw=6IJLSq(>oQwY)(>7bOT9r()b!5N0{z)YdS!Q4?9F zTHA>49(dMl^e&~qb(J>*kN)L8XjU{=L7fl{i0aqAm2>{5uXkpG5BVZL2f;9BGSmL$ zmje&}XCG7S!8bpPx=%c2>38Plt9$;=wr>CQ>wh&Co_~6fMZ@=B9o5~Tn|n4}>0<|8 z4B_2b=W=BcrBz#JgIGBfp8}!RBt_?EYXp!U!b+aBd(@ zFnK#Vnp`e=e#DlPIwm90E^?G@mPV9Uq0yuaF^14v(ov?3EFY## z3ifz}kbkd;85aGxO)>iF48wf>lUWx~C6V_s^qsR4GYf)4i*^6%3_o^pa$yJJli&Kj z_!lo|iI`2h{?yN3%n{cB3E=7=l5+$+-H{;Kc&Ogu#z7d7(#>R7J1-U*lTL2Vd<;az zZF;a9alQlS&@VmISO%&iZgb#?j*y=jeq@VzRe$&_pW}C9|8NKKWefk-7&Sp~CI9u* zfa!nu!i$M|ZaK`m!GG3sboQ-YA*RID4X72?~IUsm#^`E@+1(4(uwT&ka3#7y4N(%vHGIRowp?v4*oYrip#g|73FBRGOV z+GyA}iy5m*Xo@_+-}@Ky8|!w!*Oac}cz+asS&+`foizvj5Ic!*lR3|LhIeH`sjO2uS47V=Yb2=fv45dx4~eU`3dQXAdYXf@!##7TfzvZ)tIw%TZIqEm zfTIgE)Kc4IEk@VruvbIxLSKm)em_ywZ@k!udCQ6sTlLAWwot(Iasy_M#M{av{9ngLQg09A9#b%$_5{_8v1YB34d%6y^3(C zoBkDV-&!sGJHOvX7-bmF8Q3(eDNK(QG;*W)%p?X9HVYhZH=DBcAeT+dUb!DIL!CGW z*sN0Ej;D6YS6gAVw$~r8CwIIbY+1t{uQ_!PH02Rt8AC1;o9X;|&F%<(v=%1j0L8?Y zsJmsc-UBp4z7R9)KRxCIP=7)DUmp202ah#BD!%>iNc6QP|KJpb&uTOg{{2}vkTvs| zU62owjyjIW+GwG1@aOPgYSh}eVG$f^vq!bAG6lhS`WD&(4*6V{N)v-U8VT))QB7qs zfj|;^G(ypi>WE>mC3~iyIxJlIUgW+lGSZLg;s3|SXnfm$W-&pkT zZ;ky180H0pRw3*;aatn$!Ply*m}b_kb(2`&CF1^5paCR~I5!rF`zJuYb4-Gliahjl z=0MD@pgg^~{eI*%Ie$hxb&;A(+(tTxIRlkRmI>y{W0?Xxr7EY5oh!BusSX(k9rybV zh3hef^LjiNb4P#ewGn*?I6EUVh`#69KH~|TBrzH=2BL^Qw8et-d;Tl`ouB%h8~tLz zpZY9xHs~k6h~1IO-NpG$U7BQo;of zhl3*gXx`f^P=T4=X~RRJX&sAMt`^3czJ`qq6$kJm9Z&qlY)`z zvL5C~ARmt-bZZO%l?wl_4S^>O{PNiU?mc{SRqy=sywfm?JK#TT zY@I4VX@7rg5(!E326K&ICU5`vf0l~4tOQ74JL)a2;9(9U6dc3(>f%G5q(d+J&kRNP z=Pb*QkBfZ3{yjhT?Y+z(XvhTwcXqZyXTC6IVD)19MKV&krfkY;@c=sU#JsG+@}nC>01AHEIQp{=a`lLg!ph^Si^I{ufiiYN+qJ$?yRc(y-7NN~Q$r^^4nzmL4RVRx6OeZGkM z@`!os*D3N%yS`BFiS7J8)l5A`&xfO_Us{7Alo{G>ujrt{q~6DR6DtE zYZn#|4QMUtrdi`%%}lVVlRK(yeH#OEyXuW-Z6R8@03&%2kF&&)-*q4fNYzBt8=x6$^9tH!+Xk{e!r@3}XFF{toERxQ0!BLI{V%{hrphFCTkH$ORp18r-c zrw^Iy@eGWduB*FbmX4=TPG(#;m6zBjk=Y-$-&?sEn*cHfwxwQD3%IEk(Lp_H>tw@}6N%g)yjNnerbH!VtM0va z+!8ZEE}10dWEgIWCfe6;-acQgSw5C=qhrD!WNh`|L(VAVzF%e`jqx1UoYLZvXlHZo zHG(m%`+%Q0-lFGxoO_L3Z)V$L0*?1$*7IESLXd1Owhg_id*c4YKG=V# z>w_`f^}|m0h-()aXmyjlHJF#mz%-h|t;@90lwrS%d)^+tuwMF)*KwU|&cvQ4@X>^W z&s|vP7j8iI@n#8hBMT{sv%&tc#6H@duY@jKw&~*YDTS)=xBM_|n_cF}TM4{BlWbT2 zU3F^6Hh2bbFbJ1lvC+hI{Dbtw+dO|`n@x8w9C&>0!EUd{;Dr0%Mrp#_1fE!|-no|B z1Un^030B%i3SZBQR!R=d2L;Y4Qz5)N)N7lH*{UXHT*&D-9Mn1>y3ZY$31&M}BhM3i z%Y*%Ta)79&Z-}R59S%R|NHxUx=pQ-*xp$mz_Z$~>wW|()@15_$@^bgrv&`gEdVM&ji3d_{<^b_<5H0`Nbt}IF7;f+Qof4!d`z|-_Jf_5_Cb9 zih@1vOX3&!oX53$jPJTQKnxo7Km2@HDpUt}s$t#LVSatSxnSzHRf1crZM6BcOT%Bb}+{L}=a+}9FbwM3s&-vebZ#4-6 znv+yVK8eRndE6@{oW_4zL%s1Rg=e`@LrB!fg-Pj_nMX{&hBySWw8nUe+(w#jqteV| zT+fNkk2(8T(%5c;Rc;wch{W){KKgc)gaA3VhU*8{?PZT3{!TbKdKIVnB#IGuI4-e1COXjx~A=WBS@Fd(#>e)&e9TyK6Zwu5_F*W%{em1w5 zSF_n3Xs@es+<$+EzcdR@pSk^)bBsC$PPDZ&N-g!bmwTTi(i8C+1CHxpL44hvC9}*~ z&}#Vt<~^FlV-qvPum1aa^RmJTvf@K-^P9kGoX3@GiU!QxpLwgao{t~t#(ML4adie2 zOS!RMQEWEJwr)LrJcHUHxp&NN>PBLPe*^Qa_2dT10>^))2G;RYMyS=@pX^kaV{03D8`bUx=gLiQ$^lj8IyK98A<+ z&RFJEnlX@*Aa=%q&0}VlLZ*B&qjtP`wJQNUimZ9_2lt!7_jyfEVb_rP9D4yiQ`wTP z@E(XskIR3;pl4xmUyBZh@+&UPMwd04pYrFSf8w#}t38Zk@y|F2V-5%Ram2E7Od(7% z1EbP#lNW*(FG1-@FmlduK_dPHOc1ma|5uK4bq2->RJ7GQQ96Yvc}Rd$VvOhS%$wiZ z$cyuYn5@(N_;2T$Tn~n@D2Ua>?{Hpi-Sf$5zLkGR4&u3AcjUH_uI`L|z2cH{mZ#dt zsjpqUUga{#~ zb35m9U7aoK@eqeq6v!8^!)cl;{)J%?AQ2qtO zjxo;@@e4es2uWn+5%q_fbF4Ebm^=6XpAaZM{8nu=&x5Wft+POxhJ4!Z;Wy+@+*C!T zxlxI2IRQL>zNta~E|vBFFNCtS7ap`{UY!*saEgahtY;0t{W;dhr(He4@?(#`#|VEB zbJQ2J@+~y-9~h#pFNOP&_W0V&WS>5(a_pmX6EHB?+g&Kodd7&!iFto~v=hlT1ycvL zo{?G;{0v5Z!#?AKkFlT#e?$Hl$WuRYihcC-4@ge*H_j8s`qk%zd|KpZAV1dW_LuDY zJ3S6$Q~cEz%&}L&{(&*JR~W=oPosa=yg|*7+vC2wex0<8tYA}pc(Inm4wD#iK@1OJ zKhe)dC){eH=-r%}Ea5hxq=hG8@-Um%GYaF^kXk}p=}VjYV5^OQJVs#k%?dSP;znt@u_<6=RNo1QE%$+o@JRK z=kY&lH68P5MeBk?!wD1-v*Q<^s5s$`7wT$8?AQFUy^GbH=)^79 zo^XhL-R4w_Jmc5!&bs6;3xW5vardP3)suE;hojIf4|L-a)4YEp@Sr>A4GIEPRM{r>%AD?;=;50LM3Q$%E|Mr2&irKm#Cd>!h1MK;cwk@wL?7e*wgvy>~Q z6@wPN$VZ+r%cV`%f_c!w^!dhY4mIB_&D+K~OY9`akf z??Wq3dETqmI-qm%!7yjj?`{a)*uBNN5vQ?>-Z*m2Ic37@?$+3Ul`+3%T4G)t#0M@3 zgilQ8BW2%iCfZaH;JDcr7Jm&lrZ6Ja$R~D#4Tw!%rXGJZzD#+rk;av@a(f*d_6RE= zGXu}R`kQ@xi+lN2w8gcyPuKJE_oz>}nu@ZRhe?ymw)U7AXEIh(9v7b>F7-ML4SyEhr<;~9rnC=qX4eay_5g&W%t?9C0GjOdzopA9lXp>Di(95btzA=I4c zNe-p|GK;I$ZJo!k;y&L`W<*OrImkp9iM(v;wsU_V+hQTh>_@f@Nr}v0?$6kp*-^(% zR*A}`k%WxqT*1q**E~^c&{4`0GZQf=3XLFTqlo3Ge-IKdl(JpMVUn<996{+Q3M%lK z>ek?Dm$?sd9dl4Ww8H@tF*s=QY(l_(bB=Z3`BDc~o%CF9kNe};=jhJI+pC`1;;eT9 zCtQE#xsQL^GksmQz0&fz^y_XrZM5>tbMA38M_x>vjmKhN5)RISw>DL{IF*O56{Yoy+t#T| zOyU_sl~I9C>hFZ8$8evO-sV9H$iHSpCt80vnr-VHZ%xk2v4pK;wBjFOWm0d?KWF>y zz27sEETK~SENMc}5mj^Z*_{I*;nB(uSZb_ehgi3yVfM(iIo2ceMI&b4^_50PeB~)8 zdwtyVrAv9~#+9*UlY%x>?A2DZ`4(RFLhjWq6g#(^2p6&l;%Y&yrDf~9 zMHcfo=hiRBoS6sU?f4yQvZ8ICscCG0xMkx73McbUApe;gR$c&BtJJpEx8;gdE2^CFX?pDfVic@72})jD4|A7q9<{XP|T8B)_-|%VU3!sxMJD zGivolqE?=hd#))cX|2uN@r-~sdz2FEN{ZTo4#LTlnTT{>n_C6iri&_%ah%q($rn66 z4gPffptD}%Bj&a0O8-sT0(%OFORPPc%|T}@8OOdfmg$ViVX>qeCuUC3B8En6Y{54< zha@8q8=oiG*E#X`qYuO*&Mkl0AnrRCP0!!I?i(Y2e|zNW5}Yfg1MG6;-jVDUo0LF!VbPZ&C&d zR2_h_31V?=C6mL0ui}meSi>f#gHma_uOuvs9gUSBTK%|sk<%DCw%mVe2N|8?SLA5% z&6vYWP6+pz0*t+C?Cl}?>E;iagY#j}{DX6x5<(Bmd^fLu((na~c0!bMdO3dL%f`!9umLX zeTcYSW(Pwgu?1aZG3cJdv3PA^21Y9<+Gs9U-rb*dr|Z4G=MtRnD2<*y2qMtMi?W%Z zCU;$3e?uH6MLVg3a@W=|=W1x?kT_p-zK>}pd%gxYPlVQ0I(Oga3;Q)#kJ+G_kSVv} zz;rbm&rSKRtABrDJoa3idCz{_;kIvo_j0=$*08H3o#y^{zn?UM$SJ1Uo^^O||6DK# z-Ti&mH@fJ;nZkvZZ;=ZsH!HTV_E_%~(-eEHW>`$OyGH?EBsA4svhY1!=B^v7#EmnC zx?EzPd1Tv^0qWmqYokLkW1H-Y*p3^)sLkUm+=ri*`Fj%RG3tjU@>)Nv%^#20y}7_M^^Ea3YfvtZ7?>WLX3ly!C!LuSuBksG zPj!vHR=a<@?yD%o`9T+FIsPH@u4^1o-F$n-s3g^E!JAK)J54%l2-x)IZtko-3J%sa zphcbVTul?ba>W;r_T5Z}*_X?jhgEA{urD4E#}jSik{Dd9WrO7!R?cF&&o>eZx*V-< z{y;^*G=toXc}S&`zzGXjt&wByC1o>@B_*);18sl5edMfIC3sT&VNiOM>Q7ZV)pS#R zP_5_E8WURN)|~h-awgiG#~ZnwfhAz_mk(oh$b)5w_1>G1l@mMbbyUWK2TL6-ml*#D z4O_(Zc&*7eRJ6zSTzLTWaV+okpAeT-TzLo_vGVEo)!XVA_PJ59WP_i23U7AC1|xn$ z^Ot{!aOQ8If=9qR32EwJYhmsfGOExMX$mq+z|;_Eet=c9AhCBijvC~g6Jl-G4_YOOsO4-xMf@7lGA=MLhHk%It_xE8YYV;cF>Egx~qnQxQ^E4)C= ztKfaa22G9JDi`!~CE4>+V0#^A7r)FVSSRn|KKdK?^G|&JQPcg%!FS|;!>g<&{{erW zA|Xbd$cIjr`UyOYhv)TUY`T0GdhYV}mnP&szv3F6IitOfVvOgoem&+&7d}uI`{W&W z?WnWG@zT4Q?r>5xZQ5YkE#vU)v%Sq0F6KeeX z7cE7Q@=pImC6$2sMfUO1xR7ID6l-i&+zQ~~m-prTkXi~71@dX($g1l18}@%!zB@eT z0eFTw!ok;21hYeCUEFW`PV223-(#QUd|n^%v)*5LzRP@W)v3O*JKcf5<{{OhpW`zQ z`=D(2MF#hHKqg6C;d!_Mn+raNd$)boAuP#~jfWX_SIpg8YTM~FE>ws0<4ERwyLqJ9 zPrz(MTlFlH#`+X}GKw35wSj*MV)9zPJs9J$fUjVOTAPZIm>;yxQ*C#dJUo9;VqR!9 ze5cgOI24>z4>q7`?Ytrd}>W5@XhuqtY*De`jZuWt_7((ie0QY}~4(@Mm>0}me zou42sUc9zg3dk69Dg{gH3P7EwzCQl{HU7Ual0eRB3B8Ut)5pKp>Bfwy!nn#7p1*nC z3)CRo6n7jrXEtJSJbr~&RnFS1q&i!WEpJ8H=ee<#23BL5nqRzv6mzCni+9+-ci8ds zSqSTjY;V~*O6aFPj&b19wlQcEb4&vf5YD|KRm)`bf16thylfBxAPo= z9@A~hpQzSpo@w>m%*KL1C7Y@XgMi2IbV!be? zgm5GOa2W3hv)_U=g`ndJp&5Y>^*!6@(klx=54)U0;c;Q^P$#m}ec%)7Yn0k?j(YAn zoc``_yx}Uk#y)E;Trq6w>k`gaFEDXh@0f-pG1zh-^=gq8REY6uF{x#;v8`o4ERhh1DyJobJ!CoE5=@rZH8ZB&o3cJvjr=lXET z?Kl?urg5oS5j&pZI@p@=RgJjtNh=di7m8Y*%TAw+{DJkDyhAVn7@z#5X2kZm)U&q# zMqTM8{{6kD8E-8|9EjXZIK6jL4>Vm^z6q@k;nOZT60(22NenuWv^0PJ`>baXo1g}~ z&5_spul2*6zJH!ybbI=G=-+tEvsMw5e9ATk*LRfOl9&RZw(1@cnl4#~7^ZUTLjheFZs-DnWfjW?+u>6|N`lNgolt0!}$^ z$GO@U4%mM#mpBkDP-`<6He)bXVmHXgvG+3qgpYqBcB0^e%uu5JC$QeA$bTkqimPvG z>X@*@IYxNMzz4=wAD!Ovq0`a)TVLpq>Y|lV{?eAipJiQb>d;{^hO*86 zn|{WP19m~c(s-0$^TFH8T5f{1-1I&K-Clps=t09>j~_hyHtbLRCSH!Y zD)aqcnrZte$tmvfjuYLTZGUO19r6S+5U8ubOu^tasz@g~uR~G-cdePN-pqp9p@gL* zqRM}BVWsQ0Mo=O)nQmN`sTmZg1Bf-4@W!tH+z^v7Fh^`dsW?18ZHqV5lXa55*PPXW zxEN3n;9h+L`mW&-*^D!+$Lx#B?P_4^#D8}r>GJmL32Ajkta52-$)>ac zYN8<@i&*c0kJ0CYr(2T~DAgU_dTobp-w%JD za`1)PQIbK6DzU@+^;?7b$3y~Od(=#4!|ctBcD2ip zPo~+!+L_PNaqgpjOR~L%8;21<>AHVh@Ya_y-+z9ZaZ0d0(d7fN$#liHd!cwMJ%Tz; zS|u(m3$O-t-btr#4849&g7L{g9pi?+mvUX75x>r)bB^D67+lp?P8hI0mxSZK?KLAW zIxnwT!1UM${TxhVofRAG@5^Phe;{64mkJ8ats+sP9Pc2}OGtar>|ATp!B>Bt9`&rI z)aqnu@OFo#X4?po)LNjFZ7vom|F266Tnjd5L{E7~b2ej61bFuCJH1i4Kq?WHHZgh1%`#9F zJ=MgLXvN{zc$P~r@2-lik!91yrlt#hypQKgH!(F)h$H%{!h}Dw7;%57>-(tFTd{qA z>cf)GrzUO3CtsMWKJ`g=sCYz-bZFFBXHT-T27`O_dAz+~K03YQIoX_a9gaNsUvbIr zdF{u1DJbfST4x*BV#}Z=aLAuL#C4cVF+1tv6I?&s*e72%1qZ|`XNyG_E88vE+lGLW zq%1?pPX18QR_XB616TRrAeJn-zFp1RBZTApspvm#5Y{f zX#Gg{qwTab9~AYi%gTk;EY+c@O=)PDIDf7Aa^+3c!HxU+5Q+`JM}`rd>2Oi~xOe@4 zC{>Tg`bk%ViIOq>ozI{2&#!zSdcNZ5;?aYAMU17gbHDS-zvq81Ux;JPZXZRg1sVWH zuRspg8HG?SSvdp_wG{Q;nwV;~o!58Vn1FM5gVV2m-WvQf2YAJmfn`#!tz}k&roiT{bV0W~1f7 zS^F_+^CORN^2L8lCcyafIpPwu4!l&>k0X8?l;2TA;hrtw2~i4Zs619n zx#ugN6xx3y`k77sy}vI!HviVES3rJT6KdsR3_zWSXj472qaR@aSZ?B=J00A99dnHP zJFU2mr+vy@vorpx$8jHXS$TEG_IK%Tf8tZ0HXGgA1~pa*wKd^!^&v{RbcxCXDh0;4 zES8Lv)`eC^L}!d7vUW-Lo<&eLnCAyrC$PHqX{&#$gu^q@N-|PZ!a6?Lt75i&xj$@b zmux<#--qJHqspV&dE_O}8B~O2mfL7kK?<{EN#e2LmzQx6N z>EC~i>sB4Qp2>DG%nb5&WgxWq;-$(dQ~{5x9VHpoG8Jv}S8;jI&LzBXiAq9k=Gb>& z@RcwqP=|XH5R!PPx5o%j?`-{lisL5Bpk>pcL?vj7^ zAEL)pQT<4C?G1K-Qsh2zAFQc6gZROwqGUbTLQ_MhA>8BG_Sd`_j0>1iov*@iy>0vb z{JeF+nWAPnEs;Old`k4(LybhMDFyrU@u!KH_V4{NlMFJ>u=ijaC2m&hNr#+h}H*wL))D!+ostkalN6 zYCfh+KZ)2Bg4Ta1@HGwK-wQ{F$cCgqHb1rjx^1mR+&slCVDIV4O&l*kenH#! z4!qX91hEs)WMX(MXOv_c*_xP{QplJ)0rFI^UJC2MJ-89hbLT@2P<8k65r-j9I1D_$ zkTZXYq4vMyj4l_u@+p5qEk||MSL!h`$2y4-OG+S~r^8%mMP^zf<|haePQ-p0DLSFV z${z-jdMhir#h$nnBF zyM1pch)w!xVm~ccpVm?ejv}Dmy+!use^j?Zn{DB~uY#8QVN%==EEKMl;fzAWz<&gz zycuhb_4`h5I>h?@R#QCBZ+mRDKKk8X)tD~z()nNd7QKH@-a-GW*K}vs0l$Cts95lm zkJ|D6?$+<&bdKcL+Q$98j(O}M+lr&s&G9_zJgd98KE_g=mO+Vn2Iwi2AGy%|{H<*! ziFL{p`xho>VsBr33Qry&4$>AL;$nylS;uRao}Wk`wkl6)Rtl@8OvnBjR7wytSUZ&D zO@>~)5{!S5R=F-h<9AqZ8;>N(pTT7Aljc58`6i&vfT4~kG?K?%-8=%cODs9f=3FnB z^Wqshko)(~E?$Q0F&y%@_8v@NTB-INy=%TaV^-B>VyMnI-PAJ~n@4HQQ?MzXDG{D* zB^H4gvzB+>?#8=$z*?x8`^3%yp>s_VQ;|L2GOK?C0fF9c3w9{hB<3!R;w?G6&;6a_ z$ZhcsM&iBCDdbq#m?G{8itHiTAigd4;8xvEK~7!NnKPupb++mFD)oT}h-^+SCWB)EVwi92t`#0ha zC~|+`YYy#%&{)klsRZ)B1IS$TxF@j+ur|zWhWcuRc33@5J&=^MYQ|zNi80)V$d>2_ z7o%tUWX(6WBx&)%y6rEuEXN*cRZa{hJ6tOYURNvs%p;Z95dHGpu!yNC61LZinYHK# z5Y4~@RI7*(2)tG&!Z?qBG$_IoVk`9#^9+A$x3khNJvw=$+G#_G?yJ@c!rs3*=trNb zniFT+Y3YHCDDuUOO?fG`+}o`nlA;0FMJqm(n zOy0*&WsaD$4D~Dfn5l@-3xVtIu6&|~Q=+8@)zC8JQDaV1iYBqr>-v$pcIjhG2;%N- z@ttlZc`FqsT4NuJLLA>^x?#jl2r+*JP2W_C{$OCb_hhgqCH-+W%#7)+d3XJ?==c$X zLB<;J28h=OJ16BsGtjdy*A&C#06)mzr#ABinX^?~pf~;FDpPZg9(h(R*7L4^6Ayn56&a&m z5X%5)hW9CAD==5?V~|#3kIlWT4b(Ewuc%SceSTa&E!S9o5M>ef@rbv)jQ3Z=yLI8X zpoccqe~-6xnYJF;-_4ZBQvw-BnNIZlES!!TN|-GOn~OVvICO(O#Ec72Cf7B2vf>Bz z`q_`Op;i8Rb zkk1)y$kdr|zGHFDB^?EjOm5zSEp$vI*F|G5{xewa}ltj1Op<%?ke$UV`LP?P)5 zocHU36anHn@wH@RV|*{wnh{X5r>eo&7PTP*Vs?(!g%?)iJ-paoYw4Mf69T9?m3vU# zVGsVVwe4<>)Y-u=e8-G+%1dL?;V zocTu%+neL_0mu08iZ_Ypfj(*RWM?c>s&D(V4bC|}?wx3xZ_hYK6B+PnL0Pr zYuke2*!rwswb;baxbw7ix&M4hCVXrH^9{4Z|;In|K1t3)Z@@7?={0-<8 zr^WCWa^y{)^JO^2G1(QiDfi$NTtS4$TCM%KAlupC<{8(zH7@q1HisZdhkA$iXk;4? zTJ0;+a4^QD+wp_c4`LeGZaf0pP?F!{d~+}gv4V8)mhFG5>>=}k6K3m|)x3ytOw-|= znCb?xt1GZ|)t?Ku10SF8vp}6&h_?Dy-Xz$VMHa5tDdyFOix$lxu#*=qUdlYZ_5D9+N7^MsHo#!(szCym{jJmd4-n;?1y znpbmv6Dxn?24H1Gv6nx)a|ZQTfWb9f;QVhX;#&>kG%`xJ-5P+u;vc80zg&2G|GPUm z#@G}+xbXcI<6r{NVHNL-B?X@S=kwnwK-UTKwLf$d2sR9(tg#_dSM+~A>yv|%QB%Ke zKAQ9M5IV-^8TRp9zLoJYU#995cOSX^UEFj-(yM=aFwIXIA~ z`94@YJQY3NZzO?4Y-9~qCIZg)HV=(A0Hy#)?^}JTC+dax-^Ndgyq%}j(%tr-^M4*o zsXKp2c9m{*0wy!O%r^n3|9NcH_2t>?alN5X?`;=1^nP!6p7eS~eLub9gUk$KP(I`g zd$&*t4w8{Asllp<*}*F@MiTQJSC%u}L&WIV41pm>^m=_W1CRasKJiSBI!wQHdTv4I zEVq-{V5Wp;pOp_egAx=m*`UN6N(1IXIfZ}hi8yy9*V-lUC(E$r-SJ}2&$qxctwVz40I-DUOm}hkez~6+=PHio zQ`Cn$#?hC)d4CRdu}l{)kCaEp&4lP&bz@jQCu(rm-^aB_Y%wKbkuaTMt#?o)onL?Z zFMXCch`R$TiUc5$8qvpBF<8I1K*i4qo!&oAIW;e|LfSl z|GV+Hy?L_6?Rdd)Pd6Um z+X!k}p@xZ_E|CKsRT(yw7+w09OERKs0qqBS1kA@P#&|Zuegw6KP+Gizm3aWB^Skfw zufbque1?5q)Gilx%<-;;mY*@jXTBi*$Hw%C!|MA;`czvl81Mf-#&J2~TiAacaEwb) zKP&`oM>EV{Ou^}J!`waw#Cut|;|Pu3(O)&vOe@fpVkYzba!jGDcT~FvoNK(VAeBVv zX75?!F>PJ4`7U!SySpD^v8@x-_^=VN#wh)Ba~RxDSox}MMh6`X9+if96A@dF;y!<2(suYb6*6`DuGMh=i z9WyAA+@Im|tOtiN)ixOS_&ScB*y+XluCE1k^LolJus^Xq=bvfUGsJ%!BUbp_eO7bc} zDgmrBDq`~HZIQ=(v3d?6cNsVyC6hh1yoD6uNm>w_;`+a1sUQThJEbk%9`P6QXYdA- zoH9cspmf6T{1kiH!%KgiRlUGJJmbccba`BlQ;=o0xa{vE&gi7Uf5?Tu=SQw}@hHtVeOJi)j7mfYsj~WXM6{geOAAI;5>GH-|CqUr(5pbvMKFnfg6}IO5esALl*clPV zo#8%W`kmPJJLI2rs{Mq({saQ^_y+nb3j@8N@^ZU}pRj1TUiF`z5T zP5LlLk7}#w`{4ctw0HBSMW=@nAa8P;qmIZe>a+HF_}{p@bD!<(F?aDbIre1_#WmNw z?`Iq?-1C1co_hCl`%*0lJou`uJVv>jjnpC%?{U7JyKbA~9r2hrG39OXBDjp9jc0Ca zUo}zhe(@C(-Pk17nJE2=!7>V4m2%@vL@WcAE|L36*!b&hIt+$c)$D=x>pt_D zaX4w8IUTA|u~(r_tS94Uw|}iPr0^Zw=$Oq~|3UahDtH zyBI5$$?+@3RV$8pSLj51%8cs{YBOaVg0az%ms~?Fwg?~%*2?8+6GNnUcIvZP!)ViFdvM_ zy&sz?XuZr@)zI6?D%hKiM{)+SV#JCJK27A=Nd%S$oXR&w>bLoy#mjutyeK{4pdIa_ zMnwr5-|r!0TbezhGGu=3jBQ*MhJky06H^wtY+ z#=U;>_uyf|P(%F4LkHBulJT#7Cve}1QXcqbEa0magAa$1WGrCt=L_ne8w%katZVuX znjwBL9EMbP)q*g1LIwKJ=rl%zUaXo-?M)` z-R1K@93TP5b=8b5${)A-`rsxfKQNgMD(&*uFW((K)J#8=PjOjnS}AVcUS1e*kLlXS z&&EeWxZuz(7cx#ByIP}nJ++QU;NfPDTun{V?sKE(HG@8Gy4(vFd`e%-Ib7-9;Tr1W z|Mb{9#T(J+i$ppW9ugHIBSu#7UpW50;l0C+8?hY!A`(;8(|05~CepdK7 z0YBH>&AcsLF&t6aJxk*wW}KNuM?0B!XRs+Ki%XHW*nj6f?%O%$U7tDTc^C_i&UylK zJ%uVmt$=&XbxwKVboQ`pGz57k+=t3oWh_^y?aT^V#RraM4p>CdK^>bBp?B zE)4!u@YST$tS8zQJ~cJ(eiwi0`T=a=NfD>hGlsN3Yvf3sdf6$SCzl!@vBlzAyTSjZ zYnJz(S@x)opy56+G4SaNNS&4o4YK|0;s_aceT&WL=uP`7ON?1|9Vd{ z7hWd6U8oY);k@}vSwc!RCmd|NwmA=4l|gG()W`=Y^M>m2OlV2xGS`1hPi8C@17h^2 zAd0!94j|?mR!$ZSswr!+MvAzl0wU{9;K?Fi^|{X{&srk=U4O0raN(hoA1|KHSv4x= zBAcemPydj8`IS!t20sB0@3KcenFQ6egL9>`FD*w zdFFTcyW4E8@1mbwT%>VP?J9|4QCc9w_NZ_2;?3SyZD0Qeg?K+5*m;EjGqT(jTEof5WPW|J|Kp3Q0L^s zxR&TKp3hjnpbqlanFOQ{LM>Uht-gO?Ct7{xx1W0AM~&L? zQuw<$OmedSYNpm~2^h`IdpaxY`*35IF3xRRC$S^9Vdrn3dGojNMy7n=XZl{~axo+I z{fuGFqT%88Ctu{ditlrkWJdVt8~v(ks^_^Tc?4VKM&up6@+gIU%-V7vJG(C^3F_*I zg~$*q7UUXvLhFCQgPvOSt?(_3&<_cSy5dY-#b4JRzn#C@FYJgo`dJ=*D|*hSG{1c8 zPP)dg8QJBt`EurO^prUBB-j_Fw|!R`n>=~LW%T#=p;355%I|NYOsMI` zS%|H6rrTmIp>$CrCQf5Aizh$azHem=p}1YD^evmy)LDOrtqSrz*R*{erR;6J%tWx- z!0RG0@AMT&vWV%-RZ~iN|Gk|Kz?^ty!4rID9Z#_H@k-8mvS*wnGtvW{mBIuoHW*Df zNDsEg?R8)^iR&F1{9r!~y)|`2Y!QLU1E?DYx{<_WB=hfrLDP^P>lnXD8rf{dpyCTpz05|ccDr-mxTryH zEEj(QGGfMx#zZD`GP@a6$@F$!KX79NiKRRETHoBFU$^B%TK@j`=LnAT9i@8MpDX#5 zKV+XHcXc&5)+eqwjEhb$R}&gfUSF~@DJg#=Le^n(c_hlJ;rY% zs3sxpw1}&<|2TBhI^VbkJ!GnYqJOCQo3^k`g1nzES0aJS*yKvqQhhy zXlHo(jQxEznAGrW=%EWn;IYGT_h*>bpGjBo036sYjBG z``4*_lC~mGd!7j9&oEx%T>Y@Ja{RI~-kyiVdB<85BjtGI?k7_>uF^QsitFRK@W+8$ z<9k4}Q~Y?%$2;A_eLw9V{JY{cYFnCU9R&NzC$!E)*_1P<;B?cxQ>{NbFQtF2ndMHE z^W-5)lWN80`~$Hp4DR0I(!^nP7Y1b=Wl5bO918mpKs9xYr-3#Q>C-rO<9Uq<@3p1- zG43^jPPM|V>UEn~PWpJi(;RxmvDRDkE2uZ#eknASYtQIWYH^(JvKZ=35Ic*A!^UgJ zJgUZ=&W3+D__x5C{^jIZn{vxw-OHqiDnwFVXMyAOIuVzMc5iu~x z@4$F&fjrd|d0A|XI%NOPte^Ch>lQ*L`5d3uGo6=4>6?%D1OCw{{h6;H7`vZrr`{2ai>s1O437K_fK>e*(H-&})CU(lq%$SbOU~ zYo-gyppl>#2WGRbuYtfq(&zKHzzUrIEubpo@A<#3O`hV#Q@^v1owjFQ!3A%^a`#Hl z`r@&2I>Y}O=S!detfNF*7~iP2d6G%CjyGx=CdT6j`KJ0k%4u4C3WDoee=6fYgtT(; z_0Z&K2G5yzhW~?~CF6f~f(z4Urui1SUQ*4M-a$Wm#x6n*5e1$@=JelB^MK#fs2(^@ z%s33oEO2#2+<4-6@mPsX3rrg<3`edp#EqR!!vH#E_%MMN1D=IJ7r&IhJbSydoMFxJ z5|iQ>roS-}@+uZDj{LAOiw;Z52>G*t|BJnMYgS%o*2VrxDp!9yMN*hEtbLP~EZZ_R zARA-MO$rRi#+(fRHbBY02$RZxPqhtMTZt4g0 zYv*31PKe8G5Ip+@a%(()o#~|KOt) zqFhF44h3f97~2%B8tUz1Sh#z;v=2f%NciL0d(KQynM0J}*Kf5f=BNM?I+Lqfd(zwB zcp#3gGiiaYY#rBs+q-JhjpQQ+5wY9*@hC7kL3~&_F>4%DzhlCJ$P-jiKX>huhz!1t`Y>9wF@!% z-NvPeMro9hQg(kjaKJOL4Eg%ex9a}sSKBy@C$$0Ur^j!9|6@R)iE3TTO4?O-oR+b3 z-+M6wL;aI7c3lT)P;snvQ73@d#)@a94XoU^o+de`goys^Qi_I8u!w{Q)h_>G%7*mM!`h8pL2xk6+GL?NnuZ{ZJ77AxCm3;P% zne_2J7S_*yI$rv+VmyP_sQl-au?^h!?|1s&-qj$Nj3)+aRUOYe(PghCJ}yr=m#gplr-%iPXB3LAdEYBu}(^S zyJ@|FcHK!1wg4bsJKi$((c$e+@wtnXI=b@A`~2Fx&|8861Ulc;_>rJ5Bwg#%Et%O$ zQYw^B>A-n3G}9;+Z4R+#lGK>!h`Wq9KAezea&%tWLE|l$1PKfAz-C24q}S zP!=FLw;S`PuZf`{*_f)6gZ5vB?q5xkOvZ*fn!s)Sb-Q!f4?zXfi9a}+7&A#!^lYM@ zGf|~|&^wL->O6{fJQ3@AXOsUDqZ=_;tG(fW9dCURoaQcJ&;HR((v_~7+Y4cYG>ki9 zInqqExTgg+zif3;TO7rDk9cJNqVqs`Td1YSj^G+l z3*h$g=dEN7KlUMSKls*fp7{EMe(eb3L!f@zL%i{F0cm^?{n4?D80N9$`r1MKL7L@% zE&f#RK5*3E5_%l0O%~1P5ylfh+nx+Do(Y9^9PQeIC9aqpzWqRX6h*p!_Ay76cvDpz zksWWk|Eh2LM!#e|S{d zt6puEBXgH6%byn7Z{7Wl>f}Wc7UIC zqLQZ}u44FOb00KDlD+x7yf1y^%ov{n=jZP@<&FQ6yr|xpF+x+2<=)R=$pss7N`y8> zETWmLPpp?|>XH+?5Lb7q6?WQxwq9;F(&PG=W}}gx26qxaxL)sBROB{o7(U84i8mvG z$U-_ZJOa5x48y~7)NC?nLVluK_@0NGAKorwkW}a2BdFi!%@NbC|B~sRVw`#Zcm9vohv?Ni=67`pB!X zE&A`Xr2xd3jXBy24=l=@m)@6`z?VhI-OqCRK~zQtHC zMQIyRhB$85yBgI{?nENIZ=Ud;KbF7bz}|Q6zxRw1%gbO@oOZ+|*?+L}n2)Ad?1Ljm zMn3bZJNt6JXb(q=5U6f-Hm+`aK1&jcc-)GeiJ#?D)8&w5PG-7)W5mE`lOC_%`&C-9 zUj2c1O`Tl^);>De$Am=Vr5&vGLhO@9W{u@qO8{|b`6ih$PR+|i7f!7|>2dIYFHe2+ zL*;bRQy`m@D*i_8hH@Ye{Bfdj*T{s=2k^Er1N$c}@Xy88UGyblRo2RoP zYCfA8^Fkvv`1num_S5R!s_6AL!8G&*b$qWHA;w24ha zD->~;w7lt91fClkNd(+ZO-Ps1*(31tMr01W;cF%%e6|@dXcHnLVpTYRyZRO7!&!`` zcgM<(-~H`d@`G3ZK8M==FB_g`85am?qQ7y9-+K1H<7F%Nn<_S%L)-OSHa|KOP?yH0 zd*kz?H38p$GyY9KgF|z$@RQCu&fAo3TiRuQdjzva@seycRDbH1LM7Kf?$@s~$#1ND zyNLAaEQ7w2KXwHB=7#RVnFRT)&38})LBb=E6A*n8Jz-;!^+{;3uq9i^9V@8&U#b(S# zlO+|IzuVjOyV^L2k55kt#{bsedbGD6coQ&mupXDOTiCSn{p23*p~kjcU*!qoMUe;Y zGntFH-vZ8&0MV-zfc9bkQXeVlS?MGWU%DDq&03>?v zS9qskUs)U9iJdX!n)@Sf`xTEerd!b;*K?rkd8Iz~@7Xm$WMWm1qvYkU|yLb9HhdKWSVrwvcVI%uy3di*Ecr}Q_5lhe+B zOcF^V@G#8-Gp+o@LBRyJ4C7JVKlg%>?v_||h+A37yW4tc`wY=98-fV$mERcS?rY*( zh-s_DMqv!bX`d0q@HX}_m2qMR3)A+QMWwMC>c$drZ$Pjm-Ah;3abMUA&w;8YcfVTGeFFZ4_;6DV+)B` zscedC#TVlm<2IMSNr^4Vh5L#!&={>>B$+9Grl2U1!>sT$@)$9WezSn`iZV_tqjY3$ ztY_UgOKxJEt@_S==B5v_wV_E$ltQ$=r_KX^(Wb}x%?BR z*8={6pAesn)(8v8Sp2kiRQv<{v6TINd;bN443_b)_WQN}Iwl$DzynyIaqTNcyQ~$^ z?8naeS3CYfPEY>S{+$0~{4}_Ks(+!~q}HJSLi2hr_$wT>>!WYNGC=!3@%B%v&Pe5* z%dO7q-8R$BXI|cOOd_lG-r4Br>zPOs9?xno!T4iXpD67KJ#>G3pZ9;oxhk=1iC(Vp zXBmO(>M**m#7Xa#Yy4QD-wss~E$?rk2oU}8L@cwYG`$oMVtsm9AFlW1eu3wi^b<$PeYxJ~k)tfiSMc2coizBOUa+L$F`G_x!?BYq`I0<0_d4lSD0C3fgd}rN((m zdTVnJRBD`x!mK~0wSGH)ayLRVsc6tq(=|x|g9x$vq~}sjL#WJLx@-9$uHHl_he^i9-ukfHjX+F?lSLADaBW;j>FN!#4EG?=2LUk% zC!QBt;xx2oQ7{oXecO?`5C*gha6vx8GdAAg%kLNeqNhhWogN&2zPinqJOAkSdx{Y| z?TjeatC$`p9~sCj5D*8Tl87lzwudYR`FyNdLZ>vMh;Ipk)neHd_DkDD4SN^#A&r`dvGMjxXuNS0 zUR8WgtY`js%V5NRTu9w8pAf~b0j~#zCzWpv`5=+1Ge3!fxc5B$N0y8^q^dfflTX{Ywe;m0*C3~ z2&Mi6_e;{m{gOz$XW@C}_g?t5*?9ZgAM7H){WE2{+qB8aAm!f2xfUaCjTk-$Kl>p> z{P(^`x}w_K)+jGfQm2;JMywZF>&J_oX4j8Eo@IOy=+}!(pMa&|{5I4tyZcg4Tlx`E z6p<321!((!Ie|WHt8V))U9?MJ*K3S1x}AD>BEICrmsUzeX6a&8yF9v=QimlC3bSBD zp-t)wMHe^@FvKG~uz;X&HY{cizASn0# zAvm=Q7;})P2^<$1s30U_aztFHdL~Kc#`Gx}RQkJr{u%Xmg@W}P2cr2Sv;6a2=!-oo znfA+0)%t}u_R+B;mR&A(jbLIG2PSxAU)IWb$*W~-7Ouu8;Ke(HdD8669{T2y!M(k-2%-}nY@F50c- zrSk|W?TDIRdy@Bl^6|&{!y5nTBmJH!e&bKh|L8*@e(Y7!50;L*BZadW&w>R=cRDTl zOxNY!S8TXS5^hu6b1^o0?ie)6e3IO$WKYNI`-|eVkNuNpCTK^|pELg~hS{P%O7jzc z6=FhFrqh7kNs`%kFL~+hC#c{whKN{tIwpxr<9WXHQqd`6lsr%OvG1AY@%DtoHKzwP zXZ~q=W=}(y?W0L~hOq>#L>{Np#X9V1=b@fORJET|#J7Lre9omag_lWvD zkc`CLfT7-tyy{!&+~F3%4DFD`kNwb3%%oDbF@MMSIQ-FG>)U6PXZ?Quk&TsqyfE_L z@dnGe^Tif#jC_|&lxI1w=?hzqAeF4RkSCwsDw zZSdG>A#+8w(}Q@?yo@23^08rm5F;!=x5@$+KmX5d?@sN0i>tifQS=z6XDf+39 zxWxHoC`X>gzvAJRaxR=xI@(Ax^y@A0sG^yHwj`NyC}gLxJ~B||W#pRB1!8J3mQSOF zi+0jMem13ZCy(BcBsc2HC^F??xl_3b^Ap)9Zq@hR)<_s1p2j=Bk@sL{B<&-{wm+_iKCQ4lYydCE#pP03uwh1N_ zc*YVY-S+mwG&AddFe3fibQQ$p+>MS*wJSOmj7)_y1w$)0HG9hEhIq%EfXeq4bu$7)n;|O^9y=PZ#y% zdKnW{xYNl~OD7F)OJ;(C3QUh7)&^oiu$Q1PNo5(wlt99?3DzNhiZ7-IF7;+|)CHp^8>B?d0*pyYE}H{+#$+n zz7pVh^#{X9a>QjuIu<3JU=xM=oH3c1(A%^?xhR7oOxmm3y>}x|J|xQ8P-$%l+M0RX z>WRwUoJbayegdd}M=wfSsW)~g;;&ROaz?f1GE~B+BGPX?v}rhb#CUXjcgF@r3>L`i4XQdr4dsVo3S;8K|Z1EK1G=oa2|S0|8)O5EVL6XF$Zs!>xqs3abN#2 z-Y3qFUA+sD2U08X)@=VP^ZwlHzW({8-h4rf(x`AM17i(9+3{Wxq zuj1E#pjhmKck;pKA!+-A&(r!77E8f*#INiWxc|%U=?9PKbI1DO-B*d>B8bSp?WK># z3)g{qKB=h$#$?R?0lrJV)z8_42zj)mOVMx&{i}7QS#-BBs1y#a^+zoj6BP*JU08_c zY!I$DjfQXgb;PH!sN^Hmi_WTXZGC0#1v~P8)HS&H+ME5C`uWeXjF?Y6?a%bMK5BRP z;@32Z8~hWx^!`2njBmfOir`DbwEItJhh?hyaqzt)Z~-yYgu9dt<&-)H^)yEEyB?bi z*JE9L^pNGb=D)nAV0?+R+Hc0f73lX$g4sX9NUL3QLK8j&VVl?47(uI~j#Y~j)fge3867GBA9!Nn-Z!8AO13kw^DdzqHX5hMFX}Qj`Pv`OE}P zNdo)%a~~O~bRl9kR2c=|E%ObKrcTIzOzZ=pLqzuwxwt--Ho>?yN_Wg8#v>EHj&TN5 zL=443B6AZ|+e3IS3K*9 z&s5Uu=Pr)A%j!P=#fl<&DZ9e87i~Ws3Z>1Yng;Gau>af+PSBU&o=VW^XT_}WkY zjXfwO&|i7Giz#E=?@M_^Q2+MlQsP^|H?H)Kz^^^~-`j@5k59lbrJx)_x3$jkCUwO* zeG2Yq_xfB!OMa9ICFo;%qLi*@{ouxEvKrjJeagv7u%J4}=IScmVQUvd%JOR*UC zsKZ>gX&{{^JT=`Hqb>{-9vl_X`z%!6fVi3gY8H}MGj*}JlN+-vxmi$uaQR3iO*(YB z1{t9tAA&k>=`C}{^6t@k+nvKJ)&m$x*^>Gp8t2eo=}m3&@L<7)ZBHa-ns~@k8x}g*b8{n?Gw%|bkNzt3 z?j4Ip@jNM-)`wGZrYq$|_D$A>`?FKi8tD}OVz`|fd&%Jb0dpAJO^!EEqcOvxQ}6fTD!O=Q!m|^yE|4i zU+&UvMz7Ou@){Yj*46`U6lIOJ9`#12tl8A#{YdM#Hq%Wq>IZGMOHN}q?ikbkeZ1w} z&F)f+3)b_ekI}du_a}N`PHfpPUa!kU#DmqVxt^?|L3i32O|Gieo%U*zS5}Yp+HO)a zz|u%VxHi-@!x5{08}+i`UfgY+1*1T;J9;;m2A*daK^8{ddSJTQGWFEn8fWXX*M3@8 zO^$uz%vF&2{@k;(?C|vCf!$-Lt)C5s%i!|x)y(muOTTZ(g@rmjEEpsx$yG zBpJh20GA^Ixm>yw1&8u|zep#PRC7D+ByF)Ns(!}Rw14~jvE?PCFs=N8U6jIq4!R`fWy6@H4PkdGwND^u?9J-z{c z&lcZ*bd2;c<)jHL!uQuOb?OCv&q8{T0>AHJ+QsoE<@nD_{^`>m>cv0&Vx4G={nr>c zvaBC*z9hD9i1J*L;rKMMJtO7#piBH-3)|yV^w&G;5B!ZLPyjAb&Fjn{r->5`PK5--0E71ETCcFc;IOie8gbH9x?^`p1=i4I5F? zpNYu{h*qU*vMT>h6ElL5@(!TKf6UhL$bZ|aetQR`TnAG8zJ>e?K$(II@=y7F z8Y4g5k$yA85G>564C@=I;R@%=0p~-1 znKlz_e>ol=_6Og8r_1reG*tZcGrz0(UiPP?-?x1MmN&$AJ(yMHtM7f%3@|;w?@uaz z{A2x}bU8lu?vL#m?W+F3@VkD2tC#i(Wx5=H10*$&AFk5u13}y-ddIXOtM-=kSs=a5 z7}Fa8J*1~8M;*`=*1xR!_ZRdaiX?b{pN;|$qFP@QLGG9yDKmzlMvllISq3AuIDjE6 zp{n#47KjsN2?=d-Kzif+P!;_Q7r{)>8bnVJSJF6HqTW8&du|i*AD^4Oyf{y+a|MDhoDdWC?@$2@hIxX9$Fxw+5^kElhj9d*TtCXOgs!Wf<@n4Yk~D>YlT2DZB%DUl zP4zh!ee?sEa!)1}WevW_l5=W*UieB&lyA9w9i%|H&Sj8N${n7#shRQ(Pa)+6-}5w2 zC)5U>xvA&doM&1ZP&?s=AYB%AkDuH$DTZTy&eF3#Jn=kA3+g=hgY9VH?s$*w^CIln zD{Zfe`NKZAhgGpX+qM3&mTvpQUH_7gLDuX2YO^;PoPh3dy}JTc;6!kL2i=5G3@ya< zp2Wlzh7af$FC80hlD1$xMPeP)cmrL~Pink}u9T31&50{b3h4A8?Gy?=iW40%);u>c zDU10uNHS_3_%cf#7>y`aiAFha$y4HUi_1J^DWCB~OF6|jkOAuY`#ghG6t=-LnNEFM z@B>R1)V}cJDV6Ea>!D;)->*Hg;Q-AI2<7vPBcqR3*F4HkJ+g49*YNXSDwze{^kyEQb-R{;k zx7B`ZlR=HOny)Rp_05)=Zu*1XvvuEYI-_p4bvYH@XwOhyIWur|9$@)0Ebcq#w_w? zndI!=2IzQx56qx(j&fbBa+)k6Gm`0k7UjVA(JCu}P9ls#ljI3s0fZ|6QH)rQ{x@tS zMcP#07mAzY;H-c;)J=H7ZaKI)H~kkp#SECO=VYW~G+%LlWz=9Jn)`t+dO$_PYjX!& zj=!~0vT%R}R$5>@C($G{r^enw@6&9g=dwM6fXCbcDvA-!6PGXz+b~vOkZ3VG6ozEw zd(H}&d z_mIw~bHW`M21X+BIj`y)MkJTHzQ8)v=m4w8*e^owt{@GS&PKZ&A;}y7zlZP2B!{PN zYIkMr%l9{a(`+*!JA5AXqyeVA2tzLkS3ri1{et&@00JY~W4jj)ghG@O< zI!hLmDx!@<=zztwp`vs|&4QJ6M6{_$sw$d-d*!D8Bu$%g2RN1`*(@@9jOfA0h|2%R z3J%1GdSDDwCUZ!1&QCJE0vYz0B7Ud{U3j5MgxNIwa3*)5Z!EY&AZVlt{Cve%1VaBi zcDEdV;yj1FKJ4)_W@W_qEeDzBknKe3${b)P4yJ;GeaOMsjdd5%^rbpmf^U6iqM=ba&Y&&X|PWlbpP+`%<-P>0F?E7 zX>?Ff1iL8k81~&+!ML3NbVPHdxd0;r+hG=N68fjaS;53{7=^7(cEuKy=OPrwb&hzpF^I5aXH)!z$V(YV6`ecc%=U^n_ z3f{1BP{76t(izP~pW4Cj$$;<06^K#tgS!t9SVj;%a)8;vaG0ia$eS1`;FRzlkjVc@ zBmd%<=2ack4y)(k3MN9^VE?Vi0s3cu-obBmaNaR6xh%)jArKk+k`E)Nv@zb{*oq#& zi&#>56p<&f{?n>xdNGcdBSntB49J3@SE>>Nfy#? zco8q)o`dl$*BHO49AE9Y10p&gO~=YU5NBq{9j9FdbH3ufv2a!(9iU1t?oR;Qm>H>J z>4%}e#GgSKge?-%6vO5p2RQnkB-C4Dj(?^m-$1#cv4#CUmS6 z<+Pu3lPVL@5OHq`iC*o!Kq3e3!<5}}9rB#^uQ~1G`t>Z=uLAy_&*&(B8B@31%Q=fB zcVH}BMHs^w$4BSCc#w@rHlsPPFxS~77XdMXS>zhoitn+k zn}Y+BVeg-o7dh9%s^5Ca>+kZ0@t^er*Sx>$XO$1(=Q8H$?cLa68hF8l`%N`Um7Gvw z{eAzb0*3Tzs!^ft#!KCQa$V+JSJEr~exJyJ=r~rQ>9(B}bjmf9M0-x#((3`>+=X;z zr>C>UwHVjy2+vZuk8*8TjDZou6F9D2+)s=v%*#EL;+{CG-dQmh*icc;y)L5DofjY{ z{Dj}@{rR0hI$6B)iV>bIOs)-|IuM$}O&=B%5rv|1U!A2dLc!pFm4pfm0*Pg%PF^YTh=N!XClVrRHhG-fY(wDGNVwP!A?rAyLe?fb;<9ODf z(at-twpol7%|<590c=av4g>ku#Pi}`jRCGF;ODy>7Pz*PVGj&kukl>*SMA30UR0fP z=qlhx>$w`*E;ThBk_V;27K!TC|pVS2!GxTi;^ z*yS+Oqp&;e45Yaki<>t$5D%f`AoWHA+vnoSptn zKAvNM7-XzQ1*TP=>)FLwkx{vBX2Gspr%$I{G)=$lo2G@Lcz$3S^5evD5TZZA-=h2- zl)pX4;pF|?FMn6hr&akFe+O0h^7pJNU;YlO^5yTj$B! z0L;aIa!bcS-=TN80?PrZ%Beg@IcHFoTbJd)D9g>$qO_-}%JuVH0(f|aGXZ~->Nm@C z$==U7{(e7q%kn;sg(=GN)$gDzU;U2C^40Gc+X2AJd3-ASB?!ZIEPZSTfLE3~VZX?` z+_mbzQI=a({U=p9t?GYJmb+K|f0x5^nXayXzt#=A-Y1YwZkfTdob(~z;`w!Uu)N#F)ZJy%HNep zvFr)!|0#>@pMJ~s%eH*WYFKaW_ng_{IbLsHbB1S0QZMUH?@Hh}aj44o2*t7z&-BxO zPgzRKy1!*zY{$23QntDOi;mPN>n`4O;kZ6Q{X>_esLEF~p}eg7TQP_XyK881_ho3_$_%OD@2{)m)Tn0uHpebkNTj znE$lFXo!ity$WBw@LlWkeF@{X=n{iB9Q6f{8{Y02EzJ!<5R{#xJ(cOAAGkz)zl(2w>KR6NE|BuB0EHv%0ojA)!cveUm~G&wA2rML9@CK` zdsHECAa<2TE&1uc@?8Y2{r%lGe%HtHCW95~bs59&BnalP5ab3{hWQElb1ejk!05wN zV15QGNqjH^{9Xf01rdIqMHp)1j^Fnrwu@u_9>3#CRDF~hrSAK?|4F@na07$m^CMUR ztk6Z-;{WCHGl}%%5G!&<)q2@Kos}vn^uyyA%)=6}kXSx|HIN5_Pc22&{*r!Hgmk*& zAUhIX=pX+-HqPbvF{E47uDEKK;UeA7_BsUzl zH3m&HPe}hZmRl)oR~e6g`TReFKmMX!SEOTvbTt25-udzc)R3MI>U*si>8{~>94fww zRW*KV97hw|OU!BN1#!!EDx#`! z{nK~dKh<0DM_Be>l2^&!Sf0Y04_Vb8L%{Oy+D8HV(M0+$1?DqcZ}WG*T3Ej1o3dQh zE=1{kzo&tI0xBy$StD4negX&m5Ur9=OFoLr`QzevR_z41BOPN&jzIg^?@}+ke9yDE zocDcj6qa|f+!^_Qat-8#d|S;2^_xE)!1}n3xNki)s-o*sLGd@+4zc9hK1(7O3$$l6 z(IkfAzJc0Ug{+RI7BIoZp9!>QbWy)OLpyT=<1CX5hjN8ow*+i}^sxmApe|KGhB*H= z;!;tG^XG#2`VQh^(l*jb;W)XtUg!e-br541yWW)MoU!bZ;p+2Fbu;rb^yB!Zg2+i(V| zgY7~J`UWY`2kHUOkP>YeGH58&OC#>29>JIq0tg=yK~rJSCJmZG-y$|H1;nd45vrIA zLL<6l6Wp|am|#A5E-`@3z$@*KBXBt63iban5~Ts0gOcvI64XKopTgnH-{wq|$V767GaKhNzF6 z>cN74lT4WbY0d7-DJU@oYaMpZuFhX0wQj5qnQ}9?^Ug0YlzNqK>q5?~C~Vv!@hmW1{tb=jpPD`{AO9!kf09^Y{Dg z>8e|65$^Z#aiqlu;}xsKUmWdZ+435(oFyxBrk1cz;-mdoH=p~(QCQL3E>^8|BZhPJ zyk(yk%|xT2QN)KqYheyrEBSfHeymMtf8Q~^`=?Fg>8Bs8^QL{_PP6t;{+muagN)>V zJ3nBZPreA)C!dPX`+?2A`7ik7>+Xjp!ARH5Pdn{=WzG)kPd}Poy0T_U4}bFG!~VFm zg!^uP79F11soC*=-SaG-{218c{RRmZqV-`%<{m4^Qyt=+l=SYwD(fbr;L| z=(-AxpLXn6`{&)vjV=2Tn}cF)?jNgzk(1E4FPhK&x6JNae=!p0eAQf^qo$EZVJD;E z^RZ~|cTxC!#PWGw9X;&ZK5BXARkLm2d#h&Kikth}GJGcSXRLNYed-_St{pXh2lqwG zZeSn3&zbmqx8cHVAJ1p!^})y&VQ2T{pYRfsvH$U~Zf9n14x=q{<{WA9Y z;Lxu{GZ!@)A9CyY?txo|9<~Er2FDOhRf+@{(rg4Tk_w_$N8<;Z~h~_ zE%#|$JyDxfCb$3SY1)6!uknA}ulxVDUf!1ffthu6+HRZweYMOt-S+>!)&?d*8UORcK$C8-I4p;VvxK0USn_|rXGz=+S9c?<111?>OQw@@~lrUolMuIvCVs~%$g=! zx}!yAo6)e4F-?%Uf=e(6MlVh1yTr71nKA1gv_l89s54r4<_T&-CHkqD?{sa&Fpu3!g{^i=!u4~K6dqL$Tj^-(=LuTb7jZZUVDGqPwnH`IUeh`+*G3OrlQ&4Ljl8-@?Ne; z&}!UQr`BM9benmXW2%WsaGG6KW@~kBiP^*HF52X>_iE2=*gqTl70^@tr6yq$bn@^s z&(dVaer$vJ?X+Lack7kg?=4r;&RW6b#Jcy6-o5SXerMei+lFD^8kX-sq^-{8?RefT zYQ@tp`c2PEhw9$ik05Qk(~c|Dv48A$3(G%d+}O^4TdVxiB(<4Z+GDMs)Su_oVOR48 z)}__zjLc+3>pZP56aCm|X0G0p8+V{f8Vsjbx!?CDv&Fqh((A=Aw0Vv7k9oa5xNG<6 zE4k#o-S9cRnA~d0g3k>u*S2f1+o0|)czyDCY3aJZy};?Re`euj4zaUbtzY55X`f9k zYTLShGfrz;a=o6r*`_PA<8;wI40fyfaJ=1VZ7pAhyVRf2d@UZQB)SYeo3^i(dyzxG zXsQ#-%;vGS&ZEXk?>4&Uu-}=Z+FH=`*_-i=?LD`&v(gu1CtO^5+O^(UTZL$QN+z@Y zI(S}BLbukBDc`}N?%kO)-QRNK7|Km={E%&b4QlQ2V9?n-?X4MQlip@v-v>^*X)GJ3 zapS?bWef_zwVXd(F-@a%etLAYh31C2;Wu6vsyWM$*`)2z#jrJGwR_mu+}pGIaqwvL z>z&Pgk!tuHMVq&E^AVeIN8xmQALtJ=$c;nH>yXgZjMQ+L^?4mMF_^G&fZpZh($PD@=2Wx`$SY=L>bHi>D5 zjya|y8uRnLWgm;VP0XEBCx^y!J0-AxfA!QfGee^pXcKb~WX}3^agW#Q69#!NPiFaa z(XJ^oWi@-@njPaHQt9ok;XcUXXS5Jfh`k;by3xz^gvdtw8QWoPw!ZqoBj44}%$w?;B*`FuIOk}O6JMIbRYCE!Z37yG(7LK z(KxxckE^tMx~_!mZ9L`qazXdYdfYrmWIh{T#=&iV=PGNo9Aj_xW_G$K7yV4vFtUS{ z>bfUZD<*m%Y0>STjs5YQbfQ}0a@}3h?QSDHBZ)-2Z}yz!$lB_XEqgsj=T_q}kJIIA zFnO`Xb877uS}0FqgAIn*`ha2@5H_P!o8_%p9#&2 zTH4n;*307ABLudAF|29~|#z0@G2k><=_4`XZX%=y0Qly}PA9Jx`sC z>?q>%>|}YvmF^AOJ8#HdsbL@4IUKX=e9V*6nm@DdrgduC&>p3K>(%Vx(-YOZ!{<%1 zR&POe@nsvf@>i!;pWN)7OnSG8Jg#b!+_w(3oj7Ij^F$JFN|VE$seo#yFinC%+@{OB zdl`87>2lxk@owD`Vz)@A_v^T$$V>JP`sfsy?oe$Sf$krTX}g)uy%VExk7_4*KlmHB z!F6?dt+sPLY|X=eczZZ6Zu{7J=(Y@xodbbmaGdrCnCIoO4?(yUv*olk-7JRl+qtOu zohQ6Pb9GFae5iB$@a28)_KZ`p@M7I-J_=*#5beV&`WqzApXs#;~RFZ0?TZG0cN|jTPB{olIu=t-)Twz>nyr^%~cD zbQ!zHS-4ZXqId0dp6L_gDlWS2De24nx!Mh1*60#(9^EzzoP0Q(-30!Bh=hsw>lIXy>*_H%ZpqHFor%gLQM)6K*Uoh&=*_TUuH za(~-%jP$H^`%%A>blhn+d)VT{lEc2M4KKrfZa=PU@hVnbsZv;(hf6_nZ=APIEN&lm zPw^ywS+JtQ3D$y&(IUHqm+{>UEX`t|nGJ2V8j6(4%$tH9JYdN8$}DE17G248K5Cgv zEo$=6b9>%pnCq$8)enp1zNI(xixcaq-qlaf{^sOKY3Q!~|59|;*cJjX6kHGmZYA(= zcj<60vlbOeTI6XP+kK_IPo&<}3*DfGSmY2QcnEZX!uA;c}1I+THX$N3o ztU1KJS?ySeL!Rq?MAPSa`Zj!!83SB(XG(9-cDYM8qJC@)9MlZ8#rtslGKr$l{whQkaem;!;ZO5L*cU0j6dP+u*RKm7@M##yW866k`Yzd z3mH(5CmwelG@7zZBj4aWYIv11bfy47K)%1J%MEwy(3_M0De*!#F*RSzBX7H?kU!x$ zirylTbxcPK0@hR--zHa7RC8U$Xf9lLLT$z1bm5WpAKKxTnIDIq}&wWvaYF&`d zfSQ2VK)eYCQ*jTZv)tZQHvmgrj;+i+8tlfFvRf@sY`HS?O%+zeRM#VXm5$yNqMxO- z%=V!C(+xE;(Oml8s&|XRJuG8wZyvN(mGRRZZSiNlDSdP5en-`FvzK3t}D^t zQ`!=0V2raE8m9TkwR8~{(xL_Je}M1k-l;W&>P#@%RCjKxXod*7Oy{La`)r;da9_9KjY%1KIcW{ZpCcMvMi{r$r}iykMx|%%fe|yB1e#HY01b=%CT$ zd|MYcX2G>HF&>a@$kh+!S#lto*ibApY%+ftS>Rw>o>dw(=l14YCm!g>aORVaQ|>Yz9gB)0Dm&#PO_Cfxh$dHNJZ+Z1B0YZQeM0V?`u(IM$*{>1P6X7M zmiG1F*V{_E=%W3CvqQPco?Vm5QeOsEZ5_1y5t(M$JNTGTW;rWSe+Q8W6W$BJF7!R+ zpyjO`%9Q)Q?9gnVZ4>*8`XHn2mcvZv5_XeW$lmJbzSFLnVV7H-5FFRh7Fpv~Wh1=5 zCEKOjeVkexpNbkvl4EN1g7NucT!CXrosPd7HJgKT<7qY)p|C+}!!{xB(H?}y^*zx9 zDCG-qDpJy&NXg@5f2v$dyzqfSw?h}xlg2wpTe+}DeG3&YRDs73JoO#{Wa&^}kizo4 z3z#}RgX#7)kmezjif&UKIl&{}a$d(wJp>n*HoE7^O9PpP4PxXD4~=SkPkSQHx;M_P zEJ~Yx70QYrkO8a=;=0JB#1!pGmuak^uH1WM+Q%4%9XPNo@^&|lubfX z;$Xc21W{qy);cB(Pxy(5iT5y7Yji0|pHEBXBT3LSM7~mvr4(n-#s@IkemL3tATWNp z<0k{Ru82kBf3nhSWrT2`Orl6zGn|3Kp-8IvhidK&Jpv z!L{J`6Ma>egk<4H5Stnc`?5QlH2Yf`*7x}2ZG*)TnU<^ghm8CN^2X|CM9_>c`LZJX zA~Np{d-r#G8y@UA#xh`_E1j;wnJ_ zwxEms-m#A)Y{9GN5n@Yl~%O4QwghE`E;WVdbe}Fcj)C(hSaKb zlr2|o5BUw0I{+^pO8ATK=d}*J*J3+)xe>fCf#XwQSNa)O44Cg=!vGFqG*BoYP=?LT zq$9#Wf7pZb2lp>52hqSbQDT(rcR279*I~TrEWytf(|8+4lDGiosRD#q+%V_CMWgXJ zJ0kY*-}`+5lhtLh4KOtLxjN$7a9Y=FQ3HFFO31X<52E;$nge0e-d$!KN)&1fIlyZi zkp97;$pOwtr5Fu_8+djNRF4M!D_j{zU#(QAf98>W1j{=If8=>kUhA6h-@I3X*#?@{ zKv*x3nt$ju{*q>)x3k^$UhO&$-aRMWsQ*pbyJMUT<;NcUIvDSW-0LR->X#*;b_mpP zsN34FKl+xEyUK)ZRejbZWE+M{e&>?OKUsdaAEnc4cXV+u70%bK6f?Aq%ULwHYK~xX ze^uH-r70>_o85*}>p+CSc$$O0fwU;`=v<`eH**Vq+H@3ae$N!s@9EM%^-kcP;fDm8 z$hCjI?3)$GP2o8#xKNs(<~-El?y?ToJ75q)wyi6Detum@RCcU5Kh@8!@INA-n(ERy z==EeDiE+IAcKb00X}EqZ?b@RBwVl1-f9l<*&xFyouedmg8yZ$-!y!fXSjWS_6s8Bk zz~E9Gi+lF`<7m#jtUaJbb#s*0gPfy){Cp{zO`Qa+S#(#OjDh5A5zrw4bdHDR+KQWL zHaXbaOQxxR^7J6$fRe@<6@C|bzuzXS0z>)6wzMA(+GrNar3<3Ei2ewDWRFUPe~R?E z7x!XdNm#pvmh6i=zE2x-SS80Yp86GbT>oae)@=yWJOtrci09|A$z=6$H0p~N54JQ_ zXrdi$inmPm?c!}kM$s++UB+1>ahM^aT$W63ouN(_-j!}_tPh8HuSXliYSJUzPToCR zV%SR`jJMz(J1$!1Wc@^d7tiL5e>UbwBu_jx(=cA}UK((AdQ(CMpjOCV^6Q1AOZ<#X z7#J3b5~jRn$Y%tXM;LEOBmc^9-_}ROL6?ncshRZk!q#8H-k8-?^Gz^1{LMgyhKTrS zV{$A+g0(n*dP<@=7VIITBC~Lz+b_QWmuO|ot!;OrOk}DJd;r#Nn`2gde?<;uf#jsN ze1K^2{7%=yZf!ofr!g6ixIFl#{SkB>0`Xac%@Jld z0lLO`WqKY-k?}5_D%KlcLvb9u zWQ_7RMuv=CDo-N$<41LBZuo$jf+7v6Si9b#0y==&ftI4sPpjs7fBS8)TDu!)@3HF{ zA;?PwTxsYET*ng6C*J$Q^lWrjt}kb~ioNz(ltGu0qdU zl*Vzr1^ftbpFBI}%}Wo9YL7I;ey-u*1+)`(q71_?Fjn%um{uq{m_LU$7Lb|AFTJR~ z`#yUrpyHKfCVdkDe|W27H)h1MgaU6&BwsA5m`4vX4+K1Xj}7v&o9~}OQVPgR{qY6v z`vd0187LdDkQoQ-;X%uVr1kmIw#U_!0?_p!%PbVm)p14yc*HYw<(S!cU(!W#o(eeU zkZhW%=CG3XRw`N~iFGW$zAB_bUhj4mEa9BQ2fI*+6I5G*fBd;879{sC1A6^)9|5al zS!h!u@guwOT@fa^-|dF-ejm}}k3XJIQQBd>u3Vu5ew`mi)zfDYq>Sw?v%Nw5cKj;x z-|Dj#?>87x73Q%W)pvud1*hFxZ*IyCC#CG_Q0h&URe=knxXmKO@vLpN@X95neiS*^Pf| zdfg-l%1TC>E~X|pf|@X?PzRlaJhS1pHlb)swWTC}&Bc21kV4SDIYubMI!qaozTWzm z$5MD=y-|G{PT+vS_CS-} z6e5*O&%_Mw?`@n*fQS;w|5DRd%_E#o$m9}DB=-Sw(1is^Lyr; zxWB>ie}!t|-@)+Tw#+a}IdUy<$7kYE1$N!0UW)+py7?8E;pZ3z)HyIlw^i6sMW!S-3GVPK>dchX%G zl<@=foOt2(@e6e+pHVVrh6Y`~AG93DX5c@2f9MJq&6eFCC`eCb-XY5@>SV+Q;=x*R zeCWQXb884{+>#LvRhD*mUQT$CvR>W1ZkpqgzwJ3?UGR_xX3Q>r zZ2BNr>ccuuN>G$MY@UFA_01m<<#dsVkjBs|j=0R69%1A1*j>!^)%3AD#|X!AO_Jp< ze~aKIAt6h}q@Olp1~yWTai0e;T(u)rP;ac(J~ffgXo^y)t=b|!2}$6aN_N_&^Lu(9 z9__X|$f%UQJa$%ruG!1SP4e~SiKA5Q2GmhH7BfgAd+4a!r*k&XG+Es9ig>jflxivy z(gAa-2Xtz_dHZFUJur-(f6)Pxxcq_Cf3n9R8%eJk*sAvKD^O|$%n)H0rAJkTP%^8c z2u}B4^xN+>o|Jl*S9i+?w@Az)C6)6c2~2pv=4@xw(&ARkC>1!ge0x^3Mb^=DD#E)V zJabiBV~$`P2cID@ZHEZeoJ^|8A$&>kKNe}Q}1 zB-BGaye*pqv15t8A&vg*cm+_phoD_jwL4k)!^-Q{C#qD(?-Du&6gs7MoIp)Gdf5Zr;d6ap3)e;(6V-$NK z>+lIvQVT0rq4(`@lA)_=tKFeu-M10M<^%e??bXUJt&2y5QT(!PdXa?R*J;B2j&F#)qx>d<=b|HFs=)Hpg5qgMBNtm%j8kZBI=rntLZ6O zROH@c+}4-@XnwAJfd%38e?C5u?Ihyk27=bNTOkLa|M0GxVsmN_vxJKDhrNT$9aH72 zt}>|>OpLozr;o^swD0m47lJMk<16J08;z%I%g=mgra9a-P{xcrr@A$=N5};$nA9yh z9BdmjitDdXyt&B$9iqb+E_mJt z!qjAiK=A4JEox~KHQprlLUd>ssh~F?(@>!OIu*O%1tJNz2K^JY^6AF0)lXBx~9+(t_D68Z=%;_u-oi@+Z zlSjJOC$T>qe$Bbh#4x^L3p2HL2D;x91S?1OoI`GF`C@PQ)j0J=(p&AoODdxtkSIA2)C7fLd* z#VSn2ev;?aJ251^B&aLOehKD*&IdvZsn=Sk!Um$bNMV$RL;MyLh*`vdCOd`e{fmf4 z;YX0(*7L-fD2%W}X{ue?25ao);Q;#P&8Wv*_IMoOYu&$VB6+WflE zPL#`AtRNOFmBIj_$)bWpi#YvWAu)&p-3`qVGhZT9oi2;3%*~t_J!s zf*|=%e|iQa!FsK)iY>BhNj+p(gt2;2jHLGh8$l7HOwhvHS7Q2NM~+6Ki552(s@2_$ z%FjMC9D94z?=U5YuZUPQoJ(n9Z*mE)S$TuwyQt!_f9iT1Z(*a>e&Kt0L>%p$>JDu% zZyfBD_VF6&{5}44$yw5KCUnM-&G}Dm*7{Spe{43~SHyz-rEVeTYoczbd2#5c%%y|) zD#;P=J9fTnTvkmfO`I!@CPNjnIwI1X)F+asCg^F3`!xk%Geyp(FcaiKP@&Ov`rSBz zk%GmA_KpayS*BW@>9Ogp z3gmtp?=r0juIsQZHCn}6XCx@uLv}GEQkH1R&|@@;;pxQgEG4ZT#sB6nk7t`~#?=+8 zxiNlYuE{&Ah_xfH1y%SnP}Maj1SZ?w84(qxnR%} z3NS>umIIcsLnaZ^X1~xF!T9VV=cZqSBHtWLsZq=@Z+spD!6fqpS+cVW?5JU% z``LNoE^tbg?_`oxZwaPi+CFes^vARa)v3%DrFnvfvPJzvxSj{>)VOFr2H&_mGjqCM4OwhfAjU0McXQ} zGCy?EHNXm5*dR$+C$Jk5HrAZQR_tgqPUfMj4ghI?bI?OBz-%fOYVG7mCTADJ)&jRN zTMzFV$Vr3Bi-hf5D>beoBKwQd_s7R|1&gEuGdZw&ekGN%zH4txgBNXm7dKB9j|ERj z@f#Fr2!H=8s@%f-g^`6Ee@GQ3d-jyBxW*BF1Rz3TG;I!eMzm2qQ7kxx@WQMLHm}a% z78sxNk(s=5M5e(C+nR5CJX-9OxM|do-*}|6$}}1rH;_-OhBQ7dL}i~R+2iJ?AF@BR z49NJfZXNkBLK0$66C+*r)mg%#wWzNdKa=(3S5qJvo04*R&*8>yf8)dUpM;UbWfrgG zPgtZ*7%@MHO$c?*OG}=(AaGXpNeg;jT`_!sp(Qq=Bxf?!3;p)~m^tt$NG8E&2kz`z z`LMeEUq(%2CI^n|qH9=%I@K=IT;{(J;XSgqNxxcXwm!v>Et}>X#S0KOQg};PT8>1k zHNo>a40{xmF7n_=e=(21aKN{s7tpUpt3O#%u7DzanoGukYh`V#?O#v2z;%=%}Aj@Eti+?94UrEev84Ssy8qfP~sC}MQv zR_avbO03!Y>g95u&*L~DnzwOQ0I>NR6gL>S4FDXC(__*GUgDj9;JndImv|0``YYZ= z^hwzK(G8sH#ZT{1dCBZbH;iL6ku!?wBoA{iZBIiifBE#mGq{P(QnYv~xPsEuns?u&ly;V+}2iE};WyZVfZx}W3IffGKX89aUv06v^t z@O2{|rq*dk(I%Ns`2OvCNPkM{Ah`>IlsWN8^}II24d5e| zrOKSnX>Ta1Oz{hjOES+?THsN|6B=0hE0NsV=s(yBUve{#?C8Ned0ulb%P*f!4li#XHV z3RjC^Pr(0!M@5%&_s|ZEHS3GtoRqe<1OJ}3=!>-`gwXrCPI4xa^$>cjQU87yIv!~d ze`Yt1eCn}*GnSz7uC}xIwHV$d`_MF};UBE!Jm5TWe2?g4M1;Gbv=1y1Gc>DiJ882V z8C7B22ws|%y5gh+S`InawIdU9wfRQPm}bFQrFgC4!-%$Fu6I16)U}3VDw$u?rFzx@ z{4>i3dD zJI5Lq zJP0k8k`0qT9yYN+V?$@!`zDnQf5Kr`?m3>rOZV?T1%Cgj@p4s~fw^MhF#2R9R2pT07eR}NC?4BT{Gvgh zgSihg!sFsXqN#Hu>PP&;KRNhM&*uuq;>>_&~Zb`uk zwdkM=-<7)xWT@)<{aCEY^^E08v`@ld06i9u8)lHrc|h8*J|YVNPC`5lSNO=EK&;#T zr2!im)p;~{^buL22+eduDp-jqP4(+f%;4@9L%-BbQ$Y7JF6N$EG9G$bn6w z7vR5O7%!5*oTC1mXSznK+WO!u&$f?Y&a_je3a^h4@+QXjDycNVn_ zW#1{FdtvKPZ+TevO6se$NvC+|kxtVv%CdBIXviD(>fAD`i*7L><;%+i2vDJs${z7V zVOrnbe-0}ZbgdDAf4=to396w*n4mokIlyUO>7U_2m~g}mZTs7>Yw9{Ja0}6*rPZP< z1$-?pJL})acv3`)eKVu}(y~2;UIJu_f$y}gW|;Y@KFRKrAjY@}KokN<4rNhS^u7_K z?2;k&!5Mth^`QZ^W!pbyuG%U9*+ETU0u46p1Gc~qU!ldhf2^IL<3`^R+w@HmulEV! z|8W1uaMOlnx?fDelULHT**TKzAcRB|yO^O15SOGU=E3b$ewB`7EHSklJ?Vd$`Sm!a3c*P4gGO@0U2B!eaTRmsy*Yzeqq<^~B^G@#BPR*JIzHuuFq2x@P5>w=nu z$!zBBo<9d6f0_S=0&r6Q@^Tz;B6c5H7zXQ>m0nQ?0}!YX((D_L3RW5I!;B^#PJ#3z z!Y!(^`1kP>2;wO)*s9~Fl2jK=z}Q#!fT)`mjh|M+`vok5CFRVcOniZOl(c=J1QiQGA?K>pvDG|KMfu6Me`s>jS|9&KYnD!)7N#=xCi+^?<&+M|e=_uIn;PI=$lVvip9maO5>Addsdg8| zXS#k-Cm-Gl6)9(CF*14@seY_k+^f=J)k$_)7!7;pwk-QGq>55YqP*OYZbeozO;qzf z)m%9a((Sml{APBfk*aI;Am~8-cBZ7fKEG8;trB);uG9C{6F4@&jrdP45fi?A32W(n zfB3=t1UM9TPy|!HZU3|r$zeWA^U>SdZ4HnWy3myGhd^U{2>x`fny<6EG0ImFMco72 z#@M)T>zjlFbl&(`Z)dD;2Vv@8Ds572>EIO-f4d3;+DvpI0;WTL^{G8V03tX)bCZ6xWnC}#sy&F5+pz1Wvsk?$t%zV`->f^F$BwN z0>ch23-mQAm3ZV-+lIFbBgD7$A6_XaXw5GP-QlwvgI@qd8owq!Y!3}|!&W@@bs3EH ztd0v2#WS>%%B3 zf#VN%hq=ciU8j2B*z8M;E8qf}ly6B5ut=m-34a!r2Lm;Hi{*ole-{~}>p)(BB-h{# zdmBj3EEVI{J*%eg>1UbFa&80`$e9ZN5p2E4DWt>J4sAIY446qDCf+RDf7&9|+6$AY z;OKF=6Zb#SjI*dW_nd5rv*br8a{(((kEBAPK<(}?=p{Nf+p4-|K-0Z;f6*?}z}Iv@ z_^@Puc_J0;WKvo@+;K-PncOd;gQfx<|F_jH4QGYgVs4-YUk*^oIMZ%Iw!WS@#Q{5N zumu6T+PHmV%E4k_!Cnb{e|SRiEJtVU2h)N}Q2O^%=ec=;YP6e=e%IvUQF571|C^=~ z*g)VZ2$b$mc%Yuw#O0y@szBgi0K9{HjTnh+E%4$k7Ne*z1Lu0azhlFQVOM$4`_6)s zs5G0$Fo|;v86t{R75s_00XJ;i3Vn24=gR_m*{xPohPq$>AcO#3f0rrSuP%%14+8S* zBnv4x(vb00U}kD!$ob)+(fS&iWQ_k0N0?yZii4sIr-S5`3bnE`S>Amcmm#mKhlJt& zApj`9R^OT8-Ty&X^CL47U^FNk= zp>QnBBaxT9Q0(kRNKE^p^`Vv^`D!$b zIo$4eQ4C@LU=7U>`iNp6k|WIl^>))tOi+vYpb2|@{U%j z8Q`mm*4IrY**CjGaA|pxR5$a`F3x!gsAWM>>rCkqaJ1^rQWY_jZtb_0=hUIxQ%>9< zf8X?I?E?}M#QWI9`|DZFlF=Iory^dD0j-fd2Bj8?f3A)(FzrY+g9Zo{%%i_NH4r=~ z9ykjuxAUs851;Z zFA9!3BV>J&NVysX>Ar;Vef=6LQTzaxgNs8*e}^2~>VU-6(MATA2Dj=+;7V+^PXLdA z^0tF~sDS7LFaeL>UFk&N(|HnWRBUY*0LIhfqy<*IG;?UbrA|=7bCr_)L72Ed+AfpF z_$k2;ER-gZjct$TbiTO!J@%{sLi-mM+YcMf+9KZGS6xrbqMDAST4&Zr^gO)O#6# zfWtcKf`)0IHQ*#R@KIKHx0HrNQ$8{INk^$r+G3esd5ja88M-A;VssiE_pjqrf1wy8 z1e5pMc(HViBDULq}cWSmp*G2q7uu|as+|>C}1;S!9q@Y!+d>bqQ z%y$#Hiy!e5$QI0_f%EnTxr=C6DT^gNR_JAgq(CCrlQnkZhGQ%N^b-XRV)!a6aPWE^ zST*5Bv-1bCS|)(5vP6fJ@ptb6H^xb}scBC}vzvK+E|T(vKM7=mC?e~}f4~(&WgZM& zNUk(HGwT7M6M_(vmE<8ajIZ8nnHBu2Qp@S+hEeMX}T24HAbcDQY((hG^ z^TdlPr+HnjkThF}b!`2vHn@L}rUZu}UVc5M^{BpGe`d=D5-wiYQFRU^Dj1Rrza3I< zMZ8asa^;75)Gzvj+h`nff8iaEHT)*I@H3@ENz|_=g-8+1)N%@TC5nhq?o$sHj0mrR zE4?P@QDZCf=!g@ZNHC33=@9$z_ghKd2)M5nLls{(9I0mWQ^HNakcpdUfeIG=1mK_X zy4=y7&#GGOG>EoA;4UAr3u)Qg=o=<|AkHDzsnaflV1T}LTWBvae;DwV>p3vdwdl(! z!(|>@&M{Ss(=s5<17(HX-yQv?yoNKG0q-2E8y_eHIfUTtbCs{GS?#Q$6Rs7f7b$*@ zMZaWlkMW%w++!F7!fb_94D|2`*CiyV(*OYoz%ARO<1Q{De?OGY05Vzx%J(NwNvi$? zi;_`|sTM;TMj=kTf7uBVt1H5@?Co#Pa+@t}$8K34=jvRWPGgf4tYda`U~aKT%mkAa zTDfh2YH&|nZP;K9V}k!lJ`ndQNYx?4Py_Kym@Wl5cJ6S8V7bie&4Q`_)U_1N)cx!*jPzWi}54F=GNkR_()C6G5AiyAwa4jJDpO0_z75 zbw*#>Z$!>bD5Ltb8+*|B zp$B&xX9nX__r8p7I^uS|AMNH}QpZzEzQb)Ny8XF?Q8ZF%Iwa0>V`{3C4H75|5}da6 z+t5eA>B1ITmTf~5-i#BkSo^M#E(7$;sS?$%Z6Vq42Bp!F_OfZwH2~QUaN^N$Rtu(+ zajhW>f3il6Y|nMlq|}@Y%I*B91o^hG1dgxUzSb)WvZtYSF!n;Z&!;u-9sDeMlC%zb(nOYtHsM4hXr>FvBt482G6OmGI72Sq#Lh$%0xg(_YmdmD$Zm~DI9BUd$Op7ZngGshr_N<%m&{Wi5 ze;2;XOr?~>8L~F=p7EulEUFNJOQlgtX8|tQxw~3xiU;Q8yTBoK+7;4h!ryzb9wYn6 zFkSgo1yZWHZ9hR9c#6~c(ixZa*d$|6^SjbWB|98RS}@+3gho^Cwwi8{aX%P!qA^l8 zz|GhtK=E^{-Yq!RMRni3sHflKBQ%c4f8)O2YmcT(9Q5%3r3|1DYdx}j5zCpPEzO-GiW~n0uX3mnsV(4AuX~0&+gJOwA;X!s`GdiWeX5tVR$ZUb$RrPByc4HXblQJ~Nw`jH&gd{$1nGE@ zO!jQ`D`SLf##~H?+o7OR=&dvvf3D7BgB30tO&!N^JlcpY7g%2gMpWEE!|o&b{s=pd zmF;0}vEEB&!2$O+$iTh#?wNb<_3_n*_Y3?va0bC3Y}klx35<}n*V+RG^xgGHwsR+N z0m*2AqKkXflZms?eDCn>NgB$)Eu%Y|{&Q0k{egZ1VdREym$Sw6DydB^f7bCR$;AFg zfHj1tgH3ECL@)uq(YLrblvFUmD&xrIT@kZ!Bxvs~>Tf@f6OzDcJSb@T39jGs$_yY5 zf3?Qf0o@saa|{bk<3z1A9{!7eN>IN5S*=9K+_b3CE|t|J+&!30j!Mp0>9;(tEmI<% zHo=`*7l|v@Qe_kl{1Tv8f1h5d@Xp_ULy=3@zJ+_@xVSa8OC&9mH&q3sXw6J2RlRPu zs}T>2-OY9oX5Y0E}h2)3mV zGMzoMX72O!SuUTrYDAnu8`zX}<#^QkcL7G2$5=B|yf|C4uYcDie@4!=Z$B%?VgAxw z2?RYWuj26jl@~PBKJ~{Q1pZ34K{VmDV+)6gze}RqCO@urzCKkbQFb2u{SR+mR*fBvC-6g z$UdeDS%ykRqf@pe(^@%D1=Sm+)nNwja`bIk7*F9pT+~;Ke`BE7Ao~G@M|Xm8P8qO- zaihFtS`UOWIh~!MjBcOn%38w_NyN|E4TN1+$_Cqdua=bU<&0d&2ZE!s6l?Bh%${R)Y+7krpvIH5sQNv3#*p14tKH5S-O`MXqZ=~ zsUQg>S5)G`f53bdj$o@73)882ogg+JTb`mbaMd!5p!?u>Mk!!I_5!+rKkh=!46;5i z6$;Ha-2qW`{}wJO*<6qWi8NTK#gm+mCJQoTAc~EOCcRMH`iQ(xCXID~aOQILPbkmQ z#{-MBZ|^P5=)CM*+XW*i%yU0L46WJ$0Qj6Js}T6Ae^VU`lHTU7@&Q%aCD#gYpa6Ks zQ6H)i_GmJ;CUJ~C4JnMnNDv%RcN^|H=dkZF91|TdJr+~yOD*xMZ9$ z$xEqu0reRU-nqiTC6sE-ESbFP8g084Qf5moFLu0}e7P3kXyw)?rpA_qbw)@}PpiaC zuklo%e=#}2?@DyXlejh3Y6gH)(L}Y-aXoFiqU_D1x=8zbiV@?*V-;LrG%a7=cNxYp zAy$~xAYr?v64O2G$T~zI@mUuUNswCYO|&05_-L=;^h_ zp`vNSA5S>=K%9d*=q-3n>mPN16hm3Sy`x&U;Q^TGjXDb z;8gKGE`I!;5Py0*2Cm9ybx8~NkgrYuYB?=%x%7;Qz6HhWDpU;ik^{t@KSdhw;Rq!0 zf8pm(yh4^boAjc^Z1oLho=SZj*;%blo*LGjl*RXK3k!aC`$Ciu1cZT&Q>WtKxts@# z4$IY1uLK1RZ>4P2Lf9SrU5e!nhh=bPw2?$Bvj=G4#Q6Z=659cmAUHeYp1o#vOk24WyV}IfAi8x2m@g~+K$I z^@><|xu5U4*`D($V#P`gUBEp+S5-?`#X3u718KwxCyk+|VtQCqR9JXV$z(ZWj(y_j@BGdSNW|EwGz9 zS+VjS4I;dk_v(g{Gn{FHNJV2fA2|K}aRB2FU#~JB;G`{S8+&fs)LJh%4wi|45EzXE zxmQZgnwipfzMP(DvwwVDPN=3Of3a#X8F<(8u*o~D8S+Y!U0YER$sx6g65*?XOAF%~ zVD=n#;Yi<~AdrfieWwVFbp*&PoKc^;xGUt`}m=yQ!V*3Jj zKEF-kl3Qj4Bc=r$Ppxwd$^XxtHQlTuBMnv5%pc5QVB|6L`^GxDDMF8?ozCJYnfBf`<0yc1hrqExcoBfjmfPYQa1htI9Qz4aqYDOUV>uLX! zLt#eJk#sGUZGryF?@u!G=MVU&bY~dGNs$*%|89dnj_imt3|mq1c)4A=q>=a+Pd@*2 z1<7BFBoxhIQ@f;*z%V@gPi7$h1#pgxHU^zZFjO%NV}XC2iq>B$e;2#Nf>0hgVzvy! z5dTKV{A>A2WNR{v;I@+g9`}ElDgQS~R(c2|pTsl$b6Hh?{Yl-ww~&Oprp6~nP3TXP z(Er7U#a|{glKcLJvcL3i{Dj~izIXbY2VEt94Y5_`CR9#HN!J{K>l?cW+awaByb|Yd zoFjNb*Zc0C!>W3ff8_}hyPlExT1Man5CD1o=bGykd=WO??Gk{(_FK>T1FZt5+nfuA z&`TAdi4i0>LF0Q`R9RWaW}%YLj&{z z7oU1EF-97kP;zg+=z%})-mA%$geRbdSI<~5sPlmTP&iDXf28^IGv1O%){tY|$@E{r zam}CE5~%!E-~#2{$*=wDv}5awEaa3`Ro#UK=gB;3GUAThr^@q;pwlvE8pL4YP)eNF zNK$WqNE=}VPcg~21P66^E@Z$r(Xz-oK60Zn8x%l}#-wIr{RUR)x%<)?u#%Uz}by@;A zjfd?xjnaVE+Qw~`UdU(iOj9$CVIlvP4*%nttT;S-h;2TImObACFAkDPmPj#I@ z*C@>tk?bKA{t1l1mP;CYG#$_BU&LX`Mlv{w#=;cE;`Ro1pO|vQ!q46L$H!hWHyh%z z(kVLQf1_>krR8D@S&gcwU-wkbAIG2sb&eeiGuq5J+5?&{;%rFk)L*OE-t5efG;75E zc&K}vh!24h$H2GbZE)#)FzqF37r+C1RU1+g=K*~>zdFC#m;dyz3WB*IO0Q$ax~@Sf z&T@(FhE$J>bR+;i0kL?uw_bym$tRUd!-7CNe+p|RWum;gj~@N#^9H@liJ<7{73kow zaD&SG^jqCbo)>gJln3h>?T`{dg~lW9a{XmP)zdpR``*OzexxZOfLEL`Xtc^ugmWg5 zgWnEs6k@!gF$NdZpwo4!w^b1244uVLi&-CxEA_soN+OTVxf{IGMEORdX^W1n8h=mg ze_$6h_2_~@&_!T<;q$j{*D*fOl|BAy&#twAQwh#c z(-%Sbzq8mX%4KGDH*gCk&BJ}6KR6jzf5)_Z&5M@lk*{n@uX+SFbj^>xx$r3}nl$Eg zUY)!2-}!4Yyq-AAY9qy=C%d6-GnrPgstk)shp@h^UI9(sp$=U^stF#6m6t0st;(OV zy)ma$IBb_gETe|M$$$Nkj4?c#$1;Xw25a1}zeu5W`9nN}=(jz-e7 z*!%(il6bebjM^T2C@=uQlhMC)?Wc1wU5}hn841>b7B2@9a<6ikC|zzLnHWx&Uw?O) zBja1a5_e=s&k`D>k37!G&0jw`* z%P+KXPlm9A5SR)VezV~s2!8QawM<=7y#gGS)P;`Ub)BHt07;}Zzd(UbKX*}|2;}V- zG~|yYufo&tS!L z18oAmVBI%x1doNn0;g4MsNpK(X`e1I2sN1WJJ?!bg$shoqd1+obZ3aY#F5Lkx7cW( zZb?5-?h=*((_hGRGb{4s)6VzY=W}Ed*-{FULVu9i`qTvBD8fY*f3}d`j4HI_m;6?N z--4guEH(J^r)qd!(Q8cFE@V!X%%8Yi)$iRjXOTVRj-8pUe&@s|;pl1tkpHML1$aE1 ztwd#Xmi&O>6Fi#}0>Nk}!5^1Nl3x0%*@iQvXXDt}?HHBW33dr^xN$}2b6V5`S1p&l z4s!i4wH1dXAFYK^e}b(anwYB&H)!Qr%If3*8;*zSrc+T_t!xDwW8L@x)gqU*-w!%DPQV!_*I%T zJ=I!1BeQTgKr6ep+|MCGmc%I+?{>x_0lV>Pst+b*?HSZUf6zRxFsrHl%#z_q{nSn0 ze0P|@+3wGUL2EuywUN4tcB`H}-(N>EDNAeSO#O1kt_$XAox-{nB@qJ~q{*A1X-8b5 zFv>pcbb<4C;!K+)gKIyx(6T3e5^heciB=;Qpu{mAL`sbDB1Jn8_x&6Y`d7awn?K`V zHT~x_?9{&De{fI~tQJ3{nFd-8@Vviw zt13r|XW_ft;&*VGW>Rs`#8|Unz{yQ>)pe@8VzTGte@#D_BrwOe>?i_{4G_3W^9Ppr z?N;CyRq>4e4z(jgm%4o1>3}V}dU2z!P#!BRsc8&Fx(;gYxVZemTx>~yJIVvO^^h)% zH@LnK7la9-ooCz-!aB4JD@V@DEl%*O<4sB%ab!GcgB-%>>K=}>UD3@Xl3pZW0=;$^ zw$Lh{pS5kE{Ngb1X#YF9rqVn(&A2dXQk8t3f1>?ip_)IdB__q{T4je?n-pEZ`O`5>d?AXkCKDFnz}nAR%+-dI zC4|^chJlwBb?sw1L7A=KTA#-U36GmYvt-G(B7ZHrBb2x0mA0mVpx-i_m})?chD|H( z13bS?Ue+2Cz7$E2EF|@#)7(#ijZNUtmjJy7UnqP=LX4uft=x|Vt~3j#5dH3D|McyKzBu+3b!^0#UDc?BHa* zQ)IH3kg7A9+?edm8B!Rd?ck)>s)mY45)Qwazhrca7RfRExrXEKZP9^2koRyS}6VU z1T5q})17Rjx*GI|F}fdvBpI8)@D}`1*V}UlG90x1{ZdUq(b9U*<-q4Ejcf5oiPiN( zgnYdcn28*OX$d}6anr~_ssk3?YFO1@E#-qlDGGfu3POq^5N4@r<>EAsita?GLVvW! zt4cf*l7KC_8Xqb#T(U1~{F7gJh%eP*g3=*l>IouedBBU;!3}WJT-gH(#@LcqG8EUc zkVTz_5qhP;%+ghOr!xM2?$^PvW{>_yJsyV|Odq4Ixji*;c#e)evJ>y%MYld1DPm{( z!`IAr!bF8yx9!@ZlUE}d;O}pz1AmZ9N=r~iPAb+Jc}`*1*pAK0Hs(I;FG>=zMO>8X92b6JeSr*Phu z&G=h=94^bgF=~f=3PLfp_gQs~K{fhbv1Qe~ zE0u$UWEhS%DH(VR2r2hG;5=v$gxzn&b~gL5PT}0YiSWAimy+gXnjC=3m6SbDsSLRzQ`9Ypu7R;1G9Yu1U0_ zh8s8U4%t1a%5w?R*nj1-u~9{DSl_6lgUG&-`ghV`27Sl_Lj4DK0GR+-FuKFdTILI& z97Ip+vJlZpuc#!(nz&LxgTDv~t)^dCIfZhcEd5QsmYe&GkzAM*=G^r4uQ=NXU5q}bqASi~y5qX?WwFq#} zIUz*|qi5Un&*6TenuPfCYz+BzvdJp zU@WnZg8Tymo5DkQa}E*VbmA#!Fv8RFmT9tnY5y9UXNk;mr7Qm}P`w{3CR8iXso$ikI3mk`DuRr9wiGS0(jFz|%D-JA$Yr$)q{;`!; z>FmqZUw{KJKe`VR_Qv?KY!~2SVho+(FId_i{@>BFX5Q;se)f^Ur8i3341RVSS7;fG zH+j9cU5&4;doMwkz7dX{X_??cnx% zWPTb;a({o!qSN)t$Xrfd1QHmei4`Vm@I5^LYSz@CNfm(B$C^tDZ+m2)wZY}!#LSBX zPm{E^&A;cdHa&_fFN=LMJ5%Lpdx#mwE-;3=xUJ9Q9G=`g%f%ZPgwhqiK|><4!zCK3 z3x&Z$zVC|X!_Tt-S>ei)Lt+q( z9oH&5naI7XHh;kFZibb@()B{`@1W~#h(i(J+=}ld4&6TT z$7k|sM@E7T1DZ&(&;Z-+$eo?)E`B1PDxgL7y9Y$ufTUveqt&~f;KYzBE9-$72;Epw zyhZaMmj3r!TZO9lAr3y2JQ~;bMUtM|!<6GTQ+w{YbPNg&MrC4?Ot@g~O@4gzrhns_ z-^@M%!Zpy91+N1+VPDOL@y)`(ok67r{11Toez@ZsYMpV>^yQb{(uBO)6>T)mes64NbstC*kTZ+ zR6xJF`1sb5;sm_bbhFO~)dXzTIDe)_dt}53!S0-LYj(6kB-j9!e~KywAMJkHOJ9waEH7Wx zsU(ZIL0BnSY zmy9dqM81f2O)t=v+^F*E?K@g0=RjhT#8K+k1VMQS3wTH`D3v(V>~iSth7g5 z#gLZ^B2Oi0Ob42|P=TwRoMJOy=;1ZL;?^W6*Y3Qot>BS0uY+^6^qT-P-F{mt4PB@1 z$kbK|*y#wF;&Jj5mZAP#l{qyncW&=tIe-i8N%ABnlo#?NvZq|b9)F9ICc-6b&RCbi zaQfJ5<4b7ShtQV}Z{$q|Qh8&53e2)-hbs8&J%{_kNyJ395OPe1yZ;$33{ELHGcD5eZL13U?PDV%8pvFEhi)Ux6ay;#nN!`jRb+=@V2}k#^>9>v z_z8t>Do>PT>(AD*yo_;`eU>h4j!KYSG^^5%gkC8DqZKOoSAXS))V&GNhl<41cHqm9 z3dw}IZu10^(J_oC2Y`I{ByEw_om3>FaI9>Iz&=S53W_buHEG-H#eUEN3u5aXU<{jI zmU|8(*DnC(u*{~2hwx)fnJfTaaOkRKg^no>-NbPN1eq6+dZ-O)6fD-IF2BP3S);fQ z8ieY|N8y)UV}CSuQ%w-jQQ3U=>mFhbqAS^Nv-syGq=sLlyybTE8+J57b1aVPAQ|&A zo(PE@|e$GROQeQ{<5wmf@k3qW}`B4xDA%ESPyU0|XP`Z9f*U=CLxUKmO z=fl8QnI47NK&Cl$>A0?5 z3Vg~3A<;CF^8>!rXMARH@3m@@XSM@DU@zG(#7~It>F=BEKrSq1cp{Z~75T{F*`Y;O zI}p2X34dB1YD8wwDqCJ{XEQ#LgdszwE}3){B% z;@yX$7d6z^gY7#k{pJ2_IAL(<=`i-+N6?F`s()Ex=hiHLTVimDa=dPVSgM6=e%VsV zy-BJ4F|)EBT^dIxi4MvcFQmz`+bNkq?J;hT(pH9`@bBI%>IGYp!e;V?l>uQS6xrvV z5|nW@6q`Qh*)E|U0ThxH!zW1b*)tWuoLuB#k9j^3ATgIlUahBs{Pui76O|(!%)qRU zr++jpG2ziebxfr<>a|hu6rc;3cbXt`h$C2%V&-(_8*Vk~OB7n22{?!jH#Ct%*BY;^ zwsh?zK{OtWF73l(;LOwH>e@>^&jRsru~obX8iv_=dobRTpd=DQGvOp!l!UOqY`yaA z>A6{LT^X~u zzd}f@A(-f40fy!Ga_r$Z5+mqL^?GWqVXXB-Min$*bZQP$>u1@6CdONZe8549G+5qt z83jGB?x1@(qUXYSB02%Gy3g!?;d_u*B-tj(_Wt zSb=dJ-;`oGzEo*<&2R?Rj@Rsih3EpUm7E!R8$pfR!u>L*jyr_kwv7dA>*2cn1I6hz zN48NE#;xF4{j6a4Mbly1sv8pbxNz-E`TE%u0TVU8OXw?5$XEVHDk#fND$7ikitvzq z%aTU4QG(|Y8Y#K)OV#zy-x6xfoPPuzr)^54#}FS#H-my-%Af1;8A+VpwpyiN9xJ<6 zZ;lja^O=@r1plu$__UhrWLZ-2ar})UeL*iGkWfon=U<@mw#vFm&%qP09wt6qGzh=R%zmP++k`^zm*2N+)< zMAch&m0-T%lmvYaDrKOmZ92T4)(bC3SnwP6$I!%bG;(wuOtMK-nSWR8vInB8O&kFP zAi^9oN(uoF#*XIb2kIHXaq?p#+*Uc?e1_@XeHfpwY3UF<_FQkaeDkR)sz)FbCaZ4- zk#YqU1D2hm0m|W6+`gYba52#W>9-FNl>>A_O1DXl)w^Hi{!XF8wv&K-pWrGEzmo;5 z6f2sPcaIT@HYR6zba5OEVM@)=W_tbRDiITC0{cA5a57BBp!1u$?0EMf*Of`j#W2CQB4 zgXu;morex%?4Z839r<&rkWYIt&pk+|tZ8o*4RoWU2wWxIXn*f6>Q~Z2TOX1}qbioQ ztJ~EC8|*I)0KWIy=i;}%bPJ}98g)xqZSs}T`0H%+=iJ42!m_N7Gl%Lr9g)JYb8~fI z0!25%9(*n&aT7LaRm9Lt>bKG6J^M7Nk~HVk_j@?NTyxW|PA2S#wAO#4p+0r5zxd9> zWv4CT=+ajx9Dk~%Sm(ILD$X#5fbZh#eHZi|dCa-Ee;2>Ls3Ti+p#zmHgNxLUS&W#D zg@jPb9t^JDI<*zZ!1l7vFa7y&a?a^qlr&L?4qBmYQhu4b>|`6?Ru4F)VcC)){a?ot8gBF|9@gCll-#)d5*z39P;; z5RJ_6qJLmk*$qNh@aph|LU}!$p$e2N%0HS<1NMbv& zwwu;C(MwE#+CqROU7R?LZ`s&54|DJ5d4EMe+)zeAJX{p^J=n04PM z$4FJyaXH40^B2fcyx86_S4*tm&M^@-m6a=;^XwNmxUdn`pZ!RiXBm3}F-rFV%ny2P zP>vTeB9xh2=L+WcCNm8ybEy|~p;3nxt@jtD5L_RG%J2t_Y;YnC~;>^G-=9RQTnsykbCn*X#e@Mfs!MrqbFZt!X>VF^h z1v(TWEG_Vss>y^*CDQVZ1ce?P=jP)bOZk1RvTrzOFJXAepc2W)49@u75XL^rsKO7h zuj$#7ocMKT75?_lXpj7v`2s)dfH?rZJ}AwxGFtzs73@Yq4}>9)n*xCwu732KOz>5K zsoJsb33=*{K+^r}A@cdCHpKBIIe%b=MQ9&b(&Nl1FClhmvyptqeJgP=@MwTmhBbTN z^e&EA0!ADiGi!(DR9N8i@%Ad{xe<`374WCwEQkK&_nu{#E|4auVBv$ijJT+>t6UW>Sklu_=ibDjiLs@3nXit$%8`MKJgw zj~c1e2O#AeJ}$jV$KLLaAFjW-PIA#x%oG^bO!Vo)?@dQY6>8lN<1LMZ3ooPZ_J)4s zlzlar-+`jrF=;>n40~9ed*7xg%tDgyikNGfXF<9-3~ghdzn!3FJG$R2(K`iQ^Xzdk zk77(3MMYdnmYPn$_wz&&OMiy=VXPNG2IJCe4n*!H5yhRJf3=@fy)>$Gy!;#Fj*c-Oo6() zg{F|7O@=JYKN{1L&zuvui=5D8@b|)hdv-{9H=i#SbiRaRK$6++a#LN;oXC8e6{{Dq zx*b~ElHI_NA+4v*M1Pw-9_L)2U{1$*3w$=9{+`dv zJi@cq2i5Z1WiwYsbz1}3wGp7ab;0uA-cInLJkNm=m6g(sS&bGM;_(Iy-r>JHBi}ppv1jMB=4;NKwJ13>Y+# z7NxK~XS|=Jj(^8H*%JAdnm#23U~S4|i)qR)aC^l3Tj2Tq6|6OZT7)q2V+u7`y z!fd^u)v>Mr$hMTByf1c=9=?5%^6xCX2IKk+dr8lh;I1`L#CzO(7 z15uW}J)4!J)Vj&sf?B)t?9N4NLIOZUI)t);nId4t3FMGoR}TbuknM)Sy3E0NW7?l9 z{zd|vCCBqS^FVDBG^Yrp46xZSo_W#z@sGxTbR`SY+n;($szK~-NErs7@@5E8wcam& zD#)SPaw-r!}fe*=|K|MEdx2k{cdBzuyz$j#(d=HQq z_iBbdq9mII2bgnxyss~QFomaL)ZrU|QReEJ>ssc}lghVHOVo(j7Z8=#&rJDJzj_vCZ4=Wyj#gdr z%70?&iiOulJ^_XLnR& zsvfm@01Lvs5i4ED;W?Tl5nk+jn>$HcdJ#(Y^opzw_pOib_Ly$IcTlgB!i*n7UZ?q* zU!i$=Mu!!YA_2eGh8YKMA_f$OF#EKDd4C5k+BmUwKc;DO7PLZ7n$g#Lx^ELaCG$it z`FJJ<9}nY&UupO(cld=4NQ#IJ)MJB052Hrc?|ibN0M_ChY6Ymi1FRbuBgWFd1Z#Ol z7dQ6U9OwBa$OC)3jeGHvWW}}2gHfE)`Mss>C~D*6yulruCk~dC>9iSwY(GYM*MBg~ z<(1Vi^Jw2?fSXQ&x zqx{aV$44619rRc7{KA0kCCL9TM_JI>p>i^;FS`M`{uyKoOy` zYpE>LW}D$4^#vUM17weL+O_N!qk6? zZ|9ECOG*8($nLU|AZ^Nlp*XR-85bi)J2W3)EyOD5+I@9X@jk!YqKkjqnOrE2 zha7}Fl}Xzmy8r=~K9%7+#VywFdVd|@4T6yFz%=aMVkpeRQHQr1Z}~p9;y>`7PBzps z1A8$NcYb^tq6RY@?$Ehnzojy@GHri`JsdqcF%}d|HpdG+x`_gaD1_Ktz&spk8en=w zA5-bpdUmuse^*GM?}aM$yW}@6wG%c}z+#o`K2HG;^7*ph*kvaOI3YB_W-5553t|bP z2z{n8rjdr{as*30^Xb7fAA2E(LGtwW(B;w#2rCQCy{gB`2R*M{2)|!n_%naGCZihj zv3u>cn(aB+EFt4biTGh#1J|Fjp3Gn^kW?L$OW(hY0Qf;KbO0dLXDn+0*2#rL4Cr;5 zSJ#?D#6;pdhyuRLPmpjysBDTZ zh~oG5lJ}ysoQ{7}S@rFEdz%C$)9~mK=FxKkFFxWY-*{U`PBgOR&ztLh-U+Rst4?ff zhSwpP4KKfM&5H#gx><@yKHFG|VK2W)nVV2wjLan79x z$HgtLr`J_tfMB9=oVRL46-?^a;GdezSF|{B#$<7fN@ssFIRSw@1!?4ZvQ>0hczN$` z^{kuPfX9<0ilr0hFL(^$GfeD6aRt#Mi+h3W@rtJOvyGv3pEW1$<|*=>#c3{Al!a9; znxLGYc;2%6Wf#|pcRlLeW00~<79MR?U(REkPY>Y@OME?>E;}6Et-0iUt@)Pi`XfA{ zwvVcf&U=6JeAImLY~Iu^>|d(3v*h~oQ4S|H@ZA(Z7@xG+;}Ku>J7I`VGCJg5`#eBu zfzLiNXy~2W@y+@6=}3Izymy|5!$x|&r)c|Vp0_tM#z^bqM_&kvx#UJvQkK!mdWpnK zdCu$ynyR--ef_hP$u`z4mX){q@zy1!0^=lPnrwe5fxq0yBURtnXD0DBFYcS=2d!UN zh-YWd7LPE1K__wP#4)cnpQ(gw3;>lJ3}9P2@Qya8xyw5q&ZH z{%)On&1pPQW{qsZ92E}w;{|-J86+v#-e=6aCv{z*j*}0bdN0T1_T*KjSYDilPvx+2 zPyBx-@U{JWc{SI~rEG^wDyh3yyB)^D7D_hjc)sUV@MPcj&-m`V$s^rup7Uil&SVo6 zurhDKW4i>aWmF^bGMj5t+_G+7=Ap~bj_r#*6W>jh$^YF|w?iUv*DxZUU> zLRt)pqv!bdmgLXPi$5a!&Q{~+H9RK2c!z)Mlc5m zyuB$E>5vIZ_p#}F$(YxrMsMk%z;on%;DN!r@?}=?5Lf2yoE)8HZwObn+2nC^r?Bq$ z*OS+#e0QCi+iRKQ=I!64#*s8>ablLUgg)$VUbk%DAB)VzW_QfD?k-EGGw{3J!#Q8C z&Th}|9|#?y^U3Mv2j#>)!?U)0G53F*P%N4P;@4eNiC}-$B_YN1+yYJKwi2u9uz9v`~9s~JLmg=?b!vpdEPOmN1nh7Yv1->uc8%}`Xf#i5#$)3 z4F3~*!nQ^*queJJ;2v|M1)_zYiDTnQk*n7E9+2kT_Y zs6Erj_BTSnNaiBEIv=&=P;6xPklUMbYd3~qbT{tIBzt_zYV+m?9GHJ9k8O*0rV^Pc zV~-#F+HO#NWN+)!db1yZ$82qC0c?259~UQ$glw$yB$D67vE$0eIdF;!Jw4l+l!d|h zd%RqxsPT}f7z4`OaAsNUlMHcx6qQn8Sjk`-q47_T{=`4@>fy@dc?V}9% z*+kx!zsk$beL4PN5MRf%Xf8R^#n5_kw|Iy@8zB`qlUl|{0Q-Lf$NGytKb;+Br_$IL zeeN>v+lZ}g0FNgn<~FpEwm1o&{pYL>w&2n`sd{8znH-Ej79TgOJxKkw^Rj9n$z~U> z4|46f!zA;j^+0tHtCh<`o0_5dF}Z2Fj`d~Zy$dcnUy4P`MOu;mKK0>gaDg2&_H3Pp zKEJ_oZ*?2hiMD^XC8fxyt@&o8b6 zn@E?9PW-ziG>RQ3Q_Sxdpg?)YG!%l0$iu-E^c_i({7upinHgRva6pt)Lv4fC+f0AXc#fJ&ebBP3S=Sei9^i;n z_9-v#T+PafsRxuW8o$}oSCG#m8Y-BK#_M(=zOg@3Pe0h7mgom6sU8MN)2Z)0Q9(Y^ z_Xwa?y4iV2>a+05&TKqQ{lk28mJq1p?LwVT&rFAH&V00^|I7lp-hK*8y^@q(BSwyw zPAC<1;$?py0sw7!|FZSn`Px~KTAA!`d}nxC!Q;w0{gBwMMBM6NKfI29WCLWBfxV-X z-1!(%`Lr^h<6E7*IoL3jj@8vjwWIxM9BveTxJGub(%|-oV9q5_YK}*%(@Q-aM`_aL zS?imNR%4C%(6U$PYx8a+Ml<5fXk@GAch=63mydtpH}O_}Fv7*w$I}Bg&+O(5X|%4h-7W=$Z8-(+|DzxjF{~-Ooo|Z&U?Hp;gl+$&|`H;3WZZ&TzBT# z#bVw)qYF>W&uK|U_#oAifAUhz7&+fC4D#yqaBH~Uzu&sOzn9vK@Q!R`7rG32=&C+v z`qzIWX4_zX_vK;p2qUO#!{`fz3nu@1aW6Z&yf{iz9OBQbc zRLWENKD@ncT;h#z*uRh!-fR8^faR@hUHh^FR<7(o;yd<(E=V(XrPd^e ztdSF%X)g3%nI~B3JEoGp+r*sh4xi?j0;hiwe2_JSb?|`^J2`hJ>h8o7U7whZY+)7$ zxj!K$R{H4G0=+*o>LuK9dLJ8`Nq}@0cBgr;=+SIHAilKvHr*QbD}A2${pA*Ht`bL| z%)wpvi#$B;arW5XnZew1w*+>63Old#cf9za%;Kp*Wa;nXl=sZ1cZ<)?Z{EekiK2gH zjz=1}*?!OR*Ib=lGFpHbJ5f@L;|`}E)pJNcKki#NTzyAAH;bV&48EVe@v!U%(-6Ss zvv5^J_TsJ9AO5+3YQ3>G%cal;U*Dfjn;3xCbrrHY|5kXS;jFz6QEj)kjjdyUnGdDkg+2*~Nc2_kfU+z`o@ZKJubs?Az zS%lptrCi+#@3}_L*v@K5IpbOdG#`#zB|U8He3m_nIa`_-d&@N(<21>uNd@vN3qJf+ zZWj>Tv-bce@gb6r90Z4}*#wo@Hip!Cac9{lOJw?+!G~?mRJ>8|SVIgSA3A^X?s&QO z%yfA_&JPiR3-38E@Zw}5g3awYsK@}iYK?{;YV~ouLFz6Ij14W)lc6>ZHy_BBKW-&yGn4@AGd9ciJo8qQ$c;5- zKi7>S9u*PzWku)Pn=A*{5ubnicxOKy|CpbmSu(lM;#m7>otB1-$%gaCJr=`@bY%If zzSBM$2u{W=f6XOllhcdjt~w?;-jm&@xE#acUYhfRDY@5dUOMCW3B>v3+1t|C?0KMe zxjMB|Z7|ne!ro#U?$~vCwHwXWt-&f;%8sN}X!gz)Ng;cQF8c2N%ou+!eKP1H%ESF; zG}k4P5q!*L`Y`M4j498(pi6i{1#{KIaifh*}3&0I0WkM zkQDFYt*Vb?`dHG7Iq!cCTNmt}#aMEVBj9V8=@z3rq|b+apSI>@v_Okwp~1@W;lRx- zHoQxLe$t)jrRv~xy--am6;ah}*a2%RovzR8m~R6#m?3ArK3q=5w7)k7)wzL)kIVCt zH23_~O?F&8-@fg3&=QR7l~9Rs&leGfI{UoSg}ohYB{{L$h|qsg(=a6!-=x>aHSoJY z^_uX>@6aq_dw1|=V!e4y+aN%-83T9hYL_!x108}`^`7<0)1-*}4s&=1Yb zdaoOY?hmpsstJFcMSTjVIc2gKXEhf))P6HQ^3(1CuKUaTTqocI7;-6`N6pU9 zhjEc7q2|^7$#I@W$FlFee*!UTH^HIt&W$Gn=@M_+V>o|2h6PF+RL6Vm!dmzx7vlIem;zqs>mE#G|1;mu4$v*p9}}OG+NIK> z)LcG`+YW{L90pm~9F7n6nPo3lQE!$|>-W@jMOfzr9~=`b-_j?zb93&Mx*2SAgDVx+ zx4WBlz48Dlm{wa;FO>LRBR%&vJ3f5`;&VD)oKJt&KGKxG>qco*T_?SliHh$~EMC>S zH7tGsNLA&+@$5>|vxCirq3PHANAP(oZree=Q8)GJrJmoqz;^z2w_$E(bT*8jn<_nO zPBwHdBRjk<*FkFM`+2jd*=FXQ&>QQqtcLB4mtO)D(#lCb0#}DERhF2CL?5=wX=5I@ zEpC4TquI{Dzdw4$p}R9RJGa{IYsw`fbs6`1g)@FxStD97^`d)W2SG15QYHaQyPHG+ zon6>~h|g(h!9bOU@%eJbG-W5}=)YLzxY=OPu>0d_V}KieKJbER~8KtCB0{aGAt{VO9P2Eh7VFYdc4S^_j~ea$68|E?s5 z2=D>a03t93h6P2U08miT$Hom%B;~+Oq1>V^iA;Fm6ihM)ZY-50l_eUMe!(|c>~nwP z2*yb~5X%C~RXTI_p6mzdmAk|PJ7ngb;VA(sL*IqQf+!9MbT_i9g}W(Ia> z#bhE#QO0C~ZKou|MfC1><%2T<*DaT~@%d!DLkPiQKeLjgjl)?ZlOWfe!_43H?M{091 zH9-o*3M*RdvR8IFDNAg@#Y-;=W~gEvb;D*=ClG=p;` z9bGYIpw$R>M$o4$etotbsJs6OCp|T+0;F=gDTm z>k*gyQ#(#E)%MFIl@Y$}gyy(qQwU3l)l!L>?KLWb!b?gS+l&ZbjOc&u4wvhcO;&G> z-06MvZ$1{%DpjuU%d!g}AO4viV^{{C)8|wIVB~?sf5$J`!ZuOF1Jf~*mOQ$n%bF}r z#)?PvLj*1`NCjsEuJ*l26unMgPl;MjKy57FVVhiQ!12^lFqibO4luPO8V^#nL}TL4 zl5Hn3B%xZhLBG(OU{-&1-9ISZ$&e(>+eygzqFV+Bj7}yAz~E%^cwHRvYTvE;5vG8E zumX(ZDG9JhYoM-|rs`pl?0w|+1QfzY1ATSe@nt6L${pAMQ3Jpht!R0i02(xRTRv_d zj%t~zH9-)Zq((9w*|f&d8t>7v(6N!npva%OQf@}ea2$EVW?Fx`x8KK67>jFhD&-=s zUh&@cJN^;gMYx~CyTVnP_dGW3YaH0LG|zdT0(n9C=0@)r;5_(yJS|b+V|9AEG50s)7I^?K-l+3V863eel*!MHC?e_4E24>MM3I3-qgv{avZOP zad#__L4GtInl8G&Zk6%VoR;G~d7iijwnEh06aU?=rfYG6 zTb`nea@l5`u#`Q1F!JQ}W&`haEq1Zhy+_2kS^2CwuX#grO(1#6PI=d(nmL8lGKNb{ zj>%C=&%5{iu;KKJh_k`p6aF;341$mPYswLT(ei&XM)0hGOHd3(sUYo+>jyPD7bH>| z_m(LR>(xEkw?8)?O zKdJ!l_j1*i`B2aOE72~>718dU&g${pNHmt^CjtDuV;7M5M0z9sxJw6nf81U;(=+N& zt~Gz_oaTwHhFT6<9%p#(JeuNIcKVP}?B-Vwmp6kx?fuAXPh0B{-P`PtAL{XeQv0yu zgkE35OPh>AF^V|9g*)ZyeDd-x4)X=J%CZ*=TfN#G4_p4|_O>p*->)T~9If8wH znWvyIuGuT;%y32q`qcixP7YqJwDINnXmNj?zYf=C@A?p*)O#{EZJRk&DIJ&et@IFf zd^WtT4XFAFbzRJ7?Dm~WeEn8>W!#eEHPmiUsIzG)jkEvJ#_aHJsjY{8s&;C64$t;J z@;%nP&d+q;G4W9CQ4bu|s$NIKh=cz6=d@D%C^v(p%dwGKL^LB*x)-byiU^KpM> zW&a{FX$R6h*9r0)Nsspk`LlHwJ=Uyv-%ZmU9aCDD;=+1vl?0W=bT^?d#pxzO)~p7j zD-=)~;r(b){JLD;`@8Q?G_Z~jR`i;L9o%Mr258IQ^>Oa+u8^3lPqzUV?1op+rl#5o zxqIL1b(=-P_A1Fv95g!rEK~k|)%SmAwwv|maN4G!SzZ@<>|_3W8GG|W>%m~_7zJn>L3ZQLHR&rz3lj+Z!e<5;3E zzw^UQt2(~!_vt~mjB*^C_L5LiF@oLpm1=Qclm(y5*v}zldI=KO?8PKJUzY zyjR)%u)M2Gj=gg&i) z8NjzajI|`fBpH~_Z)^vM-aviq^h2*{L&$BOo*o$W!wB{-uRizZ;05yAxxX$?52TxW zd-Jx2o}58iBzja~G3xt+d>%Osanh#00r(!x;G$3YAX|1{^gXPu|6@J?|}mA%o$jWj!fiTB4E z-8%N3zfGdt?i4Irev>8Bx!?;;U_Zk(cRANqZ-M399zb1Aq9kv(S^GYptdA*3@sp97 z9m`3#hKKe+0O5a$(RiaP)2^T2cjL1hg<~O=`zL3m)#I7>(LJ`=2cIvWnSE}nQxgDL zz1sJBeKbD@Ivy6BM`Q-P&up{T`CdF<&_SY|FFLRfP4Vfv`uIsOTTWc=<=*6AMi+;x z>&~jE&%w<&cw6PTxJSF(>*a%EcrU+<;r8oTPd z{#?P29&IStUPvJ#nt1meE0lqirzdt{g@UpC)Q9AJV_CKUY=uZXm6~^`GPv<~l0UuS z=CYG6#F>A!@(w8Qt698%^>+^muizo+VV@{L1x+mmfr6Ht5b*Zk7Evo9OQtE59P)_5 z@!lr|tYyua&t9J^a7!{NY4hmN%U04G`rD;RuvacH8oTOkX!q)2XZ!k@Kdr@D^4+zt zB$q97fnTJW8Jw$k_r~M9gT9eQljOgLR@G-)e1CuT&nA9_&+(Gqu=4tN_i{0mri2?> zT9)3T*M>1uhqp{&+=Oap_q(`@W6jO&xxbRkG(E=1uR}?7mc;DtE~c|o@cXRON zFgZWovwsQ4pDrnMUXXoAU;a}qZc4h_!0~w=prW^*Y>i7GN8GSZ${eev| zCc)KaIP0Yzt83TT?~%3Z8r#f~oyz3&G~R!o;g}mwN!r`HIoI{)VQ4bdV!5=bZ)N5u z(jAG<38ovJTz1QiTGjh?9ZvBHnJ0cB8}^vpUSa}%@lpfLWfwdtl3f9H0OT#DKjw&j z+3J(8qPW=Euccs0!A75jKV6Fw%TPNbFAU^KT-T2Cy3~nV+4G}Lj>1iWH@11^ia38b zv4FFhWU>QmXPN%|dMcaoc%_A^Z}T`>ik-v>pKQ;K*oA~j-!=PR{dV(umG*S&_w}WZ zUdg$;Wn251Itw{C*_(eqyW>^8?-{l^FrlE32((>Wb@>qa_aYsN@)?~Si#x~!ei_;v*{`+a;TM_aJ{d?sQ12Yc1HYQXbpBu-wc1gzD(Q%OEqr;^vDym_ zkN|*Ueyy-X`Rl4BK=KIg;mZ@ETJ@0=NeRY2v3V?j17bU7v#dYyp13;1T?}i3HyCQsdK>z^O z%Xb$8Z^&YgSE?L)6aZ;JmcM|+TdgF8&y-V>csOP@ApkP_-Q{7|e|JIn?s@fyBq)jXJb9z)K=GjYTac5 z%s`7)Hxbe`ENDS~z98S4yB^OL(IWOb&LBhU@o$z9Lc?hd8l&2(?8*OJd`PL3_Z~I9P(&bQ~Q>4H)v~bs+ z4_f9dAdx@|0oFu_4_AqVA|MEVik?hMrCJb3pmdX`5};av#MFF>%t_XiT5~K3I9her zkS`!pL;zs81{WTQY6B?*@M&EnfM#Q|I9p0MyV17xV$JrF{9Lye{&@{yZPj|MDcY>{ zb`WykW0sK9XrgK)QMB3W^$tJ?kP51b^)Xcuz@jTrB$C{bZ}K^ZxdZ8cD7n%I#X6J_ zB$|!~2_%{VX(B;@p-vJ5S}Fh}31EdRKr{>CB!a+*J7P%yBP3z~fwU5vCWD*;4*+GH z2L%M$6V>bW;WhU2H^x;M!Zq$RQ6WiGxFkDkS;yx09w+?s*?NC-@_TN+{T-K^UR$;7 z{d`Xs;7Y2mnhU1E67xuZ@yJ%A7P6{59sqBOAeY3-@*rrTN;MIZz+8Z(LJ*?-j6c#W zV@(`JfHfWXq*f{#I2c)Gjd$v7c?e<$U@Z~p2m$6(gmV#MqV_aY0M$f52Tvv#c>zbH z8MQ7gAV!rV_OOq*=B2nle$f4+iS`u4N6w$-P@*XXq zF+@@$ENt+CUW&(mO-9{bAi$9;Y9CLsS|fakkc_QabG1>05Ks$3pebmfvCWMo!lMjG z7$TC0fP~nA`C!gp*P5^kFRDb1fYbta&F5e|DCC>ZvA*W>52hB||KjAPHz8*e@RQ?z zv6Mt|@WtI4+eVxI>wWkSmN=h~~3@a+5jQG|d5BIXq^U=h$nlBYxi2^9fFBA_9faxB6C$m{3O zBff++$WkOeP6( zATI-A=}=6l4~Z*5?GyRACSoCmSd#%pGUAFb;CKh*+(n|AthDQQdpqdy)Q}1}jjf|4 z(aHdXL6}tQt|WjGN%4guI)g+HS`*??ddgD-2$EobHRkYVLW8b6FA*aI;s9K{;v#ve zQ?lX$>b13w=?{M_D`$MiK9a#-KH1n({^iu)_v;lma87R8%0)kQ;7`nYohXqEa460b zLS7;Pti{%RC(d$5w!c?=shV(D>nIpYPsD-_NThq@EcsM{3urLmRe*Cb!~-&FFkXNl zsujn7I(=%Va5(Geb8Wh+m*6_;e5-rojjJqC#N3#+TC2{l`}~(T=nudCCx`s-f;L;qzp;5U0nxoM zAO-|Tgg%ZH5z7EaiL!7tRNfB6M28s5GF)qa0BQlT3 z;0Br|L#~iX9=H5BdiBZ)c&`9!00k0|B=$%(2i!xT1Hb{41MyivJ3u#u za?;A9Ump05=eM@c-+TDtKmU&7Z=T=&9H+0GNdJy=EU)L0^}O&8kM{bU*N`TW3@?p; z)}X5d+ouC~1S(C5`#lx`;asg64S%Bs9?C#)mmHb;&=atWat!Nqal`g5wgek^FyJ(d z0g!ch173~-Z(Q~D_~{+Klz{RY+w7=nE#F2AKZ8= zR(`Yu-dfdq|KD8tsnOqf@uIa@zp-BV7c@vG%@<}1aH#$S(5T3j|ki4N3+q)^&$yKC-sUd8bl8`ABh(KItooyA2`8w5nS>tk! zLJzEQS*-eR?On0IXg>PMfp2|4lNDQ255+8Y*3J4YKD$!*+|5y7>89!N+GnyF4IzKmpSo@Z>@(kN%jP-$3hQ%pHb2gxM~Q~>e; z(Ik;zEE7vyYn;*#t=i?~%5ebmRP~2W<8&7ighcv#+?DVM%ls8+5L9ximB-feFa*h2 z23UiDY)l8PeE`_8j37nS1ON>HK5ZakG#eEC=#}M9uCDR==47LP*Q@6F(RqJsJ57`y z?)t`d`J4MqGXH0+wf=7{B$_RYRE@>)ZQ_-CiH(6KiC!3qKobGhP7Y`v>FOpv-K2WkX*1<_*kAw_IITty5TZc$$O0O!-zP)R`s+_1 zB_NkZtM$1*sHnw%wT!tARYh)7K?YW=!c#GG|?XWW&97tq;H2yn|s(XT4BIE0I3tRcy zOZ~x4xhS{ta`v+ux~H;w;#Y? zzSg||&_F-@U9a`dQdVxa{@(Aerd%<$9@~8T%HP^(eS75}Os#R_k@bi7NWRvV>Z>Jw zYVk7gEi^@cP7)~*0VQGx_!!XSEYlDHVvRN_u4jz5dSH3-G#C&J0m1{q#FFJovjI&K zanbJLJeqz)%CJF$L06rS8)B6xU}+3`mE63_(tX06m}LC_>OqhN8(5w&ZnN zKY*{Yy9fvZY(famfNE?0etFTr#sCzw=C3aiG#g9Kp(f|PHFu5W8n+pJb;-9MS;uJk zYVjX#o3IN1*4RHiKNf%M(4Tx?d*IvpoZ8Rx}SKN$XXbyQ?4IG=n74R)!0@ z7UF%7A(DUjbixFQIzVeLXF`vY0L8LM;{xJVgH`Bgm25~Pgb>*L6(B`wi3jPb*E5Ep zp$Z7m5FCAl<}!%e412%@RFlQZ@*)0~NcV4l%-4R&WhvMsA_Xv8`y~V7|Lm7)tNDxj zI`4mCzVha`_V0f1o5z3j5=+BefJ;F;n%Z>WcWuj=}s zR@~`0o5OR|X&-4J^M7an0{-9{0cyNg`x-uNQ^H?dZ&9Uj>;3=5+P9BP=3i`oW54Qf zYgudcUp{8Q8s|S+{a?ods)hgJ69vwH`5)c&505W-V2kVbTL0&<|C6`TVo&N>dy7Cr zfP|CfaYd)&Lj7|Luyk>X9{QZNXv(#(h6>Q+h52Y=Y<%-a94?o+X#b6Q5Gr8@_#geS#+u~nA3n481N7SH3c+ANf-_Obc^Z?N7^yWWov$X74Tzdip~ zBWd)nCi>>;uSfDX*VlUX>!a=LO53d1`g>nfBFf(BifE!i9LnNEU#=uTBoBfaBo7S< z9`J_-6286Myr6{rvTk|K_{lHbgsJ zB2W`#i1max! z#u8dfG3{riA{rn_hLXT~VcvR`VY!6pG6WjL8jPtXisg>{)x!a=0FOXKzFzq+A88^Y z-@^OL~OTIXd7);d3HKYDY!>Q^*<^Y=e> z|98GYi@v?$^wq6@{onts?tJ@w^5u)aT6X0I8Gye!LZ1^!2AU{)_+R?#t1arF;$;Xz zOKq_4h2e_NU;KG&Nd^eED3LeTXQY-a-Kqm1+;2>^|EM*82u7XX2|mK%Z!D2gmHP@% z8X?S!0EdkideDQ3vc?Ni4u{gP>Z1@+O$Hu@=v!z0VoU+lJuwv5+O0#Y=K?W-fb!p( z0m%QXGr9P;hE$0t0}Ha3P?A7yfr>eRGcU3qmtnz*p9BF9dm{MJW#91v-|_nDGMW5u z5YZ%v^H5&%o~*U{M=yVW&nveLphOh`y4L){ti8>@^fdfF z5kSCZ^$SdDumECUtUkDpViAAmM`Z0EfA44B=&vuF1h)9~r@ovmd-(iMKbOF>)?WbL zgzodSEACbZC$eqN?&$xn6aKHg&#$kyVg`PDDv~e1_xH!Q{NouJ-@GAiwueZBTojqekhOD46q>BP&S-&9 zS$#XBxj@0s?q=n0%f6CzdK{!3mZD4MIW{rGrL_HiqF@3#686 z*H}SbqL>v`AvX*LrjUJyi3K=Ks{|sh^%H7_2=ZF>&|54a>L>`a~#9TR#a65^FOM13{C(%9JRT$bLz)&O>5@&A3>2uAme8s7vty3=A z>wM}R!2Ksq|KYCl-?{DIxfMwA=9h>5=^2}U{HWjg@8&-|mLCs5`4_g8e|gIPu1U}q zlir}(-(2{M@yGwD-PY69Hgwjp<%PcX0^hmTgh5q!kudgDlCN`{zD!n~G`{73I%z&s zaCJg`r`4cK{)fgA(*{j{HJ0es<6`<2qte`iz*BshFPg9U;_}~WKawR0bQVl3zd>i| z>z(4-r56!Sd;v((%49%*B$^m#H3B2LOz7wWZbCSR0Auk)|HEJSwuhhMsm^-rvSJcM3f z*8KQt>_0rZ{nt45f9Irs{OdpS(YsZX{j2wFG^#&D+KK^6wgDYQo1Sg4K`fl2;~8)y zYDQa`fr>~d12U?RBBV?_vOtI+@VZdiI1)gGSW(uaFH0eViWXV9K~S;w?bp-3miq($Hi%D7X)yS@@4TndTdEQ}vNA$d zRdSwu&7%6l&QXgkp~N(v&?$A>AR#Ix$_P&Se3XY$D;Mz-k=N^gsCyNxQ<5s%W;*C| z6sodaUdr~lucsso;-D%lNx0@VZKqF5mCP|5z#0m9QMX|qQzbrpD3{`ILq%B`WU}Bc zo!@`WH@v|6peFOcSV+gAr~G|18Ufp39*Dv3d$0h^-%WDDXeac~*!Q0>ma}ai#(QFi z3suT4?ejJHZ?RT?UrRpO8ZG4GYd~QUbclKUz*lUw5*Xj8ga;+JLKCp(DW4FfX1tX# z!|Ad`j-ef&a5>AMI zMVnkbdNHeC&=!D_4kc>c;|hwOu@MJ!!bgXUuhu+-p0{*=Kbk@~hPMgC3hbuM0C99! z$iXiToTMSX*L`gm`a0IBc8+xz=q~(=-WT+rJH=eG!*amMe$9>%9tADg{8NHq;eQ6Jn^C9&15A)cf}CxU3@ zz}uw1=fUoO29g-XMjOMB$AdBdV*5d1kzI`mj$or~NC^BAU%SJ@*DYc$2C)mw6L6ev zNB1Va!*O~vI8KtD#%X-Q8W1cY?Xt>M%B3o`Iejhd6|8OOaorc2a4TT1uSBPn3xSWAtV=deatsS$Dwi0( zqFrU>+-0=v`A~w=U%2@B#@zP7(Srqvp@IXb&{u3P`kyl6ayrUSz>x|T2~=_Wgb&;m;>epX#i$INsqIC z%TvxM7Kss&7gwy$A98hiu6Rz5gUY_v|A9OBtzNEjM#?phxk(5pC+_oa+4B<{P_p|cV4%}U-%zB$A4L~X(>CI@G=;l)72Ubj*^bq1{h=~9JQq! zGfh-4kF5r4Z$zbn<&_`Nx}05=KbDZNv*D2qNJs9N*BJCcR0__T&(a~2O z@r6}tt<->PApZ*at-+!96Ft9mBF}!}RHJib3@>5a$9uPPeIAQ%zSR{^Z{T8o#ei`p zaypkdmBguDj~FeynTYL?Tm15CI=cVBavXAbNn2~LP2Rd_%cIJ2{CE?&N_jk2f>el+ zCJYqX7A|`{QcF7edj9ore2AUpX6G0_U_YiJp2?6Kas!Ch0T+S*qo6a{O;-L#wJy;w zc@|?3?=lTPG!K8~zzKZ8xax8UwyI+cHUa7<6F;f|`3vGZSHJD=vyf;9&-Iz}c=a5&Z%)V=+v&%{{t@T(vBoY- zmJgawV6Q zwd6TVTdXmuVAk89zUDM#Pq&5;5~YhaOH#gE!m=89&_v2M2_>PBQSvI;V>s{^%F!=y z9uQ9SG43)$+(qKc{a<(+XC7An_iK*l`5rHGgR_nEm?X7Su1@QJP-PsndY8F=b}%mO zs)Neafy3jIPGC-n6~roEuk3uAKut)OY?+?sKCxC_lp@xY&Bn>2ZB2hM6RZ&Xj0Ya- zVKmZo1Kt>z1qmRIRgIjX-(Y>g-+G^zbwjN%{iM#x5My8l=Z~oKstrU+q!5ZEFe%_~&r^F7+-WLscvA~)J z8;vaj(?^+4w|!g#nma2KLf$L`5HEpFq9AsIB{R7L*^0c>1ApghG=XP}v8T>9-pgVc)r`{G1D~(_w<<% zJzW?c_{Z_Hs{;?~h|ZC}dL-o#=Nq?t_>~+db7<25X^e}?*nHW#TV*Jefnk7s0U4vm zMPNtG&Pjq9jtRq@0XV4n^cG%#ekrm>zq`n%=y%rwvh|6__6y472REm$FC4G=2z~rh zUgl6NUrsT9(j0W^h(&OTorFrAOE1NluY+@{(FKJjVkU`6!LvCt0}n?fe14n|Ztm0^ znnzOTPru_Er>z{KQjbIl+H^v6N(wUxNcG`Bls+>uy^~$-^6mlrPNvR{+B=Ffmx$WA zKV-=1I@P%_R&~~vh=}2iPoW5METC2n<|DLi=3b$H3Kuv##YGg>LSNE@=6LwKGuDozTmwkm;9S@<*>w zbGwkn$?MeCyy9)b0%{QwxgQRhd{KJXdn$bs?cOEGyf%G&|ABT%Yi|sq?-L*NpIzD9fAY(JKRNcW&|P&A1LZUF3lqg(c|blS4pQlqQaz3etitRN(MD6s|I^8%=K5(U5gsr*1SY z$a7|kyxs{g#8Ld|FB@fczov#WN*gYgRm|Dz*lV%A+z{SQzBr_4w<1|&zloM}yOaxm z*R^^(WzRbqC^qJ|z;gF)O_XVHQ zAfK)rT04+isiY5|0X33pe+rTESPQm)!$1)&IVbfo>XK)wrg;1TA{ns!kK;JszDCyQ zn34|tAT*#%-;aTQr|-u&ue*~yW2Zj6OU!bPPx!fC={rt7_cNV%Y<+&R{GPvp_#JGV zZkmgZy?V#$Q|4N8olr{PU8pbz6G*n;uN-T2W@-yi4&>FF%Ioc8p3q={UHdJ6G~{4d(5jRJ;&yfDTCk;7V{;gZ4w7+X8aBi= zw;1&`M7O~-rA8#+Ut)WK1f)lSS9si7VgKvzwyKq_t|3HGpzr~1< ztK09ILqF#DIr5fRHkf+nL^s@jjuV^GDh^1R(1WrF2q zCAD>=k;QmoDWW9idCyCtov@hN60$e0@_1=1ZMQ0b$RdvZE9SIEHDFAC|1~GUDu^E| zip`=5cB7FdULT(!A2Ub?yp&(XTiJh%x7;$h;W+L^TF4^3aDsXsdKAQP#|h7a7q}pMYhQO5V*g>?B#1SRFFj(%>vfb}uc_oJj{bW*e)LRTe&gHL z_v3Qr6JH&xr%Oqb1=liiF=32rMhvH-Yo1)XXAJYC*nEj1{FEvoG_G42drTPVuo&lI z(=m)w2ixTeksN28+OST|Y218yahgGCxLC-gq)>p8*S=;+Nxh_h?yDOs9XZQsj5Z7F zS44-A8|mCO+9q)tkkayqs$MyKENPqW?DDY;iJXOTefo^r1yxgsj>qnD^5BxJbMoes z%#|15Z=|s#P{$6@d#97)!KCK$yxykxyq5pB=aqfezE5`G+n=1cUBk1pFWlne#h09M z9lLB3?c#jRd1Zrta(kY>l54M>R9+Iyun-#NY5tUB9mF~asCI##2ue*j8ATZOW#+Y`@VdCbDRdA``PA>4xzQ;qOzkv zQnk@fs#@lv$)CjQyO(DHC>_|P>w9gr;40rP$2Utw0s+f>=@cS)B+}0JZaYsbo0BS~ zZT8DuiXQJ6{dyVAwHx8}MtbNsN~-FPX*tws5aVS3hHo}BJWg0&>%zK>?(b!=|J`-J zR=N(a?w22b)wxd#F@Ya^4!=xlh?AWIL;aSu#JBV67j2bGbQ9UEak-iLh(YrRwp6Er zw6eEnG>#a9s#Fo7(SBJH;ynf(fAr(T6Yca#Eb2Ks4*4P_hU}HRsweb#wR_(!!&xz% zF&)gESU%hN1tWQ98Zi>GN|g1qeFd^=ty7Npdf(@NjRgr4GvG*bEe|IgN%T0BDtN?mHF8P|6KuKS)3vZ4Swdwk6LK3G4-jGy_x z39jhC+OrnI5mREC?_%2UobRi%7qiNh+Kik?`F>OMTtBn&U2goyN z=ZROisUq5FFn8X`@6FwP9YEA9Nmu>$X?E*>5MA0Bt3fKLX%72M%6x-$rx(PDGQ^1# z##6^E>HAzxean6Qx5z#LIs4^F2jM%eI_J^f_jd06{H{U#%n@I!?QD@g`;k>RO0THl zZ$Uo!*mc@jkU)BowRA`#ZIx~o5$9A-JJniar+k_}b*b+cYU5}%VG&<>IdZbF-y!gS z!!CXID@_=!6_2Lxc9iUd(#Qq zWbNdVQzhvd`hEmTSa5_wq7Rm=NcT2>k(YRz#KGq*%ABB>&WpP4>li%JB2FsLYnv5K zvbhTt)~rtT4o*qhIbfN%S4LCeO+|Zk&Jy;8B44&V1a|JV4= zeAgpS+8Jlfgz}UhU$2?*51h3<azk)`qD|e&z|TUm55&hDk@tr1Q+919?KeNWCO?8yOiLIm$|A`460V zDmJN2t{+YvFG%ilhVD}l(4VA{?V5;1P%nm6pJ*bum zt4jYJ2suh03=f@n5)hRslH+(b$&$%q1k;MD*DkUi7lF1U(P_lBHDh2EA zzAU0ddXwNZ7Qvl+9*fp8F-!Km*lbI!u+3@_pC8=Z9W=jGA@>3mB2G}2cOBdqRiI@AJ zZ9ZMw7^$QPFbCDj7Tsh;lpe;dmG28}f_bObEU$cSS#J*V(aVJP@iQ)V9d%J#E2nFh z%uH;Kb6Gbt$r386?9rQlsWB^qoz3|3Ox3YhCp#=g5nUzGDV}3RwB|}yqMa(YoE9^> z@j}W8&5dUnZO{*7t4fJMt>_(bswGO&416puhC1{&7E=5f_heUNced*oORMjkTK`!4 zKgYWNzU(_*{_X*BM4p~wYveaR2dMX;lpsXm+T`{!dzUXz(4uaC7p-yqLZwazD?+dV zE6te>EYRxccJh_O<#RLJjvzUL^o}2?(7}fr>1T;pbKZEO2qYY_#xEL|Rj*8=w1ASJ z+sMo1krwqYp4TryYf?KlHlWD4p1zUkG~{Ml*~EOFKf|KBQ(fZ*mKCr#8#_dAe8jbM zy%*+r&Z6Kn&!=O5s2ZH-Jw83pC*%*l@Q_28KV#4I51%q~TCFqu2d5*S6wP zvpA+V{oIc7V5I^lPz?iRJ^I>Q@GNGs&)66@sM2|$6A~8kSwQPSlyo8{6(O6wV$f`& ziR;Hf>a^3q&Qsf~WQ05q3E4>HdhVt2@AY5kUot}Gt&h!p+MOB_D4xG3~8aqlk@Io@qaxiud|H7tU;X)oXF$tDNc- zPB4tc^*p`CP+zX|7SjD;Td+2Gt>J<5{j1`P1c4}YV?!-zgj^s!cUqIk_^wL2R5B8d zM42`eI46XEZwX|aRgJORW}7bHefJ+SihZ@L?|=IGU-i{e9LJ#S`vfk3G0@N4>p%C(N`2icOXy@uc`${v zB9Ethis$**>;5-Au|4gkx>m=C&Tf^pCqUXXi;y-k{f-r4l)E*~AFOH@D|70AV6I$DVC2z^nA@gj z>`8gi+PT*KB0~?tcRk_CHaH>cH6G4(`u5CW6pKOg;YY7p5frf6hPC1Tl9TZ}$&M`G zUu=IP)=qW{QM*YjxSvyVBO{R|`$*2LWW#@dr_6$Y(?}{X);l3u6FaYw3&L|?EUr-J zYbJ>;p4*ALyqn%_l_BDle+2gS_bjne2G09ZOrD&@2VTxia?==ZotFy_C^L;ho-Vxy zkE;Otem#!dJ80}z!D^;BD8H?1=`>L9a#F_{MuXx~H4m?f3JdblQ?kA{2A!Z5o(HOlkz2<^v>=e$1hF!ukSk1k zJ*O>jV*CU4QnGkD>orgMUe1Vrp%x+N#3MW8e+4)v+!dd!zWc`{eLMB_yQYs*ZbKrk za^2n!=A!4qocPUyo&RsW6UU22hRLws|G)RY{+_uIn-4Gz;yw~u{jE8n?EcsZ6T0*Y zUG?peLSc{ie>+d0HcEBn6WEo;#Gn4+j~;Jz(!Ee*^l1KL-)Uc;;<%=NW;XKJdY#5& zeK^&pPFe?p&+KjXpe45cj;Fu*gJ-_mU~eSa`deSiZ(rPA>=FwU4_M`T+-{5H21AUe z{hMR&eXIKPquQYn&PhXI*-$X$FEjr=LR$&(&9i&Y+1AGSm)KATAw(4+kzP>a5Rx~- zkD9_Z0Vz@xDN3Bj+lCN-(=?tHV6D<-$R5CP(1}kW&7*uw$o6ed3$#OlpnCBPrg8ds zt`{AZ^vevq$-w*(&*jPTAM%PXD3kG(22aoD`^I|z=$Sv`3CP($rZ=ZJ@0{~oFb7Mg zp=ia{HjN{ThxK*|IfDlIzL*OV(Amhz!&#VX4~n3iEhd8Kic`japB{4ahV}(})sXMu z(u7dOZIRTUzD_ySY9&<0`5~ZRnsZ^-FP^bNRff-s=x2!`BCf za7wv6!d}u-PrgaLp>J(F+k^b@+i4o>W<`xco}eV!j2EcqMzE&t=Q!`y*vBgY_1wnU z>O&`#vwnSJ?oniahwu7$pI00QJa*^@`S=VFL2$1hSfk%vMZt9}fDeN_g&KTvTe%1Fz zAP)a0-uBIZDL=2FV9<0t<1rUa+_R=r*7yIM^3rQPwkr<%VE^Y@_OZel&ziM?qKV;w+hm?oO*UZ%>i}xR0YZv;le$@{{oRooZUqW8X#UfBZEn1t>lKtm?)LU?jpgRs z6zyNt+rJzJ91wZk-}u)Rw=uoWY3&nUcKvM5T9oIydcWT0T7K7z`}KCsJwL^3ee8d# zhem3DUFV7A>c+E+RFbZ``KswUv4SmYbr!KYwaN638C?^lB4JYIA(jtPYy@nGO1P-9 z+9@gqOG2L~++~PQuOemlnzxb@RYe>s0c+fJCwsEX_yf|!w!aeUj_GkfxXNpy2RdwZMNuG!4W1Y;#E@64fX8h(Q@ zn{wUOvf)zi^d;ijp&|~}xfIKsKl##^e(uGYTO@yK2M_vD-+03rPr&Dp|I~Es@tY!h zalbyEALHPyS}oNs(mst6mzseCLR)J>jT8*hj3~Ka#1P8rH*V6Aqg4xMn={^pXGVvh8qKfWJ-Ys>9Ux@c$YQJ?CqegE9ar)VckmQ%r*@mMt8 zJQBH@mw|69qq*V^)g}Rx(if9#wiU1JGI%M;kk`(+yaXLM3nG&Mbtn=AbG{ ziWFy;UnA3O?7iGNWJ;+0YSa1s9o1D7E+%jM7<97&o@@wagrrN}8k-)GyNCKTy$*>O zbr#(!hXL>Pgc_=Q-SVDKc(%1OEzh+$oTKaS*yoHR4m^bmpQ(?xvu{o7(=oT_a3!4I zK4NdVPe|g$#q=3-2tpnsf;?A$GYRN2d7zpLyM;N6)QrPqZDUPYS+ARYb)Y6=yubd` zrqKLaD@{3{b}9#~F^_mnKZ4 zO$zhH3m$-)r6j6feKO%v2TPHg8JH{#jea3T{bEP_!HtVBq?3&x!J}_DB~-_f1Q`_{ z%lWM?Jpw)x`Y!C$jABo)uAb~7-9S6#EYdb>n_Eg#+fADLsLHw|Pq#%;f7m3?eQO^< zt76F>%fX&4M)$_aQI|G<>UHjmn>a~V1o;r(ItAY~qVC5St^2(0qyc}a+a6{2N6$lF zdpY%NpE>j|@*A6eakZUOn1plryR?BDsuCA;oiFN8gi%Fg9?Uq&o}CHCf2ci_e9hzP zgR02~>4KXePB^X9gHNXxgJR>;Dj}%LXrd`ojVHvTA*!VT)E>`&tBA&F8OKWTHZ7=` zdV#JZ2Ca;kgOUh{7fN#0^9{9hdt6A7RnH@_ng=oPNV=hi zH2t%`{N`$IVU6;5dVh+YuwUjtF-fM z_&PtzDVWL=x>lfnxWqm+YRq;G({)7QiL1I@Mk-#Y?VNH&s0CkAWRj4;BdyxJnduV) zi7_d$Nv8J3D5j01kz5jHM~*qXLEhd$th!>MlUD>v==IX#acc*MAW^s-LN3-8AHyMB2d+rR2c zGtd`(jrfE)Uum&!XCj8e)dA&gl2XLG&KwF;RmvFGL)m-K!l%=f?2<)l0(-VV@ejUs z)(L@JZNj1{1q8n!PcD242GyT^N64x9=C*0ITt%4@Pi07HlL=iB3$({`bKy+~0Bq>P zk#C6S+JJh0swv{RofoSeH9elYzOD}i<==7liGy_JFfW5+N`7cwEPAZPzFE3EuX|*R z8-pCc2fvb(TFq^|oT_7P?POUmP=AtVT#vqB=fv>r6ecBOl0#(4?B=F^wm;lNH^eO; z??n_HK96?^2T5l|*3G)Ko-6YJFZaLpw7zxPo99)3R8vroV~lTItk5QZks)mB70@J8n0l&iR!gt97dMDUt}_%Rv3tfkqcI7 z=c#9J_ODKx&1PL4AkKxvADd=(Of`L7sqYV~@j3?_f~vpxgWt8dJW0-4wO3s9!JnQu zK-dp|(d$-!bB)h+sh@T1^I916NMCv_60<2p3?Cv-r5jZ%36mLZxg+A1xt)2jbL-0` z8nLNIQPhFma^Cm&Pt%@T=8%9>iu_Mg&LSQm&!3e1G13+B6d!nmyUlvps?3hkaK;p3 z#U4>s5k_j|km@-g)|!%FCRLR%dSiGmvjRYS{%Z1E(mP5xCONDbz7MX?yg%;5Qy{@^~+d zBGf1!P{Z_?92uzviS&(KhI=2mq{f->(19F( z@YO(4Q;(mHwY8PqU} zpX^_xjC8pS{W5jI%GWGzc0wB}a+NrLq+ZeWa!gvah`MBfwLx14Uv{3%N1rlv;z>m` z_WAZa-NzkR)FBC+*7+|u0IqX`>&GX-fFC0_uDjF+F%Rvo{G{^}Te!+?PjQG>ObB#p zNmQ@7#%RQqgd53P)%f~h7cZNfK(_Ys^aK#Ett9vw6CTl)^!h^J77dh5?GkH_6u?NM)?EabK3 zjCJnOKlv@R7dLaQ&%D1ucc=_b_k78@d%G=IaH=mQ$!VW}@?2*g;)XwTw1;sy^I;ZJ z_jQn)5Q0J)tiudi4dFtO7`5(yrzVbaE=aUdOzXX%cauRs9eeaqi$O^+eN4fJ-gr~5 zDPN0T`-Mw8R*rp4pWNu~HjF1&Um`SzjF~{OT+<1lc9uT6P_5!c?Q-2XyvrLK8iy4R z2i&`d=+t9vBBh%tb&jo})vqa9+#jy7z!*e%+OiA}1SC zEwGP1bt+e+(N3=O?Y66b*OHEgy*YpGS4KW6uPW`Qoj8f9~jV;p49%SFzQgRrZ6Bie;39`82HJ$X< z&d(Q=%NMbBqouolQruNDi~|*?s7E0ftepe5UCrO!s1;r$Bj~c3UEF!HRST}|)HB>$ z^2bFHtG%rhf&C_<6m732dG6M%t)FMDnb3_ziJoj#7cQcr*Q3oxcNyt0?`Nn79encP zd7op3!)ed1@k!U|dVivqpBh^iTXN1p&UWMoQSDP5!%wn|rzdlEx5DS20!#lm$Hvi$6M!=up=4iZIEo1QleKk8ZEx}|M0gL6%MxUZo; z+DWXRT)0QAv44N6bjG{XcZ4V*N>*UfS3A!hL5YliuwWG|0DEax$Z1WCeUCLu(ZW0L z_sO&UN~J5OZI%=%0?KkhH%n?TF3^I#z&9Ur-}6xeyl$p@KI*BT@VmBijz>cJ7k}*^ za=3rwUS8SE6TYqOX;5G_7HjXx?TTh!Yr6`u$2rhFS8XN8WPahLwsH~0NkC(zLzuo% zaGxoEHJ_@@C{q>UPd2QkPcyL6Szdj`^tJP&_YP#$b zl3ijRk)QbA1Lv%NE!0JOf&8HzI&dc6^_ZxCh^>u54ZmCDB#KCLVwyL*tz2hig^XC+ z9)e-L@iB8R6yD+5yK7n0Q?L99NddU{x_t9K`=6Nsi`=LO^DiO00!TJmQ%p6-ZA z9~ojCZL*=D+<;6yXTocjSRz@bPmZ~zj&5cTf50V;YG-|{#v+nSW~R8Gna}cU5)={Y z0Z@o0mh*x&A+D&_4s||$<;Mq~8+Bk1PzKGThqkkPY`FEXvfHV5;gxvc)> zIo=*}(0}BE|LAR7_cf+mo^^oxJxDMf{nl;0@X!Cs#e3~>ea-DnPdt&$ZDjtbr)^pY ze>3?Y;8Gj)wKfyvd(CY~*o)?_7e9spg&xob-s)0_U%bTw9X;)T12mz3#$sIgizk zn%fT>@ogi|`Ur~?UZCw~H&RcsXgv-SLW=q^u+oF0tSmVK3I)EBJcz@E9!JX9EYf!z z`KU3U&!a$wg?aZB>YCRtUVi`EsSKlfa_dGWCML-2mOsfwa_hy9mSc$x2R)%AfAHne zYHga#o$hY8$fY0@B6b6JHc@YWtGUC{t!SI)OW*#hL>-O;IM}zpyxDfaRs417(;xga z>g2}*SBxCNx%7ZsZ0G+JA551*UD?)4?I^j%Yscc)YgYcqs|F2Td*|ej>%fK*56+rKs1u6Zb0%n_y;2CO<=XD+_|x2traKq& zaQG13F%1|yC_%U@Xsw0eZcJ+*^dx}yz{^(rbB4UH9z#XGF6uTSXAAXQbGImT;dHvC zt==$*o>R<^BJ0zIl>Ob?4DgnxAf+$eg@Uo{mz_%)1I=ywdOVE4`&?b^bE4{ zYR-@6M*hORK6`ufBOVB>a}0i|F&JkCZITek`u=lA?Vsm~BJ?JDK7dNWRJ`Xk4?O@Y zVGbilOJVO`e-Sd_6menof4U{zowBJ`&{j{gYQi9Y&Ano>VaE#T#a6Fb2GjYh{b)(o zn5CJDlyBx#SfLvQ3GMYrl!X23hD|cphRIym(S&GW@uJz-aQo}_`S&wuRXZg~SzEo? z(Zt5SLS;RsUS7=w_E!?DJI@KmxnbWIP~50R)=-7@20rXHe7%Rpe;;~|L+u7_{FGj= zhYQ!T2*mfEIsLfaNcG2FI+y(?aZQ8=;?eZiDG$5{fjVDHXI@O=lg`oi+HbGwe&79whhB$o$m4(TC%(0@u4-uQ zb*=i`2cFH~PD>8c1LM)biLgglNfD_*r%flg-{#hdrnR~We`X}tYi7|qj=AMtte5MD zZJ<9E9u0oGdGJC(kSMNVu4G7bc0(4aXKO56NkSn_ZLl=&eU)K@-B8Sq)l8PJAG^5w z6v)#~^6~N}>F9AQtz$b;V=oih)m$0hripx4d2 z#8X#mIXs@ZfAPs>AJjiML4{D;&qvgMD)^WiEf^@~wek?$-2CP$>D-rB+z)yl7%d4& zO<-Q<4(zkFmJ3_2fu?N^Dt%^K)(CAg-ZVkfBmCYm;GmdgvZ{SfB#>t7}7Ef2x~e5o=`niBsZql z$|0}~tZeU?YTh4LJZ3QY^Ed!FqX*Q(+A5s?{hoE0Kb&mGcW+U7wI8oKAN!+6>v-9t z2m7;LDOfLVq+^16lI1v;lQP`%px1RNIp?>p;i9b^zmTcZW}K<|T^P4OobfS&0J>l+ zhdl;8e>=WoY|wFWTU|{SbQ1DtdUT>}=cr_%OiFTH&2e4=kWZ8(=&`y%2M(}hu(67JOPfgSYh#{43jW$L5lf3JKKL=Tai^7mjt$PD->NwR$LERu9N zCh1-@*;++U%m$Ti+Yub?^zYe5!3T_S zpUI#Fma#X~Y3yq7jMoBf*rS07Jfz?me`f?_LLUvh!L$L-_!iKHKjQiKcsvxq5^7=J z!Z?9Yg*DKIekd5hiv>al8rYP+YWPKX0&_Jx0!^{cp~l_^W!yV_Pw{#*>bklGzV|e| zL8(C@M+1Pq0DQnE0UMn4Xr$;G-!Bc=MoA!=b_!CcDsYXv8U3oLgZpOY9Bhyae_$1P zF|dTU3g6!X(QIxYh1UQ)kze|!+9F8e|UaDVl^iM_QjTKFq z;J%-O1>)x~8|)HZgc;VA=$&bQ-W&AI^WfqCXNqibKffAg3EXLRFtaE44Xie^1N^Q~ zW+#G&lugzAX0mXK5Y^{s2%+(e?~r=zCU;GBkyJUHt!B2Z#{jTcaI}?H?2nP-N;FwXYFKs zy08DkKWcy)-2hpx_;MpD@jn4(N%CpPKHRQ+u;F4Ws>Qmabw$1er9LPgS8-?~rdIlQ-ZpbuVW^3F=U4pbAR$E*B7m zcnxI(M6K-QrpZ&%f36rCLOTQ9rx8h*%8r+E2Pt@D6~%37Au z4segeyn$f9TDrRxf90i&KD`yCxD!_@cLY{ZqZ^j16}8Pw>^IWqge9R!t!2LPp-zO( z8+H1=n3|yf9GL!d0Z}W8kn9Ts{Imb8+(Dq)55FOI{pX$PBmX1-={*E$Jev?je~Q&|U!}`~f%s#yif0A{ zNjJ;=M%u9vIF|eBqk*K6JkbrPeY#S*fk&2z!jLYc4d_zkO)l{}R`Ah4+MLFec^#=5 z^;oEa8cOlpjaZS-7Ss)kR2nYM?ERu48)&p0Li`qbx4-EPRK`| z?Dwg>sq$4!e~l7h+5msIJKHExU7nr41IPElRo!QYQ&f!!^>-lgZ_<*b#EOLF;=Cy~ z??BRfAgK?ISBwpfjUMjvj-~e+d+l}qy*{d}><ctf6Q>_JcHva$)`~`hw!O%5^H+`qk9;609UE5Q`iD}z2O$Gs(UPPxYD&4R-ef#B ztm|5o`fltfkLcKNTt6$&(ucXS-ugL%uchT~8bVn7j`L9KhQKg?2ydhhDm`ostOEUs z0sW*df72aAY7DwKfNluxm>7ICL^knFlr}1=B2iTh-0bD8{%A)ierXb+b&HNX81EYvLIme{32c4A+Qy=W{5Qw26SaiDw{6aXv72Nzw+lZW&V? zKU&naE{ZU32)s#L6D$7xKEnA+Sn8j?qgW1qe_tL9vqA^&KN?d5hrj2J^kLSoJ0Jzl zLwq*MCYyd9j?rzyLRztbWM+WX3kMDh-^Wiv-Z!#S3x|S6E@!I#fGaSe9 z)%|g==vo^0$3K0Fk+IPJ4!(R!bVG72Ir=7{)qU@K4Xmi~iyD2Y@qPI6?%@mVe^=1k zuT1~@JcBY_=(6|alsFQ7Y!#OMnk;C)mK#p=YK6}#)N;%xcGo|j?rILUOm76vEAbAk z_s#`GOR-{PV|Ol|d-O5T?LPmf=UYC7o6~*Y;P0Hzk5>QTKJI~d_y2OA!*LMrr(lt* zU-DSAA@HSF@Q;S2jbuhg-M`;If7*_8p+6dyHGZB={T)p38R0nptDo=z)YvwVgyoEm z%?#&|hLsz`r{qrtdVeVQar)*(UFe{_UfFWj`_VY0O{ob)NrKOHR&)l))`;HUD)fo_ z;WKv2fY1g$m^WT|ei3NNI1epXgibv^$x!38r|3!e`BCu6=`FX zCNb<9qYfl`-KeVfn_lmmt*nf?FqhUwH?Tqf{ZDgDqi;UPF@%SDLuZC0VZOofg@~om zC%)IcPul_ghq2FPhu@wmS><_p8K)A`F_e-A&i5a9!yWljp_WyaEcau_5m=>ije|tO2ra!N{3bbFH zo;|8%+2rt#W7vJqK}Ag|>79-9JbH(9E;CY8Jgp~j+*Aqfw2yMIsm`0=0Dz1@##?_ZcHXQas` zCAPGl@Vn9P1@de=*@?IR0r!6?`@f{UmYw z^%4_jh+8qv=1W2Fc>3n=i1%NP`}o@ssYSywd>{Be+@Y;q#Ar7+0^T{ zU)OvzLN|@+fA`hF```F}KmZ8C^HxQ7#6Gj(yavw$Nc!_K-$xuq^yjT(JP*il-lS=; zzCN7y>|?INF@__$f1UKNbKjTb^!K8FJ^Z~K&e!<58qV)u?}zjI*N5T!{`E_L9Y8o< z$Cl!KafonC?Fz30{WG5T*58*poj2S+8O}4q{U`l-e;>pBi~hX-$Ju}F%FZk6g5aw( z>Z%UZ4JqYP+6M)+y!T!=4SDaq3ia-Vob%sn?Q-a@9yd9d3V;KUz;z@{GtE754-=OhkoyT|EB+E zOBBiL>xW7Fqt~SJrRg7i;rylFIO#`=|Ka~H+HKn`?q9U! z54-y>TJjIy|GH*G^v4@ezBKiR{)e4K*Nx*=HD##3^v7j2+rCNMkN3Z5s~>LnU$pEW z9RH&I^Lq;XuX3#a;3oc&3nTpD{rn>rDf>(Re~(O>Ke+!zEC1pBFWTvcU$^8KBYf&Zxo{?;q-?>^YQ z5;+iTfAMzyzdP)xton_WK8p84fF*#BK-fA=!K|EA|CW}^9j z*YngbT<+iX!vC&brv{6z{#`aG)?%?=8hL@BGK$71QTn;mMoOrt{5P{q^^1d^e}6Oj zi)#6`G=Ba9R-&|@-I@JajcAS%JSw~BnG!N8t*DCPH7fd3_NcU?+ux`9%MAK6 z#sA;vm;1^4zw}%E`c;4QkwqUrI?MX?!%;Cs_mluph4riFDF0XHD=NHb{a4N@diuo` zrx(3Zg7xDgQ=;|bvr$6#!!7)9f29=-{mz}I7ybOk6tFrh`PsRDHqPe=!;tyPws#8jrl$D|H|j@{Qa57 z?vLE6zw`JY3xcmMJItKa_@`uk`7P|7b)K|;~Ruwt54HEc-)^px_@x~S-1T^fAag^^y~f~`2N)o z|IhfPfA$ID?>_NY5AT2W1MjE*|JV4a>d*N9$iMs5*T4L(DfM?;`H$}(z4=S`2N%at z-hZ~ED4e3Ezj`8CRQ4+~1f^c|`|IbA9*KX(`~L9!#d%TcZ+=ASci+nS8@FB2+TVTZ z$LG&JaQ^sY7v27|e~(>%`2Y0#?|Rt&tA3~#ZT|KZ_rLg;K~(!s|MLI&-_H;D$Hn%? zOXmKb6Z|hlHJ$z9YU4jsbWs__Sd{#`hn2~G`a4t1&#(NGKlGFH`OkWIesLo@MSXtp zxcblE%yz$Vd@1&#OpE4z`3CHY7XKVFuxRDCUR;XSfBNbEfBfCQ$jFLze<1u{966u; zz{C_kQNcxL6c7D9SNJDK3@1=lMl%#AQ@=aBuU|d^h-NF!{B0VX#W|F%(LBZZlpWFh z#YMk7TtdaAzhLU&;&RGHY4Q4j;b^Ji+8-#{;s(mee&A-xnP}zWHX8Ad#HB9FrFa|W z_cY+dJium+f7LLZ^1Vdxp~#B0DEKt7z%hZ)F`m%z9^8*VT0f-UB)HGC(yc`3SWndF ze|Wx@+woCwUx~nR(eUHrk5Ta1Bg^mN;D#bA?=w-KQ-M5aaqe@0W8=`S>Cm43daL9 zx%Y2<&uguQ{9_QLsPgBhCH|-X_WvlSbx8LA@qgIYqu8Ih_#gh;|MLGXr*Z#(l}T3o z&;R^4e>DB`A4=jD;r~ZV_WHm7`Fh)*zW9%3Px>bN`^{A}RlWZQ3PJyUTeJT-*8YFY zlRf#5UnO`=WeNZHG}#Iq{eR{VDQML{XF9mw*{wfof-3%GaRu}LZ$4dmps32bq@)_M zUyMI931BD*eo=8S)mUuxR%+#=6wOe4BT8b0e?VI0N+~($y!0#xb$PQV6NGZCeRz6> z7uzN@vOGv+oYI*A)e#7)OfJfCcHI}7XAQOMHu#JgN2u%zjDt90wP0&#DBDL%?7;W> z0nJ4yVO&~y>=i;TP;oK_+Y%t8ny%i9uaR1dfX9Z5`7}?&hYtP)bes!PeSnh18@Zuh ze;{VJI0g4`K_#UrY78x|)C&G+ypWWUueWh`@zbzM_}ul1%_!EwUF{|dgX2f7k+@sK zsRR^_QAy-}S%AwDf8ea?Jk&ig-Fm*-aK-G354;j;KfBu6&EiG(o{}2TY~Tm%Ci4O+ z{yR;f*niUwsyBUGt)pK9h^zUhxXVxAf9JDYD-|@?;0>9r(jP9R&x+GM(8{`(w*{NA zMOg%kMOA=QnyrRrL5+Z7=OjYl1Wq(Nj&HD+0I_`Yr|SaHl)m_haNC?uwk>tX(?1Y;bl$_*tzuTiYvKjZ6O|e1W(>aqCLWHt`Nk`7zALF z_$uOE;VwmH^U}Jyp?#r#dL;mUWrF|;z!LZIErQ@gs=_8?D%aTS_gH6+(}ep7ZmiG#)d>^Ri6(6|bB&a2znomKW{TQQu7j=I`|le~V%uZ1$kR z2Q4w{!jT#0$KS2hIeArFWdkI0*|*tWh6U9Flu(#>`j=_}qBk_fI!d`ZisV-Y*o^cu zgDg!qG*^V>&R5gpmjz!O1`!cn=`=l2(s~F`2uI{qke3v~1;qXH{5s>>jvsCBU;`Wy zTS0D~1VK^L`f_p@oq67ffA8BHD?tE|TcIEB>|mC&E*GE@(GMb;SS`lqdMRXyD#D4W zu;zO&D#hhTsw`6MOn0EZ!Z*Y)9{4(7P5BNv_t!1xS!N9B^G)u)r@m7{L)2w;0)O+A zD1SU#5CWC+xr8lkH#PVL?cK<&wDq-*;mT#02Le=XWa5I!&N=%_eT6tRh@lhyNIxteeU+b=OgWZ|=ZQEB8rn~a~l`v{)v z_7&-JaJkqC%$)6JK2zJaLbEsHZC#>5p~A(Ui%E?UYh{Zse-)}#0SfMz=q*q@YdCEO zT1$ZZ#2kJ+zfN${Y&hYHCC&!$Vsz0;m~UgOo0VRb3-PQrl3h(rvj%g7?#XsIJ5Oj% zK8h^Z1qSqaXI&1&Dhi}kvBM09@kQ6bW;$1eKG7Kzt5dV}mMH)c=uKre?80bYG@HXE zJ$lKKJ#3nUf0?qI+|4)%vvBVm>{+50d(XED>FA~>03&*V-JDr(!h--k!hqZXi!48= zaA#2hsFJ36#RuteIj1$)e(5=8rc&K1$SH1mW1^OuM#&){mz@LTLK;T!*Lh@Pl&1Wv zi`HW5Mb0^x{;?1VcANOrJJU*vPI8obpp-S8(VOowf3Y8)vS%AzL42{Pm7O(s_xK$} z8W}vxUWv-kOb&*4(AFXZ$l^2AR~&7J(o7favC{9rFs*w!eIZR1+wCOKAjDD`%fUBf zNz>K&ElFsQnoNqhUzbS(Ib!(s0j3ynB^-O8rpC2kyuq1BvD>1NO}am zb)aeCe{?o9El9$JBWR);i_j`a6KmjLj~{KP7d}H0hSMkq-x@tOmU%OyHS^1eS_Ydv zKuJy&FxihTP$wHLa1yw4fDcfLXr>NjfK@7dxx2XpP_ovb%t*4CH+|S`uB;C&w{8MY zZdYLJf%jh}Xugh{rDbm`)fL%%?^9M$R&k$5e*p5aHk{-tRK3?1k-NaK5HH->$9#ey ze8Rnhh}q@=7Sa9^AWhd#_%Q5V;WtVg%EpjNksg{isc9ivP>6SHowgOR9Td92tCteJz%Gmk*i*z ze_c^inL}Ehe7I}t3@7ui7qTi0+!v*$-j8MJgEL!RI0acL_JwUK7n^esUq{ZAeZ|BF zDb-(Yr>5j|#zUVEl0Ayq<R9dFd}ry8*(E?PT@vHa*|IV4r^ zez~_SEV@qB;xwu{h*dC`QtFWjJaNv=@;-yfvSN_)UEa>m^5M?Me@wo^APPQ+E#X-; zm}!FMJCsYlBRGE}@J`&Ii3d^;;X8si!QVc_CzY+yYJHwpBv|{H8<`I7kvY>ye;E@= zl2OKzY>c>HL+q`)9Lu^fU5RrlUY&A|>U)wx5uYYzp}N~99$>rX@c46SF*Da4OG^+b z*A0cyJf&&AH%A;A*}c=T5W}d^^`>#dql)qw#FUj-So(N$*6Ow?SG2q`(W6OSKTB+c zRr5!Zaav)8X|$?)u4!hwT8e>!e>65(NP1WqXcPXVwuA?4XyO_=;rNERw-)1MVyRpY zqVRy}9;G|TM5aFkqLmg?DFf4~l`xT#n+E+}M|hve6p|WLIWZAaQ|*4#XyEYM?H&GF z;mjH_5kpeqxmrZ(3eP>C336q0K+puURY0IsCHXD#n{I$h!}T5~0zvmfe{UcpM0&yy z8(h5p<(A!I)3TNOg~insB%4LpN!e74h8g$Lz+;G_x<<)E=xv&=p$J>jtoqhC+!Lhy zVO%QlAVZ<*#M*zkp&zLKgk(UqD>oV;sM_b0b~0%>b$Ufx;1NFDH2qMalG0B&_F;B` z9#D-35NQ>9f0>jeT0W>Ne~SkWw;7<4M~I2cl=A8g+B#P238I6JRXpti5cr)KH5d91D+Zeo@1 z1?!rVR~kWfjl$cNTfA+dQ5loJVsPXuPR>pH@u2kIWsnfS964PGjq>lE0Qd-pB zT3J9o+MA9J!EBB;LLL2is;AzL5>tb5SpoiymcH~>V)CE{P8^nbiM?hO=S8-;X3_xeI95FafqR^m*bfm3R<5Kv3 zEN(KIi$EzFe@boX<9nJ6et7{h%#Ig7N)@O;5QOfbs=LG#M`>84VK;IUDA>JT&f~Lq z=g1$0w1Geoufr^ocoP7ZsNoVx?vH6KZa;R6Nj0Hl|E}< z{uH+K`(&^u33B9Iz-e?>L|~gNK;LQd8bwvg;~Wt0#?A@hhaIzUd;DN*&x(uQ1|YJX zxj&nzuppk%7z%bkUKr%VJP?4q^o&`_;`ay=O)7^l2f~dcg?{*K73ohcGtaSyw*E){ zrf))`f2VRKf?+;;mffk0()zthMN(hKyu=Gbe}WZ>Mx*sbJkD;K1cmc49ipCIwZ&?Rjt$vGB+Zp0Q#z^%pN=vdzQGnZi8mpMS?16em^b4i%yoq;f0xt2qz9s|Ez7*da8uv+3?n+FoL?!_ z2JzmE-uf43IA&I3X;OOZl>?_-h83d~j~a4mk%gBjNerqpI54+c`(c{}as!h$?c3Q? z{!?FLaNtq{ygmHo6sy9ni!&3|#z__SCNS+kxOhaNBLRRQ!wBOI&)X!p6P16sj@GNt`Wr^ggrl`Prg7V=(0FuC?(zUlzvGlC5ex_|3RPwvh_*e{jNSry5#2w5S-9s(eP z*i5&Yfg`22%Eo9;O#N^;A+!MfU`{t<9M3ZpULVN>4^d2=bs!C$cg-zf+K?sUw!&}z zM0_WsSP2ppmW#ERXJk!DWq8fV04jE~fSPT6Z`Fh-ym@(`7!|n8SA!+Yc*K+0K9!PAM3mX~zE^<+& z>oA7r*ujL5HS&4ltF{mN7tnbX*va}j4LBmCi};!!_Y~DwV75+c-c6rH0i=SnYkeFn zl9t7UmTcCu+008)Cd#5re{vxt6#2Zg+!n$DeslDm5!DDGSmRvbNvMLkX>)*Nh-4_} z;3u%-P?~bXKvw0nHuI31Yrj@U_$6quxPvs)`#ZluVe7QHoVk$RB7vf7o1igSRC`P< zRi@Ftj&o#qx>-f_0e9U-yv-?f^}H5U>(!mN2)+D#=LH9hh#Fwne-_d471NI}8vZ!q zjc_zUqMWYE&mP^H!UKu?rNgjjBW2KVjr5&Qh#(iG4oxBnm-OuQT-Ymsp75%kn^1pu z#I;YRO)ysh**Ze3+s-96w$J9WIJdB%^ymnIslEZLS0k|9np2Hfje)Eo`gKwJ{Dk9M zA7`#BGqE<-A7TYNe^%%q(ThYZY{@&@43R|$6vL9)e#CV#q?oU8{GHB@m@ep-61?$gU_c+~C<`G{g7Q0xMmw{5kP{M98Zj-JVI2tjW6 zE!b6(1DWsw?4zd@o;vy(-^0vn`vtNQCx!6$eArObxja>&f0%wJ&nk~3EmwL9lyUBX5d>7_=c(#Vn$m z`Z0sP)~Xboe9qJt;h0fV$n%^=ow;!yy)am2y5jCDb+t>Mq%xESZOiJD%!=%479`$M zEY&`H+p7Oef1ak+Ivs)TjeiF1{O|y@xv*EgUZ_q1t~)->nOZf45w^^kq9W)?Zi5f7 zr7~p>EZB0DkoPO(;IQpfZv>Q0+RK4UNO=)m9B{(6?_U1MxupqAjRsHfRnKD^Me=NT zpeKo(Zy(=@?Yy{HCNRQ0$g54hpCU@DK;-8H){J(|f9_$_dj%LX23o6r+Qt0F7bSZa z6=*8c4hS;|n*o@B6#}!Saxm*rzG2zSpYeC)x|GbUNC2NVZMOr%1b)lG5E~HqI~I%R zfteLdU=fy0J$y!AOz8{n-?M>FYw*9N%6*);nZ_y9Mas;{(f+b@Be(9?nkPU|zG(qc zkjvs-f4N7OUnMk(vn62XOBAS+sW!v(e%G9a=FsPn|DG+7;zUEoETI$IPzCzMyj%BIOX%jIA`*-n=&Ocm zj0FVE6lAhs*_=uqwxYWY5o#_ziCIAk%aJ2yf62&J2d78!HJ`PWBAVBwShH5blwKL> z=PA6#@$4l)xP(;0_dd)o11^pa+3m`JB6Pe}D@~qzXzBo}e(DpBlUHN^8Roo}SZL^w z7IJi8hc;9A3emu!k?uFFk43Y5UtAhixW_L ze+I=f1SC0e?5ZP?#z}p{@=R>mfL`7ngzEORx{?zgv0TUBcFdBCvMd|bF}>C1d-8cB<=N0e{Rka`pxqg9z!b}b5_hW%S*-`SSB7npUApRn2+bD5-KnPiJjf3sf3#J+ zENuf@F}r z#K}Fvlmn9jt=kN~7C0gtl1KV>p%EzX1u?Ja?kxwwfa8oDNyTL=-{uIb8vDBA z7)7HjE_K}8vsL@-B(qZXZdRIWh}NlRfgbMNWJhv9AFsa56q-1QZ)`mM66v(%LgLes zK4??cH-`g%U@$iK+}jC!e~Ob^Yd%8_r=D-@fniwj_%Mdc%3!N=N4RUq+HshCyTjh# zs+!5f;HHP!=R^&g`cTl)O`po3R?%`s*6d8X7cK4A}QnX#t16Rc!|5zO?i7x}~&{EMRfw zIaPhk>F80(EmioDYvH@J^kpA|GYqcG>^w0m-Ew3&(tLAPe~&_+fNMf~C1i6xd-^8G zgCA58oy`0Lyh{8{58V!$+SKn?>*6~9lw6J+N)`m8HGH=Hfp(2` zi@sFlxs58KMiQR)C!qt$x4>eK)u~;bBHKdL=oKmq=I4femDhS2@|L;kv-1#Vc@Y1J zi;oP?WLA01e|2`7SdI=73t_AdSZ}fG(dr#Z#@H7fnsuc|El$Vum13M7JN@w%Z+GEa zu_a%u19TrAa}hQ7J?!TDF^w(KtQnj6o%A*3j!{JTQVx$7Y-W{FCsCin3Vw;;C#z_p z?8f;NE>#R^vWjJ#P!E}etzbvZxmcgpKF6bI=6Y~7e}iT;&k$+_yX-$hn@33=0<*vO zJOF;a_v9@J>UkK~jQgY-olefDbrk&f1iz7CLGuu#u94q4bK7|^$E88Htidl9&YvrG z?^!5Kb^|5FC0MrfLA6|QpT<=L)wI=HO+SjT4Aa9%(7Za%H695d?S zdWXUGf1q&dG|ZbbO!nc6OckJ8*}q$WcxQMr*kwfNJH1Xfgu!;Dvu&(%_4ZB!eI9s; zs{~x(B|R`FNZr{;pyQIDc29F=sjymAbx?l=7Eio#%FAPhI_!O3aGceMPUDf2w80{2 z>foO}4TRE%j}**PC-c}Wz9k!?zTxR%BdZW;9A2Mi3*6EWzA`oqM}BH)QHS|E8sDv z@$=H|Y(7EH8tx8Kvhw0&3B+U@v}1O=d>Tc(PoyS$PHp+}Xwr7d>n9c#v?Z~up*A2- ze;U;6D}?BsoEzW`@eA5KHrR6Oz1Kz#x}ZdPgRwbXOUd}86ISfRz;E~TCRhQ;tIr*= zOc=hl&^MivT<`L_jZAKWDjg41+4BkDK{9ARtN5f2uLg(`Jns5w`*8R&4&*09)i#~( znI){eiQg&*$Xs3v&G8kaRBSX?Gy!Lof3Ti7p5b8Zs#Ej9#zP66Y4x_PKl|<5a{zB8 zz?zu#fgGxqKDkLSKs&Dqu83C7XFT?vgo`5>UWwZZ!Jjoh57E0EuzL_5{C^0kAgte22&Qj!1wjGi7xc8R|N36Md6X95$|QK^Bko)e;|Qi zNT%T?CG*YI`uOqn=3bk76xqlgh3pe4T;f9EM&uahK#+Hps6L8;1`Z64s$_$_W{A8g1=JPtUmVFPOkO=KDIo z6)U+xpf`wGIoKgKf3-t%vf!y`3x*SNB_iDyZ1b1~nn@slruGBRz83)`wj)XSMtGWs z4Ju}+D1U|J-BHn3T4cvk!>%^l^MsEWQ-kJtfJxbT=R9UV*qWFBx+kyH>DjhzJWD`YbgSx1l+UXL*Fb%He>@FYpF|kMIhYdo_N|V-?5+?g z`h;0Wg^fNcY0D%Z#rBdZ4Em<~9fFHM!GH{_JBrd1hvXZ>p^OEBZW3rnvi9~#J*&Ct z;b4%7x?n`M%ytH@ndTYtnNLpeW@KU?X1pmgx*#>CTfKRzXc;`7i};dRa+MFGWlx7j z3=L|(fAQ<{&R*u6)CqiGj=x44XqocB9`3^fUEN6!Q8Yt~5SQRo$jS;Q_XN7_+qh2o z{bLsKIA7oWd|g1)R&T7uZ?+VLT<6lhz8Rcy!Ue4fgHhRSOGUZs>Zf@bKL};NL>7f{ zTy&}gf0KgGT@8;i2&a(iQ|SyXa8UQ7=gAg8e}F%jYem%o3G^?=K(!KNXT_K~_B{v0 zPE|TAt&=TImv)G_rv9ywi!5yntwC zf0>v_R49nlSa7ipddjQ(C`T498pIq*f1V*z7qNp*7hSF^FY;cz-v@%`$hRH!ukE?$ z=9Jqs%pcH%*G>~wB#k3f zQ2UR1M0>Sxp!~g-m_pwh+fdyb`e9ale@XM{1N>5Zk6V>xQt06|l-@VxNf9T zj5(YGQ;ZQ-RV{yZncNNek}e!IL}77{$$9YaY47yPCWHZds%0V2>HD|^WbaoWe=v?o zRA2!fZvl9)Ogwr^EPYRiC6Mz~j$V-RRX$?mnGxy+lB@fP3X#foj!SH``ZStUu^Wt= z$^HEFe>sZ`*KAX-G(kl8Bvkk(sHWA;bC4uGnpW~LOHI2vtJ_K?Aau74rU?^nObbG- zOL2Us%Rf=%PhyxpG-CD*GNo}Me;MCCzMze(>7LIMB&}_Ta;d6`*(Pfq8j~bVtyT)r z7orB$rBw_$kaE$7wNDg+Z_v~Z=%0?_n~nUrR zNce~@)mjM_Ru1D0X0_lCX;+^ z0vexZ3R*cL<+%#E2j{T@;}zJz+3Gyn=}kZS zJ?f782Ie14THpfgW+3B%$fn4b`$%DC`AA=xvwR=C!aHqeF*BMgYB~pL@TI zm++R;2+6e!1@#+8``uA9u$8QxmdlHG6P!$-0$k2fmaMAx04)J_w#n8=19O!SY3O~b zy#(8}I5?mtWUtfbe?@EWl5gI-pWrR`5pmBS;vKu_i$d{6 zYIg6gKV?$XuaoaWBM7y%_XhYr95C59VScS^p*V~$uErsMS)e%U^PMRo9US6o8OpbD zTS{A8Q+X3c*eanfsO}^!80S|V_XpphCh0zIT1n@2Q*6OaaoXG$3{n__8&o!tO>9$p zkl=17ajq08f1%JL?o{uVc)6yN`z~(H(bG+M?-Gf3>fsB3-(Tb#;tJo@L40T+X1`k7 zB3FTYDt31bNEAiQ(&qAQ_EwZWXo!Ya!?jIDh#_95q8bx-X<(l(v*3vUK0v|0bGR_J z!%5+L<<9D&BdoaxQU!V*UzSg%& zI190^+w}%ovEj`<0jo}Uc12KG4RjX1)dY-o(^*pmx~bL`M8)!wpqxu7gfZ!U383UL z&OP~nNg~qs`+Hf-kGVhiNNc(#0`w%k#aC7WwWr^;B!({}8m)BVsx8N6;94?#d<9{8 zY_xLB^+gkqoqr6M+Gm+y3-p&>1k_)cfwf+c5REDid7dA+;7o0akMIEAMM`|?o5+}4 zs&a+BRTLhS!2wFSIMB;Dw9gNb8_uY&Av17b^S^%*N@el`JOq z{n?B`t%x)^tcVi4@fmT#$qhEhA&pC6e?pI9Eo1yH!N7snmL;Z7aUYO zW$eMRrG_?XOBLUn|-Wk7HDs zgu)1>UT@4IYnooyVK|wJNJ!iWA5((-Lte1!MHkxjied08uNx2RLE)4I&e^3;PQlVI zj*4Ofl$^7$T1^<#4*4>e#UgVgV&!$xG1&fmCtx=_fwmjB?E{u*fU&ha$~vvFMSl!k zuR9R2ICdRpn~7YKAdTH5&!58^LV36l7_T3h1zZ!;0Iqgsr`R_3S*>R75V;>>{X-(ZIu`P^hT8ib){h>17U?@EyV#e`Mkm@4)uX6Xc$^_Q%iJWl=gITCcU5NFY}kY~ zhU)O>I$~?^DR9)Y7}ofZ8mZHAp+*G|&nlN-bY%~oy!Z*PT8p2i>8Ea|&waV=4SsWq zRKsHE5%xi*0{6moOw?9BK!2LBkJ2&F+%pK_feB*k* z02%RjL6Jybh+1XD5q+WmQ3@nk`MwpzGCjwNK#ZgFO z8{H>=*Y1$=iGwOV?yWvZ%sp;Q@d`T+V`Aht;pY%R-m6ZDYXNQN?0=LL+Cleti&Y)b zl{UhH4%p~1do;o-%l87y>I$`!Dq0!j%&8ttD%7Bf^UuQ2p&X;iw!JNT`jb<6DL%9+ zYNxiW`;y&#E*VAM@lZwY!+kB--G>Xf{mdEedwF)46LZ^1 zFK0GI!@jwmeQkKP-GBIYQiNo;G471IZW1vXuTcQjgnvlW{$JTTkDXm%EwH{zW)YG2 z=rE-B-kH&R?>v30sj>G!q9c+@ry{|S*+P3y>72D*o4okrtT#MEUtgi_Z#8+wrA z2h6=7F-sh)2!GG^D;g})sz2UO!p452Y;AwaIvt;a1E8#2`wf3(z`xU%ZW{jf{x?6AsM;38&uts561cF*Itp;M*7qihl^(_&0ueM3la`V~kXSD_oNJw6UaYXoVj$sv%Y#*~{yxaiD z5={qlki{9;)8>KdbHC}@@uwP$E*@z(>6!|=?}gUbq_7G(h5&#U&XZ%m4TAd@Gq>5z z7o`^VbANdwMe8)F{1My0_LUFVf{cRW(%L_6aNVZ|G`S0=Z~aXwoWZK)4PjrFgKN0i zPB+u{tB>HJYB(8Q?9$MziOVExZ-&I@`;qs|lol^qrxm8GpW{Y^9T~?%bj8xMZTB7| z;0D)tF+6?}cMYpJA=P^BX&{HQIAw}2?^z>#Z-3-D&{n!sd9-2#zQT-#i9rWr&-+fh z3{+)3dq@_nRSEX@MS|KP@jlwWp-QAENx~H3O49V~mGPDzc7Vt;g~6t?vefo?!doog zPE?)49oc+Q!Mpw#LZEOxQAABP(wrF4wpmWO=OvC5@!P%507_keRy%FKP1q;b z^?xD`lxq%{?`(_@VSX>&x0U+{U6zSKI5ba`{31zNE?IBZ zXXMcf?Qlr?;BaWG)wO=->lSMctbO1K0`Jbf80H&|en6?q6&K9C`tqsz7#-$b1Fkag z-G8s(>WFA$)KyIEs|SvwDO@5xHL-ri&VR*)2y0|g7M7=NB(m3BleHa9_^P|+Y(=0P zQT}cvZ8BU7g#8jlPtkafz&)m=9Wz(@RI;kg&6uTS)mXEY1XL{ep_eNVNlxg|PlOUs zZU6;#HV)ZPSt77TpkrG$gxxk>zkE!WQ*{+2i1FH?9jNJp7S2)wkm3HugnS@pXn%L~ zbyGu;N4{mHj-K7Uo1NGpqe7@2MDH8L+qr1|oc17ile8Vrk$gsFExu+NyIqun_xRMq zE42Y9UJE7S_iSEIXi_i1Ec52I+}$emPTL~z?@&0l^njqqocm*hhc9C4g+!nErMm-e z7y38d1SqRL8-S?~_@rX76=Hm1n}5Bhi9A~%8%AT%?7ZY&AK&-=OZ)J4cP(gIC9|Ep%oSwBA@so<(zH7RLL8vtG5Q2;wvW!tL~R^W0scqPNAD%|?# zlRJ|VJ|Gt7pS{s^I2ZH8g~25E(spf)(z3s&MM5a68@CnY7)H}Q`C0qA3V#p|*hSk0 z{tlz)0d_FzY?(k+W7a55C0lEp&2uexEgCLTo#^`yC zN%2VYD*BGppg1H80A0-C7NBKi$SNT2!tk=;oYA3T4|wD8afsb)paRWBfg@ZQec&&I zp#*wFrqJQ|0U1Bf=ytQcNPk?OS_nN#4Av_O+Yb5Ib-fFpD^-o60ML*0 z`=RHsWwVtHd8x+oSL$kch!^DizTr!sGnV;8GvuW$uIK8*(uSYJ#~`Axo0MYj;@zzr zfR`C$)3Gq(Ue1W}wSC2tWaFa~eGh72kW-C6apH#k*S%2uYo!VSrDXdjE z?jcYO3p$Zu{fozIm|SxbMij&Wb|9dIYr0Fn@M>=VZ2TJPXt@IK%@b6 zZx+^F$gy)ToN&2gSLq$(d3K_==oWaHS^6t$kP`wtipSQ%{96r!8`3MhJl2aBcejFq z2`&;Oa9epJ?47h9UglNRihfv?)WI2mUyCmJX%U|L^RUa)O`C9k#!DS7L=nMmSu$jq z<;og<^()9IvVY`Bhh10~g)#u5tx%a7zNzBi$t$Hz6x2GyKKOTuqu3?y>Lfr3>3?BfFk^;kX?+ySj;N+1s0J#up7bV6}BL+ zhZT>(ACeo!aQf|HJw}9CqN~e~9QcW0b;4cnmtlsEQ6Vv(FOq5ILU|%TWcv`0M}m^& z7%4#w7k_YtLmr%xbM8hPFJECma$N^s?(=)z@41XrI#mN*6wJj)sM^J0_pMAw!(&U* ziimO@mR$~w@yJ1}WCS@fmWsjTRGlv#Dfz&7rRjiB>6y?s+c zk8q1OuS-!|RFtVH?@uU(P-zZKpW}_fo>_Ey@PCc1Tx1K z=oMs|0ZpAv?)K2W@LB@hM+kfo?4><`B?sHiGD+0Pvnb6Cpcc*0nMQq7hbn4^@WZ1M zg@0;&I6kle83x#Gfg_HI&}U>FT19Cex&n2_2hdp7xzEa`+j9WUFRDs3J4Vfz2dk}% z&zOjptfen$rI-ITU2b5_L%=DkK)x|pwK0kDzXRG zOp~--qk5rPoZdq|4{d9n3UKTRtOov4fS3Pe4Hkcp(#wL?=?5U9ee&D~Zua@f15uDe zKINnfS+fMFgt@r%xvrVkjMO;cl6RFjkKZHyT8*{mHkYYat&?TqxDNsQAOAdP!!Ljp ziF!Gi)iFio;r|jh-c=rHtRG)BqhIejh!| zMmK*y6jT&p7j%$diL~?8&zrtE$U5ZP36}$CB7lOU-_jwLtuh01jivY65z*mNde2KJJp&>x4O6syp-ii0 zIH1|};}Z-6W-e=8@pdy@G6g%RywZYu)N&bi!Brg7NYj(7A0>>^64M_F0tv9Ba*Tic zKx7xh4X3Jix6j%6u4_0_d7+~?BLRQNl91Q4)*|p{TV@gb2=YQ}kSf^DPtv=Df9x;{IY7$uPYWL4Z*B$z+y!V%E)Uy@ z4A<4ru6cVGygX6trz%ZjWJG!=DN|Lw`L<6NyA;7)hAx1TrwvbS^SuhI#NU4r`5Gqg zL3<0TtLgVv(%gv+ptdPSTO}FLYuI&1az%Nr=JF!7Y}3!zj&SR{S?oDY1OwrbXAX$V zBpSgE2QLE9<&SRpkZ59<9KCV{zV3#!mVAR za({)(QfXd+yb|?M94sqk%H{5j_tV z>(ylx>Qjp-*&R22@uEs0b3kO5^>wV&8qVV=g78eCe@{mBa5c%EfRBGqvi{*v3E(4m zI-6DNmYvQguK0t5X6U!)sx%i7m#=CMg(+d)?MI?lR44MCnnFj4Cs>txp&sxf2NM$? zD7)xE}0L>h@(Pw~} z;={Ka9EBLL3%&{A8D<*_1L22tSEBo=1I_DOX*ZjScDriUgsM8LqV_x8VR=)N7nxOe zneeswlL2OjyIMOQP!iWNsF#?_6`j=$CC57ATUH&SA$*TGib#LuqMev4dye?ZnG)*! zmO^2U9Be=vxLA4kN>0+l;JB>4r-C?D>IV*kGG0LG&02ffrw6p>Tjwc@(DEf%dT>-) zwNhYCM&NsKU!Ox7Z5o(E=vkcIZFzj>zJ$h<1D2ntGIAy{v!)Z0+U}&do$JP?n^p+! z?Q&?@HaVKfTO5Dd3CITKk1U^3X_;6vZGN z1@n7b7Z^GHj0aSIa@*QVXqfxhD8|(lh8!VFX$}zM3{m#kel+Q(Gt1*q%&#sDw=u`6 zK1_K520(oC>b^o@WXLphI9JW$khq|z!Kv@km6&sz2QPnn+;j$+imGK%u+8M^{e3DN zfgIXJG@}qGU1O}R+$Qv~9)iv{Vd71Lr7i+M^I5N&;_Ao3D$0AbU8(%VL}@h_$LbG+;I!3crRSS23og~1vbk6U4+ErB?c)$cWBK%ok z+x&MQvs;U8C8I8cH*ajcBy7=Fjx1QZS|!q5_msGpEVI4KMyVaHmYFRfx!LX<=QiYO zM2nZ*U_d4@Z?CcHS)6q1@|@aIa|AusI+1?^vusZYU1O50Y;JcLF0pUT)4sBuq>bYJTe;VBgsBE zW@eE0GGV=9kNiMo@8<~p<3_t;@h|bmYX-=I-E{Wk2o6Q%yUtPLr{OP zfStkg6Lg?N@Isyy(tW!vAvDf>p;igIb5&MqMP(mi4#j#E9Hi&$Deg$A;!G28NrLtF z*s>mNRYb@3ykYj_{zWV+B^9b()2sp)lstRC1;B>{7ViG|H^9rH{Q}UmXKc*>hzy`! z`K^g)y2S$h*<@7tiS&JupI92bd}4pA7x!uNwj;4VSoDy~Jz7a>X$w4WK|RZ0lx;{F z;?6B$mCK?JD&@aL{$1lnlZ&!d`;yXl0#8Xg*-1CfKa4X~u(EX%lPQl+a1S}VI`eT$ zbs;I_@n(2GYCqd@*W)X7yY!UR+hM+8R6;=w@MA-q_mj+Ra=cJ;EF2&xdq;n3@wwX( z0X(7ezHrNG0zC{RCb(Wu93SG`-=E|uKf2*PO?yKbFLn@$0Ha$S)ZJ107jqYX9ppn{ z?^azCGtz!ym&`x+*QjoO$>|}kUbdT)lO)m~iJbPyn&yF9=4n>u!z+dA?VCY}+CTe!o1owLO20;~5>Gqjh=Yc>AZ2$St29w9~Mtdge_a%NShjsMA`c zO(dn8Z)a)CkODdC33F9Jac5ZpAWDvC>EV11Tvm~K-5DwLqOk0iO52$UxRXUv^DepE z1FPmPTl@8sDHICtK_n~@z0`9Z4Tr__qb;q~HmY2QyqDbzEGjcQgzf;0_`N!kTv?7wrlUd$nH=3w6u|pHy9z~s~X!DNH(6<-idO*4pq4r1@qx zXo>y%ROPNVgqzInfDeCqKyi+GlO3R?JLA1TYJa+Q_@NJKObP`7=|}w$=WVDY0dOq> zgF9%@3y4k;f=l)v%l^My*nim8|M5Tn*Z&K1{l8hR&VN|0J^3G&Yk&X$J*fYsxLW>8 zzBEl!|2(L|KTk&O#Dlhk#t}zGtnk$>_1{wqQK2Whqw_`#CDs{cpZpG=x&s$x!!d>Q5> z6ndJbN#P&aKmOao@l&v{*vv`D(=-SF39M=9?^3KSNwqqrBJ?Z&w8H$~!GGof=CmA% zYmYsHjXw`BVf=rcWc-a2R~oFI`61Ml(%QeU@jcQt zP5QL!JvaT?^>K)G#r)6aqi=P+Nk_!r06nI+K zqR@>Z1cO*jmgsO2U=R$=)%xx^<}9N7I8!5sdF4#NHRZ2!EiCvrTwkQ8^P2rg#gHrj zt17-3SA|kzK^olwLO294ukjVk74yEUzA0)@#<4mEj+hOC-i^UVkKixnyToO6hZFAJ z)JT8e+8b=MVCDyAtVUHZ0p)2%hWVblzx962ZgMb0TN3?sYEio?#(7-}fPLMB=9k$k z&YSBq22gXwf^`X;_kf6}S zMu2Kt)O5C~tnLXUq4XG)-fFan@Y59?kK3?mbYkKmt$xs|Or#!jFKFxVzqWy*r3U9y zv+R+CG%Soyk2E9HGnqYSe}PHxB?lY&xtKGAT@YR@3uU}&y8gliI%E1b!E;RJcvUy=>8TW<-nb^3GFP%x;kL z1-;n`jIlxTiU_du)9q!K><=)$MrHZ$-A=_RssqE4x9WgzsDCF2Iz#ak`4pA_$JKtM zK;vH`a@wz_?LtN1LIoUh#73r@OGtnGjAv5$ON}!-EiDW@M5+2J6xIhU5J=rMHc(Ia zY$w|1z1kz+D_ibIEV`TEXR!}cG{@s_`~ms&{Nk|W(g0_M_qTSYM>%-f=wC6Z*?aa@ zBIOc{wuvAXJoi8z*Sx=>?A*s96zE|1>T>L3VLgd{WD2b1YA|atXnje4fBJu$;$it5 zpA%Zj8&p=On#7t?ofW$aW$jG6RNZ2a*o#8ZYP*e57JDv;5it(3VQn=I$#N3|)yNT* zb&d<===a{I;nE13fB(#6(0(MvQ{%;P0dxq}{2jG4sR*Jx298giUZl8;Nvg+#S|1Tt zccKMK_;XmEqA?3{9SWM5P$z%%F{CRju{HcHO-=)z6vVW+A^|WQTBQ7P=ItUI?~CJ7 zjB{?^vQ;Y<+G?g&|0@Exj@<7db~pX{+EGPA7YHQCkk{{5KlmNAoE|>Il^sABwoI7_ z<*YHU<^kyVJlHZeZ@6s}at$_|5OHKrz8wDQA|D0g2C+OW{bR-~=2?FnD7oRn)VV^; zTHQiW#yRTVH8d4S_Vq&G@gq{L{Sy`ZDnD|;?0qZEt0i2jzBwGK#v6YngEO{7A%x=S zYgXIkiPafEG2{cbC3)`iTaiyze~Kmh?h4_Rx*tfqfJ5C=@Rc}rn`2%0&>!lb(31X6+_dwYXz6O?*< zz?O6cdS{h6<0XUpQA>S&OBQGpQqE|2f3qeO@R#n?!=dBgjmwz zoBdra5f}(Swgslsfv%R5E%(& zTNk9cK?D8oDux8@jRDf(TajZ-LqShben}BVk0ke*^sRlp?M%WLI|`@bVmkXz%1KrY=P{^J z`HWB0g8qNr-|RkPm0DPbUt0&jnX-<69GVX7>&*EBKQl7#4xxbE>+_>?ORH378{TWF zIQjR{(sSeq&(>Nu2XP8M5-3r38ZOqUW4R{bYC)pV!%m{EJ@_Gc^^6yPg|>erN7cq7gq8*3!S_Ov0{{tr-B##r z5OGUREKj={d1sjS7Hg4Um`AYI6v*{REVJg@VIbpRg4dR`1EEAfBdUcV3(dJ>6@De` z8(!UuD9S_ZpT3X=^YB)%JP`<{io}SdRpAA0MR=V0OX55m$rgqX-HIrIjcr7aVf!SN zv$ublM&Lk{Mv!n@3>@2AAwfR`iGF-9Fb$A807hbhr#QG<_p%;+##o%)l*}6h&Tz?i ze5*~QZdr*AMox(mRov?L8vuM!4MYrvHBGZZbj|W?bIn<4ONQi|(_UjNvAD7Qenu;}8b zP4Jiq$P#4NI0mn4-Sm4X8y{Av7$NoQPS9(gt?j;5b2h(nq_n^9`joYN!!zDQDOau} z^eB(OUxHQpM|gQs3|G!9atWYOSy1VvCF-hJdwp;`#-<(2B%S#T>S=9G&}3T@yfuH9 zZXcfz_xWfe-$p+z(}+;HTcOZkmw~*bWiUdo;5m&(!pY67I!Jb_p z{v9^GN$F&-#m3G-ypH|!*myZVU;BSE3%!o8s5I;;Fqy4eBj8@}5@yqqrRdb!uO$Ve zn3%??S`v9D zWvt01b*be?GC$bM9)9*(Kzx;hXBAjCeK1Y zWB=r-nTERpNJxgB|-_d5uhG~MxtynGYvmghEOmnozua);HOE_~^;nZugw(3VxvKqn|!;M3!s1b2yyI&ikWHVBM>l? z02zsnC_@VXkV**duG}Oh*pNUGrC&~J7i>nP3Eky>k~1cuxMPNuVHlxm^jyy6P~Y&( zBe%O!EMds-{obX5#PA@qE@_+IkHV)HQSdQg?qnj)Ijai%`jNK0AufM6lQLCQW>suZ2u+r_bspRc4ve%y{m}*J-}fClc{Q9VDrty z2S0a_*-gl{M-nWsT|~}IKlFE0`NA8FMGtT80|B@rdHX88O>P45YCsVHb7`32aeu4! zknjx|%^6`7W;o@X7dR<15BgyRRs-?DDk6!Vx6(3H}!w0yg8Tl3Z8U)Km|8ksjx{NaTEyK zSt#{v1n>v2=BfAQp1{@q)F1l&EP`QS$|BT-qD4P%o{9rOcMmY#YsBkWNxrO^uA>BM z*?}nDPFSANK=20UBDgcAyg*OdblR6utxmv!;}3t}uLz$IL|ImjZUG4f5i^-yNYEtW zrb|emg)i0N%Zi@yjN_p}#i&CF+(N4~bnT+^j%5TN>`4^dN}Ntb*T`0H)=08Kygk+x zTR*{}E}1KEvljGKgNU3lO^!)GXel<5tg_-NFp28!ah=%~WHTtRKi&qSrE3Wc-8)U7 z@N9ov>2{$Ic$y37Au3dsxHz}g9y?wxxO5c{(ro;xKQf_}vn^MjUz5v7^3y7Z5y_8i zygJ(q&6}?}DE;j>S&GN;;V_J7@J}M)d#t`(*ZMp0z5PWZ={<_^arQMCr*`hi2&pYe zj9il1xBRgP5HIlEL0$UHSh|gwV#%vd;*L$?=z-%6#6%)Dr8x0nV@>b1qkBv8R$Bxg zC2&MYo_wQrv?l7BB1mNa9B;<#3=8u059~F{w#&5<5FX}w253LTWl-pXR1m$zQXIk;k zU-CNpT0H2&xMdA9PY@Cyz6%LD8@MM~+rE=x8A2+d-{`1=fKnqa6l3yM~2UNyC(gpGL0uX9(ltIPwWBNU>8`^&*Trjr- zQhsU0E=xgD1GaHyyW!CF z(Zb5yM_BZVW)rzG&Zsvx?eBjVR;pHZl!XN$4>k7 z=x;9tK0OMl$~5J7h6sNgb8jJhk8E%EB%LvPU4OR8er~pgW_vHVsX)J$q$l}J9@$m= zi95h6oK0N6P9a%(5b#n$?KpuRgK}O=eLn0LN5II_Dr?=F&HM>F+ii**T&`ypjktxR z+$LQC7ajcW?;?#eyl)R{d4gaf-%2lvp zG8;|>Mjtd^U(w>eA4LbAtOEm+9LjB;IkIQOeu3=33@8TQ;T$Q*y2S#uYxVghsYu8K zk%ONXK8Wc&q<~)E)DzDWdFW7loOVBv@6q9rI;-Aj;W%aEaqFtDi zN5w`^49?j^-< z0|s$2Xus@vOIPI*_vT0Oh1S^fr8OpuqD#jx&E2n={#;UGF=KBzkY^ahVLA zQenz|kYmy({f6kfHtC=Kp|_=N_=1)%ihJw>P#Gj1ib;IZM@-i5H~MY-6@BKIvf9S! zeS^Efs^a}x#1OPOCLF6yxrmhqcBl;To2@$wT@f9NJk`z%dwUnt09XUvq8Ri2+jssp zjvw$hZZ>ORRtICOQL~g(*nw@nX1S{2F8>ACS`g zIx?CsEX;;Ir=r=4-Muv{mo+My)DQD4jz(wi^_H^SX zlC#VKU3;R%)OHX}*jZ#)9JH_Kh(-Gl^+5X>YgH9HAnzc{;#|@W+J(MNvuS&Q7^dj6$t05^ z)}y2KH-lbgQ2x|@d=&}q;d!RBz^)4MZI*T{DG);XGkquiKrc~;(5hFX;3H6;%NIn9Nkx#rn$VPwD z@b!C<@NdxpkTNZ+5sXwVaVXQ!lW#>K80!ha{$v zwO`g#)!jNl-Phuqe`uWhzz}dhhv#sKBpdwZ%Ig_9k%O8JI06+L; z@dx_Re!y8ntR!7*@@1#$=dh1R^c0&{WV%#jn#LW!e4q2M| zP8D82XI5t1Pst$-`_6Z$X_0>wRoaide=Mu7Xghv_i}fv*8rLj3j)H*Ce1CZ_uVoHL zU|)n+5CG0cB09Kmj3GHlhmb#j+vk|4J#-c*lN*Fy*hg>4gz|h_Xz% z!vqINT7&q|be?0VSFM6mRKV60a20f0|wSBM9BUT|F+^$6LFjd2zs{sfxI!L!X zO*{4e{7c&Qx4j|F&nfGS$&&4jJBO)bJ-iWyhZ72w;4)lXJ-TRyZ8FpSfsNGtf+kx6 zk)-m9rZ=l)n>2r8r|FqHhZ0x@NZck7fbb`6x?JHZ1>s4m4J7!X%1!dsrW|hyyj8ZS zMl=Um$$t#QpW)^}@}o#3(DZ`#e9!X`1rS0m>(N4SziU9Ld$So1sq?+9*VRlnEVxyG zV2PkAu3nPLM`bM6*X*&+L!hF;Zc2(u)fSUq*aU*2^&y$5>?GLwBZ?161}F@Y@^eM7uR?!N}mA?R`~J57mlP;e7ds$bP~iqk%YC1 zs7T~ITA=h~ZA&2A8|BF|K^_ZwhB>9M_3=kNrKRV|f5e$0T`1I`x&Kxr<$VZ{x>jcu zPA3vq94oC+pe`v&AzTlnOFk~J2u8Gkp+BFV2srJ#; zQ2DqK`L+!Qwe(`O*+0u$iFnFPC5toYX~Ep3#>H~z_tx(CNKJ)npQH^p*V$i(P=*g( zVaBbIPj@ijXTN{ZMU>8Qn8%<%uBLded&jIpyK8^@+TZZPuuylua`)UJMAQCx+lTAP zRKMF(Hf#6CcS&^~5D!fCC8s>kws|VGpJzkthd=`hO#hp$^Vpdd*1GfsF(9Ww56GD! zrx`it%+w8%Rm0y}Oa@3bu8 zwQ7G2TxM0IAXG(JikB8Q7q%78d5DAIo?18jCGqY*`oWA6-fs*a>{FtBZL<~!#?_8C zwxoTpqQk0Go{uUj+0PGC{rQ~MaKO~a82L+*wH!j2uy7+l!RGhHQZQcJcM-^g803E+3Y;wQ`r}!7e-lJ+?}_T48*=-!#xU6p zxj+qsOK{t^?^EwFzDqkt#DCOwIunQ^;Rn6nKAc-_cE4>e3KCut4!Sc@UE1lb{xZghVI5pP)H$*}~^OJqfjQnC9m@ zv*FvbWe4gY=b?1(pFIYyYq+!F723C0{sSQ#1Da6w$R*!*OrUZ~CmI~3XWI>Tw8W@| zp@g|i_^<@MAwg|gB~Sw=32V&QDhhwu&XDfl%o!#Q`NFGzHPdixmw&Wz>fab%VA}$+ zQ86$UT;nqWcr07%4HFi!n4e8Y4@emku;o7ynts<1#i^HeXC9Re1g>N~=EE8I>G=SG zTzG5sNMS7wWaZHM+`&kwFU`hsg;*(>IW10?4h8I&y-k|?q$Q~m`Ym_1OZk7`3tK0n z6iVS}{ifpav?5zINLq2IQ4sId@s!veNt^`EX>fv&24fDUYTMwcV~m|xNEC@C*&px% z8AzKAp%f zzpe%@!r~<=)c?#5y~qYl!1FULzsdd2l+nsPFRb_26C5+;21!}?{XTym1LO?lLEM%* zsG^8cpWHpK56F_jxrQb=5K{8knx)!3 z8r?R9wagD1)62~3?3jNWj0<&R%v;l@puSUml5B}q{L9ze zrR~6JHp;S`0%8&rKDYNqY|2rLxv+w6?V8|(GF8Bpj-W>FYbWvYVb4jwdNN4v@(WB1 zLi8k#Gg!wV@B!Nh#8&1H1Wdguf_Ny(eyZ_erIIk5gf2*sxy*mQP{8(u?B~f1NAGd^ zyKDmZCC;T+F7X-O7IjYBWXa=0iAFWzu*X30wY6uJ;-&6d;!Km<=*`dZp2>yce**6b7-qoOWTAkTY8BOZPp z1>{#wi%sS&V(&5bxQBP_1KBMGoGe{WZIJN|O z)@0=Q{pK_-7bcdE$^=Nw_ykl}R&CYZ_9LQ|G+Uran`p$zv0^-hu&Ihq`{0uWnN1Z8 ze9l)58@OI?)QyW@7YfdsW`MUoh46qR!``BE7aV*4`p;y_acPKW8N&p?E6utVwa?vm z=2GjYzmk7C2qx-(W!1cm^wMQ6>Q~v?xkaVk(Pc|oN+eI%%M|Rfyx7d1CYH*om>M2# zooz?DL39EEde5z}_TuajD{DBDr>qot%+1o?zAeF6m~%*M%crilapI^fdYai!8so!f**gd6-jUpK4HnUTq&>l!3+(Q(swRB zacl%R`S5|m(OkT+6?}y2>M7s=hLdItHS1_P4KzKYG7PIe9LG0`9ov|Pex8Si9p+|d zR^eX(^p+t?is1dlGJik2fuRTRs?Oi*uN!=Rs+fT9$8JUbEgsL0v=erBz8XFx1#nRaVm@Z)RcADikpTuVAtks zCIGg>(UP49u*u%b6U-54{RNBRV)DRyLRtgAwZm?$GgBNQZ5NU(sW-}2q+MO#evc15 z1i`GhNAiWTT$jP)f(XKs9qm;8zr# z9nKSCV-DV!W4%he^MUpx4KWcIQSZL3*0v*hFC&n~0Ox0=9e+)%Q!av}fITOdq0eew z;WN`p>H0V`2b}q_C3L`!VcJDUxGBz%+rb7tA58A4Jcdl~AxZofz+ zXbkm=X>aW1Y#o0`d@E98L9fy}9Gn-k?O|n_b9tMj+M*i0%Epp)3%3 z!bcgvE*gWUgo@Qu=Q+d+e$Zm1;bwm~+~pOZt>c4Y@#)|(w!(9*&$32|q;!)_K_Vv= zNqyz8&ju}gVxA|Q52a2%t;?sGH9aH`7I2@*TZbYp;A3kRuT4zj(}1rvbM zBZZ_+F*M>5b#?=SpBJG80Px3K(ZlCG^?TF$_CdIW`}v_UN(Lrk6JDtyprn6PhR~0- zXAMyCyRO5+A*}#GK)%1+C4~HW9AXI+`3{x%A z3OBt00894iHnXmWh#K$fYAB005Ctj+CB5J#EE9cgTeA<=0Fa{%f;#2DvYE7g?ppB` z2K>`Xy5(N8R9l-ZUs0W4N$m~y16VrQiZ}JT;7izJndbyS4=5pYj#lh=Nh>&r z-B3g4={{*fWaAIN(r1X%7dJnDtZ`Ys2p{bNua3e#jRDLK2ViU(C=6IRrH{T1Lre>S zUr}Tan2~r1sqUjGbI=;$gL_QMY?EnjY1wju(6#JH)t%H7ev<3mNVo3*!hm!b$vgm4 zHd=@#xot9F7nBAME+8!i>fZHfP(X|6D`ThcmjnYW9&uR#oZxaN zpQik;CgK#m$KN~sSP@w$l#yhJOeE9C;NMJa6coz+H%SmisKIGe{rubgWZXomAUmr@ z_hiCyVxK#nt(1BFXt>hbP0LYg@)9x zKG7#}3cVr&_n{Ajoo810eG`FaH|^wgyu`B5HDruBzzRq_i zuV6D}Ral0vF{WOAg|#b!ahNC~d~Ago9K{OEJP$W7v@4%~NTaS-j^dz7*8i0jj;P+{ zMmlxc4AGSuInU?K7j1LyH!SXR5sNM`uV~IDCIScLMTi;N&evi zfAM-iINx7c78J(;i99@v>hpv>+bR2+n*rtN3P*2;!kWARd!K3{bB}XGhB*7pxwZm-$Li2!RICJ8A}EQ*ig`RYiw-H+Fv!cXzOIEwXa2Fo0R z1K&^x!pS2>cjNGUY%B*a5RcJ+d8a2-%#(*7v>4Sqt_T+vA0`|?s(hwX+hfJ%96<#f zHB=jaqOB8)gAsgc-ldJ9Ai4#u^nf|IWT6a*G0$6MA1Ze<-Iuyen28*kByfCx1<}TC z`h*ShhSFUKapaWy*+CRL%w?zg9V!_CN)!oMzW_pu}B<&XnBFMpm_MECL|4GQnpBfJAi>6`d} z<~#0yKMttWk((uNuiE{qz^&eW3;h|aFfBYKyZ3xwLLl36-JP_w+{wC5jDn*6dwcyyNnLlbZVL0ZOq(I>3lE@tHVDHO)C= zYFS@qKj|#W6emT71xCWVRm=6sg`Fj#o3K_Yh|pzT1WcUHA86TZC{gFXcbD*-4aI*E zDrIMNf@-LoWCh+c>JDp@CodKOv-oVYt5VVDPGrR>3^qCr2xt*1@Q$wwYg!Xk*N&K7 z0dhAh?qHFh%CwMAkO9xK#Go)C4U{r|KZ36jF8GlFZSQjWk-vrwCopG-iaA8V>fDdC zpe;e=CAhsc+hkPT5${BeqDC{}P(*+2qYvz?BfW+67n<=7vdmlLw!-7W60znTkWTXA z=OF&+J1{~?<3achoe54}Y1MpGe!Oe_cx|PA-iWRirG*{b&KIIe|xO1b<3M?=W z-DVEa8y@ayS`9ySN1VEP^P2-I;>)9f3zA-X0VNPW9mh#8;H03racY&ePyl~saj$au z+ie6VOK!LAkX2A$9Eu1oTWE187sh~4IqPOCdCfpKx{Rm0sb$^Y)JGeYu3tN=hH8+g z-vyMnWoA}b$HOj=1Lkpatq;gBhM$eUMOEViMVvokJDJSK*B&mj--pLX>u=((*ww@U zi1Ssi>#Tw#{pNZCFZ>wFEp>l~#FKC1%1^1-KgN3wK-3VDjSCl2W0shgt9_PDH>rfB zMYj2o6p3n)R&1sJXySV&#LW17g^NqXMc{eO4WTAas@OqvZBQt_IAfLAbmt*wB=aHg z&WEc+v@i5&k*-R~gKO^(q?yvea)Ija3RBy5{W+p+QJoU^CDF&Lsk?tRiPM3+>9^rX zNz12U1oUm}j&0M^ky~y60h9EPOXf!v$xq!KQc=9=5T2WP*5r=c7Ef(4N`YVZ;BiQ5 zt9N|bGA^C7DW>>3j-PS+N(RnJUY+;h+vG$ExCrf)R+(3$fSjb_mc8WHlczOyu92Fw z{!0uCn~&4#BKM7Z_j-TeMLhs%d4uh7vbciDQX@t}PFlpg6R_BYOYrH)UUAHdVR%fZ zWi`@o#Jd%q6!DgS`xN#Qr8#ly^663?-nYXt3`|U9_RQ2`{s=AcTA;4Zl-JmxIt_=% zB`4{Y!vR`MOa)ao3k1TvG%g93NGIRA8$}S7lH_#6c}0U!3oU=UbmKIEP$FuXUs9Tg z#Pr-xwg$K*VLWrsjy!Pv@$n>-anMbckR}u!qJbr53mO#pID>KFwD_io9e<7%kVg454|Roga?;Y7sbj(iq`Z- zR9EAIATM{|0u&ht@(b-qk%lb6(+&?cpf8tYZVrGJ14DNS77uW9P$TuR@6HZg9V~;O z`e8-|UR{6AO5~JV?<_LAH@7WQbw;*Xm#I7_&`{uq4e^x7PiHmc)QZlTbDg($W=*s~ z(svH=)hNdL#|wWyh)i`#ZYAWR|xm@ z=URvXxx>3{Jlx#*rX?9uMhG+PcBvy}P*Wiw5Qu-A8Fn+<>&o%i(+IMdqwU)YxCMqc z(5s>+2I?ae&1gS{+nNwhAP5V8H?m#gfc)X9$hmm=r&9J@x8?H#nGvJg;L&ixh8EW^ zp2x{)+02y~{r$EGeq}Fso`rqC2>6YDTjMQM+q#7uvx@5+h$W+$i2dK6f2iJk4G5P` zP0fD;=fcSYl0p?kM@au4`Uz#j`Ux#FJce{rmw2f=ARgLMwKA@V%N;>DofW;a<)*#P@iM6k(G`9LUhz^mWsDH80Gnk$h|gbu*#I<|En$+eYbx`W6M zuSl6kKcu5NVZ$B#o415-4G6L*7Bw-oMo~$ZG?7MqD>zI4Z|`M)T|YK$CkSuzM~58f z=@;%|U7vyiHV=-jreQ_CNE?4e%)>;E{|E^Q1fs*V*OIZEAw|^-J)~0Ww=Oe(O;pohs9Ip$Ko5n?*~%(}>Y)8D2UDvsww$mySguj`lNJ7+dR5fX1S;#MCE& z?6^Y+Fz8ksLB+D&SQoa-Ka)Y$|3kG+;dF0+o+f0^K~wj-hqvQF=(>LqMqOxg%~V>0 z7NwDM5PaML%{~=%gpqk}MVx!gs=#PH2gtdE@85aa^`b>q!xc9K*f}paxXbK(+enxt zhvB6%^)zypKcUfW?d479aE2Ns+RwZs(iwHYfucfRMv8<3+q0(OK|iJVkz4IoF*+#F z6~zvY&DBOC zNS&q{mOJ`%3gv&@#rPmYBAb#CemzbQ>bkL&8KE0x0oM*IcKM%li!4M4d1E5mFndFT#n1G1t@pIW`F-s4I?KNzw=4SM|>fCW2A&gg$Cx{StuGxcjW>=9w|oa}{!UNd{K zeZh2eDGhnf1J1=pwpHrG;WHbx;&SIvo{nVpDyU0>XC z-?)EAUZ}sQ!l;SHKOIsHfF+rkeafBJzWqKX%fFZk1>x_>(Kjps86PItuiGH}QjrZXxX7<%9Z zaeWx7Ip_rrG2iv;J4wc2y~j4M2ez)WHVCb78{ivu8hmbGIG&cM3Le^plxcdGe0@qz zdE%876Cvc!-u@ZDVvGT!C7V4 zgrJB1K;Wx%$U8{EGz^2G&U}p;9HTXWY-ct(YQffrXb2rywhWVf<#i_d1xIP*0L?$J z!6!%ESAnpR+#C19VZ7vg>ZN-#lhswAFGwB}aAK56{pO}v%*!A;vs0n9SM-0`doPT! z?*zuNjXOF!P083z3r3|Ov1GW!Bk&N1Jm?y~;&}J-9^X^hmG9RDnR}&i+t$eMJ{IOS zQ$k(I?Dg~oRhQotfs78d6xcKWVGowKCNc+C~?WHZvLcm#i7#1EVnJJS@9Bbb$F=UG|o^SFDTo4z=*8m%75{xO!%#f&|MdgpL8PKamfg9}{;xk@{og}!m+A_} zcWh$|f>)6%eIR6VAXtC18b}-jot%Ywa5#E&Oe6g9eLoU7EL*d|WE?g>pT6iV;sJe2 zFMQ_C#zLK@mksj%P97rj7s1(~V8|5&k`-i=zR|&@xnyK9;~A7lPP$h3ieTPY*hn?7 z_V?DPZG2Af6?(}k9>FgY5gHy*+@<82N*r2CfDkhZHIoZbpnHG6sP{_yR<$o($~rLP zjuFS)v1DAK8yhlG0}gTD1*dml>hba$2J8JCa>mzb_3jteJr6O8)$*rT&ATPLehXS~ zaaE}nrhZyIS%MUKZ#st(RZf1aW-(IrYEB@%zivE3W5*;J2rX^VArGhk7t0A5mD{To zu#F)DFxk3oz_ovD{Q$w$m$z8uxW8*XFa)`d895GEn(B;lo~i!9bvn~oZFX^gOaEHMNC1EvyhIBWqKNW;mZ2<^%-7NZ#g zyZ20KE?q3?`TezQorG+ij(TcSJOde4BPG-=@ycts49kDcv1DSpp+7hW81kZzqrum= z#@}xOXIMrF_7k}wmc$$bnV`&@HVqLY>@x#bzXrp7gpe_R_cPknPneqV8wU6}Qp^~2 zuL;A*Kh0{?*-1ip%@ow7Fk?faSd~#|g)%V1DGXzBM)0jPBg~40RO@s>x01<9=XoB@ zcf9@+bz^_ujeQBe@7_Cc_lkVWr3&5+?e`O5FjErTJjtEfi?X0Av51-@!cBL;R)67G z{PhthqMs5}mXZ;z1L!{|zXB)OxR~B#?g`fuR`h%})#@Xboe80D1dNh7t-? zLTG4i^ooI&r{2GNrfd>C;}yz501ClWkNP?s5{*&(aKxd86Cr|MW=-+#88dLzg_mE_ zM36<~8|;cJDea2=6{=V{*`Qk~^(~hVFgLD*zj5i}^6*sr?~6yA<$`!Zb5AyBJLQxc z3jTi>X6Q!RP}i;dCd}BN|MsIk^N3-OoNjyzBRltynFkR{=U|Z{Au3hqVJ?U6tM2Kh z5o^pm3Li!dLpr zrgYgzam*^Bn*F3ZTjiJvBq|O|oj&92GRl9gM1WQXe(mF6V0$VxXD&{y#04ut0+ZQl zhuS=g?b@oSoaL%U6>a+SkxRVM&}tjL&@Hih_M^IT1P4WCQh8qrC#+Zau61EAa&N1Nk4Whl*u7_ti1gjPkR-p zn^KpI*@T7=?{a~$Js883na;G7Gj$_U)gMpvnjkvj9hz(7**GpXeU;DcJ07u;N~LfQ zXY$Hq*ztEHid)TK%K&~%=GOcvU?hK<%qC7EDWp4E>x->ogh?K&vajS!kJ*`x|JJS` zt3jQ!+qUx3v8zeDHxu6hghmV^So(~}iFRLw#3WFN8r9EF`*`~PCvZCYO9z3cnz*Hk?DC8sp9?0{wSX~0Y~KEg0WSOy|z#$)X%jna

>zgHZ3=YoLDw#ErzZ`+R>v(M>rZ8JGlz21+~yem!><3yphO>e5NuKRcU&wg)=M{6$`;Vq1TeU`wguAvnD1 zHq)rp=BarE?;eo;Rd)aaw@yy=-%KrW^e`2qzkSu%#5h^^5^i}Yt2fy#ktw@6`YRVF zRm0|vA>VHSJq6p78#ANU!P;sVn_P=u{+oljYgj)RFdKd~+FPb<9&Sn(l@O|)8d7)G z^Tzm`(3EqgI3I{Ar5JzT1eWzV0G!f&#`|(aT1)=8BfG~|P72f5%XwYuKbVXJr7cOF zcI~)0THh~@e8?eHR0*7`3aRLndg?9LN7uY`jq!H9MqP%6Io(#l+t<-uR@3RY1`_HeIP?`QZKYaU^>r<4|(&@+Vt9!M*9R1fxN53 zeX$i}fV~|ef~!SzdU(^X)Y*hf_Gk<6~w_{6;A-~z*AssSbrp!m=j)UoBNUHdoAqoT|{1aFS&Y3-_c_~EqS7s1P#IWbClj&j(NsELky@YQ#%HG1B zs5@HT=2?FWy4VW+RFT5*2;!%3T>gU7f1U@D$Y>=vPjl%vw@2`|sS@@p%0$AoA?o<| z=jk?1w!$=kOMdFLG%YvtAsC#+Pgr1uLV?#@<`!g`^5hx6X=UV(=6Df7vi=NdH z5#`ccTFNBZoKfxhv-6;S8O#Nc>jc^c&c8*=#EN(&or74nb-+v0kNG;RoN!Ob?6QY@-i9c2xDK7#7c&-Wr zwk&_HMUynL#I@mz_j0=P2rSc5q;7jjD($lL)eEwAqYzZ3>{2SI>MO=oN4_Z|OFrAH z{O6l)F5S&QZ7p)59Y@P;0hh<*2@HZDFaW%pxGa-5AX|D@b14on#5^3r9m@3nasM_1 z%s?^WK#rx_^4JRnBVhz(6WtvmK`USos3XCKpUIZ7#buajpA?7kw( z(LxzX!2pkgXy5={dyb2~0xR%vvlERBxYko>W&u^o572(=R{aAE;aFMLUZ5|8T!eqi zr-r&=+Iq7-?|v3^qEom^FF9nzf3^Xc3; zxjBtGP%i>dMI#fK^1uM1vWiWUDA)x_WMeK0y$gf)1qXuv(sGC{+GSr>=hndh9->K= zVOU)xKtW)d#J^TsM-#B%s89$cvXXzPeedFj%Sp>;KPW@lQ;n!e88eZ*w;jkX-@9Xi z!rYCsQ{)wzqLiG~ceRy4OT2_mREKR}TC^r6p`?_zb8NH%cBJ#IB=V^aahZoaW z+9H>R6S8Dxz>J`&ficDpIdS{n{A*)LVQLUGc_B6sfhlW_!_59a`p2-vlc?lQf#wKm zqBwX%E@*Wy4%3V;G_fN(MkJ`NJ1kPrD@Kl<{P8n> zk}YvY2UboK30NEM-6`vHZs z(dn6&UW2oOf$}nngnmPkkZx%L-SnpILiq^dT1Oaj*(}{q_EAakK$?F>S8Wm#L90@m zhACE;!L>@!wrSah>$lxDnkSHv%{;wZy1)`CoV3zt=@9GF2`1in#YF9;joJ9DAC<}rN8Cp#+`?hbL64J@cl=B zVL^~$`1^nTum79r@c(}x9ggGw5FP%b*f76;0@%&g9&B7jAF3|lGmP{PTfoH9KSIMl zA_K#)9rD*`JY6x_H7nL7c{5Co{DU8?$;3`ssGMMQGmPZ^$!$OWe~1kKG$NL8{(Wqw zOdiH&hTnh0g5S+BB8{X{&?J%(*+O7V@8V zGfbTPBPL*(M0c@gcM>cxjKuv54jumy7yc0z{-_hlzy9nQhwu;)A|=)glhc2V&YMg0 zSC}INkvqf4&0l{OjQ$7<>_4Ic!?5rEcdD-ZDOP_(3+82%;7>_p@4vJNpWx=7;2Q5R z+OJV70a)%YlL>!+=T9IovmYZ14AV2}%_xUMx$LcQM!TaXAAd#@2O?**jAx(BYqV-> zv}~PA>>UT9V~64se@2J#rsq$IR6J>+dyMb%Jex1_ZEH+}J)B*OjFT_ILp3^^8_W^h z6*?fqEhwsczmg-cGHW~fy{Lwu@`9cF(l;O4eg9MN{&9aK(wTj1W@ni66YZEa+TAm| z{4={8F+L>{9Xk@6JTyKf6dhYqUS(A3xKr8~{uf7=Bjy*wrX9=5TZAm5#7S3cEaU$b zbxDHH#2jPm0wQWLYv?Rf$2Kb{%q>K2Gs{e32+aK#d&b_3A~Urw2@WGOwIU0wAhEVB zF$ zsO((}Yz-N#ZDdTH!tG13Os%l1z*weFv!;V_tdoBy8auOIH`A5}Nka1%ALNSnZZLN* zF}4jbwW7-nBQbT!F}3j5#xf0KF}3+<_kHLegUYqSaS!H6&iuE!R`9La_iuiQHp6bF zY1+c8J^3p(zR%XVEV6MVcE~5yUH;dOn4bL?Z}_iItNb??F=}?k z``>>&O6;9SViW(H1EOQgY#qyNYqaXWYbCSf3aL>08Q0F2?9{Np&L`XUBU{IUZ5tBT zU_fZ#4$YQ`=4Obp2=g54CUJ2b7rPEWNO8XAm0{cpAuU^R-QUgY@cNwLlhaR zP58bwn3`kj-C^pSWoue9Mq_86J`rE0lx%;>(7ah^USqa7zqJ}lWNd+D+@j%FKje5k z;2HI&xPGDhR=cE7f z_Qjv^mTF=QE>(@tG|*?(m>|}2zMB@6M{yKV+Vfyd8z}5Yt}wz zpNsvh*`wm5yFT7}>#fS@MRTYnC0u_nFbyG~xP+2|d3cA6@I`2x->a`l3nh@=5+D73 zPIL78l>mGe=j$hbcy)2fqQB$wYy96~I>dw9sk4b3W5TPf^2Ir3pgq~b{e4)Mp0lU7 z$)nJvh!(J)>r${@Z{#(+5<5xIXNbvzgzwZ`j|F%)H12`;iEP4MbE_ykteJnki@n?r zlC}tHyYj6J6W09+>hbS+pRcuTd=iz9IdnH*Wa*nifVI=5trm+ampm_jFPOcKS!a|e zA7wJZIQ@=sdJoYWn1;lF{LCgd0da9Un{F{qqkp*P#AgaHMi0;B;@{C%hPZf)CAa8r zc>gY*_xNY!lgio1aBkb;dMbY>t6AC|BC4+`NWqx=m?ltgdtWZ{o=>;ch$glWy*Hy~ zB6y-po-~u7@3==uPHXSLpt+W5QwlvMYg)8^Ql-k+XI_>0?4UoX)r@~E#X}@N!=>HG z1m*kVhyZ8QbOAq1ZZ@7+kefxYsP%Kyw~KkRbGZ=M^C*H~2jv8&0DgbhPeP2d)4hBN z5Bg?$z#M`0?w;!~_=3w{0U7T9XfwqSU$df?$MYk09W%(8G@KKi3kKybysLf@mmmMO z-Y8#PY}iZxm@8M)13q)^h5O)EU&%Y@o@#y8Dp2xS!~{DB+uqMmQI$pK!av0pSP z1WDxEoD}m}K5-q?$iw;6l^#OOdLzlcRI|Q)59s|%v6p{qJRgY=<+Gp%AQX6q=MyD~ z20R~y_{rsl;>Fp?fK6b)Cu%7YmAZ4Q%H_${{`&rXxjp%rILIMSSBi#q$k*ztOVVl@ z2rxcq#lh%VQiBhR2_?Eoxh~28q?nVOfmV#@F3tPhQ=skX`}3)3)$dw1p!FYa6(e7h z1xY#`U%!97uC1iRP$bW(*;~-$>d$agq(mxh0@ZOa=t=m9U8@{Fb2*9n^-)NqP(~Wsndr$$?h%fh} ztCn6tOn{d_KGdBA?#}<{m+ox8>y+W#TrRxChy!N0#FVSR$b;HWsB(6TInv=$v~5~@ zJ3h_sx_MU4VDXj{>%GL;r{P8mmgLkA$wq(fr>|S0f^>hwbJq1C6q86xMKxzWZRW(> zQM5bOgK#+MNo1=Y-z(uWN*3$FHiReDAFVx3LCR$@UeMK|4ANv==8Jc~l7!ZVa1?c)FMap3NbDIo5w9 z&W5?ky|6Cb&yreh@q7XOd%*Cwb0bM$B`08fYDz_@MBfzzV)3>&d3HTkf!OVg{!JgQ z(FPPJy?&60r^YR5`JE4y*#@;LKzjn(@k%dvgfCTzYja^=C1aJM&K8o07MZl=qHl$k zglLSLAi%1SfNUE6wj>s9%J5NX7y*Ayn8%wU96Oxp-k|tp={t~5#F>R>5NDDpJ7dcM zFPLH>oNqdRz9O002S%;j*XrXrx*V#u?t7g-7Du}(T&?1KJw>MwwF&g* zA+Gb&*|ux)CYc>m7jvwZ#5Au2!MM$RnoI-%T|JP^;(1l|$+F#W!=nhKW~6;%wv?l< zHXvHz{p7xwKKtnhs}s4HrEd>pKW?a)2nw1IqZ6i)f-;Obf5Lx!Hzo&{xAQGEoIyW% zImfHphQ3=`xp+n75U=Q%$y&b1Q%)POgsYQXy^H6J=k^`X0>RW_iggn5d&i$0?oaK- z_OG8i*G3rgv6lL_f4s+HD-k#U3yyIdj{Ux4QMu3Ax;#Dn&YKg*r4DC5CV{@lCBz^P z!wBS*?RU?<$TNS2xzpx?=zLBNxkPSaX8okm{5RMlV9?A-s(rBIbkUHE!+7v5tmn`- z_MXJgbbji%81&Q0u1IzZK`@^E#6u{UDoZ}(lhT)zMVyGFn$J&t+TROh!B1RtkMVAlHu6$;SdxmjP+Ybh7 zA542MprC)q2cZ-z-97@)qiH(($;31cYelU9poKDp1* zrjB)9`NjnPmcOJ4@JAc=4Dx_kcmKzjKNh2^^XCh5uTpi0iPsV>8R@T?0!wa(F2}Qb z-7`cVAUot?S6uV|j04@j_k@v4|LF5f893Iq@|#l{a_*^?_J^44;`uip`&XPmzdzPQ z2YG)z1vLkUm&&DZwWU7d@*y@1_0yTdPa*Lrf*xYpfPt>jwu#o~n7|$0)aBF|4%eK( za>7~FXks~I6^VUT00g6y}5ntWkfLkTFvM- zyiT}Y39AA{CO)xw%ok-Pg~B@XGWY-Re`0?XVj42oYLgFfK#EZ(i3B~vb{n;F4JJo! zosr%IMCX4T$8P*5NQ&jMu6u&~y5UFPR?OA6pqZ~rB3ORCT3F%!t)`(I9LBo92AYP0 zJv?H^sj|@rzWJPdOer_~Ki91%d*2>w{~;e7@7mpWE-J^@-yHE1;-cKuS7<*JbJ2f) z8QZmHjIBeq)r#%A71*KHC+Qmbx7nOPLv@p?rW=K|>3t|U(RNDe<`35IFd9>^r2w7# zU|Zkc^BK{z2wojF#`sE!R&(w!tiwLSos%|PJ>8RLuN%Zhv?^9v4mkxCk{}#WQ((`8 zbMto`MIHr}ka)xgKYsuE9Z+2g*vkFrNeiiS$ts}O zxgP3`=`H4gg#o|w^75kI!25r^o*T7yHA8iIm*uIymx{hT#mnQ}=quPtyz2`rTVM^% zT%1FyrplKWKK8Qf6TxA^T%Ix}Oo$ehk=KE?WZ-nd=PW5H+yjHGxYh*Y2gVu9cbc)L zdGH%GWu~Npm+4N+jG6fe2-%Yc;~M&4F%2x@1Q~(VF=CYv8X912=x~3F9B|OrI3p%o zcy_!YSbuAUmxjNO(gfp`+KnEtHyUncqwx);To5}!4C44$^)-5jKN>ruD_{UL&ff!U z>25K(y)_-Zn3vK?Xc6Dr+7r|41@# z#*+%tdvg=abY`ZrInk9~$BaM<|2%zqc0pu^+L zNW}3sr?Y}&a+x2`zWnujJe_$H1zM^pUwo3A@Hwc)mwvmFDPn)*!EY~lnRRX7h}R?` zDG1X!m3v9@g_e4+AGm2Rcp0?GHYdO--A)dq(re0^Lqa5h)uYG$x7@_K7io*gFGqZw+2W{2vZOkmc@nI2= z{kQ`yHv!~jBp_B;x4`EQJEfM-8vmF7E7Q{PxK_Oe^~8TL)A-`H+0sKt(Fc4f%xhmD3+xZvm)kNO{1x>evJPwM zGz#AwV!8b8$rszxpYgm;T-Oh5dF;K;jXL+D!o513^M9x>^S^oWv>5W>fI+E9^x@3q zWcnv3`Hg=y=tpE{%l%|W$m1l2BG5J$tTHn~3r%!^B+OF;X0aiY(D7TwP`2;pf?W$q z`Oi8~Oo~PIZ*`yuM6O4Sw3*{H^H5~f8<9OM|H384<-w^+O;4+2J?*&&8>6NSo@MV* zcF;3oY#Q!u4BLtJp__6r8@g?@s!V{7SfM^eD|&yfYqTL0xxpULKaV)DTQcWo^Q$Y1 zzq5!Hd5nW4?-e741~Vm)$wiy-}!OZvMyhbHIVbWQyvq^xu1M-$Aznt z&i7cAw^Qevr`l8BK)n+>d)x(m!z#_^;$X{ptCe_Bx3sb)G39h-WXce$o18DaE@Jh4 zOpSl^NUK@Q^1FylC4n;D(Dt|3Ha<)6yroR|+h$%(I*RYwBJi=unwzKI(y8s91FI!m z86WS%Pu_?I8E0E1g$FB06=iwnsh93ChK^_sK7;2SF^x}fhzp$51)u}^rb&%>Ft$JN z;HmaG+3sODmvX>`r+)mguDr;B74~ceaq)la*tZ_j(BbMF^9<&LfEanKqZ|eam?>&l zPadAr8IktLLsoVo666oKptVY1x?0C^z{t^@dT#|oya@YQoY0PN$5S_(U}u*UYyI7uh!TG_ ze6HdC4dxN&134kRzVVati6cL%l@HIi8G#bHWb~C8@*%|iKCIgh$VXmb9$sqHTeZ2v zYrP(HB0X}#Z13pF-YVb(b8mY#eRxZ-j0KTTk*O zB_5j@5vFTNN$O=_l@IS71;=MA>3SEvhjq12<~JW;)lgIDa7>^(V%TG?ZE}iS)5v(_ zakctGpcTUA5z)Rj(@B(e9`MvWZ1jHJe<3Dj%PkMIZOI5=DEy>#omz; zG@I1^`!5ZqNSW14YP;N2_i+f^7evt*IOSwgAm2csP|>!HV(6w}qN#tYm#r2e@iJy* z1?zqe*wd+Xd8+dRD({1QqB-)FmGP)i1Yf&A^?2deYGJf~6=v*ue5lcc_ttS`0z5z7 zAFd8xV}dpgPi4UC<*(^jk3hEp*zgXt<3opj#g95a_lbVKicCp_YNJf5=&pD8`Zv&d zmXl;AOoKMgNkx`Qf8?<^zxfjIc5~63Q z1ylOt4K!a8-H~H%ae2XqQV_7@VW)qqep1n%2VhL*a*sXU``&~O6N!5R>#K%9Lnw3$ zg8gHyG;{ryQo?_quQ?bKaa#~-%82P4!n}*^B!xF=u{laD>#(@jDtt6pu9QOEyp-Jp~Udlgz%6UHRHPkkw-!dr*dDe ziB=mw-K8{9TM5efAsB1tt2R6OrO#N^V0`4HeewM+pecVO$R|*H=le`;P|FEgW-0YT zGv*ur(8OTN1=a4H1|QKyM3#@Qk52~si2?Rlkgple-1C`UU5$~yA?h(s`g>};kb{cA z*g5ujK}(+ycbA`Z{l41xo$JTX5I=ol-2o%?r^q_JH&N$atUJpq*lL*W%ledim{E&k zJVhPC^^t#XboT>r>hBh(8icO;MN0Ji&u`S|VT>>`1=B_I>sDr>D4Z_CX$d%!U=oV9 zdiDHD^uzrGI9o0zGF{{)v5h3QA-?JO=&MEi!Wi}#K+?tV8ByM}&bKg+(HFa!kukyh zB3e;@e)H*Yg5L2oL5*;Gs=sdFxg~!z9Dui2f6;$w09in$zik(;{z~*^jdR4%NA2g} z_lxkvm0Z@s$6D9r1;bCejZ>ZUde-;%8r>=mpov}n04WJkOw_QF*<_714;X1GMhC{rM<4lfP z5QF6>HT*Vz=30}6T!+-_gNj{UQ#4A`QOIT#>@xCnmtE(KJ04~`y4U^-d=wBF>}S>2 z^VH^GP$zxPgLq~GM|a`^zR$)(-W8w!q8E})=|#V^*GElc`CZTK8nU%qC8nq&V>gWH ztJj5n^qaA-wu*9$4J04#-p+r`wG3VDY!s7L`aXewLZ)&Z^gM<4E~a{W^$UHbvWNrK zf)m?WZ(Al_3FSc{b|i_Kftg>u?10dN>xGc5ZlC=O3Ql3AJ(P}fkYA%~*i6VD17q}~ zDQ%n|2{`K)T@UBXtDSoACr<4fdW3fdEXb0yf1PNsH^ERxh+5a8ZhX;w%9|%$v*AeqHa=k%6zMdHqh7f&FYl6`Ok7(TzN)jcoUjbSz zD6EwB2Hkz_MUa(UBCa1YqWlPX_NfvG@{)fKfhyIwDvL?MAxSPR-PCe3qRO+lX$GCOsCm-^DL&=h@fX`vy z{-&B!u`vB2#hSi*k6uYgwc=D;M#x(_)5VqC=e-YIE))BER;hsAB=fLd%pOcj!W1~G zWRUd56!}QT_;Y-CTu{>}qPtZJqMH(b{W!12$X5!V*!f9EuB?1PJxhx9<6+|TMJL@L zN83qHcE)x@3w($zQ0Pm|V01Mz`AxF7A?w=~ipeG*CHBR3MWapT714I+quS9Z zr;&|ra4i@4P&?u3B~f+Z6r?vzh^Z{hH_HZB&Wru>~89x2%74>w%W-uBEW(+;H1| zg$a~Txt@zdM;X3dd@Pl3Cj~TE3=jGwZo81qe?OwO1UVF8Cx_=PUoz9)E8B5qnvbOC zq3er~`RHDA3vW44OPHH~TS`&e>+)ia@}aO5~I;duan#91Jo1#33Jb zN%ff{yP0O2aac8}pPGVKCgxfeqQ?0a*B#t45aAA-H{h~}TM&J$+3&euu=fEw^= zplH#95f4O%h1eGbuZq@JO(FLD-uaO+^F=Keaa~Ti4)30lBJy>Au~>jbJ$f=*=&!Ki zKJw;>OGcw?GzC99o`b-1i1$(+^V9Jh%8PS2_8Clpvu@{em?BTryI8w8X}8YLemcvc zmK?Az@VBsP6JcfJ_p+c>8#B@!xaJ&Zn3yqsu!UM$J|lUTLBT-SVb4bLV0G~~t2b%E2bWfy+%ts`*0aDi7YdVt@(hGHSu-hilP3nZx~71IU|o@nC6HH8EUj1U>N^3*Vgd zo9?_xxxB1BoDI8s*@SUc5Q7{oD4#$hs8I#h@A#}+t5jnoVMc30?2;HwDQK)dgMiwI z9@HlT$D8^Dj{0uY&R~Dqnt1duo*_3AXyk;YUQ}3rKjfSoO09KE9smA3x`yk9{4wN! z_7x0zjFqelk0X{F=1CE|&7j6-R6p^ai!;YRA1>|iKgHyK_N)Jj`LD>+4>jq}uRXvx z&$L=o(-uDRNyxvLvCk@9_{1Cex+C;(t&2nxO7lM0!>u+dj8Xp&U8(e#5k1ZcdE>ll zUi{vF%wn)EFC<$Y$dn6~j0*~?ri90E;3?%rPxibb+KXA=5~a&IqdMT%7Pa$~7&Q&E zcI8tqqguvV;QI@0P7kM;53BE(zk3W#gTtd)xXSNYU;YMjpEJ2(wCZxI|8U9k4=|I} z%*y!!_P(+0!}*>XjrTNnyqUhdr}F!rEban-QQPnHJxPS~5uSGugE z&ZpE)HjvsWN#tvoi8{KkAy7>BF02z_Fo`F?Rd*Z(|! zE39+k(Y+2zcxaG{f5AK=Nf80KJo4S!MsB(Oo?nChqVMXsrPEvte`Ynvxo4ik;zcb? zOTb>~bU%6FRTze(n88Ah6y*y6{vGJgO>*ddr(&nIF0@~f&35!Vj1J28MRAksVqIiy zoACzVYqTC6#$=-XU)Vp~;JoTGTq`Jl-Ms3LdmQ}hLi=&zD@r$u8W^WT-k&+|6SsNR zpAI_!e%Q;&uMZA09A|${^PT#;U(H)&F-BhM+hpi;nXP9bSvK!hIGiIoDto;k$ze_d{Xxx z8(S1~-+YENoTW6>pu2l88s~^Byq5#`zknG?aovwx#!9aF{z3ibh(Y-Cyz8s)tc<3qOGlo9nO8wDS69j zYVJAl6eN?UY1T@bW&qkyYLq6Xg*n!Pee32MT>VSr*^e&p>Se8iHn%+6_zCCY*?!}Hm!TGXu`?lH zL#_E4|D9cWuYOXv9TQ;&zM%OZ*?XCvnI)_8OeW)qR$hIIK zJ0i7tqlUkS6h1^HduF}JDRy2{MU#Pdog&}3Q5cV>7>|>q9{h;G27T}FY{z+7hpYaO z^u(8(_Az|de(E4!&zgaM{#y@x)GiKj>b1y3kNR2U6ggwP_mz>!T%uhkdBAiTlDc_| zcVQ(!KKTjcficKq7jatNFx$SxX8gr*9x3yKO>cv614~|ULjh}FE+|ZTKgkV;Lb~2E zC_#vKJ)@qNKt2noN;&0o0{JqgDrKe#sVNsq7rCq#FLN`=gMI6N&WR&GlDD)pgKj=f zhI9JHp#N#cFn9iRp`Ay5`lPYFmEEl5Ht91!q-RGRQNYT6CvOu1E0GceH%azEaG!Y4VvG`carg&oHtC2*<< zlhl}apXc$-_?p7xC89cKkmgUck<3nJqfQFCZ<*}NUryj16q%c&a&>Z$!9$13i2TkP zIQ5G!G*aI?8_pP}%$bq3bhzjTMjdtmoH^>FpQ4``|JkqFocG>n<&hXn^TMIXh;0tO zImaGJ60`Sz_v%#{HII!FERzo+8^2LnPDAj2#}u|X_wG#2N4x@C93bVSVf=+V{74Cz zhkf3K*q_=j?x#tYL_NIih)rXv6zhwZv)z11^2ZK3nUI)cpXV5FPCFaU*rh!2X3IhU z{9WgT-?JD0r=9)4^ZkjdEU(?%($;B739IyPFml6xtH&U!N(rgfVS?Hd`5snkpPOPD z=Xxe;uhG|>lDb?H-3Za=IAi;LmbwkYto3=?SYBHm*7`$g#0gkDd*s7TX6&g*Xm9r1 z_P^##3c8g~EK$o4cs$?lQx#YZ$)pUDJDB%f82Qa#_7_wk+Ur+H^w;B-Pb2Ex(*!wX zar3NyONX~5p0{byPj{{#U`ukuW8eN%{afAbxbMrnh1%PT8t9_$^aCrL=k=%@W`T}( zIi}yUe?vV!GX?ke_W?8Ic4oaP>~kAsY8xb~P^OEx_584sYzmSEqDG%10v)+`+zQdkBq1*AFFq?cbE8lohK1FrA+NiGP-KF*%Xjk@G1W&MO{MAaOfUKlLtJA?%rf9j1mc?SHZUH!{x+tahI5t0Dtjfiicqj zmy54C;<$y=g^kIuhs)Xc@*=(*_r!c_vI*{~>i@Fy@AI#Zx?mTB&-=MYs|V6^FYk8@vRA+(y#!5@RMF{{RuQZEKw_?$vDT|5R7ETP(a4GiwL z;BqQ>40^m0Iu~<5&13fz9Y~o}R#V$$3~e6rFg%#!92uvcj@l#NUBjS*_^CHx=N{Yt zJ==cAQf0`KVJNBU^4j%aj4? zNAl;W|6C^)XQF*YOz-=sI~X*#IEy*^F++KtL3i`Grx|oXD*ho3xJU!}CKbTwHUsRb znbXWlj`bC?YYZjYKr{q^`}Dekf!ouxBc`k*Ev}~k!(8PFFIVz4G?GexgUjet4*KD$ z8U4zghEi)rq4c@d0ygUrWZne6esOp<(#1HV=pS>`5F15Dc{Lq&Z?XH4o4Zi|n7h!! zz7o%jqTY)E)Ia%yzqRwnoexgma;jU78sP7qPkBR7i+a$fK9}>6@0tid8~b7EVn*hK zZ&VFsmxSi6e%zIJ`EvJvzVW^_^pM_#iI!6e9!;hGP%tJ*Hj$z?;rx}Ou7DaNnHejF z^S?}60XqArA>pqGzh2dZkPt4%@}DjX-m`SuCnY-J1MG zD=;!z^66WhL~p1Q0X;@d8zuIUBp?|zGNy*lm__|%8EB57)A6l;xHpo%npgqHJ$Nd0 zesef-8MZjbeYayL8*HCf*n3vPOvrH_NbY}VjP}$0XO40s!#(|Sg_$1bm_zM zudC5Vi~F%IG+BdxK8SzG-%r)VjJBxHG8@!NQrx$L`>j}xwi&{E4k5nDCg^a#)hf^6 zm7Vunm6fW>#d!wK@wCOwG%dEL{VojWYX<)IhVSJcsLR_+-H)rK+&tZ|%i&_z&aE8w zfU%*6a{d|ryfbDif5R5}#Qf%a?~y+CaDyje{kJF0Nr2pcKkAY#4+6D%C#IPl=d}U` zWlN8`X@Pr;tT!b#=P&D45M9I^$`-Ut820gwP$YtfkWV{+ts+^!S8m-TM>*hHWL z2A$8e4Pv*GV1D%*q7q{i#xPnK=H_n?I3uEbN+-wH_TIFi{lSLUkvzs*Er^(XZb-u&Ak%Q$f4&a*KN? z%Q9MYDX|^78}z(51Ldf{)aINKjK|6+_YEb3p0mVw{Dpg?1kS^&TVk>Qh4J{zTMfFF z9&=i8(HkmUjy&1(DGtH+e!-tMdeT6g_|(IHm|e`29p^keNsQXeuDyeQu$ad~pOajs zDx)=jCpJ%o=3Q_wpUHk56VnI29(0^M-5r9ZSy89sZ59cUl30VeF#}V^;xK-h+m=z? zQGvR$M1K~i)L3uHq=en;Rc1I*ng@avcY3SVxL0|98od7o zV{c~O#Kw-F!q^^N<9$BjJ`As&y1AOe_b#OW<1T~mcyg`_E@mKx`*yzY-}=z@v=7Taw$>)uUsJ*;T?_;XDiudK6Z%Y|uQbaZ2o@7RJQHZ3R z`@wE4ZOfo4e8$TYUP_`BFV{(b8RO0&nk+asCJ?_&Osa80wDKd`@<_Ka9L(nH^8ufS zfj#Z+aQg;lL~}{ftL_@~EZly+`h7B`mWK>l->Ojga^Lj^@Kx$+I%kOx?6`^Xcvi{VO~b01-+y=!Q*(gVIJ)e*5!A9KgXS)JG;H> zZy(P2;boXH`w;_Ay>QoG8)m4A^Bud-(&cI2*2~4+fA4!b>S)h1(DiiX`u#YU)aM*^ z`AqY9pxd56@f>K>n&jcEUPHR=#%r&5iw|a%<-Fv*${AIJ+fLBF!9Cq8u*EI}#g+@V zyE^tO`4y4&kn?Gr>?WCi%Mp+ZsMa|D6BBNKF*kRnK17AM9sSA{;XV?d7;~kPWr6PJK@>(y# z+C(@!=ycww)7^E0PFHBu*Ue`8F&*Y?z^BoD&;xXOX8PO%-{(Z%&Dq%)ajv67` z_>2_KDU@5-zx}g+-z;&UmRilJZniuv-sdT=nb+(z_MSn47)`EV>rtzrMxKJk`L9@e zNB!zi_j8O%-?KMGAo?>ef#-++C2lVDd+*z6=NSFLfoA&kto^J!|JCXTMcQSY8Op34 zWd~zfHk%c?eORUi*C12K%9jv|Yi1>3+wp=OlEOC&DLN@&kdpue@hhSEdo1Rb+c({8_DL zfh~i;y>aww6oGt3J%Q_Q?mHLPA-VAFlOB4opAl@&JM-$}EXwI?H`Bv$e?x!SpDAyY zm1|Ok$y6hMW{_*tLv!G`>xe)iXGx}Po&fs&OhCRYwypAQVdqsb1Dyw9rWT;(3^o&- z<9d0$YZ8_(taqCPJ8mau1f+Mbrjj? z9?P=MgQ+2b+lzTZ`4!-RrFK9>~6}!+JtUGJS}cN2pdiV)$36&73a_#lL=i zDrz4@(!Q_+Qo6HS0nw886G{?U63rLXcHL9j%jr#+r~PEctEp0zU9a=*S!kUQZ+uO3 z_cpbEErq1ubbedJ2ea^Bnrpf3UPI94IoGb9^GBn1*jUm7A+DZFyNmgOpXW1hJYzb{ zLdjQR&zii$k3H+B8gTzTOEB!1^E)5BkfnOR$auwdgZuUNyizM#<;kj+k(P_S)GB61 zFAGl6IMs$FO+-n#@Lyoey!?-_e#rVF^!Ge}4|}yGt|cgP8=B z+C!YmeYCfU&}HZ+liIJ9CCOxt&mYex9z2$qHy5s7V)St@=$M}72OLk36^CPw5})M> zs(je#G2HuiuIn$f@;`bcZkGP$q9*&+*zXGxVb2$u1o!9qI=W>M2T1<3p?Fyiy_8IrEcT zFgJ(fc8X#Sn^QZMTyvj6@zUh>ZYsQ*q;00O)qV{D@$a zA0NM{75u2Dh#9h~}p2c7+!t}7>Pe5u1>rkL`+ErM*a zrk2O^#zP$)eKcw+VVuIM`Kg)H-P||)ir3|(E>4Drc5~Ln4LUA0sypAueOxwp`G@|^ z@vm|;!afV;qYcmorUs(VgZv0__F&i-y`TQvrEtVf7yeS6w83tl!eQWL%iHsRPQ?Km zlR4^nv*IbVayp~oLrf+svbT>@-R{cm_0=@xTUD!EWd!wqGX*OK$)upYY-2h>PE^F~ z^TqF}c;t`o!i=A1L1(9BHPU185*_ZZ=e8rD7h(EXCgj%MC!|$6&yRZef@Ij|a%fXM z4Z6~w8ohHr>)>{*?k{}e$LtM%`qY=1I({?g)yRkP zzxA*>J?=(6>WnXY$QQbv(`?6(2Uo6-bg>uth|m62mptqZSZAzCo(R}~nkA}|x_we< z_ZX-8GbCN+YZ(Pu9!#w#pi1$@-GZ2I&Izo!MJ>OgBF!H{I`)XM9FImGY=M4%8gP1UVFo?-l$CBD3?DQAmP9G|q6_2z{q z(~S?o3=%8`-ng%KPjhA`b8XWQjM(N2zD8hR%Lv~2l!E$#uUiINm{MqF{CPr?d?6BJ z4Kp^V^AIuBlJ325*dvVZiwHT!p~UU21$m4^uNSo_#+A!^znDFLn9%ROs`CGj8BCAZ zhnp#FVwU8R5=dF4Nn;0X&fG|y`Wjd1Sup~Y97`CZ=~-&1tn zixCX``C0e(Q>R&fAGOm?M=!tYy{2`uC-D}Ldkf$uHG%P7Sdxoqmafo`IdDJP+tZAs z?{@ryA|JMs`-cn?W=vj9WkW3PXN|r(7zeILhdN3a@{6+mZ}*0%VSbAo z_jaD{1at60o$`qudb43&`hQ^{)|y++}%sUkK)ZrlX&qYlF+YJ-ly2*2T+t z_usxCjChIDo>g6nEA_!N>5Pn*oECS~XbD-l=1I9`z#nBl{^Of#5$J}pv|$$1n@o8Z zTs!`88}>RBiKKtDpvY&_w0(?h!$9i*>h&fumceT!F|P}UIW-g7&3z=e8k1j-ZP4cd zH9P`S%xKSlQv!yCURQ84ma@_$(|n<5x0Q)S9aN5*IgAZdhZ2&>6(i-8WW`fCWLFpc5_clA zI}*qHBIQlf2s0-6c4B3c=kU-Fdi@s64xe(lAcRzZu;Jo;KI)neWs735T_>d4RkONM zslz9zua@_)nSgfo!&hiO;qhH!7yZy{)Uu%cf6t=l*8`?KzZ=cJ+^4EeyWWPq;SLMJ z2H!J;`d39rZ$yFbn6XpbdH5`BgFA+5N}&z<0_!=S5n!M{g*MNd7IZG57->){u;q>? zW65ZL&qC-$fqD`^-sEsK>dTVrJf{8rsqBrI9CZ2Ty7|0MZK#Xt@;F}u-uHXrjyu*4#qF%-bgh*R zz4aQ5<-*u&9lR9+#Ikck>_X+4AL| z+UBEX_vNhPIMspuU+;i%=P)LG$xRf&wo}-{dEW7b&!7@!tcdN5%&P-6`~%*Qz$*`b z(w2!xCsSezDVlP{vEI$cL^e6@s-31cW=05)nd#=B%hlpga=O(|u*)eUO%-tn!HlOA zETeATZiMzbLHi8=KC!M1`*G~%15R^2XO2!C&atfj=HU22UvP6LAKZ+SgDPns4#-~{`#$6#`x#HC%0@J?4w`4KH z_cEkHNBf-#6Z5Xp$;UFC47%F+=z#qChXuz|`sO$fGWZw9)N?EtaM#7Y*>9~#jqw|O z@K4S6wC@5^k9(4pFW7Ho&@q~+T=rwXeFq_knvxGqf;~`@bW^F{>rj|n;BGX3R!Fe9 z>8Nk63f!H{rP$&p{>`e_IDB zY$T{sz&M$@{UixnLjI_=x1bExRCB>|)o|qH`)(BXw{-9nHh(F%&dPe=tP0> ze`VWo#{I=CCAg2MOoWjMGdJ(9W=;~55?vb3CBYY$BzEcR$!$P5;r)6!z3IB}U4JM+ z0@-w*u+v{NU1I&Q9vw7YV%;?C*}}zgt{-`!t3SP8>OxQR%>k?LcZ{5{nX@;?1OG!q z{Da>zrH_J`fDowQL-3RXbSlO2IZTHUl$<38RZmrNw-}5K-22j*%<~L}mlj}VY-|PtF77DyZ|`&VKBua? zq_&o{NHQ{sh>V;fBO`7cs9yTe)$oPRSymZ-;`%z*rpq^O4S25kiLVK5%vjYk^80W> zezx9~^RsUKj%UAZ$Q17vzrgP`{6F=jKkq@mbD8{yy@~U+H~G^YGIZwkm-~Y#?@(p1coVpYG{w3f^2i-Z(oZ;$sH8F$}=bIORmmiP%9>EC>fp@2>0Oc?TFnD zmsaUya7LgXhnu$<3J5yree459wm>!wu6cB}!O?2uTqjp0&X^UL500zaPI$dukbkpE z5obq%;)Sx{?)HDNsU(o&ETkVs^NE@xyY%2=m)`xdOaJqKn1=Q^zxwc>e69XvzyEXg z9sI>^{~NdJpKR5C`2}c@4e5|u;^2t{D1Z|$D0Y2TO`!=W4X(MyyQ0Lc!c#-WeVl`0 zB52Z@tMY1SqVw@-r%S$YSJ0W`E+E^A4rH^r=T zQh?5L%8cfi&!B&Vt)U%-}=-1@$c@iqWMDt$R}#` zdw+xWFvPd4hdUddiyP|vxna8$9q>p`F6aignjimvi>snRhs9ra*4a@D(*NDik%?2-RcJBlSBUa$&+cdo$>L1 z=mZ>uNY`dwXD-ODD0od6_J6B`JdEvbrU;@itydR*_-L3ShvJY>q8%D0bh|9^DMDTYXqR+t zw}Gth6vs(9DZ&*NZTCWcl=T(}T0O5)6>t-Oh$T>67T`>#+EJb)-V(7&${5*@uHNxu z?}z8O;0~MNyPoI@0ytpo>74+W?JTIFCx^+^XekAZ3xj+!^Q~q|Xe(C%55r>KT%{MX z9}Pe8N2#M&{p<^V>-2x!E&Z`&|JXtcmH3CR`tYCT{ZFjcpZln|x(k2t5A(BD4f#rc z34iNgrc=(>!4B(S5O*U%etHFf=nFZxiVxsSf(T<>f{Mg(YOHvJg3eCC>I!ky!IM)S zED*55nqz7cES?K^uu>NM#GNB7r>)N1y;{tm7C2{el@;041zw8ompgeVA^R#1a<&;rcT&mK)%4vp3Fmv=V zk6eFR_&=}L|K(ombEodl{-~pWjA#3?ll<{Zt>FI-N4JFomIYMe7@GROz9o;2R60&WY8Pp|=WN6QJ|jt`5qQ?Z2$oyIHQy_*9Dz{pF{9fPv$tETsM_1q^7+2^l`H%I& zuuB^=m>__{8o&}(7sbcFceel=DRr=as4MP*dw&wE zA%S&>0&sE=3a5}`Zh{kY%&It_SoD~W8LPM=IW`>t;IH`Rt|N=>&swWi{y~rPk9Ya= z{l|aO=S=6Fl7r$ZD7X&!Lji)5zpcr~eC@OT z0U)cm2A$u0f1jwIF;H;yKj*6z{Q3zM@Q|I9fb5u62e6g+0y)kZj+TLCR9!$3Q(|Ll4ar}Vej6m|wx-lJLy4L4$bH}C$hju) z=*U%7+*}68XQpx+?xpxbecUxg*`0Q197mP#?&t-qnhiIJOICFZ?mBm!Rn{2frXe&t zeni}D&`Rp*?oUI1dpTp`y<%NlC%hqUdLb+^M_c+fz0&j3U=^bf#T>5fDBiypah!&jLN`&0A%0-Knj`(SdQtU{3nW| z!wf)KjTWU3sQjP+#W5~`S2%VBHMj-91gZjHfc#l9pv!@=S}WiU-bV7&1wcmjxqQb7 z4yZwj0}V(U;1rGnpaxe3sH^-0d=8He(9lf+1BeNL2E5xC4yq0qKzIT!=CcZfps#@F z$UWc`$YNN30x}9Hj?4oXK*9k8h>-sXg+b*{NH06B-!x~ya|L{GMe%Gt{NxfuKP~_R z=rp2du0RKt7?{q7A3P7h8POvHEd#LP5&)1tW)J`kApFe=TtF2AZj~6oIS{Wv2C^9F zK;C}vX+fj_1E>J-0_U#a1^NkytGoaXhclw52!K+5DQ5uDd;~5ant;42G-{980S>4U z{+fmG%PWw8VFJRcE&wo`zaoA(KwAywTORTtiGc=0WSI^t#J`U`>L#|wAgXK)WAB)| zI@$OAKmU*jDDVX&2YjsRG!2Ys9N-Kv4BQKW6aW~=hvh&0km%AI0JMew@lu30H$p^! zifVIzUa=Vphy(X__!LQZOHiM)jRpDN{*FNKcV*vkDfs;VjKoNZ&7pv%K&!xK^5oEe zz5`JO&p-ow53CrRCjMSo6*wFGqp}`=>3H0+BY+7!TL>G~mKgRS3DpcCkOVLQ%Lk{5 zzpYjQ&VV0Qs|V+bpB&o-Y``;)O;Envm>7nCVFw!{Y|O-%5*uFoifKpYiR}+eYnjJu z?=j71?&Tg~s?8MTM#}H0FsmQB{y+RC047BTAPj3U@<8R$72tbOpFrYAeY2WySv9e* zl7`d5uvtM9){9dig=rh_W+UxFJpq`Y5HJev?Wo-y5^{P5pss%+Ak7m{CS?~|#UodL zu&-638@vbu;B?k1;0Cg~!ACeKGys8E_fe)qz(M7dc>>Nf0b`tC$VZ$}E4#Fc zoyq@(b*C!fxOEk|6>>>BH5KFoQ;hL{XoX!ED;eO`u=gUOs+C>Hn!c(g5jqhkt&Z4b ziVmnitm=uclgQ&X;1IkUnPMDyt0ux&$%LQ!f@Qf(hRMO6^W{>IsWDMsj3j0K&0jFlba7b$fq8d*C>YuWlwDN+Hc{C_K<>5(xkF&}F zlC|=J*)AXeao;D2BqkFO#whIC2|y>3!X)xwH^GEga5d30AnF_x6&(m_BzB4pWQTz9 z1oGAyfui6_rg*3hvC=aj=|4OL`&1>mAjh3jO`t-sk{edpPdjf@3!;E74G}W z1;#ib5f#kJX~>o1_LcKYPTA2(jLF3r;Nn}lg2-Ay#I0~2<_UC@@}<*qNQ;61HmF|& zVuPY|CCLb2OhVy@PEFSpm9HQ=mVPRk0)PX5`Z>};9JiheF5;Q zXV$LZtsnyP1gKB{S8(mkqjN_*bRtg3lwC};MwlnSOp~KDgUw& z)idDf!W=}pAS8mDs5&qc&!qGW__{E=kudwe@}ylv`TkFSpz8tvt|E$- zF-92xNY2ziL$+4EGYSr9LT&;=^pS@>bRu1VNLCR4oZ+b{%FxDt8E~{lXeS`ne@Y|y z1rEur-#V23`yRQkoj=|f5t>v=4pJrY`z2rB0cAHD_(uB{-F zHW5_gq)elC|K!WfzH#uAU+TJ`e)73#LVW(!{u4+lk8opU2g?M+E1Ex0zty?5MKbfN zUy3)Vhho(#+<65T;WNhQ8JKm>s3r)T4^L@ui;=u=Mx~E`0Os{nD`7WJ9GHL(Y>gHG z=)xeLd(bwv_z$qM}&zUC2_~fhJzCrGjf75kA{N^M6#-DtR+c&s>@*lb`DBpa<-|myI zH*00<0LC1Dx$65`#X5*%4x+gU#)9>kdr{w;IO>V!QiI!9eP31`jbFTg#xMEPFaM34 zDWpItJDSU@%H2UVffFCos86DC8aj~AEWt`3_6tL2HFbTbTz!^ zhU{kDuv#N`YCf?03MRme|FiHWw^nBMrQ7_-`VV}65dI4~tQC06zoGM+4`BMUScHPz z7}VrDQ1pgc&j6`&fKi0}-_x%H;E)0uUyeFQU2KkAEpDxB#OHrIParcu6SWl|zpHuT zJ2Zv@u?Z@@fijBN=Zt7fx@k?9x9)D%idZ*jOq+nvI-^1XM+ij@wd*iV07Mh~%(ud4 zP7J<(WhQ3@&Ioj8pZlhvT7?5l)1vQ9e=XyI#obL=A<^0*ZkeRKV!z518fXrluAqrluKU3wLh_sNe>>ii zp#9=ut*}F5yD&~5K;t!9S8#tPGmuN_?>Yx|3JrMlAFZ9F-Ke8~z^6uI5u@t>h$Fb5 zu?6)z=*5-(t3BT_uT!*Y*ZB(x^9 z^lv`GT_3@U%6cB^FG1zfpLHu*XG76-e0>C@){v{${15Wcw}B7{G&rKPkJ{QB=1IUm zEvic@hD7hTmslnkwPvy@H;9-Wy-| z9X&f=_|J3jh5tOqU--{+K21oEAQV>J^#Fa|H3QiIr+kW zpTF?m=P&&Cc}I8v;G%vkRfsMKgbz<2A3OjQ5M0_Ix`ZFN=}Uj`ft$Yciyye`OF#U= zQD6GMa4#f7HaGn-ZpZ+w-x!UC0F})>BzNmL2?B!OzkL*ySyl(NlWd^%@Nf7lS`+)f z;fWu3_y>H9>Swk6kFn!_`jKsXoP^IfKz(rXmeoOWBX?hT8gdA>Kx<0>H!Q8|0{jDZ zMY#NiEkC$Zf9^AfPuH8bSnLabNgJP)0rD;HU0iu!@4}{sR`_?Ei)p zKK1T@;-eCvy0Kq;sS(~h^TQWT{=)z8M5$2SzhN7MxBnYf`{40^8+MweYWnUwbs$N! zB7n*ik!}XfSy7;j6R7(M7+oMYf2ca?=~+paRdy z0kSg&En4z{YWS6&VckhTY1J7g^{WU}0JlFk0A@6rf7O^3(6zBc=i$KBg$~5Ei{{Tp zY)(kamk8db0Z9pe!iM4ir$qV>tk`9fm%~OB=b(Vj zub&H$u{r}nn+Y5cyf=&-2?(z6Kc~%uF%T=H4er3iW2~b?kYX5HG6SfQzEuY(F6rt> zG=`!3L`sNCMW`I-02CVq-Q$=m>4fs_>XXmS$p6t8!{vp45y&3k6QA?@Q-Q1?S|R+p z=xM;$Zs42#sIvc*Um;a^{6~6)95My@m4*N`$awg7+JPMK{(y(oKj!m}ij=c|q$_{} zW(5w4w<`lsIB*A22a**I$d7^oz<~V6X%5osBE30Sk$zWqz>eZ+8xBAx%fJP&2eJWD z2i}n@Fa@B0D>j#*!26UFKnCCg z(HkN^iLwIn6{VRkUA%z4zoED|3V=AOKdBHM0jlSPzMDF5`OP;uM*LY&$Ish6fm;SW70-*Q9pHKT;Q9DWq z-#^I}2Y`>-O~Su&_3z}ULI+v{lA8h1pRWl1BbPtzjQSUYRDJOg4J41o2hSCMuHSb3%Xf{x)cci>=tDn>e;{>Vcndz^du%b}&!Dq-ogShlz-Gn~f z0S@^u19x=Tra9mVIHbA*Gr~t-QU4Wyf%-k~Ao~as@HwymI3a#m=scLm(ReK){<$kM zBItluK^=Y6cc6vv4*+!0y~JQeNCaqqT)H4+A_u0(f6)cN>*v1iDhQwZMU^0bd_{fY zXiks^D6Sp_`BiX$D?p(VeBe4zbc6`FIbw1G+yP3+_mVmxpHc;I8H7v?JOwH%LMni2 zCEO8mnj<`cH$fd-iR{*`h9ImkxYk)j)bJQCsSn$^fmA;L0pF?fQztH*U?fhtRM${aQ9WWpigOkTVj_jpRTAXn z0^e!0y|T(ZCl|K{WQfO3h^G?z?>@4lTf=CsQ4|1p+v+S*yZCqgDi3Za0^I8dB-uHQ z%pKv8-|82`!EL4r~9n|E(OxTRQ*O|1ytD zG4GG!zx=oV`~NJ5`}}{DXLmQKZ)`rofOD;d8IXSo9fz~_7! z#9Rdc$CiJ8NdBj8bOWHORwb%#IS&Pe8zjxQu0HT%P6Za9RtrQogcT5}d{uDSxzP zMtXqCv^O3Rls>W4NZxLce?4MMd40?Kymh=6R5i#!i-m;EUg$e3v+V8KtyO~Ku&WGV zdM{FSdOk;9SM*?-Ib0oh9v+7xd|%Ei*d46M&d%q0FSBMx!_M`8)z9(t3xsgqag z5?2q=dzmk54>U&j7zNiS=gVAC^0=39{q8lw-& zd^{Y=HW4(ObooL}SHRd^?9vf7hY^@2Ob)AiG|lrqClH*Q$>_IdQ?j>)Lh~{Af6?xq*lh87INpc?Sl8x~D}Vy|0S<6xt^kN`&G3_SE!@kBh0Wm* zm9i_Hc{%6wyO~!Be`v>%eiQF~Wsq|!`#BmbsN#6Yl%Tu7%fJe+P!8SHW1op<4($1z zByoHl*<@Nz;(7Eqtmf>nd=d6MU#RoG)8@%;u->$3=Xk{=M}LwZ=OH8=d%In_HlODV^gJPRmR#ZW*Po2iw*2< z-UCdklRL%INfBiTjx^)~7d|^WsBWfqUYv?8jvh?Qe+t=k7w0E+C&Jp?X6%xMQ#wYP zWnApVyONnvs0WWJG^gp>;pXt1Nem4?_psJzDOR48n(T^R_CQ_co^5By z`8##4j!+4%iHB?ZP%-g+OAaEZiNttNlsZfCt-NqTRb3ukI178zN=f7ZGp^5PLAV*% zT`>hae{W6gZ1=-Eex|p^P_?&yhep~wwOOtgPp+f;wR`j4WY@Bao%&5RO2^gRDH;#; z`bgi@=Izmoz+*A>ZV}?yjSpNnj`#_ zdC(yvCKo6Z#1R@9AZH)b!_yc@yrp$jFm}fCf5|zacomMRWz|Dg;9wuzn{nO)^W3^Y zLv;~xzOzfAJLlG6)?KU`yQ%z~*)nnW;&X_PcyT7)O%1AXDP)gyTR}582Z$F}r!nJaRQkDF(?=`PlI>tt$YfBC{bG}Mf5qKl zuih54+rGKP!+SLDLp`z2^NPDTeRvCRCMejs4q8)a%$dN_87Cld^D>%!R@s5`iL>Y# zO?5h#Ra$qP`Mx`SryjT^C_^)OD70`H4hq?WD$GbhkxIK068t2Wny=oj%H+iTHM2KE z73c1KGKb0#JdKX@9Pft_lnGFZe^YaV-7btS%N#F~4tYg`EKbO5-m{b+9^YLqD>3cF zfl-(Q>f0x+^c`b021g<(p=W6dXWdtDex_SEHp|ty_D{1G%#9Ntwn4gs_n;q^d4KTh zPrDt2e|cbOBD*;bR?EZ<_B$*a@U{-79zRbtULMre5one==2)5vr(i zD!maS#}qpIiH`PrvQt64HMQs|&!kYgyC-UqB>uE z1@G>4X8R6#ezft4UC7YHf0UMu#n=$*2~4}?8CZL1<25-Jnqs6yD>)JGSZS7y;#pGx z%|s)A4G36;+b!Y>sWU6yWQn_Z+A#5n%hme~VuzU$A$eT3+wC5&L%F_{TVD%YJLB>* z?SE|lM4Oz97k-X$?>apd-`2Oi8+<8et&YR3%__?EpVOE2m}}P z%1Mn&{-IcHFP8NSg3h>-G8g-zR4vZ2sx@skEQ^mRsLv;-D*1=^u7Fy$PncP`dW5V| zs<-~gs{Y+P9V1HKf9mK+Wq1C?wXa4!+EX_|(_8CjTo2U(4u5OcIJ)&ivllwgvf9Pc6+r4mwEHMm4v6+=ayAGUIO`5H&9kvC3W+j+zBq8VP` z7=&&(ElwM2-pl#x^)K%+?Cq>+;$Fv#YQL*mPu)Z}*(IyUa@jW6$Y*K3HJ2qA7$Uun!xae;pLrA@5l+MLQTy6+){_V((;l_Y!3gL=X-=XjYeIQuvPF@(`L@$4LunadhZdx2HdS5eBA<^ZtG0{TGn?Hd<}P(A(R}E6W+z&u34LxydN*y`>bcmW3eT5L zKb)a#e@U@OS;|VyCI%Y;w0g)mo9x$y%~M<&ZbDz=>s#*wFceSTHT0l#lGX_aN2#la zLH0&=c(ehCy;v=_D^CP5+1yz>)`vXMG3DYl!Yi7#SC!9#?^cv|oeuY%Q+*@RhTM5* zO%cz>^KkV$iZ#cFe;Y@d%_aMIN}cDMc5lIne_8#=e*8P>fDTHe`+_*FS^cI`=ONbC&i(lb4Umn*@homch^)er=6y0$&=bf00t6(@z4 zM&I6@HdU1 ze^s*)M~E*B?{j=AJ|9AJm!x-$DfAE$qwYQi*}YWsv+H56cSl}z+-EO##(o9Kn$a9T zD`rWseQ*1@*(uQ&qm%mRUMf?`jKl$yf_G70hukiX@w?=Q_6nxU+-QPvEQI&Kt?E^A z8A(317xH$xiGw0|gASFab<^LPy3X(lfA$O1VKUD79$k2$HWMW=WyOWkqp@a*cx8Gz zYmZT6q>diXsnJvv&NN&VYO}9Q_-)nV^5q38b~#(`?&zLLIqM7z8c$D!F9B%#{dsb> z++nBuz^s*M5LitQ;XUNA(*=5vYT_L%aoO@ zfV=8onp^)J6kuzzl|023ZVNLge;rz4H=Al0KEXeBP zUODu}jpLXb^y+p45ci}g&=5nLmw88g~b9hJ{BKxQCYqh%ohtERgjZRl+v#Hz&q|HkB5#I zX~!;=d0Vv#e}Zp{`D1G8kjs@(z+)w5AHJ6kHFGbn?`LJh#i+6+Hf(MWg#&-P zzZoS>*F(HR->ElBmXuC=$(gv)>J>ZUKqRgtd-a7VpxZ#WI+m1H$J2aG-@rY0$Kf^H zQ_!$vp7qrwor6X(o?D{culL<64TeAFB@giYR!EO6HqTwTf1Dr2&CD|YASsV|7bVs{ zEI2I!O-!F)?YT*c?<9Y}^Y2o`g*q<0Tqsq#W29VSDEyV5w)<2l_{FRPc81E7Q*%Nb z@`m0wbrI{ekq*ZzM`15o!*kx;|TpQG2zfDTH0_tneg8crsb~`g$j?Iu;Jmx{`ar1QdQ0 z-_xzb{pKEXOvST|s}0#5Jjua6Sd7Mxx7agXQ*H>)e?CF`NsG(wdO1rv5A1E;>Eb!4 zq$iRTQ^2fK;)9$I^31XI&wI0Ug;|Md5-e5SzMRB3EZI#V=g3w zc{(X`Na5GoJP5mg8NzLSTs@a_w&rTY_wpFk4y!W~CS2ZGR6ifG^Mn<3bYgu!4Maq# z)=FH9f1*;$g2#fhWKq6H$9{VoUd}ZkFi%W^!^MqKXA8l;zcI2oxa2F4_EygpFX^I}ME;cZIJD?XKpu4^icUoy@@n^>$% z!&e-pIgQ-6I;s&tEQnHl)rbz1Rh>7JCM zSXh=)oB)CpoXF~j!Wre!REWk`=vg5)4Xu^ zJ{k4r{5TooGc)=A1`aQVm0aI{^Ct=?r`>$9+M&sB$J@}IoX6|Qwq;Y2!!}o%d*r-n!L!sTN)Y;$A;ppPw;BZk~2|TdQq~_!q7o57&0WjrMM< z9xEKgexQe=?D*$bNIiWgu6EbBaXst7cTtf=x;oE5TnhD=oJkxkuTomtKD!cLBN29@ z65(Cm9XU^@yK#4%U57{Vmiv$He~Q67eczZ#ag37}w%ha3SdQ8plkjxDp2=_xpEyHi zK^#7@oya{kYmC982zbOy&|C?n-(*2%LU}3hGiEy-a0xG^F4n zsBU(7Z~`^?1Qlr>sO~Z)StZRHw`(h)_Lo5ZtF?+l`1jOyqGQ|&n>={{&gM?x!STKR+HD^P1qWj;#n&_ z&9KA8YsVDA!a0Kv=GfDVe+H+k>jBQsK8r_gyG%GB88MS@;=1VWRvtCBNbvA{-r+0D ziOVEZ^i|ru@nN5~q=%t@Wh?Q@n>Ck1EV$0HoGCMKr?9fSL#OwK)TkX0{hl@PVf7b1O%A^2`=Jxhv zeI-y>*@-uI&sc(@$u6FaQ`x=@Mn0dWbUGR9@+imy+4UrKsH=zG*st-hZ{(2{ zC9|52EnKO939lnl8;7TL^$qNWM+5e#OFQHbiyl1ZKGiaX!!U;ruIi4d+kQ1j@ohGI zk-N`$(unEQTqfUXe@!cED!srD0-4#al4RMju=*_>n|&XejlufGovXgE>Sme9_nf|K z;sxep9C!N}@J>+?e&LGloe5q=k=pCk(ue}SZY5`1t~oDH-o_+4v6p~Zq$@{JC3C!0 z=Q}+lZ$UIg%LSAvt*5C2eQt5IV-O46mVJ6enyAOnEe}pjXA4;bU{l<~!?7g#n zv<^ikqq8S1Hs51-B~n)z{YVwWx~(dz`z_+;cDq%j zc0Mdn>_wF{)HRm{ViQ%>d6rIP26NSCDO2b;FkqQ6&rT}qClyn|AG6P8aK->+l zYld579n({j%oa5bLr~MNW9@C)&E0g?yN1F{!jHe_#LK-<}(M5P``sqS=_05TEcW-AqPcNY}hPcGLFYuJ5guNwJe(2wP$}o=` z9-sQ2;xD4$QFrw~zU|oWF|E2tmuyi_*O98J`*oq-hhPjc+7|6nto-t@CnRlTxy~!% ze-e?Lb@egVA&6J~wD96Vjc%E_d;6f_@meqW`Lvz&BjATqC0(YGi>#ugPyn6U7S1B2 z_BJbn)mRi#p)YB^o?h08$dl&kFQc)q0x!89jbkI3t&cG%U+x|!j#-Vzl_Nr8mTTHD zo8&}y2}gP>%~+zZGW3WyZNoN)Le%bZf7Krg4F@ALdj7nZ@I{iBM(kd!|HhzrRC}e< zj-7JGm0W#=?^`_ABbPo2O5|ot-|a9XL-}Rxl-C@xz{eg~M|yL+9nTIe*ThNO!`h`( z=6Za2>^O+zRV7pSsL1r`sRc*4`co*E)iq*b-LbsBVy|_4ygA#rN%teqSvs@rkVo;?3!}qiHl<@as&reBkWFKLaa9Z^` zIyf1`)Mk zn^?Uq$e2OWiKV@t^iAwfex8$DI$-pD*YdoLA$1fZjXG?ypq10sf4WyzHAl-Zdc2R@ zgJIS=xxsR;qdTNGbmkn282xN6btnZK!5K4;mtyIe{`-JQU|r}SPlQIILYu_d>m@m; zwHEA;qkk;du2}p)T+QUJH}8<8&oAwgM%goMt(|-hNk3!lczDMz?b0sAbkL4ro1Ybx z44Y2p>G+^vr>n^ee+@}tV{-1Up>Zu%EO7hYd*X9q-DFd_i$FBf@w*EV-?uhjf`v%cJoLxqe)8JDv0J zer%q%vV1F)(d%Vb*Geb z?_y>v)pmamMr_z|^k$e7AvPspHb-{2_?knV%!0_H+^I#vT=n+uHP1*Cx@~M1IesTY zo2oitq>mDdiPu6hM}~Sm)$R#7;Wgdt>eZ>YsGgTQiRZAn=(mjFmDiR!iQs~6pSlSv zO*R!f{4TH0f41Y2LTYfS$hs-hhgjquq*1WxOrN@DY}cv3VOdSn_Hl`ssL6}lYuK*o z6#IJMC&}HSALEp~X^WxFcZDBS`k*>ZhaZv_?g5W6RDs`0a`tF;jM(wD0G3?vcj5JC zjntdxGGUcyjRGs^^3dNV?8VXEQj^b?+cNb|!d2+6f5#r43ZI@iGMkg-$mnQ2;$bWb%+fRYguSrRxs(K6iV{50y}><*s?_R zf8(ts`2B{)A5e*yN#3iOVPHJ4$U6Wk^^`z!aewS8e+LlEoF|3C^(d#ptY*$5Uzh~9 znEBS0<<*)AsmP^_$AFvVy(eBoSl)ae@ESs1kNfxXTbi)sHM;fhKF0v6GbGJ9JA_8- zmomHISvbaK%#SSlcd(!pDMl>KOekxrfA|az>yR8!Y-5BPTQCI>aR>&-wKeIEJQ`hc!PKV0Ny)7ngg|!HRm5_6zz9Rd+x0Y}P?f+F zne*x==Cil6REIOFc(SNL4DvEhElfL99r+>8I2;{b;=?-}$^)lrskk5gRTJqwC0*;U)eaj28DW zJ}H`o=WVtS6*znUOpm;f3JJ`sQ#7Fzzn3S{;p9=oPf&v)t3Yq6e2k>0YU+u?MTN{a z>Z~K_g4%}j=$SErf})^Rls_f+e=yYn$Ba))r-_3tSpUJ=y~4vf=G#C`NHn3J1uMme z1z6{F`y!>U42tEq=gIip#NGKPVS*jrGHe1gomhyhc-A;{gHRT|fyUAVD$}3~I~h*6 zH|l2Rl)_mogSr|pnwl5ftZ%{_bQ2k`37%>WehgYFm~p5g1g{y4 zYv7t>r@6S!ZhZuHO$37%$(&{?)+SfKc_Eg3YK|G#+HktcP>q0k`K%K`k#hllalaoY zH;{%Snz5dS(z2F_R`M_qbE`sQqG`K`()#|U5l`sBeUWAGjrav*e}LUCpv8jg%f_Is zJ6TBmPUCW_Wf0Isw}{rcG-sG8ky|1Lr;8-$Ss#$0pdnY6h1C-nJUIO%4?pdtIiVo5 z*UcF_aZJ{4yc=QRmJ#%W!DX{o5{PiU(g?d+s3mp^oD3^gvm;{@5~ob1lWfi%tyFCN zrQaq+@(lF9a#pwHf6Zh(huyV}&FdLGvT%KET%VxA3F8$9>^khkXH^xj((fbi*C~<) zAcDw~uZ1jjJAR&MNA|<}`tBMmE#!7t5JBzxg?d1JL~?c#@TPqQ%aCbd<**S76+TH3 z&H!1p5X&P%vido{FjgM0kzCbJ>h;dumMTr$ADD%|1s!o?e^Rs_Da*tvE2Rx;Ydm=J zNulqp#sX;z%5O*MhWE9awrIG>0JQLdRT?)&9BU`;e{Eza8;4*IE&=c`CE(PJ^V z4C zQ(h4a-k1-K%B6BncSm82K~mZu3S|(Mp%6@ok0BXZI2N1eKlig|qYv%{&nL$(2!+}X$rI20#~xC)1Cl6`IrHS~ zSHw}6Ykd>`z&#~kHi;7Th`h(CvuXNu085S0wafQHXZU68FA;x&0}#cfPIJhAtoWv9 z*-s$!e-+-(IY!4;`jsTi&MAfotgm=eWbiu~pcwc7?$c*@H=P(iwb0oI^k9+G{4JE( zne+_Vlnp$E?yMppEF)~IP;J4>LyRhS)Vr@HExg~4YJxnEXFASD1YxXueVzwXMcS6h zaVPu@Rl7++@zm)TdRw9mML}hjrf-P?HJF3Ye;dtnuK@4%p&DVms65cYDTcuf zx~D_K>P-eo9g90>t3eG~9*`sCq;U>7 ze~A+9fx&`sPGpWPqV4ls?5pZLVoQBs>Q{%nQ=|edb*_%qLjDeI>}&}JiXp$@O*6e! zs%5`RzTXoTa8>acCBKJaMT%8)VgmfSQp2QRkO6 zciUN6VICVXDF>ktP5ti2Jd%0x&igTae}VhRmeZ)OZOMN=8hXZ@`BYBK^PoaGTly3& zHLY8y5)A71_%>CHn{%ajsh6o{!X_AgFIi&1x0%ie6+}B@mW` zmH8mn9JORwl(O1jxOV<>Rz;imQ#kjXHM_8x5N=|~HQW@ly%k!|QtXHu6hcBqf538z zA&MT}Mruhw1?`=5Ar%DE+ZaeCm9*z)tw5%`K}OPFN(mY7C9m8=BT1M%L_!1qG_lV1 zLS6uyx)Rhu%>p+k%w<-!?_R^%Nq_%pF1;rF#H`FbJvO~s7 zNQw(=y2f!(o?3zEHxDE5Q_Y~ve>z@0(4e;hy<4wyxz?+-(7OS>GlNRk z3M;r<3Wc<*_vChh!7-e@bs%x{nGj-^FeC5T9Iv-~XE93TlN8gs72@)Re<*(S2TRfg z`2>{koO+F^e(p}5_5j`><1Yl}wliyZ3&rc#oWEr3N12;7Gxkjc(t|w#?V4v`&u!A( zTp^0<<2c3JogTN5=Zq`MB20T;q6K)cT@E?f`*7}UJmh(=6X^GeYlEowaMs1yX4)YG&zafp za!zRIg(jx3K`YZQ18&4z>~u>e-ioR#m_Msacr)PkoKg6DX4Jn#e?k2AhgTx;cnGgM zTOURE9);){gkTIpw%{a}@I&n@wRdy-%(R>`=%SO=Re?RBoka@}jaS=`Ea25;pFA6v zZ6Zn;62h(eRT}ZTY1(Fj!Fw~2PP2>`oN;T1XtNrS!XI)0K%3@Ia}%8ln`D#5Cp2~$ z{oziBmJtJ>V;La&bHh2(V6satfO7lFreR?|Jp zdwB)EY$SJI;c4&u3j&?Qrzfikg)Xle;pcxi2b^ z!EGL0hkhhd^_cOi=JDcMPs%Tz9futiiz(J`&^XF z48S|atc#xyqdke{&MWqbfilFInZO&LgpnlX`P(kHf8&sZZ*5rz& z+M24b)KU~5(Q585N(nM4AjkP7Ggwg&%$8k!@SN5vf%|qiKUZ}o<-NXV(+vI2ZY5!F z*7jXDs*o5^QUb(B&+xV#i+bDBSs%)CJ;w#ba;a+`KQ4}JiOjCS^J`aiTKHgUq6APZ zOa7!Qf8!JCkFkjBQ#(H@U8o4xT6QTsd|tI_2Z2qwFDEu9y@aS=`%Aj#Fx8&Cxi%&!-cQC)a>@H z>7ZKcVCb5ktnRHyboq`I>&1z?a5g1!%~TI-f1S5=nNzRxC<5?IWoo*sv40GPb60N} zv1*aQtJY+`s9CRYWE7;Ff|m<3KZPhvkDoe!boj-X&mne(nPQ%NG4g~Z+)StwMl{q> zck22e#lv+qy41S`qj22cZ^s)wn}Q^fH*rr?6w&WHq4Xy64i@6jHRsDzR`4u6edHEU ze-{|>>t8E3p`^4=@VbWC+matTrQFf|p1Za0y23~$eA^x@Lz}lCVggl5FOv@O#H}i% zW<}w6tXiyW#w;!s+`P(*rNSgRThD)mThLfprxjqkg;ym)$D8h9jWJ%1W(IdNOjzQ2 zNnmP(>qc)p2HKre>!_jW>{2Ts|5U%Je-P={Qgqd~NsM9p^5=#yy#=IE;~#a}I>sTnEBWgvqNu2a~|~X zKIi_HaH`7m)bs0uh=*ml4yf5_msEIO#U=MpB${EERH&e6dP&{?&}fr=Nk|HbVV|Q1 zYz2PpZS<2mW(ar)FcoIRHYz63e$d079eTV2t(7^gPAa3vyATd3Y39O;e?zJhMcItz z?8ncbP~$U6)VN|E_KAI}@$K<=Uc&xXh%Q$#TQCh5==x$m5t~_afF2(|-j{6e>Uq;3 zrB;t{itJ1W#kM78O4_0JL0?Tcr;~j3kKh2a#vgEIo{F-$$#z;CJu97aI2Tp1AImM_ zqG;sGH3$Pw^7kUSnRoJ-e}AidEb_t}?alrS%qm<$Utz@8x%@YdMl+Jt3cH7SD3#Sq zv_Af*F(`SE?Ek)W?*zn7huYZ)=|(da1{$uKnk&*%!=H``w`q_b0eF~a2P0XcZ8l&b zo3Yy#QA!%yhuPcz&N$#KL*S%e3Ji>ZYeB)TDYHj;gv83xRB)yXe-FZtkFsq0#t`Gg zg>cw9M)zTQmU;OSpHM#xX<@SUPV$$_8b2m2C^Bsjpx6e(VMrq-cOP8wiBJuf7bs5H zFX{r|n;UQ1q+xUcV=!O27?C^7(i`DnTp_V+W$%`D^;1VT%&s{R8#wT@TeuX_!G8#7lTnCCw0*pLis4%aWlh7?Ac*6Bp;Q_tnQui?r-nB{sT zHwCZCLMS2rJVE1jq9T6+{rUwQxGsT8^(N9`{b(Djnr{f0tmW zjYz{)@_5H@9NSA(KB1w@Bwvp9s7SX&1~d_oSVd@@ ztDr{^rLgSFk4l=G4Ql~8*LyD#v^VI|VM;chpLX_4RJU*|Y?Sx2LaoN$fBWj@Dd?sS z=fLlt;so>I0DIhvweN(w(4J&>T^Tr~`j{Q$%SX zDx;MJOEG%w)E*+HcrW>mK(B1UHD`sf6hnOx=~x+Vj8+1ICie8s$Cb+v`+3rM{)N+^ zQHWM90I>A(eX)ivg0pJLf1Y=rUdD+aVBq~@G73;Ds6dN$%_4tY>Lm}V2~!ZV0e+3w zvGUn!Thgk9Ld%Yj$=?kXKwAs6iYCB;Y6$Y{Kaa*JnS_0qA_WWS0}?zk}C z?+mGEE;;aVV4VAoeli)3zHpRg?wf2_vBmHjZ5Qei%HhhKd;3X<4P#=qIMx`T4IgRI*4eVoA`S-G{AL5|n z(*aEj=JWxsXzqB&^EJ7^&apUCX50xrF^P+3TrT7lT=0ndZmAag)dU%!qUgdDgPF5_ zDP8gFh%1s>%=_#Yu+VNIUVO(=wU&4kAF266a2g8(ft@YIfAlUZmv1#B@HsSvpN{H{ z7J@LspV$Rp*jccB%BBQm&TMHD-A6sXkqexpI0weXB;S&bY&_L3vYD|IDX`L1vZ?{^ zBQWei*z_A|b+hUvh{Zc)xif5XJf?;{+JlrvLN0gR;O&ejnAOETg};{{k1Sic^!*`` z(@^Q%0lu~1e=hxei6KX0i1C%yMv(X|EHDP+S6+@6C}}Bu5R^S)2HwIMz3`c>H%4C( zgK6fAmzH#h`24;PNVlf!>i&Nfvgo`hatD_XBAM#k2~_ep){cN~wYwz4-mlDMI%P zzNxT^B2P@`$A#V=s;qsH+juAIb)K?lFKCQflV3kYdK{#yvHVI1+kPF`N)NqY&l(fB zXeMkLhb>l@K&Mw$<;BgQw<6W=H<3u%&G7~Ne~y)=xM56XA(HVb9%4GqQ|(FLM4-Lo zF-H*t!qt3d27MghXq#|Kve3^|EP#jE&CDRBUEMn#h}Y4f$zf$-Q#m8DjK^$P?)D@} zv(``Cn#!zIiP4*h6J)uLkF7yzeq3m9BxJ z{oGp#?0)i3-f%!O9x<=Uq%#u0BDBzBtyZ$3uob9Mgs>uFfuhsUdAWAg<&vb!CSQQZ z0?2|2p3z7R6h7_?Ft+S+4Q;ClQf&+Te-a1QG9db0b>=_{1u2isInG`lx`z{ikf`I0 zf>{mEsV;;Bv-)tOj^e7-2Rv&1c7GG!gu1H(U|+~f2b|8 zF|avz7&51`!f}uQ4DyWq+TC6AQ=|l49?tCj^_N()%)^RG=(({p=H84yLRx}{S%(vN z_IvjP3&i@j2MJ z`M11R!4~sslF+4KU=i9Mtm~5ie-c;WShe#;NepsL&R}vP_cC-d?3VoXM2tK)dWFGH zeU?qWiB)=-t|X+cKX`5m>J179@GzlgdYYNYRHshky8tcN(6BLX0(MPN{r;w5PcU1x zG7tnkx8Jlo9On-q4yj2cK{*8<_SLeVE&bIIYWGJ;l`{MZdfk~LVzjeM9UzJ|90bs6&6|-#R_-1_XTrGf1#WdxN@L{wP0YC zPjk<**lX_qVOw$If>WPPat@ zb8;Znapql87YF5`e{a;4l%);K6zc11K` zmOyrf`r%E1P(S%zWSF7RFKktzXhsLi$pd&y^)uDz$&nFauHNYn%&Gf1%mhm#>gKeu zu~gCR@EzfK0ezzAE}vVRauo0C-uUk9Hq&C~nY)@ki%J|ueqQ2-V}^;se>XSPyT8_Df!n|oN%ZJFRA_N}G7(D!n;AcvjE+*4Nqb2lct ztG?z6P^bRAknb?V$orFNIKExh9Eb!+bL#OIR)ys_m^8uKCX2==%EueKw)p;XF&fJE zrdM&X9ymVQf9G9&+l*GEi*uhFSfKIs(z3eQl?85NH62ff5VSedj~uk4QOG!s*$xL}-@5xwI`W=BGt3yt zHv*Z234qZ3sBuOIhJTR>9P7OH>2>j5L&{-Q{!XFgy22g8Jz2dUqxV03=N-pAk3e7h z89o#qfAWRS81=UMNh$2l{ZZPDLV(g9!R!oWD*K!uAu%*pXS!0xbQ82c-6=7p^L2}Q z=<{LemlgC#$KYfe0HFtrd9&<6k{D4aAyXP2X}v%*9To9sPRFl>0YBK(x2+Afz{h=9 zT!MR7+Pp=eTg%G6uA zw&0?ak8)meB#>m=sfl$?SVN8yH5ya?oPeUf*JkzkS!>mf!Ag3op7|OuwM@kjHao#Y z*&sl4Ag4G?Gx;oJYv3=&WS!dtqO(zaqwNMJafo&jeKJsEC-EW*zTPoz!*{*X%5a$OB~^ijhp0_BQmjMI@vj zB2{l1!T46Z634irM)5dD2ajRfIDtO$9Qq4O%Emjpo8nOTWVB8Tq>Y&N&;=)j;`;Zu zlx-)D-c<@H7J21LA=h+_<=HNZn2Q_%f0i|gJ~_wvTgEIg2JC|b{?g7wnQ&4e+;F) zQs1nm2$UD65P|tCa5)B4jT_eMj5Q#JMcY&a@a($7^YSA6n%@Lq?lq}?QnQL{8pa8G zNxXlaIsi#H1gwvh4#hWCt+d#0oZdUyPs=rwQBphs+PEbf0{|@ZBcl4wp9i+CrvgS4 z&d;E_z0Rrw_bV)v#bHiIakwVie?FLmu_;(D#@W-Sv6bwi+}QJ05_md-$L04kPcU~) z06##$zg)mdvQQ`-He>D-9YVuJM8F$yT_V+a?H+oT39w2qz7QToeaJkDpV`3JL;?PX zQ!Prv2!WvJ88<57yJ@n$axqY;NTA~n2;jA&rpP1nN*Tdbynh%HgwIF6(X(TVl}An*Y`MV*JhXpoQDSM((m;_# z;*mtQ;oGpnHTd;Pg}%@%7!93$;EC_AbJvG*PvtI!D~`o~{(JUrdSS3-JVhvE{al`~ zVZ1Hj>OHjP(fEPPA6}TmxsMk%{A|)|JzW=0imb9^vGGsg6bu6ZI*H1)Vt;B?_SEn$ z78DpvPY1Q8h7Ru7G*e=nnxxN*`76-6nL{wi6%#EF*cSSsg8~mR;tun&3p^m%`o#;q z6mjAo0R-7PVio`hWp;EAek%ZV#903R}kYc+KKfn@Xiw5|LOO- zDpIt*0_Yrf7F;hEH5=nr@pZ@=CHMIh!2Fp?sV<0^69r63h|rDq zd>C$)rzBj;g;pvq(`?)@pI-4JizDI^5*h43DJ!pY<&Jp9ifJ;(;hc8;a|ahqOq6$` zaI1ow%emMUEJ?K8d4cD|h*<7>pa?L1`%QZI$PB+Jis{WT>3?VJbbngg=pRggKBb!W zf~|hf3HwT=Hvmru^;t%k;=4#P=*v5Zc<%rFVY=G>aikot3h8h>ERa2ohh^%1U& z9{mw;%&WMlqJIr>0%|nl4-v>ldS?3M^OQ-LXMK*w$ueIM9OxGig|V5T3!MIjHpPl(W7lw;sy^e zB-y(m$z%-&nPloxD)+u)lxD}*;F4<>9aWR~Y092((0^IhO0Ibt=yCD~@qS#cd~5M8 zT>q}4D<-2p)M;<7qZ;o_>vaYa3i@6Ip?49e0)kG$;+vVFL{|?u#!ZI)V9o?r;irYs z6MYX1p)H$hdQE1>LNw*2Rt-yg&+4BYtA0!%8cSmHHeBI!6OF8;eE=>NeP(&W!_nZT z#7o~{RDUu81ie>8@Q}+T@=71XOyzHipLqFAMdDQOOWuzS$S84t;LNQEDQ(Mcs%k>( z<<<6>5Z=VMfI71vl$8TzZ|C;>ucQV1>u7qV?@1zIqYrTZ!>zb;qLw{Q~KU}*(Tc#@J8w*tC=NSkJ{(Ua)nGgMr1{TxCB#yF%{iB&b}^1~8g((D@3jh3%(mgzs+Tk>0(G zuXJ{7c%gkqJon>3i@>AjPNtPeJ65GXc%dA}Oiw;tGaDP{)z$jcLmNK1QxmJ+oq@~p zDMrn(S?&i4=-k_I%_Qtv?Q~Xl^q#BA8h^6^55+oRcgs?bmO2t0YqpZv6~D?!_GDc{ z0YR5A6csYTr14frppu&{a)?JX|un=k(ruBsIw5moT2vp*@AO*dYcyMx%n18b9 z=FQ&KuZbO42}N-L7qQ*69g|K21-j-kPZ7NkYLS_d;5h1v(3oEn?&dyc zoA@a%K3WVlV2%Vb(A`6|KmN3KcU`>w!Z46V^{&UyCK|VDVeYF-c7HCXHGh#uP6$Yb z_>trsp6SwcWT;0Y62B`5-R%={t`#Ss*qQ$p=J=QBVqJewC7>vmxxfd=kP67;PkWg97oIBYvKx zc1$V!#h_@jx>Tob5VNRELx1!+IZd3yxQ$q*XcV5w)bPxU;mB*=3Gt#Rv7DdLSqXBf ztP)FTs!CsRRN?9mX5_Z$R~jBVZg=_d`C)a@Q?JVmz0nk?4QC7Wb}UR+gcihj(z*Ex z`cwM}yhCcv$YWrPX{xP6Gbr>I_^V|z&;Cw^EqH9A+EMB zSwER_R@Ti0xDDd^>3{KrIfz2&aT)kQm#Q5_beA~J1!hg2RVHB7hJgFj1?S)I^uzf> zjZ)^cg|vS|X3)2TwQt-5hZ&K@2`Bw|4FO9>x3OeD=tBqe^@)3)uuNJ;cW=XArIG+6 zQZC>Upsyj*O#4P%$=dlBC*|$DFyuDkOW^A|SXIc=xKgPfunu8~K?k@<- z$7OMy{*v&ISoMLyi@4As`D>c^yFhvcV`neW0Qg4)D!4eYImG_Bd({IQl-Ug(SB?kq z6*ESBV}CREe4Y1h=w4A6DKAbiL*Jb8pr`Oc;`EhYe7Ij1TA1uQ4?w?VLsK3~`-X`B za|g-Sv9^r*IT^TI-+O5qaA^pR=QSXI8R^T~N~v?92vr*RxtZEaNm>-xx(R$p#+y>0 z5?@AbDwjlUi6x{M*J)FmKxN|$kwG|p&HylG#DDt9ZNFSk{Ibpm>@Z*2R__UCDosQs zq0Jx{=(X^UD6S;hKz6MbL6Hms8%E)z5tzw z*NUhn*vs|%u?+d0D?vn6`bCpP*>B6u9X3ELr=ZILq1ft+tQAl^3U`%^J1EWoh{K|Zj?q>r0erQKD z(tcNSbd}uvR&th+vl4g!H!;wB;FKP@s(-c^xC7k1k}FA*pQ{!o$t0jEAPXeeL=^N9 zkABcAxkhds_xSc?dFIfjMw3*)&X9YkrbsM?h5B$>wy~%=kTvL(7v_2oHjw*wgg0P#_@LBqdMDQ6@TF( zJO2RSgN(SQBUWCaDzFD=Yxd|X-gKlXnU_Ac$J^4iwu53&F% zDi%&52?4@ATm-BsVz?-%r~!WcdC)QDeBZU#{&rzL)1e}yUs`XyC4@u@7ngm!uC+T8 zYmN2`D7O!oR;nboGb8WAQJXJXyN#{e@fuL);H-VPeBPv|O|m;(-1uqzTz@XR)gpGn zK2ln`-uU@UZ`EQv2WjrTt<6D@+)cRD6^Uw+dXr~DV?LJKL@G6~TF?1TD$>oFw##06 z`D;T+;N_C9?$?tf9*$!|l1P5uTiduE*0(BKMsM^KPkA)9L$0kuupM<^F5fqSb@B1! z73T5`p8ep8D4X_!jB-k~K!5DyatJ+BtjuQjoGr_0_So0MvcBl^mA^SjOK)qwNt(Q2 zVs^rHwkfsww3Wtk*&Hp^JC1hXob7F^*~rJy^**!C3-xfdUfn2G`!*>KhNQFcd2q7R z{Sh@6`|8J=;xNRu7qP`FB<|~heNm_T)V+2GKj_*~L6%ytGj1S^Ykwn27pG-n(x+Oh zMybx4N4;y;uL#-xDNVS1J+zGz8~1CS9+p*A9wR?|fx%r}$^7-o59?=~_{O+-Zs$e4 z$W{cJZj}1;5bdhdcuoSVet_p}RuEa5g<6_7?>$zN!!5w4!?kN3bB9Q}n7w54C^q|6 zIeOR2`hkb$Dw<05kbkPfUU1f$T|Y06M{~$k;J_v@F3P#{lH#bT2Q*xi8prfdJOz}A4j_)qbjx8qGH@AWd64TiTB>mV1C*{nY*&7HHY*DJ>-)ZSyDl=JG<7EL)% zj`os+}vJn zXd~v1@_+QysP(ON&-$V^&|slge7%JMUaX8wU+p(-og@qCrWuu>;gSdNq4?c>HRM3> z3YEk|vldfpIx?TL0&YfD`gM%sl(O&=oz}i=URx`GnD{r+r7$`ieC(4$Y3F-MT$R~jKa?#z z(0`kxN3@--8$}6A)myxemu%;m{%Nd|w{$6rnX#FUub0cCYFpdg=z-O8l_7lla` zVeV95hhHJ4)_=l&;ni*Kd(epA?|D?Rr#k9Jrfk%8mBYB)-IP0%A$a1wF((B!h zPKUiYYlmH`HC5McUr(adW_hW)ceK*C<1?4C84~46UB6fGkX!rGdM|ggbQkgK;(slnuGgh@+{h>s z3MOohreGd}XWhyz#?rIFGnE}3-+#+@tHRXkuza=GYebMWi|`^0=QI6Ow!-Cf9hT~Q zwN=YF7If8juFiI~)pbS10H{}yJ}U*`_{J7KsJjKFO{KN-dz$SPOWTAPdx0u&?UsW^ zSS}V@;VpR2ZiA9o8N6|}6FTYS2-f*D(sdclHVm5l;=29rj9-sin5Jo#Jb#V!Kw^#| z8J|j%=y+N+F-cWm#7i+F7^Ux|XYzSem(eX=HD-Pc1npwF zo5CWxQsq(^DG{&jlPf$-G&`~7!;rRR(E{xubnm6rr+r5+IzPTLc{CJpCB=M+J>H4Y zh3wxsJRS9+v{owjlS4>Dntuh)_z@M$u+PG6?)2$`0w3zp$!m|<`nA}otA6v?@9*2f zFK|uEd<=tgn9tT)mpfrt&Rl<16%OBZs07HiT5>w_`^?gt5CCcGmLkS+9%RrE^Y?|9 znOzFEqv`L0`_2*2@_ZpP<2@X%7b`xN*=gwXTau#NB4_N)r1$=`34h-O*W%{5nFA%C z-B*KYM7&s~Ii~YId8zq;@7w$G`P@qK{h8;d*9o%z0=x9}cEyG3%!_-o+zOYgDOBra zvBi&VeaPdLW;^+a*)~n9=UMJnv86=rGMO>5+RxrlTc<_L3#G14-n#4qT~i3&#K>BuW*ID zdny|7wY%8tdB^h_$dA~21X+hKXSfNI1s}7G)|?c)$VYR%U8u#ol!bA7+L@Pgmd*9n zes|V=l+KFeJBkm|r_a#)d{;~}Pr$}wVSj%f6KQ^9Qre$qoA+}w z*7~X4&9>bwZtaJ;59HyrKcyF2b;c&{W0`oW;ygR(KZTwZsYl$6(7*24h!fH^^Vuqv z>sQqty?k)Yiq4nTksfyIBlhmzMdRE1`7jE_fx{GQZ{h7N&~coNZ|>ahe`kPyD{4rG|kE@|{`qVxRGj)No}rEdEmfxD$L+ZYk=%Kd8IwGLF;mq42R zj_1o|ejTww1^rHlArIa14BnX@l6yO3T0PUN@`6JtE`L4mRBX3(s3K#r2VHd-qS9wT zz5{|5;ML6)bgktp>Y_t^G_7D3LHBl=%d&Hs-+DsigPnJQr!rZwy}V+=NznFO@#P%_ z_tR79vA?;e(+-lyH=SYeXvl{R)gE%)`NzG}S=4e9r`OF@n$TSzTWbGOIO~K6YVXT~ zJxF;WdVjP+t!xK58*n>pFQQY`^OS{LH~e;R%7})ywCLlq!6zJpEU<#5Mmq1l1@6(M z3gshxr_$?^bzn$m>gMD$I~Kv=x_h7ZQEs36-Z?r=z&6V*4v%`M0KV>;o77#ftTv}^ zPiMmmN`d-(9G<<4lK6c|I?3?I^0~9DexDEX^nbp5^hrBwUfo*`^v5uNXi4+l!UoGb zJLn>tX-WRLo%g)HDUIAwJFRxAQ5IsTjd$G!S}~AJQSZTiC>5X`kvcsHTnbOxwe7^0 zl4_Q6a`o*w^%L{CHuL4pmqT}T$-9uAkk$FH5&dMO)Sko2peJ+`g&R=PIb=tmf>3G@r<#lkj21v7E9xP$+N2ig@ z!{)%~>a}=YP8;PpUu_{d_jy>#%gZX6(|@K|^ryvQo?hhBuwM)N#vG2qa{ahQ!V|oP zgD$S$!X893d3}CvZch0Ud=#hgj4ym)CA_)?jZdrB>9&QdE*e>7dfVH~--?^XDvy-F zK-cIU@`i7OMbZJOjmxuo0t=dY?q+ta-tWx0>~EIa*{-!s(u5~?roN;+Yp)tW%74aD z9;YJ;=YV)^K;(yT+iox96)pK$P0aPlU%S0iZcM?9tM@{7UvAtPD?waXZu6*xSt9JD zji3w%OPJB`{e*f4rpKTD+(h`fl}`i6`QA4t*?(^@BmgrIO#kmFsz8*&o*N2yiV6a@ z1QtwL0O~@e#xeJ0XxezoqaLPQm48^ege})rEW1NV(q+bONNY(0suhZX@uaPZF4U~S z^#Kxr< zl)dHRJ`MNKy<x%dsEw7;-9qdKV@>aYb7iY(CP|f@x zIP7Q5UYGj#nI1>ukrLUy&fO^Xs=N2BFHh8*XALSn^(vIlr?oE^D}TvfMBOX%7CYBW zoWUW|Vbg22rnT2HRP{!$?Q;-l*}%gqR@|7h_g9o%ot-_aQMe6E`o4Hu6XYjn)(f<7k*Nfuvy5;&)1cxPmVaS&` z>raZ=f~Z-!%Vl*)7qCt>xZbX24oM&CN)TLOb=toPF7GBVf$22x9f91rRjFoW<1x_L zK8Ea2TY+NpErxB+GP`JdTfqw86hMV?d&8r2`RFfHQGjA5w0bYWV+kgP{IN&$}K(7vayj7BJ4>f%~%flx;Jv`@`T_u0LI`+_u zjOT0d%GQf>%0ywcxo#KB;J{tbl|%eK%*EF+(v$n#WygI?RMW4nW8HGPTiervu{F!z z<#KQdABT%`x_|Fpw@b9Y)yeU8ft%Z9(4PVbl3e`Y%8Z7r$A4HapWX?{x#&EX^O;;eZzyTb z@9aLxaI5W<+evt~XoZ4YxE^;QQtS?_xC2p374`j+RiJHWrK3Ncd36vDFK-lwOzBJy zyTaybK6Oucs_#WPUlH{d2Mnp|?R6rwIJsrc?hrRZmY~1a~l+JMK-j(!fBI*4>uII8Kj^nqUdo2-ovPJU6x{ub zE63}?KeP)uOIJxIVi4qeM~xQY{n06nmTWA&34iXPM71Txu2o%%HHA;M9M&v;C#`?g zKmdbyEIMsnEIieJYid&p$E&s(_51SqZVRDmyD0ahM@c|+h*G5K)IUL>i74HQfaeyv zmxxwk8S3-(AZN36Yzil{R#ZsVF@D^P^1zxiJ-uwG71!r=b+(tQ+I$yTv@a!oNLzN> zjeo}R0Uc$rTub{X-LODbOb)Y!kQn=dyx%*LZB$!o-Gw`7yXWSfKTjxTwYJ8u)b$Ql z&|-HuW(fpfmu-RvFeRo)P~{d_riQaM?r^?qQq^zA)uON1z6lI5zqxv++HQP9=awX8 zUzy6u*uByv1Q>5~7nxo>Tv-*2>JWr%c7HwqR4Z?sjod@_c)JA~9$$FfA4JnrO(MBOMqn)T9`k#o>69V?WS3Hc?7j?%$>fe9)C-H zP4D)V?O`{gP3SMHlMD8)JFcpLPS3wv+F_}SK^Q?2UjiXo2F-O78^PQzP+>{+tBTz^ zEpF=KzP@47Sb5XVLXkzDzm6@kGPJQx_)K0LiyNc+Tw7a}L&?7Ljs0$>3ERh&&&Z>= z6%VI%mltI4Gr7vya=6rw!O~`rn}4kYJACz5{$h7gt9%#L=V961*YRnU43T1K;w2aQ z$_#?55r%XI&S#aKcPRFrgIB#rx6EuTowV~T)FN!?mi_LdD|=t}RLf7s86FF;emp>~ zRwk{_gOuZH5uapB%SgjJK@6oBOQCn$3r54cJ!j`ZI-pzPGkv(2`DkM+zJESmhp6r{ zJi6;QuhWIMR;1aELFZ)#NBedU1q}&e=hR!gU0cyg3;LC(E9*Q+Z`%CqG^`IVqBP{@ za*>-!neX1KtURo}(ce!{=4|cnb1PPKNQS7dpuu<;nfksIT}~Zz1d+J9TDR?j)%%5RbjS1SjOGB#OQ*OuY?G;?;hL2#`M*8D|O{wH?x6m$2wLzC#*nS92@sGkbO>WuQ)G5)NJeJ zer1MPzmIc^#-W|g%W)xq`r+H+JBUth;hP&6-L{KIz1g0+a-Tcy@^q~0e0GY$OJ3wJ z(VDTujg1B^M5Am;rp;0jU^~%F{{(zFb~`1eSPj;>oi{H-KLHEow#cY`d;j4 zy;RZWV4o7r77xs7;!9EO`K2UsaU0#$Jk-u4RK#wb17H?^IE*G5`R2vo{xtU56`kr zlw=E@=jFl>n0VUHLvvH)afo6+)xR2B4r92;Y-Q`w@YkGW&w@O@#w)KO)_S9CmC;{`qjFm_C<~PJr{{HLUEZNJ2w=Vzn6wqPU?t5!n7@JQol6(p zlEr@CJ?_Cno(b-fE@y|WqNRuX(zqOU<9Yqk?T!3iZ`*S!a_CQd^sVGP0VpzUQCAo( zo_IMg?te)iU-u5ww=kp9*auX(n$@LJH%Ls+&FZ#gYtscQ(ez0{3x2@&cjReZl)AZ4 zdePf5&zmBd)i~Jh2q4c#VAaFTzZYkBk-`{&wCTgOxIbHKVPl_iBKzAh5$T z6m?N=S_gUee68^QJeuKp*s4qaCQDWmim5{?^?wj@@}j8V<%>>ynFFyV2E5taR_cM# zMrZ_7(=h~RWRkSKUFV>S7xt}ZsqlQk6_wG7%ER*g5H?4-FM$jmJDX1Xj8ae|4`U>l52zaJ|j2)43n`}5-_l2l|h*`#7+w;^qz|JZCKDnoI zUw^jkRT5y~)oQE8>sW7(h!9Dr-W&6r#!egTuiyLofe>SJ|3;MdEXJ?L15-i{p1Yjo>loivYIcX$u}Qg>#4 zD864>R>1ec3{L(o%x4>;DW7>hbM~h8@P96@vPhI@DJDs{wE2CkpIc)$9K^G;U;BAP z#<#nf-SYn79c+CFtNX#tR_5-qUv?_73Uw|_IQ zg_JaPl5AnF9bWrI^oV(SQ_(>)%Se*q+w)euv1%-KcWYC zN|Pp8-mVLJT$~`@KGV8+ErvSt`m#Lr@5G47>>zdxj05rYEE$h?*N2lCLML8VtCe}& zoaf#M==SiWKmGP~gsDjCY3Cx*%YPj&ejoc+e_d>j58Yd2%VnIL4bQH(RLyG{zmW8J zW#XN+WqY+sd@};EGKcI<%+j5$*Q<@B>&>I?f;`V(hm|^Ctyk%MxqQD_E6TIk%zPX- zn~iihozL%g^;j8#aDUQ!bf0UFX?T`u8STnM+SFOUrwtv%o%+O*n-Y;KW0|Rgt1OVj!@8A1o{f}+4p3gtKX8out z?nfNqkcD&mZP&+hedGY(x}82uKf7ST&#qShK%@Nbf>oUw$^h^7i)rVp-Q;R`*f+2_ z#^fa=)A9FdS1MQe-Sz6Do56GUd08|^{c?3ZuiZ|&R=Lyn!|b5#ihsRkH}qKpfR7(( z$GLuXx%%vCHJ$&n#TWA1W8zErj=wn~Fy=C&9BoP4f-rhRs``s`u_ z091$1@$i+#^Ghl|X9Wi!K2C22Zl-O=lOrm#HwQpneZu#+oOZE-pIxf}fa&K)Ei!G~ z_4%m*kR1RvfE)n3$A2RZvJnUNaJkaKW@IPi-2jdl00v)Q15tb$B~Jgtl&ZH$pFop-GQ##O*quEJ~u@>B&P z=1@&B<`Duc4Pd|@Dor6^5qh#r6DnJW6hlbE+u)N3aBI6@1b;!FXgiRG5Dilqzh@BtVGGk#;;5(#p^z+-(afAe|s&G66xP~A&837ED zR$%>9)H8+42+%IqX{&%}PXNc3UWg`;Gz9(Zfaz(jVeBg07;2t{pX2-jDojRzw?dH^E#U^|HWd;$SyDysHa0T=Ud87LyKgW49HAU&yECV)nR$#FymGR6Ne*=DkvTBM_0WeX>y zjZxYqWa$nMNdxK=A=}%by53>*BCs3uD~o%xTp!Cts?s>VSrCovQ4c%r8>p7jj&HK z_DKi=dqPPL-omU^0JUs@QxF4q0pPw&cTdkBY5+#^03rFt+k_P`i`9O@kZfU(6Mwd+ z$&Wq8DK71iES)0T{lUvJYbLDt)P*fe)gI$`G3;OS0< zpEw?zF@v#qxhutXg-g4(=;dnB6d{1Q!6m3Dutpq1y|B-2$t?w>mOIiE<3fi%Op<8Z zVVcgBf$A947#7<+|ii~E~yGdYH|)c~8*aJL%}okWam;q(9k zo)*NHnT{j^1j(f%kKJB+LrPr00R$?5FQ$|~^-JqGS3pP}cudC$E7Zx(zkfLVgYOQq z8)J?Y2vkU^jg28yK*7&|lK^wsgUo#Y?==R4P>^*HOH*DQ0XU2bBhW58RR!6%c2`qf z&wgv?Z!YXG%wiBrdO)ukVT_O)Hvq*o;hB~6DLK>k3xZ59T&b@soimm}L~T`R62*&^ zS-)@H>HOYV*1(ScJwL_M`G3RDct4mp#i5M;5l=owQ?2tAFwhF^d5Hik8&Fp|?G(V~ zo1BYi0|@y!_p1OI1dL+9BL52T8ieH$Q(K?TPdUDToJ@FaPvzQG7Ww7+;W$4qnvJb%UF4?aXY;{VjE!T!X{509Ep9K{PTGyoxbFIj-$8u}fpZv>%U z%n>t#y42TNfoM(BTxSQr20!(8k9=52IlO5UP!0fvR9K}zVkiKRP1j?C@Adp2Jn!TK zP9cr30#SAaoMbw;5iV>`;Royg!N=ePo-c-b;A4R4l%L`!KYyK({?w13akK9^71-jF zD(0}qKJ4eG6NnD0lIVb8tiblD015%iKHbn~OI(+LnoWJjVVs6!m)&y8d4-em%dx*> zTW}jg;6Mjr1+Z`K*NiF!uoXbD3Pcq`hQXiw`tT7DPQQ!j=XVoQe(+-fzJB|;ZlJ?! z8mJs2u*(oC+J8W!SQYLaR=oCrX{tcxh7eBtDN?Z4_YjiLgtaLMcW`dez`-6U5kOXS z(>Mp9`qYygOx&f2{`kWf=_em1E~yUbmp^{-fA^^|pBganO!}?a3={9d<~nU5ME0^w z*^+PyHH4E`rx6+I2yu!4C9VqXy$0=Q!m_EC7=~!O*MIf|D_G4V|9+_b6$~*wNZDYZ ze$bJW_yfGEA|<;TN4U2mx=V=Axd=%q-Y1bht4})DbfJb zYLHnMsDCA%@&(&eg21me;hD_l{wFV|78P!oWdo(1^5Bgbo;Z;B;W>Zn#kWuU^a5G@ z!*xFVT@CHw4~P7HzqCKK+zG#906TkO@>ZjMts)l12v7zXC7vX4t5#hbUZBbRcL93@ z3t9Wq(;a79%o&76k9El5GAuBl&lvmMm~K#ne19clc$)_z-RdK&p++&FM8z!4b^h(~ zU=f`NDC+=ycD{xHZG<(`m!RXoo$|16y(tetNUvld-`?$$FaPLq`cH2Dauo5eUnZUx ziAE_a6Bl=?&p+rDb*<8LLs(~PeG8E|=lkotRCIrK()vhc*aMWR5Nx$rNgEBNZX! z(f0Y%`7?6?6aX9!$TPHi!dmo=yR;(xA76d_`@vZ9rys{3el_`)3_oL?{Jxv=-58_h z!|yoJ)PNoUi$aL97D8JE&2A2TTLsKIAb*~k5ZE?`wn;hD;FNPE-7Nr?L$I=VX@^dg zX_!s^jsiHv%JhF3T_M%3h-V1UNI^8?w@wc6I%kam4uDR6OeR0RCcfiNe5aSS58oNm z22Z{#!QbBJ!!PZfs@RJ^G3?X7*#3>*iI4u}_&n8*eu@#q7=*3*jmap4v0nj}#(%K5 zoSpEoO>4U$Fx3!EDJ`WnVVOn3=!|>-HO3+k(+F}r=?<#ab9EZ4A5jcAp1VyV(jaod zn3l>Ay}W1@sfE14H2dJ9^`3AsLn8%OpwtsC_9kdQxCjIC0>8arkqv4eY4GE}sXxhf z|1(!pn+Tu@jf|td~ou&-u(7j)dx>1@>3&?Ar=2<5&y)XNmuBpMt`=cZNd)1 z#eR>OX6iN7$)x?zNe8mA4Q{2`r4wEtz^?*=(FOIO!1P6CF^vHI_)YUBETU6Apds|T zy}}-E;fWi}fbthlI-oP~nq)@w$QIU6mntC2Uq1LZ7JvHnZw&ruBF2zjjgVfA4+ec| z*|+DUQx0b0^!W#$iYSN5ynjgt(Yi-=pirU$W_MZ;)~mSH0LL$f;-gk(N2Q>y7D#i% z_L}W81UbEEV{K0Hh=6dz=h)sL~L+neh;ANHPfg!-K7vS!ubE3jBdbwrG zl**g!0!av96sl#t3t)F7Ks5gquK+whcCaGJ;x(M&6@TK23l1AmDYt z>qOyxf_nVccg9ogYkvh6Pf2BbVNLQdgP-{TiRmT);uII;=~EX`^3gvboBE^QI%3Bv z|LBGuD9QEHn|*pVuLL}pG(HHa^Xbpvf8lN*lo14b==eYZj4DDWV<1rl@ZaN7_|XGZ zMYv4;Vb%g=!V^%=wE`P}ee_KY4)&zAe*Mv>3HZPI#u-r{p?}&hX9PU>e{kazdYoWb z<2eR4g20;%AcYXPiZe3!(T_NJS(HG?KJ{x0R7ug8mQ1`^``FKRrX`V+{XTtVi3^>P zsYg)1agr@afWP@amkOOlFr=!3cxw@FUK5`0o(Ppy;X0 z{?+e3`0Gtx%ztXcKls~D@jRH2dtYAq!xz4G`CCW+a--1Xx%*fK5DUAHl-LfMJ4%?g0z?JAdx8Fb6joaRA-`+tavZn~M50 zZo^M^e|w{Jj#2c*w!i%E7t^NqlZj7|f%rdowgi9qdi0|^ezY)SNYNE!#9xq;xzQvL zrHRd+H?wE9!twt7WLI0l46-&PjKxwsUk9P+9$2eskR^~~h=R<)B^YdWdmu_xgh>cD z2M%^%2Y=`mk& RcpW86LYZ)LBVbC-V8RQ#``oc?t+i~)aFU;abWns^od@~!44PIP-Dknq5TFf+k6 z3cyI`&`&K00Ttd^30NDzQoJfWHRga?uezSNEq_8#(oOIzQ)Y^jKUENJD_TcC^-Z@G zj8aYl`0ZtMg|;YTG!m-j5K>8-f;}nrG66sV>eLsT=O6v(GJynoWP?$NN65fb$CgH! z`5(;s_2ixflJo-v={K-}w5(SJ*L zT3PLvSo6%ka zQ2?nqpWIS!6j#s~?A@Q<>7RYb|2=jDJV`wz|G3U%H~-*$NJq?> zrUDx_Ao_CIf=VCtb+D{*)EPsV9HAB#0 zU{C%UWc1Oc3h>+GPIb%!znpvOAqx88YxbvSl+j18_;LmRmn(S3tC+>W1$4>>u0RlB zjCQ&k&&zZna*dOV@u^$(ez%xDw-Jn=i*8-Ry+vOLL=Yq5Q$Q7EpzQ3fWTem$(DY?- zTb<@ag*TXZL;pAK%-NI|7Jr-K?vMVx{D(fVBjNA-*$-xaeet&k_W!A2oHAHYmws)gU+rKgQ z8ONjwPPU;Z$UG!+ohW`ANqbrd^9z&!r(_j^J9#(%*__jw7k7T)<( z&+u3Oq#xbq*XJ?z!5=O1GY4N~3$_2^pi^KvaTbHo-u~JGqS*J-T6DqmYzW@j1lzg! z=_w-g1V3YcSFWwuP%KQ9%SD$10p2X7-y;4-q{XSbqD!KYH2sdN?T| z&;Dl~{q=L=j1u~tCx89&5=ntPgDNw(xT+<{m^BCFZG(^BIjH)sVboclHxH~GOwlOKj?(u4mW0ivl(wFBJrlwO&Fj%=tfptYaX^3I? z<-e@KoT2MyY=5baePy=INDR0g^0QQ6z_h~CKDC$>~S>s(&oV+Z^{>_AnfsZ25?8j;WSao_!D zX`k0S%PW|naHgM@03z_saT8_#$ZTmE7X$ZT|8j-UuC1n#_fLKP;zKp{wvp&D z+<}5=@aA3=P7V9L=?W7c6t1uF{;BK$H%rLt71!*9_aL3d_1zJ+}^4uRb%j_yj zowc&O;eVN>m9OitEA{a5gh6CG;D-+Z6Mpy*2bYNt>3$vluG^XVkbl(H>TeyMa0FjX zPIh1IVba3(AAekKV%cr#eR84yz*F_4kM+Y-p0QTa9P=OB(}=wu*$D#mef$Rw7{0PV zpq<+x;NSH)8{xvNY|@?EAv#N%S{P?Cb zl2u_rNmVf3PHSOYI^j}%Qi4=R08K!$zqFq2=CnbdYATF={H4!I^Lf;KTir9NVKC{o zHADp^2AF?lTt!LH)x;x)ZbSt|+}Q(4HHBvoa7-YORh<&<>bu9(e}3|<{p1@53jgRx zP9dp3I?{5|Y3+*q2Pe#O(Epu7`-40Ezvd*sXT3m~RcbiJ#>d|p?4tuIw5NKfjdEo% zX7o;rE`W5xRQz&5tAjaolW&b5ZQmv!?P`nf?ErtDY&b|i_(SMnh?;DW31q5u3S@>D z)fJGcfMNymz^_SZ{LMX$rHJc*>L#FA|I~j)7uJkira18@U3wcm05Yu(&; z?y#rYG}jGFyZX@zzd8A<0so!HnKU&4R6kk-!~6iSW3qN@m;AJ&9dyH9a4%%U(>2p+ zZghW|tB^`R+_fI_MHrn9XFY1k{N!N%`<;lOs<+5uA7e^ym_;t)a^PNMRk^03+tdA2 zfB$g0lIFo^?|{#d+T6%-X=_TrCoWG(p#lhO!}V=r!e8@)zZ7-g%da)?GtXaQ_AlL` z{P30P%UynR-1a~7YJWB3xiju3eXIpR_EUdrSP6!17eXof2gimwT-j>SDJtzIT!zzn zh;RLjPCASBfLb&8z3Kn@6I0-WDfTtQ(_H`MGoEkOEQF6!t&)z)l69Ikaku z&uMeJ4{;GRWb zv`f%OHdLSkcww1Q}q*zJVhru83{w^Q}XgQF=1`!am+4U{2ohX(!V#N4fZ40VL|byV?XwrR#cmb5!&H3oPQ%#?0%JlE+^Xd-={N>h2h2*zpeelU*v@!T+9m`k$8a`_Y zyQ3x{z+?=mds>ZxzgVGRV#sIp&pYrwGY*QtxJr2-D2G8 zkiIl!05GDOS94nfOx6L#7;cQ(1G0iBMrisydhknb%{n^KOKt%gk&~a6T90kOh^JKI zA)DsAKxci{QGEnI3r}%xPyN?G|HQreSLaNeDq4Kk7<}U4&ssl)lWBiFoBnrgbM%YV z4H)Ga>@)xWXYXyBmgSjs!M`%KKkZ%4l=!Y|eVI-O5Z*K(Abyz=)PSg{r~#+uzfT1{ zU#|Opo@cGS_taEP)m&9qonS&pI-O3Z`{)Mur$=c^(6jXwagm@`(1#ibHttfXak+9|X;Q28doy-;``3PaF~6`V25OkpC0>8=@3%JZ4}Wxk>q+;iglbkU zfW-lH1{Gp_0i0mi^rBFZr4HN$!GhLK<2l;wnQ9m$&X(faRbYP~oHfnf=VoA23bn%# zUnsjlNq_}Ql(jYssn;S^a8<*OjuuJt!J+A}Y;kB?DPJ}C#({m$3vF#*^w(a|u6^dd z))%B5ci1By#_`^0VoE$>u8#cc93t^x+8cATn$|pU^sI8w8)u+idF;3JMTf4$2Fff9 z2K?ZANGr?#dLDoH-n%7!UeYqwx!-*3RZ{0)uUO#`#!p5)%@8WF$Q|RO(@(>Azp$7spGyC)gF;S_i5&|uw**f6E~9A8 zVjbsB0U^b)QXpWZflAu?3HO4V@(kvEq8+*;ip_oZBJqFz=;pXBo~r)x1AOz&+pe0G zWac=-7TfgNt1}2T>>JxuZN9NhT*i3>s1HUWaSV$V$Ds7g%z4}0@!OX`#h0&;b>Mq^ zKfG#l+H+a^?l0WXz!7PG!QOa}FT67ZBr$uei-lu>^jiw5K{3mpnS($1?U-5IBLYmXPw=`Z6E6t}hWiXbP7@Mba-@ z$X{%7Ene~L9Max5hDu`eQH<7VPmlbm66Dp)TNYNUYigW)+dHuGKpndncxu9SxMi)*_7_`XnpimLX*GXmTD>NEK{|3g#fHW5R1-z z6#_>y^Wj!UrByxlg(}GEna47IrC@9rxQwa5OEnOVhMI76U(!3N<0*Q}ldY7gd#Qha zsx3ZK;8vbEQhvSOb0B$&MjyWa$oTmDtDnDRfB0uiQE;G{f(c?a%K8p4uB(DwsSq0Q zfJz^*l`9f6x1*x;2rDz=ty|x1;lp41}%=>>%J(~xDkR{fyaMQEOGQe=rmu zyUWKP_lFv+*n5v=pGuN%}wT&5(^iee4S5-D?u?9TGm^qgA z8m3J3_TC}DCSNCOp4m%K^h9%fXXUAW?(7g(WXiHrn5ApGi6g5K154vOjmLk^9JZ@~ z2Qm1~g+jami8aSWM}n%D7bm6_H(I9-!iC$OXYf>zY>Svkv5}jW`KlaTms{EpYNO)j z9*VQQbo$*&>?kYN#48;Ym9)j{h?akPl7G4~{!DH6qid_zwsBnaPe0Pm8q?I8ovD@y#rU+P9xZ zUO!(`&0}<4?FaAqlX!n-s(T*G_%krG91J8}*|y}ruH?UI%YUilzYM@{GagDFMnCPr~USasq$UU6Se_(tsR2szw8jJKe6-Qm>S7DKe+JU zvj5i}f&RoROFim)ZtCUzAwLQ??`PDLzTei@(CSUnu3}m*NF~4jCw|woysX}~8XX5k zMMHH9Xf)n@ulIlTc#phku4hAyuZG{_KVPPr(a?>-BeehCLjrX2z9HWaq!Cfm#Ql`g-GW-RHCeyh1$=9Iztg*NJ+Ze(<>GzwaAwo%aW$`=&cI)X=>2 zXaD7z>MeiiuYDtkY^-P+i_N(r2vLf&b>Zp44=yMZh2xbglUG%IqCacp7%(iTZt(ad zu|ae7s@C;TgV>2@EZA5$biLq>Ycu!L4(uz`YjJH%j-xVbPQLZR!)I;%VHf-MTYdY= zzxaoI=^=7F{t(k9dI7Hpdl8E;URxsg_LtlKu;qUjtgeZjA5Y^R3p6}^m|R&H-m2== z1lgi+^tUeNqvvGVy|XW$Im5blMvYiHK)Nd38fH`66hxW&wr9x@eY6H6lsGMPb2x!7 zx%-0wc-w90TSmQcJb#r1edNWL&)LWz3Y>rm74g!Gdy7# zjiP@6+nx|7>fA51B&zaN%>(=fs?bu(w zwm-16zhZf#enC#qW115CyaRc$mjjdXue5|xbtcQB2RUr@xnGjMsvZnK64-ENGw z%yd8cd;QVBWK3KC9lM1U?e~Nq6T|z5IH_Y+9kSRSfIA?Yz)b`nTE(%t9D&{@XLe0+4$eOOcxG++W5cr zNm%oZ#t3y!PS@ko_9ges;r(OlCm5F@dNC$#Ybc}d_4Ivy6u!=;<^t%|uKoKt`^F+1 zZ=!I3568G{Oe~{C-?pUA_U#{T_Yr?x?&sSAHT>;op`3liGycc{`TrNT!udIO7{B9q zZSHJTh>7W^L}Q{opWvqCh?Ohk7mR?S!$H{_qFNz!*hMI{RVE74D^Mj;7g~AY8J?0+ zI$W133Zy)+5T|C-*Yb~FAhhuDwLQM$;AEg3R8!giNgJ;QExr%QHS||sz-ND+{<$w8 zOPB9>gLd4K246OB{HPtY;f|%Tc-HD&B90BY@6|)8D6CLkTu~NS&@TgMprVlJSRE4l zqP5CA+stKr9496-ES!PfJxffa+~X&_E!Rh&z z{a<3#8?7E6Nc21=V4V46a+)$7ZnCR zR2?);R7P#fyO_XI%N2%&h0xHXK+e)Uff%}oJF2E02?)m}D6kj2G9-VV9crm}d*vDk zijG49B3>X)Y+%zq)tQP~cMt(Xo-^Kj-0DM^|Dpr`$op?R`}dk0xXrhHC;Anm5juxo z@p!_}iP8(e^X3kMjSqpPigah@X^2^6u0q9wro19AunDfDou9Z|QQP_Ea=s~aCh)v=%Iz(nubwEThI`^Qw~Umi;wzxl?wnQuG( zFZhK^>0V&KbFu+OmwL{mZJL^1U_$0*ZyZ|*H8)HMInWlz#-M-gr<8&4z1}`LdYXUY z3%}*z#}0)AkI~{q=V~@R8ZcZTAUdrT8w4ZhP~CMDuM8ySnqUZVEyfV{m)xNSVprVi z?Tl);r(!4~4VPw{zM7*1jt**sJWmH;NxjlfH!?rPcl>xJ&th-6_U+42)4uiv}SqWPeOxPm!WLz);ZVZ*b;r~;d{(r?o{f?nlw8OXkCvsla zlpMLLGS=u#=3z_~qsfPc4|^jM85QPq&dHoKqN57=SFwNT3+8H_#;Wv_xiQuyZb9M{ z<+)7S%iB20Hh$!1jY+!6`1eDM=3FC8#wxBA6?~e?I2~1!{NzXhFN6 zEg#WmmjUT?JN)yWDUt8_V;(+wck6p>Huj1ID$N}gWm5z5sqU%{feF(O?LNs2Pzq0L z93t&dzs`TLQeOPeontrCe`5OpB(rooLM_{M+s1!S|Kd-7%bMSQ{x`0+JvV$~>fP7+ z@sDg565ot?pc~%wfO%`n|D-i^&mDg)wD3DaRU@~s?$8Y5vJ~2q|d^mn?s~12@by!Xtl@Ir(_t|T5XWPaZLOiiO9%;2m)hw0zI=*wR zyTwq&NCXy_JV2#je=yEXEn`>AcuSvz(>0`A(q{>{wTBjZ2Kt*mGIzmmj72Qt17-h| z5BA_Yr%IdSp;9pw)${SSTPpWv{1qK#SDf8o?+PPS$k z2TghUg}K5f^o3DwF;;UKW7U1Cohkfb7m`Yl8o?XCZnt>VG$-&CkM(Wm0_lHmqz%nc zUh1ZdDUm$Jq334l9JtI|M&GtCq0Lah#~QB|GB=RMsjotHaSM+I%4q2m*0)&Y$ap+W z%o0@e5|sEaSYAT?6MuzjqIK7?+v?*i2(d7qkmZ_sQymo~tN62pX#GDDZ>#Duj}oL( z-ae)O$MnjDjC*MPKQ&|&5k`M~I68+!JIoEKNWrV_Xm%N&APSW|EV)9nJ<53ACj>I*)Z2Q zSz|sQVHSJVD^PVPi5(IP9XuTiI!CM$rcBV56e{LcPsXKcY(FbRHAixsEC|u4LRv31 z=eOHT2 zMZe}k@*5BOe~@j`r5o=Ed_%&#R-eMbu;8wl=M|DH|jLP5NszFVShEt~6e_zvHF4_d#cn1^U~c#j%tTm=zS| z@#)((lCz_XkAj$f?w#W810foS*tGq??>^diah>qEFxhv0#iM`8+xP?cjiavw@W1L= zANqNJ#N%Ydqu$@~@{s?E5%~Cbx)1U@_9gy_eVJ(8D*lPN;UoZW(V6Tg)h`VE2Oss0 zt9#q!oHj3r=}SLs+&F*WpUCmOR}QhXcYgWX{u;+6`NVwZGt!aJFa*IG*Pugd%aMBH zs`J`U7P@focEEq;`=h|^T!DjQ{%dYC;&d-q1|R8r_{6GV^3gBT6th@6#04hwcmh!r z#-B1&j#+()Vu9=XvWu$>UWVo zX7dpIob7sift$3B*l*9!m4t_|WJ#AJEp``FCuAwA23Jvn5U; zZS9?q!6$#$t;7Ix7&C*zu=t-WG%)VfzNq@nTiT+zk?sBxzGSEe3&M z%w;(l$#UH4YSIkFe9L#1T0W()AG%9OD_fo;Z^38n#wTE8T_FfpiUz9GoAdbltYS*z zk$0*ws`Eu5d%Yp z9S8Z7BP!LuZEk<3TPsh$a}BjL`R2puTOa$Y-T3umn{i(;u2FiBATx+mx2E%@B3|4e9x>eMLP39C zi46P?Fo|s*X;mOnAy3GyLfRMGeaw?{(C%aHybx|S@dn8+o_Xa4?FXaozSMM1Qp!__ z*~+PC|H^ZU2T29P_Iw!2`PSj>FFF6g?`Nk{dd#mpVN%P5t0&J+WvDHCr$5ukgDL|{|w#S|0tJA6aVuZoDb!H zC_iWRukx$>|6ld{|L|A+Kt9sEuQc~_4-ieIJ>da3P>&2|kjLOKrh%%c3ju#mSW>y} zD>RF*H8vjI&YU!YLRMVs4GS?6wfRY|B-hLkDit_q{sAPx*iR$pMGiTfiK5;oY7t*a8rj??1NpGO|@wP$LV50{ zCu#RI+Z-}JOpm#_z08btsmwA-lY3rbO~w;S?xpL0ry=l z+vD4Ti(79a#;%EjC%X@6^Z20}FqdeV?8{tMG>5 zk$iuT8yBF}`+e{CfBKKkvV7UrZ|P6vc&Bpyv{IHopa@_E^AJ>_MgScdiDtF*l6jU; zD(7RtBtHz_^lj%)(wDUKK|~rZ_wRrG zXOsY3pTJE(8v|dK_}PDQxWGBLrtN1mWd40de+1L_mHH&0R+C@t2KriCx62B+!5rRw zafrD6fJMmx@M6WKD0}-U+DPa3I(#=Pha@{8-ui;}eK>-#tbqVF5}&(ZC;@E>JB~Us z81X8N8#a^4-fB3cxE8hv_Yx1-Jb)2bPh!4ryY(MA_q;i}(ty^61~^mJo^@Oo3_9I1h;#Y!Yl5ymtD)eMh*)Z-a#NRl5p zuQbFWtP7?U3AJGuXgsUbY#sTy2G&!($kpP=7nX)%nvhR>0k72eb58r53W3I3I_D?C zE4Z*o2ibofZ`*?l*BdO{qih5EF}m|8Jg)T#U^4-S_2&XKGp+#H?Xy@HDsd8j0=xEk zG_vD_GtlQkCGOz?0mR4SdaY~g3dk)Ai$ev7MS8f7d{k6uty0BY+X-kkMjEw&n2}ac z8w}P%aRupt-GEYsCe)X0XxvUfw?8z6rs8Xx7;%3=c?De7)xD4>6(cTF!bAIa0yrk& zp@QH_eJn6>c%EZR_NQ8cW+p()qZY>+Byni2BkZneSn!IVWn=@g>ME-UT7S=--xsK9 z1P+{AC0UfXoaKFiaU-zrV%lH<*u`-II6iFzL~{WV7N{xj6%-YHk8{rY^C1G1%XJZG zxnO@fTCZ<_;=YuY?Kmylrl!h~>IL*l6DrRJAO5PUf3OxD8w2-zEWocMoz)81F~&&C zb2pvX`lpKZQf1CGn~ z#Q=Mu7gvZLSdO#`hH_20v|U4#$98`j>(J&bUji{PD_gn-RIvm^daVJ!RkXV4O*0&__ZRj#QM z!c@nSHa#72B;z4jVOgoeIWm}}G7;rg0nm0B0AjfyGZGtJ5<6~9DoVMKb zn;PM^{AAlidzVZ4#%;NUn;L&LZTZEv34NDK`W9`uoeRFO0mU-IOCxw}LmXoy_a+pR zdAs+@wO8TcRqmw@ZiL-Dge~QlN~HWU+I9Ia_>A_+kT!BJ=iF<6S1?lJiCmLXP9qlC z2x9Hn&Q*-vY>ujSew_Rl53-@|1(pDBBOL!)BSu0mB6X@F*rj(@rdi*v3>k}zo z1~wk97Pv3wSn!3U^S|6DumIx9xl-d7zgKD@WhiKQ!0j9vmCQnSlQQXKNxLTo?`J9|fZf1m&Hb3?!vn=I%@6W@)!(1- z;o&u|LHkm-h!5;W93Ym!igTkjk~)UmSG~w~2!(*_6Ni{Q*hGQZ?wh!fd$Vg$B|XHx ztH(Cz?f(6bcueli&6ghIsj%0@sQ30!>o5lQ}CtN&2m_TljE$6TO7%64+AAdvD}>J|7O@eIQ2?5*dZf=)n{ zf2HoEH%=D)(VtApqN!~INbjwbEppuzaZ%k-Z4x4Ir~#JqV}ay;#NDJ5-8qZ}rE2xPIhV=+QgL{RO7<#S`iS_j1>0+m`!Ic={P%)6}4rH~U5ecudlZ zN}XihR9VZ5Ht13(oZIhm*?0Q}LO$#Ja=irHo3whX)U%Q1v88>3zLocdpZ^Et^4&lJ zGJ%1-U&^`m%X>wXR&IK}BjvO%_2{qn+X4X?kh<0f@+Yn_P~IeCk#2@ds_a z>OFp~t(UyVkKXl$@A1FuJtYq${PhBDV@?Ce|_I>z!&2-wf^M_#VSmjv_I@SasS)b!~{ff|=onX|q{Z*Gz z+4fIe+2`eVUDCGO{GuZbWZT^@x@5B7>hwbwY`p7#XhOMc`@3!~`@Q_GYqkCOu3LXq z6|dgsP8)Q6BJql@CT%lv&x!(is~|g8;1q$8|LBgs0@7C1NSlX*m>*99zV^HUt^u~V zYIvIyypkr{TaaLw65!?6{ZZ!s``nxCRvB+YC`#APtw<_AMY*QM51G0Gh)kMx2tUsO@z&CY6D7g6 z{eB4_ULRm|vjoT?A@cKyFBCByi1(j0`o+@krOUs!Hb=a~G~<`dYu3gB;}w`GMAB~E z+F7l>#5u!a+Ig4TNamLm%-D3hN!JnW^PhhOr1Co&KnRv(vRb?E*FBQf;PQW`9802! zpK?_?r<$L80=P|L`gN}Xp=kbdum7doh0_r2e%had%G5C+74#{jfJR_Y`N@*A4365$ zmtzjGfQ#&j)8;1x`5mFVj01C|-CC71jR@!gqyk+)Pa%>10L_88y$>Ma=pg^vvOQ9~ z@9|J}?Z4x_MCNh&^5p0ZzSfsUVkJl^Ad+sl1{GSHZ8Czkf|K;G*^ zNrz=i4=%?q{lCB4uVjA%Ssnq}w{(*IHyru=@k57`jJIX-{kr@}7>a*B?Y(4w+y2S^ zNE5nBX!|4mQR;sXtoHk2C2S?%8%%=Fx%{r}n?CLQ_2u7nOSd0Bqk!fuU$Qr! z{ipd#e%}>J$={`x$louL->3xV>xEpW$4~o5(A(#F^1dCX zLFIirFKhN**FHmz?B`M-@m}A5d$04icCnDZ|4Tk@$D^?~-~NBI{$Y^g=u3KSm@Fp{ zbNRO&*Fe_aN_w>Q-u+?{Z}NUlX}@d2QTbUfLf+fbUu?AUs+9++9Iqky^wUrHHP3h| z$F0lYoK0JgLE6fLDcC~Rci!`3NGB?K$@TV@i{dvwKjrs$z4kkrZ!cZ@Z_!aDzy9-o zX!7wt;`Y1btqXroeuGzvB)LB1k`yw;eJ;^J1i)+Y3<3ff_a#MOyRWq*u0CBH%7=5n z4w&*g7&rz~=HJ4F^d8^7U3GZ+Sw|Ec0gB91A-@GI*VUX!Z(Rio>1S7mQl&&7Q&R!b zB>g=hJ5hkoku5v3WN0qOAIQ(rXjJldAoIY79Ia$0BN>0kpjEQ7ThL`LKs*QPz=5ed zaAgi%HHJAs2-HM=>}|l*HMmoR7?IzMLGqi*z)FM#aeygEmEWJUDO3XYCFJ!ExQ7Pe zW3vRomSAk7(Zx~e%N5&Raw>#VI3pBnvn1+epL(zPl4+ry9k)8sm%4V3PhJMc>;gx0 zL7{tD_j`XC>V+chAFHH4R0BP`Ko7k(?9-+~MsGZXA^oC@|MtJ^%iC#l|F8e$etYch z&1Lsr{@efczwgWQ{eRnU(%t|0xjN;?=HIN%a<}?F{LQ)Ycei~1?`>IiZ;rcv%kP_G zp1yw(yPRkDe;X>wzgM^P-yXN)f8B2Gn}3sqT~UAT_x*oQHq~xC{9m`qOn3O(2@H5L z1!zGR5YAc{Uhy9}N6`s@b4DY6N>xNCejbAX2jp^Ez|ei*p*9A&eZ|EF4IQLX(&%DM zwcR{6y`d4~_^t(!8(E8jcu_iw7u{#$iFkY%n#c8mO^S>DDuV4~c{t3oB13LHPMc!P zMtOht{5tfn$HQoNZ-)Ci^EPNbnxB@}HD-H#+M9;gM&+v)QHS^4O*y!WwK>Uf_my-e zvq4Tmi_;=6GdqYq+sC}j`B+&`=Hr$3+RXNK+B0T!u;?`C#T3QzwqCTE412HPV3Y-$ zu=7Z^_w*Ws&Shb0)8aM0Bz|-49+;mM;b?#SAjrDR{g~^MMc%nrgZqA~>|YCa(0R?x zbAr{2wz+hgU|?AJ;L*&lU4{CG(dE!LX5-0VckDaH(PK>QpQ|bz&W6V+@62}5sjN;h z3-!+CTwO$;#ep++O0yFztov=8ZF}?URq@KQlNyuhV4BX(vsYA&LjSQYEQ6b4GMax6 zOr=Ww&Z;}^`V%tnx1EP#VD6fJu^2YDNZ8f$weJSU#qhZuHKD`yo5O1d+7_b1v7h$r zS$BK0`Kvb@93t-+rQ_zT2cz9?Rt@S)zpk@N9r%$mYU;h4&QM-jHYHjGVs+z$TCmto zwP|;Dy-z0h=inYjr;YL;GZGbaJB)wX!Ev)KHGMy*&un_zC=+u4Yb`Nn6Jv9)yQ4d6 zl6#z92JYrE8m-fAnPtejJmT(#g)oKjP+iOO{gdq(hGp70tA{D9%LBYpZwbb0f$p;@ z?vK;R>v=rix9K*nvu7t- zPbL#P>h_QK-E4dcp1ag|olk$yig!9HZq#kF==BZ@QBbvF-NhnIhxp-?8IQ-*HwQ!W zHQ!%`(D94#**UIW>eD4Reoa`=SMT~k9rOyGcHycV!c5VMJzZ|L#Ju;IMY>)<$L^i6 z3*5F!B(PZycBFK~e(qaT+dU|x%DQ(RiqYX&IFYTOB$zy}*>dSDCzpQ)ZOMR~-P9W_ zdsSfu+r@5gJNvSCKdlFKf9VQmXC~)UHcI&s*=Ai@wco@;)rjd3t68Y=t-73=bmPvE z+U5468(nRWh7(bb%}az$eLI*L>vg`2M&4oG>(JqNTXkM8ztNZH>simn<<1@E=RVF& zo(Kiem_}1NCvXThq|twi%Y@Eyw2J#zt6y>Fx_lKFJUqYlYp3i4!oP&c)w|sb<)Xj( zr^3%RmwBLQ`hphlG|Qk`Wj&KYmn@?TIH##m+=4TTo_hZtiHUM3jc~Zd&AQLROwK-RE(Zj1O`%|*1mY08<5f-J7$8}xLnzin3z&3}+_1t!na8=#A80qtgx{{x!E6(n?AA7k0h9b;bry!|lEp8T87`VKvy}{(abq=e05=$*C0ZFuM8Bq=lHy+0a-& z%meOJ(Dzq#C#>8&soJ5p$-T$k9UmU7U*Fc-!DM+@D`0=0#MZn%*9(o8^V;7`w9N$j z{jLq5xaOASo3axlY%FFjw+Xg)ijm$zspYBTmwFBaL;8 zvvG(fil6m0i>TZnVW8NSW|Lh!V&$L;^2nUhf?n={=2WX~SQu$KpP7rM*Y$CUtxj$p z(#JTfj@f@|IC;FX_0ZT3%*u%uEEDe{z#$&q0GEcKqR^D;m zuRt#sUK+>E{Iy^Cq@DrKS9DWqPaUChdr0Di!=1P{?*(jTP0ht&9Q4cs(osDbzVHhV zkzMcI`vs9pz29ALRucx@-bD-&PNvopcaiR%+)IBl>al#gJa4BiT?p(dCwtKMJAL9A zqvb+V?<~Gr#DC0*Hqvryd^GNn8|%T&6Sqx2Ga&SOV-<4(Yd48+yL4_DMTX~UQaw^9h+Hm~vWifd%9;s@UK*Zc(M`^D=` zXz+hp5#`2|`nkLEUAl1;?-)1NyS1pSD!m{4{Cum1ZtT7i%8DiL5Bmg~V~~*RC=C6f z#m@eI=IR~d&W9_zTqE)qkDhI{QATlExOk0r?q2;VRvvqpsU*960hrM z;?5Q0k$Sh)^D^67E?wQdhf6NAO}*>)?(-a%2YM9yOgEBbeuE^8YJa&&$CLZ~w(D*t z>p{PX@6biA5T9NN{ruiC5GDSd9J)}hwuiiK4H4eH}d5Su*Co3TKX@y&n2 z`eo(J`iowy}RB8MpiSGH&|Qh-@Es2KsVhTfJMHsMBO^oVkI_z|!}}*znL%8*VRCFF8(U z5y#Q|rJ=l^@?woj*Tqd)#kktUj%j~Vzcaemh@-k}Rs{9RD;D}4mdR!eQK%jLNvW1Z ze=WxK^*I-J+UV?1l2ffHwaJ!4Elxy?JKRsYdpnasR3gVa2e}K~q08*4lW29o*c{LL z`>S0#SO2xu2VM4<4MTacr~EMP494Z{ntysZl>8IJjQvI-_5}59e2ky?%$s4xVNhAn_EBJp035ghS0c)<3p3D+HQ)6p za)lGDgR@-~D`$r{W_NkEY?n^-xjNeSH}qVr4mRCwGiL2bZ+LZi#+=i<@4mF7?-zGw z;C%)pt_2ul4WizOk1yLJT>V}!9Upux+kcB6yVDOO6Q( zd85$XF0UVP9$uE?rS*#9a-1V7R8o=y2ZrjoPz!^Qoi>bUz_Z)R&I;UtJYNpIlRg@g z*e_Q#|S<$EykTvbW#YV2EC`Go8p6_vdIkV$2cdwZRu{ zVbe>9eS1AxJ7vnJ`RIQ|x^?5oZ->YJGIR!mTvMH1SnRL6)IT~!7#+pxdfwQhjBbkZ zavoTtJ3d|xX*`V-HK>Z-v2dPs;EjL}wR(LFc(mRe6fZUlg+26XeN&oSrj83|IwbUB z97*tKiuBbCB!dKSn(NhBFNTWw>fdW`P)$Ozc>wU6hV*~CSL?TP+#{~Bz3Ln- z!vzf&!A7zC{fx~BUZx`V&-?CRc)W$irh<5RAI>K^Dxy0(EQ`d#_agC%vaj1s?vy5U zSyHXkPATe*L7KX^A8+mMX*q}re;4ZIr3j-ZTTQOdobOg)F1R79Cm#2JO#Luu*H1egx$u;jorhT zY{{tc^5cKlX>OD8Y|XXf`_?+Rv$;u$>*)KKhxymId)UL>WsTViBnSJC82i;5OekNMw z#=DcgI&@yyKJ*^1W_6{n64$B`Tf=3w^(_@OA(?oia%L5y>t*wtXZY6HUc$8-t~Q&? z5%$H)L{BB%yOXCg8DF6@+Im0_I6tjM)#FyGljX2Egy+)nVIVXuc`P8yK|qg`lZ^PK z&O?8}mGW*sh%?{xcHV5;TX>VIyu}Q+xwHB zVxC%R_n6$5PX3H8C%hZZ^GPV8wYpwSH~rK0sw~Gv$vemLlH8k3GMsC}Bg?a9GFV;} ztab9VETic9+J*a2wTezCrs-~s&z;C^tbTtQO=xtC#6{&|kcMY#wtrnw$~Iw%oa6_jul=2N+a1?$_7)MDnPQJE$|WjpH4;&{_-M9j6 zV?Hb6=wNysJxaVZ8s-B$II6SRHE&qxjD~X`&oKcONU~;1>6aNQB%SpMw?KUz>fezBkPzy{*=VS2iCFd&RbyE@JD}+_TAF+ZGXsvjbFO7PUFRCP70x0rBKxwt*r74KN9$!j^+v`fhFRt_MBkv2c+VKCUR9D=T}gmnQ4(RwyAV z%iP|*ncttLRfOthl&|l4L%YVJx{B%04p!LFCT4baGYln% zyv=qJ>cg|;y9*YM>~&sV9^uOEzK-Z@jtzOLE$a?b;!~#IW;w5l=->qNY0vh&tn96I zxP{KSzjU`dgcfS*l^h6alcV4L`Hb?m~kKFjS3A1#xjZHT7 zI`;rQK*GQ5Fh9`Kw4dk}_m??K`G?$D2Ugda zjgv>7z3@I=D&AleA=UQAr+)B%vvuA&+rvP1z880f3@E2!3+0@DvpQwzRnFn+7vJx& z2JBW6$r%UbO-BU_GF*nOv7r!Au6AY+wuMln$aaOaU`VQeNIUz`CF0SUt)we0I>9{ZjE4F^#{%!;T*P+SpbJ7L3$jgR-YL|` zy26(G)zLw*6_+Ay^aUP}mam8&8jM3N&CpMlnHbpP9AuiA8KzHZ#a(#0ZU7oyfO3%E z`KI($Id5Q<0vL@XG~9_rsCLA73=Z`SG@b=)vEyWK`fb~PwM-77#!K}kp=Z>h?|)s? z)&<5P%)?ge(&}&Ca{J+K>9VM#_xPujFmah0rMuvQxrOQicO&$GLSC=s5ZC7Z z24MNd&GGV9K1;%%4L5Jl{o>L;QhtA;>mL5JG339F{hp|oZ_19xR*_`*5G@!sr7+d} zDiqEsel381q!Z}1D@{_AkYcuMP=VDr+Rf{*H37kaOLl?{2*7Z@}$Ww|+O znLt9}60y&TNN(+}UUj{JxOM_xC<_;^@v838?;9WL_T^VXHU4(@VpK@-;{2M*VrtU|fu)GM?h9T8=n>L`K@Ju%Y!HO)F8B(`abP~+T1l1(UyN4T4W&7a!o`f;y_kAq;uJ>s}6Momh^ ziQ@L8NR=K)%`Dp2-j!BN-M*?+mMowpwb|xQ*VUfFCCDgFr{+pLWgEt&+{-RMW|!vXP2` zi_)@UlhN%f+CptL-X7o}*)D4wn)1z4rhWVjFSkVVCH=v$)BaJy*M~*az1}NlVuN+m zff9$^#X?>}@(otxUc!i>tV5t|ilxAUe0kvBsh~w4Lg3v2$=mo^U|489(g4P_%0NHH zEMdgQkQw#sR`a^N_IBqTHts-w`;qjF_Ci}C`dHbthkEIeI>vPZ)!~cKRT|GojJm3l z6)Cyx55Ita0PcD7#{EX8wx^%R<+Ult6gaFORrch&x+vvtvDRkh9)uUD8QQq%`RPJ1 z09`|5=Fp;jW-Jm9aVRygN4rE(*s7;*O!QK4`_Xd6iV2}0s(Tzoa%yUaP=5HGh zKeN|1DuSV@2tzs0Yo-_OF7@}DO~Mfe?|ov@9y(;x*Sa6hKIDqDcYNy6K%~s0%;t}k zEOrZ)r5RxZ-jX5u&m53?h(n(NlP2-Z;!(^0$wEc;;N~tJQ#IX`fRXwNe{Nf#086C3 zVau?Z*E!f*ajAAt*{lJ7$|=<0Y#D5*8LDbgOWf6ocyz2n{qhzwjDi557?N{jg zCcBq@Ez*N<&j}8!ub06-lfomO0BA4ZPQ`wj%_MVk@nIUEqflLc>8hbE%sW7rk)A>< zb8$G>!nvmYXmbuTwXe62G$<3Vr^V{>K@`2EAK9D5C9bJu>nHfehmw%39HPZY-JDDY zaYPChb=Vlf0#KIEPo6I~^AjNFIe#@M?-ifE_BS=Do!lnN#u{j;yGvob77B36Ho4Xe zrLtO`e)biX^%H`B?dauKC{NYE`MthE`iNH9A>M8&sF3JBu;xFjMmkqG+e?9ChT=4D zmqP&GvBxNKj_+?Vo0M3v=Ybr+2Fep6%Yd4QcR(^=v~+AcG(@}c(Kh0PEiPGZlw17g z$*VvV1BJAwvkF;RT_%BF6U2IROUyLHMw81PW?Jn*ko} zTuC?I0AL-pnL!T%K7K*2a?q&@?C4^cyml@)oVY4(&i$;YOoj%qBS*uC_SxP?7DCH# zKxG36KbwHeMs|1qRdCIJc=DtpBoI{T@;1~*;_N~bBBNhDXorT)xOQ~u36?7(M?y)! z=yV^}O_u?G`$>8bJRa8zC-K(-{1QCBAnM~1M9t|9@p3x`_&qZ#O_Z}J)4z3w>{ILS zK842$PJSbF1(n38-xoEfZ2Tb?Ni$ilb6=s&jep&1cZb6<#eG_o7xvBU+J|UC!9q0t z3|U_AR@2FRD^18wm;sfLUIv(x+;^x>h|-Q8)MnRzYcyYcA(P+p6W!)RkF?Z?lqMUv z&BzBIT{?BX%Q>p56)b?fI8;C44T6eQgw}ZM?vaz>w5lfc0kN`j5{pSyrU9Ro;LgiK zW*z;V1&qKtBNa*msfrr^3h(+4~=-UMPN3$=?pkq7If5cB`;kQKkR`tgdF*3I!B?q7sET z2smdE=n5Hd%2+bt!Kn$4xn+4#IXf`>StFxM;*H_=70w{`H)T}uy4+u;Jtb& zgnpva_&cO_HDdF2j}}1B!v%1B*56n7iFJbR9!2}0zZV3E0W@UtP?^Jy5xvz^v&f(q=j^{JfeZ?sMdFHPy7M!1i^JFRtS^4!M#I& zCDKBik^#+^K;0!R2y50x)Mx8QD<8{SNct_^-_Nn5beLxnC)sd(6Xo$Me~OTq}UM4M4%GG zdDcUA2b|E3?Tqb&R=26|Nn!=A*3XcC2m*!D8L?Jfp?`oIH7TzA-OJgojs`@+<;H}Mb=~wlF{KzkwgbKax3_4xgT;5*@5p^dMqi`Sj0g|nuaN{+u{p3ea>iOYK)1eaB@5i{B%_)wHl7~_YZ9Sl`L~``7yJF4<0mg4yCp@~Q zh9BpV4(&{>gCk-Rnm-et`BV3-K`&AW5fP0j` zj=V5UeccFpw+dR9RAKu#GU=p06S!+ASyh#{u~a{x6{RKf2A)x1=Z4Q+-}B(A&&X*>q_XUT~P1D<34MIRdl`l?n|e`gbaYFVanN9LSb z?oP94vbWJ!TFBGywM)xse+W$IYb;cf@pr~@zoZsjM=&iu9LVDCG_whe8s8%lN)XeB z>Vyy+l-gohCxmym@Jh3H6fYzZ400l{q-bdR#o*ARIRjF0B4j@Ye!{ol&>^LybZJ3? zY13&ar4JUs)S?d^TEc*T6j&eq@`DN$nhzJavnjJ&q3b*0`hcyYr>Gj?U9q+k>+#}I zXo5}dONU9mhz`K5ks>3CuY1k=yfDI{wR7H{U>zU1 z8FkFHUA}$&Y8l&IQo4pCCw|ZeV!hhx!t*wOhK`0@9lem9>Q>rOlMJ8udemIbt6a`nK zPut0lsdF|V*APvh3Os!UM<7E{`aW>&%RHjqTj3i^+U~_R2LZfv6rU{ew+=x}w=oU! z!D-AM1Jog8vF}eo+qTA8l~)Hh7sI9ZOnXoA39}%;pBqWk^+qjR#-D^pDXdP>b=<3{+r~dFGji-4@L~QF+-~Z|xQnPtxNw2YPL zCE2c3?p!(-Mokn+24X$MCr)A&Za zbaKkbIE_Ivh+(-ob-61mO;rzXw54C|SU!%gr||o&r}+XkFV(IEXSJRz*G8y`jd!YY z7Y}?6V4<+L!}Qm1{+8c(!bOpyL$0lnsL&0&WBWJ7f=DmF+6|ZRUXnymf9ioesODvV zgUdFkJ-krto|-hN7suYqXHLLc;g`pcVlQ!=C<|~e3M^l+`xA$tzq@UoY$(@cx5Ewv z#bJu4mZtryZl=6d_KtE$cjCh!jfQr9q5@tv$7ZIm&mP_D7&88@{rH)68=QMBW8GA z_Fj@y*Pwst70qBs6ecqmO>tE*nE3g6NWwQ{js)~x-Y71;OP%j&*KcO;rRd8UJ%T_T z5U-big;TebU5&cpt9vcXPzy*(h^3No^z5Z2iVMJV}9)#=K`{me2IhUU#Z&hJD3o$M#{&qU_tT)FNuK<&t^P=g`8@ z8GNPbZFn``>}^XwoOc@*q6mpP&lW&_S6kujF9!sxW8yf#+qDqsXw>|F!RE}=QWC?J zMtn$+U)NVz!TdIqOZqV$vWw@Ci+fP4Cn&P^&g4Ky+LKxXquaF8g(h=~39Kk|JebWj0PzNj6 zl{3I^+k@IBPjdbSEQQy9P;$CRciCYI2QXXSZ$W#kX_XfjvK%{Xx|QfMDvCb@4xs17 z$M_}e&A#@U`usq~Ywt#Zdauu97t#;pgb5{Fj&G+ReQ!jQqq5P>I?W7z*{gW2w(0L^ z->6PEz9-fR=;7Q|{f=-Icqg+hyp}B6`5B<0c!XoCrE6hx3|g^&CAdL_%ctJGVp5LR z8mE?tDE*Iidn1FaLN;@})$S;Wc#AtPrL-U~Z_ShvVESqmzM1Td9xcGnVB~V~>>RK0 zMC${f|OgtOOFM+j{&6fsM!Ihd5Nau=oqC` z7QWVi^xBq3@|>1`nyyXRv!wjhenL?lNUig(ZZQf$OJZPa4VP7sO*0z#=XX!AHuOMD zT0_BmORFo+#zh_J4W4`NtIa64fi01b>6B<%Tn^yFaG9KJu{#ik{3?JJ^*U?E3;^w> zALrzq0qy{6w*_Gqd3+Z7EDkupNktt;;3V;VXyCiBU4T+ay3+9zq**J$cLmyXSf4g?%o-vCj(UQhw26PYqRUo>X@Okm zjN(%OA8IszyGJOL#vGg@+E4ERTz^{2rEJfB^kNdXdc)`$W1l1-WK^tI9VPLNs2JFH zo9jp0$}hef9dx6OJFmM&LFAmrcw#D4B?{h|-iBbLE#u{0dv#l9=9HJ&?(7_wU)h*A zfH7DUC;y?QI~{=F;$KNq;;^I5%t+i+zheUYjMV=PJN)ZfcMjAPpR4U}$CoT=y8%^~bnFg1e`lE=7QgQ2V7H{VR%+QR#4;0hu?A_`sjefVtWzrfb_+oEMG(%$QuBMz(6Rv%MFl$S*t9?)7OD*ylgvzQvjj(6zMTelBo5-vvSq zSD<=eIiJInz2X83=nz{zECimvgJTLIOySgaCY#ReMF0Sptz|-jOt~oU zeQ69t&&#rDe<@bF=x7;2>|Y!RM?pd`9J2AZ7tM)Tm47uGAFYeRoF1>)(9-Ty>$6Z4 zaJ(zIOrKP$*70LH4}s6QY@0l$BXxMx`K=>3k)>(UQwF;;0_v!k4;L+D_;kx{j2u^1 zW$Gq}jBt2}jS|zP7vox3WwtDlL6`6|NsGGhLBu=3f5~>3Ld1ukm zGd6sK;#lsLQF~J=FJ*eNRAn_nZ-%dTy>pmH$mE7+$*$~icP^A9mq|wNX+*+k+#Qut z8x=-0pJDl)f0!-(X?$#47)I5%g%Ic)2K=invwB)vkw_jr!r+r=cg zXXqnie-?}&Z+c!q#C?;09W*PgN~UihrI)HgDl^Z?5-7IjtcFp_N|~gBYRy`P_QJrP zGs_ps-1h>;l*S z(h8$e_3xQb(GO0tvUFRWC)QDn`MYwdhw~Z!e~GYlK)uw$cGQlo*i0!z#_ z7x^d171s;3Pw%8aK(rD$hwMKX{=zVF(@EJR&V1Y#jbKk}ho>CI)wiegjAP$s*f6ZsyI|UwJh%7djP0TBjm_wS9fO|p~%|^)>i#>Ky%D~_I1P5pE zi3mBVjxcExm-%t~enRh>z$i#O1 zplkdElZl^hXg_ie0(5b}yg6x>lXAL-e^`nuR6gZCK&_2K-NC)z10mAq*!9+u>%qn4LcEc>jp1&quE$ziPLOe>2JHme@d>y zF}cp-bZcvD3+-yCn@u9|p?uqA}wlxVf&r7DMWgfNk zBMP!bEudwBZuYkU zu{P*=aTKk}#3{>zp3X|hbWC?PQ`0f_6^BTR?=a1isc%Ofp<+zypK_S`{_|mdLJZ(4 zw%*&_X6V0&-QPad$^E$kc+KQACj%A0YDG%NeQC6gMNx0&@lqWHJ9*>pe>LuE-ym}D zpWsnS;s0DEB8R`&{}ZD1jf4No4cJk>@cteW7miPy%jNsxICs>MYBXn6@abX#eN?I zCCLOUN1YGEB|84Nt6ASef3v|2ah|l6?TLKArrxYJ(KNN$OXe|j=`La(2bf;y5LurD zs&AFoYvwg;LQQl92Rh^p@7O-+J>%ZD@llbbSc4Vj7HbjH8JrRyeze^WGjas>i~WQS5~|P4`d0Fs4qA?7K=NLKf56Fk-PqKge$AWt z0^uWc*y#~E=?9oe71aU0^`&DVjiePJP-ikE%YgPEr}T%nKRKe}SC>d)jBa7$Jco)$Njs4!*gHCdJ{Pn9c5WsI;tIT7UHD$0M6@ z1oH!uDU$KP{!;Bp8niFVzP66*4UVfVGGK*DNI_?q|M-vp63PFQum89I{y+X-QTzX` zX?Op(rkyXE|D$P#@BcrT{lC=gcJNYv1_Rqa%BA?nWMCUNWw+E0F^}ZW#f!W9e~|A# zke1^Bx`@dMnnHi529x`H_&*1W8f}u|Wh|uf`1>6B>uLLYj~Piv(zR5!1^O?(gw{=oY@fhxTDjc{AZe|_}E|8imY3zF4uBF{(hNF9bTrN7CP{@M%U==(L5 z-Z^UiO&#(tpMPdk5*wqe@u2LR{>>k6{ZWyaf1&8=a5tJ%nVWguQ6ZaEXu{isTKc)} z=}Ip6D4p#hcd2TBJahFW-Ozh)lpCWVeYW^5bBmcxPJL@38Crom(5K%nD8cP*iTQ?x zS82PXUjg9nLtr)Z!xDcm07UImb_iV9%csT)$^AMNqgHG0Ouy`0E&rZETg3-Bi=pB{ z`rG<~#p;{EU$H>#w|I%16@P1K#=06I(zP%QIJ3r)?3aPN!jk7X9vrM?pN2vZsi1JlME1Sl z>#i??dV0{^9jCR~i*GfJkFlK{3FwnpI5IsGv;^02YTaKfB5jO@-IgH>LopRggmDuL>IH!tHk%S>1#Dibkap$JNmme5P#7I)>bZP__z%U2#~rl=i#U~y`q8X0;Q9Q_dj-y<$bVs@ZAE7Jh? zVLP|pJq`6B7SMn7n1j4_5!wi3ND-9}(Ud3&uPGTuHwQDw)%8{Q0i8|ZX=yle3@ zWRvqKj9|IG=Z<@G2YSRfbYF)ltAeqV(hOL+4m3_6!{L|<kRN2qlb0fDZ1C8i^)GC`_bx{fcoUBKM2INt&rb#SJ^)T>zZ@jAM712x zn_N#%89^qXLU>LMd!X_R$>iz}Fc>y}(`{39q5B%LVKlDMQ3G9%*TT|^mz|^}5G!n| zGz(@@%F+jEqx_6t?hl(q~u z101dns=m*okF(Az-`uOKecQ!sNdTQ|LMAD?@F)_YD|_#ZVaf48%Oruhxg?H%GQGr! zac$C23@j>7ydf=PTDRk=xpfZ{O{I<9id~!5hid?U!M6nW+nP9F6#M@68^qnKCz=cFoOl zO~(;~Ez5w+cqS7=N&(;}IJtpgb)}G*wwcr!9Jes5J%MPkLzCHk>OPVflz-x`=OZUZhfr0QeBkdwzi+6f|`bAUPMq@nL z;Vd2OG?>m)4*`)gmwc58e*Nsc_8F0;@nfC6U;0o05`y3?Z^|Fa1(0ioIwk|d>Xy|J zET=yRcZq(q1K$TcqN4~=JY=8qJ6$XXqq}t|+SeAtJ%ZR4_X(nm74`v{A)AtNGXcvY zSiLeY&B*|do>0zs(_}G!U5+Y{+dPzV17A-|?mDD}kt|!tFz7Kh4Q=&di<};6vacEj)pPaw5(w;(2fh3waHSEu##i1kkXXepBj#RDx*aBbI7$;KxUXdo z0q9*G(fJ+67~^>OeY#uX5QwK;Y$wk>A+)0~>{iVA-^UKi3BvyeLs){-)4+g25E1!3iRSOgj?or z!A3v0?d1ql*!P)JD&K#t4fL0V4z8vb0hh>X)NWgZQTOo8a_Bk^Q+Fjr z31+UTk`D#OYsS%2yd+k1`!z_(dZtH$YoLC*J32p7XGr^h));c4^_w3k0P!K87YPKU z)~8k0vW-kWr4c$0?COZ*8y^fFNk6{F@S_^B_rm3+;A)y6t>ApR?@Vk}>(}sYx;N8^ zhn1Qrypwrhylt`q;$u)tX*tIYJyj(!EYoGqENuRmDnKA$EOZBQa$KkPotDcqxH0sl z1OzCO&*MXXZN}ilk&u?~95yTMlR*qqdG%uIMV;X7Uov*5AXk+(YAa?W(1W|w${+it`Zu5s3e-!$ih zn!KIc>`ZmBVsJ$(;P$fRD}s9xbNfQ4c$LK z%~IAkpza*(%nI&bkbyod%0mg=*AH-iRfV=*z>EZG$hZvk>ElzFK#|Grl}??J-#}&k z%L$f5pV%3?ZY1G>&g%Ws;DpCX$|RhFqB*+DN1@X!=1%P0??=tQ*5xELBF*ze2z_ry zrMG^6nbB=4cVhyo+}$ghrFDg@)Dbf#!bPJTzI@-Z?06~o9qH7^s%Bb>_j;M(mcL6& zSN$+}5nKRAqS3p>NuR9wJ)k35fh0zuo8ilTz(Z3uP6^=aAbhL!=71O8lpNfw=AIWe z90?9SQ`{YFmgS*^Ezz7e)poELECVsEkv+qID-<4jGy)v&B&&2qD4tXTk}9f1X4HojC?7*@L+b^=T;1x`_%y3LS&*hQ%ap z9*#I%Z|-VT=S&niFioA`J^Nih=6$RWr)2DqO5X$i3-Z&Z7NoZi-u#7<$-7SQ8#e=g z!9HMUN-WdxC#5&`ot^ohPF+imP4=BT&8E2C_FK5|(l+6kLzlw{M`V#tf_knGUIMyl zA+9J<)&jN`kG^L10y)NS7l_ExD!1SMj#U9xMH%x~osEv8y@|)L7mp?-(J{Fb`~-`< zD0HxYf5)mVDbh2GrBqOk4XsPKpjFv_1zH9PvRmkrLblKn#BB$%a>+WS$uDq!u6G67 zROvCebks`~CqE9k;An0PcX18D7x;Q%&i;UmMf{|^tjNt^d}4?g>LD=V8m@c!m8!D! zu+1orxvXd%VPtxLcZ%p| zPe}YUFhDDnmi`I0{S6@v=(4Hm>gv*qH%a0OH2B?eJDc6*o&eGY7m8jiqgXJBqOS zoLM;i44dJ##Sfs^H%nOcj&MbPE|=;rrktw2>-ktqKtfmxM2D}rVioTYyJRya%&B^B zLF=(Lv8K)A$Nchi&|QB78s+wgT^p09tATXXNdzMgP3{O>X3<4Dy-w)_8I&+_THh(& z(NF)rg)a!hJZrr_?_1Cc*l(2EsDT>veG-;*HxQfpg%ALk@@g0SMW(WUlB~%r%*u%N zxIx{X@$jSdiuF#f8K0_)W}Zo5-zY=PbNM99IlV7tK3M3Ef1+ZO+@QQW;~JO0k$c~yD|2q-n8c=>T5Zh`YRsC z>W0G&JTKZw$MAAo>T`M9(Eav)o8r1Wr335K0EM+gyRjoi#=37Mhv!O)DZQt3`{Xn+M`(kTXF8KW0P^@21tIm`N62t0>&oLI z3+!&6_H?uH<*!3zi_l4Zw(xHhV@zsm9BR8}pZ)o_0^^kMY+L=PgSW;aRL&}WhY*6q zcr{tpnALdS)GF3C<3^f%#k314})1H$iB=e$Mmxe)qr^W$skq9GA1~w$menOE% z?Rcph5ocGk>;xN*50~Qyk$V>br4xz1zsvsOoT6BN3n+9n&`_?NL_7&N+p{$!FB5Ql zrvy?f{tDd?(|gbF=3^D_)BVUvL;1g`bhc^2DF3M(K@&YSR-wA z0RYCrA$Nvji*8xSlvSy3KE%Hcw@ZsAD?wlGNI~|NJv>YJowY@m6B3u6H%)vX0a{>U zWk;Ra4xh@{kB>ZcQYU0*j`CVR1`KQNJcA^ltFCa@9>F6i$wF8SRyI`OVr<(AMU^{$ zfzcF+-B|T?E;d8B5my>PS(?LeuDVqHnj4BdecPr=`K-WOqi&U^(KQfM;!q-OfCC?h zNC&!nZ3kT)>8&cu{s7X$pWr!9n2l{nUXA$L&aiyEQ*S>XMqzljs|};N3ySq;*9QSc zhDT&JKl4W8B>ae#!&ux8GL^9qhmT^U>hPgo_GV42)roI zmvGBJB|?t~w=u(e#_&t&FP)mXO!KNp63uGd62QC#*qn?Xthu^I-#~3Qr-gohDjMls z-A13}PKW1krNY&Gr^ibx%dwf&{0b9uh4k244>wy&=P*Il-+0`B8QUscNEHW7G3!sk zI>l6xc?Guqt~ZOtF&D6}?N$o|M5AXOQXI;$Yzr5+@fGK$T+U|8Uop{`u9~+9`+5lf z8c<8;>B#^us|m=5&u+})G54x}|H(^D;l1k{!fGM_Qgh=!d`toKIR;sT$L= zx5}O($vWL5xM`4;2x)y6al9J6go~mry$RiX)B_01Bt8bOUBvO{F!M1PPxH78BU`l& zP5V%HVm@r}IWCO&%O=?HLdjvTNCGRi5`9Jt;r*?G51xWARAhFHvL>p3ol66oB($mp z(5R~nzfScf%0F%`4pwmeqd`$HpHhBT|BSyQRFZ09z3#|u2!aK`A+!)!R$JEc|0L|Y znjKY|Wq*(!Ksdo;H{N^uCX8^x2oJyhhBUMHcg{W6tcZzOT9PQJuFlMAiEI{qhwRcV zh8sL5O{_m=G9R~mE%dd2$jxHgxFVX7>h3o(C-ej(Rz^3@rsKrXQW!~b8F`2&OK(vA zB+R@lHB;_mcO)fa{w9XT$(Hfen_cvw_T2H_%331w;hR-i^GwS1<7@Ebp%f9)>AOa9 z^`-a_C+ae{C%u9T%e1pnyAAXFcQnZ*{rptp=Mn3+7krjXv+|gKG*QB=T?#KuXX)sK z-%m@O7cv|SC8XM1-NdGiIQ_L3Ggzn3;W7+fUg1^9L||LUO^Ubv3AlI~?AMSEMu#uP znwEKM`ab#ICiGyuC4RrDXI>OuPn6b;-UKyqb2P2VFAP5V<(yOIJva};%1IBGmLz!h z6n7=kvuALQ1*>U)c(fcf&11j!XRbPbYMNFftfYeWL0%*X$$G zG_k;xYuQ8>2le|+qv1W`I}W7$Irdpd*!knJqCR26VabAj+N!VAaFt<9QIouRg4Nyq zaxy>{_k6W`Qz*m1n_K3jG=fdW#j?uV)p1GHz?*6pM+Z<3ioF}>-8&q^BICh^WX;K9 zM&qkJSb|p*y1FSF@0OrfL;G&w=imTP0S?IUB`3b#T{9T4SNDkmqK+PeOnOfvb??ke ztmuhvN_km-qg!UCL?MoaR?Mhl>6I22;q@q-1b`kvTtYRVG2{?GT<^S61-WR2vJau^ zH#aRijy@Y7;XdMM4ZJ%#5Yg?RG<2^0e*Yvf#$Kde>gvghlYEp9V%+Pk4z%Pm@NtCH z?#QrLG4s*036y3m;d?zkFB31n4mLUQC}6aBv6#|vL7Hlk=H)k!BsbnYxwdFG3Wj>>sm7_|Q*%(6MGLY!3u|yMQoHd(A!NR1 zxeS%9s)gI!UeF$&N1lily*AzKw?G}&nI%M~5|#>W_JJ4b12ZrB_GXZtDR_oK$$9k) z-(PD8>?sH2bdM5Q zR`ZvdmILy6_y*1Dq^vR>PUZ4-(HpBH$ek#xRPs?poQ)d28p^TVhC+^(POLqZo!P&C zMv|svT6$swBZfmkeqv_k^+fFCRHmb;-wrEXl<_i1{Iy-{Yr!4`0xTr3KQ{x9?*WJe z-SHLD+Vt&h#?W1?FT#4`$=>r%P-uZ(V#h_iR}QqPDdbFtJ1%9B+~(L*vK@D(mYHIQ zi+yasoEE|e=W%@4SjCmt1n}`8>CByfy4ZM^$w92bksRM{Mlpb!6*sA=xhX_rOzcp7 zRF^rLma2=n$+5kmGYrWW28kuk$wjU1_6S|>`0WNV{?$AMpMbhPKAnn2Xkj9Z=mx1P z?(Y@9>L^)Qy#?F zR<6Hi9*Tt|Jo$Q+3Y{&%c*}3M@x|HK@Dx6jHw|1S^}Je$g@V(U-}4FJa#eV_ufEu0 z(5kQL#!xyaK)DX?^C1?4!p}qyCGAxMn(kxlpQ-!foG9Sq;3K9{qJ26yJfKI@}a4n!WS zt7nF6#Tuh6n(OH~;cgJA_cWia{@Cq=)CFgJ2t+0|oEwntgH#VXjNCr3X_}tlL!SsF zj`#&dDy4>O$R^SoDdvZjg6>d~a6c%gvga4;cqn)0?(*aDR2$}hvjoxmeRfiUcUDK` zaPSk!yfa*tP=RFXAWw7(ZSp1x?oOw@!0a@0vyf+=&mnFk`TP+#LdclYDm zB3TgnxY^nFrR=2tbk^Ky8^G@MWkE2#*^3))tZwcVOWaA&^jxT)(Of=|_p$(wb6y^n z5hO&CRq`b3_1Ry4+x;~r?Ii@T&{DcZ2j9*!4zI_O5tc`0WJ~QWu7HlSy80_xbybg5 z1{G%_#|ZNy=sq8dbhLvDc(&B;qU`cbs2b!-xe)NA-wdSo_z>Hy3dH_9>p-s4t{jZ%^ddgVM33G=0L;IBMLdMWVNijo^HZ z6ME-~Yq!MFcxvjH@zFIyiGoJ)qxlg;hV{7u?IpJOr!vR;cpM8pP=!<}fxS4IBcD1^ zaAOtLPmW}NiXzE?pHzWE0os;)XIAkDVQL$jCTe1amsx}Bq|vQzOK0Y3)(f~-NZ9>N zo_k(maH<f_igQ_O#?76_TArM7 zWZ{>|tG1}h^LWfQ>pai}1M{&iQ4z}y0?zP8sf#PS@t>p9hvu_!!#dyO`i36jda^;w zsa(5SnZ;L^naoRn6_qum%x!v>Ceu##N8*Oqjp=YxpqVPmmM5LAOv=+Oj-bRIX@%t( z7bNF@xV^^dx*lssiOT1(q(MNCH?rz5WfIZDP1`nn`sYV|oo>DY?9&9#cqVSiYe~|L znXvZ#DvnQHz3xR1K?MqnsXq&yzDtSkH_~2AG)Xfi$?yGO$E+2TRpb<@cEI(*o(AA~ zZdllUvixKzm_SRX8q;X&l_~d`>Jr3=dOg#BUr0()QnjqvRws`r2_k}cQ}mfU#}nU+ zjI6Hd!9CuuT#j6AG49A2brXJ~Su^`vk8`TAAOpj#kvfF-i|P7%`ALg?4P|0(hGW4_ zx|txBS}*6t*U0_Dh=2|%B9pM!!C&-Mmcr-2>WM^bHRpZ12?_vO-4C;+bGf?=aBwn-1 z2jBgJku@aukXjxEZ@t6XWg3gelP z7hRiYcXN^sXfl=OP<;uH^$|x*AC)o73OR4~z3sA>t|6f!kT>b6jjF$Z78dV+4;xH* zPenIfX|dxF@lsL?3B326Gg~4<5>cPTDtV;CJ04y&%IU zqzG{RS(X}RyV>Fz^`@0$V?B|N$mO1Abh}M=PE#pJ30=lo`i-N8n^N0GO-F!t(BfGI z2>G?$pR=a$A~BOLvb#3f=a%V<@bfq@B1>*w%ThSohKxPp(;4inuAno2!=DmA-92@< z9I|?2Cw|Qm7lwB~nsk(|3`lv8Y=f7lGiep>Fqedk6M{)2E?~w;rGZCT2z_zQh?gh6;MDe!5mdPj&v^R`qEm!2 zZE4+G@o0fr$NSX>Wf|&v0tisUCTEgbqUoGj=6YKc&KZ1y{g9|G4cGWvd9unJ+A zb}hlF*1JU6*X7g=HSXeVF1doM-5MJrU}Flu5YnJut0CRwwyI0apAROF>2)AF<0CL0 zu@b~1?}w$Zv1HE>15h8ey66eaCa6UFu{;{G$7BOW7bmq2Y8@W~*S$;y22!rC0l71^ zAm1fbO-LGll_Rgfugnp4<`{;vHLjL6Zid>}u9(eUFI9X^o!orXkHptZO)JS#F6;xL z)UlLhH4OAicSYScE&aM?xnsqY?#`E!D{c!YR$9yG7 zpVuX(rr})V8?-ONHKrX3Iu7d!-zhXFluV-ws)rJt1L~b@!vf;VR#*jJ*w;jD5!j0Z9GGutb~(wJcXUpGh3Q`MXy{bT{D% z7JK~*Cyez@h9UpSaI^^#$X=C>BkXQm)o?>^3=SVU+Ad*uAOZ>7l?;_dlhjeU4KwPt%4aa+f? z{JLStMMTIWMXYL@W9#`K+9*EoTZPGa{+X@wWC)fus-i)8&HfugmMcJZkRM=u{kGcj}nvJNw*US@i-XhC+mpg^7MOlvG?E zcUs(mfA?V?us261h1*jz`niifw;he^w;Wu0?Q(o5*!A}#v+FU(so32Ba@vEDlEW@$ zG8#K02Be2a(-omsa1>#qh*O}xN!i!5ED2w?#`O+~Y9l5BT+S;th2o?jrG+Qd(#E`& zG;KmDxsKvZ&>>sT&*N2L^|n&&k?W8AFxaTje=GM6F(g}}N*WqHktkYtq=tFXB(-=6 zV$Uy8+k&LqUP(lf()4cf6|WWzsH=)Tqg$Don~F}2mr+wfeokrobkk#f_Vj)`UQ%yS zk8PnTV1yr?YZ9!=N)2eL5fc^oyehA8oD#P7WQ&ZzD!bIQBK-&{LUNx3UKP>}d#DU} ze-qlw_sxyrVv(>mHF_Pfq1+wix|4Um(D5yUsAGlJhzQL1M%qtIo_t!Yysn*5in{eB z9`PMcVeaQj@F$>nM>vg zDpdni{QdG>DY9R~fs&adn^TjDc)s?Rf5s}D1t=;Pq(;OI?3k>#*RYQ~U9^wxt54;0 z=U9~xNzSMFB~|rH#1dK>r?`|?xsaQT#;v8u7SdYpJ;}P@b&>K77gFVIG>m5WNNn7B zEsD29%u;gMKZsg4{rkI{j*M=GMiLmgfpbpZsX&;D+nk%VPZk^BM+1qw+li2Ne{ZML zk_%ve1C+F?cfaG2_YPlYR)<_o&XfPnWBP)gyu2R%bJF`Hs?N#lg&xxh(PH)SJOcxs z_+<=K9_Q^-z@ML57P*os)Muy;;fQ6*SdP?(=gCxVk{&5a!d73}Q+)bfl)ckBtb_XX z;w5!vK+gDh&JN}0)(YEgTpbgwe}lCX-nUGo7y*XU@fIKJkP3|`cZB&g@{VAm&x(7H zNO0U1Lf=&brs5(;An!C;v=mG#Nx$bUl4(LEs!j^+Y#&tX9u~ZKe;DOq(ji;1y2`1h zrt1N=R~12vLX1lf72NYz@4LW3XklRE7IZgGZ8u}iFQ0Wam^mfO%s3*De@9D*<&yI3 z@|NXHDYclUK-~?S9$|%G0&88lSZ;`7InIH;Wqt6zfM(y=ir^@B@N!Z~Q#C&u(n>uw ziq~4b_2#0uO={@GehNm7PNASSnu0xX|14jI$Fwx(-qlBZQ*IrYL>9{2{oQux7P_im z!ve#?OuGy@E3)qNopOW5kDho}l_iafSMD!+sK)^#;IrShvh2QX0F!6ux62o1^P`Bhd z^?DIFzrAWSN{WXtf6y0B7~Zj!#PdF+c#Xf3P1oLu)+p0_b5?G>$I&Z3bAKxMw{1L| z*tz?h@;F1x2wtD zJ>cE%|EA6Iz@g!6D*{+&POxxsIAX9 z0TiM#K%Mw0yzQ{4Qcu|TC_D;Tco5N6y+#oZvEfv@D9v$S)re>eFc*5b!HQuPFYk9ZYkd&Wsd?&k(J( zsKCY5e^MSWtK;(o6t zCZ%KouMnXzmrYI|M&w~4TPYKNyC&u%M%j)JN%~RDm$D`rHedNp^KU6j-?93#0x_RH zY8uWaNHgyQ`IZ<~iVf8=GhCK)bOjh+wkSM71l2wNR4q(^@5#1qybw(MFmubo(e8(dE#LLL-I%n@U zKItzI>y7-qrAw^J7%%62lUj|~i*|j+yUBs?1Upsa#ACUtiy7HnLWF~NGsS21;20en zpe(>P1}ui!$T!~m&}>u{fdd%?-TPCde?l=?_2TB5ynnOPeCy0*!=N{T7mvr4#na!) zLA(E;~jBl{8_%aJ^m(|Ih{lFWFW%&4lpe|B=m zMkX9{c0%15xd$?GcL9S&NKsg0O3S7IC4*ZMAcImJ&DMql_Bp~kKBZq9_|No?-j3Uq zN-tAaDAkLVvvh$P3x2=R^)j+i|Jt(8MMFjmxG_hTfhAeDK)N*WSWce-xf_rjY);r# zS^y$*s|{%}?#&ZPF*AmS%eL%@1!h@poKvsj=N5ThNOF8n@**7tJF&~Se<~#?C&SEmkP#(BNVhBFweWsb~#pt0)z5DS9Tf}+Z z)S7oi*>EZGlPbe48k|l9E_B1InnkL@EWz1Cp|6p&6CaoK!Iy2QkYXwdt z={VV{Tb21qP3+HjD97MZ(g_+IhEqi1$B)vbIdz8_hD1tI$=epl*+bBY!Nkl{43@{I zSaLQjTan_|*tDLSJMiY2W)TD+$R6$lRHEbvDC`=*W5r;U54;C`e=In|MPf%&CcUem z#b`>ipqt^yz6Lp}k4EKkC0Jt|0-k3IlU1$o?8?TygwyrP+sIbXcQT=Q4nArYPMk=% zD$nB-vSQU&x{`KTb-NEcEga610H&I}=v_ny@Tg3a!Jb@EFx|qU?(l|j%?Vv3vd*2A zfMW6@YI>gmFL2U*e~L8E;Zo~MG+#bRZ?G_@H8GR>eMV4^2zkP@#spFNeNOiBbrMf@jGR(B-)eE`ycTxT z`t;j5<1dri_BO^WiX(1835`{CQ+@n+h{z)7Y_tr|P?}GQ?m=1-M-^^7iuae%mxuR8 ziL+2~tWcx62sSdSpOlqfL@zdJkWE5y*9aL|mbi^9e?_M(H56!DVpw{V2;UsXi&ldV zE>ve`5>#+GXw6*x#}DgZ$V5XBBW z1f!J9Vh>kXBswv$ZTf5)gZ#r>k-<7{vD!WE^zkVxB!|8Ys+ZbEnfaD*pGj&GD3zLLS$!s6Do4T09|D zFFTU3(CpCD<9mH!2xuoqM>p5c&B{?9&>tCWe`+60KG#WMn0TJHZE?$e?_E^IifXqq z(!=b72v2lm%X^<4aC_pHf`jT7mTi1K@7{oasMowwaYz;OV+fA`4Tj3zu)uC{fqgvn zJQ8*ES!#Xwu9{?j*D_s=mr33|@g;j4I%6eGKsA(Au{YOZqdIr@wR}vtm%eC*g?&^p ze=vATiCgPAnwLBWxw*`r)}Ar$9wM#!aPrlSf=7#str}Ila4)WBzI!iPlDUWh%L~U8 zq(t-N?x{CL3)3SqTfC7pY2*#;67}_SFu~)nO$#X~N?VM&l4XuvNUC%(PHi;6;s$ol z!Yx}O2|rS2X}K|wQwt%R<&f^cbYkB&f2-)bVRPgAUk^ZE7FBEXhs!HL`_%cC8$U|T zs)AuWQP)3*Pu<>9KYowz_F+rPee7onwZ;6IC}>{Sp-_rZ$Of?w-pROD!DO(|08BFO zAf4D7_VJQ1Uf%WV8O-moLMc04(6K=&^rCXgXRR!{pP*!xx448-^G@G-mgR80e^YDO zp6!YUXb17fF}_C(Ess1))=MOsb3k;;J1tVW3GL0?F&Wj!dV=NRsC zNc7@Hs)RIRmq)&Ir1suC?L@$xLP~-`7(fy?;1wGXtXFNaV}vEiJbm1Wz9A-Vrq9wmUo#z2J$kw zT6>Ppr+S%K3_}3o{_rR?d1(~#(DOp39p;Kq`%{9eXi_F-Xa7*OCmk#+f5|7WOZBLe z5pZXu39a|g*pM6H>Fx4rv(;X&_E%5u0BvOjj{2^1?OEDA(j#nZUk6YtjnFsrdX^ZM zik7|W$cY1b*>2meofmQ;?8>B0U zs{&F`ceCa{&9F9Bzbz>%c0I#JXKU(KJzChZIy{;L@1a)Xdg`{6e{QTQcx9jP#i>(g znjaTcnNB+kbN-Oco=-DI*;!vyJ<1&hEe9@+n~Q`?*6cIEq|rAUu~VJ;u2+g%T2pTO z7Kt9!xzOLAGVU=MSQ(3{Idi_;Lo@>gJq#p1$s|JY5q~pC$Go%yyYlmD%%UJO3MoAA zKIx@tCV_MGjVqr!e_br~^i*!wa#M=*!M<8E3rGyVtr@GRwesP~=ZGGUCLJC=#Zcs( z?c(5wsN^&jGtYC{1|4~k(PG{69UKTPyl`u~o$m<`<7yN!=}c>sp)M@;95ifSavI}P zl04|q;{i#1p7R5&b-EgiLv>oZagP(Zk@Mqz@AZZLdbEA}f3pjs7~cYDgI#)01a_s% z*Q269Z}jv+C^uRaF@C$YnY1=*cGYO=!)mvR>%K=ZXz#fl2ukP&ftSf0!i zJS7S8EYDD5VGiuL<2Y`;fKAXeGdtdOUHIP4#`L(b)o6Egl=J9lM6uI4vH#u}e1I62`Zdiwmdjoi;Vr;) zTk*O-I!!zJ!;#$Ch@Qtrs5_~2-?9)(Qk!S~im%sK%Cijoi(r;}CM#1mbs!yeNmkH?j zGlVjCe*n0ne2PJph#GD$0{|TPTlq2mI4k{}l)e}FvwvcpA&ndY;j{JYtYm$Do=a;g zGEJPH;K~R9xcmJ5>+O$|($6^w0Kj;^_ro;~@9)GgkmGy)-uEorXC^XNE-<^NX8;J1 z{j;BnzRpST=ad8hw3VN8lKeG``Z`sC6UYGoe>#9W0Q$jSyqjITtDo=AKJ`)#kYYYT z$^%fs^?ST{S1|zQ@A9I&IJyOQp4IEwGcNrU>{x=QUK?J_8BK=jl`jdd<>(xLh&4|s zXBN89hH(|tAnh3tsXAf{C$a=Nuw^zXzvsmYAkQEMR1CljXgiP>7p(<*_!Fmzaf8(x zf5Wr!KQS{AmYD?0Sdvc}|9?NP3LM;RZYMO>;}1~s0w@Pv-_buakJ)Q-& z&$8u?a^K~XynI@<4ne7}+RjU`YBlW$e8J}bFMt1-cUo87`k$_q`VV%CPTagVk&0jPC}Q zVfvNf-#i;t7Q#Jd;htT__jOl%=~etawj^9~^ozB};xFErnV_<-|8hJ|biOu0iw1UI zWx>vt_C1gMe;-2!t-@eOt)2B{!S;nr;yDfsVi6s>yURWVC<}%ig5aJz=*+?$e^ofZ zbBe~uHWgTn5u)+Z+OmD5h5` zChyKF`>ipc1Jgoe(D>}5gbEORe-HzpF849<(U}y0eyUe?1C8G)XZ+3opPV-GFK7BC z2e8b1;fN#Qo7X-2zOGUBOwy|?6de0oZx$$j&-wBx-;E}|8?4?bSBx1)77B-sf5${{ z>`*x3yHRDRQfHFh#NSvL3FK7RbzW~_?mD{!Y#xGL<`vhLfxSPpeTMArf1C1I#_aCL z!XejoDVr%@su1uTu=%$Fcm)PD4l7?^FGYL-S(Pu4^1IJQ)gEd)uW~@Ilr}aB428hv z4euI-*Di46s?D>Uml?)R zs5+Qgsf>BZ|>w%{>`OtKK1K|18<|z|BeNaV6E56W&e47 zlL7FXBma!me~zbocVE8ZNZ|BKZ@zQHN0swCmtrWKaVb0Pp%u<`<+fokYIh&CB_h|)15+EEq~K$9o~9H?XgmKFm`UE=mHZoS&L z1+aXPtj_L(35ieOf0YdfWQp^KU4-Oo;4)Jb_QSO2?+3xmcRn}>@&W72UI4N{C_!oi zb-{wnN9`Wi`l;?breE?u`I3Yxj)dxmHFx2NuRmk*t?T%gH+_$#zd8Mr`!nvpm}Ois zZu~Iqm*?-}=!2v=vy-rg+zs z`JE5oITtShs0wRC(HBw^=&^Xprv)+&0$+A6M|Ufyio~*or7UnK;hXNxf-*M!E4B@{ z<=LtoSulU+f@3;Ez>CC9+_PM9febuf;u|n3y9~juX~55L)peA}Mmb|AbLrhxd?{;e z=}-APK43W;f84HnLfu*>Xt&VeQ}OUgt;ngO z+fXYsQeFh@3$i1?c93`9u>lIfj_MHb5Po^aW^Z4<^!7h|2`4>ZOa4?3{fkwoeCl7W z_=8!0u@hM#nrtMnGma-9Uk8VOqWx{5GlicGeOI~xeU)9Ba z<5lH1UippZf&CW`RnF`ZjyU`;nDDDj`N}ZIW~B zlJ&s`f0ZqD17PQv6Dw9aL@PVI0#yR~-B-Txpa=X3nv#9&Pyyh1DFDDP7U|V5ru3I< z|HjY$aBZ!B`MX!X|Mnjb{NZyhGr_-n|H}*hhs%FhRDec5jmF1}Qj9q#w)qg1SU~vP zo)~ICNHWxUcz~RBd;!|Fz}mJY1KO3@80Br7e`71)@F17809E8e7TD76pwPZ-E;m&e3c3XeHjZd5Ps|){wF_jAUm-SPoDi(-$40pum;UT ze*m?T&t#ypJje{o=HJ|o+|Xtap-sr_g}G$eYOFo8AnrVNr&Sj8HxkIQ2$~xKP@UaN z9ykOCYybg=Amz?EK$HRZJ+Hmxe&_YY9!?J6VSjZhW?We#zXuhb~Hvqo-g%r1GA1%*)A1pMk*#g$q6 zdp^R?8TEHQEcl*J_P?Ev--Oj)UHnEND3t>I9B5MnmP-8N$MSa_@cTaC{iE;x^4cHH z0>&;=u++cWi$(G;<_$7u%MjEp3)Y?GKr160&B1+_p{;X(=Gote&q9MxrThk;e;9~6 z*a4{TDnuP%%YSiSKZXC!mH%M+{V%3RO8vud30A)UeXai0;a~prY^XM11?M60PRwzx zJop^Ixk}*FlMbXNn|Bx&c0D(eQ);{uQ|4iR6dPrms{APIT@{Z7kMIM?`@pIv(_13z z1it>^QTFij7>N{Ry^&2k;%M3NRZ(U}W-u&UJ zzr73l<$QSR{kMnEuMB$JqJi?OrAiT$11HpqW$v>MBAT)6+w|FG9rWl08sLJRXIVQl zpld@|?i(m0W&(-=?0x4f{flGYev4IhwF8|0#j%n52gho?=1N)mSI;#6e}{aRv9Dh; z_E&obvKJ%ZpB!H?Jh*t5Ieicu0Qm}}USG)}7$7L;OMasz2ToTi6I)4k8*JnOfvwm* zA1QF@xm|`2PA7y5v@*-T!0z`?bYY>R$9${f3aW>!UEM*+Ll0yuU&X!gMji1;B7grg zKTHexfEuu4`E9YEmtxbue|#Hk+{op(UcMUn*ROo9A^LlL;;(1?>WP0a{8v}m7YDz% z{?&3njbX6g_kVS?fBvrd%PA=_$n_XVb_Rh>o2-9-!=1RPj%urgpZKNk<4 z1I=&#yT954vLtx=7ni>MpCtb@694OE6)*Pw%&WjO4FWfsBnH83Yc=Zd0U@)FbVEuW zLLfsPiUuY-9~~nRRW!@>zTKc3%)c`<8dXfr5lcJO8Uo1@&?`-<7Kr2}N70OoTf3DXuh_-LN+FyKF zzRO6g=2P0&oV$@H;a`4^@xMJsDA<`KuvzzSpYaESuRl8Y*I)n1N2UMmMgC&pKl!*) z%xi~Sk{re4f8tcw>52uVrqsluw)Te;j1)QminVO=w(S_l-BsZy_aV>g{vbjYHTi~K zML;V;N2al;WN+5bpsnkm)*E2kws;QCI{YI|mS#Dum*(CdE06tRjPgf@O0HN*stQ)7bDj>y9L)O!}?TR;G^CGPL@A{qbtHV<5 z09yjX{^H07x4rUID;t38zmCff^LnNJkG}lX%B#Rb`jVMn6*u zf3wnTXHsc&3HZeAj~@AH?b-;(>1)#7|f6o8Zl`;qc%YtKvSqaQ<|Hf**$L%Hj;t+qu zl?=20#ooL2D9o%I=0|re*;ePee@`{AdE-!S$w&(!T7r$vH!NSZXcR}Dgk|9 z0;r#oX@px(^RqkWgW!c7{6+r%x`ut@NZ)PLSRVhjyKsKUr;YZF*9F?2jnY}lY~DA9 zG&(_$Y_hoO@OdnobRVp&;oe)cGT78cX1DCaoHN_H2Yk6F4pYk$M9qQUf9sVyXZxrM znTU=Se+UjyvMtlzXkG937F_mOZ@1j;pn`QUcooP#CM{;hiUy)d=Dcm#cR4<1bW%|(ji>+X`0Xg`(po|tja!@hD$ z#o8b}hR|$?S~ej0qI;D!e`{If3u|Jzuqr+|fmk?@9!dJ~evC)Wm#2c5ycN!@g^>e{ z&ZDH?ZBZ8n9+?g`{+gj(be8lYVGAaQNP>4Ul7UAV>RZ%n*$;00^y{l|uE%a3-1EgB z_A#ylyBD^RKC!=-?I9VCeG=$33;$~@56f5ZkxVib`0*TCAr7m(e>}QwX?ayXF4i%) zc06fZQHHU`Hv?1b>srLeEh#@ccf}u&E?3kn7fjS2d^Zl~#+a+&cN_3P#qY9Gh}TX#`)KLTEmm&QSS%#8E-Wo=bw%y(>u(YXSxz zTF%EP=NDY3`E#9qe_^6UhV8`l@b9)qJ_yUiHWj!3VyiuJvh1BcQr65M7L*7vPRsE! zIf{fF#Lj>0{ZJk$sAL(k{LO{?ew!@=2fS%;H*8496ofE+m1l>3@Zs{YVo@Q9WOJ)J z26oA)`4+0xx*ps5<0J$PFZ!-=ssKFHH<6K=;u$|;k3ie|e}CGQdb}k&@HQ~@Z+G!{ zt^KdDTvhQY%cwQE#O?bkgR(v2I7%xcdv`jO!!1!|nu_~-T#aW?$`hoDWRek{}?)TXe`{yXKd<{#pxi8-O<8Ju+wuIk;&)i*>r@T< z4E&A{oWhqPh@rbgBp?F1I>@I@MFoLvLA7rta;hV#7Iw8)!Z`p3G%mwlcmu|mq$TuR z*|};+tw=lq9UEef_CI<#t$~p%a1JUyegVs^w`l^Zf1%xrlzc7>tj8G`1<7lR2tpjI z0m;HZwfRBER~USJbbD|)qP#v(UY)1^y?r&1ZpMh>qiuy_c>Z=Od!$kdTPsw{> z#h-o~woAu%f``TvVm5fSGXeD36}KP>$n@~F<1IHrcYroq=*Di3+H$7s_VJf3aSn#-(2NJ9`AF_ABGpJo>V{n6@27Ka@za6J&#* zEv(tpN|JY1NUr^2#RzBa&5GayR+5-9u)CWfo9(#|rJSKK`y!wO^})V2D5 ze;=@YmJ(X;StMUrZE%Asx3xUqlikxN$p;MQUL>f6Hp%nYCd!Gj2i(HPW;$2)94bYF z9rtLQNAZre@xFI|%vtS*{Zdvef9qv(kVEs)3~@Z7_)ySBIg?MxNF?e%-iOb} zL7tZQ{6OHi7?yrMj){E57r*5$$``CG(onKrxqBpiR`Nk8(RR%V>L^?XVV96;n^Ie{ zoBvw>O2tB>-}^ zDUmX@(~Okv=d zK*749+yrr?wsT_~uBa5>YclQaS)3$ws2U5`xGKlE6xvYTVKC2a$SaIVP?qva`-#rF zE0sUO^YTSC>F?Zk&Ef6*^a{?X=df3Tcy8{j!*e z+Gtx!hkA4pi^?#TyVW#CTQ^d>q%WBHnNEoko+Xa^ZCZ?Bp7gAHL40`+7`XNf^0@`e z{il%vDU*yy$<5vGSW&^awY1JW+V8JB&PbV@nDO-vM@Ar47pT;9e{l=~*O;75V8p!i zr}KV;@wXR_ugCLU>u&)!jpN5$ym0)vw)Hg!B=`qg8vnoix<{dHwEBtS`f)$8#cv42(-|^~H`Vi*(Op@;Izk2lZ7@%$nRYH^#u;@m%Ur}E-sa>m1{?x&c_^CmsI5GQWz1buJL zLzo}!kipb@Ff!O*=bp+*j`!E&yH^^VfngG1vx@ofPs9)`!5ncwS6Xja6fG~{KbFJHh1x`1b6MGF)Z$CR?ipClSvb#w_zZ*UCqK|IK=>NIks=7+Xqj3 z%a#07#{+O~Y2L<@yN3GIQGDmNPmEaGAVV;N$H~RK_dGDCA%6LuxiCvO?)iqCczSCQGWcKL}y0brHsdzwJ+l zf2hC=9-1eU@_Eu1%Bd+i@}Ax>5CNOnwH2@Z6~_XDc$D;grJljzq0-D%PUEi z-V_-lz8eYdg44-vZyO>w6@VB30DN@If3DVV_`U$&7is@ju>>%7`4EF}9-uG0bD#3j zLT2q(GHaNXY`0|ipxx@TW0IpMz?;G@3qcsI2`@g298*hRzv5R<1M7!h zg1osn(7X!duxNhJJbA=BexH-^)RvZT4OP+Ikd~%9PZYabw5D;DS9JdLCxTmNe+s)r zxBpjvY4ME1<)~XgSj(p&=i{v-<`&SMh#(;nPqbhk}t%XhQ+UDx-lVju06`}GJejCe=RW%LU#0Fm&6vFrm;{K5s78W5Mnm@pu(=#?l*d)M%MIwY**DD$qHdOKm7-xx;nHUl&+7N$q`%tSF&WiYWa^c8N*&&al`0<~@ zdEWcQ$-m=jPhTd4Bzf6Q$T`sK*iU<NoPh9OWSece>6!`_C{`_dk~K! z5)W$}PxD^ysZ7QR3~i_m;SUB;Hd70NEkxlbBTYgM)2kur z>U{l_yN~WfI5`vwxP1CAUX?@?aqd=QOTTfkpV(#aikB7TpLRXk39s1h16B>8!1(kr zj;&-;&62PoP{dTroHDo*d!N-E~C0~TU`0L2oD_*$uPaJyj)KQuXp?MRi>rKf~ zx2Ht%)QZFHT(XJ_>F^5WBp|F`j7SL^izv!~xlU}kwn_qXf0y(?NftZ=neUGjgmroG zmI#%bl$iI7D8`83BvyieQY4VgWTsfmOJ;&}IyLG<2*8|;ktav)ibPA~XQR{v$=qhB0*Nb63Aql?G61daS#*^f|iogZf#f~IWG0I`)+na^vIs? zIp{dCH2y`LSm(XqdiNW5)Vilwe9IiW4E^9Ce=H)82I|)AU{EtDE~HHEUMXWHij3cn zSBk*jXYznZ7J*_pVuNrS5;7+?l`NrBP#1GD?lK~hJE&K5eo#3?eJ?eiLu$Ti>9%|Z zfARYhYCVt9u6l8|3kMd_4k~#v)%eSM`S|HxP0jf+Uz>RkvaoV!2*HNe+(_-;v6^)f ze_#Q9@MIwvNz-H!4-2CGx92zwP_9hv*S`F>Kk&O3pCpNKEs2eb#)>p96p(Hjn6Gsj z<1m=QWLna3fN8$-rz5}PFc`}yrf?VOll$<}naC->|L{cqDyO^TCwBlbtbh7j>$;GR z8xQM^W1z%x-X?F>-QdVy@ppB>?V$zpe=%7I`Ti^S_YE~qg39BKgRyn!_=@-61m6%i z+Ndi)1q&|5f+MF1d0cTbqZ!L>vFP+vLZJd;iQ_v+qvPa#jJZgN5^pMbI4k?1e8pR0 zopZ#$B^16Uld;X zXLV(%MBDYGOn$i#fAVfHmQ{)!e}5#{=g5Wl;l#dhlwL@4g6l0w!?h`nd!Ht^ga-S$ z$w@%RSE?@DOHVhmWVf(Pa=0gkI2;eKZVn*0=}XR&d9R3i;{)4q6Se_`2X7&*I*=69 zx9=xXO$PJoIC<*FU3Vt=3W@^z_lXBqf%)PM*?{#$zWF23+y>tM#D{;^e`%fLQ8=%* zeE0bv#OEI6)o;fXq}*gQ512G6uM*2SZKOq;ks~=H758V=ut|2wIjn|(sT_ygdR>st zhKj&q_oHL(`EYJDZG2~*LyVA-b~CS7H{Ye;%RAHZjrP(C?Tcsp&4Pq{=L2fY9p`k?s5T@ae;Jv_N3HYEm+iBPYGg*pmYQ-ZcAg0 zF^s2-*F+c&6&xMDUALQ%3pgQ|$3Jn&m)7N` zkTji$hyW7JzxbepR~BGi0k=T0a7y7ors7E2Of4%A$nssR6yb(rVc`{^4iQFr=Z0_2VxYM=TNB^Wx{m*dDY;R|hW+ z5SOX__RV!@pdxi7f{w2XVXY{La;1)?2SeNRr$@Ark4g&Ke-0}G>Ica>FdHwGbdtK&Z;ZbeA# z#8OA8^m?e8pj;?l7C{HN83HBs7NU=e~ygt&|L`mFNXQ}sF*ptL6$Ur z%#ZOQaAD&G@CaJJ9TKaOX>tgrW(~{DO)QtJrm@jfUedS^;Y$M6A_JCtok0fA5bc+m!3!VH{bS&t-fLxNkZ<)?-&4l&#zC7BVepO(s9ElA0{pi?!mbEO7ll5 zqBtW^f7ISSD6-qgH<=3rRLW!`h|E1C81V=eHs(jRLvocIaX_@hB3KNzR=SOCBe@6hqMIV&f(dOJk?v9pc(}94|04xW{ z7~gAa{o5v$#?+B(Q|ed{z7KtVTVrv5U>q=SK4_xC$mC1A4N?>88LjdI2Oqk%eqXtoxXP$~_-AA;5Msj8lm`*V9*;JE(K&fOiC_(Q@aPD4!x`|BS07599;! zf4itOSK^U8$Xz`7L7gDY<-#t6@t1hC+j3iTes=i-V_>j={j`*a*}buxxR|jTET_uvgmCHs zpSdyH+Vn_B6%Jt_Ae+d2V zu4Gogau0+OL?$j)w z`tS|!UmAG7_s6);F&<`>$ZZKe-RqDqV&e=|e(>WV{Sv)6k(@*Z#)r)uJ`|nl{?DI! z;2D>A$$#Fd0C&FlKfjeByezT+2d zPNw&( z{NgJ=wq)`nTa?yUzm(gsRu*i6lV1&UXSWQHN47E=;>K@tDnbz}f42Acm|OkL*Ya%S zM>3s{IsO;?NVNOH1>h&*mP&Iq52r7HgQOt6M#t1EkyzkNju@K-4jzv2UB^#tjFVA6 z2~kEg-W~-HRwkt@*dau`UKFLqFEK}V+dH2>-o5VeNXE#6>yBoYa44uCBuPzDWqouL zMM3bcB?8Cp4m%uFe;AU2aHaRZ8FV{G(*@thdEiTU$NZUa2YVZLoq6yxiT{?@wD^Ok zH2=xr{LZy6|B}mP06{>$zx~4=FGM;A;k9XZ&P3N0D?Xzj>|wN=4~ILi6_uFhYsQ+` z+RH&WUxq6&q}_e(UHrfz;>&1Mk{uWO&4P7+M?=B}bvH5V_k1~STz}~?CRG+jYO!97 zhyFZyv)gLByz^xp8VaI(jky&XUwZQ0?H_)C3INA5z?@j*Gn72loQe34oS_%q0jh8u z2*_H6pO6^+P$e;wQu})ZhTW+PD<2YihJ=w>FU5RNHv0P9RU+ALNR|Z zlgNncToae9emTpen+n9O2<#h^+(u!yQQz@%(?`8cbv(H+C42Gyiys>$(7D=d|GjSn zf9+fItPkcTT7Z3!tiNG=T zsv|k95@P0UNPlFK(zqa)-}>a8;2IqI$S>2p%a%`H`PaQaa43g-<7kiX*w}L(@y9pb zrv0}+u{Zjz_{P2;GZ7~~;zBF9_-5NonJ7{-R^dX_wtWzz4=DCQq-C`_sx_Vb9H0>Ny1ccH8la|(elg?bAK*!De7%*qWTJ8_^) zNG>^9O3k<2HtliXk!It7pfh!(AREJFJ>Na357Z^a@i75qg=#AUXW zJ%9LccAaRR6v#WoXKJ2065J49djN8NR}c5j$)9;!3K1|O( z{adc5#*wnW3lW4k4WOZc#nO(C)(p_$K!r-Iv)Jo%dEOdRWKM&sj5+rP$IZz&Ut7@lJv>3!x7JI zhc^&~ZHwk+rQ#Vkj5z5jcdy?EaDPI3lam7V!oP1{S^S%}+wQ+=yTbfN$#4&eRZ!Lo zP+~?(H8T|-pnp$PzM|Fmlmg?Fvgr&M{TILge`kNZ|Nl!R|F8d23Cxb5v2_5Vx1iLI zU|q0>WCGLz1M`y{159ZOLHBUWAyO>E`CbZggggO|jsdPRsU!_VIn0z5f`1thOOh3Q z2q1Xfl4OET&4++M>gSOOK#Fync@eqP|={e7SpI5n^ZZw*AzgCZ;V z9K#3rF<6qOX#ehNNTs(1$Dns08Qz~NCkKl0bdp0kgj9uRuoPa;;R>*kZWr$#0_nps zhYWZFW=_Aby=MTayft{tY=3~3_q>V z1_$MXp5em5$M=~WE7juo+~iZ9Cct9g$+vvdQFm;Ebu`~mehtCGZ@K@6e_O;KmIbLn z>mgc6Pj5%N1J8pq8h?3?1QYF|f4C9htLa$4^l|z9k%(oT)~KdAg$=Z3cM4<$*D-W3 zV}(z)noGOZe7xsO65kI4O@FoYUyUp#cu`lz)ZiLJp6^t_1osnNDDn=@5RO2PXk1-^ z-Czkh!wpbM4(xgZ@x;J%RI z;!6?;K7b~tt?+ZWD6>}69Nrr9Q%)tk2j)pkkDLTJsztZk?GC%$s@rk8!&&#EJ9%`k z-Hy?HA9PQ*@wHohP7dAvs+*4{yN^fS4Hsjh`-r-sIey$8VK;clyhG&B8ov7D|CxUc zK*N22G%c+x27eikB$#{4Z(upk_LU0BRe9E4E-UJ#X{De_%j+|NewVjlWw)$sfGisz zCO6_1hd}fR_18drADNx`r2*dXmhE`lSI{cC^aIl(35a^Cm=PfRlQx1U!tm~{2+amu zGj*hFK<)5Y^sTyqNNjzKF}r5YJjJu5;A^15n6_q_5PwlBiDkLrwM?-&)`5=z606*2 zYo39?`}m~+(KHa887C8bJPR1vydHI*c05?Z;DDN?67UxVqU z`#9^#lDBMK?WJriA~I?#VbyEy!!WUA*_h=ri|HLi&y2Fr%ClH`7RmXVy`GoI#(drv z==)4aEPtu{xjHb}z)55_;Pa$@9-O4zQWfs;8PLzkMP})KX^()qYY0Sts=#vd28gfk zrM5Czu#QKt)%TM(KVM4$_EecH++87oe7|2VZEM*8J7;OVYk-K%PM29s>V_;$Zn#T- z4VF)d#eHCIX4TvWNAyVEK(P~N;N0MA9^*4C>3^@m_FsJpZ6lXHbMl(Y1|*iXqg1XN z!9roAJNL2AH++3BA-&lZ1>QBwd1~IU{5{Xw-rl5$@xigA0lq> zoxg_IXO|@N8C$Yw>G}{Z<8bJ&VeYd_l(EbF`wM0F*N~@$RPLdvRt0s+2asGA$2nDl(rDlojkhas2W)Kko812(Mzh2CVXrbCh2Y zeX5?%;QQzADE|{P8e#i4sE7V8J1tdajq^@GUQs=!4I?tkP!27pwl4E zkUZtriWM{hXp}~IL-z+5Vhx?dA+cnUK6Zh<^E zygA#g9+M3F%Zi&zLyD#mEcM#HvN3xJ(#`+41H z8d2!$PSxHi(bqqGpSk)vZ5r(7@9}S7DdS$0^A@fRSQ}hFaNhbcE6lGKLw7xVH8t`G}jTk_kCYcX0H@J+} zFvb2baZViw>DfdQ_KUm69`zOwhjaTP1TJ z@|+S}n@G;4dhNxz*U;e?uB8EOmET^KkNV4GsJ|RNFaM34@je^!7T0nqqZVWXZ)QHi zIf;6jh|DLDTYEiLIStDxY4rF6)gLk_hIW*A3Zj(?;;%aqER9mBvVW+lblFxx1LO05 zwR}jG4A;+ghg#NO<&*SZv}39uzW$qbKCc5=884fY&8lG7%muOHW9hG8MP*XGdS zMzO;$XXHNEdj0-qGKOn&`)$WWW^RjJ1F4y3k;r^tqR-a)+<(F~EmLs#Cw(6WQrjF= zXy9VR`A{e~lRwKeaNLOsBxm_98e!eS za!UJZR<$Ur%71D@+CZ?!b>%r$wgM~4{R+ltYvS*}*RXGsJ+4{bWl5DO5~uESU8?m5 zEb@M^` zGQvC|mpDtMoLTj?nFFy<|1tLWa^*$*y&V5{&)aeOW`FCxIm1Hq%GmCaWkz9~#qak3 zSc3Xhw5^&}%|DhEcF+A=*@(7k+Fm_ukDf0_?3@3rJy%8Q;M$m|eDLPtLv*PZn6+rGBFgLyprXQm($ z*k2rNl4;wNS{8k<(I%Yw&+Axs{|pN6jbofIiHt_tZbdtrSdp0fXXtxXU&Ymba=q1g z>Ioac0MF+**75PuFhlFjC=O6h+h|9Bzu#YzsekW-hxWr9?HV?28VL|p&84vbkID0x zH4SdFt&ce#zkUy%d6w75`Lnz}E}BNuGy{Bh?)!oEF|hqv+&&Nk5W3v4 zX)wO>EVtI>z|!Rww0&UPpXCfJ2axD;Ti?gH-m6^OwgAM!M!8exhA}MPKIbUjb5#Pz$qCRN z{wcpfn|S_HzV|9me#__h{E658>W+<;ZGYq3Fs%oMx=!&`36vYBewH5*ieBLh*M0csoayLiS63x;ThUj8jyEC%%S-w!-!Ojm*OcgIf0yMPpZ!}F z>pcG{%k*>GKl+g;`0Va?zm!;SbNuQT>O9N8`h-h-_D|Ug>plM|>*;#@lx>O^QHc!%ONP+Uqf0ecBb$=5Vq_V91DiPddOz?7-zU~7SEfK@X*2#tv+#h)O zbp@weu7T~=i~@@vBoj8Ui2WDgyd3dQ8~>hB4{#mJd*Hs~xPLH^4dR${K|+k{0mjGH z3Sc%KKCgRvADN4Ef^K)woj&h>{i~sn8h2$ByUF{;WA!-!GKAyrb&4IJet)lK2p4W2i9*XNDFoiOZ+aA zBk(y5p6A$Q_87|7#dfj=?@O?K8GcU)+l}>c0{n!-{?)Dn zPy=E?4E=uG_RS6;R;0vK&!r$a<}oLDZV#~^=ej>7mdn8xf2&{ix_|z_^R;@mKM-)m zewr$fSbhWKfbZMr_l{wu-#h&2-;KoG?uIZ$NZ-VLGXMMJ;5oj!J-S}YSG^L2<5J`A zS8VrEK!d->uXA5MpFaaK0H=VBf-8<+uInu^6|2P1h%d$OzS0b2hLke`26uLk>3 zk5m5l@>g9SNj||GVt-z918{^5&I3ol3hQO@SO4ho>*L?0?zj9oj*h^3O|_f|ve*6e zUzHQhYd)g9AF;m+^|$BFv3^UGYp1~aw|GBCxo`sf`z>Gl*Yo#a>-&0muG{HwJlEr* z=YtjCI0jhHxgzR0-~aaB;BU{RdLOlaE62J$mUznTzp5VvY=1`|`)j4=2ZJ2ryU%tF zu>1!5LzjEjO9&FZ?lUl-j!JTT%@>8|zJeRRFIP|fP+)rVG z=_HkR{M}brK6s9g1Ni(0&bOzYRDa6z4cHhWNuE3e`4bN}QC|P`KQeyB*Y&;#M2GZp zYz4=4hDw~03V-vUrvPJIf+Njuz#mLQQ91=4z<)rR3lIVH!7Oo5iC{K_4OU_U zVGRSt91xOF!8{NRF@c33hQw4%;Ma>9Ngtp?j?@4@aW>i8DKH5MVAES4Y8C8)0U4y% z74F;os9)DzQ;65~I;14ol5#p4-+XKSU+$%MyQI#m1iKfKj^yZGPZ)n^lL&(+6&)7P3yyWjrDCq88V{6%dmSzi8QXc+&k zIT!!&*Uz5fw@~}vA3og}6FZQvfHp0U07HE^$A6ucaYIG;_cj$d07xl#CJ%*47^hcP zw)Y_>bJQ>luXJd=1CCN&OEL_I$ysZ^A3Mu@8*JuRQIYvOtN7Q{4G-P!^8+?$>*9p1gbgTYj>x`_1U%t?#TR`(ShO_I|+bqkEPwK5nuXX3LM! zs&}fzNB%w;jnpCnbzHQ|eDh`8jU9h`cz?NfW81-yUhu5dar`R#Giy_GI7)XZA>C_*Y)2&x`VSlSC@7;4gEY{Jb*iN+waUe_1(V=_RFvS zBAlipzYb=d1yNNLa)+|AST~=Ow3mIFDLKXF?XWz<_7IZoL9XItW1UVN?*LJD&40t< zk)Pyj26g|lG(ZUm<6eqdUbDbh}I*=;p0Sp-NvJ2Gsxb~w|~rATxREO zFTA~P`;)Kk_FB&F{Nt*e<6*L<^ZMiC-F|ex*3({*z4x2z?CQ@3XNbnmu6wxkXV>{- z8(ydOSMvUzhpTn()-^vmm+j=8Cs$fM-rspR7<``Qg%=07%6yI2>;Cn41lpNTj!k|H za=}}ludR1miq77j%|3@_K7SnF2J*Um80qkyx=R*2o;4rhKA?2mO{__%r*e+V$2Nk<*z*?=-s{BOV@RgVG_Zef=IBW%~XuL5+s(X7Cw?s^zxRD~q9V zJE!mQc++@$wzgf_udB&|Z$FRuEW2$EZ_CQ*4BmoCGiWw9wlnP2et+J?P_FwMexJ|# z8C$cX%4>RXM|Pa87X+M>9=+}OUcJrt zleg53_MRLxS+%gXJ2AKOavTOM-lprzeM;}l%H9M1W>j&zOcq%Kr~SOBZAFf}Go0r$ z+qn6Un|q&rT6-1W-+$CKJHMZ3VteX19|`YX#gMQD@H zl9(2Q@vO68w3+z>@wVRu9}DNI7Rk<+)p1KY-uqJ3lfmJTuGYorly%GCeU(SfVLo)6 zbLf{3bGEa3U-#6xHJ8bl4n40I5!d^8*!%LWxTyV+jDH40e-iX}$9{jiKe2Q-s0#mN zMDzCDoAlmBY`+Z8>^QWXcGg?pzHBdm>0QlFbs4vHHT9}!Rz>3I8p&784__yyddZjb zz7+wVd<~PgVmjbn#m{Zu>MRb9o%VK*@#Y@&2J6peJUa7Z?{QpBli@BMM#i-o#HM|t zC4LAtv41NnWRvv2OaXav^=1(qMQ3G;Y47spxwARHtfp}j-j?;lI_BrkFFSkeC`>}* z6TDn4ty3J`;;;4g!^x@0(F>e&o}uXJm_NH5zlXm(oo*?lp}W_{y5@AH4zV zfAaF$|`)xg4_ik*ONq=&D^pZY3oOUz+YxBO?u zl7DHlD*Rn7_m#s$@wFmdTI}p7zVG`@>eLNgE`!DxMWt*8av!t{$>TISw9NMpeCBxG zzzV;@WLxa*3w*}chjF}Y=f%)^KUV~XAEKSj;=S{=8+;~@_!y+0(#=V^B=s_Tf#I*D(yXWU{TAuiP0r=lZ=In5+BZ z5$}d;5?>ChPo9(H?YJuUy`bl8Wizl(BO1(&WN*KF^Qf)t-DmT-cL#6u!TdZFPJd@a z&b_l+u03gF`z&(Yu@mKt&&HqmZR2>>E;+4s@5Soc=DXoTT$^BV%HI6PGHmy|o&(#m zF35gvb<8hOy$#6QeIxSpwBAQNT8-pGrXNLvaUSJGULOup+-)}CU2Q?Hhq-+K=Z3Oh@YM2hSv8RjXE=|;;PK=)M?mK=xp@j z3c0w8gC4x;Ef$6!c3gSRdy;VI)b(;aO7~4~JgLblE#TSa3pR}07c0u&=zp}#Zu$3a z={GWAg533VZRNRLvANB9)0?|$J)csOo>G&XQj?vo>)>Ut<`GB-g&8%3X*HN>H9ym; z7Z=pfx_*n&lfG`CzTi0dcnbUJL~T2LpLN37%|C_}CJmUyPOfL`JI?bBRCrMJ8wS5T z>854$mbCbzX_WoN>;)Ncz4TC3O_BLir~S7>fw3}nA8J{D4kGGz-@HDR8X!h0&_z8Au( z14J`TpVasV7Xz(YX;ElYlR2G^Zb{&I_Obj-uh6tLTmdWhZQJz|%9RI@I_4e7)9NLp@0_W@hj;FS^2nmlaU4Jr_B#K?L35KrP>~zu- zSm>&?mU4VP*j*+j<0g1Vc*sYv2#(Q{NtWf)wI;->GF#@8OvI*F+AL;-+DZv|_DuJ? zL(ovRHw0tkg!Hu2u~g-9;X0&Eg}@kMdmp{Th+kASiCQ84xZzDSZ(IsVc{nd{CEL7C zcMJsM82VqP(Ah%pB>9O+$FfldQl^Fo z>6AVcZ9v){yqS@WJK+6()WP{#v?D(lSMRLlj}NtU`>ktN{MMb_CD9rgDUiWU6pt-< zQ%J)gbW=tsjbW?5<89-NcNf^*i-C1#Y=ugQGab)M~`~V}6jlz8=l-2(#lNXtd_YPfK5nOdKZPc8FxS|NQjc^H>5mL_< z1@`L?{ODlxV#t}w*%bF4<(eX;H>y@O@pHn29mU4o&$X_p4590P194E^3Fr_}1LJR)K#Tn z_;0=HncMvib1aCa)Ly8Jd}l{>B7RnE9(^r-xjA4ha%@Flxk`dZ!FPAK4Tom~`N58O zN%67^s;NG@Ssg~JSK^YoZ~OJwI4+!3Qqb}+xPOqWz@t~!0!M^ucClU=0^_w?0lfE3 zEPa@wc=DJBJShBVxpI#s-2Hx0)LKB9lniG-Qz+++hpGIks6MB#T#hhyzkR;NO?wN|7R5dxURN}-{>4vdWwllL zL0Ls{>P0PPgmJ^yjYcU8NPOK~Fz@Wj@qa$uQMI`F8A9*;665DIC>Yoxc?R(1M}gC; zH~IL#Tc8-AHcl&oNdQrPO|x2guDkODkZ=%7US|W^ z0xtyx_INUP#thk19~k?+Fd}{SO2$=XCV_#|$&A`!jz=7AlMvx$_o`!Pj$i*U1Poq?QRnfsPVytmkDeUZ{Ezx znoSP`gakE;>!jAheM2p1ha@JY^so?Ra35uxR;9m}HF>KW?!?swXTraIo+XfmIxAqL zZJpWAFXoB(5_M_NOj4Gl&VM;4!A~i}lLM5(l_VUFOU-zd6dp$uMl$o4WmjEJY8xbK z=||7^yg_w~SPsI31zDbc+=6XRLABU*RIy+VCeRP1v;?FVicl-)Cm7QCjZKD2@QySd zmGu}~wDPYW0<}R$MdxglcEr`^`fsm>Ij3@RIWb_)e~A}NrTUtzKYxso)PIA!z2jRC z20H3>xr-e#v%##^H)8WaY9_}{rXKl|Xkc&P=0)Xo^YJ6~w9*;V&lOWUTvM%FV}r4$ zP)^QydK>5#Y~mY3O3-~puGWgIqt0T06-kM#FNQR63{gD%y!R>_R-AoiyYMIV$g)nR z^i&@VLM)k;LfgJ%?tjOg0GrBjUV_VZz*rLZz zmS9w=MI&BprtRIZ(?3E7Bk)dT1*LvSkRI)u>t!*2=oaCOSAT_}Z#S)liHIgM^e;sf z?23@{0jT`(gmMKcUVd!7rwDb+*C=?L%yI1~J8~$aR-{&4HE_eC;vRjt|6)E?VtH_C zSSg(@;x9E(Ljz*SN5nK`&yOgR(%_u&ij;;hkXUI8BsCl)?VmtI`MzhOmcN79=8Oo@ zfT14aU;JBk-+z(6wpNKI!2gtvD-~Hq(GK(u<-^g1BcTJ)oV2pe=^+uGos)BvJ^7z9 zoz)Cbp8fF>faW5w682KXXAgZbJrxcu>0>->;-~T@sK-xSBWSsknH>$eb>|d;d+<9#%P-J zfxF=iXS6(IRhvY-M$R)z0(3q*g7XoY_DXl~R|~8wioX;9+xA)PC%zyee_PX$H@UvY zpr-u{uNpF&7{Ut5gZSCa;<0Go31rq%5;hN$+hy#Uu8EikZ@ zrRwFXRDU<17Dt0+(x4$nliRN0;Q^ryUYdx;nxZA!O`JY42A<>sWq4R;EJDg1pTeh2 zNX9YS(Kc4W3?FYt44!{X-boyA6D+Z0?(AlR-v#d_e#M5M;U#0MgP;+*f{SsIFsH|} zjQXI~=b$!xwm6s_%JK%ha#`p1i><2qa7C*v(tkIy^!ab%0jY#vMgjmvleg_+5!tX# zEfEHApp!AG@Mk6T9+G(ie3$nmJgv1pwb5O=S1Ils_Ast`dVhXeHSazO-#!1)*VKTD zxd(xHq~}T4rgr>Ruu^ud;(ZR{D%Y_V$7GIq`ZA(IF}LE8Ux5%bjS=O*Z{rDclS6{i zrhkn}6WFbr+n3Puv$CZpQe8@GOl{b$;SdY!qr4?TJi*XVNBI*&5JYN**xY)9g`Rq* zQ;q2Gh=(jJ7R;xt(l0^q1TPP*DM%*-2p`8^IJxNHR5WLlzvC&8dFDq5>9*@33Sf(s znF*KRH%yG+Ri$n$H5rv}WDx-2I-8;`&3_qP$kKJ%@f5L16Nz?d60|~CVuQ55s(OAkb0ucs{le@v{OUOuGFxei3z zpPYUL8bSnkFp<>9N4I`{2WV~#WP|&#?2poMAhaGzH{&YVxyQ}#tZrS$&Zh@!0y1>0 zAl}=s8!qocY2Ex$-Z549dFVISr+;U}pxW?RR=Wi*J9Oy{d!mC6v3#qZMYGIayugj6 zGRH?c+!BhlwA2B>`i1D3b{Vh#lk#efKh;$ijyz#$(&3aDoI^fj0-}VI3_k!x8zPvN zA5)q~*Zb7`7TZ?cIo}Hbn1(~KJTwac^|HmKKJ}hLu#_be@R1XdU=e9>fqx%YTVW<% zT4Cn+(bnEN_Xo?u##5dsTe=a8yI{=wPV?#a!%|f^XGHDjnp&>-RUhdxRUYJ)oCZNV z0KGDtKxq(S=X>3!r{TlM^H6O6%-yV4Dh_6q2?$+-+-sPxvfGU1N9 zv@$ii?8PS>%oY%F7HFA%mBF$S)WBN$y#ouMVfoV{rfukZc_$1M%LX#5)UZdG}anF~GzRVPn=}j=W#$95U zv3ZLw1$h|~x}JfjQ-4<)Zcdsowc53y{^p436sjlYJ?JM&>XP2xgQ4Vg#RFPMV|zt& zT)Ek|7w)_M`KodN(jG1nSo{W0G~iEAZ2`iGNdVvL7dqs*aLO}hg1cmAxQ`qsVWl8p zeS~xx2iD|h-6=q%V_Z}NnA<}7#r8-fK)QMNF+P2*1hfisFMp^_R%yUfT2ABWk+d)x zW%n|+&Vp-q!pLY+Yhs+f+edECEzn~$-dnQ1D$^bT!9VX=2j6hQnYK8YQW01c<28NX zb}*%E410!|+%`P1A_(4P*hChXSe zwEcX|>j;MV@PE;5&@jNW(}peQ;`>O4g8jR7+qehWVn++Sc)O;}ZN}{t7K|P$#EOjI zgpqK%hEq$F$Y8OC0?h{$y22l=o7|nzDl7Ww_kKFt5F00@oFr3WPE#$)Ya+B@zqbz#(uIoYEnE`%k;SMYmtKw@)W{7QeOeJ0XVMW*%e-5 z{yvv{(Fi!orFBW7+{K!2Wk*=vg5_K14VF9YQh#C2H9jAemeb%h8<3yhmPnRMPTGt4 z>2mIc$3?AF&jmwQhEbp%{^5SwvNH%1bB7V*=RuNz4+n{0VHM?&F&i-V!ZKAbxs1Lv zycr`AiGCWi&ng!P&1I$tDoQPPh>I#;JzQWBIKt_kzkDwhFc9rfRw% zE`P`eq>NSc)V=yCjW6{d0IRi|xO2fFl84rT&!CG-e-_-H#)cEd8ULJm5c~p&& zU0(<2?w$DETOP(2uE}v-E^%aN1qq)bVP-c#h@wRlt8~netKDa8rBIq%+Am*_-vR#w z^-|GC3UNpnSxXlNE_z1?XO(YPJ$^PGQGb5#MJLJZ(J4oAWA#*rJYey4e2>B`=bUvx z6#O+kiExmSPSa;>N9PDhI~`hb7zcBQwOdm7I%eag6Sc#_mx{Mdt;g=K0pg1u%&hF* zjxd!r#SqpcI%+lSlL{gCy}3R-k9>bMG&ip)7+EU&LF|s)`!df*EEgM;TRJj9Fn<<+ zg1=1Gx;9IGo&)yNh%N0`;_U!)OIsLe5Ycu8Ue!&7-;AUXaeatJZHh6eA{X-1B#sE@ zU%!+7n@aJRVyc)tEI+PKx!!ML(iho-tjn1nP9m0VRzEY%xpZk$z<+cbB`N(w4Xu|m z#^WF^%qk$8_)F(sFd#5kl4q|tx7#XFi))lO)lZl-9=n)R0D9z5q7x#$rk7xLRXTvHs;1f(t{P2EvBA za+gnZhXGN`Mc`>+zxHJ4et+Se<}5{qyP`f|9akG+=Nf183QNI2qe!{WI`0Lq*HK|O z9AB=c#!qUv+B+%)J-U8GhW>;E2t*y0J(Va@tTkri-nFh}iQ#Gb!4ZTS3n^yTy5sfJ zr~JwLE{cLi^HwWf{8iVu)B*gX4+xN|TS%JyXF>VH*tb5gZo#@pms`Zn#Mp8?A-gcNtms3_jU6=FP`%e5r6>=y#x*5ML5=p~V&r&do(M+Fb7q$+F6y3qk%`JtJb3G=hG z49osW>OjZ160D@0^avU;)%iSqTvl{GjnKjkesVUJX%fmb>D45$yCl zy3yh*LNPtumT4mI^IssOg?-iw^~Cle|3*u}({!i%!6RBdk$)l#0jVf^d^PBrh?Skb zE8Ik6s)vpUbJM@;If7?pM8XTCKC{h

|hci1^4&ln*~XqT~rLK2+|^35Zj&<+qZO zvha;d>i%^bIGA68gqU#qU2jHugbxdcShgxUsxkVPt> z(+QgIeu$o$?TP^)i&w%L_u25xgX?)iJxpt@}d0IBxc?Y__(HS^Qgn=Wg9G3aN zF0538?tgs+EV+rydjASXRo=CLS6~HNZ=a zU#CES>75H%>MWRR4EW%Mb7XusZ=&ABH-6Jq5N3Y$m%F5PE%RO$^5K9LachE%?cn z7A?}K@n*$UMi3{)?z&X#`$;fmZ_x%Q4~FBCXVcK5m`xDFTC{iUh;Pk{$i?C0U$<_;@3L28$+x#Gm5kc!~q4xy&^2qPVjuNH4n!N^kM z^5k{Slho0)CG16k)7VuurS z*Z8)McJztP+bA5gs8u*)Ax%EuFtbFoiarP}Zw3->tw_TA(=IIxz60%IHao>VlYhh( zNWsvVeu;f9UCO&J9J|nPGwJs;U={fRv2sv?<=k08w9#4dc4e!}QQeM)Ue-j-f>FT5 zk3w`}Z*}BSVgyj?mTKak4m%7pb2uFYQ9_7b4@3+}NbDu%|v4!UdIjI{Nx2z*$7 z9t?@W^T-jD2S$|z{4TUHk zokxrqVQHh5-4@UIWAaK_d4@kiH=K8)LR!>-kUd&^PDv3LRK~)5lP$&`nBVFEU31*e z6aBFV7tV#9WClpzfxDOSo0FsXgevMwl$D1W_Ps(nNUnQk83m(0XM8R>QRJ$?cz?$V zi+(9~@_Hw5^Kt9}BoRI9Y=5=8sCl4nF7FeSNJV&qm?s9!bc@E7KbFQ?uFiw!Cr&Zw zMp`FWF6z}6p@a2zuhgdZ?^EOBn!ZdQF-eFhKEuA;R*s>|dpkyo=017h4kFAs!}^Q-3NqWJ5eWG?k_= zD#p>ay*3sWrP+I`Mf2-&>p84hvndF14Cd*S83_;z$a0?-%VeFkVV~dqSbJ__dKkK# z@H&D=uw@~w>Sy^?xZK^953u;0|USMOTl5t@E!=sB)k$-4ehzz#SFBtsiJnkAaqst9~> z;6HXyZL6FenT;m93NKu7dYL?NPsJ_)8W6EHU;Rrhc5r4+@qe#IS|xjrw#G_6r5$() zNHV$w@d_bQHTM1Cl0abTpt4w4sS_9xNCgnoXVCVHALYapr;Xa_dXfA1wq7RNCm!|%=~ zOYZSSP`*(p|M11k?)X51ee+~W)cWFzc+<(>_Bqo{Mi=5!FOPWPg8J#;* zxOH`%1jxzd@Oztd+!dFq*c*DW8h>M^*aHDh32A+1dV_i9mUeF~s!JLhzs!DhV$&a!jlpi0G-6^?MI|2j-`dUS*c7+=|_-strq8 zv>;;x-RzP80ljJn?IUm8@HWcc*z8)!@{;d$-wvA5@yhDl?^$WR3bV7T368rst}keg zDwwC;`iJB}oc!otCx=0oAY6}~Sp%0r+7ZAlvW}Sk2%dSdpP1b|{bwBvqA0SI&VQjB zfmN79u36vZQ%9~?nNQ=cYu1?EhxLod06MxIfPEKUn}nK)k;&7r6gFGEK(vU-Ezb zqu-x@=FvaSk*S@`RNB!v;^>bh^W%Ra%|BU%Z3CKsNimv0fB2*3{u}ZSj0Gtn|J6Ov{(qaE|C6Y+6Xd~@`kxGgrdiJU)5H@R#0X(A zC!z4yNZkE1LUaFq1&McxMZ#uILY{x7S@&=M&m&FKtSwTv!K5M#m4A8wA_e6(ih~(%%d@{%;*COEcR2v$Ih+5}T&A;BTHd{0*H& zE2xL(&QRk|!gc*SeEaWHA`_vE{-Dg8{5!w*->C6#(Ujq?Rk1WyxV*z$gvx)gAgGnu zB7a_z(O|h%I#_y_^JEkGvAn;|E#DeL-na*RBU2I-jQtQBy=EZcg4k|Gd$0oosB=vE zCRaSC68z(JW^-B^Cpk|R5<&DVAHywXban_@g=MkFlOYf$x;*!H8#neC3K1)j-pcR^=XN~D@ z!id9ZfGiYqoRUb*1ldjDVs;KE7&73u1PR-phenu-9M%~?^<^$(9i&LNxrgCp?5`t* zADHaLA+=~{)RaAxI`W6?@cWpF?pnTCdq&IM{!DqZ=ErUR(j9jRL&y=FU9R=wEsZ#JMHsHyYNhECwNA;kU}5 zPxq@=1rD_5zkP)c*nSLNKN8WpA*QytJ35RT`+gvo5i9lp?P*?uw3g3cmX>VFNW`#{Pi$Y3=3s{YQBYsev^NS6~+N}(JLnB*VZ}Ll@<%#Q_ zbZ~*8nogZ|E1`ctum~eC(hJh@fM?@Iuf1&T2&mAWo^(x=G{CA*m)n(QcYS~FUZtl_ zguW?)%gq)07=GSUoIff*(dXF+F%`Sd?eT`0v$g46tS_QfKh@$328YK=pZ&a=e^?2U zurS+(D=+X#q{33iO{rv|qU*fu0PBRJC&UrgG+aewhg^Si(-9M{xkyFoHdgjL*xg!5 zhN;iUK5FTqtJ3mrOnVzEbt+v-vfAKXh}Twgl{Nx0x1D2bOjAYLf`g0!x+pLh(dhPa zO%9bPv{V`2#}%q8WBP&*)FiD~=zuVncGjDiVK`c-RgP+m&%IiJVH;p)y%2YTrGxnt z4Q>X$8Wew$)zxTv=Pw#?IfATXP%4R<*v(#=dcP|uimkrdDgRrTtG)w6Jc+UTqVpFz0j7u8_eIjPU@eU4JYT$lIH{`a;ue2N(?^aL{r)^2mt zg9Ao@#VNZ331SAdc%jVHT5c&5RT2!#G+9e|xVe9+!iH;;q|Cz$4YVnTZ0|2YGH>70 zX^fgMK&S81jKjoHGU-x^9C-+FR%wIG?gJ>2*t~ImCd$`Uu+A)TXTPxzZ_6==B_u{d ziVP@tEH+1j&&Fg`Q!RH9i_?_Ufy$#z|*x|Lwkd7X*h2(NB$xh#LV zwF2R`PA=qxMNwKHwS%8jo*VI;@}>0(6-5O=rhvARb)cchIh{m5w5(%)G>^AG2D)en zqURX70%*f;V?hl^MA5JLkw<(**^84uA*P+5rqx-BOIr1T(+4zV$@n%eyFH9eKF~RD z&uy@|@vI{oI_r6k^+ioOQex=f@&13%_0K4X2>y#R_9B8u3#v%GYhFgvU@(@rVq_&;2ga7mWrly?hf5aA zIz50O6@#;I3IxKU(IVH;!SWpK*04bSxBlHz{NQ#R*TRIJ6iBHU;Dy55igvKUxqRhi+j^v`cmg>hvZ#?+ckz& z8U@^B$aYmZ)22*)GCMDoU8R5RT>)Fu{7L}WB38~?5f9Q5$Dy6R-Gnsp3o8r^j8fg> z@9tIITf-Yc))>V}jZ_q@cbO+|aOqhLFf+VOQ|Q25tQmfQKBJ%06FfLK+@%%g^J zV0O#$6g!=RaV4?d?@T5JxedACeAUqDVh4BhKu zUBKyZCq`$cB2%m1%M>x4coSiRhwmo5f`b3PGS^C*`}OjdfkI5qN`TLA=reyX{(9py z&3rw%e5=2lo$Bu7cs|yVrsft0Fq4nEk8CbOWfsrqXj2yo;`)ChNSIpT`x@$<80dCCs83r7^Sh;_00^HbP?l*gSZ;l_nQUKN zLcy04?FcZ@b{^dh7}6t*GOS@RBv$q>jZ1SW|=K2Sctp`Yv-LXhDBev~H;lOTRYSvkjj( z$a=1g?e;Bx)20qR?+ud1&~QGFW2JxOo`m}jN2W=J%TT`DN)xMg zIq<2hE+HZ2_lh;PV7(_Z-HpXoL}f0(EqkGMbFdaEz<-VYV$oR#*f?e9CCmer0rg9( z_xCkPERLmbyX7W6j=;UFH3!F=+uMWa#Q!1xM@*R6-yg9py>5~_d23v$U_@#?KoM+xE9f{gEv#-=oa?l_t#6;ox_$!U`$?(OubntWk%$lkqS{vBkF{9xev8pmUsX|xXjE=+{qd> zaAzD8v2@e}iyry)0kBW&;x*%@yXs1n(KCNs6b8sED~mY9JYCUarxJbc#XaHf`*(UfycNgm zD&?ws4A^XxPmAxoAc#kq79a^r=KH-ucNri(vYdrc1x}^hwk^J+kV-r&@I@C_VQtTY zSrP15bKq&YV);J)4Pl_rWp;HO;0}K$AXM$teVB4%p*4LYH!UldK?KDOiMqo!>7v`9 ziI6Z&1etrZhQ6^^bx8n-vVy_UUfnQuDb>ya@}fSMv2-CY@r%X=ne>dkuVQNZl6;2D znB!Zil}BTB<(ZO1+-8y96uHbzQ=$2P1Rs`_W$$0^I2t%%cZiFi8;Tx}^;O z#g5MTRE){2b7uDg84pD<)uwp)`ur!dgmH$U2zXlBoKN~B>O>+$&(rn$D@<*>F2|VI z?cPBgDyDl?s^`(()cKA(OX+_TfC3eCNON@#1q=vEtTSxtDx}M3R39Svrk3!et=|$x zQEM4SDt{qgN@!D;MYd<`1qvY9dfyt3(vuIhSv~Fbl&t&ia1nI_j;{}X@c>wff( zv0Kt<$ZVSLg0L@7{FX1|RKFNGrIJQVorQOtBzm z)}RT`ySq!=2`N@X-PaugGpj7d$k<;g)>J`-yd|3sZfpuD7x7d1U?)6jl^c9(Mbh@> zUcn-#F=!8f&Zu6znKYHrn4G^xj8`LUV|b$oG}$AS4N@QYTPr})Rm1f#jk$@@10*}o^$Vbt+i}}ghEphGXg0B z@zgE1u_3OGn;f~NC?vj2tqN@)6&_QBd3(Csy;@vHj2#A%_oB>#`r<}jc{xN0Z4hJS zTHSAo?_}Fhg<5}05Mkd}Ef6#?iLnOBIciNB8;dk{Pgdf3Zsev_ujz0_dWbyr0mm`vY&Bc>%5jYgAP<>j z=~_xaT{h3Of8^Vw4XvZZGSEicIg`GpK|@{PEHHl$@+-eCO5))4C}{8d6NdrF z;^>28B+bfI8j7nQ&t{1$a_fD`L}z(M0#`g-yp2;z+5H;H3X_yAMGju8qZj*KjC)S- zXprPP_Lj+h@o+`HLBl?t7dIovag0_yS3=6lZbQ>Hqt7l^(GFtc+8DkQ#3B1Bg7s?R zm(LL0@0@>PL#x;?cjbmWgZPB3WcmurmkxXsJ!S`0R^q0S5jGyl`a`Uv)n%U8@`g&} z2pS;<{9TS)%6Vtzyf`vx@OjKLY<~EYDSxeetzSG0NUgq3#GBpfS|4tdJEjgrWf}MQ zB06iY7qv}QB-PCRzPyjvdRD$GUK2KSuX{HiH!6RszYS5v57Eo`PtpL{nD6(B4Pyz| zqXZxK{?LPt^%*h%Zes~MhUtKn;;;6Imrs&VYejxz- zh8hG;5GJhn6bWeAcX3I1{envWUS(fNJ~LKV-^*idXnf32dv~)%8-1NE_$q(yrV49jy{t~+ zaJgE=T~(A;gt2b{g2fsc(Q{YEAGq|XA}@afkfFZqdw;S{Gc%Vjg`(c~HKR=gQd?L* z`nZ83vJw4I;d=iX{pGz~m%eLdCbxz%qZwl5zlG!2@`jtAn0{PTyj<1qdHjR8rj*8c zMok^d1}l!`_&c>OhZe7GFB%zTw)+=;+#n0r8O_x@4OZ=%1qLVX7fCU1fy95gH!*($ zR=;}bhZchRy{-e^?+mkCuEL6${Xjl$>Lh%xybR?89gY`Z`Jzy?%Nq>czR+9<4}08H zj|%de_i2bb-P7Eo3cTKR=sCZ+qv0O)VdZ`}KNvP6WPCo<+0-vEZh;`YT!o zIxL|p$;sC(%`*3~*6L`iZy&Ubi^G4leHJaw$h`e6j#f6*4~50!aSas04_WeM(i@I> z&AvB0ap6_HuCKU2$4_7Kc;%LM&sRCdMMHQSw4&6b`%1`*C%efixAh~3o_vWp9^2#{D6{;7?a8IGO5b7GV{~VWAkS2g z%?8S)CZhTowb8=5n`EvdYBXY_>R0m23h4Me6FmaOjw*0bE~-#VnFJQ|Rtg{ND}Ek{ z6TdhgN_^#fbV^leAx{TijQxKhBDDwU)6`^t#D_zCp5i8N_G~qV`H|b()nN)$;-@LpT@ROm5L9gFpgP^PN~I zsKdvFp)RYRm-1FFd8sW~kdaY^J~uk5RjwD%wq)T4vx056NgTB=!aCOL@e+rx9P5Qy z+=62P@zIYqg)qwTH2r^zuI%kSJPMftMDSW(8cAm}#`+*{w>AOZ+O3=@b2a51R^=%N z!{ozq?upfZTMgmW0ZiYJV&H5JX%Wf=1L^Ul}mj+=bsSaTC-`Q3g! z*ZDJ!^nJ-_U6mi!!mdw}+am4jUDP69qaZoNzOFX1bVebx^Y?!0AJ#`8+V)yt~h9cfJFohrWb>e(J{%wb>!@;y<= z?@^PxoRI5K0kama;~IX z7Xw?%_+>pSrop>)V!zYXvGZakcxyjC1^r^bEZH)29F-CwJR0@=cI+lE%@1pSv;yM< zQDo%@^_~{Ii70vq5j$-@sTCfLW42qx7YVs*Q8SgqaRKVWF_~!rZY5@k{w~ zqfc{qgOh)YnfQ_QzEw^^v@1%{->mf7{6y`d0zRFZ9)CwYzn#1SyuF zW3_Sx(08TkEfQ}JOho%FF+tzx>hs>~`uF}##sh{Xw;jA85ILJXltQY)xc52nx^kbl zPw0*5THFW==H)!<=CgmOOYgm69#{!mwu9YHEE<2;8LMBHkC1VZz}{yPqwwxKCS^^U zYw_8A+ND_X0-i?n;v)<*9%BMaTE%fgPWJxAVsnyjqWx6hoJD5+P4Lf{7ITBX}K z;kV4J0#fp?eHSTQ!f`KbJm}400e7gw<$+!xCnkR4?iVpVI$}(i~1y1e%~K9)7*cJ zLt>|7V!o>Vh=M6LOyV9o*Ft_{`n1(~%rz@+-?lv$BY*BLlxvx-pIJ}R;@+4-tVu-zUwy$3w(baGZNkd4CA`dgGyho9%zS~6~yw%p)>?Bk*#FY zp&~hwy5KDvdwjkJ$y49kilmSjuc9JfDd4(Nv-?R$jo*(sP3ohk+@t5j$cyjqI9_wy zsv|yTL_iA9!qxSGHzwOYxM4Q5J@d6;V5*O_fB=J}9ciCS$G8VLo{uYtz5IV_f69bN zSgwda3&L2SL2SIZqxgZl5*l8>tUI)|2&D$Y2oJ5kKnvbdS%1nAG@nh$h$54|aHApfyNrUQ+$u4!@?_7U|^*K3Vm%|5bK6o;EN>cj%q~LyLP7m)@ zbpv~Vui}h*@d2pKe094sr%6DopO#h3H+!Tne&cdXwn=`wiqXoq$#F2ulv_H1J-qiU zVUxS=)|$1CSmw;5-oAfKI+;S%XTv+zIX(;%CcR3Lf7EGFmTnkj$0ON@ZSPfahD*nNMYy{tC+;LBTRbtVjx zRIBZq>5ytlXV9vqg5m>dp01{R1+z_kdrVcE={o#NB?`nRI zvKaRd^6fdbg>IzO(f*<`!+M9uv0h1yPw^F}romnXyaHH>?VTIHt>+r{{jLI*DJJ|= z<@fm;Zl*xyRH=XGH%(ZY=PK-R&ZNK)2hP;nccg|u!b)bJM@g8oD*T`oTjytSb$f?b zEt_+2c?GkDCr_Ur2o4~?FI4^5m7MoAfzVRx+|R<8HlUUCA_&jnQL>kd)7KYb2|s8a zIRf*@64CR0Bq*`@&QHPFRnsys5|b*T(u)dR(16~a`L%zQ$NZhiE+{lkqwp6o$L=j8 zmFx0>H-4zw`_a|?-uuae;uAkLh5DX6&ngE?BVt&@r372|L0@he1Guf>a6TiOvB5dY zRFOx^%ITdnz7jevbk|L+Ui%{p=n`m%=*|~pJn7W!7 zNu*Zvj>EiIGG0vxFW+tp8N!9f!?r)R?!!G<)8~I{@>2LSo4)H+ztBY<>j677FR@ik zp5uy;6)nUj7!kzX)ZMRqEe)#^FG&6zI2Y&-UrjS@{p#6lB(&Ge?tOXhBg&eJByD{8 zCgb!PLRwNyob(C5G3=7J8ByPG)EeIl{&JFIS-*vTUp0B7(>+` zXa0Zv;pw_hKCb%yz9v0tkjbl_9oZdY#2#!l2sKu$ut#?t0QnjOef1^HPwA2QqL#W_ zT_8DQE7qn=5uwoVA47S9(jFdEx(S!h`%cHj+n{57kUx$8DzsSsaKl2x!HT11yt>Ks zx^3g*LUrydv&q*x5b5ROP13jq*+RXlg71HPd)hD^^JM-ogEvzhnAT6Jp1k*MR1a#| zOoG7b=krnw$jun6|9rn+`!3#Y=4;dV_Cv~{`2G&ImVBNwosZUc-=&R%Cs?98kb$TD~=x=%9LAbm!i^*^N#H>PRmAo28LsOL8ym)`iH4~fu z+f5q4uW+&0yK-;3ekg7Xa1uIXs9qyoB~s+v8nQd%KhrjIoO(h*1M^(RpyD0YEA;WN z*OyQCi%~!j1<#XozfHG$;aXqoP!dW=-w{K66JG&k%|EPX);%N6%wLtCc2yrHB15-! z!>kN;OUT}?st!b1xE4`L%XNSKN`{4T$86DyT^5lkN1&{fLp3mEgK%89j8lWv zRd1oyW)VANSk~7gKPCK?j$Ec=;z7Zo%`A4Bt`lZ_Y#DnJ{pVU}UqMkS-@BH1e650s z^ADEYiZ?zUk5(!|selfaMpn(Tvz&;AZN6a7X5f|O{!<9%x8=70@CE*CY5v!);S^}Uy1b=WuK(<`mP+9NioBe~o#YvCr5u3AEaa7)ExgykoHC>88%CM(DB2)F<%|F65;aW27O=?-9_|4!1s ziuXYJ(vk>V8Zz`&p2unYa@+^YUM^Yky*(Ax+2$qx-V@o#x9Of%S?`d?zwB*FBZF8z^@mbjnHo01=68(LnIz@Gh))*pf#5dC% zoMO7@J=Zz8Zagr-Qd?_y-uMv=b=$LZN%xmv4}L zc8JvHJ~8(!8@m#f!>wKK-fJnI#q+-V9EC%|f=rkdq|7*e^?B*}kY4EvYkACFUqBpx znWCsRftY`{+eM}?y~S#m&a?&$^AgGJM2 zszha~O!cL8!D@>l?e&EP%iU_E5}>;Ro2PIH+!K8p$oSW(0kkEQ<1T7%+}k{pNpPH0}oa>Z~J&SbRxeM2=%bjNi?qE#vNH z04Ze($+=Yt;WGJ~^yL?;%o;i#x?Ff!XS#Rpn7$zf)~gmf z{nVt-PoQ55lL+KdeK1>W&$qYtUevqLJ;YBp66>XoP19Xh#!f9>_1E#RKPefo!+L*x z#|g@V%P8U(&J;+QV+roA@!k0Bnxj~UmA|-s7I4>|1@@BNOCXU+~U9DntiS3 z`+31?6?5v%qw#vtO2MAIQcE9DCeD9qb$UY~5z@2q(saS~b*o6;=60}C`sRrjvO=ku zQh#ZC{Utnjc%fJ6uG}wK#~nqFUYNXjz1hI2u7xhKx@DFKH!I#%=QRs7SL+SrBjfX~ zmuid;t78t2x@1#UyT#NpF(jei1(c2cwWRs+W0Z*1KU*#V7#$yD2JafUrIdewS$vc& zMf9T@#g8&l8h6MP2lfwb@R*inlvBu#50pBZ<-HmyY0 z9g%4bN%rJ5wqI3%u2;Egi+rs2{*!8VG)vt^ z`^sMKgy|V|DA$@5-||#fLoI&?EsZjCbzaSbEjoS7R^;YS4;6#4zU}LbUHFT|L{pnF zIaALs>1Hz&!S(tP-rIBz90A4Zoibjnw8{PbYEhkkm}hf!eTaW}bxP-^Z8N7T zME1BXN)KU2XX9d#pz1fN>k>M{FH0uU^`i93d@0#ys11;-v+XF1*#ByC#w_h>>!E*B zJM|Wav3-qv4>zCtd%96wp(AU~{D{RBYtoIhvu%-vJQqVg)KiSoFbB67eO-f!xZzjp zhESW&J1+Zo0lRhJb|rs0PF^JOwak!?tx523v!Z?XcB9t$w$_Dk;GSEhL1kgx!1aZ& z4Fbf?YBais0tzFXSZhU}`+d1izQ3)3#lCRCYf^l4n`;cD3q9%c-cK%wdHbmy6SbCMcm~n%1#nAI{hwg^z_uPG2ZR^d*ms%&hLNrC)($T4ZmfBUC7$Y zx%L>jDNQy|5a!+%6=M3qU8ne+$Ev=v&6L=xZ&J>4^6q+EttkWrqUiHn@C#yWKG^j>Ely};p3BcIibrsa}|n* z@@TYjXk9;~Uis6v`qd9Aed;Q(__cPechB^pVe)B--LBN=2~2&oNy}tkRu?J9OP!T; zEWwwToEfyLqwD^9V|2?XF2Q4HC;O1Q8~uSdYinZD%{YI-|44_=>mI?#(tDQufx0ij zm%QwHM-%LWTzrqclAaSWGQ*-T1M}^+zq#n`w}u!$4oTtPg*D`Ydc6x?qrW4kdA=uX zXQwKgm}99jIq~9J7Vujn1bJwF#*NI=!+U|{agP;y5Mr&_mBC_%$hvWr<@(26JW6Rw z%kiEwR|bE7-Asr+m#($BotO#yK90AL2*$2P{HtZHdW#6=vcEjxNZlyI(EB4_uKFWc zg8YuAr-YJ-wI>SN_?nk?K?o#Xqyr{;&L6(J`O%0AL9ObyghIur*`lz zhMvaY_DJ=pLPFH{j2zE|CJ`dp4*)K+$=)ZsyVaLi=cjm&do`HgLixQLyaRWlCZzFO zk!XK{c)5gkGJQJH5^mc@C~Wd#guw9mJx4w6ymf_k9?zR{HM*n`cPB8>m93?r!>4>P zOJ$f=a9n6IOSkwzKQs>ij?dgBTw5Ii%NbrkJ$ONsFPE%cV&3|ioS3{Zv7vB692y$h z3h*ul`)EpR{6*)-cgNqCif;lSt518XIh%z! z(9w9H+<}5Z1VyY&g?zuYgj5jnCN{3D11Ehsv6alGS7AjzT*Pst#Q{xHEek3f{TGk8 z`{tEb7Snj2n68J3{Fe6%GnI^`vE?5&hj0P8|_-(JUk-}tp$td4JY26NYAc#tP+x(e3dR-Em)-@UkIGhQ z53o9c0fnM`04op0amPgS11OE+?sP?~)psLPd=c@8SXCv*IULUl#X>5OKqL0nAOES> z%hZ9bq3GqUhb@i#U0$WFvM5ahMT>tPl_jdH1$Q`W&YA=M;ezn^QWq{a9czwNg_zp{ z``m3X-lp*IU!O*pcBS)j#CcPY8L_S0#(Hsx7ZVuUH@QBLb@*I5=uKo?=VpIh1idis z7Ih(`r>^FLetErOR;c}#IC2Fo72t!By1g{A)axxlP4NfCT^Pwid;|np$m!g1ZU44o z=3!D6Ms5@PDsCeGrugzG;*xj5t!dvQ7~k;zFht#lbXGiGuzRKn(9zR|yYcq3ex+s!w>*~eNhyJ9+lE>{*zXoOJJ4#-xfqZ%nS$7L#b%$qlZ>#oVK_{z@bOnDvZOQ6@=jZ*I zgJltUCdQvGaMsQ2iqq?U+S9x~>`FfaClR#r`IN zW*YB7W0pKGdbdMp0n+5fgj{Sai91dO(Y`+-G%0#-(<2c zB5D<>@kZiiU!-1IQi6?ocLL|`y@|LfM>fabpy*K}fx0002Oazuvs&F`yE>xm#NT;KgV9 z0eHU+aL8h9!IOU#0P%jF*muN#Wd!);D1hdl`nuJSsa^j?Qgl0mXU=)OINz11x+$iM zJ}m&E`3tYv{VxN+@5KMvzxq3|?~PgF(b(2z-3oxUpN*(XrtnSbzZI2A0T9(+7zq1w z;{SKT9{{jje)q#Ajma+MW8pl!@8!Qd3T3YF$3y1Vz{-CCkf!pNCLuXb_`~0c{xcFa z|4#U8XHtVRqXO807X8XUpU<&Lhvesn$_5%~i&`WDEs_OIwF4`9f~gi-+DPAX5AJjikksRy)`M)S(-H`0<^pn+ zW$;eV%Fura05vHgLIz0Njm;vhEmYucQFfCP+VMX=?{Y2qU>8>oimd(b|5rUsd+8O4 z1P_p2r~VYW0Rhiq>_BXAx3u)LzvL{d4QetaIa&a}{s{Z9cH7e^yRmk;%0p1*p>=4a z)Z`cI&|=v`3S7_ctXKxQqV1Ln%UqQYEs?7*gu8!L%ScmUZPj`c5_XeQw(x)buKE3c z{Ki6Qd)~t!zt~OQz%^Cjmeb-eG2v=SBFL??CVSHH0a#N5mc%GSFO3mrz>X|nnY$w8 z9k3kT=P>=}HP3{!L|BfB=$D}^S2wW|SXh>hNO^=t%| z2yST5>%5V@^v|4ZVPpHJl?*9)OB4AB2fcqz2ffNkxTcW|(?s93NdCzp1b7 zU+mzNQTW3xT+g!ZX`|JDt_SeMcNoI*#IV-?|F$z*bQqMKDV1+UM4gLDQ7&mmg{dgO z(S#^8L7Gb&N{eQ5(!d7`Ad*IMX98>2PNA^bYp-tw*CC# z5}Y8Zqo)Kb4F${cYVCX(0|S3>mbsVKY@I)rRLYwm(_Y|r@t+bRh`IjXg-TPvNZn+$!0;7!F7rLwZqlHP+5H6j_&5Tvq< zGy#^*kd^aO>NLUJ^Ud7S)(u)i6s(&9XBlt#D6UWlymix>Tn8Hm0D^xv@W{dUuU&y} zO9n{#-};}N{vSGC`@L!LznlU>;?yH10QW;c7PRU)mc*R0)?ATbo@o2Sm@3&4rac$h z)k8ohUY=x5Uy_;1**n{@aZUlj_`ll`^1<5jSOU4K9mQh+lO^5ny%es7j|}C!fBIgO zsglgI9e0&>kRgFBPqTl2Ka90K+X-y(tYDre`jP&@(3=0XH(~e}FUg6gQ?F^eHt1U( z|9(4hS?AP$u4DVi`yW;t?k=I!NI)ve8*mk%DGA7R$s0{5hJ?HkBn0!{Ya{_|r6<8s zfZeJJo^%&hMA|Ta*a(@RF9QX6uqf$3TCW&FRw`}4gx;Eqh|7N=nos>LA#cr{)e7Xw za*9?G6=)%04RaL`t_ni)_vTA%@ONqYpv{4dfHH5A0s`%AH;B@L@Xxq6sgf(CYM!KM zb?#|F=>UI*LfP7Im0seCV`Q1%A%x~+9J-z=iy&p~U?MPAkp7)F zh@?~A$H_xrnKysG^(NT&w314z~xdH^M$mq7b?L?9qZ_~Gj( z(=__SPqg6=&q%3%bPxR5&{HSw4Z(X0b zJ*2>jv><;x>;N^V9fE64CIi91?ZM)&i3dosO>wkAj}*Ji_>t&z-nAeEGz7nS0I#$C z6&c;RSq{Ffr+Ci8`d81ExvKBy+RFd%hCjOck4`Rg%G$!p4u1NYvP-$}j`8bylB86v zY$mxAG~N@Gq%Qbl?+2DS=m(aE;cN$UW+7E`Ws8495L8-8a$+rmqU0RZhPp05*#a|+ zT3#{(jnta?r=N^3{sMU`OeMfhJUD)O;(?N@b1qWS7SciAEz%aPKhN~%yPrLVd*0iA z`upB$(&MK${?76L(icZgX^M8Wj&(P7?h6p-8BnW?1H7y{d@5TQgIcofFcicRiSEVn zU>$#Yme5K*m|;Oy-w?EFQ|Fs6j-&@hsMje3rKX7uF&V;qmv*95S545t_!58&ZD_?# zW2IV!L@QiXvAfYB7`yWg_RrsW zw5y^8#Z{hT^7F*M*XtAJ5TM?M=j%Qa2~B@#VllB7192dtxE3HmGVHPguAB^izM%xP z0)K;CI{RNwx$E$^4ym8L7V=)(wO@a-hZ^nFMZOiuqV-odrOAKrq;oBu9R1{&F?Cb` zw3aGXrLR?%Z1^pV3K|ME+HC&{W$^G_A)Iev8Uqg*0zz?c+#Hl}DJWZHpi%@#0Reyc zxx=a~%`R_bqT3X3)#MUEtpW)8X#3@N`|%c42d@wqD-=(^#qsG9oPNtH?eZ3e+HRd= zQQ(GD*yOu?{ILBGXZ~U354SmW&hIy{Yk$Z8KlH+JVlVH3Ph4yyLG!S%XeEB!0+!On zN2wzL`mG%rrG??<9@=8{k5uz=3zUDeB(CFRdt9iOr7f)MTk$lc0Hc%ku$2`%0wuBm z3ZYe=n@7rvf2mr}A3LLd7B>~UY9)oyt0V|AqT@ztMW6{nqqTmUH`bX!PcV1H)l+uZo_o-+1Q_l((FiXG!@S^MkOgan&aGd?2%YUuKH9zF)FJ}Jb?;XE& zJDhTt8|~!Nzj=~9l>GEm6^4K09uhVM1NTw}0zg58a%i=Z4u&lVhgA?lX$NH}TFuM5 zi6d=`7PENVZXJ(6@azQ?o_>E1^Q%(jmfVglgtl&$V-yb_5m%vMwrv2A2IY{z>_#G- zpe3xkR`|g*$*Uow|A8|TK;bXW%)kElIo5yant%Kc>s${fXZKvz+#O5Id1D)Wq*O2@(u;s3Upk~&D%ZD*2nc~@xFmn_L=mb2&{DO! zV~R&?&grkBuvI*eDuAvGkeguC8nx2yNhVzHk~Ko4Rhl1)0;ypTmNo<}56TaLJoIqF zbWnvcL5HQMpzR$nDgfnfaG+8n&~Fn?d~ySGh2JwLB*;(w^+&ffKfE0>`VYGwO?CQZ z`m{geEAWQX7We{%J9&S?pLj_t0|frq>A;G>@BUQPerpvqaj)0^<|iCHKjW|4fAf^` zbP_dzDCHf15y)q|UT6zI`_rzNSAV!C_I0>cX7 zYKbC#5ye0Lzb1lN11P?VYX~8-R0vk>j17SjoKY}1p7ctvfPR1W>xZqhrhobs(&4XP zl*rY;*2Di;hsQrQ?Kl2#o1uQJoB=*c7*vU{$__x|%}1I6DrvKbkqLWEKtMJCLV=1T zJs={@l0GG@rKf(BQyKz6KvJyvYh!*d*PrZx1{@bBL63wL00u||5X?w+PXRXC8~WN` ze=eo|!Sty&%k+OA?EY~3#|Dsk{nrlv^^O1H_vcr@H5LDhTm4xJfAnYSo2#c)gJPjj z3{y#LMF_UUS@TdvH#zC9GOWU0UPIU3xIk?`gNS4-L34@%qp`?ugb9%4L&QZ= z10Y^%j-jAc-O(V{Rck>W{9~^NTKJSFQo)J$LSD6>+^nAtumY*dPkS?HC_imuXO`BW zpT649XU;>bIyev@q8JFlZf*j;l+XnsL|}f&+o}W9MG%c5Sdj2RDmLNF07`=brPGTp zS3SeqU0Z*1*)A+g1-bge{rcnGLRuP^6NLu)qmM0NEmS0dQy-u4OZh+T-GAz+q3lIB0?3}cxy|HSM^@n#+e|hn}zG+D=YByo`zR-WnDhNF}YOiu_2>12j_-z*` z#9D}w_Rx22w}g)xb#r1#NCUOzgLeucj0}BGQ$>Hu8)1J$^2vPa857nJ0;GQcQcHkx z&)UM!s)38o$P_hvzK10FWKbj4J8Z|Sg`47biG}`lN8z4x}0@z0~=BUg3CZN=gKPSL3`aI zk#~PJ55;QP2GZVYa)p^`@_J@6FoD+^;HNL(nW0wbfAb{)?dPYY!C(D%^1k@5E%=wN zIC)s0*H!>nK&QVW1^?KSKfJiBqYjbZdgTOA@{!lkb~lV>7+#&Q%rCLG_TItur@TSs zjE@Q>iYEwBi(F7jLWWHK0wmcvpcr?%szL4aPvY}``Ha#y&Zzc(_;H@*g^j+2mF!EA zb>vZ0s_L5s5aA5}d8fim0!R|a45IX(cEn#hmz-1q^`CI)x?2eYdDJ@#ubTI_FJ%#b z6Q{hApZ~m`@t*(kpFL}nlRGdjcsQPN`SGuR>WmA)@Q?raM=obS=+rfT{hiaFB>r+& zs9r({yQ}Gkv=7!ZfSpf18TuvCNPHQvD|*O5E$L?rPA(*xN@+YWr zU*7VIbkM@jzxFK;Y{&s?u4;QO=zCsEo}AYpgu*6iJzAkOg_?wB2-EHX5Z!6*n#oa~ zPiPhZGA?3iJet?yK<^59HfKS*~Ki#&rqF8E@E%eud~&u{Cw z{yxVimH$1y)2EWE{D&3&Km(}rL5k`?Lgl3k_(!`**aAxlywgUyP}q^KLi!~QZB+nB zN?!P09;cnMar|}eNZ(hGpUg}!!?%Vw_=lJM^?UwP_x~%d`%f%4g+JZ$$9DgJW7mKF z&teN6Y(tcQ!6^}HFEo-igiB;;RQ_d#GQBYzEGm%KkxlIwbT-APltbW)W2*^ z^A8tH{)ugtxuPGaw*%{pbN}1c*8lhi|Biv5wrbT<5J*=cX&V!)l|I0KT-(}RtZd&2 zbxH9&mIPAD+=#+?0G^J#t>-|@Ga=IoMDAX61wu= z^CD;iC?^-m{8jt^*n8KW)p@3E?7#F|g(|`xp7%?~ja86%BF1C`K_p2mY(1~M@f zG}_OmRIG>p7`Lk!HSSJx?X0OA8|M8=d|2Khp7M44)h9 zr?pdzd`h_`%XY9Ws03lQtt{OMMuxXVRX~`IiO{8#7z(5)>Cs5G1!` z*fe+#I$AKID#R8T7_*|N6@5k4!YAc$_p>vWW|4+mF|pY!8*UBjYR?`J;oQAcBC?$}Iwka-n~C zOXLx#{y}CcP1P`eqv&n0b!#h#@s%kr3lSrhIgg_^XR4&E4PK*I#BV!Dtvpo;n<_R+ zb-4vw9mlI^yCkwq6rt~pUf)yPfVkYf5hI`YyhlR4OI^JsngIqoU1s%(87xo$4U;8~ z=_jrf3jI?tVJ`}CzXsp?g}uoB@WaG^@f+&>i49i~}gGpqvtKL02bQuCG%UI2puQ;049XHrE%nxa1Rg;9;Nb{UmO}B#LDLpl!JeNE(S3XaCjYE+kj)vvB+Cqhx0Q8 zHR1uc@W>i$#{r6%l8Et^#T(XKj5qtl^FRJU?~irq6Y})^$vvm+S1H1`M+?;H9C?ZIxSA#BbZniCG#a&O?HzoR8P6ltofXsNI=IHL zKaR_PtB!oh^FI6*3fkNXertVxCH?aI+ZQ?g`io+%Iwgqra^2^~k<3^2j)+)>WsIc< zj!GoAO=h8;nZ(V>%Lsl@#aQyGf;hp7-&K@OkzqZoHPGP?lkBmsGk9wVvI|>|scjHIL&9zbgns^Ck?pa~Ndy&_X>75YT>zM5_Hc z{rX+M3qWb^||pHjv-0W8{iIdE&QI5#isw@_XLGQzj5oIe4&VUh_!9}EAFd_+=5XA zzJh+~t6-$}@|)~wkmjgJY)q9Me$PUGg|Vy6MD8FP6HXMf9a$7?4VcI~+b=uUONTWI zelH~hadgXdge7T;lUrLG>m0ddiagSoR2TVPkK~LDAJSg;U?;nhfKM3Y zvyQZe!vIbDbz~{l{UYv^DqQ+A-}QGt?`(AGM-E&&`iW2e_9wTmuH9l4{9m}DBj|=n9y6?BFrGLcEgkH1@U5v|5 zoV#DIq3!5;z{PB>&`L2}X=%Gn>X)`EXL_bUS*|niVjla1v5U%MbOv&N90qlRx+Nyk za(>q0SoSJMW>d#-r=>I1w?K8vfbtW^+K73wL~kmz(p1covjLy=fkFR{>xh5ndpz=I zE@UEnHu#K@+RuwDKxcCGRDLiMDq&!4>Cw~)-ta)1g@-$2BWMFkW7VZoZc*ESf*o)N zH=v`3??OOYv`(Q!8~<8=6Ha$pNF4Aa*=2zWlss%staVF!KSdRDAnfBlC!rSz@AU$* zvo!42{@?r+%7V~tgF>wP^mm_E{k#_=syI+ycO~6ynE@=oyYpd3B^#j;nImc;Jkp7b zQVY>KaO9OStR<96C3hfRgd#@+HfWn=2gd-lT&Uj~wiL|A$gwlpyH9NHFYC6gk|my! zQ59uX?!L<#qANtjSaDm{yN1Zr_7NqgB(NKU2N-2DJw%zWwJ|2zXdP&rLKfhm*IbRf zz8QIG*;Pu351uD~r#f*{jHTsS3#J98_0=jXrZW-SOD_f|!X6OFrI8mdx4?CSc+?QT z$+?snDAnxua^OG@ij(IR@0>6m4ew2%Wt0-0Vqi2=7*|x(2zw)3Q}h%*JxYb%h!7`# z=3hen9sG{_d}8dbJ-5Y%y8wZ507}+gX9pUO(Kf76{%$LOWqvF#$y8Zq^t9$j!h}5o z9jK5mv%$CjJij60GV)GUc2J^vx0wL!-vbu_iyg~CL&A-TeA!Ou1sQ_Ry61!6k%@?3 zePXZuoa=SNFFiNP>Zqy_(Ae2@Qf*_M0<&c;ryZdN?9^hlW4bqd6`7sswS$O*iLe(s zQN0iD$N%Pkmmdt`zr}4_k^4t1{j5Pd`(iQugX?NTI~+UfeQ3Cdtt;SJvEfUB(==o( zS#aMEyzwCj6=tA~O=N{fcD9sd%DJq6^7)?d2TdxTEC2nuURC4AYh?Cx!bN)kYPrBG z?B4ii{>Izi`Pnai@gvh>PWZGBd*zfyck5hmqSUS?c( zwt>FaMrR8%vK~o?K<_e|;MzytXA{0RTn^x93x0$m@73o+TJkr4>ko`TZvXflzt_)w zAGK?l6$&jSs)RDi9DPlhn4hvPmoD;F;1=YvtI?LR;kAY%mlb-nhYU)DJF2fwqsg4dci=H)SE(|A`@Xk- zLQ$dXKl3C=1^+%cDE?g+^lJgTk7Eh^uJGG$lHns>PeZ6Ngx3(_5C_a94y~pKuHjxb z=|@g>pxFX1jPCZfkAt>C9;k-8Kjb0%!>;_wgS6>)Tz=V+^mm@RSPz1-)R5H5Hj`A1 z)~$`w3E|n+FI$7KrRfIgQ0Ded_CZpAWD>}MFg6Um(Gr5q^Cj;WdY2Z|7-0@2dWtgb($rpJJUN^zsKc z^Lf9*9)n-|x?C@0nA!$s#1B!B)dIJ}wunmixJX{{ zT`RwBRpxqOaA7ObPux<(NBC?HBdP-pZMh0O4^j-2ZPu9)Yk4T!Z(9yofWQlGGT1Lm zve66fgGCh^xZDmcf7q=>&Xe-ev*|q1?gyYX(nx8dZfBM_wcsHA`VuC8dJEhD>hn@w zKMwC6f4c40@@>D?+=|$%uxmZm{nO9!a}ib>neTPEZf&}@btMk1OhWh^A~~lzcBNu7 z_7=0l!SXu&>T#mpyu(@b;~ZEUlQ%JO$J%K9v9I!LDSgN1F2+ZNUM_{7cX4F<8^a0u zc#fR~S|3mP(S`A)kN3KN>2ExFlcifqUN(#x#E717_ON{Yy`yw*p!lrPS|wDhd-zG` zk}C71s&VZw0fO|?5(21=kP}S-6|>hu!o=Pak>(Mx&R~iVa2X?R2O`hX0~b_JjKAEk z_v^plgb@FQ1B@rAt5I^EofT*o$Kl*O=!yhe!?fdps%C(Ef4#eZvM;XDCa4BQ1JvV{ zMI2a+&-iBz(d*M5+wn6;grBZ>jt_6vs5#9Eryn+{mbbOS(6(onqh)YNf(JT$3qx zOs}kk{N%3{fIA${;j8~Yan@~bzr`4^CDsdp1>U_39G?-TF6!WBvOpo@s|T#H%+_(m z+D;^UfFcpQhU*ts^SAt=88LM^EGfWgs0`n>_{8_}n?Kxt;2s~^%c6*dxc}mt`e~tc zacs~4M?Ka}8d@IXI%`i_8@PxGU;i{1tVu_5CJ`zTL+BZ@#4=Z_FMrjB|1A#3(p=#4 zzH{{K_&bl54@|)pG4GR6!U^Tl!^i^}&aM|!B6g&h28%~dFWbF-Uc)g(S1{M#IMhe( zTKnw0^>-hCixTl25m#V+&hPAtJvwN4wmeDIBsd!acGo}Gw1JY5L*uh2;%Or!p4rcL zm+Cd_rue}B;@9?ZJeMk0$g_>#_ea)5T&(VK`@Nphdp*U8z{Lcg*pht_aTlP$U`ut+ zU@OJQ107?66Gg}11`iSvgn83^@`kpM(?;-77V%7f&OUtXdOm(~CL-2a1$gUE9i1}W z@DfGtVo$`UgG_IFYvYoKBIZ9S(Gb+zH*O*3Kk3^OI6N^w`~6%$@-F62Bq?+P5+4~| zd3Q#7NF**pxttihR2)V4nDxWNv4XwylLz1ifVKRtpZ?&NZ-?QSVX9`e@FOQ&-@v}l zFZ%+2zk;nGQ3OWk<467#NBGSTI6mF>`^Q|`O=-O_ymX1Vz(PuuDc2_4=+ag>KD14# zw_KM&(lpO5HAo#9q^l{MFQKopew?bm=&N7z{(n$kL0(I4+aw6cLx$#$YoV_KF)vt; z7wGfq`AFss`Rc0y^!{lee8BgH>EC=rtR>WcrV3lWEaniXYMG#TV|7#61s%)0e$tL3 zG-DdhXR-xEJByD=e{cn8$psE0{s$a}vW2AEotgO#KwDjU|5?PY?Wpe`6%c@YM3|(0 z@s7UT+Md!<=J+w{W9B@TQJcOUoZ@b_YA$m?Lzg1h>KIXiO&yw>*)5Q#o)U^!T23wwE3hpz?RW$mA!+*~UM4 z)IZ~b;QbW?@@L)-{2){O{LO&5p1*Pvk?UY9tYwK30Y7N09bMQGLBy$wm-mfyZ6nGO zvrDv4Ci7v#QNoqDT7eB7sn2ISvXR?=h1ho2AYsVN_5?L@xt{3L(J@W93mF_s)w%iA zYw#L>_CxfzvN9I(AN{dE!zVqSjEbDkx%au+J_aOo$6hvqzXpbY00sGfCqE&0 zM2Xnv@q*$#K%g#%rzLio5l2cfm{y1Fuu0+`Bl5-j?5U8m^lx(Z#lJ9pyW7lXaNn$a zYFvg%pO>5b#zUo_Ji_O`F)8{y@aQ9_-*|<<77F{}r|J|O=|GKQ@thjOC6XE?yMVp- zV_u^Tw{fA5HUSVzLBUYFJkh~_B`6g7PfkDIKX16m(5X#ZnnM497yp5K7Z{^{KV`p` z|KK%$aO2w#uAui1LY*5U$k|b8?ImVzjWT1aBiO@8QntjVa!E@?db@DG=Bf-;#aIdt~n=q*FV zKfhqCW0JM8z;2L`Q%$1=qO>7Wy@8z7J3c_M(jh|iu@f~a_Oi?z)wg3iyF0~}*Idd& z#u}`|C89B+%tpg|ZDAaLIv6EjcI-zUBq0xSzur3hfp7Zm0qNHyZ?T_H#CH4mDT_4_ z7y{28eKFSk9xz?3=ifhjWWRWW6osl@6ECHhlM_KyR(T@YRbPka+6I#J+6HBR=EC+1H^yq&xyY>+2^AAcNV_+?=g%2%q4us zwa3VHJ)Q*r+7A~bv0{!l_~i7=(OTUxS)l2DaI5Y)1GLk;kk zfg3!4No$<9GUfq)R)f)bD&am*4yEKe#BT*FV~OL0bDa-RAPobeqc$ol>I_$E)67 z>iwHW{`j3DpWg3H7kkx1eN-DdW$kD0i~bd# zuDiOdZ)8q3P`Q7@rQ;_YQY7`bJ}$P_>JOJu% zDV)TnPzQm3%DU``@2FIVun-{6`AN{_C; ziLxo$M_iP%pEB<0@1OGL{=Pk4!y2?0iffA!?*H}=EdX>>ag~1bkZHnQ=pAfBUE}^( zog;|vo#VgXDMWqI0xZ_j?;jO(7U$ql5NHlb);G(kkh2Ao=b~@LXex_`e)j42GYS1P zCeU7eC+tuB67|j}{~XQ(y|beVlO{_LadZN$h7-d~IHiq9&ie#A+|rDU40td+fwd1a zCZGBG2y>8YzOG=-80jF^(R(-?4vXQy8ct6`He3ydlg(f;T=>KLX>cBPi-CC>?}y2K zRlLT|&?%Oq)7u|9+EUuQ*F$@`%d-XynJ)CTLKUoG%%QXkde?n$9npuP0YoD;Oqwi4{eAhEBN5!|o?Ho> zSno)o?;)a+{X?sEsMIZ4oGd_$>eiV+94du{y9d<`S=N9pz8AcYFe!BRX+{`}Q%GWc zk05dz*2o!+u*%m;mFN~ha^NU`u@q*B1$CgXtfAlbJk0I>KH&X*Hlh(0e7>2nhdHiL z9HWLc81?PpGwgK>#(f)d!qhQJqOjaIpjQpFa=ZWts+~5k+|vR%INXIe3pG^41&_6enSo=&HvR!ON7U zx&=Dzu&L`e4&Jl1A*dRl=OD~jfF4;;Rk1!Qv&h4oDo)%0xvQPZsZ#!oowWzevcRC- zv+FSjV}z|e=;AkB(hJ>x0?;Kx$W5Ny0mb4AtrlozKIYit?DL*8IbJXVs?Pd$Eph5d zb#6c@V&Z+@vu+wzu7NlT&Fie`W3wOm!d>?dbAdNHMv86$GP^YgvgQbfOec}4j|oe} zis2&*pe}$5{_5z-sgNNtu1Z!#dyoy!$=shlNdLY~3rf)r^zHnA=8=$JeXJ1I0usnxj(x`6c;xQ*;{zSlZ3pP}`^)e%_b;(E? zgkXxoj zmJN(5eZn3MvKw!Iq{QCH`Bk^q!?J9U^R1^%L&2k?Fm7DkqheeGlN0ZeG=O9=N9dhw zAP;fl0yYAZgBuHZkc|+<4JAOwSnv<$b(Jff_^A1ohH;)F}O+$4i-p{~Y3teAd zSr#Fmdcoh|Q{Qzh%KQ4-vWWJn7yM;?y_t6{_WJs#g<;u%!mK^KER0x#GNuz@n-oSD zz3t@?VQ%HMu%+Cx;AJl0CG?lw>#vmkdpUz5_F2)}a*Q-x*x1r3`Z<~7el9lzu_zK_ zwe`_Z!06JDV1WvKWW4;zL#2_+%Jb3El$f37~dgaA8)9oBh=<7(;_q`qU z9NZKPfg1#3T-SNW0qwZb+gmDZV)eO4ZDl~Xfro64Bh$)lrpuk#Kd<}OdvrY0bSW)_ zZ-IpFpd9EOEZQ}!QH75o?5YwsoHFFWzTxzY+-pM1-fo(`ZI;r)?VL=P64pQ>?Ds!^ z$ruy1`IjGqh{F=R<|uHoMDQ0}Id-3-%zB%)2mBAb&-6~TL(jUDWNXwE4P0gS4%pa( zNx%JwG3KaT|D(Q*g+3AeSb!>YJ9tlyyfABezry$@nIXoReEb}h7Q#*nWNn1WUhkCT zFa1%`FcVBM-aZGDtBmbUagqoV$0}(osf#)sc}z z7?zPYZ*3rf(0|nX7dlTJp?{^z|6O~ce>?HsYJ(1Pr0DxVsbXDj6YEYmxb$xAk!)Eo zH@m;n!oK${|4K_N3l`?I7Vp!~m!3EOz4sBa*3G#sbUX<@Jp9bR^RID__h9Ri{*@Dz zW68ej9V~S`E!Ni0^Hs|trtn99{%GTWYz~v-znLpyel#qvufh)}bc{-U0SX-}y$^|q zlc{|f_5R~O&wI7!X>VU-UaV1*=@_ZHbQEn1yOTxxpZC)&i@-W>o{cygu;&*OKFPuq z>xbSB#=<9@e(IkbmG-ldg`Gr3KVLkgef(D8XA3O7sE=>f9-050dP_Wi8(iu)aw^KU zWx-77THh0TGf{*;p_85Pqo=?7H*FfI3wlS6kVn7kIsl}K>i~;u<#X+I9nkOZxw!uR z{-M{zb@-wGTz}|4*B|=Nb=ST7G+@^}8ih3t7i$b#ynkX@A{U5cK^NuS8-)Mb(=~z) zAn55*%Mx)CrKj6^U5^=m_|Q$8rpGb$bSF!!RfzE65bLjl{yMWP9esb#`|Ho|#fP4W z>+(b2UsoUc{`&l(@2@X?KL8QYj~y28MZgt2Rm-N&Ignl^ZvA`FFWu+;(T9$I-aqK+ z9-sGDdb<7R{l9d=?}Vk<{ni_)@Xus^Z%8jYtr+_aesU`OR89YX%u#XA0^y!s_{07W zy(E0T-}K)(P00`aAs6jS_dj&Uq<@C@*ac7E)Wv*NAE-55M9vD`}Timo2H@U zhxR1;?*GuP`gZ@EGul+NJunvtdV0}!d3tXMxm$Y-ANnV!dGly=BHI3;Wuo8y53Sty z%HR{OTQFJJWZL;vMT-uL^5*67>*p{+$fe$#echxupS*+9H+ z7{wrkA~qvzRzmY9$7;l(bLr9g?K@gj#HwUHsPY!5x{mgMe%ZX#B7h|GZT2~vTMKZ& z4_CN+6yy9`_5xL$CHg5>_{q~xV2y!g-U(pIT!?UJe_j=TG`FWX*HxTr?NO8q9ZRqt zS>m)eRJs$Vmjzq{EIenpRK`FJqYtT*8B6oAy=K?b02`KR7n(8$-|X&LMfK> z+Z0X%r2)@rs1B}WD+$El68c{Y=OHHIpD`eYdp(eecx^xVusC=Nl&Lt6+5sYoyf=t* zOho*4_?~`$>Kko1XBL9=JzTw(@6Y|;NFhbtC+H+R2{XVF*i7JGh$u60{&7MD1h#~Pqo=rkFW{)?%M?99wIQJ*bPV8q%N7Ik z|I%|QNPj-Jk5iLI&}X2EcsxR)-I$^h)ByGr%|v}n#ks)&M1W49^z}~xMGrWtK+|DI z(HRt7^z#B*0DIA%ri07SOSF3cqzXu4%Fso$)At8YKkqHq;<*;6L>&2DiE{Jf1k8(C;0Lu28$w3loO6d0(=yUc9ryUq6k9$q6LQZ4zu*7TuF~`CEcklhA301YFmZp%#a}M&EBf~l?F+eo z6ZdI-x%|kbDEH%1edoI&@`5^`jX-Yo^ZsAn=U>~k`u_Y|Iqu&lgpb_*XZ_Rr`P9q# z^*cXu@wb03Efw^V;73pQ=@$ZpeD9;M7WznKpZTJJD8Ix$;8f_bkDlOyE){b6>1Xrj zI9nWTfW#lLsNkyrv&*$O;`k-#WBwU`pR{jZ=)aG?-2W-hcnLm7s^Am+N|A2{GRXSB zGgiy**Di36uLc2~?awLF0ZM`6wBQ3(TniB<{nXUOBLac57NXSKyM8D|zFKs>=YQUx z8wk*!*K2SgRHaA(-i16{VqEZ2%=cFG&*mV39DpP8s4{~!71_H+fXG*3o>DP?uZS)l zE{?PTmy1r6;4x&0&iDc^O~ify1}e++U4lp;il3Qi+BaR8@;Nx$EdAxw!0v_Ucu2j~>ZoKSEHGz`DNis8Yr1XiFO zqS@7!5whVh+Mi2q7xn~A!spI^hmO9>Z@nQ?`mHY=jDkb9k4D*-?!ES@5~^8sP>JfR z$|F9z8C2O7Ds)Amb4|vh42?pbj;m%rp2~rmU15aYyI*ZeWCp+T%YFWI`NvmQ9{1<* z(Or-K)BpT`{IB8*EC0)BmmdH3@6|c4y8keD{&D-idFegBKK@%@(LB0;>i8cQ(L(k4 zg&%X4J^o`VN&l_6r~mQSZ?=dJxr~2(wdEkKRQp%96i3Zxmkt0G+q-a>{BySZ5BWR` zkz*Vu+$NNnBPz=wC1c2`e1eB)iJ`dE(U{rHOZT3CP~368f#n6lP+p#-f+f%$<%2s1 z?3m$tbhx)#GdKPE@=VTugK0X3d&7oy+iLc4bGt2+)Lpc-;v9K8!FC;w#`0vI&gOQhZslXYiI{##qU^MP4#)K+ygwKB zNxMjjNqD&)wb@`8E(Z%oz2UuH7$b!`o>gVnK)P%}zs7EQKMfauUNV=E3*LoSXgFTL zzK)6AX|XX7-#~xvvcqV7yIm*I`k7mk>wUewKaA00qp5p1HLWw-cy;Q$9A71ykvF&} zgKYOg(RlE1&Osw7{b7~BSfwYvi?+w6B4mZ|lJD!h< zIW>}4y$nd39{omtdigt+4tMAPD^TX*%wp16(aS5l!F3%!qbj-aQ#TyXPR8;c<1pS{ zrz%FP>9d>Qp&yN^#e7m#&h9n6dPf?Rpdab#urcb@BvjJJ_Nqj?nb8a%&n_tk+tW!) zui;3yEQ>|qdbrFdgNJEJa9OKKfOOzvc3zMnMQ;wvRAys;CEFd!?%HC!p1kJIOWH^? zzYda1&lMXKPOF9UK_(U!JsM~v9t)CRpMlR6(L9g>EmE_UdM&%1}4jpjSMYi{xJ zbaa%wcwL@UY^eX$<9xXN~g=}5kP7EKAQkv0@u4M9NHnDEmqu6#aO7f#l`1``*W)5EjgyfV$LlhDXjcs`n;Dj; z{-|PBRrA7m(sX=IqPTHma`PsSyQ|)<@yL)1eHlJVGwTAwmv_zVB3GMzby<*^;j4;& zP68q~Wr3aJh!^G6Dlb8)CV1E3{hm74uG!x&_V9ifEf3`|&7{ZV(haQSG}%i*VMyI; zJzFMvu^%svvlBAVOv=w{wwI^Ab;%poL`sA8yJjwIDoI;(#r0Eb9b_IR3v34*X0vSO zclAs&RKAX+=(3z>W&5&;<~jpL%h2I}cyxGZN}c9x@H`r;^nlLZLSHtt|1&oKM*mvCEL!=4I#}lkR?hp6!Nu95YlG=DWp*SFKdOmp`j$>sS&c7dd@w^?{vt z*qG;@k^9#9WOV23K2w6zRF~E(bMd(QyW4&=8qNnyr=-GDWre5b{brxc_i?AnleJl< zH)CUL$t1J!E%T=NV!BcoS*Xq4x{U3W_dGw9_PM`Ziu3$_z?LzPd4leL*Nt(L?8-T9 z&shShTW9T~u$@;;-77|m^S)Y{4l@*`RM#i_x-QelP0BBO{pgJ0m3R)RmgKU@cJAB1 zZb>)3H3;;_ppfLucueTg1f)^-j=SZ~JMP{^u*Ke?b7#Bgdd2HdwqxTSp!acWjrLb_ z<>qoIH9JV#VptHmxNnz#>~?<74)6FeHP0tse~pjEhHa!XYgg}Er_SD!M=75Bo*SKY?z1UYuwp>3W%2L~zYE}O;qa*N|__BgFWFM)iiM9i+;p?#9%LToyVK!Kg6ZKL z%%sG+)ACrU?mX7Cq&gl{e=Zr?)m@$l$VM<25y#KAU7ZJeI?h8rpH3d*{Ou0vbu4#w z&Esi#b`n@SMX8jMpJnHZ`iw{W&KacgY{cv+0n)s|Ydtt zxMC6Cz>DC^qsF_bek1%V)B!j!Q(KouGvRScyHb-PUKtIYB$Ss1~$Z*$7 zF-<5QS&e!#C+mQWpdztrDU*uY&O$m%yRAJ^ILIlnVkBMnP#=< zIbU|W`L!9m)@4rRc#cBZrc32~tMGZVcdqZZY3$dootb)WnAb2o$0>1*NktYkYrVbW zDmsDSaL8SvEe;kXhMMz@y4ZU2t#&pWJeb-|(b~p;e7w8{ulHkCaXeK|7BkPUEAaey zJ%*NfJ`P4yf7SWgD$M%R ztFjq?Y{O+f&BzrGaY5w!6|JQ6kgg}~!=caI&BOUDiX@WYijdY@$IEAMT84z?+ic?J z^`O-hCBGU2RNSI${Iml;Fb=QA&)S>u^tPkX>pC@0da5j;uHI?{KC2r%+FZ}gJh1L& zqsV%pmxJYXe0R#B(@dLXTjoQuy32Yv@$Pzm=Z@0pw%b0B?$8cyU3k5Dx=cgrYpSmu zxBh%h63a$bEWK~mHY#t+Qhi42uvp$~<{fFO=P&Q6((W$9c~0EpbTch>uMS?ixo{!M z*G?29)unw&yXefoOt#r+7AO&4QMNrC4|1xE3cfu`lgHX!T#7|Uz zHg24m8Z7QR{624;&DkH_yv@CcA7kFtkWXjNMhoh9J{Sj2Hrgsyl^$MgR$U#(NQ~Wn zv8%JoiG|8@KOL>dt0<0lCd<{s?s>j=+lKnNvfs}nb?st$ERV;P?FUb77wy!ka#f}0 z+ZbOhZ>zS|_N@ob`&u=(49Wk zTBb*@({wl6C$I6VLhC`^F3*pA=f}=-&EDzby4K6b!i|07ZP=Sjo*R0lYU|uphncV1 z!7aGbJ9-VcW}npQWc0Xom$irDTz^>pbI@<&VxN5t50)B^M_RL@CqYYIz04gGPURD?Ml)n%BVa`=ORoZi)@Ph?Cy<4^0LgS zg%*$TK(ouxdY*%MusEcT__)LO`nlv5kr)fc+hK+7Y`?rtn$~^pCpeRTA9Pg>FW%+x zzC`I=v3AZhsyFZUdKlHu_ttqU*XB)BxmrH)D_FCIGxxW~lvc`dL+VUYDDj6l$}{!2 z)>V=^PBvcDW4xPD<+i5k$fu4wduqlq-BONqf6DWQNcv+}59f5N##FM&W=H7jksrI# zctCcsJco1Z_VhY5XraRTuSs<%_3a_ec45j<5^Gxq? zodyv-*LKw`F8S@1vT->@vg!uo+R9Iv^~9@kskUyp+nSS2cztT+HbmdrfvYSqMvw%#tk58dH981K(&aA591sn2fgE1ggI<~k1CGutcA?QtL8@AvE7 z@R_uY}KA_^Kc&cYEho(l*G)43@he1sOAc9q^4UuD_P&( zGBR;)=Ws@ReR&@T_^I#J?a+Mgkab1pd2Sfq>+nd~$XZrWGGX{EkJ^;9%P?JNlXxeq zqpldXen&_1>~V1FtG!Cg*VBl!#VMF*`zBMO)o{37sME)P&S@QYf=`6_Ih4}tUf8eT z{TQ{ZDZ=F2xlZ##D_TTM3S{OXTy*HrTHp`~nQcJsS5;1kWXZr3PFPfqE{uGJl#Gkf3N zHGkjVEO}~M<#oXC{b~9ro|&&~ib!>@d|<12^3In2=w?sL_UKyU$$pcKrTFAX^*dRNjZyF)vaJg_Dho{*hmM1{v@-pFfY}>1ecOA^75wzpt z)YMHSwfA@FU3Lm7_QQFs;zn(%)A2RTrCepR(5SB68k(%aN1v@`&mFAM{G#y1e1Cm^ z*8`e27sTq(Opz0P?YWH=Mz7oQtUO=tDH~1frG7l*arK;Em(`m~=ex2$M8|j^uCzf{ zC!@PTq)-}d3pGg=Td2mYHSV*g_~f6N$7LHIb;}=}X6klb9Ax8gI+EdbFbSsqJU*@`>Dp2GVqU zQOq}K*Za2?y;k>GyC$QxI$w`$EjnBu=3;1TwF>m|m8k1(8!r{I0uKCS6I}JI{<+pBs4fyz8 zuG`zf^Q`DGN6U2PWwc&Al-F?4uq*GjNPR@djXb%cMe;l!lD2Hrkww+ZJ2$;iu`(TU zipGV?+}(3O3e+k)jc4v{IA~}tCC8&Z9`Bmax$VZqEIDl#uaWV*-*4}e+!7+o>xZ)T zf~9;Zy!Y1HR^uXx1Iu_UMyWl2Yi}z@r)Cybu{9cumE+|anA&7~zxy8BzgAlhPnBwA z&$e(f=B?!}p4rLM*Yx#{C#%KIdrZk1Kaozq);t+C zKNE0>+}d+dg86Dqbm{y6l8;x?#{|wXi4iaWznr2c2rspaU>N(lpFCtm;j;Jk$mGaF zAo%>E(+TV7dd!hCf3t?pCy&`bN)dgD(aB)i1_vh$JQKci8p;^3byP)SFu=lSa~3Hs9&YtxRsqmJ*X(!9-Kya*H&)6u+U!T{xpNSKN*!RnEgEoc(^=jT@J0!XqPJOz zrV^DZlB*YcPfp{cVkx5$h&45eY>vP&8=sP#YS<{0`>Uem@ThIXN$uMA%`LGA?PW= zEKVbeyaP|x;g@7rY!*x?u83J-rSU*aNeoRk5+-ZLkGEeh_!MaLGmb4ehTKi3QFQRfB$SxL+D1xx>!GDkr(Ue zsL}m85Jhqt8r^Ym$=MjQL)$bA7_y(1p$9^0>sF?>Tu%U!;^T|=rTr7zHB;cV3d!H3 zj7+?@Zd+x$r~r@YJ=qRa-${OcV5SVYWS~u^KHpz@&1*un>g~`aG7@!En`CH!yCFQ^ zc^6s&TE4$if9qScc36u%xu56zz<7<2(4#RT4Gr|uh$)NUr~L|%Y(lD~=4jc~@iR_# zFO}Wv>1A#m(3BDbYeV|*!nL6hh(T~-OoS+4;U{U~Q=U1sB$2!tIO0t+gp5RuWck&s zJIy_^RTM44qkhpI?a1v+^|d1=i#vf?1p@2rs@KS=f1_-ejv;!yt|%lpWpyo&Mo^3W zlMLoA(dv`I&lk)|xY|PEe$+Zom)1P!wm@>JD=I{RszY9^!QP!P@lp`hIK`F-)pKBR zof}`sG^AK+(w0w`kz{iDdi^J6%Cg9Bqc9LBA3!LH?(#;q6{RN%JHnvK88Dek`GTbI zS<-;3e~=gMjw_7JMkz)-6-uS)hl=JKOr+Sua8VX;f5G0EVRda2eMc8P&p=bx+y{%a z30w&!`Adk*O=MOwdzA}rJC8*GAq9xujy$8)*Sk{T7z@_W43?XC9 zuh`h*Icl z2&Bja>TBJnJt88hj^6pfT59 ze^l(Ms{4tmsK3iA(SrMeK1--xDZ{VUgFVhS1R5n>szt5k5Oi&)K0htn(I53s;~e4~ zddGrvsRN7MyH8!V5wQCL}c_2+7gwbq;p5qpGn= z9)rFNX(F)BRIBY$$bCfA&8jr;Wf4D=1$vu@g=BQiEZlPZH&Q%)ARi13P2to%h zKOzdJ0ZCWWLAR6@8?6IyZ8}ll^%bLaY|M^@HQvSyr>DMH0yNO!6vX!EplV*EpQ)x=Z7U9&@BojN-J5mExG)c0GpEF+v{- zH9bqD`n}_uI#t8XRFO!RwdGN8*I4Fx#o?4OIR+b6;qPB}du$3nWjHJKelR_!==dp*d0erae3@tb^X+60BS7zvA$l;Zic`a=h~qv16((-q!OYNIa* zM=yi$#cX&%AcG0ABfhA!e~fj0Q+dw;BdA7tZKJS6$W$aSNw~0w+l>#Q;80{vTvR8L zb$Xo~EB<6a+n&sR_w;>qqmnAQZNR4;^6*7<*g8+UoxuQ|``LOQM+5>~6otX6+YjU$ z2Uoo|^rUYyUYOrQpt5vfcSK-XEq04@=RSRS)R;X6a>?*DFBJ~He~bL|gKRTU z&VzM-`PhWjZ$^xff4#Ug9JM|Gd{ham-a#l3OcZWoo4=`-sp|e%<~OQ=%!ID~k!(K$ zV1s1_(bU&uJk)_ehbNbWR?LBsAPu_dFTXUNw1t<~c&nj7vhuN&jC%ZDJC&eakd7H( zNs+G(IoMeIlq}Vum_i~`Ry+oPc>w%f8h`xFb#f?0ugO%sf9M3IF?vN^a`J$FU(Xp% zfW%i=BsM{6$l>1n3+Ja6KK<2SvPT}WYD8OkU=hfYF7~-GusJ%0>gr05SOR6THaDmO zV3U$NmVfHVv}&s4^kkSn%5_w#hwJ2T%RZp!yLJ?fa|AtfIwWQBmn<^0FO~9J_3KRe z)i{NDfq<`;e?n9gk>N;{96Cd4r&Xk97fHa`qqegw5tbKh2@tCGf?D{}4n8f$q{ms` zH?PDP#ZWX*MOp;>78)bPuy#5;WRXCXV$v@1q{=-j6$0{w3(ad>;8~IGvh8Nl-4^=d zKadMWWaY2%$zg3lcSvD^Y{bHuYwNM=5n%bG5X(ENf9a}-mqL_bPk+ucFsY_ZPclVW z+7B~SyffB9kzqdISi@nAZPG;KiPEiw(*}E-fkdOdombTrUFPhC(kxT|Lga!YZH|!l zEe8kHt`enO2d~<{9@{4Z9Xn7Hq--Tt*GX=Lf$2paL>}GLk6ekOQ^0o4AmP?~Ei9 z9m61WfA_1>laA>J_qK{^QdoNnOY40I6>NwXN++ry17EdexOp@vNf;20^j?hw^9r=Z zmETT&6nS4c{d(Pzap*eKQg$))WK#Ikt^gYVs4#o0`qQKRJ?L{#1HFWW+)s|2uJpW= zN@J?&I9^6Sq6*R{8Q%imx+kV52hY<)EG6`wf6NQg;xKFribevH%G*?pYb-5~p>!@p zTJDW>Q<9%9jI<5@vO-%OgfvNlg6D!=D<{9JoCu7pKE$0Jmr#uEFis=96ee6BMPc@$ zRr#2bvT&>^?|RJD;S>}k>cnBw9RR?-DM0?#)a?b##8HazFI5jbop74W_}d~J)+{svE7nyCb;Sn?KpsR!draHc$h+B&iv zNYBT=6)1Nu-tJ4nW**_J2Cv!EoG&vL0(pd_F1(gKuB*~liUw)l?TNDH2+*!y?z$CX3D$cke8ju*XG}$ zlGjfYKGz)L`W;BMnTabA0URGddLH`hG#f|A^cN?gGTk%)!NMOYt9nb?CMMN;f0iBk zYM^qTMcb7mcN1vBqcK9+hFWbuzwDg2sZIqS>j3C2)yD`NirlT0H)*vas3NBBS_-xF z0&*42C^OqphtL4CvS4;h>eKuZ`*(7`i1lFhYl4wsZS(U!BbOj#7lqEtp8)P>cVb!d zTDvEm(|`yjM&hfX?$MGm<7m3Fe_IcP(#A7F%@A*laF6Ya8lu?bty|s9s|uHWQw@46e6J7td&(QX4wD6m*NDa znlc(rK8HJa_lgz%hnF|Ar-FF)uUAHgRHXhu7^7n&hu4ek>cYQW&(b>Ue?~M8cd)kl zy_OjglnALP18b+S6z*ghZf&p8Mb=lANUOqN|FBh*)d_*rw?&}RHyl3A`5 z?N=k(*j=f=eJ2~X=*ucItHneGhh1XFd# z#~Nh!S;9o3UJk5M2v8 z6BUuq2*22VC(;>*G*Au2KnW_7Q=t7V!W&AtULVh%(u%>f6PQ^Ve|huwsgj-%fU>Gg z1__iUlCTcqtDb{bYCLZUJ-!Ftal#P$>;AwV>u-nD3P91sISLB?V0fClwHP8ry$<$4 zM87h6s$q$0V9xXsU94FFb9OAH3k%fTU~{3+(7D%j@^Dqak4i7zBgss+>m<8UxJofR z%JD+G+HFromtXB{f4P#ObwJY2Xs&Mo3C|GP5NP^M0ag?}1WDLF(*kyY_gWcNj_+YA z&48DifP?eD;|qdYEHI!szhRP11elV8R{5?xM$^Y#cT|(n!%$4j+X=e^4T4z#R<-A}{bFJa`nl_Apg;;fuE(yxqGzNY1+<7hg!GMhZESF?e_?A_OA?FocpD68kauwChfW_>11iSCY-MTf37_QFvL~ry;M(R@Of*`aRtgNoB_;M zMxD@6yg7QRrHT&nwAZKeSQ0lfT`J&R**#)Zi3EXG>(hxTCd<;W0^6Bh(|VCj%YBZ= z;vJa{e_aOOVT*Az-$twA34#7@EG%RV1IRTTAK()|W}Zf&Fef`zlC*Kctij>-lm*ZX z#dh)YxB2W=!g(^rY9e#!J#vQuJARbWxC9WWP;SVB>+1nOQ=yf*A^3f7pg{b>aDKWZ zimfLN@0wE*`|mZHZfX%de;W@H(nltqVvR7Qu88Kf4hZwA)QhHP zt6B`KZblT1QCr2KbecT=NKM|FCOS2KW}99LI;BpcmoVJj%8lf4}5mz;f%N8K&w*6cxvAQ*AHOQh@odJzBjq!lIN` zKujs>akv>)UK&5uwp?fWvhUm_#VKGa>UkM^zHQv&ViruY=tbx!r;>(7A^>5q*)wgR z0Y>;oo}I7aoHD+NV5xJeisO$;otKKFe_HQ)_#+AGNnV{^N^uOA}*6uKL8ER+mKhmFMl zbY*iN&icUu*V&iZM&`1Ru7J0mP5;U)ok;UWj<4b8SS+YI&Yh?tbpjZojR_@5f7t&U zwek-s`5*uDfBnCp$$#feod4uZhWtM{6TAMuDJ%a!!o>1l^3M)h75~hL{6B=(sgvav zjU)c&nFs$NRK|Z;Z0Ppp_ZUfje}l^JA8OV72Zy^1G=Ge_Sp0!0)t|bM+5W-~P(5x5 zqJ)bk|L1`o{vo~We<>C11b(pOe_wi%{g($r{P~j;nwM`jnUhfXi#aU+RFX6o{~h5N zT-bJO<|O24nxX#PPx>c>v}OL5j#Pwx<=@yp#P~lbuS#ey#uxSsHvXQsjQ^Wtbr{y# z4WXv|Q@1nNe|c_Mn*RR!vZHV$Hccz=zm%2g@5)w`v&+l}=dN0k~df zB#~uo{g}l(4Eyi|us9j{f1iATuO*A(|YH&TiEJJXj~ z%Nj^B_#oyNxh_C2hiTNNYzLR(X;O`V7_xaW_$eXiqLddtk@5aSe-s1r!GKQua35Ep zPA4Yv*Vs{w+zu*Y7?I&x=9|FbX<{N`RK>n`V1Ir3k=54n8HQ(R^yv6)^`dL-S~Mn8 zW^CrAvGc%Vb$~*>v=-yHECHhJ$?%i?AEMr~`Ho^Di;Jd02W~t@qO~PgckJ_!PHyrp zx(05x+_fYUd&spge_U2l5`gB~HuPOt(xqKk?$TYzXmnomd0_WR&>gt$fR?GA2^ce3#UfwTath2CBC8xUVrhAqez& z2m3kTgI1wY#e8H?idW5}!o}$BRh@$;A)iD$J+i)C#!wr#e^J3!4j&|DI+P1v4?qnL z0>g(6TDunsD9v1@5Su3rY>1N-rXon2B_%#@m(Jd8wMgmH>j%z)@Rh3-fm1(2E~$dp zYeb5Q79cO;CDvm*jq@ec{KT&S#95kNOJ^4Z3QhG@LAQMy0^yBR|At&|VdctNIsf(s zAa0N7rH=1Je?kVM+%CyNLf3Z0-P~ts^!^(b0IYwXr-KqUXKY%SOLHpd)~a(4pU|0@ zgtq{`j>8;QexZvaC`$txiW}yYhm4K1Halev&n*pP26j})#&1{)<@uDHQ3H|I%hgT{ zzIUxLeI7X~E!J(g(yzQWZ?q=O4vhs>JRR*qS|B4if6Z-aidkqKI+2W1k*bn1dOI)3 zdUVcD2am;XZ*w=e*xR$5s(2J?&PA5y4-&iourL>TRCl=Ho_^ zs2}xN$ylF*|Le2khn5dr2RtnYbRs+KR;L|A;3D<1$bIEPRIj%LVtsU&4=Eaqz7HmL zeWk{efAO{%bO;*Re!^9_#>xj*flPM8GgO`Yl-LFklHZpLs?%_d(}W#R*EYdaPlOm{ z7peZygQA2=5stWE_*f&z`<(+oyY_iH<|`W^;P`BV+7^k1=iblGfEbtO6~&uR3A{3{ z+d4?tUxf&?LqKD~jnO$3ks zb2vQMIw$o~a-;7rttXG|mQ>u9!)L;ytJEJBDBceBYe4dR_L2c?FL^>kQ#gq(Y_dpm zj_VCi&DNtO^hC}MW~`|!YVd7g;mYyX^xRMg)B7oTub9!;u9RZ9Ipt@xt zf86wWlLLkHZITTPqbvmU>J7}C)5`!30QB^XN@L04<6dZ|e6gp>JxyZuaVD>YVlG)5fOs9Bt2;pC0^1SSK4yD#Zj!3CIp-P$}YAXags_CmB`^3IbqoWpqC88y;pfyzi49tiY3V z19~o>Yr$b*spe|#=f+ANCt`Jd546Uf7;Gno#*Z!NVlH`v`A1Y5;OXkc7QP4XW z;8ThJS$f1>T4<6p>N?a{H~2{RIv^gX*v(=2iNe&%P}))wAR!gdYrs-SlSXX;6zVAD*EXsv(Zm9L+sD~Tys7AIArf7^+-p476Rnf>pW zRD4O)6-5m8yW5J@RQ3hINnhNvNsNqgBZpm5J;Bi#Te$}^+e0o%a^e`^^UWTSzf_A`6q$~%VCLI}k8{&n#wNDzz$qwEnArpn9$K9qsN zR?62scXlxcyU6fbL{EqD>63|ke^2?(=8gg9$8u)O&=9qKCNJp#n%krc2YzV6rZ7|6 zIJ~jG=9qUpz2@>Ry>DH-Tg{?zLvf(G^F~dy_D`3M;hu`oe=$>e%ii-zN(&sX1bus| z>uy=?UBA4hwxbk$`J;JxwZP>@6I=J+xpFCp*u}f+7>;jpnYWmjZ||~wGCh$$02-OI z@W{ab!SfuQJ3TO>5{_csbtdGyv`)Icc({rsE`yjy4VKCL)UO-w?Ii4}p}-C2QF0nc z+lEqpc!-~sfAOe3C~u^70nbVf`{_a;@QGqGeK{pvuZzXnWJu>hdpswELE%j@w?qk{ zQ{1LN1L5oF84!>8SN;~8YX)zZ2O|-sHz^LnqbvqyWiN+yJC%F`STB=f{i=ykNVHyM z#_D7&eimXe#h5oJz?#qDLCeG<%80|EIz7wuuhw(~f9z)+dz!t}Aq~e-4_!PWG=kbJ;bFQSnUTCGkG=&F03!_w3iM6FyMv9_P!{$dDKZtLe=Vs+i zunBcYiKKHEXF86$&>bJ}3wi)VhB1V=%3{5Kj5`SsxcHc7g|x{V{{$Cm@E&Z=6o}pj&HDwfF1uA%bD5a%-wbMbyN>Bd)H%2J zyJ`b7&q3(sC_YNJyHChXBTu>R+bX`^$zNkcjGuuYv!Y=&W>pynRC18FKH6YKIYPc{ za&&20B9M=eu#2Uy+nn7@>K9SEI3Xv(Pe|Qje*`$VeA=e$3r?4xSfv4QScfPO&UY|R z^uAR(R+;7?56ar8s@tgdz zm_77jcgi*l(J3Otx_$fZ_5`|2pn0P6EfjC|njTT9%g(hzTOnULuGE6RKz%?<%!9e?2*n$_%kz#NYsLZG+CoS&O{TOe zp1yNCJk5(fKcg=SqC!^|_>u8O4;lK}Z3X!gcI(ns8Zz9R2JfTl^e9EX?uuvS8dFKx z4~-bw4rD=iF2hOtwgYO+R@rhvG)zg5L@m{(|TdYVA~c??HchAh-r7EdjCfB8n; z4Q$MmqKZ0dS@U%1$nfUrzjFU=7N`05r}w>{%)a=Irg&4i459bDhwn@9>pB>5`B}Zs zsE!#VQdT({n!KOi^y_cf)|0RMK)Xu|k-iVDalnwd1;E;*hoA+AU#Fa|gCoZn|FcSe^gE&ejHvh zVnuN^*_873c7qrt1%=NH-Ene)KYx2GXPgL%6c-3<=Z67rD=+Wwj`Jk;=)BaBY|iv*9tp9W6iQh6~=V9^np&~H=ji6SOf#KDM6 zW6FnGsxpkKGA;HD+uxND8<$2WER0#!p_UB8SPp~ThZT~GaJqN?f9{^(0x@X9U)8z+ ztk%yBOH2AIHQ1}cqO04l#j+-LevEN^-d030$7i-5c#`2e{BXa(wmvE+0wv2ad?r*z z*XA%*C|ox#rYlxA-m@Wanb2i^O>5e4Pqk<%l;>z&`mtEWf9qEG6g#c$Z`~hk*4}=i82G&-2aCly{krb^qN^CB8SC4ky@UJy{w>O1jbJe2~Zp}ti>6_@C?wk^%wFt(}%3i(Ln z*W&pPQ%*hjrQjtsWGz$+7$;Xqa5_rS8BIwhDyAD2DMV0ie>;`kLM{Z~?=q0JNv|t4sPlo^Q&nL`%&^{mhM*b7MNgCou^v5WvNZIlx@fEJ`N@&+eg< zHP>rLmNm(4KEnGbU})P9z53iy;X@GFcsJ{xk0kAfUd6uJP!HCY&}qyzN`7CmCG|UH zMsE00HBim(f6zP%jam7m*Q#D_p#sHc+@VRr^2y3DEM?!#g@2(45$V2^*Lju69F^`k z*O5-M%{`NR;(YCWZQk^BJ!Y(PbzW`7%~^w@6^J#oq^ja>uvSE{li)+Ws0M;@%%&(r zmP36T{Z|q(Gmha}?n5RB0NYa2z+Id$8JCXsI7Ct%f8R!tM+DpXN9|^2CZAMOaeR6e z%}usxmf(dA{x9+GSFRcC;g$#e;9uLcw>|sqqv+F>c@QOsiv|JUe6jI5Yz=&AuaGw1 zY%YOF=YG{;p{JfEm{W(^)yCcO^^JxBMQA(-1iy@im@dXBFXVsuL`k}Z9e4!c=sG}e zKk>H*e+8F~fub#s&qk5jgop(}e4CuwY(ebMyrd6({n9+^0UgN_NkJ%|TQHV2T62ufu{ah#NLB!grl3Z9Rosl_n7@e)`pexcRM!Y^&kxis37oecX5SMME zOW=vBL1W#Nr`&i-6>nMHma%5tc8EN-5@}mGf5Jw89e1R3WoBQIt+mg=Nh8U_LlG9< zF^Rji&^M7{UWq>S70qS3wC@Ye_3kc&@)9`Df&6u-s?+C}gg%adru85=e_L?OB8--% z<<^ZYhTl~o8p|8J=b?)Ul_;*JVP`@2`Y+APaMJmei3g5lF)K#o7iw_wKXVvpu-RA1Zn zw}WJ%tQ(w$A}j0=#{DGwi>5wmF%8ohm9HL8qPe=|bMJAKisyRFVAdqBtcA%t?05~D z-$!T4j1a|?T3*uG6OsuTie^}$w4rcH$O6|5TBY$7KXqx*LDLC?5`~v$>WV80f6}8n zA#7-#+2Ov00je?`5T>+UAD}v8AE2aFJn$%eDpb49wPZ@_Q_Jj{6odNW5G<|d!jbfh zB=9?-xxknE5(BXnEl#EGa~+tE>V&^jAL+dvOeC+{;BWtS6`mtnJT?rEwco%eF|&7W zyM9bnMJjl7ciWIfwZmkdiEzQSe~U*RMe#_yXcWGG5Oe?01gAqKJm3ktY|QV2flQ4| zkS53Wi`3Uta(^Pa+k1rL8_R}b$btvd*!A?xTeH`1aeu^iHE$O*H7RyUgH4Z_uMtD_ zaXDo~sLMa$l=Qb@JMy)o#`f*mtpwR36Gh#pGn7(fF4wym7GXKuD>Vs^f7n^FMl>i8 zhefHtCnrXC0ve|78Ggo!V6)`=bR9NzZL?TWNFw-=5v9eVbyhiMQ7KqgS7;C*+S&k# z5+MO}#SJc!dVHw3M{iJE4;FoWX?=V4?oxn6$r*pRK#YH4ur^?c4miJeC1>;G!JFzaSYpsBGtk%3~;sw~?Yg>RGmOx3h+-#3|D6 z+zsM6%($J@Ok4GB)~9c2b~_b4_|X9b=b*)%8Jv}>jb3rm!`!@C&cPiSH8d6Rj)x}bQKz$vXPVK*T zZV`00??CAsUI+-HehhrDinmGvj&IhdKEIY-l)WO-GsOb7O=?YnOqILt1p5rG({s_jx%v*xDe3s${2Lq!Me@Kj; zp5j6z*vmj<-wzRs!!|eXQJp4nQ*SHqY^xieM|r8ZjG5^kf0BQVi1l)wx8e~9Qys)A01avxxU>=L1I3cTbMg{BCK&qEKlD*r$jzw%!Zz!smBd8Xg5 zgbJ&kKJla-goS2ZYi-*DG|nOCh!;8{zu?^FhOOAiFyD&t60zt|!Rbz*bx@(bui+l2 z!XTdQ_^7l6pLU#_h9j)LlkgChsuON^JK?&Cv@V>if0_=l7$B!sF2Bd9ehoMNV4;My z)T>>SZ)6Se1!BP1Zyg8AJ6G~@wM_3<6nKZqddb%0WuNWAxhYRT(Ejy1EX&<#o zh;m6cf9(0KJX%3!hxh44fUJbv8d=xvN z$fSD-;RvsSAJ+*EaUh5X>`OLkX~2$?7<*AWUExWmVSFrV{E5a!ft4;8cQV#88Rg}x zmdQ3rO>dkM<-fe3;1GemF{E+=ey1P^S{Q1)fBnYd$39=+mjYpsGF}olASUUOj@FIM zapdnIF+tq7RsmhjK~Z&1-yKx*Z#Y;gVz<|W{&YhK`pOj6suc2z@J+scccMy#My@Ms z6C@8&O?2Mb&qRMP&~7Pe5l19Kp}$RMKziHVL00VDGt`@Q&g*6@vzRuI@lq3+-pX{= zf4O;FQp0WFWPPYEg&S6S+iq^6HMhtrbbO~m%B zPD5R=r!o~SER&x7H2Je`nzZI8r3pbhH;G-l{)M8x_VNo^iGT)e2~c z1f;vB*o-pM6P!J~%V29@s2hHoYy2fY_;X$cJi$psKq~^GwTp)`!~&>A_=LQYUmYa1Qn3P7{Qe}}#2eoT~78=>EsO(F(ppY}v3Q&vPC_)`>*9|2;z zz1L~si8y^^VZjjJ4<})a^k9^qoXop`_4@!+&(?0g#un$m^W;B0$#O6gkkcKghcCdh z*xnasNc?9GY-l5u#Zg81eVnL^ngnz&@8)^7+s25nO`@vMk(Pe)WMNP9f49YcY74sa z;v$z|cLPhSntFd(iUcO{!!(-K${jA+M$V#rn%~su1znt54UHsDvKCaLFTN{u`lrJ# z{0x^-%#(Xs?%FQokmX{a)3$Rf^pTyBsX2Hg!+p{d!*9vc7Z`->!{(W`Q+WyY&R_Zc zST_TSZNC#`pHWF`hH?*-fB2^8>(+S|5i?1_vvOGp4TOZq&~3xZ%?*P`XKr%0@3m=TeGnj5h9ifS@InB^iNg;nn_q4lqd z<1N`AGxcdp3iQ(3gARDbzXgGgjj>7u4qb36$ly$bhhl(kBbY#Xf01lk_frF>X4-6v z3CJH{;8x*ti61Q<%0hLW9@=h@6WgH3?MfjN-*$5g@9`7fOpPfjS9ffm=$l7}ptgTP z8D~z|BjdIW)?+($wqEl|ZW;8X~{`?d&paUeZ6VH5N$nP-%qw z*8sQA-r9aTtVTh~(Wo9Oyf}tq`*EDjk|18TZ?+7s^9sE!d_K5v)69+KKv%IPx)E*8 zT<1Z|8u{rD%W2h%)FuKe{~LjhB6ny280I!)>Dnr zU3;XJU>^W9q8{*jPn}v|7AFGgzHZbup%xmhen*#lsutqXDA@*(mt|D(UEt3^6)`={ zP>`?thlP)x_vtTb=kAKc@Z0KGS9npxxZ2u&_1x6ux#c~+!oT${cT}huv0u3YN&5x9 z^-fNHHMpCre_H#Nh{H?0e9xynxXN(4=ZTJWEfw<|GKKKqQ8z4Zy zuo3Z0Q3Illigv(X|Bcvo&z@_o=N|7IqlZ%i0aBH!tFEe$(=|F}r-~o=tL2w;@R=M6 z_jnN08(9mUHjs*XeqQ;fah7h!`#80Y)^5y_4V&c(e|_%y<%e6%>&(AL7Yy~R+4zq5 zoGv~sKc@%&ZqsI2pWWX0IDh!HwI1dKZL!;!P3Yzh3OS`T4`J$KG90A07^|@(aX49g zp6DFsWEMMpxptPT?F-_QT z;S+u+e+~C7QdwHPxV3+pH+?>s3DG>G8Rr=rmW`mbOf1A^h@ORJBy+6A_&lhCuO zIn0J&HKb55f}uYc$e;#f;5UT|z5jZk+?(E##MN(qp8_nEZv&}GAfCH-JNs2WLUnHA zf49MlnTsy=yWRU>V7}eQ5PJ={ufS6igMe*t+UGXQmdiURegPFE)|G9MTv12t`Abev zy|i5bX>bynmL0sA<&F~L&?PofxrJoL@uztmVU}Mmvz5016~L}jeY{K>*F_??!HF~Z zetHP{aXlpA=pGr$QQdU%*enpx@($Qye@R2j>gb@s;9VZ7kiG8)-N2j;-X`eW2!OW% z_uIvOb+Oiq?e)q@IXd6w+qvbf;8^T0LB92jcf;?dVl>_@roA_+-RWcs0AFWEvA$*3WNf060qp=C}DbH&~d&GxXr@6?BSAI*;I#C$zYA-JuV z)_K3~y8U5u7XrDjwJ}eY;rXV*V!3PE?Z!LJN>~>{Om1Q3oWLHDoqtBF_ibtCIbxQ( z4sEqQJGnWx%{<%s@3(yY$+ws7=%sRA!m5iVgX+d44o;C|`36}Ag|%?^e`7(=r1j3s z#k1JuyT!%t?$c9#c{k32r*wbH<6C`n&v^5WMiWI>_G+{~nD^c5Ha6P(p{$1pk;Fg7 zqs-gb!DXa+<35rH)1FH-3C_3VzDl;?aXvZXos!mdV~E>o%m?dLdVzdyFjH-IG~P$z zsMh8hftVN5c#phBk_ifwe=qL2yXn}u8>Sz*6*b9l-0hpy2eyXW6SmtS=h>^bZP-qB ziHA3*O`9)VLTOioy~nfaw%^BhtH+D-#l;n(^NmH~b6VqFG+A2XGjC6q7~9dM;>$%{ zly9R(8BW(_uW-Zpt~DHkv9~e|&C#2)*#_rNoMx8;5|SLwsU2?&f9y~eQrifV>NLLH zU*nbiPLn&W)7fiZ*!}i0yMDfZ1P5b@e^S5XyC?|wRgd0KjA)qS4d5-<*tTW{0NVHxG)!`yYu>(sj)KDKl7 z0DX6L7oTR{*Y&AN4Pz#qahE$oaoA6b{bXw1n-79QC;AxFf87D$sK5K>+->~VV~RdL z;)Tj4B0Rq-r_B4>bJmpPYAgvS-utY~Rg+_S4g;xgT9J$S(t-?A<6?G`GZ9`VO6JXUkWw zN_Pu$6eH^;e~I_6#(B3?=k4h=9n9bS%j9)DtMX-Vo%gLZO+V$QIhx<1!fgA7U-Lx= zh2g)|yE3019j8Cp`)%g9{q$a@!8rEE+w2^jgRF$qNbBRkKi|t+e_nA#OgXz^u|DoX z?ltrLqeSWaFp=-d+bqr>>-MM$$6}+&rW1;_-6Az7w=2PqlQ@ao=1{U*9P>%lM_%07?7eBRd3~;E!j97F-^h$`Y&qv6{Sns%6d zjNfk2=i#k)En_&n4sUTZo(?0Ny$#>q#O?(b<)*us%X4hlkOc$Ve!uMaM}eKw3P*R; z%txj(OyBcO@p4OtYeT)C8+KU`^jTfXvURQSe{H?m@|+F5>^2O*z8^f=y^`c*dH0U& zU1ff=+@M#1(@#-5vyFM*%(;FsRhRUcv3h$+Hv!K=4+o?8M0fLQaqQ&rZIdiBhSr}> ze|T@MqjKH0H~)iN0~Oz@#yxFbCR1q-UvCq*y}ho?Ph__PKD8hcFF~Gc;J}ZOl}Z0L4UTWY?Y_Q!o~jX#B~8ZhhgS z(WUbv;*P4t{SrCL_wxtu#tmP+&XOB0_X|JhtKg#=c)1+C?dRoa6FEosIGi>2Wj&<( z-ADKlkoGG4X#eV<&!#xMle;$@hK|({GJ2C}x?2U++P{rv^-D4~24KAn&gH#pe`)d_ z!gBkGtMxqWKdp`Zad}7g_m}duGvAh7XA1f9(!g$)uAKPu*xlWEX6?p;AD^!5Q}2$q?F+(Hi2F3pI(A2uGy(^)YB#yK z5tzwN6;y9$bgA5Adxl9oY8R2>giWqEa==S~IRR8X-&xJnp1q9MM{*h$dphyGWsqlq zDpCsqKJok{#wt}!{O1>pX;EDak{>AOpNsd9gX+MtTM#X09))ilS+ z0F|>kYOJj9UAD%|Zx%0YdQJHF_EScax^%i{x%d7KX4EYF zRWsuoyU3B%^~cs&OvywQgTe0XEz(cs;>p&dbR6=|t>VkaF|_uTaq{_f2$%V4%HNKC z@G@Cier?qC<=|KS+c;e&gP^xrAIs+*;HUz^QHRu_^cYul10 z%~t+#HTf`(%F7OQUM|lS>^tsdulcQ9(DXBOaJKYqd+-V)@BJ;tAM@o4AJTktc{gY4 z#O|ZzyR~dq`*h1*f6Hz8>E4W{U34rO96oO%E9<^-!!(;N-q(CH$0GeK)YzG=H`Du` z?54$Rf;YySu@h)AI^Gcy_G5p^)*p?xPC>|JV<1-GwY!E8_D9FZ>gu?|Vz5*1Gk;|6 z7I$k>ALKzC=Y2U}HN)&QG~3O4;F)(85TpXI#yOrn;zu+Xf0#hj|M!nIb^lsYcfJ0s zsCzCA3m*ZmLNXchPhRKts?ecj{}l#5SIFrVar)-p9$|2_-+Ty(k150oAbfd5ooe}k zKGqq#y^LD`Z0O@dVn0{JJz`w+741U#v33qe5PNWS#2P99e5zOWyjqFfBMTu*c&zLMKsAg(-H!}d-+-cx3=H+`Et2xXbpg`kMo87wc<^$aML?+ zel5#Gw>%pj^5Qzs%j9N{yNX$1zu!0H!)xy*0L72Rb1O%$a3hZuZ2&M0^^abm7xn2C zh0yEODSBTu1;9Icr*UzoG4BsYp?B$?54LR&YpTQifBj$}$qV=)SOqNgialoPEz!Jc z{BA5q?lM9R<`|0x(>}snwBfQgKdla28mWYIZ0>f_ zC6b#U0;rD$MinKH_k7T>j}=O16tOcAJRwMOe+1OF+`tl{GJs6ORfBTrn`H%aHp7$dQ+fCTw`5a%@*5@SoJt&JK z>sf_*uKT$^`j2x2mWgoMUiRlAM56J~IABB=6%x4xDxYE1eqdB4BsQ@%Jgt+MU)^X4 zNeM27G|X@v1~nU`sG;O8d#mHrGU|g^vKQ3B!8V|_F~nBi&K(HAlXVyx^;KA%f3p}W z&XOSYVmGJ>FMKJnxYwQ4n|CDGLT^Th`#f_GDatzfNl^NUJBSbom=MB3m|9rnK2in7 zKtn3>t=8KtVh*>>{U%})@tHAW2<|f26I8WX8%?A741Y!nk^0%3<^wL8I#;EmEtobD&#{ z=-7|ub`tXtBx4lOjO^8fMU%l63T{!DGh}Wp$?pqG5D@eTf?^^m2BFY{Se39EyyS!{ zKiB(PN(Jn?7!DVm!dh=F$Q{(P{Zl_uRHiAvvl0KYg<6mLV9&E3oz=%Ze{GZ%iHzUb zw|yE0_^E48+l5sAuygL8xuNZ8_S84~ORw|cx32yiyU*`jruzHS7C!auo}&;^f~4N3 zuh|8VqYLC51I76Mo}9)E5tZ(kYG%%XIQ&j20A4_$zuBi-H?9v()F?MeJ&2kGg}P>O zJ0p-{TXL6xncKB_#8E!5WKpw4{(nx9iz7)Li%6eN#G7-tnn6jC5f=fucgtDY&ae>S zWKJtM3t|I9(5ZPGnHY;;3F1ACU-KCVCY>l>c{r*3 zyVy`r4pNL0Fc(h6K4zjNzM4d+H(t4+7}N*C(AArlfWmTe4l+8lV`|ZP@qe}~Qv`SG zjVmBPoxwx|xwcoi-WQiKz?Y4=U05GXHQHV!k8vm8w&G!=rPdyq3`(OU+%m4k@IXB6Nfp{J-i&Wpk(>5V%&+o3jkDga(&>FMe zEctMG@)P?UiUzr=6&o89~#$e4n z*0sWtse>UU5W$cqofpI>V$1GsLv4eagjjIS%EntBU$60j=ST=21b>gQkPVSyZS9A* zZ7obkxh8bTvr*(lG8dtuc38P2lEOlYArRbwav9cW611X7iER7b2u5=P7>gnfz3w`= zFpPkrM{)vB2M@f{awru}0IFfluJ0IF?yQ4S#NPF6t%MU&F_o}mJHLoN|AnzEpQA`Ls3Ulr+*g}>ewh!5<$1RrS)b{ zp>X{3c?}}>5ttZ94F+k4v}{vMhi|s%K8Q%$6fx$MO(9B_A((hrXzbO_S;;(0nZA#B zOuBq#E9(0AntF&^&~b(%v9M&CTgUX%DQl1H%`G*G4N|(e&A^26GZ7SHDKocfB$G9E zdrol&JhqG8kAK9<$J6yif30!9Z`?eF5;UZ`I*&%*Q@(tuOtz z9e85eFJAaqm;UT7Fu_=r7&S^4GZHHzxYVAm6%k&;RI#KJLy( zGsT`hCAEKilPp5`zHX)a{L)j_vu|DMpZFU+ak_)#EPo=Ah!IfN?(5x*wkKSP_rM+k zBj$S=sRR_oHgJkjgGlwh*c@7fWAXMm&m9^WbI;%=!^HehqH&Y_?FTo$O&|`@o>?CY*&_4wpW=UK_q zCibj9u|!O4?~eFd>CLz0#+K3@6f#7eDM;7IxPPZu=AMOGh9tL1S6{VVvU@ZSqV~;^ zDaEp;C=x0pn+gd=LDo|n@*NT8&Vm#{Y-l?fVt+^%w%!Xm!)IF-CqIFMCD_4=YJ{Lo z5Y)NO1e4id4L64G0uyiq*xFzsq=yl#!Wzz~lQ=BR zE$`IAaZ(qdRZ)~Fnn{LnWFGiJ9rZ3uGk=&cyL%taah-wSI#w%ioWH?`nLsHRNHKFT zD`}jXqUW86%rz-l%E8F3imj1kHd4YzrN}|%3+vwbt`vGd@Eua@>MBV&rpoO*E2NAe zXhRZRce6XhB;jaYio1g*ho+>TQu_CNbSzAqooBpW~E+x_Uz-u>mff|xl>{nEeRdZGI+zhZv& zWB2keO#6+QWbv0TeEyC9*w9~ixZ=N#)js^oF1mm8Z1JP7k})TtHw#&_nchB93Bu15 z*%iFvY}=+F^H_4b)FHuU6P*;Z7!s}8_A-&sWL$GwC&Hd_;$~}i594KjqhT)V-N?ny=Bq(i4cJ1J6rV_N8zkeNnj}s~L#wRUL}}l2 z^(|Si?J`}{v2j3AUkUNRb$`gVtViWbI2e4wl;gWVG1wBYKbLdDeQodD_@PHHurGdM zs6RHW82@0a{kN`ta}BxXroOS=)6V_TMOS{=Cw~7kH-0#&KltrwW3?_hADLi&#turx zH3y|*e6}u9b@WgkVk(s_83o0cpi%-V4Q&VBh>(YE8s*8|@-Wb$`+ub$0fqwEr7SOB|Us6{!w*(|IQD3nG?KmWaho zhC4|>HjI{&$ca>+!i3`ca_{ubj)8DJ(rQ^!kj$~6M^%;Mv`7mv9Vo^by7ZB9QN> zgRmxEvs#+z48 zsx)a1ylnSevL&mGO(Z>A;sa5cz9v=g6vzJ0%)J0tPf5j8r^?)_{w| zV;z58*Y4&Se^t*I$a%&%##0}Du*W}mm51;7YYsp3=zph=l=bL@N82NA8-MGM_DK)@mH7v&G=}-cOZgYa{TF{&d}AHwANYF2RQ}_y_&vu$_rQo%hp=4-nHP7$ADFczLPFpUJVx>saL z&YZr8F@I98fh-oNVm8_5XONU2Rg69))Ao4gmVhuE)+>w@7tDiB3IsXDl5yyRwIk9E zx}&#HH3aqaOraTKl@En%B1_50u|Q$j-b2&`z7&|?g5a)R5U7zSqI_+fkcaDp^z`$A zYr10Vh+@X8SGk@Su7gsFK=~95jyqXksTfV_7=Hw(C^8O#Opz1SxvgV^m{A)v$7_x} zNfA|&!~xfw9&)6x(f&O1srJtWvSfiEfjz1uo7r-QWLt4E`E;Us0_7$rGN-sJ)+w}P zcF*qtx&o5CC5rk?(Z~A~$>-n`L96{u4~*(J2zuAanQq5<3*T|yANe{={={A1x;g&M z<$wIjcR2sX#Kji&k2vuKCB^tRC-|^~56s0YRPisH^yPOJnge>^uCIBp2yp(P?*mlv z{Tuqy)589VB_Zs*gf@buZ&Ak(om850iIwIZLO@A}7rHI-g-yk7$=!HS1gbiuwRAS*6;JPs< zPMZ0l3E)xt_ z3Q~gI4viTk>=JbU%s1%I2Z!^t8&972j~yC)+dA}DyjG;o+}n62b&icca%BJD5XR4A z@jtNl_*Xw6eCy&5)=5)T1%gyXhJWsz#cIoOjn#f#bBW3nA!u*`98bDpXm>S6F&iCf zcb%f#!(K};l4M4V(7UsDK@jThLtxRU+8kbUUp3YYHAn3~LFO1GG?|r!n;IvYjOIX4 z+fi%rVs-_As$0!(%Q+GZIbvw@@~};MSvWm&pfo*FP7K=7)K-t|@guIf34Z}r4d}w} zL{M!X(l7h)NB&yw7UXlJzJD>~Q!W+z!lucCJ2ZknZTo-Ng8%k`3LbIJBW9DnNkPX9 z*>Y7cIRS2iVj80eJK7}K>+2Ouj+tZ>$J$o3K5@-^X`3rcUiX&Z7Qx0dURDAn&DnyJ zn^lJZV#9(G1d?qBk`@xRCr3^kJS(qJ#VwFJr_@kTf)NCh-TQQ5>3`g*%%gZn7j7*# z*?uykH!vHVx-(I8QnJ$T^2%1l!YLJ|2jj99_7xPf5wfBOUu%=W3-E*HYaN9QjTiiw zrtpOqeql6; z-OCeix;t_-Z?3_exgXU} z)-L4SP4hVtxeSbqbE^qK1aX4ESvfDad5_Ks;+zoow0)hJ{(rq7xQEMpTyk5~7zFjG zl-lI^MC2X@X@Qa*s4EZBow8TZ61MpY&PpCjw*Rj(RlOdo&Bw`{zs)zOT50 zfFo2I7=&neY)u5noGRlG%s2(da>;k`}py*T(5-^Qhi32HVVi!Dn^Cyc~`N{VsU zL^d?NZ!B*F*wyQrY;Bb-k<|AEDH80&i9iCkC38tW`B=qp_Ukkew6{L^SsL#o?5H+4-?s{mY*GYR|`i z>OOv9us?GC!;#*LXI`&x{>r^PYFLUVpMU;`=l+TTo_6D5E1vnjhu!&(6TkKHsgsf# zZeKioThV{xz(DfxPi*jI>&9H)w=Bk778RO6V{0D(425yCW+Ww$A-%=xTkZ7&}uvvOonu}pHjHGczGQ~t0j7jdZczOcI4Tx? z1~Q2ZnIc>RkAif-ytsaP30vfS}a2-iX)bKI&O)mEuZ}oWrW!>)3@K zy1rynX)OC3jECbfszGVj)-*fz^#ee%7yNm%>59I5x$h~yr`{JI&QVyaSvR8DnW8Wr_YvKp=?j(zKF|W zB_|Qx%O#3TwRbsl=Ee&D&3}nlFfkkp%(+*JxqFu(h}WBl@=MsM9YUMl!^Y6o++1bs zJ&^`J&hKtFAxIRyj``fEEq&2)zX!>%G=_*_zoQF(Zd7rVQmb70Or=N9ffQm>Si*?D zd2ZOZ9){HRF*`(couWO8>>+z>X|T|Fxfg-Q3QYi3_aG8T`=fyTaDSg)WBbpVpRaiS zS1vjKaJwS?C69mb)c;35?oVCPQ#aBtz4*WC4!-=<5vsNTWyB~EReFq85G9A5$Rnp@ zU370YE}8@uVibhAv4x3f+HS3=J-c-{O2J%&a~`6u2)=94a0fbv?lK#;_Ru}Dx+G3Q zUx>;bs3MEYX6r?-u7AW-Sh~(Dnp3Z9F$gz=l1tq2b5hE2g22wEVg`aUh9twHupHD- z5X{mBz?w^g-LGpt-%JVcovm>)fxt#OrpPgp1*Njy6C65zHr#v3q{oU8P;}<#e6DQ|ogPhG%Y^8WD^ zk2&_Ml!T#lgbS=TRkJ5Rz@BX#?hyPVi0F_W4C_8;?#i17oMAL4;y}5TLIRRSpQFjtrj4#ew^_dbc^{5=KGH5!=BLY8NOg=KR2*5JaW9ritgIh(S=D z?=Xs_nG(hB7(s+BuXCdSLZ!lpN@wtyvKZGP$lUYINnzw;OJmPaAMWy}m zuX>>Vn>+qhcl=!=x75nRY=%4-{v5ubm4_pd@E2paWpm{XKEd)h^ z&`El-PSZ#%K{knlCJu_0oPiLQl*2%8juGW9h&G7#lX1GQ`#{$oh@4_FnR<2l>GjPa z&ghoN+J9$|>-y51NE3r(pYFX_5nv4w=G<30U&j#!VY#(%p;E$%i~tErv1nIFw%0W> zC{o-Lp(|P694ItAqSs`XG#wLpN+7ooj=&$y?I5lX;!xoan8duAOb{eOd_<_f@y?^|vb@`FS@45AC`zQbQB?iJLUV;JhxDh{+&CX7#PvgD_b9Sl3OZ`7DmkY!*|GQWj%6T zDDsNYCW;oy)wMJ6TnxSWJ9F#2p(4i!GYm>$d${4er=MQ+x|#3k49V2zNi)BSrcS#e zuWx!!I7)RBFpx(O(BwF-!<-``OB7bv}M@!1UAPFW+$;$tvX;eP;H1(gC@ zzlZT*mwxk1`{E1FJ#5S$TmQS(>hF5L{BIld&0CRQYaSkY#(%}Jg|4gmjVr!&>W8EJ z7e}VC!(SYSzUFG}Cn=_5HrSKNY>DfLNakDNqK8g^Oz(kgw1w24xY2os>cBE){;{@_<=H*G7o;)r#Ijh~(AlI9p`tP^jWWBeSq#JubH`lBVOJe1jBsnHU>Z zXPz8fhe!wu6kq$~%s2PBze7#r4x!X_bXbbO0`AzQM1)PEN67=IrGG8TAV>Vph{7c6 zW&+nMr-XTw7VoHQawuU%p~;AxlQ?QuLa%$VW&;xh`?5;}L%;!Fc3$V7jZ8e(z5G>2 z{+R#Yc|*-VFaEB%`o&$#KRE&`;am8Qw{+c=eDDprMoQPAb!`VE z;^a%}y0{*$Y|SHxYZwXN?rJuRn@I?5>PyqXaxw|-^_>DEv@>*07_T*l@Tkq@Sg+Zd zCEZEN$t>ElOBUstn?_K9jr*wON-b-)YvSDpmTS`u74q7e0Hc0cGtKBm<5tk zgc?bzHYbU(^bp&y&d3+WM0!e~P#La;jLcIUIf79lg^gt%kPb$XkobVbK17qmmNarI zqvjp82C3SHdozTT_r9P6ivq#i{BWhNBZZ+`8!p6YWk*JYI~pQ!^;m3js34?>Cl1aN z&9y^ZOdQ}86n{ksC;^#TpcI09SYVkZK2lhhggvlS|7t(%H>N(u`2!nTaT_3Lj3UvT zLK=h0F?8acl{BDwod=R9g21+hR7Pb@QLiW}o862+0D{IuNgea;2ukOaq@m`9Y{|E* zGp-;Cmz3#|?8Y|kCVFi^2D)xlWAU%`n>z@^^EE=x&3^`wguUiA&?F>(-W~LOa}mT4 zw#by;6-EY~iFAZy?NHcWv17PfdGfJp{Jm0B5*2}=rWZ0XMc5HV^Eytt09zpEyiEI_ ztp$PXUShIbDARlwFoPHQpD!;O45lxgOqOsppRZYBKW#71^^^cs@ca(Yb6_gufJ}Up zumY8X5Pz%(JRv&;A8=$qVCDich$mo*kb(t-vHrf25F#SMk)f^MP&AQ9!XSlA26rGi zNk0cKFcRPh|DxY`!~n){oIt4aUwA@VKp)WN5DGp7i$5PpKd-+Vv^iu_O!V{IRkD1J z!$1G+02r}^EdcbYQ_q9TWBW!9O@lulOUMDZo_}lq_@^&5?et<({gir~lpNuI?DS!mKqhM^9mxInp#RM~<{#FT}Wu&{%% zf|~Eeg(VOL`X2TzAd%VgWn1Rxab7i22!Hnq`kS)SAZI<@vo&PG;6KOOJ*t^cAoYQ| zhjZ@%4TLe@M{BU7xhz}YhivYyGfej^x_KGKDz#9~)(Enwl`SbnhHVvEhb8kVB57dd z#=Knm(4>|uTk-5K#yUdH*&5X2JdQ+$^*EosJjMziP@bb!wmLJ7ZH14pW7UW)(ZOL>W8C+Y) z+vk`PuSaz&HkGKb!dM@>v@&$-Kz|HLl!cGa7AzTMp+a#}G9Q@evB%h1L44~tq=y#7 zF`bt(Mb|Pt9C9|S%Phu9SV7(bz1Y32gjN5@&R>J$`{0WH)@v0NBZU1mF#Q`d*q-?S z%y6~dRLcvXdim7f2w7Z)B7020kn&Z2hD z=JlDyBSreY1L<0=9hDItDSxWxPX?0tFcZf4p)>mb%y5@2LL|OuJ(RLV@TEV9w#)~S zA6E)8t_vI?pEUb?4Urgutd1aCME9cN*A}rh9!7a9qB^qbx`p9^jpemPyp4xe9uI@> zwl$mAb&G;$yJ%Yhu^7(tHe#{Ra!ox&osnB>hIhK-R@d*t5UQIFr%3wm7Y_^Ue6&> zA$jd&4gw-TT!)YNYkyqDUg-YUG{yL)s=i&bhfH!kUURP>DND;YN-zEza^I)eirZ_8 z>b4d9(Qa3LhtQ*a?fcaFYFBM5@<+Q9_Z{jU?OWfc?5o{uTTwsSyX*rTYBEMKIXDT=zCsfIlZPC=F-0aDk5!T>T8RQmVfnFQQLitX^WRq>kI4Q z)`HdBJuS0%q!?Dvf_Z5L;AkG@dNsYMkk!IOk4M0?eo!WJJqOllZUY6X{ZVt0Z6ugC z#lF|I)uXSt?*M~-zRLG0Vz|Awn76H9Qa_)I`wsd?`@#1q{c6|an|`(TeTS+?d*l1m z`D)kWD=l&DST@C z#UJ*okpElHV4o*)|M2A)IZXT5I@mwvWQ8Adxur;EnU+-`*@#Z;{vn_KmkxGBE-kHB z)&)B6LM@2qRz}d$?p=NC-3P2lfBSzu-taLDpU*wi*MIpwKFa=&=a>TWrvKwPzuLjN zJl_OL#S6PvT>y0MRrK1Dyk$hmsQ=&3uWd)3$ZJcA)-MawU&BPpNRRXX^bJ^}svL60c zXrFlaI)BIR2q4_Tm3X7f^D8bH>AlDG`hTCp@Tb#1TcOrKlf%(|h=4qu57%P%>jFTUZk9NB={|r1s6t zz7UZ-y6Nxo;nUWD|6}Y+*6V)6kr~S}MD4zX6Mu;=fZa8C;&~rA#tPNpf7;tz+Y@~s zQ?Rvdhxn#YJTKb^u5kZLXQ+FmUpU7Wd+jH4I}#bDYt~!KzwXDXuPsq+3y`&_*o*4n z4=uNX+f$zV8PR%U{5l3JtZg?28C8MvxVI6V_-lCaeXXmoV1}ol&$M6plGPeuZpj-X zN`GTb1=X)MJ+^VCeb)D}wz~$)O^*-1^#AkrUcJ)uO1jv8@tq+U%4yDxm2)e#;*Fu4 zTRFAi|88ilcYoiW*)wxs9KdT`G)X-wRuzjBRTPW6T|FBI+UPd^*jE2v_wyTT6^@~r zwrYRIS)Tg$e0N5rcYoUcd25!~;Xl>k{eQO&+T7VH94;MM=4_{kn+b)q<@ox zj77N_O5w29)c>kq(se<5<|F?^{>&f$^L{V===A6L!k?|luC>Ckk^&VE?dQ2u2kBq($+|8uX0z_=n_G_;Q{Fu3Kjpu8`APe~_}}y|{x|)L|4rZb_b2Yvy>6>mt?T)`V;GP8v45@`EW@z; zgA1Db%RKYfAKd!j7{>d-WxD?O3B(^<=sq}x{l#JH`oyRB#g+Q=t|B~hIJWvpdq3$~ z*A@0pdGnM0EpLDE)syai@jvPQ7ypwUe(^u)&kr9AD?EH8+#{F3IFC-H?&0HkK0mnm zM=tvh?pJ>H7kB>3@BQGmUw`?L$cft{+SQI|H4DhobPY^pFV91f8q1yp${$pG#} z%RczR{l%|>@?eP{`hQ{9b+v`Ru;s)1`!6j0L-(KijEp_B0gYstAAkJAJC7}^AS`;y zF~9izw3~HX72%=%7q)u%e*cB#fB5(d`|~~p{<9wIAHJzSdf}uW+V>y5DA_Omk4~B& zzW>6iKeYeCjt?JyWAFRs&foXWm5I{zS-Xe)$2aplvtqm1PfiL>=638t|9kGJ+mEk` zSG>Nt7+W%flwtqz%zw*tfng!-$FHd^Kj|-D*(}%07sKS-df6OfkMn9XioJ~d`Qu|- zpS)@><4=IQC%@awvz#iduFW>jxGHc;pm)#S&CbzgXTXz}YYLloZcz z2<~ym*$&f4FYot0hH=C`4*Wh|FGgT&Y~C3etW!p4Q-Nay_kYUN! z$@$~A%)ImvB8giEB5=~e)xO%c}@c0g(t*g_0*R$?C{`on}r^|Cd{LM>@R)h6>~j$H<{-? zgVF5&wSRX1ZOYkl-sKYEP*6~F)RQ?Vb0$DYtSE5nRT{@A^2Ki^|j zjHvw4&rkayKlbpWx4-R7VOaB_o7%s2_CK|&XMdG~86UeIf9&S`7kcl9uef5NM~)x6 zlf0*Vdg8+=%wt#GV|V={SN8M!b07TVf8>*uC;!tPvHjbhU7Ix?{@y2h{`TvCsn_@y zy4?Ac|F`!0BadkR+UtMCAICg$AdjAsKm8)Ap5Gt2&&jBq}DLJytnXB?my{e1BMZe003{p@!< zJpb8_4@RJ#amDz1eU1mK^B?PT|M}lf?2~65>>0PY=Yhx!43|AdFfrWtghE-LaYL_| zA_em$EC_=tI(494)?6Wx+z_Pva+_}f|Bs*Yb-VlG-#!n4{76SkJV+V$*DqG>JA%Ejp zLE&ax7{3Pls+5kw1b{=7XQ#N>*o<6#Fr#wLlxw|+I*-N~ra@)Y&SxZ#un6e2!Fn^dBNzzL!hGnX)vtRDwEH~X54Ms;g1Yxj+Ei5L} zLS8lp;bAfQ;Egv8uS@fYjA{?e7}>E&jju7Q;s+8w9i87^;IE8Y0)>#95F_)$XA(5G z%FQDk7oy4gj0W=Bh4Ow%gJqlYjI2_+H<6%ueOH@IIRjt;c|N*u({TM!__g5;4SJo8v>uv@Unc+a;Z-js-_KF|l|(Kk23 zA95vOMG)1oa5cv6dMr%(q>a+DQ+Cpuw>;izQCe&w<@e9HPqi>2AOQpsmFy59+SSCe zqXwPS91<_+U2&|5Rb<6DY=3(R5QFF{XrP#aj|`)PS${IL3s?~e2%i>lAVxxPt)stP zE~GD0U3r8jQdW`$$)!ax(3o31df> zsc(D|KA2yY?;L(RH0|rg`;xG$d`gTDQ?Mf*H!Ss{Sk6aXhoFIgvwuc<_RM9&t#ViC zln9$B_PQh*!KYMO9uex19^e|L2RVK zRPWmCUwPzk6QAmUCQqC< zl4`A+Wqjg>-j)lOxhMeZEOZt$eiyQauc(k{*=-EB`yR0Q^GLU`39rg|GD9{T z7c>pLYqt<7iUcq{t?BnJ7bi1M=5kYF7Vs@l*pR)Dg@Ka(|?eAPh-^ahcmP%@FiV~mt};>sSpyDZ+S%U$DsTy#Wpk&ey^r>WjrYs7I<(3oFw)X0UU!iY zrKVOWEPof}%JpWCDQM6Mf2UuYK{qCIS3QcTGRd?z=Lp40xA2|QlI$yf%3!xsTpjeW z-|-bu3htb5abdgFOp-oEUmwbKy@6F?kwv;d5j^#5H1W23P8nS`0$f%=)vBW=yW^&6 z8jhNd>rc+&c?K-K$;(b=eNh`u^f5j7d5nb z+r)Tj)lfjO1fk&Mx*r0^dEGdm`>z2GyaYy5&HAc z!F&fs@8LEilSJE{-V#8K-o*mHY=5jkgFd5DUo7s;|+0U8-H|?4>1C7 zz?0Z-!kbNPq%c#m5m|(+YyZG7Ii+0K)hcmuQ9e=zuCzZ6RfeyTQxj?5sydoAiEOw* z8kN*TortxZ5uvX|8dKL!CmJeN8@W+3`uIGOoh!C5l3del(50M-%$)h+?R1*eHV{a$ zQNMHR5L|Udd=$U*NX>ODVSoFTlC%a0-KGxRW(wV=58Y-9-KGuQX7RS0WX*n%Xx%WR zAWB}W_Z$gqz6igKQ@tu^dql-qcrUOaAYni{Q!o;HeG;6>?T&CSq1?IQnj;a%bT=c~ z_5Sv2EY%o`s%_rLYChpKCTGfmBk*;xU+0{i{oZ|5j1*4pdp1XxXn##~d)%CGLof;b zTQ{`YBf?KFxx*sP%Rvq+t#n~Tu&@66ai)-&XC|Bb^mvnUi z==>I-A*(OwR5s+}X@7UA`$P`kv@@G51)asR%P*xU!^uip1Qgz#XD$L4qJF4g^%x^I=A`&7y- z1@H9`K=$snZor1&eScn1@R^#z;Gri-()m7TD(aBHD}SfToQYQIc&+x8QO6nP`;J~( zFnr(QN1x7&t)dKg7yh7URqd6o+iyb@ykIbc$9V>G$k&d;z{&Msrn~hC8J5w4BVB_&1$F0jzS{bfu?;?3WUvWr{I*vdzR1TPDe=j0uyuyXD}G+Qtz`-1%WblTn$~$g-``dO>=w=kBpn<>UFv_7< z`fZAS8bFcSOC?$4jV>|E1l-i_grtW2=j^NZ*Qp~EF5hNR8>EbAYpRw#_5m>}KG|SL z`?*HA7=Brx$ba7~v2-(PghNcsPTE{EZe(dC7MCk3B*4k@2FPo8 z^&M+ILP&+KEb{V=fp_*bPdJ9IjgzbB>tL|O7KfH)*5?w0ThG&Z?~xrU@!HZ;-Iyc{ zt;F7Joe^HY@nZ4Du+2Bu3MN6X9-%2C?gdj)YL0?0DuB3n-(fP-8yTq5H<4SgkRK~> z#DCmsRqb8z@UULYviQ0NS?g4XbN_vV^|ocP3vWShcBrQ{Dsd4vE@0~)utFi zIcAqi>*+HucE3p6>yYmy}s2|3J^ zD1$>K3`#|)Vwe{Aq`2`~hSKQB%WbdTLVxY-pKRBg1reN7w6J%cUJY_uD7~)9$LeTD z^Tu*7B>kdT*pd@nfoB&m1D+@y?PM1bt62I=m{Y3|24S&w%UL2F@-RSFzH-OjAvS~O z2)cgHN*wJmJ_gQ#sY9H9@6~$qpNfs54PjKPeY&= zi@TW)$J!#yj~m@|$+@WgQAa&3Moo-ffn>O@gtu__Z*f`w9WKNn1dBg99?Ywd-83*I3j`9g%d5hIEXxR~p|@hE{uhlUNj?OPbF68-&X*fq!P^MyFT1 z&Y*7>ssgTqth#>2^aCD_ueADx^LM$x{XX0G%n5 zNeow5Y+2D3JkW%b)_34K16YOqcK>U)I{V!$&#XPBY9y5OCdx;v7{k z__*J*>wP}Uw{v?k?tl3v+ZqHtzxkn|0R`dcL0lCa=mC9>^ieEO3{nShwZU@qS;#`Y z8uH2JpQUVkV-OHtyx@5xVq{B!eYOw#f!5V#Yj#TUdi}5jg}q7=C0G&!zQC{+LOhO3 z27U3i%3tb7p-G|5Z_<~j@xvn3UHMs!#JV)1K-v45_^CmBR)5YR8Z5;sl`oY|^4L)T z3bOc#lvgiZ!gSOTJ=G?@|aBbJTdIggs%zeDjQ{}>AF!`dN z8g+t|G$;-2-G3RGv@~1o=3pmFcqhiDXO@ZJ+rmIoFX?VljP-z5 z-RP#=+JKu}A%~8OeKNlbrZ$MBfZmrGMXnK(_Q?7I9CRz&+sM) zzC73A>mH_npNm{^S}YOEu=~<3l1(QqSShtEEdnL|R3^B_nV+5n3-3 zI?2sy0HV8{`#GOE66(trzg;?JHY@&`ABo|3Y(OOm&3^sgyshh!z1S_0QuyMob_v#2KqVP zdQmbq3WAHI&0v4}`)V&D154ke_yA2c>PC<&F@K-L@=eDzXy)b8a%$cmmj)Jo;BE4R zASy5I83Amo-aI>Il|ZBr&sVeLb)-^UU>8rjDHFcw7}l2w<(bS=>qK(i=`inBUZ5p1 zH1nZ$O~PZfXIuISOd5XE%l1{Z0OZ_v*PqVe#ImcqXR{YZbJcK}p?8F5VFy@x1lLQge#Wo#kn&Uj&B zS$0<{@%ntmzVU0zx7ggG6uYf(;WxBq#Js2M8^6~biu`Ke!t~_cZ?5o(p%1pO3i%d+4z43 zcTp)EiaAi%c}fATwamQLlT-XtFz`lHmWoqm$FY0Oi{iUAqR7`6H~I=D_JmmY3TpQi zL=PNz%ZG-JMj#O7_B+#r9n;vo1lbrC9q`4^`GTRMIp*?ywSgsvz`9e|xdU70qD~m9 z*~@QUI~QEj^K3idk083v`Qpdyy$t||(SqoSCxZTVMMF)(B#>2S?0T_M}LPKxe) z$tz^cD%c}H#nchZb+}Sp!~X#O3iUcaWC07)fZi$eJIFFsvp|7+>2<&`%zuA4))V=c zb3LEZe_p3_{#vK(tH0JMDf@Q=;D5bBX?ZvGEcBYw&w4KQvzydLq_-OU9t6rTD5-)~72ZOSG^k*aD zKXxnHF>>MYlk^^DkLS4EP&9vXxQpO14#e&`7xZVnng6*JiK>`Gf}evz8HOQ*zZRr9 zTk0x3NF*3~PVeN)hbZ^6LGt*pHWzGbioBjPKwm$iNskn4lS9*055EV8G7R*yTUEAy znLJN2Ne6L@?ML3H5~+DKRf(WeF6^12uSY*o_p|TvsZRt3OMcJjbC7@NLq7hwR6Li| z3)tEnDPBt+Fg}Gq%Z+Y*z67qL`<$#qs5t=U}}3d(6o z85i|6e*Q)A@Jao9uO%kIJG9fF@%oxNrx~-oH#s7B!fm(yqE7zY452$rs=^)(v#$<3nyonL=gC_{FQw!8vxDp|+p&LlOto7coy4Krl@xjl%Ic6;UtU5Z(cB@?a%}z#2MOwzriiNT>%wlG z{M39%z2U2(sO!AG*$ZbqhHE+;)#mp+?D(F#BExyH*=y358+dgyB!Bq08GB!$Ph_96 z=sVtpM`*-C>bnG>$5%MoySPt18zt8XdQU}i#!XR{uCsserW0qZApv_Ul`s5@y!%j> z()A!6w7N+Au#B(Pm}Fr^OOa9m9M-R!0I+jR73kFp+Xbl2+al65DPNnlWR(T-JJ{|D zSCX~cHBQ^bI}mq{<80|@Bmi65M$E2GK75R*lHQ;EE@8_yx!y7=tu^i2O{+uuR*lJs zow`De1Q| z!Tg?TMiEJH{`PsCf7r%_K^ra+JhLjRqeXL6J8Fz#+<@IQ3eihg52gq^SqZFNM8T#4 zxC`V13=jD@h5 zZaGNQp@k=G!I(P?yjVgMuVJCoNnPdnZgC{CzjCOm~myM`E2%32Gs>WP1G zEu9+b6-rjkIEAsKsc6%9o7*Ucwug)UI0JzAcdYD!zgo57HQOtj@K~O-GDxi6n4vM2 z08GihQIUKUu_S~YJ)G=4%6o==6J6pw{0-0*lsj+cBQgR7q(m6wMAJw|XwwZmv@M@_{BcR?lohwfid=)k^ zA5oUxG)~(4SBb!)a#=iMM?iISy#~OnLTFmOmkD;;k9rw(dvJ1OJB1k(Rzd?}!5>(& zkvKR;Z;3{sR*{5;r*m>e@yAP_6WZwpBt_|Mz zmkJAuW(ziQD-0ko_v|K|J8XXhlHWJsN4=|6pAupe4!JOAbZPo1stv>|IHr#SZV{wF zvGYp{gEIg!K(!T8#dc4?Rs?GA_RGCS>iuc#LPt{kDo&drh*6H#^TXzwl;Ke2X{yt| z0Az37C2?b7mh+jMza}@lzCqALo%ZhAvLTdsp6VM;k2kv`bb=a*C{E2;)`A{Y8>fQAvj2Qk}^cbSGkui60 zAE@m`-_=hxq^y){MX`p|-%(uhDfsFZa9^F;xkXb+X!E2;VEsqcS})UFzs}6OT(G5+R+QV-*KB9vS8{Y zyk>19c3?|}@7O;;;lD`(8#WY-#jwxyZztYNy^ zf~G4H0YQn6Hrm?d`LHasXg3Lfmldfq-_V@bkVs!W?pA-6w+E!S<8M32ZpnjN167xI zwhj1}e5n|0X)%t@!M{KaIL4y_@-iLn0-Y8;l(|AkF;gyz zkn7ZG&4>$H$nY-X01fbSq8y1U1W829R3_AvtIX`MHdZT(VTq05>f)n3HGUt^4Lmd! zssnkKg&Tidh#}eK<=nE=L@1bgrIVheJ!O4$U@_KGIxy$>1fO6heLegf1n|cEJBss< z$z@3jSa?EAS9MrxC}WMKBZT_CK&BD)7CK8EfIQs%%oYCy`sLk{nvicL`qF{KMGpOK z1LA^;v7PqQ`&+HO7JmWw^__p-i1Xei%sym6cenZw`Q0Sa%wX4& zBME>Y$x}}wt1WhbAo>e50$InVlG8&S>jHc<*sPj6#p5pcu&99S^0sw~@gk8W%3ITn zCfX1dDxbn=s#fWpCu*H@>*8Fgre#_5ULFF(zS_lsf#Lr=2Xnm z0hj@M+*%?I7$GRB@g2?K@u^eNW)~5%O6d!F($QT94F^T#0Yt6un59wG1iVkN!$B!b zKaK6T%US8LrPYxpn;y%8@rHnVmzf(_=AM6ahaf4?VNWy`8q&JBnYW(qRC!Nce;TY- zmY+Iu?ffDLju@&bI563;-b+c;zLf1O*2OEiM(*OnleykY`AsNo)>Xl!1lVE2)w!%Zk{0gp7a7 z0++;u@Jv;Qnb78TSb!=zX+9yvm_$b}SWPEHr+oe7QuUnY5|3!0Md-!%r%pn|^%rlT zXjAeNB@>DBj8CwT`2uDAfu~4IhVIp1I72LJd;GBGalH1rrD#*%HJJREQdsN=5)@yX zlH6CaYBTS}_8xLl-Qp)iVSKsPDm{NG0z-xbX7LngE&M#{Pk`SkSvXT}uQ}RE`a80s zY-JmqkOs@y{*4Qmr%RxmA;{uOhX*FC%S)+A2DraRK2j6|rn_S4(#!yT`+(Mu&tM^h z!hONDOKsu>g)rI>&v`KK6B^Q3p0&hBXmdgm6Tv9_FssXSnSlD@7w>MBY`lMwOe$SZ z{fY!Rcdamdn$J)@gOBUcRSWvs-R%lRxAhvl0yfyIzT`Cc6TJEgt3Q}LUI$!1@BVU| zL}xa&87*Y^q3R6KqRtz-5|n3yt~{JAM_FRZfMX)XWg3@zbqc|> zC}&5iXm<_gjcRs&8m<>KjM)+n8~+6Q`uPx_ao0JOMar@)kKs>i{7+p;7OI1@B1+85 z{o~*HE6`Q(n*6&Fl(S>J8kCR2-;x*}&al_d@7#*{t6yKz=Lr1j-S2;25#7v~4fk6_ zgbEmlbG7)k056>8PDW)U`V|^*Mnf(xB*-$~FS4xy+$(94Fp8=Gc}s-0u|_}V7OX6q zb6$ohT!Jw3#jK+BPnJyUhhca#}+ZA1Qvgywh*OlJ}$E2 zHMLK8OJ-09g^k#5ysBXgYX;|#NMx9`ahZurc&js3`|I<1W^jMg@G^3|iN(zJ)O#}h z-oEh7>3(MM>3e=HLw1b69{ygax&vS?s?P5#kaY_HMYD_ z`;a$2&FS(`mc;IHlti@^WJ;&EiLfLSX1BY~fe)~ER$rYlGv2mmfw@xhU~Tp&$JQa= zd4wFu(o(eXVMwmM_yfS3#rcAylhWf>xNPEgBU3v``f+~+C47(OP>r?4wbi%l^`OHC zv9!VIK71uu*5s`D7u5N*xO*$Te9eJ=V?5v05s?h9nt+W@PaTdCR5`kc5poTiv}5_G z`H+bO?VC^3Xk9^`AK~f?%Al?A5fs(a!0cwb|S1T_&@FOVyF zGy>wy`VN2T${_gG1@r5WCDTV=EF*k|x8uJ$VkQ~<>Vf;hp}7MDcDq=#2?V5e95pI7pj4BGrY0Efcyu^F>ePN58AmVj3 zT3gq3b5Efm?uP*BQJGF3=e_g#$&3t)#`HRU1ec&Z7F<1kXh;QB;{n_z4Te-rv(u;` zE9DwDt$LGjeP*7)`)#eQRa-GuC5Pw8=I4k&NGJP}Q~Ai}^;tBFm3-~PA{}N@!KoIV zT2y~^p>wY>r@BCGGvUl!SfR9Wd+W(9YDUOvCV%&u_i{u#F=n=O@K|g+E1Md(*`v`F z3J>ab91%$5!=9lkEQ~@Rd6b-xW5L(pO)$&?LSLyf>{#bFb5dTh_Xxbu}>} z>OtzzWSqOG$u^*DC;82uusmU7EePpomkW(U7xNyQD}xbF(gyoG&TlsbUSL`pEaZQC zZ7YW9>8!Kiw}qaiXCar**m8uGaPaUZx1`EwzMJlGwUR3$|FHL0t)j zO|d2E04oI0Dszxl+4+DUXUGLAnH1m!8jUelfrU)(N`+sUq3s6MWK>fMENr@y2{Oy$%p328qJY>GL z=$DYA@^#J?7w3=%#ow+5S0A0D*22|=3f08;v`uEo2J~@ec>z+DYf5!-G9t8$GqfOT zE;U+)sf_$MhB*Wc88q}5o*|WLsInJ9>`BF{W0jfov}@U2{^b|kYn@H!S3G~mu~|wd znY46Yfz8FPxJbLxviF@6js2H|v6L+%wX%I*AL-?3MtWB>cT;vih~@&nH^hl{zUj!! z#1vZ!qIUXHshz%b{kwAS{L|*uZ2;aDpkQ`NS|nrpoAnQtz!t{N;EUH@ox0Zj@s5dW zRi(m-nO=<+ zp@52F6>wB>au;v-i_USNyU31X@tg=5c52{PuJ5)BrSHtR?ZgRU7x08l>veFFzj5U#T3T&0YqxvG2R4qv8P&KI0ZWGRKthKA5LW#|4a zzI8Gsu{V9ny(ikr=o{C64dKe!!6%!j!#EpM*UGs_mGG-$Qn(=5L zDGs7|?}TqI#rWPE9O@2Ep2nkJyPQ}LpMVvWfb*w*znsc)UlNzI!+jnbwjd@yV!@HiJJYEb_H*ngj+rOmT!>739w@5lD2USVASGZ3@dD% zt?6U5S=%oSR5$?I)CuIqi2%nc&^LaL1PhP0;Gu}8&{)Qs|8cC*MAX9dPsvH;HsUW*NGns)IX5m=3m<_^}s?a$NUcolrK^5iA zD8j4N&!m7W@C6W6;-kuC@;8zOA|FZOl`%bg&g}U+AGLq>!eT$~3&m?{B8jhJu5KbJ zu3}RV);fPGa{P3C7~>w|+LNrVO4V43&!|n<_0>36I7iS` zE8~EHY;ASHCRVHGE7?(hwF;~`8@0yO6~kX9w^H9bQ8`GR~u)~p1c z)XD#U*}Jo5WpPGf*guE^265mpfiO>D6wd@T3@U#rD#NdTE4bFW@9w8}KfR}_uj0#DPzX-SRZzhc%K?kU3z6*KVX9Eod#m2=q!@foWI^Gsq^eE87Dcu6Rerj>view zmvepfBl&*UPT~o3i*9+@#y%^U!nS@kf+bD<@rX9}QgNR4!cKED8P`74Z~o)(v~OEY zyNiP))B@uaX}3&xDPFW7Y^|J^#aJ*l2QzK=CJ{6p>FP1 zdk75nExbt@zmL*sYwdf@5*DXe3=gmM(&=A&@fN-nHx%k%yUENGK&{yBPe*s5M3aZ_ zuYA+u*Ii+ysNkmVZViu5`RRIUR@3=N9R1+Y$Lt0Hr;*6p9R}Ma*MjxrBEMYfyLx|` zh7e;p@JVp9ll!#m>O;R25SJaScpteJZ}+YTN7%rCu_t$7u`6fE+x47Yq9G0vA^{GaSYjLGrp?8}06kV%|3LrKzL$h2=L+)E#aQ#zy|> z(J*gb((lEE$v2X67f$Bmr86h*#7=*QN2ofFs_uG>CtDM%QI*N1xhbAuz@R=xURWwC z`wSp#thHk^P>`K?)BguAzSGej|1t#R`TQ9H^7SUVH^0H(LlW`M+iBdg8DlyqV;*La zY4eDVc?1UlbS8*-+L`)r#Xs|RTl;yOg}1xHYn~b}X4?V)JGydD(Y{7-e8zv2yyq#c zL3z#f^JQ41!8`>yJ#?^$_gYf!G(*GBh7Pcp6X#y$5gYRe4FJIIG zv}Vu4IG9|M>TG+kiY%i8QaR_Cw#&PIG48R%Cgu-G%p);N@m`+%i zu*Yr$IRXI78GCo&jkt3U4Nx+SC=2&t1_A^>6ofJEv*Q94klgzYw3@CX0c0(7Qlx<% z+rxXMvF+86d#uoz0ssazU9W@bdgwpt_#e$GBB*i zU9(f0)!|eJ^yM2>zKXlP=+=N>jlQR@Ql*}*wqu8n+dTxK_DL-VTet?AY%+zR< zo^N`q>jb|m)( zdv7#(;9jWGN=-i8Wn#PyEFuUpN5L6_KrHRPKUOQ_s4D0P3DT6(kQG@hl)Ie6ikwVJ z6jb1)4ZU_*0Z)IIMF*6|pYn;G$Ii_6e7M(QtHSuRk7f2du|fC$o8SHu0bD}=y4JuJ39^`*M^Av0#SmL7!8|`xIG#z#? z17t{_K(2#*-U4yddnAM22$Y`I)P-#J21n<^QfG9*K*m^H-08#HsswLoo|t3SfS}OW zcLQbI$Y?N4BjXehWSWdK1n`z&d+Ts0jS%40vY_dK8|C0Q6=3&6FJLnPRV#A@&_Wwg zqQoh)>8pRNQ6gla4jH9#f0)TH3b%rNF%zjE1dmWQ6a^cN6Lpl|T;slVK%YSnq6F7H z<)T_|#Hw6B={crAg5;v)Dgz@;9a;n;Y$^onfEY(*a#T1Ndk7FG({k4v0Gt*!s0tth zyQTC10H}hpLwVNEIW%NQrDcSs&tE^_d%c_~qtbr~!%%ZUk5Wh)4FaUH?~Q!jW+5WT zk}DEM#M(k=P%2F+2!QM%mXj09fT!^ph8T%5`9JiR?p^Y9U#hAhtZtv1a}qo1-2YCBWHMBN)1%@hHI! zO3{CJ`dCNh?|DTBJ0jYnF3Z^5e_v;e08J>O^(BHJe2rFb?%91Nr*Gjh*K6~LMB@R@ zU*#(*KIdLI*HwWrpqbusqe1{NQ6V>K5D!&^xrY+-l>$-)G#gW1WRzDxX52a~A&>!s zWn$b>Zwb&ToB)um-puCXCC4;9^LeV`xjuiRd3JS!B2{}NT0QS(q*nMBEHdfFe3xu6xj+qLB{;J5Gg}KP)L4<$f z%EguNBwC`#?Gh4lxB7|M_i86ASqTFKtv27Zu@FdzuN0}CRr2{_Z?vi~L>G_8M~Hhc41S`dFTr=>@* z=^Y#p5&A@IEqu)XIxTOSP9F${Zq4DwPU}B(B_#fp<{K@qXv#_>D2|{k>bb6%k1MLw zAOq@<>0qYJ4Z-q*d#;(3);YZ`WQn0E5Gm}dSzVC>;Q$=EeiYF5n*o2dW?HUaB7`!s zH1U@Ld*N*TAl(zm7&b_=g@7(BHL zYeeS!u+r4o4c7MH>HvR}PSFTJ^l6L6RBb6f3~)SOKisI7&ALZ!jX7E;{g={s5D~%~ z6=0;Gz)&dDw*wnrCQWHg#R^YL#jA^lfg9^TvoA9F%XNI6cWVz^48^2VECP`J#vtmH zO#4^jc&73MhqfsKto@K{U*y;id;Dj%QFZP*B$PK*4^nucgSR;5B!sZCb zMztrguu5t?5e0%xpjOnU<1D2Fv0U2u zI0Pxayh(M)#X9LB=nV)laa_os%g3lZl#p12W7MEa=@fT!KCHPzFy{bIc6y-yAap`VCgc|NUe3D-LDqT3LSqjc3-1|88wt-GMiv_X1P@ zR?GgN(tW{>3~)>vxrbf0-;h<$wQ}qFWt^+D9oB;d+li`1=`ZAfvRCZ?)$>8d|5v{L zN6S|a594Q>5I8o3V2g;DDG>Qe3HA`44J{C5rH|i!+5c=;8Bl6N>?pYP5X6?A#CyFV zN5wS0%ME{})h$4JQ>n6kTCe1BNU=QtjZ&!gkQG1WnjlpMJ*TIs_BOoEwZL78#6C^&jQpyF?;lWuSK*JToH6o`MZ3u0$01G10%*hiG%#jOE?zby-q zOvn}wg(1UR1o0+-0LgNnIZEFioUgJgVbb{#I8z2!S;tQU5C}+yL7!o>PXDs5vkQC! z#8E+Lnwo5sn(CB*(E0aUHJn8CZ_9x^20LPyM$=?QXk_t-WIEryOqN??+B{T6%)ZK+ z>|%dIQ4q)34UQneMYcFg7FS>hGNf^ptKwK!n)X+_yKiW_JEw2heIGBNd^4%W_H&%( z;{pLFv@9?(x?dO=wiaYAxSKczMuvjeXI~MsBAL&WZ2*a5fF)Mcz(8#6UzemsC}<)u zJ~j(nL|@z2zqUW$|9#cBIwZy~UX?g6@F{;2bDDI5w{B_BGADrNCeW~nW6O}0GWO)Y zK#CckCBAvE8;E7@R-m7oBn$fM5LwCBmP&Ih)+F`0fu&QSjA&I$_xjfBG2a>j@A$Pr-kM zq2o>lcwC(N*o3fs+Nr4}q=kSqC!KUMAflLT4~k!(UAZ@)5HuQi1cv_Ctg@swZjXuofbf_z~C=WhAW0Md5eU#<^LYSDkTpY^fp zV@UpLeKcO}9^+PG*ETdx>v)d$d0jT^Km9pCMsq)oCwIU4Z#120d(EQvSD*fy`MxR! zy%VAGb3b7cLw2DNf)vm9QYTRcryhk0&SY|xSVCM)9+1wybVl^ognc0$;Z6sk19d{6 zPEbwDYjj%GV4Ep1nV+LY^EH2e$U;dea4^<3Mc5rv)Pe5_|M@R$@RF za3BP@)7j^O5lPL6@AU!pT>yJ@`WlaVnrVRfJ{ccrxtgb*pPvNG&k@ouetY|i#Qtdu zT|TUPA-Dw5`u$g%sl?mg`Y~r3*^Dqf$5Qpz^#vZ(vj+x%?9k+UCBAJL)sj}J__hFULJsbiYZ-*PQ0&q7op~)cWj@`B=RX@MM=vhn%l%* zc^0AoecD?zj{?v)wtC{<8hvg+oKz`A`nc;3NOw?%Oaad(hnOUgcmxtI&`HjO{?edc z$qQshL^ywqtu|j&HRyrUe;&E~Os)VvG&p^J|5r4);((Mu5f}})V|fL_hQ3*lf?TA1 zdlzsizG5~yps)GoQQW!NVLy8PanE^>@$fjWkxNc>?Fd=CYIW)|W zoG1BcSTTMOnQ!gtht{TV7(i>hb;ZA>#EUo6k~n{(&v*Y3C0?}P0(1+=j*JOdfIR{u zF=e1+u*Dz+O#$}U)G4Vj$FC+<>Wywrm%p$9P$V8Z7VVe2P+H>FfdV)dP#Q%hUQX{o8tDZKv+&VU)og5{Ix8q2zi9 z5|IEP`&;7tNjgWrCjaO^G&$%$Zhy>u1fz~7`%bXfnyIPU;%j`b^&^a zsFfBNJE!DAJu*sEX_ZkMRb1JXtr&e*bMonC5#tW!R-+?oz!%A4_b>-~2ToX~b$qHlY?72UK#S;TvUJ8KZGFs;=r~$1o&tT~XG8;5&S?EX(-p!e_VaYGP}a@mPOh zk^+_+S-dzg(h4qG<%p%Fydb9oD=ij_Q-0VSc(MqlB6Um1SMSt%X19MQNc^Y%YyfOZ z^r=Y}Bn>gpO9WsUglnM1{4MJgoOg1$%e0JC=Om*WmWIIRI%+IXM^o z&Q*hOoH<832eK_Yr7@ys2b|8(!U=y2I0491I_N5f1_^#3Sw*hgg|R|8TFeKcVFSmY z%bZJza}?{<2#6t5y10|ovo32h;ocE30kKgSHjpu8NlOPJj;7(89lwk!LGl-QO!-B{ zT7v}hWTxXgxKr@>9k6f2)bN%&>py2n14g zh_bVC?|ry!F-;osL$ar8)5$a6K76mwuqnq)$@S0gdipz>&mSt$iq(?P@4xtNYbM*3 z-At~#7m#O^`}(3g735NeJ4|f=!ZrOZ<>UuN<6gb~A={6kpos{;`a`-C6=>!zO@os1 zW)}Y&2c9&P_)ckFQl=P&J)wW3Jd~4?a^jG4acTpdlA@GfpK0X$Lw-X_Y0^9e&co2c z0Scx}`E?o>Hc6(fzu5Zuzt;l|s@WQ-Rz-Q#QZbB{R1Zq*qA!AU7o-}*nxKYhLh3YB zcZLl?%vY3t%#9p^U%Wge<%_NZXT@66BKl3Fldb~|B_BT=`9H3Vt$H@tfI_v`HBfUXD3>-ou}OAp4l&f)-lvB=(h{!=!%=NtfB zm8L@*oiYt)3fiZ1H)wy-ey6F(&H=XP?NhQ=`jG4@AsyJ%_=>%5sn7Px(P% z|BK%qWy&hoANE{TGFi4L5sE@rHb$XG)nHTB8EOGa!?0J@*#vg*|I*y1$+ z{=IUU7KlFckW7|migvVWn@rP!owEc%( zzuGp^KH)Tfix15nj)VN6-|x1K%ul%3Q4$!W;MvKWj&jJF2FQGEN}=+(_5zsM;Ps)# zLD$KfM<;J+{WZ+>*Gx(OO*xa@(vzL}TK*47_x!^)8^4u7VHf5lhT|7bsGo%eV9`3VQ<^Q)N?C^Lb~fgrvc zq0qZkPkhSM|Ly+EJwP}<#)3DyA29CwwN zReSqlmtjWRQuI1%aMtJn1l0i3%82_$OSiZF*Wbjy zm~9(q250>u-bcC5Xd8oGSCeX@x|ji96O=PmJD=sxb@N8A&Gk1y9$e)AW%8L)&9u#t z)xiac+UWIiG3%oHRa?po3<%{O6zOduK25T)AnBWY3IzW zJQ#lyLRonu=qO$OhxE|8$d@VUyzkZVL$7xrzWideoeeEFe)wA)8(6gN{u5pO>=f}A ziT?Po^=AAzRvmzGDvQLI9hztI?_o0wpVMpBciiku3HmUfuS-jr-tcN}ZH!-KEB%kp zbG!QH)t$MX^~XP^8;?KyJjrZ8GHw5tdH8>;3_?Kb{eQrr!+(wUm&CgKck83qiGW^@ z<9)AkZWfnMoF;_$YV#Wd?sT2fKK>;xe1`uT7k8b~J*o;2|8u_3y!p5LL(7`|06nea zgRf)%IsdY+<9;YR(AT*7Dku3&tW(yU*gqsW-G0_}xTWop-`4TpJBNey-<&J-{1|^_ ztGdv3c%Wn&t~-z@Svt2N5f)HBe^zPx@vqOHY%ZI-`TF9^j;M`@7$~cQHC;EYN7dc0 z{hL|m)$a^+ICI+F!wBcHI5YIdRDHr}+Q(Jz%)f2u`I1m}er~tYc6JcQXk7Jf)jm7a z{+n>xjsZ}GUhj_dbKiAJL%DW$t~Y<1xSs3jwym?M`bl3@jlE3U4~V1X(f?588C}L7 zs(fA!K2-U<9Db2Xd6xgbQyk@QP&al^}RutzrKHW=J?^v zqT@OKXE~nZf0na3{%5)P(D5INz5USfUljX?j{hRt+YcT8iTC*I{};vn+5az!o%`(n zhird+_Wvv1_#xSUt2g=|mVIW;QTIi%&kTA|&FAPB<33a9(+|r&$ERF5`iXxCzv4$f z@r4h`{yUvse6#E`hklg5S$2QQp~K}5;Z8Ypz5Qm{KXJ!zmVJ)9`61lrxZH56e#X(a648 z_Bmh5@tbg`c_JG>Ec+a{`DWSYxK;H{xX*EI+oo3}df!>~uzZaW7)pPTklLX4c*$9| z*1*{1KrvM){?>MU0@_#QlwoSqS67z14*Xp6`VIpfKM(-&7t4-;^~JRh0L(nQjvw;J)t!FWdPv6!m@w=J8C`qS85&nPOWEPAiq=V86C{|2Ws6RzfmUA)SpPp_{qM^MJzxHXB>z=jKly+E23khT!{V#_%>?^OrQUGyV1X__jBA6@*&$l z^}^;;FMMeAul=Ft5BZz)0Wz0sS}tb#oc=fH|9QXuklz~mqzfTBiTvkxbX*E(|7U3Q zxxCT!%;*1`uBU&X<^5A0`+5De-004Sf|tdg`or#_Pmg=_+l*+e=zJ!N|BwDY4{PE4 zoU^B=Tu-0ZGCfLn0E@)_U`D%54i98?JM zK>#4aCSVd$b|IA0%}qV$K)wqjH~=)Z=E^ZZd}gJy zy54_V%%i*?jJT1r^LE9qrF4H_r6iA?Lw!biL#}o#tnvRIR-IxZOjGcNwwrc5H2x zW@Dg*doc+JhrQ_fgC14q-J7phZ8p3Z*Fg)35vz62=Xs=y+s&Ds-W#+Q{5N}A9C?3+ z;osIlbXb>P9N&9>N&)sKh4(>=4f)C$UA1B*vev}Ivhx?%Z%1D3R?V3Aa+xoEzSC~- zaj$Ze9)Z^`f?a&Edj&)WTL=)}r9H$r{vv>l2T!8X?Yu{& zz9#yb7DG~#_3;e>ZU%qJlHA*H=AGPbfWGU3veRY{nD+>u8l9Z$bK7ahlhVbVm04L zQ|FMah^je-s&1^aicdNojA#TE-&?lVYw1y{Uv<9Kir12uXYLXbPffOuK#b9K3KemO zm1aMvnjngsLwr-%rr_1QT^P1~X!M=4XrHhZ1z8$VlOO5f_Q!N1^hL z!xo%y_Lw*&=>k5>aiQaDv&Mg9&7#DzE`xpawKk2fhw;FV+*MJ_Y0U_tco1L1(hu|1 zgW+~2h)vX-SGBbYykoFRH){hI<@qfTyXWL|?FSqtySQM^SFrAu#^`v~7L?k}f3`PU z$~pXLIIZXQKl~gV0+8qsj>_6 za&}<7lg_VIbSbxD}{Rb5-F)uluOP%F-j@_IKtn|L~Ay%y|)1Iny} zdVjvQQKkjp%5sgTv6;Em*4AHdi$No<-VewZZAZlvLFz1L0Hc=ne`pCrR7bs#Mb+C5iCTwsCAKc-U(??pK;>lp%5I~knq^m620@3r0?ZheoIX!Q)yDLe3o#Zyel zw(QnI|L(HAWFODGa|pZ*s;B+(==Z^o;^NPjTTin z3YY3>{dyMF#ht1NpY9Jm$U(-K`|VMQ4xm$GtDi2fdN+T((lLx4$Mqt@PPchD$h-=h zWe~i)QGr`i($iIVNp@H`A5B$L5q!LELT?3SXv*S2USE%EIpZX5smLQJHQ3;I?djYD6Wrz+i4?Gca3v~L#E>+F)993t&tG0BsKrfw}?)AoIir}gMc zSO3Ow-70^NgynS5^>Vrq=bbow5uO16RY0o0TVHg2?Zl*5qx;iYZMglEJQ*Z)Yo9%l z!&>lqSrz4u*BB0I;kA5@ORilKk!eU0Cn=}9abn0ztKly!kS>fTM!o09iQp{~<#9HX zmD9gCbIqQ{Cg)>CyYCFf%Z2EAJvfi-{=CM)!;CqC@_b!?2DhDExa)YeT$aKaSFxF% zeD>Pg=k=Lwr?#nx@It=8xEj_QR_C*QZyk9LJwZ5IYc;iVY2eL;1 z4=bqFcSEhuhMljprJ$MD?07zut5n#Yvo+D41(ctE2NUj>+3A|C(?dNag8$6-Oj2LL zo?jr1?_>Q^FN4RMO}S=l#!YZhIQI7BFHPWv;`^j)+eNTYrFA~6r3;?{tnDvxgR(L> z2Nt)!fHb26AGHu_U5Xs|eyPi{bBVm8;})Y!OO$pHwMtehBqkloUjlCS>wS4o&b!{f z3p1O45U%n1;+Ae~rMFT(Z@Ra6=Sz*Ojuqg|*^4wp)^^+Hn1ED`{Oi?Uoi zJ5FJ^zH-FTHmVVb+6c}@yn9PN!*=|;&h>oTTH`rAi9smuD|eM<2Docxk3&Vz`^%`s z!hMf6aRPO9B$c9W)qu6yTc#h5i{z3Q1Ajh$>CL-6;(U8%^642|HPuxb!mCRR3|QPv zsJ{nUmU$V+T#HqrZEyTbk(KTFV4imT@jf2*=#Vh49eIjv+$kJ>2`AgM;J8RO*XGXZ z1?$>xaAgmZzhv!8_B^h5m$j~T=jA8OuQz6WuO6YosqsRpmvwxh8#LKvP9@Ce5Nv%-c3cFjBmFL`Ao>sk-r?un{Ri_SjfOea;=`BzD zJaL~dcD=8R4JNe!xnlczamR-RrS*RO8jI#^cK79>=O2TKwoVYPSNrYJT--fPhbM7D z{fS{~zDkHr<;+W#hBiCxhs7~F$m{)oB!r=uhl{;=Z%}SZ3qQZ#byb+$`mG8pQB{}i zn?58`d@e~RkzH&4gPHn>6^1M0ahg0GvDfmJT8w+4bF7moH`b{b_KUt>*n!JcOK>uu zS9^J1Ca1M#r03nCz_7eC5B@p!r^j0!(ea27Lk`S)f>WB9%&ocD!|ph;y_mLt3+bxa zuby4)qtcHLcfL9;TkFwovq*`hb$=AXrrL@ZQ;LhwV|?8nub%ZXlBIIbM(MO`ghn{O zK__MA{ct|!lIujFc{+28clo+MFqEH<)3ZQ3fP$plye<#*_I|F|>dB7&;w4CH|B9UZ z=)2&_UyBXdzOtp%JQ$zj4{MEoSp;ER@=;^$fy*!7s1Q!(4oM6gZ>U&354?WJ^Cg?| zhITh?P>}eH$$9JO^|HAoM6nM=1v}BjVZ2;Am;BHozNz_Q_gYxRyF6Emk)0=xvtvb` z(sXY{=iU|a#iM(i?(p)`UJKfc<#&s8A8K(y6e1aKv#k^sO?_;x`~}i~mp;4hw+uMl z?DahxIcBXbo*X_!n}-Y4tR9A<{~u_A0a{?cskyEnM5 zv9T4W+ud0(Q){u)W?sg<6lfP=n+a0V? z*h@!=&HNXU*1%b{_*5?6a;DrJb+Km?<7D|uKv=2eyEGvOUD@Q98AGdUqRRTuIC>D#>CCvg@LJ5#14|L$Pta)@qg=uWIi%$Mp|;3VzN zy#Wu(rfY?1eT}kzX(+K=<9cymwyz$m-ASoeyN#gr>P;Ih#;R9(RQOAI=Nq}QziN!E z4;Fog#ha$LE@6{I@eY-t4ciqXZJAekE^~>2X_O{#)28A1TCw=S4(v`+>^xN&DfNP< z9i{AP`KbFl0->&p>*-avr+EERl#AF5clKm0TgiV=B(d0kULFzFudR2wD9`);DR?VR z^jY25G8ZGMZwt^RN6XmNo8`XW`l40_!bwW7A}IYi8BKmV4ew<-j?Y(>-?)f1jt@PH z?MIBF@YyBNj>%6O{TN)e@+b~hwG#cCEGhdL2xMMX~MG!|Am+T!^IO z^I>6+rL*#X@v>VbRlD<;HQHsYBx)ByS}@C)W!^wvU&?*DaJRmHuCAQQuqh*tN@@`Mi{OW^79rD;Z@9S?}lCom*TSBliFQQ$G97l%`w(jesOYt^v#*wj_;js-gl?N?YeLuK-Got z@ETap_inHFap&cD!&sug?pd_lXKY%9k0fig-Hzwaxi2`&e1AT6b;B>M+I$Ad{>BLp zVgFcRXS0(UOFCCa<+gk=H*j8vx44o5#*gd6w0J*{S#mesXrVr zh-GAdysBF?t`Non{c`<#X@zvt~7>O-USopJWO-N60%ho9un&=T-~NL zREIqu@jl@#eYKRHyt=VYoA$k9PHnS4*=)Xl+ZBnGO7?ZUtzKkOU17cTcU*PibFaT& znlygwuE%@ht^$+9_zd4QvYt&+p6)lrO&=TfU73qx`B)msS%|wxY2qeY1!sRucz-9g z6Ra>?zO1KL3mmTNbo*L&p1f@C?UTg^Q;=?vwV=1bC z>*93hq;hZiz3La9b6nJS=>X^%@J@ECFuY98eapOwrMw5rZl3#KdrlYoIJgayz1wI< z<8-eVp}I`p8?zKmB!#bhBRrA?(>{+~?5>|9yDnage84q&vwh(k?+FUaBSiS6TFp^e zAKg8Fx3ueK=>pMhqj&MB-rXj;9tS&r3Igr%Ad{}{ln2>pL%2M?R_o^NkVx5BPfuVA z&$y-73rL^O#Qoe)!jAcYmdiYYZ8u_ZUd z#!b@)75Hd)ZJ(<+w)vNr6{GrpW~+CbNpUXShhjNo?`|44p5@-H=H}aCP7htK$xK_H z`;F7Rx$K_E=9}l&ljUPeAJ%H#S<3j3!u|2toL0GY;ts2(E7bmIDjP2}gYDv0Orep8 zafK81mc(x}zvTllt)8UEhiXtmo4(xKb- zQ$z+^@W_%~Ul$i^Tdwie(E?~a^R*`(j_ad+*1cTl1NN0RJ6?Zn3_oN~{exi(wBnbV zjpZIhUedk>9;ffP9j*gAn(3Nv9nJ6EsI1KUt9~2h9%$@QID}|A0`uLs1zHnz*vN_l z<6XLurEA(;8}^o}Y51srPArIxRYBUJJIF~(jOA1Z9F`szgS}kfB?kRz*9D|mIxE-(AliF2lqW5>UP1)-A(qS&RZ?yS8r9-t_b&Cb5I~WVvoES#VPIxV1-rw_55;vbm2t?fCSL86zD!XN9jN0p;zwIcc1`Y3j9#UQE1j@p1iF zuGPm|(AD!O$vbBk(4c|Y_OHUy`F0Ja&1dm_veX^!H{k^$uY&M zNbmRp;rfEW>DKfoIrc;6&As~CO4@v@-5u?<@R&76IQr^;^rfkHs~5vaS1G84+p|yN zMd#+F-;~!#T%VE~w+ybkP)%1s&)v(d^f>VNUW)NKjJN}7cVNGKzaBXsmgzpJ0oJB& za_$B93{H+DEqdTEp|>@cQhM`51zuQ?_9dc|w<%KR-aI4BDr1{fh9_3WNem4cg+_!z z!)ih!Z$d+Vi!DRs-C(@zgUxfBzwE=+xb&hcv8Tcbgxv=NUD@oi|6}e{%1zlVR zMa&D_usL%lbI0zReK2h9#p>?f1_p`OQ?M>LUq#cfTMUD;M^9Vzx$afCfuh!oW&o4# zT4~91pjV$3ou$~ar?g!p{FLbL*Cjhk_;O#yqA;m{mp6}Aj<(weFX?InsNbj9a;I>3 zd3YytOTOK`Q)7y>E?&ItmFCJ>5QG0ZyH4Ln`g)e;|6_LDegDR;2dS1wKkT|Vzd2dc z!FEn~tQ^(&Vb>4%KVsLt>wn0uljiT)bviRNNR&3NrY`wr*PYGp?E2f3dEuK~7mDB5 zb@K#&ROW|Wr;}pL$k_oU~-&uP*yimD}CDGAG+O zHilKTmuw+1JmSm3Mo{oNgf9wn_>H9NYuCLpFCvRyU5JlEm!rM8>O{eoNL&Kdfhw*h&jTLiK z{Tq-6t9=z@cScgot{mzkPmtz=j+w#yjzvNzFYRY zHM(5}bEzGT$k&i}DU3;YtT^7V7aMyBtR-`|Uz(YL3;5prZbfJkK^^WG1^W#UQC1NLCKj}W_ugf zY?chbsb=cff1dZl!DSCO_qO&~sDhj0Oeo$6CW-I9EN!KQq(NP|dtzc;OqCP}+7W|J zYerc2sOnt|)u5VArwky#wki96I_6w?lm)1*H04paC-xW|AI9TQ9|%{w-Sx%y7aNyS zY?ZcxnPhR_1^j)hp6YGsn-wrl!9y>DE18r>RISk>jjk%>263x7Cmg`t<()@mjyAwI z7W^~jn0#EjMkp!O71X1Eo!#xIYanwIw7)&p3$H%52|SisPUn-+mh2*bdP%2Kob6LI zKIDoQpSPRAO;E3kE5n>B<8XUj&xS2ulf~PCJ8_DTAxJ9N=5G#PPUIaW2I06zsQ5h7 z*-vf1cB`uA4kVbAvCRq)zd;~!Zoe5$ZPjo)Ukj}@H-XF1Abn}6>I-hU=ozD>S}xsq zbh-RkMB=cOVy>>X@)ov#<31}c;&mfk7V!nE#qG&^!s)WxzNh6X_n6lO#fGz7->u}S zxzSdi+_QVl-@5d=`-^wJ1Y?sXVG|6@=9o_~=116z#q!`Km$Fq-Co&#P^n7-#$n=jW zmDS61V&87pi8*t4?1IIyXC!AKqI+xi`*>g8j8Wd-!?S72dU9WXk?~gFXb(ud(p}aS z8dXZFlsm2cdXXeiYwp&^tHcoDaqz8jrypNu`;mB0PMS_I*F>8KU3AB*y)%Zx_0;yN zeZNe1cHzel{{)U4_WL3$Qtoi0Z`v`|mEXZNcYgKEboT0Jbk@tPx}S%gmKON>Zk|RP z9TVW*3~#~UQTCsIPIsF&|Bt!*YI0Ra6b1Z)`~Vni<41{{GZ@|sHW`BrCjI(b!riOa zPBU{)s5(=%r(q$20+J9|pJ?6ey`?~Kt{IhDns!&6bHM_KQ*pgI<9L0=<8Z*1NyVi} ze2tcyy|5d`vWI=Pu#e=gJz zX!Gv}*vSl$8YO#Qx_N?(s3`0EYKWF?FxU8QDxFhB@QF?!`8rxq3It*#mmA93Qj7Cva-KT#M9J&ceJa%=sKqd3jK3EA?K= z-BXvt(A|iC)e}Kz@At;C5RY~mBQ0`LD_6C5!ae=2k)iv}@PX^^onSg#^W5zu|03n2 ze5Cr!f9&(BX?w%Znj-AuSJx21fO|#7RbeVh=ci-qrI_6qmU}kdtET4LLO8Lnrk;W2 zuahLiL2+|bLCZ>k-}RWa@Sb)N>F&WlBMz>ryw?bSslv|%xR0#SkmN(sZdbQ{5e=oa z_td%(5F=tr}akq_L%w@vCQLHKDBBvQqChK z8}4n^mGTO-U`P&Dw`#{c4i8$==FxILkw#pw1sMxbr1nPc78K_d(}BaA)FbTf?kKCX zh%Z%tl;7FOkx$amh${Q&%c(HGoh6HBGHLc~0K3V4F9^S*elq{!wuX|PS&Oy`%m zAEzntMic7V<9^o8QaQh~Gi-OUrZNYU7)FtQ6g4W;lglU^ir8BP-n%vD^mz3(&5Eo& z(cpKdT?f;9YOezZdyU7$x$F0s*+TuXKC7Hn^CV;huY0xRU8?=Cw568fLdZdp}g=}EWtDH$s1rTgT0ykj-ga z_flOu`dO!x0juAnXxPPZ9{1_-?LIDf!|elTHT$72-0W?;r^+4p%i*MkJC^hTE^B^b zYMQ9--qL(XZppoH%zN)v6!y~mBwtJcTGrh(lk!bcl=y0cm&nd=ejna9OxHz!@{wth zIlH^!bmvAN@|iji@(`WKp%c68p=x*K+Ej;yo$JTwJdbID&n-TL`&eQ zGt)qzDH#vS(OlR2(_F7jf=6+9J&$Ivzt8MJDIYKX(4E4H)9M5C9B&nwPK4K6*3p)p z&jGOl|9($}*$d8qQJ8>)vmXb46~ygyvP10bNR}F^_uB!m<~}f+k+Ayx)9m|b8YP3~ z-fOeSvv^zc+Lul**T}h4Em-eXP4!d?`F-)yT)qYc(OaDTs3=_?_6xO6D9V`~s5G=VCR3{QNvNc@D`M~Y>~3q^YR0_Am)x?6A$sqBoXT(PL$|OG z?{*#EMc}}xppkX z^OL^N7}Y3T`mmX2(LdA!MxN;SLUc#+j*};U<-OQ0WB@6hB~ZYag;QFowmE*d-J^Os7#_QLhqLIy}$R7B;iTekgDJm>3R=V-tb3>SbqT*kWHNKB<3YQ48f~BB(o>GBzB0yBDN-hJ z?=Qm=hKy!HDSGmnT7$iSL1P^J-JU$f_ZvoCqZPM!BO~yoI76F%3hujd5?`L|)T8@2 z_4~0*E7oU}RRj)aUd9Vmuyu-MsWOyF;5-XeqvH!l#YeWS$>{0G$Lg(j_ZVaJ-D$0F zp6k4QrnN-zHW_=rE_sf7JVfqU3nn8&V|rACThvogpoC*kmzW4XL!x0CM()cL?KI@U;OXbNX4Q5jJ$ z&%9Oa@bIV)#xZ5d?2`HAd_9S$dG}tQGyBBm>Z%gMkY4=3J)BO)xKCS%r=25JY@ra# z!;?~a*ug5|^eMM5JawzGrloA(4vuMbct$t#VX6>DdBX*NX{9>3+)u)l&v9Q$2t{VH z((vpAaAEBgfa<*r?Rg(;0WuPma zzrV8McL@mo->~C5^f7z^md?uC1H z9pPIN)#|dq{#)lzYA@{HGr}>i=o*rl0rrThdNCT z8A!B$>}IVEP}+lJYrnJa`c5?@DSYWvi%sMF+M6D*eFYdmmO_|uU#z$Ej=B|0+6zp& z%Ma2WW>-$MN^KXiu!;m3fDG4IgXRVONarcX;`QvJnfQ3s4I6mb1uNuwE1c$~nl#fD zwZtG47EB;xqdB9RZ`M3SKM!ldjL!t86qsv&9&l`>N0|qh#gLdvZnd4Y%&hKd!!#*P zh|gprH$Ieusx}>uEr#`Z!n|;h3p`Ly*kK33HFv-ScouxNyJ17i z92n=0HL>q9Xo`!&q(i2)SbludJP^*iLKg>}D6iP{ZLau|u)Sh(26k$cK{T;}ANK`u zkoYceGKS~7_f4|$n~@!_Ou^BL!5WT#H(cE&^v~?>p`5+^`kr45r;(0*!GnXd?nmty z6K19v4qtZ9a4){R<$hVt+ASzR$Xgb&9UKrnhymth8p70c+ZLuV`x= z=-%!?8&{&nMeh6r*m-28!A^~{DHLXO6}yloX_+=}<)OhbBYID4-0baZyQh8>)*g0V`xa7E&{(8=x? zDA-M@SRP8pfO~avam^m7b$e@nP{^C1Iiwm{4Dc0RL_vBQf*`0$olL=zcC8^lzndFx zP}&HXjJ@0*`=v>2^)*}KZnli8+LuY)#Jhd!})!684rK`9jY6PiPVX3n4Vz~G(> zV*s!Nx0K0vk||^NYTdj@fR}5szxOT8Mcy+JuWojlV^ANh`pEE9FYR-GaU9w=cSPq@B*m2scIOkJj z*@|x2pTnw`kJr^$y(vF?6#*Q%^U{NXcZY#+tlpK9r?bccO&_5IcO9rzg{9Md`7HMbhlW_vjjP*73P_KzAjolBD}Qaqt|3S8T_|)Ds-T z>vR*+-A#yacYanSMbeA36fnfDwE-R;F-=cuUF3CYnX_ICZq*<_N6%oE5p zX}k8raKznwxp#$zRn0gjb-UaRd7_imxe4ukJoJ?!)6KxIT|%!g!Q7>zUwC?%*Y}N^ zj?q;euIfHdVLJD(q&OU++KP_TSb)RxLP%%ECN*QM9bLeGdtME)x0*!=&idmeWU5_Z#6@`* zlNt+cyIv!IW^I?NO)k~$(7TKSM3cOMuX*9)Z6PP_+krTDg1djYShbWasqOm9-1N($!+WvxuY#Q;FVDyB8U&NmY*Iv)~4Q;^osBmS_j&^ztd!t6NQa$9K*= z1-rhJM@_JtK{N#sr@D^ZNUm#0erJJ|M>2PwNUVVFBf&Neu zxm=8zcAJZ4uUSu8(~&(EoE((W`+hn!OB<=WePhsw+nJ=><2BU0fQze#)9zl}$mejN zkB!rRI}90^wR*DSfgz)VcQGEqkLKu2>P?AwVrcS!m!v?r-a^jHLpf-I6EA?XgoiRN#45AHmDAOi>aM_o zXsG>S|K!|(8y@?oww`WA#Ew=-EXjk`q3{mkx{7uFo}lmhgjZP|i&1lRC3Tw>KIMjg zG;VK)kB*sFJ{FBl!-IM>8(%FF8#vjkuo}>>yp5I zkptOMH1=LzBM#+T3oX>2$JqOH0fi!eNTsyb(*uZwhdpT`SY>GU(O$;)S_xY#$lkoz zKER~0`YgX>^b=0XRPFpClszM{7IotsxGVZWU~P!ODPcXp^b#*0+qILf@Fs#uD+^o~ zlPuRjX6>~RcBg=)$Fz9;A zwz>6`nb?D;7V$>j$h+DGVnPPVp{Jg`B#qLwmJ{QBVD?AhaJdEJF5~8R=1wp$3&Ab- zNhn0&dhYy^4KS(pXPkTQBE_&y>q=g z32}Lv#=bM(L5=&gS%S$w`oqY7Q7e5&JR!l!nKg)=9?u-+jn$i7ul{3Sj_-ZDbmKF4 zFxfbVv60THSwy$6u6O)1&FpHpWO0rAuO-FFXzqx&tu)Kbv>GIi{$R9NuIcnH?X}g5*>!0;_E4Cv0IU~yOhLW zwmuo=}{IELJ5IvUo za(ZDB=05mJ9KGY*ljSu|oW*QCiD7Z>`pdI%>#i27l<=Q*a#BNerl>jSaDs8UuIs6$R&riCKVAP(^N%@m zsUWPW{)YB{F08+yz5CN|w0G{P#{7%+_KXkhtuEGp=(K41jrQKY0zl_?exto_ zZyc`ZE&daopSf?ew_o!<)DWJreMR7X(cb8-Kl%y*`vdK*@6Z%QGM@d=-XhaHgb4s< z+&(3c#Xr#A&EvmLd*j$8VKmF$0l0l|;K4=vMGXALe2XA|=PY1{jAn&N)McH{+8#bn z0&7zlk#QcI?g5yh%kFSJQTpx-T!Vm_#<2z0VsCZ_QW9W8Vxat$FoFbEz@^#7vb#DP zuG)t?Ktv7$mk-OWPr&AYw$U;S!1Z6}op1wAoCa(+kRYW&QIa5^p6b__lP_|&#vYoV z#sE&jSebZ#29DH8V57h(6kj^CcL3mM+ENtHjekV(L{*KWB9xr3zofo= zbJbtzk63Ur;n@k-&EjBllLz7Iy>zQ$!Bk~Jov6U_T+g9)tc4pevrwYo9%xb8YHHDU z>eS0gfxFDcfpL(j0ge(90{h8M9?$1A>Dcjfv~HZf^dXvGjkP?EooyIoX-C6I)@M3@ zfL3TQR>eXfT|8IQ!L8D?ic7^@;~>k6A_HutKP_<8ZGMtCFrkm9L~J)n1lUb1L=_hR zP|B<={#qGU`ea=`)K=Jl)Ph)^u^{Ui0&q~WL>wY2D!ClN>&Z)#F8b5E1(sIo)z{Z$ zjm91(M0RZa1ERn`JjyS&aHF;_A8Px5yL|s**){@YW1UghjGH(o=jCNiG%z)84bB?k zW;2O}R#y4cO;=Yg(S2%UnaJ>1d;$}iI9lTw#qerFO9^@0d$ z#1Q?7O#iUc8|92R_ccB{kZWxx=`SqM=Yv&O&DHn2GysT~h_SYV@h)O6$q;<4N}2aPNYO z8?b$aU436KBaN}_QiPPsuHoMQ^cH@*uH@E$T=*=3Jga5;@09cO8|943$^;%d z6E^b1J#=LB>ZjPQlosebmmy7_kS~g!&c9FG^{)z*Y}`f4Dsq$L3SM6NjH$ z2AmU)g%XS1B?r!&1~QLmE!vSlFd<#vP1)%1^PhAy!rMJVa6#HSP!|!?#@jzeg!7u(wfRn`j z)|NcBzEXaD=$6EPadWG)rACO%aLx1ZqLx}J+4O2=+Fh8FOt&y}vjdU?^4jxS-{$k2(d?;^VD@4&i)DbM0IQCJ z{_Qjv8Dr@Ivq*3>acdG8RHGqscqw24cn^&(i}?RS{6n692cLS4|6*UcT-$;5n;QDm zp{N{xF>D{2@N-NC{IIf(yl(Vj(5)T=p>}00YSr!O6|`Z%FVre7T&k@UE9U)0Vm%g2 zEz8al4+@Y=PIeap21qchRfYhnOEX(IAX)!c^G9G%zew|dg|E9-r9dt*f^R#y&jkV} z%_1XhVIa_dB!HBAf4D9OCB|;!8GD@Bgl#dK1&QQ*(ZY%(j$h?GwYJ9D{h!VoXj|gO za?M|L)hxMYrjgtIohRPb-XlVINt!>j`RKW2yXU`1z9RYb-!kw2^!*5OK!D70g$y^e z)k_rODe!sn)@C{vS8WGoD&`kJ0Q5{U(GIkvOmet?R?O=xV?ZD~(|{iBK)!{882dk1 zwoN=%CW-~K1zFcTW@E;1nH2fF!h3cYS|yiA1qhXi;=q(KOvcFp@lva9Jnu1bm*V34OW6l5 z#pug_FSgp-`n(%Y7uGuSO$<{ET(^GkUSI!Q7JHt}Y+ZJ!wltTYj(SI0??J3o@@#nFC}&DDWH=083&8i+iV&1VJ_# zU`npsBylXv*^7lUf2q&leBqh!WD&Y;mpp)f#RM~{@U7C4f%gx2N1Cd@wh45?qh0f^ zwek=a>#pwJzRiA>FJSZ{1!HK3fyYW03*Ii;gd1FFz49tUX?J!l>Ymj-4|CW!5PVU% z)DA+L9ky9F0sJC@8<0s4G)C~9r+Nm+ujfI)&mkwyy)R}Iv-Ca0nx18C$`jFJTB0$3 zETj`Qli+Mg5Nl{x%zGA8A>(A~DW0rYM9++%;`z*q1YjNyR>FRfwmUPkQg8~a^W zl#2_R;T3$-dfW3R_-03eyo97Mdnt?N{*9s+Q7#w6(?#7!_#XE1SW4z*Os z%2PgjTn(hFm}?@K(&NF>LLt+5-fI7UbR8a=(h>mo`#E^~&)s;$V&e{NjzRVCuQF@@ zVcX~4VHg=y)e5cIHTO?Z^fi?2;+8+3 z@hEH?kFvJ$DD(_sQ-DiPCPBh~DOq>UHkUczOo9Xsjrz&_HvZ;twlt&hJsN#Wv+m4F zAhzcTHp4Cn#I|v!-w5G5SU<;14=SsDk4HiEkKuAXomT8Eo}XFl?aXSM-W`bOIwc)l zr(gk~ub#_&)t7Mg*zFl~)?D5Wzqnw1>bogsELvwLWmR}O;9tfHEJ|g6r#^m|mVv(1 zx^bSzPOXSl=i+cay_lWA_Jgqxjr=?D`lYOdFsnB5FF{Dz z{_f%>{+Okk2<>+mT<=P;)afJL9agWES#6aUZe##z8$HOD@?+nBIrU9sj(1ACLdy%+ z!`169_Vj+dc{}4co|Io5KW7h$pxcRJK-LcGF?z?SdGz&q}7V#hK8Zx z_MA=gugoCOEVPOR4uDHgYy2Ol8#i+$`giadkIR7*mh#Ny66+Ci5fWR!F5_&;0X}b| z@w?A>5^$~j!wP?Y?tfHQJs|ZqrZ;;X=D{f`{FI7UXVI1mL~;fkCA zW@~q+Py4R{SbnJUujiDULv6#UwrwNx2Rtu zE!t*#->gasjx=$Gg;kCm65o>LOAxSzO&uX8cU z1bBp~9}sZ1<=Mo6Od~<4E(Ce05|4+pi24@7{MT*2-om1J!BzbKlS-yQ3>XQ>6&NOe zRst>sUz0F`8^?&2gn_naW;E$ML9^9lE>LOt_Y@uBlNP8f86ioDWp=;ABQB0s`eIOz zeyTj?N-O5{6o0*aMc2pM6721iT~`7Cnmzvg9N{#F#+6PJ?|nz&*V|t9h^D~*7`^ys z9RS9+7#!3dkkIGFESi^|&A`$F9nr&o5FqN9MAY$Z+QPF(6FYr>jg6uAwdNE;6Hx;0 z4zL;D0YHA}W2r~fu?YYPOGF)i*8K^Ph%VMAL>GV7L5|+|NeH`OICTE)jhtKp5isEd zeRxOI@#TSzf0zLE+5d++hCWMg)NxtT9Co1LY5E81H~}Ej*Z*DW*tY=jU!;zIRq!c? z%-1~c|4bbhoFiSOf-0s(Q{`Rt1Vzg!S&XI<-_D=_v|sZwcPblbL$k4dj#bA2(*`HQH3wR%M6_Rwd8 zjY<|KzS7xWVeLLc^zOtXf-buDMfPEI@nBE#y zhZ@eKz+rw7#9`n}%Ek+SvAnpKZTu`FOdHRrvK#M;()mM!bygsN_=l_QHlkSn5X1Wr z4jH0uzNk9Q+9P&W4toH9)oou!m{?AWofEik`@@48#+d~KV$7= zR_$quU*ulRhanH*(3hisfYBofQO_K71H`D! z{YF9m$~#0TFU!Pu+N(!J^os;$m}O!%_V&Z?B0lY#H3b1PK}3^*s+T_T{1;3m{HA;r zRh48mgRJdIPbrqb#@cS>tMC8jUeLbD;}4UFh*7H!8OS4^cVn=bPgwx)J!y!HV?d1J z_AfYL{a+u`nPhZ-9Pl~L&%FRx+0k>Gb3P0xptk#8!j+Y^CIE2X3K%Yy7CzeOQ$QoC zma$`4g`PF^5cKH$h-hT-jUcuVLChk;ItG@7Keuv-=-99FVMAoVt7jbqZt3Zz&9Wk4($_;kF4es}UHeun>cK2($hsZ3h()~c? z91d}r0nhZ1du?1gx<@=^W+y}mbJj)_11{Lg!{S>TYqe3sR&BLU1@W6<3@oMK5L=!9 zwvNg#unOw{-P3{xlH&Ozj3Jy*`vMBGkAK zMT{u24>cSiYWK4pv1!(4`?AdNLk%a0+Wl-tR9f<(h>K-rKGboFsNHQl)Blkh27lm& zZ@;)<=8xPk{|9cE`{ss+KM}{x@5FJEe-p>xPb}_#KT^&pZHRBqd83?F&SRMOR4o06 z%w;yl?n4nHPVGYtmx!7DP{N3<(>@fjv&`^A4L7RF@t^G|eZvn$j40=2+uo@vCw;b~ z^sTn-Dxz|~$Z>U<5dRs53}iPN`$KEz8>7xp8?CD`%cL&vNk#P+%c1&9-0sVNQO=}2 zM@b8RYRj|#j4+1%mOonYTN$({!A4xBeVsJtIYW!{Z9kg=`$Jx9%LIBrzy8zkIx}VZ z7i+I?W&JC>=l?@Ij0rgJe-Y2mb^x#V*Dnw`b0a@3$bYDHG}uWamib>(;Q&~B4%IKD z%1{@R*-r-ZpL53OUJh5p&LWmi0({|RY5~Q69zDuzENY_^-^?hV7pP9_0ascti2@eq z^9;(@_II+D`vXI3O~85~)^wC>D-L*Z}dwpX&&J zQ2CA6zh{j>`MC~bf)%RU5mW7<`Wp2sn7=DCkcsk7KWAf6qhh+ZV zhSJunQJeK078bz-9bf*mOO=EkbHcrUR(_YQf4Z@eY z>+jM~`zeDQ=>IeRm+SvCQds&|$15e~u#wgF3tTKBcLV}P0R!m3`5`>5Wtohk|8rPN zAmqPcqVuD+G_z{!_1iK2TlX-kcmDBS+3pWXdMX>s&FLG<4Lv=NwmziIhV~n+XIuaA zZ|@%z7pX>Mx3mCSTC-SkKxP-ZxE&j{JAU&Mzl||%f9GQnWeSX?&$qI;vC9&o?9pN$B2SABuKQtT&xr}#J9(YXQVaVJ2cbr&L-n+P`qAI# zS7T_X8!N6%F6!5CUz9K6V)-x1cU%6Ve7EH<%6D7-P`>uY0Dmao&+>=z{Vabd-_P;TPrmp@+XH-w!GLP5Ex&;xEegLl1vZzMpX4lgCQ|mz5N}}%=bItk!YaSWf8s4{-}h*8 zPW0TV0zo)Q9LRJYjm@AoEAXUO5AwPPTIGqwPv6nO0UE1X(3pqC!Ln{9@ZIK-%r`|0 z{z?&ZfGIvy@p~IT?Egd*SKD}?t#Z|e8HT_3;fQl)#_V-BUz*8Uu90T+W?6;p{S)j={ z9=$K^EJP585Kf!~tO_z2rvht&ti>6?q9F%38`u*1HpBzA2lH4;t4&%~ z8+uXYGdsW`D?F>xT^fA%UkK5}5lTe}l7M+t*(tFq{35!QcC@5dM(C z(hmvzb$%W94nO2C_#uIR$l&E?yY*|kp8a=eVDf*M1_o67J)OB2qmBdV{zo*h{TmJ3 z5^Yg_(ZB?yY8l`p5Xb=Bp$L$*IL1Pu*|&#*OO`9pPe|bz|DQS8uw!5AjndEW{4aEsKlT)%Nv#Rc8S^X?syzGgy&rVPpX?$hX-4_PV zV$bXfjQu&)&SjX-5!b7G{_ff0Ue+m1O5B@)Q)iv=Z?lC>XZ_`bJO>q*RXSBk^@^;nGEam<+gf5KAOkv!}lw}*~ja92I`_ob`KsMh+j zi5@AgH3w@aFO?lwP+@$=H)CL%mte7m_~2(MJi-Fas#VNMxetWrO`-?%yPLNy?1nRe z3yQgCPrwp!7M^*H4X`$rBRVU1i3~ED8SmqYYr8{(zuf$EVPQSC=a3p3g5)kuIt-IU ze>JcrIDt5%vz&VcqsB$CFew&+qsD_gBRfHggqtAw%3Zc7v7g0sdnK6 z*$9}qJY>;-*A#`vb9R{DOCx%`3HzMcJ-}Ha_-2Z2=;|U!ii$L@cRZPuRw6ayRcO@S zdztqG@#wF@EboH!@{U1o($w`Dd$r}rgj8^NVREkOc@zWOEWJ34)4grpZWtjpc9YYA z$9lbAnJ^%Hom+Z)cwZP!B`S~if8u!1M$#?n0V5KEC2YslCSjo;TZ2wEfCXG<&jl-a&7w73kql&bl&lZOkW#}tfT1hv zL#)N+J(w?1n3(zO93fq$4yC09#VzB>@J=KiyM)8Qa-c~nx%4STu6}8H&!=2wuktQD z;Oq$0H3SO(_TxK~R;*Nlf2S577gd*H<~?3=P26L_#RBLN+`uMm@ zZC{4X+nwv&Tdx8qTUZf1XX@5WU89t*$t_&LW&C#4LHMk&^IT(q>xg>JuHYZ8c|@62vegy32GGvR`@TEl9jlFrV$W)QAl=4Ln8wFuyRCJ_ zc|N^4IjYWJaBjmJg&IZOP)GMJQ3|jQcY!v!TlcWK*wFMu>ut)m_prwkh37`6kxIZP zkL&9>c`8-2f4SH^{Azj=($wd1!6eye)kNceponHzOU&Wuke5WmGZzMl6J zYu@g7^{(PEk^=o$dI9e?ro><&<(W)FdnxPkQ5Wg$gLtaETfux@;f^mY8|*DCv}Kp> ztRm}cf9>jZ`~n2p)#^);70(7l7i!LmQYr0P=u{3DZe4As%vEl2*}6Kj4y$9-Xlt@u zalUyaMbF+IxJ8Q6z7L$P3yZ9&HR9r8m1;$f-f6Oc2UQuyPV{Cwjo|TjnDYrC&K}he zmRz#Ru~2d6@OH|>E4w75eJ{Bp8xk81&(O5`f5Ppn#yE^^@9&%T71$nd6;H!m#$7wV@Y*Qz ze}YS5O9CJ1%@GakD-|AhctvQUa8A{5@TtPtJB`X&Jg`?gd8B94b8yvv;0H`TYlUq? zj~w^XYYfgOYNT`=S5J{v;nT$=DE!QbEp~m-t80rad3WSvL$g$|FPw(RFf2AH|?t52yMpA3sFcN#;Y-m`;&5AtEUgJIH zPmxZDm{Y##{e9Yt`yhQCpJVE$Wd5Ak5lCHS`TRn`vw1(?DeN+u_vHRQ<>YNN4#z?^ zQ+x-G$rJ30j-WYvSMXVu6>p;&%RLpx3$CBby(dal_LPQQ?`LUyW2i98RYxnGe<&7l z-^PwNjeUjkT@i~~aqrD=9FNtwjt>&EU$UH;SJ|`>2Qb*79 z4$371)^(?|T~CZhnVe0c*o(uJg}8RUUiP;;I5}aOV%RS8n0C|}Nx`0J&nHH;qryX^ zpHG)sIIj2jIcTwx-EF=&5aVNZf2|ODMYVr981p!q3=LSD=7qVpO-S!=YpSznJd=_> zHoHZ;}buSPoBp#yLQOZ#5whSd!>8huG6eI zcE>2Wt9SaTgk!K`r=`Q+)J-wdx*(bGi5tZKWA9GZRaF)?QD2Y^5JaJoe~k$0x(1;K zC^dpobM@yz$C$-l#ooF9Jei#}W@DlR7z6<##rxr6emOR4Es^M;7e`cokaKb1P6x%$ z7~?MCXw%!~Kp|7r2u})8rWT7&=VN1#=yAgR)8ZuknT{upa+%x99u9!i-MF)Jf_MU# zyZYM$!t{2R$mn#tjG03Wf79Y_5uQ`cNw{=_^DOw&hGxVqL9ZC4Cw!fEJg0VMORa-r zA2hEBJMA{Pr^;Urio9sS5v|PQ`sA;#{C+}?kIA^OhsTWo6y^+()zz_JKU?vpKL^Vn zibGmK0(l${h&R#AcEmdVSaqh~r^PWHYS_jN8h4MZKb~o!ipBY+e>{xnwG57jn}JoQ z5-M|jC0x3jh6K#xuv(*iyY!lXM>@&`MwV6Q(I2m`R8J0RgnQ1yd6OJzi_jNB(_dFw z5m{m#^}B{6(k&$gs33%T?(=fZ7h$1b36DB@IBAOhC>6zCX8kf&+No4yT}+rAyi@Gr zY*go?{%kP(a(R&Ge}yuxENfnp^`grxO+Di7J{*r^y3BVcO1t4qEeIS)_sMUXIb5Q9 z-6f~f?Lj2Zd`>UN!&B~%Fhvd#x+TS?yM9+A1x)N~(V<;eSJ6Nxc`FbJ5xFWUUXYu= zfO|>$MPevN*WyA7GSaW&m>Pkx-hbEX!W2gYqD;As>%ckN{Qp7p^ii zY(D$9(|Ku6f0^f-7IHn+tB6=<83|3?-=gix+cc6d z7Vg%K@6Eo@>7tz9pSm`cYi<{gAXc86v_$+U3$j6wO}Q9yb-QyJ>+oPb z+~I8ZW=7kg9VpUVvg^E{ym??*7swEMhDK$YDF5Ub(lt2sxkj|0Vka0jHeVzZ8>mqx z7n?p)!bYHs`eKyV^Qlx0$h%rClA)l%LfAMIe_iu-m+r~~t{ANiYPy{h?=+HEx#dFh z-p45wgoVgjg-}LXdt>~wk|o9KiZsSU5-Z3vZs)+8D^ ze%VdkaJ)Z@5%5n$5|vT@;IS`it)6wXEEU=dY&)Za1Wj#R(B5B1`wmyp@p}z zH-hElX9egVV~P81&L5O9+pdLaF?SWe2G`nErqit>JT9RB`)EIo{c$cV`j4ai1o_u! zzqoPwAJKld_srsVG@beMSStFC_G4D?e^=3d!ucMu?j`oAEeSXl|$6w1kaoC9OXg~gq{(T-gSviAsV*fkZ zFL)z`!dE4qfVh80`!VbMYqX!2zO3(PKl6P>pFh2y-n$If~9QqFdBxff32` zDxY9H1q>RA+u9z4@@}8ZYjJ+~PE$tnIOR7EHCy4n9k4CU%v4JxWQmIqf3)#U&wP!l z<(IQ=WW`D2Vu}6aw4NuE9_?1uXj`?sWhIvxI^`xYlZ4De6FNhg%mb&sI?WYnaAJFg z=yP-%$L&(rb*$<6-knbQW;&*aI(Br4Eo)$IfH1Dt$L#{NUd0)Rj}umvI6zt_d0o3| z#R)+-1H9@hVv{^5+X{bOf0=^<8>rm#d8DMJpGq+)-3p$?U}-0{40X#iXp4vzKRwQ2 zeb&Vmo}witZktUUj>5odxT|FT^T9!vYJ%^JNp9!@ie?OLGJSq8DU^&r8 zE~qDCJ}rPQ(d0G?h^Y<-Y$Z)!oK-A~ZOYVxa?nl~&jH`S@P~kuGki5DABs%7b;c&R z@ez}jY}U*2IMNI70P?)@w*IirEb2oP3scQ2Mc-QNM!P={uN(2Xp29F7FBC_K%_zfk zL67MDvU(Z*N^jTfe_&8jT~y}jav7baFjG@1!{@^;Os$YO@yI46w3!uoW}KCJ>1(Ow zqbp;!Ud>TdzZNZUkGBxZ^4M!H#zAN-5ujRj^v}tOmhtk6X@yOvwxMrG@8^j;GVAHl zfkLuu>`LnWc@-d~H^?ZqQIX)40tHI8&r^84vlPz|^DNO;eQbu(QRCvtmk})P7&p;v&^fcOS!0hZhS}V zGkzY)06IN=?Zq~J+|2lJ*`!#xc62ez?SjsS3N3-q1fA&SB3L%fBktY!HVVEZxY2n~f z-l?KuWKQD3hD2}mk=JhcF_2&vQHYb7CUmtsone+esAaww_I*-Pr&8?0q$&M{y)Ta@ zj)uBMl7t~0iaJS`!a#A$z=)HQ2F!NfuE=R%gY-DN#s#M=(g-gI$sh#9CtHs9h8_i6 zv*rYve_k9X!%`;=(9#`pgDv|hDT=yHbGA9GispSvzS5(dPVs(*+ zb-=@CFm%kUGUj|>!^Z}K{R*%MSU%4Je?05wNvM?5e6PkRm~{rUPlc+yDW7JG;l{Qj znptE_oJ`c?s(?%_0e#8Y{W$0o)sqE4D#8NzAe7ZWdUCDFiR%VLz?T^TV()}4z z1Kwx6_j9-JkCT3nso_5m((f@f;357=OwH}_OH9r2%Ar2^im3sy|Bk8Qf5^fd_LId~ z@Jme1`;7Ju0rkJc)M%H-#n>Sq?)}616H{Y33ybY1qto{pXZ0(lWdJr z`)T^O689sfrk_-F=N~^$i9ddiskt8i>zEqGLfD?3O#k^tj6N|nJ7)&K`evGK-~_m~ zsb3VTXt7YWxh0?{Ge{vke-TLEPH;{p-~@z_nJLUBW^!RdfQj^g%0rk@z;kJXgV>D| zX?^BA!^+rqgpK;5rinNry@3GZ>|V?dqbW{{00m@+4Vf?Q$X5Qq|Oy9@(Gg`LjOoz|XAw?~-S zKqf&9gzcs4cEJVbf4#1NZtV4&_wv`@I9)pc(kvc#LVKqHfweQbB7;L9GjqZs!!bsB zHmxC088cvkt;N70(>h%mdI1UqszREY>t?)4gINbz#UjRaLFC0o9(%t53c~h~2m2YI zPfU(zv3q2MMbE5;Oojpd`9M9{1Fi3mW6|{9&!WtKg7-xxe^wB`NO37s=`X_h@nOWj zB1dx7{xFfr#w|N5{v-N@2eZh32;7}$PJU3i9CSaN;y0_9yL6NVccy`7Hgn(6EetPl zAQL5EiY(1~1@n&3VL2&rO(6?tx-g^bvfC;9`>qkXB*TD&rY+8k^PMbYAq1>@UD}Gy z6w);VUKWlDf9Ld&H94BD&QRJvC(C3SsH`MHbB|>J7E5wr?ZG)PSRm36imvUQT-~@# z$OK5v^-gjo@;hNi+Z|;?mqaFZwn@|ivvjvD(}qB#-+M=sW`4Hk-u@!0l|B7MnELFZ z{vMz6&796E{TN^-gbk` zZTNvSE`WK2z5w6Hc~vWIkLv-6AQol;Y4ssxCS9Q{i~;xBpEE`r zR7RYcgTSPHqBT6uh3um9d9FI2Go>OOWPT1t^mBRjS2pz%nX_^52kH5HTuz~)UvW?{ zVN^n&7cD)uXT`afRPHkL)qa-q5-}_W{jwUhf3tycrYZ%CEP&A!ftl5OmI*FMXu(uj zadTB*vVxSi;{Jdf$WdOH-Pc~vL!HqY0L;ECdS3yyO&pp#>zWi2y+`9D&0gmBWuHaF z>HlKch4VF&3IsvviwWAFF(APgKf3oH)%`)ozA4k>2hA>SW}`6yu1%AwZqJ%yr4O2~C(8q-i&z_XiGV@djqI)XW(IGiiRdN037M zvmcF-MFz-wzj%Ltf8+`JgY^E6Eh*gw6bbs-kL>kKaA1}^Jqskr0u^R6L~bSv7FPvB zON$_}AlUiIqk1rwRt2gU0avE8M5OwFf2H>bmHkZkW-7wYe43ed-;d;tn6zi{0;Bd9 zKAN?tjN_2$LE3Yj8dIF^^``gzzxPhK`<0JYfa@N<_NS8gX0{n@-~RRY8Tg+pG?xYk zV5aYRQUiJT^w){BXU_cod|>Uz1MCr4;sJS0WMNkMzMtMdKb;mCwD0fT{Sas;f1K^F z=mu}6n>o1i(dL^32KeQ5&P0U;sCCxM_6)^WF<8j*a1%10f|wG z$yoyeNq*OhyO8!QO?$n#831XIi+dBp?>^05LONz$80^i9=}a2hSz|J$=b&8jL59e|;EEt}_r4 z?suMK037OUzmjwRu$z(qjDWUgpnxg?R3-_Z>EU629PjH25%=8t2iJ1leg)_-bKU&? zl-KWn+zeib5e~bC6x@msT?2XXxm0-SpSG+T@J~zb7`#a43T)YVO$6QHp zHQ{Byg4P0s-)&Z~vzK%KIKRU--*asDa{l};<%+#r_Ba~|f*cG4H;^lG-jEVG=m@q^ z7J{ybiQgy+l`;DCZ|?m0u^=;`cPbKkKI?@d4V4i9-F3$q)>`B%e*on#p!I#&h+^mY z3ZNyQC+NMJyjF_X&K>S7Pcs8nEiL6&`j743G){rdpqNY1R(EQOQD|88v+8mtq|FO( z%oSz+SDP~O3aj3{6=p%tG`*MIACnrt2vGg~f4|>two_Af zyHiu5xze&Ie?E7vY*9a)Q{#Gh=E5O&>pBpRRqosLY;sz#GdleGW=4OkJ0%8!q5#QU z$93Oo(ib; z`GEI)ezwuxe?^RAtuI=SDfaDuC)iYt0f4bQ7>HJ1ieB|*^_ZPP$t14lt=yuqnccS^ zAME^>@!>7u&RZHgZ`ni0*sr}GW?2uxr~g2<|4A9D*_)_4y$MOn#6VUwAnPGpnuR*_ zQ0#XOf3Y{M%Hpc32gh2(v!RD-Zy&k9K5F0NV$rLr9%^6XcXkli=Ftm!OSH{>y}I(u z+C%uEK;P7*@n$NmH-9+;E7nUI#FL54M%xt02 zBJ0mgv2Ro8zFsr4fZ4EQ0Wfz}LIq|e(VuoTck&Jf@xL&R@XhqRnM!V7zaPdj*crzU zW4W_D>W8^3)&+hT%XnuTKjU|bC-^XzJM*}_$KQ9x@iTs>cdGYzdHtQTguhaj9{NpL zfA;@ES^B?Gmex0AiT=)K(!cSU>o=b%(*Hs({v{K-w~hNng~B%z+FPb5UuZ>MJ{+UC zb3Gr%ve>Df4^z3bNaBaN+*zRyV_7Q-%YMf1?dyKV?@Va%9^WboD}2W9?VEiV%$+v- zFq)(C=GESu?WE8!>tUXW?Ec-vWpB3Cf9KysuTs3(##vFoBwxU^@4wXd{wvA;zMp|I zaI*6KUH%{PnEZ#pTYu?;3zLQ1-mkb9SmYPW0n6ij28Lgp>ZX95z#ae7@~A?CmZ0V4?8EP0oHpnzD&@I%$!;Oj-dsR37nmfe@uEM zPe>&Or~jPFc*@^|ozIK$`+L;Z=)TM#uv~Xv zuljEsqWLRvxG(_HZ!!<_Wu;Ivky+*U=dFL7<|kQ5&v4&&{NOVCtmi*$vB_~kKbt%3 z6Zf;(K&uPQ>ixYbzrUN&FU}DSf3yF;`8@H>v6_vDhC91j?(E{58;p1Q@>7psXC`;v zxcO7Y@l%fr;NL{CJ?sZ`a^W8b--Ic_~+m1!&O7i%s*Mt;{AQd zNyQJ6ur!#x-=M#gp=WyEchkQ!fJzJwKY2;{Lx2|EXI;ercw9D$%)B3Kf4;fTALI76 zwf+4&>$@y3?VP9E{0mF3e)X;TuebB{2bWsD_p=Q7d%Y|5C*{f0`GJkQ~KsAcOsHLmA~+c zyrQu3emwHaJpO0fFnj--ZRO3TXGPn+$yXD5lds$!HSj*xXSJMvzlPz5Wc{=4o`1LL z`}>8N%biLVX~>J{{Y|n5PL})ekL7z4d3x~+smJb29=OQ+?}u+@fAq~1|A7*f-rI}) zO$YDC*{sLGeSGj#VkrNc@yh<$z%relN%q&Oq5$GeU%n~LklT+ZL^j%wqfbBUck16{ z?F(vqe{??VBfk&3ANKJx{9zwI!yoqXGyEwp?8Dxt{Ac)6{xke3{~7*bB0pT^Hxv2h zD#I@(^21+#F_E8gf8R{xyIk_cM1EMyFDCL+?wg5xmrIrXw+g!-!@=x5?7oL{MG@iG z`^9_s^M3g$pY6ldr~G@k{*-?YH=pwF;m3#J{9-LX4CfbXS$!DJ4}bamFq~iIKFj~b zTF#&4|FD+BXZgSQ%g^$El_NjA|3O^t?II@M9Ok=C3i_M4e3zAe5SQ<=?Kg+{DZ8$F@WA_bXAgq7 z&rBMi-hOUo|IP}{Vw`E-pTW9Ep8Py^RLuQc6(hQJu>h0T{TRUS@4Q6$W-h@Wn9JS- z@)v_iyw8BGzw?))xu36tY;T)y!m?+-X-imH3fq1df6Pn-Vjg^#^{sJ{<~N0TruS2V zds-W>xj#wF`b}Q;CeYXo2TsVK2xcB)>UftQjY(Jl@?o%=qP|P2^;QU|nhv0wfa~HsvzkcrNi=F&p z33u*}-9IPa8NYC61cmDFSjr7>;{N^v2rh_s4p9();RqrBgS~t6dOKrz`tY7i`)_zc z_0RdrzvDM&F#K74yzIjeei%LSvz#9;Z~^<7e~>SRFWgy1!QJaU?GH6Z<~K(O+MQec zFoW4X-L~_P|FiY|UXSdXGn_yC$SII@a9?nSpFBz@UXY1vOhU1BzwERxkwRw*8iJr ze-uP;%YN{Z{AWFWxK8KIX`&xq^6&e{dwZd?ef)d@7L9br`%t^8Or*Lt(A7}Q3TM~+yBki{(X7cnke;&7- z>M!ot``_UsPydJcNVs2@{X8-+%QFB_dHzRyr1J|O`2=Q-yB|LC#Hlov0Z0R`FoBS* zt(sYLtQGoJG2zQ=6Wfa60`<5w5)Y{p+BrmHE;y0GKyft>PRB+)8)EPnc^Yx>k%wPq zV|PAT?1l7R?P`Wvh|ElVG0=x|f7EoJD=M4jIiMaxRV&Q-*z>*GEVLRPf&>%w@O-{J zk~4|S0#6@r_=HfMjU==W#Ez8g^jusb04uCxjv~efooqaOk21$7_lBZuHwPQX@xgfa z$N8*b*{#XYlS@pI>?zlScaGrwoJ?q_FLbgLltqpRrmW`>7PIVr>8@B8|q> zxI^R2qd+DsCU=!SCdn;8^;uxsOv=iuY$(T$_Rich6y2F~6eXbz`*LY)2Dfg_BoFJo za#5ca#o)BG6gEA3dUi3rif}D5!N{k^X`sv-(mW**R)u z*-nqDVb7kUE!*Xhb}^lqZol!OB&?^?sS5K45;FR2B{k^TxA-w@*ezqn;2NE}~7X*-I+KV{a zNv;q#5>JKuAvFO4UW#qSqJzq{(;ps~wF8faHSNSAlpy~_sxgdsTA zj-P89YZ&I^J#Ud)S7tUoo0GR0Gpu z1RqWV*qBIV*D=*)e__z#=Xr2$rcX<+@K_DntQ1@`;@5O+8_C`Zjk?M8Xxhr6;z?YO zIq(jNNTgWrqK{$=0){NFx`M54$|{fJW_$kA)rAS;E=iHoa}*c#lkAHu=nZt`mrQ+N zcLt^I4$spA)60vV@p!13kH>yT0icCxIcl!Qw_X*qGbbF4f9B*rJXeGIR$RTsLCwqM zWnJ`_Mc}AqkeTe5>3r$9-Z{_5qnrg|rj8LP)X61yI3~#YWFd&BMaByRXY`s!TuD#Z3y~e7^Vk(O2F3UprY`ScmN}t!pf^JvLXX#qoPqN0hSF^eg znSE^dCN1N!KCV~iV%g{;e8$9LQ<~fMOLI(nf4ykd)LiDm%_vLGQ&xLXxW%IsL@+VP#HG^eo}>7^0Q ziN-!JTi*C`34Ab?W_P^V3JBGbuLT|Dwf@ClISyDiw<0E#PQ=lYy@+>HBZl#;e^k$xfT65wry<){}&Zedx(99)Rp^+>feckw|kWb@gi6~Af9oJxd z0x;b3%r>+;$>?0o>WRg@U{@pMf02UXCu?PM^RQ9uv#IrSMBnky9nr#mZPA%A2fT>T z7?Pz+G+?5T<2k1W(upU1GgP@R`_*uU{c-6IvVPZj=Q{D(@Gid?%jyBq zPL%}m;A9s^wS?Ws*rRL#8&y5-B04}5+|A`J!CZXjcHtP!NU9@c2jW@KDr7$^b0S)m z6+k8H<&1T_Z`H%X#B?zR*Vd7bx{*p;)ME9s2rI>MR~mKN(Q|BZj<$RxwRtorn#1rl zlVmWWf>g7V>ui>hvS$!Qf1qw@+#wvDJD;wP{C-fLFdZxNVucZQRu8e!5qEM9E_f3J zNICHbuX1q7aS0E1^ehRE+T(?6YT8pQs~1JgKudXCY?uHcQAvP+T$N~9nD58-j%MIK z<%_@H4~&aa+;9!bc~~)smUZEo9GN>OWY?Jkl#ng8A&Dfione)OQ@qwjJlXHBO8|p-h-h zB;E*`6QiRiIZ&jeL6YiOiHhKE!C0!e%|vh?Pp#tC zbcHSBFDG+d{FSw8e?=HgZLNv6)ln!cv8Ixau63(#AX{d5PC0;-@tYJ|PowRaf{Nw3 z;WjwVgWzvL?Ro(LR@HH@f43*@brO#txC{mMB;^w> zEK$b9n${hG;90mdJjLXpD%bdRG0QDwB(pgr;XFS&;^6>@e0Wgash>B>PQ+7RVVnb`{>*q?vt^R5KPoA`CS+qIYRAK-gHKAnW)IW6#+i`l z%n6e~UJ#J)e-OT?+W~dDi)Km5g?#aAuN6t$dADa&?%t?@3UPCsTS`71kQ-+Y2kf+> zkL-zXkulZ_b-ZzieYFf^ZrE`29BH!#L)!=kysq^YMM8DMGaBYXe`arHIp8mI}=!9wew&;lpL^p`j*(Qi4pEZ z(>9!tW7QJ2oVsE1xL9VCBbFpCV(=24Nt+URe@=;wC&vAsJlQ?OMI&~T*T>gL7hUu57c4XMC$o=d$!i($qQybOO;kkxUd>?#7bl`jxraWcn2eKU(Y6$M= zOwtE2G9$+~wqO!br3-P$sN@0a{4|iOe|F{0FRYw0w5`tE$+siwDiUEtP$?*T?clJ% z)|NMc*^V7&|R-ZXaXl3r8FhNNBvKL%)g!%7CYFm{M+yGluazpjP1s4qE83GL8rf;DcJ{cZYS zo)tbYf=DxlXd_#;m+E`3z3RK9;X2VB=U78u-ece%XmXYwXZNnTuk~=c+%rSS$D{tz z%Pz%U2Z%ldOqCLlALAgF;}?DKCW1n<0&8E;JR%!%dK!fklFe<1bRjJpf2Vjp18~p~ z+@-9Oy`<~>3UHfvAVi&78FJGTY_p7QzSB_?i{2ey6g$I7mPoKX93H``jbtiVH?u&m zmXx1om!yq@eKj>~4NZ=^Dg|Xck6=)0~u6UIdt^iHlMm(xfNBrw?K(5}@6vKH^$^$)9 zNxbRfLR^^gAS93Je{t`Tz~UP~ZI){E;!%LZAxMltS`bEq4&=)vpM7{2Fk$h|qJI$j znY^5*#N^`s%jr4)tmQv>~Hnn8TOX(cyz!Qf~5wOR}xL(fAiVgWL`75)=JU}JdGe=;>RglRzsV^DqO4>YzNcvozd*upm|AZpM&Kb z<{YF6J3kP2)xot{1}>4HQ&3R_&o$s!HLW)yvk^HAt%>9#dnqALlzo>&gYK_bctza+ z$sc2mQ6kgfz|MyXy@*E-4>#7}C`mbzq%N5Bp^7P=f3{!7Ifv9{OvQO?j2UJ#FVCWS z0w*p8ZZ*s&DvV$qkbW{0a($kN5b{Lb-kwGI2p?zk*h?sSlaa`E?}yDr8D3?d7Z6PM zoM(uTHp^F3nXIIz49qb}o%kHnxti9T=OVGRK)2Fj%6IIvp&mPJ^Np)!w7WgTnd1EB zj?#?#e+*ZpeKI5Ci|!dlLQ{H$C<@QzUL*#%+Ptvh44|c;l&fILX`gvG3(B;PZ^e_e zHqa96xJaj0cJqm*IQnyj8zHSKk^Q)K5xiM7>W6J?fmvhF1|_J;?ae$~t4JUbPr);P z9T~d6>&GgvXh)@F+EAi}tt(jwLAyo|n;3vWe_>o-jOgS%k6k$;{;HyN>YwGO(%%#z zWS*E;(hh6Rcg{UB66$;N*roT;PQY=!>6JX|BBY~)*|e88FA^F^^|5DYsm48fGjdD7 z!be~`nLBmy8Edj@~LLk^0!t=VGfk`JTdy&Ep}Lg3!MbS8|{)2d6Xp ze{gIgPT0%s>Qt5_tOs${WafC@NU7g|M+IETuMxYD>J=T zGQ;##3`LQsw~p5D6m6SXuvB8Hg)7@)Y+vilL$W#LAv!nKH7~4dEoZr#ExDs@Tb63u zmP6N_93_8r)KtCFlZFvfzA@-P6`JP@us~m(ggFDP^1A~ z-q?xr??%aLsb&(D*$m~OR)Hh*Y64yV2??VMj~m}7&s!Nrc5n}clI#L;80}Ddf268- zpkjMw6FcNCci~!v+u^|lDsiZaRmWpu4nv4ox0G&QS@6I9dkXoM{J5^O`@sJhg{)$~ zQpjVdr_DEojJ~PDe7_*4m}7i&!fy(h)3d*#khT6VDdd~`_Y`uwKj3HJ&yh1z>YGAV zsK2C;*V8W)vY=*i{7nF4K%2iIGxYn-cYpmhNpDy#|E7?Y5`3%1uniKf#+z>nd6<8t zkms4mzbRzSFn^_xujwoNrjY&mogBMYg@2luV zW}Uy|} zNz9kW_Ati7-7~QH&~le*n#kKG$$#ThwS3HXtx8UUCvQ4yWG}!R`*Mv}Ka>~u74|mf z1kY1_F&#$DF3BahXo-EIGe&sj19nv?go5peJo{RFy^I6Y($TRw_JmP1TU~(bWVX{X zRfDsIc=2#e;U4=|nbMC3i?Bh7t3@akzWd@5IoB}1C~F`%N#1oG24F8f5Pzu$OD1x` z6r7^rx!x$&WJMeEd3!bUDq2v3@4Zt_^QWFfZky9lCn`ZwL5v5Xke`QjZTl)%pDo)d zRZ-wWIPCY{2Q(%|U69;{J(Ll5gy7t`G_M!foLU@smMr}+QS(Y7k6M%cLv7Dy2hb~C zhxp~WYHn#AT8a+ACds#r2!EeTE;1rfzuaCf&ckS((X*uXp6pNPi&O7c@4mK`WTAN% zI*dAao+K&vp52f~n=6}QQ;B*z1g2e?etJFGS%==3`5K~`<-&aE37tZlO80Z>cCH5k zFlR~=N{B?yD146KWbu@BE=>1kyfW;r`YjI7Goz9?e|YhVzp+4VR(}>A`)heVPJ<3r zUAwjeb%iHvc-?#=EHzJ4=F_c>D+PD2(P^lhq(q~Dc!ULl<73?G8v~nLGMw6HI-Be= ziVgW9`kMD!k~|$!;puh;bLX!Zkpir+IGecLyV_Ea{+u<BWw)8*xeXMgVH5=3xKUgZ;LrGJ-^Yv++A*I_LPhVYc7%i^9)o4JlRp6$a+ zE{sZ~MqPKllx|A}AUpvjA#T`=s4886!| z-uJ;|sf3!%v=kuMd@Ct|IWda0K@o6t!WtRE|1wE;>n-m$EWEFKn;-&MI>6!$% za~-A^!)feVUVlRPJU3`%UBxtDyP`b_FMmYG!h>dvJW?D>Y%os7qO%nO)Ad4ZC+!d+ z$U{lugTbfk1u3Tm(I3=mvq^ag9sNZl)6i&FLl+nEl|LJ%sjj;DtOIM9Pxr|vbp`ek zogZFttp?ihaXAK38>vg!YdUHi+zJe4tXw%SK)hna5mY*vx?PN;k~Ww{;v* zNc?(|?jm*E^d@d0Iq6nLugAn_BPDb%uAFHdadKovb%kJ@*5XN*ksCQipk)UN5o}lN zb?o8*pnowZSms(!Ul~~(FAwi#)b-hp854pLVJ?S@$s2<$7B9Fc-YOjk;EDmO*2JJo z#qh$8;~Y@&Hs+JWL?;jiD9J0Dq+wQ9x;a{dWZ&RW;u^*Iescd+$1^ zfg>3-=?IO_olj2x+#C(HK8N;(7Ul5zk~VRuw(Hc|vp?9|ARoLJv&P`9q^2rF(}Z>FPMqTH!egW=RbKeC7`CH9FI` z{GtlCcZ0h#Cu}OuT5axU^}(sf{K~rT``o8c7gSkp>qlw6+rzce?}y&JABSY!V3R*Z zMM;%5Urw{*_fUk*1q)Y2I)?`%$~*OSXn*1dC{PuTxw^);gE_uxgdXaKyDKYH?ce8D zYS>b6?tDK^?e>lZeCApqKX8txMx7Q2NxGpFmT!AAXz6=RV?dGk@iO&7GxZdz#Fzn;HV8eWWbY3S-t*LOOYk!iK z1+N3E@POCXg<@l#H)o$sAKl9>=Bz!(EJdH189b%c$x8}6fQWjH=ff4vs}{@PW!L+B z3}%|iz)l@mu)Pg?yt{#f ztM2}zjaomeOOWb-(@q+-^6mBs%M((>$EVS+?Jm1s{7UD)y;*ZC-cBb16pbsOOCtAI!k%!mT%e%w-#Y zWibY~D^G?Z(A)5oVqkE94^QP?tPN&MnRgxR&34{*`K%}x@5&eZSzbwp?SCyzobUzi z^+h)7$lGV5402U&%NHY1ohdw_<=2{8%mHS)+E}~mv+riYzD`Tr?~U6&xf;@He&p+( zuvoBMkL(`I{rb>J>NRM+&9R8f*Y(u9yOq&Wt?1ad{!*O+y5+YLr08V*+8s9!C0qqP zt*zS)ruJ`c+^n8;{La(kxqmx067^!I{JNJ!cW=!on$EZD2yLD1Pj(Uq`L#`|^Q9=< z-Sp@mMIx1De=)4>ZM8k^tVf8%a}caTJ+iBCINa_U%cmVQ9XSy0#F4qSV*-FN@CLgo_kFD;pr&Z6>rDz)n7 z?gk|SSU!lCZIImCTceeEH7CYe6=#Z{H+7JuQ`TOeU8~OZ^SO!kUgn;5X#?q&=BWDc zK=Iz%KOFU8syps@BY$?0^Oy>TyM}YO9|qbMM5e1Q!nF0WU^X9(Crj(c&TaQh(VRNv z`3mcCXM3gwc&2ukL(2F9k{RokvP z?dSj|4TexJ-T0!=p2`RXKY8!<&MnZO2dUA%$ zRxc46xNh+7Zhz>%(Y_elOC-I6SJ?D4=1P}JJAX3E0Wo$bty+GS&xc$*O%_WDx9u>Ft+G`u)i|ypji&lmXgev| zuFMB`Q}VNt72AsK?!qVfHhEob3dApbT-Se>K#D6dgGQg=urOEG{!5jvH+CY8y zYv3A-yl||rpu_wbm=z2%DFqjngFs0^gSqXXyI^v80~-QZyL_IRFK}cL%J3eF9ZG?% zK&3)YV6UO>A_{vp46x8<=K_|A7!OejJ4Rd$i3O)d^0TV|54yw*>HGVhCb{0{$MfTP z9$5B>!GDPaocX7JD@8g;+2sZM;2Ue@=Y?9xY^t+JB2c){0(AObWjm=+I)6v|*GY7@ z8;6&JiE}!A12!9T8wT2G>$E7SIa59`)Pvuqce>S4#cgp$?->7mY6kYvE0DgeAG2r; zM@5e(8@%ALD-My{J|g#)IMmTTYDm*(IoVzOOMjTNP=5=pg6-F9`{qj5jUJ|ZX@$-C zwR@Znw9sC*o49#rVylz;2pB5I-nmI1|62LVw19QN|Jel3tJ(b%(jig@}qIwaDV0QaKKz1e=l$wB)T z*?+1ae@m5l{jvfPBFvA&Ky}|xvsoYlE~tY{$V9soxhVG$tb{Ok0XO+FS1FCY!qdnb zuHFxWS_kyFt(Dq)DE97lNuKfgLahRR43d(fhC#=I8)4yW+3GXM)P}i<3Gc;gW;lU? zA%SriirP{)0vAMNX+<6Glys&d%03@ghkxyBJ#L>b*4>_=O)KI79@KRCYTA{3Ju#sk zo}JfU_8WRIrRcfycCPzqxNtJNbTA?Y4)l+EF02Zg_uA4-t6b z>i*uNBs^CTsiXZ)vjo*nL1eUv482W zHjKQg0l5{$cN@uPr!niS$xmFF&DBlwt6eRmn0icaPxjj`8gkyCEAJ@0-0s-UW_@%R zp0Dy?AJs;_U+ec9`?hpHBX7|Ko3ZZ27jtP;k2W`c>F|*fbREU>%{rUui^i4uW?g!x zSIXCYc!A?7%1%@~beemf-_F>S8-G9F`zM3qQhpn(!u~YZaR)ZLzVe^OTM|ugYe0bR zwPSfkwSOMKZoI9gaYsL^6@vW;`GVO6*9yde!M+-yUGGttXDD-T9TUS>b16qR9`lfS zZ+7{M9ryhq-peVo*956tsZ0Un%}oS(PNOp>->|4p7obbU@NBY5KFZa2ynl9Y>GZl4 z@2&j+Beu<3uI}xiZj>!8#U-$sunk}5*?w)H2WWVtSl);q^we7NqgxZaFVr z8$bir+wj;u?#SM=E~Gk_uZx4cRl=I;AL^P`R29d;^^nP9)2o?t2=gIvb>1s;wady3 zoHFS=H1IMs8+uboo@iy>=6`P7TlEA*PrbuU#h9!=DA!84X8Bd)jOM~tzEUP=NbI%D zs!i``(Ilw)8LB2f-Bj1G1RUv4m|D9~D63QQDD2`DG&Z`rboJV8&|!0j!&yNskPnab zb^uzG^QOK(NSCMapf-`KS*netqCtL`xT=KYD?qdzXS>sL4)>{GG=K5n>WLZJsl-jK zaeS}p<64irXQl1aeNnaOVH~y?p9ohujJixjctR<-yXOl6s`ud zhct6I`HIF}>u&DWx}187fl(28(#q-8YkYAl4Na0-y-Qmaf_+|#)i!FLRZt}TDlH$< z@;Ls%G&w?XgVXl7qkl)>G{u;#H?FAGcy6AK9j^>3PIX9oZdnTFc(A&s?v+*=MRAr8S!z?v`%W zesS=NV{;zPY=5k7<=52@UGds!OlC&6qkY2FW#-$RTUx!wce~4ud5ZBvI-E35UYjk> zxImra^yXQ^HuMaUfh?9bx!ljjT{K)(kEiU(5jt!ynC;r>H&*cCi|UY=jEqG;C?7K$ ztwZdHwkii5k${tV!wSQ{7Zg>5=^5`C2FwZ=T^-Q>OwrQPklOn-*f;ZzyE zqWX6Dynj|1bBr?I-?!G`lB!$#7^7kz9XHcfkq^gxlWUh~oyw;zZ{pW}eMM^};yJC} z=KN~?uALXpyFHY{W}_ai{9e7j%-8v>?r%(RUHiyu^X$Cy2D}mr$us(ssH6Q&IY)KI zhtr)4*z%F0-EBq*qZzbrW~(OWyk^qwuT|JRMSpvC&$!)b_US^YW?l1wkc&Hoex9DA zHU>CO%}3RutY4Qqdqw*P+;6U)Fh4dm13k;tH?P!92i|74EI41-{(9aU6i(deR&vJ& z<;D3)zHMh!kuJJXYpQUlp1!Vz_(VbZ%?c)+G)bFSj`%ukSt-WS{dMx7TQ8C)*IY3#_N%-E*(a z%~qn|y{_ZX3;Jh{4we=kpQXOqIyFAfqJNRx6^h4Jd#iV$I9untj^m5ng+ z0?PFb^wYVt`r;ODi>{oHt8BNTFgvU+`oOFjd`W5f^nMEPaXv)5>e+Jd`AC_ReSf#D z75}_M(OIbW$HQfBo=%7Ec{l?5906BgHuY)PcETVRc_B|md0t_0e9#FG2YteBEMAI> zQOZMvghtD)y)x)Lm=imi5?&unexd_!Y!09Rv?*CAOPf}+G8(5tB7rpkEeGxEQ{UTS zcD4qcmzkcowzBL185yNEMAvmj8GoyN7I|BFH2G;0`vM<`!kB;?9G)c5IGMlVWzPQz za7ZZ4D)XK(LawE+7@pdVS3Tbnz@XR*`PNw9&Dd01j?*UbVW0W-h5J|j^0r<}HBgTm z!~(Bm@`^d$MOgbf6?wt1eou;*T%h)@?Xogi_PCxUW52mm@;NBo|*dWD`A>PcLt;^eH(MEOs3>e%9C!d>nFA0u9s;NXUf?K zRJp76Ua$^&*Wfx{pB-k}Z+{JCudV8bu*MVJ7e(8=>eaQ*J!f`a3FV{u!ZPXWS+k64 z^ zwN+2spixivBQ*rZ>sYVeITPl29XFxYkj^s9ACpBF-y6#%=`S94vBeJd+W75 z^y7gwOQjpH?`=CDSfhIRyGIKKyAT1-8DL2~Q zWz4L33gr=p4kwg{|JzhTTswCL0I0BkPzlQuaS{Z2Q4h4gQVDXI{hdmHCjJL1p}G7) zB|I4qQo|3GKwbYrB|yRbgGyMQc$4k>e^3e4KEUKN3drB{`a&i65)6rJOd4XISP#)4TL=f{OBCj0BQmS06JdsU zEt7h1p!wGzy`)@+FbkaB`yf$Gd)%bkfNI*+xC%%u*p0MpPg-|cE-wVuAy8+!Q)GqW05G?M&q35`vELrMc-#LK?PlYCb)a5uPsL4bDP zK!S@Rh_V55SUa?2zuHVg!+?&mE&+@oH_AE${eKY-XbB|@(=G*0MUGqg6{_2DXM|m7 zoDTYahzvWm6dQLplD|52DR_`_TE5Z@zsv>+k8Xiaxcyq07~bGrdI`h-p${Cye?#(@ z$yOf|PavVivtXeRkOe*@!Z!rFFa{Qa-%#{<;F0agmcm*kGC+nnFl3HTBW^i{;Q{?o zmw&+^Ly#p{t-D+sgB)f^(FHw}vH$|yM>fqKRG6hc2^xwYs#Y=zNV^-%s$+y62@gaQ zga}LdXk=W&K}=Yb#DrP)%n+SOIf4}y4W37jxMD*QcOtS7g0PPa{wQDz>h;A=$&Spq>OLzo$I zkTHaY;J^@szH`w8z}hg=MSZ%&+<+kGC7v?7Z{5-Ff|n>HY=2a-638M7fkYMZb$?b^ zIO0#cnR<_NNYgMUDl0oK8L98{Q{QgC4)hiMaJ+2?FgV-l=n2bQ56 zO-n!g)YYt)$wqM)UY`@vH@#2YU_vDPl}yN76D)O>)W$N#CKY(-yqM)m#P33x&I4n< zBj99+8!`!GvPAZcA9gPF=3Y)2&|2kbzHP`ppX|odL584{;UIHxkVTNOrhf;75X>1A zbN0o$R&7;pa{L{Aeg4`_TGhJH7!O~sYzH;g&FgCL7 zXRBXj1rYPMvN|1xwqiC0w-fAjRoX(fH4|qw0jzN#;XsyfFq2RG4MQL646vG1%Z4)? zF9pDuBWw>3X^|VM&>aJn16}=JrAG^O0N<;T;VD4I^Mwbp(I{`fZGXun^v-gw;}X*M zBxO#vLN43jayLe?;UT6aZjg@jpz{JAkg4iNFg|n#3r%zyj*w@OkjWzlIu#Bn9Rpnf z8Ho|n@UL?vijVpX(oIDzJA)uk#{K&;?h8xYG(z-P2=Z9ysD68phS%0kZTs`&<9v_? zVI)s97W!d!b7bol#D79(Ec4e{Im_y4n`RonVtbl zwqe$4!@%;SJ*u=$#&urs0L-%lWf+NIz$VpGM|5JuJBP9lqkkjY`ieS(F@qaiEypEO z)CNR|3o<~cEsc1_h)>o4F}&r_90TF5ovseWH<=jxc~@Ym%~#>##c*H zua-0c9!iNuwn(=a>iQvJma*qUbbZ>_ml$%4(xpMda3-I8+Z4aLW{C_eL;{yfGF(}Y zaj-t+{(ostlv%Enp^^a(5YKh=eoz!}cC4e_kb0-6bH~-0&HeNT*>ZD)5e2;WYL%`J zaJaauh0Q?lMEXIfbAGna5IN~*z&X0$)*c_^GGbZZK-S0q*}nvgC=VcsIu8=0Zict? zZ~k~4gqYO#(!ZTg{{~*~LT=}-PL;TXM#N;^Du2Y_)+{0CWJ0iDBzZuIs3`alA*VG9 zybfimGG}|)QS?C=By6)$9zsCLL zT41SzL6ae=7kW)twS~DtTtkCQCsdPua}>m5lmGOC02$Hc2C$w~z*SBAJdyDLV8rv6 zLx0c3FRV?0ME3vrUUwkPPys&YrQbBrmjlXv=B3GZ4z5dTrkBe(sdQ;&Sk6WF6VTVj zj6Vce>C(m^3ZMdy@BV24f_In|oaGQ=itK-;h*lPIIO7Q|g|v*2NV@N5OL{(h+U{mm zDfjcy#Q+PgY*Y!jKRTYwX%WvG?UM&cDt~arfkBr)=VV#I?2h8iwp@4-QtlGX%_IcX z!ti|wv(Ntj^rt`hvCpw$&4h)n-Oik#P`=DNaO?PCU#)x4wlJuFar? zW%7;)0&i)}BQ)IB0fHHFNc+uB)CQy(G(O|$U(1SZXMFwEJIt{H)McFuU4JHX zR-^HR%K~`W4`b=y%_%XDp(mu)G<@A!th1lv*62Tu+eSa-?+@IZ+46Tj z`5oVv{WyQd_ap(MD=qX^mWcn<&VT-Mo=?s{AaFU%k-A45usDXC%Xs0|z~QI7szAh* zyrknk!(t+GvLYB{{*P_xb55G0eKxOFL8X99=Q)tcV9+_vJ(=k+Kim1It-T*yxTZW( z5FW>e5>R`L0zkeQp|)&_C5D7jQ^;+K-?Z2dCnhcT9DW@~u>JIFyk4#wvwy$z73t*e zC2j=Hr@!xR{6PAP{5^+muLpd4N3x)hdku(OxAA@x0>nuHBXVVLJCT|N&q@J=_! zcf#?fE?c%WoK*HL^HZvSwnfo7Wy>~YOGg>&766EQzo`kl0xrQzQ$TW&SZJZR5)1Tf z1P7#Yc#!#wfqI4}9#hBra{f|R6eS}AC>QWyB{#-|d;ugm!a<=7>I#AZdINi+IwXl% z!Z~(3dLm{3Lsn?vXEnu^aF~D$@U|%eKKz) z<*d&?eIfCdKJdr&j4~y(0U7~x^t%2Y@4#r>la< zVy@rUuNJbFghc)IM%MSI{NZTH$>HbU&zol`g8~W7dLOtE7zx;8p#T>aZpmEx#&S8( zQZQm(!TK7|F7IJe1bc?x4%mi#gQCG=pEP%ZWwa?^*^E4coFbD7&9PaX9f6- zvj$|q69Y^@=rosCG7f_&AVGOaekn+)32xB$O*b;%E7C8$MgdH7DVg`(S;%9BB7YwZ zs05o1%725=0$b;!0i@*fgpC$JhN}lefky{VgG%UDt|nFR=pY=H`|SLc+~(O<;F%da z5d70;?g=>n;z6&0Jn%{e?~nuBJHr`h892PD1|%AF%p{IlW#sb)ggkZpe1lJ1zq>0C z8YxH%i&|w;oc%Ker;~ufQ}%zKf-?p87X{Y`h=2KCQgF-H2Dv5l?Bd$~Hxyji+U8bS z-7|0%T=K+bX)IhuLW{;g>OhLQ40Hjv1v)3^P04X)EFY5l-UNX0c#5K^k~+@Rnp$<_ zjkxlC>?k)q>TlZErs;+2$HP|A0$hMj>i?%y+#mlm05n1d2wq@$RVJes1HjI*4?uL+ zkAI-!aollv7>Geu+`NF02|-MJ#0&%*y$lE$76ahNB)q+W{^YZOJuSS176VS>^jw}$ z-vPaoMSBJ#_5f1&4jQ~H!VI^F6Oh4mw#yS92d^zg)_^XPW!fO*8giv#0-4MaN~z<7 zo5YC=Dc5C;2V6OkHNauQq~tn8G}(tA27j%dgbZXaG$>$$lsB2{?HAoE94pZ{g)F{7ebB?7Xr?xK^%}!25nnE*s3^nEKQ;nvmlA3T{?5pv8YWl7j1 zXb%}7?jYyogg1k?CqjvV${#{e zKw1p2wrrmeI>!$YCuCU%*b?9B0U<+O+9Agci2Jl3WDmbNu!$qgQ~6=o1U}$6m5?RK zHaL0O45%12!c0NYLcVPeynpYOS3=cIqvm?K=B~eKR+p1>wE3h9geiau?|H?Da)s}J zkmD9u>bhPyy@iXzETP^;v=DLt3QK#?Z>~`MP;f|bRkbKMm2fVIYjT(1N%pi%(jt0o#N?poTSa>QS|4#}{AY{acf;$t^;X}O<-a`IR zafFEYP;e2U&_3yev~WLE95;!Xo>mPt(YKD7yAPI5_c74(|MmgB$+J z!NI?9aN`dLr`dlc=zo+y3A(fVL(paa!Iu4()E+6D-uiNTh){dnD1fZ(_Wly0-aEfge2tk4rt({9{#8KaVPWs=eYT$ zt$*_i!hdxfWX$`wW5r6giK|v^tppLBYbLUiW{J zVvzfj2}3$S>cUQiLGvQ4*-385_6~pl8}~Q+r2$c<)!*dY#F4Lq1j~8VD`pbyT!yb1 za)BV;Zrta*`5@=!@`psTYVQ9rK}F~&GUo_Nt0r`zIe&1KA%2M52mjd*fwW(d%oqQe zLR?dD|vkw*h68N2_3(&{^&`AxL7A4X4cKXugs==vO!oj9G88TV(}!qmm&ob{78 zxDgW1j(`7ycAma%%m1(Y`EPPiU&h(f^w<5y(=QH>P@7pGM*mEs{%V5|ka7Qiz_Y=> z=le(Ujr+Itk@Lht3cArC&_>|qJ@&mUI|-~~C{swM_d(q>=3tcC;y-Mn#|Qp0heUXT;^i~p!KZ)jKf%&A!{z*P;&euwP$r|G zvRQ>>e^Gse`usb`SCRhW{o*&tJlk?zH0=kYX!;H{ z|9?$7na2PqKyxrA@_Fbwz9M{_v5cFQTQ1{t)0BZz{MtXKm=^Z2Lo{iRrw^SLkY)U# z)0SoHL#Hjv_J>YemYq-jge<$C{L8ZU$-gZ7pZv@6`Ae;RXt*!6_95SXsI`TBi@wy_ zC*PM^`{etf)|PyiFSYg|IEW^(->beYle=n2e@9*^`zr8SR zc**}+c9;C0;hWk)4%A1^0L23xg{_CA>#h%GimX| z#4W@f^^1vHcsB4u$}K!wX8$m83+2ZBV&ay(`VT3$j}M$Sm&y0aKy{+b|=dCSX6nOx&}H%>hO zIV8NS0GU7Ljy)jvs`T9UH3UJh3p?=ZoHzVXae(_96*mIVd?~qVxdZkegxqequZ`?c z0Sf~+;2$1Nv`}Yf3nljg87PF}o7W1(1G;}Wxke_hP(Z}V!P@%G%6Wv1tA9rTnCA;f zR2~CV#)t#Ro}rILfUJLEYAQlAk^7K{Blq{zpZk-`X9!>bn&0Uh`9{U-cREi9qw#k- z0$c$3-{}HCGSq*ki~o^sfI5Je-=9suV}6rCej$zk+M{vs#S>!f|E#`0^|&0z{C_3fUSDFYTs|+< znof?ZTgbNWXD$1z{{%iX+l5eFB2CJ1p}(SqzRUhbvi-krC;zlx9Vy3rArAkZ|2GN7 zGlZ<`$aDM4QlEreBkf=Qmd{dun_ooRr(JzXKWHwL9sUq)|K2`Ge*r2PK3BR^<1WrGD^-cq5+|%FX@IkEjC9vfoSj-@YW| zU+KHQt@l&!|4JTy`u|@Dyua@sq2v%D_woRdH){Z1abf=05^~ukWRiQ!Dr}1 zTu+3N4J;iB0C~U)pfI2SSQQjA6a#C35=K`*9=!sT16zjjfGS|SUvdsY{X@$Q`V1!S zY&~Mpx&@nKAAe-O$T{X0ImcJO$vOO!4!NIn)BQ!(?eOrEenY?0-Cya}&-!UJ{G?;& zhqN=G{yTmCosNH{8~9iH=68DXEBzOlN26b49(I4PAOBjv!2eGFwSLYR{*lyM(k%xJ zpK`kiIuFJr-E~LfOZiz2TI=|m?zq$Mbp2Pl0lv?(Q-6vAfc;oB~Xa2QLn00Lj2tRB+`LFhM6PY}`K21(G%Dl8Xy`dwKg8(q@G{SryR$ZjY?&vU$1%^!SD{Wpx|2qu zWcr+;-d%i8-*rUOS3$9ITFUcW7bDGla@vXCK7SAA>ydhbNKl+^qu$H(aTEu@_MDi@ zndtFqFC$+pC{~Mx!gkV8a3X3nEwN>{tJV5(e&tOj*nwgxm-E*V6C^}7>LwuN4}(!&eP#L@G)i=4e#__tf0y>?gK-2&8gC}iP{0WHI)tQL+^ zEwb}?S2xKa94~i%b-CSLHu*Od=CO%b*0accJ<6oAXV=T4jU+=hYbWb*cL zzkb8B^}LwYaD1h8WNj|n^D$nbv=&D>!P2?mE|Q8uKJkID-*4AopA93wQr+b39S06w zf+;UgD>d(|D3dy#-&zmQnb}2_y_dfA!HpOk%)x${65Wo`3LNFedLD=O>VM?j`4i~x zy^N$gSXZ}FHj-1cE>Y8@tE$@^aAv}mL`{WuaF91uq$@5LBtg7>shTCKZ zWb4Lj$BAR(l(#)pOsW<6s54d#oC9tBkd-bSiXc9#7tEg5@a~G-?#^0|>Xa*Z{cwb$ zDI(hmRnNbkEgDaed)x2AkbeW#T0^E*@^~%=*d^Uj3`H->}p7e0g#>UEFk1HFlC=W>fRX z{cU$}M2QfP2hig+af*&HIpt5mb&<@Vfp>gjEyD7k}U;l=ouzY0)-24Nv|>^W|civ zZybxQX2jm{fW3TDwvr8UyO87lfNpYp1!}R6ae3wS&9E_bp}vS|q`%a=CGu0y!#$^A9cUehq*u*GI@Jlgyy)$-|T>JRFnp8Qpr>FvBVIsS>WQ>5N} z(}&*u$okO59(@r(%el(Lxq(^)ri&WIMfvriSk^;<2iwjIwR&d;n>5uCJm~Hq$2EEY zXU&9H`(cQ@E`R8&y1m(<8H9KG7}PaAsQq4~=f>Qej2N)m0bTb80L{&e_{VD)HgVSh z_aMpirR zG)Q&rF3s(Fp4z81GO4Oo>e{7QT^3mbwi74t)9U5nRfKevTj@7^v&XZj3s3)v*|Ib#{uDkA#cS#On{r$qGn%G`jGPd8l6<**CmsFP#l=d(LbRdIbLMbzPnF>VpZ zXXocz&VR~$xM|+i*6BifH;$}V!agmg&88i=EMEKb%Q`+1|A4z&Ij@7YE{vuc46#cq zjjt)P-rd?4SG*;(Gpn9C?;`_G@aP<0lf5dUAt{Yu2zSn2)14a|S7nB?E-UPt-6+%D zt9BCY41-&`ZO9bgAz4S&L<%J6M%vlND26WQv)O)mpyi0j8) zet|4{XzFd=@UAaKF*`nPGk003r=zSIaHC24$UX8SR;~VVki$+JhBOAg(QP(ReO4a# zKIps1d#$5G>_poQSmWM!TF0W8n8OsFE6<#@M$qfEwCe`Ncey?{d?^(q5URDsi0r)( zgMT&9`1d4O))X7j9VlKFoOXq;@4GYmhHa(aD>Ry0N9fqFc9Kc$lQIRGTpF!~tH(9i zdGT!HBQk>rcYTWov7PHp^1je-n^xJg1ojQs-;QG5!l3E$y?d>=fLG1+#W_XEnAB0Y zcA#$iT8Y@Dhb4XrVO8ay^?)eVlI z<6~$+tWrh#V4st`DGyd+4DLWyaU3%W)DKL z+niioXmcDpcD4)8%5^xW_rrm?noU%)T(TY4R=ZZuYwBI*l=SL?-J@x9{#b7nIe@YN zPx}{syBWRSN;+_L8bsXcZsZOy6MxigPf?mN9ba^ELMHaAeOCeJg!Q?f55=RBW!lUu z2Aa+ha7MX1B*)pjnQMj`m0lZ-2dCNx#dnL$u5v|Dz#&JEfO^tXH6r({1$TwNw3xoR z4;i7%5ko;%gmkik2NT4tF1xgiHdW+C8AHPLeew}U{g?P!JF zCl;$AH*zNuZh{@WPiTL;r1?{Us{eS>Y7vGLbPJPqgBKKzYIG>4z`jNP!MJr3gM3CD zb99%Z{drU@{=5#fiM51KQ-7YL8S!_%Nk?9AIgIx$t-FG}TU}Y@sL-1psd?RTab(h% zm-#Nu9J^+W^H|;uYenN+wGC6RMfEkr_2xOQdQMYymD+iX{&q_J(1!@7uCup&e#-|m znCRwEG2-fS+J&0NKjK&0!Q!ExgD#&>>t{Lo8N=_dE?|*xpl$AL9e?GO%lU)C%2%fE zPnTMChIirHRzq&MC69Ooh{-p_}RcY|9L*P7o*=)BXOwpaTR zBLe?&O@|RANu^E?%&=$k>I^G;jz%ETwsA7JGD2o=aGalU34d;#Nf9f3G*&|NwpR{5 zqWQh|+%A%usvdXwseHV`VUC;UbPeZ#PA`7uG<;A!`TaET~$E{yq+*P zYUOac?AGcY@6Rt0_Vr00^^qAm#Ky%GJl!^PJ3PJ?s=K)FNx;8%%&2% zOY{E`b{{*>LT!RYFNgs-4Z-A`(~O*R=IUGTSN)e-U4xof$OEu!OX&DK-fI|mwbc(& zP0*iCHPwS?HuI=-r$u!dI{UW{F!*RbRc@gl^C8|xy<+#0A0s4UclGTuEGXF)xdlW6 zE2Aj_$A9KD4t*e5D*6pko;^~XJo&LH z+m-R&uOoltuqjcHp?di(&h*)5qxFSfPVz>rYL5o=nK{_SzMIdqxUSQ1vNkR$vtTCz zo0f}0q%T{L9CMAhV4lByZJ5djy0w9Q?nep=r0&i)JKH_4kZGj$uT^JXEiYjJTHDsz z*qQ-9;mp#r&;_9B=zhVx$r>oWw!(J#h*LQn0Z4zQupy8$k+!b($lKl)o8ND_CR2W) zojv#cFK)%d|aJ3wqNv}nt8*B*dgO&K3)S} zI_#K&ts?YM%n4{5fnpAhJ>e3ZfN5|QKjTO@JG*x6$L304 zP#9=AX{AP1f8yK1Cmq9eaRQ)y+0Yzk!u-4DOb>dF4AtJ%C)f%ZGLJkLLhC!S5s6D( zUwUFMM3Xt$eTYU63VC|415xtcC#3Ul=r@0zNpo;Q?y{YO{EzRo{Ed6g4?=a%9 z64`hQn>ZNiHPSi!I&623&q4B%AbuX$;w;h21|NS8Psb#DDF`;(ck0_xfjkH|bOh~!+JtUc23vmm=h zRXBhW@AH-toJw3$)Pk^9$cL+Cs)LAL{dABO;|Kl9aL7^>reasxv?fr+O)g~(i7)b{ ziqm*cxj<4Lj^IL50{uB}XAm_Dv|E4U=GERk6wYk)?y7%LzMmBZy0SH+j329!`G)g; z9$Xv0d|I17%@8#>?rwJ9vZ=e?vS2UBa})7qU-B^;&F+XrP_a$aM@^BY zui&*tbaC8EaE0GNNWQnMOTe;@n}cRfIHXIiYtQZ=ROa74^>=KkE|muWQ5}D|?s(XB zaelS;uefR{;fywWLgpGdm40KbQuhne9G^Q73uAn!dG~dLp*VFz9=qtLJ_0iTLo%nM z)^fj($w#It&2Y*m$RN8zVcKTp5l<*9ILRf3zLn$a z3R=!OuL%CaA}PY{wr0F9Up|O}pKe~ZeabIwi#}R;xFQV1kr(T+O7efN*4t_0Ks!zl8{2e|(ijU7e=SMt) z`liV%f_=6By9mg z6<3r|;5j8L3K%PJ&!vB=?CpUg{}qm9$n{X|fYb0CyyUzZtAaYc%k+r0GQtg`Ocb%0 z(AzeH?BB4%M|xAnx6%{d%xc>jH&f9bvGM*8y-F0`22C{ZFJ7e>@`0jpcXvM%YoA!A zn~*=zB~|gK*i4+a4t%`A0^iGlr zADe=2PK0iT)Sk}To5X1p9X^C;47sd($uqxce>=)CjTPkhC;)@e$OBkR6Q*JF8P02ghn{}LVim0i%(PvAy%|n_~PMNA5w@LRm154oYGTK;BK5`&-E)x zFzayR$K5mDI~Vi{ZPaB2TT+MhJboNl`6*a1Kd?SY3rku?Z9S+tRl4ZbR6ibF&g4tjY+We-lhl@Q;Nh!JhXq!4vv^Id0cgO3NSYk=DBsS z$3+znCJBSIx2!Kh{yp|h0rIK1Q@ZBmeH5SGZEGn@?rv$}#wk)gNp_|L;qMA39gd-T zOAGMJgOgLZsqsjJV|5|h$ekgnUIP{s84Pe=i@^^Am^w2hOAX)(b%S|*jr?WAwcybE zF7w{4+5vwbV9I%X+_hHkr$0YHVn-JO>|r97ljmR382z!OSl%~)a*)HBCG zBcP5~{m#&keh^PnLkZe%M4nh@W}LArSovW|aDBC@W0GdAW&`!th9z~|9T%n~PGHuV zztX>1sJPB;;ZIn9{g8WftJ^2_kQCV>Gl8zyn3jL8S9$?oD(A9jXSLzGh8Sx~GjtC) zqWhPy)O`vSMw5Q?N~G>P=%Rjx24Mr{$HQmbU`@|_b7c(YWph=oy+;~3S9XQYPT?}+ zaaUg-$U)lhUCNZxp30xP05Y*&0VILiY(zgUp{&BLv!K%}*>b^-KakCbe=!{Jtg!Z} z2n>Iko&AuMA?|BHrgwc%-Y0o-A}cpV2|YsddQB%iET}%7#ME+-*(hxiXEk^wl_{7q zC4Lt%7S8VG^p}Os3Qwa8FNWmImdjaQRy{Iv+yae+2}hvlzgaXsI)@g`ro;NFknwvl zY$(PSRK-ZFA=g@+8%%#G&E#V*#C22rqShLhq38Ua`wn}NaAy5nsfExu%A1$%$G z;E|a2rWVZb*%{+UyKXWQ;(DW%ZQ$WRkT%7! zm)q`4X6qXnmh)vejq!X_H!i}Wl?wb)`g)w3#bE31t161H6@rderlWM?tbSHN?7z)% zF2Cl_>?c0`?X-wXMC>#O$XBM3l!PP9d@{7_( zggX~4y%}V~4V{YKLim&U6{2DiOKB0ng4cZ04(?y9uu z!-@fF9ocf=Uw0$c$8gk#0G(le>HG#ub-NWbF;Mw#a8LHnnrDc)6PO!s1N?uU9lc1; zSnHI`|9xS+8yB{;rLqXq(z4!bvE9BIp58R=5g|to23JZPTCrkz51WWkyH0v3=rg9b zNr_*Dy@TYiwkKL`0#XZ@#Zce+&n7{D@5av6eB7jJOR1P-w77H^TsOQ51v!RNfFB!> z2bNPUY&R1U?p(Xmq!+!|#4LYq@OZA<1}9|9OKU8W9xBPWPAzOFcW#k&?pV70y0OII zlT?>ua3qmb8WuIa$p-fqd8fp#?Z(2)iy$k#G(ox5y{x5kXXsn=Rt&ew>I9{a@B21N z>255uMC+WvqIuwyaB4cWlm66>w3l8#9!7~PH4Ncvv{-NUY5r1qwab6S96O`1U_4FX zE8xL63)D{fddKq?6!ztt@;1{OI^Rt-`gci-nl^7Ho>Bl@q-m%OL9(5OMb2BF4AI(! zT#DN3*M5n%iD5>Y@KGz`L1n!g9qX==_kcCo3ib62$`P zm6cD>b1e?y8Wwh;Ss*dR+VN=j+kSQ#E;VYkS78?d3+;EivdvJe00V#tgx-6*Y3Moo8 zscm#Z(gu=(1&CB!H#-SKrc|G|=VX;&vx6MC-}- z>|s)>Z-^$wR`2&AbRp>aVllOv9E=PC$!a37c&iEwO%^sb>@in@F8)YdRo%@8vz6(< z?|_i={?t24Q+0m;V2HQU>wSRHtx;e@L%?{(kqxI|if3a>#Dj8zr^uR$WFa!xjGwCW zOpi-U`|Ggg5Mhb>ITL-kNdCK837&2Z^f^WD8W3njXS5Agl5yJ+nYPQ4q8$n|dd`GI z-D9n0ZT5w3X=@p1Znmhqyqj! zuc7f^jsJfb*V>X&>NOUr7j=Zg!i^NWA|hM{Q?%vT^OGV8gQB+bX%DMr2ffwah zKu9|qC&0_h;R4&J$n4>p=(HJ?#qOt(jKkts1MqXkda%q=!+= zWsHB7N;+w!QxfcjlbsKA5y}JjZ}5w_}ot?Ie*xi zWBuWgx1$odFH@%sE}2p(H7alyAx z&*h@5x{$h6X&65iWlG1d2UL#8DT|Y!V~`k}I>*)<=0g6>S+; zj?%2-y)#E4X|7}XfKLP%Yay`Qd;=bIBvu|v(zmG;U*g^ywZD6X4C9q|+}>&Dju3wl zV;5m%a)y)8MDa^1ce|=&?iIwGf5$3?8>>Y9UgLOwm|wfW3MXnD6aMh(N6=T-9QW9W za1`SUjso4m?a-p4gtJrvo?v<|IG`Q$G@&FE@KIMD+xfmF+qvpWH19TV$MoQ4Kf($( z1v9k87RnL|(Zw6m^=Kr@jcd7lFG*?{J$jL~N@X6ntGK0w zm8HIakfK;8o^G+7ypM$pd)X$Um83-nbU1c3Zx=04!^ zS>!cOs%XT!Abxy1mut}HM}=JVUzie5gE}e;L?YF08K^Ulpl<>B`*wf@O!+J}=M(Pz zTC7)1r;uA9HE#K(JWP8CH#;NkegIx10OQCY~*55(JIJQbloWjcQ+orBb{5xO7&E*$BVwo5Px!H@cHzA+Ea zHbN^I+JeXNE%c|t5TJi;H?>EB=)?;w7a1Oz%<19bviXV=29y&c{@(t+Kxgx0A4GYc zXY8${O5|A+)@XTf7yq_ZpUJHS?X0|` zGXi8j3|cA|R;XbP>wRX5pb5uE)mm*&+Rz9Z;QDyr1h=5yX|jJ@XcAV}vhD;>p`@^% zcxYcHI{DV*G&oToEKM!4k&}3_Z!2L*JO3hDYkb}&=cv;Y`>{d<2(4}n9;QZLSw0`V z1x4J1Ees!KP2a2bluD|-V2xV9K_E6_FkVH$H8*Gr*5TgkLDj&Zec;H$khH@tf}fuD zeQQ|EGf;saMK6Db5K_NIi(Qo~qrn5bsR`QP*f|1g@OCXoDM-1%a`CKiW_qEJ!}jVH z)yMfU%&6FW8IKj#naD*63FYJ_%@2MN&8db;;{y@XNwr|uGl{*gZPVPqlJhF*>2e{A zFS|h1Fq$$wFK{Y(gX*daE>CpZ1`M3F3Vx<`42Ry@A((&ALE+KxIGMlADopl#K1Ic= zPk|z#&a7{X=z|}bw2uDusrAfN(W56=ceSWPoe?JSX(Ur097fN2+WZAm%Ceh znwp_6O5+kqt{5%^3z`4jdI~EA5ly(~jwA3VpvpG$6W8u%LKggk5;E_!++7L1V2RlC zQJxf#<1T*@5LH1EP`x9A$39!RGcUL4Qa%eeofQNs%GHk!*e0}^=D89bSl zp;%4YiODrOMBqTICA=0qAd=brdK7swJHWY9QkT0yfA|L|Zu9SR0r}#^4*=AHlqhQE zf7r_FBnB9`RqQB$KV^97yU{nu7Xb%O7YYq{m})9#>w>37b^@m%Fm^5{ zG;)5u%#~^T<4r=4w5B#-*`N?uQZ?o;W9ffVX@2Mq1O^nqS(r-H_XQ7%`wm+*^+{AL z>*SEJowsg?k7afI5#hJ##u>@IFm(bzWTYb8_B0w;lzhxf6=NR$L0MvN!sOi&P#-o3dFc{L# z2(W@Fa0-SKAxYK13DFaGff@w35ns zLwxscy~VsoApRzY4?mkW-{Yadk6pcN-8Kj)E9w#)2N3C?i!<;g|LmZr4mRh@3Y0;| z?=I=G;LZ_0M7ZAWubKlkY{sIx9;{3*-_4XIG%riWFM8_AGAG2Y07!q?>|$x-i9#U^ zAn}~i#DfG3!N@eT?($%b=dXgG>LC4$f#V>=U<_lSW}5aQoZY0~<`0j|8-9yNc^){CsuRNreV`c@yOnfiDrILYc8UgKE`r62=S>(>O!K?-e zy24N{UE`C*1)Y&=PyFj&W}#X84HZ+z9#I zTn=hD@!3JpaXo*}Lcy?j`b4*DoWkDDbh;-2!Mtz+3bS{Eq251S0laHspSl>P;gJf0 zWp<$jZ}UQwY!6FM;gf0D?I(9dT+d^W4EPp0dQ7bVa5p>}(Nd$?gjj-tLtn@J0*I#U z*EGhI$$@^wlE(3*Rbtqji@D^OSvSjjTn9%QWylDI4l;jO4@yellLoIg zpRN~h!vC8g()(=JHG9g|dncc?)?U0I{6=!$YJ2nR>aG_q3RgsSx^#o!VuQH?Me zz>t9EnVx^EA2h&@@bJ?sb~A!)&1n?F=sbdtY@II+K3l4VMu_8mb(+AAY>3tMCj$*L z%eBe#?MkC2i4*ms{VapB1kuK$`gS9L%ug3pI3n@)47(pW%3M;;461t_Dkg$ecc0VV8e}~A4qS)l?#GKA^ zXHcH|vz!p@9B%uKZ^~K$a8jRfZr_hvXvBo(okHhBKSpOCUl{JV9tlBrQg+-OumHCg zw`uMo%<%dd7I0s6OQ64H&04&6#mfOiti=xrK~4p~OJG#KwOjlHMk;ERL}mD}4?v59 z6Lx}n1A9kEdLeGEkz$mT(@%Pkbz5PtikdY8!Kv7cGk?GBy z2#b@(-RVK@qCB9B5)4NTZ^kVrc-xzbyb_2RDN5vRcfPu~)z!02h6vF`6kU5$Zpd)G zD8wPT9L?)+L~!=EG45p@#?+)0ZJ$=PBV(LNFP_)KVjF}n*$JAMvHYe+Z@-;B&6a;y z<}KNrME-!CGQM+n)i#RI-Gdbi*6D(}ZwpB9oc#;X10^Z^vO5~LLClNRLKpoj^ITky z+|vFT>7cbXK00)1I+~LjM)B7?XnM>2x%+kE_Mv7z6nGOf(3uJH%Jw6k;7*t(YyEkJ zKT_Bsp&nhLQsRBaO8;43a(|nWDM;h6?qGjXQ*ln4 zkz*;P7_B#H*+(wpnL@`z&wn8O+xb-hwOycmqX6K1_}NosKAUwRaR zuM*fmyhdx;m>2z7q6B=qnI}L^8hyZr^->b*pqI9FULRrLmh5n&+{yO?Y#ME?X^tGR z5+M;};tQ+B{Mn$b>xCL_nB`x)T~kIc_a^CR>M3UV*h@Amv|WL2LVsq;TG8pOcgz5)uoFL3 ziV!KYb9 zY^r{D3FgRFSNOcY#?Tx(^uVixjmf;a7=n2T-4=y&h`Bu0Q5{&>I@;IhFOzv}RjLJO6(EM1kZw5X~Tbb|qu z!Upkj1QXZ|!L2J0w-(2LicqB=4>v(N&I80i%YAQev)G(}30P^O#? zm!wV?joDc*RT<7jzZwc9L{c78BSlGS8=khqq1a1C!8T<<{btheKtTIE8;F2!u`Z@91ldBSi_`Sh*vu8CW_QCyVDQlJj<#uaqei%$E*8S!c_K2x5xw^< zd4ie=m2rqQ~0W(fbw!) z042s~zTzfU*>Hgqy#@?<>D>CY6yAc&$+D=*eP%c?-A~DUelhooeb-;SHb|M+lM>|@ zm=xD$?n)hsX9mqeEJQnhz|Uo{jX4n^&0ZwxS=RyCnzy#4?p;0=kpV}XZacueKj~>{ z0NM*yAXtA`NwEL+m!va;U9pKjjb{giXCshSm%A9mewqdHVTUjDZf4jAYIgtKd(Pq2 zqVn6r2HG&nhY#uvZ>1SF=S5sU2(`O=5t+RuEh+LtXAZOB#Qv1XmUH0XXg)xeFD2_4 zPJyVHRU#nQJ&T10VS4K*-Lm(OhaR7goD_tat0sTZydj#z*6GjPOi+*6O-u;LvY$-z zNuzg%13d~6fhsCj+R?v%evS@Y?gG<~6jB_v`7`SZzKa^$ODy&c8s~jUlN5&hxwDvt zIxEe;vIX)?nU6h(aJ%(alf=-ZDj^d z|sBD^N}lW`?yb)FaXx6hRh=j@s`i{@zUwRMJ7^LH zGvc!HxTfww+9-|bCXS*_6B(>dN%genJEe90DkNbNES@G8dSiM!Xd=^O9DdcY=NV@{ zZH4P?!*#w@-`jN+OA7f`f9hve+iQQ=zgU-cO$}xJ;x~sf-`8#suhcWM+v56rjXKV+ ziJPtcO7pZ!LJ$l)nA@MPvLqQk@trl1Nu?NFl0{#%WoK%`2GxNqC=EY>sA@XDwb~@I> z3#x#Q3`IocephehxDuH}J`WonpF#Gb~ zmc`A(0uKT~k#B!26wH{%dEB;A@$O-QdM-@M16JGauZ7xIGzurs8)n?_;*lidfY;)uH2Bo-@)efvi1{W)Phww>|o@2Iwb`Y?e~8QtQ7v&zF#9hAo++$wdA+D z67TcXX_je{!EFZy0TP7#sAhLUAUu)%42Ka8rmf3ysHO3KC$o=Wl0o#tK-}Pw)+a~U zQ*%l%<<}dPHF?8)10@^;KTlS4NsUSv=89i%d;yRE;AK0lA6mCicKY(fKtfYB z$<<5Xx1N9Kb}yI$LHgH90|L_a5FGegW#1^DT>g;2>-H89)6SvA4d#7J=(U?v>*g-R z9*U4f0~$vZQ>H)P*nMz6Qz&)k$`gfdGiG12^gB)q%e+RS=XZfLsDG{5Fo##7T_Tz@ z@)X>?#QsW4|DUIh{0~t5@7e#iYvcTP*M`Xd)3tv|3HyJMI9SVnsXtNt^Zqd-|4N^= zawF;SkI89h9y@29lE*5{WYsmUL(?2N% z)8m$;tN)1X9O3^5Ir+a_jh!G5p8Os9HpBeDzfz%72^+h7&YXn8Umo%O*OcP^Fe?U$ zz2<*+{Fl)FSDX6Bn*WJU%+D57hEGxvhW->F{ojk`e*f~NtW$C~yJv{;C$c~4zbpS! z3`GOvJ8b+fv7O;~`M>RzAHy0p(PpD?B=#Td@^=yO&urmWF{)~ay))GK<8|miE0J;k z-fXeoT3&Yl3GRRA+x~qR9Quv&sr#(HQFE9IJ)dcG6}|qkV=2) zX&f`0TlRvxw!a1`oy#}Hk9Z*VNVrh*vdWaQyUAV;i&CjVz$ulZ3@KBoFo}ZxpiVHM zr_~JKZ;;m$bcd`}@(sd(RSRHlB#N=+@=JltVM1znoV!Ma+ z^GjmW`>IVwf}^DVt!bQk;?8o>6up0^Dr-E-oC0!7w^2|~ponakGTCVmQ?bkkKAg-` z6;UxXB;av|pP}4>jE>grtBA`<1dG{dGFSm&Dvb2`TYJPDFQBIXgXfcd(-!5pz}b$6 zb_Oq2YV@g4yvAZ%h=Q0widJq$hX@D7U-) zbWs?UeD64-aB7fD1nv>6J&b?M0lUGa%yFu`SutsRI#rE0O)0>ySO%@vSPApt)ex6_ zJy3*XlTTfA(NMqkX6d5A5M_(ic6oc%tj_=^7l#}-wLa5*(mit@A^#^_YSUXn??~^a zR(GkS$%xojI3?@CfPRGK+!}S!D+yfyQL`PB>Y$VAYKaSTu8|tQF4KRPUsa7ac+5Q7 zKzE%%v82c^QpcvUk6T@@R0tKX0zc0=^y3xx??lT~nC4}w4~=>I8u>!h7oodBT`mJP zT+}GLl+Fim7xYDi9Pf=yCv^vK7MTa17+G$My-5+8eJm;<3UkO&3}hX^@=ctAlJ^zQ z7P#BM|GeDYAp$pRHywW<pg;o$6Ecppogt@wv$O4=7p2cF6EN^ z5-?wR!9%T z+;dGbg((w7(2tj*<&pQa@C_zSaQ+CKIGjZiO;ElbJ)4Jl9T1JA;yO@i-z9?atHN|4 z<1F#pen!A~?;^GRwM>H~&&C(|@Fq^-!Un#o-eQx45TeMzOR(bm zcESLYIxz3zHi~~RFM@mdvS@A6cZ%D&dBOBUf+>|j$LyW&&!d@oTrm4epq$De_GNx{ zf&^y~LX97c?+>qghgal#QWa&x+WI&Yj&I|A(syw=CA8t5e|hAfo_{YxF zKs=)`3Lu^oXGMqH4(Wh1jLK@PO9cKr2hFMz@&m*$&e#XYM9MVMmZW5 z3o6d_<;s6#P3HX$={%(P-YRr46;p#??Iq?M+gJ7tYU8r}`4}ZVFJ!TUYqoaEg95KE zkpRJ?lrYf(=Tzm5+oG6ZyeX{vNf+U?szpt zMU4lks!3${yeb^>BJi_%x9Z0v$gL~mq>vlAhO??2isM)TC|#V1+ZH8m;pe%|&Ds$$ zt=oSUMW_|B9Q--AWO}U$i)#XNoqFN*_~;Ttf}+?QQ?8=vxsNbz6OeOx`8CHCoFOrquc*Fi?%2Z3zlH% zKLtlQku(53QM819P3JymcSKm2G|s@KMlDj*MjuR`)9NQo?(GU_<_9#gRPX7DB4pAY!#;bBz$-s->nIZgVx8$%HB3U z)8fhGdRsvr!N9G+uXveO^gVrdcE0S&XC<^YB)JM<4iGmYR|0ckv;5`Tb~1l54Y~;S zJR5@)dW$<{@vYa4yI~NPO9~M;t75o1pwg(Xo%U1i^&TzRFHpg+bm=-Dvmo2vXXSni zFaeU`)1(doQ4ZEMofm&ijWX?aUUy=Fz6_wpuCk z>LDYYXq~q5E2N16ye;dVn}?y2UDbtu@8=b~z({N$>jkC1I|Fhd5#n`#>^C~vGmBGl z0a2yt%XqQV@EgD9=_8;gie(M zoQZ0GuY`u#R4~Ci^&=%*aNm*_i77)T+oK zy7L@bkC9tJ@h%yc(>#B;1woX12J_64J2%YUB0pj*ICYCsSJCNinM;1o=s<{SqKUQD z^wk;MAiDPS&22{H`}WQ>{*dO4Yd;X{QjgRQNbOGV#oSYAg^XP~SRv0cXbMCx_shtW zE-_n?S;H-FoGkJeh!rrvlyH3!X^l)ayB%)oX0)I?|JSU%*RxJzMg#C=~n z=Ac#hoOmiYC_TpE%~|HFKF{~TTeyH z2&hj^6KWTy>Y9I)A1aZF3v+;&^38J4#6FdWr8nqrZ|^Dlm7o7xk9brj5Qn`W$sa$7 z9oX{t^|$4Rzr*g02c2}uS7XVF>is;#e?$G zGtH~w2US~NcMw2yA~#V23u9Gnw=;_G(;Ps~Bn( z*=n)A1wx`gV};Xqf)a9U5XguF_XD%MNue0qJ>-R$Mq$Y!cCkMDIz;1(e0SPJxup9s zoQ(7_SZxAlT;a|EGh_A1`_sv#S1tiG(={a{kLj|}sC4i34ZQR1zZKz%WT07qs?$kJ zq*c&0XtjT7zi!+%snnEmKzPTc*V_|}SS*|9m+LVr$rou|#(m&hoAjD+9Ify@%|Osd zL5zHsRyyQ`lAa;kj8h5(BD|ft64c%$v@!V)ZmZ)rWl{{hXTZV-|9#%fwxKu19QZh4 zI*p(HbB~L~^@PYUher%3rx1iw(cob!6i1{HILUtt(mY_%ktH-9aQ%!&F5~p;nyW37 zD6?LTJg0+Xq~j*6e8x`+Ag$y*g0tMx9D;((ntgG)7pquVa9W|u{S`+D`RxL?Yb@)o z?jRR~8^7U`?BlTxU%^r__G%O!ZA+A+`!wW6Hx8^#lRv;2vyX&m8XLyP;QYK#C1}u+ zdX0Yxs{9HhCe?cnedGz`c}lxeY~R_g@(C*V$tV6c>O9W|@&)M3DEmw@wtT1N<$ zC2+Xay0To3KNq$J=hSuCp#l5@>N8AJ*1OL=|HvUzYP6Ly7N;8&e>Fu+6l2lrSbUk@ z#bdlgJ>BQHIo-`a!-K*251=!-C&oD@mTf)6C0%b)nKm zB++!z6vW`D_({RvHyO9_txzQ}Hr8lf%VRAS@BB-#msiJ@er^V@Y7TnHyQBjRWGtvA=`SJ}$Xe6oW3njf7ZV|f|t(*=t@S^b+A z$O{Qu$OBS8<`6x!PMd(3zqQ|_r>mM)FP^!6&nb2ZHcppE&|sILQ)5v3>XHO2G%1nM z>zq|ah;F+!7s0^A&x8*L!Al<(dh&l)7YRtUU3dn(%>m6N(n%;5X%=iQrq}ok7sX4X zREiqA`lL&1*L1MbElSgi_}d=?jfeze}H~MJ8K86ygPjUYjy=H%|v5XWb z?7qogUm$f|GSG;W%*OUzUY7T5gTGrCr%Ab?hoehA%8>^hWdeN2j#)IOCPgd0Mg%q* zNX3RIf6B=)tb6aol+bm`xuBg`gRarkESQVEY``~$D<*`8?4XpsZfmuTS39N*H6lnG zO1=@+Qv`6x5b=;jIY%pd>-m510y~8K+ac)@cS#}r&0Uf&V0H6_u<4w2{SRyq|xi>BL#K5*w66 z1YUK01E34#f!zcVU9k@aw-b9r?odWbY_hT;JLm_XoXwYD*kJ|J>LNAZ)g{0z zB>;$E4XS03hh3z=aVgaO?LaJ>7-=wPr9cV8JQ0=NU~qv8&qRu$h?6uvzZ((vAT3_o zIX46^N@(o~KoWnnET$|7W`Y^{9n>KkhWY6OnOvpWm$;b^1h&UGeK9w#!wZ5dj5$Z2By| zyiB-&S5mxTd6_qwc}~KUQe0XoKxd*w z2OD;p_y|mZOn4x>qX!M9U?9~65JBbnh67jR(@UK5o7>jHrL@$}^^XqLxX~R5n&dtD z7Kw2LzU6;6S6@u=$ZDcX`Ut12{q@2UC4P@K{@Ehk;zufzRpIWH2ki`Q+w6L4(XeAz z?Y`UGxX1huZ$);g2yCTZUVjpLB{rL&w{XQ?`S7=JOAi2h80eb+TD*5ApP#B}jIBBW zf5?^g^%kY|4q5w9<70-XfDWLRx4D9Lb+Bh5j5xYJ&K6G_7 z7SPDw0Apo$eC_Dc36hsm!kBcfVpNfH=-%kmE9m;Bg+ZFy1a!NJ{^e`E(4BHsr-Ug4 zxtIMnqcLov3sh8~#wcv~vdz9Rh8AhdSFA8pf%mZtfGbw8bzYgtSk4s!WgJs~tGmZZ*}t^`OTpJDZW|sg8#J7P1$5qr#olejL}MG@`MHIYYB|>HfYIZa zLEIj<@QkY}ubsxe)o%$?q)BGp7r!*S@kzUb%fvN*fXn6=krcBuG55#}%54lOYJ0zt;R_sGnzmUm)kdi>U@DBSm zGi|~INjtfPbs57|J1#I52=UItr}i9+KoC-dP$&E-dOE8)G*Bxz6h(0+^b14_G{}(v ze-1vGR&!bzntk$!UJ@jh-a#^*t4)+IN)?(xe~pDw*C|QxA^}VoXX2cY`W4)}qQ!qK z;(kr5C>Gcc?uRvbeP8UbUdJx$BYD|KI+RMbrHe4k^6mQhLf)-tE&3tBam2)~Yy6qT z;e~$$gM}et>gnTgQBAw!4|<;a)Y8Ft1*6o$(T?56q+ji*kAVrlAI<;#yBW*?ii^z3o>8QuK1#LX zlD`pAT5f9)dhHTvSkh$FkCDIUH^vP@%fw;NbLukK?QwCQ*Q_r@7EU>TD|3H7SwhNM zQn*FO?a(twpMdLj9$np@RS?!FlVpvXU^$`)L;mG9tXTDMZX2(+MX5XzNB?J^*k{KX z_ANZxG-7n)=fgDn3v+a+Hhi6Z#jJZS=+$mx`QaS29@KXfQ@IMS)wph(IAp}5TN9Hk zu5U=Vn{A8y?6^~F5kO*S*ZqIX$b3|VTuV66`J+aK-sUK(y|d(}1ERrHgbXRw7=D&P z&`XXOsh2=Uux7nM2OuSLRe<0q`V8=%!-%H+jTt+DMH6sim-mO^yfdFRyc9?b(fNe% zC$AijAVJMGLLjmt1ce{E$Un|Vy&H#@gBC%J2dtK#l)+9dlo*4i603jw^MNpTZvJ-9 zl)`O)AYJ!S=DwxV5<%M;ML<<%H!x=(t(U8Mu(di44cyjpg51&RasSfjoW(VX-#>Ra zrsQXLbNM8=JGYqs@RuF&6;xalc}6NF${#x*yhdw=sS)}m@vGh)El-E|Qbh=DNciMv zvS};4FUMIws-N@B#}a?`DU89pP~_7om7<&#Id-}1PitF3(i>Ij`%iwG{VKCwGkusf zz26mW#M8T=%lAA`b!)Q#aqegSzWP!##{b7$9Xapw=RCawJ~2olCA zzROsI3FJ2AP__?|cQB(Ve!+XWkRIsU+d*s7yFWpProjRZX9?>^V!JP^! zklG2C|31p{9FfuD9sgRq)ieC8Eq(6#LkpUcN&_m8>&K+~p4FxUdU*s3#cf}Zr`;J(0&C`- z9e`uK@xl0@G}_A=?Ys{hdgO|#Zg<_^dcmzdMpefAU4I@S zm5H++`b^(r7dr^>fZUCLK2oaVNDUNewxq}SzfP2Y=;KQ{c~8oHuuG06B!jn^JCCE? zBBK%2o153dr)hvbYohu!+j;#I($GZXR7}sZUzxk2ofl(LpliW3!H&i}`0eCKO2bA9 zcI{AWY$Zsk8Y82R;#;Z;xHzAj8XFP8Fvs}r-SoIDV#$Ob=j%nHxzq*{QO#K(NQ9s| zLM+#R<}VuI7QT9)r4-ga-D>@a^J_-l5^w#@Y6TFmS#o-KQJ7{4pDasNO#CUakeU#t z+rLPwu(qL@!}n=+f~LbgYW$Nb1M;Bu3+=@}KW%;q^mZo9i0VxwDfWVC^l$>vEIiGc zbn~HDKah5O(9hPBsw}W+-w6bRE3*+}N!ycuu<@$h!tt2?_s(`m`h~|#J@sdsm9p6M zMbbR4MZzj8s;15kVWH&Vg!%FCp^DI$^QnQQ>#g#zPG?6#EJ~9*d88?Mz+(lU<_?>*!XkB|otiH-Z5c9sPjn|L7N%c+JP;?X zdC-?-Q!Xc^z)@Ne`-SH+5$BpwXk%Mf#^T3)@O^ZmzZk@{Yf@J?c-7}JvvMFCd81gz znq18?C1WPO*s!gV&6LjiIe#|DJPc|I)tJDEK*Q3lYZ!e{{v;wmBoc;FRXGQr&ZMo# zxOGAgFiNEikFtFWwJUMoZ6ry5d|1*9;?zoA5fR(l6?eyDJBltXPRj<nuw+3Zopd zGz)g2vI<`2B9_TO{GX_=dTj>U{D(cSs*2UJQ&Bzw#^Cpe3lu(%a5lDs3o*P6hujm940*k+$p0i^q#6N+GIa28)sO{*E&;H&T zbJC)RU2E>TfjDxta`)wBky_xL98b|xpk`kiL?e*@!GEyD`Jr zx36qpi7K`v_kHSk1{C(92{28+8>I*WHNc>w*X(U8wPB{hN+(e}Mm631ShLn(|9HIh zfUVL@5H-+a!F1T?0A-h)=0iNeS2OXYjm1I`g=M|f0V<@kv8^9}4~G-762;Lpzql~U zr&dRE^Fu6!`lCv6*lTeH8UM2Wg!Vgg8y2Eo_XFL&Y1&oX49GY0EmcGhP=?4kZH{3&_;PQ(o3_?2~j(qm&t zx9ce-H#1DoKmM?tWW`@t{0r{bt*|%PF6F& z`ua0-6x&+4W=SD@+N*?7=?m+8a|TRZ45mwjA(@+(b=h!cb*6t^**v5)oziF70F#u1 zE8;FUWkgf4S8s6uYoFxDNppjfF0@hkIbE|!0qse@#JfZiFw4cUm#0B|uU;c5NO}|| z*o_q|JjvF7al{xlfG`0riwu7nVl1qbbcm$cQ_euhF!VB-~NKQ4^yeJy{ zd{!?vW(ylMSR6^Wh@m-F?9$S3wk%c8WOJ+S1M~|9_jo@sK`2A>feK3Ge2r(}+xN=~ zgq9Dmnd6}cI_a@Zr;ZY^^PkUXrH}%Q-*x7QIkZSBS%EcS#y00R;f_$BiS2!G z$W&5)FM-s3>79u-Ny6s&Lj|V6Ei5P8*Vjf0VJWvL@Js`vV1=db9M`kA@oQ87celTG zh|5#`n&l4Bn>`Da)?xE8s1s$8d~& zDZng(AyZP9qR=KHQHd@&+9<37JCVNBCGI>2i+%&Z9N6(Oji&vkgD5n$Sz=B6eLxt0 zJEx4W>5cGz{t7A57t$$XV^>;6kqUkcv-BjrV9fRhg zS|UNEPNl|B1HGN!GOyZA3P^qC$w?gwf@a2U5;<(g0X}LwSiL-Y+xw-M`ZEw%cgk}| zO`-iA$u?(N$>}h+&V^okZwz3HUvuMsu;%qka2qGY;AEjKxWwDH9Y`s?P;I4GNFpo6Hj)-0$&CN&qngPZJa5 z7DvgNJFzQ$JyHYXb#@ktalWs4Y#I@Uv^i;hzU^8J!|(h$+XYo(t7p8#Jj4`#npph` zZJaz{r5Zr3%z-zW6Jt@)=5}9O*Tjb`MexPk1dB2!FLjMIjaK;q!WGx6vp^atAejNBKl6c99lFQV@_6Y`^1 zMb;Bu?T$Q>AulMj^G-etxL4|b=>i6fl1(th3<(YYLb*H~k44D_2Ttg;TGRMZFjE?q z{nX`R%7757ATW)fq`e{D;xLb(i8h~Nl8lw=$oGgGHiw2MI{{R2tWXlWBR0NG2+n=^ z{cI7f@YQRfh;T^9EE&mZeA^JJnQ}4?A|e56X!<_iVUT*{fNen7<0(OZas)kk0A#`& zAObU+i_#}uF$pRJF{65ocF3Yu5TVF19t9?R5?RMDmL^)!F~J+zKN0V4IU-0tckO^o z2JFd7^?u)=;@m#@J-WD{K?c7$xaJM6^e_p($GZ<5Oo-qwPDJ_ZM&a&A@)fO{>q$w` zZ}@q-QYRgE`ip)CDvlg~qkDc;C5l<{YpxQ#J2pr+*T&X!UDFk)ht&u;?WGdF%S$q1 z-*~Dir^obCiK3RTZ+tQP>%7Inb7jtpbs7kuEG) zOqTDpVX2&?-{;96OAh`CyD|Y(mGoOnW0c(LhkM`sl^NUet8t{C+Z6E!5lgs8++?oL z`8?U4o4kJNHG-Peh!$<1s3#$0@k>vYbN!3~C`Mc8e;$v2>+GwLM$et8aDe3sX9E$0 zuK_ELLFxxRq4biZAh#1z5NL}hgf1nZXZwB%0gJ!2Jp~I~QkD@tRxjuYm^ZTU*0N=# z71jy|<^#2#e9u4XvdrY-l7xEdZH?kP_ciZM)9Li6IH4yHljXJR-$yY=wprmU%kdF0*~A0hH^C=s-tpr5KVZ zOP{;9dfn}%ao2k0GNO|*u|21AV)p{#H1KHV(v2p>`B*Qs)b4f4F!h>^Ex5h4a^Qvf zgbAZ2X-pnM)%4=%-#VyMrnd%l@2KXg4(bC4MS8A(o!A<~9M8thh4cP_LqIDS#>3yx z#|UYpf=-N+kLwz$eBvpvl#4q6T%C>gj^VUBiNlJtc0SnIiOxkmq6~M$(gd!~3yI^> z%hZ-E*zXj*ubMo1`(YcatjYAuGB1#?zN|>+R9?LMwR>H!O$$@}YTnG?y?AN%_|Nxa zUx~nf$5IA0R@8XV=|bh00-S$C0F?<>Z(dy`oA=y61!BMI6%4;kAlh7f;*v2cQFjn| zA{<6`5N5!VT8e%LxuYV)U7j$-{5(;fNE7DasLeF5U>_H?ZhxoEHRud&y=TUn8$CJL zZSQ<xTlPpcMSOXlE=KL%h! znKE&=lkClUK+RR(XW;fR*=<-c7(zWT#9RR3jSPtZI95*SJUO7xg;Vv4E%^3(iP`3V ziy@9q3mfYckegV>b zQQGDOZeoHes`tF4DG2VwU>4U^P+{PI7Zd;& ztlA5GeEbIhl0ldURqd|Vf$h0o;O7SN79By8V!P(TP7ST%;(bx&ib!ENArJHZ8!9H3 z@sR08t*=iH0N@P;)DjZG#&zs%!EWI?8wxGIC$pZvQi0i&>A;#tCilk)$C5R`OI0!F z0~Yp~9F2RlzjBJ-T~|7TJNIaRrN_^6bFJ;xpa%Fgy$L&dJN|r8L0sO-Ot(mqJ8>5`Icv`q4pien|R?LA)ivfZuMLpGjV0QbXt|G?+ zh3xP^baJZtpneBod>1XnFGT=g7Xv7b3Fi^?J9b}%Qd}uK(ALhzWZd3$wF%7q(PzRe zu%k>n`}bq!E1J=Fo0EX@A0CCf(_-x`)ArC`Nno$7i8Nw@YCDkfnF4$}#dr^`AbsEp zt~;d%$WgBZbyH$*dk}3ebr1_rURvjkpT2#KBw|cE6?71bMt}4cf51v zjvdtPxTY_m6vLAw!%13d%pbRpxkiDu>1OI)sN^sQScdXZO0sf~06cgjN=T_7e$zUf z(519QN^}G+z+;-=4-ocj14hm=(F(XKiq<$fq_CYOIAbL_*0355ugw>FrEt3?DyAYn z&u&i~Xdh00FCE#QQIagl=YPsXjFAGV7AYq;i94$JNIwqjwudh^Ye&escYvL*?68Kl zfG&KxnNb!?fTd8)#1aV-kw#MtY(1NG_n~XT*c*SM9iTHqb6w#D%}kers=FP21o=3f z+1O$3zX!}anZ+K|hAT0K4QZe(-gq!`GJ7a$+yUu-+Ixox0V)4j1o$dNtCVtROS{2N zh52d6c*CHv?L9fPVHF9@rTm=UkwYr|L6-%p-Ut>|b;%BqPC9ojd-AKPrB4%71FcPh zZIg}iZ`9U%={hI0bEzONMeFKRWs5buYs`ruapLU4FKt73Kg-8=AmCdq*l$_R2cje<8>kL#2JDJ84iZm`Ea{1Q!6FEw0l^n?O` zfp@??11ZC?WV58{C(%+81`FRlq@9NcWc(Pb*6{O^Mh8+^mBf>D0zT~7({@4W%OX-T zGM=2m1uG&S<)xm5#V6pd$})QrX|o)^FFoR1$s5Ai8R1HuD*KnsC_f1hPu)ZcE!MT2 z#iB$`xHy=;QHBY&g2MbnlOwwi%R;b!Dtft2aX5KDO-p_2<`nN^b{E5e0QUIr+-!0} ztU5ftrZKa%C2~}u99n9mxTYgr0(w2wr{TmCyjQ^|2*L_jE@*;7uVyIhwtnVkCKJV< zuY#I3NK*8$k6J$1yKUB`xoD*AC!6`s358E*ZFjo&h**TtZ(A}O)m4HS^R=CSd-xUL zjk4~Ec1xWW*$ZfG>FXR>fMeZOlCoFzF=Bv7?l3&ls&YEIEXN8B7-?Tjr8eDt!t0n&VN-bc}Y+IZKitt@XxRrvmL-QP|>{|5tL{~!au|MC>-e^3d` zKgTR%a{q%WxXe#x_?mGUUT5lmo1dAiXKhByn7ihz+xUKe&`ZV{e$vhlj7$EkT`=#^ z`_;V|-I@IRbg8(jj^%qfzuS{FFy&YG5F6JDbuR;Z^qO)XD@)(HUB4&iVn%u+AJ^(; zZj&1aL)LtQBrnc3{Rj}B=dR|c4*$qr?^uK&)PDQ8uVBmp|M;^I%)#@2iwj}2!?VdX z0g)r}X>3v9W4swkG0{`*S!QvKQ?NNAkaoSQU?)zYdp*Z6I@+~-!wy2D0dG@IPy|nt@BdJTJ zeE;n34@be(BqEW0h-Sx%KAqD`sX4Et3nxFcJ4CmdSr$%E5XXHR_^Gi5=AyvOQ7q^u zn(>`DBQl2gC8}!S;;a7IsY|#9@|D6w5|0QG=)1oN?(y44^>Ma;P#J*qa#YAYHL?)!2$^ zTUFoy(%1NNfAZRYGC!Nm?K|QA>J=lu78}F-);aaoIseIAsgA|w2g#o`GIt_6tB>uv9)?zO|EY8F=lFxt2jH;Z4ksKj+OD_G*%FoOu!fYXGtQ1I{&<^n-WqiO zz8CN_9M6iw=|okSxQimohQ?%M(q0ioVwDTSGay&bJD3T7iRC61EyHC zDq+`$W;dj;46x^CMRFwyYX;$u)jFt0j@8;^>c`?u!WKM4Z&fVG-vuQ3J;P`jZU1pd zq%PfrEGmzGL+2V~J%ch3)RY(nBhY32y}5~jc(q~l5Fl@#$xuRuN(c>&jb1U(@|4@WXUZnQGhU$_1fUR1wWzPd zAyFCG4@VqoSOFrqWmaYHo-rL)TzD~)CW0&?-(XjNSV?i0?XOUT%E<=JN~v$Te1N%e zIW)(mi_61P@JfR`VU`Nw35`A3ob8lSb||=Gn4ueKLrt^pn=oRXmfU;(j3b6Ua=P&? zjO^S)W)4IsjfDk@gs4=ehq)ZOuezt3MyxUOD10OtS8ramfJRVnxV8XBEnVL`b+7u5##tL3rc2+ceY9~6^K+EmKuG=*`=3T zi2$txnC;_WV0+3nYb;JJ$Hh;8cqX%z4z;--wyUeGu$HUnRkUf(M=tS3L#wU(e7D5z z*^kP~7W5CnZax)LBpax&8toMj9%t0L-_o{!>=5b0x$}^fRa-MS-TBjQePe_#qw`_p zTY&bPZ}-2pCSy`gLiAh+7*bN!&Sn>{F?v4K+9s9b$$91%e22O-TgzBAA2B;u)IAD~ zvlN6y9f+MZWuo~T3wd&g9xHGE#?xK_>Za60eKw#W#JiklY!AjTd8RWpWerV_6vgC! zh+gFdN4P^{Z9E&tg{H6axqZhYmQ%6h@8L{di3~gLjs#(=8EhHAkICGcJ9&&mli9#Y zB!x6bZGEAY^)Sg}Me^mG=`lOAadYhovKo{*yKO5k9lM&;do%DIKxjlKf~C)hoM`uz zPYeQus8RX+w2ybM*jYQDd{ivWE3X!Rc7D@eFMc>%7*c6K*ssS(Ycv%0JdZDbOu2Ft z!{(bb&}gqv%j}2;&3}27zgm|tV!~;~K1#Q>x7++6$_}^OpG`|kymx({nN7u!Uvi4$ z?>AuCeCjaMj1NBy5tf0-nQ>TqilewAHCg)d^rPDB`6@MZFL}?1v%d|+cN1xUkdn|3 z4LooiP%GbUNzkEEY<Mx{z+eez7_~N?H?djxy3bIZYgR|A)A;TlsqU`UuSX8Wm?u~emht7xBCz{k# zxwYU*+h+lVl$>BjUZ-GNm0(M$;2}7?$~M!e)#j;j1n(Y@{#ADX0=G_1_1{b_ar7`1 zq`iIB*u*$l_Y!V-C@DADEs-I)8fun{lPY0z$B^&0fR=*o$&Q&`YhZ1EHH=NJg_)ak zFn7(*4+hMJUzPTjDVv9z(gZn#s;7pO{pWdOd`@V}I#ZkvM3qvEZve~s8~{#fKI45k zBCaKO+>za5D=UU+?B$#$_8&|}yxf+gM!R-g9IfvcM?U0`A}9pTR)tt}ay|8y>!WL4 zy83v#UZX5S!<=p_a34mF+FykS#kUE$1$Np1$L z9r1)vbhSdfV(J&;6NUskeu@5cp=MmK~)(uxBS0>ABoJE7_6T`=-00|t==e7X*a z!;Tpc0zujr@ynm&-Kb!5dzA}|j~D5!(x;r2@RW5rWed!%-2TIV-n(_Ds&rkWKPVTX zAv5H1)CmeKv^c1Erx*)CC5a{l{Q9pIXp*^}^}bL0+TLwn?Tgh`CLXCnAAR)ENBQ>9 zKED;)w?Exyv1O}hd0=?7Uf?88r`Ng}=UErM!{=_Zbi*@V^0IZ$;`zJ~Yo`$NxZ99U z(YH+1oBYqypV9k&dWz(Aa;!q9wJz3ZQ`hU%O6uc5zPwk@;?OLtKa=)7!d*6BY#-#u zl}(=f-Rd*RH}dV3%GKgIe0xOUNarftTX;O$XXghJjn515VOQ7n7t8-7uOh#^R`PKx zvi8r1H(q@)utzV?AN8j-38+%0>HQr|Gx07iFwGygi)QnG(vI`}o?YFEoE_8RG8&y` zk942H6DuP4!*UTo97a;|Y#~)vKOEySoB@t%9n0CNK5<^B*7zgxli=_kBv04quski3 zE9B?p>9zZaU_E}T&e0_Bvn8x|nZGC`UYt&U*z#g=YLW5j4CwPRecrr<$Ms>d^k3eV z?egR$Z|L)XF+R|HfxqUd&iy8SN)pLt$FC36EQo# zgV()a!X}e#^|^czUsH#yJ|0N08WdHPG ziFNV*k)IZCt8nyg=iOI3`j`|iWdh854c`t<_>6M!W!=eqhOlcT`=$G~XPb3p(Y?hQ zQqGwVv+RAg6syZLDjxQy^Mus3P0sR-9vJpe9lyUwXVXOL>G^@b#Lney^vU*rWHcS8 z3J#NhEB{P_qx;UB=e1%Ppu?WcKcb`a^)b3MW4mm;$8aCvcOc=P@N2AsCZYB9qghU# ztLQiqZ}X3-wc5GoPJQm?k+KeA@ivFU69gU(o(x}}1Bs=o@M*2q%s+A$1+Sw~*pQGy zK<7$7WOG(EU#TeP4-38LZ=+|YeM`va!+i9A@ugc%G)k`Ou^afeud*4c0@qjT6RK<( zw@lNf5RbeDQpsH*h^Ix-q6~KC)%bO>-o8x5WV>r7t9CO= zi|h0Iepik@#^XFMCpn3a`HHhAM4x7VmgROkT88_0eto<=U3SgM-(Q}l?PeY*x7&D^r>FPkW3MttJSLm5Uhe8K zK0b^V>>+yfmyh#&zhMnH??`@J^5tpl7TMe5CF~|E=8S{Wo7$E0PqO{giQ~E||Ye7mH*uCc9m}bHecY(oCB1*E52Mk1>#as3XZ&ZqE3Bv8=hvzpZ}xjd z87XQj3zyrxpvUCxGReZ_{#8|r>G6G>U6UAY>C4Y!<&o<>Di)A6PqS4ilm4?(l(EPp5R_`iEz^EIjDYb?xd|y|%9PR+dL^ zeb}WV(WMr3Y<*1@1rbMoO0P$Uo@Yr~1Z!RfYbgD->gI`(Zy4Kkmyf312}CR2Dsm`A z;THNjS@F)X-ll2Aa^8r4GSdJfE2(?^VLuuHIR7}O^FNO1yk5Ug=^RufnDs|XYNV2} z_!8A(YB)Hsq(3DxbIqxnxIbkR040ggNv*q}> z-aQcWA?rmilGVBU((MAfz_bQ%&*uKon`chVMExn50I2G})RY_5#+*_LE${_^0Hgs# zLpvJh9gRhMJjqsnD83lIURu~v07Yiz{UrFG<}_4S0l!9ajupq^aEU{bb*#uzq;nS? z_{necgcaF>L00lZHV(;l#-n=W>C{C9{iJ4Q3bp5`UVAOY9^ncir#((#h9RF{=FcBc z*lYIydpEkMUK8|clt?#*lVjAtBfw$o73mZdZ-YiyVab<&ERr5lnTHPI;J4YyZ?ngc zWGySw6zR-G+I*k<B05jy z3kJE%Td^RD5BGn_t+10NZ_*hk^B6z*oqP;2Kl_K&;=f*RzKajs1Fcb{Xf(xo&2;uP z)skYx{V7zGbh1Mr15ovXL=uzclA}Uxc*_xuFBg!1IYvrQP&7ja=M!WefF7PpiLn0u zw{n@Ld(lZm6JbH*xk%L>XoWz}e2^Rwlx>jadq2$>ZG<2bi$UQWBb6pp>-kou7?fHw zu)kk}fHP1TLrShOa11GPQ6Un7uC;kwrkQKr%bh3$Y9E3U+QxIIoPkB;}u?M0JG)?=lEmJFObF^bXy1i?*}3$k8-wlGHy!I4fdRK)zYn4@Sm z)yfNtEO;Yu2AQWb?J>8pcdU^SYUvb?;~biW?;jdZv{eYv<$#r1dPti&NETdyfzpcyPA%q28YT=A#iKy#*Sh7$fkJ2?qEJ`qi}>i4+1jeVJ)c>6tmk71cw z(DGB5$j4B@V_1qiCIHesMhw^bdHlwY#}J$M;T|gFij|@byR7tEiLe%17NtX?pbFU_@#=to*)JAU8`-heP2<(?7&NJ>Ge*^Fc^Ex7AOwG_q0!&n}$ z))fPXg#|1&(wd4+pXXo)OjebD2Vzw<6#>n2BDuh^%^|8!uH-Aw)&>_sdEm{W#U&tB z-G&KOyrESrtw{>`Yd>cZO$g)!b;^VGlMAW+Akxxnc$yR!9)g~R30!z&X}{s62aT7K zI)vEr-Ggmm(=|8$qh|?SBb2?(r9%)EiT z%j_|Xxy##W$Wbjwb5Zkz&atX@EQWW`)Po{7)=`DzQS}TJGHlPm?A)qGS zpUgTM*4}MA^ljgk^7j2w%Wv0LBC9jeV(PIlPYC0*fdy4ns0!@{ZH-^Gn&QI72&E5S zlXjM;sv(Fku`afC3m@6|Xc!g(Dq*QS4obmRfqUt40E0ngADcaYhNR0-YT{|z&&LnO zxTwirp%f-=_xLnbyyQi?U`Q?*N|#J#Kk=E!Nl4Ohh{fTX%M5#F61Mp#9E6$kes6k; zccM(kA(5w$q^B_1jycLP82mZt20`soq~^$_NSLydrgPSSk3lzF3j_vdv?H0~{ohkU zDOQ1D7?qn=(KoN+Wti5)~?cLq{YxO}$Bv=C@|02S-14 zi}wnYYO#q*5?@A-Das@kfGV>tLnLV%v~-xO``q7oS3B@Hvo1kah2N&HK@z#4tX$lf z7-}OD(-cYj$Rp=GYsauGIC1qQ!iqB7DCWsN3wZ+RpfRuoKyE(Q1!eXFx5Ad1`zvOV zg)(Hnj@|)(xkUQ7 zVvrg8^1ZFDuF9UKtoce+_KwQ@b452iv_ut@wU3e`ns;JGq#t=er_=GD!|Rqyki*~E zTKU*s%QZd5Alvmaolv68IzR@mkyic};C?i}c2)%63U1XNcG*tSw5F3^2*YsNl=dRjjDW#-XAF;R?M&9g-rIp!hs#QVNP= z6nQT%T6i9+r#Mj_I39o!yMF#nwkW*}PPR!|-AgOI9=`@&G#T~_|KYMjOg4MS>T{Ai z4`}J>8;|Zddk}}7A4JmKL~0i$qVjbp@+J*`d?dy*?|NJpTw|GIP}yu$S0R?1c1`ZE zf^B4%zk<4KVV%Fa$$CoFSj-!%m?zn5h@TXabV>{EBW)`m(h#Y1Mhjr*-HOA*WzV}K zo2oRTh04u3oz6#N*g1HsP-NDriFM*|N&0m^Vy|#E~I?{NXA0ecWW7F#Gp^ zwx0N~-iEk4Tt5$E&~wgy9QIMZLo6L3rLgN|ngJIxd3i@A6(D)|x`?tJRM?BM1&69y zEb0oU@(YwSY`x&lnlI5FKJBUK2}8BXOV4mft^{;&)U~OD1;jL zH4!aSVDkLOU-93pi?ZhgW%?Kve8AaKbU$l+;DjvcdG&40=aGqN?BQbO-^DUJ1$;&* zq!{FWkH8X8WI+IqH^p_&>4JlCwYU3mz&(Rv;Lb`OJfJ2T5jirRaH1I3G=5r%Zk>%=(x$+N_U9QD);1XWw(#=Mvv@ zop&?EC!*w?Xl(DEgNq?Q|JfghTm1Qn|EXD9FG;Z!aVjX)HPSk!DxD&r2q}jl1Pc$@ z6?MmF4h1l7F*1u^5SF6BD#b|G^!#Q~ zqEZAh#mHzHQc+i3&J}p{$TTWKK8A-nEh1b2c+_}f81d7&z?G6E5>JkRxij^m zPCBi0j3u2)YQ>b~wa^-S$hn41y{B?luB`W^upHtd-QY+Z{30)Dlr4Pelc1|7F4Bm^ z86!HQNTt)H%qk=$4pn>6@kh&JFNJVs<<0TinY-+ z^3u##5>Wywl^o4-q^O5WewpSt7fMa3syL`rpjclKN+kj1VeJH@h3s=gT>=XAzqq5} zIvQo@QTtcG7YHSyCeNEESvM`Jf64RudGrQO{f&nQ{}=k?5of@j3JfYF9@_BGcP0yUnzdszsI2C@?vq{JBX+f*?coqV3KLcEe= zh~(9scE=fx1?ng&KwdYqwjspuP{lE=XpIWK!Zg|}E6G{I-Js%0k7<&7v~vod$ksoo zR($kGH)( z9`dq5~FJ;PdF>D{+wVYxt zQL0mp!Cn%WK1_hV6IRa|2r|}k>ZuDEYe5+}GXjBs^QH8lBP&ASHAl4sZp1M>O=nnU zc)(-%$fK<1k?co9?!SXySg0uM&FCyPUSwrRY?)iwXkXMOAMo*p`)*PBAf42e^^jMP z9F2irjSCljh$)ynyUR^J{TLq7I6s|m>noBheI_-OnJ;#uL&%A?3SBb-kOeKob6;am0Yn~t0MWt@F zi=l3TyU~O43UhyL!;l8GSanJU~MGVFkcWn7b-83b)B9)s_`YXGPTFe9-KF zlQ$QJmCfD4DP1iygftT8PmMKo`RHXb$Hgp+X`MXQ0fRIy$K$k+4(6)HsF5KdQh=t@ zk7Kxu%rzL(Ailb$ZiIDjAU1p*IpBW*u}o6Qujvj#$jtsf-Z0hV>_2?FGrCT0w8jN0 zMJt-@p0j-O647zP^W*P&N|7(A$-h8LQkmOIG(-@VCF_2`Iine6b+h* z@)}2h;t1&rC5>ftq%A7|!(JlKvh_4PJGPLA^a62HiG)GUH_{ zzXWW-OrfcX=dyI#X_JA8=i#>|*yBO;i^EQCbq3A8$E?qNe(GjgoJ?J4s5|vsIPnkt z8ti%ey&e@F@;iSw^^XNoH3Lk)rD@U%adk@Odc$B-4N>dud`ALXf&r`LuB!!+7k^6BtdCOUep=6(dhh_2cpR z3L5(=^Kirisu!E3P6g7Hhnvf39*@1f$J|-UKdG{e59Y(fg`SVTrbZvmjZliGX~iRe zzP|Vq3!?k?|E@3n)TxqNo>S69n7KZ&0zBHtlqw~eVxzq}lBu=!1f{_?)laAtIwYwR zOX_)rnu7MGpf8!JH-Y4T^ZB?94vWkoilZqzxmbA*p`^3dUdJ_Co1z!9ZW#si5fY^7 zfPbC%17+5VA$MHuci`an>&Nkn6R4)h<$1*hQI(b(XnBouPu_ zd1QMaEf1{}BamW$40-6a!HA+f2D8pHeY;+*r~ns%6H;Zfe;JRH;pq+x^(v#cLa(=` zcYXD{p4#7cA1AY)TDkhp$?mzm*~?}>+;ttJW%OK9+;EXM>5$7ScJVFd?xI`}v_47% zd=@*dsb>=T*FNW<#LVp|M@bXTI-0V>Wy1<3u1=CjUKZZNe)lXu) z&{*;#0!QLPu;uZ>2;>|!yzt5jVSBttY`Vs!_~eozB&`#HJai)|;oX^GxR~*gX1$ES z>@RJ`C%N&r(|5hC6vo?e_~kbk*1?d+^!w=VSn>Dc^tX;<*3Lh)Aolm;XCf<%WW{Nd zE~r)!gGHKuYO_v^CZiIT_i@-qlKW&+1xRJ&{`}#(FxO3-F&a+#^L*hGNg0VuEvRAx z4ym=LjsHN-iF8~OLoUz4ydR~J8S{)1ZOxIY7>w*O=+sBbiHS;o-pA?0uj_r>hz-u> z(%@{}*TV+A`&$S5C&r4#X8n5m8LxgVt+W+TmA%Y=JLd%ze1U@36R3(9P43rqb6+kg z=oW)OtrpyE%>LmnsLaJG+hUkMgwkB+!gzKJZ@=ZQB@o{0&jU{Izi`4+{M#Qgd8nYA6$+t z7t;NIQ4o&K1`FmXZ{B;V8x3$h z=6)8^)FWzls7KuSt67&f&VusxSFU%TKNp$%z4O-}E#}dTfj{Jm?j840kuLuAxeaYu z0Vztd7X}&i4;6w$Es;#8xU)^|MKa}o%vk|3qCP>W|2IM{JtCzY0@;xppG%WTsv;~+ zo`ABsskf6R-673|Ih3YEl7f`m&r}Jldf(ZWNe7S0)I4u%;s`c=GCe(5Du3VF)OV5c z)zW!KFVh1kXW^Qfof=*Td5`-BsL+GKecI#xBh~)}_iy?vE%Mdv`V9y2Z=C3V`9ee8 zUs~oF4*B^XG2r8d@m^o|hTLGhjiM}o;`)rWFt~5cVk4db7T;_DGM@sP|v=oPn4h;FR+RC@2F>nHxm_ zOWNR(?fix53T&f?j={ege^Fq6^uCxb3D7}rDuvOTe)#_lBT=0F~V6 zq}qAz!TxmVD~=lV35gKF;V7n%VXX757pW{H+{Zd)Q0W3U+4cF=zA~u=iyuFqBLHPU zn!h_CCjBj<^7UZ7dXP~MtLy7o6x05<(}k(uDYIXu3Z9LQ5SB6pqa$=b`JHI!e|LRK zbi55sy`sq4U-hor{Qlq|?RpLV+;hi1zpIu=U0l|vjOi?5N^!S^H(HEwpqUVDj`4Vh zQMJG>$~_uwKEt+h=ZSXaGJ6ZPsnG7V&<>%Y~Kj9t&?)A&Tm$G7q-1?3W zgQmR_W&Qv|PG|1jQ(wMJJhH#Af00mS*?te!=x3$!9K3#lNUAhCzJNsEB-jX zb}&aag(wZM=7fN$`RjC*O|j%2l=UBwsX6^|7g@=5e;U&%J3L-p z-~JOc^m+D7QKIrW0!koO34>I|o$Vw@KL~x9GW{eZd78PU(_ua6gVf};sy1~j3CuA* z(?!}<$t@53PaWXzvAA*L++$I8tJ&l?PMl4C#gO!lmwxYg{Izy&{D%HsJNKH7wtP^j zkC9lI{Y@juHjipEwXzv%f3nEc%H-%7R+-5kDTS1p0|czxqKI+8(fEk&XA1XmlZ9~@mGSDN~pAgGa_a;bQj zAq%|%}pKxg}|_N5U8|Z(n);6;Z{E!oBH8aqov`6^P;aWe?Q)Q2iC1Va{l3o zCMWr>AKWdUtVEQ?RI#@+qNuG93$zwG$$h zt&OGxD&;~cA2mhpcX_*B2lBds1I2y?0ODC=qpQc)~5XG&qvneRr5<4tj*%~R#6 z3x&c)tmS*BnqWpI2<=>Nz^5v1toNRMbX&yA+EsOaDe}o1Jhe9Gd0`|!g9>E zihCGUDFTznQ%+RovcftA3C+gH)Y?p*FHIij!Rg8HZ5PYOe?5tW$+uA3@;#CHPtKnrdN-^!@bN0h;HNEFn<*jDu{?dNO|HvuuFV4F#7&c%P zzxfZ0_wSp}OhWDb!P4jwYUu)Xqb}QXdTf!VH!Nq{rMz+4Mj-^0PYcWCknN^QQINy) zknwAu#5b3JfB)ln^9fIh`;?$R4O;`Ua#jwe%Z!dr9bpF5dN#~+`qnG zH$AQ_f5Lk|!M#S`r321O_Tm;aMrYflh0S@RIZ_U0QOXD?g5wN9ocW-YKvB8?<+vE> z5+i$oD45(ea*H&i+F~F@LUbx3r5R}i0oh+SKEsu(6*0V*>-ocImyS_i3r<7SzH479 zgz@GxPVs!E3u8R(QxWDI~Km4`N`t`*NqYD{~`PRq2^9|k#4>fWR<&VE*czr+i z=RsHct~+_6==HEA>*_JmbQ~7>0wbEDbR72UudukQ?HShg8pq(1YAjH}W4Fm$k-PwF ze|ldtO)7iosS5dpNNB-J1X)as-Y(USg5|>FRTCN38@R_lX|UWk`y`C&jf<=av?3x> z4dXZ|YUI5+Ap6`6qsH#Nok^RY{dPAuUQmi;L{!$tJCmBjzVBxVOuS?2Zyn^O0huxQ z<&FB*9S-c~e-Oi1SvTLZ;R+r5sqq-kf7#+&w>0_PGyl$D?grLn`yYBI;1 z`KvGK+Y|O2f!FW(<;z#!GbA|ye@SC>?2*MaXB#0T(MGci0>|_dnLY=CRBDQ69*ib) z&}wqih5wj-5v%uy3-dk1;D`OL8^evG_UjUGewr0RGI{YJtuYjG#&pd`9~lA_B)u2) zYdkZaDH3H1qJtJmWj^Q4+;7-_P9MkT_xt^K9WYx_nRycBzwS%2?Zk-@f22{WD%53};ye~0koNv6Ew`JrZ+g=O7hb3ao+Ysp^Wy#mkRv9|ix!OaW* z8$*71jy#s|-ctc7lrA_^w?))LwQ1IxlPFbz62>dH;@W+63y0y(`BX5Z%W?e? zr`9D<4x7fp=LbQN(O^F;u99jI91g==9+K_?*tr z(9-{lU-Ug|_q^uEPpgd2>6X_REH-(OqDR?Dg0$(`lLAS)_nLeKiTym()~d&th_4yS zJdpL?W1$-*)nTS=O0n}EQpcU5%;1$v2JIN3#x@#jk4>wwe`Dt6l0=u22N48^{PQun zuKeiYy|#Ls_If$Oqfw14<=TLI+zJLho*sVbWs@Ht^sRq2&JVxqM&du}OSD|p`FO^1 ziZqXB6jB!TKFt84i_f&(N!JI_aF;Y`f{+NNYWfw8&3DnIf}$z{)qIuKQ?T>@8z-g zLfr1rdqk^w$$hVJR!q}}na%=CoFyym5klM#D|+V18ZT(tMOYFHG@45tVV!EZE7k=D z_W-6&Y;ll+(Y=jHROMK5&UEDk1+2GJ{aIMbr8=}le_>)kF`WHJh*^))G}#2EMHvp` zX&fi#)eCq~Am;I%G(_i(E zLH7$Z^~ut9$=H6FMp@)VY3xZJgwjURqP?eDrs8>~vKY(9hc9jWRAql=T|PktM~8+^ zF|yC}aIv9L32V}v6+ls3Q?Iawez(!{o8j?Pe`yaXEADxHloJd<06mUe?Y+RZ2B}a8 zE(QFt%M$C#gjVFo*O#xte|U65abyJ%)CNxUG&@%9!Y!fpqqas*R*Cb<|YF zf8I)uGntyYqUm8~qE$yh*i1;ubIdoS$zQ|jGRdIA6s3q((JR%t>mFdokY(y~2xIXn zPau)3IgKs|6h#OcIwwy8>xlS=7bLXgUYurux}1W8(&CVv1tA(_K4*=!VwfI1$QWyQ z*2ia40+yT`9FGidXw&JfzsJ-#{sp(?e-NMU*p2%7$gg_YH?Qxt(!pEiV3rIuINoNI zb@o3noX@1Peb1FEBrsAmXoQh=#nW`tQtz7w4A`yihr@j z-m@s%dn0?mhbTv=(}M?Mvpjz989m^f2h4_(G+(uky8=tE_dRNQtIOWMtNGsP>W*_m zjrOkb*ElSB`~Cfg9)$jk1HY3Ke~{_HMOn#4%9E;aRsKa}isGtTG9?JscEXSn z>!$ad#~HQSSpgnI8c&=2)oW|zjV4pC$z0?03Bj4X35_taOl}?`y*fF~0>_FkUG2== zc<@~jY4XO4$s7BAu{d%}euRGbmUx9;|H}J*_g$|s?wNv~_1c5g$zofbUuU7F}Xl8c9Aj@d>pNh5>+a~QP2A*xIcj@&C|qW|>yrO}CYM&E~`&%0%g2K@Zpi;}JA-P2=w%5Hfz zjm6n7#j@|8EJW%;bEYazP2SAD`a^3y{1ygrl-+zfl^iMF>wO+n+#57Ivp4&*NJgU` zUozOro32u`9$#)XRsK^?E&ZSRlbMTtPX6i*F&HvvTetZ2i%0i(f1TcSfjbs;-@R9Z zR=y~rT(V5BC{huPIHOVi7C;uFL=-brxj|wbS&UyywomL6wS%Yh3T}%-7nSGW&%{mB}kEbU4A5{H!pFP>`GTRTt|oKV!SmA6u+(M znD6Lg9lDYBhyE)WAQ|c%q`8Z&*oC*()qvHWS6rq&_VqZ|f9DsO;d4wu)t}qa811U( zbHC$}_m@sW@(KDK&-;4o+LD&t1a6u@Q8k+aX0tAmRB}v!;h5y(m}a(@WHV2)r+Jfh zy8`0b;i;gSA3`^iK0;!h9ODJaUseaYnhpmU(@`tXGq_{Z2QusXK=Lo+WBz+yOs?~* zNAlZ$Ee^w)e;w9zzxIci%l!B{knHQohA+1_KkU7)WkrAZ=kMNXi~s6x9`fCMf2fJa#)~dobnvf<<}*aoF-AON z(s6q%EjXPhY5XRNOU`FiG~b$j>@kdqR*um+y>aG;)yHc2SotPr#to0L8ogM9-^BPJ zCK$r~N@XA0)aGvalz_5cXk@K~OTA>!hce?YdeZI~NUsl_@KA4fMaKUZrN4FZLmy|< zz4!H}e?HFRUwg{nm1F7!L!Vyc(vb^U&LXivKtEWVw@5OW>nvvbXU>}4K_qm=wEG3d zLN5)cuUuNm52EzKE3yQ1I;EDnqKZBkj^Ih()6e|d19?BQ;hx9;e|Vk`eco~SN8i(mDN6U4u40-kR*`dF zXrANbvc-~mxPWgjBr`qvkw`?6?p|>gLnVcQ8zVy%6)tX7=qGspL znmi(K5f~#sXq?(S4;#->A($ws@kI*Sq#SEUFtB`T%z2SL!iaKQ$WPH+7?Rn7>hvXy zf4GNbMkzIU7pzU*uYtbwwo_C!N`iRR;@5K zbmNmvX!Fm0&dteUGIe64dwr6QMi-1tf8Q4A`_NZ-xIcqO{k{EFA8qp=kCnN%CjHej z|E(YV)c6yz1f`bq(xF7AzDRu&OYkp!pyx!GJWXAByWZJOf8V9E^b75S$|4lKankPI z)sHq{_|EoJ*bQ#R>rI~r7<@~G;mbXLMCE7%o&_5R!d^erFzAOp276rJYG5rtfAIFU z4_fi>_0wBj=evKk%+X_!E#0EFLdAUsbwHHqWvOsj%8eW4DO!Zdt)7~&*t)VvT}=)_ zW$Iz&ehvADdwf$eHrfq}IDLCh5o!!K=HKTfr6_U`NMZIkWsQiD)*L~WfuzKPyx?H`gPYUMFPD3gn?BTpDRTx; zwKP4ME+~&hmyD`xP8#jOQberho`Z)FqFQ|xDFb;?DXIwun;_60Tga-%e+nfo?*~P` zpKm;L5k;_B)(9>xXztPIcyJ;2K+D})Su+gO$K%EF&FE@8nrq?>?O&=ZOUw#`$C9-S%zo@3iP$kLc@!|APs8poBROW{psVAlvBRTetY-FE_P*m96kcbwKj+)GBH`ygB}79ZPdL zI%aGgRa~^$%8M=wf9O34se^m5nnJG|?k)x&sKE#Rm;by!N6=tc`ODKfoX7C@Sp~+E z-k$-8RG&xvhh9Gen?rvN9$$^3t;Dj|=%;-h9&8syV#8y3VOx!{6vjV;KxUZl?WwIR z|EDL!)ELtlleveaG~>B?jFkodq?(wqYzkJf%)bR)J+b|9* zrrb}r;R%YN_Li3v6eYyE&`|MU@HE#rRMC`ZftX)Ui4o$V@-b>YiOCUL)6}R;FKmdk zv+_(nC8dnyf7@}b(mgg_1}Op*@d(M<{Ecwrxa;M5i!}9dtb82395bDLx{$gqws>|j z#YYNH^M_A}D|7CSKlHCrbn~mWtf9RR<_W9?jX1@z7VxGw?w9p}3eo4AVsFmvxtMbe zJ@a}WzF)t*^#bK$12tUrIsz2pJXA=3R=UW^WF#?qKUdtc>5=VqLd3M^2R;@GNr zogaJ?5)^g42s~^sQ*qzNfydPwqhQup?z2d899`Jj<$M|`7nI1RYF)0S++wLxCM(yB z&q5;$f3_(y=MVOM9)FGRkk^QA&;|QgIdJ4dk3V<5*li=M($WKqk`5%1G9bz_KYH;I z6m^Ke(G)br+72PgUqA{RMB;;jJtOL}l2W|U_9BV0XSm6qjh?wNXL4>*qh|^ex9~Jw z8*cJT&;0iI@bWu%o7{Zv=DVFXNUs>AM=#2}f6%$Cs5v83QsE-oMoqSZGBrN2DKF$^ zTF`axm5S7IT3i6kxqvE1Z_=-fU*D6aKfg*%-{mhI|L%=Bg!l6pyPF3F8(-GC_E4Up zINdRoKekOSp+BEQXI`m1R#-TU43D@R7^**MRFR%5pzPB%`oT0sj%F)3U5uWXnS7OM zf7}1h{_vYuayW-#&<)ULEDmsX5p6kEE4rYD1kEU_H*m0X9;t{Hx^N>prCGc@ zCEku&jV|V#Tzu*CgL(RSP}LTI2UOan&dG-jCA_Ru_&G zHRs+WM63RssToQ+6zeaKoKqyzIf*1Ue>yqHoYM$Q&pJU`7hH?EiBq`9rU>y5imC|H z)EsPM>YK)gfjZ@WnIgUU-gxK-uzobbf#2NE8~t5NilGV*mXBDGp={wqp!v%jhC0z( zk)8gFrfu+Aj~b0$O&)?R^Z7?t_T$#}HTAB~F^1madoKM?FXyd(cf)ckJjwq{f6cnt z{@?1<8F)Tyw9#4BaL&VTe6;Z|z2`h=1X+oGo$(TWSp>t5Q=BItdw<0YYiiGceSy5p zu<^R8SpX-BA(ouU8leILiYPPZC3ua4=0+omsPSRAQ02q-QXYa$jb*>?P5;?%T*~k3 z?VsngOmbEGsI#3p(H@aGCDQR#e-n5}Is(c(a8bTuS`kmxOC;rWssv-IdL7O=tk4U1 z1S)Du%*N*d_r9F|e8O*E*G>_r5rgIFO;#>yGW25jVN4A-V-zlV@Ba`SJxMn+bt&5W zMk@Gq?%3lEvz9;S<7_L^h&j$GbFNX$l=i`TJ`>fl$*Amw11P5_kHexpKoHaFCyv_)Tf(=!c z;mEs}P5*D+exK)7$mKQR%5PL84p6hkWLhdC1#Dc!KRrw{8u6S* zRfHRunIec=4Et%Jz1W;1n(6-!Z5(V6XuLhL|r~+#<|1W0@FdYXA4GmLXgd zhdH~o)HxH|YIW<<$yTA159Tb?M&}xCX|Ipw669hkJ&jnCRbL z23`e>X-3>8e{rg|>4=^+$!@HcYwN@f3mc_Mx1&af!S~K|{a|lByGkQbp6Bd}uD!kB zx~Ib{ySUgUiE?wDbMzs__-GXhyn*_P=WSt9L_Zu|;>`a1?fW{5nW;}r83)Fe^HOQ& zZ8mj#hd7+}VI1nnwF$gmCG8^D4&Ub`bt2Dmw-{$yf5Wj&#$AKKXks`HZOk}fQ!rU; z!10VzxnlC$OcVt)HrRJ{fhc?LGM`=ZQJYXEXFemOjeg zy-35*fo17y!#zqIJohYW6~Hsjyec#A*CE=Be-p0h@SKlXBupK;BjQ9R-P#6e*%k$S z;c#sVqC*BY(WkB@_n#b$Ut!`IXhX-iQsC5S@^z3y`~_zJR{Vue<11|0dHR+33Qx~j zeiUCp_ARbQ&Fep%#21fjoo+NeJeR&1TgLV=cPN=Z%QfpGpW6D-nfDVvyVE!dNt_*d ze;!`OODLf|+I)9jVRm|6VW&HX^S^C7Id{ug9;(&YNdWwFZgi@_gw8 z&#d!#mx4PyUzhJYe=E9=(<0aBy{EHS;K{i}e|HuUV?n;3-LT)! zZq(aeM#l3>XwVON*`!Lf-Y(X_afzYK-&#{OJ4}+cvTX4z1J=JEvz%Z z`J7Bm!*FcEbkl)}wr%k#YdVfgmMyzZ5T}(V{3e8+5wjp~pPm2c&4Q?ex==(rk~~ z;SJ-F;5jCHc^rQd=krP02+vv{&iC!&qa}OsPU6FgXZ_Zqhq_Xxtl5W=K_atTnQ9d0 zA~2gi>v(pctL1>BewfANoY0eHfAD#|A^j&E+UOSRz|H#8+ngdM!BiVJBD2$Q=2fy~ z8s!e{!&>?{Hl}8UIp%W{M&xiV$iQy~n9Vk?>RT{t9b7XuCWrLV1mCTnI3rY#eo_KW z+MX9FF!IDJ%f@tGqfu0&({&B9rrwx|z5xo}OS`Me{up6@jO9L3D|!Ese`7uIx;x+I z_-JqM2!G`I@hyblX>+lX@)9hT2-D)QW7jQkKg`WX05_PNjFcmec zV>)!5$M?DM!5=ZIf|qa5e<^G&z8->aPQc++gWws$qTBTQ%z!%I(^8_yJvk3-w?)10 zWyh|N7Py8}OxEEgu0WZkstQhGnHmTCFVYNeMwpcHp8~Z77k^fR21m=}Ni$Ot1EF`F8Vt{9oCk zp8&8Vp2i423SW_DO1N3XSW^@f+cSi>pDUM*(1l(6;xWdLe`s=y9|6R{^R1|3ZV?bG zb?-3l;u$9*=X02*)N##F?j_^ehSP-!nEG=rt4AJ5_KjFocSKdjE^qfK9*DF1$~^7~ z5e~D&Q=p+Ae?Cp$#&{l#LmTH`JYs7Y&Zz^|5a*oDsQyO4V-096NJX#(v0!?{(B!k@Gh*$$64dRwaxBK1x8 z?Z(I{1bRkkuA_$nE3ci>)==1J)V5l+7TcPgd8;W8e_Z<8c5KwwUAA@CMt9xa5^roQ zo!(|TvJBOHDr~1txyroUbTKLS?Sa01S(kr$gNJGORLpY^x53_wxO!?mCOP-+&3 z;4|E>N;-|dr{zJ$P{0r0;atL$pU`{bBoEZVb9Xabw+9T2$AY~c~yS`p(1EYZKK$w43=f9Aj%4_mSbdiWg)hDxH(QBV4~pJCv~ z?1&8wtfB5e`SqcM_qtqUbHE0@C*?YXV?L1tEy6g_&q!oO7%yK2v5Ihg&3<^m!v#>s z#-b*=@|p7cAOIfzQ*mH`&uy;HF}{8`!r%vh4>M2|37;~6#|eHa3dTU1C^vM-6VjJ) ze}78n_})83el=_#I>+&!W1|F--of*WvcH1iLpdS?;Ms>#^Fe^mcKvw%a{%Wy*do%- z)@%BTkIjhsyoLiH(EzqGR@2E3-+c5}2D&I5@pa36qmPB7FgGKcKfH11q2FY~;$$n8e`f-sxwOYlCWQDo4F(EdR9oak=d z@RVLe=3UPmf?n+|+Pw^)nlT&t3(oO$V2_p~$CwXI3pCf@Q=w1$y*Ax~f0NN$y}IG` z+EL9Qo;hSYelEvow3yG9`E+m%F5B5(GA`i7>6HQbamf!}csZs~eSthc^+?Ej-9^7# z09qY_?Wm0ijH)MqSTZjyM>?~I?E*|>ev-4iA$OqMh;&w;ZNLR@vFP{9MqdC6dfoX< z0C9TdZok(ib3yvLsZ{&ce-I3d62r8BReQ$Vw4mZ-h$mn)a{@3;#T=S#w^+rSn_K`#+H$MQhr(fXN9oSw-vUsNb!P(HpJtmzl-b<(X${LGI$%_Xz^V(w z^9&ez-DwU93=Yo+=&jM20im70v|K4|PH6n2wQP&Oxswk^-f5%rNO5=#O>(q>C z1hh`QAy98Qh@jVqphqJTuR{TIVJ7u#IBG*9Kzy_d*YG2!O#1x*h^G>^t7px!%&N}u z^G@bZb11*rv(qmjuGj)D{_s|tM$i`6&bk0+;Zb7vejPpM(8FH#8IVn68^jY7hFG^= z8~c@b{q8b?*qwQ9e`{Zc`sX~`Q5E?>!slQMpjX@VX^XXS^a=%N(*8}Q_NL=Hx6*Zb zlnu?;=+}G}3MdMJ%ltz!_gp6w1O&>*NZYznuSuQ3>$pyPLvS4Br_@8!0%xZ`EsJ7g z@9I*3F*7aBpxmgLYB^$Votx$hH!WzHiaE1}(6(6U_AJKwf2`H7T2Sw|>zXx$j>W=O zkAl|)u4i~pf?0SJ*&F4tsa>RCtwcvT`GX}q&~!5 zr)M#Ox`OTJtbf)_b3OBh;M9CJ6JSh_?oofiQE8O$Fc-;l)c=y+Dq#J`h(njU6Vpha zqun{+`Be6)e_(|7dZ>qWE@T9sI`*wD0KN-c$8{6^HQEsf2WYF?@&qwwj!z5$8roFd zls05?&l-7UJo||;k#QQlmhI}AN-ct3d20XK~?Pat@D9A_%hjnYQ8kK3h zg{d1@L+D!;)06bI5RQ|i$MKf5WctXmSUE{ogm6YOf4$SVIUcYIFR8hHTRUEIrg0 zYI7?9GMb91*`0?1gV$QntQ8o*yzBNXXq$@3TSI7Id+2CWOG0t=4tnKGnQ#Jxfithw zJd3%Me{Lc_7DID^yqFLUaIBx{$6IIv)^BE6jG7ed=Lq50GQHZ4w}i^{rDZWANyql} zWO|LpTN2Ck7t3O1l8)^=m+6)t@umgDqiSTu5%(EP4WmQ0w(~=iNl5OYcrEV(Vw9_ahazLn^s(}m@3XKyqOa)8(1x( ziXE61eqDs)xZ3JjHZ#HH<$}$s+#U4Xf2tfe=^V%Adc{;m_yvTrXF*wx-`8{u$L4rH zk7?;%*_jFG>Rzwq^ekYG%7*gZz%i{C!SYJmw{0%x9Ozxu;X|_kHAfjl;P)ar-<6|> z`rFs9;;jm^q#TWi%n{pJAizbu#(4$3rZa^0r^!5r%*_Jm_!s9+<=CF~PRcVye=I6m z7Jw1J6JiV69cg>q+V|&MgnCmtrV+57IX-R}d9zjl%T}K@s2Vl>78-k34)rn%N| z+_#4Ca9Zvc-K4o)-xC=Ps2^Nwe?$AAgA4>>d`s&2ATpG?=aR79>vG;S<{QxgVj%u8 zc1br4ol83NGk!CZ=E|~d0(f#V({?#S`aKH8UOT_Ffw_pUP8>%KU!2oAby@I5pnC&zMMn0bg#9+doJgXSM3MuVlG-s z9ABVlzVRWRh;RAzisX&sPIq~zUmR~iUk6seB4|0-UM9{-8nKv4U9_N#bHav|q+{8w z03yuWZlk@pyj76%R-9)Of1~AetN_%SuSRV@B^_l0b6Ekhc;CUkZm&bZ6p5Rywv9Ml z#(DH!zBMEb%YqutA38X%VMQT0K+6}QFaWPfa?OO`Z1>#6>)mrZp^x#}N$BOZo6yT^ zFQJ#$BO&B*eH&$amSxL#46J{z9ly!243cgw1fH!;=!TLG3`zGyf8#f-dqSrm9e^w8 zCYFWY>D|%AaST8Vy!I$w8_9J$j=|YI_wjo7JfS!7x}VS|*9m=cozN%OF_r_Mg?3!# zSTFqUF_z;+mIDYwx`B`NVmmsK)ZdkKBB{S2>4r)DNkXS3^-t(tQHPpT9B`uoIDaeF z3Is6ksEv9T1%ff8e~%Lz#XOtH!G5aPI3IqeALE?3^G=`H(USwchx|*m_<$W_soP2* zn8X21mZ`mo9Mqf2JfSZTinJ4)Q+D2I3zo&mfi}Xjywg6*vTMgS^Ih`xH<2UjitlRh zy;dLu={Zg41v3L{NQvaVr!`IFeW1lMciw54qLM6Rnn2^9 zTYwPJJXv1je_+)Dc~l)*7Mp`NZQ=Oo8-5P=t^2IKtwGszyS_$&nmubrYs`nDJtDqg zcpL7cF*C=U&bqsNz*>D=!|4s(F(r5oMfhfDPvv%v3kJR*5<5w+&d`qvo{O_2f4w@#xUzjHYYz|TXC?FResdUc zT%E>q9RN_1>$dxQBG*3;=@gg1qr<(9i>}Ino?%2E^KfqfGl_e+FQ28?VFhG<;MV{h z&#@96%0*mM&q2k6fkReMmLHqt82rSG-~~%!bBHi!!+{X@32a<<#&vdl=_|YVS*I5$ zkI)60e}iFzC+}St;=VxH#U)*i@xtXj)qvh4m*Kkokz9^)Eu{AxBG;cC{N8h!K_AEh z$_h*zOlQ!=dxOJML|mxMQQiV7BN%b=f{Xrrhs0o*_;&>r7luHlSe_OquTa)-oxd#0 z$+3Jc?l;F+ejCfFuXpA6q8zG#%dvbT7&}B@f4&j_$N77*{W*Dq@*IXy(jK-08*H}* zo|~^YSdS&rj~$|5eFlL2qrJEHn8Oz9)xf299nX>cGrdUqTb5gvtG_Fk8(}}q@%aYI zHlBwQ_`JQlw|K(82_%d4A4jmk{`6$IISe@V5AKtaS1eaQg73J`^8a3+u9Zt*J5I43 zf4xik&AZ;{8lKA$xVtfqlKvcExe~p5+x4G{fBUlCyrds1^5c>XZ3_cT+fVWxJsq3P*aK*y%7LOQ|1GqW68QXL$}I)SGgI`DQyi zetR!pMc&VMa(GgoqWkqedS023x5;`CWhCpS}8{bhl+lrAwOJqVrI?iQ^8HyO0rXr8WSHI3%~^zI=qI0$d3_a@><$ zDX{8e++D0-J5%ZJI>UY7D(>Ub5$FrUT#6Ulc<`Vw`aY{b!krfBjIAhXfo_117Nj>cBK?2M7Dd<)njp-*re2_n&(} z`e3Y~7m*Pd3OsT0EC+2otKir$<5Y*b2APO4sB_Tp8w2t)nfee5m38z!)Qbdt(7{;~ z8sWF!6{2(2#Fk+WYlG!M7_kntoXWVDd$D!Mw)i+RT+So>SUAacH-iqufA-sGmtNHy z&MtjkC;N0qF^!gLgu@=&(b>5=blpL@t9VY|_Ieq=UuLvdo>R9xH(YhGLfY;Vc^mY$ zuR~sU2IaZ#R_ZT1o^MWPULN)qoAFa?kvuaMdnWx+$iqVs^yt6;`_FIB>tHzu|NTc8 zY$jp+GWp}r|9w9ngp=*Bf9LfoeEgQVS2L64x7wgHdHP@Wz+e5<@s|_vtv-pqwbskQ z>Mt<~{O2DFe>J(APJBKMA2avrk8rXL@BGzxvI-xSd}e`vi}2also5KLAf z#hX2=vfBoO|3^*DGAC&4ddiiP+?xzFV9)yyAf)XPCQT`qo&7XHg)Fd2pa&b(~@ zI{N?5o!m#|$G&3H zCXcIi6bxQ=Qtp=Px4Wgky4zKI_xR|^3G2W1HzEu+;d>eUmF&*bqF^5mpQT#dMM+B( z6y+q@50hjO#*4|L9}X7&=!76xCIruerN4+D4_b9XC7!QPf3*iI(MDA}Aq~STq@DO> z5(R!ZIcNs<@(NXbakT*9^vA~q$Gm>{LBUh_)0?$*Nd@;m&5hf ztMVbQ!BPavzfQ_C{OhE{hgXDv-9D+q=xVFd!NTXk;AMK&B+tQMej)=OT*+?)gL$|) z?Lw^kMgIN6e~)>5SbyYsiwcQRa3Wfaf@`We3jCMwG~@GMzoFs#Dxc*}9(~5)(+d0H z5x=Bxe)!j^9e;)W2TJd+KD6>5AKSv~!WZFP`0D`ji@}SSJnk=koMBTq7%nD16CkI(*gGCpG%yklBI8myyJinH|5WU=^hiMOwle<1WngT)={^y+cA4#V~Gl+oYf zWw1I$@%@0ITyngU_msg^2tFdde@P(wd>~5jUO8`9_1AI`aDR0d$P(SDN25_fBCfSW z(qI1DL=Jj5iX{Bim+kd7`SG59U_V@Me@U7@SnS}k0UX* z*J!=ie=HB9!xVY###2T4*U9sP22cGJ(tUybu{!LtEeG3OEtQMbT#C($*bINk`Akyp z#pLS)R7*-F=X}q-)-Z!;&E{%8Ygm>{4-uX1% z`6J#pwRRzmbK#itZLM8Q<6S)AUA(DDe@bc0OGnJJ_u7r7qomr7a>QS|&uBN5K9g>n znG^n{!WYZ0rkTEaVfy#Q?EZI)3^h%LdON(RX%L`Zfq+bY_bc2YL`xHLiUrxE~W7<{TA}5so0=iVuQq=mHc zw~$)8MMy49$SF{$?~QbekbIhuQ^eR;q?o@&NFhx~;Y7&qX;d*y$SHUzzok;8G$E%b zqUC?5xjsXa>?NA0_oe*r=vOA)f6g-}pq0zr)-PHbnrN43l2>!LFjv!ZTg?TC|41B3h(Fi>ydT}!=x__?VjAb- z@8C`$4H^nppy8{yQ%FUI!X+|D)>J-VcVZ}7SC|&}_mzR2qX^2t0M2vjtwh3o34Ka!r zh;cJ271OYzc!?die{e3PaW4H1Diu?Kqj(KCG)+-&c>yk_0!Q%@I1rz1d(SSV0Y~W) zIMiY(o68h)nbIvX)HE6DKLMvw8gP`Z0Ed!ON@}j8=5LXqrO7w}4lS>i@W1RWB64XW zPH-cqD#csOu9OBJrAzq86^aEVi~rpsqmU+}@J}GBlm;KAe+&4y8KO#QKvKE{lA8=q zNwvIF7*Q2P&1SS>CU=vVbOS$y5iMIPqLeQm*-%oCQz=(~M9U~eMah13J7~v4O_ida z!IfIdE816WaK|H?D&+)Bve`l|qbfP=CLvm?kP|e?7IOu)sN^$+o22AYrJMkYmd%zl zP05#Tt4lsre@gyz+NNgmicQ zc$8A5l+M5_Q&97n>}`tt=|L^!3dr2X{R=jKiY=K^DWB2Q!dIscne?NYnJd_$D5boX z&noIoQq)u_>KTG%a)n&(%h-~Bfb;SM=e=6YWOIsEe^hUiqNPeX1s64w)3TaU)NYcJ zOOW(e0j?_lYZ#)^37O4vAA2mq*n5fWibf&_0>ptml){XGnF2t~=-n|27HM zf60^e%V6Y(@ndF}Y2U8(!Z*1+_@XICx_8iS5eAcRG=Du)?@Pb_4?NFKeTa6vl86I;T|6F6 zp4Y+T5Q*Rrf?(>8(UsL-{Q51y)4}3-5CrQ;IxX$|#XhdXSqJ2ld-WPGsn5UCx$NB8 zu5ud&gVE%-N~PxRGv|B~Yfd<+xqsbI!*fo*SGUtLZSRtHdKrv=y~lm{cCt@78h=2O zEPv~NQ@%+Dv*Q(yB^K)7t0}$a{yG!+3BnE6+cQ}w6Zm8Q*k>6*16^Hr9)b6YHd*ex z0*CAET{3DvzI)dl@Om^@jV6opS{)C*I|}sLJaJD%qH{mG<9uSh2|w`-JWbh+`pR+p z4OV=yXOIZESG3Ymy<|$bemQ@BrM{;Hy?p3r-3(mjgH25i@^{T0-@mZ%xA_o8Z1Wt&ME);_Fd}opFchtr{85H#(#ZP zh*ZK0j+df87}oE0OwUG(L{0Dw;G6Q}Bir2P{d*Tz=aj`ot}C%!t~2`kD2&V*(~tV& z6ApjhhXelWLm)h3aKD)RT&+K3vxEF3?@_Ry_w3J@f8QWGGfIF0VdA`_<@9#1#PVhA z$TYsa>+*jdBx3yiU*Ep{=V3Y-j4tIBX2y$e-~Kc9U%&a||NaBLegF9TKM!O7H6c!D T{_`Mf@L&H2A1cEbSm`7H2?DvG delta 1206217 zcmV(!K;^%YyGq#BNq--U2mk;800062w7uPW+qkkQc;8PUaeH`(c3F|)UfI2xthCqzIlZCssNCX?DXz6-o5#3WY-b z;c0tk=ciWdY?@cCylN$_DwmT&w8nWMS{IAF$XeyNrKh4bR)3|`LS~(xTCG=_3NLgy zOLU$lMRD7j2q{#eMb^5wr3$9@T$QP)Dy;RQY{^o$M3(E8#t+g*vRsK&7q?jAtZdDa zOVLWFiJXWET1t$CYUz?13_gmB*85jMs}iZsOWDdLrJo$^zc@PR{Is*P_tVq%i?`3u zKD|3=P4%qU`+w=F`w>aD_fxC&bS89y?W;;?9=t#MZRgjZ$&krR@Zc&J*Yi^8pq0{? zH4m=yOiy_xuJTmu&|$cB(Wf1n)s>mfYmC#mprHtw)x7BEqyx|JeZZ) zqQFK~SyiRVC%N3aN>uA*j|;wcr>CmCZk^rE#eq_#iUeErF}q55BtU~?d{1|#NfimU zT)yl@!Sh7obOqazQORrFl#Hr`o&zP43{hj&nydUO(TF*s$N~m(AUlW|kzldzOoWaE zWAI+y|9{-T`&yD^G^bTE6Lpg_F}o0|H7k{9>1iTcrA$S@V%^GRr4yNoa@_hTlFL(} z2fCAFnfYji9|g(a)_oO}-}!5mYk}Cc*>^Qiw(lU<8j=h~)dOHiQbcmE!+Kg05LiDN zFC?LB#O~5kR=TBmR;G)Ykh+s9k!W#H2z-lzB7c{c0qZDHyvZxw&&QDtL*1zsb416= ze~#>g&y^_3Bnuc*LNAo;k4qJq?^L*S>rZ`K2z7fZ3X$qk?H5J#Kk#ekN)&vfsp0=% zOhpyzOL5&gTId9O|MsF1>Po1n;(K?^H%k0Q#qx1fb_fqS57KE~WH^9;H3d^FSmNA> z?0>k-M8(pMPA12=EI1F|ygL3p2*VV|j_>O|3PTm8Y{fdI=|$v51$Xtw;&vCR#f4Tv z#O>~i^&Q+>GO8TtOMbWN8$h+HPA;{Ws9fKMVN^D6+{}jNqn;m z)}nRzmewH-Xiiy&T<{Z8;Q7b zcFg(@pj_2Zz!|;E$5GW0bB8PV%-h|*0}eu}KNdQjMpb93#28ApTCs@rAN~CEPd~T* z=hZ?ys-}q&S?g-I^N&t%_ots*QOa76yS-m`=1NpT>ei>lP2P$Qt`f;wpUOomk$+k1 z!$J_fQ~AY0m#V_niaZsv60M_GXFvV?^P@*Uw_E=)8XdnKoxXZ;Fgp0{w}a%5MpN zw}S7ApATV6^N8`iyA_;P^wjRbrGG+7Dmu7(JTEh`ucI#GocDUYFkDOSndEL5Mr-Mh z*Lwv$)%3x@=b1yXQC)AN$rZ5<`^VF0HOXPyc@4B+?PgPAW9ErtR7IeReo zL`)8Du<5q~^ReeRy_^RQIRZG@%&N4?$> z_*gy&1Jm*_fI0Q)1p5+(*!$o(IS#^5oOg%#wiAS55Nywp&3Pqp^Xd_*3i#aRf8uJm z6t{p2T#4~$Tq^p-v{HVMWhS19ajC>nxsci=(Tm8|OBnogY36OSh<}{<&Dd9g+vDpQ zb>(fSv`&&^axai!3<<)U=9BDQXv0lMsy#}p?!SMn)MdbdD zlj8^L^>MTxdv?p4DkAP5d&_0?vB&M|pA_XqQs9~BX|Ms)5# zH6@$Jc&$0Fseky+XHWDlP0XF`t=N8j`u4bEC|5qdjWk4tZ?(-SpX3cxOH?nS6YK^Kr7Q%{fdx)2 z#a6pWGvuxZK{`bsZ*xt=xyFgRRsS#W+ZIzC5gjVTAjJmnvPiKwsUpRY|E`$N>*lgm zF@0VRd39b6S9Zqq3aM2qA4i0Q%cVK%xcRhGQtk3b{bCkP=6xfc=WHI&;M)q^Wa}U|Lf7CNi)KCna}n#JZwW$%3TNoW|^I8mvzRaI!MxK zB>5hKMmFP>wp6~$`pQCNuNQ`qK3C`6VMnWc7O^2$=RNm=Tp;L%nOy8zh_{6uXAtxq z0P2y`TGKoT)|j$fw!}K{ZB50NOAp5-p;6$9R)5%Uk2l)c3D`i!m5e0o>uvsJ>#kbG zTX$;J|FUYPrtWe|NK*Hx?yTmb@8-|o&)>lrZ0%eQSzjUCwz=94S^=vO!C=6>QDYTT zUQ&5CjoN~_>1}ckz%AUo5TbW!kyKTzq0+Ynl20z`cEYfIA|lNU@N)%jQC`gn@AX#L2 z3A1DlS|#ZvT;-W40c8{!7)A|AX)=SVT&Pq)n#il90wb*fS=tBWXL4DA8+Te3_nuqBGD_&jru3Ts=IalR`=CdNv0P#{Z%1r1aFDl6Lq$npa6LJC7Vm3?EEod>D7f9YOiZZ?Y zwkUBs8LMO_KqDpuQ0(%cx|%>D^M4r@w7*9secQgBsYF)P>oix0OKFkM-{BTwNrHRTw6h%Hq4pBTSaEN2^9BUjUTIDzNW_Kj>387qCh3J9L@~SMZ zL|x%UJ{~VB@dgEPxHn-+MU)lA47f#6icX}aGWe>>`RO#t%4-%Vbeic_N<`3+&UYSu(-?90}F@qId+KOEsUCMLD^Jv3NTk(-ez$W!`k* zbS~0Gktna*r}MJTK11M*#o0n#EDC{q^EfXM7mzK_C(>@q6PV)J#r&Tm_;c!3kxTP0 zQ7=`J<+#D)5)uk#Nrj}Lnt#&2Nu4VyY#=qss=OxhK?`w44ZD9@C<*hjxSf>dm%fF~ zBt`u#lnVonx+;r2Bb0zjmvgADbNq%m!x`=${}JXGBBg09FXW{xuO%!h0VvZAV7!a+ z6j>UX7Br7LH?sm}iJ?rhq{t`ItQPZ3lw*eN&xA~ivJx;8#Fos)2!GfNW@DA4=D%rU zQrym`Fw0>^i&11+!K^6d1nJ&vwkULN2tfri39}Mra9LQ2Xz915OZAqrMj;Iyy}nQZ zOo_cqcNxs-sn}&Oqo?sMg9$xly9~Z(Jjk_}RcSdFkQ7CEJzf+=l`0`5kwRHsB}Goh z)4W1zElVR+1TP2W*QG3O1Gu23zh?|S_sr9F07-Q#Q$R(>xPMF+6+O+1mzQ2#TiL^3Oo45w3( zUgG>OH2p7hJ}-o&&Uk2Un8I0#Jc8N!+ikI2k|Z^-yZSASha}T;4d!RZDn7u-tMR;V z(Uq{=fDw{Hdw&B@lX;Fi&39p5%h|jTT08`lizK~71e6(EOw4+{C>5g4#bP`bIBwkR za92;KNr8M7JyDn3z|97Ztc%&k@gHH8X)r~g<3C*BR)OMvQL{woOYb+D_N;oQ(y~-p z1)hEc(rRwmghUJ4>gm9Plyq}m=?|4S%^YXtwM6kWWPd7|Oz6XzD(ByEZ3vknD+|~w zbt!JAxugx+Y|{A80v;b-VSF~KgG~g+SDmPd(3ZujU@C~pTSS}YlPQqI7XKBAoWNA; z`3~aS7366tA z1V_}FC4Vv>iwgIN8~ih@Gt6@7o_}-wl!E^$xH)2{Y$g0d1vu4M@FVe0&Eeph} zd0F9pX7vQLfs}>>x`ho+GCwEDJSb5-unJXqUC}>c0TY>Bls9H+HVf1&K&Yk(rs8_Q zf0YqInTm5!6oki6sTBXRl70wul}c$d!!rz2T7S$fTU|3%$puMo!A`W@$&i5|UW?{q zfkbz1Bqw0>kk=}i2Mk7Lz2D3VS<(8IddK2OlBSE7l1nn`>wb8V7#h^?R} zwJ1DFQK}upaGd+Z@z|SbJBBB!Hh50+rUY(zh;^34oYGEALv<-Qi6+6PEDKd(A8sJM zg$&7xC`dg{d@MpSGaTphl2n-ZuIhtPMStWp%dc{5Br9jRO#GS377DT($ZkPo6G5;N zg}5?f6cS0%jz^4^z&)9DdJ=$GC0b1JNfsAW{jJqNjO!DUmaqC$g->P7mfuF7=LSygt>~d9gdy+W2f*gI+el z=F*;45*qA?D}i%4H&beU_Vu6j)2I6BGmy4bU5R}p+s#`w7pXqMal%~Dd|nCjB}Mz= zqJnj0HNO6d|32Bx(IP03Nq;(q%|)W0lMcr`zgVbx4?VciLRNVxt2*g7ksyzU2?jTm zsF0OXc-9i2Zb01v*~ma8+>YukjH;l0*P>`;&$uaS+E5ICDbRRkF_|{#v45W2;E0jhlMv}r z*q@y&L6tem!M%jib&gUH)zW0{zkV%q>E36#Ms%Fzbp@|~_-Z%-$4{tnl8RFU;9DH) z8^^VIJjO*KJ|GuS)%*ti{Aqtb(UE#5k?FKl<{5coWxz=9&5YrzT>!c?EDIre{bY%v z1t<=%V!2xJ02Tzwc}{_)s9b! zuXffb&6R<}6`98Di18Q&56Br}r*OX@`hII3Yx3^D-I9FaS$RXhX9l2Gd6i$}1x}OY z)bM`=*QLs83XJM7Tx>+xZs5i-JU2bKc?>ttr-_uJaCSRzgMTOJI7#&amy&%`sX{%8 zd86&gJbJ05EH#gYue8rNAH*`olz`7Mowo3Swhw$Ta)evB?ZNG1xZUj3clchGvwfMp zOQe55y>UWpVbtqZlHGRNH|-Q|jR2cBO8e!K-mDtHzFimjmvzcQo%QETB4Le#`XPFY_$l(cD7u$i!j6=nw%6TCmpi-W3@)f>RQPz!#-;d zS3Z>+*nc(7W%de5E-K>w=tdGWb6VQN3{}|eb|lbFXEeGR<<*H8bI#jc2Jf@l#$zC2 zyMzWo5MO&*M$+my4KU~Y4GzKBrr1Qr28XCU9K6pW(IHaD;tdtEfUOK3v$!4{Y-P@V z;4nkg`BsLnuKJNrgMp4fJ1p%dG&O~8ggUy%7Xxn8PL2{x`jw9WAMf3o1?8eMC?vxWw3cES=-En zjDPiteTl`&H;z*R)+9CPt`Or&&TCU~S} zhr}cm8q>IEem-QZGhb9wD$90yY&8+OWq*1T)oaA><7$rIjfiac>2_pZ#|$mhAp5SU z`6?-0HSdDtUaBJ^`l;;e?QP~|o$F!8ezm_**8=Ln$~ModjU=L4%pT}{7_RsJ{%Cst zJ4fTX@&9o+uKH@F7PzMF&xJEL)UvPDZ9R|XGoVqAolA5IxYLr$)oMk?&o+Nmj(^9^ zo5idZGTje;YaN5_CJTzjCV3asgJY9V73u5O#wLdy`iuq4{b~fNx=FVz9&FQPqKy=* zCvBq&w+qhB<=sY~*C%tWc{`b|$L%ib-><^EE8Jbz*1SbE1Jy8W>&AS-%W&7~6VzNZ`QI{+8hCur z74a8_ut_944SHT*aX+kV5lkI^!xdH;2} ze3{w)B8@Oyo?oL^8v$8hM;3wYh#teIp6$`^wx#+^u+Sj+OW9v&Lr-23Pwf*L*JMp* zKH4?_>tF>5YjD|J83NPL7k}5y+5pmgjOMeJQ;#b+uDEEH z^+g)7yARp+c9V3Qnz`B8`H<~(oqIw)j*cp4vxR*rRdnRmYG3!gorpfw*Xt{@2KGNe z<35B932LQ(T=iYU$EM-qziJrK=ErT<*n~)`o8`44oA5}nJF#3wf-AP7ejxmqamez( z@;ll1HTWnVLPp)Oa(@ZF(*v|I7gzg4Pu}G6lA}HD+!7c}fXxK%8N_|eYJ)hC_G^gJ zpt$iO>Z^XUdno$W3o-Ajx(l4|^ahdQoFhXhdBtFqMvB4yZloBCHCLcEd!RNt4VwuL z#EMrdVV4{<-Q(T{ag_WGkkQ;>Cv3Or>~5czOOu6KZB#>sVSn^Fi`LVaEA&8p)*Q`i zJ$z?GYpdzy=gf&gETa5^^wqueo-jm?8lQUq38G`{gg?}9{$ADoU7MNTtD17s@caJi zd<=D0_qt)&7M;90Jw3tWCPFF<+XO;f!8NUP%hxS|g$8BmvG9m7p=*7X7f6srXJihD z{YZOA(p@g4jDISqv(2g1yzr{&;`X*%G8MB9i(Vd7HN9VGF0m^L2#G=Xl)i@hD6J_L zb@MJ0^H`#!{OWXZ5i39p!KVsMDD!cIww$VCQ#p3nmbEC!02!pJPE{c4(yoN;pg9e? zTHul}FACZ!3}u5+?5=sKyZ(<_Zd7tFn=rcUm%Q|Mq<<3HUADr*SbJv&Ph8mkEScLM zx9LliqIjIv{%&nYd}r5ltiiTxOi48|74B19S|H$aJwzIrOVt(#UWw*=&#mZ8SXRe( z*nEe;6x?NX#?qmaBsT6HN`7P5olZZr_K+nll= z;}TTU%YQS1O5G9D6zvuU-A$99GJZa6_QU7hh~0h44Da?*GFlovuhDMp@F_T!BVPL@ zIwElks6o3MdmF%~UNbqCp^UXfO>vLKZ1$*y#zV70 zWweufTZbFUI$u9a)*19mal65=^X`gF(dx(1DMk2e>4#>z6*g(>(_h%ue%HNlTRq>&MF2Ek4$dRk-sxk_hxwz_ompY7OOjU z%wbcrp?t5lw=E`{Q;@FDLz>+on@Y>6@dv2!sxSM}4)}C&!DxP6BMfOUz9Y}z((3hy zoUVU1{8E+qT}~0*PV0@B2y!1Cze$E3WFf{1g3L zR*mDRYow3-ji3$Mty^g+zFXAj$KO=oVr84TDC^@l8cfd>w=z_8NPCg^Z-3i`1he3oHzhY+=j#R)ln(x>*9D{vDZ4}bS?87tp7FiAEDSU66oADMnSeA{ zNXJ#c(%M0%FiX;YCbLSH8PCFSp5jpQag>E2`GOn4Pwf*k;nv|zv3}EFG=fu>z=bXa%pqL7#u3nVz{&y31h5(p+s`or=|=e6U5na`R`S%gbw13 zLRit1S*>UxHYW+osJI2;pn-c)}QJ?pQ z{G+B{27+o{33a=sH=A@_Re#J10N1146cRO|&<_<`S@X1qYnnV$6+QhOtT!5v*^jEr z%vAoU?^Q7hy&u-2?7!%=%-$rG{&9WG{o%JU^S}9iUF64ou%zPQCQRlJE89>N)jF;u zZnj-^0D=mefCN7(OSz^fTD(LLAfHWNLE*;r!}h!qrcIpm2M0Mz=6^pNAhGJM#!Y2w z%*%OznaJ<2eZ48MsHWz#R_kf~tX;uBd$yp-rtjBU-4kO_u9}nW+g%%nqmD1p^my_E&c*b*;HlqJ$as3L z$@w;GbN+VsTn}B95zjiwd|n_`F6&MfgO55Iv3Nws$r+b@+kaqG3}(EJbA;?it?e

lUULE|Y7H1Mf z`c~LdM+CB?G5d4f#a*10ri&>QkUiK*%Asz?f3y5Y9Tm*h0o1-mB@zJTdjj9XLc?eL zt|PV171Y8wLVwRrr&G&4qKeLV%O(D{d#DXqMDn~lWCQxaCg~NLsw}QNmbfyVK%M>_ zNb=#d^&3`ALoiKf+k<#avJL?v)uM_*&x6>NAf7b5$F8jEQQRqm4`VJNbD*d+^j~4 z4$x^BMj3yxi>~~xJ6YY)^*OBjd$L=f_D;mOupOXjd4WA1=(wgxQHJpe246EeMM)OF z+QpO8av4?2rNNnr&;go1m26PNFOVP%QhW3p6n}AHHUTtgD%hYJ+@w*#;y+6KUpTqh zA6ZMF9d`C0ezCi1F&8RAV;;7u90q{}M9H;{!O-5o309v;g^Ef8y#pC3G^wIPClSW? zA}Z19?>$CvVBn0dUg)UAf-f{CJJ=4|y|~`u6bZ4{X2=cPo@*t0>g+7zuY%;1&_KNz zihqN9wRZk`FCtQcrvoX;+$;=lx(M=$Kn{w<<{i(g^vqo@mw)*A8hJ6j$dB1_tG^N)klmIq zwLHu{PTmKmXZngju(*z^UNpk5fkFJ(G}|<~A%nR?s_KPqT$`QL(w)N0QXe(!q-8A3 zf=Df4MGKSWt#CW|-88A!HL|sNL45XcS)AThj~PVonZ_KdPB0$z~Ce$SoCB-cu7fCJiCgq4FoIc zSPvxaB)IF*j#3$LvO03+COh~&k4lCRavesHMHK%OXZEIxtDUx;3P^H_O94A-py)nY z+K-mrR?BLh#(fkBn+rs-8h@wz=7?4_c-)^M*I|j;TFwK9s%9i<$Mp(kxQ~`FG5ct0 z_fgD>&AS~?>BeKV&>nKGcR8gl7u4m_bUEW`1G0YRb$Km^K@o<9SJHf7#>-Cq(b}M! zx?46oZ&mNSvz3{8Z&B7g#8WyhvNq>kNVzfcQ{IuCnytX1j1#4}rhiimnRRZqfkwp8 z$U--%ZH?*}U{{`3L;igijV5qgW#AdTye`+tq$Qhbpl#cGqdZ&j zao>iCa0y*+JNwl@C8-1POG1Kz}^7R1}+7O*V7LkB< z4}RN)_q(8{Uip0Lm$&8K?84SANWa`odaqm;p6$Z7T~L0xkAJ%llzX=eA9kVg%TZQ+ zuAsnWCSa;?I{+D#`i?Hv#JdR>*GR`2JzLqFA062vz|!drjf*y`7shx*l{YAez#@+a zD)v}$t2!)@*Kceuy7$10Nv`iK-t^M3o%aBs{Aes2g=v|2g%9M93cI6MQ&unht_{e- z%=9Yu?M+Aq(|_oSU5gEC(b5B?ZOk`0DLrgye?@7rCr${%hY>XlKdZHpxO8ik)JGFh z7w2z4$QuiIl{9IDc(Et6e4*FIx9JGAHpmy1S9UQa?DoN$-plr_krwL;5Bl&x#E37S z4H30Rp3grrk0F~F(j`O9W|^)!=bE)yto>(j8a^1Y?tehXGz~W6s!si=o7A;NNb1v~ zf*W2lsDroqYri=>hS~DoKC#{r=Y+8KspaxOlE{V3CHk7Hy0=mi7@WmGI=xIAr zSBuy>X4Y+xw4srBPdD5}Uhl5Ci~O@Pt^#WYTnmKslZO#;La{|^82x}W*5d8|9x{K0OPVN!M=G9d zh<|t-b$?sqhkimkk8zu=bp&+)+=0=Dr<*F6jrFe>vKT0!a=oyiQa?ti z#sR4^aFY*!12$Hz?__cx!2gaXQ}2m++cYNM>HWaP$r{cY7r%$5)ApYX%(-eDi`HCj zmiXZ{SD{}Snt56OGsnzX_ab5}+JCWJynhYQO~+^F3uFwL*Gw8cEB?ROFuh5!ZZqa( zok~@9s!0Y)Hfm7y*ES)=MI0$kzW9J9@|D1iR9v^z6FR0zw>{-g_}$Bcv)Bl!4);%E zBY=5zc5t$Pb`aN6krk9r_}wWMrL*_`ix;sub-y@xbATn?8Ta{{gZ-1(o=sQqK7Yr% zEpFE=^ys|(uInkhpEQf^UAGx(F|~&DAo;s4v*DzTUEXc!Ebi2Dd0SeGJG?`Kp=XD6 z)ANYC1KU<@>zvOh%#X&9@R<1n zj^hIv9W$rlG{4=!K{q;v9*gnU$bT0mq)NVqeH_jaH=zp*t%ydXq_-Rf>~rGMS!}jt zn~TRYtlVYB4}TCH;riRh=|y{sW4`yPOFf|uF>sBd>hajb;(nnq#{&A;d=9%o6&qpV zF&%Q=3NJAmWYIBt>K{j_47bEh4@Nv2;Aha7=`o8%Y`%&gJ2eVhvSdB=41ZBkC+h_8 z$NXKl|E|j?zMCC>OTaFiUk_OO{i(zwcM@y;w$pZa<(Wzp z(HUtZjI+gv!NhY~O$njW=nRio@flnrl!|F|i~@xTgHt}Su8qEF%dsN}69UaK5sQo= z*8XBm&l{fX1TW}0W+xHr-_~-w30ti|57gicbv^NS2kAH+q|upWM$ypG>_a1ev8M1zg4ovIP3?s*hB$vyjHB$g- zZ!`1c8DwN{ZX*M|vjaqC7=FQ$7}D%L9DE*Q$7}?SIdEZP{931q`bCDWOw>X|#rN(G zx{)%>NK^b{N$y_F94NyVueI&xmgClg1{g+sNdvxj-_6{Xd4E+M)K`DFn}kql?0;-k zUz6ZR(O5`~#=4W@jc@<^<7ik}v0C%zNs&9en>Bk@v;E<=h#9q8P@3SzQ5SgDt?l+u zDwR0j5G>XOqQ34FHggv)3}))Qe%a{_$*!N`nHe))#mVA0na`h_V747|txD%QQSva6 zSs@fueEh`FAb-X7Rir>duLtL1+gKKtHnd3!g_8~!JW6@l1c$LvWh|9(0(RzNoD`dp zVoJP)jSMpBmKp5g98apovObFLuJDeZxLh&5_a)J-r(1Wok0S}@b~v`$`(mqq`RNOT zYPe?mUh?xV=Qw{j#$nvrx)ZA}1lVEU@#&~iqZ#`06Mto8Pf!oz?g}C3ASEo>ij0nE zrkbfla1rAo_y+v@{#^^_!`jXVQ3QQqh3^!6-W~R7>@-@k^p+hZFzBbCKzZBSu{Pbb z0KrJpO?Hg;)758*X6or0G52EQDf9(bVW`Zf9=O*cd*Yvl&xg&`Ahqty3NLJrP!Ubc z(fKew|9|q+mmz|2`h=qBtX=Xkee#3e>y-zKz3$*kr_*U+u%1=Rl*_L~3o)r;cpDL{ zjd5^)hz1Gqa-fQTY1LTc6$C-}gTj+9Q1W0e*p|&ek=|5;^9lzte-haQ2IVk5ueQr! zGo<+jut07^p{mXO?LA9f@UBu(Yd{=NBLzsu2!C$AwV*8%v^t$mum;lcbmNptw{;96 z%{MRo!3#8XeEtR307uY%88UaFkX{Rm)}*=MXcQ~W#3=DDJndU6Q`%3x z@+V^a^b6i?xfcwH(bQ~Uv&IqwGhr0Y!I!7*akYnYjh~2&b=+EOUqBB%k~({0B(ats zoPU@@&HHzw-`*cTKYR7|IPO9>4%X8GI5{|ead6TU>On6KHWC9k-amWw;b3&|;o$ge zbhv-~;?2QH{15m?96Xd0!1K39@7^9`(t4FA@FWg?kQu;-x9^Y7Mt^vW zJ%0Z#?mdCt6El>JtY8HfPxxIm$0IS`Q2GwTo9cAhnwI5d02gT-T+l^y&*>64o1Dgh zxfCvQK`u9&&(&L%J7U|}x5!yzs7+LkA3Bm{uu`pT{Jga6{X?2Naphvf$7k-g-ZZaJ;o^@j8|gp1AkyQ4(Qf{ zR?{mCDo>?^$EGh$sBtSB+u1ckosNVXVbs;d{D2 zS5akH=7x^`CYu(Ftf0C7tm0n#o+08iC4EI@Zmc9G@wtjhds2XEwRw?R<(MF2E!ypu z&AFu9-exv_5%%T09NG^yUw`zv`&>1*2zEU?sDU=@jbVN7{P~Mf)27Pn_tGFoMz_Je z!gENrpjP8_#p#;ld%W^;xkZOC>qJ`cnx}Ght=e+0GyVmyOQ_;UkFFNtYt<>$sDo$u-D7PnIg7~(ngcG=q}9=M@$v3n{P zd#3NWX5p>`w>&A@`IEHl4~#Qoq4iC<)bCy5F5np+QY|Q_{GBRid4=&*E4*4MGV-=j zNo5<1u<*>|tTa21>dkXRM^Vt#|YU8K^Ea!GBGbya&_in$81X?+}8~ zsN~tyfNnnZ%}3so!rVwIzo|~+ycU1&;%Ot#OcbB*UH9c@YP&}tj_dI)m(jQSTa>HtbZT((X7#lee>l z7e&$uyspo?@V(3KzISVA*Tz{Z1j+X<*JkNFQ%}El9dMqh20D^|@18$X!@l%}C;j2c zCKMaWzP!AZzdD_OcdIg&OXGCY!Jx*(!A>tmQ7z^*i+^-ZDp(i%;19eB@eh9{|L~?# z<5d13hZw4*9cfRkQ@{M;JyZAhQ5U8=IsOfL|A>3PusVzu&X!*oG2Y5E38Vg2N7hb`Zpk3mIgh+N4l~DT= zA+?ne(PDKT!NJ!mD`x>?gD65bqz~^8MlTLNoPWK2`{tBRG^3M)cW?GTjYj;hvzN>B zp$#6349Y1+JR(EL%0#j*7dj%b^EXN8@|T~YNj^yo5D_RV*cH1NieN3Vp$YbMS}h{v zhnX`ty50o0V(KGKS=mYTX|a zet(rl4KEqf4R8U2*>oer9WfYOKiSAq0`HH*tji~KG4eNA-)8=;4xIUn?&0z;5Dvrk zW;W@^ud&k@+PK~NkJ0G(?dbH?i-Xa@Z@(QpKRX?bT0cK(!gTsJESm4#(W+SE$CCLR zG!k=gEZD$3U}{_kHS(i|)*5{TT{BX|@_zutX7g6>j;r;JHdCH2Hh7P3mCt*;an|Qp zL11aJnj;bRXhgA3UuDZBHRW*tS2Uei+Ka1&az*<&JFCxW8un9<-8b>b-nalKZB61ALJb#6<$c)uY;|U)Zc+E^c&V@oB5$5SA8f(w)eV<}8 zG<2=R(+ED-LrjrTep<)+O`A=t;MI9*hE{Z3XLxy@4y|0yTE!RG;JF(1F-9Ss3x1PP z1`>XSbJ4-;*>DBegF&s>olNUEJEJS|bhwxrP5@nP&qN{cLO6J9*aM3C)F zbgv+T*FCQo9*d}qAc{f-{-6Su>YRRfX?GX1>j*#iTayW5>{Y@G#OqyYoldRQUn>XC z5*Mw$W}wVcF8hs~RidICEfwgL8Os&iPWc^*F5|#KA^;yQczOL?m3UR6?^kdAyl2jS zVV1@dn5Xdw<`-#v3T1qk%YRBJ9bdz@g;2NdqKtSSjLeC4jwAcxwvTVC6`#Q~{y9Y+5$S7Qa@{dE zx|oFib#y{F4A;?1BPXa;(9t1mAwa)}y(j4Lf(N9$dVW9w4pPw>X@3hPbK40&M8~o- zIE1YRMi|`XtFInDhd)*(UOq#d37li*Ln?h=i0lScK!;HtfNe8vw>9;d|^e? zNGsie@DwjO;=+IHH9wF6k`kUg&h-Uc7V`Z{=j7cLFo$H9GT1-*1q$IgiA~7csq# z`6zyX>U|-?kUk`Y5$nvLYvXTywK-@&V){U6`V37X41;IU=Wa2J6%rbg8ds}T?R@7E zjI<#b1sFVtGk@^#C7yvl-|@u0A}z!IqK_#}^9YxHG-H6lH5im(SVb9w6vNlr5nuBX zeOs#NwAtGCdAGm+v~c_0ezWf#@#1`c*jLdJ4d@!$?W0XZz=JwniyUv|Ful#t1TH>) z;z>}qatDxx=&BXW;%WsYf6VSsw$lwm6>T6k**O?+iGKpE%gp!-Xn>bBz?)fUOjcaL zh-0IVEmTyLQezOcN6Y0~0#6ub2p0#0Q{L^LKAkxroTA7ynw_5-QR!pWr)CPnzt%%L zezHa?aDWtmFTW^qiBoIXXS^&tE;ja=3@j{W{)RJItzhKffBGPLW+ciuZKl-Qj9{Oi zc1FcKS${51JL6ncn&!#G#;(DJ14jWet0~YZO345Q3KEUk*fbkdY#%lV(S8`F(Y`eg zsM|_i)RrT@Z{&POed0ln!tkhrB4oUYlXL#7N$ue6zDS4MX4Rsb;aaA-fUS4ps9y39 zRCHDDHadC)1}Ynr6uZBkQb(RU;%RimR_nRT<$q`zodVv*K4UP7?D#>$X$`X#QG@H6 zA9Y|c-!jkcPx!NhOB`R1X3HrbVsI*Al$DXZ8JV(>}Z1?&c z8h^u-a&up!zjyU6j)o0+^+qIDxNzyk>ILn4VX{8pTCQRn(Xu{_jdR08VC-&>6=9GrnvAAF zlUD}_l>GcR+dm*v$RV7icy`K!z3vH;`F|X}pPw1)0hptKbcOwPDW!Zy+ROtSa{K}l zyvO~q7HTu=Ir8a6Au^L@>LUn*N$?)2{$|#L_4iFjO)dW!TezR;x3Pj7I{0|ORIA0s znG(&_bDoLH%{1~}QS$&Vp18BZv!oJdwdygjTfhI0!5msEmoX#QL0m8o;#fv z3Hn;s8nCg8?2@e)F#hA8RFpvqXjwMmdow;_{fs0eZO)(TFudwU$iN!YYAzj6!%&Z- z`i79V1PMYOdUG0Vd8-1uU4bJGtB`7ka%rgArP^Cb#y( zWOLrk`4PWF^PvgrAD!FiQWJ{RKI9WPJwNOYO<%7gW?!v*!uDxq8W-7SuYcAvOe6cv zSbkJ*71xi(S~v_Nm(qB=uYA|FgK49K6KE0V-H*7}^KDEeT9FDy=CIy0YcAcE*mz{<=7Y{_&(+pOo*- zY`$yBWJz}rBmY&?$pX=6oqtGXHI1CDlgP<^3OU*!kf$|$yZ}!?u)kg>kNfwjppdQ{uMxvK9jzhT(5AIs^s(ge`Q`ZzKfufoJQy zH|)PO=k9BPXZFL6iS7D{?|rhU@swlyIe{LUhfR`6L21F=eel9X1KH@%;M{)}C(g4i zn_ZES(tL}5TP`!h$yq{qhzlqT>4)yps(XBLemtZD*#|VF5Q2Kv5rwHxKSm5*Mq6-x zI5cr?{cg-a2tI*NeDtIKJX45S>!*XmFpJu_+uIlCvp}?bC7@eZrA#MGU4mx+6Eb zeKWX2pZ1-c3o$gxdP~kIg{}G~N5D0X?Hb3XaBPakmKLQ%Y_ucW!kvHRuwLDsWlR-q zG1zLN$$a%}z!R$`E->;@b5&~9H7ap;R5XeQPf)DK&R$@x=EXEJng&KTzqa-OX=?q3 zUhZg=ue$EKj^;e~WX&jf zfZeW{AFctbcm}<8$H#w5PGi57-s4}fYE{{r(bpR3m>Kf25u{c1o3$|eQDK))-}1|N z0S*?m>zeaeJ0AjS9R9+f>CTAk(VD3VDq;1YDXs>0QL1Fy_KA|)ei7ucXoflGM|u#Y zojKZDIcS6-UOs>a9vfS`TtsO-^>|ulsG!ssfW6r>@f<}8jlzF^tHQWDclfF51&Z2iK7lKUV1WiZVn<)6G13L#Qn4M=BG!d)I0pwRVGU?edV$tpn8yJk{~dQ!|2 zjCj!@WJK+}fKxmujTl_>k*R?5(C)?ieDAA>PEkagkE5BhZkV+>Ud0fdh-k(J(d2wK zG(`S7nxTI@d*l)rL*sf%o4FVelnif0lrePB$)JGVV>nM?vhMk^&rh%n9Pt-AGD5ih zMZDW@T@V^DA{uP z%-UD5cpt}=4LO=`Vv{tiY{)SYsgP}#@EP7SV|bgh+?G4x-o zTS?d3zyNOjbP#<*>!h6?oapEq2H%Lk(M>6I^UAp4AaZ4ZA;-&Qv~Iu{9FmNfyt&HR z965xnb+ORo50>PzYR&R$MwZGII%Z(A77uLd-=DnlY+6vALEE%WCKz`Z z^-q7ukLD9;gG#|~f8B!7Rt5|{P{c|zk-mJ4mh#DyhC!f@A{zYWCtAhHY6YpXAM`|? zoeI!e<@&&^c_>$$)mn^4N?+_PGq?~Q2EoUoSixVE5!Ldcs z_A&ZGl6063X%S<0p5?*erxz#thynT|LpnU=*7X={wrgP@T?mJ`&x_H9C^x? zE?ZXe-x4=@MQO7{T`uPS%nH@Ys}|O|?X=zrH7?Z*OO6*s(K3eEs5}-)M)PhQNU_7l z5bU@!GM-##jyjE^V^78nXGT*~|GQUjg?`>=a119#dGi(NGb$4K*X6R^KHgMq^qqe` zG*!_V3&XQo;Cvpy7Ge02-`*jEJEX<2KwqqJ{K->PC;|0$9#6%J9~0MoBlua3AN^xg zAD+Pv3_zmKDmtTxPFNv;WAX;76Bptgp~Q1Xm;*R5%F`)g-VO$F#}UK5aT*e6EP)L@ z?7O&ZHZWW3vK!yvn%W&4H*7T86AXW%ETV2#B-2gNTltK^NboZjj|AU}SR5I^wK;Dp zBi;FTu&tpQ$a&N>qLs@4&N|IvfGMTs8M=H2r6*FAQ=tV6|A`W#Q<{=xpt(o7Qt3e~->EM1jeKX9jL($VGnr(UVrB8rqjM%HX_1n z-tE7BiepD(IdUoNzuw+vpTp3D!ZshR)5yU{bjpPm3hfr-kuVU3*Bs~GYm4qOJFc}) z!oqCS$b=3D^cs(Z*|zY&>WzQEH z7T=>S>sqkD7PM#vO}3C(P@@Fs=omg5+K0XRkVl^xd}Z(_hO5BX2oHa9iRKjbx_>x` zUh_gnN1DOwW>#=E@q)3uTvTr7`P{+#TL?{A{My4c#IM1-6v;qb=qSkYD>O$kB<=V< zlgVG(pa%%EUj_!*JrLU-+l^%n-XP!_1qXCu{c+I`d4%w0t`6Rc@$b-_SMn+;@~m}O zR+=Q4Eu!uL8U=hoS6Y8#4kg5jeW{NEpBzv6p^84433-Wo*hsA4bbb5z@An?Fog7;A z#g?~)2;prmp8yTnaF1yY*k+T#^?zuj-}8kImP>jm=cE-uM7$Bpr9jZ{^zgVCh61Cp zSUhsxcR{^5m{2UZ7>Ja7otj;GiXxJeFg!6XLeAu3?X_cMvvz+eync}I^+zHMr=sJ_ z5?>>!d`%<~48`QH@l5oIY@XWDRv1QGom@&obe7Al4sH=nNWmi~U^4t-A&Uj+?819E2_}iSn4#SAx+5NZR z`M{=c@#Q`=f4t+bb>^=&`UVhgW8WBwzriE=ZP9VyZw`M4_g}mZ0M}ah%dhTq_}bfX znuEwr;79kkM3c(a7W?~%h?`KTY^b9n!S{~H=+!D#GThGIJc-WWQoz>vr5GYbVqr+k zOmu@-d?`A3mB7bbPa_i*Ibh4>r7(mf6IvkWj;@rLtfEU?{_W3Qe86%BlY&mLT)swe znE4n{3iE$OCo5((PHuHd`Mk)}OGf;xeF1UYbl*kRGdBV8EZL>mxR5ncb7ANU9W%OdVNEIL(&5A!@oMtYE9cxFjRdEw$7Cp#`K>TGNK3KHmAY~R4=>zvjHJg`w$UwyZ02e zIJWe5Z^)XGD0zq~%aL1RG8LQLDPwQ~#92G8+bhH1jE|x-n2H7($U}!=+bcE5u-*E+ za>9RNBu*#)F0udgH);LJ-=XzmagAQt{|&MK)X;i&IGzePaV&-w$nTsW)7N0j3ij`_ z99CW7aj5Yjo<|SK@|J5c(83A44_SQ3YQ&+_oe=ywBA)XYwmcTt$%sW5rYdr3I>*%x zgF)Fa{6ereB7&1OiL)pe1`M`P#fN_@IJ$q|c@(-Ko|qG#vn}uayIaebX5z?&-A!8j z(}O$77eEGljnfSx+#FaCKOh_!1d%aeE61&|bw*+VMp^mt(}Nq(7ceZWn_uE{Xkx#5 zx;{KF7#^ken*C%BgtV!LpU{F=$H<=!ANR7rd_E8%+|)L{4y?ahB*>cB{YXpY>76YLBO9gdGU5sD{{a z7{1)zhWh=4nY)Gk$l)~1mpeNiYCp5s(Wtjy@?%dF(0>_*FFQ)iizF391WEiwbWRI? zVttTBpJ~wyk-8K4DY`a8BJPusMYex%PT=sJpYgr34NJNWX6fkN8;qJ^p?*Grk*C;r z^TfAfAcc@IIwz%uxsfHQs(=+HOr>X_XEX5H1@v@XqY}4RkSyUTVGtpBrb*R+MN)L`wb(q+A>C z=h7Wb+xr#W8R@9VjGywoQwno12qIbPng+5U{<3wKu6VF@ce)C`z=V6GbL;MEdR`7! z{+{;*M*Och@)=%7^B0WxEXpYVxYQ!1u)?0OuWF@JkzVd#FxyV+Y$|_RFWw%tuJfX3 zVXrSxn2>7p3d@l7o7eFtTG>LNn0ju+&N%n2@=B;FN}GRbQDC}iS}uyLHO_BZdMa8~ zxlmYYOmD6aoiC1bK0{kK6e?iwTN|CK^-;V`7C1OBCn%I|Ejhh`H=wL zH4b~i-D{%h!Cmp!>py>Fvenk^qOrPIeScTu2R-%`sMdn@UURl>w6~i7#V}9r2ZFB6Na?E+ zp`TaPheTCK=sD+Vxl~S%N`?TWJaw#ReJ~?gLgPo+G!yyfwGw|3uVThj=8bMrYit;95x1UHRX5RI6z*6;sd#E zQ(z!N@i88dp^q2@C~@>=Eyzt(&kFXdOTo#wu~6s>@|s#BU*(wfnc!lb4ePJ@^{vr1 ztYB_z^n6f_Fv)-D5nNKYUr$?&a}ccE7}#C+X0J!g25t0xZz?r~uBLczZp{fj;Zqf{ zjuPJ%qSCLjLOi!tDs+?Rp69k@x`ixgfpjW+`jw4ap=ndNfw zoM`EZNYOU=&IrZ?l|<)SDb&Y2(^HJdW8w(7F6g~(80x3xa;fl1JtZGriO=$Bx?EQH zR6R`{PJSSLYGm$eXl6}Zw_f3)wh}aqw-<)?)0Ij7GAYNcimdNCdDT>q6Etzi@+vK* zG(*r)?UjE4UhqF^5G`r^-P~fL2^K<8CmrHYsFAgHk$QAW(>=WXZN&hmVZGD}UeYkN z)d)A{;~7FC$%-h9(%@vEebGufYBh{ptYnF3eh%^6A_>h4h7OCj{swB zVzjwGDvy3Q~LGg`!6W-&-&}qvk_oH-eN(-|cr4RPM!0n(3Z2ZHz_W)HuaqDv})!5mQbXxJs zE3bbiVJQD!&fdN|ZJb*l{{MW6!jr|4CkiF$q-Tz)!jHbB%}kTFf$5~>=1^h;p#?j& zO_K)r?$5gRmSh={-uF8HBu18HS+86B<+^rvn;VfcMj@LQm`)cB3^kAk`No>J1aS5r zp*(!1CF7kN3QGn@qlLyeW^+O=ae>96K(v3Aj9gcsRY>wt4KsDnXt>b6MB3FWlG!4= zHk3mUMjLROMj7P#V}cXtXgQGsheFk%(IW=CyK-)wbOLz#2zzpD+ff2CytbOFoR(e~ zq)&Moo~qN*tj9}RrxcY#*TU?zr$HcH$b?~R0sfT`-{o_>ZX`}=(tQ^lR05aJ#c6*a z2fO<~WM&Mm({;eGO{eOpT~TUYtIb>U5b@)%D+-h}d`8h3iZ+AE$RVZg5 zZTYD>b)KGbqE6*0#XZ{td1jpSs%`YW>y3X>m`_r~zI%frys^LE7~5A{p33iq_fgXF zVp#4}BEHM6I{N3SXiM0q@8}CI7ifQI`j0imK)T!o>+pMX(t0j}@J5&xAUCYj!lM|D zX3v3TK-8HV*2RLTV6rxWH-Yn|dQO4xXB0nYy^>M-cqk@t@3J8nU3IiH|$ zL{?J9nxEBb?YbF-dbPSpZC|?zch!+8wJY0!9Y#n!g*t*3{MHQODkIv3-8Fw>54aQV zcl9z86u*oMV7KFUrRmN#`Bl< z2I?Uu;pkDa$RAPM%Of1p=2`tq(4_A&MX4^_kboTE_22TltzAR5o%lJM3j2eF&j4*} z;a^O&&Dv|g<;IJ+F=^wqU*vzh&26Vn$KtuY>H5lVeRFpmn6Sr+X-O|HMI3u)rt#1Z zJb~X$Z0x0J;fI|`a8<08`8FBB+O12z<=D5zDwa__(kijBIOc(Rq-A6o#i#7nW!C6v z_LtVz?b-B)mY-YznoDD}3is2%Ra|EXg(m4VG_ghUTu)xzUfTdbefxi_yYu0#yTdfx z{q=tk+ZNl1(n?x4$MZDUigmm5W8J(!H^-Y=tKC_YF8F+a^dP)jJ5j?NEhQ6~bac)* zIP6*UkaJT^p#9M>y3)!~h0Z8ijiu@CJfNGt-ActvZDsZ z2cenPEl#TvchYE-i_d?}wYn?Z6Pk0ArvYGtk<$c9PR+H1b>^<*;Q_1)iz1OD((`43 z3`_KKx<0Jcc2X2JwBPns-B~-ix!5ovHNTR*=&N+fMtO%Qkl-N)vE!l0p*BzJ1v0I= zbKlu5edbLfy7hS=>HiQsy?2OWCJ0Ns!+sC6vQ&3m*t(xRSa*N7FEr1n_GivJOWwh1 zv(Yw6(Pfi;TV^Sj#0L+J@EZB+IKz!2^Mqrq*|! zT6WXtMc(c?;#*QgdGhx=2o>hp{4QTly+F`lEsOki_66bnc>_NL`gtIS3;eZF&qZh4 z6^18%H0)aOQ3DnymgnmA)YvlatSaW5E-=VI=7!%0bHaZZI4{Uq&OP6v(GXhy^IX8^ z#`=k3+|bd%ZBQ&vRNUHTx0lFoNN*X0o1eNf5j%H9{AB4dc}FX2yCJ&Zr4e1vp{BIelGuibF&D3qA z96t}~7tDt|t#;gg0$UQ?0THNmhDD;(N@OW=K}QPHk^2o=OcEAX zG7->8PXyNYzWkbl!>80#-`tYa>V{`PQn`4lK6z2KFUo;Dj1g;GJ7QWeVhZCrw>R!l ze&s=UF((!*O}=|4@krmQ2Iv}e2Bv|aMBmyrfxKP(uMSD$P}HY%bF^`{YgDi5dG2dp z2ZMi`QADtEbX6SmG8OYtJmaHyR^A?L41K0DYi+YeW{9E~tBZxmDf--oj%6yg_|aT4 z)YB#*m`z2tDdT>9$^<-+)4dmYfVoe%;VkI25FZh!Ix~yiC=zKe??cg10*M(7Io4@& zu9Q;__n#Dxyq=b5smmdHUI?BawY6Bd#s`0v^uDElb5dDP_rZtM(|sa-O9_SJQEhxRuOCu*k$w znKi8D5F$`hQRUf{I%|mP<-8im@szG{c^Yx@qKbbl zh^}!NEle`<%uG7v=VI_O`I4{Gd{PbMJNqN@K|@ZMTo1!~u@F@qRhjeM;CH~dY8RZW zD*t?{3Ml-nS`_o)X*nz}5-%f3WZh(zG{v1;JV0!oqWWVm9+fJi552*`6W^5tM+hD2 z)$07p3vcxwco&I3pGu!P>0M+=5*vSJ;MgJaHeapyobKr3K>Upb%b?h)mr((d_#zPr zs$fp$0><=wwZgw~PIgsX{zwJ=DoD62o=c<_W0BD?O?tXdje-;}J|1ll-Z_`6xoXIXa)GY%XOrd}P*jLt%f+bmPcC$C z4z9o6yf5e0A1CMF}XR6VhJ=4t@y=R8^%y#$XtW@^QE1Ou> zpas=d+r`Y6N!ky%&|ppNEwn|;q0E04p_t>C6CL6Q z?EEBYr3?*`tV86w4-z#qgB(PoR@~c zM=T%?z!=g>MG#^)j9@w#kUHb(@8RBoTVpZ+#u%JoijORXcJ$o6-RfRe?Gx%==}Q{e z8;gx3DsSwN<8pP>Zong?Vt`Ia;9sqB-ymH%0eqPAbqvBvgIIsafo1GUpB|P0#7gAa zhD0##<+a-5UVHCz|1k@IY0&zT#No~|U4IGxhKZxEK$+zxhgqKA4oUCTx;ns?a|ID4 zd^>bTYgU`elT3Zr;yx^wsrV7dovS3v`Pm3HoKMX!T*(~{n4rR0SMlyW zybC*n_WP5;pmiP*X*Hk3cE(8>8R8)>V@$H|nB`t@Qy6_Jj-)#{CUmFKH_jSU)>x$r z=GD;5eFBRzvG+VwgZGCfe-Q5E>p)&Puw6Jlf_2v znTY7j*=zK%qU=-)Si=Yl16I_EB~lMeSmKR|O56{v$ce0!bPziHIGgl)=pZ+2(i5AM z^zyy~ioJgt;wVs@%6oYqwnI~m%rxXz4T_&~t^=8wTw-p)O5gFA={5QRJv85Z=!s0s zLvc}byqW8FN!1pWd%AtKx*HN)D>60;KxuVC)uEUj*kwJz^pS6Idc=fuu~;p zCKK3##j{n}4@Kf0TuV4zEe9djO8d3SOA{cbI;Q^kH#5bIk}x4d4O1Z5t)~+ABFL*n z)p39HTCJ{zBBu*6Q;8d2GfJZGGZ!bBy~-YJ`6yV+4J3>*`5^&zRV*RNpzeJZjk=I} z-A0KNTEg4zYDC*Yf>P(t6~4k}Ap`i54;TTOf2e9{Tsg zy2*@V4fTnC1h7lX01l?<6C#NBPE(7+!c2cz-c5%O-ihcya-hRf zRm*v=y;6>^;53MB&f{m?yiXI^;~l$t2*!nxHq1z*9mh{lm)PLFlhNzU6I!eaD%GP! zq=j)m5fFb_HJJx;S;mu@Y=|XP-lk-d8)po1oHwCI1GD)Q>_SGx1L=hDGqPZ{dWV01 zElyP&F_}pl`kVLC{yNO8B3I#em>Dhv;3b>KO8El`H^$xrf+U;f6L%-6LP1?#~@t{rq_Q%z=FXM8m9nQ7|B3PG_2%3@hXKIbh-`#*^5a9 zPbnOQbq8*jVlv*AUPxkB$Xaa(h+{uVYFWL6=EU1MG6_Y^ zf*C31I=@OrSr|!{t!7w~e@sW3u$j?_4x4!tPNWz26%M2&w&+NoX)?TVJQsg$=@gma zsr0xa!|&#impaSS39(81+8)o9}r&mHQ{Q4Wz;WuDG+4%Tui?rk<29<9wOR zN4Z8?y@>=is_+|wL!H8L;r{8D%FmnAkGJH-)^r?c4{z$dE)vvxy{3N;pKX5fEO>q; zX82fuzqZrcHxz)U;c1aufdrjmzhPK>1H~w0wUh{*<44fA(QFo6y{X zuY0wmha70}S^i1Hb=0`FfNxqIeMSta?b{&yvsU{o%AGyX;_M3E^8+o$Q`w=5(@5UX z#c%<_iGddFD|t#6FRp*&ce*&aLcsPwi+5MByAQN@c}1~G11+Kjtjq%~W($t76sK2! zWx0VdC!jFXGJo7>AZXJ(7|}jxuGMlKGLVw<6dz zkMzoFgLZPmKS*AKZ(y(pw)dIB`5pT&X>Tw&v!LZPLGXQVCF(pA8R-C5t5mwpmxS%| zP5V2LFJUZb-QUXj_TKf-TC%ub4NM_+CAgYl39~e*Hw@icdF}ZjH-(E^VV;5VVVrr+wN3?ItzI zaz4GC#u?m7Z*xSGhbdreZE_}!PK6IhiRsXA$;qcWo#=l~4wuHsRA*4}D<67L&Q!uk z!bail3{tI;nuQLAyH=~fi!5!8R2~XUH(A^&yc6c?=v4xcY#A|R68*v?hhUhvOMkoRm9}ba<4RUxA1!pk>52{mB=xbWCifU=hI4lvj_c zXMVUjq_ck_nJS`wQ}?32nzd#E7c%cfAQ|jM{X!*dv@C~;1*2$eG&i%cgwBgDtbo+@ z#HMa0eIYCSkUJ6SLtc`RKIBeFa(9fB^dWa*lDlQ4p(3vlhd$(uh>GWqHLv~ zWeMm*z9|Bs>V>G&&3sv>n;D_7bLgbPXl%v8Z-;-LTT)auIprv{uVE!}6)&jYYyOaYX0#^ibF`v~SDEd%73* zq5O@V@;AT7i0~mV+t^w@2WziyPc{TF+xHoPyV=Hr4`_;!$$xIPVbp7tj#8z!gEyC$ zJ^Oz_KYXD0;-uATE!RQmRh>hpb*L1^4sz_tx2f|Us{cKwTRLW<+O{r zF@Q2>jk;YKr`Z{Cw0DP-u#-tIlo9+Co0^?)xt8~@rOUM}k#`%roJYNGlQ27`Go4x` z)e=-cytwuH{@huv{kgMT+bNTU_cc#x4Y67Nw=Hg;WnqjQ~LE_n+68$10W#8V>*nkYt_Na)mnX3eQ zlv)mmFY|DBR}MV(PdVTvHbbg`MC2}}-_Qysa!~Le;?55stS52!c}O1iJQj*&B{6@B zJ<^uiyOUtKmWxb_31-5;WsWFFl)|Pbbh4S$>1DH70O(`5H%$WR62+V!r^C{O^vo`g zyBEm0854MIiNUg@swR6FkDWp=LRDNUd5KK8K-XwQC*g{1iNuq1dy~Ez$axTMBzj|U z`|)C!dXJY9*Su@tQQ6^zU{n|-=JkI;qzMdcwVH91tDVO;o_wMbN%7Q;R>x4Sad>sD zCOX!bAqTY@$PRDhBnW|sKEWx7I6UzdJ3`b%PU?j3I2c}E4+d)fAVB22?naNg@qFNJ z5ib9E2q?&@@NSUOH8%S!`fp}?VQqI=5iDW$JdVXodd18Ar-9`8T&isFAJu=29((mv z%ma_}qA0}w+Q6+PrM^t+c7-wBxR&W6hStz@0KSI%Y@jA-bCRk#@vz!}6j2bdPErM* zaTj#N3(F+k;H7CaTxp2fIcLV>~4ekppO5~^I`=Om@ z)=g)>@H%*&!*EZi3#-+{^H6`Aq^(I>x9f^~QJA(;Qzhejr%9gRobnDaol2=YkB6&!xGmH_0$6{wOw^kz#CEc^ z=p`R(vwN9KcKAeyL)@k?zil9-WH9f7XVx*g#4VFkrL8QSfYNMp}a zi3hfRxZ%OLwEGZS)e}kGZJ>vll#l>}XF%c30trW&a-~ghA4hOw>_tZna&acIK@P~Q z{ZTmNSYy9|q=J}I5bu8?HgNE4Vnxw`MfdNhNpC>oXS@(>ins5V9nUCW*XF#}>9gi* zIu>8GNRSKGkXcMr7K=fEK{&8?CJa6@YO#M~(xC55;Hy>Kn-o7u zSm_O1wFlPAN*gjxUR{yJLVze=dpKZ)X{l25W_~W|4f^Iw=e62xqp%id##&sAvYk!Z zw_YZbd&f-#_5F+?`<+KuVFUWy0B+4j@6J^YpDu3H%vj+k=(tf3v?^e=Onkwjg=SLo z5MVfB1~ps23EO`|30Q5SBB%zC4FM*f!cmO0)T6$d%ZQqQsA86ulJ zqE$*{!ZY<%i%2?^oOQ><#$yJJ`-aG>=BOOYVPU!GYPEk# zL_*(3M(BTly=CqG=S~pT!63pmQa(F47|D&dRNncA_lX+7$6Q$#oL)!X^wsw(F<|1| z=6CdZCy+O_ni1pb_4-Y)ZbAc=TPfS>zFdOUKurRQO(KO=tu_(v2huq&)YMx~VOCVo zA1=P6S8fwJ zMOlAsip#aYS+~npuBK z{By-2GJ%Y1wa=+YWpz-M8A}+iuV9`rBQ&qo{^DcOTApmZf5~EID%o{2vmSs6j=YiD z(TpBVz>7soYgen3`b;w%#8597W_W zCNn<3R6bsb8E|0X5pUE(PMDhYJ@tR@IZh_V-~OI;yrQa?W|G!wf>4C+$whv`r9w7N z&<_@~k&j%bw+s3eAm*=&{?(JTG?}24TqOrZqkO*dR*Q?PlGX9k<~{lRsdxLroU_rc zPSJ{5HDgn+0a1qDuV?5)!e(>Huiw6V-hMlH^X}bWgF(S5ZmGyEdrBq?>Xm;)mV?S8 zpNeI+!LBlb@{R7yab=!iI&Yt+Wl*AU=^(FhBoA-YPHxCsI)){I1H9Tehn*^MB9G;T z%#7|NG5-yHk^ErU_pVJsq*bMIyv5bd%j$_z>evR?oiNMmi9DvWgjVlkaT3V)TAav4 z%LN@I zP^-lx|G{(efoEhV4%S^~=}$Bf2z0ePsnt&4&yKvutJR6D90vrt>RNxnbt*%viDN{? z&cv~YjLfa&8-KN;Y$tU+YZf^xN}YHVEpw4*>6No4&{hA%CsvEZZykAWWe`%z7`k@> zF11uZ52^CRU;y`3TND+r4OPk#tYue;rJjW+l2gLtwJVOc6F!Y$WS`(P(+m<2cAS7f z{DgDL6P4&@YK95I3b%iXw545a7Iwj9W%zTVE}EG@hn%a227@|U9@|I`QU$j@7?p65 z49+Mlz8{pOk3!kpw6DeeW9$vRu$;IC8SFjvupbj?8C$+eLy29UjXr@yTQjZ1OTAVn zbU@V9hbj^$Y~x>O+10gL7mydSs{?tdYk#yRB0`@P44Ie(Piud*uYT7?1`nRAc|8;- za-jpRtbEB>=qD~qK|~>QO~uO-8+ao>P>*)C&t^O)J!Dr0%mij3RUxa+36dR!%uOA- zx;NK-NnRsC(`2T{O-7Ssa~l^E)K|n>b@hV~^0B7z8?1jp>4fqfr_+h$bTX;;kUC|o z+PSBZO2G7%iH(0qIub40IGXBOtzBl4QkUAl#5Kh>+%OCtI(=n=6l zeWWzSYgh1g6+KYq=;Z@ikd?4nk?!Bh>QZMiBYGKD0Kdh860`nxIvro3Va&;*9bh>V8+NlU}U*>RPw{ZV0*#!dYytnx}>@ zP2V&7*K7G^yAH*<;M(5}9vZ3yDZxx?$E4l#)x}zFZ2#EpY4zyR4j<{2I`PR>pPKP` zi8Y0mBB+1SWE(YcKk0PvlDhArR!dTG0gbE_FSTlKOgeTf;aT@m%L{po5!vMdcr3B* zSA7>0Y)!VvMj*wdE+}MZ6uV!%3}Y|%=|CutPY=SoFEl6Dn!;#e-P#c+n_`iSHTs_V z;d&^rfCpGf!Vjy}pVURyW>wnELZ2S#qXui&v8I0pTH|e(Xg7I*eC{k7>Doc9cB$)x z0b5aMFuKH&h8h}dSbCMcM{kOVP=hlW*w{UzkBtRX#vAvvT8hbBgj<{>rNwheaeH5m5b-nH(_SgT{Y=wjJm1eHh^A)^ddL>qpvT1Ea5|GyhSYar;MAiQXv zaxN?#-j+;j2bnkk*>cY!eOyuMBCuCG+B<(kFKG8Y4zS%Wh#N+5(gpS13CXfpvZXoA z(8jy@{Drj!06%3zBlAIB(DPWiaMAowQ3^;wi_(5?-gS*{;kT@d?fru9+s*yhZ0i^1 ze?sf#r(el+=5%UT&lc9ta7M%;T_hS}3*0brCEeUuJTJEw&$gB&evX&4h+>$DYmI*$ z;M&EHywpMXQWxT)NU6n8hNWqYnkd_OV*`u>}&1X;ke9`+)5~R z#38}C6C%mdS9(*(M(Qme@TeMB`rm(SpeG&x(ddAL5?Vo+j{YRz9TZitFm|dF2lDy= zOHO59ehB2Xt`lU_OMOj4AI+A)6MOYY;^Kx(6(}*GYYvS*aDW;1ch_EW2D_VlBd#+Gu6M zsWztXIh}E=>ONP0UWpTiBFls2CFb2Fv46}dUj4H;!7lM32;^PvKHfvvJn<1ySF3Br zD>9!??j=95-N|F!OmHa0)_#AfTMxV>ywqxrO^I;$+BCiJxP{=eR}V`>$68KlW1H(h z@*5lWgz!eIMS{cZ1l?>UW_P9DXn86oIX2pXnB*9C@_j5Oc_5932z5uj-%!12t2PSL zg*cIv%4dOdELS8u{Q3jzHtTlStxwPoIvUNw zlS^~sWc+fP=0Dy2CUh<81WJhM2&GI%60%)%r4gdfwCmp@(e>uJjIsR4&)yneTH2d# z#Xs@+I-4hU?Ug6IR5S7*E&axtok8-JC!FMTI~Rcjewh5akCp>G*joPi;2D@XxQmwY zMR;nC)=ANmVMBjjYEP_}o*dQoRaq=Cfi29QnA)t1U7l(PNSFFvEwH~s@iQ1Y%Wwa| zGwYX7eu*wCyHcR&7t3QsDQHV3;zC|R(4t(em-P2n6$-s_D0~X#|G@=8%zSB8Yxh4M ztP%bA3;F3povc=OsXT`2<1Y0FC?7spkTutrTAdE_tNMR@N?M<;LAz?R!nKWaOIts? z)XlbcVtEM>y{-EyBEQOr>%Bd!>#;t%WOF%J z!mC4D@9v`tFZHvoZXNet>OKxdnJyX+l{WjwuNq2Tpkn13Nooc+J!5^;=5wcMm1Y3D z6?H>lyTUWR?c=3ZF+Li9=ek6M6hj?hE9qH(zSo5F3@JHX${r zK2mRVuzat@ZC*4|)1I%Xrt_Gnp}lVk<}t+5iTf%uIPhN5;WU48qy5BtK}QG;&fZk_ znXrH7V~p1wL#q=6%XzGRYUA@VkDXsyYok&jN}0!0N{n$ekB`({-nz@xJPy%f4SCAt z&g1_5V^|)eLKYZDt4ZEEJ`|sH5ZmzFFy*;lLr+OD-CRWG2 z+q^GFnYzt)_j#b>$_{nYElM?7W}wewc_DvEFX-Uiv;qH4d_wK(>F|OUi;sA) zvd>HE!XgS@>e`b(euv*Q+52ElAhFfj0yAB!z0Yx&zPF*AdV?&3o-+KWF16a$x4Q6M z=)-BW=IW$8HjkyAg%{QqpOt^C(uo~e$7bocj>YD>b8II5+18L)m?@Tt3ryTvtpZaW zV(c>FvPpEkn5Y{+1#`}v_;AeZsZlB0CReHhLdci{($Sy2WixU@|slu*QEEbL!_i0 zBNB+?Kpxj>$MtJX6n4*3^nI;XIWAKTo%ou@ld_@_ckQ%08NqUEOC#XUmr^kY@3HdvUNe{0{!GPk2iX1J4^V@))DT?2oGzDUIcYLRL3q5T z)$uMaLRuFMZyXi6bv6XoYVWnMKX`&Ou|gvVPu-2xih)7pv6p{^mwhOVj7|AO-U0mb zj$o+2?u$U)f~Ah?g7F-y<#l+9G5Q{3bfje(zR%@653jVGN8wj_9fg0&>&qzIk#YDw zmPx4jdxi}eHC#Y@ttNVu0&w6C0A&raOt>gm&X9pzPTJSkVf#=l8gkh1F)yx%g4Zg$ zjpl0~G|&3_)M|fZxWwu{ox~g`R$Z-P>-009*(}32PI98v-Mgh!;@;75lwb(^*B-FQ zRSs977VnIq7f7C@V#&&=kS97k{3&&hm#WB6JwzZbq;)v?GJ~hOo4y?S>be_s75m>K$R2EI;zrM z!l4y2mC`UD|tCSIS2^M9ObFM8%*FTXXFV*^9cbK-Xj$j zc%DZhlHf?6S)=T?2Fh{Id86t0u#BL>pp(Ep1Lp{FIU@E4wBSt}yW`|yG)t-)&Ggx* znHFs^XPAE;$T=0rwM4L>;$G|yJADba_Gj~8Nzgc%j5$Cc^^SumJW+7ZE}m;9&!T29 zQ^{_0bhIyZFYe9y{b%!5qGr4MVWeiw`7@*@eG1lrJR;~7aWC21?<2T1>8lhiK*igl z@fOO_O!oF39W|N(WsKm*4OEkDz=S`qHX6>*)|7v&RSviz_qUI{ynVFt@XssZvsv6E z21KLqLBjDoFF-SLg7eP%ZtpWv#)nM=|17$#22vl;zJH#H4_6+J*7GuG!DuSXTIyHk z?L(PLyg)vaQGW1mp1{|ap7@w|B>>G#!i@E0O>)|5f%K}*UdET5+;F=OQvMo;$gA!BdK7KIb zE;di9JbhPd=4t0|K-5YR|3q|exs}!-wF7^BEMe?c8q$9Z??RQ^ts4=9-y(4X=;&E} zxR}7x{Lkc)O2*c9Q!MGg7bt!o;GNfx3}@ z9n}VzqyrDzBD5&Dibl$ckV_V@UDG~0Xf4TQC>-tXwjww#hBG-zV&L+u!zEue)==Ti z%u13T*eEapBaW*hQ}#Dlz6xEk6h(0PF);-f7wM7JWdY8u7u!gIt5fb;BZJW@hztIe}iI` zP-XITI%m`!3JIL3qjMn6v?g>%1R-EyqB=8~cp2wO@bI~I5E6PlcRN9-Vzf&fG zG1rC$rdh$+pZHqL4QjBUAIMRyb{vb5)bgoxyp(ff+pR+NQe_d#AV$$xM7z0vD`0Lp zm_C~bJ{8llej3*O<5&{C^9`}Lh&ga`9F?A9;vxo zeI_b90IEIN4<;lcR~nMSkZE7}&*F|prMTnKe;9W>vQga?4h0t*v8x=dM1Mn$J8mV# z!|EtPTtDBY3MYi~=a&!x%hn5je_d?BI)6)vv|~RMxw2#WAgHJMdNz!-c#P4p^O(%E zOtjEz^y_)@Ho1XTGt(m2E#T!Dr(H`PcjiY4(`}JBQI}7ix$#{hy4%~IVJdH?Il|X& ze{Q17_maK|x{M6w$Ba<5!jXj(o=Z>Nim)|0)DD={q2nXl-w>94yNnute2Or&@9KMI z8IYdKJHp$66`;g}xU>wS8L&am)mihon#E$yZ~*7>3}oT*Ms`5gEbr88?V0Y*RLix_W&qphd_ubo6Vh6#HUVo9P$pInLB*88#uZ%yPv_A0J%+n=8pN~*u6I$R}EpE zSr=oKSwIL@;BK(d>>MJJoGO%iuEQ~hatkV!ecX+@g?-sqt8b}6hY*0i&lwsdX4J!= zfIXDo54Qrf?*m^Km2pInautLw(`^2qfMT-)ODd(yXn)1l`>`xMFYlwRcJOgvxR54G zRAiZXwwMtRRslkP=i{5u37GnyD20m7c9bqmwLLmAN`g^szj5$;>GlZL0Y~pQ8iQj3 z&}o%Q#J4@DpZq@u`LQPt3-c58I(-6P0nkomybyCB_}Gf9OXERgs} zoYBK!swmTBCGbuB#<|K>A;JAO5I_Tb~vjlCexW_wVhH5 zn*5YFRY|XfX4HYumNh-Vtyp5WGmM&rgNQvMe@mDgow$a~YiL8*89LX7*N2uR-R5AD zboy_1fu?KOM>#Af2=6oenP!*YdAJ_4=?_0shL$=TYYp zYc9Z@^G*}FLM%LC;Ge_omT{{{INxSPpRCtvBnN7LVo1?t$NN+@rSqo*%ELg$uE6G; zy%#!V1+Vgg<|J>gGL;=X7RZii@0Or%t0wV#ezqD&n_^qO3fggc+z+}l-^I=xDDrhG zI;9*z5O$p9hPT0@X+Cc^4e`Y<<`1+>0UUGhIA{*;>3+L%2MoTLaaQ1LGj;30qr#^)E)~cYGs{zyI@R{`>C>+vJ@DakE9a zJ0`bRvKwlMRrx${*2KLe>|%u99W`1&&fxp3*z#+lR2S_wHj@NePk9%FcWk?V zE#L4+5D)I#G~~nsXuk%3OW47Za3kZejb*{Z={U8BAm7((HRt1uE*ylFv z)#ACziw}@FHO=m>RU35e`reu(mU66ankTTJ%V#Q)=W1({Hy17tJd7pw(Cm5$6}v0| zg;TlFW5kAZorMjr{i%~}#~8SnZb24*+vTIU;U zTKIak61Q5tsnvE$2Pgj*spfFLNEZ3tG_J~9NKtS69EA%Bs}nWypO?H7I$}qEUTF!l z!C9*<-UMMH8-e^g6Q4+%`=(YCr{WF7GNsfNUXdu|3*?P>BU3HU85Q7GuU57&iGJD#t{Rf+#Rq4 z@iP2c?#NH_4KKY~lWfKv`#Rv*#MhjUExJTaStq9Jd8#8>-vXdJHe zYL${>5;>;5mbsQ+gXMWi0)VNgoFYv|d0sETtzWNYS)IOFN_(Re3JEO&BJInKQ?;sW zc4+fWt#)qKVbkh6>b2vZZN4tz_gEXGp;ma23a#a9*$CDF$MsS2bSz$%L;KD*f4Wgx zQR=m~$J&AQx@>lKc#dX&tbJOm*Ui6%;x+mi{*pVjn)n2{bH@&vRIADkI@ub#^$dU% zujMCsDt1;Y|3)BpTIjd1EnIRS&t)XPGELBTfi52{@HpjvQ>*RmoqMzDyqHjH4;LH_ zA1u!|Jh|j%_uXs`f|!QE)_TM_OG!3s0nL5xfrveP00LHQ>1dOG6DyWofUw2OQ4$d# zb-i>Nu#%?{akyBF_=y?;5OHG(^@aaf&grn?P{h3!oNt^}lOb*h@xAr6*gd2%E2>;GX*mSQ`E_OpTl9%Raye(u{C`0%!=^1aB zpbpPHPm}{D#J_#=Ya2jl(peIR8}C^i7>sKIey&S zI$7tW$hNS!`G(WVcmh}CZ;g%Q{1(YCHgiXFB!At^o#jzAkjI%ty#1VS!0P{#2^Rj2 zO{aX$mF6`I?i+>q<3Mm8aytE-tK+QawDx=ZefBaWP5o`E_B~kNA#1PqgXL|ytF+Y5 z8nxQzoJ0+O@j16X)T*RK7#@^}+BZk%ZFSf)TLj%Rm}W#=LA(^9uT=aPm-%(9!x}Tt zV=R5sKk4^3{nO(CV%l5dj&Y#G@|*q{YCxyy7ImS2941OE5nn*qe*@gl>1v#$VZ0{h zSU!E|;`BiaN42_(#xu;rV2o8oy_am%u} z%bPK z%vEA`Ga#_J(vxBg>B+Ch5Jn7m5Xd1GXS*xj{9fdNq?uU2*k%oyvoM(#3>bHjnBkjE z{S#Av-*@$C>vP`w45r_az9rI%zM6%$>g;A(IIZp-ND9E8P3%5QD?k*zyUw$K_DwZ^ zJ22#VOEQzecnS*{8IhVGLps-lfGctahf+~S@6mwVNAiOHS8g=_9vKh6Q4m0NjksXJlC{m7UL%NRbCtKo2e3vh*Ptx}E5W~CH5fmU zUpS|wV@^xQ+$|kA$Buz_oV2X}f-&mSq7sC?MAB71q<>-J)*07^u7AOw)_FMZ;!)S$nKjoFB zOd_(i%xvw|{+d&wkfbG(SW>f6uX^jS=SW;E$DWC(Sd=}{yjH^iD}D$5V(km*E~B$+ zV6i3;m7#566WNG$lF)-Bv%7geJ(_4j8WN z-M_zD>H1vHla>y%M(F@?7jg4hip9;79i0?l_%-%RBMw{rJ~0{wvD$xs>XOWN=TPBr z{0M?o)7sNRAE@2(cw|IVKZy9>h(a!@PIPuF-2u(0hj+1z)sus!5B-h}hRfWKse zBOs+X;li~f$Q{07YT*_A{jz7SrMcVpOg~xSloELRWzSs8l>hgOC%&cRN>k7UXMOk% z!<8UnSNaHUx;M(Svv&^C4TZXY)4WHI8O4qoi>VHd zIpM!$FP5MkFs(Ns%0xaAPke^P(ZQyxh1m-;CGu8Ye_*i=R;zR(6SV}fVm8^`mDWB& z`)OL3NxT9w26JMu&54{g-jNpyj*;Rc9Q&%$HA1F1H$_VViD44K{%8H=*c?K>c3cqi zdHX-!7k3NCJo_AfQ>WpJAE3(2+>Jm^N1PS^d)2CJ$VOidBREJHT1*m2JqNxrJ*Pb( zSb|PliNZnf`xU7br0+NsW(P&0`7m}>^$KwOKRdeY5tx>!%4q}J(drZ96fTGMSLx_b9FL@D$&6cZp@Iz?(kge&@xxbty}}^n0HF9s$yhbbUV41O zK%9xpp@sil&iTLjceyab$ozB3=3+E7buhh^Pwa&$Pn-W4SJv(D3HE)p9E8bQCUp#H zg~32$JCW?{rQRGG+HN|LKp7a(!9BuaAQRGyJU5#S&%C!W866CRAaSWC^5f8g;A!Gq zkhvn_CF5y-m)RJJjNRqHSVva~wUL<(ACn0}tP2;-63;a_QGyV(cOrpq_eY{~QDxub zle12@efHwjp!2HReRbX$45~6l9aS92kHaEkr*poM=OR_HU4CArYEo+Q6A;^BGF1Sd^p5~%R4~%Zg zcy{Nt+DA}1Hy`o`Tw4!(nJ9W;lgJ8FnSj~k-66U0#6^fq(wee7XIUDHDAMog*UOWWVt?MKGBTtf9 zc0Ro6xOG(JH1><%HxQ-*Ni~ew%ekG%e}=^XoztLhCrZnIhAKypnW;v7kVYizxV_Em z{+zl$ISd@tR5GEtRB0zg;gbhsfhLDi@O#)w3%qT3`ll|{j-9lg>-qI;nCsQ*-T;Pw zb9A`N#Pn=7axr_c$ns=voxzeV#_I?Z&Z9wPhGn@N>DhFSC?V)gHXj3f z{SyD-H5}YVLDPPiY9}HiA41pP_SR~(lgodGwc0;mcVFKhuu(J&&6Xz3GdSYk9@06* zaNg1k$-ud1fiWhEBF20@@L%{@ofxKnM~O(+wYT8W+;odr8hUHBpk}G!?iYoeM8iJO( zum~15^oo>v;wb%2vR>R51PMIMgdZ)-kFQqdfi;-r&B|yRc{ovzMJxQ@Rc~*9&=17F zZi2@X8BN$@*2awtCo)$@c|A}5zP`4RB3@Vto_#z)j}%?;B5@x2rZVqL;N@tVUC)L; z&xUj8CW$vLeD7{a-h`JC6f8l%^_w>+ScME)d2=K2#G&f-5O0QJh6g>jyVpu;MCJEYRy_ zsxskeXEJ=8Att8Pw73(jR>)eK#O|KshHOO>kp^Mhs+fiD-P`0xcJZW2se_hT?kl8!9B8#0_&c*v zPO_KxNwYz_e36_7p-7LOv|>dNau|oqc+9F14!3dd1J%7AmK@CWbzdy zV=&NrKIxMU=2rv+n(&XTRvR3M4CDaiJfCfrgsvI=z zH(5Uxd{xW-Fvrrd8o*7qvFYPtMIi(MOq8X;E4UhAWI_@{p_Vi2}7caxX;!+K7%Y zCHg==mq`-@w#5{JHV6%t(2=Gfah0z(i3W>YJ}FJU_W1`%%}G5cp>`)I*g#h7b%v$Z3-<1)4NG24NW@SE`pPP?ODnJdbxzPyg4xT z^ll_y9R%_R9TP-JqF2?f*X%|aqHYeP4rF_xeoO?GKR#jQaJKn75;s@!+X1=a1%X_o zB&R}qr;e7Emf0%n;CTnz39QYK*ZF!aCu2IXv}}HgWu`NW)!X3UW3NMB;q|R_W-P^j z(|+Q3TKf0u0HL|PPl%oU)Hh7UQHGgY+23G<=8+Vu>W`{52Dnj@Z4QV;ec4&Y>pe=| z8>h+q%l7DqN}1RZ@Jo5H%t~4E#*{c^26=yR& z3cE6i8P)R4UKKabr7fvV9~-__D9rrGwYqb^iif_cF5;KpDDepa4Od#;5>)%A`aqZ7 zt!}k^+n`*nNjMAKk9iouJH1xBBJcDaF3FTJ%}_}btwv7_%#%0fe}`S7*|lANYxPMT z393jTtK4e!sg>*Sj_7Ctb@T=~J|RzYk&0U_dnqV-`hmP0b41hI24!iD!X%JiWBx(a zo{YsU+*N*#k(kO;InV_1YX zRQ^j~UN)UX`EIDa&bYZg$y9zvqa!wE0lbDD`k-HYm3uWB;f?qNv;-4ig^ydLqD#%P z-X4hViN?kgU)>Ys;f@D(5X-VaNCxHcVbxwtERLL|=r#Hx9Ey+(anB@w_U1!ju%!b@ z*ED&9e$y?QNVS4%FPG5C73$idr=dWMneQiToIEY$)#HCWkysgmC7}KMZk=otu z!V=9e!ibPJH)^%iAOy#iQa^hkj!D5jPFq#vV({3U%Fa`xwd$~bgOqNOLtYRC{!p}A z)$xc?^Cm8`eqaL3Ov}Fdq2nAatz(@Mm;$UKYuO>VxEtfX#ex5SyStMkHLBGH_=wG@ z%*iuzN6vQlVMG{?M%#sZ9?Ki*8FSn=+J{B14aX3$)m2*^iMZG96JfN)W0Q}n07^^6 zm!0K#WBn2!%3okubSY+!{{oB9U<~*-*nsHvtIRzxDpHCE=6HYt+`0MaygX11*#W*I z95@@Th)^ph>bxO;2P%>sl~{EWokd?|t+~nZDw6{>mmM{7ImEyar3`NcpeN)+4USOe zymj6PUl|L0M@?E2ew!^kv}J^jpX)|(o?7kkzb*yI+2bUguZFW(AmnN%cwEnQmJ8c3 zaEsGv{_{&v&lBTD<2-$M*iGE98+gO61C4gqmLJ}D>&jq%%>7A+5G&d}v+`aufPblR zDk%=tt_2`A5?+@vCEo((W%qV02pGkm#<%0L=a;Gu7kN@`HA0s2g;fwXHklmV`BjyiC_5)#pZ=)J?pDQv&kq9|vAa zc2x~z`#>!{-e<`Bj768P<n43=C|vD10*xFev97kN)o|k;qI>`d=<^}y zAiQDF3WWcWv&1i78dID_Z1@tO4>EM9-cvG}jYzS75ys^L(si(`m_TLRa2Cz}na8oRiP066ReLD#5^UW{qF65BAa&Z;D+zGu?|~0QmNwmlOY31&~SF zIr1`pd>?|U7P*4UhRE446%ec_83=IB5N?{3R6Xt4Onm9VcW3X%UUGxD&FaoF2aM>t z+W!(1dwimf&JvMOgpy=7vSJ5yFKy=aNVm=s*olFA8GaoKFCZ$AvB-jT^EBe!=kH9U z5cI}V{{0d5|DAjj`qsG<5Tnn6!L;99W;*?Up;LewXZ3`C6)&snnXeIBlBWeWHE0_G zIa|;hGjw;9#IcTYLf7=9sv(aHI-!=4**)Wg!xGUd-Ux@>g1nFYD0G@Re#ja%z}KPJ zC?=;$@#^eFobkH65pmYstSf{vJqyl&NuXgl@&F4RpKwq(Tlz91s)G5n{m55_3j;5I zO>F&{$@`W0nrbz1me8{j;eBb)3}O}gdMI!IIZM1&h8ncX@7OOjGNulLWvgm+({(cs z2-wu_j^fMS9)XNX_&WwN-Bl(fe6+Jf+Pe9r8TVcs^i^tS-#gfE|9904{+0b^X6V1N z-@?bmL_FEvLX10S#*r!vZ)*S-WALSa(pTj$K%JBGuWtZ_->;R3npH21By#&;FW=O`eH04`Q;fdYL0OFPHPh0UYzI=EJK5L4bwVJb^+A zxMlNM;Jx#~*a88sxXm_yy96kU zfOg*7l2%%GOQ%_lDT2~2*Swy(kd#+r_4r@eZChv@@ z`-W-|Y64rg!}eyZdewjQ<+mw+JULF2yQ-|tvua;XhNObCAAG*y|1}u`e#!&odlZ}L zMX;%)!-m%|2kOFb!YVv_{>0-dZKmo6Ex%=4w43q0z-Tw)Ue@1DiE8zp(vrd_+2DwL2Sk;?>f*(k;XMwmxw-VXF*>_>-kXo0Fa`-KG*_RK=n zU>Q4}C3y9yjYnFy2mF>Yc@FIJ`EUXmwQ1kunAR~6-k-mFeg3M`sUnEY!~EX!mo9=v zXP+e?8&66CF&7u$C>UygL&QE(MNgQ$r5au7=-Xv-TWwX7xEfZgp=+F`m{pV-!N5}; zII?$5F7T#EH3GGQi|yF%2C3`(sETb9Fj}rzw^0Sgm1Hk?`)xKx1@i{cBSoZ2tfJk%)LXGLI^S1&z&!eYEC!82mvPUzo$q)| z6ca!DeQE^rgaHj7dj}Fd^<~oMlVV(!nkiAb*B-M%sXgukfD`D@X01tUwJ3;8Kc(K< z*qg~wU%j2!m8U>^z@zHTgq+oz^u04Vmm{+oVY{KPn#lWmUYIo+0iI;dpw*ED9PIq;5cOgluq0-;Y)3bk0uk7( z9dJ9@0tf~ow6)Ud-T|DTsv%|#7xg3vv)G-94{!?Y(?Go}z!u+6)cZ7~UN1cev5HF5GGM^Kf-!sPnIQdD3U z9Zu+E;B5a7=HTN?%&t=8H$FhtYr|Q^I*n8vW$LJsxqIwCfJhx5uGDQ8SMaiWPh^r21w<9YBjUbrxY#>5!7xg*j^EryU;IQ z)oS8#uf6xVZ`5b_LWDf0LC~TqF-HS?)`whyJYDDllYfAp?2nOh`;CXQ%w}MG6iP99 z0$Efn@FNGlduuMOY{vJj@*iyFt5t49;O>~b1PLuPaQkz1t!Fc8EBo{)9Zn{I%5}v- z-$D+5K_&s}KYKl!W^jpdJ%hB9v-f{@-P1p>^=5|dtBY>?{8hVJc+uJV+)PLLm3{V# z6mV5v$?nM)G-OXsUsqQvy8L+Z^3^;0<3;=IW4mL2b>97b{^C{fIx8ibHerl_%6`ag zWoF{p7)$o!-zty(zro<_-Jo;w^3~wg@$svF7v0Wa@aVV4u7kQ;j_Na32Q~jmh@a}a z2CqTTq-pgidt@N&A6+f@iF838Gf}Y27S~Wl;Jg&9Z8bB5Z>m&g>tED{U;S}nJxfic z-W&w-^~6dZ7$(Loh+M+F-yv0Qxi*!&TDV%io|I|{=ieo?H}4W#-#`+p%K+8P%{4oJ z8^q?9V@p%1Nip+`0D?Hdcp9?0ozsTb*Z$8Kgh9%^t4_b|8(9CM35tl5QU{`CQ&?_d zEt^*^Eo=_F1^S_X^zd6p+iyuOz1-Q=aFpD@+p<=}`Q}~HGOwf1t8$hw_kIwZPK!us z%wE-@dtwO?wy>I9>$tkMjd@>jLAY^$uW0!CU_g;3Z<8Sf1C*M8zF4@tB%X0=y|8$9 zCXfPIrU*^v+jb+~sD~~7dYERq0|yc}2E2Y~G?^3&H(2V)!R;h+Pp1aZ@v9`ugCf2!B1Gc|8(vIA>SQhSCG@#@x zH7*@@@985us>=@Fc3$#9Df8WbzukbsIZA9~?4^B*mP*>-eGIhZ;Cm9N@=ZKt>!?uGY4}{BRCjREfzb4>vhI8ANAEF*+>b0IABlA?h7&u zg5DU&Gkrb_Z>{z-nc2ZtS`*V?dsTOXZ1$7v(1?&XZO2144FE;(z|z}{?_Wq_VW1*Y zb(9xtu}TS>E6n}n*}NwYmJh`6RdO0~w3x=^>i2PB9ha-y)5LARkXdb*+JYQ`7=*6) zE!?wSG4(;jK?0@nC`I^x*-W+F9#uGQY|inGDQ-fpSev*(q7esQ&8P>DhUxo}9#3Ok zC`#!z#cL65T)d!Q4Sz*2dte#OCx%N|sz`m?ibJ2efnwBx=EjUIF+YG^E7TAw8+F8MyHOvW{g{E{=9Opssi?7QzINP}Ji7h-}zP7}FF6G5rfr&)0NNe6~ zcn#ylv838G7QAY(Wfw$=SHsNLff-e=8SEuBT^@vIV`pw-WpHd;!<&@%B=s^<$Yb)S zfgM@Jr=*6}X6?Q^-)Ij@i?DW2F&ssu!Rz9Z9dJ}RH!0GTIvcmxsc56HP?UcY%Y{hGlsB#@!%(wZ4P?-+e#%c6%^XwfSlNa@qVa?co&7lM#|gLAQA_aq>BA=Q(CXxw=M4u#KTJmEfwqXE#C`xOZpVK z<(W7549X=i8Q@6SwH_f*B%v|@0?*l|2VdltT}cj=UQY*q1>eu6#2G^f~&!R7=7AgNCUBnA@*OurNPPa>jTqt8HPBb^VYvoKmqN(+kw zs0x_l5XIN)^@s`Vi5eq|MG{mLxb-qYKgbVsOFmvhT~^xhtYNEg5}>7dY1G@U(ds|LUT_L$M=%TR?G-cRA&g#Y{7W%D%l)8~c@8$YJy}@R>@u84gN*@Xy=b}kLN;gxPA3p|6hLY@=j2}fjCV|{|pXGn9;@#?A zRs}|LdBDKe;h*8`n+96Le$X712g{sn+=O>;0PfX<+z)tpY6gRQ@~>y*Fq#o)8l6$S znQg>>=WGNcd0{o#W{3{x9tYDH(g7u~gJf=Z`_j~9mx2sl9|Nxp_lWN~aj@wbgdQiE z(BT1__&yJzV&aCCR%ZuAm@Y+a7gNaNBO=td9WynT54$2g+R>gjk{N4JmYpV`I?kAK zb}MhH;+`&8c0oL9O*f|pPM>o+y#D`rd(-ZJw2`h|`1}1UY#lFJ)h;`Lr$ls_V`p*( zXE5$`JA}p{3QQ~{EXlzM@ZZncyJjIf>HB%!cde`#l}a@ad%QM%=02WUCUiJXeUH#n zOKBzS)FJh=aIlf|fr3v4!40_6T#VS~kuSqm8Lyj{X>vRDB%G1(*O}GnJnxN^a}U;k zYJ2t&;pyvm=4f>VPEg98h6+53CaTvx$*sc(Wv*+phI@fPv9h|w;&NlIJpH|I-f|0G zT?%ZCz7Iq#82iK;TpEPZvN(g+^A!*p^waB+@ny>tKKI8FX`nfOWCz&PyK?|VkJv<- z=X{0mrXiuDZEeZQM%yzqPB^zxj1i!JvfBll9%bb`X`dz?)?$bAR;ALCcFgOHO%@A( zy|q|8(uHqG@qP>E6Nqi(yMaTUl&Uu^Pc|^H2LA(58wv>DRAQdd7+!o};` zdYC`a=s9B7b-=0-frUemdM_5IxsZCdJ2cS|=O?V%nEav4gBufb?2cH{o^%?2qir5R zFmWE)qBVJ0BGw41EtqSkzSFu4vrA+4O#e=ilo{q*G5$eN_dJs~3yRQh-CvVnb*E1hXHKmHlw3bu|9QJY7!jC|IgZ@5m)hO1DZi^C-8j0p4G= zOEa#qU}-N*oilKBR@*oN)UR@1jsZIaMdzw7#{gY&;_|N|b{M%|0hJhkVGwWEtX9}* zSwW@ktG@|ScZ+IB96#7pd005s>>>J8q!!?Wefg$k41;+Hp$ zr?-|F09ZTN6GE&hKiNL3Zwn21X3{rtLs`?bzo3tnpe`P5kiCrvwnP1d@k8n;PKk;bZCv@c< zp7PIz`i-pR*^rbQ#}i$5-fViFwm}y>bg4Z&EzU?PW+YQHk}Ls23N2&Rq=X3nR~$lh zar+5i3I+(!&l;tE3s>8JUqFQvPrmd>$!(>n8sd=z$ZoN>^(xct0#I zt(DPwuVUWXUJjU8>Xm`!VAdcq0U4P!w8>2dBbC!sZhf(44!3FztCyG!BX0O^J{_?Y zSB9oT*~hA%Q|uwex6t#o&RB*P7V?>8>Xf?)odZq#r9k&}ZY(x-T0noYl*F-u3kQe# z=UZ0{nkp54^bqSJpi7}0?nuCbe_tyjgh zod_H|IF-X~WmrQ{gdMWO7|w*(aLSNC{Aq}>;Ti{}nKjiKPDqdh35c}7d`et9;@;Eh zM>oBOG_y5q_nf4!hSCY?CkwsI-v|$4Q|V%%3bVFPAfk#Gj;IjD*+z~7W#ADt2Fw=v4y;4GX=V#dt^k!` z!VaZj>;w*1`8y&cu=3)*jbN|JGsP|j_G)Afw~tjZ_u%t^a4#Yw;&kav>7=qun-MS9 z(Z#rb_|aCaeOoTXZpQNHy`S1oK{oj;|Xox^jMaY<(R(sP#Z3poPW zY>a{J$UN+2+-p65`~2y-7bqaRdIOWS(Uy9D@hoX5=oMK)E=$nX*DC8qD9T8CgK?r$ zZ$e#wE^=v2s?~waYzSzzG6Pd*Gzd`*MM)&$xsD8CF>7((C-Y~?{qA$A2O=4s?n zFS{BK!KbAM!ewv5^PbnCS)a|p7FS?e!7t0E*C!Ovj3n+B-ztV$>2!64!fW~$K?EGZw%Q>Xl{7iy%iv@dRmm>u#q;>5h+-!D68d7yRNAeEux zzFr3AGVB8E?`bl~kXqMIXv`175aAiU?U|4vLdAhVj(!GuEVNHn*JTI>>qfnQ5SlA8 zVp+ePx5Ew+=ioU3igvZMmC%T*GJ!Tr`fSd#Fz|R5_NLsUA{r~iK)Ia;UN23P*%ruc zarEUg{PyHH$|QX7=pdQJDhYmjR8Zo92ewsMwzs6W_YTt={@ofTc>1+r%A~Z{M?Lg7 zUdsxK`-Ek&pMg+K82pME1+q@J-+zV1$kZx)cfyV0Q!?qa|&PghcW< zqh~`0r~GZ*m?a!TUyd{O`N}`8+j6oGP9}CXY#wL91?Ix?s`NOMd-zA4iJ%;Sk5l)F zL^u{{OWNz3*HG&A75c++_5OWg-fK+*lWhY2Vka*F2Nl)slWBK8KV=T>44n3;M9zmTJ z5^h~Vx{UStJxf?v< zc%pyJ3)~)Z%KE}jB9(=bNYt!=}|QP#)H52=^2-T9uYj{Lq-L*WAL>~TPiBY zn>~6iHPON8K1Gnq>nc4@d*Su_sc6-BpIUu98qtDcE%+Of#-kBntBX&PeQFHhf~#BF z%Rt2{><)v;J_R0s7Ob6)z+ex%b<&Fm2wV6f)^FVxM=1?^6AnnZ!7!&`Z#vEqH#hi} zQWTR5INYp?Sns|3&wLzZ!QBWS0nR2k38~&N!TUr$97job9kdb`@lzBASckS@gUI`O z&rKD1->T9jFaSg}hx3Q0!Nrw)g)8EWXSoP8^Ia9BbADETIa{qWXyr*=J);^jRUe4S z6cy(q3(r&}eNb(Ypu#x~(X8c+4};zn73&&D-+&o^-k0enDaurmgN}b*m&wl|5>3$C zQ;z}+K2*=AAc+bF>QO=OM=C+Yin~>ht=5@a&!>@kgPk8jMiDY0n2c0dprr>GfG}79>}6!qXAgfr&k-xWN zQqbIgBIsp+Ag99w9bIV%D#(1VtRy@$`5<@(iMS}AjWgUq-*zAc4c0%Te8}_gDmaB)CiYPT?UQD9 zK@T3}8h#4C%(v4Zx}xC9B2KKoGC%{DFu=io(Y}gq6@0i70kX~@pwmnsiK}ei_5on{ zmVZdH`~{%6JnaX6LgY=kAU-DWTP%mW5MyFKgV!U_wWo4PJe6@bf{DUP4}Pd$h~X0N zQDZ62ha=k^yS5}f3n7?*R(lB@)(kx5mf$)ZZ4?bQ& zg1D-_k0`Rl=Q}h{o(UzZxaxyH2Q*LcDA_GfCIvtYbO^91(QpB#ShVG!?&A6f7b z3KPQazjvtCU*ncMN=I4naSxPLuoU5c!;2ky8^cV-xAZpSB23t14}L#Y20+1IBl-NP zmuA5;d^$<8Ja|`!pRWeN`+fK!&i^0yH5~3LW*p{f8TYI=Sb{j@r4z5OY2AeDRl!_mcs5;&gkFY^x(p#C-0>VZl<#h}#CvRIZ1l>JMdz#+` zMyN8y!tWzOs1e4i?wG`Q{KZ#b=k!H7zhUTPY~;-%WZPxqHp26|T@bLYL060mz> zGQA}sHtN5Mld}~jDgXZf_g{v8V#q9rEc(`Y`V`tvuM+#|#)j6Sv()*{dH2$Na>{(; zJWa>;(_d~*rga|GiM}tWIyH3(}gph6Q_T% zlk;`Yej4_jb~^7~IJFO)oLA0M>eMiG-c6jR%x$&P_m3%?w?Ai{z*8)Ljz`;X;aPDk z9&LXLJAtRT8IQJK0mz$XgFM>4sdoa;Wy(C-o~0e(=ZPmjlmpo;Zos4MmnO-k{ozfh z-{2|g(e|6Lm|4AB`wZsD zPCyJEkI#ym zg#PyIZ{G0l9&NvXRu^dnkG2Q!wM8^}w0(t1n%eScJB{S9KF#lcU|=8v-m=hictA?p zqwP#(g!3JGTgR7*Huo&4%m|H@q?}^J^Y^n@$tlDx${?beGU|e$a351}n!x~JN)LY> zgA^Vjh|eLUT~MQ865USXphgAyW>8&H9uFRRoh7#^$ZhS$_0f_IL}qgv(zL2F=O$Vr zKzBz|E<=xjx3?946Sdv3BcirD^eP!=B3&S9;ZF=gG=N_q*vkSC01Wtu$H;a_Pv9sC zgX`E*Z2g_tFdQ_b5twSB&+P>M8?kE>HVov3QPbz!qOtxg1y#9xh#_>o{IccQ`IX<*y#2%QyU&t(N#0 z;>7eOSpe~OAw2*?K)k;TavJj)?oHsck%LOH-psB->tLLMs}WeGhc!cJIFeOE66}byhjU<1gq!|u^l=5wBCxTlgo+3FMA1g1_0ew6~L$g1}asC*G z#%}|LP_4exejztM7FzK&TH6T=OA&a!{%8KUl3^Uo7|`+uy^-Czgq8H!c7? z??6^iid4nlC-PAxTX4=Vs)cWN4@9Q@`eK0vTRS;%VpF;q|U?N)oS~+R{Na!GWi9*mNBl1DrM&mE?`pJGuj20~2eQF6`s4rrzywasUB3uUBVD4hycb?8 zG@7ulf1zkH3~eg`VfwejA)pm|@|}>Hwty~S5-tG`67|62e4KxG-uHVMZ`oyzdZL)`}D2#^yx#&^`kuF(5(X#z=l$lts}v{pYoU;4-2VJqa@?(Z82FHNXf2C z$upj?p$kuv$wcu*bvfbyz|wm06l5a>PjhS+e`9s&wT#0nHw0r0VCXXRPG22lyGQ?a zz<)lv5A{zCu4?DM8&(*O4+%mBh(iFbntZcnJE)e_QtGVgWCZ&p&{iHMWFE6@vkv8M zB@*3csqAh9!JATf$x6C6UoNMdUl(RajH`VdnX%lgktI$DY&vuqghR2~w6Ywo%!Zui zf3Tlz3&D}UlEs=XV(+cJlDW0A2J6;75Vqc{v(m11lsCO7v|hT%t5&_JyO<`C09~1K z^ip5m`$}08m(*QSQ->%hnL-Q73eXF|vM#h{W9!~88u5QO5wSD=J0;^EOSt6K9&Bzl z%qz9Kn)cA|)SXclIbd)De|59l3$1MQf7SXc*>Wp$zWeXy91FRpTJ`SUy36Lg`70BS zpX(Wy11Dx{>~{Qt{R2@`+zD{TKPVaJ^4+yrlOR*YNp|LH=e{8{qt}}9aNLWqe08?^ zd0|1j_lF#r!hx)owy=h7zJTz=fuApS>g1j;+N))E2q~Wa#-Bpp`1Ti@o64%Vf9$5S zx68gW80W&W6ap6&*SkiO`WM~S$BZOGZR}AqYg<1VxRn|I!}8QK#Wbw@Gp4_OY=|$? zWy!Iz>F#}4>_V0=2WjO}erVK}|9aY+B@@ePLFp_=M3f>uz}TmJThXWzt)e`SAxXi? zQS?jFLe2j_?<~>1VK2*HBE^<1I-;u6{kpc5o`Iw zCZbf-B6p)cL}3wy`VDBjE=`Eu)i$9NTaoHDdh?pK4_BMARio=QbsR|+fAH=2+P%+y zWjEb3s}Gz$YKBX9jGEP^b<~8A_TXK&RhLUvejq~{|KyYZX5}~xA9?W9C@hh(0j+i( z*n;c7JPTX3PI)n6v!=Pcc`a5JK`7KUR3!Fl%i`;K;Y_ZUZ_B)FNMo_gg!PFz=5d+r z>^Nj$z5a7b1P-CXSGzy`f9eCE@;3d4vQe8Qt=h!Weo#2M?XlVo3`1V{2(h&Ho!nKM z>=GIFn)N5azUbKQft~yDFB8Mos3I{WPYhwjwMu@-epp|PhdzE-en1H*?FX5)2H5+X zIPbCn_A)w7eD9A(-eydw0^M9`-?Er8w{|tCIP=~p#y7vfINH;^f8}mm`$>i`&GXb5 zC6T;BoXnBn0^lShz?%NQBBSlxVo5#;>IV<=wtCi9+ftwc^}#9*L0}^C-spciR`#uU zG?d(l(+=6f@|Qv~P!cO_jkqLojs?QgK=th zq;-(GJgBU9_h0Rh+QYYjInH=AH0$o3WdcrCp~$2+#p;aIe(CzRcoRKI{x6>|y507| z7F)TtScqfT-O2(3@mCK%vn-(S=dk=tozKSZUQ#vQKd1jie`SGKH~(#!zr0w)GIhu5 z-`BJMYljhz-BCLF>CpAk(f@4Zp49BqrnYxqKYy{Jf89-?nRk2pWXEaKZt&oO{e{Nq zqv2=ERT%{SblfL80JEcpa~hHN{jpq|W3% zmUo^!_uX?;e^d?Cmat0yQ_`z&n*Mr?t5@L)34vC_n(x?kFT^Lmbk^)Varus)iy7R@ z;pfv_uQ^f&-R^hE$J-=8Ymey)!6eFc1zK}??>NhQzf?d%iY)BzUbDUz)OVJpqB6sz zl!+kgW?}v-&fpc82ZlTYa_*H>)&I--#2`&|F=M-!U%plU4I}eZiKWDF7l`tQM4j41 zqija%f4`=^QDRL^H4B-L)u?|ra=#*E`|rm}>544LvNl0!?{xHiWn^1+4!SE2Y)F&# zC-;9OGr%|gFX(W}nt5(DQ|Sfntx#pK8fobX?*Gi>`BzO%^XF(`;*+_WdXs<{)JlA9 zTT|qX|3yi*#B%vXX}0N$z$A9y_pLcckj(86u*UH zoko^wItw2ZS}mnyg`Vx(nOv2No3r-BEw21}Eqqg7{RUS~AzNX{6bo6%wY!oF*oQ)< zy+S63)hOiMUnx+t0)|(9D(((uM*PpCb zps$p|v{diSdR{EGcF(A2O47|=Ty3tLs!(=umswO)A!mKH!4KpvSsmr;y13P(!Sx{-!x(`W!15UQf!L}7KjbOK<8qMy!4A-hae&~wr|+LV z_nPryu@hLqj=!fau;GuPFLF|dSId3~;7ScB1>3NhVo0GN#R!$AsISUV2{3B4!Ktjk zWJ@6Y@jSmuli8Yuu=qCu=a}b7e{R4D6&%pzRi00?;L)RRxBP3ious2j9AFK$0DB;X zHTmfOXusf%IbUAVL;&D^kMrIgLLm4Ow$cJbO18ldi*cH4M@h(Xnn;uUVJyNcOMK&9 z@QYGTIu?XHw!DCgsGUCpe9ZNutn+OqKn}H2sK}S;N15-F1Vc2=q9Wrpf9KP!o=A{c zUnL@I!Hrbp2h-@S+)7ojigg8r_q@*@{qtq8X#ewNOy!x{JrTJx6c+=`L0C8_`w>w|5-xpu9-^I_(MW z?v8a6P^YD56imyuaLJ?JfBFuj`fC_`3HZF7@@doyx!>wjK!;b5-VN!MK~u1s7g(xQ z8e-@GZJr&mY9lh3ago7{1K?6%e~xm%E>#?lX*r6dCkU%!C znQd;0-yJ~C-r54}wV0*b(_{*f8SE3{Lt|B%k_%`arp z{xaL>b~Xv*2-^R!f6gXpvcrY_#U^R0UG6snSh-(j8<6iy_WKwA5q*kl@ks6pT#AIA z_u13Fg@PSx-!p^fE8-uiurOs&kW{FE6pjBi`27J40$}6wyD3l|XBa0S6Hy%*gIEUG z7Lh_URG;l;H@Pi(!&xxHjjY%#3t%|W%@RHdiE7rq2&+V3f5(pFTP|LzMj@6Ry4Ecg zscurD!9#UU4}z|Fn4RBEA#$|}QGW_0iYLlh!S+Mz* z3*TwKEHgO5f9a-Rq%HHs+obG^nv9_?K>{tmw-3LPzb>zYXMGJH-@eG`WNiEE@u_qw z6_|UK$~rp5DwKYtsBFY*;a`fD-55f>QhC;QL%(5>+uHrKUG0eIqkxIebx4;sr+LSs zgc601j2nw$+;>_3PE$^AU>945@M7SOjXO)U8{%(ff2-||7>8h)jf_RmnuJB|T8D-# zX(##&>nufhc{q<23oMJI>{aZy^NxYKE-gHBI@^GTM6l#_-o56@5aF~Ji)h<8D{tS% zhzkqG@tGW0&=5tC4{zROK8@C%(_BdhI~&bAdmYgQ!&Ugs}(*rBO@v* z$4WCOe|#7C4_6P4<)LZrWL&BE&REtvfd6hyU)T(tA!2DDXOyKu=}UW>S!$Nai9}!* zq`*1`UchLZn&7ab*+9BHJRmxs-Df-~GIp3bAH(XgK4OfeIs}nLZNGNBcrUvf^L*Kb z$XbeWdDQ;r%dGQgM13H9*&z_z-j>(&x?bS;e~+d_W97hYd1>l+UwVerka*Q&pG8f3 ztZJQY<*Ci(-hb6+kNozR>`SZjdy{<0HvMKWju*)ge}8GoSL6}Rr)-YW=2MJI50s`y zV}Li%w(MEZ_U3bsddsEP0pvOm>r}Fs8MtUqQ$8Huy&8a5rhp+@uZb{>uQ|)7^kXKXaW$9AYUu1lu&XX|?gtpF)UIxX-uq^WI1hnNi}Kc2F_MmVF%#^BXRg zz&^$3i)y#P!&|eUBoKrJ$pSA#yOkMQ*|gD=;fcNNEf)AA@S1{n4Dj*)e|k+1m~VXe zKVDN%b3rWau#Bi$XGm2%*sB*jNGU$79x=pI}!hC^EEZ1OE?fEq%`f#-ifmK z-Bf+qOGoOb2@@kb*lZtj--BJ@k&e9=$dY0^rzr&Q+^DkaT)VMX;i!C>>k!8;C4RqN zUJ0uk!)@f2<>XKe?RcdKK1c0(Nh9OY3e-WmeWk5WMgeq$s0jXZ?c#UQ(r#4 zO+v_l(SuB=P!X@a?KR%ujmd_IpEhGXXEnN2BQ&LI=+ov-P}R-S24?qda=R_wwzqjt zd&H!9xX2Ia6f1)FnKpXqfi#PR( z|6cv0TB-QotBe!48FTZ2dy&CM+{&I~_SoH}@{m*~!5hAE6j2<$oN{chwJzT9ZMle9 zOsHB}f?iA6p`Yr^%~a(KQo$!udeo|VnO7=r1i-kMA~C2GSimZry1jVi zitEKCcU&*V_*2w0e}A7?bBjn7p7=PM`n{Ow&~%bjTkpl#RxtL)Vnlp12{WT_%wCLp zQbajC%Kh)Kp-F(hkct7r42ru`Dl97%ar83R;lpZN)Qd-6r2-A;pVK8d_a7=Ux?UN3LoVS2M!Y{nH3NA}ljl(4M#Ow}}fAj;8nP)t^jdJ#)?~78T zfDeeWhV`N1*>QWAbv+!%A}Ej`;tj#>fy~Dv5Gn=nUj+T5Np;5w)yQ+HBEN zcC^{ml0ZSt4->yDH^&h@g{?7b*5GoX5t~bdx)6kLcs$TpC=TBb{e96 zfq5k#e}>~2Q3VH@DH^b>C{{n~L#zjx+E5`0GhOX~K-_dWE`JVjTt;L*U;|ympTpH6 zx@-gts7!;pTkvJKQt?~NbgD}yn*-G?8Uee_LL6K?O-Jo|N6pxb!ILU+2AFu#T{iS* z);P^XG0m19`oOLOqXa50et@NjlQ#4RbRw#?e*&G~IH+-Jt*WFDT=f@Huygm+^?}rd z03UCYkiib!tWmxx8KIZCia{&T_u>q~X1oN!1g?4H5(LAH{(-${bCWu(Lr$7e;=hDD zGXN4-L{ z@TS)V$Q|{~uYVaK+&^E^=9l;pMz&dItJ^HaZML-q+zbY$^<6ecON3-E{yu$Mf2lll zZ&|ypMjBQ)N5LJz{o<~l@2N$fygNF7_33%{{Q2pJw@2sCfkq)xpkJWm)^YHSV$o(m;;k)HjF^YFme$4_4?XGu=`H_kVFD1>ve+|IB=SX~g zZmGsq6Ez^Ow^;n>`;NT4O2}e?mKuu%7I%C*j&igp72n7mUQ9M0yz2po?RjrhsjRyR zR%5D-2L{K0@U%s6=+xDpre>o61DMLVi{h1tY^{zeT*?^)AM(9#0FrODU_A8QJ=N;I zf7gBf_W9}acjvgHP#|DUe}vY=rbX^40_g#0U!h5)xLJQ%@ahczaI_PL`=FNi<4H{} z-4ienNx#H4DV|PU;mK>hJ-Zs`LS8G`rVK)mc#V7Hqr6;|T~uP%k%^B;qOXQzHf7!^L7fS}bDJe}yr&gVE<1+gU8& zPmL@Vp$zk8Cs!&PPR*q0qa+tPC2pR;bi-To#06hS4*@9Gaso=Kb*!gS@n7S~1xaLioeQv@*L+*+m6?(?HfupKJsXBz zvmR76vCWXS7L2X>S#Q|Ju}0;YHDW=37jRw^KgK}wE9z14s@74A&|1nqW~|@$+Z_-} z%elGEMH~HAm56H*82c(Ut2K2v8uLW7CfYJz_)CuPe-$hT1zJnTa5Xw~u06S9`{JBy z9xVnQO$SESn7lO@6=M#x{sFMhOkr1v!Da{0_x0LJ!NB~{khMGdR2{KuWAukI?`Vw7 zQQl=qdxY0-*Q&H@*&tO_))q^f)!;7nyEMiVzjau}rPexRbu#a6TWZ&B9fttcYABA_ zE>0BMf2A#L4akf+-$KzdV|zlhtZrsXg*^TdV-h!bv4G!uU|W8KmvyCbJH>Wl(?kKM ztT%-=oK`CSOd7Am+ZZ6iKP6;2+qMHHU?w|3=XH}oGNs8jGlsz0TEivVu{sXUM`on$ z38*z%5Gg%_=pfnlH^A)Q+@x0^ljIbI_8Floe_6J+N?KJT`m=74*Mj#TN^7iq_JpW) z)MC{}>yPesKkdcgmBvnPv^F=fWc{Mu>d0EGl+qeAvJ_Uof`16jyF_!B+!`UgL#)*) z%nSA6DLk5GRH;CScITbm=%hTI4SWc8anCmp{-de~%WhNc*V|NHc%`y_e`D|a$XJP~ ze`Pdc6SolR4Mfl|k2FS=ia%1uMxv+sQFe|j%X+s{spRevmAie<-M;7d^*w)}@A-Xw z&wsh^XzwdnYgpE_%SM&T2zaPUW#U#qF_|<#(%p5(q`NXEuw~aH+!C^?wPd&!l962^ zi?=bzjPImc9R2_V^6CiInN4VD*v?mze-s;32y?-(HeJuq4= zX1%p+>`;m{V}~;o@-x<5EM^7sx~^_>t5RtR8f&=>_1=xQbqK)|I6hZX{Dj8TCcp1u z=e|rZCTso_hDZdeRxTdc5$(ZjuTxY7%XU>Yc@P8148k7HzId-J-d$@Y!i7f!^&@8!Di8 zf{wxs;lO|TJmi>u!{)I;wHtJpUY@YSs|25X2QMHXl{jny61ysp#>oXddJhbtZCA7MEY#V z96Jr^^{hOvmDjJ?Cc@KMIj+leu{tDs1S&A$xv%Y7b%dmbbUPO2Lpudaf3?*$=oqe9 z3oyi$fhO3C#US=0V%!;&y!M)hRa1>)4uuydR`jiG$$vvbdQ;|AhRq7l^lg31rI5C=GOMH(Vw#)_!=Xq|i z7mC#v+ujs$0i51^IF2|9NKjO@YPyWhJIEKvkYUJ{T}{#eSThl6e@=(pO;CK041`x8 zT!2{se3}W(X1$0Quz*KW*eLey;AT4zs=5D z!8!a=7UA!hu?at8f5IxsYsBEwXQ_@KVf;0o?Gn16ebq(PM;mkWPCq+k~Q4P(Mpmk{y}1{GKJDDB0A z4_1QooJY%WF4W~{g9g>huyU|{#dUn57UTx|DzE4aDaaGDSX33+YhJ+(d_p(C$lzq@ zgnfIcC{)vHf5R`%HnwUF&YQ(zIVXWh8}Nwd+&=-cP?08GP)=;0mXB#$ed3>x&hipk zh1&`S0P=p_7Q=(dR66YIwl!i*_ey)e3etI=3V7ILB!GCzY+s#N?Fu<<>?TE#XIf}j&6x!i#FmtB0v2i=Xi&L^(k zFZQ&30{X66@IpK{M&R8FsQ25}0b8k{02Pz7EL|+V%10IFG&2O) z_V#wH7yLE`mSp?upMP4cx#fY8v5W+|gZYYuBtdBFB}8*b*mGceEcKW9V<#SKAE zGnFR)teamoTP^n5vEeXmJ9wteH-c!}6`+1967+-aaAD5wj!t=oX*a{E*D ze}i$FTZaZj;&BfRmeSF!pP~7_X)~^JK&EpJD#@tK3uFV%c4`ae1%(~2QrK%Di`z*@ zbN#^4&bS|vrgZt=-rf$ucn%*RHn7tI0wk36+^~4XWtsS7Cy*ObGo#b714OX5n&H;T z@FZ|M#aE=n&^s6}BNI+g)?gzlR155Be;~mfLtDQO~H9!~qjb%Aj- zGXm`*DMh=9b@ zPm;kcL}M3P>eJp-ut}O`&x~2q_P1NUjS5Ch0Vm?(Pp0{JGXB9|N%As8+=SaWA5Zwp zDCzeiB>E%feY)kIdUJ|ZO#|A>xLx_7Ixks9x=^C!6 z1(-X=&{+;gDTiTo+v>CF-)h&&`c>AWd-@_;Et{;`t1J_gu>iVB2A*ePw9A<7DpWgK zrpNhP{*6cAczK&5bv)^5zchIgoEC`AQzSKU-JQ}KwpFLcWIn?SW0RdOe`UEonIHL^ zo3m}J8nUDVE?=J_1#{Zm+H$f4M}8rNELF^DEVA&b?@14fhg zKm$mtTBI_wVqqVk-hXa^I(?L95nK>Yg{$yWWK;&ApD3mQoSQf;aD}8eEUE*z_!n*U9K|TH36J)L|MHeCoMbHt*pQf;ZPbd~B-%wBdz@p^$s#WvM_uxYDjPU;l zt2Tc8A+t1ofD5O8f4MRLaae7_paq_{?83W^`H$tLTKj&&zd3gdGPxgu@D!(sqV2mhterKOoDE`2@C6p5AiYN@Qc6G`)5FeMzWk8n`iLzRfhm z2U>@)uI-Q{jhJu4JqxA4nj9j8wRd%?uSghBu#o3h)xN9_q8&F;EbfkQgM7qA&fmspXbnI25vP(Ry zs;06F+YpAJs9Y-5DS;{p^+?UkkLcD831;{dMnbJ{k>g()I&PH#LR~7PDiH9o{Z=zjTq7SY%Qe~sh26@*nHQkq(gVMkxElKsRFZlS3`;nLxDPPWL z!Ik7e0ZY0nU`a_+bqcrH1#qB_$@93RAJA@Fjry3x;uMjMg2r|{V%b<}TsPg6+sK*@ ze+#Pe-YIJUN555vK^CU?{$(40pXB>x=dw5j$$vWqV^VT+n3gQbN>#VNRux6xoJ9KO zgvRYJqS}}w7*N|xF__po8(=4gMJ>yNV78Rvm7@kY-X1Zj zXBYsP2OUoR*CE*iq)FO^=Sv^@K1;Fhf32ox?nerDjT$7)d32kWnZ%3)x%~u_<)32l zV$R@7=^gm(goZStM|9F5W>t#@;I9#0dU#^P78sdK)@b(*j30svwv=yefpd@%1@6DI84eI_fj)zKAjHE&AM@?_;1>PJcBC1qu%0(FAgsWCU zXk9Ro)&-*_3(;8Uz@Nx22KKC)62#qYZtZ1+bQi@&EWN;6TiRnYRAU1t2p6i@%yE|( z{Nm_|G3%wh3Gna=zb&M2$2bi9e>g*(3Mbow0c)*9_m=7ccT(Z5coZ$nhY7nCu7-7p{e+YX(>&LyE zyDN}WG1wv1LJ&RJ1D$aqua9^IIIzpsZzDf8M}D_b34J`GhQf4)iWJx@cnQe27rA&V zUIFDCA+2i3lQuVLvRH(^w}#>guPN6eTz78Htu2~>r73YgfW_NTCT#}V@ks;GOx>C; za@04_;ww?~zz1_wJ>1g=e-9kJ)AdLudL%(o7zx7g7z|q;&IN}t9 zZuI+@M*|Nv3OOF3e^M;)8b)c6q*>=m5{|n?R!J8^r5yGNC|}t0wGn9u9R36>M3svIF!Y;jQq(zV zsg_jXBqVk8mM@2XMh74Xwfs>gtbgFAuHp7%TOU^Pqs_Eff7w6O?=(VmA{;DscXcxXBmVy6MnxG^MO%;pudEZ$Wk`| zU7LrVDgBc`Wu=L9k9&fSaNwti1ya@1pq+J=;?%V*NfJU+ZAJ~KH&l5K=o-prJh4z~ zxF8K03CuQlf9C7e0*XSZ1l8CPJ7gYjTh=I{>r_*KnOF}CBWtLcUo7MaaD!^>lmkD! z*UHRrbv<;T)&T zEC`7{JTd$h$yv&uZG~YY1}D?%J^3b;) z=^K@o+K|c9F`@0>JnuIuc2I|qKO@`X_fLQN@r%h-%c4@6~=vaTq{{Kk)0nKn#IA-I2X8_`gb{l{X z4cO2L-QQg`*jTT@Ke0yav$9Yhov`kEF zu|@U?v0HM?pdW&C>Y(c(-T{e?SDzS}hPLFl_T^q(#6p4fB7twufe(O_9o(8|Mo4Gb zxB_!$8VYx6ZTGJ0X#&2066h$sZA+G53hSDTQH3vmqA$v@A>j_ zwb#hF2;8<->kJT+%6lU~Rx6di0NKsk_U`RAs6|c2f4t)Kg`@+9n^RnThAxuM0W^it zu=p3QZu1m2VT=o6N&gz^&z)#3dT>qLe+6*TH~#K=Xusg0^krji3&56NE-wQc7E6!k zZ;<=9wM``LdD$2)X%`7YfooITS(+KKO=Dqy#p<}YQ`Ayo(TXo}KlML`gtilDxzGV$ zJPf!%2hz`P!Y7GG=!BX30ne-|ap=T@N@X3Lh$@w z-wldn8QQ4>TOQ6KJt{e_XXd!3Nc=)aPa6eK@tY+}=rOx-Zk!v0kCT@U{lPF&MU#WL zfS0-ORw|Igaei+)k2n%=TK;JIf2Zt+b6-T2N~==&k@%;yg>Z1k%~QLRttQ+{t&SRX z+pe(eo-&4ofq%-dD-TphPn91QgwO)jvrcIG8Txc(;Kn6S2Fz}GWr5y0zyGZ%ovqKP zRkH9FOCKybeX!(#y6?{!&-@g?C#MFIl_DgxU}py&0fdBe)OfIN-*3N(fBm*6B_*(J z*4Ucw(E(Cp9`QPaVvg|;EHOk8fHT%@w!O7VWlg4mMeZ77Y|W!xgq^;Gmn%o^MV%~F zF;vt?h`OU?`lYH;Hfo&n$r==V+97z;Vam2_YgcY{c%jOH_s~yj1)>ng(m6FlAwH4~ zi(PYQasJyT(CBm^#Y4qXe@n)k&{K(1NtPD>cjxkdg3AN0!zR9Kif?npslfEPSi2~R z&5EDbi=R(1R*Kl`uGu=uCkr;~L8x3SA|P88py@ygU?+%GI+8G@-6hLtOTqWokV3-R zsr%VcH11{MTk&&56ZGjoxr|8aQfvHd^uo8|#BmsaWy57TTL3)@f36^#n+;`yhip(w zTW;d9hk3TPz^-KGDB-751*fAiR}tP&KQTwVmWKXZ3<$>o$>o5=OSI_KoW}D5zip!L zb*vJeKdf$82YthXng$h$WU;_CtxeYeKiUTB{5(X?UyEqrCM0xrpveV?x_0m$@d9ud zJj-*@@~9WCiH2lFf4AHsLF6p(Ry0JfqGa8HAnrakI1r`{j)^2La-NddB zfE|~-TBJ=gLV|Cl_0P+hq z``8tP3}CeFBjd3&WW{%TidSZzpK2Cb|h(QDFas71IeKW`cQZn#t;oxN7IcJXj!UfPm9|Wl_$W%QMzS= zvrUs0 zQowpcb%g#9@9WJ8y9p*%S)vtoN(0f1j9EW>cK}Ayw8ZT(q+%68Bn>GV8qqGD(e?Es zplTB-hp`<|4lL+;kSXI5Un;-Bf3=KdA!tAoDb5;aTAGDQOju%zg@Zj_zyQuAB@wyd zVVeR$*f8gSHKPZtaDAh~ek;em!6BJ+*IYp+td5RZRO2Vg6NaeY2PjR2(ox$0lQ+WCQ$ z6_lw`-su$XAoT!0z+Ds`Lb;_V0x^IQ*De;^vQWi?g~L&HsK96%e;080r+nx&PuY4k zxG=$^nm0N@1m0*KV*>OCC>4MWGEN!L>ND+s57=*3QHzCD40+%~vG>l|dUdHe#SK<% zY#2A=4MX5?&f@k)N3cm23c6U*u$V7oWIBTke8uI2-|;2mVtIGbWdUbZjRw+%5NQtL z8`9L^sry;zhh(w1f8&+P-9tyhPmeeQ4CO4^l!GYYisOR4<4scu_z8766=~Qz;zVPc zV_z>Y;9iQ>H_%(ic*m{KdtK}}{md>jTp4{W9+KE=Alqo7!=cl(MZfP>>7B!zsQgJB zay8K?<&?k2F#Vqe2s~E^@M;u)GDM5T>q;eY0)qRdDAc24f0mX`gR$>(pm4TsamRVc zgv@X|nNMMg0g#dV5V08c^5@0kH9_JJRv(Q2c<6r?b}x|YrBA5SS{cvl{qgJPPtUvW z&z?O$6F1jLE1j;mHBR#Mx>B~=FJ-Wp+OC=+X*{bB`Z$8%c5E`V4l$U7+{i6ljsjJ)S8i?Q$q@eZ%Zn$gi`jMY< zG`N$VW5Iwg*)`&kAms#f=qUzYd)^DLfR%k+sld2{;3?&#`4Z0jQw|4yFwr0Zr=TR` zh$AUP9`GFd1CT(BrUEq%%+M5z|9awI09=BVy`^TKe=Jcw9q%ZirZjC5oAbo>pXgeC7Du$mss(4%WwnBt?KL*Eq3IOHR&0YplmtOA;3n;~ zBUnr;?D^xxVqGK{4;iVK&#WWN0K`}rt~6vAO2AJ@BW(JKS!jzzV$&bjDTI5fRGuFA ze<96~oQBgQ0r+WMSy{04rw2;YQkd7KN>S$b%0W&nw?IvKKlR(%&{s~jVybj4=LfEt zTDwD2HkvBP*R&bC?srl`loFdU_!}7m%hwqg0}&WfcM@X9V~XwEO>{UerFhWM4#40~ zoTaGyqg-GRi^s)6*-r@8(bLsnHbSU`e?mh5BrBAqv0%iEsJeQvJe{C!r=(~f+K4sl zV8_e^fkMa*&%tc~<9(S)?x%jdG7;}AJr{)QwOq?+YBXQ*A1Qj3<(sVNBGryNgvNiQ z=+OaTKF~Q?$yhhNj%PW@g|?up7<~v6G=u2L9dPQX!qC296`dbRiWI#OfR1}7e=$3v zB>$jLU!I922ihfok1RNlHxUzIdjq8+Uf{_eG^6N0W9#Q6H6k@sb@bT{+w~4hXvB61 zwE}I0?VS$j{M#L=Gpo@Q%wcwkQoy|$R4VJhIwNWzDx~&66tB=?ib4$g4E(E3Xb52} zEMbN0rG#brO-rtSqXsc~1=6U1f352P8!3ccY1C`Dj!=mv{7{>Wq5!xV7uQJW-XbJW zpA2*P3qW=iIFeE4ge8HMLJ?RAYZCNph&wfA_}BUtH!l8JaXN`a&G5>&8j1p7pMhzv zuuc%WMHv+AGFI{t;Fi)vP|-*FpBK!D_kmRWG0pnX{0Qne@3A1svGuc zsMQVXQq)@_wzfp7fQ7zKZ3VBzOlB4nl^~aemXz!Apj=NZ+;F=IdvNMBv_Q+)T%wrL zy7h!Tmo~#}bju}8U3U2mz`3>nPDBZrmY2qwE4PuvfqHa`flq;NT7T3&+WOpiBrv18 zr1>YJjdlqME|WL_428=Ef6CAxI4TS<%sozh#K7cb)kg6n+9=XcnBp{K-jGJWV&M2u z8aPHJ#(6TBERHnz@T?D3ZB+-#R#l3uq(zJZLWPeimGxLT@7NmgwJ!Tuhsd)nw(fTq zi}ODEh|ZX)#zY)#c%FZQpxvF3vM%MaqjtWa15(CT1>7@o5?$ZK16fyh4@NIY*kNK@f0i@k2_D29 zVnOwrIE#e*>ycG<`zSNiH$=PB@U=aE9rDPn0rjoH!4#>p95~Y~OMg+^7-~I+eC^eM ztb?(a@(&ZuN(i7s>4Bjy`M|m4LtwaJq?h#Xq1NUBuIH!hMZ zEQl|_KzX`i@efkI6n`%xMYro9;;uFRZcY2@T#S7~N2ekvnl$qjLBK9C&LggFv_mCRFlW=Q##S5wdQJ zzcECQt%KH4r;4o54LiKiy;UjxZRFpO=1mcI;)ak-FOcr77Sa+uH6yt(*Q2*QRaJ>m z%qW=Kdpgzo4Sz-v2hXH6W?J@rik?OQ-gLBF6W&H#f(9F=?PBcDY-Qxd*yol7qr619 zqUFvG*rWEB8QI?aven)A(&;>+FU%?7$a7!3dVBu-tOYV$`?%g~o+J(mN#LH|?o}EC z*sG0rsa+;P4F%6gDxQEvgop;RBzAXBlGOkWyX32MqJKkr>VE)s0tF_+wG^cFD!Pkr z?ewohbOjG|(&MIVkV)FZ)~D8h*mkL3SKRFsEvurc;%$$rM~WDj5vzi)EQKfumN5f8 zRcsj;4bIH9H4}kOTC7Vk_K*p!IRih=E=W$FR4T1?#-3mtGYqMzxS&jA*tIfbPKE?C zJlVH0B!5nZ5Hsu^SQ$bmLl+~c?ZRB!joYk|_0ktx=EZs>iPQ-(;o+M8h|M-Zu^MZu z+8I$kw!w`rUD~7sxT`R6cLSu4Y_y{%fL!Vt^z1`i2PbK_8ca4fos?QI+1heGs|5o$ z&y$^C;O42-cY?^xRIBd=k;=7{W3_uA7@uP{+kdLjAHrJj_z*20e#jX<%Kc+Pe=tF5 zG>*5nNW)+Mu~@919SHX22NCXS805+tywXt%fJUlTDlm2?uBlJvr_y>}eloHKn&K=# zqffQ)$}G=QdwCwQ;{*SQp0;O*5NxF2M<71Sa<+o$cYL7i$FvXU*U&_U%(a^__`RW` z@_!F2dXR!%1S%yb^b#T9a*el$4rhExE*q9Vwi*+((@>9Ki`^(KHdDoB#bP(+2^??4 zYkJh%OoC*2sSe-QE_wF$9`dYcqb*z{RD|Dnv3RY>G)d*(#v%$ zwIj*w*cM&uc&v{jfr)XytSJNGy*R^pW`7J7HHIjaOl@B9)aan?M$uCPh&$^R_;6sv zg6;OJ13EdNZx3kqfWA1Orw8=g0i7Mt_Xiz1PvYSF6ys+E-w!BWu0eOYw4mF6tYW}w z3CUuSL=@K7L%d1ilUVw)6;V-8kcGr?1Zc^ZW0QL>um=to41;Pa@y*2z~B6&77xe=V=_;Pc7%&H|Ocd zd3x$Ro!d{1QQx1p@1~u=`v%c=qqE*%oOrbT+$24{;^B2ax$|gyW>U}jU4QP;_O(fA z@d)S^+8&2GO`7X@wB3S?0109UOxpgY65sIe@O*@&wXTwxN83-~0Vi}UV8GkwZpzCv zxt)4TAyj&mnoauWfsCoF1^BN(uJ9^lgCs!&2{wSK*U6*=qu$`?F z5tR#rAbPzxPC%ZU#9Mch$RoPlV1cXZK;Tj29MC2sV~3e#Z=D&)Sy1Xxr9KED}S$hQxIWc(;nSHwoPoHFkY09Cy)M__|2d_PU5Vyfbz(O6cA$v zLB@no+@t9G^vY9Dy=Xj&C%yFAlVwT66fr)i2vfsTG(OfZ20RPx=X{(CV=+-65IrKG zbNP6xR3}JfU9pYna!GFvAUeSOA@3>nmpjZjB4urj#w5kHB z3`ioxhZKcI=+q2Cn@gD{X<%WrNb+!c z3&tlMut;!w_18+T)>77$R=h$`mO5HWxpTA#NXLbh8hf`(kA)&%y1^L}V2SDoax5pz zn=cT)Gc@Rnr?;?kGv^$a&zZQN7mGy2ft^jMijVDlo_`qW{Nxtc71%(Ya#VC8hmeqs zL(O0hq#{Eyyn})=71>`eMG_+j+1=FQK)q0r89~UNJ8vMgh49a#?od4`f(v0g-u3-l zaGYbM6tnw}Z?31L5g{OlWf{j}W7kcJU`9~ifv!(Yp$zw)YRv#*qKE8o-4AU;nq5%A zx&e-DA%7^SHE`=)-FHXluRcA8XatMJ z=L3I0n$qEKK!U&Vc?ihY&3Bd?k!$BonwkD0(Z771NOd9!F0e$COJ`C#>1+O76Rm(K znZ$4Gl!W4Byr07HZIZ*~l77@sD%fBVV!fof%rZ*{bP(Y&!41$3!Wj4-<@AyWbc7^+ zihtSHnrj~5r+90@n*aZ3d-LYDjx%rc@B1xcCeer^DgwX_)G_UV1P~{$oypj8-ny@- zcpwlY5rG7RMT;%J_YfPgkRS*G+(8oi!hHt`E>!|dEtcgic5KJXc{-?5{$r|gKfb{{ei9c~2QFU{|bpFct zpBd@3MCggHFFR(d->r~3-YHeRRQ0UWx*v^B_fL;b|GVvfZlyQ6RUCbUFA|;Y<(BD` zTKWE!-gi^|0!qJTz=Aweo4rEo=kmX%x0p1DA2IKlt4n+Bja+XS>Km@B8-8>VKY#ig z>dsz5hedt;g9pDioZTn%1DdP*_154G5aA{-(77>Er7EsMuTnXKRCg;Og&M(Q)$b)qicR!C6LW)%a4m%nX-} zf>GfYt!V1_p@~8xr3MP=f5%KQApKA56hHjHE4@vxzW!aoGI7VE`Txxpp?~Ra|A}v+ zo{f@8diF^P;bR2UKuG(J?#y`eSrrJLyD04!=Ch5~M!}!W*skfu%??~JoCz5$ue^J(i+^<%dcx#~+1qvPFTL=>`MlIMR96LW^t&?nEjKCsykdiz znqHNaQ?0^R#q{+TbvJ8{K)+>F)6YNRc(tqF?U!6MNh`)Ub9ehYHH55{p7}+ZyRU2L z_?oNVpJvLbwVGPkpb|@@=eMK;u8_vaFURPc25nXP(@s8x(`b$@Owbh$% zFB&T!q^zr}^?sq7?|&5^jThC7{t{jnS|M0!wD~O_7cBQfe@Wb+ztmXYAa0@BO@||1 z|BmUB;PX;@Oy{ERf<)3(PcL4OT)3dW)cB(M3_Sfr$xRLVOD)xsFLqxLw@K*h66x7> zwz?NzY`GwAlQuo*zR=Rh^ke-mn=ZB2{o~~5A16n3S523Ga(~YI>l?6ZXDzHWu}Oq= zNN__;f2rla+{A|dpL*Wfpug1i(_O%8(|)2yz1SnMFSa+gHa0cZbooE)buaC8%TM%LN_*WT!d|!if7a`U-k&5H zQG@;x9wGTJjDHI{mz^6X!AlVbNqRvnZEO~aq&Q4s$puNX6i27OI%d5%W^cXN(%jtG z^5DT+FNwwCCb3w2eyr$lU3}|uJI&Vc9}FBF?n@Wndg&#xsJ@}OrBNcfbn&eVVv(c~ zPxR_9wYSv_YPo{C3X7ZGt$+6i%-deaTzUEGuP?M;`hP{k&+8bY>Fh{t`FUN1z0Zq6 zeT^s__(@SX@c&K}Uis5?(d9PP<+pGA;)=HNMbGLtO7LMylR&I!;n z*=qcuY}Hi-g03tOD4aQ;+|np+!-k1#+53Dia0eFGC@-~B62EYvyILGJ5Y54wH(&ql z*|BlWb^VP?n(O*YV!ZxVxKFHx34W*17I_9SKM{HAlTzAGZI$h1Nin_DY5Q00sS+CaX9z3Y4YcLCwpfdGoOfT16 zxq=^G5fW7vbKlFtpG$SWrs?=OHa#lOuMgMZ$Rm_e*s|_%y%zfu`nlvh-y6-IBr!tjeXWa6p{$l+*=02^d+E3wo zur#UM(V7(OUQHAmP!p{*wl+y+s(nFdQf&$4RttA$3p8huwqN*ecz zqQCGmDV?tQ*xi1;?w559byw;dsG9*(*HHKKvwu9@ z()tDy=BYgYdEJeMK54t8xw%2x-G1dy*Dncum0kW{Z(Y9ej;Nuf@!elsX)p-m@DmT~ zFaGME@oND3?)G>7th%ET){O}C>uREPS6>jl+hFN#f2YrEHsDkIYr-ErLcCXR=+hW! zf>x)Y@6)Vx)t{#F@1AOE`JHkyzJEsRIpd`s-Izn%e>J8Er zXo~BAH}z+39TX0fKT-AWn|gsRQ9KJz+N=2V?A%QGwg_s#(YNa>UZO9Uk?+c1UHExj z{ZD-Ny}w;8{gvjPMqQTA{Z%!2jyU_Ti*GdaUzglKlwCu|m{d;*`CY-7Y=1ddnwr`| z@O(!Do>n<$C}}v)bm|6dxXE(UWX3Zym3uQAE}9=aP(OH}|5ZPpiM)8_Pe}3C|Mcsw zD>p9vy8QPFHM*G2-WN4cG=%aAzX|mMHbmcjU426^3{~?<>d*5z8uuGspgFO?2M^R2 zF1%o&FA5{wkDm^xs6Sb|h<}m2XsXzh7}=#u^?1ohMZTH3ugh-KHyEjmL(tZaKOQ_V zzSv(+qeele_BOo52+v-At6l#qoysKKV~Ha2*LC$*eo@X>P0a6qxKvB!}c5h3+)%uXsmL&roKUcz5lY9F6FP*U4OXHU;TFY8#N8> zN3&KX+FzjtB=A@CCNo~Zd0wKPi=hy&y5XLCQa$N=;lla!bFs6#?=bTXdZ)q*ch9_X z&u11G=t|eGuAF^d>a})J!|iriW$0de@iy~X?G1^t>#tGRiRa5Z%CxKPE>qFlwIJ$R zyQty!?H!epuD$qs=6~9yOZ6SKsN-7wOJYgu%Wq$5ue(=w_3caT9oMhX;$L{Xw(QFN zakAzUW-EpimCzJ476%I~Y^e_lt{#)PWzQ{VpkvQ6?= zb@i9@Kb)a!XAjR#Tm00pztktKh>_~cb>)xq{wI@OVOnv+Pk+v(q@m6u z1)pi;stC~4GK{F9YOwiPcWo&zpPdlxK37;-4pxinK3^OjQqouQR;8)FzQKYfogdIB zo*${r_oMf4kf^L>eykRdYid+O1o;Pn5l)tjKY{63AtKVtpHJsxjMe+F4e9% zlcm)mHlK-Q&3}*PN>l%C#lc@%u_pEyHJZ0I7t1l>Kt3t!a@V~!Oz)Lq`aPMv%OYx_FNj)1%@~S1F_hzjR*|yH zB9gX=Xeh=|A?dP+r6NU_MIsV)T&91N7)oR`BB4)OL^RUUM8g&#qU^FL6*v+~rLs*J ziH;i&Xnz-)XuCV)7X z9AXS5!W*sF`EFhNmFq8D{`oI2cl~q2<*qANnXZ?A-F5wP*NwNj-syU`>z+*9bx+pX zbx+pOb+4nP>t07|*S!w>jpNbSs=!d%+GPW9zOAD>1rG-A1H_^$Vv(`#qX;rq;h!RVb z(lHZB8?gz}W(?bKDnxB9IKyqN*r+xoz9(zNM#+?z(%FhV>Xc$mIc=b#3F}rg%V>y= zQh$n?uxO=NLPN}?Y@|(7Hez{7Db}Er(*`JMJC*V_8p^sXVo?+36^pQ3u}Im`Wf3=0 zS`jz4NV_cJCXuMiB5r9>VA$3{!%hqpNNVC1g}lomZf$I)VGD+GY>&8AE<~h4L`G?p zmMg{46}L4t(GXvhi4`=&CdeoqiDj}D8h=WK2v*WTCsy3if(`0urAavN;tpjeeSm~4 z?rfy(>TIMD`nORWkXhTLw2rnmDSw7^ z)Ff?kEUQh~NkgovlX9t~lO}a46*R=xC{U$wStJ^~##CURjF>IYQPVqLJ1{8sg&?rG!3E zwsu)$(pD^1CT*pWP6Z9|4XIK}AAcxiH0+vNj1mZfmAtCpK5s)+xtOP9Mwh zfvg$lT_&TsWb}bdc!10!lPSer7Fma)qsyX{BjYLMja1;#Syw8=Z5TERk!A@En`op( zOv6SRN@*ygAr_^iqo-8j=qZ(=W+5V@VW$wG0#!_t#Pk(0O_7K&r1Ytzynm@w(grD; zX{KhH(yXM9Tbj!vmkLa|pg-gy32KoxC5CcQIS0y8d5cnrNT_^mZpAdLRZjaXS2WWC z6wOi$TWOL~NK&?7X^m}ELD9_0HX+j1S+1cG3soQ!%9YK{^jUKohSC-qk+z*v8cnTK zt~XKL(a}M}P8urcgHGDwCV!zz$Zm3ZYZDEdG3=x@$UCJp#6slA2y(exP9Ml=>l>xC z^*D@jg{;zmW}yKcLIad^EMzpK&1e?XSqm*$ER=>rrD&oN1#OI?MHrYCY_p<+cC@im z7#>Bro)$q7(q=U^2_2!OqB52%8kItEn51Zv(MTIDu8j_XLPjUDjDHG5mDaZktrrWe zZ>GvoL_;y1DcV#ej*GI1>QyD}BE=`PfVS(xJAz(qDyMc_rvBbl zKo8Bdi>*RWJ81FcBE+-^MO&-T6C6>cLQ+xxSXrAue~olt6jC9hvPZb9KFYNh41 zmVZ;kCY4eRFQH*04P`X!pxPdhTn9w~9ZH;9Q3riYRhLLkmAVr2);j97_6 zL9FbQ3lV%%DHkGg8c_%l1&t`_n^gaankb@0`7IH(3Wyhrmx!c7L`owvLAfLNlZZt^ zD5fFRyJArb4W%?h$5hfNYNcT(jYy<4q@cb@f=z4^ifn4aH=EjMwkD)M zNwc8*TU)Sz*0v@Z>1?85s{k}8_Ml>4(uTAyX`_-}(uP7%(k7>+QHqqbDQRCjP^C*c zTIncsw0||zkd{W#mZX!inxs=9qv(Qy5Q>=;QUQG+iAWS}v0{~iIV10%Fh88L7fO7MBYkqg&YS# zE|+!+5vmm01Ta8#q#UuPTrL-qlob#nARB~`^3GNYV>+c3tuH{A8YL=-E>wE8>aMEB zsn%?2KBvGs1yx0{j#7Y2%Fc6(+43sBlYc783M7+IwJ#BR)D;utHfI&!6gPMfj}-y0RRqamWBA%8Tvvx(BWK#H9daCZvL?3B_cZRK&2SI4`vLwrV> ziso}@mI7q~8Y)_8v!y~rN(VzIOhFAkiVgw4cG5ZT6dr8WI-6P74V z0{_TvTxs|%V!fYr-IGy6f>cBeRDwFjWs^iiwVa6RP?3aMu9^jPAi8m-;eY0T&u}Ok zxPSFELqn(fs@%}f*>km5+VF>~-MWT1uKtUm;eTEIt>M3FGrTVqHsiDdW^oJsGX2GW zf}Oz^3!&o1Q_m;i&+>ouf4TFw`ER4&O>m2&T*kv?$GF`Hx4*(2y1Bz7*gW7$gEs>H z5Cp<7ZiDd!2qwBjXa69y1b?$5kQ{;a0PKa~=$H&H66ZScjF3^5jONLhl`O_dJV3Tx zWZy+je1B&r{_Z*W`_%s5=a>H;JN|n*{rAkq-l^So4HdzXZLcVZZoGi{2qs3G)TV&P7LbkAZ zi*DvIyldjU-NJ4m$M8!jO*g|2JMS|k|JW(3Hkyl$qPG}SSxg#+ckK-5Ejpdn#PIfI z>`tK4RfnO{{Czqw-YIoqvpC*w(A;Ktx9@BAm48vGyEQ$1T7Mlj=)IBm#@?GaWl!f% zqo+Hkd#4AF<{pQhY(2?6IaXucsXzRm-?RCLnUXbg%Bl_b-wVGNeeb|x#xZvmSw{O_ z>b*4Y4VYCrxwfZAgD2y)DxI9+9S)u5J~nXWF{{#O^{4Jr-)Zo4%1B3fxLZrdXQ*Tw zGOC%9-8G;cFn=?=Yh7<EhGnr|VC*pXT28zCZbX?=L+|6HY*nUL%aBl9w)!9`>)rsSQ|4|FrU*M5%HoFA`0I}&8o2i6aQU80X1 z6+XzHvW#5uf$IbJ2O}kG8fnBfciVzP>h|B%X`Zp2@>gGf^N%0IKG=M2{c!Zd=!dy; zV}D;*e!Q=Lvi-q6Ej|A*|ABizW;E*WGW^hZk=5v*4?U0fm`_>R>(`$9pHDoGlpp%> z97rW+P*X^}?-}SVvai1WeCb2GrpH`lWv@TqdcH5@Sjg36UpT0Z>|>4_>Y}kiuyVd4}@PAfowMy5aGBUg?G4PC)b-eoeHPuIfkFp=F ze?0&3$p?p+^^|2g-ux*1(fmhI-Z_q$7b%{RSsL8!>miyr2JU=u@bAL~2 zG)0z?ed7FNi1);WmN=1&%%224nXGWYf>D2qDcSwpBK!MmDs?w%6usfL$!xjz$Zdz@iTq{lFT0-wJ_~=g_*vpNOv&MS1x1j*Wb;=vio0ZUpRy|L z{Z9j*PJTKk(D;m?RXin|hbj42sDFT(sF1j;qFY=gn~QqUGrY@HWL1Vw4?aDnN+w{w z|783xPMoE`1o~Av-5a+pDx-$sC-Zndpf+V<33bt5l7Gq4nx`s6d}z|@FvSaFJDOKb!n)#>nvY^?ruWY=0qVP6}Gd zTe5kJtk&>Z_OmsU*~IWJ8+EqRs&op5cMLsc)hIwdcYN;peB|@ZzYe2Vh*!Go*6B3e zPsTr=#2$SfJJ+MbAN45v!TRS1U82uVF!x`p+Vx1LL!rhRwHl+@*u(Jl4RvAjuih`% z-(6GP#HrL9P3FI*|GNCwwST`Je8GMZ_+sLV*)PIh#J*hqa+As_+xKk#r4oDG->)jN zO!r^+{+cWK#+lOSTp1M1pRyY5{V#^T82-ZFt<^Cldz9*$Ii}63@yz0+0Q zd5i~2jL%qw@;9%(aqWwpFYRCYzMS}S_RAy+jv~vv`o-}VC%ikML4T3#owew6in2&a z(ZeW4hR;UnL-(FeHDE43%^a6!YO5;5qYUp}`iPaO=!jdsa(w0eYT~QeujaqXes%me z&+Bs9RYVHrul!#HHMf|OcZews&TDB`;yAy9oB}$u3OK zV1=nhO12S%XJ3zhJteS`uVh2TF-7NK9lJ1AVWZ)aZTOUBRDXuA7r$Qn`lQIJd%xcP zdJp+1s~u1o@BfzJ9WI?#Q`q`2{Al|dcbDj!D5~LaM!xa!uH_=zYy4*Xn<+tOE}1lC z{N}Z4P1>6}O_A-q_RaD)oBIBK6~hn41b8qQJ!N|h=5J2EwSDWNthZXGg%FE;Gp)u( z`UMqaPd{4vc7LWz^zA(Dr<1bOXvsE;Y@;_>zFq!y@7u%gtl#;u*WYe@yF;gHn<@FH zEA%!}p|#N9qu6&tU83)XD-@A1(qdh==pXxY_LbkMu-xzB-=)7>{%+&DBW}^kExPFA z1PzA`25jX_k=5M)Zuh&qKUvDyViwSNq!!0gz z5kD7+amg(%nd4GJTxx_%?{S$ST9x~IHveLYJ$%Zl`v(k0ZgG!WJmeyST*S^rMg(BE zf3q8#GuzAX3t2p~A~1LWQN;WytJ2)(B9mNXj*CPD340_oIM0;)vqiSY#6{9vV%_`~ zR@Nz3zJGr0%|EJ)T;!OGX1M4E7hB|FNiKHC#n-sR>~lYt$QIcFBNw%C(a~O=ro*UG z-_n>Fej!~}ZLf3DX)Zd$Mdx{^AJxlHyjC?kHCml^pqJr?UHV5mTy(EX#KmkkYfDNF zJPL9#7d{Bo=&ndrcX6>mA7 zx%i~!lx1Xp4M}6I>$6I}d4k06S8MJfAaF zgnuHJIOdXGE*avIVJ;cvk}KGR-&CZZccw-rCM&7#T`Ci^ky@E}RF;XG6;d5VcIJ{t zT=JOrPW6~o<$0RXc2j|Ia>@=E?sBOhm!9C#v%QbmS6|l|xzs$D+T+qzF6}RaD=wAf zQp;r!wNs0z_G%-$G`q_9e2Yta${jrs{(pYxsc?-=V?^l2rK4PWS<_Q_**RTPl5^^m zRri^>^f8xVxs0{2$wh`4-nmXCtz(ek-C1EVh@zg$%y5}7ms#X8dtBy_%R0EMm&^JL z3?FwOtIZYJZXK6NaG5o&rN1oR2N!hO0fC>kDg)*pu$cZpmpiWUDx2HYg-tHA%YT%7 zbDyxkqbpF8*}`RGTsFmJSGXmXTMlx|)7!!03c}1Ipl*jvVAJe2m)+;G$COPS zLzGP&4yNP}pR&ECyWG+cx3tbJ`{;@IQ}(rMf8>_NxupeeDXr-~Yc%Wa(-v9v0JoIn zmZl0z+|snhc>h|Twy?@AO`A2{MSu1;uX0N{ZrLf2ia* zv7Sn|#~!mC0|qm<9OahN-0~i`d_b>W;0G5w@P~GwSE#>aUBvMUeZ?}Ja>Z|6dG+^y zc>RrQf8l)6Q)sxveQ~C&KMI_@(6{tADp}E3@26 ze4w|ou!|Zx>6&YWt`z@x4rwP(S>5e>+^UUR^>C}R67Ey24hN_wNpXSAHuvs{_kXmsgW zPCz01XB>wQGRMzyWwzU~Ab$Yh);77dBLM;!gm-<nZa!W34n= zpW)VLx%CL|&8mcoQ={dVdGE668OtbWPu6$1^<8fLklV0x8(wZ>ig&M;yRlvs5wyXy z&g`??4_bXxZS}UsR3V#i#j z&hj3%T0?fNwocx=SY$O?ZgYv-+`pqSnkv_BF}!QIx9?+fn+9$x&28;C%r?nllD^yFFYtSviJ+|Demz;;%F8#811%4^)tDz~%7?d)(nC%k*GJjeDLl`vGE zNB23tiC3XXreaDPE7``Fk}v!`RytL(O=;g-=5l*n?ob$wP{|fTyqew<7IG6M+XO16jz4hw zUT%L8)fl($=k~|ZY;&wq7g^0MZhwZ`pJVvZdBiflZ50|$`ZOjqACAs_!m89|b0D{W z!W|582OjQVlsgD;2SM&2%pI(A`9ovb(&DnBmRP6^(0@2u;*w0sw`eiFMh$Nz>jthY zxq~Qouri=l(NoIuCQZQ$}5F25?6(MI(~MEpgTQ9Vv@`F&glgmE>@9S(Ab z&dMM!F@Ggr`Z4=vkKV)`j&X+*+~F*DxbQF6zGw4wB{o{=VbFl>o~aV%@)vA}0<~$M z77eJ};W2k)7qebp$i#twn$64-p1*{Wb&46_qtj8)N8dUthK||_l$+~*Vst1hR zk&inX=Z+=>eH%oqyoAWh%pJ{fM`7N%sAKr}gnyuik5b&xGIz8g=;0|r4^J>9|6-A4 z?r=vr?r5L4&mo5vS;d>&v6VZXF0#7Y-0={1?5hYZuSI)iBJdg%w`<~wHRHoPceJl( zO1?;MpZOySz7czKCxhI{5_hr%Y!uiu49<+NIz#j6RXc?y^D}~M02yhtm!{8VUhIr2w1=aqtVA~NuH4MgJ za1{n~wV-;mre$+yEwfs|YU7>9v}M7X^nWq12KXVCrjHj{g9WU!V4X)A3lDVvw%?%B zB7Y~=5mr`!?`)A(_kndEtcSP^on?6EhE5~b=v4P{mmszM6*cSk^lAI`X#4_O9Bdn4 z+XLGX*r&lh4~{W#On@T-jubd|z?lcE-3Rt@uut;YLxC0-1zMcPC9VSvE0GH4Oz4f~$1K`b!JY;CHrS8B;jUOG zALc)ruh7H8JQtm*;^>EY?j&5XK03#G`%%WmR61R!7ES&P@0zL#TF;YBaIDZRD#tof z_>9VkI_S4=ya|pxI1a(-2Im?-5r0ABdes=<8K?ReoTK2JFDo^0PJ(la_f(crV^wtr z&NMhPyr)7BfdQeAU{xrr;(U6YkJ+2*egh1-U}zRxZg6>vEMtP95g79EUVphXjE2fZ zIqA#}ErQE&rk!DE1%_5(Xd8x3cz3=m1J`S1U}ep8a;>ir;f{wH-Zgg0GJhHqxB}oB z2Um>uj#b<5ngdsu_t?sn4r$B}^WYl)oPFi>Hz?WYjV5p%gWC>nH@HLKo(J~^xOc&` z0A4G2UEm%2m)C!f8fU&_o0rKI3~!GX*;mxy_JKP##g*?$9{qe}#%C*Ze( z-vfR>Zuo&W55t3HG%{1N&7f$|!|)IcyLjh{;d22IB@0v8yb zhvAq2TxLtQS*GNVm4VB8Wh|#Ez-78*o2FZxj%<Hg!KnSiG>isdbP`6lDmxHzaQMNu2);Pofe6%WWH`=#J45HR z)~c{k0HaLy@RLhP4j&i8-_7Ij2+;s>;!JM z!q_N`h3E!a$A7DDT!XO)j3r=)rsSKizA{>UW$P1G_S-k9xzhsU8-Fmq1LONJegr`~1V?OcHcMaD+A$&7ez_is!18`m2?0_oFPzeZ;<^5~dS^I}qH5 z;0c7h5E_M07($B>iqonl%8GFxH$ov~hmc#4X-7=SKZyn^NB+?!go5ZcF{}Td+HP|p z%`Hu%0Dqf6^_`}O@@%QI!>vVN!Uhu_nDBRrU}7C6c46YADorMxFgXj8OEA3v(<@yf zmOrb@8u$SS}Cr{!qxlVgJ zR!zbgKhk2NqHg)5Y0HzW!mE>KKk0UU(4pW;jwGC6dFqMO; z1DHC5sbhvurWrn&VfbX0;gd@YpIm16*?7QhyOS9fRo4QLb*dFIYYgx9bTho$Yhp@l%Ba7Grc!)qCQ#Ub8C%&$B~h{^PFc12 zKFma5CJ8g?3bE}nB{th{sJ^yV*m&xythHk$TTBpd@scf$YbKbr!mLX_pdBb|KY!W8 zHAH$0H(OOy@M&-9sQ-=yW>;W#1LoFY?hwK@2)iJhgYXe7EW$z(7WX0Ihq)1mA3?$e z3H0&cmsY4UoZW@F`CE@!wZ>qAITq#|m~n1gkBbBMu%W#1Il61q()Heswx2eBZU*LN zdDqcxhWC!Vw+nMom`l<<@guYgF@L=eH;uO#-nUz1 z_4i?J5W<5@$-i;iqGxz^9``j2Ml*!RAv_D=2!!JhPOB^?thMMVx((+0cNjjoZYpG8 zZg@an*eQ;mvZ}tj5MG1u286eml5dmY?YkNcIt5jh^}&VS=d2oSuy^jl{C^0{hhcsT z76xG<1PimUfEy!P3(N;$egfua3hUo28r2Lxxn_8nhmnm_w%cHa`8dp{VIKFO`7}#4 zZ+cMJ!b)48Y`{EqQh@nmwZ*{jX(wuhDM#5%Rb;y@urLA(s970);>f`86I%wM6uYL7 zfH8X^0f7}2)r;|wQ&x3L4}S|Qu$Y9!9SXh{HeoS#>y*`*%&>6q`4TKz^>1qiP#L6_ z%Jn>EnQomC76Y(24vSN;80IIog({CVJ!XcVw4(ZD_=(&thWE!#H(+t8O9YEsr<<_2 zhQBr#ej>o|6Jt#bKQXSp#qbj@bhUuRV~C&&%Wa6*AcFR=c4A9dw10O5D^-Np5@v=U z_CIC~=DYVGG7XV=h@>F0Tw%6xN(ZA}h{W{$5A!fOY}TSGbA7?eUwQM7H@o}#4Q9(7 zh&muT0nsc(^AMYa*et}BA$9_B2gE%P_d`4)Y&<~J2T?yn0}u@|d}xy4L-==s;X~t3 zSw{Y5w*{gRh(;kAhks}iqG>+lXZTQ(;X@gQ55*Zil&Tc91<@Ud?m={);X`4D56v)q zXpZ4S)6ZGvl~-Ty(-|S=ftVL!BM=*f*cimdAr|68V+>n=d<8EtX4r#f4T6i7`k_ zKw<_Gm3{ocUVp!q;Ri?0WW=2Alx1%AK_U%_3?!CHgL4d@a+oYP8Gga_-ZCV%A#tpu z`4jt)IN;eRE{_ri1~e0EiffQ`KyrxTv#UsVf1CT;EVmVbWU5O9sWha=ARU7AW|s)k zdyw%{{>yIDKTCu3&vFEoS7BuYRsvljSUc(x!TKeNQH%c)1hZ9gOb$* z$rDJikg`L{1E~L}YCyY@a(2dDWK|YOjY2AbQ7;BVC>1iSpxiV6$gz}TQc*}D zPYezgR=*mjzIYZ$twSmg=|M<49<4)a2m6vUF??o(YNnC0cV4PwOVPb+x4}f0vgu(+ zkMNmcbbl!E)6F>VDcjvY0O?ssFG4!0)ggq)ovxI{D7`oYAnQ0 zt6P1Gr#O?4K7tGjnUVWUX>3T1lDZ!zk$;iZ86k5B z*+D9loCktXa^|11xGe65Yyh%B$WB0Z2C@ri92;WzahD0b3}+Dr^PU+2Du)H<#k61B z&G1WWR9UC2mYX&{cQ$9Nx>{jeN_<;k*~4C)KdFy6TfD~qtQ3@dxEYK7HhSPQ}$f+#v= zD`{BCGJHJ8@bR4|>#(v(M`>jTZ7}m{hW9Setc(FxPGFVgox2PlKYGS8H`U#G6Mw9Z z!m1xuCtx)Ks|i?5^RBE>jeB_+7v-weSy-LNs1pMZ^(q}f_~j5-EBlWe`{WDtn&6c}w!+H+Z^RRvb8)4YUz{U=2dSG)N zHkV;@54Q5KbpqQXu#<<~G1y&#-8}3$VQ<7(*nBQ{=_MYsy*i@_He9eV0viF?2nv4= zql+4IAH&;og-x(;^-;jQF#{X3JnMg$=i;9CmS7_S8>uqV7*!Qw&q8!TwXq5tTY^`} zC{yxpqc4#D*(Pip!loUad!C-a<{+ArJ!TU+HD~rt*vTZ5H_c9?AA-xbu+Fz zw##A&_auz48H3HV37I@`QWiiFl|X+~n_IBC^DxiF<{st&QJ_lGYq|ql)(>*9H33_5 zu(jBK$})P>vm|VJU@JI)D3ivr0gCO zbhs-gcnk!~j(VQRO`YbZ%6M1P-CIZ$4hBwHRd*lQS79d%J1PC$0ZNR8E!ecFj0Qc5 zEu9f|HeqL1(^J@n&24ROAO3&ceJ=_-C$Q_dr84RZd)$%r$r|i>Xs334v{MVTQ>X;> zX4svA-5Bg9i>&cJ?1o`CQkEh6Old5nWBAmr;HoxOWV?;9yAHeCRA6kN;id<64`6YE zpExP9sy^6bVb5kPY<`qc6*jr!Bt@hQpG;CF*$cp42nTwh*GLy-3nzbEoR!mczi=Ai zHZsRO$PGfy4Y_H^n2_!5SPKK|TV9SvcH-BRd@V;Ap1E%Jq=!K4w)~{e8$qA(w*O2IO`Hnx91}qK4cG;@u&pG&X-iweDn*l^bDy5cVB(P6K6fj-IlN(FFT`*dK%aNj^JMUa6-` zwltOf`*W}#=CjmSK0Ec8WwbgI>}O$r3HCQ(f1A%vQ&Ty^kER&jJ}$hp5BmpvHbh^V zJY`h|6C60;zzqkJ)Wvn&eE*bXOn2ZQ00-l65aL}9YGpncHxz$aF#`u-IEWXw;2;7A zF@|UD49_}v)>dSDO>mHfgJq`VKorAT=_>hP8xC@GM5Zgmjay+l6XflX_dz~jq<|#v zfxMUZQYwidJ!dwwr!1qp1Nj-q&q98ou zS*;%OYmnP8svm#mA-DP@19@Q?mCqMeK69xC%tf|W2ZvTTbefG1^ROS*m`wQhfbI#y zVE_&%;BZEVtMVhn2kwPCMV662-hsnKIm2gicoh(~F;QgYDmYw&LxCB`kq8qTr>weL z3x~&WGzj?|D%v%DVf%vsCFvtK9C?|NZx1JOy<}T2vi*NXI2wneNs7{zO133E!&{e6 zS+&*xM-e!R!%<4dl-P)=u>IL8#eql5aI~$aACW4oel}fp_bnto+viddI7VoCJdZ&H zj#ISzCr&sS#=mHWK1sq!mauG>h_Eg)IHSYuyKcSNq$zB`z8!hW-mgMdn`Td0t97$Q~b+)7Z*Gdzpe`)$H`ee`ndAKjXxk;ba<4=HMiP zvUa7C$vR+S_=ydD4^wgm9<9U4Dz4G85l>A#% zR7D=MjPADSi{me?#Ck-~zn-DdVPdnRYruc@mx;$LqwdyynIN_Rv4w~&p=S7OMvs;@ zJ+aLZTbR$X*gRs35?hSVS{Xh|aV4>3iERa8nRoUdIToD@tEW%mXa@Rn>xXI<6EthS%wy{W318TYKT#9<>2C$hDtpRP{6Tp|u1 zabVp(hR^!HA7)>|A-HY)>nd?95=WLeR*7SapqUxpPY_3n;j<}J9tTwUP5uEPSTbbM zpuou-K4n$i8l8za?fACS|K$X6&OU!yCr)>lh&X))qrQ+M)_D{+x!dR(@WlynPUz6L zj#ypV!pfIRMOHIFoH62DF`5T7w{HtJ6ZnAPg~jbzAAhJs{hk_^onO@(y`*n1x42^)Xc>I&Om zWl*D&p+z!8jq78cQde$^);qJ6nV-H%H*P|wtXgL#?lt1x zChlB?XOC2-LYfS13&T@(fwVN(USF4p zctgayMFogAM~0(hBt=G|WaOkvM0`%-%M#yqWnW;WWLrTvdjFdZ;#q$uo*lvDzgn`b zG9`bm$g1xV&mr-g;Qq#r8kNe!NXZ((Q)XlsWO#p+43BFE3W;xhDq~^$>%~5%WZ&pvc+Z}un<+VWp0bR(M{oXg zn+z|L;SDl;L`DWFnzcK$xU4pl;T##>=Uww?<4c9R^@VLRJdN*ZbcSyx$;dDn@sp8R zG7_PC;|I9JAR{3%GOaNdwjdNM#K_2q%Gh^{;nR~weK*6W4RyAHjI5E7O)|1e zM)F9!V-&BfRpx&N7a;q{o;aQ-CF==gWuJ%mMk`csJn$6vX-s!M-6p66T}}S{uJ@A5dQ}8?+}09U{KM1_{WJqSn?e( zrO|afsX+X*#4q@d*!P%{|De)}v=L3`C3}w6Ko2F3oo#>I%~kZ?ILocFvJORgY1`ZP z=jwUlJ7jE~1O`dKNdhAzFh+tt5=IIUE)}QrA++x@7!_Wp4KL=?o?_7o8^K^JF|p#xrDm ziHxt3@eMMLWV)*z12I0aIaia~!3A#zp!)I3nx6ze<!7v?~Vccg4Zz2XG!6*qXQ|AH_ zOp{=iDGlbC(%@{7)tE?dg9Nu2KIzrr9t8>RlVG0VlcNlu96^hvTzH8CrW+s=L zhOm>?71|`By*@f?DUO`9_G13vZ6pt3_mo4 z8U%NdOk{F|Oiqx=`Eq$CF1F1@Q$k6FE$(>gwnf{EFBbO5(0ZRyqp27YycAiH2``r% zCzA&hTXD%{6n5RcCN8;5raWY7giM9WR2mUhx{R=D44;3s zQG_)WB~uBCux82#Yn)<@UK5$xB2)Wh>WECc$+XuXV2p7mm0c6Ji)`<0GHoN%4s3_J z*Id{nfnoeTt1YZ>$xXUWAk!f(79rC!f??*cECVvs(KtCrreh4B9eXcArZZ%E*C-6; zCYeTSypBsG$n+M&C$c)+BeGLbf#QE%h37CbE;8dHGhQ+?MrJ0Ag-tR!s5cI13Y)O) zQW?7$KCxufGW_HLE&|ES0-1@^BPI^kV#1+Ok(p&Ov%>q<)qR?Wc`~z728c6xGIK;` z-DEb%@b0t#5NB;<*3M_$0zjO#Q%pGPBeSD?cBm4W$y0!s{D@WE9MBugci(>|vl%kG zO=kDV+zgppBjG_39wK2s`d5)P99GCxJ;!(4_X^HDOtO6K#z7H&nEjSo_+-OD96$$XZ~FEf99c7)-xqv)bW zf>|=ZRTc&dWPYFF*%`sV%lf^aeUVXj_i1&vjojiASqLDNFU*mJMY50}3t6(TPF4BB zBw3hd_-qnU`Myz8NRY`*ffwCWIV5(==saI!|DYucJ7i&>qV@@4z!q69z5l{&WFZljL>we)B~c%Vj+5vj zi6%%iO=2@77A3Jbm$i}D7KvM_qnqpCF{{=Yj3nYCkpPLrNaTp_v#ph^YeiOLCXq=J znP&KDE0qS3c@kOR=N3&kzmWxMgkkd}k~Zt_fVj!Uk0)i&4`}?RMz6k*uJCte)ZRDYTRfkmwqT zZj+eXNVVTi$-46xdA=LQ;v{-RVl0W-O@(ckN#PP=X4vUW~-%Ni1Su_@&vxCUFG{>s)paz0650Lt?8M9qxFhDrjUL z{RBxYM`A}6y0aSeXdRjeqamuF#N8zBA@Pxt@6fF6$9)D*j$3Dc^Xlt=tN<77pLLaU z4R=YxN)qEFaZHkKlAI!`8IsD7bcm#9NM?#;!X#@W*${up#z{6!mhEJDh^*Mj$_ZIr zCF|Q{BSkh2$yTPw_L@mzh$Os}V7$}MaukIn{M_28a5#wIp$3i4OcFCBu}KoUf*>J@ z2uY;Mo_8a)j>tZtsHsX(d2VA!II2Tzf!d6tMONNTl0K3g7k0?jgg>iK*_-MCgP9~_ zB$*<~Ws-l~AgN)U>S6x&m0K^J^q|!WUB7lrhD9KfTS}dy+YD?k{Klb4|ng{)JC=?hDjDnJ3YvHy&vqEMJnd_VLTMM`Um}`ioG<~*LpY7LYALz5s zVNu3<6@^G<^w}Hw>`i_4&Th{=($TY*Kdz96o(Dg!m;~VaHGTG`LV*chaku`Qi?^oI znNw~0?7IHqivHq`K7D>`W<`I0@l1d50;GR}vbDINzv$Uoe5Jp*1cO{MxR2}IvZDQI zMxVYwu1_y+#gxs*`phk2;<2?ftG|Fl=+xHI;?~kjmDu&`FQ)VtGn=DZFIP54w-)<} zmz@4$S)Uuy=fDN&kf7=>-s*F?t;Hw$-1*(U^Avs;eq13Y^TO8RJ$-+!{}gF>@u@zS zi__EveQtPjers{XXk_k*KKFEMX+)oUzS~FD!&5z0BTKjSxmnuC(j$HD=^t}RV0h=& z^|`nD`~`jf;^y?$i($k1ZhohGe)IX(e79+>B`W&-HGTd*;qM4u>hpIu=eOp@s9QOK zu0B7yIluLC7?jWY{Iq{QKeIEiPFw->`9*zxNuPhclY6Sb*?9{DVi*4<_sb)(1Pnyg z7Y6l(8~VZ>ec_?L@Kj%z(-)TYm)G@|Pbs;cp;z4VhDYcXz55!?m?oUT!nnRLO|w9B z3r}@=_ce(MeIEWk*8qjq^_S=LmlsG&U9|Kwz55!uY`pB#UtWLte{#86I6tU4f8Qd> zY|!+VxAm9zNwsdp%v^$kX^P`-$obU}lk}H!`pZRqv5U~sTa=c*T-9H$11)_@Y3cLr z!BK$_%;(#eu5VA?bOtzc|JN61;LnP_)T=K&(w9c{r3HQY{Py%klDm9GU!H0X=&yS9 zS0IUdbzfh(qpyEV=_?!h>u#b$Se)OU95$qHFL%0M5}C{58-3{l)$dI{)R(#-b8)8| z;zDfvV=i_AY^5h3=}Xu2rQtvH>PvU2QUB0)-0t{_EGD?!u{YG2@j94J>Pr*)(hTr7 zW2QEH`ai$WmxzW%UwQ*0?UJ~i{(FYhZegc;f!lq4?uUQeiL}1lyY-3~jgKw;F&B%c zG=2G|zI9@Uq}mCc215V)rleffdD{0NF|knmGuYtrWE*6VBB?(-Z`e(B5e`tp*# z{F*o!Tmgs3!Aq1$dzI5)UHHQl>Kyr&+Z|iBWH^tYuBgAdp}!jb1H^?Gd!=N!-Lcny z2K(QXtbTu|znauvP3f;@^_5nqPU7xa~Lh8O0_PWK8(2Ei3mUwNUgEYj_|w9|jRB!J{O_J@1=$|~H9mZ|aV@=o_M zk^Np?2kGtWhs^=~^?GwaU(M;O58(HSzDieVt$%Cvese&7bDg}tnbJ34-QM;ZwEmyF z_1Dk!*9%*#7r+u{nG^NbGy3bR{6(V zJe7aWX!>fuzB;I{Uf+H^s;>@98n=@hl@r`fZWiP@xj8{P$?fJ$@!pM@-{(?kSzjI3 zSLgKAMZ)N<&giSNJA*yk&fv{-Y;$pIVj058+=hFf_MN`^R$t5MYrQ*zxp-@fOboW( zu5&wsx4@o8UmMidMx-Bd@ss-6O?~YF*mr-m>K7OFwR_y|`GL)4{o*;?1?NEPQ zNJ$W&VXaqx^HyI!r>~#a*DvZD=k$$B`o=YVeM8?^=5{AvX1LwSS8!OZ-61FH+JL@( z8I1Vgd(}{V{f@pqqOXtX>*Mo4^6Wqo}WN}c9* zCtsgB#qCZm5plsda4KsfX@M}a8@+#9tFybgh2Q56#Z-NLMc=rpudi;->FZ0s&m~$i zTWiz$#vOg*1<{!68;|sjQGH`lBse)sA^}~#)&?;oE)#3mt+h3MV{!Av*4ml^sRLP78cD~Tm63tHQYWo zY>91qV7{{cb_M4y;oNnN@Uw3>aPEQvo^_iq|N9(SpId~b6>#n<&JFG4-fqrszqzxy zxb^17)?}{Xpk>iyNqtY_b2)r&2%o!&&)vb@kMQ{ce0~aFxQH)2!50>A&j9Z2!M!(e z?=##xfiK<0muB&$1>AQP_sxIdzE?!vi_cxe=lbxu%Rp(mUT!Y@(*r8+y&r>=`B8E! zfA;5GJRwM@p!_&Kw}j8F;B#+r*C6h?fx9pNkQ3VQxfy(Jo*q@}KhBfO&CS&2)9nkh zrkwX>67I}`xb=1jcU{6={kZE2w>x%?+Z`L-9Nl_5M5ViHJ6&taeifrmKVAXRE+~JX;z@jA0ACo{T0bv> zCM%pcANO3|89c}B49@>Pmy}f8a}!^D zjW51w)qW&`WmSK}Jv)ecUq>+nm7Ny|!=}_pITbw;*i6 z7dF6Mpz9iy3AW;kefZ*4YPYrZcG8q0|JNbwvJTScOSVWEclSF*%9oSePXAqprN^5E zkW!~{?-ks87582TsomSj*yh529pZL#gR&tv#k~)4@5q1dz&w$g;@*)ov5WZPdsjiqIRgybhO#-eo$Ds@NPOuczSM^=4FHeOV~`YL=rcZR z$Rn4)w+LT)fG<4<4j_pyJ;j%v?c`wL#%{>QHRu}bUVQN>FgvZM@TE1}m)q`ogZnN} zsiCQSx_*BV#Dlnd4fpj?thw>W>%Y&nrWD*ajQj54zDFSF!xvX5g!)p17ZGvasnjzD0jZ-F|7fpxryww)sMTxN03*8THKe z#TnfH68A6R{jo4)?D%2Qb{rCy9;Kg~x51quUoZ zh^^Pfm-zBUvd2b<``iO^%HYel@a5b1@;z>Mav6@jRdOy(nBcvh+ML<$Jw+ZKt zuUy4fu5C`~yZ;dZ+jV}@%>hqnh=P7O{x7JSh_=z;Aobi>3%{i%U^X2Be#_ir; zCR`xCvaFxGNw?=5EV+CJ4|L&y9y~CF2OfU`uX*Ri54qMec%UB-3=-kQC2nV^7p|Ik zfY@E*f%~b=XWK*1Vh{sVfBgc6FtgJ&^FuC~#slMcU@;v7JM8c9z&svUzyk|AgWcTD zVAnqbQBR6eM%nJ0!h;v^;3YgbfCq2k!H0P8F&-Qv#@Ca?kEL&Z8_rwq5FWga2XBAu zPCn#zCm+#-dwB5v?&Je*cX9-LID4q8&NC`oULfbu`E_E_ui?QJe6<%}9mH3Mf5?d{ z9$d#)&x@*{r8g(=mC07{2YP9mp*@4pQrkmM@zoo^3_K<0?A81D>Vw@$7?z5!KEYR? z?oQI{-Q+Y;@U@5;e03IIeTj#0_L7x zJTyfwfY<+ZC1)fq;-Mv)c;jDJa^&WZhu-3AgVYlq{Q2+WYd!QbzTGp7uU&uHoZs$w zU}TSP_srmHx6JHueC+{b&w|>sW&6S!zBcyX=9K?5imy%mxB2fOm;eFzgU$zyeRSYr}+9L@pymRoZVh~)VevLuf9-0&VCK#rfDgIudm^o54U?S;amOq z*1*4j*hzpmd@8;%fNu=q8$+(#~wRk?W=gA>!#O>TGUdfa4?fmGd`#y1!6&DZ$m8ooS6 z8D!kQYB0##7dPO~OTr-ITV428FTQ-wD0TVnsm*zOc{s`K-kJRGxnOw(0Hak*OBog4 zn!vZF@vRrzz4!601$=*N5fA6^@FhHa6A$0T!y|Zj91qXHj()w<^%{N`cDfee_vKC( zT%vk@$jK_c^%~!L18~~_f1#<{1bV$s@Gybq)Oi&yAb5BX4__0x-H}UBXqbX*jY77I zKz8^U9vrym-x=><^aC4f$w$MauIy*9=_k*9KiQ4;Rn-3+QW-~ zTEKVc$#myl5(0k)T)xHa44wyF?+xO6SMj~uJKe*?k@(plzIPKY zq4V7C_}uSvLPo{+9^-pY@%`N9V}0VvAHm_Iz9{_LTa8IQukit zdv7R#B+P&F)ZNX;_=<61$}NIJXa?UO#P_e^2hS3lquZBy{|uVE6`1V4HKrueNQesTvtxr?9N-x<6B4!5gdkB^_+%~%(Z zUa(x(@RK?G^gMpr`==N9$ugPL({sk8p1i?N)^{&IfJr@BCzBc=5jtLwslB#@3w^)O zB~l80dIvv!fS*1grYLhXbjJj77lD6`@pSzAwe3gOf5^qs_~{G$bOG)LPsWM#?#)j3 z8`Ejz`Tw13I7rp&)YJVees&)}8^O;e@Us~_wuHx4@Yp(jK7yY=!_UX@^OyMfYy5m2 zkDtTij{wD95krRW@Ut=e?D_80IJY}B0j8h-^qAayo-N>KuaXK}Rqp>eC!BwTqg(?< zV6+F1_T$lOc=Q1teTK(w#WI=A$NJTqveufCHlJ^=-lDgX>r?t#FCM)kXEqo04Y>2S zZV$Y|qfa(p;2wy(+=fRd@aRl3z4;gqyd;Om=mH))uL@vJK+cxYbv%|MoK)Yxu22Wk zseUl_v5Sli;<0O_NWWF&GVy;gZB_8tJv=rdXq%7m-TSuo0*_7Nu@}T=ZR^>n;j?yb z2&9t>F|`#Ot(HOmtl;P8@$+l=`7pUS=&vU6^M3q%5H@Gu8aQv>CC0p9VtPWu&+l&! zjl+H#nlKNImzuIUfyZX4`UgLMfuGNRzqYk_|NFJAB|FB+b4A{q+gg8Hqoo(YX%Ub2 z;PHMFdHPB9@vC_JwnWbDu^b-1_5B(i-7wy-;qiyxui>$)spQ#MlG`1-`)3j*1vs$r zWjwxyCvM@1dwAjoo_LHWCTX;kB_h3;$l-~hA95#>c;W({xCGOi7!k-pKfH=32D#l! zcLcbaoVVhnELvk9CtQE(&}%#~7SlGz|L2p1@|U-!o^C%Jz>^o719%cn%E_yE@*deh zlMnFZIG&uslP}2oR5zX)fZyBOBlovQMgSDsBSU6?OF#V2TN<9cfhUIndAo_tBgBy1 zpc#+xLQ-%gAN9uo$+%&$Spi|pN#N^9XA(G&EctaJl%_@FXQPU zJbe>S-^0_7<3Hru&fuwKJheut`1(%wI=3@4NXYec7oP4Ri}8xv8R{o4mZLjeqvp() z$@w&F!F$pI?*M;%yaG>;;b{nvHZ(w}#M_l>vp1kn!!r-?%wrAi4L3)?@^jf7 z9r0bbx@wa@c{ZHNb^5I)#a7+u^LrW z(fT!zSObrh0aQ^zda5i(71crV1PuibkjDe?_z8K0mdZ&>mDTV_%R*D-q-+%#HA$@s z!XxRcsvLb%L7ys6b!9butV9)6WY$&CR~2cs>HvSLs33!?szwz?!5UQY$$?0OMTe?+ z@97v%zX?^^qR450@E3AQ(%2H7hmT8nJf~wNJpY}XO6&{NpN@rEjIS7Z1-3&Jw0#=S zFq|T2ZHlx{r~}AXc|2ah&%@&Zr1@OU8;j@_>gwu(0Z%YcUXiV;Dz7=fB7v_uP*EPp z3M7A(-Loc(v`)?(#lH4x7ziMPW{-j=O?o^+EnoFXMP)@cE7S#p!OCDTSWj>xzuL>7D|tOd6ciHitx;_FOM<^Qko7O{gkh&iSP_O z(X>N<|7F8xP5gmiglFh8P5dM?OI>atS`U#f^>BNCS|-hbAH8t z60mZ9<;?ktud#E!Vmevxms&RK{ZivQI$74(ohPgLigL1H|1MAn&0jZs_T{1IXNP}| z9X@`9udE4H!SI}kw7-Lm4aa_Q#4Hy)P+oN)!ZWT^TJUebJo-iSc*DWZ4)MWqfSsKH z$yMx#aGI~MiyRS78`-h6kf7c3m5^`0!*U)0 zbUR|88_ed{MNsTBAsLIGArqmG#;(aCBt=N4G>gRYl98w=vZC?| zeN(lVCh$iz=MvBy=%y_#B={hGR|K_9)c62>(-cAYR7wb^%>?0eTu@cBFdfTLIU(@B z4j41|?)Bf(|5f04>|q*}aE3C}Am z-QN3TUhzwu6YU+(A0Lfrt)&^c-K!#*^?_zY_*BxoTB+K0Le&(ngk%<}Z2shAH(bp% zGP+cFiEEJ+FDKQSz~z6;JeAJ_F_&ub%5ksCX0uJzUX^7fA0OldQ5E(%SGVYt6(phH z0oJBbMgao>R8+xsesClHVCP;C|BL4JlP45-$Arp1bV30#p~5D}bl|*EV1TY=t>q<_ zHttn;2Ze_n0DNXPtMDeDnawJtu@kBw#f8sXewj+9H1$)-=m~#V-dJ1{ltzJkpD-@C z`(aUl^i4MwFrIAw-l5PK@PQ_(WmKs{%Bb9}-bTwG|T}(_RY<$`aD#pO}l;*yS?_6C89#M!+3+5qcZIfFX!d?1@Gy6hH5%>u^8rFp;|^TS$2LbNzl>?IJkWn2IgfTaV9auP^DJ?oHy~s_ z(Y9O7+c1tsT`Ww1?KJ3xNeuP`k8u~J7)Cn{NvIIjkji_NuoNkdRqq3iWMYZE3B6iz zVxu{TjrLRbptX;Rt`OyQ3pSH#Y^J^GiNy1KuwD&o5zp`P<{whSnlBhZ3X7mrg-JqO z^`T5TDWiXM3R1#?`a8JEz1SVPM1Y|trsU(1wO@?=WNk1vF0u6=uz5m(58?L;kDCB0 ze=oOJtX9Q4`O*(VnZM+GdIv8b=NC4%2WZ8c@KESaZnvYjYRWTJ8ym2J5NGf$+ zgk)1}k>;gRdQ(x6=nE3Um#z)49dTatNu>nuxD)Vkuue!xBau%7IjxWTf}waPZ6v{K zKC3fp=rBo?aLoTWOwzfZ_NlyA1KQw{aIkfCpCFkh8Mau1Qe>d9M)NX7XJ9krdMV@& z5+;AjY&pRze%TyJqBfAtChB+<*mfZ-`4SO-u#*B)LLwjcss11ZXuOseIlGa#PXr+z zO7lWE5uw0Q*f^i?*=$F;PGLnp;dk?GLO9`5BQ`;$Egq}p8#CTB)hyST@kXm1!kJ%X z064G}>FwRRbsQs5a|k?+It3n|pZHdYYo&h~Mb>2POhzcxWJ91BkBOp7P|_+o>XAlx zo@c@`X<;9&6=95l{<4}cXu36~e%>y9p~x9Q(azXI|D2A{Z2FhgM&GVcpJ{XSPK4%; zmI><6S}`Tr1|Q#1QIIWdk(EQSc&jbzkm#L-{Dqow%RZ@uXXqRlR%=z{_I@O_~ zakYG1{eKKMH#bMVj(*dT?fhMYElZ+LtNAjIHyrby{p$$)4fty!9YItP>|}qrvSbQ1 zQvFGzkUVrc*FjT<|ej#`kHp@rIaWieb^Os`;|7|Jv)V51kIj{4GXP)t%X} z^~kdO%To3xIB0AFKuotD2M+iukLOp_rJA5>UX8V{h*~%pDb?i9YbNsf-WoEg$)Uv<^cnw#VP{#eW6|oIS~3Ymju;5 zPMVt;wl+{NgoIM^QL?z;{Vao+{XxcoE2{dg${vs4Jf8BKYph1ro-+;xbC}ue!D_gl zvYjSAd{NDpg*~-(Or#8*wV#vdNHyQVc$g65iKR0fgP2-)7BzURgU5d)JbuJ{6k

fxzbSqyvCx4_mEJWw1eo4ssD zXVxDnOWOUNas%6JXn!?gnw$4~oS+4YVhImTF0Up;ek#fh7NaC!Y^IW+T~kO|8^^>_9~6` zDvkCkjrJ;y_Nt8bs*LtbFdQ)2J7BbTz-aG)(O!*_Ut{Fg82NuSX1>v1%>fgH73M>w z`A}s(9I#-^C4`n3T*E9F+i;7vxilm~sivgTUe?0BvS!s+mPE`xhDFTw>O-cGByGRm zU_j%^+ZbE6z4{PpZH_Q3iv)L_QceCclQG|n-SO*cz9XGzQ^ZiZ8nwmKskkDCPE>cI z!xppAq6orS!E1lK!51?m!DKkR9nmOZ8KTh;u<$4bTtp)sPJT-L8l}{ui3T;EN__!v3rQ)IQpqEnOd$W<`|L6vPdqKPx2)JWYq>zuKI$tpj7hlh4qnWth@0<$U(ba4=UP1(T-abJST?ay&F=)kmDwzA zwM*b7`+|r}#^;8p%t8kg@8agI8#=doB`ar*Q8f@n21J1$GUQIHFDGzGn|t+XoD%== z^9}>T8Gn|NPX1C*H4sXrWGVEUYCHY2*a<-l{hcG_cuEt6&{rQyV^Iop;#?|F3Zj3r zC?#mTt%gUITFs;_LRgC!G>WN(N{kOAYf?f`K;EW$W!A^kF+NH0%5aC&1W99kOl=vI ztz+!IPKa{+StA&n<91y|+^o=jGCr)FRib zs+T^o27?h|7z6b|5CkGG*zIQZBp-h_OyJ7A&8m+rOR}8EC&PgVO7KbR+Ee}`F6fdQH-~81#c>|V9 zW16NAL8zA@mJtk#qDHb>Z~PiVXG7r#m#hucM@utpYO6PF)Id_H7?YA;C_;Zr>a0UJ>9CM#n0Qc`fqAaDErFl{_2|$JeiYG89gwvX{v7yZ(Hl3@-_|T zKM@uqUI8?yh?<umNq7&ykiQK1noT$bJQvyK(ak*wN~apA#RE(!YUmz z6<07o7Z?~B$uRaOXE47|fDlzCZ|p9jNNbvOk(fI<>N^Mn6S7kX0==_iivXI{Op;1! z!C^=*cJBn;zw>{(iw-odF9sqnDeD4G=r2SThqzjC5Ykr3B&5+OnNBpyha<>HiO1rt zLNrQLjh(HM15VLsOG-*aqrB68(}aY^2RN3qyJ(`7|;yHPKbX(!sC%jW1?16g`!#fl|n?p z8fAotNEhzuHkdj(kyf-HN1?D30RX>qWFSAbN9L7)xWeenaY!s^0^N-ogDPzSV>$Tm z23o9;1X$90x)gLhL7t)VQa0PsY2I)BL5D($Dx;V$;PRpDgBk3TWNlxI+$JSL`xu{K z7}!)Upy1S=Ep(9 z96%j_tsxB=ixesepcW}4p_r(JWOGWL><3IL21XtK-_OYx{5PEyAmg40N3kn!AOwBs z0ED~&Xk-A&PZ|JiA&;k|`r4FAot;2F?-ls45Mh5UBZjcW=LQ_&=jgOcaI^FQvd(BBEg@xB1u6bUAvA?r-6`N-*R{|lFhTv zo8Ny^Scs7AVIS_lv9=yHe*y+tFf0;dNs}g%O9~w(c~8eOjyIFBHXI;C;X6Szod!$> z#Zlbg$l&s1G^U(v%UG`d55PfZvL#Bb$|;mJ0c(kHAOc2zV%7CUHgREISrML4B4VD|+B@~Y0Bzz9Sw($!&CS7t|2 zVMiPV87vlOyAcG4MBdR2z3C#QNLPO=r=$;^R*~tDY35A{tgWuW#2GU@>9Tnyn4^dq-1(W3=^N zjo5}5gY%kq48$-9EbOh_Nm@i2+3Mi{3PvD*&)m#}JEagYk(Lb+v)=J-Xmp0nMQ>&ra%kmBp6%9{eJth zs)pmlPS6&+K)=zrD60ZPZ+daRpMzrbOeo=bkv@4mB-I=~sV8RyiQ!nN;?%C$tvm1g zVBA~d6|G5mJaS#YKp;S^hcs>m3Kh{vkvJLNFbuv$P2 z!-n>(eCi!}Osl44LStk^@xcvD9CbW%|k!Tv?@XigN{Zb=&aeE&xg)h z%#wk93_5EtOtCgiX3Q?nnln4T_wbKf!#{l@qQ4UT?78@drzRf5dJJ`&!0&<+v%czaJ|UxD*(2O;~dq6*(;PVl}oYeZX-p3N~p5 zHj9&N(yZ_T9E(amu`2<8vAbeH&3dx7N2&(3)vzp5+!USQC}BYCC&7p$ykuWSK#qn*jfNhOwP6%@kZ*o=_9>>4b)=J&H^7RY@I z*gLOUWAIpxQuf-zo^fWFJRUL;rFgaY9@R|x-BQDsg+BtL+*rbRN}_mzXY|}1#@^k( zP37rRoyPJ$$>%hGmbU?yFlYkV2%L%Y;fPcF!l|{k_QgKyWuoZ-#RAA32>m0qBwlgv zCy%FuIH}PxwS1fG15uYra87F zmau6%pRl;2SfW%oEyTSdWk8aKITYoXN&=_}4C}xwf}F;GKJ$X!PV6NB2|l8x77Y-D zB-P?I3i^X}e1L5LR?W5wvhrAJWk-RcI-nt#Tr{+7>xHseSB9-1q5uQLcCyqR4ZQQ+ zWkcvpHy6Y7GlF>>HRh41O|wVc!%w(}?F({}wf;YI@Kk)^9{kZg2OqE;qR1MY;-!?g zfz4*Ml((3FKSKk{qJvQ8=-y?HvMf5w3(yWa4!=hU9vrWa@+HC0L666KHk&<_&3fS- z%GNhQ#wm}-dpw&xn9X{dXvR^V8k8Am@e%Wp$J0>9S5|vG4d7nl@pzkf(g=!rJWU>t zH_D$vP5eO?l|((Bk|wsJ9kkHaDe>Es!>Go<928G~(6Bewb)<1?zc?haE>SGGM*Nb{Msxp+pnuhRY-9aM6qk zu(By$C$Lu?kjK0S4u@GJgpX0;7;zRYOhw0tDQqK)5NRuIz*fE-8rSJ{4BLde@yps(zc|0Gu*iJ9T)*92d z;9aK}dxnO8E z(Kk&<6_>(UiDHciODB%Tl#{k#R&c*+O|7XG&3aXTA-75P z>xuk#@g(pok`)ArSok=ekH)NUNE+CGVOql9M8c@(51OSZ@8d|y#1ui&juYHhWLaZe z`VzO@h^oZ%tVW10w83$iWCkU{ZYo}z=HfOx80A&xAs#mwtyGIQrg@_*+adaV2((5u z^$8ze=}-JkWhmkE+2i>>iXVrlC=-L>hFfnex1VG`fFc8U!nF8HVlM|G}8kyp^mxi0ti9 zLi;r8vZB>V9BVXRtaHC#V=Z?Q%@=&v)PcuV^Ln*anT2y#4CE=8FX)qh-wSBcTfsbm zhSQlU1v6F1Ou;L6&zZh|qDeu^M=(c-FU@vFu?+EKve~HhfaKR$hk64@s>X@65(k@y zkWk1-Sj0H(+~;ADGUiD>jiR&_a0JO_)%vJwZ-^0JFqEIfC2OM|Poh?_VlFflg+r2{ zTTJsdv22q~qrFCm9D^u-g~bSKP;JwHEa^|zIya9PVXbU>`$+nHoryZ7UJl124xl9b ziZ5t-AM9gTpEWEhr%0kG8zOazWLO5Xw)cX2!SZyV=^Dem-VXU1qk<1pu`D=wgyqL z?1?68Z1xGZfUmsH+yf;+Q^)5EQfZulJ!+&@u$djO6SOk?j*L3K(%q75ua4?i9hKa7 zRh`YgyG3#D7Dcl~(P&X5EqXjaXGNB> zGmuVR?S>5>Ge@V^Zar3{H`3QJv#(=DU&kDM9dk4ezG9TTP?YxtS(FCd1&xlKWn3ym zlS&=q1KcrN#dpj;t{VAt_*lg7Kq6;UBf)@UGzb{{r0pwzwsoDe+@_&M+1H_Zzb{n3 zzl`2O8y(|qbk5arS?6JT^`>&GAOb{Hbv+XoBu!A55TgK-%&?)N#f&BxQD}iO<}1U( z&`cF7-0S{a4xeQyl z!?OX3jjEajvIs@ zg3MCsK82h?mf+BZoYVD{J za2(u!oAhV1-ha!jgO2<7sNc*+$LrEHtSuIW(@^u3n`mJ#qjo}0oUt$iUI=m=CU|z1 zhsbd#5a}$yrhLR~q==~^)rpvg`2csgHcEhKG7Ey;h`k>Y21*_Vl+=Xx;^bimPJX)w zPJZja$-|L)_+$H%qHl}irUS(i@54=#IXeq~0WDncv?*8$3P)f{jyhRCZNjLN$4f#n1x zX$fBSi&S$}k>6RGcQWQ8gi21jk|4$(XGBDRh+s+Ua4)7bijz=(d!4CNb*O>n(BR;} zRHnomtphg#ugZ%ySfdWGMqOZyMvz*6D}zOt0obUUB(vo9S_FomxN*_)XaNR-!ny#6 zsKl%K#oB~3mr4(4%nG$uW?7c)XtD`8gfg*~-Q00epSP(#7z(fsmVx3iaceJv!tGRr zpG56NaoHC{QIvpF1ht#yJZbW{Cn86GCL-IYX$pnjgW5&lU*d=~vIoB<0tHQfz@Iey z0sb0AKbW(6C%5J($x(~Ek|1gzXxPAOTtltIHP~m%aeDz9T<0XB4)H>k_Ed?P)HDZ)CY566)80yL;rwr*la z_M&7l&3lApkC1VFON1uh`dkC^Y696QAUgThEgOMQEbTmi7A36FST)ITtT#6_ecr>Q zQIt08xvJT|<(7hd%{=IuJ*+8u@xE~c=N51_-m;~jOMk9J|u~jJwn_`oC4>0_^i`_NMA^sgZY;H za3`{Nx5)@P_PcpIqV|q;jCDDp_bhsGI*1%(hDhd9XNmI@YA^}**<#(9e3!F7KQR%t zJ4E*S#q-&*gTSQGd<#>5;0kiQ*1V!}$9+E5g7tBq7U9M4@raF%Bq>Rg3^UT#ZJ)MX~LfB;cm)WYcCN6H{WufW|aGwM|?UUwOoG>kA-fZ97%*@#PLd ze|t?q+%pu1s7O4R5|sp8R3%Z2rLF9=NCn(aI}O*g=z>GiO{4 z)j3ZjkH-s|cY4Q)q9)W16Y#e41jQ5dCe%TsjwLjhgn{V^2c{phAhTUmuk-Y7Ei=!hjpmPTMri3d=hHu|j+F6I7J`s60Gu}{$ne9ZZ zkdIwlwB9R!T4YdG5`rQmqOcz3VJwS8vPq^w7#4?AhJ3fIDH37Xklo0MHc=EbR~v1l z4eR6&C*bTB34uL@M3b%-H+Lj!bda~My=UKbr+`iwV)W>|O&C!r9XAS6j!O`+hs?Jl z)d^b8p^Pxm$Jg%n?tuk#gpK^M!o%#eu$MyPG~jA~4ckaK=tc(zodBJqrEes~D$3bL zHzN&N4pW9FlkP!A2@&Zvfe*qJpwWK}X+m8k5v1`!TS|7!r$OKobFs)Or8FUJE|r)f z%l*DZF>eXZr91*gpk9$)e2eS|qtbrBK~PyBfkcnBcGEvN0ge=bcM=9lVH^fOPC<~9 z8~PT138vXl06CiH5BnVph#XzQ=Ob#tL7f1U^l#S4^w z+aD}8stGzYzn0yA_pDP|ceFQ|8_{YuT$1pLgqG?i|Zm_gEl`@G%m&)lmXW zy^Pe7V=gVko@a?-232NQ^zFOEQ+7czz9UZD-p>bs%xRWc#Qs`A;PxBAmxSk_6#bYCJM(71Iy2Uo+(@{ z&YFbBv|}JB+$x9}K~YPK#jW)wkf{nNNtVhd^A!ovV!q2sh^xDo|8mrE{Z{!T3H@zq z-Br}i$58{Xn(?ClSHgrk`xJ)=iO*}of{g8`E1EE8k5M!Xde0hBUyRcr;p5LDm6!9z zfLj3-mXshSRDnJw?e<$z$uptPZj(*0Dp*G1^E$7$ruR@!5}7Owk8TmK}r~$B=N>z z5JVb)p`-<=&G-SPlR=CWgGop-9fQFSF>*#TVIZ5+fET#LLoHQ^De+eF5I3^bwsbnC zoUzsArhc91>KUzt82uBY64OA0Axmzk>={jo5v6mR$VAY0H9P}0H98*cD_74x5ejTpZHe@$~UjLdhm#>adNHJY2t>gyQvC6sP%F59C5l&oXWFKc+F z`E3jFjyE(nhwW<3@C6&F`Kua#=ZBM+uL+=zWC(=5q(8r; z@cNwod`f>B3G`TW@ICdROryew!)0IBdeyj+%4pe)B4mjXNft~>Z`+%2KE#5JBOxuML=$p)$X}uFOoWZLTjvWzF@D6at+{87gz8nV<(u)_EFW8?Z6l)t5+by?I35(!qMGY#!Si&?)<$2yJ zSX4%~kJc@Z7zP%`&BkalXf;Xg`awqIGCar=NpcB)%F3&I$_QnrXbBaWVqwRo{{Yny zf`K{ZZcJ;Mgu?>F&|la=c@4WKGw!^p#YjX@y0}$M0=aBz0`4p07ylkeQheEqTFt4-AEsOqRlRi95~xw!F_G{T;Uwfq5&CEf>n6bHnqJCMzi;=ty_*}deeGN12*;+2ug zIy6duF2@b8j4G-iKT)R=OXz|DBtVovlnDuo6|q%RjwKdiiIkKy<#er#kBSDs%PYuo zX-jAM*Tz4_iijYB=GLvv;_TZ)F{`oGxM~|n5cWcO7K^;pT%VflA0SSM?Jj$& zcCe>v=kt^jsJ)JF023;irxojip>mU6xo1!ta0j_)atcb?`2cFLy{Ox38%U%r;yB%Z z4nh*q6#q53gd|O@^;lA1kC$*$IuYer51MxU#z^ zlZ)@MJ;v<9aks7Mk)OHeP~JA8yT#lmdu}Q@Z~w@wD9R;iKz9S>(k_%sy96+QaW{_1@4+$I`SCQeYTU0$sag+$=&Qq?U-Svy*e-0Fjjz3A<0^&StoOaFY{*@e`aN z@hz;vob8bhYFHIf_jhD@!Lxat((o8^z}^aHQhSHTa0JD$BW1Y5W6-xK38YP}!xVQo z1o%Q;zl}$22VG!~EzGh83GL?lw^=y23}~FGA53 zppIJXp0Ospg~28Yn;_w!-Mz8Ja6sid?1WHWfljsvVmXFWEYL-NKDryhKS%@&7x2}1 zO!_5srRIHVz!lrv_MHbm5QH~N2X-(Zb(i(g(G!13My~~I!am;~sz##(4f*TFk%9NzD*<>~g zCI@^;5)u;GY&4sHO`98~0~A!i7D99_ua?c0w3FgQZw1_1sNG0!f?9|YlTf@fZ#Ap{ z0rrQ=Yl?jR+WcO(^%d;*eEquME7w)-uUYT;73;tJ>(xK;qt*MJkslo&D<;wYC>DA= zB>COwfeOx;H?hy9QzoCk^i|X; zMI^KmYXk@=%%M8)%1?^smD9YUaufIx#=6$$Yx=eSv1DWWsb2E9|3R~;a?$)sq;{o_v|mW8-lzOMd1h9h4`zv;+!{?5;sK|C0?@C!r&+mRH*f;$36gb#K)zhz$p zryS^FVMcKzt0~WB0TxICzk~eXFGcE@9``}xL+`{ma<7#{6v~R}r<&gwDgm{Gx;L~P zz7$4iRJD$;swxM^U$vI6I#3a;p&$2D9G^meEJoTnu?~ut2Z-jR>XVAf3i>5cqj6s_ zLZUfZ@-L>jneoXcrOOv@xnffqBu51l@OYA9SdD;^tKRzf122a}hX~6ugM>#)O9V9@ z%Ls02LINr~$;WSg3i)vQV_R2Eh_2{#TBq1yung3NIln35W=+?}?H^@oPbAhE(UXjS zC;!q#1Qb?cSd3(|0k)P`t*BjYej{!$7d~D@hL?7Nmr$Ax)N49ZSwUUCVZiBte2tc) zSef9{z95$J|c-r{%Re|@8$w(F5 z_F6WpP>=s&Y><65GI&JO6GgUSPFe_5>_VX!-cjXwd=4l7O*s*|6glOQb63d_kK825 z5Qr^ND}!-cIs4waK5&A}zlnChstQU-G4CG%L}LaQ1nfnElnD8Qh+JvZQuB6ygXmp` zCXnY&mUFWUYB4UMHBmgCJ5udPsYT=Qn&1T;0ns#<(kakI%Vy)Hf|Q_Nb<7LV3tvJe zxe-pXoN2;wBR(^djIg_^!L>=`3vUehPP_RDH~3ba$twNscaY=wTAQO$v9M8t&8SmHV%}>??*EA{q#CvZhs#O;^tY;vLjuN@UE{;{X@}DL=j1b1M*a|Va$o( zm=ot&=eO%f=oEzn;#DH>b#Eo&DC+T040WfPC+GeQ-(gBELtoWEd~NN23mPIg^ba+T zPnrDoXHlP;Dvw7YzM&sPgy0jimE~;yez88I?A$iRt`TtyS&GxF51|{@CjZOHN9PBS zk4&CJYXQwrc<>H|I|8TJUrB+_c`1~|vdIQO+wodip3SOz5)ql#!Xy`NxeAFe8&+Vy z(F!EX7qn7ot0aRyS~*sKw0CnyLXJ`0&Xwtt4+8uam0}sqQfD7y>ljT&xCGsn@p?wp z6xmdThZxIeEJ3J|l~1xD)uwlr>vX(c^ob!6B^f@kms{ zz>*jDN}>JspOB=`m*TzH=!2K`oJ12SjNA{E>*I!sHp0tu5!oG?(KgelI}H}FJ! z9*WadY52LbQQzsjP&j1LO`?S78-U=+C+YVYcrsLE4C-f%rIP@*h)l78NECyHBV~Yv zwWD?zBo^!5egm#WdIw_E1nXf9eL)i7@m)yRz@mmaK2WcJxNa>87!eyHIB5JJaA|Ot zYC`QKCT)RJLbT1Run}aLFB!L)B!Z2@%cUUaBOmV|X6&7|L{U~rOgKZM*v{FpSN4z| zqEoiqFdm6;M-5@(DI|J4r)aIPX)P~C40}W>PbOMi`-7jh*Q=|P-;drQBXnQaY^>s= zu5^YZz|Ccn=EM!JR#>KygyvgkAHeulAteyh@v3prT^?7SDX|;>p9K%%AmM)la2e_U3d1_~Yo(yt2rr z-ux{EN!X|MK17r*+9&hgl!y{$36`<>&mGM4BdAif!_*7{#K7M-r%Jd~6jUke)z9h+ zX-G$ZK)MXXdvIu;5vUq=pYbdlF>3e}LUj+8fKXjxHtg*}lc%Nhx~uRrfR>7C>n>p|yNWq|<>*%WVLa4UJBHkJJL z3C;m|pY%l=7DB$N_N8)&spE;kt>Q*)+TB+FFHfJj^Gi6sgzrj`Z+7zu>e67pwPn74 ztQuGA$@=Av$4&PbRb-;pMuxX(g4rOzYC|@?fSA+o*tlqidx? z%5`K7Dt`2fR_mnuzCRS_pzybf2b;To_84y3?c*mCKBSU0Z;_nNA6NZFlAA+%!-tHW zCc3+uRim{PAvbpIg>=+vJG@+G&dDcXed9#bErU_goA;GF$5*|X87koS%@Yy_zMidG zC{$d`&Q;Lv*0Q*QjpAN|V`;&ExWd`~lh5X@q}}~&-oia8rV?(2rnxXR&Q_DvP4oWA zbMoE2qaPNd&xifhRSVS=r`+h}_7g0qdyS+&qw{;{&}HpXm)lMCQWx|vU%{0dUW6{z zV8z&AS@qhqX)o<3?jn=o%CA-mg;k+Boo0`BcT0N9O%1qh8uqz=WuC@!50b)_ zo_t&%U7W1ogfm2wyx)V_c`uW?S*B6S{{~MJ4Kb3<0*3<)& zIx$_@?&OXC(hM?h?}eBnLb~D?z044M`D}E2DSGq%<)EV~*3$kZ!Q!mc@Jaer$qKil(H7ub?b}(#n zK{jakE*^qwt1_$YUz$PhqN-Y*-h8rZ6?pw~6|0w{){a`WjO}@UQ@I4?k%3n$}OI5#Bb>mPbX}BR==e42YQ{Q*N(mZmR>Vo4*m_j754ci|C77E zbG4YfO>uT%eKBr-^3SdctrLBGc{Ls`C(9=Psh;6PvENY@*=F${>7|s@W;eyboSfm$ zDJi3xJh>SR_$wODo8fdgOVYEe;beiEMQ`?Zt)frg>D^Y|`C@oM_mpNAk)GdFkL8?A zKH^x;+j}n}eVboI`Zm9a^tddd_t*I9K#kf}`6{pSRdKg}>DgrKx;p1&yzh3?%oq0H ziJnDomUE@$p%Xo{i0Ht>aj8w20$Cdb7P%f0)P{PsJiP~16E;}GfIlQ~!OWpOa4@ZFnNw#^Coh|Lkm3L|kD zo8ua!_Z*vldhi-qUK%$m^50=tJGMpTDwpYHj!?%R1=v{*Sa?}DaN+_t0^)h*}jeg@w`nL|#R)iWSl06%m_=@XA7wM*k~WpUMJDJeFI$8}&P z1k5?$xZLiZJI!=ck^4tCbqVe`?WK2jIOhWhoP9BWyXn2W$|8hhY0s11ueXx^L+|UY zl<#_@=~kKVo+rJ2_MDe0svMN@OY-u&#cRcion^EC?CJmgfBy5JS>Doz!dJ_5J({0Q z)72_nP!yfk$)jdxM!{doE+Gg^rx+{+-EnV|E8l&kZC?sIxYKdEP8|MYawY7g38yYs8|N&el{e6gbM z@L>Se9~wJ79o?GEovps>;$7qY&QR~#?e<+?2|GAJ8O8lgyR3XXU#`wp>EaS9|2aXf z_u1WEz)_!DX}>9iXbw)AqIL2}i1uBx+Y}Uki016Bi&jrhJ%K;1lSiLz*HHlDYM23| z|K#KsgMAP|A70<}TWvt)cH{Bp(tk zZf7Ohcc-VfyCojq^*33G&E4r~hDeVqC0^h4n>0nIr)co;zJBx{XC)rH61}{M8k(Yi zpctSd-mT^p{p~NG91SuEZuQ<9+1k2<>zKN`5alijQnYO`~9ditcqs}yR#v)S-6?LF>e*1AW%)6>rB>1X`( z)GJ1MHrUB**7HAqoYnY#)@#0>3DGZqV^`fOZsSefqk~nqdJh3lU1)w>FgzcNjt+R zKdh%8{Jcm7B|X?IG9T^%Rq-+1_=M^zz}!=5CHIr6cJ)E?+iOne$#9C)yX))g(^Jq- z&oKkD!7Ig?^U8^;%%vmTcUfdHSHD}_`RxCNNcH18P{Y<;l~gv{t-P9&9(0<&LgL;z z%@{KBNU>fv(K>n5_qtC`PsQ1PuLkH}|Fh21-m|~FzJoBZ!)972UoFwDwYf8kosuhU z_o0sJ|0+G%MSz|DluCtlQY!h~9n#g!7W9i=GMr9Z!^Op#_L5xfITkZ(-_DMXe*fmj zG+B9X)0-ufk~^gq?OW*Jdb9pwuxTSAX64?Bb)_F!Mb*7X=1)j{shS>t#uX!ls{~4j z{+w!H?yEtw;){82dDn2Y{N=snT^zh>`HRtg<-f{`)xM~GgpNCaMin|iH1?B`$%$n`3+V_@%Ht1h&5VD?Qd>&Ax%?8vOCT| zjMhoos^2|d9t-*5YFp2LIcsmQ=gPWtbJM$Rj#sOzZnJxv8|J!)LT_X^)>;*Q_%m;j zdiWm4pz`N>B*xRzC#R=RnjOl{W#RH|n>d#_SO65xu+59Aoi%IGK3Qa1^dhHS>ae?V zv*9$VWw+VQhR2)dG|J-j$&zx!*I+xTYz%)tY?mn(*8;qx;VDmyt~^@d(u3S-5SiZT6Vymn|yyhY+VgG zXx-aE$ZqkLIy4+cRmbjX+mA&qZ)-psesYDuM##q9oo5=%+`au>;Cz2aCg&B zSA)9`{o&>Hpvm9g^qOxbAMTR*Az}LFLf{Z6;aXDJ3FI8Z)ayP zJ>ZqBXrw+oJL@$obpEvdj}*=a*NLY1(S@qCm#EMrUVt zcQdEWJ-fjwabZNV{ZFoh&vj;%i(^e*|;4PqJGUf#4yU!L~KW%1m* zuH|$uMu!TP$z2C2PGt-HN8#e7SIOvBo=@C=r`>8%tgc+)kIP_OkFx_t(;~}#xq-+1 zK4@+YejBI*IwP_LpK@>yeO;8~yOoq9d9aoeSey2CP0i+DdwNR{0!Ev{?`|8>%39jq z`DD6E7d6ln3*M2oXA~}BYZM!JI$X6bZrT;PkrIQZ5!6;&w!1o`O_kH~?rx`=9a7?d zx`kV<$wh?!nRQ)kRW^j+FkF;f`%AO4+gPshF8gw|Nb4i2e}=pw0C)Q?&)p&~l@hTlwRj$k5_khw7-JKK`-U0yFY@0Yh^sOw@zBO8~(!; zUllnO5h6>e0=$x(1G5O$wj*!UOP1~3-0-w$uf|)7iipYg-rv*`N~T`KXDymin3+o>E zt@g~wqt$KU{G&pkp?x zGDOF(nr(jJAiJ!?Y>`%_>N*rMn%!AepfIhZ%1?ofiveCwJ35*^+XIWeV9dw%anPJr zV3(ym>&Y!bk#pQbgr{W6`MTaiY4W96qlB>J*Oy9-9o4ww;N;;Vg|OFJaTgx6-Q6Ay zO=@i{Ifv}$*FnqU9gq}%cvK^ey&kT!%{FsGM|SxF0VaF<<|~U=v^ZH6j?&$gbN47n z7bhm#ghTyIYti_w$75#E@UUO}& zez`bI+ACzYkY^Srt$%F_zu@_#$aZ@lFlh_FpWD0ewBBUt!ql#R+9#_nF7KEX-Oj{~ z+8cZVlOa}>x129lt=)|R@!X=B1-UdYg&L6F`dzvp*|H{8w7c6a=6Z~Sa*o`pVQbjs zR;r3Xt8B*M>`U^g<8n`)V4tvFOiAzI#Nu8_x{i(>J^4%alAo+_#X;~~J;jk1m$B8G zLKRaDjhpYnzFG8twDLngGJzlRne3T>znS(FUrc&B;ID3aCgSgIdZCp3w^zMT^KY(t zN=N)pSG}fa@-J`#0A}(h{P|7qN&hdWIM)?^HF(0m$NTAf{hQOZky4+ozxqo427dzu zJHUK7V49+0}Z*W@$QFvg^qxU9ic2Y?WT5izQpl*_Cr%q#L|e z;>~b5Nt)f}*VAcwF`Q=KKI<|0K9a08o3GfAQ6YxVPwZ+wnXPC8<`f`;K0pPTLd3mq z_`6|mCdHd6Ur2ZD?)h{+U+^!6`@&X~x|g3M)McgWf z_uDHM&pqF;{QkW4r>pjHLZPmco^0>9C3ZT0({^q8r|oATR7XcY$yS9}fXceK-SxuN z8k1-8B%$L4Q@WTvS8*;X!SfYA>&@ix@fiW*cKQsS=xC}=TH3{Rq`$w}YO>R5w~t>v z>&@ie5BaNS3)yOYvk;5+lZ&=1lM~D)2|2GmzbN7ZoIUN$Pu@(sXU}@`&SG-1m~_v7 z1jRyp_4K;*MWxGlW>Q=qU!eML7Eiv|t@`5g-WQu(SDVW+9d$hK{nJ%zA+VVp;kX9J zj6b<-Kl^4OmOHg933-su7r*RXg1cR1hh#fk@^Tp+S~#orhX3vm@*JlL*RO-KQ z^}Z)<`VcpI`DtZ?s!Gf*vA3XQ#7# z_RV5<_7Gv*_3kXY{MMb=0lL0&=$d2As|;&im00u2Va?eN)|`oJ{?g%23AmeoQR}+% z#V=n%0z_2tg=~HC zkFyD-e|@vzUqMWAZt>k)oD=CjQv5w0e!pJfnI>t8r|EEZ@!cXlpS!o`R*-@*kAr<)N{SJ<8;$Vx8E$}_52bWk`S}w zx3t@%<1vF`p!tX8jW&aF?0F`gJ{oX()!m)rJKKkE27uYOyD*a#n92Vaz^rz^kqgM0 z-^WzmGgMymTU;ht-X@fP-Gx-Ud+6SoD34oJk9Ji~Mir;6ii2QVXRz~7A6YHM>9GeV zxzfhI`Tp((_hPuq0=~N8AJWCV`{fP5emS4vG-CJLn+^X!VKv^p$!>-HarkbSJ)Tdd z>D_3!8n(Mn_zM)nOP9KP`lNL+IlsG_UEKY6m0rNF;i`RH3AXWn$L*twbU#>~Smv_b zhm!uiC?30I5E8%FFT9*GFR0Qlr*fw3vs#pU8ask~!{d^$VqB zncRNXlkN6uadTUK*!xGEzav6gsjj_Apwqd{`OBM0YdVGAyyE8YuivYTrr*=A#nSbQ zFK{;s;RM}x*lGTMvRUctm-iW+zN{OazATMSV|YbE^^l#wNqfz#k!E3QbN5H3*X(SP z-Nb{nM=9^u**!n=g)4G7&22riQM$-Z2c9p>i+U#KH$7+ga%t5N9KAngHl$k2i{5AZ zQV|KG_fwX$UQMRU-ftuKmIWQ^-K=52`g4>GBqN}Os zAO)lO^-N4wui4cCcc;T^W7c`xJ-1vmLI7vt)?VZEsSR;=-(E z#Uh%G7_u+mRV2Xre_XPLx?gWKXTKwGwM2h5paW=VBY$3a=?r)J{eCvRY3(gsZ@);^ z)k^e(wdnR{?@YSywzk(!dC?yn*Y%dPrqUjCJ9Nk3Vh=wY>_TDLzMb`Eg*J0MS@xIp z!8;Pvb|?%tt6J$iP*rWD*(VkK%C>TJS>W`nAYRM%Q@J5pe^)G4c{CY;mC0omOmo+>wZOM>_Ra=)&;oYU=ct`_|9yG1Xdp%tt*sv_89N?hgNJQs6>{VaxG+Jsp88 z4KocvWqVY+2-fd^>W-@+*9oJ>7wzPFWQ^-@gHY-neur) zp&OU-*Y$*=Y38qgTGU=g3qI>To3)Q`%Hpk=vxuL(n@>hec4yvjI-OrX|3mxa1c%RW z%BpsYf4W&HuLIrsX0-2{%i+~ky7+26YoDB4ukwQJ_GUAmxC;c+;oFzPE55G3{%KKr z9o1ilW#L!piYG~lFJ!HzOL=;Ed-F-VGdr&6@Js6Vy7AgYOJ3z5YL`m;vRLzU;ci8r zXF*x;J1c^&lOqB$?(UXCJ*x(StBkoa&9mb6f1zaY`^C5C&iAE#T=e5l2)%;~Lcw)^ zTBmEeb{cHJeK<$_ui^y%CW{0nRDQ+M@_0tZMHuW(7tNz1T%z=`eVTttPnNZwILDfs z5?yE6lA9*IjLcq4=WmA7@5hrRqEeicuidP`^<*}hUsI-g?AE@le3fS`)9E=SuLHR?QmDFEd@foR8Miw2%%<5>!7PlpWdP<$mNO%{M^Zbc127{N8Q)9%oOM&@COe z>U67S{e^s)mF+EY*KccY_ooT3m_xU{;@h`qb_U1lRwZ%fRje*+K5*giUXgvT{KX5$~F=aX4l9Ew`=e<8n3XY0#!0p1Sf&PBTFPWnR}ecQBKZC-16 zzSZ)flW_Zhiw`!NR-15SflC)_ z^8|F_{n62TkGvG`-=v(p-PRJ+scvOKoqx4q}t8XfbS$^}9FztCyC&yN1D|06TQ z{_%MHOxcq^_L_hD>;Km56rTxYfBCrG{bNy)Qx2pk%L? z;nvBdyWa3o?@6nVsI=`T7yPpFO{MO9?nb;RaH&&W6}I6Qz1!wdv)eoxUS1tHdGi^b zFIISbipMD)A2%O&n@3P^BKpkfoJ$u@GJa}?Z633b^2v8X#If%e_Y5u29wLaW0&($%GvdJl8o8a zdbOk*+_wuYS2xp?wU%>sdODe{*l;#tr>D#HB^xeMHXSZ5e^SoQ=L?p;A6{Nf(>A9f zNjh7R!%tr2d`8b#!;2Kplhu-8A57cxCQqi5Zc~gn6C!I^pm)s;pRZRSfjLdjSK`O@ay2=hKs_+LShl_9hM%L&N-6W%OL)CA>?!>`+a1^!*&tkv*;umSWz+w7aq;Fcn=jbo#p2@P&6~&YOJl!i zdkoSzbDL&1s$-d}71|O9vTPSV0-TU9R@sLRvd)E9#v$Njt%VDyZFY_V0 z@B;Y&e=jjWD!`tO((^q7`!xxxELksC^GoK6W~h#FvB#&U=ks|teSbBa!S4N-wbsj& zU9P9A$<;Jv0xLYz9nk_Wa$LXKUe>0Yf&QRA3}WTIXRV2IBUrZ~!1Bw<3?a(CA5W6E zGh7#x4Q{r+e3LG=aQzkH)Sge$B^$EK$!v1Df4+ob#eK;!N$KU%5ubCAc?#x<3E=GY z>FM&P#p*R%&992I+ac6so7e1Jx>!w;;j~hEjK|mRaWq_3Ue}nn@)d3x*&FB_!1-pr z7=hN!&r#s~&HTM9`eyz<9cAT^K`4P-ugR~f^G*JVm&5lh$=B^{g2m-zCZHu0#gN(4 zf92#u`fLkdNYwsnIv=iJ?48aRUoT;BsV$fNwB+URZMtNqrw&!$q~qbcluajZQ}!){ z_{U48MUDdO^c38mit_5p&Wp}xX|=Gc6EvJ9WB5=Og!K(Dm$JoVDNw0?qV=!^>ecb57nIf7FNgJ-9LAoKwy0iR&&qA5K=|^YxUVT4jSo zr4??FQ!FrzDIAU*(M0g`&T&Bf4Q>= zT@Te0S#SR9bCTR(Kssi{NeTZHnZiE%}-G91Y*=qoutqeKZWSC%`k z==2>{P6oU4$!xO9C@q`L=T}RQeGLW33WdLR`8Pb1XNq>fq6xmM#SIFcucyTPh=TSeU1GVsh@s}?aU4>LNkxlf2N{@`aat` ztaIg2yVx9M3CFpZ?)Eu+xOE6&(mt8=<)AyM#C083%oSOELl3m2F9*lV>j{NbZ{H@v zWm|#Lf;)No zq&IJt*iN6}p9UuUHDz=N3w}NtxIE zzIEQlAxMz;f1pX6Br9mq__@;~k9!pWR8Vh>eRxjGkB(a7-Z=Y=e{P8y-`!yopR=|$ zAwN9Oajjve$@61sQyJwzH z9?5QHHd_C-(*c`UxApHlClqCDvLrz#9f3?v&RY{4io#tjrI%K*vcL4Uv^uUd8KKT= zu}ctSx+vT`&!^^5KI96)s!Z@nncSMRyX9LuC+vUN)~y49f1LM0ouDnZ>0YuJUS6g3 zLX%N%QYk*HmptDr*H@*Af1LK;r~T}^!Cv8VZqp5dSMe4eH~ z%m3U9_#*9ve|((w`gAU<^;<3}47ht+rFfP3>yKPjGGE}W$oq`zN?w}cwaEN4Hz-_DsydMS;kHUB`*zl}u za?rr`1M$X*9Q1 zEA*z~5&Iov-6CZDq$iIjProbgxhb!#8TP(Q`;+okn{yo7dOCDYn&ovKVX-j*iw(dt>N~3xO7$E&?^sL0(tIe-*i7eU}#ARgl{A7W1@8@7Yy$NZa$g zNxNMM)%9x6=&okv=z6;CI(9YdPh8LN)G0dmNFEnA17$8(t6n7eceA~ApXSTh@@A(M zXx&+NChgAaMk2Hqa>+&LsgTo)P{y-dM2WCo#D(8<>=Rzx!1QCtJ*IXIugN`iMlA7~%+(?2L**?CYY!!MTSFV88 zsxf`tSwGR93~i^$M6f0hJ}7d}*^^4;rB%a@&XyYu=*lL{V3r1gPRVFTf`jQk9pWgM1UCr_;KA4fVdPY_Aw6hB70hjz;Zw2 z?&m|}GCq3KWF!TQvtBmyUN*B& z+(?YiLx3iTJQajo*@ytDJSPCU$ zF0@6}*;67yw*i`McG>4^-4RV8!ML$hf-WvY|OML(LJ{O_yHxlYUI-t0=m;-L&r7)VCn7bb=Ye!ub(NuJnCWOjM z_nB-Y+BKlvN)v&h$%)mHf8L}^luIom9%{|A-$r5sE>viVRuKS940O;~CqWP-5y3zc zom3V`EBq+(Py^A+z($^JB-R0d5IS;{G6=kg$J!Cq*t0(aOjY_y4^l1I^O z(9$#!F`!WA%THM4CnBW9rrjC=Yf;9nP?;6U7?xWt$|rDW?#B)re?mvqBd#nB)?!^- zFu;(%p8YOELP5o|-^NnQm_vHX zeqEQ{G?@SxsI8A7gmI#R*fBhsQtQxJh9MO~{W8ipnD1#_F0p%8SYwM^`d-jjiU>`Z zkl3jXwHI)uf&i-wf4CJ=w$Mb_m5Q`mYG4VG1r_4d8y?d#iyMiEvQ_3Ll-9@xwu?Zo z8{_SDU&El`AwuJa#IAIxOo2hh5V%wr6Vu5EDdKimBBY_zjG$#b9Y+;{WZa}0hX!CT zfSB4tlK`;L109Khbbbh$6b7E=%Fn?S@;Hhduq;pL{UiWneJP!szyqo2$>)tg1|L3(sAOOVMkI2t6VCcc zexGjhrTd&=e{!4bg=F7StxU3MV3}{(x5~HRQfm=OH2R{%x36pH1~{9p;H<}@)Mx~)FG@EF+DM%W8X8ze+cbM5$bR%w9-rjzF{J=Oog(M z2p`1D7*H=`L`YRA!-O(Zp$vY)P$+8|{RBx6Foi{A3{xg%zKZclL#vntfnrmwB0PK` zAiM^2U+m6V=xc!Vs~wO|dMY)aPjZjPW^P1SXGR2Ao?i|pvu5|a_uZ^D;fqyv$=Gp` z4;{Ede}ND86woNJmWK-%6nJ${4L%AC57hAea5_c)*ObkNML37^4gaSty0taLzx~I5 zF03Ihb*vc3vkKV{4pm?1SbboJGBWeY-~Q|WrK|%eE!hhdMMY8N+YjteMp4KA`ghdP zX*FM(Aas3{MV<-55A09|j<7fg{7+@ezNNA@e=q8qo|_-0Ua0=HGT!)fx%4d&nW8n-z)uyeCo3F%jO~<{n($8*YV%{x7D#|YR=k9 zmo<%LZ$@6nU;n$xSRCk#Ea9&SrUx=;HvKT7K5A9MJB%<9`!kyD)PZKohK;F`LdnGV!UQbPUMRwlMS)P-Fyk2!F$?X4!B4;> zco?J^5eY3t1U}th5<(gXrSd@|Ju9?lm=eAhG2gQyM5+XhjXlF45<~d%LPjDL)BsGY zKB1SYgWE$+>coC?ie?{#~d7&`O_p~r9^z2eG>qU&78wq8zz{3D6 zbKkHaVi4Sz(vhc`A9@kPp2f0Qd6rpp*f(CtV&g?D4m{12_C4DGkWk7qEMm%14fqo= zWyV3Gw7@tb;p2~zObRW~yHHr6tk8g=AAm(fVM92;P6YvdY~naH(L3SG7g|M$f6